author    Florian Mayer <fmayer@google.com>  2024-02-23 11:31:14 -0800
committer Florian Mayer <fmayer@google.com>  2024-02-23 11:31:14 -0800
commit    886b4bc97b0ed5a5e041a0117a584182fc7989c1 (patch)
tree      43cdc0e15e12c298c09251dda38e834e7e778049
parent    af8afe08ee20a04b2ccb363cac66aa02cfaecd02 (diff)
parent    8d536f83545f071948888983e2db25ce23a8302d (diff)

Created using spr 1.3.4
-rw-r--r--  .github/new-prs-labeler.yml | 20
-rw-r--r--  bolt/include/bolt/Core/DIEBuilder.h | 8
-rw-r--r--  bolt/include/bolt/Core/DebugData.h | 8
-rw-r--r--  bolt/lib/Core/DIEBuilder.cpp | 20
-rw-r--r--  bolt/lib/Core/DebugData.cpp | 3
-rw-r--r--  bolt/lib/Rewrite/DWARFRewriter.cpp | 2
-rw-r--r--  clang-tools-extra/clangd/refactor/Rename.cpp | 19
-rw-r--r--  clang-tools-extra/clangd/unittests/ClangdLSPServerTests.cpp | 8
-rw-r--r--  clang-tools-extra/clangd/unittests/RenameTests.cpp | 138
-rw-r--r--  clang-tools-extra/include-cleaner/lib/WalkAST.cpp | 5
-rw-r--r--  clang-tools-extra/include-cleaner/unittests/WalkASTTest.cpp | 18
-rw-r--r--  clang/docs/HLSL/ExpectedDifferences.rst | 110
-rw-r--r--  clang/docs/HLSL/HLSLDocs.rst | 1
-rw-r--r--  clang/docs/ReleaseNotes.rst | 49
-rw-r--r--  clang/docs/StandardCPlusPlusModules.rst | 3
-rw-r--r--  clang/docs/analyzer/checkers.rst | 27
-rw-r--r--  clang/include/clang/Basic/DiagnosticFrontendKinds.td | 4
-rw-r--r--  clang/include/clang/Basic/DiagnosticSemaKinds.td | 4
-rw-r--r--  clang/include/clang/Basic/arm_sme.td | 2
-rw-r--r--  clang/include/clang/Driver/ToolChain.h | 10
-rw-r--r--  clang/include/clang/Frontend/ASTUnit.h | 23
-rw-r--r--  clang/include/clang/Frontend/CompilerInstance.h | 3
-rw-r--r--  clang/include/clang/Frontend/CompilerInvocation.h | 1
-rw-r--r--  clang/include/clang/InstallAPI/HeaderFile.h | 3
-rw-r--r--  clang/include/clang/Sema/Scope.h | 17
-rw-r--r--  clang/include/clang/Sema/Sema.h | 2
-rw-r--r--  clang/include/clang/StaticAnalyzer/Core/PathSensitive/SValBuilder.h | 12
-rw-r--r--  clang/lib/AST/Interp/ByteCodeExprGen.cpp | 6
-rw-r--r--  clang/lib/AST/Interp/Descriptor.h | 3
-rw-r--r--  clang/lib/AST/Interp/Disasm.cpp | 79
-rw-r--r--  clang/lib/AST/Interp/Interp.cpp | 4
-rw-r--r--  clang/lib/AST/Interp/Program.cpp | 2
-rw-r--r--  clang/lib/AST/Interp/Program.h | 3
-rw-r--r--  clang/lib/Analysis/FlowSensitive/ControlFlowContext.cpp | 31
-rw-r--r--  clang/lib/Basic/Targets/AArch64.cpp | 8
-rw-r--r--  clang/lib/Basic/Targets/AArch64.h | 2
-rw-r--r--  clang/lib/Basic/Targets/PPC.cpp | 10
-rw-r--r--  clang/lib/Basic/Targets/PPC.h | 10
-rw-r--r--  clang/lib/CodeGen/CGBuiltin.cpp | 63
-rw-r--r--  clang/lib/CodeGen/CGCUDANV.cpp | 22
-rw-r--r--  clang/lib/CodeGen/CodeGenFunction.h | 2
-rw-r--r--  clang/lib/CodeGen/CodeGenModule.cpp | 10
-rw-r--r--  clang/lib/CodeGen/CodeGenPGO.cpp | 11
-rw-r--r--  clang/lib/CodeGen/Targets/AArch64.cpp | 43
-rw-r--r--  clang/lib/Driver/OffloadBundler.cpp | 40
-rw-r--r--  clang/lib/Driver/ToolChain.cpp | 23
-rw-r--r--  clang/lib/Driver/ToolChains/Arch/ARM.cpp | 24
-rw-r--r--  clang/lib/Driver/ToolChains/CommonArgs.cpp | 37
-rw-r--r--  clang/lib/Driver/ToolChains/Cuda.cpp | 43
-rw-r--r--  clang/lib/Driver/ToolChains/HIPUtility.cpp | 258
-rw-r--r--  clang/lib/Driver/ToolChains/RISCVToolchain.cpp | 5
-rw-r--r--  clang/lib/Driver/ToolChains/RISCVToolchain.h | 2
-rw-r--r--  clang/lib/Format/MacroCallReconstructor.cpp | 68
-rw-r--r--  clang/lib/Format/Macros.h | 10
-rw-r--r--  clang/lib/Format/TokenAnnotator.cpp | 2
-rw-r--r--  clang/lib/Format/UnwrappedLineParser.cpp | 6
-rw-r--r--  clang/lib/Format/UnwrappedLineParser.h | 2
-rw-r--r--  clang/lib/Frontend/ASTUnit.cpp | 15
-rw-r--r--  clang/lib/Frontend/FrontendAction.cpp | 2
-rw-r--r--  clang/lib/Headers/arm_acle.h | 8
-rw-r--r--  clang/lib/Headers/cpuid.h | 1
-rw-r--r--  clang/lib/Parse/ParseExprCXX.cpp | 11
-rw-r--r--  clang/lib/Parse/ParseOpenACC.cpp | 17
-rw-r--r--  clang/lib/Sema/Scope.cpp | 1
-rw-r--r--  clang/lib/Sema/SemaChecking.cpp | 8
-rw-r--r--  clang/lib/Sema/SemaStmt.cpp | 22
-rw-r--r--  clang/lib/Sema/SemaTemplate.cpp | 18
-rw-r--r--  clang/lib/StaticAnalyzer/Checkers/ArrayBoundCheckerV2.cpp | 42
-rw-r--r--  clang/lib/StaticAnalyzer/Checkers/ErrnoChecker.cpp | 2
-rw-r--r--  clang/lib/StaticAnalyzer/Checkers/StreamChecker.cpp | 39
-rw-r--r--  clang/lib/StaticAnalyzer/Checkers/TaggedUnionModeling.h | 3
-rw-r--r--  clang/lib/StaticAnalyzer/Checkers/VLASizeChecker.cpp | 16
-rw-r--r--  clang/test/AST/ast-dump-using.cpp | 7
-rw-r--r--  clang/test/Analysis/out-of-bounds.c | 8
-rw-r--r--  clang/test/Analysis/stream-invalidate.c | 147
-rw-r--r--  clang/test/Analysis/taint-diagnostic-visitor.c | 4
-rw-r--r--  clang/test/Analysis/taint-generic.c | 11
-rw-r--r--  clang/test/CMakeLists.txt | 1
-rw-r--r--  clang/test/CXX/drs/dr27xx.cpp | 97
-rw-r--r--  clang/test/CodeGen/aarch64-cpu-supports-target.c | 52
-rw-r--r--  clang/test/CodeGen/aarch64-cpu-supports.c | 54
-rw-r--r--  clang/test/CodeGen/aarch64-sme-inline-streaming-attrs.c | 47
-rw-r--r--  clang/test/CodeGen/aarch64-sme-intrinsics/acle_sme_state_funs.c | 59
-rw-r--r--  clang/test/CodeGen/aix-builtin-cpu-is.c | 71
-rw-r--r--  clang/test/CodeGen/target-builtin-noerror.c | 1
-rw-r--r--  clang/test/CodeGenCUDA/device-stub.cu | 10
-rw-r--r--  clang/test/CodeGenCUDA/host-used-device-var.cu | 5
-rw-r--r--  clang/test/Driver/Inputs/hip.h | 25
-rw-r--r--  clang/test/Driver/arm-alignment.c | 15
-rw-r--r--  clang/test/Driver/clang-offload-bundler.c | 13
-rw-r--r--  clang/test/Driver/cuda-cross-compiling.c | 7
-rw-r--r--  clang/test/Driver/hip-partial-link.hip | 97
-rw-r--r--  clang/test/Driver/hip-toolchain-rdc.hip | 38
-rw-r--r--  clang/test/Driver/linker-wrapper.c | 4
-rw-r--r--  clang/test/Driver/openmp-offload-gpu.c | 20
-rw-r--r--  clang/test/Driver/riscv-features.c | 8
-rw-r--r--  clang/test/InstallAPI/basic.test (renamed from clang/test/InstallAPI/installapi-basic.test) | 0
-rw-r--r--  clang/test/InstallAPI/driver-invalid-options.test (renamed from clang/test/InstallAPI/installapi-driver-invalid-options.test) | 0
-rw-r--r--  clang/test/Modules/compile-pcm-with-pic.cppm | 21
-rw-r--r--  clang/test/Parser/cxx-concepts-requires-clause.cpp | 27
-rw-r--r--  clang/test/Preprocessor/has_builtin_cpuid.c | 5
-rw-r--r--  clang/test/Sema/aarch64-cpu-supports.c | 26
-rw-r--r--  clang/test/Sema/aarch64-sme-intrinsics/acle_sme_zero.c | 10
-rw-r--r--  clang/test/Sema/aix-builtin-cpu-unsupports.c | 6
-rw-r--r--  clang/test/Sema/builtin-cpu-supports.c | 2
-rw-r--r--  clang/test/SemaCXX/PR20334-std_initializer_list_diagnosis_assertion.cpp | 3
-rw-r--r--  clang/test/SemaCXX/PR68542.cpp | 14
-rw-r--r--  clang/test/SemaCXX/cxx2a-ms-no-unique-address.cpp | 25
-rw-r--r--  clang/test/SemaCXX/type-traits.cpp | 10
-rw-r--r--  clang/test/SemaOpenACC/no-branch-in-out.c | 95
-rw-r--r--  clang/tools/c-index-test/core_main.cpp | 2
-rw-r--r--  clang/tools/clang-installapi/Options.cpp | 14
-rw-r--r--  clang/tools/clang-linker-wrapper/ClangLinkerWrapper.cpp | 4
-rw-r--r--  clang/tools/libclang/CIndex.cpp | 2
-rw-r--r--  clang/unittests/Analysis/FlowSensitive/TypeErasedDataflowAnalysisTest.cpp | 143
-rw-r--r--  clang/unittests/Format/FormatTest.cpp | 6
-rw-r--r--  clang/unittests/Format/FormatTestMacroExpansion.cpp | 21
-rw-r--r--  clang/unittests/Format/MacroCallReconstructorTest.cpp | 129
-rw-r--r--  clang/utils/TableGen/SveEmitter.cpp | 6
-rwxr-xr-x  clang/www/cxx_dr_status.html | 2
-rw-r--r--  compiler-rt/lib/builtins/cpu_model/x86.c | 6
-rw-r--r--  compiler-rt/lib/hwasan/hwasan.cpp | 2
-rw-r--r--  compiler-rt/lib/sanitizer_common/sanitizer_fuchsia.cpp | 17
-rw-r--r--  compiler-rt/lib/scudo/standalone/combined.h | 31
-rw-r--r--  compiler-rt/test/builtins/Unit/aarch64_cpu_features_test.c | 17
-rw-r--r--  compiler-rt/test/builtins/Unit/cpu_model_test.c | 2
-rw-r--r--  flang/lib/Evaluate/fold-integer.cpp | 1
-rw-r--r--  flang/lib/Frontend/FrontendActions.cpp | 2
-rw-r--r--  flang/lib/Semantics/resolve-directives.cpp | 2
-rw-r--r--  flang/test/Driver/omp-driver-offload.f90 | 26
-rw-r--r--  flang/test/Semantics/OpenMP/device-constructs.f90 | 12
-rw-r--r--  flang/test/Semantics/OpenMP/struct.f90 | 7
-rw-r--r--  flang/tools/bbc/bbc.cpp | 3
-rw-r--r--  libc/CMakeLists.txt | 46
-rw-r--r--  libc/cmake/modules/LLVMLibCArchitectures.cmake | 28
-rw-r--r--  libc/cmake/modules/LLVMLibCCheckMPFR.cmake | 2
-rw-r--r--  libc/cmake/modules/LLVMLibCCompileOptionRules.cmake | 78
-rw-r--r--  libc/cmake/modules/LLVMLibCHeaderRules.cmake | 2
-rw-r--r--  libc/cmake/modules/LLVMLibCLibraryRules.cmake | 141
-rw-r--r--  libc/cmake/modules/LLVMLibCObjectRules.cmake | 348
-rw-r--r--  libc/cmake/modules/LLVMLibCTestRules.cmake | 53
-rw-r--r--  libc/cmake/modules/prepare_libc_gpu_build.cmake | 115
-rw-r--r--  libc/docs/gpu/using.rst | 33
-rw-r--r--  libc/include/CMakeLists.txt | 6
-rw-r--r--  libc/include/llvm-libc-macros/sys-queue-macros.h | 52
-rw-r--r--  libc/lib/CMakeLists.txt | 47
-rw-r--r--  libc/src/__support/File/CMakeLists.txt | 2
-rw-r--r--  libc/src/__support/GPU/CMakeLists.txt | 2
-rw-r--r--  libc/src/__support/OSUtil/CMakeLists.txt | 2
-rw-r--r--  libc/src/__support/RPC/CMakeLists.txt | 2
-rw-r--r--  libc/src/__support/fixed_point/CMakeLists.txt | 2
-rw-r--r--  libc/src/__support/fixed_point/fx_bits.h | 78
-rw-r--r--  libc/src/math/CMakeLists.txt | 16
-rw-r--r--  libc/src/math/gpu/vendor/CMakeLists.txt | 1
-rw-r--r--  libc/src/stdio/CMakeLists.txt | 2
-rw-r--r--  libc/src/stdlib/CMakeLists.txt | 4
-rw-r--r--  libc/src/string/CMakeLists.txt | 12
-rw-r--r--  libc/startup/gpu/CMakeLists.txt | 35
-rw-r--r--  libc/startup/gpu/amdgpu/CMakeLists.txt | 13
-rw-r--r--  libc/startup/gpu/amdgpu/start.cpp | 6
-rw-r--r--  libc/startup/gpu/nvptx/CMakeLists.txt | 9
-rw-r--r--  libc/startup/linux/CMakeLists.txt | 3
-rw-r--r--  libc/test/CMakeLists.txt | 6
-rw-r--r--  libc/test/IntegrationTest/CMakeLists.txt | 16
-rw-r--r--  libc/test/UnitTest/CMakeLists.txt | 2
-rw-r--r--  libc/test/src/__support/CMakeLists.txt | 50
-rw-r--r--  libc/test/src/__support/CPP/CMakeLists.txt | 2
-rw-r--r--  libc/test/src/__support/FPUtil/fpbits_test.cpp | 2
-rw-r--r--  libc/test/src/__support/File/CMakeLists.txt | 2
-rw-r--r--  libc/test/src/__support/fixed_point/CMakeLists.txt | 16
-rw-r--r--  libc/test/src/__support/fixed_point/fx_bits_test.cpp | 348
-rw-r--r--  libc/test/src/errno/CMakeLists.txt | 2
-rw-r--r--  libc/test/src/math/CMakeLists.txt | 20
-rw-r--r--  libc/test/src/math/smoke/CMakeLists.txt | 8
-rw-r--r--  libc/test/src/stdio/CMakeLists.txt | 2
-rw-r--r--  libc/test/src/stdlib/CMakeLists.txt | 6
-rw-r--r--  libc/test/utils/UnitTest/CMakeLists.txt | 2
-rw-r--r--  libc/utils/CMakeLists.txt | 2
-rw-r--r--  libc/utils/MPFRWrapper/CMakeLists.txt | 2
-rw-r--r--  libc/utils/gpu/CMakeLists.txt | 4
-rw-r--r--  libc/utils/gpu/loader/CMakeLists.txt | 48
-rw-r--r--  libc/utils/gpu/loader/amdgpu/CMakeLists.txt | 6
-rw-r--r--  libc/utils/gpu/loader/nvptx/CMakeLists.txt | 10
-rw-r--r--  libc/utils/gpu/server/CMakeLists.txt | 9
-rw-r--r--  libcxx/test/libcxx/input.output/filesystems/class.directory_entry/directory_entry.mods/last_write_time.pass.cpp | 2
-rw-r--r--  libcxx/test/libcxx/language.support/support.dynamic/libcpp_deallocate.sh.cpp | 2
-rw-r--r--  libcxx/test/libcxx/language.support/support.dynamic/new_faligned_allocation.pass.cpp | 2
-rw-r--r--  libcxx/test/libcxx/thread/thread.condition/PR30202_notify_from_pthread_created_thread.pass.cpp | 2
-rw-r--r--  libcxx/test/libcxx/thread/thread.threads/thread.thread.this/sleep_for.pass.cpp | 2
-rw-r--r--  libcxx/test/std/input.output/filesystems/fs.op.funcs/fs.op.remove_all/toctou.pass.cpp | 2
-rw-r--r--  libcxx/test/std/language.support/cmp/cmp.alg/strong_order_long_double.verify.cpp | 8
-rw-r--r--  libcxx/test/std/language.support/support.dynamic/align_val_t.pass.cpp | 2
-rw-r--r--  libcxx/test/std/language.support/support.dynamic/new.delete/new.delete.array/delete_align_val_t_replace.pass.cpp | 2
-rw-r--r--  libcxx/test/std/language.support/support.dynamic/new.delete/new.delete.array/new.size_align.pass.cpp | 2
-rw-r--r--  libcxx/test/std/language.support/support.dynamic/new.delete/new.delete.array/new.size_align.replace.indirect.pass.cpp | 2
-rw-r--r--  libcxx/test/std/language.support/support.dynamic/new.delete/new.delete.array/new.size_align.replace.pass.cpp | 2
-rw-r--r--  libcxx/test/std/language.support/support.dynamic/new.delete/new.delete.array/new.size_align_nothrow.pass.cpp | 2
-rw-r--r--  libcxx/test/std/language.support/support.dynamic/new.delete/new.delete.array/new.size_align_nothrow.replace.indirect.pass.cpp | 2
-rw-r--r--  libcxx/test/std/language.support/support.dynamic/new.delete/new.delete.array/new.size_align_nothrow.replace.pass.cpp | 2
-rw-r--r--  libcxx/test/std/language.support/support.dynamic/new.delete/new.delete.array/nodiscard.verify.cpp | 2
-rw-r--r--  libcxx/test/std/language.support/support.dynamic/new.delete/new.delete.single/delete_align_val_t_replace.pass.cpp | 2
-rw-r--r--  libcxx/test/std/language.support/support.dynamic/new.delete/new.delete.single/new.size_align.pass.cpp | 2
-rw-r--r--  libcxx/test/std/language.support/support.dynamic/new.delete/new.delete.single/new.size_align.replace.pass.cpp | 2
-rw-r--r--  libcxx/test/std/language.support/support.dynamic/new.delete/new.delete.single/new.size_align_nothrow.pass.cpp | 2
-rw-r--r--  libcxx/test/std/language.support/support.dynamic/new.delete/new.delete.single/new.size_align_nothrow.replace.indirect.pass.cpp | 2
-rw-r--r--  libcxx/test/std/language.support/support.dynamic/new.delete/new.delete.single/new.size_align_nothrow.replace.pass.cpp | 2
-rw-r--r--  libcxx/test/std/language.support/support.dynamic/new.delete/new.delete.single/nodiscard.verify.cpp | 2
-rw-r--r--  libcxx/test/std/language.support/support.rtti/type.info/type_info.equal.pass.cpp | 4
-rw-r--r--  libcxx/test/std/localization/locale.categories/category.ctype/locale.codecvt/locale.codecvt.members/char16_t_in.pass.cpp | 2
-rw-r--r--  libcxx/test/std/localization/locale.categories/category.ctype/locale.codecvt/locale.codecvt.members/char16_t_length.pass.cpp | 2
-rw-r--r--  libcxx/test/std/localization/locale.categories/category.ctype/locale.codecvt/locale.codecvt.members/char16_t_out.pass.cpp | 2
-rw-r--r--  libcxx/test/std/localization/locale.categories/category.ctype/locale.codecvt/locale.codecvt.members/char32_t_in.pass.cpp | 2
-rw-r--r--  libcxx/test/std/localization/locale.categories/category.ctype/locale.codecvt/locale.codecvt.members/char32_t_length.pass.cpp | 2
-rw-r--r--  libcxx/test/std/localization/locale.categories/category.ctype/locale.codecvt/locale.codecvt.members/char32_t_out.pass.cpp | 2
-rw-r--r--  libcxx/test/std/utilities/format/format.functions/escaped_output.unicode.pass.cpp | 3
-rw-r--r--  libcxx/test/std/utilities/format/format.range/format.range.fmtstr/format.pass.cpp | 3
-rw-r--r--  libcxx/test/std/utilities/format/format.range/format.range.fmtstr/parse.pass.cpp | 3
-rw-r--r--  libcxx/test/std/utilities/memory/temporary.buffer/overaligned.pass.cpp | 2
-rw-r--r--  lldb/docs/resources/test.rst | 8
-rw-r--r--  lldb/docs/testsuite/a-detailed-walkthrough.txt | 9
-rw-r--r--  lldb/include/lldb/Interpreter/OptionValueSInt64.h | 4
-rw-r--r--  lldb/include/lldb/Interpreter/OptionValueUInt64.h | 26
-rw-r--r--  lldb/include/lldb/Interpreter/Options.h | 33
-rw-r--r--  lldb/packages/Python/lldbsuite/test/README-TestSuite | 14
-rw-r--r--  lldb/packages/Python/lldbsuite/test/tools/lldb-dap/dap_server.py | 47
-rw-r--r--  lldb/source/Commands/CommandObjectBreakpoint.cpp | 37
-rw-r--r--  lldb/source/Core/CoreProperties.td | 2
-rw-r--r--  lldb/source/Core/Debugger.cpp | 6
-rw-r--r--  lldb/source/Interpreter/OptionValueUInt64.cpp | 13
-rw-r--r--  lldb/source/Interpreter/Options.cpp | 13
-rw-r--r--  lldb/source/Target/Process.cpp | 11
-rw-r--r--  lldb/test/API/commands/expression/call-throws/TestCallThatThrows.py | 2
-rw-r--r--  lldb/test/API/commands/expression/dont_allow_jit/TestAllowJIT.py | 8
-rw-r--r--  lldb/test/API/commands/settings/TestSettings.py | 15
-rw-r--r--  lldb/test/API/commands/statistics/basic/TestStats.py | 34
-rw-r--r--  lldb/test/API/commands/trace/TestTraceSave.py | 8
-rw-r--r--  lldb/test/API/functionalities/breakpoint/address_breakpoints/TestBadAddressBreakpoints.py | 2
-rw-r--r--  lldb/test/API/functionalities/breakpoint/breakpoint_command/TestBreakpointCommand.py | 12
-rw-r--r--  lldb/test/API/functionalities/breakpoint/breakpoint_names/TestBreakpointNames.py | 20
-rw-r--r--  lldb/test/API/functionalities/gdb_remote_client/TestJLink6Armv7RegisterDefinition.py | 2
-rw-r--r--  lldb/test/API/functionalities/module_cache/simple_exe/TestModuleCacheSimple.py | 13
-rw-r--r--  lldb/test/API/functionalities/progress_reporting/TestTrimmedProgressReporting.py | 3
-rw-r--r--  lldb/test/API/functionalities/stats_api/TestStatisticsAPI.py | 48
-rw-r--r--  lldb/test/API/functionalities/thread/backtrace_limit/TestBacktraceLimit.py | 2
-rw-r--r--  lldb/test/API/macosx/arm-corefile-regctx/TestArmMachoCorefileRegctx.py | 4
-rw-r--r--  lldb/test/API/macosx/lc-note/addrable-bits/TestAddrableBitsCorefile.py | 2
-rw-r--r--  lldb/test/API/macosx/lc-note/firmware-corefile/TestFirmwareCorefiles.py | 14
-rw-r--r--  lldb/test/API/macosx/lc-note/kern-ver-str/TestKernVerStrLCNOTE.py | 2
-rw-r--r--  lldb/test/API/macosx/lc-note/multiple-binary-corefile/TestMultipleBinaryCorefile.py | 2
-rw-r--r--  lldb/test/API/macosx/queues/TestQueues.py | 3
-rw-r--r--  lldb/test/API/macosx/safe-to-func-call/TestSafeFuncCalls.py | 3
-rw-r--r--  lldb/test/API/python_api/interpreter/TestRunCommandInterpreterAPI.py | 28
-rw-r--r--  lldb/test/API/tools/lldb-dap/databreakpoint/Makefile | 3
-rw-r--r--  lldb/test/API/tools/lldb-dap/databreakpoint/TestDAP_setDataBreakpoints.py | 131
-rw-r--r--  lldb/test/API/tools/lldb-dap/databreakpoint/main.cpp | 17
-rw-r--r--  lldb/tools/lldb-dap/CMakeLists.txt | 1
-rw-r--r--  lldb/tools/lldb-dap/DAPForward.h | 2
-rw-r--r--  lldb/tools/lldb-dap/Watchpoint.cpp | 48
-rw-r--r--  lldb/tools/lldb-dap/Watchpoint.h | 34
-rw-r--r--  lldb/tools/lldb-dap/lldb-dap.cpp | 341
-rw-r--r--  lldb/unittests/Interpreter/CMakeLists.txt | 1
-rw-r--r--  lldb/unittests/Interpreter/TestOptions.cpp | 29
-rw-r--r--  lldb/unittests/ScriptInterpreter/Python/PythonDataObjectsTests.cpp | 10
-rw-r--r--  llvm/CMakeLists.txt | 16
-rw-r--r--  llvm/cmake/modules/HandleLLVMOptions.cmake | 12
-rw-r--r--  llvm/docs/CommandGuide/llvm-readelf.rst | 5
-rw-r--r--  llvm/docs/CommandGuide/llvm-readobj.rst | 5
-rw-r--r--  llvm/docs/LangRef.rst | 236
-rw-r--r--  llvm/include/llvm/ADT/iterator_range.h | 4
-rw-r--r--  llvm/include/llvm/CodeGen/MachinePassManager.h | 405
-rw-r--r--  llvm/include/llvm/CodeGen/SDPatternMatch.h | 694
-rw-r--r--  llvm/include/llvm/CodeGen/SelectionDAG.h | 42
-rw-r--r--  llvm/include/llvm/DebugInfo/DWARF/DWARFAcceleratorTable.h | 26
-rw-r--r--  llvm/include/llvm/Frontend/OpenMP/OMP.td | 8
-rw-r--r--  llvm/include/llvm/IR/Attributes.td | 28
-rw-r--r--  llvm/include/llvm/IR/Constants.h | 18
-rw-r--r--  llvm/include/llvm/IR/DebugProgramInstruction.h | 52
-rw-r--r--  llvm/include/llvm/IR/IntrinsicInst.h | 3
-rw-r--r--  llvm/include/llvm/IR/Intrinsics.td | 20
-rw-r--r--  llvm/include/llvm/IR/IntrinsicsAArch64.td | 46
-rw-r--r--  llvm/include/llvm/IR/VPIntrinsics.def | 24
-rw-r--r--  llvm/include/llvm/LTO/Config.h | 3
-rw-r--r--  llvm/include/llvm/Passes/CodeGenPassBuilder.h | 108
-rw-r--r--  llvm/include/llvm/Passes/PassBuilder.h | 15
-rw-r--r--  llvm/include/llvm/Target/TargetMachine.h | 10
-rw-r--r--  llvm/include/llvm/Target/TargetSchedule.td | 2
-rw-r--r--  llvm/include/llvm/TargetParser/PPCTargetParser.def | 57
-rw-r--r--  llvm/include/llvm/TargetParser/X86TargetParser.def | 1
-rw-r--r--  llvm/include/llvm/Transforms/IPO/HotColdSplitting.h | 15
-rw-r--r--  llvm/lib/Analysis/Loads.cpp | 6
-rw-r--r--  llvm/lib/Analysis/ValueTracking.cpp | 15
-rw-r--r--  llvm/lib/Bitcode/Reader/BitcodeReader.cpp | 55
-rw-r--r--  llvm/lib/Bitcode/Writer/BitcodeWriter.cpp | 2
-rw-r--r--  llvm/lib/CodeGen/AssignmentTrackingAnalysis.cpp | 9
-rw-r--r--  llvm/lib/CodeGen/CodeGenPrepare.cpp | 5
-rw-r--r--  llvm/lib/CodeGen/GlobalISel/CallLowering.cpp | 2
-rw-r--r--  llvm/lib/CodeGen/GlobalISel/IRTranslator.cpp | 12
-rw-r--r--  llvm/lib/CodeGen/GlobalISel/Utils.cpp | 5
-rw-r--r--  llvm/lib/CodeGen/MachineFunctionSplitter.cpp | 11
-rw-r--r--  llvm/lib/CodeGen/MachinePassManager.cpp | 183
-rw-r--r--  llvm/lib/CodeGen/MachinePipeliner.cpp | 1
-rw-r--r--  llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp | 144
-rw-r--r--  llvm/lib/CodeGen/SelectionDAG/FastISel.cpp | 17
-rw-r--r--  llvm/lib/CodeGen/SelectionDAG/LegalizeIntegerTypes.cpp | 37
-rw-r--r--  llvm/lib/CodeGen/SelectionDAG/LegalizeTypes.h | 2
-rw-r--r--  llvm/lib/CodeGen/SelectionDAG/LegalizeVectorOps.cpp | 4
-rw-r--r--  llvm/lib/CodeGen/SelectionDAG/LegalizeVectorTypes.cpp | 16
-rw-r--r--  llvm/lib/CodeGen/SelectionDAG/MatchContext.h | 175
-rw-r--r--  llvm/lib/CodeGen/SelectionDAG/SelectionDAG.cpp | 79
-rw-r--r--  llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp | 29
-rw-r--r--  llvm/lib/CodeGen/SelectionDAG/TargetLowering.cpp | 10
-rw-r--r--  llvm/lib/DebugInfo/DWARF/DWARFAcceleratorTable.cpp | 73
-rw-r--r--  llvm/lib/DebugInfo/DWARF/DWARFDebugLine.cpp | 22
-rw-r--r--  llvm/lib/IR/AsmWriter.cpp | 78
-rw-r--r--  llvm/lib/IR/Attributes.cpp | 5
-rw-r--r--  llvm/lib/IR/BasicBlock.cpp | 18
-rw-r--r--  llvm/lib/IR/Constants.cpp | 94
-rw-r--r--  llvm/lib/IR/DebugProgramInstruction.cpp | 40
-rw-r--r--  llvm/lib/IR/Instructions.cpp | 1
-rw-r--r--  llvm/lib/IR/LLVMContextImpl.cpp | 2
-rw-r--r--  llvm/lib/IR/LLVMContextImpl.h | 4
-rw-r--r--  llvm/lib/LTO/LTOBackend.cpp | 2
-rw-r--r--  llvm/lib/Object/SymbolSize.cpp | 7
-rw-r--r--  llvm/lib/Passes/PassBuilder.cpp | 48
-rw-r--r--  llvm/lib/Passes/PassBuilderPipelines.cpp | 2
-rw-r--r--  llvm/lib/Target/AArch64/AArch64Arm64ECCallLowering.cpp | 2
-rw-r--r--  llvm/lib/Target/AArch64/AArch64AsmPrinter.cpp | 3
-rw-r--r--  llvm/lib/Target/AArch64/AArch64FrameLowering.cpp | 65
-rw-r--r--  llvm/lib/Target/AArch64/AArch64ISelLowering.cpp | 14
-rw-r--r--  llvm/lib/Target/AArch64/AArch64ISelLowering.h | 2
-rw-r--r--  llvm/lib/Target/AArch64/AArch64SchedTSV110.td | 6
-rw-r--r--  llvm/lib/Target/AArch64/AArch64StackTagging.cpp | 11
-rw-r--r--  llvm/lib/Target/AArch64/AArch64TargetTransformInfo.cpp | 7
-rw-r--r--  llvm/lib/Target/AMDGPU/AMDGPUCombine.td | 8
-rw-r--r--  llvm/lib/Target/AMDGPU/AMDGPULegalizerInfo.cpp | 5
-rw-r--r--  llvm/lib/Target/AMDGPU/AMDGPUPostLegalizerCombiner.cpp | 23
-rw-r--r--  llvm/lib/Target/AMDGPU/AsmParser/AMDGPUAsmParser.cpp | 6
-rw-r--r--  llvm/lib/Target/AMDGPU/Disassembler/AMDGPUDisassembler.cpp | 425
-rw-r--r--  llvm/lib/Target/AMDGPU/Disassembler/AMDGPUDisassembler.h | 19
-rw-r--r--  llvm/lib/Target/AMDGPU/GCNHazardRecognizer.cpp | 2
-rw-r--r--  llvm/lib/Target/AMDGPU/MCTargetDesc/AMDGPUInstPrinter.cpp | 9
-rw-r--r--  llvm/lib/Target/AMDGPU/SIDefines.h | 21
-rw-r--r--  llvm/lib/Target/AMDGPU/SIFrameLowering.cpp | 15
-rw-r--r--  llvm/lib/Target/AMDGPU/SIISelLowering.cpp | 25
-rw-r--r--  llvm/lib/Target/AMDGPU/SIInsertWaitcnts.cpp | 28
-rw-r--r--  llvm/lib/Target/AMDGPU/SIInstrInfo.td | 19
-rw-r--r--  llvm/lib/Target/AMDGPU/SIModeRegister.cpp | 15
-rw-r--r--  llvm/lib/Target/AMDGPU/Utils/AMDGPUBaseInfo.cpp | 20
-rw-r--r--  llvm/lib/Target/AMDGPU/Utils/AMDGPUBaseInfo.h | 54
-rw-r--r--  llvm/lib/Target/AMDGPU/VOP1Instructions.td | 79
-rw-r--r--  llvm/lib/Target/AMDGPU/VOP2Instructions.td | 54
-rw-r--r--  llvm/lib/Target/AMDGPU/VOP3Instructions.td | 8
-rw-r--r--  llvm/lib/Target/AMDGPU/VOP3PInstructions.td | 12
-rw-r--r--  llvm/lib/Target/AMDGPU/VOPCInstructions.td | 500
-rw-r--r--  llvm/lib/Target/AMDGPU/VOPInstructions.td | 17
-rw-r--r--  llvm/lib/Target/ARM/ARMLegalizerInfo.cpp | 56
-rw-r--r--  llvm/lib/Target/DirectX/DXIL.td | 80
-rw-r--r--  llvm/lib/Target/Hexagon/CMakeLists.txt | 3
-rw-r--r--  llvm/lib/Target/Hexagon/HexagonBitSimplify.cpp | 3
-rw-r--r--  llvm/lib/Target/Hexagon/HexagonGenMemAbsolute.cpp | 274
-rw-r--r--  llvm/lib/Target/Hexagon/HexagonInstrInfo.cpp | 56
-rw-r--r--  llvm/lib/Target/Hexagon/HexagonInstrInfo.h | 2
-rw-r--r--  llvm/lib/Target/Hexagon/HexagonPostIncOpt.cpp | 689
-rw-r--r--  llvm/lib/Target/Hexagon/HexagonTargetMachine.cpp | 32
-rw-r--r--  llvm/lib/Target/Hexagon/HexagonTfrCleanup.cpp | 324
-rw-r--r--  llvm/lib/Target/Hexagon/MCTargetDesc/HexagonBaseInfo.h | 12
-rw-r--r--  llvm/lib/Target/NVPTX/NVPTXISelDAGToDAG.cpp | 30
-rw-r--r--  llvm/lib/Target/NVPTX/NVPTXISelLowering.cpp | 257
-rw-r--r--  llvm/lib/Target/NVPTX/NVPTXInstrInfo.td | 4
-rw-r--r--  llvm/lib/Target/RISCV/RISCVISelDAGToDAG.cpp | 8
-rw-r--r--  llvm/lib/Target/RISCV/RISCVISelLowering.cpp | 166
-rw-r--r--  llvm/lib/Target/RISCV/RISCVMakeCompressible.cpp | 31
-rw-r--r--  llvm/lib/Target/RISCV/RISCVScheduleV.td | 21
-rw-r--r--  llvm/lib/Target/RISCV/RISCVSubtarget.h | 4
-rw-r--r--  llvm/lib/Target/RISCV/RISCVTargetMachine.cpp | 2
-rw-r--r--  llvm/lib/Target/SPIRV/SPIRVBuiltins.td | 7
-rw-r--r--  llvm/lib/Target/SPIRV/SPIRVCallLowering.cpp | 17
-rw-r--r--  llvm/lib/Target/SPIRV/SPIRVGlobalRegistry.cpp | 5
-rw-r--r--  llvm/lib/Target/SPIRV/SPIRVISelLowering.h | 3
-rw-r--r--  llvm/lib/Target/SPIRV/SPIRVInstrInfo.td | 9
-rw-r--r--  llvm/lib/Target/SPIRV/SPIRVInstructionSelector.cpp | 36
-rw-r--r--  llvm/lib/Target/SPIRV/SPIRVLegalizerInfo.cpp | 16
-rw-r--r--  llvm/lib/Target/SPIRV/SPIRVModuleAnalysis.cpp | 16
-rw-r--r--  llvm/lib/Target/SPIRV/SPIRVPreLegalizer.cpp | 11
-rw-r--r--  llvm/lib/Target/SPIRV/SPIRVSubtarget.cpp | 10
-rw-r--r--  llvm/lib/Target/SPIRV/SPIRVSymbolicOperands.td | 4
-rw-r--r--  llvm/lib/Target/SPIRV/SPIRVUtils.cpp | 19
-rw-r--r--  llvm/lib/Target/SPIRV/SPIRVUtils.h | 3
-rw-r--r--  llvm/lib/Target/WebAssembly/WebAssemblyISelLowering.cpp | 7
-rw-r--r--  llvm/lib/Target/WebAssembly/WebAssemblyMachineFunctionInfo.cpp | 5
-rw-r--r--  llvm/lib/Target/WebAssembly/WebAssemblyRuntimeLibcallSignatures.cpp | 26
-rw-r--r--  llvm/lib/Target/WebAssembly/WebAssemblyTargetMachine.cpp | 9
-rw-r--r--  llvm/lib/Target/X86/X86CodeGenPassBuilder.cpp | 9
-rw-r--r--  llvm/lib/Target/X86/X86TargetMachine.h | 7
-rw-r--r--  llvm/lib/TargetParser/Host.cpp | 7
-rw-r--r--  llvm/lib/Transforms/IPO/HotColdSplitting.cpp | 155
-rw-r--r--  llvm/lib/Transforms/IPO/OpenMPOpt.cpp | 1
-rw-r--r--  llvm/lib/Transforms/InstCombine/InstCombineCasts.cpp | 23
-rw-r--r--  llvm/lib/Transforms/InstCombine/InstructionCombining.cpp | 14
-rw-r--r--  llvm/lib/Transforms/Instrumentation/HWAddressSanitizer.cpp | 10
-rw-r--r--  llvm/lib/Transforms/Scalar/ConstraintElimination.cpp | 13
-rw-r--r--  llvm/lib/Transforms/Scalar/CorrelatedValuePropagation.cpp | 49
-rw-r--r--  llvm/lib/Transforms/Scalar/InferAddressSpaces.cpp | 13
-rw-r--r--  llvm/lib/Transforms/Scalar/LoopStrengthReduce.cpp | 4
-rw-r--r--  llvm/lib/Transforms/Scalar/SpeculativeExecution.cpp | 6
-rw-r--r--  llvm/lib/Transforms/Utils/BasicBlockUtils.cpp | 10
-rw-r--r--  llvm/lib/Transforms/Utils/CodeExtractor.cpp | 45
-rw-r--r--  llvm/lib/Transforms/Utils/MemoryTaggingSupport.cpp | 3
-rw-r--r--  llvm/lib/Transforms/Utils/ValueMapper.cpp | 5
-rw-r--r--  llvm/lib/Transforms/Vectorize/SLPVectorizer.cpp | 446
-rw-r--r--  llvm/lib/Transforms/Vectorize/VPlan.h | 3
-rw-r--r--  llvm/runtimes/CMakeLists.txt | 11
-rw-r--r--  llvm/test/Analysis/CostModel/AArch64/arith-fp-frem.ll | 68
-rw-r--r--  llvm/test/Analysis/CostModel/AArch64/arith-fp.ll | 22
-rw-r--r--  llvm/test/Analysis/ScalarEvolution/exit-count-select-safe.ll | 4
-rw-r--r--  llvm/test/Bitcode/constant-splat.ll | 76
-rw-r--r--  llvm/test/CodeGen/AArch64/16bit-float-promotion-with-nofp.ll | 31
-rw-r--r--  llvm/test/CodeGen/AArch64/addsub.ll | 6
-rw-r--r--  llvm/test/CodeGen/AArch64/arm64ec-entry-thunks-local-linkage.ll | 6
-rw-r--r--  llvm/test/CodeGen/AArch64/callbr-asm-obj-file.ll | 2
-rw-r--r--  llvm/test/CodeGen/AArch64/framelayout-sve-calleesaves-fix.mir | 2
-rw-r--r--  llvm/test/CodeGen/AArch64/framelayout-sve.mir | 24
-rw-r--r--  llvm/test/CodeGen/AArch64/reverse-csr-restore-seq.mir | 101
-rw-r--r--  llvm/test/CodeGen/AArch64/sme-streaming-compatible-interface.ll | 32
-rw-r--r--  llvm/test/CodeGen/AArch64/sme-streaming-interface.ll | 32
-rw-r--r--  llvm/test/CodeGen/AArch64/sme2-intrinsics-ld1.ll | 32
-rw-r--r--  llvm/test/CodeGen/AArch64/sme2-intrinsics-ldnt1.ll | 32
-rw-r--r--  llvm/test/CodeGen/AArch64/stack-probing-sve.ll | 4
-rw-r--r--  llvm/test/CodeGen/AArch64/strictfp_f16_abi_promote.ll | 255
-rw-r--r--  llvm/test/CodeGen/AArch64/sve-alloca.ll | 16
-rw-r--r--  llvm/test/CodeGen/AArch64/sve-calling-convention-mixed.ll | 32
-rw-r--r--  llvm/test/CodeGen/AArch64/sve-tailcall.ll | 32
-rw-r--r--  llvm/test/CodeGen/AArch64/sve2p1-intrinsics-pmov-to-pred.ll | 82
-rw-r--r--  llvm/test/CodeGen/AArch64/sve2p1-intrinsics-pmov-to-vector.ll | 45
-rw-r--r--  llvm/test/CodeGen/AArch64/unwind-preserved.ll | 96
-rw-r--r--  llvm/test/CodeGen/AMDGPU/GlobalISel/combine-extract-vector-load.mir | 40
-rw-r--r--  llvm/test/CodeGen/AMDGPU/GlobalISel/combine-fdiv-sqrt-to-rsq.mir | 584
-rw-r--r--  llvm/test/CodeGen/AMDGPU/bf16.ll | 7262
-rw-r--r--  llvm/test/CodeGen/AMDGPU/fmed3-cast-combine.ll | 6
-rw-r--r--  llvm/test/CodeGen/AMDGPU/global-atomics-fp.ll | 30
-rw-r--r--  llvm/test/CodeGen/AMDGPU/isel-amdgpu-cs-chain-preserve-cc.ll | 20
-rw-r--r--  llvm/test/CodeGen/AMDGPU/local-atomics-fp.ll | 12
-rw-r--r--  llvm/test/CodeGen/AMDGPU/vector_shuffle.packed.ll | 72
-rw-r--r--  llvm/test/CodeGen/Hexagon/atomicrmw-uinc-udec-wrap.ll | 6
-rw-r--r--  llvm/test/CodeGen/Hexagon/bit-store-upper-sub-hi.mir | 21
-rw-r--r--  llvm/test/CodeGen/Hexagon/isel/select-vec.ll | 2
-rw-r--r--  llvm/test/CodeGen/Hexagon/load-const-extend-opt.ll | 68
-rw-r--r--  llvm/test/CodeGen/Hexagon/post-inc-vec.mir | 413
-rw-r--r--  llvm/test/CodeGen/Hexagon/post_inc_store.mir | 168
-rw-r--r--  llvm/test/CodeGen/Hexagon/postincopt-crash.mir | 58
-rw-r--r--  llvm/test/CodeGen/Hexagon/postincopt-dcfetch.mir | 19
-rw-r--r--  llvm/test/CodeGen/Hexagon/reg-by-name.ll | 4
-rw-r--r--  llvm/test/CodeGen/Hexagon/store-const-extend-opt.ll | 72
-rw-r--r--  llvm/test/CodeGen/Hexagon/tfr-slotindex.ll | 26
-rw-r--r--  llvm/test/CodeGen/Hexagon/valid-offset-loadbsw4.mir | 32
-rw-r--r--  llvm/test/CodeGen/LoongArch/intrinsic-memcpy.ll | 34
-rw-r--r--  llvm/test/CodeGen/NVPTX/param-load-store.ll | 93
-rw-r--r--  llvm/test/CodeGen/NVPTX/unaligned-param-load-store.ll | 385
-rw-r--r--  llvm/test/CodeGen/RISCV/fold-addi-loadstore.ll | 8
-rw-r--r--  llvm/test/CodeGen/RISCV/make-compressible.mir | 499
-rw-r--r--  llvm/test/CodeGen/RISCV/pr51206.ll | 12
-rw-r--r--  llvm/test/CodeGen/RISCV/rvv/fixed-vectors-masked-gather.ll | 30
-rw-r--r--  llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vsadd-vp.ll | 1701
-rw-r--r--  llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vsaddu-vp.ll | 1697
-rw-r--r--  llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vssub-vp.ll | 1745
-rw-r--r--  llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vssubu-vp.ll | 1740
-rw-r--r--  llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vwsubu.ll | 32
-rw-r--r--  llvm/test/CodeGen/RISCV/rvv/vsadd-vp.ll | 2015
-rw-r--r--  llvm/test/CodeGen/RISCV/rvv/vsaddu-vp.ll | 2014
-rw-r--r--  llvm/test/CodeGen/RISCV/rvv/vssub-vp.ll | 2067
-rw-r--r--  llvm/test/CodeGen/RISCV/rvv/vssubu-vp.ll | 2065
-rw-r--r--  llvm/test/CodeGen/RISCV/select.ll | 252
-rw-r--r--  llvm/test/CodeGen/RISCV/sext-zext-trunc.ll | 61
-rw-r--r--  llvm/test/CodeGen/RISCV/split-offsets.ll | 4
-rw-r--r--  llvm/test/CodeGen/RISCV/srem-vector-lkk.ll | 8
-rw-r--r--  llvm/test/CodeGen/RISCV/urem-vector-lkk.ll | 8
-rw-r--r--  llvm/test/CodeGen/SPIRV/extensions/SPV_INTEL_usm_storage_classes/intel-usm-addrspaces.ll | 84
-rw-r--r--  llvm/test/CodeGen/SPIRV/extensions/SPV_KHR_subgroup_rotate/subgroup-rotate.ll | 357
-rw-r--r--  llvm/test/CodeGen/SPIRV/switch-no-jump-table.ll | 30
-rw-r--r--  llvm/test/CodeGen/SystemZ/fp-strict-conv-17.ll | 24
-rw-r--r--  llvm/test/CodeGen/WebAssembly/lower-em-ehsjlj-multi-return.ll | 4
-rw-r--r--  llvm/test/CodeGen/WebAssembly/multivalue-dont-move-def-past-use.mir | 2
-rw-r--r--  llvm/test/CodeGen/WebAssembly/multivalue-stackify.ll | 2
-rw-r--r--  llvm/test/CodeGen/WebAssembly/multivalue.ll | 10
-rw-r--r--  llvm/test/CodeGen/WebAssembly/multivalue_libcall.ll | 2
-rw-r--r--  llvm/test/CodeGen/X86/GlobalISel/calllowering-tailcall.ll | 24
-rw-r--r--  llvm/test/CodeGen/X86/tailcall-cgp-dup.ll | 58
-rw-r--r--  llvm/test/ExecutionEngine/JITLink/Generic/sectcreate.test | 3
-rw-r--r--  llvm/test/MC/AMDGPU/gfx11-promotions.s | 8
-rw-r--r--  llvm/test/MC/AMDGPU/gfx11_asm_vop3p_dpp16.s | 4
-rw-r--r--  llvm/test/MC/AMDGPU/gfx11_asm_vop3p_dpp8.s | 2
-rw-r--r--  llvm/test/MC/AMDGPU/gfx12_asm_vop3p_dpp16.s | 4
-rw-r--r--  llvm/test/MC/AMDGPU/gfx12_asm_vop3p_dpp8.s | 2
-rw-r--r--  llvm/test/MC/Disassembler/AMDGPU/gfx11_dasm_vop3p_dpp16.txt | 8
-rw-r--r--  llvm/test/MC/Disassembler/AMDGPU/gfx11_dasm_vop3p_dpp8.txt | 4
-rw-r--r--  llvm/test/MC/Disassembler/AMDGPU/gfx12_dasm_vop3p_dpp16.txt | 8
-rw-r--r--  llvm/test/MC/Disassembler/AMDGPU/gfx12_dasm_vop3p_dpp8.txt | 4
-rw-r--r--  llvm/test/Other/new-pm-defaults.ll | 6
-rw-r--r--  llvm/test/Other/new-pm-thinlto-postlink-defaults.ll | 1
-rw-r--r--  llvm/test/Other/new-pm-thinlto-postlink-pgo-defaults.ll | 1
-rw-r--r--  llvm/test/Other/new-pm-thinlto-postlink-samplepgo-defaults.ll | 1
-rw-r--r--  llvm/test/Other/new-pm-thinlto-prelink-defaults.ll | 1
-rw-r--r--  llvm/test/Other/new-pm-thinlto-prelink-pgo-defaults.ll | 1
-rw-r--r--  llvm/test/Other/new-pm-thinlto-prelink-samplepgo-defaults.ll | 1
-rw-r--r--  llvm/test/TableGen/HwModeEncodeDecode2.td | 119
-rw-r--r--  llvm/test/Transforms/ConstraintElimination/minmax.ll | 9
-rw-r--r--  llvm/test/Transforms/ConstraintElimination/sext.ll | 84
-rw-r--r--  llvm/test/Transforms/CorrelatedValuePropagation/min-max.ll | 136
-rw-r--r--  llvm/test/Transforms/HotColdSplit/assumption-cache-invalidation.ll | 8
-rw-r--r--  llvm/test/Transforms/HotColdSplit/eh-pads.ll | 7
-rw-r--r--  llvm/test/Transforms/HotColdSplit/outline-disjoint-diamonds.ll | 9
-rw-r--r--  llvm/test/Transforms/HotColdSplit/outline-inner-region.ll | 49
-rw-r--r--  llvm/test/Transforms/HotColdSplit/outline-outer-region.ll | 52
-rw-r--r--  llvm/test/Transforms/InferAddressSpaces/AMDGPU/multiple-uses-of-val.ll | 69
-rw-r--r--  llvm/test/Transforms/Inline/inline-sign-return-address.ll | 104
-rw-r--r--  llvm/test/Transforms/InstCombine/fpextend.ll | 11
-rw-r--r--  llvm/test/Transforms/InstCombine/not.ll | 89
-rw-r--r--  llvm/test/Transforms/LoopStrengthReduce/RISCV/term-fold-crash.ll | 43
-rw-r--r--  llvm/test/Transforms/LoopVectorize/AArch64/clamped-trip-count.ll | 90
-rw-r--r--  llvm/test/Transforms/LoopVectorize/RISCV/low-trip-count.ll | 370
-rw-r--r--  llvm/test/Transforms/LoopVectorize/X86/pr72969.ll | 26
-rw-r--r--  llvm/test/Transforms/OpenMP/deduplication_soundness.ll | 59
-rw-r--r--  llvm/test/Transforms/SLPVectorizer/AArch64/extractelements-to-shuffle.ll | 16
-rw-r--r--  llvm/test/Transforms/SLPVectorizer/AArch64/reorder-fmuladd-crash.ll | 7
-rw-r--r--  llvm/test/Transforms/SLPVectorizer/AArch64/tsc-s116.ll | 22
-rw-r--r--  llvm/test/Transforms/SLPVectorizer/AArch64/vec3-reorder-reshuffle.ll | 34
-rw-r--r--  llvm/test/Transforms/SLPVectorizer/X86/pr35497.ll | 16
-rw-r--r--  llvm/test/Transforms/SLPVectorizer/X86/reduction-transpose.ll | 16
-rw-r--r--  llvm/test/Transforms/SLPVectorizer/X86/reorder-clustered-node.ll | 11
-rw-r--r--  llvm/test/Transforms/SLPVectorizer/X86/reorder-reused-masked-gather.ll | 7
-rw-r--r--  llvm/test/Transforms/SLPVectorizer/X86/reorder-vf-to-resize.ll | 2
-rw-r--r--  llvm/test/Transforms/SLPVectorizer/X86/scatter-vectorize-reorder.ll | 17
-rw-r--r--  llvm/test/Transforms/SLPVectorizer/X86/shrink_after_reorder2.ll | 11
-rw-r--r--  llvm/test/Transforms/SLPVectorizer/X86/vec3-reorder-reshuffle.ll | 17
-rw-r--r--  llvm/test/Transforms/SpeculativeExecution/PR46267.ll | 5
-rw-r--r--  llvm/test/Transforms/VectorCombine/RISCV/load-widening.ll | 19
-rw-r--r--  llvm/test/tools/llc/new-pm/pipeline.mir | 3
-rw-r--r--  llvm/test/tools/llc/new-pm/start-stop.ll | 7
-rw-r--r--  llvm/test/tools/llvm-mca/AArch64/HiSilicon/tsv110-forwarding.s | 83
-rw-r--r--  llvm/test/tools/llvm-readobj/ELF/decompress-zlib-unsupported.test | 32
-rw-r--r--  llvm/test/tools/llvm-readobj/ELF/decompress-zlib.test | 76
-rw-r--r--  llvm/test/tools/llvm-readobj/ELF/decompress-zstd-unsupported.test | 31
-rw-r--r--  llvm/test/tools/llvm-readobj/ELF/decompress-zstd.test | 28
-rw-r--r--  llvm/test/tools/llvm-symbolizer/wasm-basic.s | 53
-rw-r--r--  llvm/tools/llc/NewPMDriver.cpp | 85
-rw-r--r--  llvm/tools/llvm-gsymutil/Opts.td | 3
-rw-r--r--  llvm/tools/llvm-gsymutil/llvm-gsymutil.cpp | 29
-rw-r--r--  llvm/tools/llvm-link/llvm-link.cpp | 10
-rw-r--r--  llvm/tools/llvm-readobj/ObjDumper.cpp | 26
-rw-r--r--  llvm/tools/llvm-readobj/ObjDumper.h | 4
-rw-r--r--  llvm/tools/llvm-readobj/Opts.td | 2
-rw-r--r--  llvm/tools/llvm-readobj/llvm-readobj.cpp | 6
-rw-r--r--  llvm/unittests/Analysis/ValueTrackingTest.cpp | 59
-rw-r--r--  llvm/unittests/CodeGen/CMakeLists.txt | 1
-rw-r--r--  llvm/unittests/CodeGen/PassManagerTest.cpp | 213
-rw-r--r--  llvm/unittests/CodeGen/SelectionDAGPatternMatchTest.cpp | 292
-rw-r--r--  llvm/unittests/DebugInfo/DWARF/DWARFDebugLineTest.cpp | 4
-rw-r--r--  llvm/unittests/IR/VPIntrinsicTest.cpp | 8
-rw-r--r--  llvm/unittests/MIR/CMakeLists.txt | 1
-rw-r--r--  llvm/unittests/MIR/PassBuilderCallbacksTest.cpp | 216
-rw-r--r--  llvm/utils/TableGen/Attributes.cpp | 6
-rw-r--r--  llvm/utils/TableGen/DXILEmitter.cpp | 79
-rw-r--r--  llvm/utils/TableGen/DecoderEmitter.cpp | 19
-rw-r--r--  llvm/utils/TableGen/DisassemblerEmitter.cpp | 2
-rw-r--r--  llvm/utils/gn/secondary/lldb/tools/lldb-dap/BUILD.gn | 1
-rw-r--r--  llvm/utils/gn/secondary/llvm/lib/Target/Hexagon/BUILD.gn | 3
-rw-r--r--  llvm/utils/gn/secondary/llvm/unittests/CodeGen/BUILD.gn | 1
-rwxr-xr-x  llvm/utils/release/build_llvm_release.bat | 9
-rw-r--r--  mlir/CMakeLists.txt | 1
-rw-r--r--  mlir/include/mlir/Dialect/GPU/Transforms/Passes.h | 14
-rw-r--r--  mlir/include/mlir/Dialect/LLVMIR/LLVMIntrinsicOps.td | 6
-rw-r--r--  mlir/include/mlir/Dialect/LLVMIR/LLVMOps.td | 46
-rw-r--r--  mlir/include/mlir/Dialect/Math/Transforms/Passes.h | 3
-rw-r--r--  mlir/include/mlir/Dialect/SparseTensor/IR/Enums.h | 9
-rw-r--r--  mlir/include/mlir/Dialect/Tosa/IR/TosaOps.td | 40
-rw-r--r--  mlir/include/mlir/Dialect/Tosa/IR/TosaTypesBase.td | 2
-rw-r--r--  mlir/include/mlir/Dialect/Utils/IndexingUtils.h | 3
-rw-r--r--  mlir/include/mlir/ExecutionEngine/SparseTensor/Storage.h | 43
-rw-r--r--  mlir/include/mlir/ExecutionEngine/SparseTensorRuntime.h | 24
-rw-r--r--  mlir/include/mlir/Interfaces/FunctionInterfaces.td | 226
-rw-r--r--  mlir/include/mlir/Transforms/DialectConversion.h | 95
-rw-r--r--  mlir/lib/Conversion/MathToLLVM/MathToLLVM.cpp | 18
-rw-r--r--  mlir/lib/Conversion/TosaToLinalg/TosaToLinalg.cpp | 24
-rw-r--r--  mlir/lib/Dialect/ArmSME/Transforms/VectorLegalization.cpp | 28
-rw-r--r--  mlir/lib/Dialect/GPU/CMakeLists.txt | 52
-rw-r--r--  mlir/lib/Dialect/GPU/IR/GPUDialect.cpp | 11
-rw-r--r--  mlir/lib/Dialect/GPU/Transforms/SerializeToCubin.cpp | 180
-rw-r--r--  mlir/lib/Dialect/Linalg/IR/LinalgInterfaces.cpp | 5
-rw-r--r--  mlir/lib/Dialect/Math/Transforms/PolynomialApproximation.cpp | 10
-rw-r--r--  mlir/lib/Dialect/Mesh/Transforms/Spmdization.cpp | 45
-rw-r--r--  mlir/lib/Dialect/SCF/IR/SCF.cpp | 15
-rw-r--r--  mlir/lib/Dialect/Tosa/IR/TosaOps.cpp | 2
-rw-r--r--  mlir/lib/Dialect/Utils/IndexingUtils.cpp | 11
-rw-r--r--  mlir/lib/Dialect/Vector/Transforms/VectorEmulateNarrowType.cpp | 27
-rw-r--r--  mlir/lib/Dialect/Vector/Transforms/VectorTransferOpTransforms.cpp | 41
-rw-r--r--  mlir/lib/ExecutionEngine/SparseTensor/Storage.cpp | 7
-rw-r--r--  mlir/lib/ExecutionEngine/SparseTensorRuntime.cpp | 50
-rw-r--r--  mlir/lib/Target/LLVMIR/DebugImporter.cpp | 35
-rw-r--r--  mlir/lib/Target/LLVMIR/ModuleImport.cpp | 9
-rw-r--r--  mlir/lib/Transforms/Utils/DialectConversion.cpp | 1574
-rw-r--r--  mlir/test/CAPI/llvm.c | 3
-rw-r--r--  mlir/test/Conversion/MathToLLVM/math-to-llvm.mlir | 81
-rw-r--r--  mlir/test/Conversion/TosaToLinalg/tosa-to-linalg.mlir | 15
-rw-r--r--  mlir/test/Dialect/Linalg/canonicalize.mlir | 55
-rw-r--r--  mlir/test/Dialect/Linalg/fusion-elementwise-ops.mlir | 40
-rw-r--r--  mlir/test/Dialect/Linalg/invalid.mlir | 10
-rw-r--r--  mlir/test/Dialect/Mesh/spmdization.mlir | 14
-rw-r--r--  mlir/test/Dialect/Tosa/ops.mlir | 14
-rw-r--r--  mlir/test/Dialect/Vector/vector-rewrite-narrow-types.mlir | 82
-rw-r--r--  mlir/test/Dialect/Vector/vector-transfer-flatten.mlir | 38
-rw-r--r--  mlir/test/Integration/Dialect/Linalg/CPU/ArmSME/fill-2d.mlir | 9
-rw-r--r--  mlir/test/Integration/Dialect/Linalg/CPU/ArmSME/matmul-transpose-a.mlir | 9
-rw-r--r--  mlir/test/Integration/Dialect/Linalg/CPU/ArmSME/matmul.mlir | 8
-rw-r--r--  mlir/test/Integration/Dialect/Linalg/CPU/ArmSME/multi-tile-matmul.mlir | 6
-rw-r--r--  mlir/test/Integration/Dialect/Linalg/CPU/ArmSME/use-too-many-tiles.mlir | 7
-rw-r--r--  mlir/test/Integration/Dialect/Vector/CPU/ArmSME/load-store-128-bit-tile.mlir | 6
-rw-r--r--  mlir/test/Integration/Dialect/Vector/CPU/ArmSME/test-load-vertical.mlir | 6
-rw-r--r--  mlir/test/Integration/Dialect/Vector/CPU/ArmSME/test-multi-tile-transpose.mlir | 8
-rw-r--r--  mlir/test/Integration/Dialect/Vector/CPU/ArmSME/test-outerproduct-f16f16f32.mlir | 10
-rw-r--r--  mlir/test/Integration/Dialect/Vector/CPU/ArmSME/test-outerproduct-f32.mlir | 6
-rw-r--r--  mlir/test/Integration/Dialect/Vector/CPU/ArmSME/test-outerproduct-f64.mlir | 6
-rw-r--r--  mlir/test/Integration/Dialect/Vector/CPU/ArmSME/test-outerproduct-i8i8i32.mlir | 8
-rw-r--r--  mlir/test/Integration/Dialect/Vector/CPU/ArmSME/test-transfer-read-2d.mlir | 6
-rw-r--r--  mlir/test/Integration/Dialect/Vector/CPU/ArmSME/test-transfer-write-2d.mlir | 7
-rw-r--r--  mlir/test/Integration/Dialect/Vector/CPU/ArmSME/test-transpose.mlir | 6
-rw-r--r--  mlir/test/Integration/Dialect/Vector/CPU/ArmSME/tile_fill.mlir | 6
-rw-r--r--  mlir/test/Integration/Dialect/Vector/CPU/ArmSME/vector-load-store.mlir | 6
-rw-r--r--  mlir/test/Integration/Dialect/Vector/CPU/ArmSME/vector-ops.mlir | 5
-rw-r--r--  mlir/test/Integration/Dialect/Vector/CPU/ArmSVE/test-scalable-interleave.mlir | 25
-rw-r--r--  mlir/test/Integration/Dialect/Vector/CPU/test-interleave.mlir | 24
-rw-r--r--  mlir/test/Target/LLVMIR/Import/import-failure.ll | 40
-rw-r--r--  mlir/test/lib/Dialect/ArmSME/CMakeLists.txt | 18
-rw-r--r--  mlir/test/lib/Dialect/ArmSME/TestLowerToArmSME.cpp | 99
-rw-r--r--  mlir/test/lib/Dialect/CMakeLists.txt | 1
-rw-r--r--  mlir/test/lib/Dialect/Test/TestPatterns.cpp | 14
-rw-r--r--  mlir/tools/mlir-opt/CMakeLists.txt | 1
-rw-r--r--  mlir/tools/mlir-opt/mlir-opt.cpp | 2
-rw-r--r--  mlir/unittests/Dialect/SPIRV/SerializationTest.cpp | 6
-rw-r--r--  mlir/unittests/IR/InterfaceAttachmentTest.cpp | 2
-rw-r--r--  mlir/unittests/IR/OperationSupportTest.cpp | 4
-rw-r--r--  mlir/unittests/Target/LLVM/SerializeNVVMTarget.cpp | 8
-rw-r--r--  openmp/libomptarget/CMakeLists.txt | 9
-rw-r--r--  openmp/libomptarget/include/PluginManager.h | 14
-rw-r--r--  openmp/libomptarget/include/Shared/PluginAPI.inc | 72
-rw-r--r--  openmp/libomptarget/include/Shared/SourceInfo.h | 1
-rw-r--r--  openmp/libomptarget/include/omptarget.h | 6
-rw-r--r--  openmp/libomptarget/plugins-nextgen/common/CMakeLists.txt | 6
-rw-r--r--  openmp/libomptarget/plugins-nextgen/common/src/RPC.cpp | 3
-rw-r--r--  openmp/libomptarget/src/OffloadRTL.cpp | 38
-rw-r--r--  openmp/libomptarget/src/PluginManager.cpp | 6
-rw-r--r--  openmp/libomptarget/src/exports | 2
-rw-r--r--  openmp/libomptarget/src/interface.cpp | 20
-rw-r--r--  openmp/libomptarget/test/lit.cfg | 8
-rw-r--r--  openmp/libomptarget/test/offloading/runtime_init.c | 30
-rw-r--r--  utils/bazel/llvm-project-overlay/libc/BUILD.bazel | 3
-rw-r--r--  utils/bazel/llvm-project-overlay/mlir/BUILD.bazel | 71
-rw-r--r--  utils/bazel/llvm-project-overlay/mlir/test/BUILD.bazel | 19
657 files changed, 36382 insertions, 10844 deletions
diff --git a/.github/new-prs-labeler.yml b/.github/new-prs-labeler.yml
index 7a37a96d6e38..8ed976fbdddc 100644
--- a/.github/new-prs-labeler.yml
+++ b/.github/new-prs-labeler.yml
@@ -846,6 +846,26 @@ backend:PowerPC:
- clang/lib/Driver/ToolChains/Arch/PPC.*
- clang/test/CodeGen/PowerPC/**
+backend:SystemZ:
+ - llvm/include/llvm/BinaryFormat/ELFRelocs/SystemZ*
+ - llvm/include/llvm/BinaryFormat/GOFF.h
+ - llvm/include/llvm/IR/IntrinsicsSystemZ.td
+ - llvm/lib/Target/SystemZ/**
+ - llvm/test/Analysis/**/SystemZ/**
+ - llvm/test/CodeGen/SystemZ/**
+ - llvm/test/DebugInfo/SystemZ/**
+ - llvm/test/ExecutionEngine/**/SystemZ/**
+ - llvm/test/MC/Disassembler/SystemZ/**
+ - llvm/test/MC/GOFF/**
+ - llvm/test/MC/SystemZ/**
+ - llvm/test/Transforms/**/SystemZ/**
+ - clang/include/clang/Basic/BuiltinsSystemZ.*
+ - clang/lib/Basic/Targets/SystemZ.*
+ - clang/lib/CodeGen/Targets/SystemZ.cpp
+ - clang/lib/Driver/ToolChains/ZOS*
+ - clang/lib/Driver/ToolChains/Arch/SystemZ.*
+ - clang/test/CodeGen/SystemZ/**
+
third-party:unittests:
- third-party/unittests/**
diff --git a/bolt/include/bolt/Core/DIEBuilder.h b/bolt/include/bolt/Core/DIEBuilder.h
index f13d42ff4ab4..f0db924e2ccb 100644
--- a/bolt/include/bolt/Core/DIEBuilder.h
+++ b/bolt/include/bolt/Core/DIEBuilder.h
@@ -124,7 +124,7 @@ private:
std::vector<std::unique_ptr<DIEAbbrev>> Abbreviations;
BinaryContext &BC;
DWARFContext *DwarfContext{nullptr};
- bool IsDWO{false};
+ DWARFUnit *SkeletonCU{nullptr};
uint64_t UnitSize{0};
llvm::DenseSet<uint64_t> AllProcessed;
@@ -264,8 +264,12 @@ private:
/// current Section.
DIE *constructDIEFast(DWARFDie &DDie, DWARFUnit &U, uint32_t UnitId);
+ /// Returns true if this DIEBUilder is for DWO Unit.
+ bool isDWO() const { return SkeletonCU != nullptr; }
+
public:
- DIEBuilder(BinaryContext &BC, DWARFContext *DwarfContext, bool IsDWO = false);
+ DIEBuilder(BinaryContext &BC, DWARFContext *DwarfContext,
+ DWARFUnit *SkeletonCU = nullptr);
/// Returns enum to what we are currently processing.
ProcessingType getCurrentProcessingState() { return getState().Type; }
diff --git a/bolt/include/bolt/Core/DebugData.h b/bolt/include/bolt/Core/DebugData.h
index 31a636ba2ce6..48b813a4ca11 100644
--- a/bolt/include/bolt/Core/DebugData.h
+++ b/bolt/include/bolt/Core/DebugData.h
@@ -450,10 +450,16 @@ public:
return std::move(StrOffsetsBuffer);
}
-private:
/// Initializes Buffer and Stream.
void initialize(DWARFUnit &Unit);
+ /// Clear data.
+ void clear() {
+ IndexToAddressMap.clear();
+ StrOffsets.clear();
+ }
+
+private:
std::unique_ptr<DebugStrOffsetsBufferVector> StrOffsetsBuffer;
std::unique_ptr<raw_svector_ostream> StrOffsetsStream;
std::map<uint32_t, uint32_t> IndexToAddressMap;
diff --git a/bolt/lib/Core/DIEBuilder.cpp b/bolt/lib/Core/DIEBuilder.cpp
index 3c72c745086b..e6104b81bf6c 100644
--- a/bolt/lib/Core/DIEBuilder.cpp
+++ b/bolt/lib/Core/DIEBuilder.cpp
@@ -179,8 +179,8 @@ void DIEBuilder::constructFromUnit(DWARFUnit &DU) {
}
DIEBuilder::DIEBuilder(BinaryContext &BC, DWARFContext *DwarfContext,
- bool IsDWO)
- : BC(BC), DwarfContext(DwarfContext), IsDWO(IsDWO) {}
+ DWARFUnit *SkeletonCU)
+ : BC(BC), DwarfContext(DwarfContext), SkeletonCU(SkeletonCU) {}
static unsigned int getCUNum(DWARFContext *DwarfContext, bool IsDWO) {
unsigned int CUNum = IsDWO ? DwarfContext->getNumDWOCompileUnits()
@@ -204,11 +204,11 @@ void DIEBuilder::buildTypeUnits(DebugStrOffsetsWriter *StrOffsetWriter,
true);
}
}
- const unsigned int CUNum = getCUNum(DwarfContext, IsDWO);
+ const unsigned int CUNum = getCUNum(DwarfContext, isDWO());
getState().CloneUnitCtxMap.resize(CUNum);
DWARFContext::unit_iterator_range CU4TURanges =
- IsDWO ? DwarfContext->dwo_types_section_units()
- : DwarfContext->types_section_units();
+ isDWO() ? DwarfContext->dwo_types_section_units()
+ : DwarfContext->types_section_units();
getState().Type = ProcessingType::DWARF4TUs;
for (std::unique_ptr<DWARFUnit> &DU : CU4TURanges)
@@ -218,8 +218,8 @@ void DIEBuilder::buildTypeUnits(DebugStrOffsetsWriter *StrOffsetWriter,
constructFromUnit(*DU.get());
DWARFContext::unit_iterator_range CURanges =
- IsDWO ? DwarfContext->dwo_info_section_units()
- : DwarfContext->info_section_units();
+ isDWO() ? DwarfContext->dwo_info_section_units()
+ : DwarfContext->info_section_units();
// This handles DWARF4 CUs and DWARF5 CU/TUs.
// Creating a vector so that for reference handling only DWARF5 CU/TUs are
@@ -242,11 +242,11 @@ void DIEBuilder::buildCompileUnits(const bool Init) {
if (Init)
BuilderState.reset(new State());
- unsigned int CUNum = getCUNum(DwarfContext, IsDWO);
+ unsigned int CUNum = getCUNum(DwarfContext, isDWO());
getState().CloneUnitCtxMap.resize(CUNum);
DWARFContext::unit_iterator_range CURanges =
- IsDWO ? DwarfContext->dwo_info_section_units()
- : DwarfContext->info_section_units();
+ isDWO() ? DwarfContext->dwo_info_section_units()
+ : DwarfContext->info_section_units();
// This handles DWARF4 CUs and DWARF5 CU/TUs.
// Creating a vector so that for reference handling only DWARF5 CU/TUs are
diff --git a/bolt/lib/Core/DebugData.cpp b/bolt/lib/Core/DebugData.cpp
index 2942f0b9190f..a75016ede309 100644
--- a/bolt/lib/Core/DebugData.cpp
+++ b/bolt/lib/Core/DebugData.cpp
@@ -909,8 +909,7 @@ void DebugStrOffsetsWriter::finalizeSection(DWARFUnit &Unit,
}
StrOffsetSectionWasModified = false;
- IndexToAddressMap.clear();
- StrOffsets.clear();
+ clear();
}
void DebugStrWriter::create() {
diff --git a/bolt/lib/Rewrite/DWARFRewriter.cpp b/bolt/lib/Rewrite/DWARFRewriter.cpp
index a77f401c64c8..849c363730eb 100644
--- a/bolt/lib/Rewrite/DWARFRewriter.cpp
+++ b/bolt/lib/Rewrite/DWARFRewriter.cpp
@@ -709,7 +709,7 @@ void DWARFRewriter::updateDebugInfo() {
: LegacyRangesSectionWriter.get();
// Skipping CUs that failed to load.
if (SplitCU) {
- DIEBuilder DWODIEBuilder(BC, &(*SplitCU)->getContext(), true);
+ DIEBuilder DWODIEBuilder(BC, &(*SplitCU)->getContext(), Unit);
DWODIEBuilder.buildDWOUnit(**SplitCU);
std::string DWOName = updateDWONameCompDir(
*Unit, *DIEBlder, *DIEBlder->getUnitDIEbyUnit(*Unit));
diff --git a/clang-tools-extra/clangd/refactor/Rename.cpp b/clang-tools-extra/clangd/refactor/Rename.cpp
index 650862c99bcd..4e135801f685 100644
--- a/clang-tools-extra/clangd/refactor/Rename.cpp
+++ b/clang-tools-extra/clangd/refactor/Rename.cpp
@@ -811,8 +811,18 @@ renameWithinFile(ParsedAST &AST, const NamedDecl &RenameDecl,
continue;
Locs.push_back(RenameLoc);
}
- if (const auto *MD = dyn_cast<ObjCMethodDecl>(&RenameDecl))
- return renameObjCMethodWithinFile(AST, MD, NewName, std::move(Locs));
+ if (const auto *MD = dyn_cast<ObjCMethodDecl>(&RenameDecl)) {
+ // The custom ObjC selector logic doesn't handle the zero arg selector
+ // case, as it relies on parsing selectors via the trailing `:`.
+ // We also choose to use regular rename logic for the single-arg selectors
+ // as the AST/Index has the right locations in that case.
+ if (MD->getSelector().getNumArgs() > 1)
+ return renameObjCMethodWithinFile(AST, MD, NewName, std::move(Locs));
+
+ // Eat trailing : for single argument methods since they're actually
+ // considered a separate token during rename.
+ NewName.consume_back(":");
+ }
for (const auto &Loc : Locs) {
if (auto Err = FilteredChanges.add(tooling::Replacement(
SM, CharSourceRange::getTokenRange(Loc), NewName)))
@@ -930,10 +940,9 @@ renameOutsideFile(const NamedDecl &RenameDecl, llvm::StringRef MainFilePath,
std::optional<Selector> Selector = std::nullopt;
llvm::SmallVector<llvm::StringRef, 8> NewNames;
if (const auto *MD = dyn_cast<ObjCMethodDecl>(&RenameDecl)) {
- if (MD->getSelector().getNumArgs() > 1) {
- RenameIdentifier = MD->getSelector().getNameForSlot(0).str();
+ RenameIdentifier = MD->getSelector().getNameForSlot(0).str();
+ if (MD->getSelector().getNumArgs() > 1)
Selector = MD->getSelector();
- }
}
NewName.split(NewNames, ":");
diff --git a/clang-tools-extra/clangd/unittests/ClangdLSPServerTests.cpp b/clang-tools-extra/clangd/unittests/ClangdLSPServerTests.cpp
index 555c4c574998..75a140767035 100644
--- a/clang-tools-extra/clangd/unittests/ClangdLSPServerTests.cpp
+++ b/clang-tools-extra/clangd/unittests/ClangdLSPServerTests.cpp
@@ -11,6 +11,7 @@
#include "ClangdServer.h"
#include "ConfigProvider.h"
#include "Diagnostics.h"
+#include "Feature.h"
#include "FeatureModule.h"
#include "LSPBinder.h"
#include "LSPClient.h"
@@ -198,6 +199,9 @@ TEST_F(LSPTest, RecordsLatencies) {
// clang-tidy's renames are converted to clangd's internal rename functionality,
// see clangd#1589 and clangd#741
TEST_F(LSPTest, ClangTidyRename) {
+ // This test requires clang-tidy checks to be linked in.
+ if (!CLANGD_TIDY_CHECKS)
+ return;
Annotations Header(R"cpp(
void [[foo]]();
)cpp");
@@ -214,7 +218,9 @@ TEST_F(LSPTest, ClangTidyRename) {
Client.didOpen("foo.hpp", Header.code());
Client.didOpen("foo.cpp", Source.code());
- auto RenameDiag = Client.diagnostics("foo.cpp").value().at(0);
+ auto Diags = Client.diagnostics("foo.cpp");
+ ASSERT_TRUE(Diags && !Diags->empty());
+ auto RenameDiag = Diags->front();
auto RenameCommand =
(*Client
diff --git a/clang-tools-extra/clangd/unittests/RenameTests.cpp b/clang-tools-extra/clangd/unittests/RenameTests.cpp
index d91ef85d6727..7d9252110b27 100644
--- a/clang-tools-extra/clangd/unittests/RenameTests.cpp
+++ b/clang-tools-extra/clangd/unittests/RenameTests.cpp
@@ -1943,6 +1943,144 @@ TEST(CrossFileRenameTests, WithUpToDateIndex) {
}
}
+TEST(CrossFileRenameTests, ObjC) {
+ MockCompilationDatabase CDB;
+ CDB.ExtraClangFlags = {"-xobjective-c"};
+ // rename is runnning on all "^" points in FooH.
+ struct Case {
+ llvm::StringRef FooH;
+ llvm::StringRef FooM;
+ llvm::StringRef NewName;
+ llvm::StringRef ExpectedFooH;
+ llvm::StringRef ExpectedFooM;
+ };
+ Case Cases[] = {// --- Zero arg selector
+ {
+ // Input
+ R"cpp(
+ @interface Foo
+ - (int)performA^ction;
+ @end
+ )cpp",
+ R"cpp(
+ @implementation Foo
+ - (int)performAction {
+ [self performAction];
+ }
+ @end
+ )cpp",
+ // New name
+ "performNewAction",
+ // Expected
+ R"cpp(
+ @interface Foo
+ - (int)performNewAction;
+ @end
+ )cpp",
+ R"cpp(
+ @implementation Foo
+ - (int)performNewAction {
+ [self performNewAction];
+ }
+ @end
+ )cpp",
+ },
+ // --- Single arg selector
+ {
+ // Input
+ R"cpp(
+ @interface Foo
+ - (int)performA^ction:(int)action;
+ @end
+ )cpp",
+ R"cpp(
+ @implementation Foo
+ - (int)performAction:(int)action {
+ [self performAction:action];
+ }
+ @end
+ )cpp",
+ // New name
+ "performNewAction:",
+ // Expected
+ R"cpp(
+ @interface Foo
+ - (int)performNewAction:(int)action;
+ @end
+ )cpp",
+ R"cpp(
+ @implementation Foo
+ - (int)performNewAction:(int)action {
+ [self performNewAction:action];
+ }
+ @end
+ )cpp",
+ },
+ // --- Multi arg selector
+ {
+ // Input
+ R"cpp(
+ @interface Foo
+ - (int)performA^ction:(int)action with:(int)value;
+ @end
+ )cpp",
+ R"cpp(
+ @implementation Foo
+ - (int)performAction:(int)action with:(int)value {
+ [self performAction:action with:value];
+ }
+ @end
+ )cpp",
+ // New name
+ "performNewAction:by:",
+ // Expected
+ R"cpp(
+ @interface Foo
+ - (int)performNewAction:(int)action by:(int)value;
+ @end
+ )cpp",
+ R"cpp(
+ @implementation Foo
+ - (int)performNewAction:(int)action by:(int)value {
+ [self performNewAction:action by:value];
+ }
+ @end
+ )cpp",
+ }};
+
+ trace::TestTracer Tracer;
+ for (const auto &T : Cases) {
+ SCOPED_TRACE(T.FooH);
+ Annotations FooH(T.FooH);
+ Annotations FooM(T.FooM);
+ std::string FooHPath = testPath("foo.h");
+ std::string FooMPath = testPath("foo.m");
+
+ MockFS FS;
+ FS.Files[FooHPath] = std::string(FooH.code());
+ FS.Files[FooMPath] = std::string(FooM.code());
+
+ auto ServerOpts = ClangdServer::optsForTest();
+ ServerOpts.BuildDynamicSymbolIndex = true;
+ ClangdServer Server(CDB, FS, ServerOpts);
+
+ // Add all files to clangd server to make sure the dynamic index has been
+ // built.
+ runAddDocument(Server, FooHPath, FooH.code());
+ runAddDocument(Server, FooMPath, FooM.code());
+
+ for (const auto &RenamePos : FooH.points()) {
+ EXPECT_THAT(Tracer.takeMetric("rename_files"), SizeIs(0));
+ auto FileEditsList =
+ llvm::cantFail(runRename(Server, FooHPath, RenamePos, T.NewName, {}));
+ EXPECT_THAT(Tracer.takeMetric("rename_files"), ElementsAre(2));
+ EXPECT_THAT(applyEdits(std::move(FileEditsList.GlobalChanges)),
+ UnorderedElementsAre(Pair(Eq(FooHPath), Eq(T.ExpectedFooH)),
+ Pair(Eq(FooMPath), Eq(T.ExpectedFooM))));
+ }
+ }
+}
+
TEST(CrossFileRenameTests, CrossFileOnLocalSymbol) {
// cross-file rename should work for function-local symbols, even there is no
// index provided.
diff --git a/clang-tools-extra/include-cleaner/lib/WalkAST.cpp b/clang-tools-extra/include-cleaner/lib/WalkAST.cpp
index 6c4d9b7862d9..277e6ec5b089 100644
--- a/clang-tools-extra/include-cleaner/lib/WalkAST.cpp
+++ b/clang-tools-extra/include-cleaner/lib/WalkAST.cpp
@@ -128,6 +128,11 @@ public:
bool VisitDeclRefExpr(DeclRefExpr *DRE) {
auto *FD = DRE->getFoundDecl();
+ // Prefer the underlying decl if FoundDecl isn't a shadow decl, e.g:
+ // - For templates, found-decl is always primary template, but we want the
+ // specializaiton itself.
+ if (!llvm::isa<UsingShadowDecl>(FD))
+ FD = DRE->getDecl();
// For refs to non-meber-like decls, use the found decl.
// For member-like decls, we should have a reference from the qualifier to
// the container decl instead, which is preferred as it'll handle
diff --git a/clang-tools-extra/include-cleaner/unittests/WalkASTTest.cpp b/clang-tools-extra/include-cleaner/unittests/WalkASTTest.cpp
index bdfc24b8edee..e238dc3d902b 100644
--- a/clang-tools-extra/include-cleaner/unittests/WalkASTTest.cpp
+++ b/clang-tools-extra/include-cleaner/unittests/WalkASTTest.cpp
@@ -213,6 +213,8 @@ TEST(WalkAST, VarTemplates) {
template<typename T> T* $explicit^Foo<T*> = nullptr;)cpp",
"int *x = ^Foo<int *>;"),
ElementsAre(Decl::VarTemplateSpecialization));
+ // Implicit specializations through explicit instantiations has source
+ // locations pointing at the primary template.
EXPECT_THAT(testWalk(R"cpp(
template<typename T> T $explicit^Foo = 0;
template int Foo<int>;)cpp",
@@ -239,18 +241,19 @@ TEST(WalkAST, FunctionTemplates) {
EXPECT_THAT(testWalk(R"cpp(
template <typename T> void $explicit^foo() {})cpp",
"auto x = []{ ^foo<int>(); };"),
- ElementsAre(Decl::FunctionTemplate));
- // FIXME: DeclRefExpr points at primary template, not the specialization.
+ ElementsAre(Decl::Function));
EXPECT_THAT(testWalk(R"cpp(
- template<typename T> void $explicit^foo() {}
- template<> void foo<int>(){})cpp",
+ template<typename T> void foo() {}
+ template<> void $explicit^foo<int>(){})cpp",
"auto x = []{ ^foo<int>(); };"),
- ElementsAre(Decl::FunctionTemplate));
+ ElementsAre(Decl::Function));
+ // The decl is actually the specialization, but explicit instantations point
+ // at the primary template.
EXPECT_THAT(testWalk(R"cpp(
template<typename T> void $explicit^foo() {};
template void foo<int>();)cpp",
"auto x = [] { ^foo<int>(); };"),
- ElementsAre(Decl::FunctionTemplate));
+ ElementsAre(Decl::Function));
}
TEST(WalkAST, TemplateSpecializationsFromUsingDecl) {
// Class templates
@@ -548,7 +551,8 @@ TEST(WalkAST, Concepts) {
testWalk(Concept, "template<typename T> void func() requires ^Foo<T> {}");
testWalk(Concept, "void func(^Foo auto x) {}");
// FIXME: Foo should be explicitly referenced.
- testWalk("template<typename T> concept Foo = true;", "void func() { ^Foo auto x = 1; }");
+ testWalk("template<typename T> concept Foo = true;",
+ "void func() { ^Foo auto x = 1; }");
}
TEST(WalkAST, FriendDecl) {
diff --git a/clang/docs/HLSL/ExpectedDifferences.rst b/clang/docs/HLSL/ExpectedDifferences.rst
new file mode 100644
index 000000000000..60001b22dc79
--- /dev/null
+++ b/clang/docs/HLSL/ExpectedDifferences.rst
@@ -0,0 +1,110 @@
+
+Expected Differences vs DXC and FXC
+===================================
+
+.. contents::
+ :local:
+
+Introduction
+============
+
+HLSL currently has two reference compilers, the `DirectX Shader Compiler (DXC)
+<https://github.com/microsoft/DirectXShaderCompiler/>`_ and the
+`Effect-Compiler (FXC) <https://learn.microsoft.com/en-us/windows/win32/direct3dtools/fxc>`_.
+The two reference compilers do not fully agree. Some known disagreements in the
+references are tracked on
+`DXC's GitHub
+<https://github.com/microsoft/DirectXShaderCompiler/issues?q=is%3Aopen+is%3Aissue+label%3Afxc-disagrees>`_,
+but many more are known to exist.
+
+HLSL as implemented by Clang will also not fully match either of the reference
+implementations; instead, it is being written to match the `draft language
+specification <https://microsoft.github.io/hlsl-specs/specs/hlsl.pdf>`_.
+
+This document is a non-exhaustive collection of the known differences between
+Clang's implementation of HLSL and the existing reference compilers.
+
+General Principles
+------------------
+
+Most of the intended differences between Clang and the earlier reference
+compilers are focused on increased consistency and correctness. Neither reference
+compiler applies the language rules consistently across all contexts.
+
+Clang also deviates from the reference compilers by providing different
+diagnostics, both in terms of the textual messages and the contexts in which
+diagnostics are produced. While striving for a high level of source
+compatibility with conforming HLSL code, Clang may produce earlier and more
+robust diagnostics for incorrect code or reject code that a reference compiler
+incorrectly accepted.
+
+Language Version
+================
+
+Clang targets language compatibility for HLSL 2021 as implemented by DXC.
+Language features that were removed in earlier versions of HLSL may be added on
+a case-by-case basis, but are not planned for the initial implementation.
+
+Overload Resolution
+===================
+
+Clang's HLSL implementation adopts C++ overload resolution rules as proposed for
+HLSL 202x based on proposal
+`0007 <https://github.com/microsoft/hlsl-specs/blob/main/proposals/0007-const-instance-methods.md>`_
+and
+`0008 <https://github.com/microsoft/hlsl-specs/blob/main/proposals/0008-non-member-operator-overloading.md>`_.
+
+Clang's implementation extends standard overload resolution rules to HLSL
+library functionality. This causes subtle changes in overload resolution
+behavior between Clang and DXC. Some examples include:
+
+.. code-block:: c++
+
+ void halfOrInt16(half H);
+ void halfOrInt16(uint16_t U);
+ void halfOrInt16(int16_t I);
+
+ void takesDoubles(double, double, double);
+
+ cbuffer CB {
+ uint U;
+ int I;
+ float X, Y, Z;
+ double3 A, B;
+ }
+
+ export void call() {
+ halfOrInt16(U); // DXC: Fails with call ambiguous between int16_t and uint16_t overloads
+ // Clang: Resolves to halfOrInt16(uint16_t).
+ halfOrInt16(I); // All: Resolves to halfOrInt16(int16_t).
+ half H;
+ #ifndef IGNORE_ERRORS
+ // asfloat16 is a builtin with overloads for half, int16_t, and uint16_t.
+ H = asfloat16(I); // DXC: Fails to resolve overload for int.
+ // Clang: Resolves to asfloat16(int16_t).
+ H = asfloat16(U); // DXC: Fails to resolve overload for int.
+ // Clang: Resolves to asfloat16(uint16_t).
+ #endif
+ H = asfloat16(0x01); // DXC: Resolves to asfloat16(half).
+ // Clang: Resolves to asfloat16(uint16_t).
+
+ takesDoubles(X, Y, Z); // Works on all compilers
+ #ifndef IGNORE_ERRORS
+ fma(X, Y, Z); // DXC: Fails to resolve no known conversion from float to double.
+ // Clang: Resolves to fma(double,double,double).
+ #endif
+
+ double D = dot(A, B); // DXC: Resolves to dot(double3, double3), fails DXIL Validation.
+ // FXC: Expands to compute double dot product with fmul/fadd
+ // Clang: Resolves to dot(float3, float3), emits conversion warnings.
+
+ }
+
+.. note::
+
+ In Clang, a conscious decision was made to exclude the ``dot(vector<double,N>, vector<double,N>)``
+ overload and allow overload resolution to resolve the
+ ``vector<float,N>`` overload. This approach provides a ``-Wconversion``
+ diagnostic notifying the user of the conversion rather than silently altering
+ precision relative to the other overloads (as FXC does) or generating code
+ that will fail validation (as DXC does).
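As a hedged illustration of the note above (not part of the patch; it reuses the ``double3`` values ``A`` and ``B`` from the example), callers who want the conversion to be explicit, or who want full double precision back, can write it out themselves, which also avoids the ``-Wconversion`` diagnostic:

  // Sketch only: explicit casts make the narrowing intentional.
  float  DF = dot((float3)A, (float3)B);
  // Manual double-precision dot product, similar to what FXC expands to.
  double DD = (double)A.x * B.x + (double)A.y * B.y + (double)A.z * B.z;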
diff --git a/clang/docs/HLSL/HLSLDocs.rst b/clang/docs/HLSL/HLSLDocs.rst
index 1f232129548d..97b2425f013b 100644
--- a/clang/docs/HLSL/HLSLDocs.rst
+++ b/clang/docs/HLSL/HLSLDocs.rst
@@ -11,6 +11,7 @@ HLSL Design and Implementation
.. toctree::
:maxdepth: 1
+ ExpectedDifferences
HLSLIRReference
ResourceTypes
EntryFunctions
diff --git a/clang/docs/ReleaseNotes.rst b/clang/docs/ReleaseNotes.rst
index ef2d9b8e46ae..529dd783ab73 100644
--- a/clang/docs/ReleaseNotes.rst
+++ b/clang/docs/ReleaseNotes.rst
@@ -62,21 +62,6 @@ Clang Frontend Potentially Breaking Changes
of ``-Wno-gnu-binary-literal`` will no longer silence this pedantic warning,
which may break existing uses with ``-Werror``.
-Target OS macros extension
-^^^^^^^^^^^^^^^^^^^^^^^^^^
-A new Clang extension (see :ref:`here <target_os_detail>`) is enabled for
-Darwin (Apple platform) targets. Clang now defines ``TARGET_OS_*`` macros for
-these targets, which could break existing code bases with improper checks for
-the ``TARGET_OS_`` macros. For example, existing checks might fail to include
-the ``TargetConditionals.h`` header from Apple SDKs and therefore leaving the
-macros undefined and guarded code unexercised.
-
-Affected code should be checked to see if it's still intended for the specific
-target and fixed accordingly.
-
-The extension can be turned off by the option ``-fno-define-target-os-macros``
-as a workaround.
-
What's New in Clang |release|?
==============================
Some of the major new features and improvements to Clang are listed
@@ -98,8 +83,6 @@ C++20 Feature Support
- Implemented the `__is_layout_compatible` intrinsic to support
`P0466R5: Layout-compatibility and Pointer-interconvertibility Traits <https://wg21.link/P0466R5>`_.
- Note: `CWG2759: [[no_unique_address] and common initial sequence <https://cplusplus.github.io/CWG/issues/2759.html>`_
- is not yet implemented.
C++23 Feature Support
^^^^^^^^^^^^^^^^^^^^^
@@ -123,6 +106,10 @@ Resolutions to C++ Defect Reports
of two types.
(`CWG1719: Layout compatibility and cv-qualification revisited <https://cplusplus.github.io/CWG/issues/1719.html>`_).
+- ``[[no_unique_address]]`` is now respected when evaluating layout
+ compatibility of two types.
+ (`CWG2759: [[no_unique_address]] and common initial sequence <https://cplusplus.github.io/CWG/issues/2759.html>`_).
+
C Language Changes
------------------
@@ -161,17 +148,6 @@ Non-comprehensive list of changes in this release
New Compiler Flags
------------------
-.. _target_os_detail:
-
-Target OS macros extension
-^^^^^^^^^^^^^^^^^^^^^^^^^^
-A pair of new flags ``-fdefine-target-os-macros`` and
-``-fno-define-target-os-macros`` has been added to Clang to enable/disable the
-extension to provide built-in definitions of a list of ``TARGET_OS_*`` macros
-based on the target triple.
-
-The extension is enabled by default for Darwin (Apple platform) targets.
-
Deprecated Compiler Flags
-------------------------
@@ -296,11 +272,15 @@ Bug Fixes to C++ Support
local variable, which is supported as a C11 extension in C++. Previously, it
was only accepted at namespace scope but not at local function scope.
- Clang no longer tries to call consteval constructors at runtime when they appear in a member initializer.
- (`#782154 <https://github.com/llvm/llvm-project/issues/82154>`_`)
+ (`#82154 <https://github.com/llvm/llvm-project/issues/82154>`_)
- Fix crash when using an immediate-escalated function at global scope.
(`#82258 <https://github.com/llvm/llvm-project/issues/82258>`_)
- Correctly immediate-escalate lambda conversion functions.
(`#82258 <https://github.com/llvm/llvm-project/issues/82258>`_)
+- Fixed an issue where the template parameters of a nested abbreviated generic lambda
+ within a requires-clause would lie at the same depth as those of the surrounding
+ lambda, which in turn resulted in the wrong template argument substitution during
+ constraint checking.
+ (`#78524 <https://github.com/llvm/llvm-project/issues/78524>`_)
Bug Fixes to AST Handling
^^^^^^^^^^^^^^^^^^^^^^^^^
@@ -326,6 +306,17 @@ X86 Support
Arm and AArch64 Support
^^^^^^^^^^^^^^^^^^^^^^^
+- ARMv7+ targets now default to allowing unaligned access, except Armv6-M, and
+ Armv8-M without the Main Extension. Baremetal targets should check that the
+ new default will work with their system configurations, since it requires
+ that SCTLR.A is 0, SCTLR.U is 1, and that the memory in question is
+ configured as "normal" memory. This brings Clang in line with the default
+ settings for GCC and Arm Compiler. Aside from making Clang align with other
+ compilers, changing the default brings major performance and code size
+ improvements for most targets. We have not changed the default behavior for
+ ARMv6, but may revisit that decision in the future. Users can restore the old
+ behavior with ``-mno-unaligned-access`` (see the illustrative sketch below).
+
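For readers checking whether baremetal code is affected, a minimal sketch (an assumed example, not from the release notes): a misaligned field access like the one below may now be lowered to a single unaligned load on ARMv7+, while building with ``-mno-unaligned-access`` restores the old byte-by-byte expansion.

  // Illustrative only: 'value' is misaligned inside the packed struct.
  struct __attribute__((packed)) Header { char tag; int value; };
  int read_value(const struct Header *h) {
    return h->value; // new default: one (unaligned) load; old default: byte loads
  }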
Android Support
^^^^^^^^^^^^^^^
diff --git a/clang/docs/StandardCPlusPlusModules.rst b/clang/docs/StandardCPlusPlusModules.rst
index 0347ff077fdb..c5478bba45f3 100644
--- a/clang/docs/StandardCPlusPlusModules.rst
+++ b/clang/docs/StandardCPlusPlusModules.rst
@@ -868,6 +868,9 @@ headers to:
...
#endif
+If the modules imported by your library provide such headers as well, remember to add
+them to your ``your_library_imported.h`` too.
+
Importing modules
~~~~~~~~~~~~~~~~~
diff --git a/clang/docs/analyzer/checkers.rst b/clang/docs/analyzer/checkers.rst
index 510629d8a2d4..899622ae283b 100644
--- a/clang/docs/analyzer/checkers.rst
+++ b/clang/docs/analyzer/checkers.rst
@@ -213,9 +213,8 @@ Check for undefined results of binary operators.
core.VLASize (C)
""""""""""""""""
-Check for declarations of Variable Length Arrays of undefined or zero size.
-
- Check for declarations of VLA of undefined or zero size.
+Check for declarations of Variable Length Arrays (VLA) of undefined, zero, or
+negative size.
.. code-block:: c
@@ -229,6 +228,28 @@ Check for declarations of Variable Length Arrays of undefined or zero size.
int vla2[x]; // warn: zero size
}
+
+The checker also gives a warning if the `TaintPropagation` checker is switched on
+and an unbound, attacker-controlled (tainted) value is used to define
+the size of the VLA.
+
+.. code-block:: c
+
+ void taintedVLA(void) {
+ int x;
+ scanf("%d", &x);
+ int vla[x]; // Declared variable-length array (VLA) has tainted (attacker controlled) size, that can be 0 or negative
+ }
+
+ void taintedVerifiedVLA(void) {
+ int x;
+ scanf("%d", &x);
+ if (x<1)
+ return;
+ int vla[x]; // no-warning. The analyzer can prove that x must be positive.
+ }
+
+
.. _core-uninitialized-ArraySubscript:
core.uninitialized.ArraySubscript (C)
diff --git a/clang/include/clang/Basic/DiagnosticFrontendKinds.td b/clang/include/clang/Basic/DiagnosticFrontendKinds.td
index b1a282f5164a..dcd2c19fb7ee 100644
--- a/clang/include/clang/Basic/DiagnosticFrontendKinds.td
+++ b/clang/include/clang/Basic/DiagnosticFrontendKinds.td
@@ -279,6 +279,10 @@ def err_builtin_needs_feature : Error<"%0 needs target feature %1">;
def err_function_needs_feature : Error<
"always_inline function %1 requires target feature '%2', but would "
"be inlined into function %0 that is compiled without support for '%2'">;
+def err_function_always_inline_attribute_mismatch : Error<
+ "always_inline function %1 and its caller %0 have mismatching %2 attributes">;
+def err_function_always_inline_new_za : Error<
+ "always_inline function %0 has new za state">;
def warn_avx_calling_convention
: Warning<"AVX vector %select{return|argument}0 of type %1 without '%2' "
diff --git a/clang/include/clang/Basic/DiagnosticSemaKinds.td b/clang/include/clang/Basic/DiagnosticSemaKinds.td
index 11411883e1bf..a7f2858477be 100644
--- a/clang/include/clang/Basic/DiagnosticSemaKinds.td
+++ b/clang/include/clang/Basic/DiagnosticSemaKinds.td
@@ -10364,6 +10364,8 @@ def err_x86_builtin_tile_arg_duplicate : Error<
def err_builtin_target_unsupported : Error<
"builtin is not supported on this target">;
+def err_builtin_aix_os_unsupported : Error<
+ "this builtin is available only on AIX 7.2 and later operating systems">;
def err_builtin_longjmp_unsupported : Error<
"__builtin_longjmp is not supported for the current target">;
def err_builtin_setjmp_unsupported : Error<
@@ -12201,4 +12203,6 @@ def warn_acc_clause_unimplemented
def err_acc_construct_appertainment
: Error<"OpenACC construct '%0' cannot be used here; it can only "
"be used in a statement context">;
+def err_acc_branch_in_out
+ : Error<"invalid branch %select{out of|into}0 OpenACC Compute Construct">;
} // end of sema component.
diff --git a/clang/include/clang/Basic/arm_sme.td b/clang/include/clang/Basic/arm_sme.td
index 2da0e8d2aba9..1ac6d5170ea2 100644
--- a/clang/include/clang/Basic/arm_sme.td
+++ b/clang/include/clang/Basic/arm_sme.td
@@ -142,7 +142,7 @@ let TargetGuard = "sme" in {
def SVZERO_MASK_ZA : SInst<"svzero_mask_za", "vi", "", MergeNone, "aarch64_sme_zero",
[IsOverloadNone, IsStreamingCompatible, IsInOutZA],
[ImmCheck<0, ImmCheck0_255>]>;
- def SVZERO_ZA : SInst<"svzero_za", "v", "", MergeNone, "aarch64_sme_zero",
+ def SVZERO_ZA : SInst<"svzero_za", "vv", "", MergeNone, "aarch64_sme_zero",
[IsOverloadNone, IsStreamingCompatible, IsOutZA]>;
}
diff --git a/clang/include/clang/Driver/ToolChain.h b/clang/include/clang/Driver/ToolChain.h
index 2d0c1f826c17..fbe2e8fe8e88 100644
--- a/clang/include/clang/Driver/ToolChain.h
+++ b/clang/include/clang/Driver/ToolChain.h
@@ -120,6 +120,11 @@ public:
RM_Disabled,
};
+ enum ExceptionsMode {
+ EM_Enabled,
+ EM_Disabled,
+ };
+
struct BitCodeLibraryInfo {
std::string Path;
bool ShouldInternalize;
@@ -141,6 +146,8 @@ private:
const RTTIMode CachedRTTIMode;
+ const ExceptionsMode CachedExceptionsMode;
+
/// The list of toolchain specific path prefixes to search for libraries.
path_list LibraryPaths;
@@ -318,6 +325,9 @@ public:
// Returns the RTTIMode for the toolchain with the current arguments.
RTTIMode getRTTIMode() const { return CachedRTTIMode; }
+ // Returns the ExceptionsMode for the toolchain with the current arguments.
+ ExceptionsMode getExceptionsMode() const { return CachedExceptionsMode; }
+
/// Return any implicit target and/or mode flag for an invocation of
/// the compiler driver as `ProgName`.
///
diff --git a/clang/include/clang/Frontend/ASTUnit.h b/clang/include/clang/Frontend/ASTUnit.h
index 6af712afdcb6..a2c1b25dd224 100644
--- a/clang/include/clang/Frontend/ASTUnit.h
+++ b/clang/include/clang/Frontend/ASTUnit.h
@@ -691,16 +691,19 @@ public:
/// lifetime is expected to extend past that of the returned ASTUnit.
///
/// \returns - The initialized ASTUnit or null if the AST failed to load.
- static std::unique_ptr<ASTUnit> LoadFromASTFile(
- const std::string &Filename, const PCHContainerReader &PCHContainerRdr,
- WhatToLoad ToLoad, IntrusiveRefCntPtr<DiagnosticsEngine> Diags,
- const FileSystemOptions &FileSystemOpts,
- std::shared_ptr<HeaderSearchOptions> HSOpts, bool OnlyLocalDecls = false,
- CaptureDiagsKind CaptureDiagnostics = CaptureDiagsKind::None,
- bool AllowASTWithCompilerErrors = false,
- bool UserFilesAreVolatile = false,
- IntrusiveRefCntPtr<llvm::vfs::FileSystem> VFS =
- llvm::vfs::getRealFileSystem());
+ static std::unique_ptr<ASTUnit>
+ LoadFromASTFile(const std::string &Filename,
+ const PCHContainerReader &PCHContainerRdr, WhatToLoad ToLoad,
+ IntrusiveRefCntPtr<DiagnosticsEngine> Diags,
+ const FileSystemOptions &FileSystemOpts,
+ std::shared_ptr<HeaderSearchOptions> HSOpts,
+ std::shared_ptr<LangOptions> LangOpts = nullptr,
+ bool OnlyLocalDecls = false,
+ CaptureDiagsKind CaptureDiagnostics = CaptureDiagsKind::None,
+ bool AllowASTWithCompilerErrors = false,
+ bool UserFilesAreVolatile = false,
+ IntrusiveRefCntPtr<llvm::vfs::FileSystem> VFS =
+ llvm::vfs::getRealFileSystem());
private:
/// Helper function for \c LoadFromCompilerInvocation() and
diff --git a/clang/include/clang/Frontend/CompilerInstance.h b/clang/include/clang/Frontend/CompilerInstance.h
index ac2f940769fb..b97d0c636806 100644
--- a/clang/include/clang/Frontend/CompilerInstance.h
+++ b/clang/include/clang/Frontend/CompilerInstance.h
@@ -311,6 +311,9 @@ public:
LangOptions &getLangOpts() { return Invocation->getLangOpts(); }
const LangOptions &getLangOpts() const { return Invocation->getLangOpts(); }
+ std::shared_ptr<LangOptions> getLangOptsPtr() const {
+ return Invocation->getLangOptsPtr();
+ }
PreprocessorOptions &getPreprocessorOpts() {
return Invocation->getPreprocessorOpts();
diff --git a/clang/include/clang/Frontend/CompilerInvocation.h b/clang/include/clang/Frontend/CompilerInvocation.h
index c6528779bde7..8fc51e6ec03b 100644
--- a/clang/include/clang/Frontend/CompilerInvocation.h
+++ b/clang/include/clang/Frontend/CompilerInvocation.h
@@ -271,6 +271,7 @@ public:
std::shared_ptr<PreprocessorOptions> getPreprocessorOptsPtr() {
return PPOpts;
}
+ std::shared_ptr<LangOptions> getLangOptsPtr() { return LangOpts; }
/// @}
/// Create a compiler invocation from a list of input options.
diff --git a/clang/include/clang/InstallAPI/HeaderFile.h b/clang/include/clang/InstallAPI/HeaderFile.h
index 6ccd944f8b01..fc64a43b3def 100644
--- a/clang/include/clang/InstallAPI/HeaderFile.h
+++ b/clang/include/clang/InstallAPI/HeaderFile.h
@@ -21,6 +21,8 @@
namespace clang::installapi {
enum class HeaderType {
+ /// Unset or unknown type.
+ Unknown,
/// Represents declarations accessible to all clients.
Public,
/// Represents declarations accessible to a disclosed set of clients.
@@ -41,6 +43,7 @@ class HeaderFile {
std::optional<clang::Language> Language;
public:
+ HeaderFile() = delete;
HeaderFile(StringRef FullPath, HeaderType Type,
StringRef IncludeName = StringRef(),
std::optional<clang::Language> Language = std::nullopt)
diff --git a/clang/include/clang/Sema/Scope.h b/clang/include/clang/Sema/Scope.h
index 9e81706cd2aa..e7f166fe3461 100644
--- a/clang/include/clang/Sema/Scope.h
+++ b/clang/include/clang/Sema/Scope.h
@@ -150,6 +150,9 @@ public:
/// template scope in between), the outer scope does not increase the
/// depth of recursion.
LambdaScope = 0x8000000,
+ /// This is the scope of an OpenACC Compute Construct, which restricts
+ /// jumping into/out of it.
+ OpenACCComputeConstructScope = 0x10000000,
};
private:
@@ -469,6 +472,14 @@ public:
return false;
}
+ /// Return true if this scope is a loop.
+ bool isLoopScope() const {
+ // 'switch' is the only 'break' scope that is not also a loop, so we can
+ // just check BreakScope and exclude SwitchScope.
+ return (getFlags() & Scope::BreakScope) &&
+ !(getFlags() & Scope::SwitchScope);
+ }
+
/// Determines whether this scope is the OpenMP directive scope
bool isOpenMPDirectiveScope() const {
return (getFlags() & Scope::OpenMPDirectiveScope);
@@ -504,6 +515,12 @@ public:
return getFlags() & Scope::OpenMPOrderClauseScope;
}
+ /// Determine whether this scope is the statement associated with an OpenACC
+ /// Compute construct directive.
+ bool isOpenACCComputeConstructScope() const {
+ return getFlags() & Scope::OpenACCComputeConstructScope;
+ }
+
/// Determine whether this scope is a while/do/for statement, which can have
/// continue statements embedded into it.
bool isContinueScope() const {
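A quick way to see the flag algebra behind ``isLoopScope()`` is the sketch below (not part of the patch; it assumes, as Sema sets things up, that loop statements carry ``BreakScope | ContinueScope`` while ``switch`` carries ``BreakScope | SwitchScope``):

  // Illustrative stand-in values, not the real Scope flag constants.
  enum Flags { BreakScope = 1, ContinueScope = 2, SwitchScope = 4 };

  constexpr bool isLoopScope(unsigned F) {
    return (F & BreakScope) && !(F & SwitchScope);
  }

  static_assert(isLoopScope(BreakScope | ContinueScope), "while/for/do");
  static_assert(!isLoopScope(BreakScope | SwitchScope), "switch");
  static_assert(!isLoopScope(0), "plain compound statement");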
diff --git a/clang/include/clang/Sema/Sema.h b/clang/include/clang/Sema/Sema.h
index fcccac10f473..e457694e4625 100644
--- a/clang/include/clang/Sema/Sema.h
+++ b/clang/include/clang/Sema/Sema.h
@@ -8540,7 +8540,7 @@ public:
/// if the arguments are dependent.
ExprResult CheckVarTemplateId(const CXXScopeSpec &SS,
const DeclarationNameInfo &NameInfo,
- VarTemplateDecl *Template,
+ VarTemplateDecl *Template, NamedDecl *FoundD,
SourceLocation TemplateLoc,
const TemplateArgumentListInfo *TemplateArgs);
diff --git a/clang/include/clang/StaticAnalyzer/Core/PathSensitive/SValBuilder.h b/clang/include/clang/StaticAnalyzer/Core/PathSensitive/SValBuilder.h
index d7cff49036cb..a560f274c43c 100644
--- a/clang/include/clang/StaticAnalyzer/Core/PathSensitive/SValBuilder.h
+++ b/clang/include/clang/StaticAnalyzer/Core/PathSensitive/SValBuilder.h
@@ -110,12 +110,16 @@ public:
/// that value is returned. Otherwise, returns NULL.
virtual const llvm::APSInt *getKnownValue(ProgramStateRef state, SVal val) = 0;
- /// Tries to get the minimal possible (integer) value of a given SVal. If the
- /// constraint manager cannot provide an useful answer, this returns NULL.
+ /// Tries to get the minimal possible (integer) value of a given SVal. This
+ /// always returns the value of a ConcreteInt, but may return NULL if the
+ /// value is symbolic and the constraint manager cannot provide a useful
+ /// answer.
virtual const llvm::APSInt *getMinValue(ProgramStateRef state, SVal val) = 0;
- /// Tries to get the maximal possible (integer) value of a given SVal. If the
- /// constraint manager cannot provide an useful answer, this returns NULL.
+ /// Tries to get the maximal possible (integer) value of a given SVal. This
+ /// always returns the value of a ConcreteInt, but may return NULL if the
+ /// value is symbolic and the constraint manager cannot provide a useful
+ /// answer.
virtual const llvm::APSInt *getMaxValue(ProgramStateRef state, SVal val) = 0;
/// Simplify symbolic expressions within a given SVal. Return an SVal
diff --git a/clang/lib/AST/Interp/ByteCodeExprGen.cpp b/clang/lib/AST/Interp/ByteCodeExprGen.cpp
index 27e098619216..7f97d8ce9fb8 100644
--- a/clang/lib/AST/Interp/ByteCodeExprGen.cpp
+++ b/clang/lib/AST/Interp/ByteCodeExprGen.cpp
@@ -2510,6 +2510,12 @@ template <class Emitter>
bool ByteCodeExprGen<Emitter>::visitDecl(const VarDecl *VD) {
assert(!VD->isInvalidDecl() && "Trying to constant evaluate an invalid decl");
+ // A global variable we've already seen that is still uninitialized means
+ // evaluating its initializer failed. Just return failure.
+ if (std::optional<unsigned> Index = P.getGlobal(VD);
+ Index && !P.getPtrGlobal(*Index).isInitialized())
+ return false;
+
// Create and initialize the variable.
if (!this->visitVarDecl(VD))
return false;
diff --git a/clang/lib/AST/Interp/Descriptor.h b/clang/lib/AST/Interp/Descriptor.h
index ac8707a521e1..0f64d678f3ef 100644
--- a/clang/lib/AST/Interp/Descriptor.h
+++ b/clang/lib/AST/Interp/Descriptor.h
@@ -213,6 +213,9 @@ public:
bool isRecord() const { return !IsArray && ElemRecord; }
/// Checks if this is a dummy descriptor.
bool isDummy() const { return IsDummy; }
+
+ void dump() const;
+ void dump(llvm::raw_ostream &OS) const;
};
/// Bitfield tracking the initialisation status of elements of primitive arrays.
diff --git a/clang/lib/AST/Interp/Disasm.cpp b/clang/lib/AST/Interp/Disasm.cpp
index eba437e05f59..315ddb293044 100644
--- a/clang/lib/AST/Interp/Disasm.cpp
+++ b/clang/lib/AST/Interp/Disasm.cpp
@@ -16,6 +16,7 @@
#include "Opcode.h"
#include "PrimType.h"
#include "Program.h"
+#include "clang/AST/ASTDumperUtils.h"
#include "clang/AST/DeclCXX.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/Format.h"
@@ -55,7 +56,10 @@ inline IntegralAP<true> ReadArg<IntegralAP<true>>(Program &P, CodePtr &OpPC) {
LLVM_DUMP_METHOD void Function::dump() const { dump(llvm::errs()); }
LLVM_DUMP_METHOD void Function::dump(llvm::raw_ostream &OS) const {
- OS << getName() << " " << (const void *)this << "\n";
+ {
+ ColorScope SC(OS, true, {llvm::raw_ostream::BRIGHT_GREEN, true});
+ OS << getName() << " " << (const void *)this << "\n";
+ }
OS << "frame size: " << getFrameSize() << "\n";
OS << "arg size: " << getArgSize() << "\n";
OS << "rvo: " << hasRVO() << "\n";
@@ -83,14 +87,75 @@ LLVM_DUMP_METHOD void Function::dump(llvm::raw_ostream &OS) const {
LLVM_DUMP_METHOD void Program::dump() const { dump(llvm::errs()); }
LLVM_DUMP_METHOD void Program::dump(llvm::raw_ostream &OS) const {
- OS << ":: Program\n";
- OS << "Global Variables: " << Globals.size() << "\n";
- OS << "Functions: " << Funcs.size() << "\n";
- OS << "\n";
- for (auto &Func : Funcs) {
+ {
+ ColorScope SC(OS, true, {llvm::raw_ostream::BRIGHT_RED, true});
+ OS << "\n:: Program\n";
+ }
+
+ {
+ ColorScope SC(OS, true, {llvm::raw_ostream::WHITE, true});
+ OS << "Total memory : " << Allocator.getTotalMemory() << " bytes\n";
+ OS << "Global Variables: " << Globals.size() << "\n";
+ }
+ unsigned GI = 0;
+ for (const Global *G : Globals) {
+ const Descriptor *Desc = G->block()->getDescriptor();
+ OS << GI << ": " << (void *)G->block() << " ";
+ {
+ Pointer GP = getPtrGlobal(GI);
+ ColorScope SC(OS, true,
+ GP.isInitialized()
+ ? TerminalColor{llvm::raw_ostream::GREEN, false}
+ : TerminalColor{llvm::raw_ostream::RED, false});
+ OS << (GP.isInitialized() ? "initialized " : "uninitialized ");
+ }
+ Desc->dump(OS);
+ OS << "\n";
+ ++GI;
+ }
+
+ {
+ ColorScope SC(OS, true, {llvm::raw_ostream::WHITE, true});
+ OS << "Functions: " << Funcs.size() << "\n";
+ }
+ for (const auto &Func : Funcs) {
Func.second->dump();
}
- for (auto &Anon : AnonFuncs) {
+ for (const auto &Anon : AnonFuncs) {
Anon->dump();
}
}
+
+LLVM_DUMP_METHOD void Descriptor::dump() const {
+ dump(llvm::errs());
+ llvm::errs() << '\n';
+}
+
+LLVM_DUMP_METHOD void Descriptor::dump(llvm::raw_ostream &OS) const {
+ // Source
+ {
+ ColorScope SC(OS, true, {llvm::raw_ostream::BLUE, true});
+ if (const auto *ND = dyn_cast_if_present<NamedDecl>(asDecl()))
+ OS << ND->getName();
+ else if (asExpr())
+ OS << "expr (TODO)";
+ }
+
+ // Print a few interesting bits about the descriptor.
+ if (isPrimitiveArray())
+ OS << " primitive-array";
+ else if (isCompositeArray())
+ OS << " composite-array";
+ else if (isRecord())
+ OS << " record";
+ else if (isPrimitive())
+ OS << " primitive";
+
+ if (isZeroSizeArray())
+ OS << " zero-size-arrary";
+ else if (isUnknownSizeArray())
+ OS << " unknown-size-array";
+
+ if (isDummy())
+ OS << " dummy";
+}
diff --git a/clang/lib/AST/Interp/Interp.cpp b/clang/lib/AST/Interp/Interp.cpp
index 82bc1f240cc5..b2fe70dc14f9 100644
--- a/clang/lib/AST/Interp/Interp.cpp
+++ b/clang/lib/AST/Interp/Interp.cpp
@@ -462,6 +462,10 @@ bool CheckCallable(InterpState &S, CodePtr OpPC, const Function *F) {
if (S.getLangOpts().CPlusPlus11) {
const FunctionDecl *DiagDecl = F->getDecl();
+ // Invalid decls have been diagnosed before.
+ if (DiagDecl->isInvalidDecl())
+ return false;
+
// If this function is not constexpr because it is an inherited
// non-constexpr constructor, diagnose that directly.
const auto *CD = dyn_cast<CXXConstructorDecl>(DiagDecl);
diff --git a/clang/lib/AST/Interp/Program.cpp b/clang/lib/AST/Interp/Program.cpp
index 61293a3fef47..86e18ede6381 100644
--- a/clang/lib/AST/Interp/Program.cpp
+++ b/clang/lib/AST/Interp/Program.cpp
@@ -102,7 +102,7 @@ unsigned Program::createGlobalString(const StringLiteral *S) {
return I;
}
-Pointer Program::getPtrGlobal(unsigned Idx) {
+Pointer Program::getPtrGlobal(unsigned Idx) const {
assert(Idx < Globals.size());
return Pointer(Globals[Idx]->block());
}
diff --git a/clang/lib/AST/Interp/Program.h b/clang/lib/AST/Interp/Program.h
index 364a63dbf477..045bf7ab7745 100644
--- a/clang/lib/AST/Interp/Program.h
+++ b/clang/lib/AST/Interp/Program.h
@@ -67,7 +67,7 @@ public:
unsigned createGlobalString(const StringLiteral *S);
/// Returns a pointer to a global.
- Pointer getPtrGlobal(unsigned Idx);
+ Pointer getPtrGlobal(unsigned Idx) const;
/// Returns the value of a global.
Block *getGlobal(unsigned Idx) {
@@ -190,6 +190,7 @@ private:
std::byte *data() { return B.data(); }
/// Return a pointer to the block.
Block *block() { return &B; }
+ const Block *block() const { return &B; }
private:
/// Required metadata - does not actually track pointers.
diff --git a/clang/lib/Analysis/FlowSensitive/ControlFlowContext.cpp b/clang/lib/Analysis/FlowSensitive/ControlFlowContext.cpp
index c9ebffe6f378..8aed19544be6 100644
--- a/clang/lib/Analysis/FlowSensitive/ControlFlowContext.cpp
+++ b/clang/lib/Analysis/FlowSensitive/ControlFlowContext.cpp
@@ -39,8 +39,35 @@ buildStmtToBasicBlockMap(const CFG &Cfg) {
StmtToBlock[Stmt->getStmt()] = Block;
}
- if (const Stmt *TerminatorStmt = Block->getTerminatorStmt())
- StmtToBlock[TerminatorStmt] = Block;
+ }
+ // Some terminator conditions don't appear as a `CFGElement` anywhere else -
+ // for example, this is true if the terminator condition is a `&&` or `||`
+ // operator.
+ // We associate these conditions with the block the terminator appears in,
+ // but only if the condition has not already appeared as a regular
+ // `CFGElement`. (The `insert()` below does nothing if the key already exists
+ // in the map.)
+ for (const CFGBlock *Block : Cfg) {
+ if (Block != nullptr)
+ if (const Stmt *TerminatorCond = Block->getTerminatorCondition())
+ StmtToBlock.insert({TerminatorCond, Block});
+ }
+ // Terminator statements typically don't appear as a `CFGElement` anywhere
+ // else, so we want to associate them with the block that they terminate.
+ // However, there are some important special cases:
+ // - The conditional operator is a type of terminator, but it also appears
+ // as a regular `CFGElement`, and we want to associate it with the block
+ // in which it appears as a `CFGElement`.
+ // - The `&&` and `||` operators are types of terminators, but like the
+ // conditional operator, they can appear as a regular `CFGElement` or
+ // as a terminator condition (see above).
+ // We process terminators last to make sure that we only associate them with
+ // the block they terminate if they haven't previously occurred as a regular
+ // `CFGElement` or as a terminator condition.
+ for (const CFGBlock *Block : Cfg) {
+ if (Block != nullptr)
+ if (const Stmt *TerminatorStmt = Block->getTerminatorStmt())
+ StmtToBlock.insert({TerminatorStmt, Block});
}
return StmtToBlock;
}
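A minimal sketch of the situation the comments describe (a hypothetical input, not from the patch): in the CFG for ``f`` below, ``a && b`` is the terminator condition of the block that branches on it but does not appear as a regular ``CFGElement``, so the terminator-condition loop is what maps it to a block, while the plain ``if`` terminator is handled by the final loop only if it was not already seen.

  // Hypothetical input illustrating a `&&` terminator condition.
  bool g();
  void f(bool a, bool b) {
    if (a && b)   // `a && b` only shows up as a terminator condition
      g();
  }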
diff --git a/clang/lib/Basic/Targets/AArch64.cpp b/clang/lib/Basic/Targets/AArch64.cpp
index 68032961451d..5abb060073c5 100644
--- a/clang/lib/Basic/Targets/AArch64.cpp
+++ b/clang/lib/Basic/Targets/AArch64.cpp
@@ -667,7 +667,13 @@ StringRef AArch64TargetInfo::getFeatureDependencies(StringRef Name) const {
}
bool AArch64TargetInfo::validateCpuSupports(StringRef FeatureStr) const {
- return llvm::AArch64::parseArchExtension(FeatureStr).has_value();
+ // CPU features might be separated by '+', extract them and check
+ llvm::SmallVector<StringRef, 8> Features;
+ FeatureStr.split(Features, "+");
+ for (auto &Feature : Features)
+ if (!llvm::AArch64::parseArchExtension(Feature.trim()).has_value())
+ return false;
+ return true;
}
bool AArch64TargetInfo::hasFeature(StringRef Feature) const {
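With this change, a ``+``-joined feature string is accepted by ``__builtin_cpu_supports`` on AArch64. A small usage sketch (the feature names are illustrative and assumed to be valid architecture extensions):

  // Illustrative only: each name must parse as an AArch64 architecture extension.
  void pick_kernel(void) {
    if (__builtin_cpu_supports("sve2+fp16")) {
      /* use the SVE2 + FP16 path */;
    } else {
      /* fall back */;
    }
  }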
diff --git a/clang/lib/Basic/Targets/AArch64.h b/clang/lib/Basic/Targets/AArch64.h
index 26ee7fa19782..c1ba156860a1 100644
--- a/clang/lib/Basic/Targets/AArch64.h
+++ b/clang/lib/Basic/Targets/AArch64.h
@@ -165,7 +165,7 @@ public:
DiagnosticsEngine &Diags) override;
ParsedTargetAttr parseTargetAttr(StringRef Str) const override;
bool supportsTargetAttributeTune() const override { return true; }
-
+ bool supportsCpuSupports() const override { return true; }
bool checkArithmeticFenceSupported() const override { return true; }
bool hasBFloat16Type() const override;
diff --git a/clang/lib/Basic/Targets/PPC.cpp b/clang/lib/Basic/Targets/PPC.cpp
index 8c891ccdeb59..aebe51bfa4da 100644
--- a/clang/lib/Basic/Targets/PPC.cpp
+++ b/clang/lib/Basic/Targets/PPC.cpp
@@ -904,6 +904,16 @@ bool PPCTargetInfo::validateCpuSupports(StringRef FeatureStr) const {
}
bool PPCTargetInfo::validateCpuIs(StringRef CPUName) const {
+ llvm::Triple Triple = getTriple();
+ if (Triple.isOSAIX()) {
+#define PPC_AIX_CPU(NAME, SUPPORT, INDEX, OP, VALUE) .Case(NAME, true)
+ return llvm::StringSwitch<bool>(CPUName)
+#include "llvm/TargetParser/PPCTargetParser.def"
+ .Default(false);
+ }
+
+ assert(Triple.isOSLinux() &&
+ "__builtin_cpu_is() is only supported for AIX and Linux.");
#define PPC_LNX_CPU(NAME, NUM) .Case(NAME, true)
return llvm::StringSwitch<bool>(CPUName)
#include "llvm/TargetParser/PPCTargetParser.def"
diff --git a/clang/lib/Basic/Targets/PPC.h b/clang/lib/Basic/Targets/PPC.h
index a91bdede53e4..70683916a8b0 100644
--- a/clang/lib/Basic/Targets/PPC.h
+++ b/clang/lib/Basic/Targets/PPC.h
@@ -362,8 +362,16 @@ public:
// We support __builtin_cpu_supports/__builtin_cpu_is on targets that
// have Glibc since it is Glibc that provides the HWCAP[2] in the auxv.
+ static constexpr int MINIMUM_AIX_OS_MAJOR = 7;
+ static constexpr int MINIMUM_AIX_OS_MINOR = 2;
bool supportsCpuSupports() const override { return getTriple().isOSGlibc(); }
- bool supportsCpuIs() const override { return getTriple().isOSGlibc(); }
+ bool supportsCpuIs() const override {
+ llvm::Triple Triple = getTriple();
+ // AIX 7.2 is the minimum requirement to support __builtin_cpu_is().
+ return Triple.isOSGlibc() ||
+ (Triple.isOSAIX() &&
+ !Triple.isOSVersionLT(MINIMUM_AIX_OS_MAJOR, MINIMUM_AIX_OS_MINOR));
+ }
bool validateCpuSupports(StringRef Feature) const override;
bool validateCpuIs(StringRef Name) const override;
};
diff --git a/clang/lib/CodeGen/CGBuiltin.cpp b/clang/lib/CodeGen/CGBuiltin.cpp
index d454ccc1dd86..734eb5a035ca 100644
--- a/clang/lib/CodeGen/CGBuiltin.cpp
+++ b/clang/lib/CodeGen/CGBuiltin.cpp
@@ -10638,6 +10638,9 @@ Value *CodeGenFunction::EmitAArch64BuiltinExpr(unsigned BuiltinID,
BuiltinID <= clang::AArch64::LastSMEBuiltin)
return EmitAArch64SMEBuiltinExpr(BuiltinID, E);
+ if (BuiltinID == Builtin::BI__builtin_cpu_supports)
+ return EmitAArch64CpuSupports(E);
+
unsigned HintID = static_cast<unsigned>(-1);
switch (BuiltinID) {
default: break;
@@ -14025,6 +14028,19 @@ Value *CodeGenFunction::EmitX86CpuInit() {
return Builder.CreateCall(Func);
}
+Value *CodeGenFunction::EmitAArch64CpuSupports(const CallExpr *E) {
+ const Expr *ArgExpr = E->getArg(0)->IgnoreParenCasts();
+ StringRef ArgStr = cast<StringLiteral>(ArgExpr)->getString();
+ llvm::SmallVector<StringRef, 8> Features;
+ ArgStr.split(Features, "+");
+ for (auto &Feature : Features) {
+ Feature = Feature.trim();
+ if (Feature != "default")
+ Features.push_back(Feature);
+ }
+ return EmitAArch64CpuSupports(Features);
+}
+
llvm::Value *
CodeGenFunction::EmitAArch64CpuSupports(ArrayRef<StringRef> FeaturesStrs) {
uint64_t FeaturesMask = llvm::AArch64::getCpuSupportsMask(FeaturesStrs);
@@ -16542,12 +16558,59 @@ Value *CodeGenFunction::EmitPPCBuiltinExpr(unsigned BuiltinID,
Intrinsic::ID ID = Intrinsic::not_intrinsic;
+#include "llvm/TargetParser/PPCTargetParser.def"
+ auto GenAIXPPCBuiltinCpuExpr = [&](unsigned SupportMethod, unsigned FieldIdx,
+ unsigned CompOp,
+ unsigned OpValue) -> Value * {
+ if (SupportMethod == AIX_BUILTIN_PPC_FALSE)
+ return llvm::ConstantInt::getFalse(ConvertType(E->getType()));
+
+ if (SupportMethod == AIX_BUILTIN_PPC_TRUE)
+ return llvm::ConstantInt::getTrue(ConvertType(E->getType()));
+
+ assert(SupportMethod <= USE_SYS_CONF && "Invalid value for SupportMethod.");
+ assert((CompOp == COMP_EQ) && "Only equal comparisons are supported.");
+
+ llvm::Type *STy = llvm::StructType::get(PPC_SYSTEMCONFIG_TYPE);
+ llvm::Constant *SysConf =
+ CGM.CreateRuntimeVariable(STy, "_system_configuration");
+
+ // Grab the appropriate field from _system_configuration.
+ llvm::Value *Idxs[] = {ConstantInt::get(Int32Ty, 0),
+ ConstantInt::get(Int32Ty, FieldIdx)};
+
+ llvm::Value *FieldValue = Builder.CreateGEP(STy, SysConf, Idxs);
+ FieldValue = Builder.CreateAlignedLoad(Int32Ty, FieldValue,
+ CharUnits::fromQuantity(4));
+ assert(FieldValue->getType()->isIntegerTy(32) &&
+ "Only 32-bit integers are supported in GenAIXPPCBuiltinCpuExpr().");
+ return Builder.CreateICmp(ICmpInst::ICMP_EQ, FieldValue,
+ ConstantInt::get(Int32Ty, OpValue));
+ };
+
switch (BuiltinID) {
default: return nullptr;
case Builtin::BI__builtin_cpu_is: {
const Expr *CPUExpr = E->getArg(0)->IgnoreParenCasts();
StringRef CPUStr = cast<clang::StringLiteral>(CPUExpr)->getString();
+ llvm::Triple Triple = getTarget().getTriple();
+
+ if (Triple.isOSAIX()) {
+ unsigned IsCpuSupport, FieldIdx, CompareOp, CpuIdValue;
+ typedef std::tuple<unsigned, unsigned, unsigned, unsigned> CPUType;
+ std::tie(IsCpuSupport, FieldIdx, CompareOp, CpuIdValue) =
+ static_cast<CPUType>(StringSwitch<CPUType>(CPUStr)
+#define PPC_AIX_CPU(NAME, SUPPORT_MAGIC, INDEX, COMPARE_OP, VALUE) \
+ .Case(NAME, {SUPPORT_MAGIC, INDEX, COMPARE_OP, VALUE})
+#include "llvm/TargetParser/PPCTargetParser.def"
+ );
+ return GenAIXPPCBuiltinCpuExpr(IsCpuSupport, FieldIdx, CompareOp,
+ CpuIdValue);
+ }
+
+ assert(Triple.isOSLinux() &&
+ "__builtin_cpu_is() is only supported for AIX and Linux.");
unsigned NumCPUID = StringSwitch<unsigned>(CPUStr)
#define PPC_LNX_CPU(Name, NumericID) .Case(Name, NumericID)
#include "llvm/TargetParser/PPCTargetParser.def"
diff --git a/clang/lib/CodeGen/CGCUDANV.cpp b/clang/lib/CodeGen/CGCUDANV.cpp
index 5b43272bfa62..49f93451db7b 100644
--- a/clang/lib/CodeGen/CGCUDANV.cpp
+++ b/clang/lib/CodeGen/CGCUDANV.cpp
@@ -760,10 +760,10 @@ llvm::Function *CGNVCUDARuntime::makeModuleCtorFunction() {
// to contain the fat binary but will be populated somewhere else,
// e.g. by lld through link script.
FatBinStr = new llvm::GlobalVariable(
- CGM.getModule(), CGM.Int8Ty,
- /*isConstant=*/true, llvm::GlobalValue::ExternalLinkage, nullptr,
- "__hip_fatbin", nullptr,
- llvm::GlobalVariable::NotThreadLocal);
+ CGM.getModule(), CGM.Int8Ty,
+ /*isConstant=*/true, llvm::GlobalValue::ExternalLinkage, nullptr,
+ "__hip_fatbin_" + CGM.getContext().getCUIDHash(), nullptr,
+ llvm::GlobalVariable::NotThreadLocal);
cast<llvm::GlobalVariable>(FatBinStr)->setSection(FatbinConstantName);
}
@@ -816,8 +816,8 @@ llvm::Function *CGNVCUDARuntime::makeModuleCtorFunction() {
// thread safety of the loaded program. Therefore we can assume sequential
// execution of constructor functions here.
if (IsHIP) {
- auto Linkage = CudaGpuBinary ? llvm::GlobalValue::InternalLinkage :
- llvm::GlobalValue::LinkOnceAnyLinkage;
+ auto Linkage = CudaGpuBinary ? llvm::GlobalValue::InternalLinkage
+ : llvm::GlobalValue::ExternalLinkage;
llvm::BasicBlock *IfBlock =
llvm::BasicBlock::Create(Context, "if", ModuleCtorFunc);
llvm::BasicBlock *ExitBlock =
@@ -826,11 +826,11 @@ llvm::Function *CGNVCUDARuntime::makeModuleCtorFunction() {
// of HIP ABI.
GpuBinaryHandle = new llvm::GlobalVariable(
TheModule, PtrTy, /*isConstant=*/false, Linkage,
- /*Initializer=*/llvm::ConstantPointerNull::get(PtrTy),
- "__hip_gpubin_handle");
- if (Linkage == llvm::GlobalValue::LinkOnceAnyLinkage)
- GpuBinaryHandle->setComdat(
- CGM.getModule().getOrInsertComdat(GpuBinaryHandle->getName()));
+ /*Initializer=*/
+ CudaGpuBinary ? llvm::ConstantPointerNull::get(PtrTy) : nullptr,
+ CudaGpuBinary
+ ? "__hip_gpubin_handle"
+ : "__hip_gpubin_handle_" + CGM.getContext().getCUIDHash());
GpuBinaryHandle->setAlignment(CGM.getPointerAlign().getAsAlign());
// Prevent the weak symbol in different shared libraries being merged.
if (Linkage != llvm::GlobalValue::InternalLinkage)
diff --git a/clang/lib/CodeGen/CodeGenFunction.h b/clang/lib/CodeGen/CodeGenFunction.h
index caa6a327550b..92ce0edeaf9e 100644
--- a/clang/lib/CodeGen/CodeGenFunction.h
+++ b/clang/lib/CodeGen/CodeGenFunction.h
@@ -5013,10 +5013,10 @@ private:
llvm::Value *EmitAArch64CpuInit();
llvm::Value *
FormAArch64ResolverCondition(const MultiVersionResolverOption &RO);
+ llvm::Value *EmitAArch64CpuSupports(const CallExpr *E);
llvm::Value *EmitAArch64CpuSupports(ArrayRef<StringRef> FeatureStrs);
};
-
inline DominatingLLVMValue::saved_type
DominatingLLVMValue::save(CodeGenFunction &CGF, llvm::Value *value) {
if (!needsSaving(value)) return saved_type(value, false);
diff --git a/clang/lib/CodeGen/CodeGenModule.cpp b/clang/lib/CodeGen/CodeGenModule.cpp
index 77fb3a62b356..95e457bef28e 100644
--- a/clang/lib/CodeGen/CodeGenModule.cpp
+++ b/clang/lib/CodeGen/CodeGenModule.cpp
@@ -915,7 +915,15 @@ void CodeGenModule::Release() {
llvm::ConstantArray::get(ATy, UsedArray), "__clang_gpu_used_external");
addCompilerUsedGlobal(GV);
}
-
+ if (LangOpts.HIP) {
+ // Emit a unique ID so that host and device binaries from the same
+ // compilation unit can be associated.
+ auto *GV = new llvm::GlobalVariable(
+ getModule(), Int8Ty, false, llvm::GlobalValue::ExternalLinkage,
+ llvm::Constant::getNullValue(Int8Ty),
+ "__hip_cuid_" + getContext().getCUIDHash());
+ addCompilerUsedGlobal(GV);
+ }
emitLLVMUsed();
if (SanStats)
SanStats->finish();
diff --git a/clang/lib/CodeGen/CodeGenPGO.cpp b/clang/lib/CodeGen/CodeGenPGO.cpp
index 48c5e68a3b7b..1ef7be3c7259 100644
--- a/clang/lib/CodeGen/CodeGenPGO.cpp
+++ b/clang/lib/CodeGen/CodeGenPGO.cpp
@@ -239,9 +239,12 @@ struct MapRegionCounters : public RecursiveASTVisitor<MapRegionCounters> {
if (MCDCMaxCond == 0)
return true;
- /// At the top of the logical operator nest, reset the number of conditions.
- if (LogOpStack.empty())
+ /// At the top of the logical operator nest, reset the number of conditions
+ /// and forget previously seen split nesting cases.
+ if (LogOpStack.empty()) {
NumCond = 0;
+ SplitNestedLogicalOp = false;
+ }
if (const Expr *E = dyn_cast<Expr>(S)) {
const BinaryOperator *BinOp = dyn_cast<BinaryOperator>(E->IgnoreParens());
@@ -292,7 +295,7 @@ struct MapRegionCounters : public RecursiveASTVisitor<MapRegionCounters> {
"contains an operation with a nested boolean expression. "
"Expression will not be covered");
Diag.Report(S->getBeginLoc(), DiagID);
- return false;
+ return true;
}
/// Was the maximum number of conditions encountered?
@@ -303,7 +306,7 @@ struct MapRegionCounters : public RecursiveASTVisitor<MapRegionCounters> {
"number of conditions (%0) exceeds max (%1). "
"Expression will not be covered");
Diag.Report(S->getBeginLoc(), DiagID) << NumCond << MCDCMaxCond;
- return false;
+ return true;
}
// Otherwise, allocate the number of bytes required for the bitmap
diff --git a/clang/lib/CodeGen/Targets/AArch64.cpp b/clang/lib/CodeGen/Targets/AArch64.cpp
index ee7f95084d2e..94f8e7be2ee6 100644
--- a/clang/lib/CodeGen/Targets/AArch64.cpp
+++ b/clang/lib/CodeGen/Targets/AArch64.cpp
@@ -8,6 +8,7 @@
#include "ABIInfoImpl.h"
#include "TargetInfo.h"
+#include "clang/Basic/DiagnosticFrontend.h"
using namespace clang;
using namespace clang::CodeGen;
@@ -155,6 +156,11 @@ public:
}
return TargetCodeGenInfo::isScalarizableAsmOperand(CGF, Ty);
}
+
+ void checkFunctionCallABI(CodeGenModule &CGM, SourceLocation CallLoc,
+ const FunctionDecl *Caller,
+ const FunctionDecl *Callee,
+ const CallArgList &Args) const override;
};
class WindowsAArch64TargetCodeGenInfo : public AArch64TargetCodeGenInfo {
@@ -814,6 +820,43 @@ Address AArch64ABIInfo::EmitMSVAArg(CodeGenFunction &CGF, Address VAListAddr,
/*allowHigherAlign*/ false);
}
+static bool isStreaming(const FunctionDecl *F) {
+ if (F->hasAttr<ArmLocallyStreamingAttr>())
+ return true;
+ if (const auto *T = F->getType()->getAs<FunctionProtoType>())
+ return T->getAArch64SMEAttributes() & FunctionType::SME_PStateSMEnabledMask;
+ return false;
+}
+
+static bool isStreamingCompatible(const FunctionDecl *F) {
+ if (const auto *T = F->getType()->getAs<FunctionProtoType>())
+ return T->getAArch64SMEAttributes() &
+ FunctionType::SME_PStateSMCompatibleMask;
+ return false;
+}
+
+void AArch64TargetCodeGenInfo::checkFunctionCallABI(
+ CodeGenModule &CGM, SourceLocation CallLoc, const FunctionDecl *Caller,
+ const FunctionDecl *Callee, const CallArgList &Args) const {
+ if (!Caller || !Callee || !Callee->hasAttr<AlwaysInlineAttr>())
+ return;
+
+ bool CallerIsStreaming = isStreaming(Caller);
+ bool CalleeIsStreaming = isStreaming(Callee);
+ bool CallerIsStreamingCompatible = isStreamingCompatible(Caller);
+ bool CalleeIsStreamingCompatible = isStreamingCompatible(Callee);
+
+ if (!CalleeIsStreamingCompatible &&
+ (CallerIsStreaming != CalleeIsStreaming || CallerIsStreamingCompatible))
+ CGM.getDiags().Report(CallLoc,
+ diag::err_function_always_inline_attribute_mismatch)
+ << Caller->getDeclName() << Callee->getDeclName() << "streaming";
+ if (auto *NewAttr = Callee->getAttr<ArmNewAttr>())
+ if (NewAttr->isNewZA())
+ CGM.getDiags().Report(CallLoc, diag::err_function_always_inline_new_za)
+ << Callee->getDeclName();
+}
+
std::unique_ptr<TargetCodeGenInfo>
CodeGen::createAArch64TargetCodeGenInfo(CodeGenModule &CGM,
AArch64ABIKind Kind) {
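A sketch of the kind of source the new check diagnoses (an assumed example, not from the patch; it requires an SME-enabled target):

  // Illustrative only: an always_inline callee whose streaming-ness differs
  // from its caller now triggers err_function_always_inline_attribute_mismatch;
  // an always_inline callee with new ZA state triggers
  // err_function_always_inline_new_za.
  __attribute__((always_inline)) void streaming_callee(void) __arm_streaming {}

  void non_streaming_caller(void) {
    streaming_callee(); // error: mismatching streaming attributes
  }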
diff --git a/clang/lib/Driver/OffloadBundler.cpp b/clang/lib/Driver/OffloadBundler.cpp
index b1091aca5616..99a34d25cfcd 100644
--- a/clang/lib/Driver/OffloadBundler.cpp
+++ b/clang/lib/Driver/OffloadBundler.cpp
@@ -588,8 +588,15 @@ public:
StringRef Content = *ContentOrErr;
// Copy fat object contents to the output when extracting host bundle.
- if (Content.size() == 1u && Content.front() == 0)
- Content = StringRef(Input.getBufferStart(), Input.getBufferSize());
+ std::string ModifiedContent;
+ if (Content.size() == 1u && Content.front() == 0) {
+ auto HostBundleOrErr = getHostBundle();
+ if (!HostBundleOrErr)
+ return HostBundleOrErr.takeError();
+
+ ModifiedContent = std::move(*HostBundleOrErr);
+ Content = ModifiedContent;
+ }
OS.write(Content.data(), Content.size());
return Error::success();
@@ -692,6 +699,35 @@ private:
}
return Error::success();
}
+
+ Expected<std::string> getHostBundle() {
+ TempFileHandlerRAII TempFiles;
+
+ auto ModifiedObjPathOrErr = TempFiles.Create(std::nullopt);
+ if (!ModifiedObjPathOrErr)
+ return ModifiedObjPathOrErr.takeError();
+ StringRef ModifiedObjPath = *ModifiedObjPathOrErr;
+
+ BumpPtrAllocator Alloc;
+ StringSaver SS{Alloc};
+ SmallVector<StringRef, 16> ObjcopyArgs{"llvm-objcopy"};
+
+ ObjcopyArgs.push_back("--regex");
+ ObjcopyArgs.push_back("--remove-section=__CLANG_OFFLOAD_BUNDLE__.*");
+ ObjcopyArgs.push_back("--");
+ ObjcopyArgs.push_back(BundlerConfig.InputFileNames.front());
+ ObjcopyArgs.push_back(ModifiedObjPath);
+
+ if (Error Err = executeObjcopy(BundlerConfig.ObjcopyPath, ObjcopyArgs))
+ return std::move(Err);
+
+ auto BufOrErr = MemoryBuffer::getFile(ModifiedObjPath);
+ if (!BufOrErr)
+ return createStringError(BufOrErr.getError(),
+ "Failed to read back the modified object file");
+
+ return BufOrErr->get()->getBuffer().str();
+ }
};
/// Handler for text files. The bundled file will have the following format.
diff --git a/clang/lib/Driver/ToolChain.cpp b/clang/lib/Driver/ToolChain.cpp
index 388030592b48..f8c13c86daf9 100644
--- a/clang/lib/Driver/ToolChain.cpp
+++ b/clang/lib/Driver/ToolChain.cpp
@@ -77,10 +77,19 @@ static ToolChain::RTTIMode CalculateRTTIMode(const ArgList &Args,
return NoRTTI ? ToolChain::RM_Disabled : ToolChain::RM_Enabled;
}
+static ToolChain::ExceptionsMode CalculateExceptionsMode(const ArgList &Args) {
+ if (Args.hasFlag(options::OPT_fexceptions, options::OPT_fno_exceptions,
+ true)) {
+ return ToolChain::EM_Enabled;
+ }
+ return ToolChain::EM_Disabled;
+}
+
ToolChain::ToolChain(const Driver &D, const llvm::Triple &T,
const ArgList &Args)
: D(D), Triple(T), Args(Args), CachedRTTIArg(GetRTTIArgument(Args)),
- CachedRTTIMode(CalculateRTTIMode(Args, Triple, CachedRTTIArg)) {
+ CachedRTTIMode(CalculateRTTIMode(Args, Triple, CachedRTTIArg)),
+ CachedExceptionsMode(CalculateExceptionsMode(Args)) {
auto addIfExists = [this](path_list &List, const std::string &Path) {
if (getVFS().exists(Path))
List.push_back(Path);
@@ -264,6 +273,18 @@ ToolChain::getMultilibFlags(const llvm::opt::ArgList &Args) const {
break;
}
+ // Include fno-exceptions and fno-rtti
+ // to improve multilib selection
+ if (getRTTIMode() == ToolChain::RTTIMode::RM_Disabled)
+ Result.push_back("-fno-rtti");
+ else
+ Result.push_back("-frtti");
+
+ if (getExceptionsMode() == ToolChain::ExceptionsMode::EM_Disabled)
+ Result.push_back("-fno-exceptions");
+ else
+ Result.push_back("-fexceptions");
+
// Sort and remove duplicates.
std::sort(Result.begin(), Result.end());
Result.erase(std::unique(Result.begin(), Result.end()), Result.end());
diff --git a/clang/lib/Driver/ToolChains/Arch/ARM.cpp b/clang/lib/Driver/ToolChains/Arch/ARM.cpp
index e6ee2f88a84e..ba158b92bb44 100644
--- a/clang/lib/Driver/ToolChains/Arch/ARM.cpp
+++ b/clang/lib/Driver/ToolChains/Arch/ARM.cpp
@@ -890,25 +890,25 @@ fp16_fml_fallthrough:
// SCTLR.U bit, which is architecture-specific. We assume ARMv6
// Darwin and NetBSD targets support unaligned accesses, and others don't.
//
- // ARMv7 always has SCTLR.U set to 1, but it has a new SCTLR.A bit
- // which raises an alignment fault on unaligned accesses. Linux
- // defaults this bit to 0 and handles it as a system-wide (not
- // per-process) setting. It is therefore safe to assume that ARMv7+
- // Linux targets support unaligned accesses. The same goes for NaCl
- // and Windows.
+ // ARMv7 always has SCTLR.U set to 1, but it has a new SCTLR.A bit which
+ // raises an alignment fault on unaligned accesses. Assume ARMv7+ supports
+ // unaligned accesses, except ARMv6-M, and ARMv8-M without the Main
+ // Extension. This aligns with the default behavior of ARM's downstream
+ // versions of GCC and Clang.
//
- // The above behavior is consistent with GCC.
+ // Users can change the default behavior via -m[no-]unaligned-access.
int VersionNum = getARMSubArchVersionNumber(Triple);
if (Triple.isOSDarwin() || Triple.isOSNetBSD()) {
if (VersionNum < 6 ||
Triple.getSubArch() == llvm::Triple::SubArchType::ARMSubArch_v6m)
Features.push_back("+strict-align");
- } else if (Triple.isOSLinux() || Triple.isOSNaCl() ||
- Triple.isOSWindows()) {
- if (VersionNum < 7)
- Features.push_back("+strict-align");
- } else
+ } else if (VersionNum < 7 ||
+ Triple.getSubArch() ==
+ llvm::Triple::SubArchType::ARMSubArch_v6m ||
+ Triple.getSubArch() ==
+ llvm::Triple::SubArchType::ARMSubArch_v8m_baseline) {
Features.push_back("+strict-align");
+ }
}
// llvm does not support reserving registers in general. There is support
diff --git a/clang/lib/Driver/ToolChains/CommonArgs.cpp b/clang/lib/Driver/ToolChains/CommonArgs.cpp
index e5196bd8b5ae..347b250260c4 100644
--- a/clang/lib/Driver/ToolChains/CommonArgs.cpp
+++ b/clang/lib/Driver/ToolChains/CommonArgs.cpp
@@ -1087,10 +1087,41 @@ static void addOpenMPDeviceLibC(const ToolChain &TC, const ArgList &Args,
"llvm-libc-decls");
bool HasLibC = llvm::sys::fs::exists(LibCDecls) &&
llvm::sys::fs::is_directory(LibCDecls);
- if (Args.hasFlag(options::OPT_gpulibc, options::OPT_nogpulibc, HasLibC)) {
- CmdArgs.push_back("-lcgpu");
- CmdArgs.push_back("-lmgpu");
+ if (!Args.hasFlag(options::OPT_gpulibc, options::OPT_nogpulibc, HasLibC))
+ return;
+
+ // We don't have access to the offloading toolchains here, so determine from
+ // the arguments if we have any active NVPTX or AMDGPU toolchains.
+ llvm::DenseSet<const char *> Libraries;
+ if (const Arg *Targets = Args.getLastArg(options::OPT_fopenmp_targets_EQ)) {
+ if (llvm::any_of(Targets->getValues(),
+ [](auto S) { return llvm::Triple(S).isAMDGPU(); })) {
+ Libraries.insert("-lcgpu-amdgpu");
+ Libraries.insert("-lmgpu-amdgpu");
+ }
+ if (llvm::any_of(Targets->getValues(),
+ [](auto S) { return llvm::Triple(S).isNVPTX(); })) {
+ Libraries.insert("-lcgpu-nvptx");
+ Libraries.insert("-lmgpu-nvptx");
+ }
}
+
+ for (StringRef Arch : Args.getAllArgValues(options::OPT_offload_arch_EQ)) {
+ if (llvm::any_of(llvm::split(Arch, ","), [](StringRef Str) {
+ return IsAMDGpuArch(StringToCudaArch(Str));
+ })) {
+ Libraries.insert("-lcgpu-amdgpu");
+ Libraries.insert("-lmgpu-amdgpu");
+ }
+ if (llvm::any_of(llvm::split(Arch, ","), [](StringRef Str) {
+ return IsNVIDIAGpuArch(StringToCudaArch(Str));
+ })) {
+ Libraries.insert("-lcgpu-nvptx");
+ Libraries.insert("-lmgpu-nvptx");
+ }
+ }
+
+ llvm::append_range(CmdArgs, Libraries);
}
void tools::addOpenMPRuntimeLibraryPath(const ToolChain &TC,
diff --git a/clang/lib/Driver/ToolChains/Cuda.cpp b/clang/lib/Driver/ToolChains/Cuda.cpp
index ed5924c3b73b..94d4982d102b 100644
--- a/clang/lib/Driver/ToolChains/Cuda.cpp
+++ b/clang/lib/Driver/ToolChains/Cuda.cpp
@@ -623,35 +623,34 @@ void NVPTX::Linker::ConstructJob(Compilation &C, const JobAction &JA,
continue;
}
- // Currently, we only pass the input files to the linker, we do not pass
- // any libraries that may be valid only for the host.
- if (!II.isFilename())
- continue;
-
// The 'nvlink' application performs RDC-mode linking when given a '.o'
// file and device linking when given a '.cubin' file. We always want to
// perform device linking, so just rename any '.o' files.
// FIXME: This should hopefully be removed if NVIDIA updates their tooling.
- auto InputFile = getToolChain().getInputFilename(II);
- if (llvm::sys::path::extension(InputFile) != ".cubin") {
- // If there are no actions above this one then this is direct input and we
- // can copy it. Otherwise the input is internal so a `.cubin` file should
- // exist.
- if (II.getAction() && II.getAction()->getInputs().size() == 0) {
- const char *CubinF =
- Args.MakeArgString(getToolChain().getDriver().GetTemporaryPath(
- llvm::sys::path::stem(InputFile), "cubin"));
- if (llvm::sys::fs::copy_file(InputFile, C.addTempFile(CubinF)))
- continue;
+ if (II.isFilename()) {
+ auto InputFile = getToolChain().getInputFilename(II);
+ if (llvm::sys::path::extension(InputFile) != ".cubin") {
+ // If there are no actions above this one then this is direct input and
+ // we can copy it. Otherwise the input is internal so a `.cubin` file
+ // should exist.
+ if (II.getAction() && II.getAction()->getInputs().size() == 0) {
+ const char *CubinF =
+ Args.MakeArgString(getToolChain().getDriver().GetTemporaryPath(
+ llvm::sys::path::stem(InputFile), "cubin"));
+ if (llvm::sys::fs::copy_file(InputFile, C.addTempFile(CubinF)))
+ continue;
- CmdArgs.push_back(CubinF);
+ CmdArgs.push_back(CubinF);
+ } else {
+ SmallString<256> Filename(InputFile);
+ llvm::sys::path::replace_extension(Filename, "cubin");
+ CmdArgs.push_back(Args.MakeArgString(Filename));
+ }
} else {
- SmallString<256> Filename(InputFile);
- llvm::sys::path::replace_extension(Filename, "cubin");
- CmdArgs.push_back(Args.MakeArgString(Filename));
+ CmdArgs.push_back(Args.MakeArgString(InputFile));
}
- } else {
- CmdArgs.push_back(Args.MakeArgString(InputFile));
+ } else if (!II.isNothing()) {
+ II.getInputArg().renderAsInput(Args, CmdArgs);
}
}
diff --git a/clang/lib/Driver/ToolChains/HIPUtility.cpp b/clang/lib/Driver/ToolChains/HIPUtility.cpp
index f692458b775d..fcecf2e1313b 100644
--- a/clang/lib/Driver/ToolChains/HIPUtility.cpp
+++ b/clang/lib/Driver/ToolChains/HIPUtility.cpp
@@ -9,13 +9,24 @@
#include "HIPUtility.h"
#include "CommonArgs.h"
#include "clang/Driver/Compilation.h"
+#include "clang/Driver/Options.h"
+#include "llvm/ADT/StringExtras.h"
#include "llvm/ADT/StringRef.h"
+#include "llvm/Object/Archive.h"
+#include "llvm/Object/ObjectFile.h"
+#include "llvm/Support/MD5.h"
+#include "llvm/Support/MemoryBuffer.h"
#include "llvm/Support/Path.h"
+#include "llvm/Support/raw_ostream.h"
#include "llvm/TargetParser/Triple.h"
+#include <deque>
+#include <set>
+using namespace clang;
using namespace clang::driver;
using namespace clang::driver::tools;
using namespace llvm::opt;
+using llvm::dyn_cast;
#if defined(_WIN32) || defined(_WIN64)
#define NULL_FILE "nul"
@@ -36,6 +47,169 @@ static std::string normalizeForBundler(const llvm::Triple &T,
: T.normalize();
}
+// Collect undefined __hip_fatbin* and __hip_gpubin_handle* symbols from all
+// input object or archive files.
+class HIPUndefinedFatBinSymbols {
+public:
+ HIPUndefinedFatBinSymbols(const Compilation &C)
+ : C(C), DiagID(C.getDriver().getDiags().getCustomDiagID(
+ DiagnosticsEngine::Error,
+ "Error collecting HIP undefined fatbin symbols: %0")),
+ Quiet(C.getArgs().hasArg(options::OPT__HASH_HASH_HASH)),
+ Verbose(C.getArgs().hasArg(options::OPT_v)) {
+ populateSymbols();
+ if (Verbose) {
+ for (auto Name : FatBinSymbols)
+ llvm::errs() << "Found undefined HIP fatbin symbol: " << Name << "\n";
+ for (auto Name : GPUBinHandleSymbols)
+ llvm::errs() << "Found undefined HIP gpubin handle symbol: " << Name
+ << "\n";
+ }
+ }
+
+ const std::set<std::string> &getFatBinSymbols() const {
+ return FatBinSymbols;
+ }
+
+ const std::set<std::string> &getGPUBinHandleSymbols() const {
+ return GPUBinHandleSymbols;
+ }
+
+private:
+ const Compilation &C;
+ unsigned DiagID;
+ bool Quiet;
+ bool Verbose;
+ std::set<std::string> FatBinSymbols;
+ std::set<std::string> GPUBinHandleSymbols;
+ std::set<std::string> DefinedFatBinSymbols;
+ std::set<std::string> DefinedGPUBinHandleSymbols;
+ const std::string FatBinPrefix = "__hip_fatbin";
+ const std::string GPUBinHandlePrefix = "__hip_gpubin_handle";
+
+ void populateSymbols() {
+ std::deque<const Action *> WorkList;
+ std::set<const Action *> Visited;
+
+ for (const auto &Action : C.getActions())
+ WorkList.push_back(Action);
+
+ while (!WorkList.empty()) {
+ const Action *CurrentAction = WorkList.front();
+ WorkList.pop_front();
+
+ if (!CurrentAction || !Visited.insert(CurrentAction).second)
+ continue;
+
+ if (const auto *IA = dyn_cast<InputAction>(CurrentAction)) {
+ std::string ID = IA->getId().str();
+ if (!ID.empty()) {
+ ID = llvm::utohexstr(llvm::MD5Hash(ID), /*LowerCase=*/true);
+ FatBinSymbols.insert(Twine(FatBinPrefix + "_" + ID).str());
+ GPUBinHandleSymbols.insert(
+ Twine(GPUBinHandlePrefix + "_" + ID).str());
+ continue;
+ }
+ if (IA->getInputArg().getNumValues() == 0)
+ continue;
+ const char *Filename = IA->getInputArg().getValue();
+ if (!Filename)
+ continue;
+ auto BufferOrErr = llvm::MemoryBuffer::getFile(Filename);
+ // The input action could be options for the linker, so ignore it if it
+ // cannot be read. If it turns out to be a file that cannot be read, the
+ // error will be caught by the linker.
+ if (!BufferOrErr)
+ continue;
+
+ processInput(BufferOrErr.get()->getMemBufferRef());
+ } else
+ WorkList.insert(WorkList.end(), CurrentAction->getInputs().begin(),
+ CurrentAction->getInputs().end());
+ }
+ }
+
+ void processInput(const llvm::MemoryBufferRef &Buffer) {
+ // Try processing it as an object file first.
+ auto ObjFileOrErr = llvm::object::ObjectFile::createObjectFile(Buffer);
+ if (ObjFileOrErr) {
+ processSymbols(**ObjFileOrErr);
+ return;
+ }
+
+ // Then try processing it as an archive file.
+ llvm::consumeError(ObjFileOrErr.takeError());
+ auto ArchiveOrErr = llvm::object::Archive::create(Buffer);
+ if (ArchiveOrErr) {
+ llvm::Error Err = llvm::Error::success();
+ llvm::object::Archive &Archive = *ArchiveOrErr.get();
+ for (auto &Child : Archive.children(Err)) {
+ auto ChildBufOrErr = Child.getMemoryBufferRef();
+ if (ChildBufOrErr)
+ processInput(*ChildBufOrErr);
+ else
+ errorHandler(ChildBufOrErr.takeError());
+ }
+
+ if (Err)
+ errorHandler(std::move(Err));
+ return;
+ }
+
+ // Ignore other files.
+ llvm::consumeError(ArchiveOrErr.takeError());
+ }
+
+ void processSymbols(const llvm::object::ObjectFile &Obj) {
+ for (const auto &Symbol : Obj.symbols()) {
+ auto FlagOrErr = Symbol.getFlags();
+ if (!FlagOrErr) {
+ errorHandler(FlagOrErr.takeError());
+ continue;
+ }
+
+ auto NameOrErr = Symbol.getName();
+ if (!NameOrErr) {
+ errorHandler(NameOrErr.takeError());
+ continue;
+ }
+ llvm::StringRef Name = *NameOrErr;
+
+ bool isUndefined =
+ FlagOrErr.get() & llvm::object::SymbolRef::SF_Undefined;
+ bool isFatBinSymbol = Name.starts_with(FatBinPrefix);
+ bool isGPUBinHandleSymbol = Name.starts_with(GPUBinHandlePrefix);
+
+ // Handling for defined symbols
+ if (!isUndefined) {
+ if (isFatBinSymbol) {
+ DefinedFatBinSymbols.insert(Name.str());
+ FatBinSymbols.erase(Name.str());
+ } else if (isGPUBinHandleSymbol) {
+ DefinedGPUBinHandleSymbols.insert(Name.str());
+ GPUBinHandleSymbols.erase(Name.str());
+ }
+ continue;
+ }
+
+ // Add undefined symbols if they are not in the defined sets
+ if (isFatBinSymbol &&
+ DefinedFatBinSymbols.find(Name.str()) == DefinedFatBinSymbols.end())
+ FatBinSymbols.insert(Name.str());
+ else if (isGPUBinHandleSymbol &&
+ DefinedGPUBinHandleSymbols.find(Name.str()) ==
+ DefinedGPUBinHandleSymbols.end())
+ GPUBinHandleSymbols.insert(Name.str());
+ }
+ }
+
+ void errorHandler(llvm::Error Err) {
+ if (Quiet)
+ return;
+ C.getDriver().Diag(DiagID) << llvm::toString(std::move(Err));
+ }
+};
+
// Construct a clang-offload-bundler command to bundle code objects for
// different devices into a HIP fat binary.
void HIP::constructHIPFatbinCommand(Compilation &C, const JobAction &JA,
@@ -130,26 +304,84 @@ void HIP::constructGenerateObjFileFromHIPFatBinary(
auto HostTriple =
C.getSingleOffloadToolChain<Action::OFK_Host>()->getTriple();
+ HIPUndefinedFatBinSymbols Symbols(C);
+
+ std::string PrimaryHipFatbinSymbol;
+ std::string PrimaryGpuBinHandleSymbol;
+ bool FoundPrimaryHipFatbinSymbol = false;
+ bool FoundPrimaryGpuBinHandleSymbol = false;
+
+ std::vector<std::string> AliasHipFatbinSymbols;
+ std::vector<std::string> AliasGpuBinHandleSymbols;
+
+ // Iterate through symbols to find the primary ones and collect others for
+ // aliasing
+ for (const auto &Symbol : Symbols.getFatBinSymbols()) {
+ if (!FoundPrimaryHipFatbinSymbol) {
+ PrimaryHipFatbinSymbol = Symbol;
+ FoundPrimaryHipFatbinSymbol = true;
+ } else
+ AliasHipFatbinSymbols.push_back(Symbol);
+ }
+
+ for (const auto &Symbol : Symbols.getGPUBinHandleSymbols()) {
+ if (!FoundPrimaryGpuBinHandleSymbol) {
+ PrimaryGpuBinHandleSymbol = Symbol;
+ FoundPrimaryGpuBinHandleSymbol = true;
+ } else
+ AliasGpuBinHandleSymbols.push_back(Symbol);
+ }
+
// Add MC directives to embed target binaries. We ensure that each
// section and image is 16-byte aligned. This is not mandatory, but
// increases the likelihood of data to be aligned with a cache block
// in several main host machines.
ObjStream << "# HIP Object Generator\n";
ObjStream << "# *** Automatically generated by Clang ***\n";
- if (HostTriple.isWindowsMSVCEnvironment()) {
- ObjStream << " .section .hip_fatbin, \"dw\"\n";
- } else {
- ObjStream << " .protected __hip_fatbin\n";
- ObjStream << " .type __hip_fatbin,@object\n";
- ObjStream << " .section .hip_fatbin,\"a\",@progbits\n";
+ if (FoundPrimaryGpuBinHandleSymbol) {
+ // Define the first gpubin handle symbol
+ if (HostTriple.isWindowsMSVCEnvironment())
+ ObjStream << " .section .hip_gpubin_handle,\"dw\"\n";
+ else {
+ ObjStream << " .protected " << PrimaryGpuBinHandleSymbol << "\n";
+ ObjStream << " .type " << PrimaryGpuBinHandleSymbol << ",@object\n";
+ ObjStream << " .section .hip_gpubin_handle,\"aw\"\n";
+ }
+ ObjStream << " .globl " << PrimaryGpuBinHandleSymbol << "\n";
+ ObjStream << " .p2align 3\n"; // Align 8
+ ObjStream << PrimaryGpuBinHandleSymbol << ":\n";
+ ObjStream << " .zero 8\n"; // Size 8
+
+ // Generate alias directives for other gpubin handle symbols
+ for (const auto &AliasSymbol : AliasGpuBinHandleSymbols) {
+ ObjStream << " .globl " << AliasSymbol << "\n";
+ ObjStream << " .set " << AliasSymbol << "," << PrimaryGpuBinHandleSymbol
+ << "\n";
+ }
+ }
+ if (FoundPrimaryHipFatbinSymbol) {
+ // Define the first fatbin symbol
+ if (HostTriple.isWindowsMSVCEnvironment())
+ ObjStream << " .section .hip_fatbin,\"dw\"\n";
+ else {
+ ObjStream << " .protected " << PrimaryHipFatbinSymbol << "\n";
+ ObjStream << " .type " << PrimaryHipFatbinSymbol << ",@object\n";
+ ObjStream << " .section .hip_fatbin,\"a\",@progbits\n";
+ }
+ ObjStream << " .globl " << PrimaryHipFatbinSymbol << "\n";
+ ObjStream << " .p2align " << llvm::Log2(llvm::Align(HIPCodeObjectAlign))
+ << "\n";
+ // Generate alias directives for other fatbin symbols
+ for (const auto &AliasSymbol : AliasHipFatbinSymbols) {
+ ObjStream << " .globl " << AliasSymbol << "\n";
+ ObjStream << " .set " << AliasSymbol << "," << PrimaryHipFatbinSymbol
+ << "\n";
+ }
+ ObjStream << PrimaryHipFatbinSymbol << ":\n";
+ ObjStream << " .incbin ";
+ llvm::sys::printArg(ObjStream, BundleFile, /*Quote=*/true);
+ ObjStream << "\n";
}
- ObjStream << " .globl __hip_fatbin\n";
- ObjStream << " .p2align " << llvm::Log2(llvm::Align(HIPCodeObjectAlign))
- << "\n";
- ObjStream << "__hip_fatbin:\n";
- ObjStream << " .incbin ";
- llvm::sys::printArg(ObjStream, BundleFile, /*Quote=*/true);
- ObjStream << "\n";
if (HostTriple.isOSLinux() && HostTriple.isOSBinFormatELF())
ObjStream << " .section .note.GNU-stack, \"\", @progbits\n";
ObjStream.flush();
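Editorial note: each input TU now contributes `__hip_fatbin_<hash>` and `__hip_gpubin_handle_<hash>` symbols, where the suffix is an MD5 hash of the input identifier; the first symbol of each set is defined in the generated object and the remaining ones become `.set` aliases of it. A minimal sketch (not part of the patch; the standalone function name is illustrative) of the naming scheme, reusing the same LLVM helpers as HIPUndefinedFatBinSymbols above:

#include "llvm/ADT/StringExtras.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/ADT/Twine.h"
#include "llvm/Support/MD5.h"
#include <string>

static std::string fatbinSymbolFor(llvm::StringRef InputId) {
  // Hash the TU identifier and append it to the common prefix,
  // e.g. "__hip_fatbin_1a2b3c4d5e6f7788".
  std::string Suffix =
      llvm::utohexstr(llvm::MD5Hash(InputId), /*LowerCase=*/true);
  return (llvm::Twine("__hip_fatbin_") + Suffix).str();
}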
diff --git a/clang/lib/Driver/ToolChains/RISCVToolchain.cpp b/clang/lib/Driver/ToolChains/RISCVToolchain.cpp
index 85beb945cbf6..624099d21ae1 100644
--- a/clang/lib/Driver/ToolChains/RISCVToolchain.cpp
+++ b/clang/lib/Driver/ToolChains/RISCVToolchain.cpp
@@ -86,6 +86,11 @@ RISCVToolChain::GetUnwindLibType(const llvm::opt::ArgList &Args) const {
return ToolChain::UNW_None;
}
+ToolChain::UnwindTableLevel RISCVToolChain::getDefaultUnwindTableLevel(
+ const llvm::opt::ArgList &Args) const {
+ return UnwindTableLevel::None;
+}
+
void RISCVToolChain::addClangTargetOptions(
const llvm::opt::ArgList &DriverArgs,
llvm::opt::ArgStringList &CC1Args,
diff --git a/clang/lib/Driver/ToolChains/RISCVToolchain.h b/clang/lib/Driver/ToolChains/RISCVToolchain.h
index cec817ef7190..fa0aa265d842 100644
--- a/clang/lib/Driver/ToolChains/RISCVToolchain.h
+++ b/clang/lib/Driver/ToolChains/RISCVToolchain.h
@@ -28,6 +28,8 @@ public:
RuntimeLibType GetDefaultRuntimeLibType() const override;
UnwindLibType
GetUnwindLibType(const llvm::opt::ArgList &Args) const override;
+ UnwindTableLevel
+ getDefaultUnwindTableLevel(const llvm::opt::ArgList &Args) const override;
void
AddClangSystemIncludeArgs(const llvm::opt::ArgList &DriverArgs,
llvm::opt::ArgStringList &CC1Args) const override;
diff --git a/clang/lib/Format/MacroCallReconstructor.cpp b/clang/lib/Format/MacroCallReconstructor.cpp
index cbdd1683c54d..101acefdfe7a 100644
--- a/clang/lib/Format/MacroCallReconstructor.cpp
+++ b/clang/lib/Format/MacroCallReconstructor.cpp
@@ -33,7 +33,7 @@ void forEachToken(const UnwrappedLine &Line, const T &Call,
FormatToken *Parent = nullptr) {
bool First = true;
for (const auto &N : Line.Tokens) {
- Call(N.Tok, Parent, First);
+ Call(N.Tok, Parent, First, Line.Level);
First = false;
for (const auto &Child : N.Children)
forEachToken(Child, Call, N.Tok);
@@ -44,7 +44,7 @@ MacroCallReconstructor::MacroCallReconstructor(
unsigned Level,
const llvm::DenseMap<FormatToken *, std::unique_ptr<UnwrappedLine>>
&ActiveExpansions)
- : Level(Level), IdToReconstructed(ActiveExpansions) {
+ : Result(Level), IdToReconstructed(ActiveExpansions) {
Result.Tokens.push_back(std::make_unique<LineNode>());
ActiveReconstructedLines.push_back(&Result);
}
@@ -52,9 +52,8 @@ MacroCallReconstructor::MacroCallReconstructor(
void MacroCallReconstructor::addLine(const UnwrappedLine &Line) {
assert(State != Finalized);
LLVM_DEBUG(llvm::dbgs() << "MCR: new line...\n");
- forEachToken(Line, [&](FormatToken *Token, FormatToken *Parent, bool First) {
- add(Token, Parent, First);
- });
+ forEachToken(Line, [&](FormatToken *Token, FormatToken *Parent, bool First,
+ unsigned Level) { add(Token, Parent, First, Level); });
assert(InProgress || finished());
}
@@ -62,8 +61,8 @@ UnwrappedLine MacroCallReconstructor::takeResult() && {
finalize();
assert(Result.Tokens.size() == 1 &&
Result.Tokens.front()->Children.size() == 1);
- UnwrappedLine Final =
- createUnwrappedLine(*Result.Tokens.front()->Children.front(), Level);
+ UnwrappedLine Final = createUnwrappedLine(
+ *Result.Tokens.front()->Children.front(), Result.Level);
assert(!Final.Tokens.empty());
return Final;
}
@@ -72,7 +71,8 @@ UnwrappedLine MacroCallReconstructor::takeResult() && {
// ExpandedParent in the incoming unwrapped line. \p First specifies whether it
// is the first token in a given unwrapped line.
void MacroCallReconstructor::add(FormatToken *Token,
- FormatToken *ExpandedParent, bool First) {
+ FormatToken *ExpandedParent, bool First,
+ unsigned Level) {
LLVM_DEBUG(
llvm::dbgs() << "MCR: Token: " << Token->TokenText << ", Parent: "
<< (ExpandedParent ? ExpandedParent->TokenText : "<null>")
@@ -102,7 +102,7 @@ void MacroCallReconstructor::add(FormatToken *Token,
First = true;
}
- prepareParent(ExpandedParent, First);
+ prepareParent(ExpandedParent, First, Level);
if (Token->MacroCtx) {
// If this token was generated by a macro call, add the reconstructed
@@ -129,7 +129,7 @@ void MacroCallReconstructor::add(FormatToken *Token,
// is the parent of ActiveReconstructedLines.back() in the reconstructed
// unwrapped line.
void MacroCallReconstructor::prepareParent(FormatToken *ExpandedParent,
- bool NewLine) {
+ bool NewLine, unsigned Level) {
LLVM_DEBUG({
llvm::dbgs() << "ParentMap:\n";
debugParentMap();
@@ -172,7 +172,7 @@ void MacroCallReconstructor::prepareParent(FormatToken *ExpandedParent,
}
assert(!ActiveReconstructedLines.empty());
ActiveReconstructedLines.back()->Tokens.back()->Children.push_back(
- std::make_unique<ReconstructedLine>());
+ std::make_unique<ReconstructedLine>(Level));
ActiveReconstructedLines.push_back(
&*ActiveReconstructedLines.back()->Tokens.back()->Children.back());
} else if (parentLine().Tokens.back()->Tok != Parent) {
@@ -424,7 +424,8 @@ bool MacroCallReconstructor::processNextReconstructed() {
SpelledParentToReconstructedParent[MacroCallStructure.back()
.ParentLastToken] = Token;
appendToken(Token);
- prepareParent(Token, /*NewLine=*/true);
+ prepareParent(Token, /*NewLine=*/true,
+ MacroCallStructure.back().Line->Level);
Token->MacroParent = true;
return false;
}
@@ -435,7 +436,8 @@ bool MacroCallReconstructor::processNextReconstructed() {
[MacroCallStructure.back().Line->Tokens.back()->Tok] = Token;
Token->MacroParent = true;
appendToken(Token, MacroCallStructure.back().Line);
- prepareParent(Token, /*NewLine=*/true);
+ prepareParent(Token, /*NewLine=*/true,
+ MacroCallStructure.back().Line->Level);
return true;
}
if (Token->is(tok::r_paren)) {
@@ -509,16 +511,36 @@ MacroCallReconstructor::createUnwrappedLine(const ReconstructedLine &Line,
for (const auto &N : Line.Tokens) {
Result.Tokens.push_back(N->Tok);
UnwrappedLineNode &Current = Result.Tokens.back();
- for (const auto &Child : N->Children) {
- if (Child->Tokens.empty())
- continue;
- Current.Children.push_back(createUnwrappedLine(*Child, Level + 1));
- }
- if (Current.Children.size() == 1 &&
- Current.Tok->isOneOf(tok::l_paren, tok::comma)) {
- Result.Tokens.splice(Result.Tokens.end(),
- Current.Children.front().Tokens);
- Current.Children.clear();
+ auto NumChildren =
+ std::count_if(N->Children.begin(), N->Children.end(),
+ [](const auto &Child) { return !Child->Tokens.empty(); });
+ if (NumChildren == 1 && Current.Tok->isOneOf(tok::l_paren, tok::comma)) {
+ // If we only have one child, and the child is due to a macro expansion
+ // (either attached to a left parenthesis or comma), merge the child into
+ // the current line to prevent forced breaks for macro arguments.
+ auto *Child = std::find_if(
+ N->Children.begin(), N->Children.end(),
+ [](const auto &Child) { return !Child->Tokens.empty(); });
+ auto Line = createUnwrappedLine(**Child, Level);
+ Result.Tokens.splice(Result.Tokens.end(), Line.Tokens);
+ } else if (NumChildren > 0) {
+ // When there are multiple children with different indent, make sure that
+ // we indent them:
+ // 1. One level below the current line's level.
+ // 2. At the correct level relative to each other.
+ unsigned MinChildLevel =
+ std::min_element(N->Children.begin(), N->Children.end(),
+ [](const auto &E1, const auto &E2) {
+ return E1->Level < E2->Level;
+ })
+ ->get()
+ ->Level;
+ for (const auto &Child : N->Children) {
+ if (Child->Tokens.empty())
+ continue;
+ Current.Children.push_back(createUnwrappedLine(
+ *Child, Level + 1 + (Child->Level - MinChildLevel)));
+ }
}
}
return Result;
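Editorial note: the indent formula in the hunk above places children one level below the current line while preserving their relative offsets. A small standalone check with hypothetical level values (not part of the patch):

#include <algorithm>
#include <cassert>
#include <vector>

int main() {
  // Hypothetical input: a parent line at level 1 whose non-empty children
  // were reconstructed at levels 3 and 5.
  unsigned Level = 1;
  std::vector<unsigned> ChildLevels = {3, 5};
  unsigned MinChildLevel =
      *std::min_element(ChildLevels.begin(), ChildLevels.end());
  std::vector<unsigned> Indents;
  for (unsigned ChildLevel : ChildLevels)
    Indents.push_back(Level + 1 + (ChildLevel - MinChildLevel));
  // One level below the parent, relative spacing preserved: {2, 4}.
  assert(Indents[0] == 2 && Indents[1] == 4);
  return 0;
}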
diff --git a/clang/lib/Format/Macros.h b/clang/lib/Format/Macros.h
index 1964624e828c..d2f7fe502364 100644
--- a/clang/lib/Format/Macros.h
+++ b/clang/lib/Format/Macros.h
@@ -231,8 +231,9 @@ public:
UnwrappedLine takeResult() &&;
private:
- void add(FormatToken *Token, FormatToken *ExpandedParent, bool First);
- void prepareParent(FormatToken *ExpandedParent, bool First);
+ void add(FormatToken *Token, FormatToken *ExpandedParent, bool First,
+ unsigned Level);
+ void prepareParent(FormatToken *ExpandedParent, bool First, unsigned Level);
FormatToken *getParentInResult(FormatToken *Parent);
void reconstruct(FormatToken *Token);
void startReconstruction(FormatToken *Token);
@@ -272,6 +273,8 @@ private:
// FIXME: Investigate changing UnwrappedLine to a pointer type and using it
// instead of rolling our own type.
struct ReconstructedLine {
+ explicit ReconstructedLine(unsigned Level) : Level(Level) {}
+ unsigned Level;
llvm::SmallVector<std::unique_ptr<LineNode>> Tokens;
};
@@ -373,9 +376,6 @@ private:
// \- )
llvm::SmallVector<MacroCallState> MacroCallStructure;
- // Level the generated UnwrappedLine will be at.
- const unsigned Level;
-
// Maps from identifier of the macro call to an unwrapped line containing
// all tokens of the macro call.
const llvm::DenseMap<FormatToken *, std::unique_ptr<UnwrappedLine>>
diff --git a/clang/lib/Format/TokenAnnotator.cpp b/clang/lib/Format/TokenAnnotator.cpp
index ec7b7f4dbe34..a60d6ae197a2 100644
--- a/clang/lib/Format/TokenAnnotator.cpp
+++ b/clang/lib/Format/TokenAnnotator.cpp
@@ -3817,7 +3817,7 @@ void TokenAnnotator::calculateFormattingInformation(AnnotatedLine &Line) const {
do {
Tok = Tok->Next;
} while (Tok && Tok->isNot(TT_OverloadedOperatorLParen));
- if (!Tok)
+ if (!Tok || !Tok->MatchingParen)
break;
const auto *LeftParen = Tok;
for (Tok = Tok->Next; Tok && Tok != LeftParen->MatchingParen;
diff --git a/clang/lib/Format/UnwrappedLineParser.cpp b/clang/lib/Format/UnwrappedLineParser.cpp
index 8f6453a25d9d..3a424bdcde79 100644
--- a/clang/lib/Format/UnwrappedLineParser.cpp
+++ b/clang/lib/Format/UnwrappedLineParser.cpp
@@ -90,6 +90,12 @@ private:
} // end anonymous namespace
+std::ostream &operator<<(std::ostream &Stream, const UnwrappedLine &Line) {
+ llvm::raw_os_ostream OS(Stream);
+ printLine(OS, Line);
+ return Stream;
+}
+
class ScopedLineState {
public:
ScopedLineState(UnwrappedLineParser &Parser,
diff --git a/clang/lib/Format/UnwrappedLineParser.h b/clang/lib/Format/UnwrappedLineParser.h
index 739298690bbd..1403533a2d0e 100644
--- a/clang/lib/Format/UnwrappedLineParser.h
+++ b/clang/lib/Format/UnwrappedLineParser.h
@@ -420,6 +420,8 @@ struct UnwrappedLineNode {
SmallVector<UnwrappedLine, 0> Children;
};
+std::ostream &operator<<(std::ostream &Stream, const UnwrappedLine &Line);
+
} // end namespace format
} // end namespace clang
diff --git a/clang/lib/Frontend/ASTUnit.cpp b/clang/lib/Frontend/ASTUnit.cpp
index f09a01b5dd4a..3610a08831e7 100644
--- a/clang/lib/Frontend/ASTUnit.cpp
+++ b/clang/lib/Frontend/ASTUnit.cpp
@@ -540,7 +540,17 @@ public:
if (InitializedLanguage)
return false;
+ // FIXME: We did similar things in ReadHeaderSearchOptions too. But this
+ // style does not scale. We probably need to introduce some mechanism to
+ // handle such patterns generally.
+ auto PICLevel = LangOpt.PICLevel;
+ auto PIE = LangOpt.PIE;
+
LangOpt = LangOpts;
+
+ LangOpt.PICLevel = PICLevel;
+ LangOpt.PIE = PIE;
+
InitializedLanguage = true;
updated();
@@ -790,7 +800,8 @@ std::unique_ptr<ASTUnit> ASTUnit::LoadFromASTFile(
const std::string &Filename, const PCHContainerReader &PCHContainerRdr,
WhatToLoad ToLoad, IntrusiveRefCntPtr<DiagnosticsEngine> Diags,
const FileSystemOptions &FileSystemOpts,
- std::shared_ptr<HeaderSearchOptions> HSOpts, bool OnlyLocalDecls,
+ std::shared_ptr<HeaderSearchOptions> HSOpts,
+ std::shared_ptr<LangOptions> LangOpts, bool OnlyLocalDecls,
CaptureDiagsKind CaptureDiagnostics, bool AllowASTWithCompilerErrors,
bool UserFilesAreVolatile, IntrusiveRefCntPtr<llvm::vfs::FileSystem> VFS) {
std::unique_ptr<ASTUnit> AST(new ASTUnit(true));
@@ -804,7 +815,7 @@ std::unique_ptr<ASTUnit> ASTUnit::LoadFromASTFile(
ConfigureDiags(Diags, *AST, CaptureDiagnostics);
- AST->LangOpts = std::make_shared<LangOptions>();
+ AST->LangOpts = LangOpts ? LangOpts : std::make_shared<LangOptions>();
AST->OnlyLocalDecls = OnlyLocalDecls;
AST->CaptureDiagnostics = CaptureDiagnostics;
AST->Diagnostics = Diags;
diff --git a/clang/lib/Frontend/FrontendAction.cpp b/clang/lib/Frontend/FrontendAction.cpp
index eff785b99a09..b9fd9b8897b7 100644
--- a/clang/lib/Frontend/FrontendAction.cpp
+++ b/clang/lib/Frontend/FrontendAction.cpp
@@ -689,7 +689,7 @@ bool FrontendAction::BeginSourceFile(CompilerInstance &CI,
std::unique_ptr<ASTUnit> AST = ASTUnit::LoadFromASTFile(
std::string(InputFile), CI.getPCHContainerReader(),
ASTUnit::LoadEverything, Diags, CI.getFileSystemOpts(),
- CI.getHeaderSearchOptsPtr());
+ CI.getHeaderSearchOptsPtr(), CI.getLangOptsPtr());
if (!AST)
return false;
diff --git a/clang/lib/Headers/arm_acle.h b/clang/lib/Headers/arm_acle.h
index 9cd34948e3c5..6e557eda1ddd 100644
--- a/clang/lib/Headers/arm_acle.h
+++ b/clang/lib/Headers/arm_acle.h
@@ -313,7 +313,7 @@ __qdbl(int32_t __t) {
}
#endif
-/* 8.4.3 Accumultating multiplications */
+/* 8.4.3 Accumulating multiplications */
#if defined(__ARM_FEATURE_DSP) && __ARM_FEATURE_DSP
static __inline__ int32_t __attribute__((__always_inline__, __nodebug__))
__smlabb(int32_t __a, int32_t __b, int32_t __c) {
@@ -545,7 +545,7 @@ __usub16(uint16x2_t __a, uint16x2_t __b) {
}
#endif
-/* 8.5.10 Parallel 16-bit multiplications */
+/* 8.5.10 Parallel 16-bit multiplication */
#if defined(__ARM_FEATURE_SIMD32) && __ARM_FEATURE_SIMD32
static __inline__ int32_t __attribute__((__always_inline__, __nodebug__))
__smlad(int16x2_t __a, int16x2_t __b, int32_t __c) {
@@ -748,7 +748,7 @@ __arm_st64bv0(void *__addr, data512_t __value) {
#define __arm_wsrf(sysreg, v) __arm_wsr(sysreg, __builtin_bit_cast(uint32_t, v))
#define __arm_wsrf64(sysreg, v) __arm_wsr64(sysreg, __builtin_bit_cast(uint64_t, v))
-/* 10.3 Memory Tagging Extensions (MTE) Intrinsics */
+/* 10.3 MTE intrinsics */
#if defined(__ARM_64BIT_STATE) && __ARM_64BIT_STATE
#define __arm_mte_create_random_tag(__ptr, __mask) __builtin_arm_irg(__ptr, __mask)
#define __arm_mte_increment_tag(__ptr, __tag_offset) __builtin_arm_addg(__ptr, __tag_offset)
@@ -757,7 +757,7 @@ __arm_st64bv0(void *__addr, data512_t __value) {
#define __arm_mte_set_tag(__ptr) __builtin_arm_stg(__ptr)
#define __arm_mte_ptrdiff(__ptra, __ptrb) __builtin_arm_subp(__ptra, __ptrb)
-/* 18 Memory Operations Intrinsics */
+/* 18 memcpy family of operations intrinsics - MOPS */
#define __arm_mops_memset_tag(__tagged_address, __value, __size) \
__builtin_arm_mops_memset_tag(__tagged_address, __value, __size)
#endif
diff --git a/clang/lib/Headers/cpuid.h b/clang/lib/Headers/cpuid.h
index c968d37fb8cd..0bb9912b465f 100644
--- a/clang/lib/Headers/cpuid.h
+++ b/clang/lib/Headers/cpuid.h
@@ -219,6 +219,7 @@
#define bit_PREFETCHI 0x00004000
#define bit_USERMSR 0x00008000
#define bit_AVX10 0x00080000
+#define bit_APXF 0x00200000
/* Features in %eax for leaf 13 sub-leaf 1 */
#define bit_XSAVEOPT 0x00000001
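Editorial note: bit_APXF lands in the leaf-7 sub-leaf-1 %edx feature word alongside bit_PREFETCHI/bit_USERMSR/bit_AVX10. A hedged sketch of how the new bit could be queried with the existing cpuid.h helpers (the helper function is illustrative and assumes a cpuid.h that already contains this change):

#include <cpuid.h>

// Returns nonzero if the CPU reports APX_F (CPUID leaf 7, sub-leaf 1, %edx).
static int has_apx_f(void) {
  unsigned int eax, ebx, ecx, edx;
  if (!__get_cpuid_count(7, 1, &eax, &ebx, &ecx, &edx))
    return 0;
  return (edx & bit_APXF) != 0;
}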
diff --git a/clang/lib/Parse/ParseExprCXX.cpp b/clang/lib/Parse/ParseExprCXX.cpp
index fd262ff31e66..22ee60af4616 100644
--- a/clang/lib/Parse/ParseExprCXX.cpp
+++ b/clang/lib/Parse/ParseExprCXX.cpp
@@ -1385,6 +1385,16 @@ ExprResult Parser::ParseLambdaExpressionAfterIntroducer(
Diag(RAngleLoc,
diag::err_lambda_template_parameter_list_empty);
} else {
+ // We increase the template depth before recursing into a requires-clause.
+ //
+ // This depth is used for setting up a LambdaScopeInfo (in
+ // Sema::RecordParsingTemplateParameterDepth), which is used later when
+ // inventing template parameters in InventTemplateParameter.
+ //
+ // This way, abbreviated generic lambdas can have different template
+ // depths, avoiding substitution into the wrong template parameters during
+ // the constraint satisfaction check.
+ ++CurTemplateDepthTracker;
ExprResult RequiresClause;
if (TryConsumeToken(tok::kw_requires)) {
RequiresClause =
@@ -1396,7 +1406,6 @@ ExprResult Parser::ParseLambdaExpressionAfterIntroducer(
Actions.ActOnLambdaExplicitTemplateParameterList(
Intro, LAngleLoc, TemplateParams, RAngleLoc, RequiresClause);
- ++CurTemplateDepthTracker;
}
}
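Editorial note: the depth bump above matters for lambdas that combine an explicit template parameter list, a requires-clause, and an abbreviated (auto) parameter, since the invented template parameter must sit at a different depth than the explicit one when the constraint is checked. A hedged C++20 sketch of the shape of code affected (illustrative only, not taken from the patch or its tests):

#include <concepts>

// T is explicit; 'extra' introduces an invented template parameter.
// The two must live at different template depths when
// std::integral<T> is checked.
auto f = []<typename T>
  requires std::integral<T>
(T value, auto extra) { return value; };

int use() { return f(1, 2.0); }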
diff --git a/clang/lib/Parse/ParseOpenACC.cpp b/clang/lib/Parse/ParseOpenACC.cpp
index 50e78e8687ae..4946a61fca00 100644
--- a/clang/lib/Parse/ParseOpenACC.cpp
+++ b/clang/lib/Parse/ParseOpenACC.cpp
@@ -560,6 +560,21 @@ bool doesDirectiveHaveAssociatedStmt(OpenACCDirectiveKind DirKind) {
llvm_unreachable("Unhandled directive->assoc stmt");
}
+unsigned getOpenACCScopeFlags(OpenACCDirectiveKind DirKind) {
+ switch (DirKind) {
+ case OpenACCDirectiveKind::Parallel:
+ // Mark this as a BreakScope/ContinueScope as well as a compute construct
+ // so that we can diagnose trying to 'break'/'continue' inside of one.
+ return Scope::BreakScope | Scope::ContinueScope |
+ Scope::OpenACCComputeConstructScope;
+ case OpenACCDirectiveKind::Invalid:
+ llvm_unreachable("Shouldn't be creating a scope for an invalid construct");
+ default:
+ break;
+ }
+ return 0;
+}
+
} // namespace
// OpenACC 3.3, section 1.7:
@@ -1228,6 +1243,8 @@ StmtResult Parser::ParseOpenACCDirectiveStmt() {
if (doesDirectiveHaveAssociatedStmt(DirInfo.DirKind)) {
ParsingOpenACCDirectiveRAII DirScope(*this, /*Value=*/false);
+ ParseScope ACCScope(this, getOpenACCScopeFlags(DirInfo.DirKind));
+
AssocStmt = getActions().ActOnOpenACCAssociatedStmt(DirInfo.DirKind,
ParseStatement());
}
diff --git a/clang/lib/Sema/Scope.cpp b/clang/lib/Sema/Scope.cpp
index 4570d8c615fe..cea6a62e3474 100644
--- a/clang/lib/Sema/Scope.cpp
+++ b/clang/lib/Sema/Scope.cpp
@@ -225,6 +225,7 @@ void Scope::dumpImpl(raw_ostream &OS) const {
{CompoundStmtScope, "CompoundStmtScope"},
{ClassInheritanceScope, "ClassInheritanceScope"},
{CatchScope, "CatchScope"},
+ {OpenACCComputeConstructScope, "OpenACCComputeConstructScope"},
};
for (auto Info : FlagInfo) {
diff --git a/clang/lib/Sema/SemaChecking.cpp b/clang/lib/Sema/SemaChecking.cpp
index e8bfb215a5b4..7fa295ebd940 100644
--- a/clang/lib/Sema/SemaChecking.cpp
+++ b/clang/lib/Sema/SemaChecking.cpp
@@ -2165,7 +2165,10 @@ static bool SemaBuiltinCpu(Sema &S, const TargetInfo &TI, CallExpr *TheCall,
return S.Diag(TheCall->getBeginLoc(), diag::err_builtin_target_unsupported)
<< SourceRange(TheCall->getBeginLoc(), TheCall->getEndLoc());
if (!IsCPUSupports && !TheTI->supportsCpuIs())
- return S.Diag(TheCall->getBeginLoc(), diag::err_builtin_target_unsupported)
+ return S.Diag(TheCall->getBeginLoc(),
+ TI.getTriple().isOSAIX()
+ ? diag::err_builtin_aix_os_unsupported
+ : diag::err_builtin_target_unsupported)
<< SourceRange(TheCall->getBeginLoc(), TheCall->getEndLoc());
Expr *Arg = TheCall->getArg(0)->IgnoreParenImpCasts();
@@ -19033,6 +19036,9 @@ static bool isLayoutCompatible(ASTContext &C, FieldDecl *Field1,
return false;
}
+ if (Field1->hasAttr<clang::NoUniqueAddressAttr>() ||
+ Field2->hasAttr<clang::NoUniqueAddressAttr>())
+ return false;
return true;
}
diff --git a/clang/lib/Sema/SemaStmt.cpp b/clang/lib/Sema/SemaStmt.cpp
index dde3bd84e89f..fcad09a63662 100644
--- a/clang/lib/Sema/SemaStmt.cpp
+++ b/clang/lib/Sema/SemaStmt.cpp
@@ -3356,6 +3356,14 @@ Sema::ActOnContinueStmt(SourceLocation ContinueLoc, Scope *CurScope) {
// initialization of that variable.
return StmtError(Diag(ContinueLoc, diag::err_continue_from_cond_var_init));
}
+
+ // A 'continue' that would normally have execution continue on a block outside
+ // of a compute construct counts as 'branching out of' the compute construct,
+ // so diagnose here.
+ if (S->isOpenACCComputeConstructScope())
+ return StmtError(Diag(ContinueLoc, diag::err_acc_branch_in_out)
+ << /*out of */ 0);
+
CheckJumpOutOfSEHFinally(*this, ContinueLoc, *S);
return new (Context) ContinueStmt(ContinueLoc);
@@ -3371,6 +3379,20 @@ Sema::ActOnBreakStmt(SourceLocation BreakLoc, Scope *CurScope) {
if (S->isOpenMPLoopScope())
return StmtError(Diag(BreakLoc, diag::err_omp_loop_cannot_use_stmt)
<< "break");
+
+ // OpenACC doesn't allow 'break'ing from a compute construct, so diagnose if
+ // we are trying to do so. This can come in two flavors: either the
+ // break'able construct (besides the compute construct) 'contains' the
+ // compute construct, in which case the 'break' scope is the compute
+ // construct itself, or it is a loop of some sort whose direct parent is
+ // the compute construct. However, a 'break' in a 'switch' marked as a
+ // compute construct doesn't count as 'branching out of' the compute
+ // construct.
+ if (S->isOpenACCComputeConstructScope() ||
+ (S->isLoopScope() && S->getParent() &&
+ S->getParent()->isOpenACCComputeConstructScope()))
+ return StmtError(Diag(BreakLoc, diag::err_acc_branch_in_out)
+ << /*out of */ 0);
+
CheckJumpOutOfSEHFinally(*this, BreakLoc, *S);
return new (Context) BreakStmt(BreakLoc);
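Editorial note: a hedged sketch of the kind of code the new checks reject (assumes compilation with -fopenacc; the example itself is illustrative, not from the patch): a 'break' inside the compute construct whose target loop encloses it counts as branching out of the construct and is diagnosed with err_acc_branch_in_out.

void walk(int *a, int n) {
  for (int i = 0; i < n; ++i) {
#pragma acc parallel
    {
      if (a[i] < 0)
        break; // diagnosed: branches out of the enclosing compute construct
    }
  }
}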
diff --git a/clang/lib/Sema/SemaTemplate.cpp b/clang/lib/Sema/SemaTemplate.cpp
index 1a975a8d0a0d..7d3d665194ad 100644
--- a/clang/lib/Sema/SemaTemplate.cpp
+++ b/clang/lib/Sema/SemaTemplate.cpp
@@ -4958,11 +4958,10 @@ Sema::CheckVarTemplateId(VarTemplateDecl *Template, SourceLocation TemplateLoc,
return Decl;
}
-ExprResult
-Sema::CheckVarTemplateId(const CXXScopeSpec &SS,
- const DeclarationNameInfo &NameInfo,
- VarTemplateDecl *Template, SourceLocation TemplateLoc,
- const TemplateArgumentListInfo *TemplateArgs) {
+ExprResult Sema::CheckVarTemplateId(
+ const CXXScopeSpec &SS, const DeclarationNameInfo &NameInfo,
+ VarTemplateDecl *Template, NamedDecl *FoundD, SourceLocation TemplateLoc,
+ const TemplateArgumentListInfo *TemplateArgs) {
DeclResult Decl = CheckVarTemplateId(Template, TemplateLoc, NameInfo.getLoc(),
*TemplateArgs);
@@ -4978,8 +4977,7 @@ Sema::CheckVarTemplateId(const CXXScopeSpec &SS,
NameInfo.getLoc());
// Build an ordinary singleton decl ref.
- return BuildDeclarationNameExpr(SS, NameInfo, Var,
- /*FoundD=*/nullptr, TemplateArgs);
+ return BuildDeclarationNameExpr(SS, NameInfo, Var, FoundD, TemplateArgs);
}
void Sema::diagnoseMissingTemplateArguments(TemplateName Name,
@@ -5066,9 +5064,9 @@ ExprResult Sema::BuildTemplateIdExpr(const CXXScopeSpec &SS,
bool KnownDependent = false;
// In C++1y, check variable template ids.
if (R.getAsSingle<VarTemplateDecl>()) {
- ExprResult Res = CheckVarTemplateId(SS, R.getLookupNameInfo(),
- R.getAsSingle<VarTemplateDecl>(),
- TemplateKWLoc, TemplateArgs);
+ ExprResult Res = CheckVarTemplateId(
+ SS, R.getLookupNameInfo(), R.getAsSingle<VarTemplateDecl>(),
+ R.getRepresentativeDecl(), TemplateKWLoc, TemplateArgs);
if (Res.isInvalid() || Res.isUsable())
return Res;
// Result is dependent. Carry on to build an UnresolvedLookupEpxr.
diff --git a/clang/lib/StaticAnalyzer/Checkers/ArrayBoundCheckerV2.cpp b/clang/lib/StaticAnalyzer/Checkers/ArrayBoundCheckerV2.cpp
index 05fc00a990d5..fdcc46e58580 100644
--- a/clang/lib/StaticAnalyzer/Checkers/ArrayBoundCheckerV2.cpp
+++ b/clang/lib/StaticAnalyzer/Checkers/ArrayBoundCheckerV2.cpp
@@ -268,6 +268,16 @@ getSimplifiedOffsets(NonLoc offset, nonloc::ConcreteInt extent,
return std::pair<NonLoc, nonloc::ConcreteInt>(offset, extent);
}
+static bool isNegative(SValBuilder &SVB, ProgramStateRef State, NonLoc Value) {
+ const llvm::APSInt *MaxV = SVB.getMaxValue(State, Value);
+ return MaxV && MaxV->isNegative();
+}
+
+static bool isUnsigned(SValBuilder &SVB, NonLoc Value) {
+ QualType T = Value.getType(SVB.getContext());
+ return T->isUnsignedIntegerType();
+}
+
// Evaluate the comparison Value < Threshold with the help of the custom
// simplification algorithm defined for this checker. Return a pair of states,
// where the first one corresponds to "value below threshold" and the second
@@ -281,18 +291,32 @@ compareValueToThreshold(ProgramStateRef State, NonLoc Value, NonLoc Threshold,
if (auto ConcreteThreshold = Threshold.getAs<nonloc::ConcreteInt>()) {
std::tie(Value, Threshold) = getSimplifiedOffsets(Value, *ConcreteThreshold, SVB);
}
- if (auto ConcreteThreshold = Threshold.getAs<nonloc::ConcreteInt>()) {
- QualType T = Value.getType(SVB.getContext());
- if (T->isUnsignedIntegerType() && ConcreteThreshold->getValue().isNegative()) {
- // In this case we reduced the bound check to a comparison of the form
- // (symbol or value with unsigned type) < (negative number)
- // which is always false. We are handling these cases separately because
- // evalBinOpNN can perform a signed->unsigned conversion that turns the
- // negative number into a huge positive value and leads to wildly
- // inaccurate conclusions.
+
+ // We want to perform a _mathematical_ comparison between the numbers `Value`
+ // and `Threshold`; but `evalBinOpNN` evaluates a C/C++ operator that may
+ // perform automatic conversions. For example the number -1 is less than the
+ // number 1000, but -1 < `1000ull` will evaluate to `false` because the `int`
+ // -1 is converted to ULLONG_MAX.
+ // To avoid automatic conversions, we evaluate the "obvious" cases without
+ // calling `evalBinOpNN`:
+ if (isNegative(SVB, State, Value) && isUnsigned(SVB, Threshold)) {
+ if (CheckEquality) {
+ // negative_value == unsigned_value is always false
return {nullptr, State};
}
+ // negative_value < unsigned_value is always false
+ return {State, nullptr};
}
+ if (isUnsigned(SVB, Value) && isNegative(SVB, State, Threshold)) {
+ // unsigned_value == negative_value and unsigned_value < negative_value are
+ // both always false
+ return {nullptr, State};
+ }
+ // FIXME: these special cases are sufficient for handling real-world
+ // comparisons, but in theory there could be contrived situations where
+ // automatic conversion of a symbolic value (which can be negative and can be
+ // positive) leads to incorrect results.
+
const BinaryOperatorKind OpKind = CheckEquality ? BO_EQ : BO_LT;
auto BelowThreshold =
SVB.evalBinOpNN(State, OpKind, Value, Threshold, SVB.getConditionType())
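Editorial note: the conversion pitfall described in the comment above is easy to reproduce outside the analyzer; a minimal illustration (not part of the patch):

// The usual arithmetic conversions turn the int -1 into a huge unsigned
// value before the comparison, so the C++ result disagrees with the
// mathematical one; the checker now special-cases exactly this situation.
static_assert(!(-1 < 1000ull), "-1 is converted to ULLONG_MAX first");
static_assert(-1 < 1000, "with two signed operands the result is mathematical");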
diff --git a/clang/lib/StaticAnalyzer/Checkers/ErrnoChecker.cpp b/clang/lib/StaticAnalyzer/Checkers/ErrnoChecker.cpp
index 265185e64107..18e718e08553 100644
--- a/clang/lib/StaticAnalyzer/Checkers/ErrnoChecker.cpp
+++ b/clang/lib/StaticAnalyzer/Checkers/ErrnoChecker.cpp
@@ -17,7 +17,7 @@
#include "clang/StaticAnalyzer/Checkers/BuiltinCheckerRegistration.h"
#include "clang/StaticAnalyzer/Core/Checker.h"
#include "clang/StaticAnalyzer/Core/CheckerManager.h"
-#include "clang/StaticAnalyzer/Core/PathSensitive/CallDescription.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/CallEvent.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/CheckerContext.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/ProgramState.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/SVals.h"
diff --git a/clang/lib/StaticAnalyzer/Checkers/StreamChecker.cpp b/clang/lib/StaticAnalyzer/Checkers/StreamChecker.cpp
index a070f451694a..65bdc4cac309 100644
--- a/clang/lib/StaticAnalyzer/Checkers/StreamChecker.cpp
+++ b/clang/lib/StaticAnalyzer/Checkers/StreamChecker.cpp
@@ -21,6 +21,7 @@
#include "clang/StaticAnalyzer/Core/PathSensitive/ProgramState.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/ProgramStateTrait.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/SymbolManager.h"
+#include "llvm/ADT/Sequence.h"
#include <functional>
#include <optional>
@@ -629,6 +630,21 @@ const ExplodedNode *StreamChecker::getAcquisitionSite(const ExplodedNode *N,
return nullptr;
}
+static ProgramStateRef escapeArgs(ProgramStateRef State, CheckerContext &C,
+ const CallEvent &Call,
+ ArrayRef<unsigned int> EscapingArgs) {
+ const auto *CE = Call.getOriginExpr();
+
+ SmallVector<SVal> EscapingVals;
+ EscapingVals.reserve(EscapingArgs.size());
+ for (auto EscArgIdx : EscapingArgs)
+ EscapingVals.push_back(Call.getArgSVal(EscArgIdx));
+ State = State->invalidateRegions(EscapingVals, CE, C.blockCount(),
+ C.getLocationContext(),
+ /*CausesPointerEscape=*/false);
+ return State;
+}
+
//===----------------------------------------------------------------------===//
// Methods of StreamChecker.
//===----------------------------------------------------------------------===//
@@ -819,6 +835,11 @@ void StreamChecker::evalFreadFwrite(const FnDescription *Desc,
return;
}
+ // On a read, invalidate the buffer on both success and failure, except
+ // if EOF was already present.
+ if (IsFread && !E.isStreamEof())
+ State = escapeArgs(State, C, Call, {0});
+
// Generate a transition for the success state.
// If we know the state to be FEOF at fread, do not add a success state.
if (!IsFread || !E.isStreamEof()) {
@@ -863,6 +884,9 @@ void StreamChecker::evalFgetx(const FnDescription *Desc, const CallEvent &Call,
return;
if (!E.isStreamEof()) {
+ // If the stream was already at EOF, assume that the read buffer is not
+ // changed. Otherwise it may change on either success or failure.
+ State = escapeArgs(State, C, Call, {0});
if (SingleChar) {
// Generate a transition for the success state of `fgetc`.
NonLoc RetVal = makeRetVal(C, E.CE).castAs<NonLoc>();
@@ -1011,6 +1035,14 @@ void StreamChecker::evalFscanf(const FnDescription *Desc, const CallEvent &Call,
State->BindExpr(E.CE, C.getLocationContext(), RetVal);
StateNotFailed =
E.assumeBinOpNN(StateNotFailed, BO_GE, RetVal, E.getZeroVal(Call));
+ if (!StateNotFailed)
+ return;
+
+ SmallVector<unsigned int> EscArgs;
+ for (auto EscArg : llvm::seq(2u, Call.getNumArgs()))
+ EscArgs.push_back(EscArg);
+ StateNotFailed = escapeArgs(StateNotFailed, C, Call, EscArgs);
+
if (StateNotFailed)
C.addTransition(StateNotFailed);
}
@@ -1073,8 +1105,12 @@ void StreamChecker::evalGetdelim(const FnDescription *Desc,
// return -1.
// If an error occurs, the function shall return -1 and set 'errno'.
- // Add transition for the successful state.
if (!E.isStreamEof()) {
+ // Escape the buffer and size arguments (they may be changed by the call,
+ // possibly even on error because of a partial read).
+ State = escapeArgs(State, C, Call, {0, 1});
+
+ // Add transition for the successful state.
NonLoc RetVal = makeRetVal(C, E.CE).castAs<NonLoc>();
ProgramStateRef StateNotFailed =
State->BindExpr(E.CE, C.getLocationContext(), RetVal);
@@ -1161,6 +1197,7 @@ void StreamChecker::evalFgetpos(const FnDescription *Desc,
ProgramStateRef StateNotFailed, StateFailed;
std::tie(StateFailed, StateNotFailed) = E.makeRetValAndAssumeDual(State, C);
+ StateNotFailed = escapeArgs(StateNotFailed, C, Call, {1});
// This function does not affect the stream state.
// Still we add success and failure state with the appropriate return value.
diff --git a/clang/lib/StaticAnalyzer/Checkers/TaggedUnionModeling.h b/clang/lib/StaticAnalyzer/Checkers/TaggedUnionModeling.h
index 6de33da107a3..dec461296fed 100644
--- a/clang/lib/StaticAnalyzer/Checkers/TaggedUnionModeling.h
+++ b/clang/lib/StaticAnalyzer/Checkers/TaggedUnionModeling.h
@@ -13,7 +13,6 @@
#include "clang/StaticAnalyzer/Core/BugReporter/BugType.h"
#include "clang/StaticAnalyzer/Core/Checker.h"
#include "clang/StaticAnalyzer/Core/CheckerManager.h"
-#include "clang/StaticAnalyzer/Core/PathSensitive/CallDescription.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/CallEvent.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/CheckerContext.h"
#include "llvm/ADT/FoldingSet.h"
@@ -96,4 +95,4 @@ void handleConstructorAndAssignment(const CallEvent &Call, CheckerContext &C,
} // namespace clang::ento::tagged_union_modeling
-#endif // LLVM_CLANG_LIB_STATICANALYZER_CHECKERS_TAGGEDUNIONMODELING_H
\ No newline at end of file
+#endif // LLVM_CLANG_LIB_STATICANALYZER_CHECKERS_TAGGEDUNIONMODELING_H
diff --git a/clang/lib/StaticAnalyzer/Checkers/VLASizeChecker.cpp b/clang/lib/StaticAnalyzer/Checkers/VLASizeChecker.cpp
index d76fe4991869..87d255eeffc1 100644
--- a/clang/lib/StaticAnalyzer/Checkers/VLASizeChecker.cpp
+++ b/clang/lib/StaticAnalyzer/Checkers/VLASizeChecker.cpp
@@ -164,12 +164,6 @@ ProgramStateRef VLASizeChecker::checkVLAIndexSize(CheckerContext &C,
if (SizeV.isUnknown())
return nullptr;
- // Check if the size is tainted.
- if (isTainted(State, SizeV)) {
- reportTaintBug(SizeE, State, C, SizeV);
- return nullptr;
- }
-
// Check if the size is zero.
DefinedSVal SizeD = SizeV.castAs<DefinedSVal>();
@@ -192,10 +186,10 @@ ProgramStateRef VLASizeChecker::checkVLAIndexSize(CheckerContext &C,
SVal LessThanZeroVal =
SVB.evalBinOp(State, BO_LT, SizeD, Zero, SVB.getConditionType());
+ ProgramStateRef StatePos, StateNeg;
if (std::optional<DefinedSVal> LessThanZeroDVal =
LessThanZeroVal.getAs<DefinedSVal>()) {
ConstraintManager &CM = C.getConstraintManager();
- ProgramStateRef StatePos, StateNeg;
std::tie(StateNeg, StatePos) = CM.assumeDual(State, *LessThanZeroDVal);
if (StateNeg && !StatePos) {
@@ -205,6 +199,12 @@ ProgramStateRef VLASizeChecker::checkVLAIndexSize(CheckerContext &C,
State = StatePos;
}
+ // Check if the size is tainted.
+ if ((StateNeg || StateZero) && isTainted(State, SizeV)) {
+ reportTaintBug(SizeE, State, C, SizeV);
+ return nullptr;
+ }
+
return State;
}
@@ -218,7 +218,7 @@ void VLASizeChecker::reportTaintBug(const Expr *SizeE, ProgramStateRef State,
SmallString<256> buf;
llvm::raw_svector_ostream os(buf);
os << "Declared variable-length array (VLA) ";
- os << "has tainted size";
+ os << "has tainted (attacker controlled) size that can be 0 or negative";
auto report = std::make_unique<PathSensitiveBugReport>(TaintBT, os.str(), N);
report->addRange(SizeE->getSourceRange());
diff --git a/clang/test/AST/ast-dump-using.cpp b/clang/test/AST/ast-dump-using.cpp
index 5a4e910ffb86..8e5c60d3aabf 100644
--- a/clang/test/AST/ast-dump-using.cpp
+++ b/clang/test/AST/ast-dump-using.cpp
@@ -2,6 +2,7 @@
namespace a {
struct S;
+template <typename T> T x = {};
}
namespace b {
using a::S;
@@ -21,4 +22,10 @@ typedef S e; // check the same UsingType is reused.
// CHECK-NEXT: `-UsingType [[TYPE_ADDR]] 'a::S' sugar
// CHECK-NEXT: |-UsingShadow [[SHADOW_ADDR]] 'S'
// CHECK-NEXT: `-RecordType {{.*}} 'a::S'
+using a::x;
+
+void foo() {
+ x<int> = 3;
+ // CHECK: DeclRefExpr {{.*}} 'x' {{.*}} (UsingShadow {{.*}} 'x')
+}
}
diff --git a/clang/test/Analysis/out-of-bounds.c b/clang/test/Analysis/out-of-bounds.c
index ed457e869600..1f771c2b3bd1 100644
--- a/clang/test/Analysis/out-of-bounds.c
+++ b/clang/test/Analysis/out-of-bounds.c
@@ -186,3 +186,11 @@ void test_assume_after_access2(unsigned long x) {
clang_analyzer_eval(x <= 99); // expected-warning{{TRUE}}
}
+struct incomplete;
+char test_comparison_with_extent_symbol(struct incomplete *p) {
+ // Previously this was reported as a (false positive) overflow error because
+ // the extent symbol of the area pointed to by `p` was unsigned and the '-1'
+ // was converted to its type by `evalBinOpNN`.
+ return ((char *)p)[-1]; // no-warning
+}
+
diff --git a/clang/test/Analysis/stream-invalidate.c b/clang/test/Analysis/stream-invalidate.c
new file mode 100644
index 000000000000..6745d11a2fe7
--- /dev/null
+++ b/clang/test/Analysis/stream-invalidate.c
@@ -0,0 +1,147 @@
+// RUN: %clang_analyze_cc1 -verify %s \
+// RUN: -analyzer-checker=core \
+// RUN: -analyzer-checker=alpha.unix.Stream \
+// RUN: -analyzer-checker=debug.ExprInspection
+
+#include "Inputs/system-header-simulator.h"
+
+void clang_analyzer_eval(int);
+void clang_analyzer_dump(int);
+
+void test_fread(void) {
+ FILE *F = fopen("file", "r+");
+ if (!F)
+ return;
+
+ char Buf[3] = {10, 10, 10};
+ fread(Buf, 1, 3, F);
+ // The check applies to success and failure.
+ clang_analyzer_dump(Buf[0]); // expected-warning {{conj_$}} Should not preserve the previous value, thus should not be 10.
+ clang_analyzer_dump(Buf[2]); // expected-warning {{conj_$}}
+ if (feof(F)) {
+ char Buf1[3] = {10, 10, 10};
+ fread(Buf1, 1, 3, F); // expected-warning {{is in EOF state}}
+ clang_analyzer_dump(Buf1[0]); // expected-warning {{10 S32b}}
+ clang_analyzer_dump(Buf1[2]); // expected-warning {{10 S32b}}
+ }
+
+ fclose(F);
+}
+
+void test_fwrite(void) {
+ FILE *F = fopen("file", "r+");
+ if (!F)
+ return;
+
+ char Buf[3] = {10, 10, 10};
+ fwrite(Buf, 1, 3, F);
+ // The check applies to success and failure.
+ clang_analyzer_dump(Buf[0]); // expected-warning {{10 S32b}}
+ clang_analyzer_dump(Buf[2]); // expected-warning {{10 S32b}}
+
+ fclose(F);
+}
+
+void test_fgets() {
+ FILE *F = tmpfile();
+ if (!F)
+ return;
+
+ char Buf[3] = {10, 10, 10};
+ fgets(Buf, 3, F);
+ // The check applies to success and failure.
+ clang_analyzer_dump(Buf[0]); // expected-warning {{conj_$}} Should not preserve the previous value, thus should not be 10.
+ clang_analyzer_dump(Buf[2]); // expected-warning {{conj_$}}
+ if (feof(F)) {
+ char Buf1[3] = {10, 10, 10};
+ fgets(Buf1, 3, F); // expected-warning {{is in EOF state}}
+ clang_analyzer_dump(Buf1[0]); // expected-warning {{10 S32b}}
+ clang_analyzer_dump(Buf1[2]); // expected-warning {{10 S32b}}
+ }
+
+ fclose(F);
+}
+
+void test_fputs() {
+ FILE *F = tmpfile();
+ if (!F)
+ return;
+
+ char *Buf = "aaa";
+ fputs(Buf, F);
+ // The check applies to success and failure.
+ clang_analyzer_dump(Buf[0]); // expected-warning {{97 S32b}}
+ clang_analyzer_dump(Buf[2]); // expected-warning {{97 S32b}}
+ clang_analyzer_dump(Buf[3]); // expected-warning {{0 S32b}}
+
+ fclose(F);
+}
+
+void test_fscanf() {
+ FILE *F = tmpfile();
+ if (!F)
+ return;
+
+ int a = 1;
+ unsigned b;
+ int Ret = fscanf(F, "%d %u", &a, &b);
+ if (Ret == 0) {
+ clang_analyzer_dump(a); // expected-warning {{conj_$}}
+ // FIXME: should be {{1 S32b}}.
+ clang_analyzer_dump(b); // expected-warning {{conj_$}}
+ // FIXME: should be {{uninitialized value}}.
+ } else if (Ret == 1) {
+ clang_analyzer_dump(a); // expected-warning {{conj_$}}
+ clang_analyzer_dump(b); // expected-warning {{conj_$}}
+ // FIXME: should be {{uninitialized value}}.
+ } else if (Ret >= 2) {
+ clang_analyzer_dump(a); // expected-warning {{conj_$}}
+ clang_analyzer_dump(b); // expected-warning {{conj_$}}
+ clang_analyzer_eval(Ret == 2); // expected-warning {{FALSE}} expected-warning {{TRUE}}
+ // FIXME: should be only TRUE.
+ } else {
+ clang_analyzer_dump(a); // expected-warning {{1 S32b}}
+ clang_analyzer_dump(b); // expected-warning {{uninitialized value}}
+ }
+
+ fclose(F);
+}
+
+void test_getdelim(char *P, size_t Sz) {
+ FILE *F = tmpfile();
+ if (!F)
+ return;
+
+ char *P1 = P;
+ size_t Sz1 = Sz;
+ ssize_t Ret = getdelim(&P, &Sz, '\t', F);
+ if (Ret < 0) {
+ clang_analyzer_eval(P == P1); // expected-warning {{FALSE}} \
+ // expected-warning {{TRUE}}
+ clang_analyzer_eval(Sz == Sz1); // expected-warning {{FALSE}} \
+ // expected-warning {{TRUE}}
+ } else {
+ clang_analyzer_eval(P == P1); // expected-warning {{FALSE}} \
+ // expected-warning {{TRUE}}
+ clang_analyzer_eval(Sz == Sz1); // expected-warning {{FALSE}} \
+ // expected-warning {{TRUE}}
+ }
+
+ fclose(F);
+}
+
+void test_fgetpos() {
+ FILE *F = tmpfile();
+ if (!F)
+ return;
+
+ fpos_t Pos = 1;
+ int Ret = fgetpos(F, &Pos);
+ if (Ret == 0) {
+ clang_analyzer_dump(Pos); // expected-warning {{conj_$}}
+ } else {
+ clang_analyzer_dump(Pos); // expected-warning {{1 S32b}}
+ }
+
+ fclose(F);
+}
diff --git a/clang/test/Analysis/taint-diagnostic-visitor.c b/clang/test/Analysis/taint-diagnostic-visitor.c
index a3fa1639bffe..020e9579ac53 100644
--- a/clang/test/Analysis/taint-diagnostic-visitor.c
+++ b/clang/test/Analysis/taint-diagnostic-visitor.c
@@ -46,8 +46,8 @@ void taintDiagnosticVLA(void) {
scanf("%d", &x); // expected-note {{Value assigned to 'x'}}
// expected-note@-1 {{Taint originated here}}
// expected-note@-2 {{Taint propagated to the 2nd argument}}
- int vla[x]; // expected-warning {{Declared variable-length array (VLA) has tainted size}}
- // expected-note@-1 {{Declared variable-length array (VLA) has tainted size}}
+ int vla[x]; // expected-warning {{Declared variable-length array (VLA) has tainted}}
+ // expected-note@-1 {{Declared variable-length array (VLA) has tainted}}
}
diff --git a/clang/test/Analysis/taint-generic.c b/clang/test/Analysis/taint-generic.c
index 4ff474b2ed40..e85b4106a580 100644
--- a/clang/test/Analysis/taint-generic.c
+++ b/clang/test/Analysis/taint-generic.c
@@ -405,7 +405,16 @@ int testDivByZero(void) {
void testTaintedVLASize(void) {
int x;
scanf("%d", &x);
- int vla[x]; // expected-warning{{Declared variable-length array (VLA) has tainted size}}
+ int vla[x]; // expected-warning{{Declared variable-length array (VLA) has tainted (attacker controlled) size that can be 0 or negative}}
+}
+
+// Tainted-sanitized VLAs.
+void testTaintedSanitizedVLASize(void) {
+ int x;
+ scanf("%d", &x);
+ if (x<1)
+ return;
+ int vla[x]; // no-warning
}
int testTaintedAllocaMem() {
diff --git a/clang/test/CMakeLists.txt b/clang/test/CMakeLists.txt
index 6b5cb0a18457..fcfca354f4a7 100644
--- a/clang/test/CMakeLists.txt
+++ b/clang/test/CMakeLists.txt
@@ -136,6 +136,7 @@ if( NOT CLANG_BUILT_STANDALONE )
llvm-strip
llvm-symbolizer
llvm-windres
+ obj2yaml
opt
split-file
yaml2obj
diff --git a/clang/test/CXX/drs/dr27xx.cpp b/clang/test/CXX/drs/dr27xx.cpp
index dd3fd5a20163..c956c4355abd 100644
--- a/clang/test/CXX/drs/dr27xx.cpp
+++ b/clang/test/CXX/drs/dr27xx.cpp
@@ -1,15 +1,98 @@
-// RUN: %clang_cc1 -std=c++98 -verify=expected %s
-// RUN: %clang_cc1 -std=c++11 -verify=expected %s
-// RUN: %clang_cc1 -std=c++14 -verify=expected %s
-// RUN: %clang_cc1 -std=c++17 -verify=expected %s
-// RUN: %clang_cc1 -std=c++20 -verify=expected %s
-// RUN: %clang_cc1 -std=c++23 -verify=expected,since-cxx23 %s
-// RUN: %clang_cc1 -std=c++2c -verify=expected,since-cxx23,since-cxx26 %s
+// RUN: %clang_cc1 -triple x86_64-linux-gnu -std=c++98 -verify=expected %s
+// RUN: %clang_cc1 -triple x86_64-linux-gnu -std=c++11 -verify=expected %s
+// RUN: %clang_cc1 -triple x86_64-linux-gnu -std=c++14 -verify=expected %s
+// RUN: %clang_cc1 -triple x86_64-linux-gnu -std=c++17 -verify=expected %s
+// RUN: %clang_cc1 -triple x86_64-linux-gnu -std=c++20 -verify=expected %s
+// RUN: %clang_cc1 -triple x86_64-linux-gnu -std=c++23 -verify=expected,since-cxx23 %s
+// RUN: %clang_cc1 -triple x86_64-linux-gnu -std=c++2c -verify=expected,since-cxx23,since-cxx26 %s
#if __cplusplus <= 202002L
// expected-no-diagnostics
#endif
+namespace dr2759 { // dr2759: 19
+#if __cplusplus >= 201103L
+
+struct CStruct {
+ int one;
+ int two;
+};
+
+struct CEmptyStruct {};
+struct CEmptyStruct2 {};
+
+struct CStructNoUniqueAddress {
+ int one;
+ [[no_unique_address]] int two;
+};
+
+struct CStructNoUniqueAddress2 {
+ int one;
+ [[no_unique_address]] int two;
+};
+
+union UnionLayout {
+ int a;
+ double b;
+ CStruct c;
+ [[no_unique_address]] CEmptyStruct d;
+ [[no_unique_address]] CEmptyStruct2 e;
+};
+
+union UnionLayout2 {
+ CStruct c;
+ int a;
+ CEmptyStruct2 e;
+ double b;
+ [[no_unique_address]] CEmptyStruct d;
+};
+
+union UnionLayout3 {
+ CStruct c;
+ int a;
+ double b;
+ [[no_unique_address]] CEmptyStruct d;
+};
+
+struct StructWithAnonUnion {
+ union {
+ int a;
+ double b;
+ CStruct c;
+ [[no_unique_address]] CEmptyStruct d;
+ [[no_unique_address]] CEmptyStruct2 e;
+ };
+};
+
+struct StructWithAnonUnion2 {
+ union {
+ CStruct c;
+ int a;
+ CEmptyStruct2 e;
+ double b;
+ [[no_unique_address]] CEmptyStruct d;
+ };
+};
+
+struct StructWithAnonUnion3 {
+ union {
+ CStruct c;
+ int a;
+ CEmptyStruct2 e;
+ double b;
+ [[no_unique_address]] CEmptyStruct d;
+ } u;
+};
+
+static_assert(__is_layout_compatible(CStruct, CStructNoUniqueAddress) != bool(__has_cpp_attribute(no_unique_address)), "");
+static_assert(__is_layout_compatible(CStructNoUniqueAddress, CStructNoUniqueAddress2) != bool(__has_cpp_attribute(no_unique_address)), "");
+static_assert(!__is_layout_compatible(UnionLayout, UnionLayout2), "");
+static_assert(!__is_layout_compatible(UnionLayout, UnionLayout3), "");
+static_assert(!__is_layout_compatible(StructWithAnonUnion, StructWithAnonUnion2), "");
+static_assert(!__is_layout_compatible(StructWithAnonUnion, StructWithAnonUnion3), "");
+#endif
+} // namespace dr2759
+
namespace dr2789 { // dr2789: 18
#if __cplusplus >= 202302L
template <typename T = int>
diff --git a/clang/test/CodeGen/aarch64-cpu-supports-target.c b/clang/test/CodeGen/aarch64-cpu-supports-target.c
new file mode 100644
index 000000000000..e023944b24e5
--- /dev/null
+++ b/clang/test/CodeGen/aarch64-cpu-supports-target.c
@@ -0,0 +1,52 @@
+// RUN: %clang_cc1 -triple aarch64-none-linux-gnu -emit-llvm -o - %s | FileCheck %s
+
+int check_all_feature() {
+ if (__builtin_cpu_supports("rng+flagm+flagm2+fp16fml+dotprod+sm4"))
+ return 1;
+ else if (__builtin_cpu_supports("rdm+lse+fp+simd+crc+sha1+sha2+sha3"))
+ return 2;
+ else if (__builtin_cpu_supports("aes+pmull+fp16+dit+dpb+dpb2+jscvt"))
+ return 3;
+ else if (__builtin_cpu_supports("fcma+rcpc+rcpc2+rcpc3+frintts+dgh"))
+ return 4;
+ else if (__builtin_cpu_supports("i8mm+bf16+ebf16+rpres+sve+sve-bf16"))
+ return 5;
+ else if (__builtin_cpu_supports("sve-ebf16+sve-i8mm+f32mm+f64mm"))
+ return 6;
+ else if (__builtin_cpu_supports("sve2+sve2-aes+sve2-pmull128"))
+ return 7;
+ else if (__builtin_cpu_supports("sve2-bitperm+sve2-sha3+sve2-sm4"))
+ return 8;
+ else if (__builtin_cpu_supports("sme+memtag+memtag2+memtag3+sb"))
+ return 9;
+ else if (__builtin_cpu_supports("predres+ssbs+ssbs2+bti+ls64+ls64_v"))
+ return 10;
+ else if (__builtin_cpu_supports("ls64_accdata+wfxt+sme-f64f64"))
+ return 11;
+ else if (__builtin_cpu_supports("sme-i16i64+sme2"))
+ return 12;
+ else
+ return 0;
+}
+
+// CHECK-LABEL: define dso_local i32 @neon_code() #1
+int __attribute__((target("simd"))) neon_code() { return 1; }
+
+// CHECK-LABEL: define dso_local i32 @sve_code() #2
+int __attribute__((target("sve"))) sve_code() { return 2; }
+
+// CHECK-LABEL: define dso_local i32 @code() #0
+int code() { return 3; }
+
+// CHECK-LABEL: define dso_local i32 @test_versions() #0
+int test_versions() {
+ if (__builtin_cpu_supports("sve"))
+ return sve_code();
+ else if (__builtin_cpu_supports("simd"))
+ return neon_code();
+ else
+ return code();
+}
+// CHECK: attributes #0 = { noinline nounwind optnone "no-trapping-math"="true" "stack-protector-buffer-size"="8" }
+// CHECK: attributes #1 = { noinline nounwind optnone "no-trapping-math"="true" "stack-protector-buffer-size"="8" "target-features"="+neon" }
+// CHECK: attributes #2 = { noinline nounwind optnone "no-trapping-math"="true" "stack-protector-buffer-size"="8" "target-features"="+fp-armv8,+fullfp16,+neon,+sve" }
diff --git a/clang/test/CodeGen/aarch64-cpu-supports.c b/clang/test/CodeGen/aarch64-cpu-supports.c
new file mode 100644
index 000000000000..872fec6827ef
--- /dev/null
+++ b/clang/test/CodeGen/aarch64-cpu-supports.c
@@ -0,0 +1,54 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --check-globals --version 2
+// RUN: %clang_cc1 -triple aarch64-none-linux-gnu -emit-llvm -o - %s | FileCheck %s
+
+// CHECK: @__aarch64_cpu_features = external dso_local global { i64 }
+// CHECK-LABEL: define dso_local i32 @main
+// CHECK-SAME: () #[[ATTR0:[0-9]+]] {
+// CHECK-NEXT: entry:
+// CHECK-NEXT: [[RETVAL:%.*]] = alloca i32, align 4
+// CHECK-NEXT: store i32 0, ptr [[RETVAL]], align 4
+// CHECK-NEXT: [[TMP0:%.*]] = load i64, ptr @__aarch64_cpu_features, align 8
+// CHECK-NEXT: [[TMP1:%.*]] = and i64 [[TMP0]], 70368744177664
+// CHECK-NEXT: [[TMP2:%.*]] = icmp eq i64 [[TMP1]], 70368744177664
+// CHECK-NEXT: [[TMP3:%.*]] = and i1 true, [[TMP2]]
+// CHECK-NEXT: br i1 [[TMP3]], label [[IF_THEN:%.*]], label [[IF_END:%.*]]
+// CHECK: if.then:
+// CHECK-NEXT: store i32 1, ptr [[RETVAL]], align 4
+// CHECK-NEXT: br label [[RETURN:%.*]]
+// CHECK: if.end:
+// CHECK-NEXT: [[TMP4:%.*]] = load i64, ptr @__aarch64_cpu_features, align 8
+// CHECK-NEXT: [[TMP5:%.*]] = and i64 [[TMP4]], 9070970929152
+// CHECK-NEXT: [[TMP6:%.*]] = icmp eq i64 [[TMP5]], 9070970929152
+// CHECK-NEXT: [[TMP7:%.*]] = and i1 true, [[TMP6]]
+// CHECK-NEXT: br i1 [[TMP7]], label [[IF_THEN1:%.*]], label [[IF_END2:%.*]]
+// CHECK: if.then1:
+// CHECK-NEXT: store i32 2, ptr [[RETVAL]], align 4
+// CHECK-NEXT: br label [[RETURN]]
+// CHECK: if.end2:
+// CHECK-NEXT: [[TMP8:%.*]] = load i64, ptr @__aarch64_cpu_features, align 8
+// CHECK-NEXT: [[TMP9:%.*]] = and i64 [[TMP8]], 166633186212708352
+// CHECK-NEXT: [[TMP10:%.*]] = icmp eq i64 [[TMP9]], 166633186212708352
+// CHECK-NEXT: [[TMP11:%.*]] = and i1 true, [[TMP10]]
+// CHECK-NEXT: br i1 [[TMP11]], label [[IF_THEN3:%.*]], label [[IF_END4:%.*]]
+// CHECK: if.then3:
+// CHECK-NEXT: store i32 3, ptr [[RETVAL]], align 4
+// CHECK-NEXT: br label [[RETURN]]
+// CHECK: if.end4:
+// CHECK-NEXT: store i32 0, ptr [[RETVAL]], align 4
+// CHECK-NEXT: br label [[RETURN]]
+// CHECK: return:
+// CHECK-NEXT: [[TMP12:%.*]] = load i32, ptr [[RETVAL]], align 4
+// CHECK-NEXT: ret i32 [[TMP12]]
+//
+int main(void) {
+ if (__builtin_cpu_supports("sb"))
+ return 1;
+
+ if (__builtin_cpu_supports("sve2-pmull128+memtag"))
+ return 2;
+
+ if (__builtin_cpu_supports("sme2+ls64_v+wfxt"))
+ return 3;
+
+ return 0;
+}
diff --git a/clang/test/CodeGen/aarch64-sme-inline-streaming-attrs.c b/clang/test/CodeGen/aarch64-sme-inline-streaming-attrs.c
new file mode 100644
index 000000000000..25aebeced937
--- /dev/null
+++ b/clang/test/CodeGen/aarch64-sme-inline-streaming-attrs.c
@@ -0,0 +1,47 @@
+// RUN: %clang_cc1 -triple aarch64-none-linux-gnu -S -o /dev/null -target-feature +sme -verify -DTEST_NONE %s
+// RUN: %clang_cc1 -triple aarch64-none-linux-gnu -S -o /dev/null -target-feature +sme -verify -DTEST_COMPATIBLE %s
+// RUN: %clang_cc1 -triple aarch64-none-linux-gnu -S -o /dev/null -target-feature +sme -verify -DTEST_STREAMING %s
+// RUN: %clang_cc1 -triple aarch64-none-linux-gnu -S -o /dev/null -target-feature +sme -verify -DTEST_LOCALLY %s
+
+#define __ai __attribute__((always_inline))
+__ai void inlined_fn(void) {}
+__ai void inlined_fn_streaming_compatible(void) __arm_streaming_compatible {}
+__ai void inlined_fn_streaming(void) __arm_streaming {}
+__ai __arm_locally_streaming void inlined_fn_local(void) {}
+
+#ifdef TEST_NONE
+void caller(void) {
+ inlined_fn();
+ inlined_fn_streaming_compatible();
+ inlined_fn_streaming(); // expected-error {{always_inline function 'inlined_fn_streaming' and its caller 'caller' have mismatching streaming attributes}}
+ inlined_fn_local(); // expected-error {{always_inline function 'inlined_fn_local' and its caller 'caller' have mismatching streaming attributes}}
+}
+#endif
+
+#ifdef TEST_COMPATIBLE
+void caller_compatible(void) __arm_streaming_compatible {
+ inlined_fn(); // expected-error {{always_inline function 'inlined_fn' and its caller 'caller_compatible' have mismatching streaming attributes}}
+ inlined_fn_streaming_compatible();
+ inlined_fn_streaming(); // expected-error {{always_inline function 'inlined_fn_streaming' and its caller 'caller_compatible' have mismatching streaming attributes}}
+ inlined_fn_local(); // expected-error {{always_inline function 'inlined_fn_local' and its caller 'caller_compatible' have mismatching streaming attributes}}
+}
+#endif
+
+#ifdef TEST_STREAMING
+void caller_streaming(void) __arm_streaming {
+ inlined_fn(); // expected-error {{always_inline function 'inlined_fn' and its caller 'caller_streaming' have mismatching streaming attributes}}
+ inlined_fn_streaming_compatible();
+ inlined_fn_streaming();
+ inlined_fn_local();
+}
+#endif
+
+#ifdef TEST_LOCALLY
+__arm_locally_streaming
+void caller_local(void) {
+ inlined_fn(); // expected-error {{always_inline function 'inlined_fn' and its caller 'caller_local' have mismatching streaming attributes}}
+ inlined_fn_streaming_compatible();
+ inlined_fn_streaming();
+ inlined_fn_local();
+}
+#endif
diff --git a/clang/test/CodeGen/aarch64-sme-intrinsics/acle_sme_state_funs.c b/clang/test/CodeGen/aarch64-sme-intrinsics/acle_sme_state_funs.c
index dc07efbb8160..e80a965394e7 100644
--- a/clang/test/CodeGen/aarch64-sme-intrinsics/acle_sme_state_funs.c
+++ b/clang/test/CodeGen/aarch64-sme-intrinsics/acle_sme_state_funs.c
@@ -28,12 +28,12 @@ bool test_in_streaming_mode(void) __arm_streaming_compatible {
// CHECK-LABEL: @test_za_disable(
// CHECK-NEXT: entry:
-// CHECK-NEXT: tail call void @__arm_za_disable() #[[ATTR4:[0-9]+]]
+// CHECK-NEXT: tail call void @__arm_za_disable() #[[ATTR3]]
// CHECK-NEXT: ret void
//
// CPP-CHECK-LABEL: @_Z15test_za_disablev(
// CPP-CHECK-NEXT: entry:
-// CPP-CHECK-NEXT: tail call void @__arm_za_disable() #[[ATTR4:[0-9]+]]
+// CPP-CHECK-NEXT: tail call void @__arm_za_disable() #[[ATTR3]]
// CPP-CHECK-NEXT: ret void
//
void test_za_disable(void) __arm_streaming_compatible {
@@ -70,3 +70,58 @@ void test_svundef_za(void) __arm_streaming_compatible __arm_out("za") {
svundef_za();
}
+// CHECK-LABEL: @test_sc_memcpy(
+// CHECK-NEXT: entry:
+// CHECK-NEXT: [[CALL:%.*]] = tail call ptr @__arm_sc_memcpy(ptr noundef [[DEST:%.*]], ptr noundef [[SRC:%.*]], i64 noundef [[N:%.*]]) #[[ATTR3]]
+// CHECK-NEXT: ret ptr [[CALL]]
+//
+// CPP-CHECK-LABEL: @_Z14test_sc_memcpyPvPKvm(
+// CPP-CHECK-NEXT: entry:
+// CPP-CHECK-NEXT: [[CALL:%.*]] = tail call ptr @__arm_sc_memcpy(ptr noundef [[DEST:%.*]], ptr noundef [[SRC:%.*]], i64 noundef [[N:%.*]]) #[[ATTR3]]
+// CPP-CHECK-NEXT: ret ptr [[CALL]]
+//
+void *test_sc_memcpy(void *dest, const void *src, size_t n) __arm_streaming_compatible {
+ return __arm_sc_memcpy(dest, src, n);
+}
+
+// CHECK-LABEL: @test_sc_memmove(
+// CHECK-NEXT: entry:
+// CHECK-NEXT: [[CALL:%.*]] = tail call ptr @__arm_sc_memmove(ptr noundef [[DEST:%.*]], ptr noundef [[SRC:%.*]], i64 noundef [[N:%.*]]) #[[ATTR3]]
+// CHECK-NEXT: ret ptr [[CALL]]
+//
+// CPP-CHECK-LABEL: @_Z15test_sc_memmovePvPKvm(
+// CPP-CHECK-NEXT: entry:
+// CPP-CHECK-NEXT: [[CALL:%.*]] = tail call ptr @__arm_sc_memmove(ptr noundef [[DEST:%.*]], ptr noundef [[SRC:%.*]], i64 noundef [[N:%.*]]) #[[ATTR3]]
+// CPP-CHECK-NEXT: ret ptr [[CALL]]
+//
+void *test_sc_memmove(void *dest, const void *src, size_t n) __arm_streaming_compatible {
+ return __arm_sc_memmove(dest, src, n);
+}
+
+// CHECK-LABEL: @test_sc_memset(
+// CHECK-NEXT: entry:
+// CHECK-NEXT: [[CALL:%.*]] = tail call ptr @__arm_sc_memset(ptr noundef [[S:%.*]], i32 noundef [[C:%.*]], i64 noundef [[N:%.*]]) #[[ATTR3]]
+// CHECK-NEXT: ret ptr [[CALL]]
+//
+// CPP-CHECK-LABEL: @_Z14test_sc_memsetPvim(
+// CPP-CHECK-NEXT: entry:
+// CPP-CHECK-NEXT: [[CALL:%.*]] = tail call ptr @__arm_sc_memset(ptr noundef [[S:%.*]], i32 noundef [[C:%.*]], i64 noundef [[N:%.*]]) #[[ATTR3]]
+// CPP-CHECK-NEXT: ret ptr [[CALL]]
+//
+void *test_sc_memset(void *s, int c, size_t n) __arm_streaming_compatible {
+ return __arm_sc_memset(s, c, n);
+}
+
+// CHECK-LABEL: @test_sc_memchr(
+// CHECK-NEXT: entry:
+// CHECK-NEXT: [[CALL:%.*]] = tail call ptr @__arm_sc_memchr(ptr noundef [[S:%.*]], i32 noundef [[C:%.*]], i64 noundef [[N:%.*]]) #[[ATTR3]]
+// CHECK-NEXT: ret ptr [[CALL]]
+//
+// CPP-CHECK-LABEL: @_Z14test_sc_memchrPvim(
+// CPP-CHECK-NEXT: entry:
+// CPP-CHECK-NEXT: [[CALL:%.*]] = tail call ptr @__arm_sc_memchr(ptr noundef [[S:%.*]], i32 noundef [[C:%.*]], i64 noundef [[N:%.*]]) #[[ATTR3]]
+// CPP-CHECK-NEXT: ret ptr [[CALL]]
+//
+void *test_sc_memchr(void *s, int c, size_t n) __arm_streaming_compatible {
+ return __arm_sc_memchr(s, c, n);
+}
diff --git a/clang/test/CodeGen/aix-builtin-cpu-is.c b/clang/test/CodeGen/aix-builtin-cpu-is.c
new file mode 100644
index 000000000000..b0a0dec41b56
--- /dev/null
+++ b/clang/test/CodeGen/aix-builtin-cpu-is.c
@@ -0,0 +1,71 @@
+// RUN: echo "int main() { return __builtin_cpu_is(\"ppc970\");}" > %t.c
+// RUN: %clang_cc1 -triple powerpc-ibm-aix7.2.0.0 -emit-llvm -o - %t.c | FileCheck %s
+
+// RUN: echo "int main() { return __builtin_cpu_is(\"ppc-cell-be\");}" > %t.c
+// RUN: %clang_cc1 -triple powerpc-ibm-aix7.2.0.0 -emit-llvm -o - %t.c | FileCheck %s
+
+// RUN: echo "int main() { return __builtin_cpu_is(\"ppca2\");}" > %t.c
+// RUN: %clang_cc1 -triple powerpc-ibm-aix7.2.0.0 -emit-llvm -o - %t.c | FileCheck %s
+
+// RUN: echo "int main() { return __builtin_cpu_is(\"ppc405\");}" > %t.c
+// RUN: %clang_cc1 -triple powerpc-ibm-aix7.2.0.0 -emit-llvm -o - %t.c | FileCheck %s
+
+// RUN: echo "int main() { return __builtin_cpu_is(\"ppc440\");}" > %t.c
+// RUN: %clang_cc1 -triple powerpc-ibm-aix7.2.0.0 -emit-llvm -o - %t.c | FileCheck %s
+
+// RUN: echo "int main() { return __builtin_cpu_is(\"ppc464\");}" > %t.c
+// RUN: %clang_cc1 -triple powerpc-ibm-aix7.2.0.0 -emit-llvm -o - %t.c | FileCheck %s
+
+// RUN: echo "int main() { return __builtin_cpu_is(\"ppc476\");}" > %t.c
+// RUN: %clang_cc1 -triple powerpc-ibm-aix7.2.0.0 -emit-llvm -o - %t.c | FileCheck %s
+
+// RUN: echo "int main() { return __builtin_cpu_is(\"power4\");}" > %t.c
+// RUN: %clang_cc1 -triple powerpc-ibm-aix7.2.0.0 -emit-llvm -o - %t.c | FileCheck %s
+
+// RUN: echo "int main() { return __builtin_cpu_is(\"power5\");}" > %t.c
+// RUN: %clang_cc1 -triple powerpc-ibm-aix7.2.0.0 -emit-llvm -o - %t.c | FileCheck %s
+
+// RUN: echo "int main() { return __builtin_cpu_is(\"power5+\");}" > %t.c
+// RUN: %clang_cc1 -triple powerpc-ibm-aix7.2.0.0 -emit-llvm -o - %t.c | FileCheck %s
+
+// RUN: echo "int main() { return __builtin_cpu_is(\"power6\");}" > %t.c
+// RUN: %clang_cc1 -triple powerpc-ibm-aix7.2.0.0 -emit-llvm -o - %t.c | FileCheck %s
+
+// RUN: echo "int main() { return __builtin_cpu_is(\"power6x\");}" > %t.c
+// RUN: %clang_cc1 -triple powerpc-ibm-aix7.2.0.0 -emit-llvm -o - %t.c | FileCheck %s
+
+// RUN: echo "int main() { return __builtin_cpu_is(\"power7\");}" > %t.c
+// RUN: %clang_cc1 -triple powerpc-ibm-aix7.2.0.0 -emit-llvm -o - %t.c | FileCheck %s -DVALUE=32768 \
+// RUN: --check-prefix=CHECKOP
+
+// RUN: echo "int main() { return __builtin_cpu_is(\"power8\");}" > %t.c
+// RUN: %clang_cc1 -triple powerpc-ibm-aix7.2.0.0 -emit-llvm -o - %t.c | FileCheck %s -DVALUE=65536 \
+// RUN: --check-prefix=CHECKOP
+
+// RUN: echo "int main() { return __builtin_cpu_is(\"power9\");}" > %t.c
+// RUN: %clang_cc1 -triple powerpc-ibm-aix7.2.0.0 -emit-llvm -o - %t.c | FileCheck %s -DVALUE=131072\
+// RUN: --check-prefix=CHECKOP
+
+// RUN: echo "int main() { return __builtin_cpu_is(\"power10\");}" > %t.c
+// RUN: %clang_cc1 -triple powerpc-ibm-aix7.2.0.0 -emit-llvm -o - %t.c | FileCheck %s -DVALUE=262144 \
+// RUN: --check-prefix=CHECKOP
+
+// CHECK: define i32 @main() #0 {
+// CHECK-NEXT: entry:
+// CHECK-NEXT: %retval = alloca i32, align 4
+// CHECK-NEXT: store i32 0, ptr %retval, align 4
+// CHECK-NEXT: ret i32 0
+// CHECK-NEXT: }
+
+// CHECKOP: @_system_configuration = external global { i32, i32, i32 }
+// CHECKOP: define i32 @main() #0 {
+// CHECKOP-NEXT: entry:
+// CHECKOP-NEXT: %retval = alloca i32, align 4
+// CHECKOP-NEXT: store i32 0, ptr %retval, align 4
+// CHECKOP-NEXT: %0 = load i32, ptr getelementptr inbounds ({ i32, i32, i32 }, ptr @_system_configuration, i32 0, i32 1), align 4
+// CHECKOP-NEXT: %1 = icmp eq i32 %0, [[VALUE]]
+// CHECKOP-NEXT: %conv = zext i1 %1 to i32
+// CHECKOP-NEXT: ret i32 %conv
+// CHECKOP-NEXT: }
+
+
diff --git a/clang/test/CodeGen/target-builtin-noerror.c b/clang/test/CodeGen/target-builtin-noerror.c
index 9608b5f37baa..b438e50848a4 100644
--- a/clang/test/CodeGen/target-builtin-noerror.c
+++ b/clang/test/CodeGen/target-builtin-noerror.c
@@ -141,6 +141,7 @@ void verifyfeaturestrings(void) {
(void)__builtin_cpu_supports("sm3");
(void)__builtin_cpu_supports("sha512");
(void)__builtin_cpu_supports("sm4");
+ (void)__builtin_cpu_supports("apxf");
(void)__builtin_cpu_supports("usermsr");
(void)__builtin_cpu_supports("avx10.1-256");
(void)__builtin_cpu_supports("avx10.1-512");
diff --git a/clang/test/CodeGenCUDA/device-stub.cu b/clang/test/CodeGenCUDA/device-stub.cu
index d7a7b1bb9fe9..60304647bd4c 100644
--- a/clang/test/CodeGenCUDA/device-stub.cu
+++ b/clang/test/CodeGenCUDA/device-stub.cu
@@ -50,21 +50,19 @@
// RUN: %clang_cc1 -triple x86_64-linux-gnu -emit-llvm %s \
// RUN: -fgpu-rdc -fcuda-include-gpubinary %t -o - -x hip \
// RUN: | FileCheck -allow-deprecated-dag-overlap %s --check-prefixes=ALL,LNX,RDC,HIP,HIPEF
-// RUN: %clang_cc1 -triple x86_64-linux-gnu -emit-llvm %s -o - -x hip\
+// RUN: %clang_cc1 -cuid=123 -triple x86_64-linux-gnu -emit-llvm %s -o - -x hip\
// RUN: | FileCheck -allow-deprecated-dag-overlap %s -check-prefixes=ALL,LNX,NORDC,HIP,HIPNEF
// RUN: %clang_cc1 -triple x86_64-pc-windows-msvc -aux-triple amdgcn -emit-llvm %s \
// RUN: -fcuda-include-gpubinary %t -o - -x hip\
// RUN: | FileCheck -allow-deprecated-dag-overlap %s --check-prefixes=ALL,WIN
-// RUN: %clang_cc1 -triple x86_64-pc-windows-msvc -aux-triple amdgcn -emit-llvm %s \
+// RUN: %clang_cc1 -cuid=123 -triple x86_64-pc-windows-msvc -aux-triple amdgcn -emit-llvm %s \
// RUN: -o - -x hip\
// RUN: | FileCheck -allow-deprecated-dag-overlap %s --check-prefixes=ALL,WIN,HIP,HIPNEF
#include "Inputs/cuda.h"
-// HIPNEF: $__hip_gpubin_handle = comdat any
-
#ifndef NOGLOBALS
// NORDC-DAG: @device_var = internal global i32
// RDC-DAG: @device_var = global i32
@@ -161,7 +159,7 @@ __device__ void device_use() {
// * constant unnamed string with GPU binary
// CUDA: @[[FATBIN:.*]] = private constant{{.*}} c"GPU binary would be here.",
// HIPEF: @[[FATBIN:.*]] = private constant{{.*}} c"GPU binary would be here.",{{.*}}align 4096
-// HIPNEF: @[[FATBIN:__hip_fatbin]] = external constant i8, section ".hip_fatbin"
+// HIPNEF: @[[FATBIN:__hip_fatbin_[0-9a-f]+]] = external constant i8, section ".hip_fatbin"
// CUDANORDC-SAME: section ".nv_fatbin", align 8
// CUDARDC-SAME: section "__nv_relfatbin", align 8
// * constant struct that wraps GPU binary
@@ -177,7 +175,7 @@ __device__ void device_use() {
// HIP-SAME: section ".hipFatBinSegment"
// * variable to save GPU binary handle after initialization
// CUDANORDC: @__[[PREFIX]]_gpubin_handle = internal global ptr null
-// HIPNEF: @__[[PREFIX]]_gpubin_handle = linkonce hidden global ptr null
+// HIPNEF: @__[[PREFIX]]_gpubin_handle_{{[0-9a-f]+}} = external hidden global ptr, align 8
// * constant unnamed string with NVModuleID
// CUDARDC: [[MODULE_ID_GLOBAL:@.*]] = private constant
// CUDARDC-SAME: c"[[MODULE_ID:.+]]\00", section "__nv_module_id", align 32
diff --git a/clang/test/CodeGenCUDA/host-used-device-var.cu b/clang/test/CodeGenCUDA/host-used-device-var.cu
index 7cb31aff8426..5328660c9dc9 100644
--- a/clang/test/CodeGenCUDA/host-used-device-var.cu
+++ b/clang/test/CodeGenCUDA/host-used-device-var.cu
@@ -1,9 +1,9 @@
// REQUIRES: amdgpu-registered-target
// RUN: %clang_cc1 -triple amdgcn-amd-amdhsa -fcuda-is-device -x hip %s \
// RUN: -std=c++17 -O3 -mllvm -amdgpu-internalize-symbols -emit-llvm -o - \
-// RUN: | FileCheck -check-prefix=DEV %s
+// RUN: -cuid=123 | FileCheck -check-prefix=DEV %s
// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -x hip %s \
-// RUN: -std=c++17 -O3 -emit-llvm -o - | FileCheck -check-prefix=HOST %s
+// RUN: -std=c++17 -O3 -emit-llvm -o - -cuid=123 | FileCheck -check-prefix=HOST %s
// Negative tests.
@@ -187,6 +187,7 @@ public:
// DEV-SAME: {{^[^@]*}} @_ZL2u3
// DEV-SAME: {{^[^@]*}} @_ZZ4fun1vE11static_var1
// DEV-SAME: {{^[^@]*}} @_ZZZN21TestStaticVarInLambda3funEvENKUlPcE_clES0_E4var2
+// DEV-SAME: {{^[^@]*}} @__hip_cuid_{{[0-9a-f]+}}
// DEV-SAME: {{^[^@]*}} @constexpr_var2b
// DEV-SAME: {{^[^@]*}} @inline_var
// DEV-SAME: {{^[^@]*}} @u1
diff --git a/clang/test/Driver/Inputs/hip.h b/clang/test/Driver/Inputs/hip.h
new file mode 100644
index 000000000000..5be772a7b341
--- /dev/null
+++ b/clang/test/Driver/Inputs/hip.h
@@ -0,0 +1,25 @@
+/* Minimal declarations for HIP support. Testing purposes only. */
+
+#define __constant__ __attribute__((constant))
+#define __device__ __attribute__((device))
+#define __global__ __attribute__((global))
+#define __host__ __attribute__((host))
+#define __shared__ __attribute__((shared))
+#define __managed__ __attribute__((managed))
+
+struct dim3 {
+ unsigned x, y, z;
+ __host__ __device__ dim3(unsigned x, unsigned y = 1, unsigned z = 1) : x(x), y(y), z(z) {}
+};
+
+typedef struct hipStream *hipStream_t;
+typedef enum hipError {} hipError_t;
+int hipConfigureCall(dim3 gridSize, dim3 blockSize, unsigned long long sharedSize = 0,
+ hipStream_t stream = 0);
+extern "C" hipError_t __hipPushCallConfiguration(dim3 gridSize, dim3 blockSize,
+ unsigned long long sharedSize = 0,
+ hipStream_t stream = 0);
+extern "C" hipError_t hipLaunchKernel(const void *func, dim3 gridDim,
+ dim3 blockDim, void **args,
+ unsigned long long sharedMem,
+ hipStream_t stream);
diff --git a/clang/test/Driver/arm-alignment.c b/clang/test/Driver/arm-alignment.c
index 9177b625729b..8c915477af9a 100644
--- a/clang/test/Driver/arm-alignment.c
+++ b/clang/test/Driver/arm-alignment.c
@@ -22,6 +22,21 @@
// RUN: %clang -target armv7-windows -### %s 2> %t
// RUN: FileCheck --check-prefix=CHECK-UNALIGNED-ARM < %t %s
+// RUN: %clang --target=armv6 -### %s 2> %t
+// RUN: FileCheck --check-prefix=CHECK-ALIGNED-ARM < %t %s
+
+// RUN: %clang --target=armv7 -### %s 2> %t
+// RUN: FileCheck --check-prefix=CHECK-UNALIGNED-ARM < %t %s
+
+// RUN: %clang -target thumbv6m-none-gnueabi -mcpu=cortex-m0 -### %s 2> %t
+// RUN: FileCheck --check-prefix CHECK-ALIGNED-ARM <%t %s
+
+// RUN: %clang -target thumb-none-gnueabi -mcpu=cortex-m0 -### %s 2> %t
+// RUN: FileCheck --check-prefix CHECK-ALIGNED-ARM <%t %s
+
+// RUN: %clang -target thumbv8m.base-none-gnueabi -### %s 2> %t
+// RUN: FileCheck --check-prefix CHECK-ALIGNED-ARM <%t %s
+
// RUN: %clang --target=aarch64 -munaligned-access -### %s 2> %t
// RUN: FileCheck --check-prefix=CHECK-UNALIGNED-AARCH64 < %t %s
diff --git a/clang/test/Driver/clang-offload-bundler.c b/clang/test/Driver/clang-offload-bundler.c
index 7d0b6b27a60a..9d8b81ee9806 100644
--- a/clang/test/Driver/clang-offload-bundler.c
+++ b/clang/test/Driver/clang-offload-bundler.c
@@ -10,6 +10,7 @@
// RUN: %clang -O0 -target %itanium_abi_triple %s -c -emit-llvm -o %t.bc
// RUN: %clang -O0 -target %itanium_abi_triple %s -S -o %t.s
// RUN: %clang -O0 -target %itanium_abi_triple %s -c -o %t.o
+// RUN: obj2yaml %t.o > %t.o.yaml
// RUN: %clang -O0 -target %itanium_abi_triple %s -emit-ast -o %t.ast
//
@@ -305,11 +306,13 @@
// RUN: clang-offload-bundler -type=o -targets=host-%itanium_abi_triple,openmp-powerpc64le-ibm-linux-gnu,openmp-x86_64-pc-linux-gnu -input=%t.o -input=%t.tgt1 -input=%t.tgt2 -output=%t.bundle3.o
// RUN: clang-offload-bundler -type=o -input=%t.bundle3.o -list | FileCheck -check-prefix=CKLST %s
// RUN: clang-offload-bundler -type=o -targets=host-%itanium_abi_triple,openmp-powerpc64le-ibm-linux-gnu,openmp-x86_64-pc-linux-gnu -output=%t.res.o -output=%t.res.tgt1 -output=%t.res.tgt2 -input=%t.bundle3.o -unbundle
-// RUN: diff %t.bundle3.o %t.res.o
+// RUN: obj2yaml %t.res.o > %t.res.o.yaml
+// RUN: diff %t.o.yaml %t.res.o.yaml
// RUN: diff %t.tgt1 %t.res.tgt1
// RUN: diff %t.tgt2 %t.res.tgt2
// RUN: clang-offload-bundler -type=o -targets=openmp-powerpc64le-ibm-linux-gnu,host-%itanium_abi_triple,openmp-x86_64-pc-linux-gnu -output=%t.res.tgt1 -output=%t.res.o -output=%t.res.tgt2 -input=%t.bundle3.o -unbundle
-// RUN: diff %t.bundle3.o %t.res.o
+// RUN: obj2yaml %t.res.o > %t.res.o.yaml
+// RUN: diff %t.o.yaml %t.res.o.yaml
// RUN: diff %t.tgt1 %t.res.tgt1
// RUN: diff %t.tgt2 %t.res.tgt2
// RUN: clang-offload-bundler -type=o -targets=openmp-powerpc64le-ibm-linux-gnu -output=%t.res.tgt1 -input=%t.bundle3.o -unbundle
@@ -318,11 +321,13 @@
// Check if we can unbundle a file with no magic strings.
// RUN: clang-offload-bundler -type=o -input=%t.o -list | FileCheck -check-prefix=CKLST2 --allow-empty %s
// RUN: clang-offload-bundler -type=o -targets=host-%itanium_abi_triple,openmp-powerpc64le-ibm-linux-gnu,openmp-x86_64-pc-linux-gnu -output=%t.res.o -output=%t.res.tgt1 -output=%t.res.tgt2 -input=%t.o -unbundle -allow-missing-bundles
-// RUN: diff %t.o %t.res.o
+// RUN: obj2yaml %t.res.o > %t.res.o.yaml
+// RUN: diff %t.o.yaml %t.res.o.yaml
// RUN: diff %t.empty %t.res.tgt1
// RUN: diff %t.empty %t.res.tgt2
// RUN: clang-offload-bundler -type=o -targets=openmp-powerpc64le-ibm-linux-gnu,host-%itanium_abi_triple,openmp-x86_64-pc-linux-gnu -output=%t.res.tgt1 -output=%t.res.o -output=%t.res.tgt2 -input=%t.o -unbundle -allow-missing-bundles
-// RUN: diff %t.o %t.res.o
+// RUN: obj2yaml %t.res.o > %t.res.o.yaml
+// RUN: diff %t.o.yaml %t.res.o.yaml
// RUN: diff %t.empty %t.res.tgt1
// RUN: diff %t.empty %t.res.tgt2
diff --git a/clang/test/Driver/cuda-cross-compiling.c b/clang/test/Driver/cuda-cross-compiling.c
index 6c9e2cac736b..086840accebe 100644
--- a/clang/test/Driver/cuda-cross-compiling.c
+++ b/clang/test/Driver/cuda-cross-compiling.c
@@ -69,6 +69,13 @@
// LOWERING: -cc1" "-triple" "nvptx64-nvidia-cuda" {{.*}} "-mllvm" "--nvptx-lower-global-ctor-dtor"
//
+// Test passing arguments directly to nvlink.
+//
+// RUN: %clang -target nvptx64-nvidia-cuda -Wl,-v -Wl,a,b -march=sm_52 -### %s 2>&1 \
+// RUN: | FileCheck -check-prefix=LINKER-ARGS %s
+
+// LINKER-ARGS: nvlink{{.*}}"-v"{{.*}}"a" "b"
+
// Tests for handling a missing architecture.
//
// RUN: not %clang -target nvptx64-nvidia-cuda %s -### 2>&1 \
diff --git a/clang/test/Driver/hip-partial-link.hip b/clang/test/Driver/hip-partial-link.hip
new file mode 100644
index 000000000000..a1d31f9a6519
--- /dev/null
+++ b/clang/test/Driver/hip-partial-link.hip
@@ -0,0 +1,97 @@
+// REQUIRES: x86-registered-target, amdgpu-registered-target, lld, system-linux
+
+// RUN: %clang -x hip --target=x86_64-unknown-linux-gnu \
+// RUN: --offload-arch=gfx906 -c -nostdinc -nogpuinc -nohipwrapperinc \
+// RUN: -nogpulib -fgpu-rdc -I%S/Inputs %s -o %t.1.o
+
+// RUN: %clang -x hip --target=x86_64-unknown-linux-gnu -DLIB \
+// RUN: --offload-arch=gfx906 -c -nostdinc -nogpuinc -nohipwrapperinc \
+// RUN: -nogpulib -fgpu-rdc -I%S/Inputs %s -o %t.2.o
+
+// RUN: %clang -x hip --target=x86_64-unknown-linux-gnu -DMAIN \
+// RUN: --offload-arch=gfx906 -c -nostdinc -nogpuinc -nohipwrapperinc \
+// RUN: -nogpulib -fgpu-rdc -I%S/Inputs %s -o %t.main.o
+
+// RUN: llvm-nm %t.1.o | FileCheck -check-prefix=OBJ1 %s
+// OBJ1: B __hip_cuid_[[ID:[0-9a-f]+]]
+// OBJ1: U __hip_fatbin_[[ID]]
+// OBJ1: U __hip_gpubin_handle_[[ID]]
+
+// RUN: llvm-nm %t.2.o | FileCheck -check-prefix=OBJ2 %s
+// OBJ2: B __hip_cuid_[[ID:[0-9a-f]+]]
+// OBJ2: U __hip_fatbin_[[ID]]
+// OBJ2: U __hip_gpubin_handle_[[ID]]
+
+// Link %t.1.o and %t.2.o with -r, then link the result with %t.main.o.
+
+// RUN: %clang -v --target=x86_64-unknown-linux-gnu \
+// RUN: --hip-link -fgpu-rdc --offload-arch=gfx906 \
+// RUN: -r -fuse-ld=lld -nostdlib %t.1.o %t.2.o -o %t.lib.o \
+// RUN: 2>&1 | FileCheck -check-prefix=LD-R %s
+// LD-R: Found undefined HIP fatbin symbol: __hip_fatbin_[[ID1:[0-9a-f]+]]
+// LD-R: Found undefined HIP fatbin symbol: __hip_fatbin_[[ID2:[0-9a-f]+]]
+// LD-R: Found undefined HIP gpubin handle symbol: __hip_gpubin_handle_[[ID1]]
+// LD-R: Found undefined HIP gpubin handle symbol: __hip_gpubin_handle_[[ID2]]
+// LD-R: "{{.*}}/clang-offload-bundler" {{.*}}-unbundle
+// LD-R: "{{.*}}/lld" -flavor gnu -m elf64_amdgpu
+// LD-R: "{{.*}}/clang-offload-bundler"
+// LD-R: "{{.*}}/llvm-mc" -triple x86_64-unknown-linux-gnu
+// LD-R: "{{.*}}/ld.lld" {{.*}} -r
+
+// RUN: llvm-nm %t.lib.o | FileCheck -check-prefix=OBJ %s
+// OBJ: B __hip_cuid_[[ID1:[0-9a-f]+]]
+// OBJ: B __hip_cuid_[[ID2:[0-9a-f]+]]
+// OBJ: R __hip_fatbin_[[ID1]]
+// OBJ: R __hip_fatbin_[[ID2]]
+// OBJ: D __hip_gpubin_handle_[[ID1]]
+// OBJ: D __hip_gpubin_handle_[[ID2]]
+
+// RUN: %clang -v --target=x86_64-unknown-linux-gnu \
+// RUN: --hip-link -no-hip-rt -fgpu-rdc --offload-arch=gfx906 \
+// RUN: -fuse-ld=lld -nostdlib -r %t.main.o %t.lib.o -o %t.final.o \
+// RUN: 2>&1 | FileCheck -check-prefix=LINK-O %s
+// LINK-O-NOT: Found undefined HIP {{.*}}symbol
+
+// Generate a static library from %t.1.o and %t.2.o, then link it with %t.main.o.
+
+// RUN: %clang -v --target=x86_64-unknown-linux-gnu \
+// RUN: --hip-link -fgpu-rdc --offload-arch=gfx906 \
+// RUN: --emit-static-lib -fuse-ld=lld -nostdlib %t.1.o %t.2.o -o %t.a \
+// RUN: 2>&1 | FileCheck -check-prefix=STATIC %s
+// STATIC: Found undefined HIP fatbin symbol: __hip_fatbin_[[ID1:[0-9a-f]+]]
+// STATIC: Found undefined HIP fatbin symbol: __hip_fatbin_[[ID2:[0-9a-f]+]]
+// STATIC: Found undefined HIP gpubin handle symbol: __hip_gpubin_handle_[[ID1]]
+// STATIC: Found undefined HIP gpubin handle symbol: __hip_gpubin_handle_[[ID2]]
+// STATIC: "{{.*}}/clang-offload-bundler" {{.*}}-unbundle
+// STATIC: "{{.*}}/lld" -flavor gnu -m elf64_amdgpu
+// STATIC: "{{.*}}/clang-offload-bundler"
+// STATIC: "{{.*}}/llvm-mc" -triple x86_64-unknown-linux-gnu
+// STATIC: "{{.*}}/llvm-ar"
+
+// RUN: %clang -v --target=x86_64-unknown-linux-gnu \
+// RUN: --hip-link -no-hip-rt -fgpu-rdc --offload-arch=gfx906 \
+// RUN: -fuse-ld=lld -nostdlib -r %t.main.o %t.a -o %t.final.o \
+// RUN: 2>&1 | FileCheck -check-prefix=LINK-A %s
+// LINK-A-NOT: Found undefined HIP {{.*}}symbol
+
+#include "hip.h"
+
+#ifdef LIB
+__device__ int x;
+__device__ void libfun() {
+ x = 1;
+}
+#elif !defined(MAIN)
+__device__ void libfun();
+__global__ void kern() {
+ libfun();
+}
+void run() {
+ kern<<<1,1>>>();
+}
+#else
+extern void run();
+int main() {
+ run();
+}
+#endif
diff --git a/clang/test/Driver/hip-toolchain-rdc.hip b/clang/test/Driver/hip-toolchain-rdc.hip
index 1827531f9cab..d19d8ccd6cb2 100644
--- a/clang/test/Driver/hip-toolchain-rdc.hip
+++ b/clang/test/Driver/hip-toolchain-rdc.hip
@@ -1,7 +1,7 @@
// REQUIRES: x86-registered-target
// REQUIRES: amdgpu-registered-target
-// RUN: %clang -### --target=x86_64-linux-gnu \
+// RUN: %clang -### --target=x86_64-linux-gnu -v \
// RUN: -x hip --cuda-gpu-arch=gfx803 --cuda-gpu-arch=gfx900 \
// RUN: --hip-device-lib=lib1.bc --hip-device-lib=lib2.bc \
// RUN: --hip-device-lib-path=%S/Inputs/hip_multiple_inputs/lib1 \
@@ -12,7 +12,7 @@
// RUN: %S/Inputs/hip_multiple_inputs/b.hip \
// RUN: 2>&1 | FileCheck -check-prefixes=CHECK,LNX %s
-// RUN: %clang -### --target=x86_64-pc-windows-msvc \
+// RUN: %clang -### --target=x86_64-pc-windows-msvc -v \
// RUN: -x hip --cuda-gpu-arch=gfx803 --cuda-gpu-arch=gfx900 \
// RUN: --hip-device-lib=lib1.bc --hip-device-lib=lib2.bc \
// RUN: --hip-device-lib-path=%S/Inputs/hip_multiple_inputs/lib1 \
@@ -23,15 +23,31 @@
// RUN: %S/Inputs/hip_multiple_inputs/b.hip \
// RUN: 2>&1 | FileCheck -check-prefixes=CHECK,MSVC %s
-// check code object alignment in dumped llvm-mc input
-// LNX: .protected __hip_fatbin
-// LNX: .type __hip_fatbin,@object
-// LNX: .section .hip_fatbin,"a",@progbits
-// MSVC: .section .hip_fatbin, "dw"
-// CHECK: .globl __hip_fatbin
-// CHECK: .p2align 12
-// CHECK: __hip_fatbin:
-// CHECK: .incbin "[[BUNDLE:.*hipfb]]"
+// Check the HIP fatbin and gpubin handle symbols and the code object alignment in the dumped llvm-mc input.
+// CHECK: Found undefined HIP fatbin symbol: __hip_fatbin_[[ID1:[0-9a-f]+]]
+// CHECK: Found undefined HIP fatbin symbol: __hip_fatbin_[[ID2:[0-9a-f]+]]
+// CHECK: Found undefined HIP gpubin handle symbol: __hip_gpubin_handle_[[ID1]]
+// CHECK: Found undefined HIP gpubin handle symbol: __hip_gpubin_handle_[[ID2]]
+// LNX: .protected __hip_gpubin_handle_[[ID1]]
+// LNX: .type __hip_gpubin_handle_[[ID1]]
+// LNX-LABEL: .section .hip_gpubin_handle,"aw"
+// MSVC-LABEL: .section .hip_gpubin_handle,"dw"
+// CHECK: .globl __hip_gpubin_handle_[[ID1]]
+// CHECK-NEXT: .p2align 3
+// CHECK-NEXT:__hip_gpubin_handle_[[ID1]]:
+// CHECK-NEXT: .zero 8
+// CHECK-NEXT: .globl __hip_gpubin_handle_[[ID2]]
+// CHECK-NEXT: .set __hip_gpubin_handle_[[ID2]],__hip_gpubin_handle_[[ID1]]
+// LNX: .protected __hip_fatbin_[[ID1]]
+// LNX: .type __hip_fatbin_[[ID1]],@object
+// LNX-LABEL: .section .hip_fatbin,"a",@progbits
+// MSVC-LABEL: .section .hip_fatbin,"dw"
+// CHECK: .globl __hip_fatbin_[[ID1]]
+// CHECK-NEXT: .p2align 12
+// CHECK-NEXT: .globl __hip_fatbin_[[ID2]]
+// CHECK-NEXT: .set __hip_fatbin_[[ID2]],__hip_fatbin_[[ID1]]
+// CHECK-NEXT: __hip_fatbin_[[ID1]]:
+// CHECK-NEXT: .incbin "[[BUNDLE:.*hipfb]]"
// LNX: .section .note.GNU-stack, "", @progbits
// MSVC-NOT: .note.GNU-stack
diff --git a/clang/test/Driver/linker-wrapper.c b/clang/test/Driver/linker-wrapper.c
index 7fd46778ac91..83df2b84adef 100644
--- a/clang/test/Driver/linker-wrapper.c
+++ b/clang/test/Driver/linker-wrapper.c
@@ -21,7 +21,7 @@ __attribute__((visibility("protected"), used)) int x;
// RUN: clang-linker-wrapper --host-triple=x86_64-unknown-linux-gnu --dry-run \
// RUN: --linker-path=/usr/bin/ld -- %t.o -o a.out 2>&1 | FileCheck %s --check-prefix=NVPTX-LINK
-// NVPTX-LINK: clang{{.*}} -o {{.*}}.img --target=nvptx64-nvidia-cuda -march=sm_70 -O2 -Wl,--no-undefined {{.*}}.o {{.*}}.o
+// NVPTX-LINK: clang{{.*}} -o {{.*}}.img --target=nvptx64-nvidia-cuda -march=sm_70 -O2 {{.*}}.o {{.*}}.o
// RUN: clang-offload-packager -o %t.out \
// RUN: --image=file=%t.elf.o,kind=openmp,triple=nvptx64-nvidia-cuda,arch=sm_70 \
@@ -30,7 +30,7 @@ __attribute__((visibility("protected"), used)) int x;
// RUN: clang-linker-wrapper --host-triple=x86_64-unknown-linux-gnu --dry-run --device-debug -O0 \
// RUN: --linker-path=/usr/bin/ld -- %t.o -o a.out 2>&1 | FileCheck %s --check-prefix=NVPTX-LINK-DEBUG
-// NVPTX-LINK-DEBUG: clang{{.*}} -o {{.*}}.img --target=nvptx64-nvidia-cuda -march=sm_70 -O2 -Wl,--no-undefined {{.*}}.o {{.*}}.o -g
+// NVPTX-LINK-DEBUG: clang{{.*}} -o {{.*}}.img --target=nvptx64-nvidia-cuda -march=sm_70 -O2 {{.*}}.o {{.*}}.o -g
// RUN: clang-offload-packager -o %t.out \
// RUN: --image=file=%t.elf.o,kind=openmp,triple=amdgcn-amd-amdhsa,arch=gfx908 \
diff --git a/clang/test/Driver/openmp-offload-gpu.c b/clang/test/Driver/openmp-offload-gpu.c
index bccc5fd9483a..5da74a35d87a 100644
--- a/clang/test/Driver/openmp-offload-gpu.c
+++ b/clang/test/Driver/openmp-offload-gpu.c
@@ -393,14 +393,28 @@
//
// RUN: %clang -### --target=x86_64-unknown-linux-gnu -fopenmp=libomp \
// RUN: --libomptarget-nvptx-bc-path=%S/Inputs/libomptarget/libomptarget-nvptx-test.bc \
+// RUN: --libomptarget-amdgpu-bc-path=%S/Inputs/hip_dev_lib/libomptarget-amdgpu-gfx803.bc \
// RUN: --cuda-path=%S/Inputs/CUDA_102/usr/local/cuda \
-// RUN: --offload-arch=sm_52 -gpulibc -nogpuinc %s 2>&1 \
+// RUN: --rocm-path=%S/Inputs/rocm \
+// RUN: --offload-arch=sm_52,gfx803 -gpulibc -nogpuinc %s 2>&1 \
// RUN: | FileCheck --check-prefix=LIBC-GPU %s
-// LIBC-GPU: "-lcgpu"{{.*}}"-lmgpu"
+// RUN: %clang -### --target=x86_64-unknown-linux-gnu -fopenmp=libomp \
+// RUN: --libomptarget-nvptx-bc-path=%S/Inputs/libomptarget/libomptarget-nvptx-test.bc \
+// RUN: --libomptarget-amdgpu-bc-path=%S/Inputs/hip_dev_lib/libomptarget-amdgpu-gfx803.bc \
+// RUN: --cuda-path=%S/Inputs/CUDA_102/usr/local/cuda \
+// RUN: --rocm-path=%S/Inputs/rocm \
+// RUN: -Xopenmp-target=nvptx64-nvidia-cuda -march=sm_52 \
+// RUN: -Xopenmp-target=amdgcn-amd-amdhsa -march=gfx803 \
+// RUN: -fopenmp-targets=nvptx64-nvidia-cuda,amdgcn-amd-amdhsa -gpulibc -nogpuinc %s 2>&1 \
+// RUN: | FileCheck --check-prefix=LIBC-GPU %s
+// LIBC-GPU-DAG: "-lcgpu-amdgpu"
+// LIBC-GPU-DAG: "-lmgpu-amdgpu"
+// LIBC-GPU-DAG: "-lcgpu-nvptx"
+// LIBC-GPU-DAG: "-lmgpu-nvptx"
// RUN: %clang -### --target=x86_64-unknown-linux-gnu -fopenmp=libomp \
// RUN: --libomptarget-nvptx-bc-path=%S/Inputs/libomptarget/libomptarget-nvptx-test.bc \
// RUN: --cuda-path=%S/Inputs/CUDA_102/usr/local/cuda \
// RUN: --offload-arch=sm_52 -nogpulibc -nogpuinc %s 2>&1 \
// RUN: | FileCheck --check-prefix=NO-LIBC-GPU %s
-// NO-LIBC-GPU-NOT: "-lcgpu"{{.*}}"-lmgpu"
+// NO-LIBC-GPU-NOT: -lmgpu{{.*}}-lcgpu
diff --git a/clang/test/Driver/riscv-features.c b/clang/test/Driver/riscv-features.c
index a108383e29fb..fc5fb0f27e3a 100644
--- a/clang/test/Driver/riscv-features.c
+++ b/clang/test/Driver/riscv-features.c
@@ -41,6 +41,14 @@
// FAST-UNALIGNED-ACCESS: "-target-feature" "+fast-unaligned-access"
// NO-FAST-UNALIGNED-ACCESS: "-target-feature" "-fast-unaligned-access"
+// RUN: %clang --target=riscv32-unknown-elf --gcc-toolchain="" -### %s 2>&1 | FileCheck %s -check-prefix=NOUWTABLE
+// RUN: %clang --target=riscv32-unknown-elf --gcc-toolchain="" -fasynchronous-unwind-tables -### %s 2>&1 | FileCheck %s -check-prefix=UWTABLE
+// RUN: %clang --target=riscv64-unknown-elf --gcc-toolchain="" -### %s 2>&1 | FileCheck %s -check-prefix=NOUWTABLE
+// RUN: %clang --target=riscv64-unknown-elf --gcc-toolchain="" -fasynchronous-unwind-tables -### %s 2>&1 | FileCheck %s -check-prefix=UWTABLE
+//
+// UWTABLE: "-funwind-tables=2"
+// NOUWTABLE-NOT: "-funwind-tables=2"
+
// RUN: %clang --target=riscv32-linux -### %s -fsyntax-only 2>&1 \
// RUN: | FileCheck %s -check-prefix=DEFAULT-LINUX
// RUN: %clang --target=riscv64-linux -### %s -fsyntax-only 2>&1 \
diff --git a/clang/test/InstallAPI/installapi-basic.test b/clang/test/InstallAPI/basic.test
index 22b04792ca2c..22b04792ca2c 100644
--- a/clang/test/InstallAPI/installapi-basic.test
+++ b/clang/test/InstallAPI/basic.test
diff --git a/clang/test/InstallAPI/installapi-driver-invalid-options.test b/clang/test/InstallAPI/driver-invalid-options.test
index a2e008e1eb03..a2e008e1eb03 100644
--- a/clang/test/InstallAPI/installapi-driver-invalid-options.test
+++ b/clang/test/InstallAPI/driver-invalid-options.test
diff --git a/clang/test/Modules/compile-pcm-with-pic.cppm b/clang/test/Modules/compile-pcm-with-pic.cppm
new file mode 100644
index 000000000000..3d818dde0cd2
--- /dev/null
+++ b/clang/test/Modules/compile-pcm-with-pic.cppm
@@ -0,0 +1,21 @@
+// REQUIRES: x86-registered-target
+
+// RUN: rm -rf %t
+// RUN: mkdir %t
+
+// RUN: %clang_cc1 -std=c++20 %s -pic-level 2 -emit-llvm -o - | FileCheck %s
+// RUN: %clang_cc1 -std=c++20 %s -pic-level 2 -fmodule-output=%t/m.pcm -emit-llvm -o - \
+// RUN: | FileCheck %s
+//
+// RUN: %clang_cc1 -std=c++20 %s -emit-module-interface -o %t/m.pcm
+// RUN: %clang_cc1 -std=c++20 %t/m.pcm -pic-level 2 -emit-llvm -o - | FileCheck %s
+// RUN: %clang_cc1 -std=c++20 %t/m.pcm -emit-llvm -o - | FileCheck %s --check-prefix=NOPIC
+
+export module m;
+export int x;
+export int func() {
+ return x;
+}
+
+// CHECK: ![[METADATA_NUM:[0-9]+]] = !{{{.*}}, !"PIC Level", i32 2}
+// NOPIC-NOT: ![[METADATA_NUM:[0-9]+]] = !{{{.*}}, !"PIC Level", i32 2}
diff --git a/clang/test/Parser/cxx-concepts-requires-clause.cpp b/clang/test/Parser/cxx-concepts-requires-clause.cpp
index 1ec1eefa1286..5b5bc9ea978b 100644
--- a/clang/test/Parser/cxx-concepts-requires-clause.cpp
+++ b/clang/test/Parser/cxx-concepts-requires-clause.cpp
@@ -168,3 +168,30 @@ auto lambda4 = [] requires(sizeof(char) == 1){}; // expected-error {{expected bo
#if __cplusplus <= 202002L
// expected-warning@-2{{lambda without a parameter clause is a C++23 extension}}
#endif
+
+namespace GH78524 {
+
+template <typename T> T Foo;
+
+template <typename T> auto C(Foo<T>);
+
+template <typename T> struct D {
+ decltype(T()(C<T>)) Type;
+};
+
+template <typename T, typename U> D<T> G(T, U) { return {}; }
+
+struct E {};
+
+void F() {
+ G([]<typename T>
+// ~~~~~~~~~~ T: Depth: 0, Index: 0
+ requires requires { [](auto...) {}; }(T)
+// ~~~~ auto: Depth: 1, Index: 0
+ { return T(); },
+ E{});
+}
+
+int a = []<int=0> requires requires { [](auto){}; } { return 0; }();
+
+} // namespace GH78524
diff --git a/clang/test/Preprocessor/has_builtin_cpuid.c b/clang/test/Preprocessor/has_builtin_cpuid.c
index 8de6331e62d6..35ef65ecdd9b 100644
--- a/clang/test/Preprocessor/has_builtin_cpuid.c
+++ b/clang/test/Preprocessor/has_builtin_cpuid.c
@@ -13,8 +13,3 @@
# error "ARM/PPC shouldn't have __builtin_cpu_init"
# endif
#endif
-#if __has_builtin(__builtin_cpu_supports)
-# ifdef ARM
-# error "ARM shouldn't have __builtin_cpu_supports"
-# endif
-#endif
diff --git a/clang/test/Sema/aarch64-cpu-supports.c b/clang/test/Sema/aarch64-cpu-supports.c
new file mode 100644
index 000000000000..24aae9542dbc
--- /dev/null
+++ b/clang/test/Sema/aarch64-cpu-supports.c
@@ -0,0 +1,26 @@
+// RUN: %clang_cc1 -fsyntax-only -triple aarch64-linux-gnu -verify %s
+
+int test_aarch64_features(void) {
+ char * ssbs2;
+ // expected-error@+1 {{expression is not a string literal}}
+ if (__builtin_cpu_supports(ssbs2))
+ return 1;
+ // expected-error@+1 {{invalid cpu feature string}}
+ if (__builtin_cpu_supports(""))
+ return 2;
+ // expected-error@+1 {{invalid cpu feature string}}
+ if (__builtin_cpu_supports("pmull128"))
+ return 3;
+ // expected-error@+1 {{invalid cpu feature string}}
+ if (__builtin_cpu_supports("sve2,rpres"))
+ return 4;
+ // expected-error@+1 {{invalid cpu feature string}}
+ if (__builtin_cpu_supports("dgh+sve2-pmull"))
+ return 5;
+ // expected-error@+1 {{invalid cpu feature string}}
+ if (__builtin_cpu_supports("default"))
+ return 6;
+ if (__builtin_cpu_supports(" ssbs + bti "))
+ return 7;
+ return 0;
+}
diff --git a/clang/test/Sema/aarch64-sme-intrinsics/acle_sme_zero.c b/clang/test/Sema/aarch64-sme-intrinsics/acle_sme_zero.c
new file mode 100644
index 000000000000..a852ffa09c60
--- /dev/null
+++ b/clang/test/Sema/aarch64-sme-intrinsics/acle_sme_zero.c
@@ -0,0 +1,10 @@
+// REQUIRES: aarch64-registered-target
+// RUN: %clang_cc1 -triple aarch64-none-linux-gnu -target-feature +sme -target-feature +sve -fsyntax-only -verify %s
+
+#include <arm_sme.h>
+
+void test_svzero_args(uint64_t m) {
+ svzero_za(0); // expected-error {{too many arguments to function call, expected 0, have 1}}
+ svzero_za(m); // expected-error {{too many arguments to function call, expected 0, have 1}}
+ svzero_mask_za(m); // expected-error {{argument to 'svzero_mask_za' must be a constant integer}}
+}
diff --git a/clang/test/Sema/aix-builtin-cpu-unsupports.c b/clang/test/Sema/aix-builtin-cpu-unsupports.c
new file mode 100644
index 000000000000..10e21867c393
--- /dev/null
+++ b/clang/test/Sema/aix-builtin-cpu-unsupports.c
@@ -0,0 +1,6 @@
+// RUN: %clang_cc1 -fsyntax-only -triple powerpc-ibm-aix7.1.0.0 -verify %s
+
+int main(void) {
+ if (__builtin_cpu_is("power8")) // expected-error {{this builtin is available only on AIX 7.2 and later operating systems}}
+ return 1;
+}
diff --git a/clang/test/Sema/builtin-cpu-supports.c b/clang/test/Sema/builtin-cpu-supports.c
index cc6f1beb5d8a..733d797f3ff8 100644
--- a/clang/test/Sema/builtin-cpu-supports.c
+++ b/clang/test/Sema/builtin-cpu-supports.c
@@ -27,7 +27,7 @@ int main(void) {
(void)__builtin_cpu_supports("x86-64-v4");
(void)__builtin_cpu_supports("x86-64-v5"); // expected-error {{invalid cpu feature string for builtin}}
#else
- if (__builtin_cpu_supports("aes")) // expected-error {{builtin is not supported on this target}}
+ if (__builtin_cpu_supports("neon")) // expected-error {{invalid cpu feature string for builtin}}
a("vsx");
if (__builtin_cpu_is("cortex-x3")) // expected-error {{builtin is not supported on this target}}
diff --git a/clang/test/SemaCXX/PR20334-std_initializer_list_diagnosis_assertion.cpp b/clang/test/SemaCXX/PR20334-std_initializer_list_diagnosis_assertion.cpp
index ec672089b84a..fb1feee01b29 100644
--- a/clang/test/SemaCXX/PR20334-std_initializer_list_diagnosis_assertion.cpp
+++ b/clang/test/SemaCXX/PR20334-std_initializer_list_diagnosis_assertion.cpp
@@ -1,5 +1,8 @@
// RUN: %clang_cc1 -std=c++11 -verify -emit-llvm-only %s
// RUN: %clang_cc1 -std=c++98 -fsyntax-only -verify %s -DCPP98
+// RUN: %clang_cc1 -std=c++11 -verify -emit-llvm-only %s -fexperimental-new-constant-interpreter
+// RUN: %clang_cc1 -std=c++98 -fsyntax-only -verify %s -DCPP98 -fexperimental-new-constant-interpreter
+
namespace std {
template <class _E>
diff --git a/clang/test/SemaCXX/PR68542.cpp b/clang/test/SemaCXX/PR68542.cpp
index fc767a78c8b0..e266bf9ba77a 100644
--- a/clang/test/SemaCXX/PR68542.cpp
+++ b/clang/test/SemaCXX/PR68542.cpp
@@ -1,20 +1,20 @@
// RUN: %clang_cc1 -verify -std=c++20 -fsyntax-only %s
+// RUN: %clang_cc1 -verify -std=c++20 -fsyntax-only %s -fexperimental-new-constant-interpreter
-struct S {
+struct S { // expected-note {{candidate constructor (the implicit move constructor) not viable: no known conversion from 'int' to 'S &&' for 1st argument}} \
+ // expected-note {{candidate constructor (the implicit copy constructor) not viable: no known conversion from 'int' to 'const S &' for 1st argument}}
int e;
};
template<class T>
consteval int get_format() {
- return nullptr; // expected-error{{cannot initialize return object of type 'int' with an rvalue of type 'std::nullptr_t'}}
+ return nullptr; // expected-error {{cannot initialize return object of type 'int' with an rvalue of type 'std::nullptr_t'}}
}
template<class T>
constexpr S f(T) noexcept {
- return get_format<T>(); // expected-error{{no viable conversion from returned value of type 'int' to function return type 'S'}}
+ return get_format<T>(); // expected-error {{no viable conversion from returned value of type 'int' to function return type 'S'}}
}
-constexpr S x = f(0); // expected-error{{constexpr variable 'x' must be initialized by a constant expression}}
-// expected-note@-1{{in instantiation of function template specialization 'f<int>' requested here}}
-// expected-note@3{{candidate constructor (the implicit move constructor) not viable: no known conversion from 'int' to 'S &&' for 1st argument}}
-// expected-note@3{{candidate constructor (the implicit copy constructor) not viable: no known conversion from 'int' to 'const S &' for 1st argument}}
+constexpr S x = f(0); // expected-error {{constexpr variable 'x' must be initialized by a constant expression}} \
+ // expected-note {{in instantiation of function template specialization 'f<int>' requested here}}
diff --git a/clang/test/SemaCXX/cxx2a-ms-no-unique-address.cpp b/clang/test/SemaCXX/cxx2a-ms-no-unique-address.cpp
index 42058559a087..822ed752fa9c 100644
--- a/clang/test/SemaCXX/cxx2a-ms-no-unique-address.cpp
+++ b/clang/test/SemaCXX/cxx2a-ms-no-unique-address.cpp
@@ -17,3 +17,28 @@ struct [[msvc::no_unique_address]] S { // expected-error {{only applies to non-b
int [[msvc::no_unique_address]] c; // expected-error {{cannot be applied to types}} unsupported-error {{cannot be applied to types}}
};
+
+struct CStructNoUniqueAddress {
+ int one;
+ [[no_unique_address]] int two;
+ // expected-warning@-1 {{unknown attribute 'no_unique_address' ignored}}
+};
+
+struct CStructMSVCNoUniqueAddress {
+ int one;
+ [[msvc::no_unique_address]] int two;
+ // unsupported-warning@-1 {{unknown attribute 'no_unique_address' ignored}}
+};
+
+struct CStructMSVCNoUniqueAddress2 {
+ int one;
+ [[msvc::no_unique_address]] int two;
+ // unsupported-warning@-1 {{unknown attribute 'no_unique_address' ignored}}
+};
+
+static_assert(__has_cpp_attribute(no_unique_address) == 0);
+// unsupported-error@-1 {{static assertion failed due to requirement '201803L == 0'}}
+static_assert(!__is_layout_compatible(CStructNoUniqueAddress, CStructMSVCNoUniqueAddress), "");
+static_assert(__is_layout_compatible(CStructMSVCNoUniqueAddress, CStructMSVCNoUniqueAddress), "");
+static_assert(!__is_layout_compatible(CStructMSVCNoUniqueAddress, CStructMSVCNoUniqueAddress2), "");
+// unsupported-error@-1 {{static assertion failed due to requirement '!__is_layout_compatible(CStructMSVCNoUniqueAddress, CStructMSVCNoUniqueAddress2)':}}
diff --git a/clang/test/SemaCXX/type-traits.cpp b/clang/test/SemaCXX/type-traits.cpp
index 2c35d5ee19a4..23c339ebdf08 100644
--- a/clang/test/SemaCXX/type-traits.cpp
+++ b/clang/test/SemaCXX/type-traits.cpp
@@ -1768,8 +1768,8 @@ void is_layout_compatible(int n)
static_assert(!__is_layout_compatible(CppStructNonStandardBySameBase, CppStructNonStandardBySameBase2), "");
static_assert(!__is_layout_compatible(CppStructNonStandardBy2ndVirtBase, CppStructNonStandardBy2ndVirtBase2), "");
static_assert(__is_layout_compatible(CStruct, CStructWithQualifiers), "");
- static_assert(__is_layout_compatible(CStruct, CStructNoUniqueAddress) == bool(__has_cpp_attribute(no_unique_address)), ""); // FIXME: this is CWG2759
- static_assert(__is_layout_compatible(CStructNoUniqueAddress, CStructNoUniqueAddress2) == bool(__has_cpp_attribute(no_unique_address)), ""); // FIXME: this is CWG2759
+ static_assert(__is_layout_compatible(CStruct, CStructNoUniqueAddress) != bool(__has_cpp_attribute(no_unique_address)), "");
+ static_assert(__is_layout_compatible(CStructNoUniqueAddress, CStructNoUniqueAddress2) != bool(__has_cpp_attribute(no_unique_address)), "");
static_assert(__is_layout_compatible(CStruct, CStructAlignment), "");
static_assert(__is_layout_compatible(CStruct, CStructAlignedMembers), ""); // FIXME: alignment of members impact common initial sequence
static_assert(__is_layout_compatible(CStructWithBitfelds, CStructWithBitfelds), "");
@@ -1782,10 +1782,10 @@ void is_layout_compatible(int n)
static_assert(!__is_layout_compatible(void(CStruct2::*)(int), void(CStruct2::*)(char)), "");
static_assert(__is_layout_compatible(CStructNested, CStructNested2), "");
static_assert(__is_layout_compatible(UnionLayout, UnionLayout), "");
- static_assert(__is_layout_compatible(UnionLayout, UnionLayout2), "");
+ static_assert(!__is_layout_compatible(UnionLayout, UnionLayout2), "");
static_assert(!__is_layout_compatible(UnionLayout, UnionLayout3), "");
- static_assert(__is_layout_compatible(StructWithAnonUnion, StructWithAnonUnion2), "");
- static_assert(__is_layout_compatible(StructWithAnonUnion, StructWithAnonUnion3), "");
+ static_assert(!__is_layout_compatible(StructWithAnonUnion, StructWithAnonUnion2), "");
+ static_assert(!__is_layout_compatible(StructWithAnonUnion, StructWithAnonUnion3), "");
static_assert(__is_layout_compatible(EnumLayout, EnumClassLayout), "");
static_assert(__is_layout_compatible(EnumForward, EnumForward), "");
static_assert(__is_layout_compatible(EnumForward, EnumClassForward), "");
diff --git a/clang/test/SemaOpenACC/no-branch-in-out.c b/clang/test/SemaOpenACC/no-branch-in-out.c
new file mode 100644
index 000000000000..33a171f1b68d
--- /dev/null
+++ b/clang/test/SemaOpenACC/no-branch-in-out.c
@@ -0,0 +1,95 @@
+// RUN: %clang_cc1 %s -verify -fopenacc
+
+void BreakContinue() {
+
+#pragma acc parallel
+ for(int i =0; i < 5; ++i) {
+ switch(i) {
+ case 0:
+ break; // leaves switch, not 'for'.
+ default:
+ i +=2;
+ break;
+ }
+ if (i == 2)
+ continue;
+
+ break; // expected-error{{invalid branch out of OpenACC Compute Construct}}
+ }
+
+ int j;
+ switch(j) {
+ case 0:
+#pragma acc parallel
+ {
+ break; // expected-error{{invalid branch out of OpenACC Compute Construct}}
+ }
+ case 1:
+#pragma acc parallel
+ {
+ }
+ break;
+ }
+
+#pragma acc parallel
+ for(int i = 0; i < 5; ++i) {
+ if (i > 1)
+ break; // expected-error{{invalid branch out of OpenACC Compute Construct}}
+ }
+
+#pragma acc parallel
+ switch(j) {
+ case 1:
+ break;
+ }
+
+#pragma acc parallel
+ {
+ for(int i = 1; i < 100; i++) {
+ if (i > 4)
+ break;
+ }
+ }
+
+ for (int i =0; i < 5; ++i) {
+#pragma acc parallel
+ {
+ continue; // expected-error{{invalid branch out of OpenACC Compute Construct}}
+ }
+ }
+
+#pragma acc parallel
+ for (int i =0; i < 5; ++i) {
+ continue;
+ }
+
+#pragma acc parallel
+ for (int i =0; i < 5; ++i) {
+ {
+ continue;
+ }
+ }
+
+ for (int i =0; i < 5; ++i) {
+#pragma acc parallel
+ {
+ break; // expected-error{{invalid branch out of OpenACC Compute Construct}}
+ }
+ }
+
+#pragma acc parallel
+ while (j) {
+ --j;
+ if (j > 4)
+ break; // expected-error{{invalid branch out of OpenACC Compute Construct}}
+ }
+
+#pragma acc parallel
+ do {
+ --j;
+ if (j > 4)
+ break; // expected-error{{invalid branch out of OpenACC Compute Construct}}
+ } while (j );
+
+}
+
diff --git a/clang/tools/c-index-test/core_main.cpp b/clang/tools/c-index-test/core_main.cpp
index 56bf7c91acc7..c552466c9a18 100644
--- a/clang/tools/c-index-test/core_main.cpp
+++ b/clang/tools/c-index-test/core_main.cpp
@@ -276,7 +276,7 @@ static bool printSourceSymbolsFromModule(StringRef modulePath,
CompilerInstance::createDiagnostics(new DiagnosticOptions());
std::unique_ptr<ASTUnit> AU = ASTUnit::LoadFromASTFile(
std::string(modulePath), *pchRdr, ASTUnit::LoadASTOnly, Diags,
- FileSystemOpts, HSOpts,
+ FileSystemOpts, HSOpts, /*LangOpts=*/nullptr,
/*OnlyLocalDecls=*/true, CaptureDiagsKind::None,
/*AllowASTWithCompilerErrors=*/true,
/*UserFilesAreVolatile=*/false);
diff --git a/clang/tools/clang-installapi/Options.cpp b/clang/tools/clang-installapi/Options.cpp
index 08d1c0e8e660..562a643edfcf 100644
--- a/clang/tools/clang-installapi/Options.cpp
+++ b/clang/tools/clang-installapi/Options.cpp
@@ -22,14 +22,7 @@ namespace installapi {
bool Options::processDriverOptions(InputArgList &Args) {
// Handle inputs.
- llvm::vfs::Status Stat;
- for (const auto &Path : Args.getAllArgValues(OPT_INPUT)) {
- if (FM->getNoncachedStatValue(Path, Stat) || !Stat.exists()) {
- Diags->Report(clang::diag::err_drv_no_such_file) << Path;
- return false;
- }
- DriverOpts.FileLists.push_back(std::move(Path));
- }
+ llvm::append_range(DriverOpts.FileLists, Args.getAllArgValues(OPT_INPUT));
// Handle output.
SmallString<PATH_MAX> OutputPath;
@@ -61,8 +54,9 @@ bool Options::processDriverOptions(InputArgList &Args) {
// Capture target triples first.
if (ArgTarget) {
- for (auto *Arg : Args.filtered(OPT_target)) {
- llvm::Triple TargetTriple(Arg->getValue());
+ for (const Arg *A : Args.filtered(OPT_target)) {
+ A->claim();
+ llvm::Triple TargetTriple(A->getValue());
Target TAPITarget = Target(TargetTriple);
if ((TAPITarget.Arch == AK_unknown) ||
(TAPITarget.Platform == PLATFORM_UNKNOWN)) {
diff --git a/clang/tools/clang-linker-wrapper/ClangLinkerWrapper.cpp b/clang/tools/clang-linker-wrapper/ClangLinkerWrapper.cpp
index 095cf5ed3816..576e8f2cd7f8 100644
--- a/clang/tools/clang-linker-wrapper/ClangLinkerWrapper.cpp
+++ b/clang/tools/clang-linker-wrapper/ClangLinkerWrapper.cpp
@@ -454,9 +454,11 @@ Expected<StringRef> clang(ArrayRef<StringRef> InputFiles, const ArgList &Args) {
Triple.isAMDGPU() ? Args.MakeArgString("-mcpu=" + Arch)
: Args.MakeArgString("-march=" + Arch),
Args.MakeArgString("-" + OptLevel),
- "-Wl,--no-undefined",
};
+ if (!Triple.isNVPTX())
+ CmdArgs.push_back("-Wl,--no-undefined");
+
for (StringRef InputFile : InputFiles)
CmdArgs.push_back(InputFile);
diff --git a/clang/tools/libclang/CIndex.cpp b/clang/tools/libclang/CIndex.cpp
index 4ded92cbe9ae..418b152ba4a1 100644
--- a/clang/tools/libclang/CIndex.cpp
+++ b/clang/tools/libclang/CIndex.cpp
@@ -3890,7 +3890,7 @@ enum CXErrorCode clang_createTranslationUnit2(CXIndex CIdx,
std::unique_ptr<ASTUnit> AU = ASTUnit::LoadFromASTFile(
ast_filename, CXXIdx->getPCHContainerOperations()->getRawReader(),
ASTUnit::LoadEverything, Diags, FileSystemOpts, HSOpts,
- CXXIdx->getOnlyLocalDecls(), CaptureDiagsKind::All,
+ /*LangOpts=*/nullptr, CXXIdx->getOnlyLocalDecls(), CaptureDiagsKind::All,
/*AllowASTWithCompilerErrors=*/true,
/*UserFilesAreVolatile=*/true);
*out_TU = MakeCXTranslationUnit(CXXIdx, std::move(AU));
diff --git a/clang/unittests/Analysis/FlowSensitive/TypeErasedDataflowAnalysisTest.cpp b/clang/unittests/Analysis/FlowSensitive/TypeErasedDataflowAnalysisTest.cpp
index 3bca9cced8d6..34f9b0b23719 100644
--- a/clang/unittests/Analysis/FlowSensitive/TypeErasedDataflowAnalysisTest.cpp
+++ b/clang/unittests/Analysis/FlowSensitive/TypeErasedDataflowAnalysisTest.cpp
@@ -77,17 +77,33 @@ protected:
return runDataflowAnalysis(*CFCtx, Analysis, Env);
}
+ /// Returns the `CFGBlock` containing `S` (and asserts that it exists).
+ const CFGBlock *blockForStmt(const Stmt &S) {
+ const CFGBlock *Block = CFCtx->getStmtToBlock().lookup(&S);
+ assert(Block != nullptr);
+ return Block;
+ }
+
template <typename StateT>
const StateT &
blockStateForStmt(const std::vector<std::optional<StateT>> &BlockStates,
- const Stmt *S) {
- const CFGBlock *Block = CFCtx->getStmtToBlock().lookup(S);
- assert(Block != nullptr);
- const std::optional<StateT> &MaybeState = BlockStates[Block->getBlockID()];
+ const Stmt &S) {
+ const std::optional<StateT> &MaybeState =
+ BlockStates[blockForStmt(S)->getBlockID()];
assert(MaybeState.has_value());
return *MaybeState;
}
+ /// Returns the first node that matches `Matcher` (and asserts that the match
+ /// was successful, i.e. the returned node is not null).
+ template <typename NodeT, typename MatcherT>
+ const NodeT &matchNode(MatcherT Matcher) {
+ const auto *Node = selectFirst<NodeT>(
+ "node", match(Matcher.bind("node"), AST->getASTContext()));
+ assert(Node != nullptr);
+ return *Node;
+ }
+
std::unique_ptr<ASTUnit> AST;
std::unique_ptr<ControlFlowContext> CFCtx;
std::unique_ptr<DataflowAnalysisContext> DACtx;
@@ -130,6 +146,79 @@ TEST_F(DataflowAnalysisTest, DiagnoseFunctionDiagnoserCalledOnEachElement) {
" (Lifetime ends)\n")));
}
+// Tests for the statement-to-block map.
+using StmtToBlockTest = DataflowAnalysisTest;
+
+TEST_F(StmtToBlockTest, ConditionalOperator) {
+ std::string Code = R"(
+ void target(bool b) {
+ int i = b ? 1 : 0;
+ }
+ )";
+ ASSERT_THAT_ERROR(runAnalysis<NoopAnalysis>(
+ Code, [](ASTContext &C) { return NoopAnalysis(C); })
+ .takeError(),
+ llvm::Succeeded());
+
+ const auto &IDecl = matchNode<DeclStmt>(declStmt(has(varDecl(hasName("i")))));
+ const auto &ConditionalOp =
+ matchNode<ConditionalOperator>(conditionalOperator());
+
+ // The conditional operator should be associated with the same block as the
+ // `DeclStmt` for `i`. (Specifically, the conditional operator should not be
+ // associated with the block for which it is the terminator.)
+ EXPECT_EQ(blockForStmt(IDecl), blockForStmt(ConditionalOp));
+}
+
+TEST_F(StmtToBlockTest, LogicalAnd) {
+ std::string Code = R"(
+ void target(bool b1, bool b2) {
+ bool b = b1 && b2;
+ }
+ )";
+ ASSERT_THAT_ERROR(runAnalysis<NoopAnalysis>(
+ Code, [](ASTContext &C) { return NoopAnalysis(C); })
+ .takeError(),
+ llvm::Succeeded());
+
+ const auto &BDecl = matchNode<DeclStmt>(declStmt(has(varDecl(hasName("b")))));
+ const auto &AndOp =
+ matchNode<BinaryOperator>(binaryOperator(hasOperatorName("&&")));
+
+ // The `&&` operator should be associated with the same block as the
+ // `DeclStmt` for `b`. (Specifically, the `&&` operator should not be
+ // associated with the block for which it is the terminator.)
+ EXPECT_EQ(blockForStmt(BDecl), blockForStmt(AndOp));
+}
+
+TEST_F(StmtToBlockTest, IfStatementWithLogicalAnd) {
+ std::string Code = R"(
+ void target(bool b1, bool b2) {
+ if (b1 && b2)
+ ;
+ }
+ )";
+ ASSERT_THAT_ERROR(runAnalysis<NoopAnalysis>(
+ Code, [](ASTContext &C) { return NoopAnalysis(C); })
+ .takeError(),
+ llvm::Succeeded());
+
+ const auto &If = matchNode<IfStmt>(ifStmt());
+ const auto &B2 =
+ matchNode<DeclRefExpr>(declRefExpr(to(varDecl(hasName("b2")))));
+ const auto &AndOp =
+ matchNode<BinaryOperator>(binaryOperator(hasOperatorName("&&")));
+
+ // The if statement is the terminator for the block that contains both `b2`
+ // and the `&&` operator (which appears only as a terminator condition, not
+ // as a regular `CFGElement`).
+ const CFGBlock *IfBlock = blockForStmt(If);
+ const CFGBlock *B2Block = blockForStmt(B2);
+ const CFGBlock *AndOpBlock = blockForStmt(AndOp);
+ EXPECT_EQ(IfBlock, B2Block);
+ EXPECT_EQ(IfBlock, AndOpBlock);
+}
+
// Tests that check we discard state for expressions correctly.
using DiscardExprStateTest = DataflowAnalysisTest;
@@ -144,25 +233,20 @@ TEST_F(DiscardExprStateTest, WhileStatement) {
auto BlockStates = llvm::cantFail(runAnalysis<NoopAnalysis>(
Code, [](ASTContext &C) { return NoopAnalysis(C); }));
- auto *NotEqOp = selectFirst<BinaryOperator>(
- "op", match(binaryOperator(hasOperatorName("!=")).bind("op"),
- AST->getASTContext()));
- ASSERT_NE(NotEqOp, nullptr);
-
- auto *CallFoo = selectFirst<CallExpr>(
- "call", match(callExpr(callee(functionDecl(hasName("foo")))).bind("call"),
- AST->getASTContext()));
- ASSERT_NE(CallFoo, nullptr);
+ const auto &NotEqOp =
+ matchNode<BinaryOperator>(binaryOperator(hasOperatorName("!=")));
+ const auto &CallFoo =
+ matchNode<CallExpr>(callExpr(callee(functionDecl(hasName("foo")))));
// In the block that evaluates the expression `p != nullptr`, this expression
// is associated with a value.
const auto &NotEqOpState = blockStateForStmt(BlockStates, NotEqOp);
- EXPECT_NE(NotEqOpState.Env.getValue(*NotEqOp), nullptr);
+ EXPECT_NE(NotEqOpState.Env.getValue(NotEqOp), nullptr);
// In the block that calls `foo(p)`, the value for `p != nullptr` is discarded
// because it is not consumed by this block.
const auto &CallFooState = blockStateForStmt(BlockStates, CallFoo);
- EXPECT_EQ(CallFooState.Env.getValue(*NotEqOp), nullptr);
+ EXPECT_EQ(CallFooState.Env.getValue(NotEqOp), nullptr);
}
TEST_F(DiscardExprStateTest, BooleanOperator) {
@@ -174,29 +258,24 @@ TEST_F(DiscardExprStateTest, BooleanOperator) {
auto BlockStates = llvm::cantFail(runAnalysis<NoopAnalysis>(
Code, [](ASTContext &C) { return NoopAnalysis(C); }));
- auto *AndOp = selectFirst<BinaryOperator>(
- "op", match(binaryOperator(hasOperatorName("&&")).bind("op"),
- AST->getASTContext()));
- ASSERT_NE(AndOp, nullptr);
-
- auto *Return = selectFirst<ReturnStmt>(
- "return", match(returnStmt().bind("return"), AST->getASTContext()));
- ASSERT_NE(Return, nullptr);
+ const auto &AndOp =
+ matchNode<BinaryOperator>(binaryOperator(hasOperatorName("&&")));
+ const auto &Return = matchNode<ReturnStmt>(returnStmt());
// In the block that evaluates the LHS of the `&&` operator, the LHS is
// associated with a value, while the right-hand side is not (unsurprisingly,
// as it hasn't been evaluated yet).
- const auto &LHSState = blockStateForStmt(BlockStates, AndOp->getLHS());
- auto *LHSValue = cast<BoolValue>(LHSState.Env.getValue(*AndOp->getLHS()));
+ const auto &LHSState = blockStateForStmt(BlockStates, *AndOp.getLHS());
+ auto *LHSValue = cast<BoolValue>(LHSState.Env.getValue(*AndOp.getLHS()));
ASSERT_NE(LHSValue, nullptr);
- EXPECT_EQ(LHSState.Env.getValue(*AndOp->getRHS()), nullptr);
+ EXPECT_EQ(LHSState.Env.getValue(*AndOp.getRHS()), nullptr);
// In the block that evaluates the RHS, the RHS is associated with a
// value. The value for the LHS has been discarded as it is not consumed by
// this block.
- const auto &RHSState = blockStateForStmt(BlockStates, AndOp->getRHS());
- EXPECT_EQ(RHSState.Env.getValue(*AndOp->getLHS()), nullptr);
- auto *RHSValue = cast<BoolValue>(RHSState.Env.getValue(*AndOp->getRHS()));
+ const auto &RHSState = blockStateForStmt(BlockStates, *AndOp.getRHS());
+ EXPECT_EQ(RHSState.Env.getValue(*AndOp.getLHS()), nullptr);
+ auto *RHSValue = cast<BoolValue>(RHSState.Env.getValue(*AndOp.getRHS()));
ASSERT_NE(RHSValue, nullptr);
// In the block that evaluates the return statement, the expression `b1 && b2`
@@ -217,9 +296,9 @@ TEST_F(DiscardExprStateTest, BooleanOperator) {
// operands, rather than from the environment for the block that contains the
// `&&`.
const auto &ReturnState = blockStateForStmt(BlockStates, Return);
- EXPECT_EQ(ReturnState.Env.getValue(*AndOp->getLHS()), nullptr);
- EXPECT_EQ(ReturnState.Env.getValue(*AndOp->getRHS()), nullptr);
- EXPECT_EQ(ReturnState.Env.getValue(*AndOp),
+ EXPECT_EQ(ReturnState.Env.getValue(*AndOp.getLHS()), nullptr);
+ EXPECT_EQ(ReturnState.Env.getValue(*AndOp.getRHS()), nullptr);
+ EXPECT_EQ(ReturnState.Env.getValue(AndOp),
&ReturnState.Env.makeAnd(*LHSValue, *RHSValue));
}
diff --git a/clang/unittests/Format/FormatTest.cpp b/clang/unittests/Format/FormatTest.cpp
index 8282e75bd847..b8dc01f55b4f 100644
--- a/clang/unittests/Format/FormatTest.cpp
+++ b/clang/unittests/Format/FormatTest.cpp
@@ -13503,6 +13503,12 @@ TEST_F(FormatTest, IncorrectCodeUnbalancedBraces) {
verifyFormat("{");
verifyFormat("#})");
verifyNoCrash("(/**/[:!] ?[).");
+ verifyNoCrash("struct X {\n"
+ " operator iunt(\n"
+ "};");
+ verifyNoCrash("struct Foo {\n"
+ " operator foo(bar\n"
+ "};");
}
TEST_F(FormatTest, IncorrectUnbalancedBracesInMacrosWithUnicode) {
diff --git a/clang/unittests/Format/FormatTestMacroExpansion.cpp b/clang/unittests/Format/FormatTestMacroExpansion.cpp
index 653ec2a94c64..85ab6ea3794e 100644
--- a/clang/unittests/Format/FormatTestMacroExpansion.cpp
+++ b/clang/unittests/Format/FormatTestMacroExpansion.cpp
@@ -48,7 +48,7 @@ TEST_F(FormatTestMacroExpansion, UnexpandConfiguredMacros) {
)",
Style);
verifyIncompleteFormat("ID3({, ID(a *b),\n"
- " ;\n"
+ " ;\n"
" });",
Style);
@@ -131,9 +131,9 @@ ID(CALL(CALL(a * b)));
EXPECT_EQ(R"(
ID3(
{
- CLASS
- a *b;
- };
+ CLASS
+ a *b;
+ };
},
ID(x *y);
,
@@ -287,6 +287,19 @@ TEST_F(FormatTestMacroExpansion,
Style);
}
+TEST_F(FormatTestMacroExpansion, IndentChildrenWithinMacroCall) {
+ FormatStyle Style = getGoogleStyleWithColumns(22);
+ Style.Macros.push_back("MACRO(a, b)=a=(b)");
+ verifyFormat("void f() {\n"
+ " MACRO(a b, call([] {\n"
+ " if (expr) {\n"
+ " indent();\n"
+ " }\n"
+ " }));\n"
+ "}",
+ Style);
+}
+
} // namespace
} // namespace test
} // namespace format
diff --git a/clang/unittests/Format/MacroCallReconstructorTest.cpp b/clang/unittests/Format/MacroCallReconstructorTest.cpp
index 6e6900577d16..9df21eae70cb 100644
--- a/clang/unittests/Format/MacroCallReconstructorTest.cpp
+++ b/clang/unittests/Format/MacroCallReconstructorTest.cpp
@@ -151,17 +151,21 @@ public:
Lex.Allocator, Lex.IdentTable);
}
- UnwrappedLine line(llvm::ArrayRef<FormatToken *> Tokens) {
+ UnwrappedLine line(llvm::ArrayRef<FormatToken *> Tokens, unsigned Level = 0) {
UnwrappedLine Result;
+ Result.Level = Level;
for (FormatToken *Tok : Tokens)
Result.Tokens.push_back(UnwrappedLineNode(Tok));
return Result;
}
- UnwrappedLine line(llvm::StringRef Text) { return line({lex(Text)}); }
+ UnwrappedLine line(llvm::StringRef Text, unsigned Level = 0) {
+ return line({lex(Text)}, Level);
+ }
- UnwrappedLine line(llvm::ArrayRef<Chunk> Chunks) {
+ UnwrappedLine line(llvm::ArrayRef<Chunk> Chunks, unsigned Level = 0) {
UnwrappedLine Result;
+ Result.Level = Level;
for (const Chunk &Chunk : Chunks) {
Result.Tokens.insert(Result.Tokens.end(), Chunk.Tokens.begin(),
Chunk.Tokens.end());
@@ -186,6 +190,8 @@ public:
};
bool matchesTokens(const UnwrappedLine &L1, const UnwrappedLine &L2) {
+ if (L1.Level != L2.Level)
+ return false;
if (L1.Tokens.size() != L2.Tokens.size())
return false;
for (auto L1It = L1.Tokens.begin(), L2It = L2.Tokens.begin();
@@ -288,7 +294,8 @@ TEST_F(MacroCallReconstructorTest, StatementSequence) {
matchesLine(line(
{U1.consume("SEMI"),
children({line({U2.consume("SEMI"),
- children({line(U3.consume("SEMI"))})})})})));
+ children({line(U3.consume("SEMI"), 2)})},
+ 1)})})));
}
TEST_F(MacroCallReconstructorTest, NestedBlock) {
@@ -337,9 +344,9 @@ TEST_F(MacroCallReconstructorTest, NestedBlock) {
auto Expected = line({Chunk2Start,
children({
- line(Chunk2LBrace),
- line({Chunk1, Chunk2Mid}),
- line(Chunk2RBrace),
+ line(Chunk2LBrace, 1),
+ line({Chunk1, Chunk2Mid}, 1),
+ line(Chunk2RBrace, 1),
}),
Chunk2End});
EXPECT_THAT(std::move(Unexp).takeResult(), matchesLine(Expected));
@@ -379,9 +386,11 @@ TEST_F(MacroCallReconstructorTest, NestedChildBlocks) {
Unexp.addLine(
line({E.consume("f([] {"),
children({line({E.consume("f([] {"),
- children({line(E.consume("return a * b;"))}),
- E.consume("})")})}),
- E.consume("})")}));
+ children({line(E.consume("return a * b;"), 3)}),
+ E.consume("})")},
+ 2)}),
+ E.consume("})")},
+ 1));
Unexp.addLine(line(E.consume("}")));
EXPECT_TRUE(Unexp.finished());
@@ -407,13 +416,15 @@ TEST_F(MacroCallReconstructorTest, NestedChildBlocks) {
auto Expected = line({
Chunk3Start,
children({
- line(Chunk3LBrace),
- line({
- Chunk2Start,
- Chunk1,
- Chunk2End,
- }),
- line(Chunk3RBrace),
+ line(Chunk3LBrace, 1),
+ line(
+ {
+ Chunk2Start,
+ Chunk1,
+ Chunk2End,
+ },
+ 2),
+ line(Chunk3RBrace, 1),
}),
Chunk3End,
});
@@ -469,8 +480,8 @@ TEST_F(MacroCallReconstructorTest, MultipleToplevelUnwrappedLines) {
auto Expected = line({
U.consume("ID("),
children({
- line(U.consume("x;")),
- line(U.consume("x")),
+ line(U.consume("x;"), 1),
+ line(U.consume("x"), 1),
}),
U.consume(", y)"),
});
@@ -524,9 +535,9 @@ TEST_F(MacroCallReconstructorTest, NestedCallsMultipleLines) {
auto Expected = line({
Chunk2Start,
children({
- line({Chunk2LBrace}),
- line({Chunk1, Chunk2Semi}),
- line({Chunk2RBrace}),
+ line({Chunk2LBrace}, 1),
+ line({Chunk1, Chunk2Semi}, 1),
+ line({Chunk2RBrace}, 1),
}),
Chunk2End,
});
@@ -556,15 +567,17 @@ TEST_F(MacroCallReconstructorTest, ParentOutsideMacroCall) {
auto Expected = line({
Prefix,
children({
- line({
- U.consume("ID("),
- children({
- line(U.consume("x;")),
- line(U.consume("y;")),
- line(U.consume("z;")),
- }),
- U.consume(")"),
- }),
+ line(
+ {
+ U.consume("ID("),
+ children({
+ line(U.consume("x;"), 2),
+ line(U.consume("y;"), 2),
+ line(U.consume("z;"), 2),
+ }),
+ U.consume(")"),
+ },
+ 1),
}),
Postfix,
});
@@ -590,7 +603,7 @@ TEST_F(MacroCallReconstructorTest, ChildrenSplitAcrossArguments) {
Matcher U(Call, Lex);
auto Expected = line({
U.consume("CALL({"),
- children(line(U.consume("a;"))),
+ children(line(U.consume("a;"), 1)),
U.consume(", b; })"),
});
EXPECT_THAT(std::move(Unexp).takeResult(), matchesLine(Expected));
@@ -620,16 +633,20 @@ TEST_F(MacroCallReconstructorTest, ChildrenAfterMacroCall) {
Matcher U(Call, Lex);
auto Expected = line({
U.consume("CALL({"),
- children(line(U.consume("a"))),
+ children(line(U.consume("a"), 1)),
U.consume(", b)"),
Semi,
- children(line({
- SecondLine,
- children(line({
- ThirdLine,
- Postfix,
- })),
- })),
+ children(line(
+ {
+ SecondLine,
+ children(line(
+ {
+ ThirdLine,
+ Postfix,
+ },
+ 2)),
+ },
+ 1)),
});
EXPECT_THAT(std::move(Unexp).takeResult(), matchesLine(Expected));
}
@@ -655,7 +672,37 @@ TEST_F(MacroCallReconstructorTest, InvalidCodeSplittingBracesAcrossArgs) {
Matcher U(Call, Lex);
auto Expected = line({
Prefix,
- children({line(U.consume("M({,x,)"))}),
+ children({line(U.consume("M({,x,)"), 1)}),
+ });
+ EXPECT_THAT(std::move(Unexp).takeResult(), matchesLine(Expected));
+}
+
+TEST_F(MacroCallReconstructorTest, IndentLevelInExpandedCode) {
+ auto Macros = createExpander({"ID(a)=a"});
+ Expansion Exp(Lex, *Macros);
+ TokenList Call = Exp.expand("ID", {std::string("[] { { x; } }")});
+
+ MacroCallReconstructor Unexp(0, Exp.getUnexpanded());
+ Matcher E(Exp.getTokens(), Lex);
+ Unexp.addLine(line({
+ E.consume("[] {"),
+ children({
+ line(E.consume("{"), 1),
+ line(E.consume("x;"), 2),
+ line(E.consume("}"), 1),
+ }),
+ E.consume("}"),
+ }));
+ EXPECT_TRUE(Unexp.finished());
+ Matcher U(Call, Lex);
+ auto Expected = line({
+ U.consume("ID([] {"),
+ children({
+ line(U.consume("{"), 1),
+ line(U.consume("x;"), 2),
+ line(U.consume("}"), 1),
+ }),
+ U.consume("})"),
});
EXPECT_THAT(std::move(Unexp).takeResult(), matchesLine(Expected));
}
diff --git a/clang/utils/TableGen/SveEmitter.cpp b/clang/utils/TableGen/SveEmitter.cpp
index 174304f09007..131397e3825b 100644
--- a/clang/utils/TableGen/SveEmitter.cpp
+++ b/clang/utils/TableGen/SveEmitter.cpp
@@ -1579,6 +1579,7 @@ void SVEEmitter::createSMEHeader(raw_ostream &OS) {
OS << "#endif\n";
OS << "#include <arm_sve.h>\n\n";
+ OS << "#include <stddef.h>\n\n";
OS << "/* Function attributes */\n";
OS << "#define __ai static __inline__ __attribute__((__always_inline__, "
@@ -1605,6 +1606,11 @@ void SVEEmitter::createSMEHeader(raw_ostream &OS) {
OS << " return x0 & 1;\n";
OS << "}\n\n";
+ OS << "void *__arm_sc_memcpy(void *dest, const void *src, size_t n) __arm_streaming_compatible;\n";
+ OS << "void *__arm_sc_memmove(void *dest, const void *src, size_t n) __arm_streaming_compatible;\n";
+ OS << "void *__arm_sc_memset(void *s, int c, size_t n) __arm_streaming_compatible;\n";
+ OS << "void *__arm_sc_memchr(void *s, int c, size_t n) __arm_streaming_compatible;\n\n";
+
OS << "__ai __attribute__((target(\"sme\"))) void svundef_za(void) "
"__arm_streaming_compatible __arm_out(\"za\") "
"{ }\n\n";
diff --git a/clang/www/cxx_dr_status.html b/clang/www/cxx_dr_status.html
index 38e2cb631426..8b638e06f4aa 100755
--- a/clang/www/cxx_dr_status.html
+++ b/clang/www/cxx_dr_status.html
@@ -16362,7 +16362,7 @@ and <I>POD class</I></td>
<td><a href="https://cplusplus.github.io/CWG/issues/2759.html">2759</a></td>
<td>DR</td>
<td>[[no_unique_address] and common initial sequence</td>
- <td class="unknown" align="center">Unknown</td>
+ <td class="unreleased" align="center">Clang 19</td>
</tr>
<tr id="2760">
<td><a href="https://cplusplus.github.io/CWG/issues/2760.html">2760</a></td>
diff --git a/compiler-rt/lib/builtins/cpu_model/x86.c b/compiler-rt/lib/builtins/cpu_model/x86.c
index 1afa468c4ae8..7e8acb3e73ed 100644
--- a/compiler-rt/lib/builtins/cpu_model/x86.c
+++ b/compiler-rt/lib/builtins/cpu_model/x86.c
@@ -217,8 +217,8 @@ enum ProcessorFeatures {
FEATURE_SM3,
FEATURE_SHA512,
FEATURE_SM4,
- // FEATURE_APXF,
- FEATURE_USERMSR = 112,
+ FEATURE_APXF,
+ FEATURE_USERMSR,
FEATURE_AVX10_1_256,
FEATURE_AVX10_1_512,
CPU_FEATURE_MAX
@@ -983,6 +983,8 @@ static void getAvailableFeatures(unsigned ECX, unsigned EDX, unsigned MaxLeaf,
setFeature(FEATURE_USERMSR);
if (HasLeaf7Subleaf1 && ((EDX >> 19) & 1))
setFeature(FEATURE_AVX10_1_256);
+ if (HasLeaf7Subleaf1 && ((EDX >> 21) & 1))
+ setFeature(FEATURE_APXF);
unsigned MaxLevel;
getX86CpuIDAndInfo(0, &MaxLevel, &EBX, &ECX, &EDX);
diff --git a/compiler-rt/lib/hwasan/hwasan.cpp b/compiler-rt/lib/hwasan/hwasan.cpp
index 52780becbdb2..ccdc0b4bc21b 100644
--- a/compiler-rt/lib/hwasan/hwasan.cpp
+++ b/compiler-rt/lib/hwasan/hwasan.cpp
@@ -692,7 +692,7 @@ void __hwasan_handle_longjmp(const void *sp_dst) {
"WARNING: HWASan is ignoring requested __hwasan_handle_longjmp: "
"stack top: %p; target %p; distance: %p (%zd)\n"
"False positive error reports may follow\n",
- (void *)sp, (void *)dst, dst - sp);
+ (void *)sp, (void *)dst, dst - sp, dst - sp);
return;
}
TagMemory(sp, dst - sp, 0);
diff --git a/compiler-rt/lib/sanitizer_common/sanitizer_fuchsia.cpp b/compiler-rt/lib/sanitizer_common/sanitizer_fuchsia.cpp
index 2f291f7ca9ea..a67b2a8725ec 100644
--- a/compiler-rt/lib/sanitizer_common/sanitizer_fuchsia.cpp
+++ b/compiler-rt/lib/sanitizer_common/sanitizer_fuchsia.cpp
@@ -288,7 +288,8 @@ uptr ReservedAddressRange::MapOrDie(uptr fixed_addr, uptr map_size,
name ? name : name_, true);
}
-void UnmapOrDieVmar(void *addr, uptr size, zx_handle_t target_vmar) {
+void UnmapOrDieVmar(void *addr, uptr size, zx_handle_t target_vmar,
+ bool raw_report) {
if (!addr || !size)
return;
size = RoundUpTo(size, GetPageSize());
@@ -301,11 +302,8 @@ void UnmapOrDieVmar(void *addr, uptr size, zx_handle_t target_vmar) {
status = _zx_vmar_unmap(_zx_vmar_root_self(),
reinterpret_cast<uintptr_t>(addr), size);
}
- if (status != ZX_OK) {
- Report("ERROR: %s failed to deallocate 0x%zx (%zd) bytes at address %p\n",
- SanitizerToolName, size, size, addr);
- CHECK("unable to unmap" && 0);
- }
+ if (status != ZX_OK)
+ ReportMunmapFailureAndDie(addr, size, status, raw_report);
DecreaseTotalMmap(size);
}
@@ -327,7 +325,8 @@ void ReservedAddressRange::Unmap(uptr addr, uptr size) {
}
// Partial unmapping does not affect the fact that the initial range is still
// reserved, and the resulting unmapped memory can't be reused.
- UnmapOrDieVmar(reinterpret_cast<void *>(addr), size, vmar);
+ UnmapOrDieVmar(reinterpret_cast<void *>(addr), size, vmar,
+ /*raw_report=*/false);
}
// This should never be called.
@@ -413,8 +412,8 @@ void *MmapAlignedOrDieOnFatalError(uptr size, uptr alignment,
return reinterpret_cast<void *>(addr);
}
-void UnmapOrDie(void *addr, uptr size, bool) {
- UnmapOrDieVmar(addr, size, gSanitizerHeapVmar);
+void UnmapOrDie(void *addr, uptr size, bool raw_report) {
+ UnmapOrDieVmar(addr, size, gSanitizerHeapVmar, raw_report);
}
void ReleaseMemoryPagesToOS(uptr beg, uptr end) {
diff --git a/compiler-rt/lib/scudo/standalone/combined.h b/compiler-rt/lib/scudo/standalone/combined.h
index 24ad979a9653..106bf71e241f 100644
--- a/compiler-rt/lib/scudo/standalone/combined.h
+++ b/compiler-rt/lib/scudo/standalone/combined.h
@@ -177,6 +177,18 @@ public:
mapAndInitializeRingBuffer(getFlags()->allocation_ring_buffer_size);
}
+ void enableRingBuffer() {
+ AllocationRingBuffer *RB = getRingBuffer();
+ if (RB)
+ RB->Depot->enable();
+ }
+
+ void disableRingBuffer() {
+ AllocationRingBuffer *RB = getRingBuffer();
+ if (RB)
+ RB->Depot->disable();
+ }
+
bool resizeRingBuffer(int Size) { return mapAndInitializeRingBuffer(Size); }
// Initialize the embedded GWP-ASan instance. Requires the main allocator to
@@ -690,16 +702,12 @@ public:
Quarantine.disable();
Primary.disable();
Secondary.disable();
- AllocationRingBuffer *RB = getRingBuffer();
- if (RB)
- RB->disable();
+ disableRingBuffer();
}
void enable() NO_THREAD_SAFETY_ANALYSIS {
initThreadMaybe();
- AllocationRingBuffer *RB = getRingBuffer();
- if (RB)
- RB->enable();
+ enableRingBuffer();
Secondary.enable();
Primary.enable();
Quarantine.enable();
@@ -1074,10 +1082,6 @@ private:
atomic_uptr Pos;
// An array of Size (at least one) elements of type Entry is immediately
  // following this struct.

-
- void enable() { Depot->enable(); }
-
- void disable() { Depot->disable(); }
};
// Pointer to memory mapped area starting with AllocationRingBuffer struct,
// and immediately followed by Size elements of type Entry.
@@ -1547,7 +1551,12 @@ private:
constexpr u32 kStacksPerRingBufferEntry = 2;
constexpr u32 kMaxU32Pow2 = ~(UINT32_MAX >> 1);
static_assert(isPowerOfTwo(kMaxU32Pow2));
- constexpr u32 kFramesPerStack = 8;
+ // On Android we always have 3 frames at the bottom: __start_main,
+ // __libc_init, main, and 3 at the top: malloc, scudo_malloc and
+ // Allocator::allocate. This leaves 10 frames for the user app. The next
+ // smallest power of two (8) would only leave 2, which is clearly too
+ // little.
+ constexpr u32 kFramesPerStack = 16;
static_assert(isPowerOfTwo(kFramesPerStack));
// We need StackDepot to be aligned to 8-bytes so the ring we store after
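The frame budget behind the new constant works out as follows (a quick check of the comment's arithmetic, not part of the patch):

// 3 bottom frames + 3 top frames = 6 frames consumed by the runtime.
// kFramesPerStack = 16  ->  16 - 6 = 10 frames left for application code.
// kFramesPerStack =  8  ->   8 - 6 =  2 frames left, hence the increase.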
diff --git a/compiler-rt/test/builtins/Unit/aarch64_cpu_features_test.c b/compiler-rt/test/builtins/Unit/aarch64_cpu_features_test.c
new file mode 100644
index 000000000000..7ca2710ea275
--- /dev/null
+++ b/compiler-rt/test/builtins/Unit/aarch64_cpu_features_test.c
@@ -0,0 +1,17 @@
+// REQUIRES: aarch64-target-arch
+// REQUIRES: native-run
+// RUN: %clang_builtins %s %librt -o %t && %run %t
+// REQUIRES: librt_has_aarch64
+int main(void) {
+ if (__builtin_cpu_supports("fp+simd+pmull+sha2+crc")) {
+ if (__builtin_cpu_supports("fp") && __builtin_cpu_supports("simd") &&
+ __builtin_cpu_supports("pmull") && __builtin_cpu_supports("sha2") &&
+ __builtin_cpu_supports("crc")) {
+ return 0;
+ } else {
+ // Something wrong in feature detection
+ return 1;
+ }
+ }
+ return 0;
+}
diff --git a/compiler-rt/test/builtins/Unit/cpu_model_test.c b/compiler-rt/test/builtins/Unit/cpu_model_test.c
index a8b736802f67..6d5f17aa1256 100644
--- a/compiler-rt/test/builtins/Unit/cpu_model_test.c
+++ b/compiler-rt/test/builtins/Unit/cpu_model_test.c
@@ -1,6 +1,6 @@
// REQUIRES: x86-target-arch
// RUN: %clang_builtins %s %librt -o %t && %run %t
-// REQUIRES: librt_has_cpu_model
+// REQUIRES: librt_has_x86
// FIXME: XFAIL the test because it is expected to return non-zero value.
// XFAIL: *
diff --git a/flang/lib/Evaluate/fold-integer.cpp b/flang/lib/Evaluate/fold-integer.cpp
index 0e8706e0f274..25ae4831ab20 100644
--- a/flang/lib/Evaluate/fold-integer.cpp
+++ b/flang/lib/Evaluate/fold-integer.cpp
@@ -704,6 +704,7 @@ Expr<Type<TypeCategory::Integer, KIND>> FoldIntrinsicFunction(
return common::visit(
[&funcRef, &context, &FromInt64](const auto &str) -> Expr<T> {
using Char = typename std::decay_t<decltype(str)>::Result;
+ (void)FromInt64;
return FoldElementalIntrinsic<T, Char>(context,
std::move(funcRef),
ScalarFunc<T, Char>(
diff --git a/flang/lib/Frontend/FrontendActions.cpp b/flang/lib/Frontend/FrontendActions.cpp
index 44e80e946ed8..849b3c8e4dc0 100644
--- a/flang/lib/Frontend/FrontendActions.cpp
+++ b/flang/lib/Frontend/FrontendActions.cpp
@@ -177,7 +177,7 @@ static void addAMDGPUSpecificMLIRItems(mlir::ModuleOp &mlirModule,
return;
}
- mlir::ConversionPatternRewriter builder(mlirModule.getContext());
+ mlir::IRRewriter builder(mlirModule.getContext());
unsigned oclcABIVERsion = codeGenOpts.CodeObjectVersion;
auto int32Type = builder.getI32Type();
diff --git a/flang/lib/Semantics/resolve-directives.cpp b/flang/lib/Semantics/resolve-directives.cpp
index a826f0181e58..215a3c9060a2 100644
--- a/flang/lib/Semantics/resolve-directives.cpp
+++ b/flang/lib/Semantics/resolve-directives.cpp
@@ -26,7 +26,7 @@
template <typename T>
static Fortran::semantics::Scope *GetScope(
Fortran::semantics::SemanticsContext &context, const T &x) {
- std::optional<Fortran::parser::CharBlock> source{GetSource(x)};
+ std::optional<Fortran::parser::CharBlock> source{GetLastSource(x)};
return source ? &context.FindScope(*source) : nullptr;
}
diff --git a/flang/test/Driver/omp-driver-offload.f90 b/flang/test/Driver/omp-driver-offload.f90
index b45ed70195fb..9b62699030c6 100644
--- a/flang/test/Driver/omp-driver-offload.f90
+++ b/flang/test/Driver/omp-driver-offload.f90
@@ -172,13 +172,27 @@
! Check that `-gpulibc` includes the LLVM C libraries for the GPU.
! RUN: %flang -### --target=x86_64-unknown-linux-gnu -fopenmp \
-! RUN: --offload-arch=gfx90a --offload-arch=sm_52 \
+! RUN: --offload-arch=sm_52 \
! RUN: -gpulibc %s 2>&1 \
-! RUN: | FileCheck --check-prefix=LIBC-GPU %s
-! LIBC-GPU: "-lcgpu"{{.*}}"-lmgpu"
+! RUN: | FileCheck --check-prefix=LIBC-GPU-NVPTX %s
+! LIBC-GPU-NVPTX-DAG: "-lcgpu-nvptx"
+! LIBC-GPU-NVPTX-DAG: "-lmgpu-nvptx"
! RUN: %flang -### --target=x86_64-unknown-linux-gnu -fopenmp \
-! RUN: --offload-arch=gfx90a --offload-arch=sm_52 \
+! RUN: --offload-arch=sm_52 \
! RUN: -nogpulibc %s 2>&1 \
-! RUN: | FileCheck --check-prefix=NO-LIBC-GPU %s
-! NO-LIBC-GPU-NOT: "-lcgpu"{{.*}}"-lmgpu"
+! RUN: | FileCheck --check-prefix=NO-LIBC-GPU-NVPTX %s
+! NO-LIBC-GPU-NVPTX-NOT: "-lcgpu-nvptx"
+
+! RUN: %flang -### --target=x86_64-unknown-linux-gnu -fopenmp \
+! RUN: --offload-arch=gfx90a \
+! RUN: -gpulibc %s 2>&1 \
+! RUN: | FileCheck --check-prefix=LIBC-GPU-AMDGPU %s
+! LIBC-GPU-AMDGPU-DAG: "-lcgpu-amdgpu"
+! LIBC-GPU-AMDGPU-DAG: "-lmgpu-amdgpu"
+
+! RUN: %flang -### --target=x86_64-unknown-linux-gnu -fopenmp \
+! RUN: --offload-arch=gfx90a \
+! RUN: -nogpulibc %s 2>&1 \
+! RUN: | FileCheck --check-prefix=NO-LIBC-GPU-AMDGPU %s
+! NO-LIBC-GPU-AMDGPU-NOT: "-lcgpu-amdgpu"
diff --git a/flang/test/Semantics/OpenMP/device-constructs.f90 b/flang/test/Semantics/OpenMP/device-constructs.f90
index 51f00700b6da..1ac00ef922c6 100644
--- a/flang/test/Semantics/OpenMP/device-constructs.f90
+++ b/flang/test/Semantics/OpenMP/device-constructs.f90
@@ -2,9 +2,11 @@
! Check OpenMP clause validity for the following directives:
! 2.10 Device constructs
program main
+ use iso_c_binding
real(8) :: arrayA(256), arrayB(256)
integer :: N
+ type(c_ptr) :: cptr
arrayA = 1.414
arrayB = 3.14
@@ -135,7 +137,15 @@ program main
enddo
!$omp end target data
- !ERROR: At least one of MAP clause must appear on the TARGET DATA directive
+ !$omp target data device(0) use_device_addr(cptr)
+ cptr = c_null_ptr
+ !$omp end target data
+
+ !$omp target data device(0) use_device_addr(cptr)
+ cptr = c_null_ptr
+ !$omp end target data
+
+ !ERROR: At least one of MAP, USE_DEVICE_ADDR, USE_DEVICE_PTR clause must appear on the TARGET DATA directive
!$omp target data device(0)
do i = 1, N
a = 3.14
diff --git a/flang/test/Semantics/OpenMP/struct.f90 b/flang/test/Semantics/OpenMP/struct.f90
new file mode 100644
index 000000000000..8ae1fbe4da86
--- /dev/null
+++ b/flang/test/Semantics/OpenMP/struct.f90
@@ -0,0 +1,7 @@
+! RUN: %python %S/../test_errors.py %s %flang_fc1 -fopenmp
+! Check OpenMP compatibility with the DEC STRUCTURE extension
+
+structure /s/
+end structure
+
+end
diff --git a/flang/tools/bbc/bbc.cpp b/flang/tools/bbc/bbc.cpp
index c9358c83e795..e701fdeb227a 100644
--- a/flang/tools/bbc/bbc.cpp
+++ b/flang/tools/bbc/bbc.cpp
@@ -354,10 +354,11 @@ static mlir::LogicalResult convertFortranSourceToMLIR(
loweringOptions.setPolymorphicTypeImpl(enablePolymorphic);
loweringOptions.setNoPPCNativeVecElemOrder(enableNoPPCNativeVecElemOrder);
loweringOptions.setLowerToHighLevelFIR(useHLFIR || emitHLFIR);
+ std::vector<Fortran::lower::EnvironmentDefault> envDefaults = {};
auto burnside = Fortran::lower::LoweringBridge::create(
ctx, semanticsContext, defKinds, semanticsContext.intrinsics(),
semanticsContext.targetCharacteristics(), parsing.allCooked(),
- targetTriple, kindMap, loweringOptions, {},
+ targetTriple, kindMap, loweringOptions, envDefaults,
semanticsContext.languageFeatures(), targetMachine);
burnside.lower(parseTree, semanticsContext);
mlir::ModuleOp mlirModule = burnside.getModule();
diff --git a/libc/CMakeLists.txt b/libc/CMakeLists.txt
index 3d7757366167..6a57fcec26e4 100644
--- a/libc/CMakeLists.txt
+++ b/libc/CMakeLists.txt
@@ -43,7 +43,7 @@ set(LIBC_NAMESPACE "__llvm_libc_${LLVM_VERSION_MAJOR}_${LLVM_VERSION_MINOR}_${LL
CACHE STRING "The namespace to use to enclose internal implementations. Must start with '__llvm_libc'."
)
-if(LLVM_LIBC_FULL_BUILD OR LIBC_GPU_BUILD OR LIBC_GPU_ARCHITECTURES)
+if(LLVM_LIBC_FULL_BUILD OR LLVM_LIBC_GPU_BUILD)
if(NOT LIBC_HDRGEN_EXE)
# We need to set up hdrgen first since other targets depend on it.
add_subdirectory(utils/LibcTableGenUtil)
@@ -57,19 +57,32 @@ if(LLVM_LIBC_FULL_BUILD OR LIBC_GPU_BUILD OR LIBC_GPU_ARCHITECTURES)
endif()
endif()
+set(NEED_LIBC_HDRGEN FALSE)
+if(NOT LLVM_RUNTIMES_BUILD)
+ if("libc" IN_LIST LLVM_ENABLE_RUNTIMES)
+ set(NEED_LIBC_HDRGEN TRUE)
+ else()
+ foreach(_name ${LLVM_RUNTIME_TARGETS})
+ if("libc" IN_LIST RUNTIMES_${_name}_LLVM_ENABLE_RUNTIMES)
+ set(NEED_LIBC_HDRGEN TRUE)
+ break()
+ endif()
+ endforeach()
+ endif()
+endif()
option(LIBC_HDRGEN_ONLY "Only build the 'libc-hdrgen' executable" OFF)
-if(("libc" IN_LIST LLVM_ENABLE_RUNTIMES AND NOT LLVM_RUNTIMES_BUILD) OR
- LIBC_HDRGEN_ONLY)
+if(LIBC_HDRGEN_ONLY OR NEED_LIBC_HDRGEN)
  # When libc is built as part of the runtimes/bootstrap build's CMake run, we
  # only need to build the host tools to build the libc. So, we just do enough
  # to build libc-hdrgen and return.
  # Always make the RPC server available to other projects for GPU mode.
- if(LIBC_GPU_BUILD OR LIBC_GPU_ARCHITECTURES)
+ if(LLVM_LIBC_GPU_BUILD)
add_subdirectory(utils/gpu/server)
endif()
return()
endif()
+unset(NEED_LIBC_HDRGEN)
option(LIBC_CMAKE_VERBOSE_LOGGING
"Log details warnings and notifications during CMake configuration." OFF)
@@ -105,7 +118,7 @@ if(COMMAND_RETURN_CODE EQUAL 0)
message(STATUS "Set COMPILER_RESOURCE_DIR to "
"${COMPILER_RESOURCE_DIR} using --print-resource-dir")
else()
- if (LIBC_TARGET_ARCHITECTURE_IS_GPU)
+ if (LIBC_TARGET_OS_IS_GPU)
message(FATAL_ERROR "COMPILER_RESOURCE_DIR must be set for GPU builds")
else()
set(COMPILER_RESOURCE_DIR OFF)
@@ -203,11 +216,7 @@ foreach(config_path IN LISTS LIBC_CONFIG_JSON_FILE_LIST)
load_libc_config(${config_path}/config.json ${cmd_line_conf})
endforeach()
-if(LIBC_TARGET_ARCHITECTURE_IS_GPU)
- set(LIBC_INCLUDE_DIR ${CMAKE_CURRENT_BINARY_DIR}/include)
- set(LIBC_INSTALL_INCLUDE_DIR ${CMAKE_INSTALL_INCLUDEDIR}/gpu-none-llvm)
- set(LIBC_LIBRARY_DIR ${LLVM_LIBRARY_OUTPUT_INTDIR})
-elseif(LLVM_ENABLE_PER_TARGET_RUNTIME_DIR AND LIBC_ENABLE_USE_BY_CLANG)
+if(LLVM_ENABLE_PER_TARGET_RUNTIME_DIR AND LIBC_ENABLE_USE_BY_CLANG)
set(LIBC_INCLUDE_DIR ${LLVM_BINARY_DIR}/include/${LLVM_DEFAULT_TARGET_TRIPLE})
set(LIBC_INSTALL_INCLUDE_DIR ${CMAKE_INSTALL_INCLUDEDIR}/${LLVM_DEFAULT_TARGET_TRIPLE})
set(LIBC_LIBRARY_DIR ${LLVM_LIBRARY_OUTPUT_INTDIR}/${LLVM_DEFAULT_TARGET_TRIPLE})
@@ -222,10 +231,23 @@ else()
set(LIBC_INCLUDE_DIR ${CMAKE_BINARY_DIR}/include)
set(LIBC_LIBRARY_DIR ${CMAKE_BINARY_DIR}/lib${LLVM_LIBDIR_SUFFIX})
endif()
- set(LIBC_INSTALL_INCLUDE_DIR ${CMAKE_INSTALL_INCLUDEDIR})
+ if(LIBC_TARGET_OS_IS_GPU)
+ set(LIBC_INSTALL_INCLUDE_DIR ${CMAKE_INSTALL_INCLUDEDIR}/${LLVM_DEFAULT_TARGET_TRIPLE})
+ else()
+ set(LIBC_INSTALL_INCLUDE_DIR ${CMAKE_INSTALL_INCLUDEDIR})
+ endif()
+endif()
+
+if(LIBC_TARGET_TRIPLE)
+ set(LIBC_INSTALL_LIBRARY_DIR lib${LLVM_LIBDIR_SUFFIX}/${LIBC_TARGET_TRIPLE})
+elseif(LLVM_ENABLE_PER_TARGET_RUNTIME_DIR AND NOT LIBC_GPU_BUILD)
+ set(LIBC_INSTALL_LIBRARY_DIR
+ lib${LLVM_LIBDIR_SUFFIX}/${LLVM_DEFAULT_TARGET_TRIPLE})
+else()
+ set(LIBC_INSTALL_LIBRARY_DIR lib${LLVM_LIBDIR_SUFFIX})
endif()
-if(LIBC_TARGET_ARCHITECTURE_IS_GPU)
+if(LIBC_TARGET_OS_IS_GPU)
include(prepare_libc_gpu_build)
set(LIBC_ENABLE_UNITTESTS OFF)
endif()
diff --git a/libc/cmake/modules/LLVMLibCArchitectures.cmake b/libc/cmake/modules/LLVMLibCArchitectures.cmake
index 623ed774be72..0dbc59ad643a 100644
--- a/libc/cmake/modules/LLVMLibCArchitectures.cmake
+++ b/libc/cmake/modules/LLVMLibCArchitectures.cmake
@@ -6,18 +6,6 @@
# platform.
# ------------------------------------------------------------------------------
-if(LIBC_GPU_BUILD OR LIBC_GPU_ARCHITECTURES)
- # We set the generic target and OS to "gpu" here. More specific defintions
- # for the exact target GPU are set up in prepare_libc_gpu_build.cmake.
- set(LIBC_TARGET_OS "gpu")
- set(LIBC_TARGET_ARCHITECTURE_IS_GPU TRUE)
- set(LIBC_TARGET_ARCHITECTURE "gpu")
- if(LIBC_TARGET_TRIPLE)
- message(WARNING "LIBC_TARGET_TRIPLE is ignored as LIBC_GPU_BUILD is on. ")
- endif()
- return()
-endif()
-
if(MSVC)
# If the compiler is visual c++ or equivalent, we will assume a host build.
set(LIBC_TARGET_OS ${CMAKE_HOST_SYSTEM_NAME})
@@ -59,6 +47,10 @@ function(get_arch_and_system_from_triple triple arch_var sys_var)
set(target_arch "riscv32")
elseif(target_arch MATCHES "^riscv64")
set(target_arch "riscv64")
+ elseif(target_arch MATCHES "^amdgcn")
+ set(target_arch "amdgpu")
+ elseif(target_arch MATCHES "^nvptx64")
+ set(target_arch "nvptx")
else()
return()
endif()
@@ -75,6 +67,12 @@ function(get_arch_and_system_from_triple triple arch_var sys_var)
set(target_sys "darwin")
endif()
+ # Setting OS name for GPU architectures.
+ list(GET triple_comps -1 gpu_target_sys)
+ if(gpu_target_sys MATCHES "^amdhsa" OR gpu_target_sys MATCHES "^cuda")
+ set(target_sys "gpu")
+ endif()
+
set(${sys_var} ${target_sys} PARENT_SCOPE)
endfunction(get_arch_and_system_from_triple)
@@ -156,6 +154,10 @@ elseif(LIBC_TARGET_ARCHITECTURE STREQUAL "riscv64")
elseif(LIBC_TARGET_ARCHITECTURE STREQUAL "riscv32")
set(LIBC_TARGET_ARCHITECTURE_IS_RISCV32 TRUE)
set(LIBC_TARGET_ARCHITECTURE "riscv")
+elseif(LIBC_TARGET_ARCHITECTURE STREQUAL "amdgpu")
+ set(LIBC_TARGET_ARCHITECTURE_IS_AMDGPU TRUE)
+elseif(LIBC_TARGET_ARCHITECTURE STREQUAL "nvptx")
+ set(LIBC_TARGET_ARCHITECTURE_IS_NVPTX TRUE)
else()
message(FATAL_ERROR
"Unsupported libc target architecture ${LIBC_TARGET_ARCHITECTURE}")
@@ -178,6 +180,8 @@ elseif(LIBC_TARGET_OS STREQUAL "darwin")
set(LIBC_TARGET_OS_IS_DARWIN TRUE)
elseif(LIBC_TARGET_OS STREQUAL "windows")
set(LIBC_TARGET_OS_IS_WINDOWS TRUE)
+elseif(LIBC_TARGET_OS STREQUAL "gpu")
+ set(LIBC_TARGET_OS_IS_GPU TRUE)
else()
message(FATAL_ERROR
"Unsupported libc target operating system ${LIBC_TARGET_OS}")
diff --git a/libc/cmake/modules/LLVMLibCCheckMPFR.cmake b/libc/cmake/modules/LLVMLibCCheckMPFR.cmake
index 9e361f5fd811..bbaeb9f0dc05 100644
--- a/libc/cmake/modules/LLVMLibCCheckMPFR.cmake
+++ b/libc/cmake/modules/LLVMLibCCheckMPFR.cmake
@@ -2,7 +2,7 @@ set(LLVM_LIBC_MPFR_INSTALL_PATH "" CACHE PATH "Path to where MPFR is installed (
if(LLVM_LIBC_MPFR_INSTALL_PATH)
set(LIBC_TESTS_CAN_USE_MPFR TRUE)
-elseif(LIBC_TARGET_ARCHITECTURE_IS_GPU)
+elseif(LIBC_TARGET_OS_IS_GPU)
set(LIBC_TESTS_CAN_USE_MPFR FALSE)
else()
try_compile(
diff --git a/libc/cmake/modules/LLVMLibCCompileOptionRules.cmake b/libc/cmake/modules/LLVMLibCCompileOptionRules.cmake
index 140e4d51a9c2..408e25b3469c 100644
--- a/libc/cmake/modules/LLVMLibCCompileOptionRules.cmake
+++ b/libc/cmake/modules/LLVMLibCCompileOptionRules.cmake
@@ -82,10 +82,22 @@ function(_get_common_compile_options output_var flags)
list(APPEND compile_options "/EHs-c-")
list(APPEND compile_options "/GR-")
endif()
- if (LIBC_TARGET_ARCHITECTURE_IS_GPU)
+ if (LIBC_TARGET_OS_IS_GPU)
list(APPEND compile_options "-nogpulib")
list(APPEND compile_options "-fvisibility=hidden")
list(APPEND compile_options "-fconvergent-functions")
+ list(APPEND compile_options "-flto")
+
+ if(LIBC_TARGET_ARCHITECTURE_IS_NVPTX)
+ list(APPEND compile_options "-Wno-unknown-cuda-version")
+ list(APPEND compile_options "SHELL:-mllvm -nvptx-emit-init-fini-kernel=false")
+ list(APPEND compile_options "--cuda-feature=+ptx63")
+ if(LIBC_CUDA_ROOT)
+ list(APPEND compile_options "--cuda-path=${LIBC_CUDA_ROOT}")
+ endif()
+ elseif(LIBC_TARGET_ARCHITECTURE_IS_AMDGPU)
+ list(APPEND compile_options "SHELL:-Xclang -mcode-object-version=none")
+ endif()
# Manually disable all standard include paths and include the resource
# directory to prevent system headers from being included.
@@ -138,73 +150,21 @@ function(_get_common_test_compile_options output_var flags)
set(${output_var} ${compile_options} PARENT_SCOPE)
endfunction()
-# Obtains NVPTX specific arguments for compilation.
-# The PTX feature is primarily based on the CUDA toolchain version. We want to
-# be able to target NVPTX without an existing CUDA installation, so we need to
-# set this manually. This simply sets the PTX feature to the minimum required
-# for the features we wish to use on that target. The minimum PTX features used
-# here roughly corresponds to the CUDA 9.0 release.
-# Adjust as needed for desired PTX features.
-function(get_nvptx_compile_options output_var gpu_arch)
- set(nvptx_options "")
- list(APPEND nvptx_options "-march=${gpu_arch}")
- list(APPEND nvptx_options "-Wno-unknown-cuda-version")
- list(APPEND nvptx_options "SHELL:-mllvm -nvptx-emit-init-fini-kernel=false")
- if(${gpu_arch} STREQUAL "sm_35")
- list(APPEND nvptx_options "--cuda-feature=+ptx63")
- elseif(${gpu_arch} STREQUAL "sm_37")
- list(APPEND nvptx_options "--cuda-feature=+ptx63")
- elseif(${gpu_arch} STREQUAL "sm_50")
- list(APPEND nvptx_options "--cuda-feature=+ptx63")
- elseif(${gpu_arch} STREQUAL "sm_52")
- list(APPEND nvptx_options "--cuda-feature=+ptx63")
- elseif(${gpu_arch} STREQUAL "sm_53")
- list(APPEND nvptx_options "--cuda-feature=+ptx63")
- elseif(${gpu_arch} STREQUAL "sm_60")
- list(APPEND nvptx_options "--cuda-feature=+ptx63")
- elseif(${gpu_arch} STREQUAL "sm_61")
- list(APPEND nvptx_options "--cuda-feature=+ptx63")
- elseif(${gpu_arch} STREQUAL "sm_62")
- list(APPEND nvptx_options "--cuda-feature=+ptx63")
- elseif(${gpu_arch} STREQUAL "sm_70")
- list(APPEND nvptx_options "--cuda-feature=+ptx63")
- elseif(${gpu_arch} STREQUAL "sm_72")
- list(APPEND nvptx_options "--cuda-feature=+ptx63")
- elseif(${gpu_arch} STREQUAL "sm_75")
- list(APPEND nvptx_options "--cuda-feature=+ptx63")
- elseif(${gpu_arch} STREQUAL "sm_80")
- list(APPEND nvptx_options "--cuda-feature=+ptx72")
- elseif(${gpu_arch} STREQUAL "sm_86")
- list(APPEND nvptx_options "--cuda-feature=+ptx72")
- elseif(${gpu_arch} STREQUAL "sm_89")
- list(APPEND nvptx_options "--cuda-feature=+ptx72")
- elseif(${gpu_arch} STREQUAL "sm_90")
- list(APPEND nvptx_options "--cuda-feature=+ptx72")
- else()
- message(FATAL_ERROR "Unknown Nvidia GPU architecture '${gpu_arch}'")
- endif()
-
- if(LIBC_CUDA_ROOT)
- list(APPEND nvptx_options "--cuda-path=${LIBC_CUDA_ROOT}")
- endif()
- set(${output_var} ${nvptx_options} PARENT_SCOPE)
-endfunction()
-
function(_get_hermetic_test_compile_options output_var flags)
_get_compile_options_from_flags(compile_flags ${flags})
list(APPEND compile_options ${LIBC_COMPILE_OPTIONS_DEFAULT} ${compile_flags}
${flags} -fpie -ffreestanding -fno-exceptions -fno-rtti)
# The GPU build requires overriding the default CMake triple and architecture.
- if(LIBC_GPU_TARGET_ARCHITECTURE_IS_AMDGPU)
+ if(LIBC_TARGET_ARCHITECTURE_IS_AMDGPU)
list(APPEND compile_options
- -nogpulib -mcpu=${LIBC_GPU_TARGET_ARCHITECTURE} -flto
- --target=${LIBC_GPU_TARGET_TRIPLE}
+ -Wno-multi-gpu -nogpulib -mcpu=${LIBC_GPU_TARGET_ARCHITECTURE} -flto
-mcode-object-version=${LIBC_GPU_CODE_OBJECT_VERSION})
- elseif(LIBC_GPU_TARGET_ARCHITECTURE_IS_NVPTX)
- get_nvptx_compile_options(nvptx_options ${LIBC_GPU_TARGET_ARCHITECTURE})
+ elseif(LIBC_TARGET_ARCHITECTURE_IS_NVPTX)
list(APPEND compile_options
- -nogpulib ${nvptx_options} -fno-use-cxa-atexit --target=${LIBC_GPU_TARGET_TRIPLE})
+ "SHELL:-mllvm -nvptx-emit-init-fini-kernel=false"
+ -Wno-multi-gpu --cuda-path=${LIBC_CUDA_ROOT}
+ -nogpulib -march=${LIBC_GPU_TARGET_ARCHITECTURE} -fno-use-cxa-atexit)
endif()
set(${output_var} ${compile_options} PARENT_SCOPE)
endfunction()
diff --git a/libc/cmake/modules/LLVMLibCHeaderRules.cmake b/libc/cmake/modules/LLVMLibCHeaderRules.cmake
index 9e9b598721ab..19515b1cbcc1 100644
--- a/libc/cmake/modules/LLVMLibCHeaderRules.cmake
+++ b/libc/cmake/modules/LLVMLibCHeaderRules.cmake
@@ -139,7 +139,7 @@ function(add_gen_header target_name)
${hdrgen_deps}
)
- if(LIBC_TARGET_ARCHITECTURE_IS_GPU)
+ if(LIBC_TARGET_OS_IS_GPU)
file(MAKE_DIRECTORY ${LIBC_INCLUDE_DIR}/llvm-libc-decls)
set(decl_out_file ${LIBC_INCLUDE_DIR}/llvm-libc-decls/${relative_path})
add_custom_command(
diff --git a/libc/cmake/modules/LLVMLibCLibraryRules.cmake b/libc/cmake/modules/LLVMLibCLibraryRules.cmake
index 81c207ec2317..f15ffd5f9c21 100644
--- a/libc/cmake/modules/LLVMLibCLibraryRules.cmake
+++ b/libc/cmake/modules/LLVMLibCLibraryRules.cmake
@@ -50,31 +50,9 @@ function(collect_object_file_deps target result)
endif()
endfunction(collect_object_file_deps)
-# A rule to build a library from a collection of entrypoint objects.
-# Usage:
-# add_entrypoint_library(
-# DEPENDS <list of add_entrypoint_object targets>
-# )
-#
-# NOTE: If one wants an entrypoint to be available in a library, then they will
-# have to list the entrypoint target explicitly in the DEPENDS list. Implicit
-# entrypoint dependencies will not be added to the library.
-function(add_entrypoint_library target_name)
- cmake_parse_arguments(
- "ENTRYPOINT_LIBRARY"
- "" # No optional arguments
- "" # No single value arguments
- "DEPENDS" # Multi-value arguments
- ${ARGN}
- )
- if(NOT ENTRYPOINT_LIBRARY_DEPENDS)
- message(FATAL_ERROR "'add_entrypoint_library' target requires a DEPENDS list "
- "of 'add_entrypoint_object' targets.")
- endif()
-
- get_fq_deps_list(fq_deps_list ${ENTRYPOINT_LIBRARY_DEPENDS})
+function(get_all_object_file_deps result fq_deps_list)
set(all_deps "")
- foreach(dep IN LISTS fq_deps_list)
+ foreach(dep ${fq_deps_list})
get_target_property(dep_type ${dep} "TARGET_TYPE")
if(NOT ((${dep_type} STREQUAL ${ENTRYPOINT_OBJ_TARGET_TYPE}) OR
(${dep_type} STREQUAL ${ENTRYPOINT_EXT_TARGET_TYPE}) OR
@@ -102,6 +80,121 @@ function(add_entrypoint_library target_name)
list(APPEND all_deps ${entrypoint_target})
endforeach(dep)
list(REMOVE_DUPLICATES all_deps)
+ set(${result} ${all_deps} PARENT_SCOPE)
+endfunction()
+
+# A rule to build a library from a collection of entrypoint objects and bundle
+# it into a GPU fatbinary. Usage is the same as 'add_entrypoint_library'.
+# Usage:
+# add_gpu_entrypoint_library(
+# DEPENDS <list of add_entrypoint_object targets>
+# )
+function(add_gpu_entrypoint_library target_name)
+ cmake_parse_arguments(
+ "ENTRYPOINT_LIBRARY"
+ "" # No optional arguments
+ "" # No single value arguments
+ "DEPENDS" # Multi-value arguments
+ ${ARGN}
+ )
+ if(NOT ENTRYPOINT_LIBRARY_DEPENDS)
+ message(FATAL_ERROR "'add_entrypoint_library' target requires a DEPENDS list "
+ "of 'add_entrypoint_object' targets.")
+ endif()
+
+ get_fq_deps_list(fq_deps_list ${ENTRYPOINT_LIBRARY_DEPENDS})
+ get_all_object_file_deps(all_deps "${fq_deps_list}")
+
+ # The GPU 'libc' needs to be exported in a format that can be linked with
+  # offloading languages like OpenMP or CUDA. This wraps every GPU object into a
+  # fat binary and adds it to a static library.
+ set(objects "")
+ foreach(dep IN LISTS all_deps)
+ set(object $<$<STREQUAL:$<TARGET_NAME_IF_EXISTS:${dep}>,${dep}>:$<TARGET_OBJECTS:${dep}>>)
+ string(FIND ${dep} "." last_dot_loc REVERSE)
+ math(EXPR name_loc "${last_dot_loc} + 1")
+ string(SUBSTRING ${dep} ${name_loc} -1 name)
+ if(LIBC_TARGET_ARCHITECTURE_IS_NVPTX)
+ set(prefix --image=arch=generic,triple=nvptx64-nvidia-cuda,feature=+ptx63)
+ elseif(LIBC_TARGET_ARCHITECTURE_IS_AMDGPU)
+ set(prefix --image=arch=generic,triple=amdgcn-amd-amdhsa)
+ endif()
+
+ # Use the 'clang-offload-packager' to merge these files into a binary blob.
+ add_custom_command(
+ OUTPUT "${CMAKE_CURRENT_BINARY_DIR}/binary/${name}.gpubin"
+ COMMAND ${CMAKE_COMMAND} -E make_directory ${CMAKE_CURRENT_BINARY_DIR}/binary
+ COMMAND ${LIBC_CLANG_OFFLOAD_PACKAGER}
+ "${prefix},file=$<JOIN:${object},,file=>" -o
+ ${CMAKE_CURRENT_BINARY_DIR}/binary/${name}.gpubin
+ DEPENDS ${dep}
+ COMMENT "Packaging LLVM offloading binary for '${object}'"
+ )
+ add_custom_target(${dep}.__gpubin__ DEPENDS ${dep}
+ "${CMAKE_CURRENT_BINARY_DIR}/binary/${name}.gpubin")
+
+ # CMake does not permit setting the name on object files. In order to have
+ # human readable names we create an empty stub file with the entrypoint
+ # name. This empty file will then have the created binary blob embedded.
+ add_custom_command(
+ OUTPUT "${CMAKE_CURRENT_BINARY_DIR}/stubs/${name}.cpp"
+ COMMAND ${CMAKE_COMMAND} -E make_directory ${CMAKE_CURRENT_BINARY_DIR}/stubs
+ COMMAND ${CMAKE_COMMAND} -E touch ${CMAKE_CURRENT_BINARY_DIR}/stubs/${name}.cpp
+ DEPENDS ${dep} ${dep}.__gpubin__
+ )
+ add_custom_target(${dep}.__stub__
+ DEPENDS ${dep}.__gpubin__ "${CMAKE_CURRENT_BINARY_DIR}/stubs/${name}.cpp")
+
+ add_library(${dep}.__fatbin__
+ EXCLUDE_FROM_ALL OBJECT
+ "${CMAKE_CURRENT_BINARY_DIR}/stubs/${name}.cpp"
+ )
+
+ # This is always compiled for the LLVM host triple instead of the native GPU
+ # triple that is used by default in the build.
+ target_compile_options(${dep}.__fatbin__ BEFORE PRIVATE -nostdlib)
+ target_compile_options(${dep}.__fatbin__ PRIVATE
+ --target=${LLVM_HOST_TRIPLE}
+ "SHELL:-Xclang -fembed-offload-object=${CMAKE_CURRENT_BINARY_DIR}/binary/${name}.gpubin")
+ add_dependencies(${dep}.__fatbin__ ${dep} ${dep}.__stub__ ${dep}.__gpubin__)
+
+  # Set the list of newly created fat binaries containing embedded device code.
+ list(APPEND objects $<TARGET_OBJECTS:${dep}.__fatbin__>)
+ endforeach()
+
+ add_library(
+ ${target_name}
+ STATIC
+ ${objects}
+ )
+ set_target_properties(${target_name} PROPERTIES LIBRARY_OUTPUT_DIRECTORY ${LIBC_LIBRARY_DIR})
+endfunction(add_gpu_entrypoint_library)
+
+# A rule to build a library from a collection of entrypoint objects.
+# Usage:
+# add_entrypoint_library(
+# DEPENDS <list of add_entrypoint_object targets>
+# )
+#
+# NOTE: If one wants an entrypoint to be available in a library, then they will
+# have to list the entrypoint target explicitly in the DEPENDS list. Implicit
+# entrypoint dependencies will not be added to the library.
+function(add_entrypoint_library target_name)
+ cmake_parse_arguments(
+ "ENTRYPOINT_LIBRARY"
+ "" # No optional arguments
+ "" # No single value arguments
+ "DEPENDS" # Multi-value arguments
+ ${ARGN}
+ )
+ if(NOT ENTRYPOINT_LIBRARY_DEPENDS)
+ message(FATAL_ERROR "'add_entrypoint_library' target requires a DEPENDS list "
+ "of 'add_entrypoint_object' targets.")
+ endif()
+
+ get_fq_deps_list(fq_deps_list ${ENTRYPOINT_LIBRARY_DEPENDS})
+ get_all_object_file_deps(all_deps "${fq_deps_list}")
+
set(objects "")
foreach(dep IN LISTS all_deps)
list(APPEND objects $<$<STREQUAL:$<TARGET_NAME_IF_EXISTS:${dep}>,${dep}>:$<TARGET_OBJECTS:${dep}>>)
diff --git a/libc/cmake/modules/LLVMLibCObjectRules.cmake b/libc/cmake/modules/LLVMLibCObjectRules.cmake
index 308ba7d0d5dd..78536f4eec55 100644
--- a/libc/cmake/modules/LLVMLibCObjectRules.cmake
+++ b/libc/cmake/modules/LLVMLibCObjectRules.cmake
@@ -1,175 +1,5 @@
set(OBJECT_LIBRARY_TARGET_TYPE "OBJECT_LIBRARY")
-# Build the object target for a single GPU arch.
-# Usage:
-# _build_gpu_object_for_single_arch(
-# <target_name>
-# <gpu_arch>
-# SRCS <list of .cpp files>
-# HDRS <list of .h files>
-# DEPENDS <list of dependencies>
-# COMPILE_OPTIONS <optional list of special compile options for this target>
-# FLAGS <optional list of flags>
-# )
-function(_build_gpu_object_for_single_arch fq_target_name gpu_arch)
- cmake_parse_arguments(
- "ADD_GPU_OBJ"
- "" # No optional arguments
- "NAME;CXX_STANDARD" # Single value arguments
- "SRCS;HDRS;DEPENDS;COMPILE_OPTIONS;FLAGS" # Multi value arguments
- ${ARGN}
- )
-
- if(NOT ADD_GPU_OBJ_CXX_STANDARD)
- set(ADD_GPU_OBJ_CXX_STANDARD ${CMAKE_CXX_STANDARD})
- endif()
-
- set(compile_options ${ADD_GPU_OBJ_COMPILE_OPTIONS})
- # Derive the triple from the specified architecture.
- if("${gpu_arch}" IN_LIST all_amdgpu_architectures)
- set(gpu_target_triple ${AMDGPU_TARGET_TRIPLE})
- list(APPEND compile_options "-mcpu=${gpu_arch}")
- list(APPEND compile_options "SHELL:-Xclang -mcode-object-version=none")
- list(APPEND compile_options "-emit-llvm")
- elseif("${gpu_arch}" IN_LIST all_nvptx_architectures)
- set(gpu_target_triple ${NVPTX_TARGET_TRIPLE})
- get_nvptx_compile_options(nvptx_options ${gpu_arch})
- list(APPEND compile_options "${nvptx_options}")
- else()
- message(FATAL_ERROR "Unknown GPU architecture '${gpu_arch}'")
- endif()
- list(APPEND compile_options "--target=${gpu_target_triple}")
-
- # Build the library for this target architecture. We always emit LLVM-IR for
- # packaged GPU binaries.
- add_library(${fq_target_name}
- EXCLUDE_FROM_ALL
- OBJECT
- ${ADD_GPU_OBJ_SRCS}
- ${ADD_GPU_OBJ_HDRS}
- )
-
- target_compile_options(${fq_target_name} PRIVATE ${compile_options})
- target_include_directories(${fq_target_name} SYSTEM PRIVATE ${LIBC_INCLUDE_DIR})
- target_include_directories(${fq_target_name} PRIVATE ${LIBC_SOURCE_DIR})
- set_target_properties(${fq_target_name} PROPERTIES CXX_STANDARD ${ADD_GPU_OBJ_CXX_STANDARD})
- if(ADD_GPU_OBJ_DEPENDS)
- add_dependencies(${fq_target_name} ${ADD_GPU_OBJ_DEPENDS})
- set_target_properties(${fq_target_name} PROPERTIES DEPS "${ADD_GPU_OBJ_DEPENDS}")
- endif()
-endfunction(_build_gpu_object_for_single_arch)
-
-# Build the object target for the GPU.
-# This compiles the target for all supported architectures and embeds it into
-# host binary for installing.
-# Usage:
-# _build_gpu_object_bundle(
-# <target_name>
-# SRCS <list of .cpp files>
-# HDRS <list of .h files>
-# DEPENDS <list of dependencies>
-# COMPILE_OPTIONS <optional list of special compile options for this target>
-# FLAGS <optional list of flags>
-# )
-function(_build_gpu_object_bundle fq_target_name)
- cmake_parse_arguments(
- "ADD_GPU_OBJ"
- "" # No optional arguments
- "NAME;CXX_STANDARD" # Single value arguments
- "SRCS;HDRS;DEPENDS;COMPILE_OPTIONS;FLAGS" # Multi value arguments
- ${ARGN}
- )
-
- if(NOT ADD_GPU_OBJ_CXX_STANDARD)
- set(ADD_GPU_OBJ_CXX_STANDARD ${CMAKE_CXX_STANDARD})
- endif()
-
- foreach(add_gpu_obj_src ${ADD_GPU_OBJ_SRCS})
- # The packaged version will be built for every target GPU architecture. We do
- # this so we can support multiple accelerators on the same machine.
- foreach(gpu_arch ${LIBC_GPU_ARCHITECTURES})
- get_filename_component(src_name ${add_gpu_obj_src} NAME)
- set(gpu_target_name ${fq_target_name}.${src_name}.${gpu_arch})
-
- _build_gpu_object_for_single_arch(
- ${gpu_target_name}
- ${gpu_arch}
- CXX_STANDARD ${ADD_GPU_OBJ_CXX_STANDARD}
- HDRS ${ADD_GPU_OBJ_HDRS}
- SRCS ${add_gpu_obj_src}
- COMPILE_OPTIONS
- ${ADD_GPU_OBJ_COMPILE_OPTIONS}
- "-emit-llvm"
- DEPENDS ${ADD_GPU_OBJ_DEPENDS}
- )
- # Append this target to a list of images to package into a single binary.
- set(input_file $<TARGET_OBJECTS:${gpu_target_name}>)
- if("${gpu_arch}" IN_LIST all_nvptx_architectures)
- get_nvptx_compile_options(nvptx_options ${gpu_arch})
- string(REGEX MATCH "\\+ptx[0-9]+" nvptx_ptx_feature ${nvptx_options})
- list(APPEND packager_images
- --image=file=${input_file},arch=${gpu_arch},triple=${NVPTX_TARGET_TRIPLE},feature=${nvptx_ptx_feature})
- else()
- list(APPEND packager_images
- --image=file=${input_file},arch=${gpu_arch},triple=${AMDGPU_TARGET_TRIPLE})
- endif()
- list(APPEND gpu_target_objects ${input_file})
- endforeach()
-
- # After building the target for the desired GPUs we must package the output
- # into a fatbinary, see https://clang.llvm.org/docs/OffloadingDesign.html for
- # more information.
- set(packaged_target_name ${fq_target_name}.${src_name}.__gpu__)
- set(packaged_output_name ${CMAKE_CURRENT_BINARY_DIR}/${fq_target_name}.${src_name}.gpubin)
-
- add_custom_command(OUTPUT ${packaged_output_name}
- COMMAND ${LIBC_CLANG_OFFLOAD_PACKAGER}
- ${packager_images} -o ${packaged_output_name}
- DEPENDS ${gpu_target_objects} ${add_gpu_obj_src} ${ADD_GPU_OBJ_HDRS}
- COMMENT "Packaging LLVM offloading binary")
- add_custom_target(${packaged_target_name} DEPENDS ${packaged_output_name})
- list(APPEND packaged_gpu_names ${packaged_target_name})
- list(APPEND packaged_gpu_binaries ${packaged_output_name})
- endforeach()
-
- # We create an empty 'stub' file for the host to contain the embedded device
- # code. This will be packaged into 'libcgpu.a'.
- # TODO: In the future we will want to combine every architecture for a target
- # into a single bitcode file and use that. For now we simply build for
- # every single one and let the offloading linker handle it.
- string(FIND ${fq_target_name} "." last_dot_loc REVERSE)
- math(EXPR name_loc "${last_dot_loc} + 1")
- string(SUBSTRING ${fq_target_name} ${name_loc} -1 target_name)
- set(stub_filename "${target_name}.cpp")
- add_custom_command(
- OUTPUT "${CMAKE_CURRENT_BINARY_DIR}/stubs/${stub_filename}"
- COMMAND ${CMAKE_COMMAND} -E make_directory ${CMAKE_CURRENT_BINARY_DIR}/stubs/
- COMMAND ${CMAKE_COMMAND} -E touch ${CMAKE_CURRENT_BINARY_DIR}/stubs/${stub_filename}
- DEPENDS ${gpu_target_objects} ${ADD_GPU_OBJ_SRCS} ${ADD_GPU_OBJ_HDRS}
- )
- set(stub_target_name ${fq_target_name}.__stub__)
- add_custom_target(${stub_target_name} DEPENDS ${CMAKE_CURRENT_BINARY_DIR}/stubs/${stub_filename})
-
- add_library(
- ${fq_target_name}
- # We want an object library as the objects will eventually get packaged into
- # an archive (like libcgpu.a).
- EXCLUDE_FROM_ALL
- OBJECT
- ${CMAKE_CURRENT_BINARY_DIR}/stubs/${stub_filename}
- )
- target_compile_options(${fq_target_name} BEFORE PRIVATE
- ${ADD_GPU_OBJ_COMPILE_OPTIONS} -nostdlib)
- foreach(packaged_gpu_binary ${packaged_gpu_binaries})
- target_compile_options(${fq_target_name} PRIVATE
- "SHELL:-Xclang -fembed-offload-object=${packaged_gpu_binary}")
- endforeach()
- target_include_directories(${fq_target_name} SYSTEM PRIVATE ${LIBC_INCLUDE_DIR})
- target_include_directories(${fq_target_name} PRIVATE ${LIBC_SOURCE_DIR})
- add_dependencies(${fq_target_name}
- ${full_deps_list} ${packaged_gpu_names} ${stub_target_name})
-endfunction()
-
# Rule which is essentially a wrapper over add_library to compile a set of
# sources to object files.
# Usage:
@@ -214,53 +44,37 @@ function(create_object_library fq_target_name)
message(FATAL_ERROR "'add_object_library' rule requires SRCS to be specified.")
endif()
- # The GPU build uses a separate internal file.
- if(LIBC_TARGET_ARCHITECTURE_IS_GPU AND NOT ${ADD_OBJECT_NO_GPU_BUNDLE})
- set(internal_target_name ${fq_target_name}.__internal__)
- set(public_packaging_for_internal "")
- else()
- set(internal_target_name ${fq_target_name})
- set(public_packaging_for_internal "-DLIBC_COPT_PUBLIC_PACKAGING")
- endif()
+ set(internal_target_name ${fq_target_name}.__internal__)
+ set(public_packaging_for_internal "-DLIBC_COPT_PUBLIC_PACKAGING")
_get_common_compile_options(compile_options "${ADD_OBJECT_FLAGS}")
list(APPEND compile_options ${ADD_OBJECT_COMPILE_OPTIONS})
- # GPU builds require special handling for the objects because we want to
- # export several different targets at once, e.g. for both Nvidia and AMD.
- if(LIBC_TARGET_ARCHITECTURE_IS_GPU)
- if(NOT ${ADD_OBJECT_NO_GPU_BUNDLE})
- _build_gpu_object_bundle(
- ${fq_target_name}
- SRCS ${ADD_OBJECT_SRCS}
- HDRS ${ADD_OBJECT_HDRS}
- CXX_STANDARD ${ADD_OBJECT_CXX_STANDARD}
- COMPILE_OPTIONS ${compile_options} "-DLIBC_COPT_PUBLIC_PACKAGING"
- DEPENDS ${fq_deps_list}
- )
- endif()
- # When the target for GPU is not bundled, internal_target_name is the same
- # as fq_targetname
- _build_gpu_object_for_single_arch(
- ${internal_target_name}
- ${LIBC_GPU_TARGET_ARCHITECTURE}
- SRCS ${ADD_OBJECT_SRCS}
- HDRS ${ADD_OBJECT_HDRS}
- CXX_STANDARD ${ADD_OBJECT_CXX_STANDARD}
- COMPILE_OPTIONS ${compile_options} ${public_packaging_for_internal}
- DEPENDS ${fq_deps_list}
- )
- else()
+ add_library(
+ ${fq_target_name}
+ EXCLUDE_FROM_ALL
+ OBJECT
+ ${ADD_OBJECT_SRCS}
+ ${ADD_OBJECT_HDRS}
+ )
+ target_include_directories(${fq_target_name} SYSTEM PRIVATE ${LIBC_INCLUDE_DIR})
+ target_include_directories(${fq_target_name} PRIVATE ${LIBC_SOURCE_DIR})
+ target_compile_options(${fq_target_name} PRIVATE ${compile_options})
+
+ # The NVPTX target is installed as LLVM-IR but the internal testing toolchain
+ # cannot handle it natively. Make a separate internal target for testing.
+ if(LIBC_TARGET_ARCHITECTURE_IS_NVPTX AND NOT LIBC_GPU_TESTS_DISABLED)
add_library(
- ${fq_target_name}
+ ${internal_target_name}
EXCLUDE_FROM_ALL
OBJECT
${ADD_OBJECT_SRCS}
${ADD_OBJECT_HDRS}
)
- target_include_directories(${fq_target_name} SYSTEM PRIVATE ${LIBC_INCLUDE_DIR})
- target_include_directories(${fq_target_name} PRIVATE ${LIBC_SOURCE_DIR})
- target_compile_options(${fq_target_name} PRIVATE ${compile_options})
+ target_include_directories(${internal_target_name} SYSTEM PRIVATE ${LIBC_INCLUDE_DIR})
+ target_include_directories(${internal_target_name} PRIVATE ${LIBC_SOURCE_DIR})
+ target_compile_options(${internal_target_name} PRIVATE ${compile_options}
+ -fno-lto -march=${LIBC_GPU_TARGET_ARCHITECTURE})
endif()
if(SHOW_INTERMEDIATE_OBJECTS)
@@ -290,13 +104,18 @@ function(create_object_library fq_target_name)
FLAGS "${ADD_OBJECT_FLAGS}"
)
+ # If we built a separate internal target we want to use those target objects
+ # for testing instead of the exported target.
+ set(target_objects ${fq_target_name})
if(TARGET ${internal_target_name})
- set_target_properties(
- ${fq_target_name}
- PROPERTIES
- OBJECT_FILES "$<TARGET_OBJECTS:${internal_target_name}>"
- )
+ set(target_objects ${internal_target_name})
endif()
+
+ set_target_properties(
+ ${fq_target_name}
+ PROPERTIES
+ OBJECT_FILES "$<TARGET_OBJECTS:${target_objects}>"
+ )
endfunction(create_object_library)
function(add_object_library target_name)
@@ -389,12 +208,19 @@ function(create_entrypoint_object fq_target_name)
get_target_property(object_file ${fq_dep_name} "OBJECT_FILE")
get_target_property(object_file_raw ${fq_dep_name} "OBJECT_FILE_RAW")
- add_library(
- ${internal_target_name}
- EXCLUDE_FROM_ALL
- OBJECT
- ${object_file_raw}
- )
+
+ # If the system cannot build the GPU tests we simply make a dummy target.
+ if(LIBC_TARGET_OS_IS_GPU AND LIBC_GPU_TESTS_DISABLED)
+ add_custom_target(${internal_target_name})
+ else()
+ add_library(
+ ${internal_target_name}
+ EXCLUDE_FROM_ALL
+ OBJECT
+ ${object_file_raw}
+ )
+ endif()
+
add_dependencies(${internal_target_name} ${fq_dep_name})
add_library(
${fq_target_name}
@@ -441,60 +267,42 @@ function(create_entrypoint_object fq_target_name)
endif()
endif()
- # GPU builds require special handling for the objects because we want to
- # export several different targets at once, e.g. for both Nvidia and AMD.
- if(LIBC_TARGET_ARCHITECTURE_IS_GPU)
- _build_gpu_object_bundle(
- ${fq_target_name}
- SRCS ${ADD_ENTRYPOINT_OBJ_SRCS}
- HDRS ${ADD_ENTRYPOINT_OBJ_HDRS}
- COMPILE_OPTIONS ${common_compile_options} "-DLIBC_COPT_PUBLIC_PACKAGING"
- CXX_STANDARD ${ADD_ENTRYPOINT_OBJ_CXX_STANDARD}
- DEPENDS ${full_deps_list}
- FLAGS "${ADD_ENTRYPOINT_OBJ_FLAGS}"
- )
- _build_gpu_object_for_single_arch(
- ${internal_target_name}
- ${LIBC_GPU_TARGET_ARCHITECTURE}
- SRCS ${ADD_ENTRYPOINT_OBJ_SRCS}
- HDRS ${ADD_ENTRYPOINT_OBJ_HDRS}
- COMPILE_OPTIONS ${common_compile_options}
- CXX_STANDARD ${ADD_ENTRYPOINT_OBJ_CXX_STANDARD}
- DEPENDS ${full_deps_list}
- FLAGS "${ADD_ENTRYPOINT_OBJ_FLAGS}"
- )
- else()
- add_library(
- ${internal_target_name}
- # TODO: We don't need an object library for internal consumption.
- # A future change should switch this to a normal static library.
- EXCLUDE_FROM_ALL
- OBJECT
- ${ADD_ENTRYPOINT_OBJ_SRCS}
- ${ADD_ENTRYPOINT_OBJ_HDRS}
- )
- target_compile_options(${internal_target_name} BEFORE PRIVATE ${common_compile_options})
- target_include_directories(${internal_target_name} SYSTEM PRIVATE ${LIBC_INCLUDE_DIR})
- target_include_directories(${internal_target_name} PRIVATE ${LIBC_SOURCE_DIR})
- add_dependencies(${internal_target_name} ${full_deps_list})
- target_link_libraries(${internal_target_name} ${full_deps_list})
-
- add_library(
- ${fq_target_name}
- # We want an object library as the objects will eventually get packaged into
- # an archive (like libc.a).
- EXCLUDE_FROM_ALL
- OBJECT
- ${ADD_ENTRYPOINT_OBJ_SRCS}
- ${ADD_ENTRYPOINT_OBJ_HDRS}
- )
- target_compile_options(${fq_target_name} BEFORE PRIVATE ${common_compile_options} -DLIBC_COPT_PUBLIC_PACKAGING)
- target_include_directories(${fq_target_name} SYSTEM PRIVATE ${LIBC_INCLUDE_DIR})
- target_include_directories(${fq_target_name} PRIVATE ${LIBC_SOURCE_DIR})
- add_dependencies(${fq_target_name} ${full_deps_list})
- target_link_libraries(${fq_target_name} ${full_deps_list})
+ add_library(
+ ${internal_target_name}
+ # TODO: We don't need an object library for internal consumption.
+ # A future change should switch this to a normal static library.
+ EXCLUDE_FROM_ALL
+ OBJECT
+ ${ADD_ENTRYPOINT_OBJ_SRCS}
+ ${ADD_ENTRYPOINT_OBJ_HDRS}
+ )
+ target_compile_options(${internal_target_name} BEFORE PRIVATE ${common_compile_options})
+ target_include_directories(${internal_target_name} SYSTEM PRIVATE ${LIBC_INCLUDE_DIR})
+ target_include_directories(${internal_target_name} PRIVATE ${LIBC_SOURCE_DIR})
+ add_dependencies(${internal_target_name} ${full_deps_list})
+ target_link_libraries(${internal_target_name} ${full_deps_list})
+
+ # The NVPTX target cannot use LTO for the internal targets used for testing.
+ if(LIBC_TARGET_ARCHITECTURE_IS_NVPTX)
+ target_compile_options(${internal_target_name} PRIVATE
+ -fno-lto -march=${LIBC_GPU_TARGET_ARCHITECTURE})
endif()
+ add_library(
+ ${fq_target_name}
+ # We want an object library as the objects will eventually get packaged into
+ # an archive (like libc.a).
+ EXCLUDE_FROM_ALL
+ OBJECT
+ ${ADD_ENTRYPOINT_OBJ_SRCS}
+ ${ADD_ENTRYPOINT_OBJ_HDRS}
+ )
+ target_compile_options(${fq_target_name} BEFORE PRIVATE ${common_compile_options} -DLIBC_COPT_PUBLIC_PACKAGING)
+ target_include_directories(${fq_target_name} SYSTEM PRIVATE ${LIBC_INCLUDE_DIR})
+ target_include_directories(${fq_target_name} PRIVATE ${LIBC_SOURCE_DIR})
+ add_dependencies(${fq_target_name} ${full_deps_list})
+ target_link_libraries(${fq_target_name} ${full_deps_list})
+
set_target_properties(
${fq_target_name}
PROPERTIES
diff --git a/libc/cmake/modules/LLVMLibCTestRules.cmake b/libc/cmake/modules/LLVMLibCTestRules.cmake
index 6ca9516ff7a0..9d1e426a5b7b 100644
--- a/libc/cmake/modules/LLVMLibCTestRules.cmake
+++ b/libc/cmake/modules/LLVMLibCTestRules.cmake
@@ -449,7 +449,7 @@ function(add_integration_test test_name)
${fq_build_target_name}
EXCLUDE_FROM_ALL
# The NVIDIA 'nvlink' linker does not currently support static libraries.
- $<$<BOOL:${LIBC_GPU_TARGET_ARCHITECTURE_IS_NVPTX}>:${link_object_files}>
+ $<$<BOOL:${LIBC_TARGET_ARCHITECTURE_IS_NVPTX}>:${link_object_files}>
${INTEGRATION_TEST_SRCS}
${INTEGRATION_TEST_HDRS}
)
@@ -461,8 +461,20 @@ function(add_integration_test test_name)
_get_hermetic_test_compile_options(compile_options "${INTEGRATION_TEST_COMPILE_OPTIONS}")
target_compile_options(${fq_build_target_name} PRIVATE ${compile_options})
- if(LIBC_TARGET_ARCHITECTURE_IS_GPU)
- target_link_options(${fq_build_target_name} PRIVATE -nostdlib -static)
+ if(LIBC_TARGET_ARCHITECTURE_IS_AMDGPU)
+ target_link_options(${fq_build_target_name} PRIVATE
+ ${LIBC_COMPILE_OPTIONS_DEFAULT}
+ -mcpu=${LIBC_GPU_TARGET_ARCHITECTURE} -flto
+ "-Wl,-mllvm,-amdgpu-lower-global-ctor-dtor=0" -nostdlib -static
+ "-Wl,-mllvm,-amdhsa-code-object-version=${LIBC_GPU_CODE_OBJECT_VERSION}")
+ elseif(LIBC_TARGET_ARCHITECTURE_IS_NVPTX)
+ # We need to use the internal object versions for NVPTX.
+ set(internal_suffix ".__internal__")
+ target_link_options(${fq_build_target_name} PRIVATE
+ ${LIBC_COMPILE_OPTIONS_DEFAULT}
+ "-Wl,--suppress-stack-size-warning"
+ -march=${LIBC_GPU_TARGET_ARCHITECTURE} -nostdlib -static
+ "--cuda-path=${LIBC_CUDA_ROOT}")
elseif(LIBC_CC_SUPPORTS_NOSTDLIBPP)
target_link_options(${fq_build_target_name} PRIVATE -nolibc -nostartfiles -nostdlib++ -static)
else()
@@ -474,9 +486,10 @@ function(add_integration_test test_name)
target_link_libraries(
${fq_build_target_name}
# The NVIDIA 'nvlink' linker does not currently support static libraries.
- $<$<NOT:$<BOOL:${LIBC_GPU_TARGET_ARCHITECTURE_IS_NVPTX}>>:${fq_target_name}.__libc__>
- libc.startup.${LIBC_TARGET_OS}.crt1
- libc.test.IntegrationTest.test)
+ $<$<NOT:$<BOOL:${LIBC_TARGET_ARCHITECTURE_IS_NVPTX}>>:${fq_target_name}.__libc__>
+ libc.startup.${LIBC_TARGET_OS}.crt1${internal_suffix}
+ libc.test.IntegrationTest.test${internal_suffix}
+ )
add_dependencies(${fq_build_target_name}
libc.test.IntegrationTest.test
${INTEGRATION_TEST_DEPENDS})
@@ -495,7 +508,7 @@ function(add_integration_test test_name)
# makes `add_custom_target` construct the correct command and execute it.
set(test_cmd
${INTEGRATION_TEST_ENV}
- $<$<BOOL:${LIBC_TARGET_ARCHITECTURE_IS_GPU}>:${gpu_loader_exe}>
+ $<$<BOOL:${LIBC_TARGET_OS_IS_GPU}>:${gpu_loader_exe}>
${CMAKE_CROSSCOMPILING_EMULATOR}
${INTEGRATION_TEST_LOADER_ARGS}
$<TARGET_FILE:${fq_build_target_name}> ${INTEGRATION_TEST_ARGS})
@@ -606,7 +619,7 @@ function(add_libc_hermetic_test test_name)
${fq_build_target_name}
EXCLUDE_FROM_ALL
# The NVIDIA 'nvlink' linker does not currently support static libraries.
- $<$<BOOL:${LIBC_GPU_TARGET_ARCHITECTURE_IS_NVPTX}>:${link_object_files}>
+ $<$<BOOL:${LIBC_TARGET_ARCHITECTURE_IS_NVPTX}>:${link_object_files}>
${HERMETIC_TEST_SRCS}
${HERMETIC_TEST_HDRS}
)
@@ -615,6 +628,8 @@ function(add_libc_hermetic_test test_name)
RUNTIME_OUTPUT_DIRECTORY ${CMAKE_CURRENT_BINARY_DIR}
#OUTPUT_NAME ${fq_target_name}
)
+
+ _get_hermetic_test_compile_options(compile_options "${HERMETIC_TEST_COMPILE_OPTIONS}")
target_include_directories(${fq_build_target_name} SYSTEM PRIVATE ${LIBC_INCLUDE_DIR})
target_include_directories(${fq_build_target_name} PRIVATE ${LIBC_SOURCE_DIR})
_get_hermetic_test_compile_options(compile_options "${HERMETIC_TEST_COMPILE_OPTIONS}")
@@ -629,8 +644,20 @@ function(add_libc_hermetic_test test_name)
endif()
endforeach()
- if(LIBC_TARGET_ARCHITECTURE_IS_GPU)
- target_link_options(${fq_build_target_name} PRIVATE -nostdlib -static)
+ if(LIBC_TARGET_ARCHITECTURE_IS_AMDGPU)
+ target_link_options(${fq_build_target_name} PRIVATE
+ ${LIBC_COMPILE_OPTIONS_DEFAULT}
+ -mcpu=${LIBC_GPU_TARGET_ARCHITECTURE} -flto
+ "-Wl,-mllvm,-amdgpu-lower-global-ctor-dtor=0" -nostdlib -static
+ "-Wl,-mllvm,-amdhsa-code-object-version=${LIBC_GPU_CODE_OBJECT_VERSION}")
+ elseif(LIBC_TARGET_ARCHITECTURE_IS_NVPTX)
+ # We need to use the internal object versions for NVPTX.
+ set(internal_suffix ".__internal__")
+ target_link_options(${fq_build_target_name} PRIVATE
+ ${LIBC_COMPILE_OPTIONS_DEFAULT}
+ "-Wl,--suppress-stack-size-warning"
+ -march=${LIBC_GPU_TARGET_ARCHITECTURE} -nostdlib -static
+ "--cuda-path=${LIBC_CUDA_ROOT}")
elseif(LIBC_CC_SUPPORTS_NOSTDLIBPP)
target_link_options(${fq_build_target_name} PRIVATE -nolibc -nostartfiles -nostdlib++ -static)
else()
@@ -642,12 +669,12 @@ function(add_libc_hermetic_test test_name)
target_link_libraries(
${fq_build_target_name}
PRIVATE
- libc.startup.${LIBC_TARGET_OS}.crt1
+ libc.startup.${LIBC_TARGET_OS}.crt1${internal_suffix}
${link_libraries}
LibcTest.hermetic
LibcHermeticTestSupport.hermetic
# The NVIDIA 'nvlink' linker does not currently support static libraries.
- $<$<NOT:$<BOOL:${LIBC_GPU_TARGET_ARCHITECTURE_IS_NVPTX}>>:${fq_target_name}.__libc__>)
+ $<$<NOT:$<BOOL:${LIBC_TARGET_ARCHITECTURE_IS_NVPTX}>>:${fq_target_name}.__libc__>)
add_dependencies(${fq_build_target_name}
LibcTest.hermetic
libc.test.UnitTest.ErrnoSetterMatcher
@@ -660,7 +687,7 @@ function(add_libc_hermetic_test test_name)
endif()
set(test_cmd ${HERMETIC_TEST_ENV}
- $<$<BOOL:${LIBC_TARGET_ARCHITECTURE_IS_GPU}>:${gpu_loader_exe}> ${CMAKE_CROSSCOMPILING_EMULATOR} ${HERMETIC_TEST_LOADER_ARGS}
+ $<$<BOOL:${LIBC_TARGET_OS_IS_GPU}>:${gpu_loader_exe}> ${CMAKE_CROSSCOMPILING_EMULATOR} ${HERMETIC_TEST_LOADER_ARGS}
$<TARGET_FILE:${fq_build_target_name}> ${HERMETIC_TEST_ARGS})
add_custom_target(
${fq_target_name}
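
# A minimal, self-contained sketch of the generator-expression pattern the test
# rules above rely on: an argument wrapped in $<$<BOOL:cond>:...> is dropped
# entirely when the condition is false. All names here (my_test, my_loader,
# MY_OS_IS_GPU) are hypothetical stand-ins, not part of the libc build.
cmake_minimum_required(VERSION 3.20)
project(genex_sketch LANGUAGES CXX)
set(MY_OS_IS_GPU TRUE) # stand-in for LIBC_TARGET_OS_IS_GPU
add_executable(my_test my_test.cpp)
add_custom_target(run_my_test
  # 'my_loader' is prepended only when MY_OS_IS_GPU is true; otherwise the
  # generator expression expands to nothing and the binary runs directly.
  COMMAND $<$<BOOL:${MY_OS_IS_GPU}>:my_loader> $<TARGET_FILE:my_test>
  USES_TERMINAL)
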
diff --git a/libc/cmake/modules/prepare_libc_gpu_build.cmake b/libc/cmake/modules/prepare_libc_gpu_build.cmake
index 2086175bae6c..548990a1c18b 100644
--- a/libc/cmake/modules/prepare_libc_gpu_build.cmake
+++ b/libc/cmake/modules/prepare_libc_gpu_build.cmake
@@ -1,27 +1,12 @@
-if(NOT LIBC_TARGET_ARCHITECTURE_IS_GPU)
+if(NOT LIBC_TARGET_OS_IS_GPU)
message(FATAL_ERROR
"libc build: Invalid attempt to set up GPU architectures.")
endif()
-# Set up the target architectures to build the GPU libc for.
-set(all_amdgpu_architectures "gfx700;gfx701;gfx801;gfx803;gfx900;gfx902;gfx906"
- "gfx908;gfx90a;gfx90c;gfx940;gfx941;gfx942"
- "gfx1010;gfx1030;gfx1031;gfx1032;gfx1033;gfx1034"
- "gfx1035;gfx1036"
- "gfx1100;gfx1101;gfx1102;gfx1103;gfx1150;gfx1151")
-set(all_nvptx_architectures "sm_35;sm_37;sm_50;sm_52;sm_53;sm_60;sm_61;sm_62"
- "sm_70;sm_72;sm_75;sm_80;sm_86;sm_89;sm_90")
-set(all_gpu_architectures
- "${all_amdgpu_architectures};${all_nvptx_architectures}")
-set(LIBC_GPU_ARCHITECTURES "all" CACHE STRING
- "List of GPU architectures to build the libc for.")
-set(AMDGPU_TARGET_TRIPLE "amdgcn-amd-amdhsa")
-set(NVPTX_TARGET_TRIPLE "nvptx64-nvidia-cuda")
-
# Ensure the compiler is a valid clang when building the GPU target.
set(req_ver "${LLVM_VERSION_MAJOR}.${LLVM_VERSION_MINOR}.${LLVM_VERSION_PATCH}")
-if(NOT (CMAKE_CXX_COMPILER_ID MATCHES "[Cc]lang" AND
- ${CMAKE_CXX_COMPILER_VERSION} VERSION_EQUAL "${req_ver}"))
+if(LLVM_VERSION_MAJOR AND NOT (CMAKE_CXX_COMPILER_ID MATCHES "[Cc]lang" AND
+ ${CMAKE_CXX_COMPILER_VERSION} VERSION_EQUAL "${req_ver}"))
message(FATAL_ERROR "Cannot build libc for GPU. CMake compiler "
"'${CMAKE_CXX_COMPILER_ID} ${CMAKE_CXX_COMPILER_VERSION}' "
" is not 'Clang ${req_ver}'.")
@@ -31,44 +16,11 @@ if(NOT LLVM_LIBC_FULL_BUILD)
"GPU.")
endif()
-# Identify any locally installed AMD GPUs on the system using 'amdgpu-arch'.
-find_program(LIBC_AMDGPU_ARCH
- NAMES amdgpu-arch NO_DEFAULT_PATH
- PATHS ${LLVM_BINARY_DIR}/bin /opt/rocm/llvm/bin/)
-
-# Identify any locally installed NVIDIA GPUs on the system using 'nvptx-arch'.
-find_program(LIBC_NVPTX_ARCH
- NAMES nvptx-arch NO_DEFAULT_PATH
- PATHS ${LLVM_BINARY_DIR}/bin)
-
-# Get the list of all natively supported GPU architectures.
-set(detected_gpu_architectures "")
-foreach(arch_tool ${LIBC_NVPTX_ARCH} ${LIBC_AMDGPU_ARCH})
- if(arch_tool)
- execute_process(COMMAND ${arch_tool}
- OUTPUT_VARIABLE arch_tool_output
- ERROR_QUIET OUTPUT_STRIP_TRAILING_WHITESPACE)
- string(REPLACE "\n" ";" arch_list "${arch_tool_output}")
- list(APPEND detected_gpu_architectures "${arch_list}")
- endif()
-endforeach()
-list(REMOVE_DUPLICATES detected_gpu_architectures)
-
-if(LIBC_GPU_ARCHITECTURES STREQUAL "all")
- set(LIBC_GPU_ARCHITECTURES ${all_gpu_architectures})
-elseif(LIBC_GPU_ARCHITECTURES STREQUAL "native")
- if(NOT detected_gpu_architectures)
- message(FATAL_ERROR "No GPUs found on the system when using 'native'")
- endif()
- set(LIBC_GPU_ARCHITECTURES ${detected_gpu_architectures})
-endif()
-message(STATUS "Building libc for the following GPU architecture(s): "
- "${LIBC_GPU_ARCHITECTURES}")
-
# Identify the program used to package multiple images into a single binary.
+get_filename_component(compiler_path ${CMAKE_CXX_COMPILER} DIRECTORY)
find_program(LIBC_CLANG_OFFLOAD_PACKAGER
NAMES clang-offload-packager NO_DEFAULT_PATH
- PATHS ${LLVM_BINARY_DIR}/bin)
+ PATHS ${LLVM_BINARY_DIR}/bin ${compiler_path})
if(NOT LIBC_CLANG_OFFLOAD_PACKAGER)
message(FATAL_ERROR "Cannot find the 'clang-offload-packager' for the GPU "
"build")
@@ -87,49 +39,54 @@ else()
endif()
set(LIBC_GPU_TEST_ARCHITECTURE "" CACHE STRING "Architecture for the GPU tests")
+if(LIBC_TARGET_ARCHITECTURE_IS_AMDGPU)
+ check_cxx_compiler_flag("-nogpulib -mcpu=native" PLATFORM_HAS_GPU)
+elseif(LIBC_TARGET_ARCHITECTURE_IS_NVPTX)
+ # Identify any locally installed NVIDIA GPUs on the system using 'nvptx-arch'.
+  # Using 'check_cxx_compiler_flag' does not currently work due to the link job.
+ find_program(LIBC_NVPTX_ARCH
+ NAMES nvptx-arch NO_DEFAULT_PATH
+ PATHS ${LLVM_BINARY_DIR}/bin ${compiler_path})
+ if(LIBC_NVPTX_ARCH)
+ execute_process(COMMAND ${LIBC_NVPTX_ARCH}
+ OUTPUT_VARIABLE arch_tool_output
+ ERROR_QUIET OUTPUT_STRIP_TRAILING_WHITESPACE)
+ if(arch_tool_output MATCHES "^sm_[0-9]+")
+ set(PLATFORM_HAS_GPU TRUE)
+ endif()
+ endif()
+endif()
set(gpu_test_architecture "")
if(LIBC_GPU_TEST_ARCHITECTURE)
+ set(LIBC_GPU_TESTS_DISABLED FALSE)
set(gpu_test_architecture ${LIBC_GPU_TEST_ARCHITECTURE})
message(STATUS "Using user-specified GPU architecture for testing: "
"'${gpu_test_architecture}'")
-elseif(detected_gpu_architectures)
- list(GET detected_gpu_architectures 0 gpu_test_architecture)
+elseif(PLATFORM_HAS_GPU)
+ set(LIBC_GPU_TESTS_DISABLED FALSE)
+ set(gpu_test_architecture "native")
message(STATUS "Using GPU architecture detected on the system for testing: "
- "'${gpu_test_architecture}'")
+ "'native'")
else()
- list(LENGTH LIBC_GPU_ARCHITECTURES n_gpu_archs)
- if (${n_gpu_archs} EQUAL 1)
- set(gpu_test_architecture ${LIBC_GPU_ARCHITECTURES})
- message(STATUS "Using user-specified GPU architecture for testing: "
- "'${gpu_test_architecture}'")
- else()
- message(STATUS "No GPU architecture set for testing. GPU tests will not be "
- "availibe. Set 'LIBC_GPU_TEST_ARCHITECTURE' to override.")
- return()
- endif()
+ set(LIBC_GPU_TESTS_DISABLED TRUE)
+ message(STATUS "No GPU architecture detected or provided, tests will not be "
+ "built")
endif()
+set(LIBC_GPU_TARGET_ARCHITECTURE "${gpu_test_architecture}")
-if("${gpu_test_architecture}" IN_LIST all_amdgpu_architectures)
- set(LIBC_GPU_TARGET_ARCHITECTURE_IS_AMDGPU TRUE)
- set(LIBC_GPU_TARGET_TRIPLE ${AMDGPU_TARGET_TRIPLE})
- set(LIBC_GPU_TARGET_ARCHITECTURE "${gpu_test_architecture}")
-elseif("${gpu_test_architecture}" IN_LIST all_nvptx_architectures)
- set(LIBC_GPU_TARGET_ARCHITECTURE_IS_NVPTX TRUE)
- set(LIBC_GPU_TARGET_TRIPLE ${NVPTX_TARGET_TRIPLE})
- set(LIBC_GPU_TARGET_ARCHITECTURE "${gpu_test_architecture}")
-else()
- message(FATAL_ERROR "Unknown GPU architecture '${gpu_test_architecture}'")
-endif()
+if(LIBC_TARGET_ARCHITECTURE_IS_NVPTX)
+ # FIXME: This is a hack required to keep the CUDA package from trying to find
+ # pthreads. We only link the CUDA driver, so this is unneeded.
+ add_library(CUDA::cudart_static_deps IMPORTED INTERFACE)
-if(LIBC_GPU_TARGET_ARCHITECTURE_IS_NVPTX)
find_package(CUDAToolkit QUIET)
if(CUDAToolkit_FOUND)
get_filename_component(LIBC_CUDA_ROOT "${CUDAToolkit_BIN_DIR}" DIRECTORY ABSOLUTE)
endif()
endif()
-if(LIBC_GPU_TARGET_ARCHITECTURE_IS_AMDGPU)
+if(LIBC_TARGET_ARCHITECTURE_IS_AMDGPU)
# The AMDGPU environment uses different code objects to encode the ABI for
# kernel calls and intrinsic functions. We want to specify this manually to
# conform to whatever the test suite was built to handle.
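
# A minimal sketch of how the flags computed above are consumed downstream; it
# mirrors the libc/test/CMakeLists.txt change later in this patch, and the
# 'my_gpu_suite' target name is hypothetical.
if(LIBC_TARGET_OS_IS_GPU AND LIBC_GPU_TESTS_DISABLED)
  # LIBC_GPU_TEST_ARCHITECTURE was not set and no GPU was detected, so no GPU
  # test targets are registered at all.
  message(STATUS "GPU tests disabled: no test architecture available")
  return()
endif()
# With an architecture available, LIBC_GPU_TARGET_ARCHITECTURE holds either the
# user-provided value or the 'native' pseudo-architecture detected above.
add_custom_target(my_gpu_suite)
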
diff --git a/libc/docs/gpu/using.rst b/libc/docs/gpu/using.rst
index 71f5e7ba2039..79b9116c38ed 100644
--- a/libc/docs/gpu/using.rst
+++ b/libc/docs/gpu/using.rst
@@ -14,25 +14,25 @@ Building the GPU library
LLVM's libc GPU support *must* be built with an up-to-date ``clang`` compiler
due to heavy reliance on ``clang``'s GPU support. This can be done automatically
-using the ``LLVM_ENABLE_RUNTIMES=libc`` option. To enable libc for the GPU,
-enable the ``LIBC_GPU_BUILD`` option. By default, ``libcgpu.a`` will be built
-using every supported GPU architecture. To restrict the number of architectures
-build, either set ``LIBC_GPU_ARCHITECTURES`` to the list of desired
-architectures manually or use ``native`` to detect the GPUs on your system. A
-typical ``cmake`` configuration will look like this:
+using LLVM's runtimes build support. The GPU build is done by cross-compiling
+to the GPU architecture. This project currently supports AMD and NVIDIA GPUs,
+which can be targeted using the appropriate target triple. The following
+invocation enables a cross-compiling build for both GPU architectures and
+enables the ``libc`` project only for those targets.
.. code-block:: sh
$> cd llvm-project # The llvm-project checkout
$> mkdir build
$> cd build
- $> cmake ../llvm -G Ninja \
- -DLLVM_ENABLE_PROJECTS="clang;lld;compiler-rt" \
- -DLLVM_ENABLE_RUNTIMES="libc;openmp" \
+ $> cmake ../llvm -G Ninja \
+ -DLLVM_ENABLE_PROJECTS="clang;lld;compiler-rt" \
+ -DLLVM_ENABLE_RUNTIMES="openmp" \
-DCMAKE_BUILD_TYPE=<Debug|Release> \ # Select build type
- -DLIBC_GPU_BUILD=ON \ # Build in GPU mode
- -DLIBC_GPU_ARCHITECTURES=all \ # Build all supported architectures
- -DCMAKE_INSTALL_PREFIX=<PATH> \ # Where 'libcgpu.a' will live
+ -DCMAKE_INSTALL_PREFIX=<PATH> \ # Where 'libcgpu.a' will live
+ -DRUNTIMES_nvptx64-nvidia-cuda_LLVM_ENABLE_RUNTIMES=libc \
+ -DRUNTIMES_amdgcn-amd-amdhsa_LLVM_ENABLE_RUNTIMES=libc \
+   -DLLVM_RUNTIME_TARGETS="default;amdgcn-amd-amdhsa;nvptx64-nvidia-cuda"
$> ninja install
Since we want to include ``clang``, ``lld`` and ``compiler-rt`` in our
@@ -40,13 +40,14 @@ toolchain, we list them in ``LLVM_ENABLE_PROJECTS``. To ensure ``libc`` is built
using a compatible compiler and to support ``openmp`` offloading, we list them
in ``LLVM_ENABLE_RUNTIMES`` to build them after the enabled projects using the
newly built compiler. ``CMAKE_INSTALL_PREFIX`` specifies the installation
-directory in which to install the ``libcgpu.a`` library and headers along with
-LLVM. The generated headers will be placed in ``include/gpu-none-llvm``.
+directory in which to install the ``libcgpu-nvptx.a`` and ``libcgpu-amdgpu.a``
+libraries and headers along with LLVM. The generated headers will be placed in
+``include/<gpu-triple>``.
Usage
=====
-Once the ``libcgpu.a`` static archive has been built it can be linked directly
+Once the static archive has been built, it can be linked directly
with offloading applications as a standard library. This process is described in
the `clang documentation <https://clang.llvm.org/docs/OffloadingDesign.html>`_.
This linking mode is used by the OpenMP toolchain, but is currently opt-in for
@@ -68,7 +69,7 @@ supported target device. The supported architectures can be seen using LLVM's
OFFLOADING IMAGE [0]:
kind llvm ir
- arch gfx90a
+ arch generic
triple amdgcn-amd-amdhsa
producer none
diff --git a/libc/include/CMakeLists.txt b/libc/include/CMakeLists.txt
index dc3c9b8e6328..9090b3bca01e 100644
--- a/libc/include/CMakeLists.txt
+++ b/libc/include/CMakeLists.txt
@@ -4,7 +4,7 @@ set(LIBC_INCLUDE_BINARY_DIR ${CMAKE_CURRENT_BINARY_DIR})
include(LLVMLibCHeaderRules)
# The GPU build wants to install files in the compiler's resource directory.
-if(LIBC_TARGET_ARCHITECTURE_IS_GPU)
+if(LIBC_TARGET_OS_IS_GPU)
include(GetClangResourceDir)
endif()
@@ -586,7 +586,7 @@ add_gen_header(
.llvm-libc-types.wchar_t
)
-if(LIBC_TARGET_ARCHITECTURE_IS_GPU)
+if(LIBC_TARGET_OS_IS_GPU)
file(MAKE_DIRECTORY ${LIBC_INCLUDE_DIR}/gpu)
add_gen_header(
@@ -638,7 +638,7 @@ foreach(target IN LISTS all_install_header_targets)
# The GPU optionally provides the supported declarations externally so
# offloading languages like CUDA and OpenMP know what is supported by libc. We
# install these in the compiler's resource directory at a preset location.
- if(LIBC_TARGET_ARCHITECTURE_IS_GPU AND PACKAGE_VERSION)
+ if(LIBC_TARGET_OS_IS_GPU AND PACKAGE_VERSION)
get_target_property(decls_file ${target} DECLS_FILE_PATH)
if(NOT decls_file)
continue()
diff --git a/libc/include/llvm-libc-macros/sys-queue-macros.h b/libc/include/llvm-libc-macros/sys-queue-macros.h
index 59e6a9a392c9..7da643cb7253 100644
--- a/libc/include/llvm-libc-macros/sys-queue-macros.h
+++ b/libc/include/llvm-libc-macros/sys-queue-macros.h
@@ -22,12 +22,12 @@
#define SLIST_HEAD(name, type) \
struct name { \
- struct type *first; \
+ struct type *slh_first; \
}
#define SLIST_CLASS_HEAD(name, type) \
struct name { \
- class type *first; \
+ class type *slh_first; \
}
#define SLIST_HEAD_INITIALIZER(head) \
@@ -45,8 +45,8 @@
// Singly-linked list access methods.
-#define SLIST_EMPTY(head) ((head)->first == NULL)
-#define SLIST_FIRST(head) ((head)->first)
+#define SLIST_EMPTY(head) ((head)->slh_first == NULL)
+#define SLIST_FIRST(head) ((head)->slh_first)
#define SLIST_NEXT(elem, field) ((elem)->field.next)
#define SLIST_FOREACH(var, head, field) \
@@ -132,18 +132,18 @@
#define STAILQ_HEAD(name, type) \
struct name { \
- struct type *first; \
- struct type **last; \
+ struct type *stqh_first; \
+ struct type **stqh_last; \
}
#define STAILQ_CLASS_HEAD(name, type) \
struct name { \
- class type *first; \
- class type **last; \
+ class type *stqh_first; \
+ class type **stqh_last; \
}
#define STAILQ_HEAD_INITIALIZER(head) \
- { NULL, &(head).first }
+ { NULL, &(head).stqh_first }
#define STAILQ_ENTRY(type) \
struct { \
@@ -157,12 +157,12 @@
// Singly-linked tail queue access methods.
-#define STAILQ_EMPTY(head) ((head)->first == NULL)
-#define STAILQ_FIRST(head) ((head)->first)
+#define STAILQ_EMPTY(head) ((head)->stqh_first == NULL)
+#define STAILQ_FIRST(head) ((head)->stqh_first)
#define STAILQ_LAST(head, type, field) \
(STAILQ_EMPTY(head) \
? NULL \
- : __containerof((head)->last, QUEUE_TYPEOF(type), field.next))
+ : __containerof((head)->stqh_last, QUEUE_TYPEOF(type), field.next))
#define STAILQ_NEXT(elem, field) ((elem)->field.next)
#define STAILQ_FOREACH(var, head, field) \
@@ -187,8 +187,8 @@
#define STAILQ_CONCAT(head1, head2, type, field) \
do { \
if (!STAILQ_EMPTY(head2)) { \
- *(head1)->last = (head2)->first; \
- (head1)->last = (head2)->last; \
+ *(head1)->stqh_last = (head2)->stqh_first; \
+ (head1)->stqh_last = (head2)->stqh_last; \
STAILQ_INIT(head2); \
} \
} while (0)
@@ -196,28 +196,28 @@
#define STAILQ_INIT(head) \
do { \
STAILQ_FIRST(head) = NULL; \
- (head)->last = &STAILQ_FIRST(head); \
+ (head)->stqh_last = &STAILQ_FIRST(head); \
} while (0)
#define STAILQ_INSERT_AFTER(head, listelem, elem, field) \
do { \
if ((STAILQ_NEXT(elem, field) = STAILQ_NEXT(listelem, field)) == NULL) \
- (head)->last = &STAILQ_NEXT(elem, field); \
+ (head)->stqh_last = &STAILQ_NEXT(elem, field); \
STAILQ_NEXT(listelem, field) = (elem); \
} while (0)
#define STAILQ_INSERT_HEAD(head, elem, field) \
do { \
if ((STAILQ_NEXT(elem, field) = STAILQ_FIRST(head)) == NULL) \
- (head)->last = &STAILQ_NEXT(elem, field); \
+ (head)->stqh_last = &STAILQ_NEXT(elem, field); \
STAILQ_FIRST(head) = (elem); \
} while (0)
#define STAILQ_INSERT_TAIL(head, elem, field) \
do { \
STAILQ_NEXT(elem, field) = NULL; \
- *(head)->last = (elem); \
- (head)->last = &STAILQ_NEXT(elem, field); \
+ *(head)->stqh_last = (elem); \
+ (head)->stqh_last = &STAILQ_NEXT(elem, field); \
} while (0)
#define STAILQ_REMOVE(head, elem, type, field) \
@@ -236,27 +236,27 @@
do { \
if ((STAILQ_NEXT(elem, field) = \
STAILQ_NEXT(STAILQ_NEXT(elem, field), field)) == NULL) \
- (head)->last = &STAILQ_NEXT(elem, field); \
+ (head)->stqh_last = &STAILQ_NEXT(elem, field); \
} while (0)
#define STAILQ_REMOVE_HEAD(head, field) \
do { \
if ((STAILQ_FIRST(head) = STAILQ_NEXT(STAILQ_FIRST(head), field)) == NULL) \
- (head)->last = &STAILQ_FIRST(head); \
+ (head)->stqh_last = &STAILQ_FIRST(head); \
} while (0)
#define STAILQ_SWAP(head1, head2, type) \
do { \
QUEUE_TYPEOF(type) *first = STAILQ_FIRST(head1); \
- QUEUE_TYPEOF(type) **last = (head1)->last; \
+ QUEUE_TYPEOF(type) **last = (head1)->stqh_last; \
STAILQ_FIRST(head1) = STAILQ_FIRST(head2); \
- (head1)->last = (head2)->last; \
+ (head1)->stqh_last = (head2)->stqh_last; \
STAILQ_FIRST(head2) = first; \
- (head2)->last = last; \
+ (head2)->stqh_last = last; \
if (STAILQ_EMPTY(head1)) \
- (head1)->last = &STAILQ_FIRST(head1); \
+ (head1)->stqh_last = &STAILQ_FIRST(head1); \
if (STAILQ_EMPTY(head2)) \
- (head2)->last = &STAILQ_FIRST(head2); \
+ (head2)->stqh_last = &STAILQ_FIRST(head2); \
} while (0)
#endif // __LLVM_LIBC_MACROS_SYS_QUEUE_MACROS_H
diff --git a/libc/lib/CMakeLists.txt b/libc/lib/CMakeLists.txt
index af7ef2de93dd..615f4270646f 100644
--- a/libc/lib/CMakeLists.txt
+++ b/libc/lib/CMakeLists.txt
@@ -2,11 +2,7 @@ set(libc_archive_targets "")
set(libc_archive_names "")
set(libc_archive_entrypoint_lists "")
if(LLVM_LIBC_FULL_BUILD)
- if(LIBC_TARGET_ARCHITECTURE_IS_GPU)
- list(APPEND libc_archive_names cgpu mgpu)
- else()
- list(APPEND libc_archive_names c m)
- endif()
+ list(APPEND libc_archive_names c m)
list(APPEND libc_archive_targets libc libm)
list(APPEND libc_archive_entrypoint_lists
TARGET_LIBC_ENTRYPOINTS TARGET_LIBM_ENTRYPOINTS)
@@ -35,18 +31,33 @@ foreach(archive IN ZIP_LISTS
)
if(LLVM_LIBC_FULL_BUILD)
target_link_libraries(${archive_1} PUBLIC libc-headers)
+ if(TARGET libc-startup)
+ add_dependencies(${archive_1} libc-startup)
+ endif()
endif()
list(APPEND added_archive_targets ${archive_1})
-endforeach()
-if(LIBC_TARGET_TRIPLE)
- set(LIBC_INSTALL_LIBRARY_DIR lib${LLVM_LIBDIR_SUFFIX}/${LIBC_TARGET_TRIPLE})
-elseif(LLVM_ENABLE_PER_TARGET_RUNTIME_DIR AND NOT LIBC_GPU_BUILD)
- set(LIBC_INSTALL_LIBRARY_DIR
- lib${LLVM_LIBDIR_SUFFIX}/${LLVM_DEFAULT_TARGET_TRIPLE})
-else()
- set(LIBC_INSTALL_LIBRARY_DIR lib${LLVM_LIBDIR_SUFFIX})
-endif()
+ # Add the offloading version of the library for offloading languages. These
+ # are installed in the standard search path separate from the other libraries.
+ if(LIBC_TARGET_OS_IS_GPU)
+ set(libc_gpu_archive_target ${archive_1}gpu)
+ set(libc_gpu_archive_name ${archive_0}gpu-${LIBC_TARGET_ARCHITECTURE})
+
+ add_gpu_entrypoint_library(
+ ${libc_gpu_archive_target}
+ DEPENDS
+ ${${archive_2}}
+ )
+ set_target_properties(
+ ${libc_gpu_archive_target}
+ PROPERTIES
+ ARCHIVE_OUTPUT_NAME ${libc_gpu_archive_name}
+ )
+ set_target_properties(${libc_gpu_archive_target} PROPERTIES
+ ARCHIVE_OUTPUT_DIRECTORY ${LLVM_LIBRARY_OUTPUT_INTDIR})
+ list(APPEND added_gpu_archive_targets ${libc_gpu_archive_target})
+ endif()
+endforeach()
install(
TARGETS ${added_archive_targets}
@@ -54,6 +65,14 @@ install(
COMPONENT libc
)
+if(LIBC_TARGET_OS_IS_GPU)
+ install(
+ TARGETS ${added_gpu_archive_targets}
+ ARCHIVE DESTINATION lib${LLVM_LIBDIR_SUFFIX}
+ COMPONENT libc
+ )
+endif()
+
if(NOT LIBC_TARGET_OS_IS_BAREMETAL)
# For now we will disable libc-startup installation for baremetal. The
# correct way to do it would be to make a hookable startup for baremetal
diff --git a/libc/src/__support/File/CMakeLists.txt b/libc/src/__support/File/CMakeLists.txt
index b3e4cc4b0277..b7c0612096aa 100644
--- a/libc/src/__support/File/CMakeLists.txt
+++ b/libc/src/__support/File/CMakeLists.txt
@@ -1,5 +1,5 @@
if(NOT (TARGET libc.src.__support.threads.mutex)
- OR LIBC_TARGET_ARCHITECTURE_IS_GPU)
+ OR LIBC_TARGET_OS_IS_GPU)
# Not all platforms have a mutex implementation. If mutex is unvailable,
# we just skip everything about files.
return()
diff --git a/libc/src/__support/GPU/CMakeLists.txt b/libc/src/__support/GPU/CMakeLists.txt
index 5a899215f4b6..d7ebd3cab7ab 100644
--- a/libc/src/__support/GPU/CMakeLists.txt
+++ b/libc/src/__support/GPU/CMakeLists.txt
@@ -1,4 +1,4 @@
-if(NOT LIBC_TARGET_ARCHITECTURE_IS_GPU)
+if(NOT LIBC_TARGET_OS_IS_GPU)
return()
endif()
diff --git a/libc/src/__support/OSUtil/CMakeLists.txt b/libc/src/__support/OSUtil/CMakeLists.txt
index c19677582643..ca3b3bf1263e 100644
--- a/libc/src/__support/OSUtil/CMakeLists.txt
+++ b/libc/src/__support/OSUtil/CMakeLists.txt
@@ -9,7 +9,7 @@ if(NOT TARGET ${target_os_util})
endif()
# The OSUtil is an object library in GPU mode.
-if(NOT LIBC_TARGET_ARCHITECTURE_IS_GPU)
+if(NOT LIBC_TARGET_OS_IS_GPU)
add_header_library(
osutil
HDRS
diff --git a/libc/src/__support/RPC/CMakeLists.txt b/libc/src/__support/RPC/CMakeLists.txt
index b44a65b3732e..183fc6f8683e 100644
--- a/libc/src/__support/RPC/CMakeLists.txt
+++ b/libc/src/__support/RPC/CMakeLists.txt
@@ -1,4 +1,4 @@
-if(NOT LIBC_TARGET_ARCHITECTURE_IS_GPU)
+if(NOT LIBC_TARGET_OS_IS_GPU)
return()
endif()
diff --git a/libc/src/__support/fixed_point/CMakeLists.txt b/libc/src/__support/fixed_point/CMakeLists.txt
index c6bb9e17adfa..64f9dacc7ba5 100644
--- a/libc/src/__support/fixed_point/CMakeLists.txt
+++ b/libc/src/__support/fixed_point/CMakeLists.txt
@@ -17,5 +17,7 @@ add_header_library(
libc.include.llvm-libc-macros.stdfix_macros
libc.src.__support.macros.attributes
libc.src.__support.macros.optimization
+ libc.src.__support.CPP.type_traits
libc.src.__support.CPP.bit
+ libc.src.__support.math_extras
)
diff --git a/libc/src/__support/fixed_point/fx_bits.h b/libc/src/__support/fixed_point/fx_bits.h
index b26be169a593..fcd47cd72cbb 100644
--- a/libc/src/__support/fixed_point/fx_bits.h
+++ b/libc/src/__support/fixed_point/fx_bits.h
@@ -14,6 +14,7 @@
#include "src/__support/CPP/type_traits.h"
#include "src/__support/macros/attributes.h" // LIBC_INLINE
#include "src/__support/macros/optimization.h" // LIBC_UNLIKELY
+#include "src/__support/math_extras.h"
#include "fx_rep.h"
@@ -21,6 +22,83 @@
namespace LIBC_NAMESPACE::fixed_point {
+template <typename T> struct FXBits {
+private:
+ using fx_rep = FXRep<T>;
+ using StorageType = typename fx_rep::StorageType;
+
+ StorageType value;
+
+ static_assert(fx_rep::FRACTION_LEN > 0);
+
+ static constexpr size_t FRACTION_OFFSET = 0; // Just for completeness
+ static constexpr size_t INTEGRAL_OFFSET =
+ fx_rep::INTEGRAL_LEN == 0 ? 0 : fx_rep::FRACTION_LEN;
+ static constexpr size_t SIGN_OFFSET =
+ fx_rep::SIGN_LEN == 0
+ ? 0
+ : ((sizeof(StorageType) * CHAR_BIT) - fx_rep::SIGN_LEN);
+
+ static constexpr StorageType FRACTION_MASK =
+ mask_trailing_ones<StorageType, fx_rep::FRACTION_LEN>()
+ << FRACTION_OFFSET;
+ static constexpr StorageType INTEGRAL_MASK =
+ mask_trailing_ones<StorageType, fx_rep::INTEGRAL_LEN>()
+ << INTEGRAL_OFFSET;
+ static constexpr StorageType SIGN_MASK =
+ (fx_rep::SIGN_LEN == 0 ? 0 : StorageType(1) << SIGN_OFFSET);
+
+public:
+ LIBC_INLINE constexpr FXBits() = default;
+
+ template <typename XType> LIBC_INLINE constexpr explicit FXBits(XType x) {
+ using Unqual = typename cpp::remove_cv_t<XType>;
+ if constexpr (cpp::is_same_v<Unqual, T>) {
+ value = cpp::bit_cast<StorageType>(x);
+ } else if constexpr (cpp::is_same_v<Unqual, StorageType>) {
+ value = x;
+ } else {
+ // We don't want accidental type promotions/conversions, so we require
+ // exact type match.
+ static_assert(cpp::always_false<XType>);
+ }
+ }
+
+ LIBC_INLINE constexpr StorageType get_fraction() {
+ return (value & FRACTION_MASK) >> FRACTION_OFFSET;
+ }
+
+ LIBC_INLINE constexpr StorageType get_integral() {
+ return (value & INTEGRAL_MASK) >> INTEGRAL_OFFSET;
+ }
+
+ // TODO: replace bool with Sign
+ LIBC_INLINE constexpr bool get_sign() {
+ return static_cast<bool>((value & SIGN_MASK) >> SIGN_OFFSET);
+ }
+
+ // This represents the effective negative exponent applied to this number
+ LIBC_INLINE constexpr int get_exponent() { return fx_rep::FRACTION_LEN; }
+
+ LIBC_INLINE constexpr void set_fraction(StorageType fraction) {
+ value = (value & (~FRACTION_MASK)) |
+ ((fraction << FRACTION_OFFSET) & FRACTION_MASK);
+ }
+
+ LIBC_INLINE constexpr void set_integral(StorageType integral) {
+ value = (value & (~INTEGRAL_MASK)) |
+ ((integral << INTEGRAL_OFFSET) & INTEGRAL_MASK);
+ }
+
+ // TODO: replace bool with Sign
+ LIBC_INLINE constexpr void set_sign(bool sign) {
+ value = (value & (~SIGN_MASK)) |
+ ((static_cast<StorageType>(sign) << SIGN_OFFSET) & SIGN_MASK);
+ }
+
+ LIBC_INLINE constexpr T get_val() const { return cpp::bit_cast<T>(value); }
+};
+
// Bit-wise operations are not available for fixed point types yet.
template <typename T>
LIBC_INLINE constexpr cpp::enable_if_t<cpp::is_fixed_point_v<T>, T>
diff --git a/libc/src/math/CMakeLists.txt b/libc/src/math/CMakeLists.txt
index 05ce51e8fc65..33dc1fc97c56 100644
--- a/libc/src/math/CMakeLists.txt
+++ b/libc/src/math/CMakeLists.txt
@@ -1,6 +1,9 @@
add_subdirectory(generic)
if(EXISTS ${CMAKE_CURRENT_SOURCE_DIR}/${LIBC_TARGET_ARCHITECTURE})
add_subdirectory(${LIBC_TARGET_ARCHITECTURE})
+elseif(EXISTS ${CMAKE_CURRENT_SOURCE_DIR}/${LIBC_TARGET_OS})
+ # TODO: We should split this into 'nvptx' and 'amdgpu' for the GPU build.
+ add_subdirectory(${LIBC_TARGET_OS})
endif()
function(add_math_entrypoint_object name)
@@ -8,6 +11,7 @@ function(add_math_entrypoint_object name)
# that first and return early if we are able to add an alias target for the
# machine specific implementation.
get_fq_target_name("${LIBC_TARGET_ARCHITECTURE}.${name}" fq_machine_specific_target_name)
+ get_fq_target_name("${LIBC_TARGET_OS}.${name}" fq_os_specific_target_name)
if(TARGET ${fq_machine_specific_target_name})
add_entrypoint_object(
${name}
@@ -16,17 +20,25 @@ function(add_math_entrypoint_object name)
.${LIBC_TARGET_ARCHITECTURE}.${name}
)
return()
+ elseif(TARGET ${fq_os_specific_target_name})
+ add_entrypoint_object(
+ ${name}
+ ALIAS
+ DEPENDS
+ .${LIBC_TARGET_OS}.${name}
+ )
+ return()
endif()
# The GPU optionally depends on vendor libraries. If we emitted one of these
# entrypoints it means the user requested it and we should use it instead.
- get_fq_target_name("${LIBC_TARGET_ARCHITECTURE}.vendor.${name}" fq_vendor_specific_target_name)
+ get_fq_target_name("${LIBC_TARGET_OS}.vendor.${name}" fq_vendor_specific_target_name)
if(TARGET ${fq_vendor_specific_target_name})
add_entrypoint_object(
${name}
ALIAS
DEPENDS
- .${LIBC_TARGET_ARCHITECTURE}.vendor.${name}
+ .${LIBC_TARGET_OS}.vendor.${name}
VENDOR
)
return()
diff --git a/libc/src/math/gpu/vendor/CMakeLists.txt b/libc/src/math/gpu/vendor/CMakeLists.txt
index f699ca103b5f..36087ade63bf 100644
--- a/libc/src/math/gpu/vendor/CMakeLists.txt
+++ b/libc/src/math/gpu/vendor/CMakeLists.txt
@@ -10,7 +10,6 @@ else()
"functions will be an external reference to the vendor libraries.")
endif()
-find_package(CUDAToolkit QUIET)
if(CUDAToolkit_FOUND)
set(libdevice_path ${CUDAToolkit_BIN_DIR}/../nvvm/libdevice/libdevice.10.bc)
if (EXISTS ${libdevice_path})
diff --git a/libc/src/stdio/CMakeLists.txt b/libc/src/stdio/CMakeLists.txt
index 380474ce2711..bb8e41606c5d 100644
--- a/libc/src/stdio/CMakeLists.txt
+++ b/libc/src/stdio/CMakeLists.txt
@@ -22,7 +22,7 @@ if(EXISTS ${CMAKE_CURRENT_SOURCE_DIR}/${LIBC_TARGET_OS})
add_subdirectory(${CMAKE_CURRENT_SOURCE_DIR}/${LIBC_TARGET_OS})
endif()
-if(NOT LIBC_TARGET_ARCHITECTURE_IS_GPU)
+if(NOT LIBC_TARGET_OS_IS_GPU)
add_subdirectory(${CMAKE_CURRENT_SOURCE_DIR}/generic)
endif()
diff --git a/libc/src/stdlib/CMakeLists.txt b/libc/src/stdlib/CMakeLists.txt
index a4d51fb9a11e..ce08635df314 100644
--- a/libc/src/stdlib/CMakeLists.txt
+++ b/libc/src/stdlib/CMakeLists.txt
@@ -316,7 +316,7 @@ if(LLVM_LIBC_INCLUDE_SCUDO)
DEPENDS
${SCUDO_DEPS}
)
-elseif(LIBC_TARGET_ARCHITECTURE_IS_GPU)
+elseif(LIBC_TARGET_OS_IS_GPU)
add_entrypoint_external(
calloc
)
@@ -397,7 +397,7 @@ add_entrypoint_object(
.${LIBC_TARGET_OS}.abort
)
-if(LIBC_TARGET_ARCHITECTURE_IS_GPU)
+if(LIBC_TARGET_OS_IS_GPU)
add_entrypoint_object(
malloc
ALIAS
diff --git a/libc/src/string/CMakeLists.txt b/libc/src/string/CMakeLists.txt
index 6daaf1998ea7..1c893280e8a3 100644
--- a/libc/src/string/CMakeLists.txt
+++ b/libc/src/string/CMakeLists.txt
@@ -501,7 +501,7 @@ if(${LIBC_TARGET_ARCHITECTURE_IS_X86})
add_bcmp(bcmp_x86_64_opt_avx512 COMPILE_OPTIONS -march=skylake-avx512 REQUIRE AVX512BW)
add_bcmp(bcmp_opt_host COMPILE_OPTIONS ${LIBC_COMPILE_OPTIONS_NATIVE})
add_bcmp(bcmp)
-elseif(LIBC_TARGET_ARCHITECTURE_IS_GPU)
+elseif(LIBC_TARGET_OS_IS_GPU)
add_bcmp(bcmp)
else()
add_bcmp(bcmp_opt_host COMPILE_OPTIONS ${LIBC_COMPILE_OPTIONS_NATIVE})
@@ -530,7 +530,7 @@ if(${LIBC_TARGET_ARCHITECTURE_IS_X86})
add_bzero(bzero_x86_64_opt_avx512 COMPILE_OPTIONS -march=skylake-avx512 REQUIRE AVX512F)
add_bzero(bzero_opt_host COMPILE_OPTIONS ${LIBC_COMPILE_OPTIONS_NATIVE})
add_bzero(bzero)
-elseif(LIBC_TARGET_ARCHITECTURE_IS_GPU)
+elseif(LIBC_TARGET_OS_IS_GPU)
add_bzero(bzero)
else()
add_bzero(bzero_opt_host COMPILE_OPTIONS ${LIBC_COMPILE_OPTIONS_NATIVE})
@@ -562,7 +562,7 @@ if(${LIBC_TARGET_ARCHITECTURE_IS_X86})
elseif(${LIBC_TARGET_ARCHITECTURE_IS_AARCH64})
add_memcmp(memcmp_opt_host COMPILE_OPTIONS ${LIBC_COMPILE_OPTIONS_NATIVE})
add_memcmp(memcmp)
-elseif(LIBC_TARGET_ARCHITECTURE_IS_GPU)
+elseif(LIBC_TARGET_OS_IS_GPU)
add_memcmp(memcmp)
else()
add_memcmp(memcmp_opt_host COMPILE_OPTIONS ${LIBC_COMPILE_OPTIONS_NATIVE})
@@ -598,7 +598,7 @@ elseif(${LIBC_TARGET_ARCHITECTURE_IS_AARCH64})
add_memcpy(memcpy_opt_host COMPILE_OPTIONS ${LIBC_COMPILE_OPTIONS_NATIVE}
MLLVM_COMPILE_OPTIONS "-tail-merge-threshold=0")
add_memcpy(memcpy MLLVM_COMPILE_OPTIONS "-tail-merge-threshold=0")
-elseif(LIBC_TARGET_ARCHITECTURE_IS_GPU)
+elseif(LIBC_TARGET_OS_IS_GPU)
add_memcpy(memcpy)
else()
add_memcpy(memcpy_opt_host COMPILE_OPTIONS ${LIBC_COMPILE_OPTIONS_NATIVE})
@@ -632,7 +632,7 @@ elseif(${LIBC_TARGET_ARCHITECTURE_IS_AARCH64})
add_memmove(memmove_opt_host COMPILE_OPTIONS ${LIBC_COMPILE_OPTIONS_NATIVE}
MLLVM_COMPILE_OPTIONS "-tail-merge-threshold=0")
add_memmove(memmove MLLVM_COMPILE_OPTIONS "-tail-merge-threshold=0")
-elseif(LIBC_TARGET_ARCHITECTURE_IS_GPU)
+elseif(LIBC_TARGET_OS_IS_GPU)
add_memmove(memmove)
else()
add_memmove(memmove_opt_host COMPILE_OPTIONS ${LIBC_COMPILE_OPTIONS_NATIVE})
@@ -667,7 +667,7 @@ elseif(${LIBC_TARGET_ARCHITECTURE_IS_AARCH64})
add_memset(memset_opt_host COMPILE_OPTIONS ${LIBC_COMPILE_OPTIONS_NATIVE}
MLLVM_COMPILE_OPTIONS "-tail-merge-threshold=0")
add_memset(memset MLLVM_COMPILE_OPTIONS "-tail-merge-threshold=0")
-elseif(LIBC_TARGET_ARCHITECTURE_IS_GPU)
+elseif(LIBC_TARGET_OS_IS_GPU)
add_memset(memset)
else()
add_memset(memset_opt_host COMPILE_OPTIONS ${LIBC_COMPILE_OPTIONS_NATIVE})
diff --git a/libc/startup/gpu/CMakeLists.txt b/libc/startup/gpu/CMakeLists.txt
index fa7f69f19520..6f67fa9ff44f 100644
--- a/libc/startup/gpu/CMakeLists.txt
+++ b/libc/startup/gpu/CMakeLists.txt
@@ -28,33 +28,24 @@ function(add_startup_object name)
)
endfunction()
-if(LIBC_GPU_TARGET_ARCHITECTURE_IS_AMDGPU)
- add_subdirectory(amdgpu)
-
- add_startup_object(
- crt1
- ALIAS
- DEPENDS
- .amdgpu.crt1
- )
-elseif(LIBC_GPU_TARGET_ARCHITECTURE_IS_NVPTX)
- add_subdirectory(nvptx)
-
- add_startup_object(
- crt1
- ALIAS
- DEPENDS
- .nvptx.crt1
- )
-else()
- # Skip building the startup code if there are no supported GPUs.
- message(STATUS "Skipping startup for gpu target, no GPUs were detected")
- return()
+if(EXISTS ${CMAKE_CURRENT_SOURCE_DIR}/${LIBC_TARGET_ARCHITECTURE})
+ add_subdirectory(${LIBC_TARGET_ARCHITECTURE})
endif()
+add_startup_object(
+ crt1
+ ALIAS
+ DEPENDS
+ .${LIBC_TARGET_ARCHITECTURE}.crt1
+)
+
add_custom_target(libc-startup)
set(startup_components crt1)
foreach(target IN LISTS startup_components)
set(fq_target_name libc.startup.gpu.${target})
add_dependencies(libc-startup ${fq_target_name})
+ install(FILES $<TARGET_OBJECTS:${fq_target_name}>
+ DESTINATION ${LIBC_INSTALL_LIBRARY_DIR}
+ RENAME $<TARGET_PROPERTY:${fq_target_name},OUTPUT_NAME>
+ COMPONENT libc)
endforeach()
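
# A minimal sketch of the install pattern used above, with a hypothetical
# OBJECT library: the object file produced by 'my_crt1' is installed under its
# OUTPUT_NAME rather than the mangled build-tree file name, mirroring the
# install(FILES $<TARGET_OBJECTS:...>) rule added in this patch.
add_library(my_crt1 OBJECT start.cpp) # 'start.cpp' is a placeholder source
set_target_properties(my_crt1 PROPERTIES OUTPUT_NAME crt1.o)
install(FILES $<TARGET_OBJECTS:my_crt1>
  DESTINATION lib
  RENAME $<TARGET_PROPERTY:my_crt1,OUTPUT_NAME>
  COMPONENT libc)
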
diff --git a/libc/startup/gpu/amdgpu/CMakeLists.txt b/libc/startup/gpu/amdgpu/CMakeLists.txt
index c9d0ee2fd0e9..3ac104ee8ba9 100644
--- a/libc/startup/gpu/amdgpu/CMakeLists.txt
+++ b/libc/startup/gpu/amdgpu/CMakeLists.txt
@@ -1,6 +1,5 @@
add_startup_object(
crt1
- NO_GPU_BUNDLE # Compile this file directly without special GPU handling.
SRC
start.cpp
DEPENDS
@@ -11,17 +10,5 @@ add_startup_object(
COMPILE_OPTIONS
-ffreestanding # To avoid compiler warnings about calling the main function.
-fno-builtin
- -mcode-object-version=${LIBC_GPU_CODE_OBJECT_VERSION} # Manually set the ABI.
)
get_fq_target_name(crt1 fq_name)
-
-# Ensure that clang uses the correct linker for this object type.
-target_link_libraries(
- ${fq_name}
- PUBLIC
- "-mcpu=${LIBC_GPU_TARGET_ARCHITECTURE}"
- "--target=${LIBC_GPU_TARGET_TRIPLE}"
- "-flto"
- "-Wl,-mllvm,-amdgpu-lower-global-ctor-dtor=0"
- "-Wl,-mllvm,-amdhsa-code-object-version=${LIBC_GPU_CODE_OBJECT_VERSION}"
-)
diff --git a/libc/startup/gpu/amdgpu/start.cpp b/libc/startup/gpu/amdgpu/start.cpp
index 9d7f04c10b48..dcef719d169d 100644
--- a/libc/startup/gpu/amdgpu/start.cpp
+++ b/libc/startup/gpu/amdgpu/start.cpp
@@ -15,12 +15,6 @@ extern "C" int main(int argc, char **argv, char **envp);
namespace LIBC_NAMESPACE {
-// The AMDGPU architecture provides a fixed frequency clock used for obtaining
-// real time. However, the frequency of this clock varies between cards and can
-// only be obtained via the driver. The loader will set this so we can use it.
-extern "C" [[gnu::visibility("protected")]] uint64_t
- [[clang::address_space(4)]] __llvm_libc_clock_freq = 0;
-
extern "C" uintptr_t __init_array_start[];
extern "C" uintptr_t __init_array_end[];
extern "C" uintptr_t __fini_array_start[];
diff --git a/libc/startup/gpu/nvptx/CMakeLists.txt b/libc/startup/gpu/nvptx/CMakeLists.txt
index 23a54516cc98..3ac104ee8ba9 100644
--- a/libc/startup/gpu/nvptx/CMakeLists.txt
+++ b/libc/startup/gpu/nvptx/CMakeLists.txt
@@ -1,6 +1,5 @@
add_startup_object(
crt1
- NO_GPU_BUNDLE # Compile this file directly without special GPU handling.
SRC
start.cpp
DEPENDS
@@ -13,11 +12,3 @@ add_startup_object(
-fno-builtin
)
get_fq_target_name(crt1 fq_name)
-
-# Ensure that clang uses the correct linker for this object type.
-target_link_libraries(${fq_name}
- PUBLIC
- "-march=${LIBC_GPU_TARGET_ARCHITECTURE}"
- "--target=${LIBC_GPU_TARGET_TRIPLE}"
- "--cuda-path=${LIBC_CUDA_ROOT}"
-)
diff --git a/libc/startup/linux/CMakeLists.txt b/libc/startup/linux/CMakeLists.txt
index 39bcca9cdba9..a287bc4d633d 100644
--- a/libc/startup/linux/CMakeLists.txt
+++ b/libc/startup/linux/CMakeLists.txt
@@ -131,7 +131,8 @@ foreach(target IN LISTS startup_components)
set(fq_target_name libc.startup.linux.${target})
add_dependencies(libc-startup ${fq_target_name})
install(FILES $<TARGET_OBJECTS:${fq_target_name}>
- DESTINATION ${CMAKE_INSTALL_LIBDIR}
+ DESTINATION ${LIBC_INSTALL_LIBRARY_DIR}
RENAME $<TARGET_PROPERTY:${fq_target_name},OUTPUT_NAME>
+ EXCLUDE_FROM_ALL
COMPONENT libc)
endforeach()
diff --git a/libc/test/CMakeLists.txt b/libc/test/CMakeLists.txt
index f22f2b183aca..745a9a04b4af 100644
--- a/libc/test/CMakeLists.txt
+++ b/libc/test/CMakeLists.txt
@@ -8,9 +8,9 @@ add_custom_target(libc-long-running-tests)
add_subdirectory(UnitTest)
-if(LIBC_TARGET_ARCHITECTURE_IS_GPU AND
- (NOT TARGET libc.utils.gpu.loader OR NOT TARGET libc.startup.gpu.crt1))
- message(WARNING "Cannot build libc GPU tests, missing loader implementation")
+if(LIBC_TARGET_OS_IS_GPU AND
+ (NOT TARGET libc.utils.gpu.loader OR LIBC_GPU_TESTS_DISABLED))
+ message(WARNING "Cannot build libc GPU tests, missing loader or architecture")
return()
endif()
diff --git a/libc/test/IntegrationTest/CMakeLists.txt b/libc/test/IntegrationTest/CMakeLists.txt
index dca4c5a6f1b1..4f31f10b29f0 100644
--- a/libc/test/IntegrationTest/CMakeLists.txt
+++ b/libc/test/IntegrationTest/CMakeLists.txt
@@ -1,21 +1,5 @@
-if(LIBC_GPU_TARGET_ARCHITECTURE_IS_AMDGPU)
- set(TEST_COMPILE_FLAGS
- -mcpu=${LIBC_GPU_TARGET_ARCHITECTURE}
- -emit-llvm # AMDGPU's intermediate object file format is bitcode.
- --target=${LIBC_GPU_TARGET_TRIPLE}
- -mcode-object-version=${LIBC_GPU_CODE_OBJECT_VERSION} # Manually set the ABI.
- )
-elseif(LIBC_GPU_TARGET_ARCHITECTURE_IS_NVPTX)
- set(TEST_COMPILE_FLAGS
- -march=${LIBC_GPU_TARGET_ARCHITECTURE}
- --target=${LIBC_GPU_TARGET_TRIPLE}
- --cuda-path=${LIBC_CUDA_ROOT}
- )
-endif()
-
add_object_library(
test
- NO_GPU_BUNDLE # Compile this file directly without special GPU handling.
SRCS
test.cpp
COMPILE_OPTIONS
diff --git a/libc/test/UnitTest/CMakeLists.txt b/libc/test/UnitTest/CMakeLists.txt
index 4a615d4bd5e1..4668f0061975 100644
--- a/libc/test/UnitTest/CMakeLists.txt
+++ b/libc/test/UnitTest/CMakeLists.txt
@@ -12,7 +12,7 @@ function(add_unittest_framework_library name)
endif()
# The Nvidia 'nvlink' linker does not support static libraries.
- if(LIBC_GPU_TARGET_ARCHITECTURE_IS_NVPTX)
+ if(LIBC_TARGET_ARCHITECTURE_IS_NVPTX)
set(library_type OBJECT)
else()
set(library_type STATIC)
diff --git a/libc/test/src/__support/CMakeLists.txt b/libc/test/src/__support/CMakeLists.txt
index 493ef9ddabe1..53fa1323d18b 100644
--- a/libc/test/src/__support/CMakeLists.txt
+++ b/libc/test/src/__support/CMakeLists.txt
@@ -1,7 +1,7 @@
add_custom_target(libc-support-tests)
# FIXME: These tests are currently broken on the GPU.
-if(NOT LIBC_TARGET_ARCHITECTURE_IS_GPU)
+if(NOT LIBC_TARGET_OS_IS_GPU)
add_libc_test(
blockstore_test
SUITE
@@ -76,7 +76,7 @@ add_libc_test(
)
# The GPU does not support varargs currently.
-if(NOT LIBC_TARGET_ARCHITECTURE_IS_GPU)
+if(NOT LIBC_TARGET_OS_IS_GPU)
add_libc_test(
arg_list_test
SUITE
@@ -88,8 +88,7 @@ if(NOT LIBC_TARGET_ARCHITECTURE_IS_GPU)
)
endif()
-# FIXME: Crash in NVPTX target lowering for calls
-if(NOT LIBC_GPU_TARGET_ARCHITECTURE_IS_NVPTX)
+if(NOT LIBC_TARGET_ARCHITECTURE_IS_NVPTX)
add_libc_test(
uint_test
SUITE
@@ -159,33 +158,38 @@ add_libc_test(
libc.src.__support.memory_size
)
-add_executable(
- libc_str_to_float_comparison_test
- str_to_float_comparison_test.cpp
-)
+# FIXME: We shouldn't create regular executables here because the tests may be
+# cross-compiled and run through an emulator.
+if(NOT LIBC_TARGET_OS_IS_GPU)
+ add_executable(
+ libc_str_to_float_comparison_test
+ str_to_float_comparison_test.cpp
+ )
-target_link_libraries(libc_str_to_float_comparison_test
- PRIVATE
- "${LIBC_TARGET}"
-)
+ target_link_libraries(libc_str_to_float_comparison_test
+ PRIVATE
+ "${LIBC_TARGET}"
+ )
-add_executable(
- libc_system_str_to_float_comparison_test
- str_to_float_comparison_test.cpp
-)
+ add_executable(
+ libc_system_str_to_float_comparison_test
+ str_to_float_comparison_test.cpp
+ )
-set(float_test_file ${CMAKE_CURRENT_SOURCE_DIR}/str_to_float_comparison_data.txt)
+ set(float_test_file ${CMAKE_CURRENT_SOURCE_DIR}/str_to_float_comparison_data.txt)
-add_custom_command(TARGET libc_str_to_float_comparison_test
- POST_BUILD
- COMMAND $<TARGET_FILE:libc_str_to_float_comparison_test> ${float_test_file}
- DEPENDS ${float_test_file}
- COMMENT "Test the strtof and strtod implementations against precomputed results."
- VERBATIM)
+ add_custom_command(TARGET libc_str_to_float_comparison_test
+ POST_BUILD
+ COMMAND $<TARGET_FILE:libc_str_to_float_comparison_test> ${float_test_file}
+ DEPENDS ${float_test_file}
+ COMMENT "Test the strtof and strtod implementations against precomputed results."
+ VERBATIM)
+endif()
add_subdirectory(CPP)
add_subdirectory(File)
add_subdirectory(RPC)
add_subdirectory(OSUtil)
add_subdirectory(FPUtil)
+add_subdirectory(fixed_point)
add_subdirectory(HashTable)
diff --git a/libc/test/src/__support/CPP/CMakeLists.txt b/libc/test/src/__support/CPP/CMakeLists.txt
index 6927579289bc..d7f332f5b0fb 100644
--- a/libc/test/src/__support/CPP/CMakeLists.txt
+++ b/libc/test/src/__support/CPP/CMakeLists.txt
@@ -64,7 +64,7 @@ add_libc_test(
# This test fails with invalid address space operations on sm_60
-if(NOT LIBC_GPU_TARGET_ARCHITECTURE_IS_NVPTX)
+if(NOT LIBC_TARGET_ARCHITECTURE_IS_NVPTX)
add_libc_test(
atomic_test
SUITE
diff --git a/libc/test/src/__support/FPUtil/fpbits_test.cpp b/libc/test/src/__support/FPUtil/fpbits_test.cpp
index 46f7d2505968..4f9f53afe547 100644
--- a/libc/test/src/__support/FPUtil/fpbits_test.cpp
+++ b/libc/test/src/__support/FPUtil/fpbits_test.cpp
@@ -1,4 +1,4 @@
-//===-- Unittests for the DyadicFloat class -------------------------------===//
+//===-- Unittests for the FPBits class ------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
diff --git a/libc/test/src/__support/File/CMakeLists.txt b/libc/test/src/__support/File/CMakeLists.txt
index f193480c60c2..9191469b4927 100644
--- a/libc/test/src/__support/File/CMakeLists.txt
+++ b/libc/test/src/__support/File/CMakeLists.txt
@@ -1,4 +1,4 @@
-if(NOT (TARGET libc.src.__support.threads.mutex) OR LIBC_TARGET_ARCHITECTURE_IS_GPU)
+if(NOT (TARGET libc.src.__support.threads.mutex) OR LIBC_TARGET_OS_IS_GPU)
# Not all platforms have a mutex implementation. If mutex is unvailable,
# we just skip everything about files. The GPU does not currently support
# files as well.
diff --git a/libc/test/src/__support/fixed_point/CMakeLists.txt b/libc/test/src/__support/fixed_point/CMakeLists.txt
new file mode 100644
index 000000000000..384cc9394ee7
--- /dev/null
+++ b/libc/test/src/__support/fixed_point/CMakeLists.txt
@@ -0,0 +1,16 @@
+if(NOT LIBC_COMPILER_HAS_FIXED_POINT)
+ return()
+endif()
+
+add_custom_target(libc-fixed-point-tests)
+
+add_libc_test(
+ fx_bits_test
+ SUITE
+ libc-fixed-point-tests
+ SRCS
+ fx_bits_test.cpp
+ DEPENDS
+ libc.src.__support.fixed_point.fx_bits
+ libc.src.__support.integer_literals
+)
diff --git a/libc/test/src/__support/fixed_point/fx_bits_test.cpp b/libc/test/src/__support/fixed_point/fx_bits_test.cpp
new file mode 100644
index 000000000000..58627816eb8d
--- /dev/null
+++ b/libc/test/src/__support/fixed_point/fx_bits_test.cpp
@@ -0,0 +1,348 @@
+//===-- Unittests for the FXBits class ------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "include/llvm-libc-macros/stdfix-macros.h"
+
+#include "src/__support/fixed_point/fx_bits.h"
+#include "src/__support/integer_literals.h"
+#include "test/UnitTest/Test.h"
+
+using LIBC_NAMESPACE::fixed_point::FXBits;
+using LIBC_NAMESPACE::fixed_point::FXRep;
+
+using LIBC_NAMESPACE::operator""_u8;
+using LIBC_NAMESPACE::operator""_u16;
+using LIBC_NAMESPACE::operator""_u32;
+using LIBC_NAMESPACE::operator""_u64;
+
+// -------------------------------- SHORT TESTS --------------------------------
+
+TEST(LlvmLibcFxBitsTest, FXBits_UnsignedShortFract) {
+ auto bits_var = FXBits<unsigned short fract>(0b00000000_u8);
+
+ EXPECT_EQ(bits_var.get_sign(), false);
+ EXPECT_EQ(bits_var.get_integral(), 0x00_u8);
+ EXPECT_EQ(bits_var.get_fraction(), 0x00_u8);
+
+ // Since an unsigned fract has no sign or integral components, setting either
+ // should have no effect.
+
+ bits_var.set_sign(true);
+
+ EXPECT_EQ(bits_var.get_sign(), false);
+ EXPECT_EQ(bits_var.get_integral(), 0x00_u8);
+ EXPECT_EQ(bits_var.get_fraction(), 0x00_u8);
+
+ bits_var.set_integral(0xab);
+
+ EXPECT_EQ(bits_var.get_sign(), false);
+ EXPECT_EQ(bits_var.get_integral(), 0x00_u8);
+ EXPECT_EQ(bits_var.get_fraction(), 0x00_u8);
+
+ // but setting the fraction should work
+
+ bits_var.set_fraction(0xcd);
+
+ EXPECT_EQ(bits_var.get_sign(), false);
+ EXPECT_EQ(bits_var.get_integral(), 0x00_u8);
+ EXPECT_EQ(bits_var.get_fraction(), 0xcd_u8);
+}
+
+TEST(LlvmLibcFxBitsTest, FXBits_UnsignedShortAccum) {
+ auto bits_var = FXBits<unsigned short accum>(0b00000000'00000000_u16);
+
+ EXPECT_EQ(bits_var.get_sign(), false);
+ EXPECT_EQ(bits_var.get_integral(), 0x0000_u16);
+ EXPECT_EQ(bits_var.get_fraction(), 0x0000_u16);
+
+ bits_var.set_sign(true); // 0 sign bits used
+
+ EXPECT_EQ(bits_var.get_sign(), false);
+ EXPECT_EQ(bits_var.get_integral(), 0x0000_u16);
+ EXPECT_EQ(bits_var.get_fraction(), 0x0000_u16);
+
+ bits_var.set_integral(0xabcd); // 8 integral bits used
+
+ EXPECT_EQ(bits_var.get_sign(), false);
+ EXPECT_EQ(bits_var.get_integral(), 0x00cd_u16);
+ EXPECT_EQ(bits_var.get_fraction(), 0x0000_u16);
+
+ bits_var.set_fraction(0x21fe); // 8 fractional bits used
+
+ EXPECT_EQ(bits_var.get_sign(), false);
+ EXPECT_EQ(bits_var.get_integral(), 0x00cd_u16);
+ EXPECT_EQ(bits_var.get_fraction(), 0x00fe_u16);
+}
+
+TEST(LlvmLibcFxBitsTest, FXBits_ShortFract) {
+ auto bits_var = FXBits<short fract>(0b0'0000000_u8);
+
+ EXPECT_EQ(bits_var.get_sign(), false);
+ EXPECT_EQ(bits_var.get_integral(), 0x00_u8);
+ EXPECT_EQ(bits_var.get_fraction(), 0x00_u8);
+
+ bits_var.set_sign(true); // 1 sign bit used
+
+ EXPECT_EQ(bits_var.get_sign(), true);
+ EXPECT_EQ(bits_var.get_integral(), 0x00_u8);
+ EXPECT_EQ(bits_var.get_fraction(), 0x00_u8);
+
+ bits_var.set_integral(0xab); // 0 integral bits used
+
+ EXPECT_EQ(bits_var.get_sign(), true);
+ EXPECT_EQ(bits_var.get_integral(), 0x00_u8);
+ EXPECT_EQ(bits_var.get_fraction(), 0x00_u8);
+
+ bits_var.set_fraction(0xcd); // 7 fractional bits used
+
+ EXPECT_EQ(bits_var.get_sign(), true);
+ EXPECT_EQ(bits_var.get_integral(), 0x00_u8);
+ EXPECT_EQ(bits_var.get_fraction(), 0x4d_u8);
+}
+
+TEST(LlvmLibcFxBitsTest, FXBits_ShortAccum) {
+ auto bits_var = FXBits<short accum>(0b0'00000000'0000000_u16);
+
+ EXPECT_EQ(bits_var.get_sign(), false);
+ EXPECT_EQ(bits_var.get_integral(), 0x0000_u16);
+ EXPECT_EQ(bits_var.get_fraction(), 0x0000_u16);
+
+ bits_var.set_sign(true); // 1 sign bit used
+
+ EXPECT_EQ(bits_var.get_sign(), true);
+ EXPECT_EQ(bits_var.get_integral(), 0x0000_u16);
+ EXPECT_EQ(bits_var.get_fraction(), 0x0000_u16);
+
+ bits_var.set_integral(0xabcd); // 8 integral bits used
+
+ EXPECT_EQ(bits_var.get_sign(), true);
+ EXPECT_EQ(bits_var.get_integral(), 0x00cd_u16);
+ EXPECT_EQ(bits_var.get_fraction(), 0x0000_u16);
+
+ bits_var.set_fraction(0x21fe); // 7 fractional bits used
+
+ EXPECT_EQ(bits_var.get_sign(), true);
+ EXPECT_EQ(bits_var.get_integral(), 0x00cd_u16);
+ EXPECT_EQ(bits_var.get_fraction(), 0x007e_u16);
+}
+
+TEST(LlvmLibcFxBitsTest, FXBits_UnsignedFract) {
+ auto bits_var = FXBits<unsigned fract>(0b0000000000000000_u16);
+
+ EXPECT_EQ(bits_var.get_sign(), false);
+ EXPECT_EQ(bits_var.get_integral(), 0x0000_u16);
+ EXPECT_EQ(bits_var.get_fraction(), 0x0000_u16);
+
+ bits_var.set_sign(true); // 0 sign bits used
+
+ EXPECT_EQ(bits_var.get_sign(), false);
+ EXPECT_EQ(bits_var.get_integral(), 0x0000_u16);
+ EXPECT_EQ(bits_var.get_fraction(), 0x0000_u16);
+
+ bits_var.set_integral(0xabcd); // 0 integral bits used
+
+ EXPECT_EQ(bits_var.get_sign(), false);
+ EXPECT_EQ(bits_var.get_integral(), 0x0000_u16);
+ EXPECT_EQ(bits_var.get_fraction(), 0x0000_u16);
+
+ bits_var.set_fraction(0xef12); // 16 fractional bits used
+
+ EXPECT_EQ(bits_var.get_sign(), false);
+ EXPECT_EQ(bits_var.get_integral(), 0x0000_u16);
+ EXPECT_EQ(bits_var.get_fraction(), 0xef12_u16);
+}
+
+// -------------------------------- NORMAL TESTS -------------------------------
+
+TEST(LlvmLibcFxBitsTest, FXBits_UnsignedAccum) {
+ auto bits_var =
+ FXBits<unsigned accum>(0b0000000000000000'0000000000000000_u32);
+
+ EXPECT_EQ(bits_var.get_sign(), false);
+ EXPECT_EQ(bits_var.get_integral(), 0x00000000_u32);
+ EXPECT_EQ(bits_var.get_fraction(), 0x00000000_u32);
+
+ bits_var.set_sign(true); // 0 sign bits used
+
+ EXPECT_EQ(bits_var.get_sign(), false);
+ EXPECT_EQ(bits_var.get_integral(), 0x00000000_u32);
+ EXPECT_EQ(bits_var.get_fraction(), 0x00000000_u32);
+
+ bits_var.set_integral(0xabcd); // 16 integral bits used
+
+ EXPECT_EQ(bits_var.get_sign(), false);
+ EXPECT_EQ(bits_var.get_integral(), 0x0000abcd_u32);
+ EXPECT_EQ(bits_var.get_fraction(), 0x00000000_u32);
+
+ bits_var.set_fraction(0xef12); // 16 fractional bits used
+
+ EXPECT_EQ(bits_var.get_sign(), false);
+ EXPECT_EQ(bits_var.get_integral(), 0x0000abcd_u32);
+ EXPECT_EQ(bits_var.get_fraction(), 0x0000ef12_u32);
+}
+
+TEST(LlvmLibcFxBitsTest, FXBits_Fract) {
+ auto bits_var = FXBits<fract>(0b0'000000000000000_u16);
+
+ EXPECT_EQ(bits_var.get_sign(), false);
+ EXPECT_EQ(bits_var.get_integral(), 0x0000_u16);
+ EXPECT_EQ(bits_var.get_fraction(), 0x0000_u16);
+
+ bits_var.set_sign(true); // 1 sign bit used
+
+ EXPECT_EQ(bits_var.get_sign(), true);
+ EXPECT_EQ(bits_var.get_integral(), 0x0000_u16);
+ EXPECT_EQ(bits_var.get_fraction(), 0x0000_u16);
+
+ bits_var.set_integral(0xabcd); // 0 integral bits used
+
+ EXPECT_EQ(bits_var.get_sign(), true);
+ EXPECT_EQ(bits_var.get_integral(), 0x0000_u16);
+ EXPECT_EQ(bits_var.get_fraction(), 0x0000_u16);
+
+ bits_var.set_fraction(0xef12); // 15 fractional bits used
+
+ EXPECT_EQ(bits_var.get_sign(), true);
+ EXPECT_EQ(bits_var.get_integral(), 0x0000_u16);
+ EXPECT_EQ(bits_var.get_fraction(), 0x6f12_u16);
+}
+
+TEST(LlvmLibcFxBitsTest, FXBits_Accum) {
+ auto bits_var = FXBits<accum>(0b0'0000000000000000'000000000000000_u32);
+
+ EXPECT_EQ(bits_var.get_sign(), false);
+ EXPECT_EQ(bits_var.get_integral(), 0x00000000_u32);
+ EXPECT_EQ(bits_var.get_fraction(), 0x00000000_u32);
+
+ bits_var.set_sign(true); // 1 sign bit used
+
+ EXPECT_EQ(bits_var.get_sign(), true);
+ EXPECT_EQ(bits_var.get_integral(), 0x00000000_u32);
+ EXPECT_EQ(bits_var.get_fraction(), 0x00000000_u32);
+
+ bits_var.set_integral(0xabcd); // 16 integral bits used
+
+ EXPECT_EQ(bits_var.get_sign(), true);
+ EXPECT_EQ(bits_var.get_integral(), 0x0000abcd_u32);
+ EXPECT_EQ(bits_var.get_fraction(), 0x00000000_u32);
+
+ bits_var.set_fraction(0xef12); // 15 fractional bits used
+
+ EXPECT_EQ(bits_var.get_sign(), true);
+ EXPECT_EQ(bits_var.get_integral(), 0x0000abcd_u32);
+ EXPECT_EQ(bits_var.get_fraction(), 0x00006f12_u32);
+}
+
+// --------------------------------- LONG TESTS --------------------------------
+
+TEST(LlvmLibcFxBitsTest, FXBits_UnsignedLongFract) {
+ auto bits_var =
+ FXBits<unsigned long fract>(0b00000000000000000000000000000000_u32);
+
+ EXPECT_EQ(bits_var.get_sign(), false);
+ EXPECT_EQ(bits_var.get_integral(), 0x00000000_u32);
+ EXPECT_EQ(bits_var.get_fraction(), 0x00000000_u32);
+
+ bits_var.set_sign(true); // 0 sign bits used
+
+ EXPECT_EQ(bits_var.get_sign(), false);
+ EXPECT_EQ(bits_var.get_integral(), 0x00000000_u32);
+ EXPECT_EQ(bits_var.get_fraction(), 0x00000000_u32);
+
+ bits_var.set_integral(0xabcdef12); // 0 integral bits used
+
+ EXPECT_EQ(bits_var.get_sign(), false);
+ EXPECT_EQ(bits_var.get_integral(), 0x00000000_u32);
+ EXPECT_EQ(bits_var.get_fraction(), 0x00000000_u32);
+
+  bits_var.set_fraction(0xfedcba98); // 32 fractional bits used
+
+ EXPECT_EQ(bits_var.get_sign(), false);
+ EXPECT_EQ(bits_var.get_integral(), 0x00000000_u32);
+ EXPECT_EQ(bits_var.get_fraction(), 0xfedcba98_u32);
+}
+
+TEST(LlvmLibcFxBitsTest, FXBits_UnsignedLongAccum) {
+ auto bits_var = FXBits<unsigned long accum>(
+ 0b00000000000000000000000000000000'00000000000000000000000000000000_u64);
+
+ EXPECT_EQ(bits_var.get_sign(), false);
+ EXPECT_EQ(bits_var.get_integral(), 0x0000000000000000_u64);
+ EXPECT_EQ(bits_var.get_fraction(), 0x0000000000000000_u64);
+
+ bits_var.set_sign(true); // 0 sign bits used
+
+ EXPECT_EQ(bits_var.get_sign(), false);
+ EXPECT_EQ(bits_var.get_integral(), 0x0000000000000000_u64);
+ EXPECT_EQ(bits_var.get_fraction(), 0x0000000000000000_u64);
+
+ bits_var.set_integral(0xabcdef12); // 32 integral bits used
+
+ EXPECT_EQ(bits_var.get_sign(), false);
+ EXPECT_EQ(bits_var.get_integral(), 0x00000000abcdef12_u64);
+ EXPECT_EQ(bits_var.get_fraction(), 0x0000000000000000_u64);
+
+ bits_var.set_fraction(0xfedcba98); // 32 fractional bits used
+
+ EXPECT_EQ(bits_var.get_sign(), false);
+ EXPECT_EQ(bits_var.get_integral(), 0x00000000abcdef12_u64);
+ EXPECT_EQ(bits_var.get_fraction(), 0x00000000fedcba98_u64);
+}
+
+TEST(LlvmLibcFxBitsTest, FXBits_LongFract) {
+ auto bits_var = FXBits<long fract>(0b0'0000000000000000000000000000000_u32);
+
+ EXPECT_EQ(bits_var.get_sign(), false);
+ EXPECT_EQ(bits_var.get_integral(), 0x00000000_u32);
+ EXPECT_EQ(bits_var.get_fraction(), 0x00000000_u32);
+
+ bits_var.set_sign(true); // 1 sign bit used
+
+ EXPECT_EQ(bits_var.get_sign(), true);
+ EXPECT_EQ(bits_var.get_integral(), 0x00000000_u32);
+ EXPECT_EQ(bits_var.get_fraction(), 0x00000000_u32);
+
+ bits_var.set_integral(0xabcdef12); // 0 integral bits used
+
+ EXPECT_EQ(bits_var.get_sign(), true);
+ EXPECT_EQ(bits_var.get_integral(), 0x00000000_u32);
+ EXPECT_EQ(bits_var.get_fraction(), 0x00000000_u32);
+
+ bits_var.set_fraction(0xfedcba98); // 31 fractional bits used
+
+ EXPECT_EQ(bits_var.get_sign(), true);
+ EXPECT_EQ(bits_var.get_integral(), 0x00000000_u32);
+ EXPECT_EQ(bits_var.get_fraction(), 0x7edcba98_u32);
+}
+
+TEST(LlvmLibcFxBitsTest, FXBits_LongAccum) {
+ auto bits_var = FXBits<long accum>(
+ 0b0'00000000000000000000000000000000'0000000000000000000000000000000_u64);
+
+ EXPECT_EQ(bits_var.get_sign(), false);
+ EXPECT_EQ(bits_var.get_integral(), 0x0000000000000000_u64);
+ EXPECT_EQ(bits_var.get_fraction(), 0x0000000000000000_u64);
+
+ bits_var.set_sign(true); // 1 sign bit used
+
+ EXPECT_EQ(bits_var.get_sign(), true);
+ EXPECT_EQ(bits_var.get_integral(), 0x0000000000000000_u64);
+ EXPECT_EQ(bits_var.get_fraction(), 0x0000000000000000_u64);
+
+ bits_var.set_integral(0xabcdef12); // 32 integral bits used
+
+ EXPECT_EQ(bits_var.get_sign(), true);
+ EXPECT_EQ(bits_var.get_integral(), 0x00000000abcdef12_u64);
+ EXPECT_EQ(bits_var.get_fraction(), 0x0000000000000000_u64);
+
+ bits_var.set_fraction(0xfedcba98); // 31 fractional bits used
+
+ EXPECT_EQ(bits_var.get_sign(), true);
+ EXPECT_EQ(bits_var.get_integral(), 0x00000000abcdef12_u64);
+ EXPECT_EQ(bits_var.get_fraction(), 0x000000007edcba98_u64);
+}
diff --git a/libc/test/src/errno/CMakeLists.txt b/libc/test/src/errno/CMakeLists.txt
index 633d46a1f5f8..b73962fb4de4 100644
--- a/libc/test/src/errno/CMakeLists.txt
+++ b/libc/test/src/errno/CMakeLists.txt
@@ -1,4 +1,4 @@
-if(NOT LLVM_LIBC_FULL_BUILD OR LIBC_TARGET_ARCHITECTURE_IS_GPU)
+if(NOT LLVM_LIBC_FULL_BUILD OR LIBC_TARGET_OS_IS_GPU)
return()
endif()
diff --git a/libc/test/src/math/CMakeLists.txt b/libc/test/src/math/CMakeLists.txt
index 8c105515e352..81d2e1e55b55 100644
--- a/libc/test/src/math/CMakeLists.txt
+++ b/libc/test/src/math/CMakeLists.txt
@@ -1,10 +1,14 @@
add_custom_target(libc-math-unittests)
-add_library(
- libc_math_test_utils
- RandUtils.cpp
- RandUtils.h
-)
+# FIXME: We shouldn't create regular libraries here because the tests may be
+# cross-compiled and run through an emulator.
+if(NOT LIBC_TARGET_OS_IS_GPU)
+ add_library(
+ libc_math_test_utils
+ RandUtils.cpp
+ RandUtils.h
+ )
+endif()
add_fp_unittest(
cosf_test
@@ -755,7 +759,7 @@ add_fp_unittest(
)
# FIXME: These tests are currently broken for NVPTX.
-if(NOT LIBC_GPU_TARGET_ARCHITECTURE_IS_NVPTX)
+if(NOT LIBC_TARGET_ARCHITECTURE_IS_NVPTX)
add_fp_unittest(
ilogb_test
SUITE
@@ -986,7 +990,7 @@ add_fp_unittest(
)
# FIXME: These tests are currently broken on the GPU.
-if(NOT LIBC_TARGET_ARCHITECTURE_IS_GPU)
+if(NOT LIBC_TARGET_OS_IS_GPU)
add_fp_unittest(
fminf_test
SUITE
@@ -1231,7 +1235,7 @@ add_fp_unittest(
)
# FIXME: These tests are currently spurious for NVPTX.
-if(NOT LIBC_GPU_TARGET_ARCHITECTURE_IS_NVPTX)
+if(NOT LIBC_TARGET_ARCHITECTURE_IS_NVPTX)
add_fp_unittest(
nextafter_test
SUITE
diff --git a/libc/test/src/math/smoke/CMakeLists.txt b/libc/test/src/math/smoke/CMakeLists.txt
index 1824c672cb97..2d24b5a76b01 100644
--- a/libc/test/src/math/smoke/CMakeLists.txt
+++ b/libc/test/src/math/smoke/CMakeLists.txt
@@ -819,7 +819,7 @@ add_fp_unittest(
)
# FIXME: These tests are currently broken for NVPTX.
-if(NOT LIBC_GPU_TARGET_ARCHITECTURE_IS_NVPTX)
+if(NOT LIBC_TARGET_ARCHITECTURE_IS_NVPTX)
add_fp_unittest(
ilogb_test
SUITE
@@ -1073,7 +1073,7 @@ add_fp_unittest(
)
# FIXME: These tests are currently broken on the GPU.
-if(NOT LIBC_TARGET_ARCHITECTURE_IS_GPU)
+if(NOT LIBC_TARGET_OS_IS_GPU)
add_fp_unittest(
fminf_test
SUITE
@@ -1417,7 +1417,7 @@ add_fp_unittest(
)
# FIXME: These tests are currently spurious for NVPTX.
-if(NOT LIBC_GPU_TARGET_ARCHITECTURE_IS_NVPTX)
+if(NOT LIBC_TARGET_ARCHITECTURE_IS_NVPTX)
add_fp_unittest(
nextafter_test
SUITE
@@ -1465,7 +1465,7 @@ add_fp_unittest(
)
# FIXME: These tests are currently spurious for the GPU.
-if(NOT LIBC_TARGET_ARCHITECTURE_IS_GPU)
+if(NOT LIBC_TARGET_OS_IS_GPU)
add_fp_unittest(
nexttoward_test
SUITE
diff --git a/libc/test/src/stdio/CMakeLists.txt b/libc/test/src/stdio/CMakeLists.txt
index 8db2293ab74a..93c21aa994ef 100644
--- a/libc/test/src/stdio/CMakeLists.txt
+++ b/libc/test/src/stdio/CMakeLists.txt
@@ -430,7 +430,7 @@ add_libc_test(
# Create an output directory for any temporary test files.
file(MAKE_DIRECTORY ${CMAKE_CURRENT_BINARY_DIR}/testdata)
-if(LIBC_TARGET_ARCHITECTURE_IS_GPU)
+if(LIBC_TARGET_OS_IS_GPU)
return()
endif()
diff --git a/libc/test/src/stdlib/CMakeLists.txt b/libc/test/src/stdlib/CMakeLists.txt
index da07dbbe7977..5826cfe8d4ca 100644
--- a/libc/test/src/stdlib/CMakeLists.txt
+++ b/libc/test/src/stdlib/CMakeLists.txt
@@ -55,7 +55,7 @@ add_libc_test(
)
# This fails on NVPTX where the output value is one-off of the expected value.
-if(NOT LIBC_GPU_TARGET_ARCHITECTURE_IS_NVPTX)
+if(NOT LIBC_TARGET_ARCHITECTURE_IS_NVPTX)
add_fp_unittest(
strtod_test
SUITE
@@ -127,7 +127,7 @@ add_libc_test(
)
# This fails on NVPTX where the output value is one-off of the expected value.
-if(NOT LIBC_GPU_TARGET_ARCHITECTURE_IS_NVPTX)
+if(NOT LIBC_TARGET_ARCHITECTURE_IS_NVPTX)
add_libc_test(
strtold_test
SUITE
@@ -339,7 +339,7 @@ if(LLVM_LIBC_FULL_BUILD)
)
# Only the GPU has an in-tree 'malloc' implementation.
- if(LIBC_TARGET_ARCHITECTURE_IS_GPU)
+ if(LIBC_TARGET_OS_IS_GPU)
add_libc_test(
malloc_test
HERMETIC_TEST_ONLY
diff --git a/libc/test/utils/UnitTest/CMakeLists.txt b/libc/test/utils/UnitTest/CMakeLists.txt
index 6f61e0ffefb0..3b917e06cde2 100644
--- a/libc/test/utils/UnitTest/CMakeLists.txt
+++ b/libc/test/utils/UnitTest/CMakeLists.txt
@@ -1,4 +1,4 @@
-if(LIBC_TARGET_ARCHITECTURE_IS_GPU)
+if(LIBC_TARGET_OS_IS_GPU)
return()
endif()
diff --git a/libc/utils/CMakeLists.txt b/libc/utils/CMakeLists.txt
index 9754dcf3854a..7bf02a4af7de 100644
--- a/libc/utils/CMakeLists.txt
+++ b/libc/utils/CMakeLists.txt
@@ -1,6 +1,6 @@
if(LLVM_INCLUDE_TESTS)
add_subdirectory(MPFRWrapper)
endif()
-if(LIBC_TARGET_ARCHITECTURE_IS_GPU)
+if(LIBC_TARGET_OS_IS_GPU)
add_subdirectory(gpu)
endif()
diff --git a/libc/utils/MPFRWrapper/CMakeLists.txt b/libc/utils/MPFRWrapper/CMakeLists.txt
index adc073c9a91f..6f44ca0d786c 100644
--- a/libc/utils/MPFRWrapper/CMakeLists.txt
+++ b/libc/utils/MPFRWrapper/CMakeLists.txt
@@ -24,6 +24,6 @@ if(LIBC_TESTS_CAN_USE_MPFR)
target_link_directories(libcMPFRWrapper PUBLIC ${LLVM_LIBC_MPFR_INSTALL_PATH}/lib)
endif()
target_link_libraries(libcMPFRWrapper PUBLIC LibcFPTestHelpers.unit LibcTest.unit mpfr gmp)
-elseif(NOT LIBC_TARGET_ARCHITECTURE_IS_GPU)
+elseif(NOT LIBC_TARGET_OS_IS_GPU)
message(WARNING "Math tests using MPFR will be skipped.")
endif()
diff --git a/libc/utils/gpu/CMakeLists.txt b/libc/utils/gpu/CMakeLists.txt
index 7c15f36052cf..4d1ebcfb9f8e 100644
--- a/libc/utils/gpu/CMakeLists.txt
+++ b/libc/utils/gpu/CMakeLists.txt
@@ -1,2 +1,4 @@
add_subdirectory(server)
-add_subdirectory(loader)
+if(LIBC_TARGET_OS_IS_GPU)
+ add_subdirectory(loader)
+endif()
diff --git a/libc/utils/gpu/loader/CMakeLists.txt b/libc/utils/gpu/loader/CMakeLists.txt
index f195b887c9af..189460bb02e6 100644
--- a/libc/utils/gpu/loader/CMakeLists.txt
+++ b/libc/utils/gpu/loader/CMakeLists.txt
@@ -1,31 +1,30 @@
add_library(gpu_loader OBJECT Main.cpp)
+
target_include_directories(gpu_loader PUBLIC
${CMAKE_CURRENT_SOURCE_DIR}
${LIBC_SOURCE_DIR}/include
${LIBC_SOURCE_DIR}
)
+# This utility needs to be compiled for the host system when cross-compiling.
+if(LLVM_RUNTIMES_TARGET OR LIBC_TARGET_TRIPLE)
+ target_compile_options(gpu_loader PUBLIC --target=${LLVM_HOST_TRIPLE})
+ target_link_libraries(gpu_loader PUBLIC "--target=${LLVM_HOST_TRIPLE}")
+endif()
+
find_package(hsa-runtime64 QUIET 1.2.0 HINTS ${CMAKE_INSTALL_PREFIX} PATHS /opt/rocm)
-if(hsa-runtime64_FOUND)
+if(hsa-runtime64_FOUND AND LIBC_TARGET_ARCHITECTURE_IS_AMDGPU)
add_subdirectory(amdgpu)
-else()
+elseif(LIBC_TARGET_ARCHITECTURE_IS_AMDGPU)
message(STATUS "Skipping HSA loader for gpu target, no HSA was detected")
endif()
-find_package(CUDAToolkit QUIET)
# The CUDA loader requires LLVM to traverse the ELF image for symbols.
find_package(LLVM QUIET)
-if(CUDAToolkit_FOUND AND LLVM_FOUND AND
- "${CUDAToolkit_VERSION}" VERSION_GREATER_EQUAL "11.2")
+if(CUDAToolkit_FOUND AND LLVM_FOUND AND LIBC_TARGET_ARCHITECTURE_IS_NVPTX)
add_subdirectory(nvptx)
-else()
- if("${CUDAToolkit_VERSION}" VERSION_LESS "11.2")
- message(WARNING
- "Skipping CUDA loader for gpu target, CUDA must be version 11.2 or later.
- Found CUDA Version ${CUDAToolkit_VERSION}")
- else()
- message(STATUS "Skipping CUDA loader for gpu target, no CUDA was detected")
- endif()
+elseif(LIBC_TARGET_ARCHITECTURE_IS_NVPTX)
+ message(STATUS "Skipping CUDA loader for gpu target, no CUDA was detected")
endif()
# Add a custom target to be used for testing.
@@ -37,20 +36,31 @@ if(LIBC_GPU_LOADER_EXECUTABLE)
PROPERTIES
EXECUTABLE "${LIBC_GPU_LOADER_EXECUTABLE}"
)
-elseif(TARGET amdhsa_loader AND LIBC_GPU_TARGET_ARCHITECTURE_IS_AMDGPU)
+elseif(TARGET amdhsa-loader AND LIBC_TARGET_ARCHITECTURE_IS_AMDGPU)
add_custom_target(libc.utils.gpu.loader)
- add_dependencies(libc.utils.gpu.loader amdhsa_loader)
+ add_dependencies(libc.utils.gpu.loader amdhsa-loader)
set_target_properties(
libc.utils.gpu.loader
PROPERTIES
- EXECUTABLE "$<TARGET_FILE:amdhsa_loader>"
+ TARGET amdhsa-loader
+ EXECUTABLE "$<TARGET_FILE:amdhsa-loader>"
)
-elseif(TARGET nvptx_loader AND LIBC_GPU_TARGET_ARCHITECTURE_IS_NVPTX)
+elseif(TARGET nvptx-loader AND LIBC_TARGET_ARCHITECTURE_IS_NVPTX)
add_custom_target(libc.utils.gpu.loader)
- add_dependencies(libc.utils.gpu.loader nvptx_loader)
+ add_dependencies(libc.utils.gpu.loader nvptx-loader)
set_target_properties(
libc.utils.gpu.loader
PROPERTIES
- EXECUTABLE "$<TARGET_FILE:nvptx_loader>"
+ TARGET nvptx-loader
+ EXECUTABLE "$<TARGET_FILE:nvptx-loader>"
)
endif()
+
+if(TARGET libc.utils.gpu.loader)
+ get_target_property(gpu_loader_tgt libc.utils.gpu.loader "TARGET")
+ if(gpu_loader_tgt)
+ install(TARGETS ${gpu_loader_tgt}
+ DESTINATION ${CMAKE_INSTALL_BINDIR}
+ COMPONENT libc)
+ endif()
+endif()
diff --git a/libc/utils/gpu/loader/amdgpu/CMakeLists.txt b/libc/utils/gpu/loader/amdgpu/CMakeLists.txt
index 8e9c9a2bdc7d..b99319f50401 100644
--- a/libc/utils/gpu/loader/amdgpu/CMakeLists.txt
+++ b/libc/utils/gpu/loader/amdgpu/CMakeLists.txt
@@ -1,7 +1,7 @@
-add_executable(amdhsa_loader Loader.cpp)
-add_dependencies(amdhsa_loader libc.src.__support.RPC.rpc)
+add_executable(amdhsa-loader Loader.cpp)
+add_dependencies(amdhsa-loader libc.src.__support.RPC.rpc)
-target_link_libraries(amdhsa_loader
+target_link_libraries(amdhsa-loader
PRIVATE
hsa-runtime64::hsa-runtime64
gpu_loader
diff --git a/libc/utils/gpu/loader/nvptx/CMakeLists.txt b/libc/utils/gpu/loader/nvptx/CMakeLists.txt
index 0c76c49fa309..e76362a1e8cc 100644
--- a/libc/utils/gpu/loader/nvptx/CMakeLists.txt
+++ b/libc/utils/gpu/loader/nvptx/CMakeLists.txt
@@ -1,11 +1,11 @@
-add_executable(nvptx_loader Loader.cpp)
-add_dependencies(nvptx_loader libc.src.__support.RPC.rpc)
+add_executable(nvptx-loader Loader.cpp)
+add_dependencies(nvptx-loader libc.src.__support.RPC.rpc)
if(NOT LLVM_ENABLE_RTTI)
- target_compile_options(nvptx_loader PRIVATE -fno-rtti)
+ target_compile_options(nvptx-loader PRIVATE -fno-rtti)
endif()
-target_include_directories(nvptx_loader PRIVATE ${LLVM_INCLUDE_DIRS})
-target_link_libraries(nvptx_loader
+target_include_directories(nvptx-loader PRIVATE ${LLVM_INCLUDE_DIRS})
+target_link_libraries(nvptx-loader
PRIVATE
gpu_loader
llvmlibc_rpc_server
diff --git a/libc/utils/gpu/server/CMakeLists.txt b/libc/utils/gpu/server/CMakeLists.txt
index 3d9b2bcab4db..94cdfe5bf652 100644
--- a/libc/utils/gpu/server/CMakeLists.txt
+++ b/libc/utils/gpu/server/CMakeLists.txt
@@ -5,12 +5,21 @@ target_include_directories(llvmlibc_rpc_server PRIVATE ${LIBC_SOURCE_DIR})
target_include_directories(llvmlibc_rpc_server PUBLIC ${LIBC_SOURCE_DIR}/include)
target_include_directories(llvmlibc_rpc_server PUBLIC ${CMAKE_CURRENT_SOURCE_DIR})
+
# Ignore unsupported clang attributes if we're using GCC.
target_compile_options(llvmlibc_rpc_server PUBLIC
$<$<CXX_COMPILER_ID:GNU>:-Wno-attributes>)
target_compile_definitions(llvmlibc_rpc_server PUBLIC
LIBC_NAMESPACE=${LIBC_NAMESPACE})
+# This utility needs to be compiled for the host system when cross-compiling.
+if(LLVM_RUNTIMES_TARGET OR LIBC_TARGET_TRIPLE)
+ target_compile_options(llvmlibc_rpc_server PUBLIC
+ --target=${LLVM_HOST_TRIPLE})
+ target_link_libraries(llvmlibc_rpc_server PUBLIC
+ "--target=${LLVM_HOST_TRIPLE}")
+endif()
+
# Install the server and associated header.
install(FILES ${CMAKE_CURRENT_SOURCE_DIR}/rpc_server.h
DESTINATION ${CMAKE_INSTALL_INCLUDEDIR}/gpu-none-llvm/
diff --git a/libcxx/test/libcxx/input.output/filesystems/class.directory_entry/directory_entry.mods/last_write_time.pass.cpp b/libcxx/test/libcxx/input.output/filesystems/class.directory_entry/directory_entry.mods/last_write_time.pass.cpp
index 0d1bb77583be..26703f748d87 100644
--- a/libcxx/test/libcxx/input.output/filesystems/class.directory_entry/directory_entry.mods/last_write_time.pass.cpp
+++ b/libcxx/test/libcxx/input.output/filesystems/class.directory_entry/directory_entry.mods/last_write_time.pass.cpp
@@ -18,7 +18,7 @@
// UNSUPPORTED: windows
// This test assumes that time is stored as a 64 bit value when on MVS it is stored as 32 bit
-// UNSUPPORTED: target={{.+}}-zos{{.*}}
+// XFAIL: target={{.+}}-zos{{.*}}
// <filesystem>
diff --git a/libcxx/test/libcxx/language.support/support.dynamic/libcpp_deallocate.sh.cpp b/libcxx/test/libcxx/language.support/support.dynamic/libcpp_deallocate.sh.cpp
index 267f87bd3f6f..f94ceaf57dba 100644
--- a/libcxx/test/libcxx/language.support/support.dynamic/libcpp_deallocate.sh.cpp
+++ b/libcxx/test/libcxx/language.support/support.dynamic/libcpp_deallocate.sh.cpp
@@ -11,7 +11,7 @@
// Libc++ when built for z/OS doesn't contain the aligned allocation functions,
// nor does the dynamic library shipped with z/OS.
-// UNSUPPORTED: target={{.+}}-zos{{.*}}
+// XFAIL: target={{.+}}-zos{{.*}}
// XFAIL: sanitizer-new-delete && !hwasan
diff --git a/libcxx/test/libcxx/language.support/support.dynamic/new_faligned_allocation.pass.cpp b/libcxx/test/libcxx/language.support/support.dynamic/new_faligned_allocation.pass.cpp
index 9cf1b275abc5..69c46f00fb65 100644
--- a/libcxx/test/libcxx/language.support/support.dynamic/new_faligned_allocation.pass.cpp
+++ b/libcxx/test/libcxx/language.support/support.dynamic/new_faligned_allocation.pass.cpp
@@ -11,7 +11,7 @@
// Libc++ when built for z/OS doesn't contain the aligned allocation functions,
// nor does the dynamic library shipped with z/OS.
-// UNSUPPORTED: target={{.+}}-zos{{.*}}
+// XFAIL: target={{.+}}-zos{{.*}}
// REQUIRES: -faligned-allocation
// ADDITIONAL_COMPILE_FLAGS: -faligned-allocation
diff --git a/libcxx/test/libcxx/thread/thread.condition/PR30202_notify_from_pthread_created_thread.pass.cpp b/libcxx/test/libcxx/thread/thread.condition/PR30202_notify_from_pthread_created_thread.pass.cpp
index fd863fb2d3fd..b1a3f86e8643 100644
--- a/libcxx/test/libcxx/thread/thread.condition/PR30202_notify_from_pthread_created_thread.pass.cpp
+++ b/libcxx/test/libcxx/thread/thread.condition/PR30202_notify_from_pthread_created_thread.pass.cpp
@@ -14,7 +14,7 @@
// UNSUPPORTED: c++03
// PR30202 was fixed starting in macosx10.13.
-// UNSUPPORTED: stdlib=apple-libc++ && target={{.+}}-apple-macosx10.{{9|10|11|12}}
+// XFAIL: stdlib=apple-libc++ && target={{.+}}-apple-macosx10.{{9|10|11|12}}
// <condition_variable>
diff --git a/libcxx/test/libcxx/thread/thread.threads/thread.thread.this/sleep_for.pass.cpp b/libcxx/test/libcxx/thread/thread.threads/thread.thread.this/sleep_for.pass.cpp
index 903135947896..47741d0851e8 100644
--- a/libcxx/test/libcxx/thread/thread.threads/thread.thread.this/sleep_for.pass.cpp
+++ b/libcxx/test/libcxx/thread/thread.threads/thread.thread.this/sleep_for.pass.cpp
@@ -11,7 +11,7 @@
// Until 58a0a70fb2f1, this_thread::sleep_for could sometimes get interrupted
// by signals and this test would fail spuriously. Disable the test on the
// corresponding system libraries.
-// UNSUPPORTED: stdlib=apple-libc++ && target={{.+}}-apple-macosx10.{{9|10|11}}
+// XFAIL: stdlib=apple-libc++ && target={{.+}}-apple-macosx10.{{9|10|11}}
// ALLOW_RETRIES: 3
diff --git a/libcxx/test/std/input.output/filesystems/fs.op.funcs/fs.op.remove_all/toctou.pass.cpp b/libcxx/test/std/input.output/filesystems/fs.op.funcs/fs.op.remove_all/toctou.pass.cpp
index 637139b81d54..5248ba24b403 100644
--- a/libcxx/test/std/input.output/filesystems/fs.op.funcs/fs.op.remove_all/toctou.pass.cpp
+++ b/libcxx/test/std/input.output/filesystems/fs.op.funcs/fs.op.remove_all/toctou.pass.cpp
@@ -36,7 +36,7 @@
// UNSUPPORTED: stdlib=apple-libc++ && target={{.+}}-apple-macosx12.{{0|1|2}}
// Windows doesn't support the necessary APIs to mitigate this issue.
-// UNSUPPORTED: target={{.+}}-windows-{{.+}}
+// XFAIL: target={{.+}}-windows-{{.+}}
#include <cstdio>
#include <filesystem>
diff --git a/libcxx/test/std/language.support/cmp/cmp.alg/strong_order_long_double.verify.cpp b/libcxx/test/std/language.support/cmp/cmp.alg/strong_order_long_double.verify.cpp
index fd16afeefb03..c9c2ba200214 100644
--- a/libcxx/test/std/language.support/cmp/cmp.alg/strong_order_long_double.verify.cpp
+++ b/libcxx/test/std/language.support/cmp/cmp.alg/strong_order_long_double.verify.cpp
@@ -10,18 +10,18 @@
// The following platforms have sizeof(long double) == sizeof(double), so this test doesn't apply to them.
// This test does apply to aarch64 where Arm's AAPCS64 is followed. There they are different sizes.
-// UNSUPPORTED: target={{arm64|arm64e|armv(7|8)(l|m)?|powerpc|powerpc64}}-{{.+}}
+// XFAIL: target={{arm64|arm64e|armv(7|8)(l|m)?|powerpc|powerpc64}}-{{.+}}
// MSVC configurations have long double equal to regular double on all
// architectures.
-// UNSUPPORTED: target={{.+}}-pc-windows-msvc
+// XFAIL: target={{.+}}-pc-windows-msvc
// ARM/AArch64 MinGW also has got long double equal to regular double, just
// like MSVC (thus match both MinGW and MSVC here, for those architectures).
-// UNSUPPORTED: target={{aarch64|armv7}}-{{.*}}-windows-{{.+}}
+// XFAIL: target={{aarch64|armv7}}-{{.*}}-windows-{{.+}}
// Android's 32-bit x86 target has long double equal to regular double.
-// UNSUPPORTED: target=i686-{{.+}}-android{{.*}}
+// XFAIL: target=i686-{{.+}}-android{{.*}}
// <compare>
diff --git a/libcxx/test/std/language.support/support.dynamic/align_val_t.pass.cpp b/libcxx/test/std/language.support/support.dynamic/align_val_t.pass.cpp
index c1f9f2f6cc6f..28c72f0be7ae 100644
--- a/libcxx/test/std/language.support/support.dynamic/align_val_t.pass.cpp
+++ b/libcxx/test/std/language.support/support.dynamic/align_val_t.pass.cpp
@@ -12,7 +12,7 @@
// Libc++ when built for z/OS doesn't contain the aligned allocation functions,
// nor does the dynamic library shipped with z/OS.
-// UNSUPPORTED: target={{.+}}-zos{{.*}}
+// XFAIL: target={{.+}}-zos{{.*}}
#include <new>
#include <type_traits>
diff --git a/libcxx/test/std/language.support/support.dynamic/new.delete/new.delete.array/delete_align_val_t_replace.pass.cpp b/libcxx/test/std/language.support/support.dynamic/new.delete/new.delete.array/delete_align_val_t_replace.pass.cpp
index 33d5225586b9..60b88ec79248 100644
--- a/libcxx/test/std/language.support/support.dynamic/new.delete/new.delete.array/delete_align_val_t_replace.pass.cpp
+++ b/libcxx/test/std/language.support/support.dynamic/new.delete/new.delete.array/delete_align_val_t_replace.pass.cpp
@@ -12,7 +12,7 @@
// Libc++ when built for z/OS doesn't contain the aligned allocation functions,
// nor does the dynamic library shipped with z/OS.
-// UNSUPPORTED: target={{.+}}-zos{{.*}}
+// XFAIL: target={{.+}}-zos{{.*}}
#include <new>
#include <cstddef>
diff --git a/libcxx/test/std/language.support/support.dynamic/new.delete/new.delete.array/new.size_align.pass.cpp b/libcxx/test/std/language.support/support.dynamic/new.delete/new.delete.array/new.size_align.pass.cpp
index c903c63f68c4..dd8090aca5b2 100644
--- a/libcxx/test/std/language.support/support.dynamic/new.delete/new.delete.array/new.size_align.pass.cpp
+++ b/libcxx/test/std/language.support/support.dynamic/new.delete/new.delete.array/new.size_align.pass.cpp
@@ -15,7 +15,7 @@
// Libc++ when built for z/OS doesn't contain the aligned allocation functions,
// nor does the dynamic library shipped with z/OS.
-// UNSUPPORTED: target={{.+}}-zos{{.*}}
+// XFAIL: target={{.+}}-zos{{.*}}
#include <new>
#include <cstddef>
diff --git a/libcxx/test/std/language.support/support.dynamic/new.delete/new.delete.array/new.size_align.replace.indirect.pass.cpp b/libcxx/test/std/language.support/support.dynamic/new.delete/new.delete.array/new.size_align.replace.indirect.pass.cpp
index 66cbb4b9c8eb..0b540e09bab3 100644
--- a/libcxx/test/std/language.support/support.dynamic/new.delete/new.delete.array/new.size_align.replace.indirect.pass.cpp
+++ b/libcxx/test/std/language.support/support.dynamic/new.delete/new.delete.array/new.size_align.replace.indirect.pass.cpp
@@ -17,7 +17,7 @@
// Libc++ when built for z/OS doesn't contain the aligned allocation functions,
// nor does the dynamic library shipped with z/OS.
-// UNSUPPORTED: target={{.+}}-zos{{.*}}
+// XFAIL: target={{.+}}-zos{{.*}}
#include <new>
#include <cstddef>
diff --git a/libcxx/test/std/language.support/support.dynamic/new.delete/new.delete.array/new.size_align.replace.pass.cpp b/libcxx/test/std/language.support/support.dynamic/new.delete/new.delete.array/new.size_align.replace.pass.cpp
index 4619a71629a7..2d021ecb30e7 100644
--- a/libcxx/test/std/language.support/support.dynamic/new.delete/new.delete.array/new.size_align.replace.pass.cpp
+++ b/libcxx/test/std/language.support/support.dynamic/new.delete/new.delete.array/new.size_align.replace.pass.cpp
@@ -15,7 +15,7 @@
// Libc++ when built for z/OS doesn't contain the aligned allocation functions,
// nor does the dynamic library shipped with z/OS.
-// UNSUPPORTED: target={{.+}}-zos{{.*}}
+// XFAIL: target={{.+}}-zos{{.*}}
#include <new>
#include <cstddef>
diff --git a/libcxx/test/std/language.support/support.dynamic/new.delete/new.delete.array/new.size_align_nothrow.pass.cpp b/libcxx/test/std/language.support/support.dynamic/new.delete/new.delete.array/new.size_align_nothrow.pass.cpp
index 0343d51f184d..6ae8ceaf534e 100644
--- a/libcxx/test/std/language.support/support.dynamic/new.delete/new.delete.array/new.size_align_nothrow.pass.cpp
+++ b/libcxx/test/std/language.support/support.dynamic/new.delete/new.delete.array/new.size_align_nothrow.pass.cpp
@@ -15,7 +15,7 @@
// Libc++ when built for z/OS doesn't contain the aligned allocation functions,
// nor does the dynamic library shipped with z/OS.
-// UNSUPPORTED: target={{.+}}-zos{{.*}}
+// XFAIL: target={{.+}}-zos{{.*}}
#include <new>
#include <cstddef>
diff --git a/libcxx/test/std/language.support/support.dynamic/new.delete/new.delete.array/new.size_align_nothrow.replace.indirect.pass.cpp b/libcxx/test/std/language.support/support.dynamic/new.delete/new.delete.array/new.size_align_nothrow.replace.indirect.pass.cpp
index f6959172ea24..227b20f0b1e1 100644
--- a/libcxx/test/std/language.support/support.dynamic/new.delete/new.delete.array/new.size_align_nothrow.replace.indirect.pass.cpp
+++ b/libcxx/test/std/language.support/support.dynamic/new.delete/new.delete.array/new.size_align_nothrow.replace.indirect.pass.cpp
@@ -22,7 +22,7 @@
// Libc++ when built for z/OS doesn't contain the aligned allocation functions,
// nor does the dynamic library shipped with z/OS.
-// UNSUPPORTED: target={{.+}}-zos{{.*}}
+// XFAIL: target={{.+}}-zos{{.*}}
#include <new>
#include <cstddef>
diff --git a/libcxx/test/std/language.support/support.dynamic/new.delete/new.delete.array/new.size_align_nothrow.replace.pass.cpp b/libcxx/test/std/language.support/support.dynamic/new.delete/new.delete.array/new.size_align_nothrow.replace.pass.cpp
index b984e8cf0a43..17d38e3de8cd 100644
--- a/libcxx/test/std/language.support/support.dynamic/new.delete/new.delete.array/new.size_align_nothrow.replace.pass.cpp
+++ b/libcxx/test/std/language.support/support.dynamic/new.delete/new.delete.array/new.size_align_nothrow.replace.pass.cpp
@@ -15,7 +15,7 @@
// Libc++ when built for z/OS doesn't contain the aligned allocation functions,
// nor does the dynamic library shipped with z/OS.
-// UNSUPPORTED: target={{.+}}-zos{{.*}}
+// XFAIL: target={{.+}}-zos{{.*}}
#include <new>
#include <cstddef>
diff --git a/libcxx/test/std/language.support/support.dynamic/new.delete/new.delete.array/nodiscard.verify.cpp b/libcxx/test/std/language.support/support.dynamic/new.delete/new.delete.array/nodiscard.verify.cpp
index 0f30cf0135a4..509fa98446f8 100644
--- a/libcxx/test/std/language.support/support.dynamic/new.delete/new.delete.array/nodiscard.verify.cpp
+++ b/libcxx/test/std/language.support/support.dynamic/new.delete/new.delete.array/nodiscard.verify.cpp
@@ -19,7 +19,7 @@
// Libc++ when built for z/OS doesn't contain the aligned allocation functions,
// nor does the dynamic library shipped with z/OS.
-// UNSUPPORTED: target={{.+}}-zos{{.*}}
+// XFAIL: target={{.+}}-zos{{.*}}
#include <new>
diff --git a/libcxx/test/std/language.support/support.dynamic/new.delete/new.delete.single/delete_align_val_t_replace.pass.cpp b/libcxx/test/std/language.support/support.dynamic/new.delete/new.delete.single/delete_align_val_t_replace.pass.cpp
index c2021c5d84e9..c346c42f157b 100644
--- a/libcxx/test/std/language.support/support.dynamic/new.delete/new.delete.single/delete_align_val_t_replace.pass.cpp
+++ b/libcxx/test/std/language.support/support.dynamic/new.delete/new.delete.single/delete_align_val_t_replace.pass.cpp
@@ -12,7 +12,7 @@
// Libc++ when built for z/OS doesn't contain the aligned allocation functions,
// nor does the dynamic library shipped with z/OS.
-// UNSUPPORTED: target={{.+}}-zos{{.*}}
+// XFAIL: target={{.+}}-zos{{.*}}
#include <new>
#include <cstddef>
diff --git a/libcxx/test/std/language.support/support.dynamic/new.delete/new.delete.single/new.size_align.pass.cpp b/libcxx/test/std/language.support/support.dynamic/new.delete/new.delete.single/new.size_align.pass.cpp
index 93edb32130c3..dbb10a76ad9e 100644
--- a/libcxx/test/std/language.support/support.dynamic/new.delete/new.delete.single/new.size_align.pass.cpp
+++ b/libcxx/test/std/language.support/support.dynamic/new.delete/new.delete.single/new.size_align.pass.cpp
@@ -15,7 +15,7 @@
// Libc++ when built for z/OS doesn't contain the aligned allocation functions,
// nor does the dynamic library shipped with z/OS.
-// UNSUPPORTED: target={{.+}}-zos{{.*}}
+// XFAIL: target={{.+}}-zos{{.*}}
#include <new>
#include <cstddef>
diff --git a/libcxx/test/std/language.support/support.dynamic/new.delete/new.delete.single/new.size_align.replace.pass.cpp b/libcxx/test/std/language.support/support.dynamic/new.delete/new.delete.single/new.size_align.replace.pass.cpp
index 87d061a9d1a3..e5ef5f166975 100644
--- a/libcxx/test/std/language.support/support.dynamic/new.delete/new.delete.single/new.size_align.replace.pass.cpp
+++ b/libcxx/test/std/language.support/support.dynamic/new.delete/new.delete.single/new.size_align.replace.pass.cpp
@@ -15,7 +15,7 @@
// Libc++ when built for z/OS doesn't contain the aligned allocation functions,
// nor does the dynamic library shipped with z/OS.
-// UNSUPPORTED: target={{.+}}-zos{{.*}}
+// XFAIL: target={{.+}}-zos{{.*}}
#include <new>
#include <cstddef>
diff --git a/libcxx/test/std/language.support/support.dynamic/new.delete/new.delete.single/new.size_align_nothrow.pass.cpp b/libcxx/test/std/language.support/support.dynamic/new.delete/new.delete.single/new.size_align_nothrow.pass.cpp
index 1c575729678d..b9d8ea2f4e49 100644
--- a/libcxx/test/std/language.support/support.dynamic/new.delete/new.delete.single/new.size_align_nothrow.pass.cpp
+++ b/libcxx/test/std/language.support/support.dynamic/new.delete/new.delete.single/new.size_align_nothrow.pass.cpp
@@ -15,7 +15,7 @@
// Libc++ when built for z/OS doesn't contain the aligned allocation functions,
// nor does the dynamic library shipped with z/OS.
-// UNSUPPORTED: target={{.+}}-zos{{.*}}
+// XFAIL: target={{.+}}-zos{{.*}}
#include <new>
#include <cstddef>
diff --git a/libcxx/test/std/language.support/support.dynamic/new.delete/new.delete.single/new.size_align_nothrow.replace.indirect.pass.cpp b/libcxx/test/std/language.support/support.dynamic/new.delete/new.delete.single/new.size_align_nothrow.replace.indirect.pass.cpp
index 2e7fa132890b..7eab0729f9ef 100644
--- a/libcxx/test/std/language.support/support.dynamic/new.delete/new.delete.single/new.size_align_nothrow.replace.indirect.pass.cpp
+++ b/libcxx/test/std/language.support/support.dynamic/new.delete/new.delete.single/new.size_align_nothrow.replace.indirect.pass.cpp
@@ -21,7 +21,7 @@
// Libc++ when built for z/OS doesn't contain the aligned allocation functions,
// nor does the dynamic library shipped with z/OS.
-// UNSUPPORTED: target={{.+}}-zos{{.*}}
+// XFAIL: target={{.+}}-zos{{.*}}
#include <new>
#include <cstddef>
diff --git a/libcxx/test/std/language.support/support.dynamic/new.delete/new.delete.single/new.size_align_nothrow.replace.pass.cpp b/libcxx/test/std/language.support/support.dynamic/new.delete/new.delete.single/new.size_align_nothrow.replace.pass.cpp
index 3c9e17b0b02b..9a5b53e03902 100644
--- a/libcxx/test/std/language.support/support.dynamic/new.delete/new.delete.single/new.size_align_nothrow.replace.pass.cpp
+++ b/libcxx/test/std/language.support/support.dynamic/new.delete/new.delete.single/new.size_align_nothrow.replace.pass.cpp
@@ -15,7 +15,7 @@
// Libc++ when built for z/OS doesn't contain the aligned allocation functions,
// nor does the dynamic library shipped with z/OS.
-// UNSUPPORTED: target={{.+}}-zos{{.*}}
+// XFAIL: target={{.+}}-zos{{.*}}
#include <new>
#include <cstddef>
diff --git a/libcxx/test/std/language.support/support.dynamic/new.delete/new.delete.single/nodiscard.verify.cpp b/libcxx/test/std/language.support/support.dynamic/new.delete/new.delete.single/nodiscard.verify.cpp
index 16d6a223eb53..3cda8ad9a1e6 100644
--- a/libcxx/test/std/language.support/support.dynamic/new.delete/new.delete.single/nodiscard.verify.cpp
+++ b/libcxx/test/std/language.support/support.dynamic/new.delete/new.delete.single/nodiscard.verify.cpp
@@ -19,7 +19,7 @@
// Libc++ when built for z/OS doesn't contain the aligned allocation functions,
// nor does the dynamic library shipped with z/OS.
-// UNSUPPORTED: target={{.+}}-zos{{.*}}
+// XFAIL: target={{.+}}-zos{{.*}}
#include <new>
diff --git a/libcxx/test/std/language.support/support.rtti/type.info/type_info.equal.pass.cpp b/libcxx/test/std/language.support/support.rtti/type.info/type_info.equal.pass.cpp
index 3f5dd9626192..8092f7c5cd01 100644
--- a/libcxx/test/std/language.support/support.rtti/type.info/type_info.equal.pass.cpp
+++ b/libcxx/test/std/language.support/support.rtti/type.info/type_info.equal.pass.cpp
@@ -12,10 +12,6 @@
// UNSUPPORTED: no-rtti
-// When we build for Windows on top of the VC runtime, `typeinfo::operator==` may not
-// be `constexpr` (depending on the version of the VC runtime). So this test can fail.
-// UNSUPPORTED: target={{.+}}-windows-msvc && !libcpp-no-vcruntime
-
#include <typeinfo>
#include <cassert>
diff --git a/libcxx/test/std/localization/locale.categories/category.ctype/locale.codecvt/locale.codecvt.members/char16_t_in.pass.cpp b/libcxx/test/std/localization/locale.categories/category.ctype/locale.codecvt/locale.codecvt.members/char16_t_in.pass.cpp
index 9204ea26b9c9..9e1d0a1c7bac 100644
--- a/libcxx/test/std/localization/locale.categories/category.ctype/locale.codecvt/locale.codecvt.members/char16_t_in.pass.cpp
+++ b/libcxx/test/std/localization/locale.categories/category.ctype/locale.codecvt/locale.codecvt.members/char16_t_in.pass.cpp
@@ -20,7 +20,7 @@
// Test is intended to convert between UTF8 and UTF16/32, it will fail on
// z/OS since at default char type on z/OS is EBCDIC character which has
// value different from ASCII character.
-// UNSUPPORTED: target={{.+}}-zos{{.*}}
+// XFAIL: target={{.+}}-zos{{.*}}
#include <locale>
#include <string>
diff --git a/libcxx/test/std/localization/locale.categories/category.ctype/locale.codecvt/locale.codecvt.members/char16_t_length.pass.cpp b/libcxx/test/std/localization/locale.categories/category.ctype/locale.codecvt/locale.codecvt.members/char16_t_length.pass.cpp
index 98c5824a3d4a..eeef2a80c20b 100644
--- a/libcxx/test/std/localization/locale.categories/category.ctype/locale.codecvt/locale.codecvt.members/char16_t_length.pass.cpp
+++ b/libcxx/test/std/localization/locale.categories/category.ctype/locale.codecvt/locale.codecvt.members/char16_t_length.pass.cpp
@@ -18,7 +18,7 @@
// Test is intended to convert between UTF8 and UTF16/32, it will fail on
// z/OS since at default char type on z/OS is EBCDIC character which has
// value different from ASCII character.
-// UNSUPPORTED: target={{.+}}-zos{{.*}}
+// XFAIL: target={{.+}}-zos{{.*}}
#include <locale>
#include <cassert>
diff --git a/libcxx/test/std/localization/locale.categories/category.ctype/locale.codecvt/locale.codecvt.members/char16_t_out.pass.cpp b/libcxx/test/std/localization/locale.categories/category.ctype/locale.codecvt/locale.codecvt.members/char16_t_out.pass.cpp
index a8f16fcc29a1..2db95b562081 100644
--- a/libcxx/test/std/localization/locale.categories/category.ctype/locale.codecvt/locale.codecvt.members/char16_t_out.pass.cpp
+++ b/libcxx/test/std/localization/locale.categories/category.ctype/locale.codecvt/locale.codecvt.members/char16_t_out.pass.cpp
@@ -20,7 +20,7 @@
// Test is intended to convert between UTF8 and UTF16/32, it will fail on
// z/OS since at default char type on z/OS is EBCDIC character which has
// value different from ASCII character.
-// UNSUPPORTED: target={{.+}}-zos{{.*}}
+// XFAIL: target={{.+}}-zos{{.*}}
#include <locale>
#include <string>
diff --git a/libcxx/test/std/localization/locale.categories/category.ctype/locale.codecvt/locale.codecvt.members/char32_t_in.pass.cpp b/libcxx/test/std/localization/locale.categories/category.ctype/locale.codecvt/locale.codecvt.members/char32_t_in.pass.cpp
index 74cc4793aaf9..94602128f8cd 100644
--- a/libcxx/test/std/localization/locale.categories/category.ctype/locale.codecvt/locale.codecvt.members/char32_t_in.pass.cpp
+++ b/libcxx/test/std/localization/locale.categories/category.ctype/locale.codecvt/locale.codecvt.members/char32_t_in.pass.cpp
@@ -20,7 +20,7 @@
// Test is intended to convert between UTF8 and UTF16/32, it will fail on
// z/OS since at default char type on z/OS is EBCDIC character which has
// value different from ASCII character.
-// UNSUPPORTED: target={{.+}}-zos{{.*}}
+// XFAIL: target={{.+}}-zos{{.*}}
#include <locale>
#include <string>
diff --git a/libcxx/test/std/localization/locale.categories/category.ctype/locale.codecvt/locale.codecvt.members/char32_t_length.pass.cpp b/libcxx/test/std/localization/locale.categories/category.ctype/locale.codecvt/locale.codecvt.members/char32_t_length.pass.cpp
index f51a9db212a2..03d35830f424 100644
--- a/libcxx/test/std/localization/locale.categories/category.ctype/locale.codecvt/locale.codecvt.members/char32_t_length.pass.cpp
+++ b/libcxx/test/std/localization/locale.categories/category.ctype/locale.codecvt/locale.codecvt.members/char32_t_length.pass.cpp
@@ -18,7 +18,7 @@
// Test is intended to convert between UTF8 and UTF16/32, it will fail on
// z/OS since at default char type on z/OS is EBCDIC character which has
// value different from ASCII character.
-// UNSUPPORTED: target={{.+}}-zos{{.*}}
+// XFAIL: target={{.+}}-zos{{.*}}
#include <locale>
#include <cassert>
diff --git a/libcxx/test/std/localization/locale.categories/category.ctype/locale.codecvt/locale.codecvt.members/char32_t_out.pass.cpp b/libcxx/test/std/localization/locale.categories/category.ctype/locale.codecvt/locale.codecvt.members/char32_t_out.pass.cpp
index 379b607a80ee..df58cc9f518d 100644
--- a/libcxx/test/std/localization/locale.categories/category.ctype/locale.codecvt/locale.codecvt.members/char32_t_out.pass.cpp
+++ b/libcxx/test/std/localization/locale.categories/category.ctype/locale.codecvt/locale.codecvt.members/char32_t_out.pass.cpp
@@ -20,7 +20,7 @@
// Test is intended to convert between UTF8 and UTF16/32, it will fail on
// z/OS since at default char type on z/OS is EBCDIC character which has
// value different from ASCII character.
-// UNSUPPORTED: target={{.+}}-zos{{.*}}
+// XFAIL: target={{.+}}-zos{{.*}}
#include <locale>
#include <string>
diff --git a/libcxx/test/std/utilities/format/format.functions/escaped_output.unicode.pass.cpp b/libcxx/test/std/utilities/format/format.functions/escaped_output.unicode.pass.cpp
index 23b26722d7be..bf5c0a51f944 100644
--- a/libcxx/test/std/utilities/format/format.functions/escaped_output.unicode.pass.cpp
+++ b/libcxx/test/std/utilities/format/format.functions/escaped_output.unicode.pass.cpp
@@ -11,9 +11,6 @@
// This version runs the test when the platform has Unicode support.
// UNSUPPORTED: libcpp-has-no-unicode
-// TODO FMT Investigate Windows issues.
-// UNSUPPORTED: msvc, target={{.+}}-windows-gnu
-
// TODO FMT This test should not require std::to_chars(floating-point)
// XFAIL: availability-fp_to_chars-missing
diff --git a/libcxx/test/std/utilities/format/format.range/format.range.fmtstr/format.pass.cpp b/libcxx/test/std/utilities/format/format.range/format.range.fmtstr/format.pass.cpp
index fc993efd0db3..675a5e896ff5 100644
--- a/libcxx/test/std/utilities/format/format.range/format.range.fmtstr/format.pass.cpp
+++ b/libcxx/test/std/utilities/format/format.range/format.range.fmtstr/format.pass.cpp
@@ -7,9 +7,6 @@
// UNSUPPORTED: c++03, c++11, c++14, c++17, c++20
-// TODO FMT Investigate why this fails.
-// UNSUPPORTED: stdlib=apple-libc++ && target={{.+}}-apple-macosx{{10.9|10.10|10.11|10.12|10.13|10.14|10.15|11.0|12.0}}
-
// <format>
// template<ranges::input_range R, class charT>
diff --git a/libcxx/test/std/utilities/format/format.range/format.range.fmtstr/parse.pass.cpp b/libcxx/test/std/utilities/format/format.range/format.range.fmtstr/parse.pass.cpp
index a24fc06fae56..7acee9cb9dc5 100644
--- a/libcxx/test/std/utilities/format/format.range/format.range.fmtstr/parse.pass.cpp
+++ b/libcxx/test/std/utilities/format/format.range/format.range.fmtstr/parse.pass.cpp
@@ -7,9 +7,6 @@
// UNSUPPORTED: c++03, c++11, c++14, c++17, c++20
-// TODO FMT Investigate why this fails.
-// UNSUPPORTED: stdlib=apple-libc++ && target={{.+}}-apple-macosx{{10.9|10.10|10.11|10.12|10.13|10.14|10.15|11.0|12.0}}
-
// <format>
// template<ranges::input_range R, class charT>
diff --git a/libcxx/test/std/utilities/memory/temporary.buffer/overaligned.pass.cpp b/libcxx/test/std/utilities/memory/temporary.buffer/overaligned.pass.cpp
index 019a6aa03853..c928ba24f1bf 100644
--- a/libcxx/test/std/utilities/memory/temporary.buffer/overaligned.pass.cpp
+++ b/libcxx/test/std/utilities/memory/temporary.buffer/overaligned.pass.cpp
@@ -11,7 +11,7 @@
// Aligned allocations are not supported on macOS < 10.13
// Note: use 'unsupported' instead of 'xfail' to ensure
// we won't pass prior to c++17.
-// UNSUPPORTED: stdlib=apple-libc++ && target={{.+}}-apple-macosx10.{{9|10|11|12}}
+// XFAIL: stdlib=apple-libc++ && target={{.+}}-apple-macosx10.{{9|10|11|12}}
// ADDITIONAL_COMPILE_FLAGS: -D_LIBCPP_DISABLE_DEPRECATION_WARNINGS
diff --git a/lldb/docs/resources/test.rst b/lldb/docs/resources/test.rst
index 52757864539e..2b0e9010fe28 100644
--- a/lldb/docs/resources/test.rst
+++ b/lldb/docs/resources/test.rst
@@ -17,8 +17,8 @@ The LLDB test suite consists of three different kinds of test:
the output.
* **API tests**: Integration tests that interact with the debugger through the
SB API. These are written in Python and use LLDB's ``dotest.py`` testing
- framework on top of Python's `unittest2
- <https://docs.python.org/2/library/unittest.html>`_.
+ framework on top of Python's `unittest
+ <https://docs.python.org/3/library/unittest.html>`_.
All three test suites use ``lit`` (`LLVM Integrated Tester
<https://llvm.org/docs/CommandGuide/lit.html>`_ ) as the test driver. The test
@@ -94,7 +94,7 @@ programs from source, run them, and debug the processes.
As mentioned before, ``dotest.py`` is LLDB's testing framework. The
implementation is located under ``lldb/packages/Python/lldbsuite``. We have
several extensions and custom test primitives on top of what's offered by
-`unittest2 <https://docs.python.org/2/library/unittest.html>`_. Those can be
+`unittest <https://docs.python.org/3/library/unittest.html>`_. Those can be
found in
`lldbtest.py <https://github.com/llvm/llvm-project/blob/main/lldb/packages/Python/lldbsuite/test/lldbtest.py>`_.
@@ -146,7 +146,7 @@ the test should be run or not.
::
- @expectedFailure(checking_function_name)
+ @skipTestIfFn(checking_function_name)
In addition to providing a lot more flexibility when it comes to writing the
test, the API test also allow for much more complex scenarios when it comes to
diff --git a/lldb/docs/testsuite/a-detailed-walkthrough.txt b/lldb/docs/testsuite/a-detailed-walkthrough.txt
index 57c9dbce3d0a..8a7043786c19 100644
--- a/lldb/docs/testsuite/a-detailed-walkthrough.txt
+++ b/lldb/docs/testsuite/a-detailed-walkthrough.txt
@@ -58,16 +58,15 @@ display their output. For brevity, the '-t' output is not included here.
Notice the 'expected failures=1' message at the end of the run. This is because
of a bug currently in lldb such that setting target.process.output-path to
'stdout.txt' does not have any effect on the redirection of the standard output
-of the subsequent launched process. We are using unittest2 (a backport of new
-unittest features for Python 2.4-2.6) to decorate (mark) the particular test
-method as such:
+of the subsequently launched process. We are using unittest to decorate (mark)
+the particular test method as such:
- @unittest2.expectedFailure
+ @unittest.expectedFailure
# rdar://problem/8435794
# settings set target.process.output-path does not seem to work
def test_set_output_path(self):
-See http://pypi.python.org/pypi/unittest2 for more details.
+See http://docs.python.org/library/unittest.html for more details.
Now let's look inside the test method:
diff --git a/lldb/include/lldb/Interpreter/OptionValueSInt64.h b/lldb/include/lldb/Interpreter/OptionValueSInt64.h
index 5efae627758a..3cf41d38c0ef 100644
--- a/lldb/include/lldb/Interpreter/OptionValueSInt64.h
+++ b/lldb/include/lldb/Interpreter/OptionValueSInt64.h
@@ -86,8 +86,8 @@ public:
protected:
int64_t m_current_value = 0;
int64_t m_default_value = 0;
- int64_t m_min_value = INT64_MIN;
- int64_t m_max_value = INT64_MAX;
+ int64_t m_min_value = std::numeric_limits<int64_t>::min();
+ int64_t m_max_value = std::numeric_limits<int64_t>::max();
};
} // namespace lldb_private
diff --git a/lldb/include/lldb/Interpreter/OptionValueUInt64.h b/lldb/include/lldb/Interpreter/OptionValueUInt64.h
index 30c27bf73d99..07076075790c 100644
--- a/lldb/include/lldb/Interpreter/OptionValueUInt64.h
+++ b/lldb/include/lldb/Interpreter/OptionValueUInt64.h
@@ -64,13 +64,35 @@ public:
uint64_t GetDefaultValue() const { return m_default_value; }
- void SetCurrentValue(uint64_t value) { m_current_value = value; }
+ bool SetCurrentValue(uint64_t value) {
+ if (value >= m_min_value && value <= m_max_value) {
+ m_current_value = value;
+ return true;
+ }
+ return false;
+ }
+
+ bool SetDefaultValue(uint64_t value) {
+ if (value >= m_min_value && value <= m_max_value) {
+ m_default_value = value;
+ return true;
+ }
+ return false;
+ }
+
+ void SetMinimumValue(int64_t v) { m_min_value = v; }
+
+ uint64_t GetMinimumValue() const { return m_min_value; }
+
+ void SetMaximumValue(int64_t v) { m_max_value = v; }
- void SetDefaultValue(uint64_t value) { m_default_value = value; }
+ uint64_t GetMaximumValue() const { return m_max_value; }
protected:
uint64_t m_current_value = 0;
uint64_t m_default_value = 0;
+ uint64_t m_min_value = std::numeric_limits<uint64_t>::min();
+ uint64_t m_max_value = std::numeric_limits<uint64_t>::max();
};
} // namespace lldb_private
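
The new members above make OptionValueUInt64 reject out-of-range assignments and let callers such as Debugger clamp term-width to the 10..1024 range set a few hunks below. A standalone sketch of that bounded-setter pattern, not the lldb_private class itself (the bound setters here take uint64_t for simplicity):

#include <cstdint>
#include <iostream>
#include <limits>

// Sketch of a bounded unsigned option value: setters report whether the
// value was accepted, mirroring the bool-returning SetCurrentValue() above.
class BoundedUInt64 {
public:
  bool SetCurrentValue(uint64_t value) {
    if (value < m_min_value || value > m_max_value)
      return false; // rejected, current value left unchanged
    m_current_value = value;
    return true;
  }
  void SetMinimumValue(uint64_t v) { m_min_value = v; }
  void SetMaximumValue(uint64_t v) { m_max_value = v; }
  uint64_t GetCurrentValue() const { return m_current_value; }

private:
  uint64_t m_current_value = 0;
  uint64_t m_min_value = std::numeric_limits<uint64_t>::min();
  uint64_t m_max_value = std::numeric_limits<uint64_t>::max();
};

int main() {
  BoundedUInt64 term_width;
  term_width.SetMinimumValue(10);
  term_width.SetMaximumValue(1024);
  std::cout << term_width.SetCurrentValue(70) << '\n';   // 1: accepted
  std::cout << term_width.SetCurrentValue(5000) << '\n'; // 0: out of range
  std::cout << term_width.GetCurrentValue() << '\n';     // still 70
  return 0;
}
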
diff --git a/lldb/include/lldb/Interpreter/Options.h b/lldb/include/lldb/Interpreter/Options.h
index bf74927cf99d..18a87e49deee 100644
--- a/lldb/include/lldb/Interpreter/Options.h
+++ b/lldb/include/lldb/Interpreter/Options.h
@@ -336,6 +336,39 @@ public:
bool m_did_finalize = false;
};
+/// Creates an error that represents the failure to parse a command line option
+/// argument. This creates an error containing all information needed to show
+/// the developer what went wrong when parsing their command. It is recommended
+/// to use this instead of writing an error by hand.
+///
+/// \param[in] option_arg
+/// The argument that was attempted to be parsed.
+///
+/// \param[in] short_option
+/// The short form of the option. For example, if the flag is -f, the short
+/// option is "f".
+///
+/// \param[in] long_option
+/// The long form of the option. This field is optional. If the flag is
+/// --force, then the long option is "force".
+///
+/// \param[in] additional_context
+/// This is extra context that will get included in the error. This field is
+/// optional.
+///
+/// \return
+/// An llvm::Error that contains a standardized format for what went wrong
+/// when parsing and why.
+llvm::Error CreateOptionParsingError(llvm::StringRef option_arg,
+ const char short_option,
+ llvm::StringRef long_option = {},
+ llvm::StringRef additional_context = {});
+
+static constexpr llvm::StringLiteral g_bool_parsing_error_message =
+ "Failed to parse as boolean";
+static constexpr llvm::StringLiteral g_int_parsing_error_message =
+ "Failed to parse as integer";
+
} // namespace lldb_private
#endif // LLDB_INTERPRETER_OPTIONS_H
diff --git a/lldb/packages/Python/lldbsuite/test/README-TestSuite b/lldb/packages/Python/lldbsuite/test/README-TestSuite
index f76e836ab777..388f94da0c40 100644
--- a/lldb/packages/Python/lldbsuite/test/README-TestSuite
+++ b/lldb/packages/Python/lldbsuite/test/README-TestSuite
@@ -91,20 +91,6 @@ to the Python test suite under the current 'test' directory.
Contains platform specific plugin to build binaries with dsym/dwarf debugging
info. Other platform specific functionalities may be added in the future.
-- unittest2 directory
-
- Many new features were added to unittest in Python 2.7, including test
- discovery. unittest2 allows you to use these features with earlier versions of
- Python.
-
- It currently has unittest2 0.5.1 from http://pypi.python.org/pypi/unittest2.
- Version 0.5.1 of unittest2 has feature parity with unittest in Python 2.7
- final. If you want to ensure that your tests run identically under unittest2
- and unittest in Python 2.7 you should use unittest2 0.5.1.
-
- Later versions of unittest2 include changes in unittest made in Python 3.2 and
- onwards after the release of Python 2.7.
-
- Profiling dotest.py runs
I used the following command line thingy to do the profiling on a SnowLeopard
diff --git a/lldb/packages/Python/lldbsuite/test/tools/lldb-dap/dap_server.py b/lldb/packages/Python/lldbsuite/test/tools/lldb-dap/dap_server.py
index bb863bb87191..27a76a652f40 100644
--- a/lldb/packages/Python/lldbsuite/test/tools/lldb-dap/dap_server.py
+++ b/lldb/packages/Python/lldbsuite/test/tools/lldb-dap/dap_server.py
@@ -501,6 +501,18 @@ class DebugCommunication(object):
return variable["value"]
return None
+ def get_local_variable_child(self, name, child_name, frameIndex=0, threadId=None):
+ local = self.get_local_variable(name, frameIndex, threadId)
+ if local["variablesReference"] == 0:
+ return None
+ children = self.request_variables(local["variablesReference"])["body"][
+ "variables"
+ ]
+ for child in children:
+ if child["name"] == child_name:
+ return child
+ return None
+
def replay_packets(self, replay_file_path):
f = open(replay_file_path, "r")
mode = "invalid"
@@ -895,6 +907,41 @@ class DebugCommunication(object):
}
return self.send_recv(command_dict)
+ def request_dataBreakpointInfo(
+ self, variablesReference, name, frameIndex=0, threadId=None
+ ):
+ stackFrame = self.get_stackFrame(frameIndex=frameIndex, threadId=threadId)
+ if stackFrame is None:
+ return []
+ args_dict = {
+ "variablesReference": variablesReference,
+ "name": name,
+ "frameId": stackFrame["id"],
+ }
+ command_dict = {
+ "command": "dataBreakpointInfo",
+ "type": "request",
+ "arguments": args_dict,
+ }
+ return self.send_recv(command_dict)
+
+ def request_setDataBreakpoint(self, dataBreakpoints):
+ """dataBreakpoints is a list of dictionary with following fields:
+ {
+ dataId: (address in hex)/(size in bytes)
+ accessType: read/write/readWrite
+ [condition]: string
+ [hitCondition]: string
+ }
+ """
+ args_dict = {"breakpoints": dataBreakpoints}
+ command_dict = {
+ "command": "setDataBreakpoints",
+ "type": "request",
+ "arguments": args_dict,
+ }
+ return self.send_recv(command_dict)
+
def request_compileUnits(self, moduleId):
args_dict = {"moduleId": moduleId}
command_dict = {
diff --git a/lldb/source/Commands/CommandObjectBreakpoint.cpp b/lldb/source/Commands/CommandObjectBreakpoint.cpp
index 3fdf5cd3cd43..fc2217608a0b 100644
--- a/lldb/source/Commands/CommandObjectBreakpoint.cpp
+++ b/lldb/source/Commands/CommandObjectBreakpoint.cpp
@@ -64,6 +64,8 @@ public:
Status error;
const int short_option =
g_breakpoint_modify_options[option_idx].short_option;
+ const char *long_option =
+ g_breakpoint_modify_options[option_idx].long_option;
switch (short_option) {
case 'c':
@@ -84,18 +86,17 @@ public:
case 'G': {
bool value, success;
value = OptionArgParser::ToBoolean(option_arg, false, &success);
- if (success) {
+ if (success)
m_bp_opts.SetAutoContinue(value);
- } else
- error.SetErrorStringWithFormat(
- "invalid boolean value '%s' passed for -G option",
- option_arg.str().c_str());
+ else
+ error = CreateOptionParsingError(option_arg, short_option, long_option,
+ g_bool_parsing_error_message);
} break;
case 'i': {
uint32_t ignore_count;
if (option_arg.getAsInteger(0, ignore_count))
- error.SetErrorStringWithFormat("invalid ignore count '%s'",
- option_arg.str().c_str());
+ error = CreateOptionParsingError(option_arg, short_option, long_option,
+ g_int_parsing_error_message);
else
m_bp_opts.SetIgnoreCount(ignore_count);
} break;
@@ -105,27 +106,29 @@ public:
if (success) {
m_bp_opts.SetOneShot(value);
} else
- error.SetErrorStringWithFormat(
- "invalid boolean value '%s' passed for -o option",
- option_arg.str().c_str());
+ error = CreateOptionParsingError(option_arg, short_option, long_option,
+ g_bool_parsing_error_message);
} break;
case 't': {
lldb::tid_t thread_id = LLDB_INVALID_THREAD_ID;
if (option_arg == "current") {
if (!execution_context) {
- error.SetErrorStringWithFormat("No context to determine current "
- "thread");
+ error = CreateOptionParsingError(
+ option_arg, short_option, long_option,
+ "No context to determine current thread");
} else {
ThreadSP ctx_thread_sp = execution_context->GetThreadSP();
if (!ctx_thread_sp || !ctx_thread_sp->IsValid()) {
- error.SetErrorStringWithFormat("No currently selected thread");
+ error =
+ CreateOptionParsingError(option_arg, short_option, long_option,
+ "No currently selected thread");
} else {
thread_id = ctx_thread_sp->GetID();
}
}
} else if (option_arg.getAsInteger(0, thread_id)) {
- error.SetErrorStringWithFormat("invalid thread id string '%s'",
- option_arg.str().c_str());
+ error = CreateOptionParsingError(option_arg, short_option, long_option,
+ g_int_parsing_error_message);
}
if (thread_id != LLDB_INVALID_THREAD_ID)
m_bp_opts.SetThreadID(thread_id);
@@ -139,8 +142,8 @@ public:
case 'x': {
uint32_t thread_index = UINT32_MAX;
if (option_arg.getAsInteger(0, thread_index)) {
- error.SetErrorStringWithFormat("invalid thread index string '%s'",
- option_arg.str().c_str());
+ error = CreateOptionParsingError(option_arg, short_option, long_option,
+ g_int_parsing_error_message);
} else {
m_bp_opts.GetThreadSpec()->SetIndex(thread_index);
}
diff --git a/lldb/source/Core/CoreProperties.td b/lldb/source/Core/CoreProperties.td
index 4cfff805688c..a6cb951187a0 100644
--- a/lldb/source/Core/CoreProperties.td
+++ b/lldb/source/Core/CoreProperties.td
@@ -132,7 +132,7 @@ let Definition = "debugger" in {
Global,
DefaultStringValue<"${ansi.normal}">,
Desc<"When displaying the line marker in a color-enabled terminal, use the ANSI terminal code specified in this format immediately after the line to be marked.">;
- def TerminalWidth: Property<"term-width", "SInt64">,
+ def TerminalWidth: Property<"term-width", "UInt64">,
Global,
DefaultUnsignedValue<80>,
Desc<"The maximum number of columns to use for displaying text.">;
diff --git a/lldb/source/Core/Debugger.cpp b/lldb/source/Core/Debugger.cpp
index 97311b4716ac..c3e603dbae89 100644
--- a/lldb/source/Core/Debugger.cpp
+++ b/lldb/source/Core/Debugger.cpp
@@ -365,7 +365,7 @@ bool Debugger::SetREPLLanguage(lldb::LanguageType repl_lang) {
uint64_t Debugger::GetTerminalWidth() const {
const uint32_t idx = ePropertyTerminalWidth;
- return GetPropertyAtIndexAs<int64_t>(
+ return GetPropertyAtIndexAs<uint64_t>(
idx, g_debugger_properties[idx].default_uint_value);
}
@@ -886,8 +886,8 @@ Debugger::Debugger(lldb::LogOutputCallback log_callback, void *baton)
}
assert(m_dummy_target_sp.get() && "Couldn't construct dummy target?");
- OptionValueSInt64 *term_width =
- m_collection_sp->GetPropertyAtIndexAsOptionValueSInt64(
+ OptionValueUInt64 *term_width =
+ m_collection_sp->GetPropertyAtIndexAsOptionValueUInt64(
ePropertyTerminalWidth);
term_width->SetMinimumValue(10);
term_width->SetMaximumValue(1024);
diff --git a/lldb/source/Interpreter/OptionValueUInt64.cpp b/lldb/source/Interpreter/OptionValueUInt64.cpp
index 1999c63d11af..2e69c164e32a 100644
--- a/lldb/source/Interpreter/OptionValueUInt64.cpp
+++ b/lldb/source/Interpreter/OptionValueUInt64.cpp
@@ -47,9 +47,16 @@ Status OptionValueUInt64::SetValueFromString(llvm::StringRef value_ref,
llvm::StringRef value_trimmed = value_ref.trim();
uint64_t value;
if (llvm::to_integer(value_trimmed, value)) {
- m_value_was_set = true;
- m_current_value = value;
- NotifyValueChanged();
+ if (value >= m_min_value && value <= m_max_value) {
+ m_value_was_set = true;
+ m_current_value = value;
+ NotifyValueChanged();
+ } else {
+ error.SetErrorStringWithFormat(
+ "%" PRIu64 " is out of range, valid values must be between %" PRIu64
+ " and %" PRIu64 ".",
+ value, m_min_value, m_max_value);
+ }
} else {
error.SetErrorStringWithFormat("invalid uint64_t string value: '%s'",
value_ref.str().c_str());
diff --git a/lldb/source/Interpreter/Options.cpp b/lldb/source/Interpreter/Options.cpp
index 89fe69009d90..51b7e6b26b6e 100644
--- a/lldb/source/Interpreter/Options.cpp
+++ b/lldb/source/Interpreter/Options.cpp
@@ -1365,3 +1365,16 @@ llvm::Expected<Args> Options::Parse(const Args &args,
argv.erase(argv.begin(), argv.begin() + OptionParser::GetOptionIndex());
return ReconstituteArgsAfterParsing(argv, args);
}
+
+llvm::Error lldb_private::CreateOptionParsingError(
+ llvm::StringRef option_arg, const char short_option,
+ llvm::StringRef long_option, llvm::StringRef additional_context) {
+ std::string buffer;
+ llvm::raw_string_ostream stream(buffer);
+ stream << "Invalid value ('" << option_arg << "') for -" << short_option;
+ if (!long_option.empty())
+ stream << " (" << long_option << ")";
+ if (!additional_context.empty())
+ stream << ": " << additional_context;
+ return llvm::createStringError(llvm::inconvertibleErrorCode(), buffer);
+}
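
The CommandObjectBreakpoint.cpp changes above route their failures through this helper, so every option-parsing error now shares the same shape. A self-contained sketch of that message format, using plain std::string instead of llvm::Error so it runs without LLVM; the option names in the example call are illustrative:

#include <iostream>
#include <string>

// Sketch of the message layout produced by CreateOptionParsingError():
// "Invalid value ('<arg>') for -<short> (<long>): <context>".
std::string FormatOptionParsingError(const std::string &option_arg,
                                     char short_option,
                                     const std::string &long_option = {},
                                     const std::string &additional_context = {}) {
  std::string buffer = "Invalid value ('" + option_arg + "') for -";
  buffer += short_option;
  if (!long_option.empty())
    buffer += " (" + long_option + ")";
  if (!additional_context.empty())
    buffer += ": " + additional_context;
  return buffer;
}

int main() {
  // Hypothetical failure of an integer-valued -i flag:
  std::cout << FormatOptionParsingError("banana", 'i', "ignore-count",
                                        "Failed to parse as integer")
            << '\n';
  // Prints:
  // Invalid value ('banana') for -i (ignore-count): Failed to parse as integer
  return 0;
}
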
diff --git a/lldb/source/Target/Process.cpp b/lldb/source/Target/Process.cpp
index 23a8a66645c0..137795cb8cec 100644
--- a/lldb/source/Target/Process.cpp
+++ b/lldb/source/Target/Process.cpp
@@ -2937,14 +2937,11 @@ void Process::CompleteAttach() {
DidAttach(process_arch);
if (process_arch.IsValid()) {
+ LLDB_LOG(log,
+ "Process::{0} replacing process architecture with DidAttach() "
+ "architecture: \"{1}\"",
+ __FUNCTION__, process_arch.GetTriple().getTriple());
GetTarget().SetArchitecture(process_arch);
- if (log) {
- const char *triple_str = process_arch.GetTriple().getTriple().c_str();
- LLDB_LOGF(log,
- "Process::%s replacing process architecture with DidAttach() "
- "architecture: %s",
- __FUNCTION__, triple_str ? triple_str : "<null>");
- }
}
// We just attached. If we have a platform, ask it for the process
diff --git a/lldb/test/API/commands/expression/call-throws/TestCallThatThrows.py b/lldb/test/API/commands/expression/call-throws/TestCallThatThrows.py
index 2868ec5ffdbd..b8cc87c93ba6 100644
--- a/lldb/test/API/commands/expression/call-throws/TestCallThatThrows.py
+++ b/lldb/test/API/commands/expression/call-throws/TestCallThatThrows.py
@@ -46,7 +46,7 @@ class ExprCommandWithThrowTestCase(TestBase):
value = frame.EvaluateExpression("[my_class callMeIThrow]", options)
self.assertTrue(value.IsValid())
- self.assertEqual(value.GetError().Success(), False)
+ self.assertFalse(value.GetError().Success())
self.check_after_call()
diff --git a/lldb/test/API/commands/expression/dont_allow_jit/TestAllowJIT.py b/lldb/test/API/commands/expression/dont_allow_jit/TestAllowJIT.py
index 307d4521427d..eb812f1902e6 100644
--- a/lldb/test/API/commands/expression/dont_allow_jit/TestAllowJIT.py
+++ b/lldb/test/API/commands/expression/dont_allow_jit/TestAllowJIT.py
@@ -54,7 +54,7 @@ class TestAllowJIT(TestBase):
# First make sure we can call the function with the default option set.
options = lldb.SBExpressionOptions()
# Check that the default is to allow JIT:
- self.assertEqual(options.GetAllowJIT(), True, "Default is true")
+ self.assertTrue(options.GetAllowJIT(), "Default is true")
# Now use the options:
result = frame.EvaluateExpression("call_me(10)", options)
@@ -64,9 +64,7 @@ class TestAllowJIT(TestBase):
# Now disallow JIT and make sure it fails:
options.SetAllowJIT(False)
# Check that we got the right value:
- self.assertEqual(
- options.GetAllowJIT(), False, "Got False after setting to False"
- )
+ self.assertFalse(options.GetAllowJIT(), "Got False after setting to False")
# Again use it and ensure we fail:
result = frame.EvaluateExpression("call_me(10)", options)
@@ -79,7 +77,7 @@ class TestAllowJIT(TestBase):
# Finally set the allow JIT value back to true and make sure that works:
options.SetAllowJIT(True)
- self.assertEqual(options.GetAllowJIT(), True, "Set back to True correctly")
+ self.assertTrue(options.GetAllowJIT(), "Set back to True correctly")
# And again, make sure this works:
result = frame.EvaluateExpression("call_me(10)", options)
diff --git a/lldb/test/API/commands/settings/TestSettings.py b/lldb/test/API/commands/settings/TestSettings.py
index a2d845493d1d..104a9f09788c 100644
--- a/lldb/test/API/commands/settings/TestSettings.py
+++ b/lldb/test/API/commands/settings/TestSettings.py
@@ -2,7 +2,6 @@
Test lldb settings command.
"""
-
import json
import os
import re
@@ -151,14 +150,22 @@ class SettingsCommandTestCase(TestBase):
self.expect(
"settings show term-width",
SETTING_MSG("term-width"),
- startstr="term-width (int) = 70",
+ startstr="term-width (unsigned) = 70",
)
# The overall display should also reflect the new setting.
self.expect(
"settings show",
SETTING_MSG("term-width"),
- substrs=["term-width (int) = 70"],
+ substrs=["term-width (unsigned) = 70"],
+ )
+
+ self.dbg.SetTerminalWidth(60)
+
+ self.expect(
+ "settings show",
+ SETTING_MSG("term-width"),
+ substrs=["term-width (unsigned) = 60"],
)
# rdar://problem/10712130
@@ -593,7 +600,7 @@ class SettingsCommandTestCase(TestBase):
self.expect(
"settings show term-width",
SETTING_MSG("term-width"),
- startstr="term-width (int) = 60",
+ startstr="term-width (unsigned) = 60",
)
self.runCmd("settings clear term-width", check=False)
# string
diff --git a/lldb/test/API/commands/statistics/basic/TestStats.py b/lldb/test/API/commands/statistics/basic/TestStats.py
index 6f083222227f..fb6fc07d2d44 100644
--- a/lldb/test/API/commands/statistics/basic/TestStats.py
+++ b/lldb/test/API/commands/statistics/basic/TestStats.py
@@ -35,17 +35,13 @@ class TestCase(TestBase):
)
def verify_key_in_dict(self, key, d, description):
- self.assertEqual(
- key in d,
- True,
- 'make sure key "%s" is in dictionary %s' % (key, description),
+ self.assertIn(
+ key, d, 'make sure key "%s" is in dictionary %s' % (key, description)
)
def verify_key_not_in_dict(self, key, d, description):
- self.assertEqual(
- key in d,
- False,
- 'make sure key "%s" is in dictionary %s' % (key, description),
+ self.assertNotIn(
+            key, d, 'make sure key "%s" is not in dictionary %s' % (key, description)
)
def verify_keys(self, dict, description, keys_exist, keys_missing=None):
@@ -120,9 +116,7 @@ class TestCase(TestBase):
self.verify_success_fail_count(stats, "frameVariable", 1, 0)
# Test that "stopCount" is available when the process has run
- self.assertEqual(
- "stopCount" in stats, True, 'ensure "stopCount" is in target JSON'
- )
+ self.assertIn("stopCount", stats, 'ensure "stopCount" is in target JSON')
self.assertGreater(
stats["stopCount"], 0, 'make sure "stopCount" is greater than zero'
)
@@ -484,9 +478,9 @@ class TestCase(TestBase):
exe = self.getBuildArtifact(exe_name)
dsym = self.getBuildArtifact(exe_name + ".dSYM")
# Make sure the executable file exists after building.
- self.assertEqual(os.path.exists(exe), True)
+ self.assertTrue(os.path.exists(exe))
# Make sure the dSYM file exists after building.
- self.assertEqual(os.path.isdir(dsym), True)
+ self.assertTrue(os.path.isdir(dsym))
# Create the target
target = self.createTestTarget(file_path=exe)
@@ -532,9 +526,9 @@ class TestCase(TestBase):
exe = self.getBuildArtifact(exe_name)
dsym = self.getBuildArtifact(exe_name + ".dSYM")
# Make sure the executable file exists after building.
- self.assertEqual(os.path.exists(exe), True)
+ self.assertTrue(os.path.exists(exe))
# Make sure the dSYM file doesn't exist after building.
- self.assertEqual(os.path.isdir(dsym), False)
+ self.assertFalse(os.path.isdir(dsym))
# Create the target
target = self.createTestTarget(file_path=exe)
@@ -585,11 +579,11 @@ class TestCase(TestBase):
dsym = self.getBuildArtifact(exe_name + ".dSYM")
main_obj = self.getBuildArtifact("main.o")
# Make sure the executable file exists after building.
- self.assertEqual(os.path.exists(exe), True)
+ self.assertTrue(os.path.exists(exe))
# Make sure the dSYM file doesn't exist after building.
- self.assertEqual(os.path.isdir(dsym), False)
+ self.assertFalse(os.path.isdir(dsym))
# Make sure the main.o object file exists after building.
- self.assertEqual(os.path.exists(main_obj), True)
+ self.assertTrue(os.path.exists(main_obj))
# Delete the main.o file that contains the debug info so we force an
# error when we run to main and try to get variables
@@ -604,7 +598,7 @@ class TestCase(TestBase):
# Make sure we have "debugInfoHadVariableErrors" variable that is set to
# false before failing to get local variables due to missing .o file.
- self.assertEqual(exe_stats["debugInfoHadVariableErrors"], False)
+ self.assertFalse(exe_stats["debugInfoHadVariableErrors"])
# Verify that the top level statistic that aggregates the number of
# modules with debugInfoHadVariableErrors is zero
@@ -624,7 +618,7 @@ class TestCase(TestBase):
# Make sure we have "hadFrameVariableErrors" variable that is set to
# true after failing to get local variables due to missing .o file.
- self.assertEqual(exe_stats["debugInfoHadVariableErrors"], True)
+ self.assertTrue(exe_stats["debugInfoHadVariableErrors"])
# Verify that the top level statistic that aggregates the number of
# modules with debugInfoHadVariableErrors is greater than zero
diff --git a/lldb/test/API/commands/trace/TestTraceSave.py b/lldb/test/API/commands/trace/TestTraceSave.py
index ef1ab2f7aa41..af38669cb4fc 100644
--- a/lldb/test/API/commands/trace/TestTraceSave.py
+++ b/lldb/test/API/commands/trace/TestTraceSave.py
@@ -179,11 +179,11 @@ class TestTraceSave(TraceIntelPTTestCaseBase):
res = lldb.SBCommandReturnObject()
ci.HandleCommand("thread trace dump instructions -c 10 --forwards", res)
- self.assertEqual(res.Succeeded(), True)
+ self.assertTrue(res.Succeeded())
first_ten_instructions = res.GetOutput()
ci.HandleCommand("thread trace dump instructions -c 10", res)
- self.assertEqual(res.Succeeded(), True)
+ self.assertTrue(res.Succeeded())
last_ten_instructions = res.GetOutput()
# Now, save the trace to <trace_copy_dir>
@@ -203,11 +203,11 @@ class TestTraceSave(TraceIntelPTTestCaseBase):
# Compare with instructions saved at the first time
ci.HandleCommand("thread trace dump instructions -c 10 --forwards", res)
- self.assertEqual(res.Succeeded(), True)
+ self.assertTrue(res.Succeeded())
self.assertEqual(res.GetOutput(), first_ten_instructions)
ci.HandleCommand("thread trace dump instructions -c 10", res)
- self.assertEqual(res.Succeeded(), True)
+ self.assertTrue(res.Succeeded())
self.assertEqual(res.GetOutput(), last_ten_instructions)
def testSaveKernelTrace(self):
diff --git a/lldb/test/API/functionalities/breakpoint/address_breakpoints/TestBadAddressBreakpoints.py b/lldb/test/API/functionalities/breakpoint/address_breakpoints/TestBadAddressBreakpoints.py
index 0ab11a427c10..d120692e4d6e 100644
--- a/lldb/test/API/functionalities/breakpoint/address_breakpoints/TestBadAddressBreakpoints.py
+++ b/lldb/test/API/functionalities/breakpoint/address_breakpoints/TestBadAddressBreakpoints.py
@@ -40,7 +40,7 @@ class BadAddressBreakpointTestCase(TestBase):
bkpt = target.BreakpointCreateByAddress(illegal_address)
# Verify that breakpoint is not resolved.
for bp_loc in bkpt:
- self.assertEqual(bp_loc.IsResolved(), False)
+ self.assertFalse(bp_loc.IsResolved())
else:
self.fail(
"Could not find an illegal address at which to set a bad breakpoint."
diff --git a/lldb/test/API/functionalities/breakpoint/breakpoint_command/TestBreakpointCommand.py b/lldb/test/API/functionalities/breakpoint/breakpoint_command/TestBreakpointCommand.py
index 620f648d51fd..ea242952e409 100644
--- a/lldb/test/API/functionalities/breakpoint/breakpoint_command/TestBreakpointCommand.py
+++ b/lldb/test/API/functionalities/breakpoint/breakpoint_command/TestBreakpointCommand.py
@@ -572,9 +572,9 @@ class BreakpointCommandTestCase(TestBase):
res = target.GetStatistics().GetAsJSON(stream)
self.assertTrue(res.Success())
debug_stats = json.loads(stream.GetData())
- self.assertEqual(
- "targets" in debug_stats,
- True,
+ self.assertIn(
+ "targets",
+ debug_stats,
'Make sure the "targets" key in in target.GetStatistics()',
)
target_stats = debug_stats["targets"][0]
@@ -659,9 +659,9 @@ class BreakpointCommandTestCase(TestBase):
res = target.GetStatistics().GetAsJSON(stream)
self.assertTrue(res.Success())
debug_stats = json.loads(stream.GetData())
- self.assertEqual(
- "targets" in debug_stats,
- True,
+ self.assertIn(
+ "targets",
+ debug_stats,
'Make sure the "targets" key in in target.GetStatistics()',
)
target_stats = debug_stats["targets"][0]
diff --git a/lldb/test/API/functionalities/breakpoint/breakpoint_names/TestBreakpointNames.py b/lldb/test/API/functionalities/breakpoint/breakpoint_names/TestBreakpointNames.py
index 330f916a907e..0f9510c4507d 100644
--- a/lldb/test/API/functionalities/breakpoint/breakpoint_names/TestBreakpointNames.py
+++ b/lldb/test/API/functionalities/breakpoint/breakpoint_names/TestBreakpointNames.py
@@ -389,7 +389,7 @@ class BreakpointNames(TestBase):
)
def check_permission_results(self, bp_name):
- self.assertEqual(bp_name.GetAllowDelete(), False, "Didn't set allow delete.")
+ self.assertFalse(bp_name.GetAllowDelete(), "Didn't set allow delete.")
protected_bkpt = self.target.BreakpointCreateByLocation(self.main_file_spec, 10)
protected_id = protected_bkpt.GetID()
@@ -402,14 +402,11 @@ class BreakpointNames(TestBase):
self.assertSuccess(success, "Couldn't add this name to the breakpoint")
self.target.DisableAllBreakpoints()
- self.assertEqual(
- protected_bkpt.IsEnabled(),
- True,
- "Didnt' keep breakpoint from being disabled",
+ self.assertTrue(
+ protected_bkpt.IsEnabled(), "Didnt' keep breakpoint from being disabled"
)
- self.assertEqual(
+ self.assertFalse(
unprotected_bkpt.IsEnabled(),
- False,
"Protected too many breakpoints from disabling.",
)
@@ -418,14 +415,11 @@ class BreakpointNames(TestBase):
result = lldb.SBCommandReturnObject()
self.dbg.GetCommandInterpreter().HandleCommand("break disable", result)
self.assertTrue(result.Succeeded())
- self.assertEqual(
- protected_bkpt.IsEnabled(),
- True,
- "Didnt' keep breakpoint from being disabled",
+ self.assertTrue(
+ protected_bkpt.IsEnabled(), "Didnt' keep breakpoint from being disabled"
)
- self.assertEqual(
+ self.assertFalse(
unprotected_bkpt.IsEnabled(),
- False,
"Protected too many breakpoints from disabling.",
)
diff --git a/lldb/test/API/functionalities/gdb_remote_client/TestJLink6Armv7RegisterDefinition.py b/lldb/test/API/functionalities/gdb_remote_client/TestJLink6Armv7RegisterDefinition.py
index eb7c036c8d60..3a426620af55 100644
--- a/lldb/test/API/functionalities/gdb_remote_client/TestJLink6Armv7RegisterDefinition.py
+++ b/lldb/test/API/functionalities/gdb_remote_client/TestJLink6Armv7RegisterDefinition.py
@@ -198,7 +198,7 @@ class TestJLink6Armv7RegisterDefinition(GDBRemoteTestBase):
error = lldb.SBError()
data = lldb.SBData()
data.SetData(error, val, lldb.eByteOrderBig, 4)
- self.assertEqual(r1_valobj.SetData(data, error), True)
+ self.assertTrue(r1_valobj.SetData(data, error))
self.assertSuccess(error)
r1_valobj = process.GetThreadAtIndex(0).GetFrameAtIndex(0).FindRegister("r1")
diff --git a/lldb/test/API/functionalities/module_cache/simple_exe/TestModuleCacheSimple.py b/lldb/test/API/functionalities/module_cache/simple_exe/TestModuleCacheSimple.py
index 4214bd108bfc..abf4cf3944e1 100644
--- a/lldb/test/API/functionalities/module_cache/simple_exe/TestModuleCacheSimple.py
+++ b/lldb/test/API/functionalities/module_cache/simple_exe/TestModuleCacheSimple.py
@@ -66,18 +66,16 @@ class ModuleCacheTestcaseSimple(TestBase):
# get a different creation and modification time for the file since some
# OSs store the modification time in seconds since Jan 1, 1970.
os.remove(exe)
- self.assertEqual(
- os.path.exists(exe),
- False,
- "make sure we were able to remove the executable",
+ self.assertFalse(
+ os.path.exists(exe), "make sure we were able to remove the executable"
)
time.sleep(2)
# Now rebuild the binary so it has a different content which should
# update the UUID to make the cache miss when it tries to load the
# symbol table from the binary at the same path.
self.build(dictionary={"CFLAGS_EXTRAS": "-DEXTRA_FUNCTION"})
- self.assertEqual(
- os.path.exists(exe), True, "make sure executable exists after rebuild"
+ self.assertTrue(
+ os.path.exists(exe), "make sure executable exists after rebuild"
)
# Make sure the modification time has changed or this test will fail.
exe_mtime_2 = os.path.getmtime(exe)
@@ -99,9 +97,8 @@ class ModuleCacheTestcaseSimple(TestBase):
main_module = target.GetModuleAtIndex(0)
self.assertTrue(main_module.IsValid())
main_module.GetNumSymbols()
- self.assertEqual(
+ self.assertTrue(
os.path.exists(symtab_cache_path),
- True,
'make sure "symtab" cache files exists after cache is updated',
)
symtab_mtime_2 = os.path.getmtime(symtab_cache_path)
diff --git a/lldb/test/API/functionalities/progress_reporting/TestTrimmedProgressReporting.py b/lldb/test/API/functionalities/progress_reporting/TestTrimmedProgressReporting.py
index 357999b6f561..ee35dbd23b3d 100644
--- a/lldb/test/API/functionalities/progress_reporting/TestTrimmedProgressReporting.py
+++ b/lldb/test/API/functionalities/progress_reporting/TestTrimmedProgressReporting.py
@@ -24,7 +24,8 @@ class TestTrimmedProgressReporting(PExpectTest):
)
self.expect("set set term-width " + str(term_width))
self.expect(
- "set show term-width", substrs=["term-width (int) = " + str(term_width)]
+ "set show term-width",
+ substrs=["term-width (unsigned) = " + str(term_width)],
)
self.child.send("file " + self.getBuildArtifact("a.out") + "\n")
diff --git a/lldb/test/API/functionalities/stats_api/TestStatisticsAPI.py b/lldb/test/API/functionalities/stats_api/TestStatisticsAPI.py
index eee91bfadead..851097bdfecf 100644
--- a/lldb/test/API/functionalities/stats_api/TestStatisticsAPI.py
+++ b/lldb/test/API/functionalities/stats_api/TestStatisticsAPI.py
@@ -33,47 +33,47 @@ class TestStatsAPI(TestBase):
stream = lldb.SBStream()
res = stats.GetAsJSON(stream)
debug_stats = json.loads(stream.GetData())
- self.assertEqual(
- "targets" in debug_stats,
- True,
+ self.assertIn(
+ "targets",
+ debug_stats,
'Make sure the "targets" key in in target.GetStatistics()',
)
- self.assertEqual(
- "modules" in debug_stats,
- True,
+ self.assertIn(
+ "modules",
+ debug_stats,
'Make sure the "modules" key in in target.GetStatistics()',
)
stats_json = debug_stats["targets"][0]
- self.assertEqual(
- "expressionEvaluation" in stats_json,
- True,
+ self.assertIn(
+ "expressionEvaluation",
+ stats_json,
'Make sure the "expressionEvaluation" key in in target.GetStatistics()["targets"][0]',
)
- self.assertEqual(
- "frameVariable" in stats_json,
- True,
+ self.assertIn(
+ "frameVariable",
+ stats_json,
'Make sure the "frameVariable" key in in target.GetStatistics()["targets"][0]',
)
expressionEvaluation = stats_json["expressionEvaluation"]
- self.assertEqual(
- "successes" in expressionEvaluation,
- True,
+ self.assertIn(
+ "successes",
+ expressionEvaluation,
'Make sure the "successes" key in in "expressionEvaluation" dictionary"',
)
- self.assertEqual(
- "failures" in expressionEvaluation,
- True,
+ self.assertIn(
+ "failures",
+ expressionEvaluation,
'Make sure the "failures" key in in "expressionEvaluation" dictionary"',
)
frameVariable = stats_json["frameVariable"]
- self.assertEqual(
- "successes" in frameVariable,
- True,
+ self.assertIn(
+ "successes",
+ frameVariable,
'Make sure the "successes" key in in "frameVariable" dictionary"',
)
- self.assertEqual(
- "failures" in frameVariable,
- True,
+ self.assertIn(
+ "failures",
+ frameVariable,
'Make sure the "failures" key in in "frameVariable" dictionary"',
)
diff --git a/lldb/test/API/functionalities/thread/backtrace_limit/TestBacktraceLimit.py b/lldb/test/API/functionalities/thread/backtrace_limit/TestBacktraceLimit.py
index 98baea45ce89..fded504f7c61 100644
--- a/lldb/test/API/functionalities/thread/backtrace_limit/TestBacktraceLimit.py
+++ b/lldb/test/API/functionalities/thread/backtrace_limit/TestBacktraceLimit.py
@@ -23,5 +23,5 @@ class BacktraceLimitSettingTest(TestBase):
interp.HandleCommand(
"settings set target.process.thread.max-backtrace-depth 30", result
)
- self.assertEqual(True, result.Succeeded())
+ self.assertTrue(result.Succeeded())
self.assertEqual(30, thread.GetNumFrames())
diff --git a/lldb/test/API/macosx/arm-corefile-regctx/TestArmMachoCorefileRegctx.py b/lldb/test/API/macosx/arm-corefile-regctx/TestArmMachoCorefileRegctx.py
index 1ecb0f466e78..4190ea3ac331 100644
--- a/lldb/test/API/macosx/arm-corefile-regctx/TestArmMachoCorefileRegctx.py
+++ b/lldb/test/API/macosx/arm-corefile-regctx/TestArmMachoCorefileRegctx.py
@@ -28,7 +28,7 @@ class TestArmMachoCorefileRegctx(TestBase):
target = self.dbg.CreateTarget("")
err = lldb.SBError()
process = target.LoadCore(self.corefile)
- self.assertEqual(process.IsValid(), True)
+ self.assertTrue(process.IsValid())
thread = process.GetSelectedThread()
frame = thread.GetSelectedFrame()
@@ -51,7 +51,7 @@ class TestArmMachoCorefileRegctx(TestBase):
target = self.dbg.CreateTarget("")
err = lldb.SBError()
process = target.LoadCore(self.corefile)
- self.assertEqual(process.IsValid(), True)
+ self.assertTrue(process.IsValid())
thread = process.GetSelectedThread()
frame = thread.GetSelectedFrame()
diff --git a/lldb/test/API/macosx/lc-note/addrable-bits/TestAddrableBitsCorefile.py b/lldb/test/API/macosx/lc-note/addrable-bits/TestAddrableBitsCorefile.py
index 221fe6260088..e56ecfcb14d4 100644
--- a/lldb/test/API/macosx/lc-note/addrable-bits/TestAddrableBitsCorefile.py
+++ b/lldb/test/API/macosx/lc-note/addrable-bits/TestAddrableBitsCorefile.py
@@ -29,7 +29,7 @@ class TestAddrableBitsCorefile(TestBase):
(target, process, thread, bkpt) = lldbutil.run_to_source_breakpoint(
self, "break here", lldb.SBFileSpec("main.c")
)
- self.assertEqual(process.IsValid(), True)
+ self.assertTrue(process.IsValid())
found_main = False
for f in thread.frames:
diff --git a/lldb/test/API/macosx/lc-note/firmware-corefile/TestFirmwareCorefiles.py b/lldb/test/API/macosx/lc-note/firmware-corefile/TestFirmwareCorefiles.py
index b9d2055e83a5..db3074d7e794 100644
--- a/lldb/test/API/macosx/lc-note/firmware-corefile/TestFirmwareCorefiles.py
+++ b/lldb/test/API/macosx/lc-note/firmware-corefile/TestFirmwareCorefiles.py
@@ -73,7 +73,7 @@ class TestFirmwareCorefiles(TestBase):
if self.TraceOn():
self.runCmd("script print('loading corefile %s')" % verstr_corefile)
process = target.LoadCore(verstr_corefile)
- self.assertEqual(process.IsValid(), True)
+ self.assertTrue(process.IsValid())
if self.TraceOn():
self.runCmd("image list")
self.runCmd("target mod dump sections")
@@ -91,7 +91,7 @@ class TestFirmwareCorefiles(TestBase):
"script print('loading corefile %s')" % verstr_corefile_invalid_ident
)
process = target.LoadCore(verstr_corefile_invalid_ident)
- self.assertEqual(process.IsValid(), True)
+ self.assertTrue(process.IsValid())
# Third, try the "kern ver str" corefile where it loads at an address
target = self.dbg.CreateTarget("")
@@ -99,7 +99,7 @@ class TestFirmwareCorefiles(TestBase):
if self.TraceOn():
self.runCmd("script print('loading corefile %s')" % verstr_corefile_addr)
process = target.LoadCore(verstr_corefile_addr)
- self.assertEqual(process.IsValid(), True)
+ self.assertTrue(process.IsValid())
if self.TraceOn():
self.runCmd("image list")
self.runCmd("target mod dump sections")
@@ -178,7 +178,7 @@ class TestFirmwareCorefiles(TestBase):
if self.TraceOn():
self.runCmd("script print('loading corefile %s')" % binspec_corefile)
process = target.LoadCore(binspec_corefile)
- self.assertEqual(process.IsValid(), True)
+ self.assertTrue(process.IsValid())
if self.TraceOn():
self.runCmd("image list")
self.runCmd("target mod dump sections")
@@ -192,7 +192,7 @@ class TestFirmwareCorefiles(TestBase):
if self.TraceOn():
self.runCmd("script print('loading corefile %s')" % binspec_corefile_addr)
process = target.LoadCore(binspec_corefile_addr)
- self.assertEqual(process.IsValid(), True)
+ self.assertTrue(process.IsValid())
if self.TraceOn():
self.runCmd("image list")
self.runCmd("target mod dump sections")
@@ -212,7 +212,7 @@ class TestFirmwareCorefiles(TestBase):
"script print('loading corefile %s')" % binspec_corefile_slideonly
)
process = target.LoadCore(binspec_corefile_slideonly)
- self.assertEqual(process.IsValid(), True)
+ self.assertTrue(process.IsValid())
if self.TraceOn():
self.runCmd("image list")
self.runCmd("target mod dump sections")
@@ -352,7 +352,7 @@ class TestFirmwareCorefiles(TestBase):
)
process = target.LoadCore(binspec_corefile_addr)
- self.assertEqual(process.IsValid(), True)
+ self.assertTrue(process.IsValid())
if self.TraceOn():
self.runCmd("image list")
self.runCmd("target mod dump sections")
diff --git a/lldb/test/API/macosx/lc-note/kern-ver-str/TestKernVerStrLCNOTE.py b/lldb/test/API/macosx/lc-note/kern-ver-str/TestKernVerStrLCNOTE.py
index 9713c4a85cd6..d4366196c53c 100644
--- a/lldb/test/API/macosx/lc-note/kern-ver-str/TestKernVerStrLCNOTE.py
+++ b/lldb/test/API/macosx/lc-note/kern-ver-str/TestKernVerStrLCNOTE.py
@@ -94,7 +94,7 @@ class TestKernVerStrLCNOTE(TestBase):
self.target = self.dbg.CreateTarget("")
err = lldb.SBError()
self.process = self.target.LoadCore(self.corefile)
- self.assertEqual(self.process.IsValid(), True)
+ self.assertTrue(self.process.IsValid())
if self.TraceOn():
self.runCmd("image list")
self.assertEqual(self.target.GetNumModules(), 1)
diff --git a/lldb/test/API/macosx/lc-note/multiple-binary-corefile/TestMultipleBinaryCorefile.py b/lldb/test/API/macosx/lc-note/multiple-binary-corefile/TestMultipleBinaryCorefile.py
index 0a0bc68646e6..897eab23e05e 100644
--- a/lldb/test/API/macosx/lc-note/multiple-binary-corefile/TestMultipleBinaryCorefile.py
+++ b/lldb/test/API/macosx/lc-note/multiple-binary-corefile/TestMultipleBinaryCorefile.py
@@ -45,7 +45,7 @@ class TestMultipleBinaryCorefile(TestBase):
if self.TraceOn():
print("loading corefile %s" % self.corefile)
process = target.LoadCore(self.corefile)
- self.assertEqual(process.IsValid(), True)
+ self.assertTrue(process.IsValid())
if self.TraceOn():
print("image list after loading corefile:")
self.runCmd("image list")
diff --git a/lldb/test/API/macosx/queues/TestQueues.py b/lldb/test/API/macosx/queues/TestQueues.py
index f2d15bb5ff15..45b52af2fbdb 100644
--- a/lldb/test/API/macosx/queues/TestQueues.py
+++ b/lldb/test/API/macosx/queues/TestQueues.py
@@ -457,9 +457,8 @@ class TestQueues(TestBase):
"doing_the_work_2",
"queue 2's pending item #0 should be doing_the_work_2",
)
- self.assertEqual(
+ self.assertFalse(
queue_performer_2.GetPendingItemAtIndex(9999).IsValid(),
- False,
"queue 2's pending item #9999 is invalid",
)
diff --git a/lldb/test/API/macosx/safe-to-func-call/TestSafeFuncCalls.py b/lldb/test/API/macosx/safe-to-func-call/TestSafeFuncCalls.py
index 6a37b250b162..551cab1269c5 100644
--- a/lldb/test/API/macosx/safe-to-func-call/TestSafeFuncCalls.py
+++ b/lldb/test/API/macosx/safe-to-func-call/TestSafeFuncCalls.py
@@ -49,8 +49,7 @@ class TestSafeFuncCalls(TestBase):
main_thread.SafeToCallFunctions(),
"It is safe to call functions on the main thread",
)
- self.assertEqual(
+ self.assertFalse(
select_thread.SafeToCallFunctions(),
- False,
"It is not safe to call functions on the select thread",
)
diff --git a/lldb/test/API/python_api/interpreter/TestRunCommandInterpreterAPI.py b/lldb/test/API/python_api/interpreter/TestRunCommandInterpreterAPI.py
index 64e0770dc0b1..af9749313376 100644
--- a/lldb/test/API/python_api/interpreter/TestRunCommandInterpreterAPI.py
+++ b/lldb/test/API/python_api/interpreter/TestRunCommandInterpreterAPI.py
@@ -79,13 +79,13 @@ class SBCommandInterpreterRunOptionsCase(TestBase):
opts = lldb.SBCommandInterpreterRunOptions()
# Check getters with default values
- self.assertEqual(opts.GetStopOnContinue(), False)
- self.assertEqual(opts.GetStopOnError(), False)
- self.assertEqual(opts.GetStopOnCrash(), False)
- self.assertEqual(opts.GetEchoCommands(), True)
- self.assertEqual(opts.GetPrintResults(), True)
- self.assertEqual(opts.GetPrintErrors(), True)
- self.assertEqual(opts.GetAddToHistory(), True)
+ self.assertFalse(opts.GetStopOnContinue())
+ self.assertFalse(opts.GetStopOnError())
+ self.assertFalse(opts.GetStopOnCrash())
+ self.assertTrue(opts.GetEchoCommands())
+ self.assertTrue(opts.GetPrintResults())
+ self.assertTrue(opts.GetPrintErrors())
+ self.assertTrue(opts.GetAddToHistory())
# Invert values
opts.SetStopOnContinue(not opts.GetStopOnContinue())
@@ -97,10 +97,10 @@ class SBCommandInterpreterRunOptionsCase(TestBase):
opts.SetAddToHistory(not opts.GetAddToHistory())
# Check the value changed
- self.assertEqual(opts.GetStopOnContinue(), True)
- self.assertEqual(opts.GetStopOnError(), True)
- self.assertEqual(opts.GetStopOnCrash(), True)
- self.assertEqual(opts.GetEchoCommands(), False)
- self.assertEqual(opts.GetPrintResults(), False)
- self.assertEqual(opts.GetPrintErrors(), False)
- self.assertEqual(opts.GetAddToHistory(), False)
+ self.assertTrue(opts.GetStopOnContinue())
+ self.assertTrue(opts.GetStopOnError())
+ self.assertTrue(opts.GetStopOnCrash())
+ self.assertFalse(opts.GetEchoCommands())
+ self.assertFalse(opts.GetPrintResults())
+ self.assertFalse(opts.GetPrintErrors())
+ self.assertFalse(opts.GetAddToHistory())
diff --git a/lldb/test/API/tools/lldb-dap/databreakpoint/Makefile b/lldb/test/API/tools/lldb-dap/databreakpoint/Makefile
new file mode 100644
index 000000000000..99998b20bcb0
--- /dev/null
+++ b/lldb/test/API/tools/lldb-dap/databreakpoint/Makefile
@@ -0,0 +1,3 @@
+CXX_SOURCES := main.cpp
+
+include Makefile.rules
diff --git a/lldb/test/API/tools/lldb-dap/databreakpoint/TestDAP_setDataBreakpoints.py b/lldb/test/API/tools/lldb-dap/databreakpoint/TestDAP_setDataBreakpoints.py
new file mode 100644
index 000000000000..17cdad89aa6d
--- /dev/null
+++ b/lldb/test/API/tools/lldb-dap/databreakpoint/TestDAP_setDataBreakpoints.py
@@ -0,0 +1,131 @@
+"""
+Test lldb-dap dataBreakpointInfo and setDataBreakpoints requests
+"""
+
+from lldbsuite.test.decorators import *
+from lldbsuite.test.lldbtest import *
+import lldbdap_testcase
+
+
+class TestDAP_setDataBreakpoints(lldbdap_testcase.DAPTestCaseBase):
+ def setUp(self):
+ lldbdap_testcase.DAPTestCaseBase.setUp(self)
+ self.accessTypes = ["read", "write", "readWrite"]
+
+ @skipIfWindows
+ @skipIfRemote
+ def test_expression(self):
+ """Tests setting data breakpoints on expression."""
+ program = self.getBuildArtifact("a.out")
+ self.build_and_launch(program)
+ source = "main.cpp"
+ first_loop_break_line = line_number(source, "// first loop breakpoint")
+ self.set_source_breakpoints(source, [first_loop_break_line])
+ self.continue_to_next_stop()
+ self.dap_server.get_stackFrame()
+ # Test setting write watchpoint using expressions: &x, arr+2
+ response_x = self.dap_server.request_dataBreakpointInfo(0, "&x")
+ response_arr_2 = self.dap_server.request_dataBreakpointInfo(0, "arr+2")
+ # Test response from dataBreakpointInfo request.
+ self.assertEquals(response_x["body"]["dataId"].split("/")[1], "4")
+ self.assertEquals(response_x["body"]["accessTypes"], self.accessTypes)
+ self.assertEquals(response_arr_2["body"]["dataId"].split("/")[1], "4")
+ self.assertEquals(response_arr_2["body"]["accessTypes"], self.accessTypes)
+ dataBreakpoints = [
+ {"dataId": response_x["body"]["dataId"], "accessType": "write"},
+ {"dataId": response_arr_2["body"]["dataId"], "accessType": "write"},
+ ]
+ set_response = self.dap_server.request_setDataBreakpoint(dataBreakpoints)
+ self.assertEquals(
+ set_response["body"]["breakpoints"],
+ [{"verified": True}, {"verified": True}],
+ )
+
+ self.continue_to_next_stop()
+ x_val = self.dap_server.get_local_variable_value("x")
+ i_val = self.dap_server.get_local_variable_value("i")
+ self.assertEquals(x_val, "2")
+ self.assertEquals(i_val, "1")
+
+ self.continue_to_next_stop()
+ arr_2 = self.dap_server.get_local_variable_child("arr", "[2]")
+ i_val = self.dap_server.get_local_variable_value("i")
+ self.assertEquals(arr_2["value"], "42")
+ self.assertEquals(i_val, "2")
+
+ @skipIfWindows
+ @skipIfRemote
+ def test_functionality(self):
+ """Tests setting data breakpoints on variable."""
+ program = self.getBuildArtifact("a.out")
+ self.build_and_launch(program)
+ source = "main.cpp"
+ first_loop_break_line = line_number(source, "// first loop breakpoint")
+ self.set_source_breakpoints(source, [first_loop_break_line])
+ self.continue_to_next_stop()
+ self.dap_server.get_local_variables()
+ # Test write watchpoints on x, arr[2]
+ response_x = self.dap_server.request_dataBreakpointInfo(1, "x")
+ arr = self.dap_server.get_local_variable("arr")
+ response_arr_2 = self.dap_server.request_dataBreakpointInfo(
+ arr["variablesReference"], "[2]"
+ )
+
+ # Test response from dataBreakpointInfo request.
+ self.assertEquals(response_x["body"]["dataId"].split("/")[1], "4")
+ self.assertEquals(response_x["body"]["accessTypes"], self.accessTypes)
+ self.assertEquals(response_arr_2["body"]["dataId"].split("/")[1], "4")
+ self.assertEquals(response_arr_2["body"]["accessTypes"], self.accessTypes)
+ dataBreakpoints = [
+ {"dataId": response_x["body"]["dataId"], "accessType": "write"},
+ {"dataId": response_arr_2["body"]["dataId"], "accessType": "write"},
+ ]
+ set_response = self.dap_server.request_setDataBreakpoint(dataBreakpoints)
+ self.assertEquals(
+ set_response["body"]["breakpoints"],
+ [{"verified": True}, {"verified": True}],
+ )
+
+ self.continue_to_next_stop()
+ x_val = self.dap_server.get_local_variable_value("x")
+ i_val = self.dap_server.get_local_variable_value("i")
+ self.assertEquals(x_val, "2")
+ self.assertEquals(i_val, "1")
+
+ self.continue_to_next_stop()
+ arr_2 = self.dap_server.get_local_variable_child("arr", "[2]")
+ i_val = self.dap_server.get_local_variable_value("i")
+ self.assertEquals(arr_2["value"], "42")
+ self.assertEquals(i_val, "2")
+ self.dap_server.request_setDataBreakpoint([])
+
+ # Test hit condition
+ second_loop_break_line = line_number(source, "// second loop breakpoint")
+ breakpoint_ids = self.set_source_breakpoints(source, [second_loop_break_line])
+ self.continue_to_breakpoints(breakpoint_ids)
+ dataBreakpoints = [
+ {
+ "dataId": response_x["body"]["dataId"],
+ "accessType": "write",
+ "hitCondition": "2",
+ }
+ ]
+ set_response = self.dap_server.request_setDataBreakpoint(dataBreakpoints)
+ self.assertEquals(set_response["body"]["breakpoints"], [{"verified": True}])
+ self.continue_to_next_stop()
+ x_val = self.dap_server.get_local_variable_value("x")
+ self.assertEquals(x_val, "3")
+
+ # Test condition
+ dataBreakpoints = [
+ {
+ "dataId": response_x["body"]["dataId"],
+ "accessType": "write",
+ "condition": "x==10",
+ }
+ ]
+ set_response = self.dap_server.request_setDataBreakpoint(dataBreakpoints)
+ self.assertEquals(set_response["body"]["breakpoints"], [{"verified": True}])
+ self.continue_to_next_stop()
+ x_val = self.dap_server.get_local_variable_value("x")
+ self.assertEquals(x_val, "10")
diff --git a/lldb/test/API/tools/lldb-dap/databreakpoint/main.cpp b/lldb/test/API/tools/lldb-dap/databreakpoint/main.cpp
new file mode 100644
index 000000000000..bef09c203845
--- /dev/null
+++ b/lldb/test/API/tools/lldb-dap/databreakpoint/main.cpp
@@ -0,0 +1,17 @@
+int main(int argc, char const *argv[]) {
+ // Test for data breakpoint
+ int x = 0;
+ int arr[4] = {1, 2, 3, 4};
+ for (int i = 0; i < 5; ++i) { // first loop breakpoint
+ if (i == 1) {
+ x = i + 1;
+ } else if (i == 2) {
+ arr[i] = 42;
+ }
+ }
+
+ x = 1;
+ for (int i = 0; i < 10; ++i) { // second loop breakpoint
+ ++x;
+ }
+}
diff --git a/lldb/tools/lldb-dap/CMakeLists.txt b/lldb/tools/lldb-dap/CMakeLists.txt
index f8c0e4ecf36c..f8f0d86453f5 100644
--- a/lldb/tools/lldb-dap/CMakeLists.txt
+++ b/lldb/tools/lldb-dap/CMakeLists.txt
@@ -37,6 +37,7 @@ add_lldb_tool(lldb-dap
RunInTerminal.cpp
SourceBreakpoint.cpp
DAP.cpp
+ Watchpoint.cpp
LINK_LIBS
liblldb
diff --git a/lldb/tools/lldb-dap/DAPForward.h b/lldb/tools/lldb-dap/DAPForward.h
index fffff1e3f790..8c79488fae8d 100644
--- a/lldb/tools/lldb-dap/DAPForward.h
+++ b/lldb/tools/lldb-dap/DAPForward.h
@@ -14,6 +14,7 @@ struct BreakpointBase;
struct ExceptionBreakpoint;
struct FunctionBreakpoint;
struct SourceBreakpoint;
+struct Watchpoint;
} // namespace lldb_dap
namespace lldb {
@@ -39,6 +40,7 @@ class SBStringList;
class SBTarget;
class SBThread;
class SBValue;
+class SBWatchpoint;
} // namespace lldb
#endif
diff --git a/lldb/tools/lldb-dap/Watchpoint.cpp b/lldb/tools/lldb-dap/Watchpoint.cpp
new file mode 100644
index 000000000000..2f176e0da84f
--- /dev/null
+++ b/lldb/tools/lldb-dap/Watchpoint.cpp
@@ -0,0 +1,48 @@
+//===-- Watchpoint.cpp ------------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "Watchpoint.h"
+#include "DAP.h"
+#include "JSONUtils.h"
+#include "llvm/ADT/StringExtras.h"
+
+namespace lldb_dap {
+Watchpoint::Watchpoint(const llvm::json::Object &obj) : BreakpointBase(obj) {
+ llvm::StringRef dataId = GetString(obj, "dataId");
+ std::string accessType = GetString(obj, "accessType").str();
+ auto [addr_str, size_str] = dataId.split('/');
+ lldb::addr_t addr;
+ size_t size;
+ llvm::to_integer(addr_str, addr, 16);
+ llvm::to_integer(size_str, size);
+ lldb::SBWatchpointOptions options;
+ options.SetWatchpointTypeRead(accessType != "write");
+ if (accessType != "read")
+ options.SetWatchpointTypeWrite(lldb::eWatchpointWriteTypeOnModify);
+ wp = g_dap.target.WatchpointCreateByAddress(addr, size, options, error);
+ SetCondition();
+ SetHitCondition();
+}
+
+void Watchpoint::SetCondition() { wp.SetCondition(condition.c_str()); }
+
+void Watchpoint::SetHitCondition() {
+ uint64_t hitCount = 0;
+ if (llvm::to_integer(hitCondition, hitCount))
+ wp.SetIgnoreCount(hitCount - 1);
+}
+
+void Watchpoint::CreateJsonObject(llvm::json::Object &object) {
+ if (error.Success()) {
+ object.try_emplace("verified", true);
+ } else {
+ object.try_emplace("verified", false);
+ EmplaceSafeString(object, "message", error.GetCString());
+ }
+}
+} // namespace lldb_dap
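For reference (illustration only, not part of the patch): the constructor above consumes the `dataId` string that `request_dataBreakpointInfo` later in this patch builds as `llvm::utohexstr(load_addr) + "/" + llvm::utostr(byte_size)`, and a `hitCondition` of N is translated into an ignore count of N-1. A minimal Python sketch of that dataId round trip, with invented helper names:

    # Illustrative only: the "<hex address>/<byte size>" dataId format used by
    # lldb-dap's data breakpoint support.
    def encode_data_id(load_addr, byte_size):
        return "%x/%d" % (load_addr, byte_size)

    def decode_data_id(data_id):
        addr_str, size_str = data_id.split("/")
        return int(addr_str, 16), int(size_str)

    assert encode_data_id(0x1000, 4) == "1000/4"
    assert decode_data_id("1000/4") == (0x1000, 4)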
diff --git a/lldb/tools/lldb-dap/Watchpoint.h b/lldb/tools/lldb-dap/Watchpoint.h
new file mode 100644
index 000000000000..026b07d67241
--- /dev/null
+++ b/lldb/tools/lldb-dap/Watchpoint.h
@@ -0,0 +1,34 @@
+//===-- Watchpoint.h --------------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLDB_TOOLS_LLDB_DAP_WATCHPOINT_H
+#define LLDB_TOOLS_LLDB_DAP_WATCHPOINT_H
+
+#include "BreakpointBase.h"
+#include "lldb/API/SBError.h"
+#include "lldb/API/SBWatchpoint.h"
+#include "lldb/API/SBWatchpointOptions.h"
+
+namespace lldb_dap {
+
+struct Watchpoint : public BreakpointBase {
+  // The LLDB watchpoint associated with this DAP watchpoint.
+ lldb::SBWatchpoint wp;
+ lldb::SBError error;
+
+ Watchpoint() = default;
+ Watchpoint(const llvm::json::Object &obj);
+ Watchpoint(lldb::SBWatchpoint wp) : wp(wp) {}
+
+ void SetCondition() override;
+ void SetHitCondition() override;
+ void CreateJsonObject(llvm::json::Object &object) override;
+};
+} // namespace lldb_dap
+
+#endif
diff --git a/lldb/tools/lldb-dap/lldb-dap.cpp b/lldb/tools/lldb-dap/lldb-dap.cpp
index 78b0b4078706..c6a275bcf814 100644
--- a/lldb/tools/lldb-dap/lldb-dap.cpp
+++ b/lldb/tools/lldb-dap/lldb-dap.cpp
@@ -7,6 +7,8 @@
//===----------------------------------------------------------------------===//
#include "DAP.h"
+#include "Watchpoint.h"
+#include "lldb/API/SBMemoryRegionInfo.h"
#include <cassert>
#include <climits>
@@ -560,6 +562,46 @@ void EventThreadFunction() {
}
}
+lldb::SBValue FindVariable(uint64_t variablesReference, llvm::StringRef name) {
+ lldb::SBValue variable;
+ if (lldb::SBValueList *top_scope = GetTopLevelScope(variablesReference)) {
+ bool is_duplicated_variable_name = name.contains(" @");
+    // variablesReference is one of our scopes, not an actual variable; the
+    // request is asking for a variable in locals, globals, or registers.
+ int64_t end_idx = top_scope->GetSize();
+ // Searching backward so that we choose the variable in closest scope
+ // among variables of the same name.
+ for (int64_t i = end_idx - 1; i >= 0; --i) {
+ lldb::SBValue curr_variable = top_scope->GetValueAtIndex(i);
+ std::string variable_name = CreateUniqueVariableNameForDisplay(
+ curr_variable, is_duplicated_variable_name);
+ if (variable_name == name) {
+ variable = curr_variable;
+ break;
+ }
+ }
+ } else {
+ // This is not under the globals or locals scope, so there are no duplicated
+ // names.
+
+ // We have a named item within an actual variable so we need to find it
+    // within the container variable by name.
+ lldb::SBValue container = g_dap.variables.GetVariable(variablesReference);
+ variable = container.GetChildMemberWithName(name.data());
+ if (!variable.IsValid()) {
+ if (name.starts_with("[")) {
+ llvm::StringRef index_str(name.drop_front(1));
+ uint64_t index = 0;
+ if (!index_str.consumeInteger(0, index)) {
+ if (index_str == "]")
+ variable = container.GetChildAtIndex(index);
+ }
+ }
+ }
+ }
+ return variable;
+}
+
// Both attach and launch take a either a sourcePath or sourceMap
// argument (or neither), from which we need to set the target.source-map.
void SetSourceMapFromArguments(const llvm::json::Object &arguments) {
@@ -1647,6 +1689,8 @@ void request_initialize(const llvm::json::Object &request) {
body.try_emplace("supportsProgressReporting", true);
// The debug adapter supports 'logMessage' in breakpoint.
body.try_emplace("supportsLogPoints", true);
+ // The debug adapter supports data watchpoints.
+ body.try_emplace("supportsDataBreakpoints", true);
response.try_emplace("body", std::move(body));
g_dap.SendJSON(llvm::json::Value(std::move(response)));
@@ -2593,6 +2637,264 @@ void request_setFunctionBreakpoints(const llvm::json::Object &request) {
g_dap.SendJSON(llvm::json::Value(std::move(response)));
}
+// "DataBreakpointInfoRequest": {
+// "allOf": [ { "$ref": "#/definitions/Request" }, {
+// "type": "object",
+// "description": "Obtains information on a possible data breakpoint that
+// could be set on an expression or variable.\nClients should only call this
+// request if the corresponding capability `supportsDataBreakpoints` is
+// true.", "properties": {
+// "command": {
+// "type": "string",
+// "enum": [ "dataBreakpointInfo" ]
+// },
+// "arguments": {
+// "$ref": "#/definitions/DataBreakpointInfoArguments"
+// }
+// },
+// "required": [ "command", "arguments" ]
+// }]
+// },
+// "DataBreakpointInfoArguments": {
+// "type": "object",
+// "description": "Arguments for `dataBreakpointInfo` request.",
+// "properties": {
+// "variablesReference": {
+// "type": "integer",
+// "description": "Reference to the variable container if the data
+// breakpoint is requested for a child of the container. The
+// `variablesReference` must have been obtained in the current suspended
+// state. See 'Lifetime of Object References' in the Overview section for
+// details."
+// },
+// "name": {
+// "type": "string",
+// "description": "The name of the variable's child to obtain data
+// breakpoint information for.\nIf `variablesReference` isn't specified,
+// this can be an expression."
+// },
+// "frameId": {
+// "type": "integer",
+// "description": "When `name` is an expression, evaluate it in the scope
+// of this stack frame. If not specified, the expression is evaluated in
+// the global scope. When `variablesReference` is specified, this property
+// has no effect."
+// }
+// },
+// "required": [ "name" ]
+// },
+// "DataBreakpointInfoResponse": {
+// "allOf": [ { "$ref": "#/definitions/Response" }, {
+// "type": "object",
+// "description": "Response to `dataBreakpointInfo` request.",
+// "properties": {
+// "body": {
+// "type": "object",
+// "properties": {
+// "dataId": {
+// "type": [ "string", "null" ],
+// "description": "An identifier for the data on which a data
+// breakpoint can be registered with the `setDataBreakpoints`
+// request or null if no data breakpoint is available. If a
+// `variablesReference` or `frameId` is passed, the `dataId` is
+// valid in the current suspended state, otherwise it's valid
+// indefinitely. See 'Lifetime of Object References' in the Overview
+// section for details. Breakpoints set using the `dataId` in the
+// `setDataBreakpoints` request may outlive the lifetime of the
+// associated `dataId`."
+// },
+// "description": {
+// "type": "string",
+// "description": "UI string that describes on what data the
+// breakpoint is set on or why a data breakpoint is not available."
+// },
+// "accessTypes": {
+// "type": "array",
+// "items": {
+// "$ref": "#/definitions/DataBreakpointAccessType"
+// },
+// "description": "Attribute lists the available access types for a
+// potential data breakpoint. A UI client could surface this
+// information."
+// },
+// "canPersist": {
+// "type": "boolean",
+// "description": "Attribute indicates that a potential data
+// breakpoint could be persisted across sessions."
+// }
+// },
+// "required": [ "dataId", "description" ]
+// }
+// },
+// "required": [ "body" ]
+// }]
+// }
+void request_dataBreakpointInfo(const llvm::json::Object &request) {
+ llvm::json::Object response;
+ FillResponse(request, response);
+ llvm::json::Object body;
+ lldb::SBError error;
+ llvm::json::Array accessTypes{"read", "write", "readWrite"};
+ const auto *arguments = request.getObject("arguments");
+ const auto variablesReference =
+ GetUnsigned(arguments, "variablesReference", 0);
+ llvm::StringRef name = GetString(arguments, "name");
+ lldb::SBFrame frame = g_dap.GetLLDBFrame(*arguments);
+ lldb::SBValue variable = FindVariable(variablesReference, name);
+ std::string addr, size;
+
+ if (variable.IsValid()) {
+ lldb::addr_t load_addr = variable.GetLoadAddress();
+ size_t byte_size = variable.GetByteSize();
+ if (load_addr == LLDB_INVALID_ADDRESS) {
+ body.try_emplace("dataId", nullptr);
+ body.try_emplace("description",
+ "does not exist in memory, its location is " +
+ std::string(variable.GetLocation()));
+ } else if (byte_size == 0) {
+ body.try_emplace("dataId", nullptr);
+ body.try_emplace("description", "variable size is 0");
+ } else {
+ addr = llvm::utohexstr(load_addr);
+ size = llvm::utostr(byte_size);
+ }
+ } else if (variablesReference == 0 && frame.IsValid()) {
+ lldb::SBValue value = frame.EvaluateExpression(name.data());
+ if (value.GetError().Fail()) {
+ lldb::SBError error = value.GetError();
+ const char *error_cstr = error.GetCString();
+ body.try_emplace("dataId", nullptr);
+ body.try_emplace("description", error_cstr && error_cstr[0]
+ ? std::string(error_cstr)
+ : "evaluation failed");
+ } else {
+ uint64_t load_addr = value.GetValueAsUnsigned();
+ addr = llvm::utohexstr(load_addr);
+ lldb::SBMemoryRegionInfo region;
+ lldb::SBError err =
+ g_dap.target.GetProcess().GetMemoryRegionInfo(load_addr, region);
+ if (err.Success()) {
+ if (!(region.IsReadable() || region.IsWritable())) {
+ body.try_emplace("dataId", nullptr);
+ body.try_emplace("description",
+ "memory region for address " + addr +
+ " has no read or write permissions");
+ } else {
+ lldb::SBData data = value.GetPointeeData();
+ if (data.IsValid())
+ size = llvm::utostr(data.GetByteSize());
+ else {
+ body.try_emplace("dataId", nullptr);
+ body.try_emplace("description",
+ "unable to get byte size for expression: " +
+ name.str());
+ }
+ }
+ } else {
+ body.try_emplace("dataId", nullptr);
+ body.try_emplace("description",
+ "unable to get memory region info for address " +
+ addr);
+ }
+ }
+ } else {
+ body.try_emplace("dataId", nullptr);
+ body.try_emplace("description", "variable not found: " + name.str());
+ }
+
+ if (!body.getObject("dataId")) {
+ body.try_emplace("dataId", addr + "/" + size);
+ body.try_emplace("accessTypes", std::move(accessTypes));
+ body.try_emplace("description",
+ size + " bytes at " + addr + " " + name.str());
+ }
+ response.try_emplace("body", std::move(body));
+ g_dap.SendJSON(llvm::json::Value(std::move(response)));
+}
+
+// "SetDataBreakpointsRequest": {
+// "allOf": [ { "$ref": "#/definitions/Request" }, {
+// "type": "object",
+// "description": "Replaces all existing data breakpoints with new data
+// breakpoints.\nTo clear all data breakpoints, specify an empty
+// array.\nWhen a data breakpoint is hit, a `stopped` event (with reason
+// `data breakpoint`) is generated.\nClients should only call this request
+// if the corresponding capability `supportsDataBreakpoints` is true.",
+// "properties": {
+// "command": {
+// "type": "string",
+// "enum": [ "setDataBreakpoints" ]
+// },
+// "arguments": {
+// "$ref": "#/definitions/SetDataBreakpointsArguments"
+// }
+// },
+// "required": [ "command", "arguments" ]
+// }]
+// },
+// "SetDataBreakpointsArguments": {
+// "type": "object",
+// "description": "Arguments for `setDataBreakpoints` request.",
+// "properties": {
+// "breakpoints": {
+// "type": "array",
+// "items": {
+// "$ref": "#/definitions/DataBreakpoint"
+// },
+// "description": "The contents of this array replaces all existing data
+// breakpoints. An empty array clears all data breakpoints."
+// }
+// },
+// "required": [ "breakpoints" ]
+// },
+// "SetDataBreakpointsResponse": {
+// "allOf": [ { "$ref": "#/definitions/Response" }, {
+// "type": "object",
+// "description": "Response to `setDataBreakpoints` request.\nReturned is
+// information about each breakpoint created by this request.",
+// "properties": {
+// "body": {
+// "type": "object",
+// "properties": {
+// "breakpoints": {
+// "type": "array",
+// "items": {
+// "$ref": "#/definitions/Breakpoint"
+// },
+// "description": "Information about the data breakpoints. The array
+// elements correspond to the elements of the input argument
+// `breakpoints` array."
+// }
+// },
+// "required": [ "breakpoints" ]
+// }
+// },
+// "required": [ "body" ]
+// }]
+// }
+void request_setDataBreakpoints(const llvm::json::Object &request) {
+ llvm::json::Object response;
+ lldb::SBError error;
+ FillResponse(request, response);
+ const auto *arguments = request.getObject("arguments");
+ const auto *breakpoints = arguments->getArray("breakpoints");
+ llvm::json::Array response_breakpoints;
+ g_dap.target.DeleteAllWatchpoints();
+ if (breakpoints) {
+ for (const auto &bp : *breakpoints) {
+ const auto *bp_obj = bp.getAsObject();
+ if (bp_obj) {
+ Watchpoint wp(*bp_obj);
+ AppendBreakpoint(&wp, response_breakpoints);
+ }
+ }
+ }
+ llvm::json::Object body;
+ body.try_emplace("breakpoints", std::move(response_breakpoints));
+ response.try_emplace("body", std::move(body));
+ g_dap.SendJSON(llvm::json::Value(std::move(response)));
+}
+
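Taken together, the two handlers above implement the data-breakpoint half of the DAP protocol quoted in the schema comments. A hedged sketch of the payloads they exchange, with the address and size invented for illustration (the description string follows the `size + " bytes at " + addr + " " + name` format used above):

    # Hypothetical payloads, for illustration only.
    data_breakpoint_info_arguments = {"variablesReference": 0, "name": "&x", "frameId": 0}
    data_breakpoint_info_body = {
        "dataId": "1000/4",  # "<hex address>/<byte size>"
        "accessTypes": ["read", "write", "readWrite"],
        "description": "4 bytes at 1000 &x",
    }
    set_data_breakpoints_arguments = {
        "breakpoints": [{"dataId": "1000/4", "accessType": "write", "hitCondition": "2"}]
    }
    set_data_breakpoints_body = {"breakpoints": [{"verified": True}]}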
// "SourceRequest": {
// "allOf": [ { "$ref": "#/definitions/Request" }, {
// "type": "object",
@@ -3076,7 +3378,6 @@ void request_setVariable(const llvm::json::Object &request) {
const auto variablesReference =
GetUnsigned(arguments, "variablesReference", 0);
llvm::StringRef name = GetString(arguments, "name");
- bool is_duplicated_variable_name = name.contains(" @");
const auto value = GetString(arguments, "value");
// Set success to false just in case we don't find the variable by name
@@ -3097,40 +3398,8 @@ void request_setVariable(const llvm::json::Object &request) {
const auto id_value = GetUnsigned(arguments, "id", UINT64_MAX);
if (id_value != UINT64_MAX) {
variable = g_dap.variables.GetVariable(id_value);
- } else if (lldb::SBValueList *top_scope =
- GetTopLevelScope(variablesReference)) {
- // variablesReference is one of our scopes, not an actual variable it is
- // asking for a variable in locals or globals or registers
- int64_t end_idx = top_scope->GetSize();
- // Searching backward so that we choose the variable in closest scope
- // among variables of the same name.
- for (int64_t i = end_idx - 1; i >= 0; --i) {
- lldb::SBValue curr_variable = top_scope->GetValueAtIndex(i);
- std::string variable_name = CreateUniqueVariableNameForDisplay(
- curr_variable, is_duplicated_variable_name);
- if (variable_name == name) {
- variable = curr_variable;
- break;
- }
- }
} else {
- // This is not under the globals or locals scope, so there are no duplicated
- // names.
-
- // We have a named item within an actual variable so we need to find it
- // withing the container variable by name.
- lldb::SBValue container = g_dap.variables.GetVariable(variablesReference);
- variable = container.GetChildMemberWithName(name.data());
- if (!variable.IsValid()) {
- if (name.starts_with("[")) {
- llvm::StringRef index_str(name.drop_front(1));
- uint64_t index = 0;
- if (!index_str.consumeInteger(0, index)) {
- if (index_str == "]")
- variable = container.GetChildAtIndex(index);
- }
- }
- }
+ variable = FindVariable(variablesReference, name);
}
if (variable.IsValid()) {
@@ -3613,6 +3882,10 @@ void RegisterRequestCallbacks() {
request_setExceptionBreakpoints);
g_dap.RegisterRequestCallback("setFunctionBreakpoints",
request_setFunctionBreakpoints);
+ g_dap.RegisterRequestCallback("dataBreakpointInfo",
+ request_dataBreakpointInfo);
+ g_dap.RegisterRequestCallback("setDataBreakpoints",
+ request_setDataBreakpoints);
g_dap.RegisterRequestCallback("setVariable", request_setVariable);
g_dap.RegisterRequestCallback("source", request_source);
g_dap.RegisterRequestCallback("stackTrace", request_stackTrace);
diff --git a/lldb/unittests/Interpreter/CMakeLists.txt b/lldb/unittests/Interpreter/CMakeLists.txt
index 5b5268ffe973..54cea995084d 100644
--- a/lldb/unittests/Interpreter/CMakeLists.txt
+++ b/lldb/unittests/Interpreter/CMakeLists.txt
@@ -2,6 +2,7 @@ add_lldb_unittest(InterpreterTests
TestCommandPaths.cpp
TestCompletion.cpp
TestOptionArgParser.cpp
+ TestOptions.cpp
TestOptionValue.cpp
TestOptionValueFileColonLine.cpp
TestRegexCommand.cpp
diff --git a/lldb/unittests/Interpreter/TestOptions.cpp b/lldb/unittests/Interpreter/TestOptions.cpp
new file mode 100644
index 000000000000..93474e3c5713
--- /dev/null
+++ b/lldb/unittests/Interpreter/TestOptions.cpp
@@ -0,0 +1,29 @@
+//===-- TestOptions.cpp ---------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "lldb/Interpreter/Options.h"
+#include "gtest/gtest.h"
+
+#include "llvm/Testing/Support/Error.h"
+
+using namespace lldb_private;
+
+TEST(OptionsTest, CreateOptionParsingError) {
+ ASSERT_THAT_ERROR(
+ CreateOptionParsingError("yippee", 'f', "fun",
+ "unable to convert 'yippee' to boolean"),
+ llvm::FailedWithMessage("Invalid value ('yippee') for -f (fun): unable "
+ "to convert 'yippee' to boolean"));
+
+ ASSERT_THAT_ERROR(
+ CreateOptionParsingError("52", 'b', "bean-count"),
+ llvm::FailedWithMessage("Invalid value ('52') for -b (bean-count)"));
+
+ ASSERT_THAT_ERROR(CreateOptionParsingError("c", 'm'),
+ llvm::FailedWithMessage("Invalid value ('c') for -m"));
+}
diff --git a/lldb/unittests/ScriptInterpreter/Python/PythonDataObjectsTests.cpp b/lldb/unittests/ScriptInterpreter/Python/PythonDataObjectsTests.cpp
index a4db4627f935..b90fbb783099 100644
--- a/lldb/unittests/ScriptInterpreter/Python/PythonDataObjectsTests.cpp
+++ b/lldb/unittests/ScriptInterpreter/Python/PythonDataObjectsTests.cpp
@@ -11,6 +11,7 @@
#include "Plugins/ScriptInterpreter/Python/PythonDataObjects.h"
#include "Plugins/ScriptInterpreter/Python/ScriptInterpreterPython.h"
+#include "TestingSupport/SubsystemRAII.h"
#include "lldb/Host/File.h"
#include "lldb/Host/FileSystem.h"
#include "lldb/Host/HostInfo.h"
@@ -26,6 +27,8 @@ using namespace lldb_private::python;
using llvm::Expected;
class PythonDataObjectsTest : public PythonTestSuite {
+ SubsystemRAII<FileSystem> subsystems;
+
public:
void SetUp() override {
PythonTestSuite::SetUp();
@@ -209,8 +212,8 @@ TEST_F(PythonDataObjectsTest, TestPythonBoolean) {
};
// Test PythonBoolean constructed from long integer values.
- test_from_long(0); // Test 'false' value.
- test_from_long(1); // Test 'true' value.
+ test_from_long(0); // Test 'false' value.
+ test_from_long(1); // Test 'true' value.
test_from_long(~0); // Any value != 0 is 'true'.
}
@@ -811,7 +814,8 @@ main = foo
testing::ContainsRegex("line 7, in baz"),
testing::ContainsRegex("ZeroDivisionError")))));
-#if !((defined(_WIN32) || defined(_WIN64)) && (defined(__aarch64__) || defined(_M_ARM64)))
+#if !((defined(_WIN32) || defined(_WIN64)) && \
+ (defined(__aarch64__) || defined(_M_ARM64)))
static const char script2[] = R"(
class MyError(Exception):
diff --git a/llvm/CMakeLists.txt b/llvm/CMakeLists.txt
index 98cef005f07e..f5f7d3f3253f 100644
--- a/llvm/CMakeLists.txt
+++ b/llvm/CMakeLists.txt
@@ -168,7 +168,20 @@ foreach(proj IN LISTS LLVM_ENABLE_RUNTIMES)
endif()
endforeach()
-if ("libc" IN_LIST LLVM_ENABLE_RUNTIMES)
+set(NEED_LIBC_HDRGEN FALSE)
+if("libc" IN_LIST LLVM_ENABLE_RUNTIMES)
+ set(NEED_LIBC_HDRGEN TRUE)
+else()
+ foreach(_name ${LLVM_RUNTIME_TARGETS})
+ if("libc" IN_LIST RUNTIMES_${_name}_LLVM_ENABLE_RUNTIMES)
+ set(NEED_LIBC_HDRGEN TRUE)
+ if("${_name}" STREQUAL "amdgcn-amd-amdhsa" OR "${_name}" STREQUAL "nvptx64-nvidia-cuda")
+ set(LLVM_LIBC_GPU_BUILD ON)
+ endif()
+ endif()
+ endforeach()
+endif()
+if(NEED_LIBC_HDRGEN)
# To build the libc runtime, we need to be able to build few libc build
# tools from the "libc" project. So, we add it to the list of enabled
# projects.
@@ -177,6 +190,7 @@ if ("libc" IN_LIST LLVM_ENABLE_RUNTIMES)
list(APPEND LLVM_ENABLE_PROJECTS "libc")
endif()
endif()
+unset(NEED_LIBC_HDRGEN)
# LLVM_ENABLE_PROJECTS_USED is `ON` if the user has ever used the
# `LLVM_ENABLE_PROJECTS` CMake cache variable. This exists for
diff --git a/llvm/cmake/modules/HandleLLVMOptions.cmake b/llvm/cmake/modules/HandleLLVMOptions.cmake
index 486df22c2c1b..08ff49ded57a 100644
--- a/llvm/cmake/modules/HandleLLVMOptions.cmake
+++ b/llvm/cmake/modules/HandleLLVMOptions.cmake
@@ -120,7 +120,19 @@ if( LLVM_ENABLE_ASSERTIONS )
endif()
endif()
+# If we are targeting a GPU architecture in a runtimes build we want to ignore
+# all the standard flag handling.
+if("${LLVM_RUNTIMES_TARGET}" MATCHES "^amdgcn" OR
+ "${LLVM_RUNTIMES_TARGET}" MATCHES "^nvptx64")
+ return()
+endif()
+
if(LLVM_ENABLE_EXPENSIVE_CHECKS)
+  # LLVM_ENABLE_EXPENSIVE_CHECKS reports failures via assert(), so it requires
+  # assertions to be enabled; check for that explicitly here.
+ if (NOT LLVM_ENABLE_ASSERTIONS)
+ message(FATAL_ERROR "LLVM_ENABLE_EXPENSIVE_CHECKS requires LLVM_ENABLE_ASSERTIONS \"ON\".")
+ endif()
add_compile_definitions(EXPENSIVE_CHECKS)
# In some libstdc++ versions, std::min_element is not constexpr when
diff --git a/llvm/docs/CommandGuide/llvm-readelf.rst b/llvm/docs/CommandGuide/llvm-readelf.rst
index 6ee4a5dfb159..675628fdda45 100644
--- a/llvm/docs/CommandGuide/llvm-readelf.rst
+++ b/llvm/docs/CommandGuide/llvm-readelf.rst
@@ -38,6 +38,11 @@ OPTIONS
Display the contents of the basic block address map section(s), which contain the
address of each function, along with the relative offset of each basic block.
+.. option:: --decompress, -z
+
+ Dump decompressed section content when used with ``-x`` or ``-p``.
+ If the section(s) are not compressed, they are displayed as is.
+
.. option:: --demangle, -C
Display demangled symbol names in the output.
diff --git a/llvm/docs/CommandGuide/llvm-readobj.rst b/llvm/docs/CommandGuide/llvm-readobj.rst
index cb9232ef5e56..6d78a0387234 100644
--- a/llvm/docs/CommandGuide/llvm-readobj.rst
+++ b/llvm/docs/CommandGuide/llvm-readobj.rst
@@ -56,6 +56,11 @@ file formats.
Display the address-significance table.
+.. option:: --decompress, -z
+
+ Dump decompressed section content when used with ``-x`` or ``-p``.
+ If the section(s) are not compressed, they are displayed as is.
+
.. option:: --expand-relocs
When used with :option:`--relocs`, display each relocation in an expanded
diff --git a/llvm/docs/LangRef.rst b/llvm/docs/LangRef.rst
index fd2e3aacd016..19ca9f6ae3fe 100644
--- a/llvm/docs/LangRef.rst
+++ b/llvm/docs/LangRef.rst
@@ -61,10 +61,13 @@ run by the parser after parsing input assembly and by the optimizer
before it outputs bitcode. The violations pointed out by the verifier
pass indicate bugs in transformation passes or input to the parser.
+Syntax
+======
+
.. _identifiers:
Identifiers
-===========
+-----------
LLVM identifiers come in two basic types: global and local. Global
identifiers (functions, global variables) begin with the ``'@'``
@@ -140,6 +143,34 @@ It also shows a convention that we follow in this document. When
demonstrating instructions, we will follow an instruction with a comment
that defines the type and name of value produced.
+.. _strings:
+
+String constants
+----------------
+
+Strings in LLVM programs are delimited by ``"`` characters. Within a
+string, all bytes are treated literally with the exception of ``\``
+characters, which start escapes, and the first ``"`` character, which
+ends the string.
+
+There are two kinds of escapes.
+
+* ``\\`` represents a single ``\`` character.
+
+* ``\`` followed by two hexadecimal characters (0-9, a-f, or A-F)
+  represents the byte with the given value (e.g. ``\00`` represents a
+ null byte).
+
+To represent a ``"`` character, use ``\22``. (``\"`` will end the string
+with a trailing ``\``.)
+
+Newlines do not terminate string constants; strings can span multiple
+lines.
+
+The interpretation of string constants (e.g. their character encoding)
+depends on context.
+
+
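To make the escape rules above concrete, here is a small Python helper (illustrative only, not proposed LangRef text) that renders raw bytes in this syntax, escaping ``\`` as ``\\`` and writing ``"`` and non-printable bytes as two-hex-digit escapes:

    def llvm_string_constant(data: bytes) -> str:
        # Escape per the rules above: "\\" for a backslash, "\XX" (two hex
        # digits) for '"' and for bytes outside printable ASCII.
        out = []
        for b in data:
            if b == ord("\\"):
                out.append("\\\\")
            elif b == ord('"') or not (0x20 <= b <= 0x7E):
                out.append("\\%02X" % b)
            else:
                out.append(chr(b))
        return '"' + "".join(out) + '"'

    assert llvm_string_constant(b'say "hi"\x00') == '"say \\22hi\\22\\00"'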
High Level Structure
====================
@@ -16718,6 +16749,7 @@ an operation is greater than the maximum value, the result is set (or
"clamped") to this maximum. If it is below the minimum, it is clamped to this
minimum.
+.. _int_sadd_sat:
'``llvm.sadd.sat.*``' Intrinsics
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
@@ -16767,6 +16799,8 @@ Examples
%res = call i4 @llvm.sadd.sat.i4(i4 -4, i4 -5) ; %res = -8
+.. _int_uadd_sat:
+
'``llvm.uadd.sat.*``' Intrinsics
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
@@ -16814,6 +16848,8 @@ Examples
%res = call i4 @llvm.uadd.sat.i4(i4 8, i4 8) ; %res = 15
+.. _int_ssub_sat:
+
'``llvm.ssub.sat.*``' Intrinsics
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
@@ -16862,6 +16898,8 @@ Examples
%res = call i4 @llvm.ssub.sat.i4(i4 4, i4 -5) ; %res = 7
+.. _int_usub_sat:
+
'``llvm.usub.sat.*``' Intrinsics
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
@@ -23579,6 +23617,202 @@ Examples:
%also.r = select <4 x i1> %mask, <4 x i32> %t, <4 x i32> poison
+.. _int_vp_sadd_sat:
+
+'``llvm.vp.sadd.sat.*``' Intrinsics
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+Syntax:
+"""""""
+This is an overloaded intrinsic.
+
+::
+
+ declare <16 x i32> @llvm.vp.sadd.sat.v16i32 (<16 x i32> <left_op>, <16 x i32> <right_op>, <16 x i1> <mask>, i32 <vector_length>)
+ declare <vscale x 4 x i32> @llvm.vp.sadd.sat.nxv4i32 (<vscale x 4 x i32> <left_op>, <vscale x 4 x i32> <right_op>, <vscale x 4 x i1> <mask>, i32 <vector_length>)
+ declare <256 x i64> @llvm.vp.sadd.sat.v256i64 (<256 x i64> <left_op>, <256 x i64> <right_op>, <256 x i1> <mask>, i32 <vector_length>)
+
+Overview:
+"""""""""
+
+Predicated signed saturating addition of two vectors of integers.
+
+
+Arguments:
+""""""""""
+
+The first two operands and the result have the same vector of integer type. The
+third operand is the vector mask and has the same number of elements as the
+result vector type. The fourth operand is the explicit vector length of the
+operation.
+
+Semantics:
+""""""""""
+
+The '``llvm.vp.sadd.sat``' intrinsic performs sadd.sat (:ref:`sadd.sat <int_sadd_sat>`)
+of the first and second vector operands on each enabled lane. The result on
+disabled lanes is a :ref:`poison value <poisonvalues>`.
+
+
+Examples:
+"""""""""
+
+.. code-block:: llvm
+
+ %r = call <4 x i32> @llvm.vp.sadd.sat.v4i32(<4 x i32> %a, <4 x i32> %b, <4 x i1> %mask, i32 %evl)
+ ;; For all lanes below %evl, %r is lane-wise equivalent to %also.r
+
+ %t = call <4 x i32> @llvm.sadd.sat.v4i32(<4 x i32> %a, <4 x i32> %b)
+ %also.r = select <4 x i1> %mask, <4 x i32> %t, <4 x i32> poison
+
+
+.. _int_vp_uadd_sat:
+
+'``llvm.vp.uadd.sat.*``' Intrinsics
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+Syntax:
+"""""""
+This is an overloaded intrinsic.
+
+::
+
+ declare <16 x i32> @llvm.vp.uadd.sat.v16i32 (<16 x i32> <left_op>, <16 x i32> <right_op>, <16 x i1> <mask>, i32 <vector_length>)
+ declare <vscale x 4 x i32> @llvm.vp.uadd.sat.nxv4i32 (<vscale x 4 x i32> <left_op>, <vscale x 4 x i32> <right_op>, <vscale x 4 x i1> <mask>, i32 <vector_length>)
+ declare <256 x i64> @llvm.vp.uadd.sat.v256i64 (<256 x i64> <left_op>, <256 x i64> <right_op>, <256 x i1> <mask>, i32 <vector_length>)
+
+Overview:
+"""""""""
+
+Predicated unsigned saturating addition of two vectors of integers.
+
+
+Arguments:
+""""""""""
+
+The first two operands and the result have the same vector of integer type. The
+third operand is the vector mask and has the same number of elements as the
+result vector type. The fourth operand is the explicit vector length of the
+operation.
+
+Semantics:
+""""""""""
+
+The '``llvm.vp.uadd.sat``' intrinsic performs uadd.sat (:ref:`uadd.sat <int_uadd_sat>`)
+of the first and second vector operands on each enabled lane. The result on
+disabled lanes is a :ref:`poison value <poisonvalues>`.
+
+
+Examples:
+"""""""""
+
+.. code-block:: llvm
+
+ %r = call <4 x i32> @llvm.vp.uadd.sat.v4i32(<4 x i32> %a, <4 x i32> %b, <4 x i1> %mask, i32 %evl)
+ ;; For all lanes below %evl, %r is lane-wise equivalent to %also.r
+
+ %t = call <4 x i32> @llvm.uadd.sat.v4i32(<4 x i32> %a, <4 x i32> %b)
+ %also.r = select <4 x i1> %mask, <4 x i32> %t, <4 x i32> poison
+
+
+.. _int_vp_ssub_sat:
+
+'``llvm.vp.ssub.sat.*``' Intrinsics
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+Syntax:
+"""""""
+This is an overloaded intrinsic.
+
+::
+
+ declare <16 x i32> @llvm.vp.ssub.sat.v16i32 (<16 x i32> <left_op>, <16 x i32> <right_op>, <16 x i1> <mask>, i32 <vector_length>)
+ declare <vscale x 4 x i32> @llvm.vp.ssub.sat.nxv4i32 (<vscale x 4 x i32> <left_op>, <vscale x 4 x i32> <right_op>, <vscale x 4 x i1> <mask>, i32 <vector_length>)
+ declare <256 x i64> @llvm.vp.ssub.sat.v256i64 (<256 x i64> <left_op>, <256 x i64> <right_op>, <256 x i1> <mask>, i32 <vector_length>)
+
+Overview:
+"""""""""
+
+Predicated signed saturating subtraction of two vectors of integers.
+
+
+Arguments:
+""""""""""
+
+The first two operands and the result have the same vector of integer type. The
+third operand is the vector mask and has the same number of elements as the
+result vector type. The fourth operand is the explicit vector length of the
+operation.
+
+Semantics:
+""""""""""
+
+The '``llvm.vp.ssub.sat``' intrinsic performs ssub.sat (:ref:`ssub.sat <int_ssub_sat>`)
+of the first and second vector operands on each enabled lane. The result on
+disabled lanes is a :ref:`poison value <poisonvalues>`.
+
+
+Examples:
+"""""""""
+
+.. code-block:: llvm
+
+ %r = call <4 x i32> @llvm.vp.ssub.sat.v4i32(<4 x i32> %a, <4 x i32> %b, <4 x i1> %mask, i32 %evl)
+ ;; For all lanes below %evl, %r is lane-wise equivalent to %also.r
+
+ %t = call <4 x i32> @llvm.ssub.sat.v4i32(<4 x i32> %a, <4 x i32> %b)
+ %also.r = select <4 x i1> %mask, <4 x i32> %t, <4 x i32> poison
+
+
+.. _int_vp_usub_sat:
+
+'``llvm.vp.usub.sat.*``' Intrinsics
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+Syntax:
+"""""""
+This is an overloaded intrinsic.
+
+::
+
+ declare <16 x i32> @llvm.vp.usub.sat.v16i32 (<16 x i32> <left_op>, <16 x i32> <right_op>, <16 x i1> <mask>, i32 <vector_length>)
+ declare <vscale x 4 x i32> @llvm.vp.usub.sat.nxv4i32 (<vscale x 4 x i32> <left_op>, <vscale x 4 x i32> <right_op>, <vscale x 4 x i1> <mask>, i32 <vector_length>)
+ declare <256 x i64> @llvm.vp.usub.sat.v256i64 (<256 x i64> <left_op>, <256 x i64> <right_op>, <256 x i1> <mask>, i32 <vector_length>)
+
+Overview:
+"""""""""
+
+Predicated unsigned saturating subtraction of two vectors of integers.
+
+
+Arguments:
+""""""""""
+
+The first two operands and the result have the same vector of integer type. The
+third operand is the vector mask and has the same number of elements as the
+result vector type. The fourth operand is the explicit vector length of the
+operation.
+
+Semantics:
+""""""""""
+
+The '``llvm.vp.usub.sat``' intrinsic performs usub.sat (:ref:`usub.sat <int_usub_sat>`)
+of the first and second vector operands on each enabled lane. The result on
+disabled lanes is a :ref:`poison value <poisonvalues>`.
+
+
+Examples:
+"""""""""
+
+.. code-block:: llvm
+
+ %r = call <4 x i32> @llvm.vp.usub.sat.v4i32(<4 x i32> %a, <4 x i32> %b, <4 x i1> %mask, i32 %evl)
+ ;; For all lanes below %evl, %r is lane-wise equivalent to %also.r
+
+ %t = call <4 x i32> @llvm.usub.sat.v4i32(<4 x i32> %a, <4 x i32> %b)
+ %also.r = select <4 x i1> %mask, <4 x i32> %t, <4 x i32> poison
+
+
.. _int_vp_fshl:
'``llvm.vp.fshl.*``' Intrinsics
diff --git a/llvm/include/llvm/ADT/iterator_range.h b/llvm/include/llvm/ADT/iterator_range.h
index 2dc227935984..7d288ea4506b 100644
--- a/llvm/include/llvm/ADT/iterator_range.h
+++ b/llvm/include/llvm/ADT/iterator_range.h
@@ -43,8 +43,8 @@ class iterator_range {
IteratorT begin_iterator, end_iterator;
public:
-#if __GNUC__ == 7
- // Be careful no to break gcc-7 on the mlir target.
+#if __GNUC__ == 7 || (__GNUC__ == 8 && __GNUC_MINOR__ < 4)
+ // Be careful not to break gcc-7 and gcc-8 < 8.4 on the mlir target.
// See https://github.com/llvm/llvm-project/issues/63843
template <typename Container>
#else
diff --git a/llvm/include/llvm/CodeGen/MachinePassManager.h b/llvm/include/llvm/CodeGen/MachinePassManager.h
index a0ad7d7a95a2..7713c55661cc 100644
--- a/llvm/include/llvm/CodeGen/MachinePassManager.h
+++ b/llvm/include/llvm/CodeGen/MachinePassManager.h
@@ -25,17 +25,18 @@
#include "llvm/ADT/FunctionExtras.h"
#include "llvm/ADT/SmallVector.h"
+#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/IR/PassManager.h"
+#include "llvm/IR/PassManagerInternal.h"
#include "llvm/Support/Error.h"
-#include <map>
-
namespace llvm {
class Module;
class Function;
class MachineFunction;
extern template class AnalysisManager<MachineFunction>;
+using MachineFunctionAnalysisManager = AnalysisManager<MachineFunction>;
/// A CRTP mix-in that provides informational APIs needed for machine passes.
///
@@ -46,217 +47,247 @@ struct MachinePassInfoMixin : public PassInfoMixin<DerivedT> {
// TODO: Add MachineFunctionProperties support.
};
-/// An AnalysisManager<MachineFunction> that also exposes IR analysis results.
-class MachineFunctionAnalysisManager : public AnalysisManager<MachineFunction> {
-public:
- using Base = AnalysisManager<MachineFunction>;
+namespace detail {
+struct MachinePassConcept
+ : PassConcept<MachineFunction, MachineFunctionAnalysisManager> {
+ virtual MachineFunctionProperties getRequiredProperties() const = 0;
+ virtual MachineFunctionProperties getSetProperties() const = 0;
+ virtual MachineFunctionProperties getClearedProperties() const = 0;
+};
- MachineFunctionAnalysisManager() : FAM(nullptr), MAM(nullptr) {}
- MachineFunctionAnalysisManager(FunctionAnalysisManager &FAM,
- ModuleAnalysisManager &MAM)
- : FAM(&FAM), MAM(&MAM) {}
- MachineFunctionAnalysisManager(MachineFunctionAnalysisManager &&) = default;
- MachineFunctionAnalysisManager &
- operator=(MachineFunctionAnalysisManager &&) = default;
+template <typename PassT> struct MachinePassModel : MachinePassConcept {
+ explicit MachinePassModel(PassT Pass) : Pass(std::move(Pass)) {}
+ // We have to explicitly define all the special member functions because MSVC
+ // refuses to generate them.
+ MachinePassModel(const MachinePassModel &Arg) : Pass(Arg.Pass) {}
+ MachinePassModel(MachinePassModel &&Arg) : Pass(std::move(Arg.Pass)) {}
- /// Get the result of an analysis pass for a Function.
- ///
- /// Runs the analysis if a cached result is not available.
- template <typename PassT> typename PassT::Result &getResult(Function &F) {
- return FAM->getResult<PassT>(F);
+ friend void swap(MachinePassModel &LHS, MachinePassModel &RHS) {
+ using std::swap;
+ swap(LHS.Pass, RHS.Pass);
}
- /// Get the cached result of an analysis pass for a Function.
- ///
- /// This method never runs the analysis.
- ///
- /// \returns null if there is no cached result.
- template <typename PassT>
- typename PassT::Result *getCachedResult(Function &F) {
- return FAM->getCachedResult<PassT>(F);
+ MachinePassModel &operator=(MachinePassModel RHS) {
+ swap(*this, RHS);
+ return *this;
}
- /// Get the result of an analysis pass for a Module.
- ///
- /// Runs the analysis if a cached result is not available.
- template <typename PassT> typename PassT::Result &getResult(Module &M) {
- return MAM->getResult<PassT>(M);
+ PreservedAnalyses run(MachineFunction &IR,
+ MachineFunctionAnalysisManager &AM) override {
+ return Pass.run(IR, AM);
}
- /// Get the cached result of an analysis pass for a Module.
- ///
- /// This method never runs the analysis.
- ///
- /// \returns null if there is no cached result.
- template <typename PassT> typename PassT::Result *getCachedResult(Module &M) {
- return MAM->getCachedResult<PassT>(M);
+ void printPipeline(
+ raw_ostream &OS,
+ function_ref<StringRef(StringRef)> MapClassName2PassName) override {
+ Pass.printPipeline(OS, MapClassName2PassName);
}
- /// Get the result of an analysis pass for a MachineFunction.
- ///
- /// Runs the analysis if a cached result is not available.
- using Base::getResult;
+ StringRef name() const override { return PassT::name(); }
- /// Get the cached result of an analysis pass for a MachineFunction.
- ///
- /// This method never runs the analysis.
- ///
- /// returns null if there is no cached result.
- using Base::getCachedResult;
-
- // FIXME: Add LoopAnalysisManager or CGSCCAnalysisManager if needed.
- FunctionAnalysisManager *FAM;
- ModuleAnalysisManager *MAM;
-};
+ template <typename T>
+ using has_required_t = decltype(std::declval<T &>().isRequired());
+ template <typename T>
+ static std::enable_if_t<is_detected<has_required_t, T>::value, bool>
+ passIsRequiredImpl() {
+ return T::isRequired();
+ }
+ template <typename T>
+ static std::enable_if_t<!is_detected<has_required_t, T>::value, bool>
+ passIsRequiredImpl() {
+ return false;
+ }
+ bool isRequired() const override { return passIsRequiredImpl<PassT>(); }
+
+ template <typename T>
+ using has_get_required_properties_t =
+ decltype(std::declval<T &>().getRequiredProperties());
+ template <typename T>
+ static std::enable_if_t<is_detected<has_get_required_properties_t, T>::value,
+ MachineFunctionProperties>
+ getRequiredPropertiesImpl() {
+ return PassT::getRequiredProperties();
+ }
+ template <typename T>
+ static std::enable_if_t<!is_detected<has_get_required_properties_t, T>::value,
+ MachineFunctionProperties>
+ getRequiredPropertiesImpl() {
+ return MachineFunctionProperties();
+ }
+ MachineFunctionProperties getRequiredProperties() const override {
+ return getRequiredPropertiesImpl<PassT>();
+ }
-extern template class PassManager<MachineFunction>;
+ template <typename T>
+ using has_get_set_properties_t =
+ decltype(std::declval<T &>().getSetProperties());
+ template <typename T>
+ static std::enable_if_t<is_detected<has_get_set_properties_t, T>::value,
+ MachineFunctionProperties>
+ getSetPropertiesImpl() {
+ return PassT::getSetProperties();
+ }
+ template <typename T>
+ static std::enable_if_t<!is_detected<has_get_set_properties_t, T>::value,
+ MachineFunctionProperties>
+ getSetPropertiesImpl() {
+ return MachineFunctionProperties();
+ }
+ MachineFunctionProperties getSetProperties() const override {
+ return getSetPropertiesImpl<PassT>();
+ }
-/// MachineFunctionPassManager adds/removes below features to/from the base
-/// PassManager template instantiation.
-///
-/// - Support passes that implement doInitialization/doFinalization. This is for
-/// machine function passes to work on module level constructs. One such pass
-/// is AsmPrinter.
-///
-/// - Support machine module pass which runs over the module (for example,
-/// MachineOutliner). A machine module pass needs to define the method:
-///
-/// ```Error run(Module &, MachineFunctionAnalysisManager &)```
-///
-/// FIXME: machine module passes still need to define the usual machine
-/// function pass interface, namely,
-/// `PreservedAnalyses run(MachineFunction &,
-/// MachineFunctionAnalysisManager &)`
-/// But this interface wouldn't be executed. It is just a placeholder
-/// to satisfy the pass manager type-erased inteface. This
-/// special-casing of machine module pass is due to its limited use
-/// cases and the unnecessary complexity it may bring to the machine
-/// pass manager.
-///
-/// - The base class `run` method is replaced by an alternative `run` method.
-/// See details below.
-///
-/// - Support codegening in the SCC order. Users include interprocedural
-/// register allocation (IPRA).
-class MachineFunctionPassManager
- : public PassManager<MachineFunction, MachineFunctionAnalysisManager> {
- using Base = PassManager<MachineFunction, MachineFunctionAnalysisManager>;
+ template <typename T>
+ using has_get_cleared_properties_t =
+ decltype(std::declval<T &>().getClearedProperties());
+ template <typename T>
+ static std::enable_if_t<is_detected<has_get_cleared_properties_t, T>::value,
+ MachineFunctionProperties>
+ getClearedPropertiesImpl() {
+ return PassT::getClearedProperties();
+ }
+ template <typename T>
+ static std::enable_if_t<!is_detected<has_get_cleared_properties_t, T>::value,
+ MachineFunctionProperties>
+ getClearedPropertiesImpl() {
+ return MachineFunctionProperties();
+ }
+ MachineFunctionProperties getClearedProperties() const override {
+ return getClearedPropertiesImpl<PassT>();
+ }
+ PassT Pass;
+};
+} // namespace detail
+
+using MachineFunctionAnalysisManagerModuleProxy =
+ InnerAnalysisManagerProxy<MachineFunctionAnalysisManager, Module>;
+
+template <>
+bool MachineFunctionAnalysisManagerModuleProxy::Result::invalidate(
+ Module &M, const PreservedAnalyses &PA,
+ ModuleAnalysisManager::Invalidator &Inv);
+extern template class InnerAnalysisManagerProxy<MachineFunctionAnalysisManager,
+ Module>;
+
+extern template class OuterAnalysisManagerProxy<ModuleAnalysisManager,
+ MachineFunction>;
+/// Provide the \c ModuleAnalysisManager to \c Function proxy.
+using ModuleAnalysisManagerMachineFunctionProxy =
+ OuterAnalysisManagerProxy<ModuleAnalysisManager, MachineFunction>;
+
+class FunctionAnalysisManagerMachineFunctionProxy
+ : public AnalysisInfoMixin<FunctionAnalysisManagerMachineFunctionProxy> {
public:
- MachineFunctionPassManager(bool RequireCodeGenSCCOrder = false,
- bool VerifyMachineFunction = false)
- : RequireCodeGenSCCOrder(RequireCodeGenSCCOrder),
- VerifyMachineFunction(VerifyMachineFunction) {}
- MachineFunctionPassManager(MachineFunctionPassManager &&) = default;
- MachineFunctionPassManager &
- operator=(MachineFunctionPassManager &&) = default;
-
- /// Run machine passes for a Module.
+ class Result {
+ public:
+ explicit Result(FunctionAnalysisManager &FAM) : FAM(&FAM) {}
+
+ Result(Result &&Arg) : FAM(std::move(Arg.FAM)) {
+ // We have to null out the analysis manager in the moved-from state
+ // because we are taking ownership of the responsibilty to clear the
+ // analysis state.
+ Arg.FAM = nullptr;
+ }
+
+ ~Result() {
+ // FAM is cleared in a moved-from state, where there is nothing to do.
+ if (!FAM)
+ return;
+
+ // Clear out the analysis manager if we're being destroyed -- it means we
+ // didn't even see an invalidate call when we got invalidated.
+ FAM->clear();
+ }
+
+ Result &operator=(Result &&RHS) {
+ FAM = RHS.FAM;
+ // We have to null out the analysis manager in the moved-from state
+ // because we are taking ownership of the responsibility to clear the
+ // analysis state.
+ RHS.FAM = nullptr;
+ return *this;
+ }
+
+ /// Accessor for the analysis manager.
+ FunctionAnalysisManager &getManager() { return *FAM; }
+
+ /// Handler for invalidation of the outer IR unit, \c IRUnitT.
+ ///
+ /// If the proxy analysis itself is not preserved, we assume that the set of
+ /// inner IR objects contained in IRUnit may have changed. In this case,
+ /// we have to call \c clear() on the inner analysis manager, as it may now
+ /// have stale pointers to its inner IR objects.
+ ///
+ /// Regardless of whether the proxy analysis is marked as preserved, all of
+ /// the analyses in the inner analysis manager are potentially invalidated
+ /// based on the set of preserved analyses.
+ bool invalidate(MachineFunction &IR, const PreservedAnalyses &PA,
+ MachineFunctionAnalysisManager::Invalidator &Inv);
+
+ private:
+ FunctionAnalysisManager *FAM;
+ };
+
+ explicit FunctionAnalysisManagerMachineFunctionProxy(
+ FunctionAnalysisManager &FAM)
+ : FAM(&FAM) {}
+
+ /// Run the analysis pass and create our proxy result object.
///
- /// The intended use is to start the codegen pipeline for a Module. The base
- /// class's `run` method is deliberately hidden by this due to the observation
- /// that we don't yet have the use cases of compositing two instances of
- /// machine pass managers, or compositing machine pass managers with other
- /// types of pass managers.
- Error run(Module &M, MachineFunctionAnalysisManager &MFAM);
-
- template <typename PassT> void addPass(PassT &&Pass) {
- Base::addPass(std::forward<PassT>(Pass));
- PassConceptT *P = Passes.back().get();
- addDoInitialization<PassT>(P);
- addDoFinalization<PassT>(P);
-
- // Add machine module pass.
- addRunOnModule<PassT>(P);
+ /// This doesn't do any interesting work; it is primarily used to insert our
+ /// proxy result object into the outer analysis cache so that we can proxy
+ /// invalidation to the inner analysis manager.
+ Result run(MachineFunction &, MachineFunctionAnalysisManager &) {
+ return Result(*FAM);
}
-private:
- template <typename PassT>
- using has_init_t = decltype(std::declval<PassT &>().doInitialization(
- std::declval<Module &>(),
- std::declval<MachineFunctionAnalysisManager &>()));
-
- template <typename PassT>
- std::enable_if_t<!is_detected<has_init_t, PassT>::value>
- addDoInitialization(PassConceptT *Pass) {}
-
- template <typename PassT>
- std::enable_if_t<is_detected<has_init_t, PassT>::value>
- addDoInitialization(PassConceptT *Pass) {
- using PassModelT = detail::PassModel<MachineFunction, PassT,
- MachineFunctionAnalysisManager>;
- auto *P = static_cast<PassModelT *>(Pass);
- InitializationFuncs.emplace_back(
- [=](Module &M, MachineFunctionAnalysisManager &MFAM) {
- return P->Pass.doInitialization(M, MFAM);
- });
- }
+ static AnalysisKey Key;
- template <typename PassT>
- using has_fini_t = decltype(std::declval<PassT &>().doFinalization(
- std::declval<Module &>(),
- std::declval<MachineFunctionAnalysisManager &>()));
-
- template <typename PassT>
- std::enable_if_t<!is_detected<has_fini_t, PassT>::value>
- addDoFinalization(PassConceptT *Pass) {}
-
- template <typename PassT>
- std::enable_if_t<is_detected<has_fini_t, PassT>::value>
- addDoFinalization(PassConceptT *Pass) {
- using PassModelT = detail::PassModel<MachineFunction, PassT,
- MachineFunctionAnalysisManager>;
- auto *P = static_cast<PassModelT *>(Pass);
- FinalizationFuncs.emplace_back(
- [=](Module &M, MachineFunctionAnalysisManager &MFAM) {
- return P->Pass.doFinalization(M, MFAM);
- });
- }
+private:
+ FunctionAnalysisManager *FAM;
+};
- template <typename PassT>
- using is_machine_module_pass_t = decltype(std::declval<PassT &>().run(
- std::declval<Module &>(),
- std::declval<MachineFunctionAnalysisManager &>()));
-
- template <typename PassT>
- using is_machine_function_pass_t = decltype(std::declval<PassT &>().run(
- std::declval<MachineFunction &>(),
- std::declval<MachineFunctionAnalysisManager &>()));
-
- template <typename PassT>
- std::enable_if_t<!is_detected<is_machine_module_pass_t, PassT>::value>
- addRunOnModule(PassConceptT *Pass) {}
-
- template <typename PassT>
- std::enable_if_t<is_detected<is_machine_module_pass_t, PassT>::value>
- addRunOnModule(PassConceptT *Pass) {
- static_assert(is_detected<is_machine_function_pass_t, PassT>::value,
- "machine module pass needs to define machine function pass "
- "api. sorry.");
-
- using PassModelT = detail::PassModel<MachineFunction, PassT,
- MachineFunctionAnalysisManager>;
- auto *P = static_cast<PassModelT *>(Pass);
- MachineModulePasses.emplace(
- Passes.size() - 1,
- [=](Module &M, MachineFunctionAnalysisManager &MFAM) {
- return P->Pass.run(M, MFAM);
- });
- }
+class ModuleToMachineFunctionPassAdaptor
+ : public PassInfoMixin<ModuleToMachineFunctionPassAdaptor> {
+ using MachinePassConcept = detail::MachinePassConcept;
- using FuncTy = Error(Module &, MachineFunctionAnalysisManager &);
- SmallVector<llvm::unique_function<FuncTy>, 4> InitializationFuncs;
- SmallVector<llvm::unique_function<FuncTy>, 4> FinalizationFuncs;
+public:
+ explicit ModuleToMachineFunctionPassAdaptor(
+ std::unique_ptr<MachinePassConcept> Pass)
+ : Pass(std::move(Pass)) {}
- using PassIndex = decltype(Passes)::size_type;
- std::map<PassIndex, llvm::unique_function<FuncTy>> MachineModulePasses;
+ /// Runs the function pass across every function in the module.
+ PreservedAnalyses run(Module &M, ModuleAnalysisManager &AM);
+ void printPipeline(raw_ostream &OS,
+ function_ref<StringRef(StringRef)> MapClassName2PassName);
- // Run codegen in the SCC order.
- bool RequireCodeGenSCCOrder;
+ static bool isRequired() { return true; }
- bool VerifyMachineFunction;
+private:
+ std::unique_ptr<MachinePassConcept> Pass;
};
+template <typename MachineFunctionPassT>
+ModuleToMachineFunctionPassAdaptor
+createModuleToMachineFunctionPassAdaptor(MachineFunctionPassT &&Pass) {
+ using PassModelT = detail::MachinePassModel<MachineFunctionPassT>;
+ // Do not use make_unique: it causes too many template instantiations,
+ // which leads to terrible compile times.
+ return ModuleToMachineFunctionPassAdaptor(
+ std::unique_ptr<detail::MachinePassConcept>(
+ new PassModelT(std::forward<MachineFunctionPassT>(Pass))));
+}
+
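+// A minimal usage sketch (`MPM` and `MyMachineFunctionPass` below are
+// illustrative names, not part of this header):
+//
+//   ModulePassManager MPM;
+//   MPM.addPass(
+//       createModuleToMachineFunctionPassAdaptor(MyMachineFunctionPass()));
+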
+template <>
+PreservedAnalyses
+PassManager<MachineFunction>::run(MachineFunction &,
+ AnalysisManager<MachineFunction> &);
+extern template class PassManager<MachineFunction>;
+
+/// Convenience typedef for a pass manager over functions.
+using MachineFunctionPassManager = PassManager<MachineFunction>;
+
} // end namespace llvm
#endif // LLVM_CODEGEN_MACHINEPASSMANAGER_H
diff --git a/llvm/include/llvm/CodeGen/SDPatternMatch.h b/llvm/include/llvm/CodeGen/SDPatternMatch.h
new file mode 100644
index 000000000000..412bf42677cc
--- /dev/null
+++ b/llvm/include/llvm/CodeGen/SDPatternMatch.h
@@ -0,0 +1,694 @@
+//==--------------- llvm/CodeGen/SDPatternMatch.h ---------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+/// \file
+/// Contains matchers for matching SelectionDAG nodes and values.
+///
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CODEGEN_SDPATTERNMATCH_H
+#define LLVM_CODEGEN_SDPATTERNMATCH_H
+
+#include "llvm/ADT/APInt.h"
+#include "llvm/ADT/STLExtras.h"
+#include "llvm/CodeGen/SelectionDAG.h"
+#include "llvm/CodeGen/SelectionDAGNodes.h"
+#include "llvm/CodeGen/TargetLowering.h"
+
+namespace llvm {
+namespace SDPatternMatch {
+
+/// MatchContext can repurpose existing patterns to behave differently under
+/// a certain context. For instance, `m_Opc(ISD::ADD)` matches plain ADD nodes
+/// in normal circumstances, but matches VP_ADD nodes under a custom
+/// VPMatchContext. This design is meant to facilitate code and pattern reuse.
+class BasicMatchContext {
+ const SelectionDAG *DAG;
+ const TargetLowering *TLI;
+
+public:
+ explicit BasicMatchContext(const SelectionDAG *DAG)
+ : DAG(DAG), TLI(DAG ? &DAG->getTargetLoweringInfo() : nullptr) {}
+
+ explicit BasicMatchContext(const TargetLowering *TLI)
+ : DAG(nullptr), TLI(TLI) {}
+
+ // A valid MatchContext has to implement the following functions.
+
+ const SelectionDAG *getDAG() const { return DAG; }
+
+ const TargetLowering *getTLI() const { return TLI; }
+
+ /// Return true if N effectively has opcode Opcode.
+ bool match(SDValue N, unsigned Opcode) const {
+ return N->getOpcode() == Opcode;
+ }
+};
+
+template <typename Pattern, typename MatchContext>
+[[nodiscard]] bool sd_context_match(SDValue N, const MatchContext &Ctx,
+ Pattern &&P) {
+ return P.match(Ctx, N);
+}
+
+template <typename Pattern, typename MatchContext>
+[[nodiscard]] bool sd_context_match(SDNode *N, const MatchContext &Ctx,
+ Pattern &&P) {
+ return sd_context_match(SDValue(N, 0), Ctx, P);
+}
+
+template <typename Pattern>
+[[nodiscard]] bool sd_match(SDNode *N, const SelectionDAG *DAG, Pattern &&P) {
+ return sd_context_match(N, BasicMatchContext(DAG), P);
+}
+
+template <typename Pattern>
+[[nodiscard]] bool sd_match(SDValue N, const SelectionDAG *DAG, Pattern &&P) {
+ return sd_context_match(N, BasicMatchContext(DAG), P);
+}
+
+template <typename Pattern>
+[[nodiscard]] bool sd_match(SDNode *N, Pattern &&P) {
+ return sd_match(N, nullptr, P);
+}
+
+template <typename Pattern>
+[[nodiscard]] bool sd_match(SDValue N, Pattern &&P) {
+ return sd_match(N, nullptr, P);
+}
+
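+// A minimal usage sketch (`N` and the bound values are illustrative, not part
+// of this header):
+//
+//   using namespace llvm::SDPatternMatch;
+//   SDValue LHS, RHS;
+//   if (sd_match(N, m_Add(m_Value(LHS), m_Value(RHS)))) {
+//     // N is an ISD::ADD; LHS and RHS are bound to its operands.
+//   }
+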
+// === Utilities ===
+struct Value_match {
+ SDValue MatchVal;
+
+ Value_match() = default;
+
+ explicit Value_match(SDValue Match) : MatchVal(Match) {}
+
+ template <typename MatchContext> bool match(const MatchContext &, SDValue N) {
+ if (MatchVal)
+ return MatchVal == N;
+ return N.getNode();
+ }
+};
+
+/// Match any valid SDValue.
+inline Value_match m_Value() { return Value_match(); }
+
+inline Value_match m_Specific(SDValue N) {
+ assert(N);
+ return Value_match(N);
+}
+
+struct DeferredValue_match {
+ SDValue &MatchVal;
+
+ explicit DeferredValue_match(SDValue &Match) : MatchVal(Match) {}
+
+ template <typename MatchContext> bool match(const MatchContext &, SDValue N) {
+ return N == MatchVal;
+ }
+};
+
+/// Similar to m_Specific, but the specific value to match is determined by
+/// another sub-pattern in the same sd_match() expression. For instance,
+/// we cannot match `(add V, V)` with `m_Add(m_Value(X), m_Specific(X))` since
+/// `X` is not yet initialized at the time it is copied into `m_Specific`. Instead,
+/// we should use `m_Add(m_Value(X), m_Deferred(X))`.
+inline DeferredValue_match m_Deferred(SDValue &V) {
+ return DeferredValue_match(V);
+}
+
+struct Opcode_match {
+ unsigned Opcode;
+
+ explicit Opcode_match(unsigned Opc) : Opcode(Opc) {}
+
+ template <typename MatchContext>
+ bool match(const MatchContext &Ctx, SDValue N) {
+ return Ctx.match(N, Opcode);
+ }
+};
+
+inline Opcode_match m_Opc(unsigned Opcode) { return Opcode_match(Opcode); }
+
+template <unsigned NumUses, typename Pattern> struct NUses_match {
+ Pattern P;
+
+ explicit NUses_match(const Pattern &P) : P(P) {}
+
+ template <typename MatchContext>
+ bool match(const MatchContext &Ctx, SDValue N) {
+ // SDNode::hasNUsesOfValue is pretty expensive when the SDNode produces
+ // multiple results, hence we check the subsequent pattern here before
+ // checking the number of value users.
+ return P.match(Ctx, N) && N->hasNUsesOfValue(NumUses, N.getResNo());
+ }
+};
+
+template <typename Pattern>
+inline NUses_match<1, Pattern> m_OneUse(const Pattern &P) {
+ return NUses_match<1, Pattern>(P);
+}
+template <unsigned N, typename Pattern>
+inline NUses_match<N, Pattern> m_NUses(const Pattern &P) {
+ return NUses_match<N, Pattern>(P);
+}
+
+inline NUses_match<1, Value_match> m_OneUse() {
+ return NUses_match<1, Value_match>(m_Value());
+}
+template <unsigned N> inline NUses_match<N, Value_match> m_NUses() {
+ return NUses_match<N, Value_match>(m_Value());
+}
+
+struct Value_bind {
+ SDValue &BindVal;
+
+ explicit Value_bind(SDValue &N) : BindVal(N) {}
+
+ template <typename MatchContext> bool match(const MatchContext &, SDValue N) {
+ BindVal = N;
+ return true;
+ }
+};
+
+inline Value_bind m_Value(SDValue &N) { return Value_bind(N); }
+
+template <typename Pattern, typename PredFuncT> struct TLI_pred_match {
+ Pattern P;
+ PredFuncT PredFunc;
+
+ TLI_pred_match(const PredFuncT &Pred, const Pattern &P)
+ : P(P), PredFunc(Pred) {}
+
+ template <typename MatchContext>
+ bool match(const MatchContext &Ctx, SDValue N) {
+ assert(Ctx.getTLI() && "TargetLowering is required for this pattern.");
+ return PredFunc(*Ctx.getTLI(), N) && P.match(Ctx, N);
+ }
+};
+
+// Explicit deduction guide.
+template <typename PredFuncT, typename Pattern>
+TLI_pred_match(const PredFuncT &Pred, const Pattern &P)
+ -> TLI_pred_match<Pattern, PredFuncT>;
+
+/// Match legal SDNodes based on the information provided by TargetLowering.
+template <typename Pattern> inline auto m_LegalOp(const Pattern &P) {
+ return TLI_pred_match{[](const TargetLowering &TLI, SDValue N) {
+ return TLI.isOperationLegal(N->getOpcode(),
+ N.getValueType());
+ },
+ P};
+}
+
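+// For example, to match an AND node only when the operation is legal for its
+// type (a sketch; `N` is an SDValue and `DAG` a SelectionDAG pointer, both
+// illustrative):
+//
+//   if (sd_match(N, DAG, m_LegalOp(m_Opc(ISD::AND)))) {
+//     // ...
+//   }
+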
+/// Switch to a different MatchContext for subsequent patterns.
+template <typename NewMatchContext, typename Pattern> struct SwitchContext {
+ const NewMatchContext &Ctx;
+ Pattern P;
+
+ template <typename OrigMatchContext>
+ bool match(const OrigMatchContext &, SDValue N) {
+ return P.match(Ctx, N);
+ }
+};
+
+template <typename MatchContext, typename Pattern>
+inline SwitchContext<MatchContext, Pattern> m_Context(const MatchContext &Ctx,
+ Pattern &&P) {
+ return SwitchContext<MatchContext, Pattern>{Ctx, std::move(P)};
+}
+
+// === Value type ===
+struct ValueType_bind {
+ EVT &BindVT;
+
+ explicit ValueType_bind(EVT &Bind) : BindVT(Bind) {}
+
+ template <typename MatchContext> bool match(const MatchContext &, SDValue N) {
+ BindVT = N.getValueType();
+ return true;
+ }
+};
+
+/// Retrieve the ValueType of the current SDValue.
+inline ValueType_bind m_VT(EVT &VT) { return ValueType_bind(VT); }
+
+template <typename Pattern, typename PredFuncT> struct ValueType_match {
+ PredFuncT PredFunc;
+ Pattern P;
+
+ ValueType_match(const PredFuncT &Pred, const Pattern &P)
+ : PredFunc(Pred), P(P) {}
+
+ template <typename MatchContext>
+ bool match(const MatchContext &Ctx, SDValue N) {
+ return PredFunc(N.getValueType()) && P.match(Ctx, N);
+ }
+};
+
+// Explicit deduction guide.
+template <typename PredFuncT, typename Pattern>
+ValueType_match(const PredFuncT &Pred, const Pattern &P)
+ -> ValueType_match<Pattern, PredFuncT>;
+
+/// Match a specific ValueType.
+template <typename Pattern>
+inline auto m_SpecificVT(EVT RefVT, const Pattern &P) {
+ return ValueType_match{[=](EVT VT) { return VT == RefVT; }, P};
+}
+inline auto m_SpecificVT(EVT RefVT) {
+ return ValueType_match{[=](EVT VT) { return VT == RefVT; }, m_Value()};
+}
+
+inline auto m_Glue() { return m_SpecificVT(MVT::Glue); }
+inline auto m_OtherVT() { return m_SpecificVT(MVT::Other); }
+
+/// Match any integer ValueTypes.
+template <typename Pattern> inline auto m_IntegerVT(const Pattern &P) {
+ return ValueType_match{[](EVT VT) { return VT.isInteger(); }, P};
+}
+inline auto m_IntegerVT() {
+ return ValueType_match{[](EVT VT) { return VT.isInteger(); }, m_Value()};
+}
+
+/// Match any floating point ValueTypes.
+template <typename Pattern> inline auto m_FloatingPointVT(const Pattern &P) {
+ return ValueType_match{[](EVT VT) { return VT.isFloatingPoint(); }, P};
+}
+inline auto m_FloatingPointVT() {
+ return ValueType_match{[](EVT VT) { return VT.isFloatingPoint(); },
+ m_Value()};
+}
+
+/// Match any vector ValueTypes.
+template <typename Pattern> inline auto m_VectorVT(const Pattern &P) {
+ return ValueType_match{[](EVT VT) { return VT.isVector(); }, P};
+}
+inline auto m_VectorVT() {
+ return ValueType_match{[](EVT VT) { return VT.isVector(); }, m_Value()};
+}
+
+/// Match fixed-length vector ValueTypes.
+template <typename Pattern> inline auto m_FixedVectorVT(const Pattern &P) {
+ return ValueType_match{[](EVT VT) { return VT.isFixedLengthVector(); }, P};
+}
+inline auto m_FixedVectorVT() {
+ return ValueType_match{[](EVT VT) { return VT.isFixedLengthVector(); },
+ m_Value()};
+}
+
+/// Match scalable vector ValueTypes.
+template <typename Pattern> inline auto m_ScalableVectorVT(const Pattern &P) {
+ return ValueType_match{[](EVT VT) { return VT.isScalableVector(); }, P};
+}
+inline auto m_ScalableVectorVT() {
+ return ValueType_match{[](EVT VT) { return VT.isScalableVector(); },
+ m_Value()};
+}
+
+/// Match legal ValueTypes based on the information provided by TargetLowering.
+template <typename Pattern> inline auto m_LegalType(const Pattern &P) {
+ return TLI_pred_match{[](const TargetLowering &TLI, SDValue N) {
+ return TLI.isTypeLegal(N.getValueType());
+ },
+ P};
+}
+
+// === Patterns combinators ===
+template <typename... Preds> struct And {
+ template <typename MatchContext> bool match(const MatchContext &, SDValue N) {
+ return true;
+ }
+};
+
+template <typename Pred, typename... Preds>
+struct And<Pred, Preds...> : And<Preds...> {
+ Pred P;
+ And(Pred &&p, Preds &&...preds)
+ : And<Preds...>(std::forward<Preds>(preds)...), P(std::forward<Pred>(p)) {
+ }
+
+ template <typename MatchContext>
+ bool match(const MatchContext &Ctx, SDValue N) {
+ return P.match(Ctx, N) && And<Preds...>::match(Ctx, N);
+ }
+};
+
+template <typename... Preds> struct Or {
+ template <typename MatchContext> bool match(const MatchContext &, SDValue N) {
+ return false;
+ }
+};
+
+template <typename Pred, typename... Preds>
+struct Or<Pred, Preds...> : Or<Preds...> {
+ Pred P;
+ Or(Pred &&p, Preds &&...preds)
+ : Or<Preds...>(std::forward<Preds>(preds)...), P(std::forward<Pred>(p)) {}
+
+ template <typename MatchContext>
+ bool match(const MatchContext &Ctx, SDValue N) {
+ return P.match(Ctx, N) || Or<Preds...>::match(Ctx, N);
+ }
+};
+
+template <typename... Preds> And<Preds...> m_AllOf(Preds &&...preds) {
+ return And<Preds...>(std::forward<Preds>(preds)...);
+}
+
+template <typename... Preds> Or<Preds...> m_AnyOf(Preds &&...preds) {
+ return Or<Preds...>(std::forward<Preds>(preds)...);
+}
+
+// === Generic node matching ===
+template <unsigned OpIdx, typename... OpndPreds> struct Operands_match {
+ template <typename MatchContext>
+ bool match(const MatchContext &Ctx, SDValue N) {
+ // Returns false if there are more operands than predicates.
+ return N->getNumOperands() == OpIdx;
+ }
+};
+
+template <unsigned OpIdx, typename OpndPred, typename... OpndPreds>
+struct Operands_match<OpIdx, OpndPred, OpndPreds...>
+ : Operands_match<OpIdx + 1, OpndPreds...> {
+ OpndPred P;
+
+ Operands_match(OpndPred &&p, OpndPreds &&...preds)
+ : Operands_match<OpIdx + 1, OpndPreds...>(
+ std::forward<OpndPreds>(preds)...),
+ P(std::forward<OpndPred>(p)) {}
+
+ template <typename MatchContext>
+ bool match(const MatchContext &Ctx, SDValue N) {
+ if (OpIdx < N->getNumOperands())
+ return P.match(Ctx, N->getOperand(OpIdx)) &&
+ Operands_match<OpIdx + 1, OpndPreds...>::match(Ctx, N);
+
+ // This is the case where there are more predicates than operands.
+ return false;
+ }
+};
+
+template <typename... OpndPreds>
+auto m_Node(unsigned Opcode, OpndPreds &&...preds) {
+ return m_AllOf(m_Opc(Opcode), Operands_match<0, OpndPreds...>(
+ std::forward<OpndPreds>(preds)...));
+}
+
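+// For instance, a node with a fixed number of operands can be matched as
+// follows (a sketch; `N`, `Cond`, `TV`, and `FV` are illustrative):
+//
+//   SDValue Cond, TV, FV;
+//   bool IsSelect = sd_match(
+//       N, m_Node(ISD::SELECT, m_Value(Cond), m_Value(TV), m_Value(FV)));
+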
+/// Provides the number of operands that are not chain or glue, as well as the
+/// index of the first such operand.
+template <bool ExcludeChain> struct EffectiveOperands {
+ unsigned Size = 0;
+ unsigned FirstIndex = 0;
+
+ explicit EffectiveOperands(SDValue N) {
+ const unsigned TotalNumOps = N->getNumOperands();
+ FirstIndex = TotalNumOps;
+ for (unsigned I = 0; I < TotalNumOps; ++I) {
+ // Count the number of non-chain and non-glue nodes (we ignore chain
+ // and glue by default) and retrieve the operand index offset.
+ EVT VT = N->getOperand(I).getValueType();
+ if (VT != MVT::Glue && VT != MVT::Other) {
+ ++Size;
+ if (FirstIndex == TotalNumOps)
+ FirstIndex = I;
+ }
+ }
+ }
+};
+
+template <> struct EffectiveOperands<false> {
+ unsigned Size = 0;
+ unsigned FirstIndex = 0;
+
+ explicit EffectiveOperands(SDValue N) : Size(N->getNumOperands()) {}
+};
+
+// === Binary operations ===
+template <typename LHS_P, typename RHS_P, bool Commutable = false,
+ bool ExcludeChain = false>
+struct BinaryOpc_match {
+ unsigned Opcode;
+ LHS_P LHS;
+ RHS_P RHS;
+
+ BinaryOpc_match(unsigned Opc, const LHS_P &L, const RHS_P &R)
+ : Opcode(Opc), LHS(L), RHS(R) {}
+
+ template <typename MatchContext>
+ bool match(const MatchContext &Ctx, SDValue N) {
+ if (sd_context_match(N, Ctx, m_Opc(Opcode))) {
+ EffectiveOperands<ExcludeChain> EO(N);
+ assert(EO.Size == 2);
+ return (LHS.match(Ctx, N->getOperand(EO.FirstIndex)) &&
+ RHS.match(Ctx, N->getOperand(EO.FirstIndex + 1))) ||
+ (Commutable && LHS.match(Ctx, N->getOperand(EO.FirstIndex + 1)) &&
+ RHS.match(Ctx, N->getOperand(EO.FirstIndex)));
+ }
+
+ return false;
+ }
+};
+
+template <typename LHS, typename RHS>
+inline BinaryOpc_match<LHS, RHS, false> m_BinOp(unsigned Opc, const LHS &L,
+ const RHS &R) {
+ return BinaryOpc_match<LHS, RHS, false>(Opc, L, R);
+}
+template <typename LHS, typename RHS>
+inline BinaryOpc_match<LHS, RHS, true> m_c_BinOp(unsigned Opc, const LHS &L,
+ const RHS &R) {
+ return BinaryOpc_match<LHS, RHS, true>(Opc, L, R);
+}
+
+template <typename LHS, typename RHS>
+inline BinaryOpc_match<LHS, RHS, false, true>
+m_ChainedBinOp(unsigned Opc, const LHS &L, const RHS &R) {
+ return BinaryOpc_match<LHS, RHS, false, true>(Opc, L, R);
+}
+template <typename LHS, typename RHS>
+inline BinaryOpc_match<LHS, RHS, true, true>
+m_c_ChainedBinOp(unsigned Opc, const LHS &L, const RHS &R) {
+ return BinaryOpc_match<LHS, RHS, true, true>(Opc, L, R);
+}
+
+// Common binary operations
+template <typename LHS, typename RHS>
+inline BinaryOpc_match<LHS, RHS, true> m_Add(const LHS &L, const RHS &R) {
+ return BinaryOpc_match<LHS, RHS, true>(ISD::ADD, L, R);
+}
+
+template <typename LHS, typename RHS>
+inline BinaryOpc_match<LHS, RHS, false> m_Sub(const LHS &L, const RHS &R) {
+ return BinaryOpc_match<LHS, RHS, false>(ISD::SUB, L, R);
+}
+
+template <typename LHS, typename RHS>
+inline BinaryOpc_match<LHS, RHS, true> m_Mul(const LHS &L, const RHS &R) {
+ return BinaryOpc_match<LHS, RHS, true>(ISD::MUL, L, R);
+}
+
+template <typename LHS, typename RHS>
+inline BinaryOpc_match<LHS, RHS, false> m_UDiv(const LHS &L, const RHS &R) {
+ return BinaryOpc_match<LHS, RHS, false>(ISD::UDIV, L, R);
+}
+template <typename LHS, typename RHS>
+inline BinaryOpc_match<LHS, RHS, false> m_SDiv(const LHS &L, const RHS &R) {
+ return BinaryOpc_match<LHS, RHS, false>(ISD::SDIV, L, R);
+}
+
+template <typename LHS, typename RHS>
+inline BinaryOpc_match<LHS, RHS, false> m_URem(const LHS &L, const RHS &R) {
+ return BinaryOpc_match<LHS, RHS, false>(ISD::UREM, L, R);
+}
+template <typename LHS, typename RHS>
+inline BinaryOpc_match<LHS, RHS, false> m_SRem(const LHS &L, const RHS &R) {
+ return BinaryOpc_match<LHS, RHS, false>(ISD::SREM, L, R);
+}
+
+template <typename LHS, typename RHS>
+inline BinaryOpc_match<LHS, RHS, false> m_Shl(const LHS &L, const RHS &R) {
+ return BinaryOpc_match<LHS, RHS, false>(ISD::SHL, L, R);
+}
+
+template <typename LHS, typename RHS>
+inline BinaryOpc_match<LHS, RHS, false> m_Sra(const LHS &L, const RHS &R) {
+ return BinaryOpc_match<LHS, RHS, false>(ISD::SRA, L, R);
+}
+template <typename LHS, typename RHS>
+inline BinaryOpc_match<LHS, RHS, false> m_Srl(const LHS &L, const RHS &R) {
+ return BinaryOpc_match<LHS, RHS, false>(ISD::SRL, L, R);
+}
+
+template <typename LHS, typename RHS>
+inline BinaryOpc_match<LHS, RHS, true> m_FAdd(const LHS &L, const RHS &R) {
+ return BinaryOpc_match<LHS, RHS, true>(ISD::FADD, L, R);
+}
+
+template <typename LHS, typename RHS>
+inline BinaryOpc_match<LHS, RHS, false> m_FSub(const LHS &L, const RHS &R) {
+ return BinaryOpc_match<LHS, RHS, false>(ISD::FSUB, L, R);
+}
+
+template <typename LHS, typename RHS>
+inline BinaryOpc_match<LHS, RHS, true> m_FMul(const LHS &L, const RHS &R) {
+ return BinaryOpc_match<LHS, RHS, true>(ISD::FMUL, L, R);
+}
+
+template <typename LHS, typename RHS>
+inline BinaryOpc_match<LHS, RHS, false> m_FDiv(const LHS &L, const RHS &R) {
+ return BinaryOpc_match<LHS, RHS, false>(ISD::FDIV, L, R);
+}
+
+template <typename LHS, typename RHS>
+inline BinaryOpc_match<LHS, RHS, false> m_FRem(const LHS &L, const RHS &R) {
+ return BinaryOpc_match<LHS, RHS, false>(ISD::FREM, L, R);
+}
+
+// === Unary operations ===
+template <typename Opnd_P, bool ExcludeChain = false> struct UnaryOpc_match {
+ unsigned Opcode;
+ Opnd_P Opnd;
+
+ UnaryOpc_match(unsigned Opc, const Opnd_P &Op) : Opcode(Opc), Opnd(Op) {}
+
+ template <typename MatchContext>
+ bool match(const MatchContext &Ctx, SDValue N) {
+ if (sd_context_match(N, Ctx, m_Opc(Opcode))) {
+ EffectiveOperands<ExcludeChain> EO(N);
+ assert(EO.Size == 1);
+ return Opnd.match(Ctx, N->getOperand(EO.FirstIndex));
+ }
+
+ return false;
+ }
+};
+
+template <typename Opnd>
+inline UnaryOpc_match<Opnd> m_UnaryOp(unsigned Opc, const Opnd &Op) {
+ return UnaryOpc_match<Opnd>(Opc, Op);
+}
+template <typename Opnd>
+inline UnaryOpc_match<Opnd, true> m_ChainedUnaryOp(unsigned Opc,
+ const Opnd &Op) {
+ return UnaryOpc_match<Opnd, true>(Opc, Op);
+}
+
+template <typename Opnd> inline UnaryOpc_match<Opnd> m_ZExt(const Opnd &Op) {
+ return UnaryOpc_match<Opnd>(ISD::ZERO_EXTEND, Op);
+}
+
+template <typename Opnd> inline UnaryOpc_match<Opnd> m_SExt(const Opnd &Op) {
+ return UnaryOpc_match<Opnd>(ISD::SIGN_EXTEND, Op);
+}
+
+template <typename Opnd> inline UnaryOpc_match<Opnd> m_AnyExt(const Opnd &Op) {
+ return UnaryOpc_match<Opnd>(ISD::ANY_EXTEND, Op);
+}
+
+template <typename Opnd> inline UnaryOpc_match<Opnd> m_Trunc(const Opnd &Op) {
+ return UnaryOpc_match<Opnd>(ISD::TRUNCATE, Op);
+}
+
+// === Constants ===
+struct ConstantInt_match {
+ APInt *BindVal;
+
+ explicit ConstantInt_match(APInt *V) : BindVal(V) {}
+
+ template <typename MatchContext> bool match(const MatchContext &, SDValue N) {
+ // The logic here is similar to that in
+ // SelectionDAG::isConstantIntBuildVectorOrConstantInt, but the latter also
+ // treats GlobalAddressSDNode as a constant, which is difficult to turn into
+ // APInt.
+ if (auto *C = dyn_cast_or_null<ConstantSDNode>(N.getNode())) {
+ if (BindVal)
+ *BindVal = C->getAPIntValue();
+ return true;
+ }
+
+ APInt Discard;
+ return ISD::isConstantSplatVector(N.getNode(),
+ BindVal ? *BindVal : Discard);
+ }
+};
+/// Match any integer constant or splat of an integer constant.
+inline ConstantInt_match m_ConstInt() { return ConstantInt_match(nullptr); }
+/// Match any integer constant or splat of an integer constant; return the
+/// specific constant or constant splat value.
+inline ConstantInt_match m_ConstInt(APInt &V) { return ConstantInt_match(&V); }
+
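+// A usage sketch (`N` is an illustrative node):
+//
+//   APInt C;
+//   if (sd_match(N, m_Mul(m_Value(), m_ConstInt(C)))) {
+//     // One operand of the multiply is a constant (or a constant splat);
+//     // its value is now bound to C.
+//   }
+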
+struct SpecificInt_match {
+ APInt IntVal;
+
+ explicit SpecificInt_match(APInt APV) : IntVal(std::move(APV)) {}
+
+ template <typename MatchContext>
+ bool match(const MatchContext &Ctx, SDValue N) {
+ APInt ConstInt;
+ if (sd_context_match(N, Ctx, m_ConstInt(ConstInt)))
+ return APInt::isSameValue(IntVal, ConstInt);
+ return false;
+ }
+};
+
+/// Match a specific integer constant or constant splat value.
+inline SpecificInt_match m_SpecificInt(APInt V) {
+ return SpecificInt_match(std::move(V));
+}
+inline SpecificInt_match m_SpecificInt(uint64_t V) {
+ return SpecificInt_match(APInt(64, V));
+}
+
+inline SpecificInt_match m_Zero() { return m_SpecificInt(0U); }
+inline SpecificInt_match m_AllOnes() { return m_SpecificInt(~0U); }
+
+/// Match true boolean value based on the information provided by
+/// TargetLowering.
+inline auto m_True() {
+ return TLI_pred_match{
+ [](const TargetLowering &TLI, SDValue N) {
+ APInt ConstVal;
+ if (sd_match(N, m_ConstInt(ConstVal)))
+ switch (TLI.getBooleanContents(N.getValueType())) {
+ case TargetLowering::ZeroOrOneBooleanContent:
+ return ConstVal.isOne();
+ case TargetLowering::ZeroOrNegativeOneBooleanContent:
+ return ConstVal.isAllOnes();
+ case TargetLowering::UndefinedBooleanContent:
+ return (ConstVal & 0x01) == 1;
+ }
+
+ return false;
+ },
+ m_Value()};
+}
+/// Match false boolean value based on the information provided by
+/// TargetLowering.
+inline auto m_False() {
+ return TLI_pred_match{
+ [](const TargetLowering &TLI, SDValue N) {
+ APInt ConstVal;
+ if (sd_match(N, m_ConstInt(ConstVal)))
+ switch (TLI.getBooleanContents(N.getValueType())) {
+ case TargetLowering::ZeroOrOneBooleanContent:
+ case TargetLowering::ZeroOrNegativeOneBooleanContent:
+ return ConstVal.isZero();
+ case TargetLowering::UndefinedBooleanContent:
+ return (ConstVal & 0x01) == 0;
+ }
+
+ return false;
+ },
+ m_Value()};
+}
+} // namespace SDPatternMatch
+} // namespace llvm
+#endif
diff --git a/llvm/include/llvm/CodeGen/SelectionDAG.h b/llvm/include/llvm/CodeGen/SelectionDAG.h
index 7bb12d8f065c..2fc1ceafa927 100644
--- a/llvm/include/llvm/CodeGen/SelectionDAG.h
+++ b/llvm/include/llvm/CodeGen/SelectionDAG.h
@@ -1479,46 +1479,11 @@ public:
SDValue getStridedLoadVP(ISD::MemIndexedMode AM, ISD::LoadExtType ExtType,
EVT VT, const SDLoc &DL, SDValue Chain, SDValue Ptr,
SDValue Offset, SDValue Stride, SDValue Mask,
- SDValue EVL, MachinePointerInfo PtrInfo, EVT MemVT,
- Align Alignment, MachineMemOperand::Flags MMOFlags,
- const AAMDNodes &AAInfo,
- const MDNode *Ranges = nullptr,
- bool IsExpanding = false);
- inline SDValue getStridedLoadVP(
- ISD::MemIndexedMode AM, ISD::LoadExtType ExtType, EVT VT, const SDLoc &DL,
- SDValue Chain, SDValue Ptr, SDValue Offset, SDValue Stride, SDValue Mask,
- SDValue EVL, MachinePointerInfo PtrInfo, EVT MemVT,
- MaybeAlign Alignment = MaybeAlign(),
- MachineMemOperand::Flags MMOFlags = MachineMemOperand::MONone,
- const AAMDNodes &AAInfo = AAMDNodes(), const MDNode *Ranges = nullptr,
- bool IsExpanding = false) {
- // Ensures that codegen never sees a None Alignment.
- return getStridedLoadVP(AM, ExtType, VT, DL, Chain, Ptr, Offset, Stride,
- Mask, EVL, PtrInfo, MemVT,
- Alignment.value_or(getEVTAlign(MemVT)), MMOFlags,
- AAInfo, Ranges, IsExpanding);
- }
- SDValue getStridedLoadVP(ISD::MemIndexedMode AM, ISD::LoadExtType ExtType,
- EVT VT, const SDLoc &DL, SDValue Chain, SDValue Ptr,
- SDValue Offset, SDValue Stride, SDValue Mask,
SDValue EVL, EVT MemVT, MachineMemOperand *MMO,
bool IsExpanding = false);
SDValue getStridedLoadVP(EVT VT, const SDLoc &DL, SDValue Chain, SDValue Ptr,
SDValue Stride, SDValue Mask, SDValue EVL,
- MachinePointerInfo PtrInfo, MaybeAlign Alignment,
- MachineMemOperand::Flags MMOFlags,
- const AAMDNodes &AAInfo,
- const MDNode *Ranges = nullptr,
- bool IsExpanding = false);
- SDValue getStridedLoadVP(EVT VT, const SDLoc &DL, SDValue Chain, SDValue Ptr,
- SDValue Stride, SDValue Mask, SDValue EVL,
MachineMemOperand *MMO, bool IsExpanding = false);
- SDValue
- getExtStridedLoadVP(ISD::LoadExtType ExtType, const SDLoc &DL, EVT VT,
- SDValue Chain, SDValue Ptr, SDValue Stride, SDValue Mask,
- SDValue EVL, MachinePointerInfo PtrInfo, EVT MemVT,
- MaybeAlign Alignment, MachineMemOperand::Flags MMOFlags,
- const AAMDNodes &AAInfo, bool IsExpanding = false);
SDValue getExtStridedLoadVP(ISD::LoadExtType ExtType, const SDLoc &DL, EVT VT,
SDValue Chain, SDValue Ptr, SDValue Stride,
SDValue Mask, SDValue EVL, EVT MemVT,
@@ -1534,13 +1499,6 @@ public:
bool IsCompressing = false);
SDValue getTruncStridedStoreVP(SDValue Chain, const SDLoc &DL, SDValue Val,
SDValue Ptr, SDValue Stride, SDValue Mask,
- SDValue EVL, MachinePointerInfo PtrInfo,
- EVT SVT, Align Alignment,
- MachineMemOperand::Flags MMOFlags,
- const AAMDNodes &AAInfo,
- bool IsCompressing = false);
- SDValue getTruncStridedStoreVP(SDValue Chain, const SDLoc &DL, SDValue Val,
- SDValue Ptr, SDValue Stride, SDValue Mask,
SDValue EVL, EVT SVT, MachineMemOperand *MMO,
bool IsCompressing = false);
SDValue getIndexedStridedStoreVP(SDValue OrigStore, const SDLoc &DL,
diff --git a/llvm/include/llvm/DebugInfo/DWARF/DWARFAcceleratorTable.h b/llvm/include/llvm/DebugInfo/DWARF/DWARFAcceleratorTable.h
index a26c44bf7e9c..d368c7e0ece8 100644
--- a/llvm/include/llvm/DebugInfo/DWARF/DWARFAcceleratorTable.h
+++ b/llvm/include/llvm/DebugInfo/DWARF/DWARFAcceleratorTable.h
@@ -562,6 +562,17 @@ public:
uint64_t getEntryOffset() const { return EntryOffset; }
};
+ /// Offsets for the start of various important tables from the start of the
+ /// section.
+ struct DWARFDebugNamesOffsets {
+ uint64_t CUsBase;
+ uint64_t BucketsBase;
+ uint64_t HashesBase;
+ uint64_t StringOffsetsBase;
+ uint64_t EntryOffsetsBase;
+ uint64_t EntriesBase;
+ };
+
/// Represents a single accelerator table within the DWARF v5 .debug_names
/// section.
class NameIndex {
@@ -572,12 +583,7 @@ public:
// Base of the whole unit and of various important tables, as offsets from
// the start of the section.
uint64_t Base;
- uint64_t CUsBase;
- uint64_t BucketsBase;
- uint64_t HashesBase;
- uint64_t StringOffsetsBase;
- uint64_t EntryOffsetsBase;
- uint64_t EntriesBase;
+ DWARFDebugNamesOffsets Offsets;
void dumpCUs(ScopedPrinter &W) const;
void dumpLocalTUs(ScopedPrinter &W) const;
@@ -638,7 +644,7 @@ public:
/// Returns the Entry at the relative `Offset` from the start of the Entry
/// pool.
Expected<Entry> getEntryAtRelativeOffset(uint64_t Offset) const {
- auto OffsetFromSection = Offset + this->EntriesBase;
+ auto OffsetFromSection = Offset + this->Offsets.EntriesBase;
return getEntry(&OffsetFromSection);
}
@@ -793,6 +799,12 @@ public:
const NameIndex *getCUNameIndex(uint64_t CUOffset);
};
+/// Calculates the starting offsets of the various tables within the
+/// .debug_names section.
+void findDebugNamesOffsets(DWARFDebugNames::DWARFDebugNamesOffsets &Offsets,
+ uint64_t HdrSize, const dwarf::DwarfFormat Format,
+ const DWARFDebugNames::Header &Hdr);
+
/// If `Name` is the name of a templated function that includes template
/// parameters, returns a substring of `Name` containing no template
/// parameters.
diff --git a/llvm/include/llvm/Frontend/OpenMP/OMP.td b/llvm/include/llvm/Frontend/OpenMP/OMP.td
index 1481328bf483..77d207f2b10a 100644
--- a/llvm/include/llvm/Frontend/OpenMP/OMP.td
+++ b/llvm/include/llvm/Frontend/OpenMP/OMP.td
@@ -710,16 +710,14 @@ def OMP_Requires : Directive<"requires"> {
}
def OMP_Nothing : Directive<"nothing"> {}
def OMP_TargetData : Directive<"target data"> {
- let allowedClauses = [
- VersionedClause<OMPC_UseDevicePtr>,
- VersionedClause<OMPC_UseDeviceAddr, 50>
- ];
let allowedOnceClauses = [
VersionedClause<OMPC_Device>,
VersionedClause<OMPC_If>
];
let requiredClauses = [
- VersionedClause<OMPC_Map>
+ VersionedClause<OMPC_Map>,
+ VersionedClause<OMPC_UseDevicePtr>,
+ VersionedClause<OMPC_UseDeviceAddr, 50>
];
}
def OMP_TargetEnterData : Directive<"target enter data"> {
diff --git a/llvm/include/llvm/IR/Attributes.td b/llvm/include/llvm/IR/Attributes.td
index 864f87f33838..d22eb76d2292 100644
--- a/llvm/include/llvm/IR/Attributes.td
+++ b/llvm/include/llvm/IR/Attributes.td
@@ -339,14 +339,26 @@ def UseSampleProfile : StrBoolAttr<"use-sample-profile">;
def DenormalFPMath : ComplexStrAttr<"denormal-fp-math", [FnAttr]>;
def DenormalFPMathF32 : ComplexStrAttr<"denormal-fp-math-f32", [FnAttr]>;
+// Attribute compatibility rules are generated to check the attribute of the
+// caller and callee and decide whether inlining should be allowed. CompatRule
+// and child classes are used for the rule generation. CompatRule takes only a
+// compare function which could be templated with the attribute type.
+// CompatRuleStrAttr takes the compare function and the string attribute for
+// checking compatibility for inline substitution.
class CompatRule<string F> {
- // The name of the function called to check the attribute of the caller and
- // callee and decide whether inlining should be allowed. The function's
- // signature must match "bool(const Function&, const Function &)", where the
- // first parameter is the reference to the caller and the second parameter is
- // the reference to the callee. It must return false if the attributes of the
- // caller and callee are incompatible, and true otherwise.
+ // The function's signature must match "bool(const Function&, const
+ // Function&)", where the first parameter is the reference to the caller and
+ // the second parameter is the reference to the callee. It must return false
+ // if the attributes of the caller and callee are incompatible, and true
+ // otherwise.
string CompatFunc = F;
+ string AttrName = "";
+}
+
+class CompatRuleStrAttr<string F, string Attr> : CompatRule<F> {
+ // The checker function is extended with a third argument, the function
+ // attribute string: "bool(const Function&, const Function&, const StringRef&)".
+ string AttrName = Attr;
}
def : CompatRule<"isEqual<SanitizeAddressAttr>">;
@@ -359,7 +371,9 @@ def : CompatRule<"isEqual<ShadowCallStackAttr>">;
def : CompatRule<"isEqual<UseSampleProfileAttr>">;
def : CompatRule<"isEqual<NoProfileAttr>">;
def : CompatRule<"checkDenormMode">;
-
+def : CompatRuleStrAttr<"isEqual", "sign-return-address">;
+def : CompatRuleStrAttr<"isEqual", "sign-return-address-key">;
+def : CompatRuleStrAttr<"isEqual", "branch-protection-pauth-lr">;
class MergeRule<string F> {
// The name of the function called to merge the attributes of the caller and
diff --git a/llvm/include/llvm/IR/Constants.h b/llvm/include/llvm/IR/Constants.h
index b5dcc7fbc1d9..c0ac9a4aa675 100644
--- a/llvm/include/llvm/IR/Constants.h
+++ b/llvm/include/llvm/IR/Constants.h
@@ -78,13 +78,20 @@ public:
/// Class for constant integers.
class ConstantInt final : public ConstantData {
friend class Constant;
+ friend class ConstantVector;
APInt Val;
- ConstantInt(IntegerType *Ty, const APInt &V);
+ ConstantInt(Type *Ty, const APInt &V);
void destroyConstantImpl();
+ /// Return a ConstantInt with the specified value and an implied Type. The
+ /// type is the vector type whose integer element type corresponds to the bit
+ /// width of the value.
+ static ConstantInt *get(LLVMContext &Context, ElementCount EC,
+ const APInt &V);
+
public:
ConstantInt(const ConstantInt &) = delete;
@@ -136,7 +143,7 @@ public:
/// Return the constant's value.
inline const APInt &getValue() const { return Val; }
- /// getBitWidth - Return the bitwidth of this constant.
+ /// getBitWidth - Return the scalar bitwidth of this constant.
unsigned getBitWidth() const { return Val.getBitWidth(); }
/// Return the constant as a 64-bit unsigned integer value after it
@@ -259,6 +266,7 @@ public:
///
class ConstantFP final : public ConstantData {
friend class Constant;
+ friend class ConstantVector;
APFloat Val;
@@ -266,6 +274,12 @@ class ConstantFP final : public ConstantData {
void destroyConstantImpl();
+ /// Return a ConstantFP with the specified value and an implied Type. The
+ /// type is the vector type whose element type has the same floating point
+ /// semantics as the value.
+ static ConstantFP *get(LLVMContext &Context, ElementCount EC,
+ const APFloat &V);
+
public:
ConstantFP(const ConstantFP &) = delete;
diff --git a/llvm/include/llvm/IR/DebugProgramInstruction.h b/llvm/include/llvm/IR/DebugProgramInstruction.h
index 1fa6b6f51964..97089098ee53 100644
--- a/llvm/include/llvm/IR/DebugProgramInstruction.h
+++ b/llvm/include/llvm/IR/DebugProgramInstruction.h
@@ -62,6 +62,8 @@ class BasicBlock;
class MDNode;
class Module;
class DbgVariableIntrinsic;
+class DbgInfoIntrinsic;
+class DbgLabelInst;
class DIAssignID;
class DPMarker;
class DPValue;
@@ -79,14 +81,14 @@ class raw_ostream;
/// deleteRecord
/// clone
/// isIdenticalToWhenDefined
-/// isEquivalentTo
/// both print methods
+/// createDebugIntrinsic
class DbgRecord : public ilist_node<DbgRecord> {
public:
/// Marker that this DbgRecord is linked into.
DPMarker *Marker = nullptr;
/// Subclass discriminator.
- enum Kind : uint8_t { ValueKind };
+ enum Kind : uint8_t { ValueKind, LabelKind };
protected:
DebugLoc DbgLoc;
@@ -104,9 +106,16 @@ public:
void print(raw_ostream &O, bool IsForDebug = false) const;
void print(raw_ostream &O, ModuleSlotTracker &MST, bool IsForDebug) const;
bool isIdenticalToWhenDefined(const DbgRecord &R) const;
- bool isEquivalentTo(const DbgRecord &R) const;
+ /// Convert this DbgRecord back into an appropriate llvm.dbg.* intrinsic.
+ /// \p InsertBefore Optional position to insert this intrinsic.
+ /// \returns A new llvm.dbg.* intrinsic representing this DbgRecord.
+ DbgInfoIntrinsic *createDebugIntrinsic(Module *M,
+ Instruction *InsertBefore) const;
///@}
+ /// Same as isIdenticalToWhenDefined but checks DebugLoc too.
+ bool isEquivalentTo(const DbgRecord &R) const;
+
Kind getRecordKind() const { return RecordKind; }
void setMarker(DPMarker *M) { Marker = M; }
@@ -156,6 +165,38 @@ protected:
~DbgRecord() = default;
};
+inline raw_ostream &operator<<(raw_ostream &OS, const DbgRecord &R) {
+ R.print(OS);
+ return OS;
+}
+
+/// Records a position in IR for a source label (DILabel). Corresponds to the
+/// llvm.dbg.label intrinsic.
+/// FIXME: Rename to DbgLabelRecord when DPValue is renamed to DbgVariableRecord.
+class DPLabel : public DbgRecord {
+ DILabel *Label;
+
+public:
+ DPLabel(DILabel *Label, DebugLoc DL)
+ : DbgRecord(LabelKind, DL), Label(Label) {
+ assert(Label && "Unexpected nullptr");
+ }
+
+ DPLabel *clone() const;
+ void print(raw_ostream &O, bool IsForDebug = false) const;
+ void print(raw_ostream &ROS, ModuleSlotTracker &MST, bool IsForDebug) const;
+ DbgLabelInst *createDebugIntrinsic(Module *M,
+ Instruction *InsertBefore) const;
+
+ void setLabel(DILabel *NewLabel) { Label = NewLabel; }
+ DILabel *getLabel() const { return Label; }
+
+ /// Support type inquiry through isa, cast, and dyn_cast.
+ static bool classof(const DbgRecord *E) {
+ return E->getRecordKind() == LabelKind;
+ }
+};
+
/// Record of a variable value-assignment, aka a non instruction representation
/// of the dbg.value intrinsic.
///
@@ -510,11 +551,6 @@ inline raw_ostream &operator<<(raw_ostream &OS, const DPMarker &Marker) {
return OS;
}
-inline raw_ostream &operator<<(raw_ostream &OS, const DPValue &Value) {
- Value.print(OS);
- return OS;
-}
-
/// Inline helper to return a range of DPValues attached to a marker. It needs
/// to be inlined as it's frequently called, but also come after the declaration
/// of DPMarker. Thus: it's pre-declared by users like Instruction, then an
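For reference, a small sketch of how the new DPLabel record and its createDebugIntrinsic hook might be exercised; Label, DL and M are assumed to exist in the caller, and the actual call sites are not part of this patch (ownership and insertion into a DPMarker are omitted for brevity):

#include "llvm/IR/DebugProgramInstruction.h"
#include "llvm/IR/IntrinsicInst.h"
using namespace llvm;

void example(DILabel *Label, DebugLoc DL, Module &M) {
  DPLabel *DPL = new DPLabel(Label, DL);
  DbgRecord *R = DPL; // stored generically alongside DPValues
  // classof()-based dispatch distinguishes label records from value records.
  if (auto *AsLabel = dyn_cast<DPLabel>(R)) {
    // Lower the record back to an llvm.dbg.label intrinsic when needed.
    DbgLabelInst *DLI =
        AsLabel->createDebugIntrinsic(&M, /*InsertBefore=*/nullptr);
    (void)DLI;
  }
}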
diff --git a/llvm/include/llvm/IR/IntrinsicInst.h b/llvm/include/llvm/IR/IntrinsicInst.h
index b8d578d0fee0..fbaaef8ea443 100644
--- a/llvm/include/llvm/IR/IntrinsicInst.h
+++ b/llvm/include/llvm/IR/IntrinsicInst.h
@@ -531,6 +531,9 @@ public:
class DbgLabelInst : public DbgInfoIntrinsic {
public:
DILabel *getLabel() const { return cast<DILabel>(getRawLabel()); }
+ void setLabel(DILabel *NewLabel) {
+ setArgOperand(0, MetadataAsValue::get(getContext(), NewLabel));
+ }
Metadata *getRawLabel() const {
return cast<MetadataAsValue>(getArgOperand(0))->getMetadata();
diff --git a/llvm/include/llvm/IR/Intrinsics.td b/llvm/include/llvm/IR/Intrinsics.td
index 8c0d4d5db32d..d7c1ce153a6c 100644
--- a/llvm/include/llvm/IR/Intrinsics.td
+++ b/llvm/include/llvm/IR/Intrinsics.td
@@ -1933,6 +1933,26 @@ let IntrProperties = [IntrNoMem, IntrNoSync, IntrWillReturn] in {
LLVMMatchType<0>,
LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
llvm_i32_ty]>;
+ def int_vp_sadd_sat : DefaultAttrsIntrinsic<[ llvm_anyvector_ty ],
+ [ LLVMMatchType<0>,
+ LLVMMatchType<0>,
+ LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
+ llvm_i32_ty]>;
+ def int_vp_uadd_sat : DefaultAttrsIntrinsic<[ llvm_anyvector_ty ],
+ [ LLVMMatchType<0>,
+ LLVMMatchType<0>,
+ LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
+ llvm_i32_ty]>;
+ def int_vp_ssub_sat : DefaultAttrsIntrinsic<[ llvm_anyvector_ty ],
+ [ LLVMMatchType<0>,
+ LLVMMatchType<0>,
+ LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
+ llvm_i32_ty]>;
+ def int_vp_usub_sat : DefaultAttrsIntrinsic<[ llvm_anyvector_ty ],
+ [ LLVMMatchType<0>,
+ LLVMMatchType<0>,
+ LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
+ llvm_i32_ty]>;
// Floating-point arithmetic
def int_vp_fadd : DefaultAttrsIntrinsic<[ llvm_anyvector_ty ],
diff --git a/llvm/include/llvm/IR/IntrinsicsAArch64.td b/llvm/include/llvm/IR/IntrinsicsAArch64.td
index 921e5b95ae03..6b045e412cd5 100644
--- a/llvm/include/llvm/IR/IntrinsicsAArch64.td
+++ b/llvm/include/llvm/IR/IntrinsicsAArch64.td
@@ -1367,6 +1367,27 @@ let TargetPrefix = "aarch64" in { // All intrinsics start with "llvm.aarch64.".
llvm_i32_ty,
llvm_i32_ty],
[IntrNoMem, ImmArg<ArgIndex<3>>, ImmArg<ArgIndex<4>>]>;
+
+ class SVE2_1VectorArg_Pred_Intrinsic
+ : DefaultAttrsIntrinsic<[LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>],
+ [llvm_anyvector_ty],
+ [IntrNoMem]>;
+
+ class SVE2_1VectorArgIndexed_Pred_Intrinsic
+ : DefaultAttrsIntrinsic<[LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>],
+ [llvm_anyvector_ty, llvm_i32_ty],
+ [IntrNoMem, ImmArg<ArgIndex<1>>]>;
+
+ class SVE2_Pred_1VectorArgIndexed_Intrinsic
+ : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
+ [LLVMMatchType<0>,
+ LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>, llvm_i32_ty],
+ [IntrNoMem, ImmArg<ArgIndex<2>>]>;
+
+ class SVE2_Pred_1VectorArg_Intrinsic
+ : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
+ [LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>],
+ [IntrNoMem]>;
// NOTE: There is no relationship between these intrinsics beyond an attempt
// to reuse currently identical class definitions.
@@ -3610,23 +3631,10 @@ def int_aarch64_sve_extq : AdvSIMD_2VectorArgIndexed_Intrinsic;
//
// SVE2.1 - Move predicate to/from vector
//
-def int_aarch64_sve_pmov_to_pred_lane :
- DefaultAttrsIntrinsic<[LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>],
- [llvm_anyvector_ty, llvm_i32_ty],
- [IntrNoMem, ImmArg<ArgIndex<1>>]>;
-
-def int_aarch64_sve_pmov_to_pred_lane_zero :
- DefaultAttrsIntrinsic<[LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>],
- [llvm_anyvector_ty],
- [IntrNoMem]>;
-
-def int_aarch64_sve_pmov_to_vector_lane_merging :
- DefaultAttrsIntrinsic<[llvm_anyvector_ty],
- [LLVMMatchType<0>,
- LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>, llvm_i32_ty],
- [IntrNoMem, ImmArg<ArgIndex<2>>]>;
+def int_aarch64_sve_pmov_to_pred_lane : SVE2_1VectorArgIndexed_Pred_Intrinsic;
+
+def int_aarch64_sve_pmov_to_pred_lane_zero : SVE2_1VectorArg_Pred_Intrinsic;
-def int_aarch64_sve_pmov_to_vector_lane_zeroing :
- DefaultAttrsIntrinsic<[llvm_anyvector_ty],
- [LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>],
- [IntrNoMem]>;
+def int_aarch64_sve_pmov_to_vector_lane_merging : SVE2_Pred_1VectorArgIndexed_Intrinsic;
+
+def int_aarch64_sve_pmov_to_vector_lane_zeroing : SVE2_Pred_1VectorArg_Intrinsic;
\ No newline at end of file
diff --git a/llvm/include/llvm/IR/VPIntrinsics.def b/llvm/include/llvm/IR/VPIntrinsics.def
index 3b32b60609f5..4089acf9ec3f 100644
--- a/llvm/include/llvm/IR/VPIntrinsics.def
+++ b/llvm/include/llvm/IR/VPIntrinsics.def
@@ -293,6 +293,30 @@ BEGIN_REGISTER_VP(vp_fshr, 3, 4, VP_FSHR, -1)
VP_PROPERTY_FUNCTIONAL_INTRINSIC(fshr)
VP_PROPERTY_FUNCTIONAL_SDOPC(FSHR)
END_REGISTER_VP(vp_fshr, VP_FSHR)
+
+// llvm.vp.sadd.sat(x,y,mask,vlen)
+BEGIN_REGISTER_VP(vp_sadd_sat, 2, 3, VP_SADDSAT, -1)
+VP_PROPERTY_FUNCTIONAL_INTRINSIC(sadd_sat)
+VP_PROPERTY_FUNCTIONAL_SDOPC(SADDSAT)
+END_REGISTER_VP(vp_sadd_sat, VP_SADDSAT)
+
+// llvm.vp.uadd.sat(x,y,mask,vlen)
+BEGIN_REGISTER_VP(vp_uadd_sat, 2, 3, VP_UADDSAT, -1)
+VP_PROPERTY_FUNCTIONAL_INTRINSIC(uadd_sat)
+VP_PROPERTY_FUNCTIONAL_SDOPC(UADDSAT)
+END_REGISTER_VP(vp_uadd_sat, VP_UADDSAT)
+
+// llvm.vp.ssub.sat(x,y,mask,vlen)
+BEGIN_REGISTER_VP(vp_ssub_sat, 2, 3, VP_SSUBSAT, -1)
+VP_PROPERTY_FUNCTIONAL_INTRINSIC(ssub_sat)
+VP_PROPERTY_FUNCTIONAL_SDOPC(SSUBSAT)
+END_REGISTER_VP(vp_ssub_sat, VP_SSUBSAT)
+
+// llvm.vp.usub.sat(x,y,mask,vlen)
+BEGIN_REGISTER_VP(vp_usub_sat, 2, 3, VP_USUBSAT, -1)
+VP_PROPERTY_FUNCTIONAL_INTRINSIC(usub_sat)
+VP_PROPERTY_FUNCTIONAL_SDOPC(USUBSAT)
+END_REGISTER_VP(vp_usub_sat, VP_USUBSAT)
///// } Integer Arithmetic
///// Floating-Point Arithmetic {
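The four new VP saturating-arithmetic intrinsics registered here (and declared in Intrinsics.td above) follow the usual vp.* shape: two data operands plus mask and explicit vector length. A hedged C++ sketch of obtaining one of the declarations; M, Ctx and the element count are assumptions for the example:

#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/Module.h"
using namespace llvm;

void example(Module &M, LLVMContext &Ctx) {
  auto *VecTy = ScalableVectorType::get(Type::getInt32Ty(Ctx), 4);
  // Overloaded on the vector type, per the llvm_anyvector_ty definition above:
  //   <ty> @llvm.vp.sadd.sat(<ty> %x, <ty> %y, <mask> %m, i32 %evl)
  Function *SAddSat =
      Intrinsic::getDeclaration(&M, Intrinsic::vp_sadd_sat, {VecTy});
  (void)SAddSat;
}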
diff --git a/llvm/include/llvm/LTO/Config.h b/llvm/include/llvm/LTO/Config.h
index 6fb55f1cf168..482b6e55a19d 100644
--- a/llvm/include/llvm/LTO/Config.h
+++ b/llvm/include/llvm/LTO/Config.h
@@ -60,9 +60,6 @@ struct Config {
bool VerifyEach = false;
bool DisableVerify = false;
- /// Use the standard optimization pipeline.
- bool UseDefaultPipeline = false;
-
/// Flag to indicate that the optimizer should not assume builtins are present
/// on the target.
bool Freestanding = false;
diff --git a/llvm/include/llvm/Passes/CodeGenPassBuilder.h b/llvm/include/llvm/Passes/CodeGenPassBuilder.h
index 80bbfb75185a..dc60727729f7 100644
--- a/llvm/include/llvm/Passes/CodeGenPassBuilder.h
+++ b/llvm/include/llvm/Passes/CodeGenPassBuilder.h
@@ -37,6 +37,7 @@
#include "llvm/CodeGen/InterleavedLoadCombine.h"
#include "llvm/CodeGen/JMCInstrumenter.h"
#include "llvm/CodeGen/LowerEmuTLS.h"
+#include "llvm/CodeGen/MIRPrinter.h"
#include "llvm/CodeGen/MachinePassManager.h"
#include "llvm/CodeGen/PreISelIntrinsicLowering.h"
#include "llvm/CodeGen/ReplaceWithVeclib.h"
@@ -88,12 +89,8 @@ namespace llvm {
#define DUMMY_MACHINE_MODULE_PASS(NAME, PASS_NAME) \
struct PASS_NAME : public MachinePassInfoMixin<PASS_NAME> { \
template <typename... Ts> PASS_NAME(Ts &&...) {} \
- Error run(Module &, MachineFunctionAnalysisManager &) { \
- return Error::success(); \
- } \
- PreservedAnalyses run(MachineFunction &, \
- MachineFunctionAnalysisManager &) { \
- llvm_unreachable("this api is to make new PM api happy"); \
+ PreservedAnalyses run(Module &, ModuleAnalysisManager &) { \
+ return PreservedAnalyses::all(); \
} \
};
#define DUMMY_MACHINE_FUNCTION_PASS(NAME, PASS_NAME) \
@@ -132,8 +129,8 @@ public:
Opt.OptimizeRegAlloc = getOptLevel() != CodeGenOptLevel::None;
}
- Error buildPipeline(ModulePassManager &MPM, MachineFunctionPassManager &MFPM,
- raw_pwrite_stream &Out, raw_pwrite_stream *DwoOut,
+ Error buildPipeline(ModulePassManager &MPM, raw_pwrite_stream &Out,
+ raw_pwrite_stream *DwoOut,
CodeGenFileType FileType) const;
PassInstrumentationCallbacks *getPassInstrumentationCallbacks() const {
@@ -149,7 +146,15 @@ protected:
using is_function_pass_t = decltype(std::declval<PassT &>().run(
std::declval<Function &>(), std::declval<FunctionAnalysisManager &>()));
+ template <typename PassT>
+ using is_machine_function_pass_t = decltype(std::declval<PassT &>().run(
+ std::declval<MachineFunction &>(),
+ std::declval<MachineFunctionAnalysisManager &>()));
+
// Function object to maintain state while adding codegen IR passes.
+ // TODO: add a Function -> MachineFunction adaptor and merge
+ // AddIRPass/AddMachinePass so we can have a function pipeline that runs both
+ // function passes and machine function passes.
class AddIRPass {
public:
AddIRPass(ModulePassManager &MPM, const DerivedT &PB) : MPM(MPM), PB(PB) {}
@@ -196,31 +201,47 @@ protected:
// Function object to maintain state while adding codegen machine passes.
class AddMachinePass {
public:
- AddMachinePass(MachineFunctionPassManager &PM, const DerivedT &PB)
- : PM(PM), PB(PB) {}
+ AddMachinePass(ModulePassManager &MPM, const DerivedT &PB)
+ : MPM(MPM), PB(PB) {}
+ ~AddMachinePass() {
+ if (!MFPM.isEmpty())
+ MPM.addPass(createModuleToMachineFunctionPassAdaptor(std::move(MFPM)));
+ }
+
+ template <typename PassT>
+ void operator()(PassT &&Pass, bool Force = false,
+ StringRef Name = PassT::name()) {
+ static_assert((is_detected<is_machine_function_pass_t, PassT>::value ||
+ is_detected<is_module_pass_t, PassT>::value) &&
+ "Only module pass and function pass are supported.");
- template <typename PassT> void operator()(PassT &&Pass) {
- if (!PB.runBeforeAdding(PassT::name()))
+ if (!Force && !PB.runBeforeAdding(Name))
return;
- PM.addPass(std::forward<PassT>(Pass));
+ // Add Machine Function Pass
+ if constexpr (is_detected<is_machine_function_pass_t, PassT>::value) {
+ MFPM.addPass(std::forward<PassT>(Pass));
- for (auto &C : PB.AfterCallbacks)
- C(PassT::name());
- }
+ for (auto &C : PB.AfterCallbacks)
+ C(Name);
+ } else {
+ // Add Module Pass
+ if (!MFPM.isEmpty()) {
+ MPM.addPass(
+ createModuleToMachineFunctionPassAdaptor(std::move(MFPM)));
+ MFPM = MachineFunctionPassManager();
+ }
- template <typename PassT> void insertPass(StringRef PassName, PassT Pass) {
- PB.AfterCallbacks.emplace_back(
- [this, PassName, Pass = std::move(Pass)](StringRef Name) {
- if (PassName == Name)
- this->PM.addPass(std::move(Pass));
- });
- }
+ MPM.addPass(std::forward<PassT>(Pass));
- MachineFunctionPassManager releasePM() { return std::move(PM); }
+ for (auto &C : PB.AfterCallbacks)
+ C(Name);
+ }
+ }
private:
- MachineFunctionPassManager &PM;
+ ModulePassManager &MPM;
+ MachineFunctionPassManager MFPM;
const DerivedT &PB;
};
@@ -467,30 +488,43 @@ private:
template <typename Derived>
Error CodeGenPassBuilder<Derived>::buildPipeline(
- ModulePassManager &MPM, MachineFunctionPassManager &MFPM,
- raw_pwrite_stream &Out, raw_pwrite_stream *DwoOut,
+ ModulePassManager &MPM, raw_pwrite_stream &Out, raw_pwrite_stream *DwoOut,
CodeGenFileType FileType) const {
auto StartStopInfo = TargetPassConfig::getStartStopInfo(*PIC);
if (!StartStopInfo)
return StartStopInfo.takeError();
setStartStopPasses(*StartStopInfo);
- AddIRPass addIRPass(MPM, derived());
- // `ProfileSummaryInfo` is always valid.
- addIRPass(RequireAnalysisPass<ProfileSummaryAnalysis, Module>());
- addIRPass(RequireAnalysisPass<CollectorMetadataAnalysis, Module>());
- addISelPasses(addIRPass);
- AddMachinePass addPass(MFPM, derived());
+ bool PrintAsm = TargetPassConfig::willCompleteCodeGenPipeline();
+ bool PrintMIR = !PrintAsm && FileType != CodeGenFileType::Null;
+
+ {
+ AddIRPass addIRPass(MPM, derived());
+ addIRPass(RequireAnalysisPass<ProfileSummaryAnalysis, Module>());
+ addIRPass(RequireAnalysisPass<CollectorMetadataAnalysis, Module>());
+ addISelPasses(addIRPass);
+ }
+
+ AddMachinePass addPass(MPM, derived());
+
+ if (PrintMIR)
+ addPass(PrintMIRPreparePass(Out), /*Force=*/true);
+
if (auto Err = addCoreISelPasses(addPass))
return std::move(Err);
if (auto Err = derived().addMachinePasses(addPass))
return std::move(Err);
- derived().addAsmPrinter(
- addPass, [this, &Out, DwoOut, FileType](MCContext &Ctx) {
- return this->TM.createMCStreamer(Out, DwoOut, FileType, Ctx);
- });
+ if (PrintAsm) {
+ derived().addAsmPrinter(
+ addPass, [this, &Out, DwoOut, FileType](MCContext &Ctx) {
+ return this->TM.createMCStreamer(Out, DwoOut, FileType, Ctx);
+ });
+ }
+
+ if (PrintMIR)
+ addPass(PrintMIRPass(Out), /*Force=*/true);
addPass(FreeMachineFunctionPass());
return verifyStartStop(*StartStopInfo);
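With the MachineFunctionPassManager folded into the module pipeline, a caller now hands buildPipeline only a ModulePassManager. A rough sketch of the updated call pattern; MyTargetCodeGenPassBuilder, Opts, PIC, M and MAM are placeholders rather than names from this patch:

ModulePassManager MPM;
MyTargetCodeGenPassBuilder Builder(TM, Opts, &PIC); // hypothetical derived builder
if (Error Err = Builder.buildPipeline(MPM, Out, /*DwoOut=*/nullptr,
                                      CodeGenFileType::ObjectFile))
  report_fatal_error(std::move(Err), /*gen_crash_diag=*/false);
// Machine function passes now run through the module-to-MF adaptor inserted by
// AddMachinePass, so a single MPM.run(M, MAM) drives the whole pipeline.
MPM.run(M, MAM);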
diff --git a/llvm/include/llvm/Passes/PassBuilder.h b/llvm/include/llvm/Passes/PassBuilder.h
index 10c5b7c00bae..6822cfdb4957 100644
--- a/llvm/include/llvm/Passes/PassBuilder.h
+++ b/llvm/include/llvm/Passes/PassBuilder.h
@@ -133,7 +133,8 @@ public:
void crossRegisterProxies(LoopAnalysisManager &LAM,
FunctionAnalysisManager &FAM,
CGSCCAnalysisManager &CGAM,
- ModuleAnalysisManager &MAM);
+ ModuleAnalysisManager &MAM,
+ MachineFunctionAnalysisManager *MFAM = nullptr);
/// Registers all available module analysis passes.
///
@@ -569,9 +570,9 @@ public:
ModulePipelineParsingCallbacks.push_back(C);
}
void registerPipelineParsingCallback(
- const std::function<bool(StringRef Name, MachineFunctionPassManager &)>
- &C) {
- MachinePipelineParsingCallbacks.push_back(C);
+ const std::function<bool(StringRef Name, MachineFunctionPassManager &,
+ ArrayRef<PipelineElement>)> &C) {
+ MachineFunctionPipelineParsingCallbacks.push_back(C);
}
/// @}}
@@ -733,8 +734,10 @@ private:
// Machine pass callbacks
SmallVector<std::function<void(MachineFunctionAnalysisManager &)>, 2>
MachineFunctionAnalysisRegistrationCallbacks;
- SmallVector<std::function<bool(StringRef, MachineFunctionPassManager &)>, 2>
- MachinePipelineParsingCallbacks;
+ SmallVector<std::function<bool(StringRef, MachineFunctionPassManager &,
+ ArrayRef<PipelineElement>)>,
+ 2>
+ MachineFunctionPipelineParsingCallbacks;
};
/// This utility template takes care of adding require<> and invalidate<>
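The MIR pipeline parsing callback now also receives the nested pipeline elements, mirroring the IR-level callbacks. A hedged sketch of a registration against the new signature; the pass name and MyMIRPass are placeholders:

PB.registerPipelineParsingCallback(
    [](StringRef Name, MachineFunctionPassManager &MFPM,
       ArrayRef<PassBuilder::PipelineElement> /*InnerPipeline*/) {
      if (Name == "my-mir-pass") {   // hypothetical pass name
        MFPM.addPass(MyMIRPass());   // hypothetical machine function pass
        return true;
      }
      return false;
    });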
diff --git a/llvm/include/llvm/Target/TargetMachine.h b/llvm/include/llvm/Target/TargetMachine.h
index 7462f61d32b5..d7ce088cad49 100644
--- a/llvm/include/llvm/Target/TargetMachine.h
+++ b/llvm/include/llvm/Target/TargetMachine.h
@@ -34,8 +34,6 @@ using ModulePassManager = PassManager<Module>;
class Function;
class GlobalValue;
-class MachineFunctionPassManager;
-class MachineFunctionAnalysisManager;
class MachineModuleInfoWrapperPass;
class Mangler;
class MCAsmInfo;
@@ -455,11 +453,9 @@ public:
bool DisableVerify = true,
MachineModuleInfoWrapperPass *MMIWP = nullptr) override;
- virtual Error buildCodeGenPipeline(ModulePassManager &,
- MachineFunctionPassManager &,
- MachineFunctionAnalysisManager &,
- raw_pwrite_stream &, raw_pwrite_stream *,
- CodeGenFileType, CGPassBuilderOption,
+ virtual Error buildCodeGenPipeline(ModulePassManager &, raw_pwrite_stream &,
+ raw_pwrite_stream *, CodeGenFileType,
+ CGPassBuilderOption,
PassInstrumentationCallbacks *) {
return make_error<StringError>("buildCodeGenPipeline is not overridden",
inconvertibleErrorCode());
diff --git a/llvm/include/llvm/Target/TargetSchedule.td b/llvm/include/llvm/Target/TargetSchedule.td
index e2781a5d1ea5..48c9387977c0 100644
--- a/llvm/include/llvm/Target/TargetSchedule.td
+++ b/llvm/include/llvm/Target/TargetSchedule.td
@@ -399,6 +399,8 @@ def NoSchedPred : MCSchedPredicate<TruePred>;
class SchedVar<SchedPredicateBase pred, list<SchedReadWrite> selected> {
SchedPredicateBase Predicate = pred;
list<SchedReadWrite> Selected = selected;
+ // SchedModel silences warnings but is ignored.
+ SchedMachineModel SchedModel = ?;
}
// SchedModel silences warnings but is ignored.
diff --git a/llvm/include/llvm/TargetParser/PPCTargetParser.def b/llvm/include/llvm/TargetParser/PPCTargetParser.def
index f2c44b46fa67..88c7304659c4 100644
--- a/llvm/include/llvm/TargetParser/PPCTargetParser.def
+++ b/llvm/include/llvm/TargetParser/PPCTargetParser.def
@@ -126,4 +126,61 @@ PPC_LNX_CPU("power10",47)
#undef PPC_LNX_DEFINE_OFFSETS
#undef PPC_LNX_FEATURE
#undef PPC_LNX_CPU
+
+// Definitions of the following values are found in the AIX header
+// file: </usr/include/sys/systemcfg.h>.
+#ifndef AIX_POWERPC_USE_SYS_CONF
+ #define AIX_POWERPC_USE_SYS_CONF
+ #define AIX_SYSCON_IMPL_IDX 1
+ #define AIX_PPC7_VALUE 0x00008000
+ #define AIX_PPC8_VALUE 0x00010000
+ #define AIX_PPC9_VALUE 0x00020000
+ #define AIX_PPC10_VALUE 0x00040000
+
+ // Supported SUPPORT_METHOD values.
+ #define AIX_BUILTIN_PPC_TRUE 1
+ #define AIX_BUILTIN_PPC_FALSE 0
+ #define USE_SYS_CONF 2
+
+ // Supported COMPARE_OP values.
+ #define COMP_EQ 0
+
+#endif
+
+// The value of SUPPORT_METHOD can be AIX_BUILTIN_PPC_TRUE,
+// AIX_BUILTIN_PPC_FALSE, or USE_SYS_CONF.
+// When the value of SUPPORT_METHOD is USE_SYS_CONF, the return value
+// depends on the result of comparing the data member of
+// _system_configuration specified by INDEX with a certain value.
+
+#ifndef PPC_AIX_CPU
+ #define PPC_AIX_CPU(NAME, SUPPORT_METHOD, INDEX, COMPARE_OP, VALUE)
+#endif
+
+// __builtin_cpu_is() is supported only on Power7 and up.
+PPC_AIX_CPU("power4",AIX_BUILTIN_PPC_FALSE,0,0,0)
+PPC_AIX_CPU("ppc970",AIX_BUILTIN_PPC_FALSE,0,0,0)
+PPC_AIX_CPU("power5",AIX_BUILTIN_PPC_FALSE,0,0,0)
+PPC_AIX_CPU("power5+",AIX_BUILTIN_PPC_FALSE,0,0,0)
+PPC_AIX_CPU("power6",AIX_BUILTIN_PPC_FALSE,0,0,0)
+PPC_AIX_CPU("ppc-cell-be",AIX_BUILTIN_PPC_FALSE,0,0,0)
+PPC_AIX_CPU("power6x",AIX_BUILTIN_PPC_FALSE,0,0,0)
+PPC_AIX_CPU("ppca2",AIX_BUILTIN_PPC_FALSE,0,0,0)
+PPC_AIX_CPU("ppc405",AIX_BUILTIN_PPC_FALSE,0,0,0)
+PPC_AIX_CPU("ppc440",AIX_BUILTIN_PPC_FALSE,0,0,0)
+PPC_AIX_CPU("ppc464",AIX_BUILTIN_PPC_FALSE,0,0,0)
+PPC_AIX_CPU("ppc476",AIX_BUILTIN_PPC_FALSE,0,0,0)
+PPC_AIX_CPU("power7",USE_SYS_CONF,AIX_SYSCON_IMPL_IDX,COMP_EQ,AIX_PPC7_VALUE)
+PPC_AIX_CPU("power8",USE_SYS_CONF,AIX_SYSCON_IMPL_IDX,COMP_EQ,AIX_PPC8_VALUE)
+PPC_AIX_CPU("power9",USE_SYS_CONF,AIX_SYSCON_IMPL_IDX,COMP_EQ,AIX_PPC9_VALUE)
+PPC_AIX_CPU("power10",USE_SYS_CONF,AIX_SYSCON_IMPL_IDX,COMP_EQ,AIX_PPC10_VALUE)
+#undef PPC_AIX_CPU
+
+// PPC_SYSTEMCONFIG_TYPE defines the IR data structure of the kernel variable
+// `_system_configuration`, which is found in the AIX OS header file: </usr/include/sys/systemcfg.h>.
+#ifndef PPC_SYSTEMCONFIG_TYPE
+#define PPC_SYSTEMCONFIG_TYPE \
+Int32Ty, Int32Ty, Int32Ty
+#endif
+
#endif // !PPC_TGT_PARSER_UNDEF_MACROS
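As with the other X-macro tables in this file, a consumer defines PPC_AIX_CPU before including the .def so the rows above expand into data. A hedged sketch of such an expansion; the struct name and fields are illustrative only, not part of this patch:

struct AIXCPUEntry {
  const char *Name;       // CPU name accepted by __builtin_cpu_is()
  unsigned SupportMethod; // AIX_BUILTIN_PPC_TRUE/FALSE or USE_SYS_CONF
  unsigned Index;         // _system_configuration member index
  unsigned CompareOp;     // e.g. COMP_EQ
  unsigned Value;         // value to compare against
};

static const AIXCPUEntry AIXCPUs[] = {
#define PPC_AIX_CPU(NAME, SUPPORT_METHOD, INDEX, COMPARE_OP, VALUE)           \
  {NAME, SUPPORT_METHOD, INDEX, COMPARE_OP, VALUE},
#include "llvm/TargetParser/PPCTargetParser.def"
};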
diff --git a/llvm/include/llvm/TargetParser/X86TargetParser.def b/llvm/include/llvm/TargetParser/X86TargetParser.def
index 4c630c1eb06e..a9ed56fcd470 100644
--- a/llvm/include/llvm/TargetParser/X86TargetParser.def
+++ b/llvm/include/llvm/TargetParser/X86TargetParser.def
@@ -265,6 +265,7 @@ X86_MICROARCH_LEVEL(X86_64_BASELINE,"x86-64", 95)
X86_MICROARCH_LEVEL(X86_64_V2, "x86-64-v2", 96)
X86_MICROARCH_LEVEL(X86_64_V3, "x86-64-v3", 97)
X86_MICROARCH_LEVEL(X86_64_V4, "x86-64-v4", 98)
+X86_MICROARCH_LEVEL(APXF, "apxf", 111)
#undef X86_FEATURE_COMPAT
#undef X86_FEATURE
#undef X86_MICROARCH_LEVEL
diff --git a/llvm/include/llvm/Transforms/IPO/HotColdSplitting.h b/llvm/include/llvm/Transforms/IPO/HotColdSplitting.h
index c87c6453500c..13dda6d61284 100644
--- a/llvm/include/llvm/Transforms/IPO/HotColdSplitting.h
+++ b/llvm/include/llvm/Transforms/IPO/HotColdSplitting.h
@@ -24,6 +24,7 @@ class TargetTransformInfo;
class OptimizationRemarkEmitter;
class AssumptionCache;
class DominatorTree;
+class CodeExtractor;
class CodeExtractorAnalysisCache;
/// A sequence of basic blocks.
@@ -43,19 +44,17 @@ public:
private:
bool isFunctionCold(const Function &F) const;
- bool isBasicBlockCold(BasicBlock* BB,
- BranchProbability ColdProbThresh,
- SmallPtrSetImpl<BasicBlock *> &ColdBlocks,
+ bool isBasicBlockCold(BasicBlock *BB, BranchProbability ColdProbThresh,
SmallPtrSetImpl<BasicBlock *> &AnnotatedColdBlocks,
BlockFrequencyInfo *BFI) const;
bool shouldOutlineFrom(const Function &F) const;
bool outlineColdRegions(Function &F, bool HasProfileSummary);
- Function *extractColdRegion(const BlockSequence &Region,
+ bool isSplittingBeneficial(CodeExtractor &CE, const BlockSequence &Region,
+ TargetTransformInfo &TTI);
+ Function *extractColdRegion(BasicBlock &EntryPoint, CodeExtractor &CE,
const CodeExtractorAnalysisCache &CEAC,
- DominatorTree &DT, BlockFrequencyInfo *BFI,
- TargetTransformInfo &TTI,
- OptimizationRemarkEmitter &ORE,
- AssumptionCache *AC, unsigned Count);
+ BlockFrequencyInfo *BFI, TargetTransformInfo &TTI,
+ OptimizationRemarkEmitter &ORE);
ProfileSummaryInfo *PSI;
function_ref<BlockFrequencyInfo *(Function &)> GetBFI;
function_ref<TargetTransformInfo &(Function &)> GetTTI;
diff --git a/llvm/lib/Analysis/Loads.cpp b/llvm/lib/Analysis/Loads.cpp
index 6bf0d2f56eb4..5916d2ab48ec 100644
--- a/llvm/lib/Analysis/Loads.cpp
+++ b/llvm/lib/Analysis/Loads.cpp
@@ -364,7 +364,7 @@ bool llvm::isSafeToLoadUnconditionally(Value *V, Align Alignment, APInt &Size,
if (Size.getBitWidth() > 64)
return false;
- const uint64_t LoadSize = Size.getZExtValue();
+ const TypeSize LoadSize = TypeSize::getFixed(Size.getZExtValue());
// Otherwise, be a little bit aggressive by scanning the local block where we
// want to check to see if the pointer is already being loaded or stored
@@ -414,11 +414,11 @@ bool llvm::isSafeToLoadUnconditionally(Value *V, Align Alignment, APInt &Size,
// Handle trivial cases.
if (AccessedPtr == V &&
- LoadSize <= DL.getTypeStoreSize(AccessedTy))
+ TypeSize::isKnownLE(LoadSize, DL.getTypeStoreSize(AccessedTy)))
return true;
if (AreEquivalentAddressValues(AccessedPtr->stripPointerCasts(), V) &&
- LoadSize <= DL.getTypeStoreSize(AccessedTy))
+ TypeSize::isKnownLE(LoadSize, DL.getTypeStoreSize(AccessedTy)))
return true;
}
return false;
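Switching the comparison to TypeSize::isKnownLE keeps the early-exit behaviour for fixed-size types while staying conservative when one side is only known up to vscale. A small illustration of the semantics assumed here:

#include "llvm/Support/TypeSize.h"
using namespace llvm;

void example() {
  TypeSize Fixed16 = TypeSize::getFixed(16);
  TypeSize Scalable16 = TypeSize::getScalable(16); // 16 x vscale bytes
  bool A = TypeSize::isKnownLE(Fixed16, TypeSize::getFixed(32));   // true
  bool B = TypeSize::isKnownLE(Fixed16, Scalable16);               // true: 16 <= 16 * vscale for vscale >= 1
  bool C = TypeSize::isKnownLE(Scalable16, TypeSize::getFixed(32)); // false: depends on vscale
  (void)A; (void)B; (void)C;
}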
diff --git a/llvm/lib/Analysis/ValueTracking.cpp b/llvm/lib/Analysis/ValueTracking.cpp
index 04f317228b3e..653b3d4ffd98 100644
--- a/llvm/lib/Analysis/ValueTracking.cpp
+++ b/llvm/lib/Analysis/ValueTracking.cpp
@@ -7194,6 +7194,21 @@ bool llvm::propagatesPoison(const Use &PoisonOp) {
// corresponding lanes are poison.
return true;
case Intrinsic::ctpop:
+ case Intrinsic::ctlz:
+ case Intrinsic::cttz:
+ case Intrinsic::abs:
+ case Intrinsic::smax:
+ case Intrinsic::smin:
+ case Intrinsic::umax:
+ case Intrinsic::umin:
+ case Intrinsic::bitreverse:
+ case Intrinsic::bswap:
+ case Intrinsic::sadd_sat:
+ case Intrinsic::ssub_sat:
+ case Intrinsic::sshl_sat:
+ case Intrinsic::uadd_sat:
+ case Intrinsic::usub_sat:
+ case Intrinsic::ushl_sat:
return true;
}
}
diff --git a/llvm/lib/Bitcode/Reader/BitcodeReader.cpp b/llvm/lib/Bitcode/Reader/BitcodeReader.cpp
index 515a1d0caa04..832907a3f53f 100644
--- a/llvm/lib/Bitcode/Reader/BitcodeReader.cpp
+++ b/llvm/lib/Bitcode/Reader/BitcodeReader.cpp
@@ -3060,48 +3060,49 @@ Error BitcodeReader::parseConstants() {
V = Constant::getNullValue(CurTy);
break;
case bitc::CST_CODE_INTEGER: // INTEGER: [intval]
- if (!CurTy->isIntegerTy() || Record.empty())
+ if (!CurTy->isIntOrIntVectorTy() || Record.empty())
return error("Invalid integer const record");
V = ConstantInt::get(CurTy, decodeSignRotatedValue(Record[0]));
break;
case bitc::CST_CODE_WIDE_INTEGER: {// WIDE_INTEGER: [n x intval]
- if (!CurTy->isIntegerTy() || Record.empty())
+ if (!CurTy->isIntOrIntVectorTy() || Record.empty())
return error("Invalid wide integer const record");
- APInt VInt =
- readWideAPInt(Record, cast<IntegerType>(CurTy)->getBitWidth());
- V = ConstantInt::get(Context, VInt);
-
+ auto *ScalarTy = cast<IntegerType>(CurTy->getScalarType());
+ APInt VInt = readWideAPInt(Record, ScalarTy->getBitWidth());
+ V = ConstantInt::get(CurTy, VInt);
break;
}
case bitc::CST_CODE_FLOAT: { // FLOAT: [fpval]
if (Record.empty())
return error("Invalid float const record");
- if (CurTy->isHalfTy())
- V = ConstantFP::get(Context, APFloat(APFloat::IEEEhalf(),
- APInt(16, (uint16_t)Record[0])));
- else if (CurTy->isBFloatTy())
- V = ConstantFP::get(Context, APFloat(APFloat::BFloat(),
- APInt(16, (uint32_t)Record[0])));
- else if (CurTy->isFloatTy())
- V = ConstantFP::get(Context, APFloat(APFloat::IEEEsingle(),
- APInt(32, (uint32_t)Record[0])));
- else if (CurTy->isDoubleTy())
- V = ConstantFP::get(Context, APFloat(APFloat::IEEEdouble(),
- APInt(64, Record[0])));
- else if (CurTy->isX86_FP80Ty()) {
+
+ auto *ScalarTy = CurTy->getScalarType();
+ if (ScalarTy->isHalfTy())
+ V = ConstantFP::get(CurTy, APFloat(APFloat::IEEEhalf(),
+ APInt(16, (uint16_t)Record[0])));
+ else if (ScalarTy->isBFloatTy())
+ V = ConstantFP::get(
+ CurTy, APFloat(APFloat::BFloat(), APInt(16, (uint32_t)Record[0])));
+ else if (ScalarTy->isFloatTy())
+ V = ConstantFP::get(CurTy, APFloat(APFloat::IEEEsingle(),
+ APInt(32, (uint32_t)Record[0])));
+ else if (ScalarTy->isDoubleTy())
+ V = ConstantFP::get(
+ CurTy, APFloat(APFloat::IEEEdouble(), APInt(64, Record[0])));
+ else if (ScalarTy->isX86_FP80Ty()) {
// Bits are not stored the same way as a normal i80 APInt, compensate.
uint64_t Rearrange[2];
Rearrange[0] = (Record[1] & 0xffffLL) | (Record[0] << 16);
Rearrange[1] = Record[0] >> 48;
- V = ConstantFP::get(Context, APFloat(APFloat::x87DoubleExtended(),
- APInt(80, Rearrange)));
- } else if (CurTy->isFP128Ty())
- V = ConstantFP::get(Context, APFloat(APFloat::IEEEquad(),
- APInt(128, Record)));
- else if (CurTy->isPPC_FP128Ty())
- V = ConstantFP::get(Context, APFloat(APFloat::PPCDoubleDouble(),
- APInt(128, Record)));
+ V = ConstantFP::get(
+ CurTy, APFloat(APFloat::x87DoubleExtended(), APInt(80, Rearrange)));
+ } else if (ScalarTy->isFP128Ty())
+ V = ConstantFP::get(CurTy,
+ APFloat(APFloat::IEEEquad(), APInt(128, Record)));
+ else if (ScalarTy->isPPC_FP128Ty())
+ V = ConstantFP::get(
+ CurTy, APFloat(APFloat::PPCDoubleDouble(), APInt(128, Record)));
else
V = UndefValue::get(CurTy);
break;
diff --git a/llvm/lib/Bitcode/Writer/BitcodeWriter.cpp b/llvm/lib/Bitcode/Writer/BitcodeWriter.cpp
index 13be0b0c3307..656f2a6ce870 100644
--- a/llvm/lib/Bitcode/Writer/BitcodeWriter.cpp
+++ b/llvm/lib/Bitcode/Writer/BitcodeWriter.cpp
@@ -2624,7 +2624,7 @@ void ModuleBitcodeWriter::writeConstants(unsigned FirstVal, unsigned LastVal,
}
} else if (const ConstantFP *CFP = dyn_cast<ConstantFP>(C)) {
Code = bitc::CST_CODE_FLOAT;
- Type *Ty = CFP->getType();
+ Type *Ty = CFP->getType()->getScalarType();
if (Ty->isHalfTy() || Ty->isBFloatTy() || Ty->isFloatTy() ||
Ty->isDoubleTy()) {
Record.push_back(CFP->getValueAPF().bitcastToAPInt().getZExtValue());
diff --git a/llvm/lib/CodeGen/AssignmentTrackingAnalysis.cpp b/llvm/lib/CodeGen/AssignmentTrackingAnalysis.cpp
index 7b66a851db25..3b84624c3d4d 100644
--- a/llvm/lib/CodeGen/AssignmentTrackingAnalysis.cpp
+++ b/llvm/lib/CodeGen/AssignmentTrackingAnalysis.cpp
@@ -829,11 +829,7 @@ class MemLocFragmentFill {
void process(BasicBlock &BB, VarFragMap &LiveSet) {
BBInsertBeforeMap[&BB].clear();
for (auto &I : BB) {
- for (DbgRecord &DR : I.getDbgValueRange()) {
- // FIXME: DPValue::filter usage needs attention in this file; we need
- // to make sure dbg.labels are handled correctly in RemoveDIs mode.
- // Cast below to ensure this gets fixed when DPLabels are introduced.
- DPValue &DPV = cast<DPValue>(DR);
+ for (DPValue &DPV : DPValue::filter(I.getDbgValueRange())) {
if (const auto *Locs = FnVarLocs->getWedge(&DPV)) {
for (const VarLocInfo &Loc : *Locs) {
addDef(Loc, &DPV, *I.getParent(), LiveSet);
@@ -1919,6 +1915,9 @@ void AssignmentTrackingLowering::process(BasicBlock &BB, BlockInfo *LiveSet) {
// attached DPValues, or a non-debug instruction with attached unprocessed
// DPValues.
if (II != EI && II->hasDbgValues()) {
+ // Skip over non-variable debug records (i.e., labels). They're going to
+ // be read from IR (possibly re-ordering them within the debug record
+ // range) rather than from the analysis results.
for (DPValue &DPV : DPValue::filter(II->getDbgValueRange())) {
resetInsertionPoint(DPV);
processDPValue(DPV, LiveSet);
diff --git a/llvm/lib/CodeGen/CodeGenPrepare.cpp b/llvm/lib/CodeGen/CodeGenPrepare.cpp
index 4036f18dbc67..feefe87f4063 100644
--- a/llvm/lib/CodeGen/CodeGenPrepare.cpp
+++ b/llvm/lib/CodeGen/CodeGenPrepare.cpp
@@ -2686,8 +2686,9 @@ bool CodeGenPrepare::dupRetToEnableTailCallOpts(BasicBlock *BB,
attributesPermitTailCall(F, CI, RetI, *TLI)) {
// Either we return void or the return value must be the first
// argument of a known intrinsic or library function.
- if (!V || (isIntrinsicOrLFToBeTailCalled(TLInfo, CI) &&
- V == CI->getArgOperand(0))) {
+ if (!V || isa<UndefValue>(V) ||
+ (isIntrinsicOrLFToBeTailCalled(TLInfo, CI) &&
+ V == CI->getArgOperand(0))) {
TailCallBBs.push_back(Pred);
}
}
diff --git a/llvm/lib/CodeGen/GlobalISel/CallLowering.cpp b/llvm/lib/CodeGen/GlobalISel/CallLowering.cpp
index 3bd1542eeb74..77dc265d795d 100644
--- a/llvm/lib/CodeGen/GlobalISel/CallLowering.cpp
+++ b/llvm/lib/CodeGen/GlobalISel/CallLowering.cpp
@@ -187,7 +187,7 @@ bool CallLowering::lowerCall(MachineIRBuilder &MIRBuilder, const CallBase &CB,
if (!lowerCall(MIRBuilder, Info))
return false;
- if (ReturnHintAlignReg && !Info.IsTailCall) {
+ if (ReturnHintAlignReg && !Info.LoweredTailCall) {
MIRBuilder.buildAssertAlign(ResRegs[0], ReturnHintAlignReg,
ReturnHintAlign);
}
diff --git a/llvm/lib/CodeGen/GlobalISel/IRTranslator.cpp b/llvm/lib/CodeGen/GlobalISel/IRTranslator.cpp
index 7c95cef2eeb7..38bb808dd5bd 100644
--- a/llvm/lib/CodeGen/GlobalISel/IRTranslator.cpp
+++ b/llvm/lib/CodeGen/GlobalISel/IRTranslator.cpp
@@ -3275,7 +3275,17 @@ void IRTranslator::translateDbgDeclareRecord(Value *Address, bool HasArgList,
void IRTranslator::translateDbgInfo(const Instruction &Inst,
MachineIRBuilder &MIRBuilder) {
- for (DPValue &DPV : DPValue::filter(Inst.getDbgValueRange())) {
+ for (DbgRecord &DR : Inst.getDbgValueRange()) {
+ if (DPLabel *DPL = dyn_cast<DPLabel>(&DR)) {
+ MIRBuilder.setDebugLoc(DPL->getDebugLoc());
+ assert(DPL->getLabel() && "Missing label");
+ assert(DPL->getLabel()->isValidLocationForIntrinsic(
+ MIRBuilder.getDebugLoc()) &&
+ "Expected inlined-at fields to agree");
+ MIRBuilder.buildDbgLabel(DPL->getLabel());
+ continue;
+ }
+ DPValue &DPV = cast<DPValue>(DR);
const DILocalVariable *Variable = DPV.getVariable();
const DIExpression *Expression = DPV.getExpression();
Value *V = DPV.getVariableLocationOp(0);
diff --git a/llvm/lib/CodeGen/GlobalISel/Utils.cpp b/llvm/lib/CodeGen/GlobalISel/Utils.cpp
index 26fd12f9e51c..23ad68b331c9 100644
--- a/llvm/lib/CodeGen/GlobalISel/Utils.cpp
+++ b/llvm/lib/CodeGen/GlobalISel/Utils.cpp
@@ -660,8 +660,11 @@ std::optional<APInt> llvm::ConstantFoldBinOp(unsigned Opcode,
default:
break;
case TargetOpcode::G_ADD:
- case TargetOpcode::G_PTR_ADD:
return C1 + C2;
+ case TargetOpcode::G_PTR_ADD:
+ // Types can be of different widths here.
+ // The result needs to be the same width as C1, so trunc or sext C2.
+ return C1 + C2.sextOrTrunc(C1.getBitWidth());
case TargetOpcode::G_AND:
return C1 & C2;
case TargetOpcode::G_ASHR:
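A quick illustration of why the G_PTR_ADD case needs the extra resize: with a 64-bit base and a 32-bit signed offset, sign-extending the offset first preserves subtraction. The concrete values below are made up for the example:

#include "llvm/ADT/APInt.h"
using namespace llvm;

void example() {
  APInt Ptr(64, 0x1000);
  APInt Off(32, 0xFFFFFFF0);             // -16 as a 32-bit value
  APInt Res = Ptr + Off.sextOrTrunc(64); // 0xFF0, i.e. Ptr - 16
  (void)Res;
}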
diff --git a/llvm/lib/CodeGen/MachineFunctionSplitter.cpp b/llvm/lib/CodeGen/MachineFunctionSplitter.cpp
index 38c1c56d2823..0ddd94589699 100644
--- a/llvm/lib/CodeGen/MachineFunctionSplitter.cpp
+++ b/llvm/lib/CodeGen/MachineFunctionSplitter.cpp
@@ -109,12 +109,6 @@ static bool isColdBlock(const MachineBasicBlock &MBB,
const MachineBlockFrequencyInfo *MBFI,
ProfileSummaryInfo *PSI) {
std::optional<uint64_t> Count = MBFI->getBlockProfileCount(&MBB);
-
- // Temporary hack to cope with AArch64's jump table encoding
- const TargetInstrInfo &TII = *MBB.getParent()->getSubtarget().getInstrInfo();
- if (!TII.isMBBSafeToSplitToCold(MBB))
- return false;
-
// For instrumentation profiles and sample profiles, we use different ways
// to judge whether a block is cold and should be split.
if (PSI->hasInstrumentationProfile() || PSI->hasCSInstrumentationProfile()) {
@@ -178,7 +172,8 @@ bool MachineFunctionSplitter::runOnMachineFunction(MachineFunction &MF) {
if (MBB.isEHPad())
LandingPads.push_back(&MBB);
- else if (UseProfileData && isColdBlock(MBB, MBFI, PSI) && !SplitAllEHCode)
+ else if (UseProfileData && isColdBlock(MBB, MBFI, PSI) &&
+ TII.isMBBSafeToSplitToCold(MBB) && !SplitAllEHCode)
MBB.setSectionID(MBBSectionID::ColdSectionID);
}
@@ -190,7 +185,7 @@ bool MachineFunctionSplitter::runOnMachineFunction(MachineFunction &MF) {
// Here we have UseProfileData == true.
bool HasHotLandingPads = false;
for (const MachineBasicBlock *LP : LandingPads) {
- if (!isColdBlock(*LP, MBFI, PSI))
+ if (!isColdBlock(*LP, MBFI, PSI) || !TII.isMBBSafeToSplitToCold(*LP))
HasHotLandingPads = true;
}
if (!HasHotLandingPads) {
diff --git a/llvm/lib/CodeGen/MachinePassManager.cpp b/llvm/lib/CodeGen/MachinePassManager.cpp
index d42bbe239830..9a750b5bed43 100644
--- a/llvm/lib/CodeGen/MachinePassManager.cpp
+++ b/llvm/lib/CodeGen/MachinePassManager.cpp
@@ -11,7 +11,6 @@
//===----------------------------------------------------------------------===//
#include "llvm/CodeGen/MachinePassManager.h"
-#include "llvm/CodeGen/FreeMachineFunction.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineModuleInfo.h"
#include "llvm/IR/PassManagerImpl.h"
@@ -19,99 +18,121 @@
using namespace llvm;
namespace llvm {
-template class AllAnalysesOn<MachineFunction>;
+
+AnalysisKey FunctionAnalysisManagerMachineFunctionProxy::Key;
+
template class AnalysisManager<MachineFunction>;
template class PassManager<MachineFunction>;
+template class InnerAnalysisManagerProxy<MachineFunctionAnalysisManager,
+ Module>;
+template class OuterAnalysisManagerProxy<ModuleAnalysisManager,
+ MachineFunction>;
+
+bool FunctionAnalysisManagerMachineFunctionProxy::Result::invalidate(
+ MachineFunction &IR, const PreservedAnalyses &PA,
+ MachineFunctionAnalysisManager::Invalidator &Inv) {
+ // MachineFunction passes should not invalidate Function analyses.
+ // TODO: verify that PA doesn't invalidate Function analyses.
+ return false;
+}
-Error MachineFunctionPassManager::run(Module &M,
- MachineFunctionAnalysisManager &MFAM) {
- // MachineModuleAnalysis is a module analysis pass that is never invalidated
- // because we don't run any module pass in codegen pipeline. This is very
- // important because the codegen state is stored in MMI which is the analysis
- // result of MachineModuleAnalysis. MMI should not be recomputed.
- auto &MMI = MFAM.getResult<MachineModuleAnalysis>(M).getMMI();
-
- (void)RequireCodeGenSCCOrder;
- assert(!RequireCodeGenSCCOrder && "not implemented");
-
- // M is unused here
- PassInstrumentation PI = MFAM.getResult<PassInstrumentationAnalysis>(M);
-
- // Add a PIC to verify machine functions.
- if (VerifyMachineFunction) {
- // No need to pop this callback later since MIR pipeline is flat which means
- // current pipeline is the top-level pipeline. Callbacks are not used after
- // current pipeline.
- PI.pushBeforeNonSkippedPassCallback([](StringRef PassID, Any IR) {
- assert(llvm::any_cast<const MachineFunction *>(&IR));
- const MachineFunction *MF = llvm::any_cast<const MachineFunction *>(IR);
- assert(MF && "Machine function should be valid for printing");
- std::string Banner = std::string("After ") + std::string(PassID);
- verifyMachineFunction(Banner, *MF);
- });
+template <>
+bool MachineFunctionAnalysisManagerModuleProxy::Result::invalidate(
+ Module &M, const PreservedAnalyses &PA,
+ ModuleAnalysisManager::Invalidator &Inv) {
+ // If literally everything is preserved, we're done.
+ if (PA.areAllPreserved())
+ return false; // This is still a valid proxy.
+
+ // If this proxy isn't marked as preserved, then even if the result remains
+ // valid, the key itself may no longer be valid, so we clear everything.
+ //
+ // Note that in order to preserve this proxy, a module pass must ensure that
+ // the MFAM has been completely updated to handle the deletion of functions.
+ // Specifically, any MFAM-cached results for those functions need to have been
+ // forcibly cleared. When preserved, this proxy will only invalidate results
+ // cached on functions *still in the module* at the end of the module pass.
+ auto PAC = PA.getChecker<MachineFunctionAnalysisManagerModuleProxy>();
+ if (!PAC.preserved() && !PAC.preservedSet<AllAnalysesOn<Module>>()) {
+ InnerAM->clear();
+ return true;
}
- for (auto &F : InitializationFuncs) {
- if (auto Err = F(M, MFAM))
- return Err;
+ // FIXME: be more precise, see
+ // FunctionAnalysisManagerModuleProxy::Result::invalidate.
+ if (!PA.allAnalysesInSetPreserved<AllAnalysesOn<MachineFunction>>()) {
+ InnerAM->clear();
+ return true;
}
- unsigned Idx = 0;
- size_t Size = Passes.size();
- do {
- // Run machine module passes
- for (; MachineModulePasses.count(Idx) && Idx != Size; ++Idx) {
- if (!PI.runBeforePass<Module>(*Passes[Idx], M))
- continue;
- if (auto Err = MachineModulePasses.at(Idx)(M, MFAM))
- return Err;
- PI.runAfterPass(*Passes[Idx], M, PreservedAnalyses::all());
- }
-
- // Finish running all passes.
- if (Idx == Size)
- break;
-
- // Run machine function passes
-
- // Get index range of machine function passes.
- unsigned Begin = Idx;
- for (; !MachineModulePasses.count(Idx) && Idx != Size; ++Idx)
- ;
-
- for (Function &F : M) {
- // Do not codegen any 'available_externally' functions at all, they have
- // definitions outside the translation unit.
- if (F.hasAvailableExternallyLinkage())
- continue;
-
- MachineFunction &MF = MMI.getOrCreateMachineFunction(F);
-
- for (unsigned I = Begin, E = Idx; I != E; ++I) {
- auto *P = Passes[I].get();
+ // Return false to indicate that this result is still a valid proxy.
+ return false;
+}
- if (!PI.runBeforePass<MachineFunction>(*P, MF))
- continue;
+PreservedAnalyses
+ModuleToMachineFunctionPassAdaptor::run(Module &M, ModuleAnalysisManager &AM) {
+ auto &MMI = AM.getResult<MachineModuleAnalysis>(M).getMMI();
+ MachineFunctionAnalysisManager &MFAM =
+ AM.getResult<MachineFunctionAnalysisManagerModuleProxy>(M).getManager();
+ PassInstrumentation PI = AM.getResult<PassInstrumentationAnalysis>(M);
+ PreservedAnalyses PA = PreservedAnalyses::all();
+ for (Function &F : M) {
+ // Do not codegen any 'available_externally' functions at all, they have
+ // definitions outside the translation unit.
+ if (F.hasAvailableExternallyLinkage())
+ continue;
+
+ MachineFunction &MF = MMI.getOrCreateMachineFunction(F);
+
+ if (!PI.runBeforePass<MachineFunction>(*Pass, MF))
+ continue;
+ PreservedAnalyses PassPA = Pass->run(MF, MFAM);
+ if (MMI.getMachineFunction(F)) {
+ MFAM.invalidate(MF, PassPA);
+ PI.runAfterPass(*Pass, MF, PassPA);
+ } else {
+ MFAM.clear(MF, F.getName());
+ PI.runAfterPassInvalidated<MachineFunction>(*Pass, PassPA);
+ }
+ PA.intersect(std::move(PassPA));
+ }
- // TODO: EmitSizeRemarks
- PreservedAnalyses PassPA = P->run(MF, MFAM);
+ return PA;
+}
- // MF is dangling after FreeMachineFunctionPass
- if (P->name() != FreeMachineFunctionPass::name()) {
- MFAM.invalidate(MF, PassPA);
+void ModuleToMachineFunctionPassAdaptor::printPipeline(
+ raw_ostream &OS, function_ref<StringRef(StringRef)> MapClassName2PassName) {
+ OS << "machine-function(";
+ Pass->printPipeline(OS, MapClassName2PassName);
+ OS << ')';
+}
- PI.runAfterPass(*P, MF, PassPA);
- }
- }
+template <>
+PreservedAnalyses
+PassManager<MachineFunction>::run(MachineFunction &MF,
+ AnalysisManager<MachineFunction> &MFAM) {
+ PassInstrumentation PI = MFAM.getResult<PassInstrumentationAnalysis>(MF);
+ Function &F = MF.getFunction();
+ MachineModuleInfo &MMI =
+ MFAM.getResult<ModuleAnalysisManagerMachineFunctionProxy>(MF)
+ .getCachedResult<MachineModuleAnalysis>(*F.getParent())
+ ->getMMI();
+ PreservedAnalyses PA = PreservedAnalyses::all();
+ for (auto &Pass : Passes) {
+ if (!PI.runBeforePass<MachineFunction>(*Pass, MF))
+ continue;
+
+ PreservedAnalyses PassPA = Pass->run(MF, MFAM);
+ if (MMI.getMachineFunction(F)) {
+ MFAM.invalidate(MF, PassPA);
+ PI.runAfterPass(*Pass, MF, PassPA);
+ } else {
+ MFAM.clear(MF, F.getName());
+ PI.runAfterPassInvalidated<MachineFunction>(*Pass, PassPA);
}
- } while (true);
-
- for (auto &F : FinalizationFuncs) {
- if (auto Err = F(M, MFAM))
- return Err;
+ PA.intersect(std::move(PassPA));
}
-
- return Error::success();
+ return PA;
}
} // namespace llvm
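For orientation, a hedged sketch of how the new adaptor is meant to be composed into a module pipeline; the pass choice is illustrative (FreeMachineFunctionPass is used only because it appears elsewhere in this patch), and M / MAM are assumed to be set up with MachineModuleAnalysis and the MFAM proxy registered:

MachineFunctionPassManager MFPM;
MFPM.addPass(FreeMachineFunctionPass());
ModulePassManager MPM;
MPM.addPass(createModuleToMachineFunctionPassAdaptor(std::move(MFPM)));
// printPipeline() above renders this as machine-function(<inner pipeline>).
MPM.run(M, MAM);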
diff --git a/llvm/lib/CodeGen/MachinePipeliner.cpp b/llvm/lib/CodeGen/MachinePipeliner.cpp
index 697e0da09442..1bda19b2e6e9 100644
--- a/llvm/lib/CodeGen/MachinePipeliner.cpp
+++ b/llvm/lib/CodeGen/MachinePipeliner.cpp
@@ -768,7 +768,6 @@ static void getUnderlyingObjects(const MachineInstr *MI,
Objs.clear();
return;
}
- Objs.push_back(V);
}
}
diff --git a/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp b/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
index 89ef648ee7d7..6a28bc8da223 100644
--- a/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
@@ -76,6 +76,8 @@
#include <utility>
#include <variant>
+#include "MatchContext.h"
+
using namespace llvm;
#define DEBUG_TYPE "dagcombine"
@@ -888,141 +890,6 @@ public:
void NodeInserted(SDNode *N) override { DC.ConsiderForPruning(N); }
};
-class EmptyMatchContext {
- SelectionDAG &DAG;
- const TargetLowering &TLI;
-
-public:
- EmptyMatchContext(SelectionDAG &DAG, const TargetLowering &TLI, SDNode *Root)
- : DAG(DAG), TLI(TLI) {}
-
- bool match(SDValue OpN, unsigned Opcode) const {
- return Opcode == OpN->getOpcode();
- }
-
- // Same as SelectionDAG::getNode().
- template <typename... ArgT> SDValue getNode(ArgT &&...Args) {
- return DAG.getNode(std::forward<ArgT>(Args)...);
- }
-
- bool isOperationLegalOrCustom(unsigned Op, EVT VT,
- bool LegalOnly = false) const {
- return TLI.isOperationLegalOrCustom(Op, VT, LegalOnly);
- }
-};
-
-class VPMatchContext {
- SelectionDAG &DAG;
- const TargetLowering &TLI;
- SDValue RootMaskOp;
- SDValue RootVectorLenOp;
-
-public:
- VPMatchContext(SelectionDAG &DAG, const TargetLowering &TLI, SDNode *Root)
- : DAG(DAG), TLI(TLI), RootMaskOp(), RootVectorLenOp() {
- assert(Root->isVPOpcode());
- if (auto RootMaskPos = ISD::getVPMaskIdx(Root->getOpcode()))
- RootMaskOp = Root->getOperand(*RootMaskPos);
- else if (Root->getOpcode() == ISD::VP_SELECT)
- RootMaskOp = DAG.getAllOnesConstant(SDLoc(Root),
- Root->getOperand(0).getValueType());
-
- if (auto RootVLenPos =
- ISD::getVPExplicitVectorLengthIdx(Root->getOpcode()))
- RootVectorLenOp = Root->getOperand(*RootVLenPos);
- }
-
- /// whether \p OpVal is a node that is functionally compatible with the
- /// NodeType \p Opc
- bool match(SDValue OpVal, unsigned Opc) const {
- if (!OpVal->isVPOpcode())
- return OpVal->getOpcode() == Opc;
-
- auto BaseOpc = ISD::getBaseOpcodeForVP(OpVal->getOpcode(),
- !OpVal->getFlags().hasNoFPExcept());
- if (BaseOpc != Opc)
- return false;
-
- // Make sure the mask of OpVal is true mask or is same as Root's.
- unsigned VPOpcode = OpVal->getOpcode();
- if (auto MaskPos = ISD::getVPMaskIdx(VPOpcode)) {
- SDValue MaskOp = OpVal.getOperand(*MaskPos);
- if (RootMaskOp != MaskOp &&
- !ISD::isConstantSplatVectorAllOnes(MaskOp.getNode()))
- return false;
- }
-
- // Make sure the EVL of OpVal is same as Root's.
- if (auto VLenPos = ISD::getVPExplicitVectorLengthIdx(VPOpcode))
- if (RootVectorLenOp != OpVal.getOperand(*VLenPos))
- return false;
- return true;
- }
-
- // Specialize based on number of operands.
- // TODO emit VP intrinsics where MaskOp/VectorLenOp != null
- // SDValue getNode(unsigned Opcode, const SDLoc &DL, EVT VT) { return
- // DAG.getNode(Opcode, DL, VT); }
- SDValue getNode(unsigned Opcode, const SDLoc &DL, EVT VT, SDValue Operand) {
- unsigned VPOpcode = ISD::getVPForBaseOpcode(Opcode);
- assert(ISD::getVPMaskIdx(VPOpcode) == 1 &&
- ISD::getVPExplicitVectorLengthIdx(VPOpcode) == 2);
- return DAG.getNode(VPOpcode, DL, VT,
- {Operand, RootMaskOp, RootVectorLenOp});
- }
-
- SDValue getNode(unsigned Opcode, const SDLoc &DL, EVT VT, SDValue N1,
- SDValue N2) {
- unsigned VPOpcode = ISD::getVPForBaseOpcode(Opcode);
- assert(ISD::getVPMaskIdx(VPOpcode) == 2 &&
- ISD::getVPExplicitVectorLengthIdx(VPOpcode) == 3);
- return DAG.getNode(VPOpcode, DL, VT,
- {N1, N2, RootMaskOp, RootVectorLenOp});
- }
-
- SDValue getNode(unsigned Opcode, const SDLoc &DL, EVT VT, SDValue N1,
- SDValue N2, SDValue N3) {
- unsigned VPOpcode = ISD::getVPForBaseOpcode(Opcode);
- assert(ISD::getVPMaskIdx(VPOpcode) == 3 &&
- ISD::getVPExplicitVectorLengthIdx(VPOpcode) == 4);
- return DAG.getNode(VPOpcode, DL, VT,
- {N1, N2, N3, RootMaskOp, RootVectorLenOp});
- }
-
- SDValue getNode(unsigned Opcode, const SDLoc &DL, EVT VT, SDValue Operand,
- SDNodeFlags Flags) {
- unsigned VPOpcode = ISD::getVPForBaseOpcode(Opcode);
- assert(ISD::getVPMaskIdx(VPOpcode) == 1 &&
- ISD::getVPExplicitVectorLengthIdx(VPOpcode) == 2);
- return DAG.getNode(VPOpcode, DL, VT, {Operand, RootMaskOp, RootVectorLenOp},
- Flags);
- }
-
- SDValue getNode(unsigned Opcode, const SDLoc &DL, EVT VT, SDValue N1,
- SDValue N2, SDNodeFlags Flags) {
- unsigned VPOpcode = ISD::getVPForBaseOpcode(Opcode);
- assert(ISD::getVPMaskIdx(VPOpcode) == 2 &&
- ISD::getVPExplicitVectorLengthIdx(VPOpcode) == 3);
- return DAG.getNode(VPOpcode, DL, VT, {N1, N2, RootMaskOp, RootVectorLenOp},
- Flags);
- }
-
- SDValue getNode(unsigned Opcode, const SDLoc &DL, EVT VT, SDValue N1,
- SDValue N2, SDValue N3, SDNodeFlags Flags) {
- unsigned VPOpcode = ISD::getVPForBaseOpcode(Opcode);
- assert(ISD::getVPMaskIdx(VPOpcode) == 3 &&
- ISD::getVPExplicitVectorLengthIdx(VPOpcode) == 4);
- return DAG.getNode(VPOpcode, DL, VT,
- {N1, N2, N3, RootMaskOp, RootVectorLenOp}, Flags);
- }
-
- bool isOperationLegalOrCustom(unsigned Op, EVT VT,
- bool LegalOnly = false) const {
- unsigned VPOp = ISD::getVPForBaseOpcode(Op);
- return TLI.isOperationLegalOrCustom(VPOp, VT, LegalOnly);
- }
-};
-
} // end anonymous namespace
//===----------------------------------------------------------------------===//
@@ -13997,6 +13864,13 @@ SDValue DAGCombiner::visitZERO_EXTEND(SDNode *N) {
if (SDValue Res = tryToFoldExtendSelectLoad(N, TLI, DAG, Level))
return Res;
+ // CSE zext nneg with sext if the zext is not free.
+ if (N->getFlags().hasNonNeg() && !TLI.isZExtFree(N0.getValueType(), VT)) {
+ SDNode *CSENode = DAG.getNodeIfExists(ISD::SIGN_EXTEND, N->getVTList(), N0);
+ if (CSENode)
+ return SDValue(CSENode, 0);
+ }
+
return SDValue();
}
diff --git a/llvm/lib/CodeGen/SelectionDAG/FastISel.cpp b/llvm/lib/CodeGen/SelectionDAG/FastISel.cpp
index 5651498dd3f5..246762dd7ab6 100644
--- a/llvm/lib/CodeGen/SelectionDAG/FastISel.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/FastISel.cpp
@@ -1188,11 +1188,24 @@ void FastISel::handleDbgInfo(const Instruction *II) {
MIMD = MIMetadata();
// Reverse order of debug records, because fast-isel walks through backwards.
- for (DbgRecord &DPR : llvm::reverse(II->getDbgValueRange())) {
+ for (DbgRecord &DR : llvm::reverse(II->getDbgValueRange())) {
flushLocalValueMap();
recomputeInsertPt();
- DPValue &DPV = cast<DPValue>(DPR);
+ if (DPLabel *DPL = dyn_cast<DPLabel>(&DR)) {
+ assert(DPL->getLabel() && "Missing label");
+ if (!FuncInfo.MF->getMMI().hasDebugInfo()) {
+ LLVM_DEBUG(dbgs() << "Dropping debug info for " << *DPL << "\n");
+ continue;
+ }
+
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DPL->getDebugLoc(),
+ TII.get(TargetOpcode::DBG_LABEL))
+ .addMetadata(DPL->getLabel());
+ continue;
+ }
+
+ DPValue &DPV = cast<DPValue>(DR);
Value *V = nullptr;
if (!DPV.hasArgList())
diff --git a/llvm/lib/CodeGen/SelectionDAG/LegalizeIntegerTypes.cpp b/llvm/lib/CodeGen/SelectionDAG/LegalizeIntegerTypes.cpp
index a4ba261686c6..df17d6530b0d 100644
--- a/llvm/lib/CodeGen/SelectionDAG/LegalizeIntegerTypes.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/LegalizeIntegerTypes.cpp
@@ -217,7 +217,15 @@ void DAGTypeLegalizer::PromoteIntegerResult(SDNode *N, unsigned ResNo) {
case ISD::SSUBSAT:
case ISD::USUBSAT:
case ISD::SSHLSAT:
- case ISD::USHLSAT: Res = PromoteIntRes_ADDSUBSHLSAT(N); break;
+ case ISD::USHLSAT:
+ Res = PromoteIntRes_ADDSUBSHLSAT<EmptyMatchContext>(N);
+ break;
+ case ISD::VP_SADDSAT:
+ case ISD::VP_UADDSAT:
+ case ISD::VP_SSUBSAT:
+ case ISD::VP_USUBSAT:
+ Res = PromoteIntRes_ADDSUBSHLSAT<VPMatchContext>(N);
+ break;
case ISD::SMULFIX:
case ISD::SMULFIXSAT:
@@ -934,6 +942,7 @@ SDValue DAGTypeLegalizer::PromoteIntRes_Overflow(SDNode *N) {
return DAG.getBoolExtOrTrunc(Res.getValue(1), dl, NVT, VT);
}
+template <class MatchContextClass>
SDValue DAGTypeLegalizer::PromoteIntRes_ADDSUBSHLSAT(SDNode *N) {
// If the promoted type is legal, we can convert this to:
// 1. ANY_EXTEND iN to iM
@@ -945,11 +954,13 @@ SDValue DAGTypeLegalizer::PromoteIntRes_ADDSUBSHLSAT(SDNode *N) {
SDLoc dl(N);
SDValue Op1 = N->getOperand(0);
SDValue Op2 = N->getOperand(1);
+ MatchContextClass matcher(DAG, TLI, N);
unsigned OldBits = Op1.getScalarValueSizeInBits();
- unsigned Opcode = N->getOpcode();
+ unsigned Opcode = matcher.getRootBaseOpcode();
bool IsShift = Opcode == ISD::USHLSAT || Opcode == ISD::SSHLSAT;
+ // FIXME: We need vp-aware PromotedInteger functions.
SDValue Op1Promoted, Op2Promoted;
if (IsShift) {
Op1Promoted = GetPromotedInteger(Op1);
@@ -968,18 +979,18 @@ SDValue DAGTypeLegalizer::PromoteIntRes_ADDSUBSHLSAT(SDNode *N) {
APInt MaxVal = APInt::getAllOnes(OldBits).zext(NewBits);
SDValue SatMax = DAG.getConstant(MaxVal, dl, PromotedType);
SDValue Add =
- DAG.getNode(ISD::ADD, dl, PromotedType, Op1Promoted, Op2Promoted);
- return DAG.getNode(ISD::UMIN, dl, PromotedType, Add, SatMax);
+ matcher.getNode(ISD::ADD, dl, PromotedType, Op1Promoted, Op2Promoted);
+ return matcher.getNode(ISD::UMIN, dl, PromotedType, Add, SatMax);
}
// USUBSAT can always be promoted as long as we have zero-extended the args.
if (Opcode == ISD::USUBSAT)
- return DAG.getNode(ISD::USUBSAT, dl, PromotedType, Op1Promoted,
- Op2Promoted);
+ return matcher.getNode(ISD::USUBSAT, dl, PromotedType, Op1Promoted,
+ Op2Promoted);
// Shift cannot use a min/max expansion, we can't detect overflow if all of
// the bits have been shifted out.
- if (IsShift || TLI.isOperationLegal(Opcode, PromotedType)) {
+ if (IsShift || matcher.isOperationLegal(Opcode, PromotedType)) {
unsigned ShiftOp;
switch (Opcode) {
case ISD::SADDSAT:
@@ -1002,11 +1013,11 @@ SDValue DAGTypeLegalizer::PromoteIntRes_ADDSUBSHLSAT(SDNode *N) {
DAG.getNode(ISD::SHL, dl, PromotedType, Op1Promoted, ShiftAmount);
if (!IsShift)
Op2Promoted =
- DAG.getNode(ISD::SHL, dl, PromotedType, Op2Promoted, ShiftAmount);
+ matcher.getNode(ISD::SHL, dl, PromotedType, Op2Promoted, ShiftAmount);
SDValue Result =
- DAG.getNode(Opcode, dl, PromotedType, Op1Promoted, Op2Promoted);
- return DAG.getNode(ShiftOp, dl, PromotedType, Result, ShiftAmount);
+ matcher.getNode(Opcode, dl, PromotedType, Op1Promoted, Op2Promoted);
+ return matcher.getNode(ShiftOp, dl, PromotedType, Result, ShiftAmount);
}
unsigned AddOp = Opcode == ISD::SADDSAT ? ISD::ADD : ISD::SUB;
@@ -1015,9 +1026,9 @@ SDValue DAGTypeLegalizer::PromoteIntRes_ADDSUBSHLSAT(SDNode *N) {
SDValue SatMin = DAG.getConstant(MinVal, dl, PromotedType);
SDValue SatMax = DAG.getConstant(MaxVal, dl, PromotedType);
SDValue Result =
- DAG.getNode(AddOp, dl, PromotedType, Op1Promoted, Op2Promoted);
- Result = DAG.getNode(ISD::SMIN, dl, PromotedType, Result, SatMax);
- Result = DAG.getNode(ISD::SMAX, dl, PromotedType, Result, SatMin);
+ matcher.getNode(AddOp, dl, PromotedType, Op1Promoted, Op2Promoted);
+ Result = matcher.getNode(ISD::SMIN, dl, PromotedType, Result, SatMax);
+ Result = matcher.getNode(ISD::SMAX, dl, PromotedType, Result, SatMin);
return Result;
}
diff --git a/llvm/lib/CodeGen/SelectionDAG/LegalizeTypes.h b/llvm/lib/CodeGen/SelectionDAG/LegalizeTypes.h
index 911498716285..3c84f67653ec 100644
--- a/llvm/lib/CodeGen/SelectionDAG/LegalizeTypes.h
+++ b/llvm/lib/CodeGen/SelectionDAG/LegalizeTypes.h
@@ -15,6 +15,7 @@
#ifndef LLVM_LIB_CODEGEN_SELECTIONDAG_LEGALIZETYPES_H
#define LLVM_LIB_CODEGEN_SELECTIONDAG_LEGALIZETYPES_H
+#include "MatchContext.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/CodeGen/SelectionDAG.h"
#include "llvm/CodeGen/TargetLowering.h"
@@ -355,6 +356,7 @@ private:
SDValue PromoteIntRes_VAARG(SDNode *N);
SDValue PromoteIntRes_VSCALE(SDNode *N);
SDValue PromoteIntRes_XMULO(SDNode *N, unsigned ResNo);
+ template <class MatchContextClass>
SDValue PromoteIntRes_ADDSUBSHLSAT(SDNode *N);
SDValue PromoteIntRes_MULFIX(SDNode *N);
SDValue PromoteIntRes_DIVFIX(SDNode *N);
diff --git a/llvm/lib/CodeGen/SelectionDAG/LegalizeVectorOps.cpp b/llvm/lib/CodeGen/SelectionDAG/LegalizeVectorOps.cpp
index 2a7aaf88847e..6074498d9144 100644
--- a/llvm/lib/CodeGen/SelectionDAG/LegalizeVectorOps.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/LegalizeVectorOps.cpp
@@ -404,8 +404,6 @@ SDValue VectorLegalizer::LegalizeOp(SDValue Op) {
case ISD::FCEIL:
case ISD::FTRUNC:
case ISD::FRINT:
- case ISD::LRINT:
- case ISD::LLRINT:
case ISD::FNEARBYINT:
case ISD::FROUND:
case ISD::FROUNDEVEN:
@@ -455,6 +453,8 @@ SDValue VectorLegalizer::LegalizeOp(SDValue Op) {
Node->getValueType(0), Scale);
break;
}
+ case ISD::LRINT:
+ case ISD::LLRINT:
case ISD::SINT_TO_FP:
case ISD::UINT_TO_FP:
case ISD::VECREDUCE_ADD:
diff --git a/llvm/lib/CodeGen/SelectionDAG/LegalizeVectorTypes.cpp b/llvm/lib/CodeGen/SelectionDAG/LegalizeVectorTypes.cpp
index 7fc252600534..90cda2a1155b 100644
--- a/llvm/lib/CodeGen/SelectionDAG/LegalizeVectorTypes.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/LegalizeVectorTypes.cpp
@@ -1163,10 +1163,10 @@ void DAGTypeLegalizer::SplitVectorResult(SDNode *N, unsigned ResNo) {
case ISD::SMAX: case ISD::VP_SMAX:
case ISD::UMIN: case ISD::VP_UMIN:
case ISD::UMAX: case ISD::VP_UMAX:
- case ISD::SADDSAT:
- case ISD::UADDSAT:
- case ISD::SSUBSAT:
- case ISD::USUBSAT:
+ case ISD::SADDSAT: case ISD::VP_SADDSAT:
+ case ISD::UADDSAT: case ISD::VP_UADDSAT:
+ case ISD::SSUBSAT: case ISD::VP_SSUBSAT:
+ case ISD::USUBSAT: case ISD::VP_USUBSAT:
case ISD::SSHLSAT:
case ISD::USHLSAT:
case ISD::ROTL:
@@ -4140,10 +4140,10 @@ void DAGTypeLegalizer::WidenVectorResult(SDNode *N, unsigned ResNo) {
case ISD::SMAX: case ISD::VP_SMAX:
case ISD::UMIN: case ISD::VP_UMIN:
case ISD::UMAX: case ISD::VP_UMAX:
- case ISD::UADDSAT:
- case ISD::SADDSAT:
- case ISD::USUBSAT:
- case ISD::SSUBSAT:
+ case ISD::UADDSAT: case ISD::VP_UADDSAT:
+ case ISD::SADDSAT: case ISD::VP_SADDSAT:
+ case ISD::USUBSAT: case ISD::VP_USUBSAT:
+ case ISD::SSUBSAT: case ISD::VP_SSUBSAT:
case ISD::SSHLSAT:
case ISD::USHLSAT:
case ISD::ROTL:
diff --git a/llvm/lib/CodeGen/SelectionDAG/MatchContext.h b/llvm/lib/CodeGen/SelectionDAG/MatchContext.h
new file mode 100644
index 000000000000..f965cb952f97
--- /dev/null
+++ b/llvm/lib/CodeGen/SelectionDAG/MatchContext.h
@@ -0,0 +1,175 @@
+//===---------------- llvm/CodeGen/MatchContext.h --------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file declares the EmptyMatchContext and VPMatchContext classes.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_LIB_CODEGEN_SELECTIONDAG_MATCHCONTEXT_H
+#define LLVM_LIB_CODEGEN_SELECTIONDAG_MATCHCONTEXT_H
+
+#include "llvm/CodeGen/SelectionDAG.h"
+#include "llvm/CodeGen/TargetLowering.h"
+
+using namespace llvm;
+
+namespace {
+class EmptyMatchContext {
+ SelectionDAG &DAG;
+ const TargetLowering &TLI;
+ SDNode *Root;
+
+public:
+ EmptyMatchContext(SelectionDAG &DAG, const TargetLowering &TLI, SDNode *Root)
+ : DAG(DAG), TLI(TLI), Root(Root) {}
+
+ unsigned getRootBaseOpcode() { return Root->getOpcode(); }
+ bool match(SDValue OpN, unsigned Opcode) const {
+ return Opcode == OpN->getOpcode();
+ }
+
+ // Same as SelectionDAG::getNode().
+ template <typename... ArgT> SDValue getNode(ArgT &&...Args) {
+ return DAG.getNode(std::forward<ArgT>(Args)...);
+ }
+
+ bool isOperationLegal(unsigned Op, EVT VT) const {
+ return TLI.isOperationLegal(Op, VT);
+ }
+
+ bool isOperationLegalOrCustom(unsigned Op, EVT VT,
+ bool LegalOnly = false) const {
+ return TLI.isOperationLegalOrCustom(Op, VT, LegalOnly);
+ }
+};
+
+class VPMatchContext {
+ SelectionDAG &DAG;
+ const TargetLowering &TLI;
+ SDValue RootMaskOp;
+ SDValue RootVectorLenOp;
+ SDNode *Root;
+
+public:
+ VPMatchContext(SelectionDAG &DAG, const TargetLowering &TLI, SDNode *_Root)
+ : DAG(DAG), TLI(TLI), RootMaskOp(), RootVectorLenOp() {
+ Root = _Root;
+ assert(Root->isVPOpcode());
+ if (auto RootMaskPos = ISD::getVPMaskIdx(Root->getOpcode()))
+ RootMaskOp = Root->getOperand(*RootMaskPos);
+ else if (Root->getOpcode() == ISD::VP_SELECT)
+ RootMaskOp = DAG.getAllOnesConstant(SDLoc(Root),
+ Root->getOperand(0).getValueType());
+
+ if (auto RootVLenPos = ISD::getVPExplicitVectorLengthIdx(Root->getOpcode()))
+ RootVectorLenOp = Root->getOperand(*RootVLenPos);
+ }
+
+ unsigned getRootBaseOpcode() {
+ std::optional<unsigned> Opcode = ISD::getBaseOpcodeForVP(
+ Root->getOpcode(), !Root->getFlags().hasNoFPExcept());
+ assert(Opcode.has_value());
+ return *Opcode;
+ }
+
+  /// Returns whether \p OpVal is a node that is functionally compatible with
+  /// the NodeType \p Opc.
+ bool match(SDValue OpVal, unsigned Opc) const {
+ if (!OpVal->isVPOpcode())
+ return OpVal->getOpcode() == Opc;
+
+ auto BaseOpc = ISD::getBaseOpcodeForVP(OpVal->getOpcode(),
+ !OpVal->getFlags().hasNoFPExcept());
+ if (BaseOpc != Opc)
+ return false;
+
+    // Make sure the mask of OpVal is a true mask or is the same as Root's.
+ unsigned VPOpcode = OpVal->getOpcode();
+ if (auto MaskPos = ISD::getVPMaskIdx(VPOpcode)) {
+ SDValue MaskOp = OpVal.getOperand(*MaskPos);
+ if (RootMaskOp != MaskOp &&
+ !ISD::isConstantSplatVectorAllOnes(MaskOp.getNode()))
+ return false;
+ }
+
+    // Make sure the EVL of OpVal is the same as Root's.
+ if (auto VLenPos = ISD::getVPExplicitVectorLengthIdx(VPOpcode))
+ if (RootVectorLenOp != OpVal.getOperand(*VLenPos))
+ return false;
+ return true;
+ }
+
+ // Specialize based on number of operands.
+ // TODO emit VP intrinsics where MaskOp/VectorLenOp != null
+ // SDValue getNode(unsigned Opcode, const SDLoc &DL, EVT VT) { return
+ // DAG.getNode(Opcode, DL, VT); }
+ SDValue getNode(unsigned Opcode, const SDLoc &DL, EVT VT, SDValue Operand) {
+ unsigned VPOpcode = ISD::getVPForBaseOpcode(Opcode);
+ assert(ISD::getVPMaskIdx(VPOpcode) == 1 &&
+ ISD::getVPExplicitVectorLengthIdx(VPOpcode) == 2);
+ return DAG.getNode(VPOpcode, DL, VT,
+ {Operand, RootMaskOp, RootVectorLenOp});
+ }
+
+ SDValue getNode(unsigned Opcode, const SDLoc &DL, EVT VT, SDValue N1,
+ SDValue N2) {
+ unsigned VPOpcode = ISD::getVPForBaseOpcode(Opcode);
+ assert(ISD::getVPMaskIdx(VPOpcode) == 2 &&
+ ISD::getVPExplicitVectorLengthIdx(VPOpcode) == 3);
+ return DAG.getNode(VPOpcode, DL, VT, {N1, N2, RootMaskOp, RootVectorLenOp});
+ }
+
+ SDValue getNode(unsigned Opcode, const SDLoc &DL, EVT VT, SDValue N1,
+ SDValue N2, SDValue N3) {
+ unsigned VPOpcode = ISD::getVPForBaseOpcode(Opcode);
+ assert(ISD::getVPMaskIdx(VPOpcode) == 3 &&
+ ISD::getVPExplicitVectorLengthIdx(VPOpcode) == 4);
+ return DAG.getNode(VPOpcode, DL, VT,
+ {N1, N2, N3, RootMaskOp, RootVectorLenOp});
+ }
+
+ SDValue getNode(unsigned Opcode, const SDLoc &DL, EVT VT, SDValue Operand,
+ SDNodeFlags Flags) {
+ unsigned VPOpcode = ISD::getVPForBaseOpcode(Opcode);
+ assert(ISD::getVPMaskIdx(VPOpcode) == 1 &&
+ ISD::getVPExplicitVectorLengthIdx(VPOpcode) == 2);
+ return DAG.getNode(VPOpcode, DL, VT, {Operand, RootMaskOp, RootVectorLenOp},
+ Flags);
+ }
+
+ SDValue getNode(unsigned Opcode, const SDLoc &DL, EVT VT, SDValue N1,
+ SDValue N2, SDNodeFlags Flags) {
+ unsigned VPOpcode = ISD::getVPForBaseOpcode(Opcode);
+ assert(ISD::getVPMaskIdx(VPOpcode) == 2 &&
+ ISD::getVPExplicitVectorLengthIdx(VPOpcode) == 3);
+ return DAG.getNode(VPOpcode, DL, VT, {N1, N2, RootMaskOp, RootVectorLenOp},
+ Flags);
+ }
+
+ SDValue getNode(unsigned Opcode, const SDLoc &DL, EVT VT, SDValue N1,
+ SDValue N2, SDValue N3, SDNodeFlags Flags) {
+ unsigned VPOpcode = ISD::getVPForBaseOpcode(Opcode);
+ assert(ISD::getVPMaskIdx(VPOpcode) == 3 &&
+ ISD::getVPExplicitVectorLengthIdx(VPOpcode) == 4);
+ return DAG.getNode(VPOpcode, DL, VT,
+ {N1, N2, N3, RootMaskOp, RootVectorLenOp}, Flags);
+ }
+
+ bool isOperationLegal(unsigned Op, EVT VT) const {
+ unsigned VPOp = ISD::getVPForBaseOpcode(Op);
+ return TLI.isOperationLegal(VPOp, VT);
+ }
+
+ bool isOperationLegalOrCustom(unsigned Op, EVT VT,
+ bool LegalOnly = false) const {
+ unsigned VPOp = ISD::getVPForBaseOpcode(Op);
+ return TLI.isOperationLegalOrCustom(VPOp, VT, LegalOnly);
+ }
+};
+} // end anonymous namespace
+#endif
diff --git a/llvm/lib/CodeGen/SelectionDAG/SelectionDAG.cpp b/llvm/lib/CodeGen/SelectionDAG/SelectionDAG.cpp
index add92cf8b31e..0ceda27d4066 100644
--- a/llvm/lib/CodeGen/SelectionDAG/SelectionDAG.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/SelectionDAG.cpp
@@ -9044,29 +9044,6 @@ SDValue SelectionDAG::getIndexedStoreVP(SDValue OrigStore, const SDLoc &dl,
SDValue SelectionDAG::getStridedLoadVP(
ISD::MemIndexedMode AM, ISD::LoadExtType ExtType, EVT VT, const SDLoc &DL,
SDValue Chain, SDValue Ptr, SDValue Offset, SDValue Stride, SDValue Mask,
- SDValue EVL, MachinePointerInfo PtrInfo, EVT MemVT, Align Alignment,
- MachineMemOperand::Flags MMOFlags, const AAMDNodes &AAInfo,
- const MDNode *Ranges, bool IsExpanding) {
- assert(Chain.getValueType() == MVT::Other && "Invalid chain type");
-
- MMOFlags |= MachineMemOperand::MOLoad;
- assert((MMOFlags & MachineMemOperand::MOStore) == 0);
- // If we don't have a PtrInfo, infer the trivial frame index case to simplify
- // clients.
- if (PtrInfo.V.isNull())
- PtrInfo = InferPointerInfo(PtrInfo, *this, Ptr, Offset);
-
- uint64_t Size = MemoryLocation::UnknownSize;
- MachineFunction &MF = getMachineFunction();
- MachineMemOperand *MMO = MF.getMachineMemOperand(PtrInfo, MMOFlags, Size,
- Alignment, AAInfo, Ranges);
- return getStridedLoadVP(AM, ExtType, VT, DL, Chain, Ptr, Offset, Stride, Mask,
- EVL, MemVT, MMO, IsExpanding);
-}
-
-SDValue SelectionDAG::getStridedLoadVP(
- ISD::MemIndexedMode AM, ISD::LoadExtType ExtType, EVT VT, const SDLoc &DL,
- SDValue Chain, SDValue Ptr, SDValue Offset, SDValue Stride, SDValue Mask,
SDValue EVL, EVT MemVT, MachineMemOperand *MMO, bool IsExpanding) {
bool Indexed = AM != ISD::UNINDEXED;
assert((Indexed || Offset.isUndef()) && "Unindexed load with an offset!");
@@ -9098,17 +9075,6 @@ SDValue SelectionDAG::getStridedLoadVP(
return V;
}
-SDValue SelectionDAG::getStridedLoadVP(
- EVT VT, const SDLoc &DL, SDValue Chain, SDValue Ptr, SDValue Stride,
- SDValue Mask, SDValue EVL, MachinePointerInfo PtrInfo, MaybeAlign Alignment,
- MachineMemOperand::Flags MMOFlags, const AAMDNodes &AAInfo,
- const MDNode *Ranges, bool IsExpanding) {
- SDValue Undef = getUNDEF(Ptr.getValueType());
- return getStridedLoadVP(ISD::UNINDEXED, ISD::NON_EXTLOAD, VT, DL, Chain, Ptr,
- Undef, Stride, Mask, EVL, PtrInfo, VT, Alignment,
- MMOFlags, AAInfo, Ranges, IsExpanding);
-}
-
SDValue SelectionDAG::getStridedLoadVP(EVT VT, const SDLoc &DL, SDValue Chain,
SDValue Ptr, SDValue Stride,
SDValue Mask, SDValue EVL,
@@ -9121,18 +9087,6 @@ SDValue SelectionDAG::getStridedLoadVP(EVT VT, const SDLoc &DL, SDValue Chain,
SDValue SelectionDAG::getExtStridedLoadVP(
ISD::LoadExtType ExtType, const SDLoc &DL, EVT VT, SDValue Chain,
- SDValue Ptr, SDValue Stride, SDValue Mask, SDValue EVL,
- MachinePointerInfo PtrInfo, EVT MemVT, MaybeAlign Alignment,
- MachineMemOperand::Flags MMOFlags, const AAMDNodes &AAInfo,
- bool IsExpanding) {
- SDValue Undef = getUNDEF(Ptr.getValueType());
- return getStridedLoadVP(ISD::UNINDEXED, ExtType, VT, DL, Chain, Ptr, Undef,
- Stride, Mask, EVL, PtrInfo, MemVT, Alignment,
- MMOFlags, AAInfo, nullptr, IsExpanding);
-}
-
-SDValue SelectionDAG::getExtStridedLoadVP(
- ISD::LoadExtType ExtType, const SDLoc &DL, EVT VT, SDValue Chain,
SDValue Ptr, SDValue Stride, SDValue Mask, SDValue EVL, EVT MemVT,
MachineMemOperand *MMO, bool IsExpanding) {
SDValue Undef = getUNDEF(Ptr.getValueType());
@@ -9150,11 +9104,14 @@ SDValue SelectionDAG::getIndexedStridedLoadVP(SDValue OrigLoad, const SDLoc &DL,
auto MMOFlags =
SLD->getMemOperand()->getFlags() &
~(MachineMemOperand::MOInvariant | MachineMemOperand::MODereferenceable);
- return getStridedLoadVP(
- AM, SLD->getExtensionType(), OrigLoad.getValueType(), DL, SLD->getChain(),
- Base, Offset, SLD->getStride(), SLD->getMask(), SLD->getVectorLength(),
- SLD->getPointerInfo(), SLD->getMemoryVT(), SLD->getAlign(), MMOFlags,
- SLD->getAAInfo(), nullptr, SLD->isExpandingLoad());
+ MachineFunction &MF = getMachineFunction();
+ MachineMemOperand *MMO = MF.getMachineMemOperand(
+ SLD->getPointerInfo(), MMOFlags, SLD->getMemOperand()->getSize(),
+ SLD->getOriginalAlign(), SLD->getAAInfo());
+ return getStridedLoadVP(AM, SLD->getExtensionType(), OrigLoad.getValueType(),
+ DL, SLD->getChain(), Base, Offset, SLD->getStride(),
+ SLD->getMask(), SLD->getVectorLength(),
+ SLD->getMemoryVT(), MMO, SLD->isExpandingLoad());
}
SDValue SelectionDAG::getStridedStoreVP(SDValue Chain, const SDLoc &DL,
@@ -9193,26 +9150,6 @@ SDValue SelectionDAG::getStridedStoreVP(SDValue Chain, const SDLoc &DL,
return V;
}
-SDValue SelectionDAG::getTruncStridedStoreVP(
- SDValue Chain, const SDLoc &DL, SDValue Val, SDValue Ptr, SDValue Stride,
- SDValue Mask, SDValue EVL, MachinePointerInfo PtrInfo, EVT SVT,
- Align Alignment, MachineMemOperand::Flags MMOFlags, const AAMDNodes &AAInfo,
- bool IsCompressing) {
- assert(Chain.getValueType() == MVT::Other && "Invalid chain type");
-
- MMOFlags |= MachineMemOperand::MOStore;
- assert((MMOFlags & MachineMemOperand::MOLoad) == 0);
-
- if (PtrInfo.V.isNull())
- PtrInfo = InferPointerInfo(PtrInfo, *this, Ptr);
-
- MachineFunction &MF = getMachineFunction();
- MachineMemOperand *MMO = MF.getMachineMemOperand(
- PtrInfo, MMOFlags, MemoryLocation::UnknownSize, Alignment, AAInfo);
- return getTruncStridedStoreVP(Chain, DL, Val, Ptr, Stride, Mask, EVL, SVT,
- MMO, IsCompressing);
-}
-
SDValue SelectionDAG::getTruncStridedStoreVP(SDValue Chain, const SDLoc &DL,
SDValue Val, SDValue Ptr,
SDValue Stride, SDValue Mask,
diff --git a/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp b/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp
index e893a5b616d3..ee600d389c2c 100644
--- a/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp
@@ -1241,17 +1241,30 @@ void SelectionDAGBuilder::visitDbgInfo(const Instruction &I) {
It->Expr, Vals.size() > 1, It->DL, SDNodeOrder);
}
}
- // We must early-exit here to prevent any DPValues from being emitted below,
- // as we have just emitted the debug values resulting from assignment
- // tracking analysis, making any existing DPValues redundant (and probably
- // less correct).
- return;
}
+ // We must skip DPValues if they've already been processed above as we
+ // have just emitted the debug values resulting from assignment tracking
+ // analysis, making any existing DPValues redundant (and probably less
+ // correct). We still need to process DPLabels. This does sink DPLabels
+  // to the bottom of the group of debug records. That shouldn't be important
+  // as it does so deterministically, and ordering between DPLabels and DPValues
+ // is immaterial (other than for MIR/IR printing).
+ bool SkipDPValues = DAG.getFunctionVarLocs();
  // If there is any debug-info attached to this instruction, in the form of
- // DPValue non-instruction debug-info records.
- for (DbgRecord &DPR : I.getDbgValueRange()) {
- DPValue &DPV = cast<DPValue>(DPR);
+ // DbgRecord non-instruction debug-info records.
+ for (DbgRecord &DR : I.getDbgValueRange()) {
+ if (DPLabel *DPL = dyn_cast<DPLabel>(&DR)) {
+ assert(DPL->getLabel() && "Missing label");
+ SDDbgLabel *SDV =
+ DAG.getDbgLabel(DPL->getLabel(), DPL->getDebugLoc(), SDNodeOrder);
+ DAG.AddDbgLabel(SDV);
+ continue;
+ }
+
+ if (SkipDPValues)
+ continue;
+ DPValue &DPV = cast<DPValue>(DR);
DILocalVariable *Variable = DPV.getVariable();
DIExpression *Expression = DPV.getExpression();
dropDanglingDebugInfo(Variable, Expression);
diff --git a/llvm/lib/CodeGen/SelectionDAG/TargetLowering.cpp b/llvm/lib/CodeGen/SelectionDAG/TargetLowering.cpp
index a4c5167ade37..07fb89127a73 100644
--- a/llvm/lib/CodeGen/SelectionDAG/TargetLowering.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/TargetLowering.cpp
@@ -10948,12 +10948,10 @@ SDValue TargetLowering::expandFP_ROUND(SDNode *Node, SelectionDAG &DAG) const {
Op = expandRoundInexactToOdd(F32, Op, dl, DAG);
Op = DAG.getNode(ISD::BITCAST, dl, I32, Op);
- // Extract the sign bit and exponent.
- SDValue SignBitAndExponentField = DAG.getNode(
- ISD::AND, dl, I32, Op, DAG.getConstant(0xff800000, dl, I32));
- // Set the quiet bit.
- SDValue NaN = DAG.getNode(ISD::OR, dl, I32, SignBitAndExponentField,
- DAG.getConstant(0x400000, dl, I32));
+ // Conversions should set NaN's quiet bit. This also prevents NaNs from
+ // turning into infinities.
+ SDValue NaN =
+ DAG.getNode(ISD::OR, dl, I32, Op, DAG.getConstant(0x400000, dl, I32));
// Factor in the contribution of the low 16 bits.
SDValue One = DAG.getConstant(1, dl, I32);
diff --git a/llvm/lib/DebugInfo/DWARF/DWARFAcceleratorTable.cpp b/llvm/lib/DebugInfo/DWARF/DWARFAcceleratorTable.cpp
index 78f819dd052a..9c65d85985f1 100644
--- a/llvm/lib/DebugInfo/DWARF/DWARFAcceleratorTable.cpp
+++ b/llvm/lib/DebugInfo/DWARF/DWARFAcceleratorTable.cpp
@@ -510,7 +510,7 @@ DWARFDebugNames::Abbrev DWARFDebugNames::AbbrevMapInfo::getTombstoneKey() {
Expected<DWARFDebugNames::AttributeEncoding>
DWARFDebugNames::NameIndex::extractAttributeEncoding(uint64_t *Offset) {
- if (*Offset >= EntriesBase) {
+ if (*Offset >= Offsets.EntriesBase) {
return createStringError(errc::illegal_byte_sequence,
"Incorrectly terminated abbreviation table.");
}
@@ -536,7 +536,7 @@ DWARFDebugNames::NameIndex::extractAttributeEncodings(uint64_t *Offset) {
Expected<DWARFDebugNames::Abbrev>
DWARFDebugNames::NameIndex::extractAbbrev(uint64_t *Offset) {
- if (*Offset >= EntriesBase) {
+ if (*Offset >= Offsets.EntriesBase) {
return createStringError(errc::illegal_byte_sequence,
"Incorrectly terminated abbreviation table.");
}
@@ -552,32 +552,50 @@ DWARFDebugNames::NameIndex::extractAbbrev(uint64_t *Offset) {
return Abbrev(Code, dwarf::Tag(Tag), AbbrevOffset, std::move(*AttrEncOr));
}
+void llvm::findDebugNamesOffsets(
+ DWARFDebugNames::DWARFDebugNamesOffsets &Offsets, uint64_t HdrSize,
+ dwarf::DwarfFormat Format, const DWARFDebugNames::Header &Hdr) {
+ uint32_t DwarfSize = (Format == llvm::dwarf::DwarfFormat::DWARF64) ? 8 : 4;
+ uint64_t Offset = HdrSize;
+ Offsets.CUsBase = Offset;
+ Offset += Hdr.CompUnitCount * DwarfSize;
+ Offset += Hdr.LocalTypeUnitCount * DwarfSize;
+ Offset += Hdr.ForeignTypeUnitCount * 8;
+
+ Offsets.BucketsBase = Offset;
+ Offset += Hdr.BucketCount * 4;
+
+ Offsets.HashesBase = Offset;
+ if (Hdr.BucketCount > 0)
+ Offset += Hdr.NameCount * 4;
+
+ Offsets.StringOffsetsBase = Offset;
+ Offset += Hdr.NameCount * DwarfSize;
+
+ Offsets.EntryOffsetsBase = Offset;
+ Offset += Hdr.NameCount * DwarfSize;
+
+ Offset += Hdr.AbbrevTableSize;
+ Offsets.EntriesBase = Offset;
+}
+
Error DWARFDebugNames::NameIndex::extract() {
const DWARFDataExtractor &AS = Section.AccelSection;
- uint64_t Offset = Base;
- if (Error E = Hdr.extract(AS, &Offset))
+ uint64_t hdrSize = Base;
+ if (Error E = Hdr.extract(AS, &hdrSize))
return E;
const unsigned SectionOffsetSize = dwarf::getDwarfOffsetByteSize(Hdr.Format);
- CUsBase = Offset;
- Offset += Hdr.CompUnitCount * SectionOffsetSize;
- Offset += Hdr.LocalTypeUnitCount * SectionOffsetSize;
- Offset += Hdr.ForeignTypeUnitCount * 8;
- BucketsBase = Offset;
- Offset += Hdr.BucketCount * 4;
- HashesBase = Offset;
- if (Hdr.BucketCount > 0)
- Offset += Hdr.NameCount * 4;
- StringOffsetsBase = Offset;
- Offset += Hdr.NameCount * SectionOffsetSize;
- EntryOffsetsBase = Offset;
- Offset += Hdr.NameCount * SectionOffsetSize;
+ findDebugNamesOffsets(Offsets, hdrSize, Hdr.Format, Hdr);
+
+ uint64_t Offset =
+ Offsets.EntryOffsetsBase + (Hdr.NameCount * SectionOffsetSize);
if (!AS.isValidOffsetForDataOfSize(Offset, Hdr.AbbrevTableSize))
return createStringError(errc::illegal_byte_sequence,
"Section too small: cannot read abbreviations.");
- EntriesBase = Offset + Hdr.AbbrevTableSize;
+ Offsets.EntriesBase = Offset + Hdr.AbbrevTableSize;
for (;;) {
auto AbbrevOr = extractAbbrev(&Offset);
@@ -679,7 +697,7 @@ void DWARFDebugNames::Entry::dumpParentIdx(
return;
}
- auto AbsoluteOffset = NameIdx->EntriesBase + FormValue.getRawUValue();
+ auto AbsoluteOffset = NameIdx->Offsets.EntriesBase + FormValue.getRawUValue();
W.getOStream() << "Entry @ 0x" + Twine::utohexstr(AbsoluteOffset);
}
@@ -708,14 +726,15 @@ std::error_code DWARFDebugNames::SentinelError::convertToErrorCode() const {
uint64_t DWARFDebugNames::NameIndex::getCUOffset(uint32_t CU) const {
assert(CU < Hdr.CompUnitCount);
const unsigned SectionOffsetSize = dwarf::getDwarfOffsetByteSize(Hdr.Format);
- uint64_t Offset = CUsBase + SectionOffsetSize * CU;
+ uint64_t Offset = Offsets.CUsBase + SectionOffsetSize * CU;
return Section.AccelSection.getRelocatedValue(SectionOffsetSize, &Offset);
}
uint64_t DWARFDebugNames::NameIndex::getLocalTUOffset(uint32_t TU) const {
assert(TU < Hdr.LocalTypeUnitCount);
const unsigned SectionOffsetSize = dwarf::getDwarfOffsetByteSize(Hdr.Format);
- uint64_t Offset = CUsBase + SectionOffsetSize * (Hdr.CompUnitCount + TU);
+ uint64_t Offset =
+ Offsets.CUsBase + SectionOffsetSize * (Hdr.CompUnitCount + TU);
return Section.AccelSection.getRelocatedValue(SectionOffsetSize, &Offset);
}
@@ -723,7 +742,7 @@ uint64_t DWARFDebugNames::NameIndex::getForeignTUSignature(uint32_t TU) const {
assert(TU < Hdr.ForeignTypeUnitCount);
const unsigned SectionOffsetSize = dwarf::getDwarfOffsetByteSize(Hdr.Format);
uint64_t Offset =
- CUsBase +
+ Offsets.CUsBase +
SectionOffsetSize * (Hdr.CompUnitCount + Hdr.LocalTypeUnitCount) + 8 * TU;
return Section.AccelSection.getU64(&Offset);
}
@@ -759,28 +778,28 @@ DWARFDebugNames::NameIndex::getNameTableEntry(uint32_t Index) const {
assert(0 < Index && Index <= Hdr.NameCount);
const unsigned SectionOffsetSize = dwarf::getDwarfOffsetByteSize(Hdr.Format);
uint64_t StringOffsetOffset =
- StringOffsetsBase + SectionOffsetSize * (Index - 1);
+ Offsets.StringOffsetsBase + SectionOffsetSize * (Index - 1);
uint64_t EntryOffsetOffset =
- EntryOffsetsBase + SectionOffsetSize * (Index - 1);
+ Offsets.EntryOffsetsBase + SectionOffsetSize * (Index - 1);
const DWARFDataExtractor &AS = Section.AccelSection;
uint64_t StringOffset =
AS.getRelocatedValue(SectionOffsetSize, &StringOffsetOffset);
uint64_t EntryOffset = AS.getUnsigned(&EntryOffsetOffset, SectionOffsetSize);
- EntryOffset += EntriesBase;
+ EntryOffset += Offsets.EntriesBase;
return {Section.StringSection, Index, StringOffset, EntryOffset};
}
uint32_t
DWARFDebugNames::NameIndex::getBucketArrayEntry(uint32_t Bucket) const {
assert(Bucket < Hdr.BucketCount);
- uint64_t BucketOffset = BucketsBase + 4 * Bucket;
+ uint64_t BucketOffset = Offsets.BucketsBase + 4 * Bucket;
return Section.AccelSection.getU32(&BucketOffset);
}
uint32_t DWARFDebugNames::NameIndex::getHashArrayEntry(uint32_t Index) const {
assert(0 < Index && Index <= Hdr.NameCount);
- uint64_t HashOffset = HashesBase + 4 * (Index - 1);
+ uint64_t HashOffset = Offsets.HashesBase + 4 * (Index - 1);
return Section.AccelSection.getU32(&HashOffset);
}
diff --git a/llvm/lib/DebugInfo/DWARF/DWARFDebugLine.cpp b/llvm/lib/DebugInfo/DWARF/DWARFDebugLine.cpp
index 28f05644a3aa..572628f45fc2 100644
--- a/llvm/lib/DebugInfo/DWARF/DWARFDebugLine.cpp
+++ b/llvm/lib/DebugInfo/DWARF/DWARFDebugLine.cpp
@@ -389,9 +389,25 @@ Error DWARFDebugLine::Prologue::parse(
if (getVersion() >= 5) {
FormParams.AddrSize = DebugLineData.getU8(Cursor);
- assert((!Cursor || DebugLineData.getAddressSize() == 0 ||
- DebugLineData.getAddressSize() == getAddressSize()) &&
- "Line table header and data extractor disagree");
+ const uint8_t DataAddrSize = DebugLineData.getAddressSize();
+ const uint8_t PrologueAddrSize = getAddressSize();
+ if (Cursor) {
+ if (DataAddrSize == 0) {
+ if (PrologueAddrSize != 4 && PrologueAddrSize != 8) {
+ RecoverableErrorHandler(createStringError(
+ errc::not_supported,
+ "parsing line table prologue at offset 0x%8.8" PRIx64
+ ": invalid address size %" PRIu8,
+ PrologueOffset, PrologueAddrSize));
+ }
+ } else if (DataAddrSize != PrologueAddrSize) {
+ RecoverableErrorHandler(createStringError(
+ errc::not_supported,
+ "parsing line table prologue at offset 0x%8.8" PRIx64 ": address "
+ "size %" PRIu8 " doesn't match architecture address size %" PRIu8,
+ PrologueOffset, PrologueAddrSize, DataAddrSize));
+ }
+ }
SegSelectorSize = DebugLineData.getU8(Cursor);
}
diff --git a/llvm/lib/IR/AsmWriter.cpp b/llvm/lib/IR/AsmWriter.cpp
index 251485a403fe..fba404c9b027 100644
--- a/llvm/lib/IR/AsmWriter.cpp
+++ b/llvm/lib/IR/AsmWriter.cpp
@@ -292,8 +292,8 @@ static const Module *getModuleFromDPI(const DPMarker *Marker) {
return M ? M->getParent() : nullptr;
}
-static const Module *getModuleFromDPI(const DPValue *DPV) {
- return DPV->getMarker() ? getModuleFromDPI(DPV->getMarker()) : nullptr;
+static const Module *getModuleFromDPI(const DbgRecord *DR) {
+ return DR->getMarker() ? getModuleFromDPI(DR->getMarker()) : nullptr;
}
static void PrintCallingConv(unsigned cc, raw_ostream &Out) {
@@ -1141,12 +1141,14 @@ void SlotTracker::processFunctionMetadata(const Function &F) {
void SlotTracker::processDbgRecordMetadata(const DbgRecord &DR) {
if (const DPValue *DPV = dyn_cast<const DPValue>(&DR)) {
CreateMetadataSlot(DPV->getVariable());
- CreateMetadataSlot(DPV->getDebugLoc());
if (DPV->isDbgAssign())
CreateMetadataSlot(DPV->getAssignID());
+ } else if (const DPLabel *DPL = dyn_cast<const DPLabel>(&DR)) {
+ CreateMetadataSlot(DPL->getLabel());
} else {
llvm_unreachable("unsupported DbgRecord kind");
}
+ CreateMetadataSlot(DR.getDebugLoc());
}
void SlotTracker::processInstructionMetadata(const Instruction &I) {
@@ -1505,16 +1507,39 @@ static void WriteAPFloatInternal(raw_ostream &Out, const APFloat &APF) {
static void WriteConstantInternal(raw_ostream &Out, const Constant *CV,
AsmWriterContext &WriterCtx) {
if (const ConstantInt *CI = dyn_cast<ConstantInt>(CV)) {
- if (CI->getType()->isIntegerTy(1)) {
- Out << (CI->getZExtValue() ? "true" : "false");
- return;
+ Type *Ty = CI->getType();
+
+ if (Ty->isVectorTy()) {
+ Out << "splat (";
+ WriterCtx.TypePrinter->print(Ty->getScalarType(), Out);
+ Out << " ";
}
- Out << CI->getValue();
+
+ if (Ty->getScalarType()->isIntegerTy(1))
+ Out << (CI->getZExtValue() ? "true" : "false");
+ else
+ Out << CI->getValue();
+
+ if (Ty->isVectorTy())
+ Out << ")";
+
return;
}
if (const ConstantFP *CFP = dyn_cast<ConstantFP>(CV)) {
+ Type *Ty = CFP->getType();
+
+ if (Ty->isVectorTy()) {
+ Out << "splat (";
+ WriterCtx.TypePrinter->print(Ty->getScalarType(), Out);
+ Out << " ";
+ }
+
WriteAPFloatInternal(Out, CFP->getValueAPF());
+
+ if (Ty->isVectorTy())
+ Out << ")";
+
return;
}
@@ -2676,6 +2701,7 @@ public:
void printInstruction(const Instruction &I);
void printDPMarker(const DPMarker &DPI);
void printDPValue(const DPValue &DPI);
+ void printDPLabel(const DPLabel &DPL);
void printDbgRecord(const DbgRecord &DPI);
void printUseListOrder(const Value *V, const std::vector<unsigned> &Shuffle);
@@ -4579,8 +4605,10 @@ void AssemblyWriter::printDPMarker(const DPMarker &Marker) {
void AssemblyWriter::printDbgRecord(const DbgRecord &DR) {
if (auto *DPV = dyn_cast<DPValue>(&DR))
printDPValue(*DPV);
+ else if (auto *DPL = dyn_cast<DPLabel>(&DR))
+ printDPLabel(*DPL);
else
- llvm_unreachable("unsupported dbg record");
+ llvm_unreachable("Unexpected DbgRecord kind");
}
void AssemblyWriter::printDPValue(const DPValue &Value) {
@@ -4622,6 +4650,16 @@ void AssemblyWriter::printDPValue(const DPValue &Value) {
Out << " }";
}
+void AssemblyWriter::printDPLabel(const DPLabel &Label) {
+ // There's no formal representation of a DPLabel -- print purely as
+ // a debugging aid.
+ Out << " DPLabel { ";
+ auto WriterCtx = getContext();
+ WriteAsOperandInternal(Out, Label.getLabel(), WriterCtx, true);
+ Out << " marker @" << Label.getMarker();
+ Out << " }";
+}
+
void AssemblyWriter::printMetadataAttachments(
const SmallVectorImpl<std::pair<unsigned, MDNode *>> &MDs,
StringRef Separator) {
@@ -4885,6 +4923,12 @@ void DPMarker::print(raw_ostream &ROS, ModuleSlotTracker &MST,
W.printDPMarker(*this);
}
+void DPLabel::print(raw_ostream &ROS, bool IsForDebug) const {
+
+ ModuleSlotTracker MST(getModuleFromDPI(this), true);
+ print(ROS, MST, IsForDebug);
+}
+
void DPValue::print(raw_ostream &ROS, ModuleSlotTracker &MST,
bool IsForDebug) const {
// There's no formal representation of a DPValue -- print purely as a
@@ -4904,6 +4948,24 @@ void DPValue::print(raw_ostream &ROS, ModuleSlotTracker &MST,
W.printDPValue(*this);
}
+void DPLabel::print(raw_ostream &ROS, ModuleSlotTracker &MST,
+ bool IsForDebug) const {
+  // There's no formal representation of a DPLabel -- print purely as
+ // a debugging aid.
+ formatted_raw_ostream OS(ROS);
+ SlotTracker EmptySlotTable(static_cast<const Module *>(nullptr));
+ SlotTracker &SlotTable =
+ MST.getMachine() ? *MST.getMachine() : EmptySlotTable;
+ auto incorporateFunction = [&](const Function *F) {
+ if (F)
+ MST.incorporateFunction(*F);
+ };
+ incorporateFunction(Marker->getParent() ? Marker->getParent()->getParent()
+ : nullptr);
+ AssemblyWriter W(OS, SlotTable, getModuleFromDPI(this), nullptr, IsForDebug);
+ W.printDPLabel(*this);
+}
+
void Value::print(raw_ostream &ROS, bool IsForDebug) const {
bool ShouldInitializeAllMetadata = false;
if (auto *I = dyn_cast<Instruction>(this))
diff --git a/llvm/lib/IR/Attributes.cpp b/llvm/lib/IR/Attributes.cpp
index fd5160209506..19076771ff2e 100644
--- a/llvm/lib/IR/Attributes.cpp
+++ b/llvm/lib/IR/Attributes.cpp
@@ -2045,6 +2045,11 @@ static bool isEqual(const Function &Caller, const Function &Callee) {
Callee.getFnAttribute(AttrClass::getKind());
}
+static bool isEqual(const Function &Caller, const Function &Callee,
+ const StringRef &AttrName) {
+ return Caller.getFnAttribute(AttrName) == Callee.getFnAttribute(AttrName);
+}
+
/// Compute the logical AND of the attributes of the caller and the
/// callee.
///
diff --git a/llvm/lib/IR/BasicBlock.cpp b/llvm/lib/IR/BasicBlock.cpp
index 0680754444f1..6ea876fde5ec 100644
--- a/llvm/lib/IR/BasicBlock.cpp
+++ b/llvm/lib/IR/BasicBlock.cpp
@@ -81,6 +81,12 @@ void BasicBlock::convertToNewDbgValues() {
continue;
}
+ if (DbgLabelInst *DLI = dyn_cast<DbgLabelInst>(&I)) {
+ DPVals.push_back(new DPLabel(DLI->getLabel(), DLI->getDebugLoc()));
+ DLI->eraseFromParent();
+ continue;
+ }
+
if (DPVals.empty())
continue;
@@ -107,16 +113,12 @@ void BasicBlock::convertFromNewDbgValues() {
continue;
DPMarker &Marker = *Inst.DbgMarker;
- for (DbgRecord &DR : Marker.getDbgValueRange()) {
- if (auto *DPV = dyn_cast<DPValue>(&DR))
- InstList.insert(Inst.getIterator(),
- DPV->createDebugIntrinsic(getModule(), nullptr));
- else
- llvm_unreachable("unsupported DbgRecord kind");
- }
+ for (DbgRecord &DR : Marker.getDbgValueRange())
+ InstList.insert(Inst.getIterator(),
+ DR.createDebugIntrinsic(getModule(), nullptr));
Marker.eraseFromParent();
- };
+ }
// Assume no trailing DPValues: we could technically create them at the end
  // of the block, after a terminator, but this would be non-canonical and
diff --git a/llvm/lib/IR/Constants.cpp b/llvm/lib/IR/Constants.cpp
index a38b912164b1..e6b92aad392f 100644
--- a/llvm/lib/IR/Constants.cpp
+++ b/llvm/lib/IR/Constants.cpp
@@ -35,6 +35,20 @@
using namespace llvm;
using namespace PatternMatch;
+// A set of temporary options to help migrate how splats are represented.
+static cl::opt<bool> UseConstantIntForFixedLengthSplat(
+ "use-constant-int-for-fixed-length-splat", cl::init(false), cl::Hidden,
+ cl::desc("Use ConstantInt's native fixed-length vector splat support."));
+static cl::opt<bool> UseConstantFPForFixedLengthSplat(
+ "use-constant-fp-for-fixed-length-splat", cl::init(false), cl::Hidden,
+ cl::desc("Use ConstantFP's native fixed-length vector splat support."));
+static cl::opt<bool> UseConstantIntForScalableSplat(
+ "use-constant-int-for-scalable-splat", cl::init(false), cl::Hidden,
+ cl::desc("Use ConstantInt's native scalable vector splat support."));
+static cl::opt<bool> UseConstantFPForScalableSplat(
+ "use-constant-fp-for-scalable-splat", cl::init(false), cl::Hidden,
+ cl::desc("Use ConstantFP's native scalable vector splat support."));
+
//===----------------------------------------------------------------------===//
// Constant Class
//===----------------------------------------------------------------------===//
@@ -825,9 +839,11 @@ bool Constant::isManifestConstant() const {
// ConstantInt
//===----------------------------------------------------------------------===//
-ConstantInt::ConstantInt(IntegerType *Ty, const APInt &V)
+ConstantInt::ConstantInt(Type *Ty, const APInt &V)
: ConstantData(Ty, ConstantIntVal), Val(V) {
- assert(V.getBitWidth() == Ty->getBitWidth() && "Invalid constant for type");
+ assert(V.getBitWidth() ==
+ cast<IntegerType>(Ty->getScalarType())->getBitWidth() &&
+ "Invalid constant for type");
}
ConstantInt *ConstantInt::getTrue(LLVMContext &Context) {
@@ -885,6 +901,26 @@ ConstantInt *ConstantInt::get(LLVMContext &Context, const APInt &V) {
return Slot.get();
}
+// Get a ConstantInt vector with each lane set to the same APInt.
+ConstantInt *ConstantInt::get(LLVMContext &Context, ElementCount EC,
+ const APInt &V) {
+ // Get an existing value or the insertion position.
+ std::unique_ptr<ConstantInt> &Slot =
+ Context.pImpl->IntSplatConstants[std::make_pair(EC, V)];
+ if (!Slot) {
+ IntegerType *ITy = IntegerType::get(Context, V.getBitWidth());
+ VectorType *VTy = VectorType::get(ITy, EC);
+ Slot.reset(new ConstantInt(VTy, V));
+ }
+
+#ifndef NDEBUG
+ IntegerType *ITy = IntegerType::get(Context, V.getBitWidth());
+ VectorType *VTy = VectorType::get(ITy, EC);
+ assert(Slot->getType() == VTy);
+#endif
+ return Slot.get();
+}
+
Constant *ConstantInt::get(Type *Ty, uint64_t V, bool isSigned) {
Constant *C = get(cast<IntegerType>(Ty->getScalarType()), V, isSigned);
@@ -1024,6 +1060,26 @@ ConstantFP* ConstantFP::get(LLVMContext &Context, const APFloat& V) {
return Slot.get();
}
+// Get a ConstantFP vector with each lane set to the same APFloat.
+ConstantFP *ConstantFP::get(LLVMContext &Context, ElementCount EC,
+ const APFloat &V) {
+ // Get an existing value or the insertion position.
+ std::unique_ptr<ConstantFP> &Slot =
+ Context.pImpl->FPSplatConstants[std::make_pair(EC, V)];
+ if (!Slot) {
+ Type *EltTy = Type::getFloatingPointTy(Context, V.getSemantics());
+ VectorType *VTy = VectorType::get(EltTy, EC);
+ Slot.reset(new ConstantFP(VTy, V));
+ }
+
+#ifndef NDEBUG
+ Type *EltTy = Type::getFloatingPointTy(Context, V.getSemantics());
+ VectorType *VTy = VectorType::get(EltTy, EC);
+ assert(Slot->getType() == VTy);
+#endif
+ return Slot.get();
+}
+
Constant *ConstantFP::getInfinity(Type *Ty, bool Negative) {
const fltSemantics &Semantics = Ty->getScalarType()->getFltSemantics();
Constant *C = get(Ty->getContext(), APFloat::getInf(Semantics, Negative));
@@ -1036,7 +1092,7 @@ Constant *ConstantFP::getInfinity(Type *Ty, bool Negative) {
ConstantFP::ConstantFP(Type *Ty, const APFloat &V)
: ConstantData(Ty, ConstantFPVal), Val(V) {
- assert(&V.getSemantics() == &Ty->getFltSemantics() &&
+ assert(&V.getSemantics() == &Ty->getScalarType()->getFltSemantics() &&
"FP type Mismatch");
}
@@ -1356,11 +1412,13 @@ Constant *ConstantVector::getImpl(ArrayRef<Constant*> V) {
bool isZero = C->isNullValue();
bool isUndef = isa<UndefValue>(C);
bool isPoison = isa<PoisonValue>(C);
+ bool isSplatFP = UseConstantFPForFixedLengthSplat && isa<ConstantFP>(C);
+ bool isSplatInt = UseConstantIntForFixedLengthSplat && isa<ConstantInt>(C);
- if (isZero || isUndef) {
+ if (isZero || isUndef || isSplatFP || isSplatInt) {
for (unsigned i = 1, e = V.size(); i != e; ++i)
if (V[i] != C) {
- isZero = isUndef = isPoison = false;
+ isZero = isUndef = isPoison = isSplatFP = isSplatInt = false;
break;
}
}
@@ -1371,6 +1429,12 @@ Constant *ConstantVector::getImpl(ArrayRef<Constant*> V) {
return PoisonValue::get(T);
if (isUndef)
return UndefValue::get(T);
+ if (isSplatFP)
+ return ConstantFP::get(C->getContext(), T->getElementCount(),
+ cast<ConstantFP>(C)->getValue());
+ if (isSplatInt)
+ return ConstantInt::get(C->getContext(), T->getElementCount(),
+ cast<ConstantInt>(C)->getValue());
// Check to see if all of the elements are ConstantFP or ConstantInt and if
// the element type is compatible with ConstantDataVector. If so, use it.
@@ -1384,6 +1448,16 @@ Constant *ConstantVector::getImpl(ArrayRef<Constant*> V) {
Constant *ConstantVector::getSplat(ElementCount EC, Constant *V) {
if (!EC.isScalable()) {
+ // Maintain special handling of zero.
+ if (!V->isNullValue()) {
+ if (UseConstantIntForFixedLengthSplat && isa<ConstantInt>(V))
+ return ConstantInt::get(V->getContext(), EC,
+ cast<ConstantInt>(V)->getValue());
+ if (UseConstantFPForFixedLengthSplat && isa<ConstantFP>(V))
+ return ConstantFP::get(V->getContext(), EC,
+ cast<ConstantFP>(V)->getValue());
+ }
+
// If this splat is compatible with ConstantDataVector, use it instead of
// ConstantVector.
if ((isa<ConstantFP>(V) || isa<ConstantInt>(V)) &&
@@ -1394,6 +1468,16 @@ Constant *ConstantVector::getSplat(ElementCount EC, Constant *V) {
return get(Elts);
}
+ // Maintain special handling of zero.
+ if (!V->isNullValue()) {
+ if (UseConstantIntForScalableSplat && isa<ConstantInt>(V))
+ return ConstantInt::get(V->getContext(), EC,
+ cast<ConstantInt>(V)->getValue());
+ if (UseConstantFPForScalableSplat && isa<ConstantFP>(V))
+ return ConstantFP::get(V->getContext(), EC,
+ cast<ConstantFP>(V)->getValue());
+ }
+
Type *VTy = VectorType::get(V->getType(), EC);
if (V->isNullValue())
diff --git a/llvm/lib/IR/DebugProgramInstruction.cpp b/llvm/lib/IR/DebugProgramInstruction.cpp
index eb18be5d817a..389bac4de6a1 100644
--- a/llvm/lib/IR/DebugProgramInstruction.cpp
+++ b/llvm/lib/IR/DebugProgramInstruction.cpp
@@ -64,6 +64,9 @@ void DbgRecord::deleteRecord() {
case ValueKind:
delete cast<DPValue>(this);
return;
+ case LabelKind:
+ delete cast<DPLabel>(this);
+ return;
}
llvm_unreachable("unsupported DbgRecord kind");
}
@@ -73,6 +76,9 @@ void DbgRecord::print(raw_ostream &O, bool IsForDebug) const {
case ValueKind:
cast<DPValue>(this)->print(O, IsForDebug);
return;
+ case LabelKind:
+ cast<DPLabel>(this)->print(O, IsForDebug);
+ return;
};
llvm_unreachable("unsupported DbgRecord kind");
}
@@ -83,6 +89,9 @@ void DbgRecord::print(raw_ostream &O, ModuleSlotTracker &MST,
case ValueKind:
cast<DPValue>(this)->print(O, MST, IsForDebug);
return;
+ case LabelKind:
+ cast<DPLabel>(this)->print(O, MST, IsForDebug);
+ return;
};
llvm_unreachable("unsupported DbgRecord kind");
}
@@ -93,16 +102,23 @@ bool DbgRecord::isIdenticalToWhenDefined(const DbgRecord &R) const {
switch (RecordKind) {
case ValueKind:
return cast<DPValue>(this)->isIdenticalToWhenDefined(*cast<DPValue>(&R));
+ case LabelKind:
+ return cast<DPLabel>(this)->getLabel() == cast<DPLabel>(R).getLabel();
};
llvm_unreachable("unsupported DbgRecord kind");
}
bool DbgRecord::isEquivalentTo(const DbgRecord &R) const {
- if (RecordKind != R.RecordKind)
- return false;
+ return getDebugLoc() == R.getDebugLoc() && isIdenticalToWhenDefined(R);
+}
+
+DbgInfoIntrinsic *
+DbgRecord::createDebugIntrinsic(Module *M, Instruction *InsertBefore) const {
switch (RecordKind) {
case ValueKind:
- return cast<DPValue>(this)->isEquivalentTo(*cast<DPValue>(&R));
+ return cast<DPValue>(this)->createDebugIntrinsic(M, InsertBefore);
+ case LabelKind:
+ return cast<DPLabel>(this)->createDebugIntrinsic(M, InsertBefore);
};
llvm_unreachable("unsupported DbgRecord kind");
}
@@ -307,12 +323,16 @@ DbgRecord *DbgRecord::clone() const {
switch (RecordKind) {
case ValueKind:
return cast<DPValue>(this)->clone();
+ case LabelKind:
+ return cast<DPLabel>(this)->clone();
};
llvm_unreachable("unsupported DbgRecord kind");
}
DPValue *DPValue::clone() const { return new DPValue(*this); }
+DPLabel *DPLabel::clone() const { return new DPLabel(Label, getDebugLoc()); }
+
DbgVariableIntrinsic *
DPValue::createDebugIntrinsic(Module *M, Instruction *InsertBefore) const {
[[maybe_unused]] DICompileUnit *Unit =
@@ -368,6 +388,20 @@ DPValue::createDebugIntrinsic(Module *M, Instruction *InsertBefore) const {
return DVI;
}
+DbgLabelInst *DPLabel::createDebugIntrinsic(Module *M,
+ Instruction *InsertBefore) const {
+ auto *LabelFn = Intrinsic::getDeclaration(M, Intrinsic::dbg_label);
+ Value *Args[] = {
+ MetadataAsValue::get(getDebugLoc()->getContext(), getLabel())};
+ DbgLabelInst *DbgLabel = cast<DbgLabelInst>(
+ CallInst::Create(LabelFn->getFunctionType(), LabelFn, Args));
+ DbgLabel->setTailCall();
+ DbgLabel->setDebugLoc(getDebugLoc());
+ if (InsertBefore)
+ DbgLabel->insertBefore(InsertBefore);
+ return DbgLabel;
+}
+
Value *DPValue::getAddress() const {
auto *MD = getRawAddress();
if (auto *V = dyn_cast<ValueAsMetadata>(MD))
diff --git a/llvm/lib/IR/Instructions.cpp b/llvm/lib/IR/Instructions.cpp
index ce0df53d9ffb..fc5c9b201487 100644
--- a/llvm/lib/IR/Instructions.cpp
+++ b/llvm/lib/IR/Instructions.cpp
@@ -3525,6 +3525,7 @@ CastInst *CastInst::CreateFPCast(Value *C, Type *Ty,
"Invalid cast");
unsigned SrcBits = C->getType()->getScalarSizeInBits();
unsigned DstBits = Ty->getScalarSizeInBits();
+ assert((C->getType() == Ty || SrcBits != DstBits) && "Invalid cast");
Instruction::CastOps opcode =
(SrcBits == DstBits ? Instruction::BitCast :
(SrcBits > DstBits ? Instruction::FPTrunc : Instruction::FPExt));
diff --git a/llvm/lib/IR/LLVMContextImpl.cpp b/llvm/lib/IR/LLVMContextImpl.cpp
index 15c90a4fe7b2..a0bf9cae7926 100644
--- a/llvm/lib/IR/LLVMContextImpl.cpp
+++ b/llvm/lib/IR/LLVMContextImpl.cpp
@@ -119,7 +119,9 @@ LLVMContextImpl::~LLVMContextImpl() {
IntZeroConstants.clear();
IntOneConstants.clear();
IntConstants.clear();
+ IntSplatConstants.clear();
FPConstants.clear();
+ FPSplatConstants.clear();
CDSConstants.clear();
// Destroy attribute node lists.
diff --git a/llvm/lib/IR/LLVMContextImpl.h b/llvm/lib/IR/LLVMContextImpl.h
index 6a2029134498..2ee1080a1ffa 100644
--- a/llvm/lib/IR/LLVMContextImpl.h
+++ b/llvm/lib/IR/LLVMContextImpl.h
@@ -1488,8 +1488,12 @@ public:
DenseMap<unsigned, std::unique_ptr<ConstantInt>> IntZeroConstants;
DenseMap<unsigned, std::unique_ptr<ConstantInt>> IntOneConstants;
DenseMap<APInt, std::unique_ptr<ConstantInt>> IntConstants;
+ DenseMap<std::pair<ElementCount, APInt>, std::unique_ptr<ConstantInt>>
+ IntSplatConstants;
DenseMap<APFloat, std::unique_ptr<ConstantFP>> FPConstants;
+ DenseMap<std::pair<ElementCount, APFloat>, std::unique_ptr<ConstantFP>>
+ FPSplatConstants;
FoldingSet<AttributeImpl> AttrsSet;
FoldingSet<AttributeListImpl> AttrsLists;
diff --git a/llvm/lib/LTO/LTOBackend.cpp b/llvm/lib/LTO/LTOBackend.cpp
index 7b3a7590dfa7..6cfe67779b1a 100644
--- a/llvm/lib/LTO/LTOBackend.cpp
+++ b/llvm/lib/LTO/LTOBackend.cpp
@@ -330,8 +330,6 @@ static void runNewPMPasses(const Config &Conf, Module &Mod, TargetMachine *TM,
report_fatal_error(Twine("unable to parse pass pipeline description '") +
Conf.OptPipeline + "': " + toString(std::move(Err)));
}
- } else if (Conf.UseDefaultPipeline) {
- MPM.addPass(PB.buildPerModuleDefaultPipeline(OL));
} else if (IsThinLTO) {
MPM.addPass(PB.buildThinLTODefaultPipeline(OL, ImportSummary));
} else {
diff --git a/llvm/lib/Object/SymbolSize.cpp b/llvm/lib/Object/SymbolSize.cpp
index cb20feffb710..635cd8373afb 100644
--- a/llvm/lib/Object/SymbolSize.cpp
+++ b/llvm/lib/Object/SymbolSize.cpp
@@ -65,6 +65,13 @@ llvm::object::computeSymbolSizes(const ObjectFile &O) {
return Ret;
}
+ if (const auto *E = dyn_cast<WasmObjectFile>(&O)) {
+ for (SymbolRef Sym : E->symbols()) {
+ Ret.push_back({Sym, E->getSymbolSize(Sym)});
+ }
+ return Ret;
+ }
+
// Collect sorted symbol addresses. Include dummy addresses for the end
// of each section.
std::vector<SymEntry> Addresses;
diff --git a/llvm/lib/Passes/PassBuilder.cpp b/llvm/lib/Passes/PassBuilder.cpp
index f26d95ab1e47..fed7a14c8a2e 100644
--- a/llvm/lib/Passes/PassBuilder.cpp
+++ b/llvm/lib/Passes/PassBuilder.cpp
@@ -91,6 +91,7 @@
#include "llvm/CodeGen/JMCInstrumenter.h"
#include "llvm/CodeGen/LowerEmuTLS.h"
#include "llvm/CodeGen/MIRPrinter.h"
+#include "llvm/CodeGen/MachinePassManager.h"
#include "llvm/CodeGen/SafeStack.h"
#include "llvm/CodeGen/SelectOptimize.h"
#include "llvm/CodeGen/ShadowStackGCLowering.h"
@@ -1260,6 +1261,28 @@ static bool isFunctionPassName(StringRef Name, CallbacksT &Callbacks) {
}
template <typename CallbacksT>
+static bool isMachineFunctionPassName(StringRef Name, CallbacksT &Callbacks) {
+ // Explicitly handle pass manager names.
+ if (Name == "machine-function")
+ return true;
+
+ // Explicitly handle custom-parsed pass names.
+ if (parseRepeatPassName(Name))
+ return true;
+
+#define MACHINE_FUNCTION_PASS(NAME, CREATE_PASS) \
+ if (Name == NAME) \
+ return true;
+#define MACHINE_FUNCTION_ANALYSIS(NAME, CREATE_PASS) \
+ if (Name == "require<" NAME ">" || Name == "invalidate<" NAME ">") \
+ return true;
+
+#include "llvm/Passes/MachinePassRegistry.def"
+
+ return callbacksAcceptPassName<MachineFunctionPassManager>(Name, Callbacks);
+}
+
+template <typename CallbacksT>
static bool isLoopNestPassName(StringRef Name, CallbacksT &Callbacks,
bool &UseMemorySSA) {
UseMemorySSA = false;
@@ -1394,6 +1417,13 @@ Error PassBuilder::parseModulePass(ModulePassManager &MPM,
MPM.addPass(createModuleToPostOrderCGSCCPassAdaptor(std::move(CGPM)));
return Error::success();
}
+ if (Name == "machine-function") {
+ MachineFunctionPassManager MFPM;
+ if (auto Err = parseMachinePassPipeline(MFPM, InnerPipeline))
+ return Err;
+ MPM.addPass(createModuleToMachineFunctionPassAdaptor(std::move(MFPM)));
+ return Error::success();
+ }
if (auto Params = parseFunctionPipelineName(Name)) {
if (Params->second)
return make_error<StringError>(
@@ -1874,8 +1904,8 @@ Error PassBuilder::parseMachinePass(MachineFunctionPassManager &MFPM,
}
#include "llvm/Passes/MachinePassRegistry.def"
- for (auto &C : MachinePipelineParsingCallbacks)
- if (C(Name, MFPM))
+ for (auto &C : MachineFunctionPipelineParsingCallbacks)
+ if (C(Name, MFPM, E.InnerPipeline))
return Error::success();
return make_error<StringError>(
formatv("unknown machine pass '{0}'", Name).str(),
@@ -1942,7 +1972,8 @@ Error PassBuilder::parseCGSCCPassPipeline(CGSCCPassManager &CGPM,
void PassBuilder::crossRegisterProxies(LoopAnalysisManager &LAM,
FunctionAnalysisManager &FAM,
CGSCCAnalysisManager &CGAM,
- ModuleAnalysisManager &MAM) {
+ ModuleAnalysisManager &MAM,
+ MachineFunctionAnalysisManager *MFAM) {
MAM.registerPass([&] { return FunctionAnalysisManagerModuleProxy(FAM); });
MAM.registerPass([&] { return CGSCCAnalysisManagerModuleProxy(CGAM); });
CGAM.registerPass([&] { return ModuleAnalysisManagerCGSCCProxy(MAM); });
@@ -1950,6 +1981,14 @@ void PassBuilder::crossRegisterProxies(LoopAnalysisManager &LAM,
FAM.registerPass([&] { return ModuleAnalysisManagerFunctionProxy(MAM); });
FAM.registerPass([&] { return LoopAnalysisManagerFunctionProxy(LAM); });
LAM.registerPass([&] { return FunctionAnalysisManagerLoopProxy(FAM); });
+ if (MFAM) {
+ MAM.registerPass(
+ [&] { return MachineFunctionAnalysisManagerModuleProxy(*MFAM); });
+ MFAM->registerPass(
+ [&] { return ModuleAnalysisManagerMachineFunctionProxy(MAM); });
+ MFAM->registerPass(
+ [&] { return FunctionAnalysisManagerMachineFunctionProxy(FAM); });
+ }
}
Error PassBuilder::parseModulePassPipeline(ModulePassManager &MPM,
@@ -1991,6 +2030,9 @@ Error PassBuilder::parsePassPipeline(ModulePassManager &MPM,
UseMemorySSA)) {
Pipeline = {{"function", {{UseMemorySSA ? "loop-mssa" : "loop",
std::move(*Pipeline)}}}};
+ } else if (isMachineFunctionPassName(
+ FirstName, MachineFunctionPipelineParsingCallbacks)) {
+ Pipeline = {{"machine-function", std::move(*Pipeline)}};
} else {
for (auto &C : TopLevelPipelineParsingCallbacks)
if (C(MPM, *Pipeline))
diff --git a/llvm/lib/Passes/PassBuilderPipelines.cpp b/llvm/lib/Passes/PassBuilderPipelines.cpp
index 142bd50b3798..17b55b63ac03 100644
--- a/llvm/lib/Passes/PassBuilderPipelines.cpp
+++ b/llvm/lib/Passes/PassBuilderPipelines.cpp
@@ -247,7 +247,7 @@ static cl::opt<bool>
static cl::opt<bool> EnableJumpTableToSwitch(
"enable-jump-table-to-switch",
- cl::desc("Enable JumpTableToSwitch pass (default = off)"));
+ cl::desc("Enable JumpTableToSwitch pass (default = on)"), cl::init(true));
// This option is used in simplifying testing SampleFDO optimizations for
// profile loading.
diff --git a/llvm/lib/Target/AArch64/AArch64Arm64ECCallLowering.cpp b/llvm/lib/Target/AArch64/AArch64Arm64ECCallLowering.cpp
index c62582ac01a4..a99856dcc943 100644
--- a/llvm/lib/Target/AArch64/AArch64Arm64ECCallLowering.cpp
+++ b/llvm/lib/Target/AArch64/AArch64Arm64ECCallLowering.cpp
@@ -712,7 +712,7 @@ bool AArch64Arm64ECCallLowering::processFunction(
// name (emitting the definition) can grab it from the metadata.
//
// FIXME: Handle functions with weak linkage?
- if (F.hasExternalLinkage() || F.hasWeakLinkage() || F.hasLinkOnceLinkage()) {
+ if (!F.hasLocalLinkage() || F.hasAddressTaken()) {
if (std::optional<std::string> MangledName =
getArm64ECMangledFunctionName(F.getName().str())) {
F.setMetadata("arm64ec_unmangled_name",
diff --git a/llvm/lib/Target/AArch64/AArch64AsmPrinter.cpp b/llvm/lib/Target/AArch64/AArch64AsmPrinter.cpp
index 5b5ffd7b2feb..4fa719ad67cf 100644
--- a/llvm/lib/Target/AArch64/AArch64AsmPrinter.cpp
+++ b/llvm/lib/Target/AArch64/AArch64AsmPrinter.cpp
@@ -1121,7 +1121,8 @@ void AArch64AsmPrinter::emitFunctionEntryLabel() {
TS->emitDirectiveVariantPCS(CurrentFnSym);
}
- if (TM.getTargetTriple().isWindowsArm64EC()) {
+ if (TM.getTargetTriple().isWindowsArm64EC() &&
+ !MF->getFunction().hasLocalLinkage()) {
// For ARM64EC targets, a function definition's name is mangled differently
// from the normal symbol. We emit the alias from the unmangled symbol to
// mangled symbol name here.
diff --git a/llvm/lib/Target/AArch64/AArch64FrameLowering.cpp b/llvm/lib/Target/AArch64/AArch64FrameLowering.cpp
index 3485edb69c91..5cc612e89162 100644
--- a/llvm/lib/Target/AArch64/AArch64FrameLowering.cpp
+++ b/llvm/lib/Target/AArch64/AArch64FrameLowering.cpp
@@ -239,11 +239,6 @@ static cl::opt<bool> EnableRedZone("aarch64-redzone",
cl::desc("enable use of redzone on AArch64"),
cl::init(false), cl::Hidden);
-static cl::opt<bool>
- ReverseCSRRestoreSeq("reverse-csr-restore-seq",
- cl::desc("reverse the CSR restore sequence"),
- cl::init(false), cl::Hidden);
-
static cl::opt<bool> StackTaggingMergeSetTag(
"stack-tagging-merge-settag",
cl::desc("merge settag instruction in function epilog"), cl::init(true),
@@ -307,8 +302,6 @@ bool AArch64FrameLowering::homogeneousPrologEpilog(
return false;
if (!EnableHomogeneousPrologEpilog)
return false;
- if (ReverseCSRRestoreSeq)
- return false;
if (EnableRedZone)
return false;
@@ -3117,7 +3110,27 @@ bool AArch64FrameLowering::restoreCalleeSavedRegisters(
computeCalleeSaveRegisterPairs(MF, CSI, TRI, RegPairs, hasFP(MF));
- auto EmitMI = [&](const RegPairInfo &RPI) -> MachineBasicBlock::iterator {
+ if (homogeneousPrologEpilog(MF, &MBB)) {
+ auto MIB = BuildMI(MBB, MBBI, DL, TII.get(AArch64::HOM_Epilog))
+ .setMIFlag(MachineInstr::FrameDestroy);
+ for (auto &RPI : RegPairs) {
+ MIB.addReg(RPI.Reg1, RegState::Define);
+ MIB.addReg(RPI.Reg2, RegState::Define);
+ }
+ return true;
+ }
+
+  // For performance reasons, restore SVE registers in increasing order.
+ auto IsPPR = [](const RegPairInfo &c) { return c.Type == RegPairInfo::PPR; };
+ auto PPRBegin = std::find_if(RegPairs.begin(), RegPairs.end(), IsPPR);
+ auto PPREnd = std::find_if_not(PPRBegin, RegPairs.end(), IsPPR);
+ std::reverse(PPRBegin, PPREnd);
+ auto IsZPR = [](const RegPairInfo &c) { return c.Type == RegPairInfo::ZPR; };
+ auto ZPRBegin = std::find_if(RegPairs.begin(), RegPairs.end(), IsZPR);
+ auto ZPREnd = std::find_if_not(ZPRBegin, RegPairs.end(), IsZPR);
+ std::reverse(ZPRBegin, ZPREnd);
+
+ for (const RegPairInfo &RPI : RegPairs) {
unsigned Reg1 = RPI.Reg1;
unsigned Reg2 = RPI.Reg2;
@@ -3191,42 +3204,6 @@ bool AArch64FrameLowering::restoreCalleeSavedRegisters(
MachineMemOperand::MOLoad, Size, Alignment));
if (NeedsWinCFI)
InsertSEH(MIB, TII, MachineInstr::FrameDestroy);
-
- return MIB->getIterator();
- };
-
- // SVE objects are always restored in reverse order.
- for (const RegPairInfo &RPI : reverse(RegPairs))
- if (RPI.isScalable())
- EmitMI(RPI);
-
- if (homogeneousPrologEpilog(MF, &MBB)) {
- auto MIB = BuildMI(MBB, MBBI, DL, TII.get(AArch64::HOM_Epilog))
- .setMIFlag(MachineInstr::FrameDestroy);
- for (auto &RPI : RegPairs) {
- MIB.addReg(RPI.Reg1, RegState::Define);
- MIB.addReg(RPI.Reg2, RegState::Define);
- }
- return true;
- }
-
- if (ReverseCSRRestoreSeq) {
- MachineBasicBlock::iterator First = MBB.end();
- for (const RegPairInfo &RPI : reverse(RegPairs)) {
- if (RPI.isScalable())
- continue;
- MachineBasicBlock::iterator It = EmitMI(RPI);
- if (First == MBB.end())
- First = It;
- }
- if (First != MBB.end())
- MBB.splice(MBBI, &MBB, First);
- } else {
- for (const RegPairInfo &RPI : RegPairs) {
- if (RPI.isScalable())
- continue;
- (void)EmitMI(RPI);
- }
}
return true;
diff --git a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
index 184ebc19bc9e..3b92e95d7c28 100644
--- a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
+++ b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
@@ -541,10 +541,12 @@ AArch64TargetLowering::AArch64TargetLowering(const TargetMachine &TM,
setOperationAction(ISD::STRICT_UINT_TO_FP, MVT::i32, Custom);
setOperationAction(ISD::STRICT_UINT_TO_FP, MVT::i64, Custom);
setOperationAction(ISD::STRICT_UINT_TO_FP, MVT::i128, Custom);
- setOperationAction(ISD::FP_ROUND, MVT::f16, Custom);
+ if (Subtarget->hasFPARMv8())
+ setOperationAction(ISD::FP_ROUND, MVT::f16, Custom);
setOperationAction(ISD::FP_ROUND, MVT::f32, Custom);
setOperationAction(ISD::FP_ROUND, MVT::f64, Custom);
- setOperationAction(ISD::STRICT_FP_ROUND, MVT::f16, Custom);
+ if (Subtarget->hasFPARMv8())
+ setOperationAction(ISD::STRICT_FP_ROUND, MVT::f16, Custom);
setOperationAction(ISD::STRICT_FP_ROUND, MVT::f32, Custom);
setOperationAction(ISD::STRICT_FP_ROUND, MVT::f64, Custom);
@@ -947,9 +949,11 @@ AArch64TargetLowering::AArch64TargetLowering(const TargetMachine &TM,
setTruncStoreAction(MVT::f128, MVT::f32, Expand);
setTruncStoreAction(MVT::f128, MVT::f16, Expand);
- setOperationAction(ISD::BITCAST, MVT::i16, Custom);
- setOperationAction(ISD::BITCAST, MVT::f16, Custom);
- setOperationAction(ISD::BITCAST, MVT::bf16, Custom);
+ if (Subtarget->hasFPARMv8()) {
+ setOperationAction(ISD::BITCAST, MVT::i16, Custom);
+ setOperationAction(ISD::BITCAST, MVT::f16, Custom);
+ setOperationAction(ISD::BITCAST, MVT::bf16, Custom);
+ }
// Indexed loads and stores are supported.
for (unsigned im = (unsigned)ISD::PRE_INC;
diff --git a/llvm/lib/Target/AArch64/AArch64ISelLowering.h b/llvm/lib/Target/AArch64/AArch64ISelLowering.h
index 436b21fd1346..bec13484450d 100644
--- a/llvm/lib/Target/AArch64/AArch64ISelLowering.h
+++ b/llvm/lib/Target/AArch64/AArch64ISelLowering.h
@@ -1308,6 +1308,8 @@ private:
bool preferScalarizeSplat(SDNode *N) const override;
unsigned getMinimumJumpTableEntries() const override;
+
+ bool softPromoteHalfType() const override { return true; }
};
namespace AArch64 {
diff --git a/llvm/lib/Target/AArch64/AArch64SchedTSV110.td b/llvm/lib/Target/AArch64/AArch64SchedTSV110.td
index 0ae9a69fd482..1c577a25bf73 100644
--- a/llvm/lib/Target/AArch64/AArch64SchedTSV110.td
+++ b/llvm/lib/Target/AArch64/AArch64SchedTSV110.td
@@ -419,10 +419,10 @@ def : InstRW<[TSV110Wr_12cyc_1MDU], (instregex "^(S|U)DIVWr$")>;
def : InstRW<[TSV110Wr_20cyc_1MDU], (instregex "^(S|U)DIVXr$")>;
def TSV110ReadMAW : SchedReadAdvance<2, [TSV110Wr_3cyc_1MDU]>;
-def : InstRW<[TSV110Wr_3cyc_1MDU, TSV110ReadMAW], (instrs MADDWrrr, MSUBWrrr)>;
+def : InstRW<[TSV110Wr_3cyc_1MDU, ReadIM, ReadIM, TSV110ReadMAW], (instrs MADDWrrr, MSUBWrrr)>;
def TSV110ReadMAQ : SchedReadAdvance<3, [TSV110Wr_4cyc_1MDU]>;
-def : InstRW<[TSV110Wr_4cyc_1MDU, TSV110ReadMAQ], (instrs MADDXrrr, MSUBXrrr)>;
-def : InstRW<[TSV110Wr_3cyc_1MDU, TSV110ReadMAW], (instregex "(S|U)(MADDL|MSUBL)rrr")>;
+def : InstRW<[TSV110Wr_4cyc_1MDU, ReadIM, ReadIM, TSV110ReadMAQ], (instrs MADDXrrr, MSUBXrrr)>;
+def : InstRW<[TSV110Wr_3cyc_1MDU, ReadIM, ReadIM, TSV110ReadMAW], (instregex "(S|U)(MADDL|MSUBL)rrr")>;
def : InstRW<[TSV110Wr_4cyc_1MDU], (instregex "^(S|U)MULHrr$")>;
diff --git a/llvm/lib/Target/AArch64/AArch64StackTagging.cpp b/llvm/lib/Target/AArch64/AArch64StackTagging.cpp
index 961dded13234..ef7c517732ef 100644
--- a/llvm/lib/Target/AArch64/AArch64StackTagging.cpp
+++ b/llvm/lib/Target/AArch64/AArch64StackTagging.cpp
@@ -21,7 +21,6 @@
#include "llvm/Analysis/ScalarEvolution.h"
#include "llvm/Analysis/ScalarEvolutionExpressions.h"
#include "llvm/Analysis/StackSafetyAnalysis.h"
-#include "llvm/Analysis/ValueTracking.h"
#include "llvm/CodeGen/LiveRegUnits.h"
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineFunction.h"
@@ -520,7 +519,6 @@ bool AArch64StackTagging::runOnFunction(Function &Fn) {
for (auto &I : SInfo.AllocasToInstrument) {
memtag::AllocaInfo &Info = I.second;
assert(Info.AI && SIB.isInterestingAlloca(*Info.AI));
- TrackingVH<Instruction> OldAI = Info.AI;
memtag::alignAndPadAlloca(Info, kTagGranuleSize);
AllocaInst *AI = Info.AI;
int Tag = NextTag;
@@ -534,7 +532,8 @@ bool AArch64StackTagging::runOnFunction(Function &Fn) {
ConstantInt::get(IRB.getInt64Ty(), Tag)});
if (Info.AI->hasName())
TagPCall->setName(Info.AI->getName() + ".tag");
- Info.AI->replaceAllUsesWith(TagPCall);
+ // Does not replace metadata, so we don't have to handle DPValues.
+ Info.AI->replaceNonMetadataUsesWith(TagPCall);
TagPCall->setOperand(0, Info.AI);
// Calls to functions that may return twice (e.g. setjmp) confuse the
@@ -574,12 +573,6 @@ bool AArch64StackTagging::runOnFunction(Function &Fn) {
for (auto *II : Info.LifetimeEnd)
II->eraseFromParent();
}
-
- // Fixup debug intrinsics to point to the new alloca.
- for (auto *DVI : Info.DbgVariableIntrinsics)
- DVI->replaceVariableLocationOp(OldAI, Info.AI);
- for (auto *DPV : Info.DbgVariableRecords)
- DPV->replaceVariableLocationOp(OldAI, Info.AI);
}
// If we have instrumented at least one alloca, all unrecognized lifetime
diff --git a/llvm/lib/Target/AArch64/AArch64TargetTransformInfo.cpp b/llvm/lib/Target/AArch64/AArch64TargetTransformInfo.cpp
index 6655931181c2..010e569809e2 100644
--- a/llvm/lib/Target/AArch64/AArch64TargetTransformInfo.cpp
+++ b/llvm/lib/Target/AArch64/AArch64TargetTransformInfo.cpp
@@ -2972,6 +2972,13 @@ InstructionCost AArch64TTIImpl::getArithmeticInstrCost(
return BaseT::getArithmeticInstrCost(Opcode, Ty, CostKind, Op1Info,
Op2Info);
+ case ISD::FREM:
+ // Pass nullptr as fmod/fmodf calls are emitted by the backend even when
+ // those functions are not declared in the module.
+ if (!Ty->isVectorTy())
+ return getCallInstrCost(/*Function*/ nullptr, Ty, {Ty, Ty}, CostKind);
+ return BaseT::getArithmeticInstrCost(Opcode, Ty, CostKind, Op1Info,
+ Op2Info);
}
}
diff --git a/llvm/lib/Target/AMDGPU/AMDGPUCombine.td b/llvm/lib/Target/AMDGPU/AMDGPUCombine.td
index b9411e205212..9218760538dc 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPUCombine.td
+++ b/llvm/lib/Target/AMDGPU/AMDGPUCombine.td
@@ -33,6 +33,12 @@ def rcp_sqrt_to_rsq : GICombineRule<
[{ return matchRcpSqrtToRsq(*${rcp}, ${matchinfo}); }]),
(apply [{ Helper.applyBuildFn(*${rcp}, ${matchinfo}); }])>;
+def fdiv_by_sqrt_to_rsq_f16 : GICombineRule<
+ (defs root:$root),
+ (match (G_FSQRT f16:$sqrt, $x, (MIFlags FmContract)),
+ (G_FDIV f16:$dst, $y, $sqrt, (MIFlags FmContract)):$root,
+ [{ return matchFDivSqrtToRsqF16(*${root}); }]),
+ (apply [{ applyFDivSqrtToRsqF16(*${root}, ${x}.getReg()); }])>;
def cvt_f32_ubyteN_matchdata : GIDefMatchData<"CvtF32UByteMatchInfo">;
@@ -156,7 +162,7 @@ def AMDGPUPostLegalizerCombiner: GICombiner<
"AMDGPUPostLegalizerCombinerImpl",
[all_combines, gfx6gfx7_combines, gfx8_combines,
uchar_to_float, cvt_f32_ubyteN, remove_fcanonicalize, foldable_fneg,
- rcp_sqrt_to_rsq, sign_extension_in_reg, smulu64]> {
+ rcp_sqrt_to_rsq, fdiv_by_sqrt_to_rsq_f16, sign_extension_in_reg, smulu64]> {
let CombineAllMethodName = "tryCombineAllImpl";
}
diff --git a/llvm/lib/Target/AMDGPU/AMDGPULegalizerInfo.cpp b/llvm/lib/Target/AMDGPU/AMDGPULegalizerInfo.cpp
index 0d3b158a0df7..13d751072913 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPULegalizerInfo.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPULegalizerInfo.cpp
@@ -4824,9 +4824,8 @@ bool AMDGPULegalizerInfo::legalizeFDIV16(MachineInstr &MI,
return true;
}
-static const unsigned SPDenormModeBitField =
- AMDGPU::Hwreg::ID_MODE | (4 << AMDGPU::Hwreg::OFFSET_SHIFT_) |
- (1 << AMDGPU::Hwreg::WIDTH_M1_SHIFT_);
+static constexpr unsigned SPDenormModeBitField =
+ AMDGPU::Hwreg::HwregEncoding::encode(AMDGPU::Hwreg::ID_MODE, 4, 2);
// Enable or disable FP32 denorm mode. When 'Enable' is true, emit instructions
// to enable denorm mode. When 'Enable' is false, disable denorm mode.
diff --git a/llvm/lib/Target/AMDGPU/AMDGPUPostLegalizerCombiner.cpp b/llvm/lib/Target/AMDGPU/AMDGPUPostLegalizerCombiner.cpp
index a1c34e92a57f..82e17ddad851 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPUPostLegalizerCombiner.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPUPostLegalizerCombiner.cpp
@@ -83,6 +83,9 @@ public:
matchRcpSqrtToRsq(MachineInstr &MI,
std::function<void(MachineIRBuilder &)> &MatchInfo) const;
+ bool matchFDivSqrtToRsqF16(MachineInstr &MI) const;
+ void applyFDivSqrtToRsqF16(MachineInstr &MI, const Register &X) const;
+
// FIXME: Should be able to have 2 separate matchdatas rather than custom
// struct boilerplate.
struct CvtF32UByteMatchInfo {
@@ -334,6 +337,26 @@ bool AMDGPUPostLegalizerCombinerImpl::matchRcpSqrtToRsq(
return false;
}
+bool AMDGPUPostLegalizerCombinerImpl::matchFDivSqrtToRsqF16(
+ MachineInstr &MI) const {
+ Register Sqrt = MI.getOperand(2).getReg();
+ return MRI.hasOneNonDBGUse(Sqrt);
+}
+
+void AMDGPUPostLegalizerCombinerImpl::applyFDivSqrtToRsqF16(
+ MachineInstr &MI, const Register &X) const {
+ Register Dst = MI.getOperand(0).getReg();
+ Register Y = MI.getOperand(1).getReg();
+ LLT DstTy = MRI.getType(Dst);
+ uint32_t Flags = MI.getFlags();
+ Register RSQ = B.buildIntrinsic(Intrinsic::amdgcn_rsq, {DstTy})
+ .addUse(X)
+ .setMIFlags(Flags)
+ .getReg(0);
+ B.buildFMul(Dst, RSQ, Y, Flags);
+ MI.eraseFromParent();
+}
+
bool AMDGPUPostLegalizerCombinerImpl::matchCvtF32UByteN(
MachineInstr &MI, CvtF32UByteMatchInfo &MatchInfo) const {
Register SrcReg = MI.getOperand(1).getReg();
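
A hedged scalar illustration (plain C, not the GlobalISel combine itself) of the rewrite fdiv_by_sqrt_to_rsq_f16 performs when both instructions carry the contract flag and the sqrt has a single non-debug use: y / sqrt(x) becomes y * rsq(x), with rsq(x) = 1 / sqrt(x).

#include <math.h>

// Before the combine: an f16 fdiv fed by an f16 fsqrt (shown here with float).
float div_by_sqrt(float y, float x) { return y / sqrtf(x); }

// After the combine: multiply by the reciprocal square root instead.
float mul_by_rsq(float y, float x) { return y * (1.0f / sqrtf(x)); }
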
diff --git a/llvm/lib/Target/AMDGPU/AsmParser/AMDGPUAsmParser.cpp b/llvm/lib/Target/AMDGPU/AsmParser/AMDGPUAsmParser.cpp
index 5b32b34079f4..b7b471d8dc7b 100644
--- a/llvm/lib/Target/AMDGPU/AsmParser/AMDGPUAsmParser.cpp
+++ b/llvm/lib/Target/AMDGPU/AsmParser/AMDGPUAsmParser.cpp
@@ -7272,11 +7272,11 @@ ParseStatus AMDGPUAsmParser::parseHwreg(OperandVector &Operands) {
if (trySkipId("hwreg", AsmToken::LParen)) {
OperandInfoTy HwReg(OPR_ID_UNKNOWN);
- OperandInfoTy Offset(OFFSET_DEFAULT_);
- OperandInfoTy Width(WIDTH_DEFAULT_);
+ OperandInfoTy Offset(HwregOffset::Default);
+ OperandInfoTy Width(HwregSize::Default);
if (parseHwregBody(HwReg, Offset, Width) &&
validateHwreg(HwReg, Offset, Width)) {
- ImmVal = encodeHwreg(HwReg.Id, Offset.Id, Width.Id);
+ ImmVal = HwregEncoding::encode(HwReg.Id, Offset.Id, Width.Id);
} else {
return ParseStatus::Failure;
}
diff --git a/llvm/lib/Target/AMDGPU/Disassembler/AMDGPUDisassembler.cpp b/llvm/lib/Target/AMDGPU/Disassembler/AMDGPUDisassembler.cpp
index 894607dfdd8c..e1cca17bdbf4 100644
--- a/llvm/lib/Target/AMDGPU/Disassembler/AMDGPUDisassembler.cpp
+++ b/llvm/lib/Target/AMDGPU/Disassembler/AMDGPUDisassembler.cpp
@@ -119,6 +119,12 @@ static DecodeStatus decodeSplitBarrier(MCInst &Inst, unsigned Val,
return addOperand(Inst, DAsm->decodeSplitBarrier(Val));
}
+static DecodeStatus decodeDpp8FI(MCInst &Inst, unsigned Val, uint64_t Addr,
+ const MCDisassembler *Decoder) {
+ auto DAsm = static_cast<const AMDGPUDisassembler *>(Decoder);
+ return addOperand(Inst, DAsm->decodeDpp8FI(Val));
+}
+
#define DECODE_OPERAND(StaticDecoderName, DecoderName) \
static DecodeStatus StaticDecoderName(MCInst &Inst, unsigned Imm, \
uint64_t /*Addr*/, \
@@ -440,19 +446,6 @@ static inline DecoderUInt128 eat12Bytes(ArrayRef<uint8_t> &Bytes) {
return DecoderUInt128(Lo, Hi);
}
-// The disassembler is greedy, so we need to check FI operand value to
-// not parse a dpp if the correct literal is not set. For dpp16 the
-// autogenerated decoder checks the dpp literal
-static bool isValidDPP8(const MCInst &MI) {
- using namespace llvm::AMDGPU::DPP;
- int FiIdx = AMDGPU::getNamedOperandIdx(MI.getOpcode(), AMDGPU::OpName::fi);
- assert(FiIdx != -1);
- if ((unsigned)FiIdx >= MI.getNumOperands())
- return false;
- unsigned Fi = MI.getOperand(FiIdx).getImm();
- return Fi == DPP8_FI_0 || Fi == DPP8_FI_1;
-}
-
DecodeStatus AMDGPUDisassembler::getInstruction(MCInst &MI, uint64_t &Size,
ArrayRef<uint8_t> Bytes_,
uint64_t Address,
@@ -460,7 +453,10 @@ DecodeStatus AMDGPUDisassembler::getInstruction(MCInst &MI, uint64_t &Size,
unsigned MaxInstBytesNum = std::min((size_t)TargetMaxInstBytes, Bytes_.size());
Bytes = Bytes_.slice(0, MaxInstBytesNum);
- DecodeStatus Res = MCDisassembler::Fail;
+ // In case the opcode is not recognized we'll assume a Size of 4 bytes (unless
+ // there are fewer bytes left). This will be overridden on success.
+ Size = std::min((size_t)4, Bytes_.size());
+
do {
// ToDo: better to switch encoding length using some bit predicate
// but it is unknown yet, so try all we can
@@ -469,222 +465,147 @@ DecodeStatus AMDGPUDisassembler::getInstruction(MCInst &MI, uint64_t &Size,
// encodings
if (isGFX11Plus() && Bytes.size() >= 12 ) {
DecoderUInt128 DecW = eat12Bytes(Bytes);
- Res =
- tryDecodeInst(DecoderTableDPP8GFX1196, DecoderTableDPP8GFX11_FAKE1696,
- MI, DecW, Address, CS);
- if (Res && convertDPP8Inst(MI) == MCDisassembler::Success)
- break;
- MI = MCInst(); // clear
- Res =
- tryDecodeInst(DecoderTableDPP8GFX1296, DecoderTableDPP8GFX12_FAKE1696,
- MI, DecW, Address, CS);
- if (Res && convertDPP8Inst(MI) == MCDisassembler::Success)
- break;
- MI = MCInst(); // clear
-
- const auto convertVOPDPP = [&]() {
- if (MCII->get(MI.getOpcode()).TSFlags & SIInstrFlags::VOP3P) {
- convertVOP3PDPPInst(MI);
- } else if (AMDGPU::isVOPC64DPP(MI.getOpcode())) {
- convertVOPCDPPInst(MI); // Special VOP3 case
- } else {
- assert(MCII->get(MI.getOpcode()).TSFlags & SIInstrFlags::VOP3);
- convertVOP3DPPInst(MI); // Regular VOP3 case
- }
- };
- Res = tryDecodeInst(DecoderTableDPPGFX1196, DecoderTableDPPGFX11_FAKE1696,
- MI, DecW, Address, CS);
- if (Res) {
- convertVOPDPP();
- break;
- }
- Res = tryDecodeInst(DecoderTableDPPGFX1296, DecoderTableDPPGFX12_FAKE1696,
- MI, DecW, Address, CS);
- if (Res) {
- convertVOPDPP();
- break;
- }
- Res = tryDecodeInst(DecoderTableGFX1196, MI, DecW, Address, CS);
- if (Res)
+
+ if (tryDecodeInst(DecoderTableGFX1196, DecoderTableGFX11_FAKE1696, MI,
+ DecW, Address, CS))
break;
- Res = tryDecodeInst(DecoderTableGFX1296, MI, DecW, Address, CS);
- if (Res)
+ if (tryDecodeInst(DecoderTableGFX1296, DecoderTableGFX12_FAKE1696, MI,
+ DecW, Address, CS))
break;
- Res = tryDecodeInst(DecoderTableGFX12W6496, MI, DecW, Address, CS);
- if (Res)
+ if (tryDecodeInst(DecoderTableGFX12W6496, MI, DecW, Address, CS))
break;
}
+
// Reinitialize Bytes
Bytes = Bytes_.slice(0, MaxInstBytesNum);
if (Bytes.size() >= 8) {
const uint64_t QW = eatBytes<uint64_t>(Bytes);
- if (STI.hasFeature(AMDGPU::FeatureGFX10_BEncoding)) {
- Res = tryDecodeInst(DecoderTableGFX10_B64, MI, QW, Address, CS);
- if (Res) {
- if (AMDGPU::getNamedOperandIdx(MI.getOpcode(), AMDGPU::OpName::dpp8)
- == -1)
- break;
- if (convertDPP8Inst(MI) == MCDisassembler::Success)
- break;
- MI = MCInst(); // clear
- }
- }
-
- Res = tryDecodeInst(DecoderTableDPP864, MI, QW, Address, CS);
- if (Res && convertDPP8Inst(MI) == MCDisassembler::Success)
- break;
- MI = MCInst(); // clear
-
- Res = tryDecodeInst(DecoderTableDPP8GFX1164,
- DecoderTableDPP8GFX11_FAKE1664, MI, QW, Address, CS);
- if (Res && convertDPP8Inst(MI) == MCDisassembler::Success)
- break;
- MI = MCInst(); // clear
-
- Res = tryDecodeInst(DecoderTableDPP8GFX1264,
- DecoderTableDPP8GFX12_FAKE1664, MI, QW, Address, CS);
- if (Res && convertDPP8Inst(MI) == MCDisassembler::Success)
+ if (STI.hasFeature(AMDGPU::FeatureGFX10_BEncoding) &&
+ tryDecodeInst(DecoderTableGFX10_B64, MI, QW, Address, CS))
break;
- MI = MCInst(); // clear
- Res = tryDecodeInst(DecoderTableDPP64, MI, QW, Address, CS);
- if (Res) break;
-
- Res = tryDecodeInst(DecoderTableDPPGFX1164, DecoderTableDPPGFX11_FAKE1664,
- MI, QW, Address, CS);
- if (Res) {
- if (MCII->get(MI.getOpcode()).TSFlags & SIInstrFlags::VOPC)
- convertVOPCDPPInst(MI);
- break;
- }
-
- Res = tryDecodeInst(DecoderTableDPPGFX1264, DecoderTableDPPGFX12_FAKE1664,
- MI, QW, Address, CS);
- if (Res) {
- if (MCII->get(MI.getOpcode()).TSFlags & SIInstrFlags::VOPC)
- convertVOPCDPPInst(MI);
+ if (STI.hasFeature(AMDGPU::FeatureUnpackedD16VMem) &&
+ tryDecodeInst(DecoderTableGFX80_UNPACKED64, MI, QW, Address, CS))
break;
- }
-
- if (STI.hasFeature(AMDGPU::FeatureUnpackedD16VMem)) {
- Res = tryDecodeInst(DecoderTableGFX80_UNPACKED64, MI, QW, Address, CS);
- if (Res)
- break;
- }
// Some GFX9 subtargets repurposed the v_mad_mix_f32, v_mad_mixlo_f16 and
// v_mad_mixhi_f16 for FMA variants. Try to decode using this special
// table first so we print the correct name.
- if (STI.hasFeature(AMDGPU::FeatureFmaMixInsts)) {
- Res = tryDecodeInst(DecoderTableGFX9_DL64, MI, QW, Address, CS);
- if (Res)
- break;
- }
+ if (STI.hasFeature(AMDGPU::FeatureFmaMixInsts) &&
+ tryDecodeInst(DecoderTableGFX9_DL64, MI, QW, Address, CS))
+ break;
- if (STI.hasFeature(AMDGPU::FeatureGFX940Insts)) {
- Res = tryDecodeInst(DecoderTableGFX94064, MI, QW, Address, CS);
- if (Res)
- break;
- }
+ if (STI.hasFeature(AMDGPU::FeatureGFX940Insts) &&
+ tryDecodeInst(DecoderTableGFX94064, MI, QW, Address, CS))
+ break;
- if (STI.hasFeature(AMDGPU::FeatureGFX90AInsts)) {
- Res = tryDecodeInst(DecoderTableGFX90A64, MI, QW, Address, CS);
- if (Res)
- break;
- }
+ if (STI.hasFeature(AMDGPU::FeatureGFX90AInsts) &&
+ tryDecodeInst(DecoderTableGFX90A64, MI, QW, Address, CS))
+ break;
- Res = tryDecodeInst(DecoderTableGFX864, MI, QW, Address, CS);
- if (Res)
+ if (tryDecodeInst(DecoderTableGFX864, MI, QW, Address, CS))
break;
- Res = tryDecodeInst(DecoderTableGFX964, MI, QW, Address, CS);
- if (Res)
+ if (tryDecodeInst(DecoderTableGFX964, MI, QW, Address, CS))
break;
- Res = tryDecodeInst(DecoderTableGFX1064, MI, QW, Address, CS);
- if (Res)
+ if (tryDecodeInst(DecoderTableGFX1064, MI, QW, Address, CS))
break;
- Res = tryDecodeInst(DecoderTableGFX1264, DecoderTableGFX12_FAKE1664, MI,
- QW, Address, CS);
- if (Res)
+ if (tryDecodeInst(DecoderTableGFX1264, DecoderTableGFX12_FAKE1664, MI, QW,
+ Address, CS))
break;
- Res = tryDecodeInst(DecoderTableGFX1164, DecoderTableGFX11_FAKE1664, MI,
- QW, Address, CS);
- if (Res)
+ if (tryDecodeInst(DecoderTableGFX1164, DecoderTableGFX11_FAKE1664, MI, QW,
+ Address, CS))
break;
- Res = tryDecodeInst(DecoderTableGFX11W6464, MI, QW, Address, CS);
- if (Res)
+ if (tryDecodeInst(DecoderTableGFX11W6464, MI, QW, Address, CS))
break;
- Res = tryDecodeInst(DecoderTableGFX12W6464, MI, QW, Address, CS);
- if (Res)
+ if (tryDecodeInst(DecoderTableGFX12W6464, MI, QW, Address, CS))
break;
}
- // Reinitialize Bytes as DPP64 could have eaten too much
+ // Reinitialize Bytes
Bytes = Bytes_.slice(0, MaxInstBytesNum);
// Try decode 32-bit instruction
- if (Bytes.size() < 4) break;
- const uint32_t DW = eatBytes<uint32_t>(Bytes);
- Res = tryDecodeInst(DecoderTableGFX832, MI, DW, Address, CS);
- if (Res) break;
+ if (Bytes.size() >= 4) {
+ const uint32_t DW = eatBytes<uint32_t>(Bytes);
- Res = tryDecodeInst(DecoderTableAMDGPU32, MI, DW, Address, CS);
- if (Res) break;
+ if (tryDecodeInst(DecoderTableGFX832, MI, DW, Address, CS))
+ break;
- Res = tryDecodeInst(DecoderTableGFX932, MI, DW, Address, CS);
- if (Res) break;
+ if (tryDecodeInst(DecoderTableAMDGPU32, MI, DW, Address, CS))
+ break;
- if (STI.hasFeature(AMDGPU::FeatureGFX90AInsts)) {
- Res = tryDecodeInst(DecoderTableGFX90A32, MI, DW, Address, CS);
- if (Res)
+ if (tryDecodeInst(DecoderTableGFX932, MI, DW, Address, CS))
break;
- }
- if (STI.hasFeature(AMDGPU::FeatureGFX10_BEncoding)) {
- Res = tryDecodeInst(DecoderTableGFX10_B32, MI, DW, Address, CS);
- if (Res) break;
- }
+ if (STI.hasFeature(AMDGPU::FeatureGFX90AInsts) &&
+ tryDecodeInst(DecoderTableGFX90A32, MI, DW, Address, CS))
+ break;
+
+ if (STI.hasFeature(AMDGPU::FeatureGFX10_BEncoding) &&
+ tryDecodeInst(DecoderTableGFX10_B32, MI, DW, Address, CS))
+ break;
- Res = tryDecodeInst(DecoderTableGFX1032, MI, DW, Address, CS);
- if (Res) break;
+ if (tryDecodeInst(DecoderTableGFX1032, MI, DW, Address, CS))
+ break;
- Res = tryDecodeInst(DecoderTableGFX1132, DecoderTableGFX11_FAKE1632, MI, DW,
- Address, CS);
- if (Res) break;
+ if (tryDecodeInst(DecoderTableGFX1132, DecoderTableGFX11_FAKE1632, MI, DW,
+ Address, CS))
+ break;
- Res = tryDecodeInst(DecoderTableGFX1232, DecoderTableGFX12_FAKE1632, MI, DW,
- Address, CS);
+ if (tryDecodeInst(DecoderTableGFX1232, DecoderTableGFX12_FAKE1632, MI, DW,
+ Address, CS))
+ break;
+ }
+
+ return MCDisassembler::Fail;
} while (false);
- if (Res && AMDGPU::isMAC(MI.getOpcode())) {
+ if (MCII->get(MI.getOpcode()).TSFlags & SIInstrFlags::DPP) {
+ if (isMacDPP(MI))
+ convertMacDPPInst(MI);
+
+ if (MCII->get(MI.getOpcode()).TSFlags & SIInstrFlags::VOP3P)
+ convertVOP3PDPPInst(MI);
+ else if ((MCII->get(MI.getOpcode()).TSFlags & SIInstrFlags::VOPC) ||
+ AMDGPU::isVOPC64DPP(MI.getOpcode()))
+ convertVOPCDPPInst(MI); // Special VOP3 case
+ else if (AMDGPU::getNamedOperandIdx(MI.getOpcode(), AMDGPU::OpName::dpp8) !=
+ -1)
+ convertDPP8Inst(MI);
+ else if (MCII->get(MI.getOpcode()).TSFlags & SIInstrFlags::VOP3)
+ convertVOP3DPPInst(MI); // Regular VOP3 case
+ }
+
+ if (AMDGPU::isMAC(MI.getOpcode())) {
// Insert dummy unused src2_modifiers.
insertNamedMCOperand(MI, MCOperand::createImm(0),
AMDGPU::OpName::src2_modifiers);
}
- if (Res && (MI.getOpcode() == AMDGPU::V_CVT_SR_BF8_F32_e64_dpp ||
- MI.getOpcode() == AMDGPU::V_CVT_SR_FP8_F32_e64_dpp)) {
+ if (MI.getOpcode() == AMDGPU::V_CVT_SR_BF8_F32_e64_dpp ||
+ MI.getOpcode() == AMDGPU::V_CVT_SR_FP8_F32_e64_dpp) {
// Insert dummy unused src2_modifiers.
insertNamedMCOperand(MI, MCOperand::createImm(0),
AMDGPU::OpName::src2_modifiers);
}
- if (Res && (MCII->get(MI.getOpcode()).TSFlags & SIInstrFlags::DS) &&
+ if ((MCII->get(MI.getOpcode()).TSFlags & SIInstrFlags::DS) &&
!AMDGPU::hasGDS(STI)) {
insertNamedMCOperand(MI, MCOperand::createImm(0), AMDGPU::OpName::gds);
}
- if (Res && (MCII->get(MI.getOpcode()).TSFlags &
- (SIInstrFlags::MUBUF | SIInstrFlags::FLAT | SIInstrFlags::SMRD))) {
+ if (MCII->get(MI.getOpcode()).TSFlags &
+ (SIInstrFlags::MUBUF | SIInstrFlags::FLAT | SIInstrFlags::SMRD)) {
int CPolPos = AMDGPU::getNamedOperandIdx(MI.getOpcode(),
AMDGPU::OpName::cpol);
if (CPolPos != -1) {
@@ -700,9 +621,9 @@ DecodeStatus AMDGPUDisassembler::getInstruction(MCInst &MI, uint64_t &Size,
}
}
- if (Res && (MCII->get(MI.getOpcode()).TSFlags &
- (SIInstrFlags::MTBUF | SIInstrFlags::MUBUF)) &&
- (STI.hasFeature(AMDGPU::FeatureGFX90AInsts))) {
+ if ((MCII->get(MI.getOpcode()).TSFlags &
+ (SIInstrFlags::MTBUF | SIInstrFlags::MUBUF)) &&
+ (STI.hasFeature(AMDGPU::FeatureGFX90AInsts))) {
// GFX90A lost TFE, its place is occupied by ACC.
int TFEOpIdx =
AMDGPU::getNamedOperandIdx(MI.getOpcode(), AMDGPU::OpName::tfe);
@@ -713,8 +634,8 @@ DecodeStatus AMDGPUDisassembler::getInstruction(MCInst &MI, uint64_t &Size,
}
}
- if (Res && (MCII->get(MI.getOpcode()).TSFlags &
- (SIInstrFlags::MTBUF | SIInstrFlags::MUBUF))) {
+ if (MCII->get(MI.getOpcode()).TSFlags &
+ (SIInstrFlags::MTBUF | SIInstrFlags::MUBUF)) {
int SWZOpIdx =
AMDGPU::getNamedOperandIdx(MI.getOpcode(), AMDGPU::OpName::swz);
if (SWZOpIdx != -1) {
@@ -724,7 +645,7 @@ DecodeStatus AMDGPUDisassembler::getInstruction(MCInst &MI, uint64_t &Size,
}
}
- if (Res && (MCII->get(MI.getOpcode()).TSFlags & SIInstrFlags::MIMG)) {
+ if (MCII->get(MI.getOpcode()).TSFlags & SIInstrFlags::MIMG) {
int VAddr0Idx =
AMDGPU::getNamedOperandIdx(MI.getOpcode(), AMDGPU::OpName::vaddr0);
int RsrcIdx =
@@ -732,36 +653,32 @@ DecodeStatus AMDGPUDisassembler::getInstruction(MCInst &MI, uint64_t &Size,
unsigned NSAArgs = RsrcIdx - VAddr0Idx - 1;
if (VAddr0Idx >= 0 && NSAArgs > 0) {
unsigned NSAWords = (NSAArgs + 3) / 4;
- if (Bytes.size() < 4 * NSAWords) {
- Res = MCDisassembler::Fail;
- } else {
- for (unsigned i = 0; i < NSAArgs; ++i) {
- const unsigned VAddrIdx = VAddr0Idx + 1 + i;
- auto VAddrRCID =
- MCII->get(MI.getOpcode()).operands()[VAddrIdx].RegClass;
- MI.insert(MI.begin() + VAddrIdx,
- createRegOperand(VAddrRCID, Bytes[i]));
- }
- Bytes = Bytes.slice(4 * NSAWords);
+ if (Bytes.size() < 4 * NSAWords)
+ return MCDisassembler::Fail;
+ for (unsigned i = 0; i < NSAArgs; ++i) {
+ const unsigned VAddrIdx = VAddr0Idx + 1 + i;
+ auto VAddrRCID =
+ MCII->get(MI.getOpcode()).operands()[VAddrIdx].RegClass;
+ MI.insert(MI.begin() + VAddrIdx, createRegOperand(VAddrRCID, Bytes[i]));
}
+ Bytes = Bytes.slice(4 * NSAWords);
}
- if (Res)
- Res = convertMIMGInst(MI);
+ convertMIMGInst(MI);
}
- if (Res && (MCII->get(MI.getOpcode()).TSFlags &
- (SIInstrFlags::VIMAGE | SIInstrFlags::VSAMPLE)))
- Res = convertMIMGInst(MI);
+ if (MCII->get(MI.getOpcode()).TSFlags &
+ (SIInstrFlags::VIMAGE | SIInstrFlags::VSAMPLE))
+ convertMIMGInst(MI);
- if (Res && (MCII->get(MI.getOpcode()).TSFlags & SIInstrFlags::EXP))
- Res = convertEXPInst(MI);
+ if (MCII->get(MI.getOpcode()).TSFlags & SIInstrFlags::EXP)
+ convertEXPInst(MI);
- if (Res && (MCII->get(MI.getOpcode()).TSFlags & SIInstrFlags::VINTERP))
- Res = convertVINTERPInst(MI);
+ if (MCII->get(MI.getOpcode()).TSFlags & SIInstrFlags::VINTERP)
+ convertVINTERPInst(MI);
- if (Res && (MCII->get(MI.getOpcode()).TSFlags & SIInstrFlags::SDWA))
- Res = convertSDWAInst(MI);
+ if (MCII->get(MI.getOpcode()).TSFlags & SIInstrFlags::SDWA)
+ convertSDWAInst(MI);
int VDstIn_Idx = AMDGPU::getNamedOperandIdx(MI.getOpcode(),
AMDGPU::OpName::vdst_in);
@@ -782,27 +699,23 @@ DecodeStatus AMDGPUDisassembler::getInstruction(MCInst &MI, uint64_t &Size,
int ImmLitIdx =
AMDGPU::getNamedOperandIdx(MI.getOpcode(), AMDGPU::OpName::imm);
bool IsSOPK = MCII->get(MI.getOpcode()).TSFlags & SIInstrFlags::SOPK;
- if (Res && ImmLitIdx != -1 && !IsSOPK)
- Res = convertFMAanyK(MI, ImmLitIdx);
+ if (ImmLitIdx != -1 && !IsSOPK)
+ convertFMAanyK(MI, ImmLitIdx);
- // if the opcode was not recognized we'll assume a Size of 4 bytes
- // (unless there are fewer bytes left)
- Size = Res ? (MaxInstBytesNum - Bytes.size())
- : std::min((size_t)4, Bytes_.size());
- return Res;
+ Size = MaxInstBytesNum - Bytes.size();
+ return MCDisassembler::Success;
}
-DecodeStatus AMDGPUDisassembler::convertEXPInst(MCInst &MI) const {
+void AMDGPUDisassembler::convertEXPInst(MCInst &MI) const {
if (STI.hasFeature(AMDGPU::FeatureGFX11Insts)) {
// The MCInst still has these fields even though they are no longer encoded
// in the GFX11 instruction.
insertNamedMCOperand(MI, MCOperand::createImm(0), AMDGPU::OpName::vm);
insertNamedMCOperand(MI, MCOperand::createImm(0), AMDGPU::OpName::compr);
}
- return MCDisassembler::Success;
}
-DecodeStatus AMDGPUDisassembler::convertVINTERPInst(MCInst &MI) const {
+void AMDGPUDisassembler::convertVINTERPInst(MCInst &MI) const {
if (MI.getOpcode() == AMDGPU::V_INTERP_P10_F16_F32_inreg_gfx11 ||
MI.getOpcode() == AMDGPU::V_INTERP_P10_F16_F32_inreg_gfx12 ||
MI.getOpcode() == AMDGPU::V_INTERP_P10_RTZ_F16_F32_inreg_gfx11 ||
@@ -815,10 +728,9 @@ DecodeStatus AMDGPUDisassembler::convertVINTERPInst(MCInst &MI) const {
// instruction.
insertNamedMCOperand(MI, MCOperand::createImm(0), AMDGPU::OpName::op_sel);
}
- return MCDisassembler::Success;
}
-DecodeStatus AMDGPUDisassembler::convertSDWAInst(MCInst &MI) const {
+void AMDGPUDisassembler::convertSDWAInst(MCInst &MI) const {
if (STI.hasFeature(AMDGPU::FeatureGFX9) ||
STI.hasFeature(AMDGPU::FeatureGFX10)) {
if (AMDGPU::hasNamedOperand(MI.getOpcode(), AMDGPU::OpName::sdst))
@@ -835,7 +747,6 @@ DecodeStatus AMDGPUDisassembler::convertSDWAInst(MCInst &MI) const {
insertNamedMCOperand(MI, MCOperand::createImm(0), AMDGPU::OpName::omod);
}
}
- return MCDisassembler::Success;
}
struct VOPModifiers {
@@ -939,56 +850,40 @@ void AMDGPUDisassembler::convertMacDPPInst(MCInst &MI) const {
AMDGPU::OpName::src2_modifiers);
}
-// We must check FI == literal to reject not genuine dpp8 insts, and we must
-// first add optional MI operands to check FI
-DecodeStatus AMDGPUDisassembler::convertDPP8Inst(MCInst &MI) const {
+void AMDGPUDisassembler::convertDPP8Inst(MCInst &MI) const {
unsigned Opc = MI.getOpcode();
- if (MCII->get(Opc).TSFlags & SIInstrFlags::VOP3P) {
- convertVOP3PDPPInst(MI);
- } else if ((MCII->get(Opc).TSFlags & SIInstrFlags::VOPC) ||
- AMDGPU::isVOPC64DPP(Opc)) {
- convertVOPCDPPInst(MI);
- } else {
- if (isMacDPP(MI))
- convertMacDPPInst(MI);
+ int VDstInIdx =
+ AMDGPU::getNamedOperandIdx(MI.getOpcode(), AMDGPU::OpName::vdst_in);
+ if (VDstInIdx != -1)
+ insertNamedMCOperand(MI, MI.getOperand(0), AMDGPU::OpName::vdst_in);
- int VDstInIdx =
- AMDGPU::getNamedOperandIdx(MI.getOpcode(), AMDGPU::OpName::vdst_in);
- if (VDstInIdx != -1)
- insertNamedMCOperand(MI, MI.getOperand(0), AMDGPU::OpName::vdst_in);
+ if (MI.getOpcode() == AMDGPU::V_CVT_SR_BF8_F32_e64_dpp8_gfx12 ||
+ MI.getOpcode() == AMDGPU::V_CVT_SR_FP8_F32_e64_dpp8_gfx12)
+ insertNamedMCOperand(MI, MI.getOperand(0), AMDGPU::OpName::src2);
- if (MI.getOpcode() == AMDGPU::V_CVT_SR_BF8_F32_e64_dpp8_gfx12 ||
- MI.getOpcode() == AMDGPU::V_CVT_SR_FP8_F32_e64_dpp8_gfx12)
- insertNamedMCOperand(MI, MI.getOperand(0), AMDGPU::OpName::src2);
+ unsigned DescNumOps = MCII->get(Opc).getNumOperands();
+ if (MI.getNumOperands() < DescNumOps &&
+ AMDGPU::hasNamedOperand(Opc, AMDGPU::OpName::op_sel)) {
+ convertTrue16OpSel(MI);
+ auto Mods = collectVOPModifiers(MI);
+ insertNamedMCOperand(MI, MCOperand::createImm(Mods.OpSel),
+ AMDGPU::OpName::op_sel);
+ } else {
+ // Insert dummy unused src modifiers.
+ if (MI.getNumOperands() < DescNumOps &&
+ AMDGPU::hasNamedOperand(Opc, AMDGPU::OpName::src0_modifiers))
+ insertNamedMCOperand(MI, MCOperand::createImm(0),
+ AMDGPU::OpName::src0_modifiers);
- unsigned DescNumOps = MCII->get(Opc).getNumOperands();
if (MI.getNumOperands() < DescNumOps &&
- AMDGPU::hasNamedOperand(Opc, AMDGPU::OpName::op_sel)) {
- convertTrue16OpSel(MI);
- auto Mods = collectVOPModifiers(MI);
- insertNamedMCOperand(MI, MCOperand::createImm(Mods.OpSel),
- AMDGPU::OpName::op_sel);
- } else {
- // Insert dummy unused src modifiers.
- if (MI.getNumOperands() < DescNumOps &&
- AMDGPU::hasNamedOperand(Opc, AMDGPU::OpName::src0_modifiers))
- insertNamedMCOperand(MI, MCOperand::createImm(0),
- AMDGPU::OpName::src0_modifiers);
-
- if (MI.getNumOperands() < DescNumOps &&
- AMDGPU::hasNamedOperand(Opc, AMDGPU::OpName::src1_modifiers))
- insertNamedMCOperand(MI, MCOperand::createImm(0),
- AMDGPU::OpName::src1_modifiers);
- }
+ AMDGPU::hasNamedOperand(Opc, AMDGPU::OpName::src1_modifiers))
+ insertNamedMCOperand(MI, MCOperand::createImm(0),
+ AMDGPU::OpName::src1_modifiers);
}
- return isValidDPP8(MI) ? MCDisassembler::Success : MCDisassembler::SoftFail;
}
-DecodeStatus AMDGPUDisassembler::convertVOP3DPPInst(MCInst &MI) const {
- if (isMacDPP(MI))
- convertMacDPPInst(MI);
-
+void AMDGPUDisassembler::convertVOP3DPPInst(MCInst &MI) const {
convertTrue16OpSel(MI);
int VDstInIdx =
@@ -1008,13 +903,12 @@ DecodeStatus AMDGPUDisassembler::convertVOP3DPPInst(MCInst &MI) const {
insertNamedMCOperand(MI, MCOperand::createImm(Mods.OpSel),
AMDGPU::OpName::op_sel);
}
- return MCDisassembler::Success;
}
// Note that before gfx10, the MIMG encoding provided no information about
// VADDR size. Consequently, decoded instructions always show address as if it
// has 1 dword, which could be not really so.
-DecodeStatus AMDGPUDisassembler::convertMIMGInst(MCInst &MI) const {
+void AMDGPUDisassembler::convertMIMGInst(MCInst &MI) const {
auto TSFlags = MCII->get(MI.getOpcode()).TSFlags;
int VDstIdx = AMDGPU::getNamedOperandIdx(MI.getOpcode(),
@@ -1043,7 +937,7 @@ DecodeStatus AMDGPUDisassembler::convertMIMGInst(MCInst &MI) const {
if (BaseOpcode->BVH) {
// Add A16 operand for intersect_ray instructions
addOperand(MI, MCOperand::createImm(BaseOpcode->A16));
- return MCDisassembler::Success;
+ return;
}
bool IsAtomic = (VDstIdx != -1);
@@ -1078,7 +972,7 @@ DecodeStatus AMDGPUDisassembler::convertMIMGInst(MCInst &MI) const {
if (!STI.hasFeature(AMDGPU::FeaturePartialNSAEncoding)) {
// The NSA encoding does not contain enough operands for the
// combination of base opcode / dimension. Should this be an error?
- return MCDisassembler::Success;
+ return;
}
IsPartialNSA = true;
}
@@ -1097,12 +991,12 @@ DecodeStatus AMDGPUDisassembler::convertMIMGInst(MCInst &MI) const {
DstSize += 1;
if (DstSize == Info->VDataDwords && AddrSize == Info->VAddrDwords)
- return MCDisassembler::Success;
+ return;
int NewOpcode =
AMDGPU::getMIMGOpcode(Info->BaseOpcode, Info->MIMGEncoding, DstSize, AddrSize);
if (NewOpcode == -1)
- return MCDisassembler::Success;
+ return;
// Widen the register to the correct number of enabled channels.
unsigned NewVdata = AMDGPU::NoRegister;
@@ -1119,7 +1013,7 @@ DecodeStatus AMDGPUDisassembler::convertMIMGInst(MCInst &MI) const {
if (NewVdata == AMDGPU::NoRegister) {
// It's possible to encode this such that the low register + enabled
// components exceeds the register count.
- return MCDisassembler::Success;
+ return;
}
}
@@ -1137,7 +1031,7 @@ DecodeStatus AMDGPUDisassembler::convertMIMGInst(MCInst &MI) const {
NewVAddrSA = MRI.getMatchingSuperReg(VAddrSA, AMDGPU::sub0,
&MRI.getRegClass(AddrRCID));
if (!NewVAddrSA)
- return MCDisassembler::Success;
+ return;
}
MI.setOpcode(NewOpcode);
@@ -1158,14 +1052,12 @@ DecodeStatus AMDGPUDisassembler::convertMIMGInst(MCInst &MI) const {
MI.erase(MI.begin() + VAddr0Idx + AddrSize,
MI.begin() + VAddr0Idx + Info->VAddrDwords);
}
-
- return MCDisassembler::Success;
}
// Opsel and neg bits are used in src_modifiers and standalone operands. Autogen
// decoder only adds to src_modifiers, so manually add the bits to the other
// operands.
-DecodeStatus AMDGPUDisassembler::convertVOP3PDPPInst(MCInst &MI) const {
+void AMDGPUDisassembler::convertVOP3PDPPInst(MCInst &MI) const {
unsigned Opc = MI.getOpcode();
unsigned DescNumOps = MCII->get(Opc).getNumOperands();
auto Mods = collectVOPModifiers(MI, true);
@@ -1190,12 +1082,10 @@ DecodeStatus AMDGPUDisassembler::convertVOP3PDPPInst(MCInst &MI) const {
AMDGPU::hasNamedOperand(Opc, AMDGPU::OpName::neg_hi))
insertNamedMCOperand(MI, MCOperand::createImm(Mods.NegHi),
AMDGPU::OpName::neg_hi);
-
- return MCDisassembler::Success;
}
// Create dummy old operand and insert optional operands
-DecodeStatus AMDGPUDisassembler::convertVOPCDPPInst(MCInst &MI) const {
+void AMDGPUDisassembler::convertVOPCDPPInst(MCInst &MI) const {
unsigned Opc = MI.getOpcode();
unsigned DescNumOps = MCII->get(Opc).getNumOperands();
@@ -1212,11 +1102,9 @@ DecodeStatus AMDGPUDisassembler::convertVOPCDPPInst(MCInst &MI) const {
AMDGPU::hasNamedOperand(Opc, AMDGPU::OpName::src1_modifiers))
insertNamedMCOperand(MI, MCOperand::createImm(0),
AMDGPU::OpName::src1_modifiers);
- return MCDisassembler::Success;
}
-DecodeStatus AMDGPUDisassembler::convertFMAanyK(MCInst &MI,
- int ImmLitIdx) const {
+void AMDGPUDisassembler::convertFMAanyK(MCInst &MI, int ImmLitIdx) const {
assert(HasLiteral && "Should have decoded a literal");
const MCInstrDesc &Desc = MCII->get(MI.getOpcode());
unsigned DescNumOps = Desc.getNumOperands();
@@ -1232,7 +1120,6 @@ DecodeStatus AMDGPUDisassembler::convertFMAanyK(MCInst &MI,
IsDeferredOp)
Op.setImm(Literal);
}
- return MCDisassembler::Success;
}
const char* AMDGPUDisassembler::getRegClassName(unsigned RegClassID) const {
@@ -1831,6 +1718,12 @@ MCOperand AMDGPUDisassembler::decodeSplitBarrier(unsigned Val) const {
return decodeSrcOp(OPW32, Val);
}
+MCOperand AMDGPUDisassembler::decodeDpp8FI(unsigned Val) const {
+ if (Val != AMDGPU::DPP::DPP8_FI_0 && Val != AMDGPU::DPP::DPP8_FI_1)
+ return MCOperand();
+ return MCOperand::createImm(Val);
+}
+
bool AMDGPUDisassembler::isVI() const {
return STI.hasFeature(AMDGPU::FeatureVolcanicIslands);
}
diff --git a/llvm/lib/Target/AMDGPU/Disassembler/AMDGPUDisassembler.h b/llvm/lib/Target/AMDGPU/Disassembler/AMDGPUDisassembler.h
index 3142b8a14a4d..2e1b6fb1c740 100644
--- a/llvm/lib/Target/AMDGPU/Disassembler/AMDGPUDisassembler.h
+++ b/llvm/lib/Target/AMDGPU/Disassembler/AMDGPUDisassembler.h
@@ -194,15 +194,15 @@ public:
DecodeStatus decodeCOMPUTE_PGM_RSRC3(uint32_t FourByteBuffer,
raw_string_ostream &KdStream) const;
- DecodeStatus convertEXPInst(MCInst &MI) const;
- DecodeStatus convertVINTERPInst(MCInst &MI) const;
- DecodeStatus convertFMAanyK(MCInst &MI, int ImmLitIdx) const;
- DecodeStatus convertSDWAInst(MCInst &MI) const;
- DecodeStatus convertDPP8Inst(MCInst &MI) const;
- DecodeStatus convertMIMGInst(MCInst &MI) const;
- DecodeStatus convertVOP3DPPInst(MCInst &MI) const;
- DecodeStatus convertVOP3PDPPInst(MCInst &MI) const;
- DecodeStatus convertVOPCDPPInst(MCInst &MI) const;
+ void convertEXPInst(MCInst &MI) const;
+ void convertVINTERPInst(MCInst &MI) const;
+ void convertFMAanyK(MCInst &MI, int ImmLitIdx) const;
+ void convertSDWAInst(MCInst &MI) const;
+ void convertDPP8Inst(MCInst &MI) const;
+ void convertMIMGInst(MCInst &MI) const;
+ void convertVOP3DPPInst(MCInst &MI) const;
+ void convertVOP3PDPPInst(MCInst &MI) const;
+ void convertVOPCDPPInst(MCInst &MI) const;
void convertMacDPPInst(MCInst &MI) const;
void convertTrue16OpSel(MCInst &MI) const;
@@ -261,6 +261,7 @@ public:
MCOperand decodeBoolReg(unsigned Val) const;
MCOperand decodeSplitBarrier(unsigned Val) const;
+ MCOperand decodeDpp8FI(unsigned Val) const;
int getTTmpIdx(unsigned Val) const;
diff --git a/llvm/lib/Target/AMDGPU/GCNHazardRecognizer.cpp b/llvm/lib/Target/AMDGPU/GCNHazardRecognizer.cpp
index a727134b53de..00fa93cc1923 100644
--- a/llvm/lib/Target/AMDGPU/GCNHazardRecognizer.cpp
+++ b/llvm/lib/Target/AMDGPU/GCNHazardRecognizer.cpp
@@ -177,7 +177,7 @@ static bool isLdsDma(const MachineInstr &MI) {
static unsigned getHWReg(const SIInstrInfo *TII, const MachineInstr &RegInstr) {
const MachineOperand *RegOp = TII->getNamedOperand(RegInstr,
AMDGPU::OpName::simm16);
- return RegOp->getImm() & AMDGPU::Hwreg::ID_MASK_;
+ return std::get<0>(AMDGPU::Hwreg::HwregEncoding::decode(RegOp->getImm()));
}
ScheduleHazardRecognizer::HazardType
diff --git a/llvm/lib/Target/AMDGPU/MCTargetDesc/AMDGPUInstPrinter.cpp b/llvm/lib/Target/AMDGPU/MCTargetDesc/AMDGPUInstPrinter.cpp
index a45fea6701f3..a32be1e50a60 100644
--- a/llvm/lib/Target/AMDGPU/MCTargetDesc/AMDGPUInstPrinter.cpp
+++ b/llvm/lib/Target/AMDGPU/MCTargetDesc/AMDGPUInstPrinter.cpp
@@ -1778,13 +1778,9 @@ void AMDGPUInstPrinter::printSDelayALU(const MCInst *MI, unsigned OpNo,
void AMDGPUInstPrinter::printHwreg(const MCInst *MI, unsigned OpNo,
const MCSubtargetInfo &STI, raw_ostream &O) {
- unsigned Id;
- unsigned Offset;
- unsigned Width;
-
using namespace llvm::AMDGPU::Hwreg;
unsigned Val = MI->getOperand(OpNo).getImm();
- decodeHwreg(Val, Id, Offset, Width);
+ auto [Id, Offset, Width] = HwregEncoding::decode(Val);
StringRef HwRegName = getHwreg(Id, STI);
O << "hwreg(";
@@ -1793,9 +1789,8 @@ void AMDGPUInstPrinter::printHwreg(const MCInst *MI, unsigned OpNo,
} else {
O << Id;
}
- if (Width != WIDTH_DEFAULT_ || Offset != OFFSET_DEFAULT_) {
+ if (Width != HwregSize::Default || Offset != HwregOffset::Default)
O << ", " << Offset << ", " << Width;
- }
O << ')';
}
diff --git a/llvm/lib/Target/AMDGPU/SIDefines.h b/llvm/lib/Target/AMDGPU/SIDefines.h
index 98310c3f70c4..0b516bfffb9b 100644
--- a/llvm/lib/Target/AMDGPU/SIDefines.h
+++ b/llvm/lib/Target/AMDGPU/SIDefines.h
@@ -549,33 +549,12 @@ enum Id { // HwRegCode, (6) [5:0]
ID_SQ_PERF_SNAPSHOT_DATA1 = 22,
ID_SQ_PERF_SNAPSHOT_PC_LO = 23,
ID_SQ_PERF_SNAPSHOT_PC_HI = 24,
-
- ID_SHIFT_ = 0,
- ID_WIDTH_ = 6,
- ID_MASK_ = (((1 << ID_WIDTH_) - 1) << ID_SHIFT_)
};
enum Offset : unsigned { // Offset, (5) [10:6]
- OFFSET_DEFAULT_ = 0,
- OFFSET_SHIFT_ = 6,
- OFFSET_WIDTH_ = 5,
- OFFSET_MASK_ = (((1 << OFFSET_WIDTH_) - 1) << OFFSET_SHIFT_),
-
OFFSET_MEM_VIOL = 8,
};
-enum WidthMinusOne : unsigned { // WidthMinusOne, (5) [15:11]
- WIDTH_M1_DEFAULT_ = 31,
- WIDTH_M1_SHIFT_ = 11,
- WIDTH_M1_WIDTH_ = 5,
- WIDTH_M1_MASK_ = (((1 << WIDTH_M1_WIDTH_) - 1) << WIDTH_M1_SHIFT_),
-};
-
-// Some values from WidthMinusOne mapped into Width domain.
-enum Width : unsigned {
- WIDTH_DEFAULT_ = WIDTH_M1_DEFAULT_ + 1,
-};
-
enum ModeRegisterMasks : uint32_t {
FP_ROUND_MASK = 0xf << 0, // Bits 0..3
FP_DENORM_MASK = 0xf << 4, // Bits 4..7
diff --git a/llvm/lib/Target/AMDGPU/SIFrameLowering.cpp b/llvm/lib/Target/AMDGPU/SIFrameLowering.cpp
index d02aee71870e..4f106bf0dfb1 100644
--- a/llvm/lib/Target/AMDGPU/SIFrameLowering.cpp
+++ b/llvm/lib/Target/AMDGPU/SIFrameLowering.cpp
@@ -478,14 +478,13 @@ void SIFrameLowering::emitEntryFunctionFlatScratchInit(
.addImm(0);
Addc->getOperand(3).setIsDead(); // Mark SCC as dead.
- BuildMI(MBB, I, DL, TII->get(AMDGPU::S_SETREG_B32)).
- addReg(FlatScrInitLo).
- addImm(int16_t(AMDGPU::Hwreg::ID_FLAT_SCR_LO |
- (31 << AMDGPU::Hwreg::WIDTH_M1_SHIFT_)));
- BuildMI(MBB, I, DL, TII->get(AMDGPU::S_SETREG_B32)).
- addReg(FlatScrInitHi).
- addImm(int16_t(AMDGPU::Hwreg::ID_FLAT_SCR_HI |
- (31 << AMDGPU::Hwreg::WIDTH_M1_SHIFT_)));
+ using namespace AMDGPU::Hwreg;
+ BuildMI(MBB, I, DL, TII->get(AMDGPU::S_SETREG_B32))
+ .addReg(FlatScrInitLo)
+ .addImm(int16_t(HwregEncoding::encode(ID_FLAT_SCR_LO, 0, 32)));
+ BuildMI(MBB, I, DL, TII->get(AMDGPU::S_SETREG_B32))
+ .addReg(FlatScrInitHi)
+ .addImm(int16_t(HwregEncoding::encode(ID_FLAT_SCR_HI, 0, 32)));
return;
}
diff --git a/llvm/lib/Target/AMDGPU/SIISelLowering.cpp b/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
index 257dff6ef683..d8f528d86611 100644
--- a/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
+++ b/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
@@ -3960,7 +3960,7 @@ SDValue SITargetLowering::lowerGET_ROUNDING(SDValue Op,
assert(Op.getValueType() == MVT::i32);
uint32_t BothRoundHwReg =
- AMDGPU::Hwreg::encodeHwreg(AMDGPU::Hwreg::ID_MODE, 0, 4);
+ AMDGPU::Hwreg::HwregEncoding::encode(AMDGPU::Hwreg::ID_MODE, 0, 4);
SDValue GetRoundBothImm = DAG.getTargetConstant(BothRoundHwReg, SL, MVT::i32);
SDValue IntrinID =
@@ -4195,8 +4195,8 @@ SITargetLowering::emitGWSMemViolTestLoop(MachineInstr &MI,
MachineBasicBlock::iterator I = LoopBB->end();
- const unsigned EncodedReg = AMDGPU::Hwreg::encodeHwreg(
- AMDGPU::Hwreg::ID_TRAPSTS, AMDGPU::Hwreg::OFFSET_MEM_VIOL, 1);
+ const unsigned EncodedReg = AMDGPU::Hwreg::HwregEncoding::encode(
+ AMDGPU::Hwreg::ID_TRAPSTS, AMDGPU::Hwreg::OFFSET_MEM_VIOL, 1);
// Clear TRAP_STS.MEM_VIOL
BuildMI(*LoopBB, LoopBB->begin(), DL, TII->get(AMDGPU::S_SETREG_IMM32_B32))
@@ -4999,18 +4999,16 @@ MachineBasicBlock *SITargetLowering::EmitInstrWithCustomInserter(
// Otherwise there was overflow and the result is hi2:0. In both cases the
// result should represent the actual time at some point during the sequence
// of three getregs.
+ using namespace AMDGPU::Hwreg;
Register RegHi1 = MRI.createVirtualRegister(&AMDGPU::SReg_32RegClass);
BuildMI(*BB, MI, DL, TII->get(AMDGPU::S_GETREG_B32), RegHi1)
- .addImm(AMDGPU::Hwreg::encodeHwreg(AMDGPU::Hwreg::ID_SHADER_CYCLES_HI,
- 0, 32));
+ .addImm(HwregEncoding::encode(ID_SHADER_CYCLES_HI, 0, 32));
Register RegLo1 = MRI.createVirtualRegister(&AMDGPU::SReg_32RegClass);
BuildMI(*BB, MI, DL, TII->get(AMDGPU::S_GETREG_B32), RegLo1)
- .addImm(
- AMDGPU::Hwreg::encodeHwreg(AMDGPU::Hwreg::ID_SHADER_CYCLES, 0, 32));
+ .addImm(HwregEncoding::encode(ID_SHADER_CYCLES, 0, 32));
Register RegHi2 = MRI.createVirtualRegister(&AMDGPU::SReg_32RegClass);
BuildMI(*BB, MI, DL, TII->get(AMDGPU::S_GETREG_B32), RegHi2)
- .addImm(AMDGPU::Hwreg::encodeHwreg(AMDGPU::Hwreg::ID_SHADER_CYCLES_HI,
- 0, 32));
+ .addImm(HwregEncoding::encode(ID_SHADER_CYCLES_HI, 0, 32));
BuildMI(*BB, MI, DL, TII->get(AMDGPU::S_CMP_EQ_U32))
.addReg(RegHi1)
.addReg(RegHi2);
@@ -5207,8 +5205,8 @@ MachineBasicBlock *SITargetLowering::EmitInstrWithCustomInserter(
// FIXME: This could be predicates on the immediate, but tablegen doesn't
// allow you to have a no side effect instruction in the output of a
// sideeffecting pattern.
- unsigned ID, Offset, Width;
- AMDGPU::Hwreg::decodeHwreg(MI.getOperand(1).getImm(), ID, Offset, Width);
+ auto [ID, Offset, Width] =
+ AMDGPU::Hwreg::HwregEncoding::decode(MI.getOperand(1).getImm());
if (ID != AMDGPU::Hwreg::ID_MODE)
return BB;
@@ -10495,9 +10493,8 @@ SDValue SITargetLowering::LowerFDIV32(SDValue Op, SelectionDAG &DAG) const {
SDValue NegDivScale0 = DAG.getNode(ISD::FNEG, SL, MVT::f32,
DenominatorScaled, Flags);
- const unsigned Denorm32Reg = AMDGPU::Hwreg::ID_MODE |
- (4 << AMDGPU::Hwreg::OFFSET_SHIFT_) |
- (1 << AMDGPU::Hwreg::WIDTH_M1_SHIFT_);
+ using namespace AMDGPU::Hwreg;
+ const unsigned Denorm32Reg = HwregEncoding::encode(ID_MODE, 4, 2);
const SDValue BitField = DAG.getTargetConstant(Denorm32Reg, SL, MVT::i32);
const MachineFunction &MF = DAG.getMachineFunction();
diff --git a/llvm/lib/Target/AMDGPU/SIInsertWaitcnts.cpp b/llvm/lib/Target/AMDGPU/SIInsertWaitcnts.cpp
index 6ecb1c8bf6e1..a6184c5e1e04 100644
--- a/llvm/lib/Target/AMDGPU/SIInsertWaitcnts.cpp
+++ b/llvm/lib/Target/AMDGPU/SIInsertWaitcnts.cpp
@@ -480,6 +480,10 @@ public:
// WaitEventType to corresponding counter values in InstCounterType.
virtual const unsigned *getWaitEventMask() const = 0;
+ // Returns a new waitcnt with all counters except VScnt set to 0. If
+ // IncludeVSCnt is true, VScnt is set to 0, otherwise it is set to ~0u.
+ virtual AMDGPU::Waitcnt getAllZeroWaitcnt(bool IncludeVSCnt) const = 0;
+
virtual ~WaitcntGenerator() = default;
};
@@ -516,6 +520,8 @@ public:
return WaitEventMaskForInstPreGFX12;
}
+
+ virtual AMDGPU::Waitcnt getAllZeroWaitcnt(bool IncludeVSCnt) const override;
};
class WaitcntGeneratorGFX12Plus : public WaitcntGenerator {
@@ -549,6 +555,8 @@ public:
return WaitEventMaskForInstGFX12Plus;
}
+
+ virtual AMDGPU::Waitcnt getAllZeroWaitcnt(bool IncludeVSCnt) const override;
};
class SIInsertWaitcnts : public MachineFunctionPass {
@@ -1304,6 +1312,16 @@ bool WaitcntGeneratorPreGFX12::createNewWaitcnt(
return Modified;
}
+AMDGPU::Waitcnt
+WaitcntGeneratorPreGFX12::getAllZeroWaitcnt(bool IncludeVSCnt) const {
+ return AMDGPU::Waitcnt(0, 0, 0, IncludeVSCnt && ST->hasVscnt() ? 0 : ~0u);
+}
+
+AMDGPU::Waitcnt
+WaitcntGeneratorGFX12Plus::getAllZeroWaitcnt(bool IncludeVSCnt) const {
+ return AMDGPU::Waitcnt(0, 0, 0, IncludeVSCnt ? 0 : ~0u, 0, 0, 0);
+}
+
/// Combine consecutive S_WAIT_*CNT instructions that precede \p It and
/// follow \p OldWaitcntInstr and apply any extra waits from \p Wait that
/// were added by previous passes. Currently this pass conservatively
@@ -1613,8 +1631,7 @@ bool SIInsertWaitcnts::generateWaitcntInstBefore(MachineInstr &MI,
MI.getOpcode() == AMDGPU::SI_RETURN ||
MI.getOpcode() == AMDGPU::S_SETPC_B64_return ||
(MI.isReturn() && MI.isCall() && !callWaitsOnFunctionEntry(MI))) {
- Wait = Wait.combined(
- AMDGPU::Waitcnt::allZeroExceptVsCnt(ST->hasExtendedWaitCounts()));
+ Wait = Wait.combined(WCG->getAllZeroWaitcnt(/*IncludeVSCnt=*/false));
}
// Identify S_ENDPGM instructions which may have to wait for outstanding VMEM
// stores. In this case it can be useful to send a message to explicitly
@@ -1834,8 +1851,7 @@ bool SIInsertWaitcnts::generateWaitcntInstBefore(MachineInstr &MI,
// cause an exception. Otherwise, insert an explicit S_WAITCNT 0 here.
if (MI.getOpcode() == AMDGPU::S_BARRIER &&
!ST->hasAutoWaitcntBeforeBarrier() && !ST->supportsBackOffBarrier()) {
- Wait = Wait.combined(
- AMDGPU::Waitcnt::allZero(ST->hasExtendedWaitCounts(), ST->hasVscnt()));
+ Wait = Wait.combined(WCG->getAllZeroWaitcnt(/*IncludeVSCnt=*/true));
}
// TODO: Remove this work-around, enable the assert for Bug 457939
@@ -1851,7 +1867,7 @@ bool SIInsertWaitcnts::generateWaitcntInstBefore(MachineInstr &MI,
ScoreBrackets.simplifyWaitcnt(Wait);
if (ForceEmitZeroWaitcnts)
- Wait = AMDGPU::Waitcnt::allZeroExceptVsCnt(ST->hasExtendedWaitCounts());
+ Wait = WCG->getAllZeroWaitcnt(/*IncludeVSCnt=*/false);
if (ForceEmitWaitcnt[LOAD_CNT])
Wait.LoadCnt = 0;
@@ -2089,7 +2105,7 @@ void SIInsertWaitcnts::updateEventWaitcntAfter(MachineInstr &Inst,
if (callWaitsOnFunctionReturn(Inst)) {
// Act as a wait on everything
ScoreBrackets->applyWaitcnt(
- AMDGPU::Waitcnt::allZeroExceptVsCnt(ST->hasExtendedWaitCounts()));
+ WCG->getAllZeroWaitcnt(/*IncludeVSCnt=*/false));
ScoreBrackets->setStateOnFunctionEntryOrReturn();
} else {
// May need to wait for anything.
diff --git a/llvm/lib/Target/AMDGPU/SIInstrInfo.td b/llvm/lib/Target/AMDGPU/SIInstrInfo.td
index 97c723752b70..34cdb09b0e15 100644
--- a/llvm/lib/Target/AMDGPU/SIInstrInfo.td
+++ b/llvm/lib/Target/AMDGPU/SIInstrInfo.td
@@ -987,8 +987,8 @@ def SDWAVopcDst : BoolRC {
}
class NamedIntOperand<ValueType Type, string Prefix, bit Optional = 1,
- string ConvertMethod = "nullptr">
- : CustomOperand<Type, Optional, NAME> {
+ string name = NAME, string ConvertMethod = "nullptr">
+ : CustomOperand<Type, Optional, name> {
let ParserMethod =
"[this](OperandVector &Operands) -> ParseStatus { "#
"return parseIntWithPrefix(\""#Prefix#"\", Operands, "#
@@ -1090,9 +1090,12 @@ let DefaultValue = "0xf" in {
def DppRowMask : NamedIntOperand<i32, "row_mask">;
def DppBankMask : NamedIntOperand<i32, "bank_mask">;
}
-def DppBoundCtrl : NamedIntOperand<i1, "bound_ctrl", 1,
+def DppBoundCtrl : NamedIntOperand<i1, "bound_ctrl", 1, "DppBoundCtrl",
"[this] (int64_t &BC) -> bool { return convertDppBoundCtrl(BC); }">;
-def DppFI : NamedIntOperand<i32, "fi">;
+
+let DecoderMethod = "decodeDpp8FI" in
+def Dpp8FI : NamedIntOperand<i32, "fi", 1, "DppFI">;
+def Dpp16FI : NamedIntOperand<i32, "fi", 1, "DppFI">;
def blgp : CustomOperand<i32, 1, "BLGP">;
def CBSZ : NamedIntOperand<i32, "cbsz">;
@@ -1823,7 +1826,7 @@ class getInsDPP16 <RegisterOperand OldRC, RegisterOperand Src0RC, RegisterOperan
Operand Src0Mod, Operand Src1Mod, Operand Src2Mod, bit HasOld = 1> {
dag ret = !con(getInsDPP<OldRC, Src0RC, Src1RC, Src2RC, NumSrcArgs,
HasModifiers, Src0Mod, Src1Mod, Src2Mod, HasOld>.ret,
- (ins DppFI:$fi));
+ (ins Dpp16FI:$fi));
}
class getInsDPP8 <RegisterOperand OldRC, RegisterOperand Src0RC, RegisterOperand Src1RC,
@@ -1831,7 +1834,7 @@ class getInsDPP8 <RegisterOperand OldRC, RegisterOperand Src0RC, RegisterOperand
Operand Src0Mod, Operand Src1Mod, Operand Src2Mod, bit HasOld = 1> {
dag ret = !con(getInsDPPBase<OldRC, Src0RC, Src1RC, Src2RC, NumSrcArgs,
HasModifiers, Src0Mod, Src1Mod, Src2Mod, HasOld>.ret,
- (ins dpp8:$dpp8, DppFI:$fi));
+ (ins dpp8:$dpp8, Dpp8FI:$fi));
}
class getInsVOP3DPPBase<dag VOP3Base, RegisterOperand OldRC, int NumSrcArgs, bit HasOld> {
@@ -1851,12 +1854,12 @@ class getInsVOP3DPP<dag VOP3Base, RegisterOperand OldRC, int NumSrcArgs, bit Has
class getInsVOP3DPP16<dag VOP3Base, RegisterOperand OldRC, int NumSrcArgs, bit HasOld = 1> {
dag ret = !con(getInsVOP3DPP<VOP3Base,OldRC,NumSrcArgs,HasOld>.ret,
- (ins DppFI:$fi));
+ (ins Dpp16FI:$fi));
}
class getInsVOP3DPP8<dag VOP3Base, RegisterOperand OldRC, int NumSrcArgs, bit HasOld = 1> {
dag ret = !con(getInsVOP3DPPBase<VOP3Base,OldRC,NumSrcArgs,HasOld>.ret,
- (ins dpp8:$dpp8, DppFI:$fi));
+ (ins dpp8:$dpp8, Dpp8FI:$fi));
}
// Ins for SDWA
diff --git a/llvm/lib/Target/AMDGPU/SIModeRegister.cpp b/llvm/lib/Target/AMDGPU/SIModeRegister.cpp
index e62ad026dc5c..c01b1266a553 100644
--- a/llvm/lib/Target/AMDGPU/SIModeRegister.cpp
+++ b/llvm/lib/Target/AMDGPU/SIModeRegister.cpp
@@ -225,11 +225,10 @@ void SIModeRegister::insertSetreg(MachineBasicBlock &MBB, MachineInstr *MI,
unsigned Offset = llvm::countr_zero<unsigned>(InstrMode.Mask);
unsigned Width = llvm::countr_one<unsigned>(InstrMode.Mask >> Offset);
unsigned Value = (InstrMode.Mode >> Offset) & ((1 << Width) - 1);
+ using namespace AMDGPU::Hwreg;
BuildMI(MBB, MI, nullptr, TII->get(AMDGPU::S_SETREG_IMM32_B32))
.addImm(Value)
- .addImm(((Width - 1) << AMDGPU::Hwreg::WIDTH_M1_SHIFT_) |
- (Offset << AMDGPU::Hwreg::OFFSET_SHIFT_) |
- (AMDGPU::Hwreg::ID_MODE << AMDGPU::Hwreg::ID_SHIFT_));
+ .addImm(HwregEncoding::encode(ID_MODE, Offset, Width));
++NumSetregInserted;
Changed = true;
InstrMode.Mask &= ~(((1 << Width) - 1) << Offset);
@@ -276,15 +275,11 @@ void SIModeRegister::processBlockPhase1(MachineBasicBlock &MBB,
// as we assume it has been inserted by a higher authority (this is
// likely to be a very rare occurrence).
unsigned Dst = TII->getNamedOperand(MI, AMDGPU::OpName::simm16)->getImm();
- if (((Dst & AMDGPU::Hwreg::ID_MASK_) >> AMDGPU::Hwreg::ID_SHIFT_) !=
- AMDGPU::Hwreg::ID_MODE)
+ using namespace AMDGPU::Hwreg;
+ auto [Id, Offset, Width] = HwregEncoding::decode(Dst);
+ if (Id != ID_MODE)
continue;
- unsigned Width = ((Dst & AMDGPU::Hwreg::WIDTH_M1_MASK_) >>
- AMDGPU::Hwreg::WIDTH_M1_SHIFT_) +
- 1;
- unsigned Offset =
- (Dst & AMDGPU::Hwreg::OFFSET_MASK_) >> AMDGPU::Hwreg::OFFSET_SHIFT_;
unsigned Mask = maskTrailingOnes<unsigned>(Width) << Offset;
// If an InsertionPoint is set we will insert a setreg there.
diff --git a/llvm/lib/Target/AMDGPU/Utils/AMDGPUBaseInfo.cpp b/llvm/lib/Target/AMDGPU/Utils/AMDGPUBaseInfo.cpp
index dacdf7b5cd9a..ce91e05e5cc8 100644
--- a/llvm/lib/Target/AMDGPU/Utils/AMDGPUBaseInfo.cpp
+++ b/llvm/lib/Target/AMDGPU/Utils/AMDGPUBaseInfo.cpp
@@ -1698,22 +1698,14 @@ int64_t getHwregId(const StringRef Name, const MCSubtargetInfo &STI) {
return (Idx < 0) ? Idx : Opr[Idx].Encoding;
}
-bool isValidHwreg(int64_t Id) {
- return 0 <= Id && isUInt<ID_WIDTH_>(Id);
-}
+bool isValidHwreg(int64_t Id) { return 0 <= Id && isUInt<HwregId::Width>(Id); }
bool isValidHwregOffset(int64_t Offset) {
- return 0 <= Offset && isUInt<OFFSET_WIDTH_>(Offset);
+ return 0 <= Offset && isUInt<HwregOffset::Width>(Offset);
}
bool isValidHwregWidth(int64_t Width) {
- return 0 <= (Width - 1) && isUInt<WIDTH_M1_WIDTH_>(Width - 1);
-}
-
-uint64_t encodeHwreg(uint64_t Id, uint64_t Offset, uint64_t Width) {
- return (Id << ID_SHIFT_) |
- (Offset << OFFSET_SHIFT_) |
- ((Width - 1) << WIDTH_M1_SHIFT_);
+ return 0 <= (Width - 1) && isUInt<HwregSize::Width>(Width - 1);
}
StringRef getHwreg(unsigned Id, const MCSubtargetInfo &STI) {
@@ -1721,12 +1713,6 @@ StringRef getHwreg(unsigned Id, const MCSubtargetInfo &STI) {
return (Idx < 0) ? "" : Opr[Idx].Name;
}
-void decodeHwreg(unsigned Val, unsigned &Id, unsigned &Offset, unsigned &Width) {
- Id = (Val & ID_MASK_) >> ID_SHIFT_;
- Offset = (Val & OFFSET_MASK_) >> OFFSET_SHIFT_;
- Width = ((Val & WIDTH_M1_MASK_) >> WIDTH_M1_SHIFT_) + 1;
-}
-
} // namespace Hwreg
//===----------------------------------------------------------------------===//
diff --git a/llvm/lib/Target/AMDGPU/Utils/AMDGPUBaseInfo.h b/llvm/lib/Target/AMDGPU/Utils/AMDGPUBaseInfo.h
index f35e77445282..6826cd273195 100644
--- a/llvm/lib/Target/AMDGPU/Utils/AMDGPUBaseInfo.h
+++ b/llvm/lib/Target/AMDGPU/Utils/AMDGPUBaseInfo.h
@@ -322,6 +322,35 @@ getNumVGPRBlocks(const MCSubtargetInfo *STI, unsigned NumSGPRs,
} // end namespace IsaInfo
+// Represents a field in an encoded value.
+template <unsigned HighBit, unsigned LowBit, unsigned D = 0>
+struct EncodingField {
+ static_assert(HighBit >= LowBit, "Invalid bit range!");
+ static constexpr unsigned Offset = LowBit;
+ static constexpr unsigned Width = HighBit - LowBit + 1;
+
+ using ValueType = unsigned;
+ static constexpr ValueType Default = D;
+
+ ValueType Value;
+ constexpr EncodingField(ValueType Value) : Value(Value) {}
+
+ constexpr uint64_t encode() const { return Value; }
+ static ValueType decode(uint64_t Encoded) { return Encoded; }
+};
+
+// A helper for encoding and decoding multiple fields.
+template <typename... Fields> struct EncodingFields {
+ static constexpr uint64_t encode(Fields... Values) {
+ return ((Values.encode() << Values.Offset) | ...);
+ }
+
+ static std::tuple<typename Fields::ValueType...> decode(uint64_t Encoded) {
+ return {Fields::decode((Encoded >> Fields::Offset) &
+ maxUIntN(Fields::Width))...};
+ }
+};
+
LLVM_READONLY
int16_t getNamedOperandIdx(uint16_t Opcode, uint16_t NamedIdx);
@@ -870,15 +899,6 @@ struct Waitcnt {
: LoadCnt(LoadCnt), ExpCnt(ExpCnt), DsCnt(DsCnt), StoreCnt(StoreCnt),
SampleCnt(SampleCnt), BvhCnt(BvhCnt), KmCnt(KmCnt) {}
- static Waitcnt allZero(bool Extended, bool HasStorecnt) {
- return Extended ? Waitcnt(0, 0, 0, 0, 0, 0, 0)
- : Waitcnt(0, 0, 0, HasStorecnt ? 0 : ~0u);
- }
-
- static Waitcnt allZeroExceptVsCnt(bool Extended) {
- return Extended ? Waitcnt(0, 0, 0, ~0u, 0, 0, 0) : Waitcnt(0, 0, 0, ~0u);
- }
-
bool hasWait() const { return StoreCnt != ~0u || hasWaitExceptStoreCnt(); }
bool hasWaitExceptStoreCnt() const {
@@ -1030,6 +1050,17 @@ unsigned encodeStorecntDscnt(const IsaVersion &Version, const Waitcnt &Decoded);
namespace Hwreg {
+using HwregId = EncodingField<5, 0>;
+using HwregOffset = EncodingField<10, 6>;
+
+struct HwregSize : EncodingField<15, 11, 32> {
+ using EncodingField::EncodingField;
+ constexpr uint64_t encode() const { return Value - 1; }
+ static ValueType decode(uint64_t Encoded) { return Encoded + 1; }
+};
+
+using HwregEncoding = EncodingFields<HwregId, HwregOffset, HwregSize>;
+
LLVM_READONLY
int64_t getHwregId(const StringRef Name, const MCSubtargetInfo &STI);
@@ -1043,13 +1074,8 @@ LLVM_READNONE
bool isValidHwregWidth(int64_t Width);
LLVM_READNONE
-uint64_t encodeHwreg(uint64_t Id, uint64_t Offset, uint64_t Width);
-
-LLVM_READNONE
StringRef getHwreg(unsigned Id, const MCSubtargetInfo &STI);
-void decodeHwreg(unsigned Val, unsigned &Id, unsigned &Offset, unsigned &Width);
-
} // namespace Hwreg
namespace DepCtr {
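
For reference, a standalone sketch of the bit layout the new EncodingField/EncodingFields helpers express for hwreg immediates (id in bits [5:0], offset in [10:6], width stored as width-minus-one in [15:11]); the numeric ID_MODE value below is an assumption taken from SIDefines.h, and the real code uses HwregEncoding::encode/decode rather than these hand-rolled helpers.

#include <cassert>
#include <cstdint>
#include <tuple>

constexpr uint64_t encodeHwreg(uint64_t Id, uint64_t Offset, uint64_t Width) {
  return Id | (Offset << 6) | ((Width - 1) << 11); // same packing as HwregEncoding
}

constexpr std::tuple<unsigned, unsigned, unsigned> decodeHwreg(uint64_t Val) {
  return {unsigned(Val & 0x3f), unsigned((Val >> 6) & 0x1f),
          unsigned(((Val >> 11) & 0x1f) + 1)};
}

int main() {
  const unsigned ID_MODE = 1; // assumed value of AMDGPU::Hwreg::ID_MODE
  uint64_t Imm = encodeHwreg(ID_MODE, /*Offset=*/4, /*Width=*/2);
  auto [Id, Offset, Width] = decodeHwreg(Imm); // mirrors the structured bindings above
  assert(Id == ID_MODE && Offset == 4 && Width == 2);
  return 0;
}
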
diff --git a/llvm/lib/Target/AMDGPU/VOP1Instructions.td b/llvm/lib/Target/AMDGPU/VOP1Instructions.td
index 99f8e8ede4ac..f5424cf48d7a 100644
--- a/llvm/lib/Target/AMDGPU/VOP1Instructions.td
+++ b/llvm/lib/Target/AMDGPU/VOP1Instructions.td
@@ -380,9 +380,9 @@ class VOP_MOVREL<RegisterOperand Src1RC> : VOPProfile<[untyped, i32, untyped, un
let OutsDPP = (outs Src0RC32:$vdst);
let InsDPP16 = (ins Src0RC32:$old, Src0RC32:$src0,
dpp_ctrl:$dpp_ctrl, DppRowMask:$row_mask,
- DppBankMask:$bank_mask, DppBoundCtrl:$bound_ctrl, DppFI:$fi);
+ DppBankMask:$bank_mask, DppBoundCtrl:$bound_ctrl, Dpp16FI:$fi);
let AsmDPP16 = getAsmDPP16<1, 1, 0>.ret;
- let InsDPP8 = (ins Src0RC32:$old, Src0RC32:$src0, dpp8:$dpp8, DppFI:$fi);
+ let InsDPP8 = (ins Src0RC32:$old, Src0RC32:$src0, dpp8:$dpp8, Dpp8FI:$fi);
let AsmDPP8 = getAsmDPP8<1, 1, 0>.ret;
let OutsVOP3DPP = (outs Src0RC64:$vdst);
@@ -749,7 +749,7 @@ class VOP1_DPP16<bits<8> op, VOP1_DPP_Pseudo ps, int subtarget, VOPProfile p = p
class VOP1_DPP16_Gen<bits<8> op, VOP1_DPP_Pseudo ps, GFXGen Gen, VOPProfile p = ps.Pfl> :
VOP1_DPP16 <op, ps, Gen.Subtarget, p> {
let AssemblerPredicate = Gen.AssemblerPredicate;
- let DecoderNamespace = "DPP"#Gen.DecoderNamespace;
+ let DecoderNamespace = Gen.DecoderNamespace;
}
class VOP1_DPP8<bits<8> op, VOP1_Pseudo ps, VOPProfile p = ps.Pfl> :
@@ -770,7 +770,7 @@ class VOP1_DPP8<bits<8> op, VOP1_Pseudo ps, VOPProfile p = ps.Pfl> :
class VOP1_DPP8_Gen<bits<8> op, VOP1_Pseudo ps, GFXGen Gen, VOPProfile p = ps.Pfl> :
VOP1_DPP8<op, ps, p> {
let AssemblerPredicate = Gen.AssemblerPredicate;
- let DecoderNamespace = "DPP8"#Gen.DecoderNamespace;
+ let DecoderNamespace = Gen.DecoderNamespace;
}
//===----------------------------------------------------------------------===//
@@ -816,7 +816,7 @@ multiclass VOP1_Real_dpp_with_name<GFXGen Gen, bits<9> op, string opName,
string asmName> {
defvar ps = !cast<VOP1_Pseudo>(opName#"_e32");
let AsmString = asmName # ps.Pfl.AsmDPP16,
- DecoderNamespace = "DPP" # Gen.DecoderNamespace #
+ DecoderNamespace = Gen.DecoderNamespace #
!if(ps.Pfl.IsRealTrue16, "", "_FAKE16") in {
defm NAME : VOP1_Real_dpp<Gen, op, opName>;
}
@@ -831,7 +831,7 @@ multiclass VOP1_Real_dpp8_with_name<GFXGen Gen, bits<9> op, string opName,
string asmName> {
defvar ps = !cast<VOP1_Pseudo>(opName#"_e32");
let AsmString = asmName # ps.Pfl.AsmDPP8,
- DecoderNamespace = "DPP8" # Gen.DecoderNamespace #
+ DecoderNamespace = Gen.DecoderNamespace #
!if(ps.Pfl.IsRealTrue16, "", "_FAKE16") in {
defm NAME : VOP1_Real_dpp8<Gen, op, opName>;
}
@@ -994,9 +994,7 @@ let AssemblerPredicate = isGFX10Only, DecoderNamespace = "GFX10" in {
}
multiclass VOP1_Real_dpp8_gfx10<bits<9> op> {
if !cast<VOP1_Pseudo>(NAME#"_e32").Pfl.HasExt32BitDPP then
- def _dpp8_gfx10 : VOP1_DPP8<op{7-0}, !cast<VOP1_Pseudo>(NAME#"_e32")> {
- let DecoderNamespace = "DPP8";
- }
+ def _dpp8_gfx10 : VOP1_DPP8<op{7-0}, !cast<VOP1_Pseudo>(NAME#"_e32")>;
}
} // End AssemblerPredicate = isGFX10Only, DecoderNamespace = "GFX10"
@@ -1192,16 +1190,14 @@ class VOP1_DPPe <bits<8> op, VOP1_DPP_Pseudo ps, VOPProfile P = ps.Pfl> :
let Inst{31-25} = 0x3f; //encoding
}
-multiclass VOP1Only_Real_vi <bits<10> op> {
- let AssemblerPredicate = isGFX8GFX9, DecoderNamespace = "GFX8" in {
+let AssemblerPredicate = isGFX8GFX9, DecoderNamespace = "GFX8" in {
+ multiclass VOP1Only_Real_vi <bits<10> op> {
def _vi :
VOP1_Real<!cast<VOP1_Pseudo>(NAME), SIEncodingFamily.VI>,
VOP1e<op{7-0}, !cast<VOP1_Pseudo>(NAME).Pfl>;
}
-}
-multiclass VOP1_Real_e32e64_vi <bits<10> op> {
- let AssemblerPredicate = isGFX8GFX9, DecoderNamespace = "GFX8" in {
+ multiclass VOP1_Real_e32e64_vi <bits<10> op> {
def _e32_vi :
VOP1_Real<!cast<VOP1_Pseudo>(NAME#"_e32"), SIEncodingFamily.VI>,
VOP1e<op{7-0}, !cast<VOP1_Pseudo>(NAME#"_e32").Pfl>;
@@ -1389,44 +1385,41 @@ def : GCNPat <
// GFX9
//===----------------------------------------------------------------------===//
-multiclass VOP1_Real_gfx9 <bits<10> op> {
- let AssemblerPredicate = isGFX9Only, DecoderNamespace = "GFX9" in {
+let AssemblerPredicate = isGFX9Only, DecoderNamespace = "GFX9" in {
+ multiclass VOP1_Real_gfx9 <bits<10> op> {
defm NAME : VOP1_Real_e32e64_vi <op>;
- }
-
- if !cast<VOP1_Pseudo>(NAME#"_e32").Pfl.HasExtSDWA9 then
- def _sdwa_gfx9 :
- VOP_SDWA9_Real <!cast<VOP1_SDWA_Pseudo>(NAME#"_sdwa")>,
- VOP1_SDWA9Ae <op{7-0}, !cast<VOP1_SDWA_Pseudo>(NAME#"_sdwa").Pfl>;
-
- if !cast<VOP1_Pseudo>(NAME#"_e32").Pfl.HasExtDPP then
- def _dpp_gfx9 :
- VOP_DPP_Real<!cast<VOP1_DPP_Pseudo>(NAME#"_dpp"), SIEncodingFamily.GFX9>,
- VOP1_DPPe<op{7-0}, !cast<VOP1_DPP_Pseudo>(NAME#"_dpp")>;
-
-}
-multiclass VOP1_Real_NoDstSel_SDWA_gfx9 <bits<10> op> {
- let AssemblerPredicate = isGFX9Only, DecoderNamespace = "GFX9" in {
- defm NAME : VOP1_Real_e32e64_vi <op>;
+ if !cast<VOP1_Pseudo>(NAME#"_e32").Pfl.HasExtSDWA9 then
+ def _sdwa_gfx9 :
+ VOP_SDWA9_Real <!cast<VOP1_SDWA_Pseudo>(NAME#"_sdwa")>,
+ VOP1_SDWA9Ae <op{7-0}, !cast<VOP1_SDWA_Pseudo>(NAME#"_sdwa").Pfl>;
+
+ if !cast<VOP1_Pseudo>(NAME#"_e32").Pfl.HasExtDPP then
+ def _dpp_gfx9 :
+ VOP_DPP_Real<!cast<VOP1_DPP_Pseudo>(NAME#"_dpp"), SIEncodingFamily.GFX9>,
+ VOP1_DPPe<op{7-0}, !cast<VOP1_DPP_Pseudo>(NAME#"_dpp")>;
}
- if !cast<VOP1_Pseudo>(NAME#"_e32").Pfl.HasExtSDWA9 then
- def _sdwa_gfx9 :
- VOP_SDWA9_Real <!cast<VOP1_SDWA_Pseudo>(NAME#"_sdwa")>,
- VOP1_SDWA9Ae <op{7-0}, !cast<VOP1_SDWA_Pseudo>(NAME#"_sdwa").Pfl> {
- let Inst{42-40} = 6;
- }
+ multiclass VOP1_Real_NoDstSel_SDWA_gfx9 <bits<10> op> {
+ defm NAME : VOP1_Real_e32e64_vi <op>;
- if !cast<VOP1_Pseudo>(NAME#"_e32").Pfl.HasExtDPP then
- def _dpp_gfx9 :
- VOP_DPP_Real<!cast<VOP1_DPP_Pseudo>(NAME#"_dpp"), SIEncodingFamily.GFX9>,
- VOP1_DPPe<op{7-0}, !cast<VOP1_DPP_Pseudo>(NAME#"_dpp")>;
+ if !cast<VOP1_Pseudo>(NAME#"_e32").Pfl.HasExtSDWA9 then
+ def _sdwa_gfx9 :
+ VOP_SDWA9_Real <!cast<VOP1_SDWA_Pseudo>(NAME#"_sdwa")>,
+ VOP1_SDWA9Ae <op{7-0}, !cast<VOP1_SDWA_Pseudo>(NAME#"_sdwa").Pfl> {
+ let Inst{42-40} = 6;
+ }
+
+ if !cast<VOP1_Pseudo>(NAME#"_e32").Pfl.HasExtDPP then
+ def _dpp_gfx9 :
+ VOP_DPP_Real<!cast<VOP1_DPP_Pseudo>(NAME#"_dpp"), SIEncodingFamily.GFX9>,
+ VOP1_DPPe<op{7-0}, !cast<VOP1_DPP_Pseudo>(NAME#"_dpp")>;
+ }
}
defm V_SCREEN_PARTITION_4SE_B32 : VOP1_Real_gfx9 <0x37>;
-let AssemblerPredicate = isGFX940Plus, DecoderNamespace = "GFX9" in
+let AssemblerPredicate = isGFX940Plus in
defm V_MOV_B64 : VOP1_Real_gfx9 <0x38>;
let OtherPredicates = [HasFP8ConversionInsts] in {
diff --git a/llvm/lib/Target/AMDGPU/VOP2Instructions.td b/llvm/lib/Target/AMDGPU/VOP2Instructions.td
index 4437d5f2a033..13fe79b47596 100644
--- a/llvm/lib/Target/AMDGPU/VOP2Instructions.td
+++ b/llvm/lib/Target/AMDGPU/VOP2Instructions.td
@@ -430,7 +430,7 @@ class VOP_MAC <ValueType vt0, ValueType vt1=vt0> : VOPProfile <[vt0, vt1, vt1, v
getVregSrcForVT<Src2VT>.ret:$src2, // stub argument
dpp_ctrl:$dpp_ctrl, DppRowMask:$row_mask,
DppBankMask:$bank_mask, DppBoundCtrl:$bound_ctrl);
- let InsDPP16 = !con(InsDPP, (ins DppFI:$fi));
+ let InsDPP16 = !con(InsDPP, (ins Dpp16FI:$fi));
let InsVOP3Base = getInsVOP3Base<Src0VOP3DPP, Src1VOP3DPP, RegisterOperand<VGPR_32>, 3,
0, HasModifiers, HasModifiers, HasOMod,
Src0ModVOP3DPP, Src1ModVOP3DPP, Src2Mod, HasOpSel>.ret;
@@ -447,7 +447,7 @@ class VOP_MAC <ValueType vt0, ValueType vt1=vt0> : VOPProfile <[vt0, vt1, vt1, v
let InsDPP8 = (ins Src0ModDPP:$src0_modifiers, Src0DPP:$src0,
Src1ModDPP:$src1_modifiers, Src1DPP:$src1,
getVregSrcForVT<Src2VT>.ret:$src2, // stub argument
- dpp8:$dpp8, DppFI:$fi);
+ dpp8:$dpp8, Dpp8FI:$fi);
let InsSDWA = (ins Src0ModSDWA:$src0_modifiers, Src0SDWA:$src0,
Src1ModSDWA:$src1_modifiers, Src1SDWA:$src1,
getVregSrcForVT<Src2VT>.ret:$src2, // stub argument
@@ -500,7 +500,7 @@ def VOP_MAC_F16_t16 : VOP_MAC <f16> {
let InsDPP8 = (ins Src0ModDPP:$src0_modifiers, Src0DPP:$src0,
Src1ModDPP:$src1_modifiers, Src1DPP:$src1,
getVregSrcForVT<Src2VT, 1/*IsTrue16*/, 1/*IsFake16*/>.ret:$src2, // stub argument
- dpp8:$dpp8, DppFI:$fi);
+ dpp8:$dpp8, Dpp8FI:$fi);
let Src2Mod = FP32InputMods; // dummy unused modifiers
let Src2RC64 = VGPRSrc_32; // stub argument
}
@@ -552,11 +552,11 @@ def VOP2b_I32_I1_I32_I32 : VOPProfile<[i32, i32, i32, untyped], /*EnableClamp=*/
Src1DPP:$src1,
dpp_ctrl:$dpp_ctrl, DppRowMask:$row_mask,
DppBankMask:$bank_mask, DppBoundCtrl:$bound_ctrl);
- let InsDPP16 = !con(InsDPP, (ins DppFI:$fi));
+ let InsDPP16 = !con(InsDPP, (ins Dpp16FI:$fi));
let InsDPP8 = (ins DstRCDPP:$old,
Src0DPP:$src0,
Src1DPP:$src1,
- dpp8:$dpp8, DppFI:$fi);
+ dpp8:$dpp8, Dpp8FI:$fi);
let Outs32 = (outs DstRC:$vdst);
let Outs64 = (outs DstRC:$vdst, VOPDstS64orS32:$sdst);
let OutsVOP3DPP = Outs64;
@@ -594,11 +594,11 @@ def VOP2b_I32_I1_I32_I32_I1 : VOPProfile<[i32, i32, i32, i1], /*EnableClamp=*/1>
Src1DPP:$src1,
dpp_ctrl:$dpp_ctrl, DppRowMask:$row_mask,
DppBankMask:$bank_mask, DppBoundCtrl:$bound_ctrl);
- let InsDPP16 = !con(InsDPP, (ins DppFI:$fi));
+ let InsDPP16 = !con(InsDPP, (ins Dpp16FI:$fi));
let InsDPP8 = (ins DstRCDPP:$old,
Src0DPP:$src0,
Src1DPP:$src1,
- dpp8:$dpp8, DppFI:$fi);
+ dpp8:$dpp8, Dpp8FI:$fi);
let HasExt = 1;
let HasExtDPP = 1;
@@ -645,11 +645,11 @@ class VOP2e_SGPR<list<ValueType> ArgVT> : VOPProfile<ArgVT> {
FPVRegInputMods:$src1_modifiers, Src1DPP:$src1,
dpp_ctrl:$dpp_ctrl, DppRowMask:$row_mask,
DppBankMask:$bank_mask, DppBoundCtrl:$bound_ctrl);
- let InsDPP16 = !con(InsDPP, (ins DppFI:$fi));
+ let InsDPP16 = !con(InsDPP, (ins Dpp16FI:$fi));
let InsDPP8 = (ins DstRCDPP:$old,
FPVRegInputMods:$src0_modifiers, Src0DPP:$src0,
FPVRegInputMods:$src1_modifiers, Src1DPP:$src1,
- dpp8:$dpp8, DppFI:$fi);
+ dpp8:$dpp8, Dpp8FI:$fi);
let Src0ModVOP3DPP = FPVRegInputMods;
let Src1ModVOP3DPP = FPVRegInputMods;
@@ -1273,7 +1273,7 @@ class VOP2_DPP16_Gen<bits<6> op, VOP2_DPP_Pseudo ps, GFXGen Gen,
VOP2_DPP16<op, ps, Gen.Subtarget, opName, p> {
let AssemblerPredicate = Gen.AssemblerPredicate;
let OtherPredicates = !if(ps.Pfl.IsRealTrue16, [UseRealTrue16Insts], []);
- let DecoderNamespace = "DPP"#Gen.DecoderNamespace#
+ let DecoderNamespace = Gen.DecoderNamespace#
!if(ps.Pfl.IsRealTrue16, "", "_FAKE16");
}
@@ -1302,7 +1302,7 @@ class VOP2_DPP8_Gen<bits<6> op, VOP2_Pseudo ps, GFXGen Gen,
VOP2_DPP8<op, ps, p> {
let AssemblerPredicate = Gen.AssemblerPredicate;
let OtherPredicates = !if(ps.Pfl.IsRealTrue16, [UseRealTrue16Insts], []);
- let DecoderNamespace = "DPP8"#Gen.DecoderNamespace#
+ let DecoderNamespace = Gen.DecoderNamespace#
!if(ps.Pfl.IsRealTrue16, "", "_FAKE16");
}
@@ -1748,9 +1748,7 @@ let AssemblerPredicate = isGFX10Only, DecoderNamespace = "GFX10" in {
}
multiclass VOP2_Real_dpp8_gfx10<bits<6> op> {
if !cast<VOP2_Pseudo>(NAME#"_e32").Pfl.HasExt32BitDPP then
- def _dpp8_gfx10 : VOP2_DPP8<op, !cast<VOP2_Pseudo>(NAME#"_e32")> {
- let DecoderNamespace = "DPP8";
- }
+ def _dpp8_gfx10 : VOP2_DPP8<op, !cast<VOP2_Pseudo>(NAME#"_e32")>;
}
//===------------------------- VOP2 (with name) -------------------------===//
@@ -1797,7 +1795,6 @@ let AssemblerPredicate = isGFX10Only, DecoderNamespace = "GFX10" in {
def _dpp8_gfx10 : VOP2_DPP8<op, !cast<VOP2_Pseudo>(opName#"_e32")> {
VOP2_Pseudo ps = !cast<VOP2_Pseudo>(opName#"_e32");
let AsmString = asmName # ps.Pfl.AsmDPP8;
- let DecoderNamespace = "DPP8";
}
}
@@ -1876,7 +1873,6 @@ let AssemblerPredicate = isGFX10Only, DecoderNamespace = "GFX10" in {
VOP2_DPP8<op, !cast<VOP2_Pseudo>(opName#"_e32")> {
string AsmDPP8 = !cast<VOP2_Pseudo>(opName#"_e32").Pfl.AsmDPP8;
let AsmString = asmName # !subst(", vcc", "", AsmDPP8);
- let DecoderNamespace = "DPP8";
}
if !cast<VOP2_Pseudo>(opName#"_e32").Pfl.HasExt32BitDPP then
def _dpp8_w32_gfx10 :
@@ -2231,7 +2227,7 @@ multiclass VOP2_SDWA9_Real <bits<6> op> {
VOP2_SDWA9Ae <op{5-0}, !cast<VOP2_SDWA_Pseudo>(NAME#"_sdwa").Pfl>;
}
-let AssemblerPredicate = isGFX8Only in {
+let AssemblerPredicate = isGFX8Only, DecoderNamespace = "GFX8" in {
multiclass VOP2be_Real_e32e64_vi_only <bits<6> op, string OpName, string AsmName> {
def _e32_vi :
@@ -2239,14 +2235,12 @@ multiclass VOP2be_Real_e32e64_vi_only <bits<6> op, string OpName, string AsmName
VOP2e<op{5-0}, !cast<VOP2_Pseudo>(OpName#"_e32").Pfl> {
VOP2_Pseudo ps = !cast<VOP2_Pseudo>(OpName#"_e32");
let AsmString = AsmName # ps.AsmOperands;
- let DecoderNamespace = "GFX8";
}
def _e64_vi :
VOP3_Real<!cast<VOP3_Pseudo>(OpName#"_e64"), SIEncodingFamily.VI>,
VOP3be_vi <{0, 1, 0, 0, op{5-0}}, !cast<VOP3_Pseudo>(OpName#"_e64").Pfl> {
VOP3_Pseudo ps = !cast<VOP3_Pseudo>(OpName#"_e64");
let AsmString = AsmName # ps.AsmOperands;
- let DecoderNamespace = "GFX8";
}
if !cast<VOP2_Pseudo>(OpName#"_e32").Pfl.HasExtSDWA then
def _sdwa_vi :
@@ -2263,9 +2257,10 @@ multiclass VOP2be_Real_e32e64_vi_only <bits<6> op, string OpName, string AsmName
let AsmString = AsmName # ps.AsmOperands;
}
}
-}
-let AssemblerPredicate = isGFX9Only in {
+} // End AssemblerPredicate = isGFX8Only, DecoderNamespace = "GFX8"
+
+let AssemblerPredicate = isGFX9Only, DecoderNamespace = "GFX9" in {
multiclass VOP2be_Real_e32e64_gfx9 <bits<6> op, string OpName, string AsmName> {
def _e32_gfx9 :
@@ -2273,14 +2268,12 @@ multiclass VOP2be_Real_e32e64_gfx9 <bits<6> op, string OpName, string AsmName> {
VOP2e<op{5-0}, !cast<VOP2_Pseudo>(OpName#"_e32").Pfl> {
VOP2_Pseudo ps = !cast<VOP2_Pseudo>(OpName#"_e32");
let AsmString = AsmName # ps.AsmOperands;
- let DecoderNamespace = "GFX9";
}
def _e64_gfx9 :
VOP3_Real<!cast<VOP3_Pseudo>(OpName#"_e64"), SIEncodingFamily.GFX9>,
VOP3be_vi <{0, 1, 0, 0, op{5-0}}, !cast<VOP3_Pseudo>(OpName#"_e64").Pfl> {
VOP3_Pseudo ps = !cast<VOP3_Pseudo>(OpName#"_e64");
let AsmString = AsmName # ps.AsmOperands;
- let DecoderNamespace = "GFX9";
}
if !cast<VOP2_Pseudo>(OpName#"_e32").Pfl.HasExtSDWA9 then
def _sdwa_gfx9 :
@@ -2295,21 +2288,16 @@ multiclass VOP2be_Real_e32e64_gfx9 <bits<6> op, string OpName, string AsmName> {
VOP2_DPPe<op, !cast<VOP2_DPP_Pseudo>(OpName#"_dpp")> {
VOP2_DPP_Pseudo ps = !cast<VOP2_DPP_Pseudo>(OpName#"_dpp");
let AsmString = AsmName # ps.AsmOperands;
- let DecoderNamespace = "GFX9";
}
}
multiclass VOP2_Real_e32e64_gfx9 <bits<6> op> {
def _e32_gfx9 :
VOP2_Real<!cast<VOP2_Pseudo>(NAME#"_e32"), SIEncodingFamily.GFX9>,
- VOP2e<op{5-0}, !cast<VOP2_Pseudo>(NAME#"_e32").Pfl>{
- let DecoderNamespace = "GFX9";
- }
+ VOP2e<op{5-0}, !cast<VOP2_Pseudo>(NAME#"_e32").Pfl>;
def _e64_gfx9 :
VOP3_Real<!cast<VOP3_Pseudo>(NAME#"_e64"), SIEncodingFamily.GFX9>,
- VOP3e_vi <{0, 1, 0, 0, op{5-0}}, !cast<VOP3_Pseudo>(NAME#"_e64").Pfl> {
- let DecoderNamespace = "GFX9";
- }
+ VOP3e_vi <{0, 1, 0, 0, op{5-0}}, !cast<VOP3_Pseudo>(NAME#"_e64").Pfl>;
if !cast<VOP2_Pseudo>(NAME#"_e32").Pfl.HasExtSDWA9 then
def _sdwa_gfx9 :
VOP_SDWA9_Real <!cast<VOP2_SDWA_Pseudo>(NAME#"_sdwa")>,
@@ -2318,12 +2306,10 @@ multiclass VOP2_Real_e32e64_gfx9 <bits<6> op> {
if !cast<VOP2_Pseudo>(NAME#"_e32").Pfl.HasExtDPP then
def _dpp_gfx9 :
VOP_DPP_Real<!cast<VOP2_DPP_Pseudo>(NAME#"_dpp"), SIEncodingFamily.GFX9>,
- VOP2_DPPe<op, !cast<VOP2_DPP_Pseudo>(NAME#"_dpp")> {
- let DecoderNamespace = "GFX9";
- }
+ VOP2_DPPe<op, !cast<VOP2_DPP_Pseudo>(NAME#"_dpp")>;
}
-} // AssemblerPredicate = isGFX9Only
+} // End AssemblerPredicate = isGFX9Only, DecoderNamespace = "GFX9"
multiclass VOP2_Real_e32e64_vi <bits<6> op> :
Base_VOP2_Real_e32e64_vi<op>, VOP2_SDWA_Real<op>, VOP2_SDWA9_Real<op> {
diff --git a/llvm/lib/Target/AMDGPU/VOP3Instructions.td b/llvm/lib/Target/AMDGPU/VOP3Instructions.td
index 396ae9c9d92e..7198a4022dae 100644
--- a/llvm/lib/Target/AMDGPU/VOP3Instructions.td
+++ b/llvm/lib/Target/AMDGPU/VOP3Instructions.td
@@ -532,11 +532,11 @@ def VOP3_CVT_PK_F8_F32_Profile : VOP3_Profile<VOP_I32_F32_F32, VOP3_OPSEL> {
FP32InputMods:$src1_modifiers, Src1VOP3DPP:$src1,
VGPR_32:$vdst_in, op_sel0:$op_sel,
dpp_ctrl:$dpp_ctrl, DppRowMask:$row_mask,
- DppBankMask:$bank_mask, DppBoundCtrl:$bound_ctrl, DppFI:$fi);
+ DppBankMask:$bank_mask, DppBoundCtrl:$bound_ctrl, Dpp16FI:$fi);
let InsVOP3DPP8 = (ins VGPR_32:$old,
FP32InputMods:$src0_modifiers, Src0VOP3DPP:$src0,
FP32InputMods:$src1_modifiers, Src1VOP3DPP:$src1,
- VGPR_32:$vdst_in, op_sel0:$op_sel, dpp8:$dpp8, DppFI:$fi);
+ VGPR_32:$vdst_in, op_sel0:$op_sel, dpp8:$dpp8, Dpp8FI:$fi);
let HasClamp = 0;
let HasExtVOP3DPP = 1;
@@ -553,12 +553,12 @@ def VOP3_CVT_SR_F8_F32_Profile : VOP3_Profile<VOPProfile<[i32, f32, i32, f32]>,
FP32InputMods:$src1_modifiers, Src1VOP3DPP:$src1,
FP32InputMods:$src2_modifiers, VGPR_32:$src2,
op_sel0:$op_sel, dpp_ctrl:$dpp_ctrl, DppRowMask:$row_mask,
- DppBankMask:$bank_mask, DppBoundCtrl:$bound_ctrl, DppFI:$fi);
+ DppBankMask:$bank_mask, DppBoundCtrl:$bound_ctrl, Dpp16FI:$fi);
let InsVOP3DPP8 = (ins VGPR_32:$old,
FP32InputMods:$src0_modifiers, Src0VOP3DPP:$src0,
FP32InputMods:$src1_modifiers, Src1VOP3DPP:$src1,
FP32InputMods:$src2_modifiers, VGPR_32:$src2,
- op_sel0:$op_sel, dpp8:$dpp8, DppFI:$fi);
+ op_sel0:$op_sel, dpp8:$dpp8, Dpp8FI:$fi);
let HasClamp = 0;
let HasSrc2 = 0;
let HasSrc2Mods = 1;
diff --git a/llvm/lib/Target/AMDGPU/VOP3PInstructions.td b/llvm/lib/Target/AMDGPU/VOP3PInstructions.td
index 74f451b6d4f7..ac3c8f95306b 100644
--- a/llvm/lib/Target/AMDGPU/VOP3PInstructions.td
+++ b/llvm/lib/Target/AMDGPU/VOP3PInstructions.td
@@ -461,13 +461,13 @@ def VOP3P_DOTF8_Profile : VOP3P_Profile<VOPProfile <[f32, i32, i32, f32]>,
let InsVOP3DPP8 = (ins DstRC:$old, VGPR_32:$src0, VRegSrc_32:$src1,
PackedF16InputMods:$src2_modifiers, VRegSrc_32:$src2,
- neg_lo0:$neg_lo, neg_hi0:$neg_hi, dpp8:$dpp8, DppFI:$fi);
+ neg_lo0:$neg_lo, neg_hi0:$neg_hi, dpp8:$dpp8, Dpp8FI:$fi);
let InsVOP3DPP16 = (ins DstRC:$old, VGPR_32:$src0, VRegSrc_32:$src1,
PackedF16InputMods:$src2_modifiers, VRegSrc_32:$src2,
neg_lo0:$neg_lo, neg_hi0:$neg_hi, dpp_ctrl:$dpp_ctrl,
DppRowMask:$row_mask, DppBankMask:$bank_mask,
- DppBoundCtrl:$bound_ctrl, DppFI:$fi);
+ DppBoundCtrl:$bound_ctrl, Dpp16FI:$fi);
}
multiclass VOP3PDOTF8Inst <string OpName, SDPatternOperator intrinsic_node> {
@@ -1353,6 +1353,7 @@ class VOP3P_DPP16<bits<7> op, VOP_DPP_Pseudo ps, int subtarget,
let AssemblerPredicate = HasDPP16;
let SubtargetPredicate = HasDPP16;
let OtherPredicates = ps.OtherPredicates;
+ let IsPacked = ps.IsPacked;
}
class VOP3P_DPP8_Base<bits<7> op, VOP_Pseudo ps, string opName = ps.OpName>
@@ -1362,6 +1363,7 @@ class VOP3P_DPP8_Base<bits<7> op, VOP_Pseudo ps, string opName = ps.OpName>
let SchedRW = ps.SchedRW;
let Uses = ps.Uses;
let OtherPredicates = ps.OtherPredicates;
+ let IsPacked = ps.IsPacked;
}
//===----------------------------------------------------------------------===//
@@ -1486,7 +1488,7 @@ multiclass VOP3P_Real_dpp<GFXGen Gen, bits<7> op, string backing_ps_name = NAME,
: VOP3P_DPP16<op, !cast<VOP_DPP_Pseudo>(backing_ps_name #"_dpp"),
Gen.Subtarget> {
let AsmString = asmName #ps.Pfl.AsmVOP3DPP16;
- let DecoderNamespace = "DPP"#Gen.DecoderNamespace;
+ let DecoderNamespace = Gen.DecoderNamespace;
let AssemblerPredicate = Gen.AssemblerPredicate;
}
}
@@ -1496,7 +1498,7 @@ multiclass VOP3P_Real_dpp8<GFXGen Gen, bits<7> op, string backing_ps_name = NAME
defvar ps = !cast<VOP3P_Pseudo>(backing_ps_name);
def _dpp8#Gen.Suffix : VOP3P_DPP8_Base<op, ps> {
let AsmString = asmName #ps.Pfl.AsmVOP3DPP8;
- let DecoderNamespace = "DPP8"#Gen.DecoderNamespace;
+ let DecoderNamespace = Gen.DecoderNamespace;
let AssemblerPredicate = Gen.AssemblerPredicate;
}
}
@@ -1613,7 +1615,7 @@ multiclass VOP3P_Real_MFMA_gfx940_aliases<string NameFrom, string NameTo, string
multiclass VOP3P_Real_MFMA_gfx940<bits<7> op, string Name = !cast<VOP3_Pseudo>(NAME#"_e64").Mnemonic,
VOP3_Pseudo PS_ACD = !cast<VOP3_Pseudo>(NAME # "_e64"),
VOP3_Pseudo PS_VCD = !cast<VOP3_Pseudo>(NAME # "_vgprcd" # "_e64")> {
- let SubtargetPredicate = isGFX940Plus,
+ let AssemblerPredicate = isGFX940Plus,
DecoderNamespace = "GFX940",
AsmString = Name # PS_ACD.AsmOperands, Constraints = "" in {
def _gfx940_acd : VOP3P_Real<PS_ACD, SIEncodingFamily.GFX940>,
diff --git a/llvm/lib/Target/AMDGPU/VOPCInstructions.td b/llvm/lib/Target/AMDGPU/VOPCInstructions.td
index fe52a0e39e4f..e5e82447d55f 100644
--- a/llvm/lib/Target/AMDGPU/VOPCInstructions.td
+++ b/llvm/lib/Target/AMDGPU/VOPCInstructions.td
@@ -222,6 +222,8 @@ class VOPCInstAlias <VOP3_Pseudo ps, Instruction inst,
let AsmVariantName = AMDGPUAsmVariants.Default;
let SubtargetPredicate = AssemblerPredicate;
+
+ string DecoderNamespace; // dummy
}
multiclass VOPCInstAliases <string old_name, string Arch, string real_name = old_name, string mnemonic_from = real_name> {
@@ -766,7 +768,7 @@ class VOPC_Class_Profile<list<SchedReadWrite> sched, ValueType src0VT, ValueType
let AsmDPP = "$src0_modifiers, $src1 $dpp_ctrl$row_mask$bank_mask$bound_ctrl";
let AsmDPP16 = AsmDPP#"$fi";
let InsDPP = (ins Src0ModDPP:$src0_modifiers, Src0DPP:$src0, Src1DPP:$src1, dpp_ctrl:$dpp_ctrl, DppRowMask:$row_mask, DppBankMask:$bank_mask, DppBoundCtrl:$bound_ctrl);
- let InsDPP16 = !con(InsDPP, (ins DppFI:$fi));
+ let InsDPP16 = !con(InsDPP, (ins Dpp16FI:$fi));
// DPP8 forbids modifiers and can inherit from VOPC_Profile
let Ins64 = (ins Src0Mod:$src0_modifiers, Src0RC64:$src0, Src1RC64:$src1);
@@ -1331,196 +1333,176 @@ class VOPC64_DPP8_NoDst<bits<10> op, VOP_Pseudo ps, string opName = ps.OpName>
//===----------------------------------------------------------------------===//
multiclass VOPC_Real_Base<GFXGen Gen, bits<9> op> {
- let AssemblerPredicate = Gen.AssemblerPredicate in {
+ let AssemblerPredicate = Gen.AssemblerPredicate, DecoderNamespace = Gen.DecoderNamespace in {
defvar ps32 = !cast<VOPC_Pseudo>(NAME#"_e32");
defvar ps64 = !cast<VOP3_Pseudo>(NAME#"_e64");
- let DecoderNamespace = Gen.DecoderNamespace in {
- def _e32#Gen.Suffix : VOPC_Real<ps32, Gen.Subtarget>,
- VOPCe<op{7-0}>;
- def _e64#Gen.Suffix : VOP3_Real<ps64, Gen.Subtarget>,
- VOP3a_gfx11_gfx12<{0, op}, ps64.Pfl> {
- // Encoding used for VOPC instructions encoded as VOP3 differs from
- // VOP3e by destination name (sdst) as VOPC doesn't have vector dst.
- bits<8> sdst;
- let Inst{7-0} = sdst;
- }
- } // End DecoderNamespace = Gen.DecoderNamespace
+ def _e32#Gen.Suffix : VOPC_Real<ps32, Gen.Subtarget>,
+ VOPCe<op{7-0}>;
+ def _e64#Gen.Suffix : VOP3_Real<ps64, Gen.Subtarget>,
+ VOP3a_gfx11_gfx12<{0, op}, ps64.Pfl> {
+ // Encoding used for VOPC instructions encoded as VOP3 differs from
+ // VOP3e by destination name (sdst) as VOPC doesn't have vector dst.
+ bits<8> sdst;
+ let Inst{7-0} = sdst;
+ }
defm : VOPCInstAliases<NAME, !substr(Gen.Suffix,1)>;
if ps32.Pfl.HasExtDPP then {
defvar psDPP = !cast<VOP_DPP_Pseudo>(NAME #"_e32" #"_dpp");
defvar AsmDPP = ps32.Pfl.AsmDPP16;
- let DecoderNamespace = "DPP"#Gen.DecoderNamespace in {
- def _e32_dpp#Gen.Suffix : VOPC_DPP16_SIMC<op{7-0}, psDPP, Gen.Subtarget>;
- def _e32_dpp_w32#Gen.Suffix : VOPC_DPP16<op{7-0}, psDPP> {
- let AsmString = psDPP.OpName # " vcc_lo, " # AsmDPP;
- let isAsmParserOnly = 1;
- let WaveSizePredicate = isWave32;
- }
- def _e32_dpp_w64#Gen.Suffix : VOPC_DPP16<op{7-0}, psDPP> {
- let AsmString = psDPP.OpName # " vcc, " # AsmDPP;
- let isAsmParserOnly = 1;
- let WaveSizePredicate = isWave64;
- }
+ def _e32_dpp#Gen.Suffix : VOPC_DPP16_SIMC<op{7-0}, psDPP, Gen.Subtarget>;
+ def _e32_dpp_w32#Gen.Suffix : VOPC_DPP16<op{7-0}, psDPP> {
+ let AsmString = psDPP.OpName # " vcc_lo, " # AsmDPP;
+ let isAsmParserOnly = 1;
+ let WaveSizePredicate = isWave32;
+ }
+ def _e32_dpp_w64#Gen.Suffix : VOPC_DPP16<op{7-0}, psDPP> {
+ let AsmString = psDPP.OpName # " vcc, " # AsmDPP;
+ let isAsmParserOnly = 1;
+ let WaveSizePredicate = isWave64;
}
defvar AsmDPP8 = ps32.Pfl.AsmDPP8;
- let DecoderNamespace = "DPP8"#Gen.DecoderNamespace in {
- def _e32_dpp8#Gen.Suffix : VOPC_DPP8<op{7-0}, ps32>;
- def _e32_dpp8_w32#Gen.Suffix : VOPC_DPP8<op{7-0}, ps32> {
- let AsmString = ps32.OpName # " vcc_lo, " # AsmDPP8;
- let isAsmParserOnly = 1;
- let WaveSizePredicate = isWave32;
- }
- def _e32_dpp8_w64#Gen.Suffix : VOPC_DPP8<op{7-0}, ps32> {
- let AsmString = ps32.OpName # " vcc, " # AsmDPP8;
- let isAsmParserOnly = 1;
- let WaveSizePredicate = isWave64;
- }
+ def _e32_dpp8#Gen.Suffix : VOPC_DPP8<op{7-0}, ps32>;
+ def _e32_dpp8_w32#Gen.Suffix : VOPC_DPP8<op{7-0}, ps32> {
+ let AsmString = ps32.OpName # " vcc_lo, " # AsmDPP8;
+ let isAsmParserOnly = 1;
+ let WaveSizePredicate = isWave32;
+ }
+ def _e32_dpp8_w64#Gen.Suffix : VOPC_DPP8<op{7-0}, ps32> {
+ let AsmString = ps32.OpName # " vcc, " # AsmDPP8;
+ let isAsmParserOnly = 1;
+ let WaveSizePredicate = isWave64;
}
}
if ps64.Pfl.HasExtVOP3DPP then {
defvar psDPP = !cast<VOP_DPP_Pseudo>(NAME #"_e64" #"_dpp");
defvar AsmDPP = ps64.Pfl.AsmVOP3DPP16;
- let DecoderNamespace = "DPP"#Gen.DecoderNamespace in {
- def _e64_dpp#Gen.Suffix : VOPC64_DPP16_Dst<{0, op}, psDPP>,
- SIMCInstr<psDPP.PseudoInstr, Gen.Subtarget>;
- def _e64_dpp_w32#Gen.Suffix : VOPC64_DPP16_Dst<{0, op}, psDPP> {
- let AsmString = psDPP.OpName # " vcc_lo, " # AsmDPP;
- let isAsmParserOnly = 1;
- let WaveSizePredicate = isWave32;
- }
- def _e64_dpp_w64#Gen.Suffix : VOPC64_DPP16_Dst<{0, op}, psDPP> {
- let AsmString = psDPP.OpName # " vcc, " # AsmDPP;
- let isAsmParserOnly = 1;
- let WaveSizePredicate = isWave64;
- }
+ def _e64_dpp#Gen.Suffix : VOPC64_DPP16_Dst<{0, op}, psDPP>,
+ SIMCInstr<psDPP.PseudoInstr, Gen.Subtarget>;
+ def _e64_dpp_w32#Gen.Suffix : VOPC64_DPP16_Dst<{0, op}, psDPP> {
+ let AsmString = psDPP.OpName # " vcc_lo, " # AsmDPP;
+ let isAsmParserOnly = 1;
+ let WaveSizePredicate = isWave32;
+ }
+ def _e64_dpp_w64#Gen.Suffix : VOPC64_DPP16_Dst<{0, op}, psDPP> {
+ let AsmString = psDPP.OpName # " vcc, " # AsmDPP;
+ let isAsmParserOnly = 1;
+ let WaveSizePredicate = isWave64;
}
defvar AsmDPP8 = ps64.Pfl.AsmVOP3DPP8;
- let DecoderNamespace = "DPP8"#Gen.DecoderNamespace in {
- def _e64_dpp8#Gen.Suffix : VOPC64_DPP8_Dst<{0, op}, ps64>;
- def _e64_dpp8_w32#Gen.Suffix : VOPC64_DPP8_Dst<{0, op}, ps64> {
- let AsmString = ps32.OpName # " vcc_lo, " # AsmDPP8;
- let isAsmParserOnly = 1;
- let WaveSizePredicate = isWave32;
- }
- def _e64_dpp8_w64#Gen.Suffix : VOPC64_DPP8_Dst<{0, op}, ps64> {
- let AsmString = ps32.OpName # " vcc, " # AsmDPP8;
- let isAsmParserOnly = 1;
- let WaveSizePredicate = isWave64;
- }
+ def _e64_dpp8#Gen.Suffix : VOPC64_DPP8_Dst<{0, op}, ps64>;
+ def _e64_dpp8_w32#Gen.Suffix : VOPC64_DPP8_Dst<{0, op}, ps64> {
+ let AsmString = ps32.OpName # " vcc_lo, " # AsmDPP8;
+ let isAsmParserOnly = 1;
+ let WaveSizePredicate = isWave32;
+ }
+ def _e64_dpp8_w64#Gen.Suffix : VOPC64_DPP8_Dst<{0, op}, ps64> {
+ let AsmString = ps32.OpName # " vcc, " # AsmDPP8;
+ let isAsmParserOnly = 1;
+ let WaveSizePredicate = isWave64;
}
}
- } // AssemblerPredicate = Gen.AssemblerPredicate
+ } // AssemblerPredicate = Gen.AssemblerPredicate, DecoderNamespace = Gen.DecoderNamespace
}
multiclass VOPC_Real_with_name<GFXGen Gen, bits<9> op, string OpName,
string asm_name, string pseudo_mnemonic = ""> {
- let AssemblerPredicate = Gen.AssemblerPredicate in {
+ let AssemblerPredicate = Gen.AssemblerPredicate, DecoderNamespace = Gen.DecoderNamespace in {
defvar ps32 = !cast<VOPC_Pseudo>(OpName#"_e32");
defvar ps64 = !cast<VOP3_Pseudo>(OpName#"_e64");
- let DecoderNamespace = Gen.DecoderNamespace in {
- def _e32#Gen.Suffix :
- // 32 and 64 bit forms of the instruction have _e32 and _e64
- // respectively appended to their assembly mnemonic.
- // _e64 is printed as part of the VOPDstS64orS32 operand, whereas
- // the destination-less 32bit forms add it to the asmString here.
- VOPC_Real<ps32, Gen.Subtarget, asm_name#"_e32">,
- VOPCe<op{7-0}>,
- MnemonicAlias<!if(!empty(pseudo_mnemonic), ps32.Mnemonic,
- pseudo_mnemonic),
- asm_name, ps32.AsmVariantName>,
- Requires<[Gen.AssemblerPredicate]>;
- def _e64#Gen.Suffix :
- VOP3_Real<ps64, Gen.Subtarget, asm_name>,
- VOP3a_gfx11_gfx12<{0, op}, ps64.Pfl>,
- MnemonicAlias<!if(!empty(pseudo_mnemonic), ps64.Mnemonic,
- pseudo_mnemonic),
- asm_name, ps64.AsmVariantName>,
- Requires<[Gen.AssemblerPredicate]> {
- // Encoding used for VOPC instructions encoded as VOP3 differs from
- // VOP3e by destination name (sdst) as VOPC doesn't have vector dst.
- bits<8> sdst;
- let Inst{7-0} = sdst;
- }
- } // End DecoderNamespace = Gen.DecoderNamespace
+ def _e32#Gen.Suffix :
+ // 32 and 64 bit forms of the instruction have _e32 and _e64
+ // respectively appended to their assembly mnemonic.
+ // _e64 is printed as part of the VOPDstS64orS32 operand, whereas
+ // the destination-less 32bit forms add it to the asmString here.
+ VOPC_Real<ps32, Gen.Subtarget, asm_name#"_e32">,
+ VOPCe<op{7-0}>,
+ MnemonicAlias<!if(!empty(pseudo_mnemonic), ps32.Mnemonic,
+ pseudo_mnemonic),
+ asm_name, ps32.AsmVariantName>,
+ Requires<[Gen.AssemblerPredicate]>;
+ def _e64#Gen.Suffix :
+ VOP3_Real<ps64, Gen.Subtarget, asm_name>,
+ VOP3a_gfx11_gfx12<{0, op}, ps64.Pfl>,
+ MnemonicAlias<!if(!empty(pseudo_mnemonic), ps64.Mnemonic,
+ pseudo_mnemonic),
+ asm_name, ps64.AsmVariantName>,
+ Requires<[Gen.AssemblerPredicate]> {
+ // Encoding used for VOPC instructions encoded as VOP3 differs from
+ // VOP3e by destination name (sdst) as VOPC doesn't have vector dst.
+ bits<8> sdst;
+ let Inst{7-0} = sdst;
+ }
defm : VOPCInstAliases<OpName, !substr(Gen.Suffix, 1), NAME, asm_name>;
if ps32.Pfl.HasExtDPP then {
defvar psDPP = !cast<VOP_DPP_Pseudo>(OpName #"_e32" #"_dpp");
defvar AsmDPP = ps32.Pfl.AsmDPP16;
- let DecoderNamespace = "DPP"#Gen.DecoderNamespace in {
- def _e32_dpp#Gen.Suffix : VOPC_DPP16_SIMC<op{7-0}, psDPP,
- Gen.Subtarget, asm_name>;
- def _e32_dpp_w32#Gen.Suffix
- : VOPC_DPP16<op{7-0}, psDPP, asm_name> {
- let AsmString = asm_name # " vcc_lo, " # AsmDPP;
- let isAsmParserOnly = 1;
- let WaveSizePredicate = isWave32;
- }
- def _e32_dpp_w64#Gen.Suffix
- : VOPC_DPP16<op{7-0}, psDPP, asm_name> {
- let AsmString = asm_name # " vcc, " # AsmDPP;
- let isAsmParserOnly = 1;
- let WaveSizePredicate = isWave64;
- }
+ def _e32_dpp#Gen.Suffix : VOPC_DPP16_SIMC<op{7-0}, psDPP,
+ Gen.Subtarget, asm_name>;
+ def _e32_dpp_w32#Gen.Suffix
+ : VOPC_DPP16<op{7-0}, psDPP, asm_name> {
+ let AsmString = asm_name # " vcc_lo, " # AsmDPP;
+ let isAsmParserOnly = 1;
+ let WaveSizePredicate = isWave32;
+ }
+ def _e32_dpp_w64#Gen.Suffix
+ : VOPC_DPP16<op{7-0}, psDPP, asm_name> {
+ let AsmString = asm_name # " vcc, " # AsmDPP;
+ let isAsmParserOnly = 1;
+ let WaveSizePredicate = isWave64;
}
defvar AsmDPP8 = ps32.Pfl.AsmDPP8;
- let DecoderNamespace = "DPP8"#Gen.DecoderNamespace in {
- def _e32_dpp8#Gen.Suffix : VOPC_DPP8<op{7-0}, ps32, asm_name>;
- def _e32_dpp8_w32#Gen.Suffix
- : VOPC_DPP8<op{7-0}, ps32, asm_name> {
- let AsmString = asm_name # " vcc_lo, " # AsmDPP8;
- let isAsmParserOnly = 1;
- let WaveSizePredicate = isWave32;
- }
- def _e32_dpp8_w64#Gen.Suffix
- : VOPC_DPP8<op{7-0}, ps32, asm_name> {
- let AsmString = asm_name # " vcc, " # AsmDPP8;
- let isAsmParserOnly = 1;
- let WaveSizePredicate = isWave64;
- }
+ def _e32_dpp8#Gen.Suffix : VOPC_DPP8<op{7-0}, ps32, asm_name>;
+ def _e32_dpp8_w32#Gen.Suffix
+ : VOPC_DPP8<op{7-0}, ps32, asm_name> {
+ let AsmString = asm_name # " vcc_lo, " # AsmDPP8;
+ let isAsmParserOnly = 1;
+ let WaveSizePredicate = isWave32;
+ }
+ def _e32_dpp8_w64#Gen.Suffix
+ : VOPC_DPP8<op{7-0}, ps32, asm_name> {
+ let AsmString = asm_name # " vcc, " # AsmDPP8;
+ let isAsmParserOnly = 1;
+ let WaveSizePredicate = isWave64;
}
}
if ps64.Pfl.HasExtVOP3DPP then {
defvar psDPP = !cast<VOP_DPP_Pseudo>(OpName #"_e64" #"_dpp");
defvar AsmDPP = ps64.Pfl.AsmVOP3DPP16;
- let DecoderNamespace = "DPP"#Gen.DecoderNamespace in {
- def _e64_dpp#Gen.Suffix : VOPC64_DPP16_Dst<{0, op}, psDPP, asm_name>,
- SIMCInstr<psDPP.PseudoInstr, Gen.Subtarget>;
- def _e64_dpp_w32#Gen.Suffix
- : VOPC64_DPP16_Dst<{0, op}, psDPP, asm_name> {
- let AsmString = asm_name # " vcc_lo, " # AsmDPP;
- let isAsmParserOnly = 1;
- let WaveSizePredicate = isWave32;
- }
- def _e64_dpp_w64#Gen.Suffix
- : VOPC64_DPP16_Dst<{0, op}, psDPP, asm_name> {
- let AsmString = asm_name # " vcc, " # AsmDPP;
- let isAsmParserOnly = 1;
- let WaveSizePredicate = isWave64;
- }
+ def _e64_dpp#Gen.Suffix : VOPC64_DPP16_Dst<{0, op}, psDPP, asm_name>,
+ SIMCInstr<psDPP.PseudoInstr, Gen.Subtarget>;
+ def _e64_dpp_w32#Gen.Suffix
+ : VOPC64_DPP16_Dst<{0, op}, psDPP, asm_name> {
+ let AsmString = asm_name # " vcc_lo, " # AsmDPP;
+ let isAsmParserOnly = 1;
+ let WaveSizePredicate = isWave32;
+ }
+ def _e64_dpp_w64#Gen.Suffix
+ : VOPC64_DPP16_Dst<{0, op}, psDPP, asm_name> {
+ let AsmString = asm_name # " vcc, " # AsmDPP;
+ let isAsmParserOnly = 1;
+ let WaveSizePredicate = isWave64;
}
defvar AsmDPP8 = ps64.Pfl.AsmVOP3DPP8;
- let DecoderNamespace = "DPP8"#Gen.DecoderNamespace in {
- def _e64_dpp8#Gen.Suffix : VOPC64_DPP8_Dst<{0, op}, ps64, asm_name>;
- def _e64_dpp8_w32#Gen.Suffix
- : VOPC64_DPP8_Dst<{0, op}, ps64, asm_name> {
- let AsmString = asm_name # " vcc_lo, " # AsmDPP8;
- let isAsmParserOnly = 1;
- let WaveSizePredicate = isWave32;
- }
- def _e64_dpp8_w64#Gen.Suffix
- : VOPC64_DPP8_Dst<{0, op}, ps64, asm_name> {
- let AsmString = asm_name # " vcc, " # AsmDPP8;
- let isAsmParserOnly = 1;
- let WaveSizePredicate = isWave64;
- }
+ def _e64_dpp8#Gen.Suffix : VOPC64_DPP8_Dst<{0, op}, ps64, asm_name>;
+ def _e64_dpp8_w32#Gen.Suffix
+ : VOPC64_DPP8_Dst<{0, op}, ps64, asm_name> {
+ let AsmString = asm_name # " vcc_lo, " # AsmDPP8;
+ let isAsmParserOnly = 1;
+ let WaveSizePredicate = isWave32;
+ }
+ def _e64_dpp8_w64#Gen.Suffix
+ : VOPC64_DPP8_Dst<{0, op}, ps64, asm_name> {
+ let AsmString = asm_name # " vcc, " # AsmDPP8;
+ let isAsmParserOnly = 1;
+ let WaveSizePredicate = isWave64;
}
}
- } // AssemblerPredicate = Gen.AssemblerPredicate
+ } // End AssemblerPredicate = Gen.AssemblerPredicate, DecoderNamespace = Gen.DecoderNamespace
}
multiclass VOPC_Real_t16<GFXGen Gen, bits<9> op, string asm_name,
@@ -1528,123 +1510,103 @@ multiclass VOPC_Real_t16<GFXGen Gen, bits<9> op, string asm_name,
VOPC_Real_with_name<Gen, op, OpName, asm_name, pseudo_mnemonic>;
multiclass VOPCX_Real<GFXGen Gen, bits<9> op> {
- let AssemblerPredicate = Gen.AssemblerPredicate in {
+ let AssemblerPredicate = Gen.AssemblerPredicate, DecoderNamespace = Gen.DecoderNamespace in {
defvar ps32 = !cast<VOPC_Pseudo>(NAME#"_nosdst_e32");
defvar ps64 = !cast<VOP3_Pseudo>(NAME#"_nosdst_e64");
- let DecoderNamespace = Gen.DecoderNamespace in {
- def _e32#Gen.Suffix :
- VOPC_Real<ps32, Gen.Subtarget>,
- VOPCe<op{7-0}> {
- let AsmString = !subst("_nosdst", "", ps32.PseudoInstr)
- # " " # ps32.AsmOperands;
- }
- def _e64#Gen.Suffix :
- VOP3_Real<ps64, Gen.Subtarget>,
- VOP3a_gfx11_gfx12<{0, op}, ps64.Pfl> {
- let Inst{7-0} = ?; // sdst
- let AsmString = !subst("_nosdst", "", ps64.Mnemonic)
- # "{_e64} " # ps64.AsmOperands;
- }
- } // End DecoderNamespace = Gen.DecoderNamespace
+ def _e32#Gen.Suffix :
+ VOPC_Real<ps32, Gen.Subtarget>,
+ VOPCe<op{7-0}> {
+ let AsmString = !subst("_nosdst", "", ps32.PseudoInstr)
+ # " " # ps32.AsmOperands;
+ }
+ def _e64#Gen.Suffix :
+ VOP3_Real<ps64, Gen.Subtarget>,
+ VOP3a_gfx11_gfx12<{0, op}, ps64.Pfl> {
+ let Inst{7-0} = ?; // sdst
+ let AsmString = !subst("_nosdst", "", ps64.Mnemonic)
+ # "{_e64} " # ps64.AsmOperands;
+ }
defm : VOPCXInstAliases<NAME, !substr(Gen.Suffix, 1)>;
if ps32.Pfl.HasExtDPP then {
defvar psDPP = !cast<VOP_DPP_Pseudo>(NAME #"_nosdst_e32" #"_dpp");
defvar AsmDPP = ps32.Pfl.AsmDPP16;
- let DecoderNamespace = "DPP"#Gen.DecoderNamespace in {
- def _e32_dpp#Gen.Suffix
- : VOPC_DPP16_SIMC<op{7-0}, psDPP, Gen.Subtarget> {
- let AsmString = !subst("_nosdst", "", psDPP.OpName) # " " # AsmDPP;
- }
+ def _e32_dpp#Gen.Suffix
+ : VOPC_DPP16_SIMC<op{7-0}, psDPP, Gen.Subtarget> {
+ let AsmString = !subst("_nosdst", "", psDPP.OpName) # " " # AsmDPP;
}
defvar AsmDPP8 = ps32.Pfl.AsmDPP8;
- let DecoderNamespace = "DPP8"#Gen.DecoderNamespace in {
- def _e32_dpp8#Gen.Suffix : VOPC_DPP8<op{7-0}, ps32> {
- let AsmString = !subst("_nosdst", "", ps32.OpName) # " " # AsmDPP8;
- }
+ def _e32_dpp8#Gen.Suffix : VOPC_DPP8<op{7-0}, ps32> {
+ let AsmString = !subst("_nosdst", "", ps32.OpName) # " " # AsmDPP8;
}
}
if ps64.Pfl.HasExtVOP3DPP then {
defvar psDPP = !cast<VOP_DPP_Pseudo>(NAME #"_nosdst_e64" #"_dpp");
defvar AsmDPP = ps64.Pfl.AsmVOP3DPP16;
- let DecoderNamespace = "DPP"#Gen.DecoderNamespace in {
- def _e64_dpp#Gen.Suffix
- : VOPC64_DPP16_NoDst<{0, op}, psDPP>,
- SIMCInstr<psDPP.PseudoInstr, Gen.Subtarget> {
- let AsmString = !subst("_nosdst", "", psDPP.OpName)
- # "{_e64_dpp} " # AsmDPP;
- }
+ def _e64_dpp#Gen.Suffix
+ : VOPC64_DPP16_NoDst<{0, op}, psDPP>,
+ SIMCInstr<psDPP.PseudoInstr, Gen.Subtarget> {
+ let AsmString = !subst("_nosdst", "", psDPP.OpName)
+ # "{_e64_dpp} " # AsmDPP;
}
defvar AsmDPP8 = ps64.Pfl.AsmVOP3DPP8;
- let DecoderNamespace = "DPP8"#Gen.DecoderNamespace in {
- def _e64_dpp8#Gen.Suffix : VOPC64_DPP8_NoDst<{0, op}, ps64> {
- let AsmString = !subst("_nosdst", "", ps64.OpName)
- # "{_e64_dpp} " # AsmDPP8;
- }
+ def _e64_dpp8#Gen.Suffix : VOPC64_DPP8_NoDst<{0, op}, ps64> {
+ let AsmString = !subst("_nosdst", "", ps64.OpName)
+ # "{_e64_dpp} " # AsmDPP8;
}
}
- } // AssemblerPredicate = Gen.AssemblerPredicate
+ } // End AssemblerPredicate = Gen.AssemblerPredicate, DecoderNamespace = Gen.DecoderNamespace
}
multiclass VOPCX_Real_with_name<GFXGen Gen, bits<9> op, string OpName,
string asm_name, string pseudo_mnemonic = ""> {
- let AssemblerPredicate = Gen.AssemblerPredicate in {
+ let AssemblerPredicate = Gen.AssemblerPredicate, DecoderNamespace = Gen.DecoderNamespace in {
defvar ps32 = !cast<VOPC_Pseudo>(OpName#"_nosdst_e32");
defvar ps64 = !cast<VOP3_Pseudo>(OpName#"_nosdst_e64");
- let DecoderNamespace = Gen.DecoderNamespace in {
- def _e32#Gen.Suffix
- : VOPC_Real<ps32, Gen.Subtarget, asm_name>,
- MnemonicAlias<!if(!empty(pseudo_mnemonic), !subst("_nosdst", "", ps32.Mnemonic),
- pseudo_mnemonic),
- asm_name, ps32.AsmVariantName>,
- Requires<[Gen.AssemblerPredicate]>,
- VOPCe<op{7-0}> {
- let AsmString = asm_name # "{_e32} " # ps32.AsmOperands;
- }
- def _e64#Gen.Suffix
- : VOP3_Real<ps64, Gen.Subtarget, asm_name>,
- MnemonicAlias<!if(!empty(pseudo_mnemonic), !subst("_nosdst", "", ps64.Mnemonic),
- pseudo_mnemonic),
- asm_name, ps64.AsmVariantName>,
- Requires<[Gen.AssemblerPredicate]>,
- VOP3a_gfx11_gfx12<{0, op}, ps64.Pfl> {
- let Inst{7-0} = ? ; // sdst
- let AsmString = asm_name # "{_e64} " # ps64.AsmOperands;
- }
- } // End DecoderNamespace = Gen.DecoderNamespace
+ def _e32#Gen.Suffix
+ : VOPC_Real<ps32, Gen.Subtarget, asm_name>,
+ MnemonicAlias<!if(!empty(pseudo_mnemonic), !subst("_nosdst", "", ps32.Mnemonic),
+ pseudo_mnemonic),
+ asm_name, ps32.AsmVariantName>,
+ Requires<[Gen.AssemblerPredicate]>,
+ VOPCe<op{7-0}> {
+ let AsmString = asm_name # "{_e32} " # ps32.AsmOperands;
+ }
+ def _e64#Gen.Suffix
+ : VOP3_Real<ps64, Gen.Subtarget, asm_name>,
+ MnemonicAlias<!if(!empty(pseudo_mnemonic), !subst("_nosdst", "", ps64.Mnemonic),
+ pseudo_mnemonic),
+ asm_name, ps64.AsmVariantName>,
+ Requires<[Gen.AssemblerPredicate]>,
+ VOP3a_gfx11_gfx12<{0, op}, ps64.Pfl> {
+ let Inst{7-0} = ? ; // sdst
+ let AsmString = asm_name # "{_e64} " # ps64.AsmOperands;
+ }
defm : VOPCXInstAliases<OpName, !substr(Gen.Suffix, 1), NAME, asm_name>;
if ps32.Pfl.HasExtDPP then {
defvar psDPP = !cast<VOP_DPP_Pseudo>(OpName#"_nosdst_e32"#"_dpp");
- let DecoderNamespace = "DPP"#Gen.DecoderNamespace in {
- def _e32_dpp#Gen.Suffix : VOPC_DPP16_SIMC<op{7-0}, psDPP,
- Gen.Subtarget, asm_name>;
- }
- let DecoderNamespace = "DPP8"#Gen.DecoderNamespace in {
- def _e32_dpp8#Gen.Suffix : VOPC_DPP8<op{7-0}, ps32, asm_name>;
- }
+ def _e32_dpp#Gen.Suffix : VOPC_DPP16_SIMC<op{7-0}, psDPP,
+ Gen.Subtarget, asm_name>;
+ def _e32_dpp8#Gen.Suffix : VOPC_DPP8<op{7-0}, ps32, asm_name>;
}
if ps64.Pfl.HasExtVOP3DPP then {
defvar psDPP = !cast<VOP_DPP_Pseudo>(OpName#"_nosdst_e64"#"_dpp");
defvar AsmDPP = ps64.Pfl.AsmVOP3DPP16;
- let DecoderNamespace = "DPP"#Gen.DecoderNamespace in {
- def _e64_dpp#Gen.Suffix
- : VOPC64_DPP16_NoDst<{0, op}, psDPP, asm_name>,
- SIMCInstr<psDPP.PseudoInstr, Gen.Subtarget> {
- let AsmString = asm_name # "{_e64_dpp} " # AsmDPP;
- }
+ def _e64_dpp#Gen.Suffix
+ : VOPC64_DPP16_NoDst<{0, op}, psDPP, asm_name>,
+ SIMCInstr<psDPP.PseudoInstr, Gen.Subtarget> {
+ let AsmString = asm_name # "{_e64_dpp} " # AsmDPP;
}
defvar AsmDPP8 = ps64.Pfl.AsmVOP3DPP8;
- let DecoderNamespace = "DPP8"#Gen.DecoderNamespace in {
- def _e64_dpp8#Gen.Suffix : VOPC64_DPP8_NoDst<{0, op}, ps64, asm_name> {
- let AsmString = asm_name # "{_e64_dpp} " # AsmDPP8;
- }
+ def _e64_dpp8#Gen.Suffix : VOPC64_DPP8_NoDst<{0, op}, ps64, asm_name> {
+ let AsmString = asm_name # "{_e64_dpp} " # AsmDPP8;
}
}
- } // AssemblerPredicate = Gen.AssemblerPredicate
+ } // End AssemblerPredicate = Gen.AssemblerPredicate, DecoderNamespace = Gen.DecoderNamespace
}
multiclass VOPCX_Real_t16<GFXGen Gen, bits<9> op, string asm_name,
@@ -1873,21 +1835,19 @@ defm V_CMPX_CLASS_F64 : VOPCX_Real_gfx11_gfx12<0x0ff>;
// GFX10.
//===----------------------------------------------------------------------===//
-let AssemblerPredicate = isGFX10Only in {
+let AssemblerPredicate = isGFX10Only, DecoderNamespace = "GFX10" in {
multiclass VOPC_Real_gfx10<bits<9> op> {
- let DecoderNamespace = "GFX10" in {
- def _e32_gfx10 :
- VOPC_Real<!cast<VOPC_Pseudo>(NAME#"_e32"), SIEncodingFamily.GFX10>,
- VOPCe<op{7-0}>;
- def _e64_gfx10 :
- VOP3_Real<!cast<VOP3_Pseudo>(NAME#"_e64"), SIEncodingFamily.GFX10>,
- VOP3a_gfx10<{0, op}, !cast<VOP3_Pseudo>(NAME#"_e64").Pfl> {
- // Encoding used for VOPC instructions encoded as VOP3 differs from
- // VOP3e by destination name (sdst) as VOPC doesn't have vector dst.
- bits<8> sdst;
- let Inst{7-0} = sdst;
- }
- } // End DecoderNamespace = "GFX10"
+ def _e32_gfx10 :
+ VOPC_Real<!cast<VOPC_Pseudo>(NAME#"_e32"), SIEncodingFamily.GFX10>,
+ VOPCe<op{7-0}>;
+ def _e64_gfx10 :
+ VOP3_Real<!cast<VOP3_Pseudo>(NAME#"_e64"), SIEncodingFamily.GFX10>,
+ VOP3a_gfx10<{0, op}, !cast<VOP3_Pseudo>(NAME#"_e64").Pfl> {
+ // Encoding used for VOPC instructions encoded as VOP3 differs from
+ // VOP3e by destination name (sdst) as VOPC doesn't have vector dst.
+ bits<8> sdst;
+ let Inst{7-0} = sdst;
+ }
if !cast<VOPC_Pseudo>(NAME#"_e32").Pfl.HasExtSDWA9 then
def _sdwa_gfx10 :
@@ -1898,22 +1858,20 @@ let AssemblerPredicate = isGFX10Only in {
}
multiclass VOPCX_Real_gfx10<bits<9> op> {
- let DecoderNamespace = "GFX10" in {
- def _e32_gfx10 :
- VOPC_Real<!cast<VOPC_Pseudo>(NAME#"_nosdst_e32"), SIEncodingFamily.GFX10>,
- VOPCe<op{7-0}> {
- let AsmString = !subst("_nosdst", "", !cast<VOPC_Pseudo>(NAME#"_nosdst_e32").PseudoInstr)
- # " " # !cast<VOPC_Pseudo>(NAME#"_nosdst_e32").AsmOperands;
- }
-
- def _e64_gfx10 :
- VOP3_Real<!cast<VOP3_Pseudo>(NAME#"_nosdst_e64"), SIEncodingFamily.GFX10>,
- VOP3a_gfx10<{0, op}, !cast<VOP3_Pseudo>(NAME#"_nosdst_e64").Pfl> {
- let Inst{7-0} = ?; // sdst
- let AsmString = !subst("_nosdst", "", !cast<VOP3_Pseudo>(NAME#"_nosdst_e64").Mnemonic)
- # "{_e64} " # !cast<VOP3_Pseudo>(NAME#"_nosdst_e64").AsmOperands;
- }
- } // End DecoderNamespace = "GFX10"
+ def _e32_gfx10 :
+ VOPC_Real<!cast<VOPC_Pseudo>(NAME#"_nosdst_e32"), SIEncodingFamily.GFX10>,
+ VOPCe<op{7-0}> {
+ let AsmString = !subst("_nosdst", "", !cast<VOPC_Pseudo>(NAME#"_nosdst_e32").PseudoInstr)
+ # " " # !cast<VOPC_Pseudo>(NAME#"_nosdst_e32").AsmOperands;
+ }
+
+ def _e64_gfx10 :
+ VOP3_Real<!cast<VOP3_Pseudo>(NAME#"_nosdst_e64"), SIEncodingFamily.GFX10>,
+ VOP3a_gfx10<{0, op}, !cast<VOP3_Pseudo>(NAME#"_nosdst_e64").Pfl> {
+ let Inst{7-0} = ?; // sdst
+ let AsmString = !subst("_nosdst", "", !cast<VOP3_Pseudo>(NAME#"_nosdst_e64").Mnemonic)
+ # "{_e64} " # !cast<VOP3_Pseudo>(NAME#"_nosdst_e64").AsmOperands;
+ }
if !cast<VOPC_Pseudo>(NAME#"_nosdst_e32").Pfl.HasExtSDWA9 then
def _sdwa_gfx10 :
@@ -1925,7 +1883,7 @@ let AssemblerPredicate = isGFX10Only in {
defm : VOPCXInstAliases<NAME, "gfx10">;
}
-} // End AssemblerPredicate = isGFX10Only
+} // End AssemblerPredicate = isGFX10Only, DecoderNamespace = "GFX10"
defm V_CMP_LT_I16 : VOPC_Real_gfx10<0x089>;
defm V_CMP_EQ_I16 : VOPC_Real_gfx10<0x08a>;
@@ -1990,25 +1948,23 @@ defm V_CMPX_TRU_F16 : VOPCX_Real_gfx10<0x0ff>;
// GFX6, GFX7, GFX10.
//===----------------------------------------------------------------------===//
-let AssemblerPredicate = isGFX6GFX7 in {
+let AssemblerPredicate = isGFX6GFX7, DecoderNamespace = "GFX6GFX7" in {
multiclass VOPC_Real_gfx6_gfx7<bits<9> op> {
- let DecoderNamespace = "GFX6GFX7" in {
- def _e32_gfx6_gfx7 :
- VOPC_Real<!cast<VOPC_Pseudo>(NAME#"_e32"), SIEncodingFamily.SI>,
- VOPCe<op{7-0}>;
- def _e64_gfx6_gfx7 :
- VOP3_Real<!cast<VOP3_Pseudo>(NAME#"_e64"), SIEncodingFamily.SI>,
- VOP3a_gfx6_gfx7<op, !cast<VOP3_Pseudo>(NAME#"_e64").Pfl> {
- // Encoding used for VOPC instructions encoded as VOP3 differs from
- // VOP3e by destination name (sdst) as VOPC doesn't have vector dst.
- bits<8> sdst;
- let Inst{7-0} = sdst;
- }
- } // End DecoderNamespace = "GFX6GFX7"
+ def _e32_gfx6_gfx7 :
+ VOPC_Real<!cast<VOPC_Pseudo>(NAME#"_e32"), SIEncodingFamily.SI>,
+ VOPCe<op{7-0}>;
+ def _e64_gfx6_gfx7 :
+ VOP3_Real<!cast<VOP3_Pseudo>(NAME#"_e64"), SIEncodingFamily.SI>,
+ VOP3a_gfx6_gfx7<op, !cast<VOP3_Pseudo>(NAME#"_e64").Pfl> {
+ // Encoding used for VOPC instructions encoded as VOP3 differs from
+ // VOP3e by destination name (sdst) as VOPC doesn't have vector dst.
+ bits<8> sdst;
+ let Inst{7-0} = sdst;
+ }
defm : VOPCInstAliases<NAME, "gfx6_gfx7">;
}
-} // End AssemblerPredicate = isGFX6GFX7
+} // End AssemblerPredicate = isGFX6GFX7, DecoderNamespace = "GFX6GFX7"
multiclass VOPC_Real_gfx6_gfx7_gfx10<bits<9> op> :
VOPC_Real_gfx6_gfx7<op>, VOPC_Real_gfx10<op>;
diff --git a/llvm/lib/Target/AMDGPU/VOPInstructions.td b/llvm/lib/Target/AMDGPU/VOPInstructions.td
index 801afabbdb14..80d7d96a5e3c 100644
--- a/llvm/lib/Target/AMDGPU/VOPInstructions.td
+++ b/llvm/lib/Target/AMDGPU/VOPInstructions.td
@@ -818,6 +818,7 @@ class VOP_DPP_Pseudo <string OpName, VOPProfile P, list<dag> pattern=[],
let VALU = 1;
let DPP = 1;
let Size = 8;
+ let IsPacked = P.IsPacked;
let ReadsModeReg = !or(P.DstVT.isFP, P.Src0VT.isFP);
@@ -835,7 +836,7 @@ class VOP_DPP_Pseudo <string OpName, VOPProfile P, list<dag> pattern=[],
AMDGPUAsmVariants.Disable);
let Constraints = !if(P.NumSrcArgs, P.TieRegDPP # " = $vdst", "");
let DisableEncoding = !if(P.NumSrcArgs, P.TieRegDPP, "");
- let DecoderNamespace = "DPP";
+ let DecoderNamespace = "GFX8";
VOPProfile Pfl = P;
}
@@ -906,7 +907,7 @@ class VOP_DPP_Base <string OpName, VOPProfile P,
AMDGPUAsmVariants.Disable);
let Constraints = !if(P.NumSrcArgs, P.TieRegDPP # " = $vdst", "");
let DisableEncoding = !if(P.NumSrcArgs, P.TieRegDPP, "");
- let DecoderNamespace = "DPP";
+ let DecoderNamespace = "GFX8";
}
class VOP_DPP <string OpName, VOPProfile P, bit IsDPP16,
@@ -1350,7 +1351,7 @@ class VOP3_DPP16_Gen<bits<10> op, VOP_DPP_Pseudo ps, GFXGen Gen,
VOP3_DPP16 <op, ps, Gen.Subtarget, opName> {
let AssemblerPredicate = Gen.AssemblerPredicate;
let True16Predicate = !if(ps.Pfl.IsRealTrue16, UseRealTrue16Insts, NoTrue16Predicate);
- let DecoderNamespace = "DPP"#Gen.DecoderNamespace#
+ let DecoderNamespace = Gen.DecoderNamespace#
!if(ps.Pfl.IsRealTrue16, "", "_FAKE16");
}
@@ -1463,7 +1464,7 @@ multiclass VOP3_Real_dpp_with_name<GFXGen Gen, bits<10> op, string opName,
multiclass VOP3_Real_dpp8_Base<GFXGen Gen, bits<10> op, string opName = NAME> {
defvar ps = !cast<VOP3_Pseudo>(opName#"_e64");
def _e64_dpp8#Gen.Suffix : Base_VOP3_DPP8<op, ps> {
- let DecoderNamespace = "DPP8"#Gen.DecoderNamespace;
+ let DecoderNamespace = Gen.DecoderNamespace;
let AssemblerPredicate = Gen.AssemblerPredicate;
}
}
@@ -1473,7 +1474,7 @@ multiclass VOP3Dot_Real_dpp8_Base<GFXGen Gen, bits<10> op, string opName = NAME>
def _e64_dpp8#Gen.Suffix : Base_VOP3_DPP8<op, ps> {
let Inst{11} = ?;
let Inst{12} = ?;
- let DecoderNamespace = "DPP8"#Gen.DecoderNamespace;
+ let DecoderNamespace = Gen.DecoderNamespace;
let AssemblerPredicate = Gen.AssemblerPredicate;
}
}
@@ -1482,7 +1483,7 @@ multiclass VOP3_Real_dpp8_with_name<GFXGen Gen, bits<10> op, string opName,
string asmName> {
defvar ps = !cast<VOP3_Pseudo>(opName#"_e64");
let AsmString = asmName # ps.Pfl.AsmVOP3DPP8,
- DecoderNamespace = "DPP8"#Gen.DecoderNamespace#
+ DecoderNamespace = Gen.DecoderNamespace#
!if(ps.Pfl.IsRealTrue16, "", "_FAKE16"),
True16Predicate = !if(ps.Pfl.IsRealTrue16, UseRealTrue16Insts,
NoTrue16Predicate) in {
@@ -1505,7 +1506,7 @@ multiclass VOP3be_Real_dpp<GFXGen Gen, bits<10> op, string opName,
defvar dpp_ps = !cast<VOP_DPP_Pseudo>(opName #"_e64" #"_dpp");
def _e64_dpp#Gen.Suffix : Base_VOP3b_DPP16<op, dpp_ps, asmName>,
SIMCInstr<dpp_ps.PseudoInstr, Gen.Subtarget> {
- let DecoderNamespace = "DPP"#Gen.DecoderNamespace;
+ let DecoderNamespace = Gen.DecoderNamespace;
let AssemblerPredicate = Gen.AssemblerPredicate;
}
}
@@ -1514,7 +1515,7 @@ multiclass VOP3be_Real_dpp8<GFXGen Gen, bits<10> op, string opName,
string asmName> {
defvar ps = !cast<VOP3_Pseudo>(opName #"_e64");
def _e64_dpp8#Gen.Suffix : VOP3b_DPP8_Base<op, ps, asmName> {
- let DecoderNamespace = "DPP8"#Gen.DecoderNamespace;
+ let DecoderNamespace = Gen.DecoderNamespace;
let AssemblerPredicate = Gen.AssemblerPredicate;
}
}
diff --git a/llvm/lib/Target/ARM/ARMLegalizerInfo.cpp b/llvm/lib/Target/ARM/ARMLegalizerInfo.cpp
index c5199aab7527..00a29f8ecb23 100644
--- a/llvm/lib/Target/ARM/ARMLegalizerInfo.cpp
+++ b/llvm/lib/Target/ARM/ARMLegalizerInfo.cpp
@@ -25,42 +25,6 @@
using namespace llvm;
using namespace LegalizeActions;
-/// FIXME: The following static functions are SizeChangeStrategy functions
-/// that are meant to temporarily mimic the behaviour of the old legalization
-/// based on doubling/halving non-legal types as closely as possible. This is
-/// not entirly possible as only legalizing the types that are exactly a power
-/// of 2 times the size of the legal types would require specifying all those
-/// sizes explicitly.
-/// In practice, not specifying those isn't a problem, and the below functions
-/// should disappear quickly as we add support for legalizing non-power-of-2
-/// sized types further.
-static void addAndInterleaveWithUnsupported(
- LegacyLegalizerInfo::SizeAndActionsVec &result,
- const LegacyLegalizerInfo::SizeAndActionsVec &v) {
- for (unsigned i = 0; i < v.size(); ++i) {
- result.push_back(v[i]);
- if (i + 1 < v[i].first && i + 1 < v.size() &&
- v[i + 1].first != v[i].first + 1)
- result.push_back({v[i].first + 1, LegacyLegalizeActions::Unsupported});
- }
-}
-
-static LegacyLegalizerInfo::SizeAndActionsVec
-widen_8_16(const LegacyLegalizerInfo::SizeAndActionsVec &v) {
- assert(v.size() >= 1);
- assert(v[0].first > 17);
- LegacyLegalizerInfo::SizeAndActionsVec result = {
- {1, LegacyLegalizeActions::Unsupported},
- {8, LegacyLegalizeActions::WidenScalar},
- {9, LegacyLegalizeActions::Unsupported},
- {16, LegacyLegalizeActions::WidenScalar},
- {17, LegacyLegalizeActions::Unsupported}};
- addAndInterleaveWithUnsupported(result, v);
- auto Largest = result.back().first;
- result.push_back({Largest + 1, LegacyLegalizeActions::Unsupported});
- return result;
-}
-
static bool AEABI(const ARMSubtarget &ST) {
return ST.isTargetAEABI() || ST.isTargetGNUAEABI() || ST.isTargetMuslAEABI();
}
@@ -118,15 +82,14 @@ ARMLegalizerInfo::ARMLegalizerInfo(const ARMSubtarget &ST) {
.libcallFor({s32})
.clampScalar(0, s32, s32);
- for (unsigned Op : {G_SREM, G_UREM}) {
- LegacyInfo.setLegalizeScalarToDifferentSizeStrategy(Op, 0, widen_8_16);
- if (HasHWDivide)
- LegacyInfo.setAction({Op, s32}, LegacyLegalizeActions::Lower);
- else if (AEABI(ST))
- LegacyInfo.setAction({Op, s32}, LegacyLegalizeActions::Custom);
- else
- LegacyInfo.setAction({Op, s32}, LegacyLegalizeActions::Libcall);
- }
+ auto &REMBuilder =
+ getActionDefinitionsBuilder({G_SREM, G_UREM}).minScalar(0, s32);
+ if (HasHWDivide)
+ REMBuilder.lowerFor({s32});
+ else if (AEABI(ST))
+ REMBuilder.customFor({s32});
+ else
+ REMBuilder.libcallFor({s32});
getActionDefinitionsBuilder(G_INTTOPTR)
.legalFor({{p0, s32}})
@@ -202,8 +165,7 @@ ARMLegalizerInfo::ARMLegalizerInfo(const ARMSubtarget &ST) {
LoadStoreBuilder.maxScalar(0, s32);
- for (auto Ty : {s32, s64})
- LegacyInfo.setAction({G_FNEG, Ty}, LegacyLegalizeActions::Lower);
+ getActionDefinitionsBuilder(G_FNEG).lowerFor({s32, s64});
getActionDefinitionsBuilder(G_FCONSTANT).customFor({s32, s64});
diff --git a/llvm/lib/Target/DirectX/DXIL.td b/llvm/lib/Target/DirectX/DXIL.td
index 52158139a258..8a3454c89542 100644
--- a/llvm/lib/Target/DirectX/DXIL.td
+++ b/llvm/lib/Target/DirectX/DXIL.td
@@ -35,30 +35,18 @@ def BinaryUintCategory : DXILOpCategory<"Binary uint">;
def UnaryFloatCategory : DXILOpCategory<"Unary float">;
def ComputeIDCategory : DXILOpCategory<"Compute/Mesh/Amplification shader">;
-// Following are the scalar types supported by DXIL operations and are synonymous
-// to llvm_*_ty defined for readability and ease of use in the context of this file.
-
-def voidTy : LLVMType<isVoid>;
-
-// Floating point types
-def f16Ty : LLVMType<f16>;
-def f32Ty : LLVMType<f32>;
-def f64Ty : LLVMType<f64>;
-
-// Integer types
-def i1Ty : LLVMType<i1>;
-def i8Ty : LLVMType<i8>;
-def i16Ty : LLVMType<i16>;
-def i32Ty : LLVMType<i32>;
-def i64Ty : LLVMType<i64>;
+// Represented as an any-pointer type, with the option of changing to a
+// qualified pointer type with the address space specified.
+def dxil_handle_ty : LLVMAnyPointerType;
+def dxil_cbuffer_ty : LLVMAnyPointerType;
+def dxil_resource_ty : LLVMAnyPointerType;
// The parameter description for a DXIL operation
-class DXILOpParameter<int pos, string type, string name, string doc,
+class DXILOpParameter<int pos, LLVMType type, string name, string doc,
bit isConstant = 0, string enumName = "",
int maxValue = 0> {
int Pos = pos; // Position in parameter list
- string Type = type; // LLVM type name, $o for overload, $r for resource
- // type, $cb for legacy cbuffer, $u4 for u4 struct
+ LLVMType ParamType = type; // Parameter type
string Name = name; // Short, unique parameter name
string Doc = doc; // Description of this parameter
bit IsConstant = isConstant; // Whether this parameter requires a constant value in the IR
@@ -108,55 +96,55 @@ class DXILOperation<string name, int opCode, DXILOpClass opClass, DXILOpCategory
class LLVMIntrinsic<Intrinsic llvm_intrinsic_> { Intrinsic llvm_intrinsic = llvm_intrinsic_; }
def Sin : DXILOperation<"Sin", 13, UnaryClass, UnaryFloatCategory, "returns sine(theta) for theta in radians.",
- [f16Ty,f32Ty], ReadNone,
+ [llvm_half_ty, llvm_float_ty], ReadNone,
[
- DXILOpParameter<0, "$o", "", "operation result">,
- DXILOpParameter<1, "i32", "opcode", "DXIL opcode">,
- DXILOpParameter<2, "$o", "value", "input value">
+ DXILOpParameter<0, llvm_anyfloat_ty, "", "operation result">,
+ DXILOpParameter<1, llvm_i32_ty, "opcode", "DXIL opcode">,
+ DXILOpParameter<2, llvm_anyfloat_ty, "value", "input value">
],
["floats"]>,
LLVMIntrinsic<int_sin>;
-def UMax : DXILOperation< "UMax", 39, BinaryClass, BinaryUintCategory, "unsigned integer maximum. UMax(a,b) = a > b ? a : b",
- [i16Ty,i32Ty,i64Ty], ReadNone,
+def UMax : DXILOperation< "UMax", 39, BinaryClass, BinaryUintCategory, "unsigned integer maximum. UMax(a,b) = a > b ? a : b",
+ [llvm_i16_ty, llvm_i32_ty, llvm_i64_ty], ReadNone,
[
- DXILOpParameter<0, "$o", "", "operation result">,
- DXILOpParameter<1, "i32", "opcode", "DXIL opcode">,
- DXILOpParameter<2, "$o", "a", "input value">,
- DXILOpParameter<3, "$o", "b", "input value">
+ DXILOpParameter<0, llvm_anyint_ty, "", "operation result">,
+ DXILOpParameter<1, llvm_i32_ty, "opcode", "DXIL opcode">,
+ DXILOpParameter<2, llvm_anyint_ty, "a", "input value">,
+ DXILOpParameter<3, llvm_anyint_ty, "b", "input value">
],
["uints"]>,
LLVMIntrinsic<int_umax>;
-def ThreadId : DXILOperation< "ThreadId", 93, ThreadIdClass, ComputeIDCategory, "reads the thread ID", [i32Ty], ReadNone,
+def ThreadId : DXILOperation< "ThreadId", 93, ThreadIdClass, ComputeIDCategory, "reads the thread ID", [llvm_i32_ty], ReadNone,
[
- DXILOpParameter<0, "i32", "", "thread ID component">,
- DXILOpParameter<1, "i32", "opcode", "DXIL opcode">,
- DXILOpParameter<2, "i32", "component", "component to read (x,y,z)">
+ DXILOpParameter<0, llvm_i32_ty, "", "thread ID component">,
+ DXILOpParameter<1, llvm_i32_ty, "opcode", "DXIL opcode">,
+ DXILOpParameter<2, llvm_i32_ty, "component", "component to read (x,y,z)">
]>,
LLVMIntrinsic<int_dx_thread_id>;
-def GroupId : DXILOperation< "GroupId", 94, GroupIdClass, ComputeIDCategory, "reads the group ID (SV_GroupID)", [i32Ty], ReadNone,
+def GroupId : DXILOperation< "GroupId", 94, GroupIdClass, ComputeIDCategory, "reads the group ID (SV_GroupID)", [llvm_i32_ty], ReadNone,
[
- DXILOpParameter<0, "i32", "", "group ID component">,
- DXILOpParameter<1, "i32", "opcode", "DXIL opcode">,
- DXILOpParameter<2, "i32", "component", "component to read">
+ DXILOpParameter<0, llvm_i32_ty, "", "group ID component">,
+ DXILOpParameter<1, llvm_i32_ty, "opcode", "DXIL opcode">,
+ DXILOpParameter<2, llvm_i32_ty, "component", "component to read">
]>,
LLVMIntrinsic<int_dx_group_id>;
-def ThreadIdInGroup : DXILOperation< "ThreadIdInGroup", 95, ThreadIdInGroupClass, ComputeIDCategory,
- "reads the thread ID within the group (SV_GroupThreadID)", [i32Ty], ReadNone,
+def ThreadIdInGroup : DXILOperation< "ThreadIdInGroup", 95, ThreadIdInGroupClass, ComputeIDCategory,
+ "reads the thread ID within the group (SV_GroupThreadID)", [llvm_i32_ty], ReadNone,
[
- DXILOpParameter<0, "i32", "", "thread ID in group component">,
- DXILOpParameter<1, "i32", "opcode", "DXIL opcode">,
- DXILOpParameter<2, "i32", "component", "component to read (x,y,z)">
+ DXILOpParameter<0, llvm_i32_ty, "", "thread ID in group component">,
+ DXILOpParameter<1, llvm_i32_ty, "opcode", "DXIL opcode">,
+ DXILOpParameter<2, llvm_i32_ty, "component", "component to read (x,y,z)">
]>,
LLVMIntrinsic<int_dx_thread_id_in_group>;
-def FlattenedThreadIdInGroup : DXILOperation< "FlattenedThreadIdInGroup", 96, FlattenedThreadIdInGroupClass, ComputeIDCategory,
- "provides a flattened index for a given thread within a given group (SV_GroupIndex)", [i32Ty], ReadNone,
+def FlattenedThreadIdInGroup : DXILOperation< "FlattenedThreadIdInGroup", 96, FlattenedThreadIdInGroupClass, ComputeIDCategory,
+ "provides a flattened index for a given thread within a given group (SV_GroupIndex)", [llvm_i32_ty], ReadNone,
[
- DXILOpParameter<0, "i32", "", "result">,
- DXILOpParameter<1, "i32", "opcode", "DXIL opcode">
+ DXILOpParameter<0, llvm_i32_ty, "", "result">,
+ DXILOpParameter<1, llvm_i32_ty, "opcode", "DXIL opcode">
]>,
LLVMIntrinsic<int_dx_flattened_thread_id_in_group>;
diff --git a/llvm/lib/Target/Hexagon/CMakeLists.txt b/llvm/lib/Target/Hexagon/CMakeLists.txt
index 76f99b4d3ec5..2870f0bb6ad3 100644
--- a/llvm/lib/Target/Hexagon/CMakeLists.txt
+++ b/llvm/lib/Target/Hexagon/CMakeLists.txt
@@ -33,6 +33,7 @@ add_llvm_target(HexagonCodeGen
HexagonFrameLowering.cpp
HexagonGenExtract.cpp
HexagonGenInsert.cpp
+ HexagonGenMemAbsolute.cpp
HexagonGenMux.cpp
HexagonGenPredicate.cpp
HexagonHardwareLoops.cpp
@@ -50,6 +51,7 @@ add_llvm_target(HexagonCodeGen
HexagonOptAddrMode.cpp
HexagonOptimizeSZextends.cpp
HexagonPeephole.cpp
+ HexagonPostIncOpt.cpp
HexagonRDFOpt.cpp
HexagonRegisterInfo.cpp
HexagonSelectionDAGInfo.cpp
@@ -60,6 +62,7 @@ add_llvm_target(HexagonCodeGen
HexagonTargetMachine.cpp
HexagonTargetObjectFile.cpp
HexagonTargetTransformInfo.cpp
+ HexagonTfrCleanup.cpp
HexagonVectorCombine.cpp
HexagonVectorLoopCarriedReuse.cpp
HexagonVectorPrint.cpp
diff --git a/llvm/lib/Target/Hexagon/HexagonBitSimplify.cpp b/llvm/lib/Target/Hexagon/HexagonBitSimplify.cpp
index 6024d9f7b154..3b8234c01184 100644
--- a/llvm/lib/Target/Hexagon/HexagonBitSimplify.cpp
+++ b/llvm/lib/Target/Hexagon/HexagonBitSimplify.cpp
@@ -1957,7 +1957,8 @@ bool BitSimplification::genStoreUpperHalf(MachineInstr *MI) {
return false;
const BitTracker::RegisterCell &RC = BT.lookup(RS.Reg);
RegHalf H;
- if (!matchHalf(0, RC, 0, H))
+ unsigned B = (RS.Sub == Hexagon::isub_hi) ? 32 : 0;
+ if (!matchHalf(0, RC, B, H))
return false;
if (H.Low)
return false;
diff --git a/llvm/lib/Target/Hexagon/HexagonGenMemAbsolute.cpp b/llvm/lib/Target/Hexagon/HexagonGenMemAbsolute.cpp
new file mode 100644
index 000000000000..afd49631943f
--- /dev/null
+++ b/llvm/lib/Target/Hexagon/HexagonGenMemAbsolute.cpp
@@ -0,0 +1,274 @@
+//===--- HexagonGenMemAbsolute.cpp - Generate Load/Store Set Absolute ---===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+// This pass traverses all the basic blocks in a function and converts an
+// indexed load/store with offset "0" into an absolute-set load/store
+// instruction, as long as the use of the register in the new instruction
+// dominates the rest of the uses and there are more than 2 uses.
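+//
+// As a rough illustration (pseudo-MIR; the register numbers and the symbol @g
+// are hypothetical, not taken from an actual test case), a sequence such as
+//   r1 = A2_tfrsi @g            ; materialize the address
+//   r0 = L2_loadri_io r1, 0     ; indexed load with offset 0
+// would be folded into a single absolute-set load
+//   r0, r1 = L4_loadri_ap @g
+// provided the load's use of r1 dominates every other use of r1 and r1 has
+// more than 2 uses in total.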
+
+#include "HexagonTargetMachine.h"
+#include "llvm/ADT/Statistic.h"
+#include "llvm/CodeGen/MachineDominators.h"
+#include "llvm/CodeGen/MachineFunctionPass.h"
+#include "llvm/CodeGen/MachineInstrBuilder.h"
+#include "llvm/CodeGen/MachineRegisterInfo.h"
+#include "llvm/CodeGen/Passes.h"
+#include "llvm/CodeGen/TargetInstrInfo.h"
+#include "llvm/Support/Debug.h"
+#include "llvm/Support/raw_ostream.h"
+#include "llvm/Target/TargetMachine.h"
+
+#define DEBUG_TYPE "hexagon-abs"
+
+using namespace llvm;
+
+STATISTIC(HexagonNumLoadAbsConversions,
+ "Number of Load instructions converted to absolute-set form");
+STATISTIC(HexagonNumStoreAbsConversions,
+ "Number of Store instructions converted to absolute-set form");
+
+namespace llvm {
+FunctionPass *createHexagonGenMemAbsolute();
+void initializeHexagonGenMemAbsolutePass(PassRegistry &Registry);
+} // namespace llvm
+
+namespace {
+
+class HexagonGenMemAbsolute : public MachineFunctionPass {
+ const HexagonInstrInfo *TII;
+ MachineRegisterInfo *MRI;
+ const TargetRegisterInfo *TRI;
+
+public:
+ static char ID;
+ HexagonGenMemAbsolute() : MachineFunctionPass(ID), TII(0), MRI(0), TRI(0) {
+ initializeHexagonGenMemAbsolutePass(*PassRegistry::getPassRegistry());
+ }
+
+ StringRef getPassName() const override {
+ return "Hexagon Generate Load/Store Set Absolute Address Instruction";
+ }
+
+ void getAnalysisUsage(AnalysisUsage &AU) const override {
+ MachineFunctionPass::getAnalysisUsage(AU);
+ AU.addRequired<MachineDominatorTree>();
+ AU.addPreserved<MachineDominatorTree>();
+ }
+
+ bool runOnMachineFunction(MachineFunction &Fn) override;
+
+private:
+ static bool isValidIndexedLoad(int &Opcode, int &NewOpcode);
+ static bool isValidIndexedStore(int &Opcode, int &NewOpcode);
+};
+} // namespace
+
+char HexagonGenMemAbsolute::ID = 0;
+
+INITIALIZE_PASS(HexagonGenMemAbsolute, "hexagon-gen-load-absolute",
+ "Hexagon Generate Load/Store Set Absolute Address Instruction",
+ false, false)
+
+bool HexagonGenMemAbsolute::runOnMachineFunction(MachineFunction &Fn) {
+ if (skipFunction(Fn.getFunction()))
+ return false;
+
+ TII = Fn.getSubtarget<HexagonSubtarget>().getInstrInfo();
+ MRI = &Fn.getRegInfo();
+ TRI = Fn.getRegInfo().getTargetRegisterInfo();
+
+ MachineDominatorTree &MDT = getAnalysis<MachineDominatorTree>();
+
+ // Loop over all of the basic blocks
+ for (MachineFunction::iterator MBBb = Fn.begin(), MBBe = Fn.end();
+ MBBb != MBBe; ++MBBb) {
+ MachineBasicBlock *MBB = &*MBBb;
+ // Traverse the basic block
+ for (MachineBasicBlock::iterator MII = MBB->begin(); MII != MBB->end();
+ ++MII) {
+ MachineInstr *MI = &*MII;
+ int Opc = MI->getOpcode();
+ if (Opc != Hexagon::CONST32 && Opc != Hexagon::A2_tfrsi)
+ continue;
+
+ const MachineOperand &MO = MI->getOperand(0);
+ if (!MO.isReg() || !MO.isDef())
+ continue;
+
+ unsigned DstReg = MO.getReg();
+ if (MRI->use_nodbg_empty(DstReg))
+ continue;
+
+ typedef MachineRegisterInfo::use_nodbg_iterator use_iterator;
+ use_iterator NextUseMI = MRI->use_nodbg_begin(DstReg);
+
+ MachineInstr *NextMI = NextUseMI->getParent();
+ int NextOpc = NextMI->getOpcode();
+ int NewOpc;
+ bool IsLoad = isValidIndexedLoad(NextOpc, NewOpc);
+
+ if (!IsLoad && !isValidIndexedStore(NextOpc, NewOpc))
+ continue;
+
+ // Base and Offset positions for load and store instructions
+ // Load R(dest), R(base), Imm -> R(dest) = mem(R(base) + Imm)
+ // Store R(base), Imm, R(src) -> mem(R(base) + Imm) = R(src)
+ unsigned BaseRegPos, ImmPos, RegPos;
+ if (!TII->getBaseAndOffsetPosition(*NextMI, BaseRegPos, ImmPos))
+ continue;
+ RegPos = IsLoad ? 0 : 2;
+
+ bool IsGlobal = MI->getOperand(1).isGlobal();
+ if (!MI->getOperand(1).isImm() && !IsGlobal)
+ continue;
+
+ const MachineOperand *BaseOp = nullptr;
+ int64_t Offset;
+ bool Scalable;
+ TII->getMemOperandWithOffset(*NextMI, BaseOp, Offset, Scalable, TRI);
+
+ // Ensure BaseOp is non-null and is a register operand.
+ if (!BaseOp || !BaseOp->isReg())
+ continue;
+
+ if (Scalable)
+ continue;
+
+ unsigned BaseReg = BaseOp->getReg();
+ if ((DstReg != BaseReg) || (Offset != 0))
+ continue;
+
+ const MachineOperand &MO0 = NextMI->getOperand(RegPos);
+
+ if (!MO0.isReg())
+ continue;
+
+ unsigned LoadStoreReg = MO0.getReg();
+
+ // Store: Bail out if the src and base are the same (def and use on the
+ // same register).
+ if (LoadStoreReg == BaseReg)
+ continue;
+
+ // Insert the absolute-set instruction "I" only if the use of the
+ // BaseReg in "I" dominates the rest of the uses of BaseReg and if
+ // there are more than 2 uses of this BaseReg.
+ bool Dominates = true;
+ unsigned Counter = 0;
+ for (use_iterator I = NextUseMI, E = MRI->use_nodbg_end(); I != E; ++I) {
+ Counter++;
+ if (!MDT.dominates(NextMI, I->getParent()))
+ Dominates = false;
+ }
+
+ if ((!Dominates) || (Counter < 3))
+ continue;
+
+ // If we reach here, all the conditions required to replace this pair with
+ // an absolute-set instruction are met.
+ LLVM_DEBUG({
+ dbgs() << "Found a pair of instructions for absolute-set "
+ << (IsLoad ? "load" : "store") << "\n";
+ dbgs() << *MI;
+ dbgs() << *NextMI;
+ });
+ MachineBasicBlock *ParentBlock = NextMI->getParent();
+ MachineInstrBuilder MIB;
+ if (IsLoad) { // Insert absolute-set load instruction
+ ++HexagonNumLoadAbsConversions;
+ MIB = BuildMI(*ParentBlock, NextMI, NextMI->getDebugLoc(),
+ TII->get(NewOpc), LoadStoreReg)
+ .addReg(DstReg, RegState::Define);
+ } else { // Insert absolute-set store instruction
+ ++HexagonNumStoreAbsConversions;
+ MIB = BuildMI(*ParentBlock, NextMI, NextMI->getDebugLoc(),
+ TII->get(NewOpc), DstReg);
+ }
+
+ MachineOperand ImmOperand = MI->getOperand(1);
+ if (IsGlobal)
+ MIB.addGlobalAddress(ImmOperand.getGlobal(), ImmOperand.getOffset(),
+ ImmOperand.getTargetFlags());
+ else
+ MIB.addImm(ImmOperand.getImm());
+
+ if (IsLoad)
+ MIB->getOperand(0).setSubReg(MO0.getSubReg());
+ else
+ MIB.addReg(LoadStoreReg, 0, MO0.getSubReg());
+
+ LLVM_DEBUG(dbgs() << "Replaced with " << *MIB << "\n");
+ // Erase the instructions that got replaced.
+ MII = MBB->erase(MI);
+ --MII;
+ NextMI->getParent()->erase(NextMI);
+ }
+ }
+
+ return true;
+}
+
+bool HexagonGenMemAbsolute::isValidIndexedLoad(int &Opc, int &NewOpc) {
+
+ bool Result = true;
+ switch (Opc) {
+ case Hexagon::L2_loadrb_io:
+ NewOpc = Hexagon::L4_loadrb_ap;
+ break;
+ case Hexagon::L2_loadrh_io:
+ NewOpc = Hexagon::L4_loadrh_ap;
+ break;
+ case Hexagon::L2_loadri_io:
+ NewOpc = Hexagon::L4_loadri_ap;
+ break;
+ case Hexagon::L2_loadrd_io:
+ NewOpc = Hexagon::L4_loadrd_ap;
+ break;
+ case Hexagon::L2_loadruh_io:
+ NewOpc = Hexagon::L4_loadruh_ap;
+ break;
+ case Hexagon::L2_loadrub_io:
+ NewOpc = Hexagon::L4_loadrub_ap;
+ break;
+ default:
+ Result = false;
+ }
+
+ return Result;
+}
+
+bool HexagonGenMemAbsolute::isValidIndexedStore(int &Opc, int &NewOpc) {
+
+ bool Result = true;
+ switch (Opc) {
+ case Hexagon::S2_storerd_io:
+ NewOpc = Hexagon::S4_storerd_ap;
+ break;
+ case Hexagon::S2_storeri_io:
+ NewOpc = Hexagon::S4_storeri_ap;
+ break;
+ case Hexagon::S2_storerh_io:
+ NewOpc = Hexagon::S4_storerh_ap;
+ break;
+ case Hexagon::S2_storerb_io:
+ NewOpc = Hexagon::S4_storerb_ap;
+ break;
+ default:
+ Result = false;
+ }
+
+ return Result;
+}
+
+//===----------------------------------------------------------------------===//
+// Public Constructor Functions
+//===----------------------------------------------------------------------===//
+
+FunctionPass *llvm::createHexagonGenMemAbsolute() {
+ return new HexagonGenMemAbsolute();
+}
diff --git a/llvm/lib/Target/Hexagon/HexagonInstrInfo.cpp b/llvm/lib/Target/Hexagon/HexagonInstrInfo.cpp
index 619c7dc69f9b..91cc9307786b 100644
--- a/llvm/lib/Target/Hexagon/HexagonInstrInfo.cpp
+++ b/llvm/lib/Target/Hexagon/HexagonInstrInfo.cpp
@@ -1655,6 +1655,13 @@ bool HexagonInstrInfo::isPostIncrement(const MachineInstr &MI) const {
return getAddrMode(MI) == HexagonII::PostInc;
}
+bool HexagonInstrInfo::isPostIncWithImmOffset(const MachineInstr &MI) const {
+ unsigned BasePos, OffsetPos;
+ if (!getBaseAndOffsetPosition(MI, BasePos, OffsetPos))
+ return false;
+ return isPostIncrement(MI) && MI.getOperand(OffsetPos).isImm();
+}
+
// Returns true if an instruction is predicated irrespective of the predicate
// sense. For example, all of the following will return true.
// if (p0) R1 = add(R2, R3)
@@ -2436,6 +2443,55 @@ bool HexagonInstrInfo::isLoopN(const MachineInstr &MI) const {
Opcode == Hexagon::J2_loop1rext;
}
+bool HexagonInstrInfo::isCircBufferInstr(const MachineInstr &MI) const {
+ switch (MI.getOpcode()) {
+ default:
+ return false;
+ case Hexagon::L2_loadalignb_pci:
+ case Hexagon::L2_loadalignb_pcr:
+ case Hexagon::L2_loadalignh_pci:
+ case Hexagon::L2_loadalignh_pcr:
+ case Hexagon::L2_loadbsw2_pci:
+ case Hexagon::L2_loadbsw2_pcr:
+ case Hexagon::L2_loadbsw4_pci:
+ case Hexagon::L2_loadbsw4_pcr:
+ case Hexagon::L2_loadbzw2_pci:
+ case Hexagon::L2_loadbzw2_pcr:
+ case Hexagon::L2_loadbzw4_pci:
+ case Hexagon::L2_loadbzw4_pcr:
+ case Hexagon::L2_loadrb_pci:
+ case Hexagon::L2_loadrb_pcr:
+ case Hexagon::L2_loadrd_pci:
+ case Hexagon::L2_loadrd_pcr:
+ case Hexagon::L2_loadrh_pci:
+ case Hexagon::L2_loadrh_pcr:
+ case Hexagon::L2_loadri_pci:
+ case Hexagon::L2_loadri_pcr:
+ case Hexagon::L2_loadrub_pci:
+ case Hexagon::L2_loadrub_pcr:
+ case Hexagon::L2_loadruh_pci:
+ case Hexagon::L2_loadruh_pcr:
+ case Hexagon::S2_storerbnew_pci:
+ case Hexagon::S2_storerbnew_pcr:
+ case Hexagon::S2_storerb_pci:
+ case Hexagon::S2_storerb_pcr:
+ case Hexagon::S2_storerd_pci:
+ case Hexagon::S2_storerd_pcr:
+ case Hexagon::S2_storerf_pci:
+ case Hexagon::S2_storerf_pcr:
+ case Hexagon::S2_storerhnew_pci:
+ case Hexagon::S2_storerhnew_pcr:
+ case Hexagon::S2_storerh_pci:
+ case Hexagon::S2_storerh_pcr:
+ case Hexagon::S2_storerinew_pci:
+ case Hexagon::S2_storerinew_pcr:
+ case Hexagon::S2_storeri_pci:
+ case Hexagon::S2_storeri_pcr:
+ return true;
+ }
+ return false;
+}
+
bool HexagonInstrInfo::isMemOp(const MachineInstr &MI) const {
switch (MI.getOpcode()) {
default: return false;
diff --git a/llvm/lib/Target/Hexagon/HexagonInstrInfo.h b/llvm/lib/Target/Hexagon/HexagonInstrInfo.h
index e496995d3ff1..65783c560321 100644
--- a/llvm/lib/Target/Hexagon/HexagonInstrInfo.h
+++ b/llvm/lib/Target/Hexagon/HexagonInstrInfo.h
@@ -434,6 +434,8 @@ public:
bool predCanBeUsedAsDotNew(const MachineInstr &MI, Register PredReg) const;
bool PredOpcodeHasJMP_c(unsigned Opcode) const;
bool predOpcodeHasNot(ArrayRef<MachineOperand> Cond) const;
+ bool isPostIncWithImmOffset(const MachineInstr &MI) const;
+ bool isCircBufferInstr(const MachineInstr &MI) const;
unsigned getAddrMode(const MachineInstr &MI) const;
MachineOperand *getBaseAndOffset(const MachineInstr &MI, int64_t &Offset,
diff --git a/llvm/lib/Target/Hexagon/HexagonPostIncOpt.cpp b/llvm/lib/Target/Hexagon/HexagonPostIncOpt.cpp
new file mode 100644
index 000000000000..4c845f24f76a
--- /dev/null
+++ b/llvm/lib/Target/Hexagon/HexagonPostIncOpt.cpp
@@ -0,0 +1,689 @@
+//===-- HexagonPostIncOpt.cpp - Hexagon Post Increment Optimization Pass --===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+// Convert post-inc addressing mode into base-offset addressing mode.
+// Ex:
+// original loop:
+// v1 = phi(v0, v3)
+// v2,v3 = post_load v1, 4
+
+// Often, the unroller creates the following form of post-increments:
+// v1 = phi(v0, v3')
+// v2,v3 = post_load v1, 4
+// v2',v3'= post_load v3, 4
+
+// This can be optimized in two ways
+
+// 1.
+// v1 = phi(v0, v3')
+// v2,v3' = post_load v1, 8
+// v2' = load v3', -4
+//
+// 2.
+// v1 = phi(v0, v3')
+// v2,v3' = post_load v1, 8
+// v2' = load v1, 4
+//
+// Option 2 is favored as we can packetize two memory operations in a single
+// packet. However, this is not always favorable due to memory dependences,
+// and in cases where we form a bigger chain of post-increment ops that will
+// create more spills, since we cannot execute post-increment ops without
+// executing base-offset instructions.
+//===----------------------------------------------------------------------===//
+#include "HexagonInstrInfo.h"
+#include "HexagonSubtarget.h"
+#include "llvm/Analysis/AliasAnalysis.h"
+#include "llvm/CodeGen/MachineBasicBlock.h"
+#include "llvm/CodeGen/MachineFunction.h"
+#include "llvm/CodeGen/MachineFunctionPass.h"
+#include "llvm/CodeGen/MachineInstr.h"
+#include "llvm/CodeGen/MachineInstrBuilder.h"
+#include "llvm/CodeGen/MachineLoopInfo.h"
+#include "llvm/CodeGen/Passes.h"
+#include "llvm/CodeGen/ScheduleDAGInstrs.h"
+#include "llvm/CodeGen/TargetRegisterInfo.h"
+#include "llvm/InitializePasses.h"
+#include "llvm/Pass.h"
+#include "llvm/Support/CodeGen.h"
+#include "llvm/Support/CommandLine.h"
+#include "llvm/Support/Debug.h"
+#include "llvm/Support/raw_ostream.h"
+
+using namespace llvm;
+
+#define DEBUG_TYPE "hexagon-postincopt"
+
+static cl::opt<unsigned> PostIncChainThreshold(
+ "post-inc-chain-threshold", cl::Hidden, cl::init(4),
+ cl::desc("Limit the number of post-inc instructions in a chain."));
+
+static cl::opt<bool> PreferPostIncStore(
+ "prefer-post-inc-store", cl::Hidden, cl::init(true),
+ cl::desc("Prefer post-inc store in a list of loads and stores."));
+
+namespace llvm {
+void initializeHexagonPostIncOptPass(PassRegistry &);
+FunctionPass *createHexagonPostIncOpt();
+} // namespace llvm
+
+namespace {
+
+class HexagonPostIncOpt : public MachineFunctionPass {
+ MachineLoopInfo *MLI = nullptr;
+ const HexagonInstrInfo *HII = nullptr;
+ const TargetRegisterInfo *TRI = nullptr;
+ const MachineRegisterInfo *MRI = nullptr;
+ const HexagonSubtarget *HST = nullptr;
+
+public:
+ static char ID;
+
+ HexagonPostIncOpt() : MachineFunctionPass(ID) {
+ initializeHexagonPostIncOptPass(*PassRegistry::getPassRegistry());
+ }
+
+ void getAnalysisUsage(AnalysisUsage &AU) const override {
+ AU.addRequired<AAResultsWrapperPass>();
+ AU.addPreserved<AAResultsWrapperPass>();
+ AU.addRequired<MachineLoopInfo>();
+ MachineFunctionPass::getAnalysisUsage(AU);
+ }
+
+ StringRef getPassName() const override { return "Hexagon Post-Inc-Opt Pass"; }
+
+ bool runOnMachineFunction(MachineFunction &Fn) override;
+
+private:
+ bool translatePostIncsInLoop(MachineBasicBlock &MBB);
+ void replacePostIncWithBaseOffset(MachineBasicBlock &MBB) const;
+ void replacePostIncWithBaseOffset(MachineInstr &MI) const;
+ bool isPostIncInsn(MachineInstr &MI) const;
+ void foldAdds(MachineBasicBlock &MBB) const;
+ void updateBaseAndOffset(MachineInstr &MI, MachineInstr &AddMI) const;
+ void removeDeadInstructions(MachineBasicBlock &MBB) const;
+
+ void generatePostInc(MachineBasicBlock &MBB);
+ bool canReplaceWithPostInc(MachineInstr *MI, MachineInstr *AddMI) const;
+ void replaceWithPostInc(MachineInstr *MI, MachineInstr *AddMI) const;
+
+ bool isValidOffset(const MachineInstr &MI, int64_t Offset) const;
+ bool isValidPostIncValue(const MachineInstr &MI, int IncVal) const;
+};
+
+class HexagonPostIncOptSchedDAG : public ScheduleDAGInstrs {
+ HexagonPostIncOpt &Pass;
+
+public:
+ HexagonPostIncOptSchedDAG(HexagonPostIncOpt &P, MachineFunction &MF,
+ MachineLoopInfo *MLI)
+ : ScheduleDAGInstrs(MF, MLI, false), Pass(P) {}
+ void schedule() override;
+ ScheduleDAGTopologicalSort &getTopo() { return Topo; }
+};
+
+} // End anonymous namespace.
+
+char HexagonPostIncOpt::ID = 0;
+
+INITIALIZE_PASS_BEGIN(HexagonPostIncOpt, DEBUG_TYPE,
+ "Hexagon Post-Inc-Opt Pass", false, false)
+INITIALIZE_PASS_DEPENDENCY(AAResultsWrapperPass)
+INITIALIZE_PASS_DEPENDENCY(MachineLoopInfo)
+INITIALIZE_PASS_END(HexagonPostIncOpt, DEBUG_TYPE, "Hexagon Post-Inc-Opt Pass",
+ false, false)
+
+/// Return true if MIA dominates MIB.
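+/// A linear scan within the block is sufficient here because this pass only
+/// operates on single-block inner loops; if the instructions are in different
+/// blocks, the result is conservatively "false".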
+static bool dominates(MachineInstr *MIA, MachineInstr *MIB) {
+ if (MIA->getParent() != MIB->getParent())
+ return false; // Don't know since machine dominator tree is out of date.
+
+ MachineBasicBlock *MBB = MIA->getParent();
+ MachineBasicBlock::iterator I = MBB->instr_begin();
+ // Iterate over the basic block until MIA or MIB is found.
+ for (; &*I != MIA && &*I != MIB; ++I)
+ ;
+
+ // MIA dominates MIB if MIA is found first.
+ return &*I == MIA;
+}
+
+// Return the Phi register value that comes from the loop block.
+static unsigned getLoopPhiReg(MachineInstr *Phi, MachineBasicBlock *LoopBB) {
+ for (unsigned i = 1, e = Phi->getNumOperands(); i != e; i += 2)
+ if (Phi->getOperand(i + 1).getMBB() == LoopBB)
+ return Phi->getOperand(i).getReg();
+ return UINT_MAX;
+}
+
+static bool isAddWithImmValue(const MachineInstr &MI) {
+ // FIXME: For now, only deal with adds that have strict immediate values.
+ // Some A2_addi instructions can be of the form:
+ // %338:intregs = A2_addi %7:intregs, @_ZL7phs_tbl + 16
+ return MI.getOpcode() == Hexagon::A2_addi && MI.getOperand(2).isImm();
+}
+
+// Compute the number of 'real' instructions in the basic block, ignoring
+// debug instructions and stopping at the first terminator.
+static unsigned getBasicBlockSize(MachineBasicBlock &MBB) {
+ unsigned size = 0;
+ for (auto &I : make_range(MBB.begin(), MBB.getFirstTerminator()))
+ if (!I.isDebugInstr())
+ size++;
+ return size;
+}
+
+// Set up the post-increment schedule DAG.
+static void initPISchedDAG(HexagonPostIncOptSchedDAG &PIDAG,
+ MachineBasicBlock &MBB) {
+ PIDAG.startBlock(&MBB);
+ PIDAG.enterRegion(&MBB, MBB.begin(), MBB.getFirstTerminator(),
+ getBasicBlockSize(MBB));
+ // Build the graph.
+ PIDAG.schedule();
+ // exitRegion() is an empty function in the base class, so it is safe to call here.
+ PIDAG.exitRegion();
+}
+
+// Check if post-increment candidate has any memory dependence on any
+// instruction in the chain.
+static bool hasMemoryDependency(SUnit *PostIncSU,
+ SmallVector<MachineInstr *, 4> &UseList) {
+
+ // FIXME: Fine tune the order dependence. Probably can only consider memory
+ // related OrderKind.
+ for (auto &Dep : PostIncSU->Succs)
+ if (Dep.getKind() == SDep::Order)
+ if (std::find(UseList.begin(), UseList.end(),
+ Dep.getSUnit()->getInstr()) != UseList.end())
+ return true;
+
+ return false;
+}
+
+// Fold an add with an immediate into either an add, a load, or a store.
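+// For example (simplified virtual-register notation):
+//   %1 = A2_addi %0, 8
+//   %2 = A2_addi %1, 4       becomes   %2 = A2_addi %0, 12
+//   %3 = L2_loadri_io %1, 0  becomes   %3 = L2_loadri_io %0, 8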
+void HexagonPostIncOpt::foldAdds(MachineBasicBlock &MBB) const {
+ LLVM_DEBUG(dbgs() << "#Fold add instructions in this block.\n");
+ for (auto &MI : make_range(MBB.getFirstNonPHI(), MBB.getFirstTerminator())) {
+ if (!isAddWithImmValue(MI))
+ continue;
+ unsigned DefReg = MI.getOperand(0).getReg();
+ unsigned AddReg = MI.getOperand(1).getReg();
+ int64_t AddImm = MI.getOperand(2).getImm();
+
+ SmallVector<MachineInstr *, 4> UseList;
+ // Gather the uses of add instruction's def reg.
+ for (auto &MO : make_range(MRI->use_begin(DefReg), MRI->use_end())) {
+ MachineInstr *UseMI = MO.getParent();
+ // Deal only with the instructions that belong to this block.
+ // If a use crosses this block, the post-increment generation logic
+ // will not be able to perform the transformation due to dominance.
+ if (UseMI->getParent() == &MBB)
+ UseList.push_back(UseMI);
+ }
+
+ if (UseList.empty())
+ continue;
+
+ LLVM_DEBUG({
+ dbgs() << "Current instruction considered for folding \n";
+ MI.dump();
+ });
+
+ for (auto UseMI : UseList) {
+ if (isAddWithImmValue(*UseMI)) {
+ int64_t NewImm = AddImm + UseMI->getOperand(2).getImm();
+ // Fold if the new immediate is within the valid range.
+ if (HII->isValidOffset(UseMI->getOpcode(), NewImm, TRI, false)) {
+ LLVM_DEBUG({
+ UseMI->dump();
+ dbgs() << "\t is folded in to \n";
+ });
+ UseMI->getOperand(1).setReg(AddReg);
+ UseMI->getOperand(2).setImm(NewImm);
+ LLVM_DEBUG(UseMI->dump());
+ }
+ } else if (HII->isBaseImmOffset(*UseMI)) {
+ LLVM_DEBUG({
+ UseMI->dump();
+ dbgs() << "\t is folded in to \n";
+ });
+ updateBaseAndOffset(*UseMI, MI);
+ LLVM_DEBUG(UseMI->dump());
+ }
+ LLVM_DEBUG(dbgs() << "\n");
+ }
+ }
+ removeDeadInstructions(MBB);
+ LLVM_DEBUG(dbgs() << "#End of the fold instructions logic.\n");
+}
+
+void HexagonPostIncOpt::updateBaseAndOffset(MachineInstr &MI,
+ MachineInstr &AddMI) const {
+ assert(HII->isBaseImmOffset(MI));
+ unsigned BasePos, OffsetPos;
+ if (!HII->getBaseAndOffsetPosition(MI, BasePos, OffsetPos))
+ return;
+
+ MachineOperand &OffsetOp = MI.getOperand(OffsetPos);
+ MachineOperand &BaseOp = MI.getOperand(BasePos);
+
+ if (BaseOp.getReg() != AddMI.getOperand(0).getReg())
+ return;
+
+ unsigned IncBase = AddMI.getOperand(1).getReg();
+ int64_t IncValue = AddMI.getOperand(2).getImm();
+
+ int64_t NewOffset = OffsetOp.getImm() + IncValue;
+ if (!isValidOffset(MI, NewOffset))
+ return;
+
+ OffsetOp.setImm(NewOffset);
+ BaseOp.setReg(IncBase);
+}
+
+void HexagonPostIncOpt::removeDeadInstructions(MachineBasicBlock &MBB) const {
+ // For MBB, check that the value defined by each instruction is used.
+ // If not, delete it.
+ for (MachineBasicBlock::reverse_instr_iterator MI = MBB.instr_rbegin(),
+ ME = MBB.instr_rend();
+ MI != ME;) {
+ // From DeadMachineInstructionElim. Don't delete inline assembly.
+ if (MI->isInlineAsm()) {
+ ++MI;
+ continue;
+ }
+ bool SawStore = false;
+ // Check if it's safe to remove the instruction due to side effects.
+ if (!MI->isSafeToMove(nullptr, SawStore)) {
+ ++MI;
+ continue;
+ }
+ unsigned Uses = 0;
+ for (MachineInstr::mop_iterator MOI = MI->operands_begin(),
+ MOE = MI->operands_end();
+ MOI != MOE; ++MOI) {
+ if (!MOI->isReg() || !MOI->isDef())
+ continue;
+ unsigned reg = MOI->getReg();
+ // Assume physical registers are used.
+ if (Register::isPhysicalRegister(reg)) {
+ Uses++;
+ continue;
+ }
+ if (MRI->use_begin(reg) != MRI->use_end())
+ Uses++;
+ }
+ if (!Uses) {
+ MI++->eraseFromParent();
+ continue;
+ }
+ ++MI;
+ }
+}
+
+bool HexagonPostIncOpt::isPostIncInsn(MachineInstr &MI) const {
+ // Predicated post-increments are not yet handled (ISel does not generate
+ // them yet). Circular-buffer instructions are not handled either.
+ return (HII->isPostIncWithImmOffset(MI) && !HII->isPredicated(MI) &&
+ !HII->isCircBufferInstr(MI));
+}
+
+/// For instructions with a base and offset, return true if the new Offset
+/// is a valid value with the correct alignment.
+bool HexagonPostIncOpt::isValidOffset(const MachineInstr &MI,
+ int64_t Offset) const {
+ if (!HII->isValidOffset(MI.getOpcode(), Offset, TRI, false))
+ return false;
+ unsigned AlignMask = HII->getMemAccessSize(MI) - 1;
+ return (Offset & AlignMask) == 0;
+}
+
+bool HexagonPostIncOpt::isValidPostIncValue(const MachineInstr &MI,
+ int IncVal) const {
+ unsigned AlignMask = HII->getMemAccessSize(MI) - 1;
+ if ((IncVal & AlignMask) != 0)
+ return false;
+
+ // Total number of bits in the instruction used to encode the increment value.
+ unsigned IncBits = 4;
+ // For HVX instructions, only 3 bits are used.
+ if (HexagonII::isCVI(MI.getDesc()))
+ IncBits = 3;
+
+ IncBits += Log2_32(HII->getMemAccessSize(MI));
+ if (HII->getMemAccessSize(MI) > 8)
+ IncBits = 16;
+
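+ // For example, for a 4-byte scalar access: IncBits = 4 + log2(4) = 6, so
+ // the increment must lie in [-32, 31] and be a multiple of 4.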
+ int MinValidVal = -1U << (IncBits - 1);
+ int MaxValidVal = ~(-1U << (IncBits - 1));
+ return (IncVal >= MinValidVal && IncVal <= MaxValidVal);
+}
+
+void HexagonPostIncOptSchedDAG::schedule() {
+ AliasAnalysis *AA = &Pass.getAnalysis<AAResultsWrapperPass>().getAAResults();
+ buildSchedGraph(AA);
+}
+
+// Replace post-increment operations with base+offset counterpart.
+void HexagonPostIncOpt::replacePostIncWithBaseOffset(
+ MachineBasicBlock &MBB) const {
+ LLVM_DEBUG(dbgs() << "#Replacing post-increment instructions with "
+ "base+offset counterparts.\n");
+
+ SmallVector<MachineInstr *, 4> MIList;
+ for (auto &MI : make_range(MBB.getFirstNonPHI(), MBB.getFirstTerminator())) {
+ // Check for eligible post-inc candidates.
+ if (!isPostIncInsn(MI))
+ continue;
+ MIList.push_back(&MI);
+ }
+
+ for (auto MI : MIList)
+ replacePostIncWithBaseOffset(*MI);
+
+ LLVM_DEBUG(dbgs() << "#Done with replacing post-increment instructions.\n");
+}
+
+void HexagonPostIncOpt::replacePostIncWithBaseOffset(MachineInstr &MI) const {
+ short NewOpcode = HII->changeAddrMode_pi_io(MI.getOpcode());
+ if (NewOpcode < 0)
+ return;
+
+ unsigned BasePos = 0, OffsetPos = 0;
+ if (!HII->getBaseAndOffsetPosition(MI, BasePos, OffsetPos))
+ return;
+ const MachineOperand &PostIncOffset = MI.getOperand(OffsetPos);
+ const MachineOperand &PostIncBase = MI.getOperand(BasePos);
+
+ MachineBasicBlock &MBB = *MI.getParent();
+ DebugLoc DL = MI.getDebugLoc();
+ MachineOperand *PostIncDest;
+ MachineInstrBuilder MIB;
+ if (MI.mayLoad()) {
+ PostIncDest = &MI.getOperand(1);
+ const MachineOperand &LDValue = MI.getOperand(0);
+ MIB = BuildMI(MBB, MI, DL, HII->get(NewOpcode));
+ MIB.add(LDValue).add(PostIncBase).addImm(0);
+ } else {
+ PostIncDest = &MI.getOperand(0);
+ const MachineOperand &STValue = MI.getOperand(3);
+ MIB = BuildMI(MBB, MI, DL, HII->get(NewOpcode));
+ MIB.add(PostIncBase).addImm(0).add(STValue);
+ }
+
+ // Transfer memoperands.
+ MIB->cloneMemRefs(*MBB.getParent(), MI);
+
+ // Create an add instruction for the post-inc addition of offset.
+ MachineInstrBuilder MIBA = BuildMI(MBB, MI, DL, HII->get(Hexagon::A2_addi));
+ MIBA.add(*PostIncDest).add(PostIncBase).add(PostIncOffset);
+
+ LLVM_DEBUG({
+ dbgs() << "\n";
+ MI.dump();
+ dbgs() << "\tis tranformed to \n";
+ MIB->dump();
+ MIBA->dump();
+ dbgs() << "\n\n";
+ });
+
+ MI.eraseFromParent();
+}
+
+void HexagonPostIncOpt::generatePostInc(MachineBasicBlock &MBB) {
+ LLVM_DEBUG(dbgs() << "# Generate Post-inc and update uses if needed.\n");
+ MachineBasicBlock::iterator MII = MBB.getFirstNonPHI();
+ MachineBasicBlock::iterator MIE = MBB.instr_begin();
+ bool isOK = true;
+ while (MII != MIE) {
+ MachineInstr *Phi = &*std::prev(MII);
+ MII = std::prev(MII);
+ unsigned LoopVal = getLoopPhiReg(Phi, &MBB);
+ if (LoopVal == UINT_MAX)
+ continue;
+ MachineInstr *LoopInst = MRI->getVRegDef(LoopVal);
+ if (!isAddWithImmValue(*LoopInst))
+ continue;
+
+ if (LoopInst->getOpcode() != Hexagon::A2_addi)
+ continue;
+
+ unsigned AddReg = LoopInst->getOperand(1).getReg();
+ int64_t AddImm = LoopInst->getOperand(2).getImm();
+ SmallVector<MachineInstr *, 4> UseList;
+ MachineInstr *PostIncCandidate = nullptr;
+
+ // Find the probable candidates for Post-increment instruction.
+ SmallVector<MachineInstr *, 4> CandList;
+ for (auto &MO : make_range(MRI->use_begin(AddReg), MRI->use_end())) {
+ MachineInstr *UseMI = MO.getParent();
+
+ if (UseMI == LoopInst)
+ continue;
+
+ if (!dominates(UseMI, LoopInst)) {
+ isOK = false;
+ break;
+ }
+ const MachineOperand *BaseOp = nullptr;
+ int64_t Offset;
+ bool OffsetIsScalable;
+ if (!HII->isBaseImmOffset(*UseMI) ||
+ !HII->getMemOperandWithOffset(*UseMI, BaseOp, Offset,
+ OffsetIsScalable, TRI)) {
+ isOK = false;
+ break;
+ }
+ int64_t NewOffset = Offset - AddImm;
+ if (!isValidOffset(*UseMI, NewOffset) || !BaseOp->isReg() ||
+ BaseOp->getReg() != AddReg) {
+ isOK = false;
+ break;
+ }
+ if (OffsetIsScalable) {
+ isOK = false;
+ break;
+ }
+ if (Offset == 0) {
+ // If there are stores in the chain, make sure they are at the beginning
+ // of the list. E.g., LD, LD, ST, ST will end up as LD, LD, PostInc_ST,
+ // ST.
+ if (UseMI->mayStore() && PreferPostIncStore)
+ CandList.insert(CandList.begin(), UseMI);
+ else
+ CandList.push_back(UseMI);
+ continue;
+ }
+ UseList.push_back(UseMI);
+ }
+
+ if (!isOK)
+ continue;
+
+ for (auto MI : CandList) {
+ if (!PostIncCandidate)
+ PostIncCandidate = MI;
+ else
+ // Queue the remaining candidates so their offsets can be updated.
+ UseList.push_back(MI);
+ }
+
+ // If a candidate is found, replace it with the post-inc instruction.
+ // Also, adjust offset for other uses as needed.
+ if (!PostIncCandidate || !canReplaceWithPostInc(PostIncCandidate, LoopInst))
+ continue;
+
+ // Logic to determine what the base register should be.
+ // There are two choices:
+ // 1. New address register after we updated the post-increment candidate.
+ // v2,v3 = post_load v1, 4
+ // v3 is the choice here.
+ // 2. The base register we used in post-increment candidate.
+ // v2,v3 = post_load v1, 4
+ // v1 is the choice here.
+ // Use v3 if there is a memory dependence between post-inc instruction and
+ // any other instruction in the chain.
+ // FIXME: We can do some complex DAG analysis based on height and depth and
+ // selectively update other instructions in the chain. Use v3 if there are
+ // more instructions in the chain, otherwise we will end up increasing the
+ // height of the DAG, resulting in more spills. By default we have a
+ // threshold controlled by the option "post-inc-chain-threshold", which is
+ // set to 4. v1 is preferred as we can packetize two memory operations in a
+ // single packet on the scalar core. But it heavily depends on the structure
+ // of the DAG.
+ bool UpdateBaseToNew = false;
+
+ // Do not bother to build a DAG and analyze if the Use list is empty.
+ if (!UseList.empty()) {
+ MachineFunction *MF = MBB.getParent();
+ // Setup the Post-inc schedule DAG.
+ HexagonPostIncOptSchedDAG PIDAG(*this, *MF, MLI);
+ initPISchedDAG(PIDAG, MBB);
+ SUnit *SU = PIDAG.getSUnit(PostIncCandidate);
+ if (hasMemoryDependency(SU, UseList) ||
+ UseList.size() >= PostIncChainThreshold)
+ UpdateBaseToNew = true;
+ }
+
+ if (UpdateBaseToNew) {
+ LLVM_DEBUG(dbgs() << "The heuristic determines to update the uses of the "
+ "base register of post-increment\n");
+ for (auto UseMI : UseList) {
+ if (!dominates(PostIncCandidate, UseMI))
+ continue;
+ unsigned BasePos, OffsetPos;
+ if (HII->getBaseAndOffsetPosition(*UseMI, BasePos, OffsetPos)) {
+ // New offset has already been validated; no need to do it again.
+ LLVM_DEBUG({
+ UseMI->dump();
+ dbgs() << "\t is transformed to \n";
+ });
+ int64_t NewOffset = UseMI->getOperand(OffsetPos).getImm() - AddImm;
+ UseMI->getOperand(OffsetPos).setImm(NewOffset);
+ UseMI->getOperand(BasePos).setReg(LoopVal);
+ LLVM_DEBUG(UseMI->dump());
+ }
+ }
+ }
+ replaceWithPostInc(PostIncCandidate, LoopInst);
+ }
+ LLVM_DEBUG(dbgs() << "# End of generation of Post-inc.\n");
+}
+
+bool HexagonPostIncOpt::canReplaceWithPostInc(MachineInstr *MI,
+ MachineInstr *AddMI) const {
+ if (HII->changeAddrMode_io_pi(MI->getOpcode()) < 0)
+ return false;
+ assert(AddMI->getOpcode() == Hexagon::A2_addi);
+ return isValidPostIncValue(*MI, AddMI->getOperand(2).getImm());
+}
+
+void HexagonPostIncOpt::replaceWithPostInc(MachineInstr *MI,
+ MachineInstr *AddMI) const {
+ short NewOpcode = HII->changeAddrMode_io_pi(MI->getOpcode());
+ assert(NewOpcode >= 0 &&
+ "Couldn't change base offset to post-increment form");
+
+ MachineBasicBlock &MBB = *MI->getParent();
+ DebugLoc DL = MI->getDebugLoc();
+ const MachineOperand &IncDest = AddMI->getOperand(0);
+ const MachineOperand &IncBase = AddMI->getOperand(1);
+ const MachineOperand &IncValue = AddMI->getOperand(2);
+ MachineInstrBuilder MIB;
+ LLVM_DEBUG({
+ dbgs() << "\n\n";
+ MI->dump();
+ dbgs() << "\t is tranformed to post-inc form of \n";
+ });
+
+ if (MI->mayLoad()) {
+ const MachineOperand &LDValue = MI->getOperand(0);
+ MIB = BuildMI(MBB, *MI, DL, HII->get(NewOpcode));
+ MIB.add(LDValue).add(IncDest).add(IncBase).add(IncValue);
+ } else {
+ const MachineOperand &STValue = MI->getOperand(2);
+ MIB = BuildMI(MBB, *MI, DL, HII->get(NewOpcode));
+ MIB.add(IncDest).add(IncBase).add(IncValue).add(STValue);
+ }
+
+ // Transfer memoperands.
+ MIB->cloneMemRefs(*MBB.getParent(), *MI);
+
+ LLVM_DEBUG({
+ MIB->dump();
+ dbgs() << "As a result this add instruction is erased.\n";
+ AddMI->dump();
+ });
+
+ MI->eraseFromParent();
+ AddMI->eraseFromParent();
+}
+
+bool HexagonPostIncOpt::translatePostIncsInLoop(MachineBasicBlock &MBB) {
+ // Algorithm:
+ // 1. Replace all the post-inc instructions with Base+Offset instruction and
+ // an add instruction in this block.
+ // 2. Fold all the adds into their respective uses.
+ // 3. Generate post-increment instructions and update the uses of the base
+ // register if needed based on constraints.
+
+ replacePostIncWithBaseOffset(MBB);
+ foldAdds(MBB);
+ generatePostInc(MBB);
+ return true;
+}
+
+bool HexagonPostIncOpt::runOnMachineFunction(MachineFunction &MF) {
+
+ // Skip pass if requested.
+ if (skipFunction(MF.getFunction()))
+ return false;
+
+ // Get Target Information.
+ MLI = &getAnalysis<MachineLoopInfo>();
+ HST = &MF.getSubtarget<HexagonSubtarget>();
+ TRI = HST->getRegisterInfo();
+ MRI = &MF.getRegInfo();
+ HII = HST->getInstrInfo();
+
+ // Skip this pass for TinyCore.
+ // TinyCore allows only partial post-increment operations - this constraint
+ // could be imposed inside the pass: in a chain of post-increments, the
+ // first can remain a post-increment and the rest can be adjusted to
+ // base+offset (these are inexpensive in most cases).
+ if (HST->isTinyCore())
+ return false;
+
+ LLVM_DEBUG({
+ dbgs() << "Begin: Hexagon Post-Inc-Opt Pass.\n";
+ dbgs() << "Function: " << MF.getName() << "\n";
+ });
+ bool Change = false;
+ std::vector<MachineBasicBlock *> MLBB;
+ for (auto &BB : MF) {
+ // Check if this Basic Block belongs to any loop.
+ auto *LI = MLI->getLoopFor(&BB);
+ // We only deal with inner-most loops that have a single block.
+ if (LI && LI->getBlocks().size() == 1) {
+ MachineBasicBlock *MBB = LI->getHeader();
+ // Do not traverse blocks that are already visited.
+ if (std::find(MLBB.begin(), MLBB.end(), MBB) != MLBB.end())
+ continue;
+
+ MLBB.push_back(MBB);
+
+ LLVM_DEBUG(dbgs() << "\n\t Basic Block: " << MBB->getName() << "\n");
+ Change |= translatePostIncsInLoop(*MBB);
+ }
+ }
+ LLVM_DEBUG(dbgs() << "End: Hexagon Post-Inc-Opt Pass\n");
+ return Change;
+}
+
+FunctionPass *llvm::createHexagonPostIncOpt() {
+ return new HexagonPostIncOpt();
+}
diff --git a/llvm/lib/Target/Hexagon/HexagonTargetMachine.cpp b/llvm/lib/Target/Hexagon/HexagonTargetMachine.cpp
index 7d4b420071c4..a5ebd64f1f8a 100644
--- a/llvm/lib/Target/Hexagon/HexagonTargetMachine.cpp
+++ b/llvm/lib/Target/Hexagon/HexagonTargetMachine.cpp
@@ -65,6 +65,10 @@ static cl::opt<bool> EnableExpandCondsets("hexagon-expand-condsets",
cl::init(true), cl::Hidden,
cl::desc("Early expansion of MUX"));
+static cl::opt<bool> EnableTfrCleanup("hexagon-tfr-cleanup", cl::init(true),
+ cl::Hidden,
+ cl::desc("Cleanup of TFRs/COPYs"));
+
static cl::opt<bool> EnableEarlyIf("hexagon-eif", cl::init(true), cl::Hidden,
cl::desc("Enable early if-conversion"));
@@ -92,6 +96,10 @@ static cl::opt<bool>
static cl::opt<bool> DisableHSDR("disable-hsdr", cl::init(false), cl::Hidden,
cl::desc("Disable splitting double registers"));
+static cl::opt<bool>
+ EnableGenMemAbs("hexagon-mem-abs", cl::init(true), cl::Hidden,
+ cl::desc("Generate absolute set instructions"));
+
static cl::opt<bool> EnableBitSimplify("hexagon-bit", cl::init(true),
cl::Hidden, cl::desc("Bit simplification"));
@@ -121,6 +129,10 @@ static cl::opt<bool> EnableInstSimplify("hexagon-instsimplify", cl::Hidden,
cl::init(true),
cl::desc("Enable instsimplify"));
+static cl::opt<bool> DisableHexagonPostIncOpt(
+ "hexagon-postinc-opt", cl::Hidden,
+ cl::desc("Disable Hexagon post-increment optimization"));
+
/// HexagonTargetMachineModule - Note that this is used on hosts that
/// cannot link in a library unless there are references into the
/// library. In particular, it seems that it is not possible to get
@@ -145,20 +157,24 @@ SchedCustomRegistry("hexagon", "Run Hexagon's custom scheduler",
namespace llvm {
extern char &HexagonExpandCondsetsID;
+ extern char &HexagonTfrCleanupID;
void initializeHexagonBitSimplifyPass(PassRegistry&);
void initializeHexagonConstExtendersPass(PassRegistry&);
void initializeHexagonConstPropagationPass(PassRegistry&);
void initializeHexagonCopyToCombinePass(PassRegistry&);
void initializeHexagonEarlyIfConversionPass(PassRegistry&);
void initializeHexagonExpandCondsetsPass(PassRegistry&);
+ void initializeHexagonGenMemAbsolutePass(PassRegistry &);
void initializeHexagonGenMuxPass(PassRegistry&);
void initializeHexagonHardwareLoopsPass(PassRegistry&);
void initializeHexagonLoopIdiomRecognizeLegacyPassPass(PassRegistry &);
void initializeHexagonNewValueJumpPass(PassRegistry&);
void initializeHexagonOptAddrModePass(PassRegistry&);
void initializeHexagonPacketizerPass(PassRegistry&);
+ void initializeHexagonPostIncOptPass(PassRegistry &);
void initializeHexagonRDFOptPass(PassRegistry&);
void initializeHexagonSplitDoubleRegsPass(PassRegistry&);
+ void initializeHexagonTfrCleanupPass(PassRegistry &);
void initializeHexagonVExtractPass(PassRegistry &);
void initializeHexagonVectorCombineLegacyPass(PassRegistry&);
void initializeHexagonVectorLoopCarriedReuseLegacyPassPass(PassRegistry &);
@@ -177,6 +193,7 @@ namespace llvm {
FunctionPass *createHexagonFixupHwLoops();
FunctionPass *createHexagonGenExtract();
FunctionPass *createHexagonGenInsert();
+ FunctionPass *createHexagonGenMemAbsolute();
FunctionPass *createHexagonGenMux();
FunctionPass *createHexagonGenPredicate();
FunctionPass *createHexagonHardwareLoops();
@@ -188,10 +205,12 @@ namespace llvm {
FunctionPass *createHexagonOptimizeSZextends();
FunctionPass *createHexagonPacketizer(bool Minimal);
FunctionPass *createHexagonPeephole();
+ FunctionPass *createHexagonPostIncOpt();
FunctionPass *createHexagonRDFOpt();
FunctionPass *createHexagonSplitConst32AndConst64();
FunctionPass *createHexagonSplitDoubleRegs();
FunctionPass *createHexagonStoreWidening();
+ FunctionPass *createHexagonTfrCleanup();
FunctionPass *createHexagonVectorCombineLegacyPass();
FunctionPass *createHexagonVectorPrint();
FunctionPass *createHexagonVExtract();
@@ -211,12 +230,14 @@ extern "C" LLVM_EXTERNAL_VISIBILITY void LLVMInitializeHexagonTarget() {
initializeHexagonConstPropagationPass(PR);
initializeHexagonCopyToCombinePass(PR);
initializeHexagonEarlyIfConversionPass(PR);
+ initializeHexagonGenMemAbsolutePass(PR);
initializeHexagonGenMuxPass(PR);
initializeHexagonHardwareLoopsPass(PR);
initializeHexagonLoopIdiomRecognizeLegacyPassPass(PR);
initializeHexagonNewValueJumpPass(PR);
initializeHexagonOptAddrModePass(PR);
initializeHexagonPacketizerPass(PR);
+ initializeHexagonPostIncOptPass(PR);
initializeHexagonRDFOptPass(PR);
initializeHexagonSplitDoubleRegsPass(PR);
initializeHexagonVectorCombineLegacyPass(PR);
@@ -244,6 +265,8 @@ HexagonTargetMachine::HexagonTargetMachine(const Target &T, const Triple &TT,
(HexagonNoOpt ? CodeGenOptLevel::None : OL)),
TLOF(std::make_unique<HexagonTargetObjectFile>()) {
initializeHexagonExpandCondsetsPass(*PassRegistry::getPassRegistry());
+ initializeHexagonTfrCleanupPass(*PassRegistry::getPassRegistry());
+ initializeHexagonPostIncOptPass(*PassRegistry::getPassRegistry());
initAsmInfo();
}
@@ -411,11 +434,20 @@ void HexagonPassConfig::addPreRegAlloc() {
addPass(createHexagonConstExtenders());
if (EnableExpandCondsets)
insertPass(&RegisterCoalescerID, &HexagonExpandCondsetsID);
+ if (EnableTfrCleanup)
+ insertPass(&VirtRegRewriterID, &HexagonTfrCleanupID);
if (!DisableStoreWidening)
addPass(createHexagonStoreWidening());
+ if (EnableGenMemAbs)
+ addPass(createHexagonGenMemAbsolute());
if (!DisableHardwareLoops)
addPass(createHexagonHardwareLoops());
}
+
+ if (TM->getOptLevel() >= CodeGenOptLevel::Aggressive)
+ if (!DisableHexagonPostIncOpt)
+ addPass(createHexagonPostIncOpt());
+
if (TM->getOptLevel() >= CodeGenOptLevel::Default)
addPass(&MachinePipelinerID);
}
diff --git a/llvm/lib/Target/Hexagon/HexagonTfrCleanup.cpp b/llvm/lib/Target/Hexagon/HexagonTfrCleanup.cpp
new file mode 100644
index 000000000000..a4b359af303a
--- /dev/null
+++ b/llvm/lib/Target/Hexagon/HexagonTfrCleanup.cpp
@@ -0,0 +1,324 @@
+//===------- HexagonTfrCleanup.cpp - Hexagon Transfer Cleanup Pass -------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+// This pass addresses a situation that appears every now and then after
+// register allocation, namely a register copy from a source that was defined
+// as an immediate value in the same block (usually just before the copy).
+//
+// Here is an example of actual code emitted that shows this problem:
+//
+// .LBB0_5:
+// {
+// r5 = zxtb(r8)
+// r6 = or(r6, ##12345)
+// }
+// {
+// r3 = xor(r1, r2)
+// r1 = #0 <-- r1 set to #0
+// }
+// {
+// r7 = r1 <-- r7 set to r1
+// r0 = zxtb(r3)
+// }
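+//
+// In the example above, the copy "r7 = r1" can be rewritten as "r7 = #0",
+// since r1 is known to hold the immediate #0 at that point. Self-assignments
+// such as "r1 = r1" are simply erased.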
+
+#define DEBUG_TYPE "tfr-cleanup"
+#include "HexagonTargetMachine.h"
+
+#include "llvm/CodeGen/LiveInterval.h"
+#include "llvm/CodeGen/LiveIntervals.h"
+#include "llvm/CodeGen/MachineFunction.h"
+#include "llvm/CodeGen/MachineInstrBuilder.h"
+#include "llvm/CodeGen/MachineRegisterInfo.h"
+#include "llvm/CodeGen/Passes.h"
+#include "llvm/CodeGen/TargetInstrInfo.h"
+#include "llvm/CodeGen/TargetRegisterInfo.h"
+#include "llvm/Support/CommandLine.h"
+#include "llvm/Support/Debug.h"
+#include "llvm/Support/raw_ostream.h"
+#include "llvm/Target/TargetMachine.h"
+
+using namespace llvm;
+
+namespace llvm {
+FunctionPass *createHexagonTfrCleanup();
+void initializeHexagonTfrCleanupPass(PassRegistry &);
+} // namespace llvm
+
+namespace {
+class HexagonTfrCleanup : public MachineFunctionPass {
+public:
+ static char ID;
+ HexagonTfrCleanup() : MachineFunctionPass(ID), HII(0), TRI(0) {
+ PassRegistry &R = *PassRegistry::getPassRegistry();
+ initializeHexagonTfrCleanupPass(R);
+ }
+ StringRef getPassName() const override { return "Hexagon TFR Cleanup"; }
+ void getAnalysisUsage(AnalysisUsage &AU) const override {
+ AU.setPreservesAll();
+ MachineFunctionPass::getAnalysisUsage(AU);
+ }
+ bool runOnMachineFunction(MachineFunction &MF) override;
+
+private:
+ const HexagonInstrInfo *HII;
+ const TargetRegisterInfo *TRI;
+
+ typedef DenseMap<unsigned, uint64_t> ImmediateMap;
+
+ bool isIntReg(unsigned Reg, bool &Is32);
+ void setReg(unsigned R32, uint32_t V32, ImmediateMap &IMap);
+ bool getReg(unsigned Reg, uint64_t &Val, ImmediateMap &IMap);
+ bool updateImmMap(MachineInstr *MI, ImmediateMap &IMap);
+ bool rewriteIfImm(MachineInstr *MI, ImmediateMap &IMap, SlotIndexes *Indexes);
+ bool eraseIfRedundant(MachineInstr *MI, SlotIndexes *Indexes);
+};
+} // namespace
+
+char HexagonTfrCleanup::ID = 0;
+
+namespace llvm {
+char &HexagonTfrCleanupID = HexagonTfrCleanup::ID;
+}
+
+bool HexagonTfrCleanup::isIntReg(unsigned Reg, bool &Is32) {
+ Is32 = Hexagon::IntRegsRegClass.contains(Reg);
+ return Is32 || Hexagon::DoubleRegsRegClass.contains(Reg);
+}
+
+// Assign the given value V32 to the specified register R32 in the map. Only
+// 32-bit registers are valid arguments.
+void HexagonTfrCleanup::setReg(unsigned R32, uint32_t V32, ImmediateMap &IMap) {
+ ImmediateMap::iterator F = IMap.find(R32);
+ if (F == IMap.end())
+ IMap.insert(std::make_pair(R32, V32));
+ else
+ F->second = V32;
+}
+
+// Retrieve the value of the provided register Reg and store it in Val.
+// Return "true" if a value was found, "false" otherwise.
+bool HexagonTfrCleanup::getReg(unsigned Reg, uint64_t &Val,
+ ImmediateMap &IMap) {
+ bool Is32;
+ if (!isIntReg(Reg, Is32))
+ return false;
+
+ if (Is32) {
+ ImmediateMap::iterator F = IMap.find(Reg);
+ if (F == IMap.end())
+ return false;
+ Val = F->second;
+ return true;
+ }
+
+ // For 64-bit registers, compose the value from the values of its
+ // subregisters.
+ unsigned SubL = TRI->getSubReg(Reg, Hexagon::isub_lo);
+ unsigned SubH = TRI->getSubReg(Reg, Hexagon::isub_hi);
+ ImmediateMap::iterator FL = IMap.find(SubL), FH = IMap.find(SubH);
+ if (FL == IMap.end() || FH == IMap.end())
+ return false;
+ Val = (FH->second << 32) | FL->second;
+ return true;
+}
+
+// Process an instruction and record the relevant information in the
+// immediate map.
+bool HexagonTfrCleanup::updateImmMap(MachineInstr *MI, ImmediateMap &IMap) {
+ using namespace Hexagon;
+
+ if (MI->isCall()) {
+ IMap.clear();
+ return true;
+ }
+
+ // If this is an instruction that loads a constant into a register,
+ // record this information in IMap.
+ unsigned Opc = MI->getOpcode();
+ if (Opc == A2_tfrsi || Opc == A2_tfrpi) {
+ unsigned DefR = MI->getOperand(0).getReg();
+ bool Is32;
+ if (!isIntReg(DefR, Is32))
+ return false;
+ if (!MI->getOperand(1).isImm()) {
+ if (!Is32) {
+ IMap.erase(TRI->getSubReg(DefR, isub_lo));
+ IMap.erase(TRI->getSubReg(DefR, isub_hi));
+ } else {
+ IMap.erase(DefR);
+ }
+ return false;
+ }
+ uint64_t Val = MI->getOperand(1).getImm();
+ // If it's a 64-bit register, break it up into subregisters.
+ if (!Is32) {
+ uint32_t VH = (Val >> 32), VL = (Val & 0xFFFFFFFFU);
+ setReg(TRI->getSubReg(DefR, isub_lo), VL, IMap);
+ setReg(TRI->getSubReg(DefR, isub_hi), VH, IMap);
+ } else {
+ setReg(DefR, Val, IMap);
+ }
+ return true;
+ }
+
+ // Not an A2_tfr[sp]i. Invalidate all modified registers in IMap.
+ for (MachineInstr::mop_iterator Mo = MI->operands_begin(),
+ E = MI->operands_end();
+ Mo != E; ++Mo) {
+ if (Mo->isRegMask()) {
+ IMap.clear();
+ return true;
+ }
+ if (!Mo->isReg() || !Mo->isDef())
+ continue;
+ unsigned R = Mo->getReg();
+ for (MCRegAliasIterator AR(R, TRI, true); AR.isValid(); ++AR) {
+ ImmediateMap::iterator F = IMap.find(*AR);
+ if (F != IMap.end())
+ IMap.erase(F);
+ }
+ }
+ return true;
+}
+
+// Rewrite the instruction as A2_tfrsi/A2_tfrpi if it is a copy of a source
+// that has a known constant value.
+bool HexagonTfrCleanup::rewriteIfImm(MachineInstr *MI, ImmediateMap &IMap,
+ SlotIndexes *Indexes) {
+ using namespace Hexagon;
+ unsigned Opc = MI->getOpcode();
+ switch (Opc) {
+ case A2_tfr:
+ case A2_tfrp:
+ case COPY:
+ break;
+ default:
+ return false;
+ }
+
+ unsigned DstR = MI->getOperand(0).getReg();
+ unsigned SrcR = MI->getOperand(1).getReg();
+ bool Tmp, Is32;
+ if (!isIntReg(DstR, Is32) || !isIntReg(SrcR, Tmp))
+ return false;
+ assert(Tmp == Is32 && "Register size mismatch");
+ uint64_t Val;
+ bool Found = getReg(SrcR, Val, IMap);
+ if (!Found)
+ return false;
+
+ MachineBasicBlock &B = *MI->getParent();
+ DebugLoc DL = MI->getDebugLoc();
+ int64_t SVal = Is32 ? int32_t(Val) : Val;
+ auto &HST = B.getParent()->getSubtarget<HexagonSubtarget>();
+ MachineInstr *NewMI;
+ if (Is32)
+ NewMI = BuildMI(B, MI, DL, HII->get(A2_tfrsi), DstR).addImm(SVal);
+ else if (isInt<8>(SVal))
+ NewMI = BuildMI(B, MI, DL, HII->get(A2_tfrpi), DstR).addImm(SVal);
+ else if (isInt<8>(SVal >> 32) && isInt<8>(int32_t(Val & 0xFFFFFFFFLL)))
+ NewMI = BuildMI(B, MI, DL, HII->get(A2_combineii), DstR)
+ .addImm(int32_t(SVal >> 32))
+ .addImm(int32_t(Val & 0xFFFFFFFFLL));
+ else if (HST.isTinyCore())
+ // Disable generating CONST64 since it requires load resource.
+ return false;
+ else
+ NewMI = BuildMI(B, MI, DL, HII->get(CONST64), DstR).addImm(Val);
+
+ // Replace the MI to reuse the same slot index
+ if (Indexes)
+ Indexes->replaceMachineInstrInMaps(*MI, *NewMI);
+ MI->eraseFromParent();
+ return true;
+}
+
+// Remove the instruction if it is a self-assignment.
+bool HexagonTfrCleanup::eraseIfRedundant(MachineInstr *MI,
+ SlotIndexes *Indexes) {
+ unsigned Opc = MI->getOpcode();
+ unsigned DefR, SrcR;
+ bool IsUndef = false;
+ switch (Opc) {
+ case Hexagon::A2_tfr:
+ // Rd = Rd
+ DefR = MI->getOperand(0).getReg();
+ SrcR = MI->getOperand(1).getReg();
+ IsUndef = MI->getOperand(1).isUndef();
+ break;
+ case Hexagon::A2_tfrt:
+ case Hexagon::A2_tfrf:
+ // if ([!]Pu) Rd = Rd
+ DefR = MI->getOperand(0).getReg();
+ SrcR = MI->getOperand(2).getReg();
+ IsUndef = MI->getOperand(2).isUndef();
+ break;
+ default:
+ return false;
+ }
+ if (DefR != SrcR)
+ return false;
+ if (IsUndef) {
+ MachineBasicBlock &B = *MI->getParent();
+ DebugLoc DL = MI->getDebugLoc();
+ auto DefI = BuildMI(B, MI, DL, HII->get(TargetOpcode::IMPLICIT_DEF), DefR);
+ for (auto &Op : MI->operands())
+ if (Op.isReg() && Op.isDef() && Op.isImplicit())
+ DefI->addOperand(Op);
+ }
+
+ if (Indexes)
+ Indexes->removeMachineInstrFromMaps(*MI);
+ MI->eraseFromParent();
+ return true;
+}
+
+bool HexagonTfrCleanup::runOnMachineFunction(MachineFunction &MF) {
+ bool Changed = false;
+ // Map: 32-bit register -> immediate value.
+ // 64-bit registers are stored through their subregisters.
+ ImmediateMap IMap;
+ SlotIndexes *Indexes = this->getAnalysisIfAvailable<SlotIndexes>();
+
+ auto &HST = MF.getSubtarget<HexagonSubtarget>();
+ HII = HST.getInstrInfo();
+ TRI = HST.getRegisterInfo();
+
+ for (MachineFunction::iterator I = MF.begin(), E = MF.end(); I != E; ++I) {
+ MachineBasicBlock &B = *I;
+ MachineBasicBlock::iterator J, F, NextJ;
+ IMap.clear();
+ bool Inserted = false, Erased = false;
+ for (J = B.begin(), F = B.end(); J != F; J = NextJ) {
+ NextJ = std::next(J);
+ MachineInstr *MI = &*J;
+ bool E = eraseIfRedundant(MI, Indexes);
+ Erased |= E;
+ if (E)
+ continue;
+ Inserted |= rewriteIfImm(MI, IMap, Indexes);
+ MachineBasicBlock::iterator NewJ = std::prev(NextJ);
+ updateImmMap(&*NewJ, IMap);
+ }
+ bool BlockC = Inserted | Erased;
+ Changed |= BlockC;
+ if (BlockC && Indexes)
+ Indexes->repairIndexesInRange(&B, B.begin(), B.end());
+ }
+
+ return Changed;
+}
+
+//===----------------------------------------------------------------------===//
+// Public Constructor Functions
+//===----------------------------------------------------------------------===//
+INITIALIZE_PASS(HexagonTfrCleanup, "tfr-cleanup", "Hexagon TFR Cleanup", false,
+ false)
+
+FunctionPass *llvm::createHexagonTfrCleanup() {
+ return new HexagonTfrCleanup();
+}
diff --git a/llvm/lib/Target/Hexagon/MCTargetDesc/HexagonBaseInfo.h b/llvm/lib/Target/Hexagon/MCTargetDesc/HexagonBaseInfo.h
index ca982696b060..98404121bda0 100644
--- a/llvm/lib/Target/Hexagon/MCTargetDesc/HexagonBaseInfo.h
+++ b/llvm/lib/Target/Hexagon/MCTargetDesc/HexagonBaseInfo.h
@@ -18,6 +18,7 @@
#include "HexagonDepITypes.h"
#include "MCTargetDesc/HexagonMCTargetDesc.h"
+#include "llvm/MC/MCInstrDesc.h"
namespace llvm {
@@ -48,7 +49,7 @@ namespace HexagonII {
// MCInstrDesc TSFlags
// *** Must match HexagonInstrFormat*.td ***
- enum {
+ enum HexagonTSFlagsVal {
// This 7-bit field describes the insn type.
TypePos = 0,
TypeMask = 0x7f,
@@ -173,6 +174,11 @@ namespace HexagonII {
hasUnaryRestrictionMask = 0x1,
};
+ inline unsigned getTSFlags(const MCInstrDesc &MID, HexagonTSFlagsVal Pos,
+ unsigned Mask) {
+ return (MID.TSFlags >> Pos) & Mask;
+ }
+
// *** The code above must match HexagonInstrFormat*.td *** //
// Hexagon specific MO operand flag mask.
@@ -275,6 +281,10 @@ namespace HexagonII {
INST_ICLASS_ALU32_3 = 0xf0000000
};
+ inline bool isCVI(const MCInstrDesc &MID) {
+ return getTSFlags(MID, isCVIPos, isCVIMask) != 0;
+ }
+
LLVM_ATTRIBUTE_UNUSED
static unsigned getMemAccessSizeInBytes(MemAccessSize S) {
switch (S) {
diff --git a/llvm/lib/Target/NVPTX/NVPTXISelDAGToDAG.cpp b/llvm/lib/Target/NVPTX/NVPTXISelDAGToDAG.cpp
index ded2f2584014..3ff8994602e1 100644
--- a/llvm/lib/Target/NVPTX/NVPTXISelDAGToDAG.cpp
+++ b/llvm/lib/Target/NVPTX/NVPTXISelDAGToDAG.cpp
@@ -2135,6 +2135,21 @@ bool NVPTXDAGToDAGISel::tryStoreRetval(SDNode *N) {
NVPTX::StoreRetvalI8, NVPTX::StoreRetvalI16,
NVPTX::StoreRetvalI32, NVPTX::StoreRetvalI64,
NVPTX::StoreRetvalF32, NVPTX::StoreRetvalF64);
+ if (Opcode == NVPTX::StoreRetvalI8) {
+ // Fine tune the opcode depending on the size of the operand.
+ // This helps to avoid creating redundant COPY instructions in
+ // InstrEmitter::AddRegisterOperand().
+ switch (Ops[0].getSimpleValueType().SimpleTy) {
+ default:
+ break;
+ case MVT::i32:
+ Opcode = NVPTX::StoreRetvalI8TruncI32;
+ break;
+ case MVT::i64:
+ Opcode = NVPTX::StoreRetvalI8TruncI64;
+ break;
+ }
+ }
break;
case 2:
Opcode = pickOpcodeForVT(Mem->getMemoryVT().getSimpleVT().SimpleTy,
@@ -2211,6 +2226,21 @@ bool NVPTXDAGToDAGISel::tryStoreParam(SDNode *N) {
NVPTX::StoreParamI8, NVPTX::StoreParamI16,
NVPTX::StoreParamI32, NVPTX::StoreParamI64,
NVPTX::StoreParamF32, NVPTX::StoreParamF64);
+ if (Opcode == NVPTX::StoreParamI8) {
+ // Fine tune the opcode depending on the size of the operand.
+ // This helps to avoid creating redundant COPY instructions in
+ // InstrEmitter::AddRegisterOperand().
+ switch (Ops[0].getSimpleValueType().SimpleTy) {
+ default:
+ break;
+ case MVT::i32:
+ Opcode = NVPTX::StoreParamI8TruncI32;
+ break;
+ case MVT::i64:
+ Opcode = NVPTX::StoreParamI8TruncI64;
+ break;
+ }
+ }
break;
case 2:
Opcode = pickOpcodeForVT(Mem->getMemoryVT().getSimpleVT().SimpleTy,
diff --git a/llvm/lib/Target/NVPTX/NVPTXISelLowering.cpp b/llvm/lib/Target/NVPTX/NVPTXISelLowering.cpp
index 7d2fe78d1422..66a101036f91 100644
--- a/llvm/lib/Target/NVPTX/NVPTXISelLowering.cpp
+++ b/llvm/lib/Target/NVPTX/NVPTXISelLowering.cpp
@@ -47,6 +47,7 @@
#include "llvm/IR/Module.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/Value.h"
+#include "llvm/Support/Alignment.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/CodeGen.h"
#include "llvm/Support/CommandLine.h"
@@ -59,6 +60,7 @@
#include <cmath>
#include <cstdint>
#include <iterator>
+#include <optional>
#include <sstream>
#include <string>
#include <utility>
@@ -1529,6 +1531,105 @@ Align NVPTXTargetLowering::getArgumentAlignment(const CallBase *CB, Type *Ty,
return DL.getABITypeAlign(Ty);
}
+static bool adjustElementType(EVT &ElementType) {
+ switch (ElementType.getSimpleVT().SimpleTy) {
+ default:
+ return false;
+ case MVT::f16:
+ case MVT::bf16:
+ ElementType = MVT::i16;
+ return true;
+ case MVT::f32:
+ case MVT::v2f16:
+ case MVT::v2bf16:
+ ElementType = MVT::i32;
+ return true;
+ case MVT::f64:
+ ElementType = MVT::i64;
+ return true;
+ }
+}
+
+// Use byte-store when the param address of the argument value is unaligned.
+// This may happen when the argument value is a field of a packed structure.
+//
+// This is called in LowerCall() when passing the param values.
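+//
+// For example, an i32 argument at an unaligned param offset is written as
+// four single-byte StoreParam nodes, each storing byte i of the value at
+// (Offset + i).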
+static SDValue LowerUnalignedStoreParam(SelectionDAG &DAG, SDValue Chain,
+ uint64_t Offset, EVT ElementType,
+ SDValue StVal, SDValue &InGlue,
+ unsigned ArgID, const SDLoc &dl) {
+ // Bit logic only works on integer types
+ if (adjustElementType(ElementType))
+ StVal = DAG.getNode(ISD::BITCAST, dl, ElementType, StVal);
+
+ // Store each byte
+ SDVTList StoreVTs = DAG.getVTList(MVT::Other, MVT::Glue);
+ for (unsigned i = 0, n = ElementType.getSizeInBits() / 8; i < n; i++) {
+ // Shift the byte to the last byte position
+ SDValue ShiftVal = DAG.getNode(ISD::SRL, dl, ElementType, StVal,
+ DAG.getConstant(i * 8, dl, MVT::i32));
+ SDValue StoreOperands[] = {Chain, DAG.getConstant(ArgID, dl, MVT::i32),
+ DAG.getConstant(Offset + i, dl, MVT::i32),
+ ShiftVal, InGlue};
+ // Truncating store of only the last byte using st.param.b8;
+ // the register type can be larger than b8.
+ Chain = DAG.getMemIntrinsicNode(
+ NVPTXISD::StoreParam, dl, StoreVTs, StoreOperands, MVT::i8,
+ MachinePointerInfo(), Align(1), MachineMemOperand::MOStore);
+ InGlue = Chain.getValue(1);
+ }
+ return Chain;
+}
+
+// Use byte-load when the param address of the returned value is unaligned.
+// This may happen when the returned value is a field of a packed structure.
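+//
+// Each byte is loaded with a single-byte LoadParam, zero-extended, masked to
+// 8 bits, shifted into position, and OR-ed into the accumulated result.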
+static SDValue
+LowerUnalignedLoadRetParam(SelectionDAG &DAG, SDValue &Chain, uint64_t Offset,
+ EVT ElementType, SDValue &InGlue,
+ SmallVectorImpl<SDValue> &TempProxyRegOps,
+ const SDLoc &dl) {
+ // Bit logic only works on integer types
+ EVT MergedType = ElementType;
+ adjustElementType(MergedType);
+
+ // Load each byte and construct the whole value. The initial value is 0.
+ SDValue RetVal = DAG.getConstant(0, dl, MergedType);
+ // LoadParamMemI8 loads into an i16 register only.
+ SDVTList LoadVTs = DAG.getVTList(MVT::i16, MVT::Other, MVT::Glue);
+ for (unsigned i = 0, n = ElementType.getSizeInBits() / 8; i < n; i++) {
+ SDValue LoadOperands[] = {Chain, DAG.getConstant(1, dl, MVT::i32),
+ DAG.getConstant(Offset + i, dl, MVT::i32),
+ InGlue};
+ // This will be selected to LoadParamMemI8
+ SDValue LdVal =
+ DAG.getMemIntrinsicNode(NVPTXISD::LoadParam, dl, LoadVTs, LoadOperands,
+ MVT::i8, MachinePointerInfo(), Align(1));
+ SDValue TmpLdVal = LdVal.getValue(0);
+ Chain = LdVal.getValue(1);
+ InGlue = LdVal.getValue(2);
+
+ TmpLdVal = DAG.getNode(NVPTXISD::ProxyReg, dl,
+ TmpLdVal.getSimpleValueType(), TmpLdVal);
+ TempProxyRegOps.push_back(TmpLdVal);
+
+ SDValue CMask = DAG.getConstant(255, dl, MergedType);
+ SDValue CShift = DAG.getConstant(i * 8, dl, MVT::i32);
+ // Need to extend the i16 register to the whole width.
+ TmpLdVal = DAG.getNode(ISD::ZERO_EXTEND, dl, MergedType, TmpLdVal);
+ // Mask off the high bits. Leave only the lower 8 bits.
+ // Do this because we are using loadparam.b8.
+ TmpLdVal = DAG.getNode(ISD::AND, dl, MergedType, TmpLdVal, CMask);
+ // Shift and merge
+ TmpLdVal = DAG.getNode(ISD::SHL, dl, MergedType, TmpLdVal, CShift);
+ RetVal = DAG.getNode(ISD::OR, dl, MergedType, RetVal, TmpLdVal);
+ }
+ if (ElementType != MergedType)
+ RetVal = DAG.getNode(ISD::BITCAST, dl, ElementType, RetVal);
+
+ return RetVal;
+}
+
SDValue NVPTXTargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI,
SmallVectorImpl<SDValue> &InVals) const {
@@ -1680,17 +1781,6 @@ SDValue NVPTXTargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI,
if (NeedAlign)
PartAlign = commonAlignment(ArgAlign, CurOffset);
- // New store.
- if (VectorInfo[j] & PVF_FIRST) {
- assert(StoreOperands.empty() && "Unfinished preceding store.");
- StoreOperands.push_back(Chain);
- StoreOperands.push_back(
- DAG.getConstant(IsVAArg ? FirstVAArg : ParamCount, dl, MVT::i32));
- StoreOperands.push_back(DAG.getConstant(
- IsByVal ? CurOffset + VAOffset : (IsVAArg ? VAOffset : CurOffset),
- dl, MVT::i32));
- }
-
SDValue StVal = OutVals[OIdx];
MVT PromotedVT;
@@ -1723,6 +1813,35 @@ SDValue NVPTXTargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI,
StVal = DAG.getNode(ISD::ANY_EXTEND, dl, MVT::i16, StVal);
}
+ // If we have a PVF_SCALAR entry, it may not be sufficiently aligned for a
+ // scalar store. In such cases, fall back to byte stores.
+ if (VectorInfo[j] == PVF_SCALAR && !IsVAArg && PartAlign.has_value() &&
+ PartAlign.value() <
+ DL.getABITypeAlign(EltVT.getTypeForEVT(*DAG.getContext()))) {
+ assert(StoreOperands.empty() && "Unfinished preceeding store.");
+ Chain = LowerUnalignedStoreParam(
+ DAG, Chain, IsByVal ? CurOffset + VAOffset : CurOffset, EltVT,
+ StVal, InGlue, ParamCount, dl);
+
+ // LowerUnalignedStoreParam took care of inserting the necessary nodes
+ // into the SDAG, so just move on to the next element.
+ if (!IsByVal)
+ ++OIdx;
+ continue;
+ }
+
+ // New store.
+ if (VectorInfo[j] & PVF_FIRST) {
+ assert(StoreOperands.empty() && "Unfinished preceding store.");
+ StoreOperands.push_back(Chain);
+ StoreOperands.push_back(
+ DAG.getConstant(IsVAArg ? FirstVAArg : ParamCount, dl, MVT::i32));
+
+ StoreOperands.push_back(DAG.getConstant(
+ IsByVal ? CurOffset + VAOffset : (IsVAArg ? VAOffset : CurOffset),
+ dl, MVT::i32));
+ }
+
// Record the value to store.
StoreOperands.push_back(StVal);
@@ -1923,6 +2042,14 @@ SDValue NVPTXTargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI,
SmallVector<SDValue, 16> ProxyRegOps;
SmallVector<std::optional<MVT>, 16> ProxyRegTruncates;
+ // An item of this vector is set if the corresponding element does not need
+ // a ProxyReg operation and should be added to InVals as is. ProxyRegOps and
+ // ProxyRegTruncates contain empty/none entries at the same index.
+ SmallVector<SDValue, 16> RetElts;
+ // Temporary ProxyReg operations inserted in `LowerUnalignedLoadRetParam()`
+ // to consume the values of `LoadParam`s; they are replaced later, when
+ // `CALLSEQ_END` is added.
+ SmallVector<SDValue, 16> TempProxyRegOps;
// Generate loads from param memory/moves from registers for result
if (Ins.size() > 0) {
@@ -1966,6 +2093,22 @@ SDValue NVPTXTargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI,
EltType = MVT::i16;
}
+ // If we have a PVF_SCALAR entry, it may not be sufficiently aligned for a
+ // scalar load. In such cases, fall back to byte loads.
+ if (VectorInfo[i] == PVF_SCALAR && RetTy->isAggregateType() &&
+ EltAlign < DL.getABITypeAlign(
+ TheLoadType.getTypeForEVT(*DAG.getContext()))) {
+ assert(VecIdx == -1 && LoadVTs.empty() && "Orphaned operand list.");
+ SDValue Ret = LowerUnalignedLoadRetParam(
+ DAG, Chain, Offsets[i], TheLoadType, InGlue, TempProxyRegOps, dl);
+ ProxyRegOps.push_back(SDValue());
+ ProxyRegTruncates.push_back(std::optional<MVT>());
+ RetElts.resize(i);
+ RetElts.push_back(Ret);
+
+ continue;
+ }
+
// Record index of the very first element of the vector.
if (VectorInfo[i] & PVF_FIRST) {
assert(VecIdx == -1 && LoadVTs.empty() && "Orphaned operand list.");
@@ -2028,6 +2171,11 @@ SDValue NVPTXTargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI,
// will not get lost. Otherwise, during libcalls expansion, the nodes can become
// dangling.
for (unsigned i = 0; i < ProxyRegOps.size(); ++i) {
+ if (i < RetElts.size() && RetElts[i]) {
+ InVals.push_back(RetElts[i]);
+ continue;
+ }
+
SDValue Ret = DAG.getNode(
NVPTXISD::ProxyReg, dl,
DAG.getVTList(ProxyRegOps[i].getSimpleValueType(), MVT::Other, MVT::Glue),
@@ -2044,6 +2192,18 @@ SDValue NVPTXTargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI,
InVals.push_back(Ret);
}
+ for (SDValue &T : TempProxyRegOps) {
+ SDValue Repl = DAG.getNode(
+ NVPTXISD::ProxyReg, dl,
+ DAG.getVTList(T.getSimpleValueType(), MVT::Other, MVT::Glue),
+ {Chain, T.getOperand(0), InGlue});
+ DAG.ReplaceAllUsesWith(T, Repl);
+ DAG.RemoveDeadNode(T.getNode());
+
+ Chain = Repl.getValue(1);
+ InGlue = Repl.getValue(2);
+ }
+
// set isTailCall to false for now, until we figure out how to express
// tail call optimization in PTX
isTailCall = false;
@@ -3045,9 +3205,20 @@ SDValue NVPTXTargetLowering::LowerFormalArguments(
DAG.getConstant(Offsets[VecIdx], dl, PtrVT));
Value *srcValue = Constant::getNullValue(PointerType::get(
EltVT.getTypeForEVT(F->getContext()), ADDRESS_SPACE_PARAM));
+
+ const MaybeAlign PartAlign = [&]() -> MaybeAlign {
+ if (aggregateIsPacked)
+ return Align(1);
+ if (NumElts != 1)
+ return std::nullopt;
+ Align PartAlign =
+ (Offsets[parti] == 0 && PAL.getParamAlignment(i))
+ ? PAL.getParamAlignment(i).value()
+ : DL.getABITypeAlign(EltVT.getTypeForEVT(F->getContext()));
+ return commonAlignment(PartAlign, Offsets[parti]);
+ }();
SDValue P = DAG.getLoad(VecVT, dl, Root, VecAddr,
- MachinePointerInfo(srcValue),
- MaybeAlign(aggregateIsPacked ? 1 : 0),
+ MachinePointerInfo(srcValue), PartAlign,
MachineMemOperand::MODereferenceable |
MachineMemOperand::MOInvariant);
if (P.getNode())
@@ -3113,6 +3284,33 @@ SDValue NVPTXTargetLowering::LowerFormalArguments(
return Chain;
}
+// Use byte stores when the param address of the return value is unaligned.
+// This may happen when the return value is a field of a packed structure.
+static SDValue LowerUnalignedStoreRet(SelectionDAG &DAG, SDValue Chain,
+ uint64_t Offset, EVT ElementType,
+ SDValue RetVal, const SDLoc &dl) {
+ // Bit logic only works on integer types
+ if (adjustElementType(ElementType))
+ RetVal = DAG.getNode(ISD::BITCAST, dl, ElementType, RetVal);
+
+ // Store each byte
+ for (unsigned i = 0, n = ElementType.getSizeInBits() / 8; i < n; i++) {
+ // Shift the byte to the last byte position
+ SDValue ShiftVal = DAG.getNode(ISD::SRL, dl, ElementType, RetVal,
+ DAG.getConstant(i * 8, dl, MVT::i32));
+ SDValue StoreOperands[] = {Chain, DAG.getConstant(Offset + i, dl, MVT::i32),
+ ShiftVal};
+ // Trunc store only the last byte by using
+ // st.param.b8
+ // The register type can be larger than b8.
+ Chain = DAG.getMemIntrinsicNode(NVPTXISD::StoreRetval, dl,
+ DAG.getVTList(MVT::Other), StoreOperands,
+ MVT::i8, MachinePointerInfo(), std::nullopt,
+ MachineMemOperand::MOStore);
+ }
+ return Chain;
+}
+
SDValue
NVPTXTargetLowering::LowerReturn(SDValue Chain, CallingConv::ID CallConv,
bool isVarArg,
@@ -3162,13 +3360,6 @@ NVPTXTargetLowering::LowerReturn(SDValue Chain, CallingConv::ID CallConv,
SmallVector<SDValue, 6> StoreOperands;
for (unsigned i = 0, e = VTs.size(); i != e; ++i) {
- // New load/store. Record chain and offset operands.
- if (VectorInfo[i] & PVF_FIRST) {
- assert(StoreOperands.empty() && "Orphaned operand list.");
- StoreOperands.push_back(Chain);
- StoreOperands.push_back(DAG.getConstant(Offsets[i], dl, MVT::i32));
- }
-
SDValue OutVal = OutVals[i];
SDValue RetVal = PromotedOutVals[i];
@@ -3182,6 +3373,32 @@ NVPTXTargetLowering::LowerReturn(SDValue Chain, CallingConv::ID CallConv,
RetVal = DAG.getNode(ISD::ANY_EXTEND, dl, MVT::i16, RetVal);
}
+ // If we have a PVF_SCALAR entry, it may not even be sufficiently aligned
+ // for a scalar store. In such cases, fall back to byte stores.
+ if (VectorInfo[i] == PVF_SCALAR && RetTy->isAggregateType()) {
+ EVT ElementType = ExtendIntegerRetVal ? MVT::i32 : VTs[i];
+ Align ElementTypeAlign =
+ DL.getABITypeAlign(ElementType.getTypeForEVT(RetTy->getContext()));
+ Align ElementAlign =
+ commonAlignment(DL.getABITypeAlign(RetTy), Offsets[i]);
+ if (ElementAlign < ElementTypeAlign) {
+ assert(StoreOperands.empty() && "Orphaned operand list.");
+ Chain = LowerUnalignedStoreRet(DAG, Chain, Offsets[i], ElementType,
+ RetVal, dl);
+
+ // The call to LowerUnalignedStoreRet inserted the necessary SDAG nodes
+ // into the graph, so just move on to the next element.
+ continue;
+ }
+ }
+
+ // New load/store. Record chain and offset operands.
+ if (VectorInfo[i] & PVF_FIRST) {
+ assert(StoreOperands.empty() && "Orphaned operand list.");
+ StoreOperands.push_back(Chain);
+ StoreOperands.push_back(DAG.getConstant(Offsets[i], dl, MVT::i32));
+ }
+
// Record the value to return.
StoreOperands.push_back(RetVal);
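
The NVPTX changes above fall back to byte-wise parameter stores and loads (st.param.b8 / ld.param.b8) whenever a PVF_SCALAR element is less aligned than its type requires. Below is a minimal standalone C++ sketch of the byte decomposition and reassembly pattern, assuming little-endian layout; the buffer and helper names are hypothetical and are not part of the patch.

#include <cstdint>

// Store a 32-bit value one byte at a time: shift byte i down and truncate,
// mirroring the SRL + truncating st.param.b8 sequence in
// LowerUnalignedStoreParam / LowerUnalignedStoreRet above.
static void storeUnaligned32(uint8_t *ParamBuf, uint64_t Offset, uint32_t V) {
  for (unsigned I = 0; I != 4; ++I)
    ParamBuf[Offset + I] = uint8_t(V >> (I * 8));
}

// Reassemble the value: widen each byte and OR it into position, mirroring
// the byte loads merged with ISD::OR in LowerUnalignedLoadRetParam above.
static uint32_t loadUnaligned32(const uint8_t *ParamBuf, uint64_t Offset) {
  uint32_t V = 0;
  for (unsigned I = 0; I != 4; ++I)
    V |= uint32_t(ParamBuf[Offset + I]) << (I * 8);
  return V;
}

The new StoreParamI8TruncI32/I64 and StoreRetvalI8TruncI32/I64 definitions in NVPTXInstrInfo.td below appear to exist so these single-byte stores can be emitted directly from 32- and 64-bit registers.
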
diff --git a/llvm/lib/Target/NVPTX/NVPTXInstrInfo.td b/llvm/lib/Target/NVPTX/NVPTXInstrInfo.td
index 55a1955a7f49..b3517ce066b8 100644
--- a/llvm/lib/Target/NVPTX/NVPTXInstrInfo.td
+++ b/llvm/lib/Target/NVPTX/NVPTXInstrInfo.td
@@ -2738,6 +2738,8 @@ def StoreParamI32 : StoreParamInst<Int32Regs, ".b32">;
def StoreParamI16 : StoreParamInst<Int16Regs, ".b16">;
def StoreParamI8 : StoreParamInst<Int16Regs, ".b8">;
+def StoreParamI8TruncI32 : StoreParamInst<Int32Regs, ".b8">;
+def StoreParamI8TruncI64 : StoreParamInst<Int64Regs, ".b8">;
def StoreParamV2I64 : StoreParamV2Inst<Int64Regs, ".b64">;
def StoreParamV2I32 : StoreParamV2Inst<Int32Regs, ".b32">;
def StoreParamV2I16 : StoreParamV2Inst<Int16Regs, ".b16">;
@@ -2757,6 +2759,8 @@ def StoreRetvalI64 : StoreRetvalInst<Int64Regs, ".b64">;
def StoreRetvalI32 : StoreRetvalInst<Int32Regs, ".b32">;
def StoreRetvalI16 : StoreRetvalInst<Int16Regs, ".b16">;
def StoreRetvalI8 : StoreRetvalInst<Int16Regs, ".b8">;
+def StoreRetvalI8TruncI32 : StoreRetvalInst<Int32Regs, ".b8">;
+def StoreRetvalI8TruncI64 : StoreRetvalInst<Int64Regs, ".b8">;
def StoreRetvalV2I64 : StoreRetvalV2Inst<Int64Regs, ".b64">;
def StoreRetvalV2I32 : StoreRetvalV2Inst<Int32Regs, ".b32">;
def StoreRetvalV2I16 : StoreRetvalV2Inst<Int16Regs, ".b16">;
diff --git a/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.cpp b/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.cpp
index 904f1d7fdf90..c922098c5509 100644
--- a/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.cpp
+++ b/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.cpp
@@ -2062,8 +2062,10 @@ void RISCVDAGToDAGISel::Select(SDNode *Node) {
const RISCVTargetLowering &TLI = *Subtarget->getTargetLowering();
MVT SubVecContainerVT = SubVecVT;
// Establish the correct scalable-vector types for any fixed-length type.
- if (SubVecVT.isFixedLengthVector())
+ if (SubVecVT.isFixedLengthVector()) {
+ assert(Idx == 0 && V.isUndef());
SubVecContainerVT = TLI.getContainerForFixedLengthVector(SubVecVT);
+ }
if (VT.isFixedLengthVector())
VT = TLI.getContainerForFixedLengthVector(VT);
@@ -2115,8 +2117,10 @@ void RISCVDAGToDAGISel::Select(SDNode *Node) {
const RISCVTargetLowering &TLI = *Subtarget->getTargetLowering();
MVT SubVecContainerVT = VT;
// Establish the correct scalable-vector types for any fixed-length type.
- if (VT.isFixedLengthVector())
+ if (VT.isFixedLengthVector()) {
+ assert(Idx == 0);
SubVecContainerVT = TLI.getContainerForFixedLengthVector(VT);
+ }
if (InVT.isFixedLengthVector())
InVT = TLI.getContainerForFixedLengthVector(InVT);
diff --git a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
index f7275eb7c77b..540c2e7476dc 100644
--- a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
+++ b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
@@ -691,7 +691,9 @@ RISCVTargetLowering::RISCVTargetLowering(const TargetMachine &TM,
ISD::VP_FP_TO_UINT, ISD::VP_SETCC, ISD::VP_SIGN_EXTEND,
ISD::VP_ZERO_EXTEND, ISD::VP_TRUNCATE, ISD::VP_SMIN,
ISD::VP_SMAX, ISD::VP_UMIN, ISD::VP_UMAX,
- ISD::VP_ABS, ISD::EXPERIMENTAL_VP_REVERSE, ISD::EXPERIMENTAL_VP_SPLICE};
+ ISD::VP_ABS, ISD::EXPERIMENTAL_VP_REVERSE, ISD::EXPERIMENTAL_VP_SPLICE,
+ ISD::VP_SADDSAT, ISD::VP_UADDSAT, ISD::VP_SSUBSAT,
+ ISD::VP_USUBSAT};
static const unsigned FloatingPointVPOps[] = {
ISD::VP_FADD, ISD::VP_FSUB, ISD::VP_FMUL,
@@ -830,7 +832,6 @@ RISCVTargetLowering::RISCVTargetLowering(const TargetMachine &TM,
VT, Custom);
setOperationAction({ISD::FP_TO_SINT_SAT, ISD::FP_TO_UINT_SAT}, VT,
Custom);
- setOperationAction({ISD::LRINT, ISD::LLRINT}, VT, Custom);
setOperationAction({ISD::AVGFLOORU, ISD::AVGCEILU, ISD::SADDSAT,
ISD::UADDSAT, ISD::SSUBSAT, ISD::USUBSAT},
VT, Legal);
@@ -956,6 +957,7 @@ RISCVTargetLowering::RISCVTargetLowering(const TargetMachine &TM,
// between vXf16 and vXf64 must be lowered as sequences which convert via
// vXf32.
setOperationAction({ISD::FP_ROUND, ISD::FP_EXTEND}, VT, Custom);
+ setOperationAction({ISD::LRINT, ISD::LLRINT}, VT, Custom);
// Custom-lower insert/extract operations to simplify patterns.
setOperationAction({ISD::INSERT_VECTOR_ELT, ISD::EXTRACT_VECTOR_ELT}, VT,
Custom);
@@ -3240,45 +3242,49 @@ static std::optional<uint64_t> getExactInteger(const APFloat &APF,
// Note that this method will also match potentially unappealing index
// sequences, like <i32 0, i32 50939494>, however it is left to the caller to
// determine whether this is worth generating code for.
-static std::optional<VIDSequence> isSimpleVIDSequence(SDValue Op) {
- unsigned NumElts = Op.getNumOperands();
+static std::optional<VIDSequence> isSimpleVIDSequence(SDValue Op,
+ unsigned EltSizeInBits) {
assert(Op.getOpcode() == ISD::BUILD_VECTOR && "Unexpected BUILD_VECTOR");
+ if (!cast<BuildVectorSDNode>(Op)->isConstant())
+ return std::nullopt;
bool IsInteger = Op.getValueType().isInteger();
std::optional<unsigned> SeqStepDenom;
std::optional<int64_t> SeqStepNum, SeqAddend;
std::optional<std::pair<uint64_t, unsigned>> PrevElt;
- unsigned EltSizeInBits = Op.getValueType().getScalarSizeInBits();
- for (unsigned Idx = 0; Idx < NumElts; Idx++) {
- // Assume undef elements match the sequence; we just have to be careful
- // when interpolating across them.
- if (Op.getOperand(Idx).isUndef())
+ assert(EltSizeInBits >= Op.getValueType().getScalarSizeInBits());
+
+ // First extract the ops into a list of constant integer values. This may not
+ // be possible for floats if they're not all representable as integers.
+ SmallVector<std::optional<uint64_t>> Elts(Op.getNumOperands());
+ const unsigned OpSize = Op.getScalarValueSizeInBits();
+ for (auto [Idx, Elt] : enumerate(Op->op_values())) {
+ if (Elt.isUndef()) {
+ Elts[Idx] = std::nullopt;
continue;
-
- uint64_t Val;
+ }
if (IsInteger) {
- // The BUILD_VECTOR must be all constants.
- if (!isa<ConstantSDNode>(Op.getOperand(Idx)))
- return std::nullopt;
- Val = Op.getConstantOperandVal(Idx) &
- maskTrailingOnes<uint64_t>(EltSizeInBits);
+ Elts[Idx] = Elt->getAsZExtVal() & maskTrailingOnes<uint64_t>(OpSize);
} else {
- // The BUILD_VECTOR must be all constants.
- if (!isa<ConstantFPSDNode>(Op.getOperand(Idx)))
- return std::nullopt;
- if (auto ExactInteger = getExactInteger(
- cast<ConstantFPSDNode>(Op.getOperand(Idx))->getValueAPF(),
- EltSizeInBits))
- Val = *ExactInteger;
- else
+ auto ExactInteger =
+ getExactInteger(cast<ConstantFPSDNode>(Elt)->getValueAPF(), OpSize);
+ if (!ExactInteger)
return std::nullopt;
+ Elts[Idx] = *ExactInteger;
}
+ }
+
+ for (auto [Idx, Elt] : enumerate(Elts)) {
+ // Assume undef elements match the sequence; we just have to be careful
+ // when interpolating across them.
+ if (!Elt)
+ continue;
if (PrevElt) {
// Calculate the step since the last non-undef element, and ensure
// it's consistent across the entire sequence.
unsigned IdxDiff = Idx - PrevElt->second;
- int64_t ValDiff = SignExtend64(Val - PrevElt->first, EltSizeInBits);
+ int64_t ValDiff = SignExtend64(*Elt - PrevElt->first, EltSizeInBits);
// A zero-value value difference means that we're somewhere in the middle
// of a fractional step, e.g. <0,0,0*,0,1,1,1,1>. Wait until we notice a
@@ -3308,8 +3314,8 @@ static std::optional<VIDSequence> isSimpleVIDSequence(SDValue Op) {
}
// Record this non-undef element for later.
- if (!PrevElt || PrevElt->first != Val)
- PrevElt = std::make_pair(Val, Idx);
+ if (!PrevElt || PrevElt->first != *Elt)
+ PrevElt = std::make_pair(*Elt, Idx);
}
// We need to have logged a step for this to count as a legal index sequence.
@@ -3318,21 +3324,12 @@ static std::optional<VIDSequence> isSimpleVIDSequence(SDValue Op) {
// Loop back through the sequence and validate elements we might have skipped
// while waiting for a valid step. While doing this, log any sequence addend.
- for (unsigned Idx = 0; Idx < NumElts; Idx++) {
- if (Op.getOperand(Idx).isUndef())
+ for (auto [Idx, Elt] : enumerate(Elts)) {
+ if (!Elt)
continue;
- uint64_t Val;
- if (IsInteger) {
- Val = Op.getConstantOperandVal(Idx) &
- maskTrailingOnes<uint64_t>(EltSizeInBits);
- } else {
- Val = *getExactInteger(
- cast<ConstantFPSDNode>(Op.getOperand(Idx))->getValueAPF(),
- EltSizeInBits);
- }
uint64_t ExpectedVal =
(int64_t)(Idx * (uint64_t)*SeqStepNum) / *SeqStepDenom;
- int64_t Addend = SignExtend64(Val - ExpectedVal, EltSizeInBits);
+ int64_t Addend = SignExtend64(*Elt - ExpectedVal, EltSizeInBits);
if (!SeqAddend)
SeqAddend = Addend;
else if (Addend != SeqAddend)
@@ -3598,7 +3595,7 @@ static SDValue lowerBuildVectorOfConstants(SDValue Op, SelectionDAG &DAG,
// Try and match index sequences, which we can lower to the vid instruction
// with optional modifications. An all-undef vector is matched by
// getSplatValue, above.
- if (auto SimpleVID = isSimpleVIDSequence(Op)) {
+ if (auto SimpleVID = isSimpleVIDSequence(Op, Op.getScalarValueSizeInBits())) {
int64_t StepNumerator = SimpleVID->StepNumerator;
unsigned StepDenominator = SimpleVID->StepDenominator;
int64_t Addend = SimpleVID->Addend;
@@ -3853,11 +3850,10 @@ static SDValue lowerBUILD_VECTOR(SDValue Op, SelectionDAG &DAG,
// If we're compiling for an exact VLEN value, we can split our work per
// register in the register group.
- const unsigned MinVLen = Subtarget.getRealMinVLen();
- const unsigned MaxVLen = Subtarget.getRealMaxVLen();
- if (MinVLen == MaxVLen && VT.getSizeInBits().getKnownMinValue() > MinVLen) {
+ if (const auto VLen = Subtarget.getRealVLen();
+ VLen && VT.getSizeInBits().getKnownMinValue() > *VLen) {
MVT ElemVT = VT.getVectorElementType();
- unsigned ElemsPerVReg = MinVLen / ElemVT.getFixedSizeInBits();
+ unsigned ElemsPerVReg = *VLen / ElemVT.getFixedSizeInBits();
EVT ContainerVT = getContainerForFixedLengthVector(DAG, VT, Subtarget);
MVT OneRegVT = MVT::getVectorVT(ElemVT, ElemsPerVReg);
MVT M1VT = getContainerForFixedLengthVector(DAG, OneRegVT, Subtarget);
@@ -4768,9 +4764,8 @@ static SDValue lowerShuffleViaVRegSplitting(ShuffleVectorSDNode *SVN,
// If we don't know exact data layout, not much we can do. If this
// is already m1 or smaller, no point in splitting further.
- const unsigned MinVLen = Subtarget.getRealMinVLen();
- const unsigned MaxVLen = Subtarget.getRealMaxVLen();
- if (MinVLen != MaxVLen || VT.getSizeInBits().getFixedValue() <= MinVLen)
+ const auto VLen = Subtarget.getRealVLen();
+ if (!VLen || VT.getSizeInBits().getFixedValue() <= *VLen)
return SDValue();
// Avoid picking up bitrotate patterns which we have a linear-in-lmul
@@ -4781,7 +4776,7 @@ static SDValue lowerShuffleViaVRegSplitting(ShuffleVectorSDNode *SVN,
return SDValue();
MVT ElemVT = VT.getVectorElementType();
- unsigned ElemsPerVReg = MinVLen / ElemVT.getFixedSizeInBits();
+ unsigned ElemsPerVReg = *VLen / ElemVT.getFixedSizeInBits();
unsigned VRegsPerSrc = NumElts / ElemsPerVReg;
SmallVector<std::pair<int, SmallVector<int>>>
@@ -5759,6 +5754,10 @@ static unsigned getRISCVVLOp(SDValue Op) {
VP_CASE(SINT_TO_FP) // VP_SINT_TO_FP
VP_CASE(UINT_TO_FP) // VP_UINT_TO_FP
VP_CASE(BITREVERSE) // VP_BITREVERSE
+ VP_CASE(SADDSAT) // VP_SADDSAT
+ VP_CASE(UADDSAT) // VP_UADDSAT
+ VP_CASE(SSUBSAT) // VP_SSUBSAT
+ VP_CASE(USUBSAT) // VP_USUBSAT
VP_CASE(BSWAP) // VP_BSWAP
VP_CASE(CTLZ) // VP_CTLZ
VP_CASE(CTTZ) // VP_CTTZ
@@ -6798,6 +6797,10 @@ SDValue RISCVTargetLowering::LowerOperation(SDValue Op,
case ISD::VP_UDIV:
case ISD::VP_SREM:
case ISD::VP_UREM:
+ case ISD::VP_UADDSAT:
+ case ISD::VP_USUBSAT:
+ case ISD::VP_SADDSAT:
+ case ISD::VP_SSUBSAT:
return lowerVPOp(Op, DAG);
case ISD::VP_AND:
case ISD::VP_OR:
@@ -7384,6 +7387,26 @@ SDValue RISCVTargetLowering::lowerSELECT(SDValue Op, SelectionDAG &DAG) const {
if (SDValue V = combineSelectToBinOp(Op.getNode(), DAG, Subtarget))
return V;
+ // (select c, c1, c2) -> (add (czero_nez c2 - c1, c), c1)
+ // (select c, c1, c2) -> (add (czero_eqz c1 - c2, c), c2)
+ if (isa<ConstantSDNode>(TrueV) && isa<ConstantSDNode>(FalseV)) {
+ const APInt &TrueVal = TrueV->getAsAPIntVal();
+ const APInt &FalseVal = FalseV->getAsAPIntVal();
+ const int TrueValCost = RISCVMatInt::getIntMatCost(
+ TrueVal, Subtarget.getXLen(), Subtarget, /*CompressionCost=*/true);
+ const int FalseValCost = RISCVMatInt::getIntMatCost(
+ FalseVal, Subtarget.getXLen(), Subtarget, /*CompressionCost=*/true);
+ bool IsCZERO_NEZ = TrueValCost <= FalseValCost;
+ SDValue LHSVal = DAG.getConstant(
+ IsCZERO_NEZ ? FalseVal - TrueVal : TrueVal - FalseVal, DL, VT);
+ SDValue RHSVal =
+ DAG.getConstant(IsCZERO_NEZ ? TrueVal : FalseVal, DL, VT);
+ SDValue CMOV =
+ DAG.getNode(IsCZERO_NEZ ? RISCVISD::CZERO_NEZ : RISCVISD::CZERO_EQZ,
+ DL, VT, LHSVal, CondV);
+ return DAG.getNode(ISD::ADD, DL, VT, CMOV, RHSVal);
+ }
+
// (select c, t, f) -> (or (czero_eqz t, c), (czero_nez f, c))
// Unless we have the short forward branch optimization.
if (!Subtarget.hasConditionalMoveFusion())
@@ -8313,15 +8336,13 @@ SDValue RISCVTargetLowering::lowerEXTRACT_VECTOR_ELT(SDValue Op,
// constant index, we can always perform the extract in m1 (or
// smaller) as we can determine the register corresponding to
// the index in the register group.
- const unsigned MinVLen = Subtarget.getRealMinVLen();
- const unsigned MaxVLen = Subtarget.getRealMaxVLen();
+ const auto VLen = Subtarget.getRealVLen();
if (auto *IdxC = dyn_cast<ConstantSDNode>(Idx);
- IdxC && MinVLen == MaxVLen &&
- VecVT.getSizeInBits().getKnownMinValue() > MinVLen) {
+ IdxC && VLen && VecVT.getSizeInBits().getKnownMinValue() > *VLen) {
MVT M1VT = getLMUL1VT(ContainerVT);
unsigned OrigIdx = IdxC->getZExtValue();
EVT ElemVT = VecVT.getVectorElementType();
- unsigned ElemsPerVReg = MinVLen / ElemVT.getFixedSizeInBits();
+ unsigned ElemsPerVReg = *VLen / ElemVT.getFixedSizeInBits();
unsigned RemIdx = OrigIdx % ElemsPerVReg;
unsigned SubRegIdx = OrigIdx / ElemsPerVReg;
unsigned ExtractIdx =
@@ -9782,15 +9803,14 @@ SDValue RISCVTargetLowering::lowerEXTRACT_SUBVECTOR(SDValue Op,
if (OrigIdx == 0)
return Op;
- const unsigned MinVLen = Subtarget.getRealMinVLen();
- const unsigned MaxVLen = Subtarget.getRealMaxVLen();
+ const auto VLen = Subtarget.getRealVLen();
// If the subvector vector is a fixed-length type and we don't know VLEN
// exactly, we cannot use subregister manipulation to simplify the codegen; we
// don't know which register of a LMUL group contains the specific subvector
// as we only know the minimum register size. Therefore we must slide the
// vector group down the full amount.
- if (SubVecVT.isFixedLengthVector() && MinVLen != MaxVLen) {
+ if (SubVecVT.isFixedLengthVector() && !VLen) {
MVT ContainerVT = VecVT;
if (VecVT.isFixedLengthVector()) {
ContainerVT = getContainerForFixedLengthVector(VecVT);
@@ -9837,8 +9857,8 @@ SDValue RISCVTargetLowering::lowerEXTRACT_SUBVECTOR(SDValue Op,
// and decomposeSubvectorInsertExtractToSubRegs takes this into account. So if
// we have a fixed length subvector, we need to adjust the index by 1/vscale.
if (SubVecVT.isFixedLengthVector()) {
- assert(MinVLen == MaxVLen);
- unsigned Vscale = MinVLen / RISCV::RVVBitsPerBlock;
+ assert(VLen);
+ unsigned Vscale = *VLen / RISCV::RVVBitsPerBlock;
auto Decompose =
RISCVTargetLowering::decomposeSubvectorInsertExtractToSubRegs(
VecVT, ContainerSubVecVT, OrigIdx / Vscale, TRI);
@@ -12872,6 +12892,7 @@ static SDValue performSUBCombine(SDNode *N, SelectionDAG &DAG,
if (SDValue V = combineSubOfBoolean(N, DAG))
return V;
+ EVT VT = N->getValueType(0);
SDValue N0 = N->getOperand(0);
SDValue N1 = N->getOperand(1);
// fold (sub 0, (setcc x, 0, setlt)) -> (sra x, xlen - 1)
@@ -12879,7 +12900,6 @@ static SDValue performSUBCombine(SDNode *N, SelectionDAG &DAG,
isNullConstant(N1.getOperand(1))) {
ISD::CondCode CCVal = cast<CondCodeSDNode>(N1.getOperand(2))->get();
if (CCVal == ISD::SETLT) {
- EVT VT = N->getValueType(0);
SDLoc DL(N);
unsigned ShAmt = N0.getValueSizeInBits() - 1;
return DAG.getNode(ISD::SRA, DL, VT, N1.getOperand(0),
@@ -12887,6 +12907,29 @@ static SDValue performSUBCombine(SDNode *N, SelectionDAG &DAG,
}
}
+ // sub (zext, zext) -> sext (sub (zext, zext))
+  // where the sum of the extend widths matches, and the inner zexts
+ // add at least one bit. (For profitability on rvv, we use a
+ // power of two for both inner and outer extend.)
+ if (VT.isVector() && Subtarget.getTargetLowering()->isTypeLegal(VT) &&
+ N0.getOpcode() == N1.getOpcode() && N0.getOpcode() == ISD::ZERO_EXTEND &&
+ N0.hasOneUse() && N1.hasOneUse()) {
+ SDValue Src0 = N0.getOperand(0);
+ SDValue Src1 = N1.getOperand(0);
+ EVT SrcVT = Src0.getValueType();
+ if (Subtarget.getTargetLowering()->isTypeLegal(SrcVT) &&
+ SrcVT == Src1.getValueType() && SrcVT.getScalarSizeInBits() >= 8 &&
+ SrcVT.getScalarSizeInBits() < VT.getScalarSizeInBits() / 2) {
+ LLVMContext &C = *DAG.getContext();
+ EVT ElemVT = VT.getVectorElementType().getHalfSizedIntegerVT(C);
+ EVT NarrowVT = EVT::getVectorVT(C, ElemVT, VT.getVectorElementCount());
+ Src0 = DAG.getNode(ISD::ZERO_EXTEND, SDLoc(Src0), NarrowVT, Src0);
+ Src1 = DAG.getNode(ISD::ZERO_EXTEND, SDLoc(Src1), NarrowVT, Src1);
+ return DAG.getNode(ISD::SIGN_EXTEND, SDLoc(N), VT,
+ DAG.getNode(ISD::SUB, SDLoc(N), NarrowVT, Src0, Src1));
+ }
+ }
+
// fold (sub x, (select lhs, rhs, cc, 0, y)) ->
// (select lhs, rhs, cc, x, (sub x, y))
return combineSelectAndUse(N, N1, N0, DAG, /*AllOnes*/ false, Subtarget);
@@ -15978,7 +16021,10 @@ SDValue RISCVTargetLowering::PerformDAGCombine(SDNode *N,
if (Index.getOpcode() == ISD::BUILD_VECTOR &&
MGN->getExtensionType() == ISD::NON_EXTLOAD && isTypeLegal(VT)) {
- if (std::optional<VIDSequence> SimpleVID = isSimpleVIDSequence(Index);
+ // The sequence will be XLenVT, not the type of Index. Tell
+ // isSimpleVIDSequence this so we avoid overflow.
+ if (std::optional<VIDSequence> SimpleVID =
+ isSimpleVIDSequence(Index, Subtarget.getXLen());
SimpleVID && SimpleVID->StepDenominator == 1) {
const int64_t StepNumerator = SimpleVID->StepNumerator;
const int64_t Addend = SimpleVID->Addend;
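
The lowerSELECT change above rewrites (select c, c1, c2) of two constants into an add of a Zicond czero result and whichever constant is cheaper to materialize per RISCVMatInt::getIntMatCost. Here is a small self-contained C++ check of the two identities it relies on; czero_nez / czero_eqz are plain functions modelling the instruction semantics, not the DAG combine itself.

#include <cassert>
#include <cstdint>

// czero.nez rd, rs1, rs2: rd = (rs2 != 0) ? 0 : rs1
static int64_t czero_nez(int64_t Rs1, int64_t Rs2) { return Rs2 != 0 ? 0 : Rs1; }
// czero.eqz rd, rs1, rs2: rd = (rs2 == 0) ? 0 : rs1
static int64_t czero_eqz(int64_t Rs1, int64_t Rs2) { return Rs2 == 0 ? 0 : Rs1; }

int main() {
  for (int64_t C = 0; C <= 1; ++C)
    for (int64_t C1 = -3; C1 <= 3; ++C1)
      for (int64_t C2 = -3; C2 <= 3; ++C2) {
        int64_t Sel = C ? C1 : C2;
        // (select c, c1, c2) -> (add (czero_nez c2 - c1, c), c1)
        assert(Sel == czero_nez(C2 - C1, C) + C1);
        // (select c, c1, c2) -> (add (czero_eqz c1 - c2, c), c2)
        assert(Sel == czero_eqz(C1 - C2, C) + C2);
      }
  return 0;
}
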
diff --git a/llvm/lib/Target/RISCV/RISCVMakeCompressible.cpp b/llvm/lib/Target/RISCV/RISCVMakeCompressible.cpp
index ff21fe1d4064..af864ba0fbc4 100644
--- a/llvm/lib/Target/RISCV/RISCVMakeCompressible.cpp
+++ b/llvm/lib/Target/RISCV/RISCVMakeCompressible.cpp
@@ -143,19 +143,35 @@ static bool isCompressedReg(Register Reg) {
// Return true if MI is a load for which there exists a compressed version.
static bool isCompressibleLoad(const MachineInstr &MI) {
const RISCVSubtarget &STI = MI.getMF()->getSubtarget<RISCVSubtarget>();
- const unsigned Opcode = MI.getOpcode();
- return Opcode == RISCV::LW || (!STI.is64Bit() && Opcode == RISCV::FLW) ||
- Opcode == RISCV::LD || Opcode == RISCV::FLD;
+ switch (MI.getOpcode()) {
+ default:
+ return false;
+ case RISCV::LW:
+ case RISCV::LD:
+ return STI.hasStdExtCOrZca();
+ case RISCV::FLW:
+ return !STI.is64Bit() && STI.hasStdExtCOrZcfOrZce();
+ case RISCV::FLD:
+ return STI.hasStdExtCOrZcd();
+ }
}
// Return true if MI is a store for which there exists a compressed version.
static bool isCompressibleStore(const MachineInstr &MI) {
const RISCVSubtarget &STI = MI.getMF()->getSubtarget<RISCVSubtarget>();
- const unsigned Opcode = MI.getOpcode();
- return Opcode == RISCV::SW || (!STI.is64Bit() && Opcode == RISCV::FSW) ||
- Opcode == RISCV::SD || Opcode == RISCV::FSD;
+ switch (MI.getOpcode()) {
+ default:
+ return false;
+ case RISCV::SW:
+ case RISCV::SD:
+ return STI.hasStdExtCOrZca();
+ case RISCV::FSW:
+ return !STI.is64Bit() && STI.hasStdExtCOrZcfOrZce();
+ case RISCV::FSD:
+ return STI.hasStdExtCOrZcd();
+ }
}
// Find a single register and/or large offset which, if compressible, would
@@ -324,8 +340,7 @@ bool RISCVMakeCompressibleOpt::runOnMachineFunction(MachineFunction &Fn) {
const RISCVInstrInfo &TII = *STI.getInstrInfo();
// This optimization only makes sense if compressed instructions are emitted.
- // FIXME: Support Zca, Zcf, Zcd granularity.
- if (!STI.hasStdExtC())
+ if (!STI.hasStdExtCOrZca())
return false;
for (MachineBasicBlock &MBB : Fn) {
diff --git a/llvm/lib/Target/RISCV/RISCVScheduleV.td b/llvm/lib/Target/RISCV/RISCVScheduleV.td
index d15cb611ae66..0be681de3daf 100644
--- a/llvm/lib/Target/RISCV/RISCVScheduleV.td
+++ b/llvm/lib/Target/RISCV/RISCVScheduleV.td
@@ -88,20 +88,25 @@ multiclass LMULWriteResMXVariant<string name, SchedPredicateBase Pred,
let ReleaseAtCycles = noPredReleaseCycles;
}
+ // Define SchedVars
+ def nameMX # PredSchedVar
+ : SchedVar<Pred, [!cast<SchedWriteRes>(NAME # nameMX # "_Pred")]>;
+ def nameMX # NoPredSchedVar
+ : SchedVar<NoSchedPred, [!cast<SchedWriteRes>(NAME # nameMX #"_NoPred")]>;
+ // Allow multiclass to refer to SchedVars -- need to have NAME prefix.
+ defvar PredSchedVar = !cast<SchedVar>(NAME # nameMX # PredSchedVar);
+ defvar NoPredSchedVar = !cast<SchedVar>(NAME # nameMX # NoPredSchedVar);
+
// Tie behavior to predicate
- def NAME # nameMX # "_Variant" : SchedWriteVariant<[
- SchedVar<Pred, [!cast<SchedWriteRes>(NAME # nameMX # "_Pred")]>,
- SchedVar<NoSchedPred, [!cast<SchedWriteRes>(NAME # nameMX # "_NoPred")]>
- ]>;
+ def NAME # nameMX # "_Variant"
+ : SchedWriteVariant<[PredSchedVar, NoPredSchedVar]>;
def : SchedAlias<
!cast<SchedReadWrite>(nameMX),
!cast<SchedReadWrite>(NAME # nameMX # "_Variant")>;
if IsWorstCase then {
- def NAME # name # "_WorstCase_Variant" : SchedWriteVariant<[
- SchedVar<Pred, [!cast<SchedWriteRes>(NAME # nameMX # "_Pred")]>,
- SchedVar<NoSchedPred, [!cast<SchedWriteRes>(NAME # nameMX # "_NoPred")]>
- ]>;
+ def NAME # name # "_WorstCase_Variant"
+ : SchedWriteVariant<[PredSchedVar, NoPredSchedVar]>;
def : SchedAlias<
!cast<SchedReadWrite>(name # "_WorstCase"),
!cast<SchedReadWrite>(NAME # name # "_WorstCase_Variant")>;
diff --git a/llvm/lib/Target/RISCV/RISCVSubtarget.h b/llvm/lib/Target/RISCV/RISCVSubtarget.h
index 4b60d7aff22a..9ebf278d6749 100644
--- a/llvm/lib/Target/RISCV/RISCVSubtarget.h
+++ b/llvm/lib/Target/RISCV/RISCVSubtarget.h
@@ -143,6 +143,10 @@ public:
#include "RISCVGenSubtargetInfo.inc"
bool hasStdExtCOrZca() const { return HasStdExtC || HasStdExtZca; }
+ bool hasStdExtCOrZcd() const { return HasStdExtC || HasStdExtZcd; }
+ bool hasStdExtCOrZcfOrZce() const {
+ return HasStdExtC || HasStdExtZcf || HasStdExtZce;
+ }
bool hasStdExtZvl() const { return ZvlLen != 0; }
bool hasStdExtFOrZfinx() const { return HasStdExtF || HasStdExtZfinx; }
bool hasStdExtDOrZdinx() const { return HasStdExtD || HasStdExtZdinx; }
diff --git a/llvm/lib/Target/RISCV/RISCVTargetMachine.cpp b/llvm/lib/Target/RISCV/RISCVTargetMachine.cpp
index adef40e19cba..3e20e451410f 100644
--- a/llvm/lib/Target/RISCV/RISCVTargetMachine.cpp
+++ b/llvm/lib/Target/RISCV/RISCVTargetMachine.cpp
@@ -84,7 +84,7 @@ static cl::opt<bool> EnableRISCVDeadRegisterElimination(
static cl::opt<bool>
EnableSinkFold("riscv-enable-sink-fold",
cl::desc("Enable sinking and folding of instruction copies"),
- cl::init(false), cl::Hidden);
+ cl::init(true), cl::Hidden);
static cl::opt<bool>
EnableLoopDataPrefetch("riscv-enable-loop-data-prefetch", cl::Hidden,
diff --git a/llvm/lib/Target/SPIRV/SPIRVBuiltins.td b/llvm/lib/Target/SPIRV/SPIRVBuiltins.td
index e6e3560d02f5..28a63b93b43b 100644
--- a/llvm/lib/Target/SPIRV/SPIRVBuiltins.td
+++ b/llvm/lib/Target/SPIRV/SPIRVBuiltins.td
@@ -619,7 +619,8 @@ class GroupBuiltin<string name, Op operation> {
!eq(operation, OpGroupNonUniformShuffleDown),
!eq(operation, OpGroupBroadcast),
!eq(operation, OpGroupNonUniformBroadcast),
- !eq(operation, OpGroupNonUniformBroadcastFirst));
+ !eq(operation, OpGroupNonUniformBroadcastFirst),
+ !eq(operation, OpGroupNonUniformRotateKHR));
bit HasBoolArg = !or(!and(IsAllOrAny, !eq(IsAllEqual, false)), IsBallot, IsLogical);
}
@@ -877,6 +878,10 @@ defm : DemangledGroupBuiltin<"group_non_uniform_scan_inclusive_logical_xors", Wo
defm : DemangledGroupBuiltin<"group_non_uniform_scan_exclusive_logical_xors", WorkOrSub, OpGroupNonUniformLogicalXor>;
defm : DemangledGroupBuiltin<"group_clustered_reduce_logical_xor", WorkOrSub, OpGroupNonUniformLogicalXor>;
+// cl_khr_subgroup_rotate / SPV_KHR_subgroup_rotate
+defm : DemangledGroupBuiltin<"group_rotate", OnlySub, OpGroupNonUniformRotateKHR>;
+defm : DemangledGroupBuiltin<"group_clustered_rotate", OnlySub, OpGroupNonUniformRotateKHR>;
+
// cl_khr_work_group_uniform_arithmetic / SPV_KHR_uniform_group_instructions
defm : DemangledGroupBuiltin<"group_reduce_imul", OnlyWork, OpGroupIMulKHR>;
defm : DemangledGroupBuiltin<"group_reduce_mulu", OnlyWork, OpGroupIMulKHR>;
diff --git a/llvm/lib/Target/SPIRV/SPIRVCallLowering.cpp b/llvm/lib/Target/SPIRV/SPIRVCallLowering.cpp
index cc438b2bb8d4..10569ef0468b 100644
--- a/llvm/lib/Target/SPIRV/SPIRVCallLowering.cpp
+++ b/llvm/lib/Target/SPIRV/SPIRVCallLowering.cpp
@@ -150,7 +150,8 @@ getKernelArgTypeQual(const Function &F, unsigned ArgIdx) {
static SPIRVType *getArgSPIRVType(const Function &F, unsigned ArgIdx,
SPIRVGlobalRegistry *GR,
- MachineIRBuilder &MIRBuilder) {
+ MachineIRBuilder &MIRBuilder,
+ const SPIRVSubtarget &ST) {
// Read argument's access qualifier from metadata or default.
SPIRV::AccessQualifier::AccessQualifier ArgAccessQual =
getArgAccessQual(F, ArgIdx);
@@ -169,8 +170,8 @@ static SPIRVType *getArgSPIRVType(const Function &F, unsigned ArgIdx,
if (MDTypeStr.ends_with("*"))
ResArgType = GR->getOrCreateSPIRVTypeByName(
MDTypeStr, MIRBuilder,
- addressSpaceToStorageClass(
- OriginalArgType->getPointerAddressSpace()));
+ addressSpaceToStorageClass(OriginalArgType->getPointerAddressSpace(),
+ ST));
else if (MDTypeStr.ends_with("_t"))
ResArgType = GR->getOrCreateSPIRVTypeByName(
"opencl." + MDTypeStr.str(), MIRBuilder,
@@ -206,6 +207,10 @@ bool SPIRVCallLowering::lowerFormalArguments(MachineIRBuilder &MIRBuilder,
assert(GR && "Must initialize the SPIRV type registry before lowering args.");
GR->setCurrentFunc(MIRBuilder.getMF());
+ // Get access to information about available extensions
+ const SPIRVSubtarget *ST =
+ static_cast<const SPIRVSubtarget *>(&MIRBuilder.getMF().getSubtarget());
+
// Assign types and names to all args, and store their types for later.
FunctionType *FTy = getOriginalFunctionType(F);
SmallVector<SPIRVType *, 4> ArgTypeVRegs;
@@ -216,7 +221,7 @@ bool SPIRVCallLowering::lowerFormalArguments(MachineIRBuilder &MIRBuilder,
// TODO: handle the case of multiple registers.
if (VRegs[i].size() > 1)
return false;
- auto *SpirvTy = getArgSPIRVType(F, i, GR, MIRBuilder);
+ auto *SpirvTy = getArgSPIRVType(F, i, GR, MIRBuilder, *ST);
GR->assignSPIRVTypeToVReg(SpirvTy, VRegs[i][0], MIRBuilder.getMF());
ArgTypeVRegs.push_back(SpirvTy);
@@ -318,10 +323,6 @@ bool SPIRVCallLowering::lowerFormalArguments(MachineIRBuilder &MIRBuilder,
if (F.hasName())
buildOpName(FuncVReg, F.getName(), MIRBuilder);
- // Get access to information about available extensions
- const auto *ST =
- static_cast<const SPIRVSubtarget *>(&MIRBuilder.getMF().getSubtarget());
-
// Handle entry points and function linkage.
if (isEntryPoint(F)) {
const auto &STI = MIRBuilder.getMF().getSubtarget<SPIRVSubtarget>();
diff --git a/llvm/lib/Target/SPIRV/SPIRVGlobalRegistry.cpp b/llvm/lib/Target/SPIRV/SPIRVGlobalRegistry.cpp
index 47fec745c3f1..a1cb630f1aa4 100644
--- a/llvm/lib/Target/SPIRV/SPIRVGlobalRegistry.cpp
+++ b/llvm/lib/Target/SPIRV/SPIRVGlobalRegistry.cpp
@@ -709,7 +709,10 @@ SPIRVType *SPIRVGlobalRegistry::createSPIRVType(
// TODO: change the implementation once opaque pointers are supported
// in the SPIR-V specification.
SpvElementType = getOrCreateSPIRVIntegerType(8, MIRBuilder);
- auto SC = addressSpaceToStorageClass(PType->getAddressSpace());
+ // Get access to information about available extensions
+ const SPIRVSubtarget *ST =
+ static_cast<const SPIRVSubtarget *>(&MIRBuilder.getMF().getSubtarget());
+ auto SC = addressSpaceToStorageClass(PType->getAddressSpace(), *ST);
// Null pointer means we have a loop in type definitions, make and
// return corresponding OpTypeForwardPointer.
if (SpvElementType == nullptr) {
diff --git a/llvm/lib/Target/SPIRV/SPIRVISelLowering.h b/llvm/lib/Target/SPIRV/SPIRVISelLowering.h
index f317b2620719..d34f802e9d88 100644
--- a/llvm/lib/Target/SPIRV/SPIRVISelLowering.h
+++ b/llvm/lib/Target/SPIRV/SPIRVISelLowering.h
@@ -31,6 +31,9 @@ public:
return true;
}
+  // Prevent creation of jump tables.
+ bool areJTsAllowed(const Function *) const override { return false; }
+
// This is to prevent sexts of non-i64 vector indices which are generated
// within general IRTranslator hence type generation for it is omitted.
MVT getVectorIdxTy(const DataLayout &DL) const override {
diff --git a/llvm/lib/Target/SPIRV/SPIRVInstrInfo.td b/llvm/lib/Target/SPIRV/SPIRVInstrInfo.td
index 0f11bc34d176..7c5252e8cb37 100644
--- a/llvm/lib/Target/SPIRV/SPIRVInstrInfo.td
+++ b/llvm/lib/Target/SPIRV/SPIRVInstrInfo.td
@@ -430,6 +430,10 @@ def OpGenericCastToPtrExplicit : Op<123, (outs ID:$r), (ins TYPE:$t, ID:$p, Stor
"$r = OpGenericCastToPtrExplicit $t $p $s">;
def OpBitcast : UnOp<"OpBitcast", 124>;
+// SPV_INTEL_usm_storage_classes
+def OpPtrCastToCrossWorkgroupINTEL : UnOp<"OpPtrCastToCrossWorkgroupINTEL", 5934>;
+def OpCrossWorkgroupCastToPtrINTEL : UnOp<"OpCrossWorkgroupCastToPtrINTEL", 5938>;
+
// 3.42.12 Composite Instructions
def OpVectorExtractDynamic: Op<77, (outs ID:$res), (ins TYPE:$type, vID:$vec, ID:$idx),
@@ -765,6 +769,11 @@ def OpGroupNonUniformLogicalAnd: OpGroupNUGroup<"LogicalAnd", 362>;
def OpGroupNonUniformLogicalOr: OpGroupNUGroup<"LogicalOr", 363>;
def OpGroupNonUniformLogicalXor: OpGroupNUGroup<"LogicalXor", 364>;
+// SPV_KHR_subgroup_rotate
+def OpGroupNonUniformRotateKHR: Op<4431, (outs ID:$res),
+ (ins TYPE:$type, ID:$scope, ID:$value, ID:$delta, variable_ops),
+ "$res = OpGroupNonUniformRotateKHR $type $scope $value $delta">;
+
// 3.49.7, Constant-Creation Instructions
// - SPV_INTEL_function_pointers
diff --git a/llvm/lib/Target/SPIRV/SPIRVInstructionSelector.cpp b/llvm/lib/Target/SPIRV/SPIRVInstructionSelector.cpp
index 53d19a1e3138..7258d3b4d88e 100644
--- a/llvm/lib/Target/SPIRV/SPIRVInstructionSelector.cpp
+++ b/llvm/lib/Target/SPIRV/SPIRVInstructionSelector.cpp
@@ -828,8 +828,18 @@ static bool isGenericCastablePtr(SPIRV::StorageClass::StorageClass SC) {
}
}
+static bool isUSMStorageClass(SPIRV::StorageClass::StorageClass SC) {
+ switch (SC) {
+ case SPIRV::StorageClass::DeviceOnlyINTEL:
+ case SPIRV::StorageClass::HostOnlyINTEL:
+ return true;
+ default:
+ return false;
+ }
+}
+
// In SPIR-V address space casting can only happen to and from the Generic
-// storage class. We can also only case Workgroup, CrossWorkgroup, or Function
+// storage class. We can also only cast Workgroup, CrossWorkgroup, or Function
// pointers to and from Generic pointers. As such, we can convert e.g. from
// Workgroup to Function by going via a Generic pointer as an intermediary. All
// other combinations can only be done by a bitcast, and are probably not safe.
@@ -862,13 +872,17 @@ bool SPIRVInstructionSelector::selectAddrSpaceCast(Register ResVReg,
SPIRV::StorageClass::StorageClass SrcSC = GR.getPointerStorageClass(SrcPtr);
SPIRV::StorageClass::StorageClass DstSC = GR.getPointerStorageClass(ResVReg);
- // Casting from an eligable pointer to Generic.
+  // Don't generate a cast between identical storage classes.
+ if (SrcSC == DstSC)
+ return true;
+
+ // Casting from an eligible pointer to Generic.
if (DstSC == SPIRV::StorageClass::Generic && isGenericCastablePtr(SrcSC))
return selectUnOp(ResVReg, ResType, I, SPIRV::OpPtrCastToGeneric);
- // Casting from Generic to an eligable pointer.
+ // Casting from Generic to an eligible pointer.
if (SrcSC == SPIRV::StorageClass::Generic && isGenericCastablePtr(DstSC))
return selectUnOp(ResVReg, ResType, I, SPIRV::OpGenericCastToPtr);
- // Casting between 2 eligable pointers using Generic as an intermediary.
+ // Casting between 2 eligible pointers using Generic as an intermediary.
if (isGenericCastablePtr(SrcSC) && isGenericCastablePtr(DstSC)) {
Register Tmp = MRI->createVirtualRegister(&SPIRV::IDRegClass);
SPIRVType *GenericPtrTy = GR.getOrCreateSPIRVPointerType(
@@ -886,6 +900,16 @@ bool SPIRVInstructionSelector::selectAddrSpaceCast(Register ResVReg,
.addUse(Tmp)
.constrainAllUses(TII, TRI, RBI);
}
+
+ // Check if instructions from the SPV_INTEL_usm_storage_classes extension may
+ // be applied
+ if (isUSMStorageClass(SrcSC) && DstSC == SPIRV::StorageClass::CrossWorkgroup)
+ return selectUnOp(ResVReg, ResType, I,
+ SPIRV::OpPtrCastToCrossWorkgroupINTEL);
+ if (SrcSC == SPIRV::StorageClass::CrossWorkgroup && isUSMStorageClass(DstSC))
+ return selectUnOp(ResVReg, ResType, I,
+ SPIRV::OpCrossWorkgroupCastToPtrINTEL);
+
// TODO Should this case just be disallowed completely?
// We're casting 2 other arbitrary address spaces, so have to bitcast.
return selectUnOp(ResVReg, ResType, I, SPIRV::OpBitcast);
@@ -1545,7 +1569,7 @@ bool SPIRVInstructionSelector::selectGlobalValue(
}
SPIRVType *ResType = GR.getOrCreateSPIRVPointerType(
PointerBaseType, I, TII,
- addressSpaceToStorageClass(GV->getAddressSpace()));
+ addressSpaceToStorageClass(GV->getAddressSpace(), STI));
std::string GlobalIdent;
if (!GV->hasName()) {
@@ -1618,7 +1642,7 @@ bool SPIRVInstructionSelector::selectGlobalValue(
unsigned AddrSpace = GV->getAddressSpace();
SPIRV::StorageClass::StorageClass Storage =
- addressSpaceToStorageClass(AddrSpace);
+ addressSpaceToStorageClass(AddrSpace, STI);
bool HasLnkTy = GV->getLinkage() != GlobalValue::InternalLinkage &&
Storage != SPIRV::StorageClass::Function;
SPIRV::LinkageType::LinkageType LnkType =
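
selectAddrSpaceCast above now returns early for casts between identical storage classes and, when the SPV_INTEL_usm_storage_classes opcodes are available, selects the dedicated INTEL casts before the final OpBitcast fallback. A hedged C++ sketch of that decision order follows, with SC and CastKind as simplified stand-ins for the real SPIR-V enums and opcodes.

enum class SC { Function, CrossWorkgroup, Workgroup, Generic, DeviceOnlyINTEL, HostOnlyINTEL };
enum class CastKind { None, ToGeneric, FromGeneric, ViaGeneric,
                      PtrToCrossWGIntel, CrossWGToPtrIntel, Bitcast };

static bool isGenericCastable(SC C) {
  return C == SC::Function || C == SC::Workgroup || C == SC::CrossWorkgroup;
}
static bool isUSM(SC C) { return C == SC::DeviceOnlyINTEL || C == SC::HostOnlyINTEL; }

static CastKind classifyAddrSpaceCast(SC Src, SC Dst) {
  if (Src == Dst)
    return CastKind::None;              // no cast between identical storage classes
  if (Dst == SC::Generic && isGenericCastable(Src))
    return CastKind::ToGeneric;         // OpPtrCastToGeneric
  if (Src == SC::Generic && isGenericCastable(Dst))
    return CastKind::FromGeneric;       // OpGenericCastToPtr
  if (isGenericCastable(Src) && isGenericCastable(Dst))
    return CastKind::ViaGeneric;        // two casts through a Generic temporary
  if (isUSM(Src) && Dst == SC::CrossWorkgroup)
    return CastKind::PtrToCrossWGIntel; // OpPtrCastToCrossWorkgroupINTEL
  if (Src == SC::CrossWorkgroup && isUSM(Dst))
    return CastKind::CrossWGToPtrIntel; // OpCrossWorkgroupCastToPtrINTEL
  return CastKind::Bitcast;             // arbitrary address spaces -> OpBitcast
}
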
diff --git a/llvm/lib/Target/SPIRV/SPIRVLegalizerInfo.cpp b/llvm/lib/Target/SPIRV/SPIRVLegalizerInfo.cpp
index 011a550a7b3d..4f2e7a240fc2 100644
--- a/llvm/lib/Target/SPIRV/SPIRVLegalizerInfo.cpp
+++ b/llvm/lib/Target/SPIRV/SPIRVLegalizerInfo.cpp
@@ -102,14 +102,16 @@ SPIRVLegalizerInfo::SPIRVLegalizerInfo(const SPIRVSubtarget &ST) {
const LLT p2 = LLT::pointer(2, PSize); // UniformConstant
const LLT p3 = LLT::pointer(3, PSize); // Workgroup
const LLT p4 = LLT::pointer(4, PSize); // Generic
- const LLT p5 = LLT::pointer(5, PSize); // Input
+ const LLT p5 =
+ LLT::pointer(5, PSize); // Input, SPV_INTEL_usm_storage_classes (Device)
+ const LLT p6 = LLT::pointer(6, PSize); // SPV_INTEL_usm_storage_classes (Host)
// TODO: remove copy-pasting here by using concatenation in some way.
auto allPtrsScalarsAndVectors = {
- p0, p1, p2, p3, p4, p5, s1, s8, s16,
- s32, s64, v2s1, v2s8, v2s16, v2s32, v2s64, v3s1, v3s8,
- v3s16, v3s32, v3s64, v4s1, v4s8, v4s16, v4s32, v4s64, v8s1,
- v8s8, v8s16, v8s32, v8s64, v16s1, v16s8, v16s16, v16s32, v16s64};
+ p0, p1, p2, p3, p4, p5, p6, s1, s8, s16,
+ s32, s64, v2s1, v2s8, v2s16, v2s32, v2s64, v3s1, v3s8, v3s16,
+ v3s32, v3s64, v4s1, v4s8, v4s16, v4s32, v4s64, v8s1, v8s8, v8s16,
+ v8s32, v8s64, v16s1, v16s8, v16s16, v16s32, v16s64};
auto allScalarsAndVectors = {
s1, s8, s16, s32, s64, v2s1, v2s8, v2s16, v2s32, v2s64,
@@ -133,8 +135,8 @@ SPIRVLegalizerInfo::SPIRVLegalizerInfo(const SPIRVSubtarget &ST) {
auto allFloatAndIntScalars = allIntScalars;
- auto allPtrs = {p0, p1, p2, p3, p4, p5};
- auto allWritablePtrs = {p0, p1, p3, p4};
+ auto allPtrs = {p0, p1, p2, p3, p4, p5, p6};
+ auto allWritablePtrs = {p0, p1, p3, p4, p5, p6};
for (auto Opc : TypeFoldingSupportingOpcs)
getActionDefinitionsBuilder(Opc).custom();
diff --git a/llvm/lib/Target/SPIRV/SPIRVModuleAnalysis.cpp b/llvm/lib/Target/SPIRV/SPIRVModuleAnalysis.cpp
index dbda2871e153..3be28c97d953 100644
--- a/llvm/lib/Target/SPIRV/SPIRVModuleAnalysis.cpp
+++ b/llvm/lib/Target/SPIRV/SPIRVModuleAnalysis.cpp
@@ -1063,12 +1063,28 @@ void addInstrRequirements(const MachineInstr &MI,
Reqs.addCapability(SPIRV::Capability::ExpectAssumeKHR);
}
break;
+ case SPIRV::OpPtrCastToCrossWorkgroupINTEL:
+ case SPIRV::OpCrossWorkgroupCastToPtrINTEL:
+ if (ST.canUseExtension(SPIRV::Extension::SPV_INTEL_usm_storage_classes)) {
+ Reqs.addExtension(SPIRV::Extension::SPV_INTEL_usm_storage_classes);
+ Reqs.addCapability(SPIRV::Capability::USMStorageClassesINTEL);
+ }
+ break;
case SPIRV::OpConstantFunctionPointerINTEL:
if (ST.canUseExtension(SPIRV::Extension::SPV_INTEL_function_pointers)) {
Reqs.addExtension(SPIRV::Extension::SPV_INTEL_function_pointers);
Reqs.addCapability(SPIRV::Capability::FunctionPointersINTEL);
}
break;
+ case SPIRV::OpGroupNonUniformRotateKHR:
+ if (!ST.canUseExtension(SPIRV::Extension::SPV_KHR_subgroup_rotate))
+ report_fatal_error("OpGroupNonUniformRotateKHR instruction requires the "
+ "following SPIR-V extension: SPV_KHR_subgroup_rotate",
+ false);
+ Reqs.addExtension(SPIRV::Extension::SPV_KHR_subgroup_rotate);
+ Reqs.addCapability(SPIRV::Capability::GroupNonUniformRotateKHR);
+ Reqs.addCapability(SPIRV::Capability::GroupNonUniform);
+ break;
case SPIRV::OpGroupIMulKHR:
case SPIRV::OpGroupFMulKHR:
case SPIRV::OpGroupBitwiseAndKHR:
diff --git a/llvm/lib/Target/SPIRV/SPIRVPreLegalizer.cpp b/llvm/lib/Target/SPIRV/SPIRVPreLegalizer.cpp
index cbc16fa98661..144216896eb6 100644
--- a/llvm/lib/Target/SPIRV/SPIRVPreLegalizer.cpp
+++ b/llvm/lib/Target/SPIRV/SPIRVPreLegalizer.cpp
@@ -122,6 +122,9 @@ static void foldConstantsIntoIntrinsics(MachineFunction &MF) {
static void insertBitcasts(MachineFunction &MF, SPIRVGlobalRegistry *GR,
MachineIRBuilder MIB) {
+ // Get access to information about available extensions
+ const SPIRVSubtarget *ST =
+ static_cast<const SPIRVSubtarget *>(&MIB.getMF().getSubtarget());
SmallVector<MachineInstr *, 10> ToErase;
for (MachineBasicBlock &MBB : MF) {
for (MachineInstr &MI : MBB) {
@@ -141,7 +144,7 @@ static void insertBitcasts(MachineFunction &MF, SPIRVGlobalRegistry *GR,
getMDOperandAsType(MI.getOperand(3).getMetadata(), 0), MIB);
SPIRVType *AssignedPtrType = GR->getOrCreateSPIRVPointerType(
BaseTy, MI, *MF.getSubtarget<SPIRVSubtarget>().getInstrInfo(),
- addressSpaceToStorageClass(MI.getOperand(4).getImm()));
+ addressSpaceToStorageClass(MI.getOperand(4).getImm(), *ST));
// If the bitcast would be redundant, replace all uses with the source
// register.
@@ -250,6 +253,10 @@ Register insertAssignInstr(Register Reg, Type *Ty, SPIRVType *SpirvTy,
static void generateAssignInstrs(MachineFunction &MF, SPIRVGlobalRegistry *GR,
MachineIRBuilder MIB) {
+ // Get access to information about available extensions
+ const SPIRVSubtarget *ST =
+ static_cast<const SPIRVSubtarget *>(&MIB.getMF().getSubtarget());
+
MachineRegisterInfo &MRI = MF.getRegInfo();
SmallVector<MachineInstr *, 10> ToErase;
@@ -269,7 +276,7 @@ static void generateAssignInstrs(MachineFunction &MF, SPIRVGlobalRegistry *GR,
getMDOperandAsType(MI.getOperand(2).getMetadata(), 0), MIB);
SPIRVType *AssignedPtrType = GR->getOrCreateSPIRVPointerType(
BaseTy, MI, *MF.getSubtarget<SPIRVSubtarget>().getInstrInfo(),
- addressSpaceToStorageClass(MI.getOperand(3).getImm()));
+ addressSpaceToStorageClass(MI.getOperand(3).getImm(), *ST));
MachineInstr *Def = MRI.getVRegDef(Reg);
assert(Def && "Expecting an instruction that defines the register");
insertAssignInstr(Reg, nullptr, AssignedPtrType, GR, MIB,
diff --git a/llvm/lib/Target/SPIRV/SPIRVSubtarget.cpp b/llvm/lib/Target/SPIRV/SPIRVSubtarget.cpp
index e186154aa408..79f16146ccd9 100644
--- a/llvm/lib/Target/SPIRV/SPIRVSubtarget.cpp
+++ b/llvm/lib/Target/SPIRV/SPIRVSubtarget.cpp
@@ -49,6 +49,12 @@ cl::list<SPIRV::Extension::Extension> Extensions(
clEnumValN(SPIRV::Extension::SPV_INTEL_optnone, "SPV_INTEL_optnone",
"Adds OptNoneINTEL value for Function Control mask that "
"indicates a request to not optimize the function."),
+ clEnumValN(SPIRV::Extension::SPV_INTEL_usm_storage_classes,
+ "SPV_INTEL_usm_storage_classes",
+                   "Introduces two new storage classes that are subclasses of "
+                   "the CrossWorkgroup storage class "
+                   "and that provide additional information that can enable "
+ "optimization."),
clEnumValN(SPIRV::Extension::SPV_INTEL_subgroups, "SPV_INTEL_subgroups",
"Allows work items in a subgroup to share data without the "
"use of local memory and work group barriers, and to "
@@ -75,6 +81,10 @@ cl::list<SPIRV::Extension::Extension> Extensions(
"Allows to use the LinkOnceODR linkage type that is to let "
"a function or global variable to be merged with other functions "
"or global variables of the same name when linkage occurs."),
+ clEnumValN(SPIRV::Extension::SPV_KHR_subgroup_rotate,
+ "SPV_KHR_subgroup_rotate",
+ "Adds a new instruction that enables rotating values across "
+ "invocations within a subgroup."),
clEnumValN(SPIRV::Extension::SPV_INTEL_function_pointers,
"SPV_INTEL_function_pointers",
"Allows translation of function pointers.")));
diff --git a/llvm/lib/Target/SPIRV/SPIRVSymbolicOperands.td b/llvm/lib/Target/SPIRV/SPIRVSymbolicOperands.td
index 4e5ac0d531b2..b022b97408d7 100644
--- a/llvm/lib/Target/SPIRV/SPIRVSymbolicOperands.td
+++ b/llvm/lib/Target/SPIRV/SPIRVSymbolicOperands.td
@@ -455,6 +455,7 @@ defm BitInstructions : CapabilityOperand<6025, 0, 0, [SPV_KHR_bit_instructions],
defm ExpectAssumeKHR : CapabilityOperand<5629, 0, 0, [SPV_KHR_expect_assume], []>;
defm FunctionPointersINTEL : CapabilityOperand<5603, 0, 0, [SPV_INTEL_function_pointers], []>;
defm IndirectReferencesINTEL : CapabilityOperand<5604, 0, 0, [SPV_INTEL_function_pointers], []>;
+defm GroupNonUniformRotateKHR : CapabilityOperand<6026, 0, 0, [SPV_KHR_subgroup_rotate], [GroupNonUniform]>;
defm AtomicFloat32AddEXT : CapabilityOperand<6033, 0, 0, [SPV_EXT_shader_atomic_float_add], []>;
defm AtomicFloat64AddEXT : CapabilityOperand<6034, 0, 0, [SPV_EXT_shader_atomic_float_add], []>;
defm AtomicFloat16AddEXT : CapabilityOperand<6095, 0, 0, [SPV_EXT_shader_atomic_float16_add], []>;
@@ -462,6 +463,7 @@ defm AtomicFloat16MinMaxEXT : CapabilityOperand<5616, 0, 0, [SPV_EXT_shader_atom
defm AtomicFloat32MinMaxEXT : CapabilityOperand<5612, 0, 0, [SPV_EXT_shader_atomic_float_min_max], []>;
defm AtomicFloat64MinMaxEXT : CapabilityOperand<5613, 0, 0, [SPV_EXT_shader_atomic_float_min_max], []>;
defm GroupUniformArithmeticKHR : CapabilityOperand<6400, 0, 0, [SPV_KHR_uniform_group_instructions], []>;
+defm USMStorageClassesINTEL : CapabilityOperand<5935, 0, 0, [SPV_INTEL_usm_storage_classes], [Kernel]>;
//===----------------------------------------------------------------------===//
// Multiclass used to define SourceLanguage enum values and at the same time
@@ -699,6 +701,8 @@ defm IncomingRayPayloadNV : StorageClassOperand<5342, [RayTracingNV]>;
defm ShaderRecordBufferNV : StorageClassOperand<5343, [RayTracingNV]>;
defm PhysicalStorageBufferEXT : StorageClassOperand<5349, [PhysicalStorageBufferAddressesEXT]>;
defm CodeSectionINTEL : StorageClassOperand<5605, [FunctionPointersINTEL]>;
+defm DeviceOnlyINTEL : StorageClassOperand<5936, [USMStorageClassesINTEL]>;
+defm HostOnlyINTEL : StorageClassOperand<5937, [USMStorageClassesINTEL]>;
//===----------------------------------------------------------------------===//
// Multiclass used to define Dim enum values and at the same time
diff --git a/llvm/lib/Target/SPIRV/SPIRVUtils.cpp b/llvm/lib/Target/SPIRV/SPIRVUtils.cpp
index 05f766d3ec54..169d7cc93897 100644
--- a/llvm/lib/Target/SPIRV/SPIRVUtils.cpp
+++ b/llvm/lib/Target/SPIRV/SPIRVUtils.cpp
@@ -14,6 +14,7 @@
#include "MCTargetDesc/SPIRVBaseInfo.h"
#include "SPIRV.h"
#include "SPIRVInstrInfo.h"
+#include "SPIRVSubtarget.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/CodeGen/GlobalISel/GenericMachineInstrs.h"
#include "llvm/CodeGen/GlobalISel/MachineIRBuilder.h"
@@ -146,15 +147,19 @@ unsigned storageClassToAddressSpace(SPIRV::StorageClass::StorageClass SC) {
return 3;
case SPIRV::StorageClass::Generic:
return 4;
+ case SPIRV::StorageClass::DeviceOnlyINTEL:
+ return 5;
+ case SPIRV::StorageClass::HostOnlyINTEL:
+ return 6;
case SPIRV::StorageClass::Input:
return 7;
default:
- llvm_unreachable("Unable to get address space id");
+ report_fatal_error("Unable to get address space id");
}
}
SPIRV::StorageClass::StorageClass
-addressSpaceToStorageClass(unsigned AddrSpace) {
+addressSpaceToStorageClass(unsigned AddrSpace, const SPIRVSubtarget &STI) {
switch (AddrSpace) {
case 0:
return SPIRV::StorageClass::Function;
@@ -166,10 +171,18 @@ addressSpaceToStorageClass(unsigned AddrSpace) {
return SPIRV::StorageClass::Workgroup;
case 4:
return SPIRV::StorageClass::Generic;
+ case 5:
+ return STI.canUseExtension(SPIRV::Extension::SPV_INTEL_usm_storage_classes)
+ ? SPIRV::StorageClass::DeviceOnlyINTEL
+ : SPIRV::StorageClass::CrossWorkgroup;
+ case 6:
+ return STI.canUseExtension(SPIRV::Extension::SPV_INTEL_usm_storage_classes)
+ ? SPIRV::StorageClass::HostOnlyINTEL
+ : SPIRV::StorageClass::CrossWorkgroup;
case 7:
return SPIRV::StorageClass::Input;
default:
- llvm_unreachable("Unknown address space");
+ report_fatal_error("Unknown address space");
}
}
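
The addressSpaceToStorageClass change above maps address spaces 5 and 6 to the new USM storage classes only when the extension is enabled, falling back to CrossWorkgroup otherwise. A simplified sketch of the mapping, where hasUSMExt stands in for STI.canUseExtension(SPV_INTEL_usm_storage_classes) and the function name is hypothetical:

static const char *addrSpaceToStorageClassName(unsigned AS, bool hasUSMExt) {
  switch (AS) {
  case 0: return "Function";
  case 1: return "CrossWorkgroup";
  case 2: return "UniformConstant";
  case 3: return "Workgroup";
  case 4: return "Generic";
  case 5: return hasUSMExt ? "DeviceOnlyINTEL" : "CrossWorkgroup";
  case 6: return hasUSMExt ? "HostOnlyINTEL" : "CrossWorkgroup";
  case 7: return "Input";
  default: return nullptr; // the real code now reports a fatal error here
  }
}
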
diff --git a/llvm/lib/Target/SPIRV/SPIRVUtils.h b/llvm/lib/Target/SPIRV/SPIRVUtils.h
index a33dc02f854f..1af53dcd0c4c 100644
--- a/llvm/lib/Target/SPIRV/SPIRVUtils.h
+++ b/llvm/lib/Target/SPIRV/SPIRVUtils.h
@@ -27,6 +27,7 @@ class MachineRegisterInfo;
class Register;
class StringRef;
class SPIRVInstrInfo;
+class SPIRVSubtarget;
// Add the given string as a series of integer operand, inserting null
// terminators and padding to make sure the operands all have 32-bit
@@ -62,7 +63,7 @@ unsigned storageClassToAddressSpace(SPIRV::StorageClass::StorageClass SC);
// Convert an LLVM IR address space to a SPIR-V storage class.
SPIRV::StorageClass::StorageClass
-addressSpaceToStorageClass(unsigned AddrSpace);
+addressSpaceToStorageClass(unsigned AddrSpace, const SPIRVSubtarget &STI);
SPIRV::MemorySemantics::MemorySemantics
getMemSemanticsForStorageClass(SPIRV::StorageClass::StorageClass SC);
diff --git a/llvm/lib/Target/WebAssembly/WebAssemblyISelLowering.cpp b/llvm/lib/Target/WebAssembly/WebAssemblyISelLowering.cpp
index 7c47790d1e35..36f067956e63 100644
--- a/llvm/lib/Target/WebAssembly/WebAssemblyISelLowering.cpp
+++ b/llvm/lib/Target/WebAssembly/WebAssemblyISelLowering.cpp
@@ -43,6 +43,8 @@ using namespace llvm;
#define DEBUG_TYPE "wasm-lower"
+extern cl::opt<bool> WasmEmitMultiValue;
+
WebAssemblyTargetLowering::WebAssemblyTargetLowering(
const TargetMachine &TM, const WebAssemblySubtarget &STI)
: TargetLowering(TM), Subtarget(&STI) {
@@ -1288,7 +1290,7 @@ bool WebAssemblyTargetLowering::CanLowerReturn(
const SmallVectorImpl<ISD::OutputArg> &Outs,
LLVMContext & /*Context*/) const {
// WebAssembly can only handle returning tuples with multivalue enabled
- return Subtarget->hasMultivalue() || Outs.size() <= 1;
+ return (Subtarget->hasMultivalue() && WasmEmitMultiValue) || Outs.size() <= 1;
}
SDValue WebAssemblyTargetLowering::LowerReturn(
@@ -1296,7 +1298,8 @@ SDValue WebAssemblyTargetLowering::LowerReturn(
const SmallVectorImpl<ISD::OutputArg> &Outs,
const SmallVectorImpl<SDValue> &OutVals, const SDLoc &DL,
SelectionDAG &DAG) const {
- assert((Subtarget->hasMultivalue() || Outs.size() <= 1) &&
+ assert(((Subtarget->hasMultivalue() && WasmEmitMultiValue) ||
+ Outs.size() <= 1) &&
"MVP WebAssembly can only return up to one value");
if (!callingConvSupported(CallConv))
fail(DL, DAG, "WebAssembly doesn't support non-C calling conventions");
diff --git a/llvm/lib/Target/WebAssembly/WebAssemblyMachineFunctionInfo.cpp b/llvm/lib/Target/WebAssembly/WebAssemblyMachineFunctionInfo.cpp
index 1e959111a4db..b969b8370a3e 100644
--- a/llvm/lib/Target/WebAssembly/WebAssemblyMachineFunctionInfo.cpp
+++ b/llvm/lib/Target/WebAssembly/WebAssemblyMachineFunctionInfo.cpp
@@ -22,6 +22,8 @@
#include "llvm/Target/TargetMachine.h"
using namespace llvm;
+extern cl::opt<bool> WasmEmitMultiValue;
+
WebAssemblyFunctionInfo::~WebAssemblyFunctionInfo() = default; // anchor.
MachineFunctionInfo *WebAssemblyFunctionInfo::clone(
@@ -71,7 +73,8 @@ void llvm::computeSignatureVTs(const FunctionType *Ty,
MVT PtrVT = MVT::getIntegerVT(TM.createDataLayout().getPointerSizeInBits());
if (Results.size() > 1 &&
- !TM.getSubtarget<WebAssemblySubtarget>(ContextFunc).hasMultivalue()) {
+ (!TM.getSubtarget<WebAssemblySubtarget>(ContextFunc).hasMultivalue() ||
+ !WasmEmitMultiValue)) {
// WebAssembly can't lower returns of multiple values without demoting to
// sret unless multivalue is enabled (see
// WebAssemblyTargetLowering::CanLowerReturn). So replace multiple return
diff --git a/llvm/lib/Target/WebAssembly/WebAssemblyRuntimeLibcallSignatures.cpp b/llvm/lib/Target/WebAssembly/WebAssemblyRuntimeLibcallSignatures.cpp
index 3e2e029695ab..2a84c90c8960 100644
--- a/llvm/lib/Target/WebAssembly/WebAssemblyRuntimeLibcallSignatures.cpp
+++ b/llvm/lib/Target/WebAssembly/WebAssemblyRuntimeLibcallSignatures.cpp
@@ -24,6 +24,8 @@
using namespace llvm;
+extern cl::opt<bool> WasmEmitMultiValue;
+
namespace {
enum RuntimeLibcallSignature {
@@ -694,7 +696,7 @@ void llvm::getLibcallSignature(const WebAssemblySubtarget &Subtarget,
Params.push_back(PtrTy);
break;
case i64_i64_func_f32:
- if (Subtarget.hasMultivalue()) {
+ if (Subtarget.hasMultivalue() && WasmEmitMultiValue) {
Rets.push_back(wasm::ValType::I64);
Rets.push_back(wasm::ValType::I64);
} else {
@@ -703,7 +705,7 @@ void llvm::getLibcallSignature(const WebAssemblySubtarget &Subtarget,
Params.push_back(wasm::ValType::F32);
break;
case i64_i64_func_f64:
- if (Subtarget.hasMultivalue()) {
+ if (Subtarget.hasMultivalue() && WasmEmitMultiValue) {
Rets.push_back(wasm::ValType::I64);
Rets.push_back(wasm::ValType::I64);
} else {
@@ -712,7 +714,7 @@ void llvm::getLibcallSignature(const WebAssemblySubtarget &Subtarget,
Params.push_back(wasm::ValType::F64);
break;
case i16_i16_func_i16_i16:
- if (Subtarget.hasMultivalue()) {
+ if (Subtarget.hasMultivalue() && WasmEmitMultiValue) {
Rets.push_back(wasm::ValType::I32);
Rets.push_back(wasm::ValType::I32);
} else {
@@ -722,7 +724,7 @@ void llvm::getLibcallSignature(const WebAssemblySubtarget &Subtarget,
Params.push_back(wasm::ValType::I32);
break;
case i32_i32_func_i32_i32:
- if (Subtarget.hasMultivalue()) {
+ if (Subtarget.hasMultivalue() && WasmEmitMultiValue) {
Rets.push_back(wasm::ValType::I32);
Rets.push_back(wasm::ValType::I32);
} else {
@@ -732,7 +734,7 @@ void llvm::getLibcallSignature(const WebAssemblySubtarget &Subtarget,
Params.push_back(wasm::ValType::I32);
break;
case i64_i64_func_i64_i64:
- if (Subtarget.hasMultivalue()) {
+ if (Subtarget.hasMultivalue() && WasmEmitMultiValue) {
Rets.push_back(wasm::ValType::I64);
Rets.push_back(wasm::ValType::I64);
} else {
@@ -742,7 +744,7 @@ void llvm::getLibcallSignature(const WebAssemblySubtarget &Subtarget,
Params.push_back(wasm::ValType::I64);
break;
case i64_i64_func_i64_i64_i64_i64:
- if (Subtarget.hasMultivalue()) {
+ if (Subtarget.hasMultivalue() && WasmEmitMultiValue) {
Rets.push_back(wasm::ValType::I64);
Rets.push_back(wasm::ValType::I64);
} else {
@@ -754,7 +756,7 @@ void llvm::getLibcallSignature(const WebAssemblySubtarget &Subtarget,
Params.push_back(wasm::ValType::I64);
break;
case i64_i64_func_i64_i64_i64_i64_iPTR:
- if (Subtarget.hasMultivalue()) {
+ if (Subtarget.hasMultivalue() && WasmEmitMultiValue) {
Rets.push_back(wasm::ValType::I64);
Rets.push_back(wasm::ValType::I64);
} else {
@@ -767,7 +769,7 @@ void llvm::getLibcallSignature(const WebAssemblySubtarget &Subtarget,
Params.push_back(PtrTy);
break;
case i64_i64_i64_i64_func_i64_i64_i64_i64:
- if (Subtarget.hasMultivalue()) {
+ if (Subtarget.hasMultivalue() && WasmEmitMultiValue) {
Rets.push_back(wasm::ValType::I64);
Rets.push_back(wasm::ValType::I64);
Rets.push_back(wasm::ValType::I64);
@@ -781,7 +783,7 @@ void llvm::getLibcallSignature(const WebAssemblySubtarget &Subtarget,
Params.push_back(wasm::ValType::I64);
break;
case i64_i64_func_i64_i64_i32:
- if (Subtarget.hasMultivalue()) {
+ if (Subtarget.hasMultivalue() && WasmEmitMultiValue) {
Rets.push_back(wasm::ValType::I64);
Rets.push_back(wasm::ValType::I64);
} else {
@@ -851,7 +853,7 @@ void llvm::getLibcallSignature(const WebAssemblySubtarget &Subtarget,
Params.push_back(wasm::ValType::I64);
break;
case i64_i64_func_i64_i64_i64_i64_i64_i64:
- if (Subtarget.hasMultivalue()) {
+ if (Subtarget.hasMultivalue() && WasmEmitMultiValue) {
Rets.push_back(wasm::ValType::I64);
Rets.push_back(wasm::ValType::I64);
} else {
@@ -865,7 +867,7 @@ void llvm::getLibcallSignature(const WebAssemblySubtarget &Subtarget,
Params.push_back(wasm::ValType::I64);
break;
case i64_i64_func_i32:
- if (Subtarget.hasMultivalue()) {
+ if (Subtarget.hasMultivalue() && WasmEmitMultiValue) {
Rets.push_back(wasm::ValType::I64);
Rets.push_back(wasm::ValType::I64);
} else {
@@ -874,7 +876,7 @@ void llvm::getLibcallSignature(const WebAssemblySubtarget &Subtarget,
Params.push_back(wasm::ValType::I32);
break;
case i64_i64_func_i64:
- if (Subtarget.hasMultivalue()) {
+ if (Subtarget.hasMultivalue() && WasmEmitMultiValue) {
Rets.push_back(wasm::ValType::I64);
Rets.push_back(wasm::ValType::I64);
} else {
diff --git a/llvm/lib/Target/WebAssembly/WebAssemblyTargetMachine.cpp b/llvm/lib/Target/WebAssembly/WebAssemblyTargetMachine.cpp
index 42043a7b8680..3120b6b67906 100644
--- a/llvm/lib/Target/WebAssembly/WebAssemblyTargetMachine.cpp
+++ b/llvm/lib/Target/WebAssembly/WebAssemblyTargetMachine.cpp
@@ -54,6 +54,15 @@ static cl::opt<bool> WasmDisableFixIrreducibleControlFlowPass(
" irreducible control flow optimization pass"),
cl::init(false));
+// A temporary option to control emission of multivalue until the multivalue
+// implementation is stable enough. We currently don't emit multivalue by
+// default even if the feature section allows it.
+// TODO: Stabilize multivalue and delete this option.
+cl::opt<bool>
+ WasmEmitMultiValue("wasm-emit-multivalue", cl::Hidden,
+ cl::desc("WebAssembly: Emit multivalue in the backend"),
+ cl::init(false));
+
extern "C" LLVM_EXTERNAL_VISIBILITY void LLVMInitializeWebAssemblyTarget() {
// Register the target.
RegisterTargetMachine<WebAssemblyTargetMachine> X(
diff --git a/llvm/lib/Target/X86/X86CodeGenPassBuilder.cpp b/llvm/lib/Target/X86/X86CodeGenPassBuilder.cpp
index 4a11dd2e31ac..a620ba911ec6 100644
--- a/llvm/lib/Target/X86/X86CodeGenPassBuilder.cpp
+++ b/llvm/lib/Target/X86/X86CodeGenPassBuilder.cpp
@@ -47,10 +47,9 @@ Error X86CodeGenPassBuilder::addInstSelector(AddMachinePass &) const {
} // namespace
Error X86TargetMachine::buildCodeGenPipeline(
- ModulePassManager &MPM, MachineFunctionPassManager &MFPM,
- MachineFunctionAnalysisManager &, raw_pwrite_stream &Out,
- raw_pwrite_stream *DwoOut, CodeGenFileType FileType,
- CGPassBuilderOption Opt, PassInstrumentationCallbacks *PIC) {
+ ModulePassManager &MPM, raw_pwrite_stream &Out, raw_pwrite_stream *DwoOut,
+ CodeGenFileType FileType, CGPassBuilderOption Opt,
+ PassInstrumentationCallbacks *PIC) {
auto CGPB = X86CodeGenPassBuilder(*this, Opt, PIC);
- return CGPB.buildPipeline(MPM, MFPM, Out, DwoOut, FileType);
+ return CGPB.buildPipeline(MPM, Out, DwoOut, FileType);
}
diff --git a/llvm/lib/Target/X86/X86TargetMachine.h b/llvm/lib/Target/X86/X86TargetMachine.h
index f31c971df958..0fd3e47aaefe 100644
--- a/llvm/lib/Target/X86/X86TargetMachine.h
+++ b/llvm/lib/Target/X86/X86TargetMachine.h
@@ -58,10 +58,9 @@ public:
createMachineFunctionInfo(BumpPtrAllocator &Allocator, const Function &F,
const TargetSubtargetInfo *STI) const override;
- Error buildCodeGenPipeline(ModulePassManager &, MachineFunctionPassManager &,
- MachineFunctionAnalysisManager &,
- raw_pwrite_stream &, raw_pwrite_stream *,
- CodeGenFileType, CGPassBuilderOption,
+ Error buildCodeGenPipeline(ModulePassManager &, raw_pwrite_stream &,
+ raw_pwrite_stream *, CodeGenFileType,
+ CGPassBuilderOption,
PassInstrumentationCallbacks *) override;
bool isJIT() const { return IsJIT; }
diff --git a/llvm/lib/TargetParser/Host.cpp b/llvm/lib/TargetParser/Host.cpp
index 4466d50458e1..a4cc757a9214 100644
--- a/llvm/lib/TargetParser/Host.cpp
+++ b/llvm/lib/TargetParser/Host.cpp
@@ -1846,6 +1846,13 @@ bool sys::getHostCPUFeatures(StringMap<bool> &Features) {
Features["prefetchi"] = HasLeaf7Subleaf1 && ((EDX >> 14) & 1);
Features["usermsr"] = HasLeaf7Subleaf1 && ((EDX >> 15) & 1);
Features["avx10.1-256"] = HasLeaf7Subleaf1 && ((EDX >> 19) & 1);
+ bool HasAPXF = HasLeaf7Subleaf1 && ((EDX >> 21) & 1);
+ Features["egpr"] = HasAPXF;
+ Features["push2pop2"] = HasAPXF;
+ Features["ppx"] = HasAPXF;
+ Features["ndd"] = HasAPXF;
+ Features["ccmp"] = HasAPXF;
+ Features["cf"] = HasAPXF;
bool HasLeafD = MaxLevel >= 0xd &&
!getX86CpuIDAndInfoEx(0xd, 0x1, &EAX, &EBX, &ECX, &EDX);
diff --git a/llvm/lib/Transforms/IPO/HotColdSplitting.cpp b/llvm/lib/Transforms/IPO/HotColdSplitting.cpp
index fabb3c5fb921..5aefcbf13182 100644
--- a/llvm/lib/Transforms/IPO/HotColdSplitting.cpp
+++ b/llvm/lib/Transforms/IPO/HotColdSplitting.cpp
@@ -215,15 +215,10 @@ bool HotColdSplitting::isFunctionCold(const Function &F) const {
return false;
}
-bool HotColdSplitting::isBasicBlockCold(BasicBlock *BB,
- BranchProbability ColdProbThresh,
- SmallPtrSetImpl<BasicBlock *> &ColdBlocks,
- SmallPtrSetImpl<BasicBlock *> &AnnotatedColdBlocks,
- BlockFrequencyInfo *BFI) const {
- // This block is already part of some outlining region.
- if (ColdBlocks.count(BB))
- return true;
-
+bool HotColdSplitting::isBasicBlockCold(
+ BasicBlock *BB, BranchProbability ColdProbThresh,
+ SmallPtrSetImpl<BasicBlock *> &AnnotatedColdBlocks,
+ BlockFrequencyInfo *BFI) const {
if (BFI) {
if (PSI->isColdBlock(BB, BFI))
return true;
@@ -372,18 +367,12 @@ static int getOutliningPenalty(ArrayRef<BasicBlock *> Region,
return Penalty;
}
-Function *HotColdSplitting::extractColdRegion(
- const BlockSequence &Region, const CodeExtractorAnalysisCache &CEAC,
- DominatorTree &DT, BlockFrequencyInfo *BFI, TargetTransformInfo &TTI,
- OptimizationRemarkEmitter &ORE, AssumptionCache *AC, unsigned Count) {
+// Determine if it is beneficial to split the \p Region.
+bool HotColdSplitting::isSplittingBeneficial(CodeExtractor &CE,
+ const BlockSequence &Region,
+ TargetTransformInfo &TTI) {
assert(!Region.empty());
- // TODO: Pass BFI and BPI to update profile information.
- CodeExtractor CE(Region, &DT, /* AggregateArgs */ false, /* BFI */ nullptr,
- /* BPI */ nullptr, AC, /* AllowVarArgs */ false,
- /* AllowAlloca */ false, /* AllocaBlock */ nullptr,
- /* Suffix */ "cold." + std::to_string(Count));
-
// Perform a simple cost/benefit analysis to decide whether or not to permit
// splitting.
SetVector<Value *> Inputs, Outputs, Sinks;
@@ -394,9 +383,18 @@ Function *HotColdSplitting::extractColdRegion(
LLVM_DEBUG(dbgs() << "Split profitability: benefit = " << OutliningBenefit
<< ", penalty = " << OutliningPenalty << "\n");
if (!OutliningBenefit.isValid() || OutliningBenefit <= OutliningPenalty)
- return nullptr;
+ return false;
- Function *OrigF = Region[0]->getParent();
+ return true;
+}
+
+// Split the single-entry cold region with entry point \p EntryPoint. \p CE is
+// the region code extractor.
+Function *HotColdSplitting::extractColdRegion(
+ BasicBlock &EntryPoint, CodeExtractor &CE,
+ const CodeExtractorAnalysisCache &CEAC, BlockFrequencyInfo *BFI,
+ TargetTransformInfo &TTI, OptimizationRemarkEmitter &ORE) {
+ Function *OrigF = EntryPoint.getParent();
if (Function *OutF = CE.extractCodeRegion(CEAC)) {
User *U = *OutF->user_begin();
CallInst *CI = cast<CallInst>(U);
@@ -419,7 +417,7 @@ Function *HotColdSplitting::extractColdRegion(
LLVM_DEBUG(llvm::dbgs() << "Outlined Region: " << *OutF);
ORE.emit([&]() {
return OptimizationRemark(DEBUG_TYPE, "HotColdSplit",
- &*Region[0]->begin())
+ &*EntryPoint.begin())
<< ore::NV("Original", OrigF) << " split cold code into "
<< ore::NV("Split", OutF);
});
@@ -428,9 +426,9 @@ Function *HotColdSplitting::extractColdRegion(
ORE.emit([&]() {
return OptimizationRemarkMissed(DEBUG_TYPE, "ExtractFailed",
- &*Region[0]->begin())
+ &*EntryPoint.begin())
<< "Failed to extract region at block "
- << ore::NV("Block", Region.front());
+ << ore::NV("Block", &EntryPoint);
});
return nullptr;
}
@@ -620,16 +618,18 @@ public:
} // namespace
bool HotColdSplitting::outlineColdRegions(Function &F, bool HasProfileSummary) {
- bool Changed = false;
-
- // The set of cold blocks.
+ // The set of cold blocks outlined.
SmallPtrSet<BasicBlock *, 4> ColdBlocks;
+  // The set of cold blocks that cannot be outlined.
+ SmallPtrSet<BasicBlock *, 4> CannotBeOutlinedColdBlocks;
+
// Set of cold blocks obtained with RPOT.
SmallPtrSet<BasicBlock *, 4> AnnotatedColdBlocks;
- // The worklist of non-intersecting regions left to outline.
- SmallVector<OutliningRegion, 2> OutliningWorklist;
+ // The worklist of non-intersecting regions left to outline. The first member
+ // of the pair is the entry point into the region to be outlined.
+ SmallVector<std::pair<BasicBlock *, CodeExtractor>, 2> OutliningWorklist;
// Set up an RPO traversal. Experimentally, this performs better (outlines
// more) than a PO traversal, because we prevent region overlap by keeping
@@ -655,10 +655,18 @@ bool HotColdSplitting::outlineColdRegions(Function &F, bool HasProfileSummary) {
if (ColdBranchProbDenom.getNumOccurrences())
ColdProbThresh = BranchProbability(1, ColdBranchProbDenom.getValue());
+ unsigned OutlinedFunctionID = 1;
// Find all cold regions.
for (BasicBlock *BB : RPOT) {
- if (!isBasicBlockCold(BB, ColdProbThresh, ColdBlocks, AnnotatedColdBlocks,
- BFI))
+ // This block is already part of some outlining region.
+ if (ColdBlocks.count(BB))
+ continue;
+
+      // This block is already part of some region that cannot be outlined.
+ if (CannotBeOutlinedColdBlocks.count(BB))
+ continue;
+
+ if (!isBasicBlockCold(BB, ColdProbThresh, AnnotatedColdBlocks, BFI))
continue;
LLVM_DEBUG({
@@ -681,50 +689,69 @@ bool HotColdSplitting::outlineColdRegions(Function &F, bool HasProfileSummary) {
return markFunctionCold(F);
}
- // If this outlining region intersects with another, drop the new region.
- //
- // TODO: It's theoretically possible to outline more by only keeping the
- // largest region which contains a block, but the extra bookkeeping to do
- // this is tricky/expensive.
- bool RegionsOverlap = any_of(Region.blocks(), [&](const BlockTy &Block) {
- return !ColdBlocks.insert(Block.first).second;
- });
- if (RegionsOverlap)
- continue;
+ do {
+ BlockSequence SubRegion = Region.takeSingleEntrySubRegion(*DT);
+ LLVM_DEBUG({
+ dbgs() << "Hot/cold splitting attempting to outline these blocks:\n";
+ for (BasicBlock *BB : SubRegion)
+ BB->dump();
+ });
+
+ // TODO: Pass BFI and BPI to update profile information.
+ CodeExtractor CE(
+ SubRegion, &*DT, /* AggregateArgs */ false, /* BFI */ nullptr,
+ /* BPI */ nullptr, AC, /* AllowVarArgs */ false,
+ /* AllowAlloca */ false, /* AllocaBlock */ nullptr,
+ /* Suffix */ "cold." + std::to_string(OutlinedFunctionID));
+
+ if (CE.isEligible() && isSplittingBeneficial(CE, SubRegion, TTI) &&
+ // If this outlining region intersects with another, drop the new
+ // region.
+ //
+ // TODO: It's theoretically possible to outline more by only keeping
+ // the largest region which contains a block, but the extra
+ // bookkeeping to do this is tricky/expensive.
+ none_of(SubRegion, [&](BasicBlock *Block) {
+ return ColdBlocks.contains(Block);
+ })) {
+ ColdBlocks.insert(SubRegion.begin(), SubRegion.end());
+
+ LLVM_DEBUG({
+ for (auto *Block : SubRegion)
+ dbgs() << " contains cold block:" << Block->getName() << "\n";
+ });
+
+ OutliningWorklist.emplace_back(
+ std::make_pair(SubRegion[0], std::move(CE)));
+ ++OutlinedFunctionID;
+ } else {
+ // The cold block region cannot be outlined.
+ for (auto *Block : SubRegion)
+ if ((DT->dominates(BB, Block) && PDT->dominates(Block, BB)) ||
+ (PDT->dominates(BB, Block) && DT->dominates(Block, BB)))
+            // This cold block will be skipped later in the loop, saving compile time.
+ CannotBeOutlinedColdBlocks.insert(Block);
+ }
+ } while (!Region.empty());
- OutliningWorklist.emplace_back(std::move(Region));
++NumColdRegionsFound;
}
}
if (OutliningWorklist.empty())
- return Changed;
+ return false;
// Outline single-entry cold regions, splitting up larger regions as needed.
- unsigned OutlinedFunctionID = 1;
// Cache and recycle the CodeExtractor analysis to avoid O(n^2) compile-time.
CodeExtractorAnalysisCache CEAC(F);
- do {
- OutliningRegion Region = OutliningWorklist.pop_back_val();
- assert(!Region.empty() && "Empty outlining region in worklist");
- do {
- BlockSequence SubRegion = Region.takeSingleEntrySubRegion(*DT);
- LLVM_DEBUG({
- dbgs() << "Hot/cold splitting attempting to outline these blocks:\n";
- for (BasicBlock *BB : SubRegion)
- BB->dump();
- });
-
- Function *Outlined = extractColdRegion(SubRegion, CEAC, *DT, BFI, TTI,
- ORE, AC, OutlinedFunctionID);
- if (Outlined) {
- ++OutlinedFunctionID;
- Changed = true;
- }
- } while (!Region.empty());
- } while (!OutliningWorklist.empty());
+ for (auto &BCE : OutliningWorklist) {
+ Function *Outlined =
+ extractColdRegion(*BCE.first, BCE.second, CEAC, BFI, TTI, ORE);
+ assert(Outlined && "Should be outlined");
+ (void)Outlined;
+ }
- return Changed;
+ return true;
}
bool HotColdSplitting::run(Module &M) {
diff --git a/llvm/lib/Transforms/IPO/OpenMPOpt.cpp b/llvm/lib/Transforms/IPO/OpenMPOpt.cpp
index 4176d561363f..77ca36d64029 100644
--- a/llvm/lib/Transforms/IPO/OpenMPOpt.cpp
+++ b/llvm/lib/Transforms/IPO/OpenMPOpt.cpp
@@ -1471,7 +1471,6 @@ private:
OMPRTL_omp_get_num_threads,
OMPRTL_omp_in_parallel,
OMPRTL_omp_get_cancellation,
- OMPRTL_omp_get_thread_limit,
OMPRTL_omp_get_supported_active_levels,
OMPRTL_omp_get_level,
OMPRTL_omp_get_ancestor_thread_num,
diff --git a/llvm/lib/Transforms/InstCombine/InstCombineCasts.cpp b/llvm/lib/Transforms/InstCombine/InstCombineCasts.cpp
index ed47de287302..33ed1d557537 100644
--- a/llvm/lib/Transforms/InstCombine/InstCombineCasts.cpp
+++ b/llvm/lib/Transforms/InstCombine/InstCombineCasts.cpp
@@ -1543,11 +1543,14 @@ static bool fitsInFPType(ConstantFP *CFP, const fltSemantics &Sem) {
return !losesInfo;
}
-static Type *shrinkFPConstant(ConstantFP *CFP) {
+static Type *shrinkFPConstant(ConstantFP *CFP, bool PreferBFloat) {
if (CFP->getType() == Type::getPPC_FP128Ty(CFP->getContext()))
return nullptr; // No constant folding of this.
+ // See if the value can be truncated to bfloat and then reextended.
+ if (PreferBFloat && fitsInFPType(CFP, APFloat::BFloat()))
+ return Type::getBFloatTy(CFP->getContext());
// See if the value can be truncated to half and then reextended.
- if (fitsInFPType(CFP, APFloat::IEEEhalf()))
+ if (!PreferBFloat && fitsInFPType(CFP, APFloat::IEEEhalf()))
return Type::getHalfTy(CFP->getContext());
// See if the value can be truncated to float and then reextended.
if (fitsInFPType(CFP, APFloat::IEEEsingle()))
@@ -1562,7 +1565,7 @@ static Type *shrinkFPConstant(ConstantFP *CFP) {
// Determine if this is a vector of ConstantFPs and if so, return the minimal
// type we can safely truncate all elements to.
-static Type *shrinkFPConstantVector(Value *V) {
+static Type *shrinkFPConstantVector(Value *V, bool PreferBFloat) {
auto *CV = dyn_cast<Constant>(V);
auto *CVVTy = dyn_cast<FixedVectorType>(V->getType());
if (!CV || !CVVTy)
@@ -1582,7 +1585,7 @@ static Type *shrinkFPConstantVector(Value *V) {
if (!CFP)
return nullptr;
- Type *T = shrinkFPConstant(CFP);
+ Type *T = shrinkFPConstant(CFP, PreferBFloat);
if (!T)
return nullptr;
@@ -1597,7 +1600,7 @@ static Type *shrinkFPConstantVector(Value *V) {
}
/// Find the minimum FP type we can safely truncate to.
-static Type *getMinimumFPType(Value *V) {
+static Type *getMinimumFPType(Value *V, bool PreferBFloat) {
if (auto *FPExt = dyn_cast<FPExtInst>(V))
return FPExt->getOperand(0)->getType();
@@ -1605,7 +1608,7 @@ static Type *getMinimumFPType(Value *V) {
// that can accurately represent it. This allows us to turn
// (float)((double)X+2.0) into x+2.0f.
if (auto *CFP = dyn_cast<ConstantFP>(V))
- if (Type *T = shrinkFPConstant(CFP))
+ if (Type *T = shrinkFPConstant(CFP, PreferBFloat))
return T;
// We can only correctly find a minimum type for a scalable vector when it is
@@ -1617,7 +1620,7 @@ static Type *getMinimumFPType(Value *V) {
// Try to shrink a vector of FP constants. This returns nullptr on scalable
// vectors
- if (Type *T = shrinkFPConstantVector(V))
+ if (Type *T = shrinkFPConstantVector(V, PreferBFloat))
return T;
return V->getType();
@@ -1686,8 +1689,10 @@ Instruction *InstCombinerImpl::visitFPTrunc(FPTruncInst &FPT) {
Type *Ty = FPT.getType();
auto *BO = dyn_cast<BinaryOperator>(FPT.getOperand(0));
if (BO && BO->hasOneUse()) {
- Type *LHSMinType = getMinimumFPType(BO->getOperand(0));
- Type *RHSMinType = getMinimumFPType(BO->getOperand(1));
+ Type *LHSMinType =
+ getMinimumFPType(BO->getOperand(0), /*PreferBFloat=*/Ty->isBFloatTy());
+ Type *RHSMinType =
+ getMinimumFPType(BO->getOperand(1), /*PreferBFloat=*/Ty->isBFloatTy());
unsigned OpWidth = BO->getType()->getFPMantissaWidth();
unsigned LHSWidth = LHSMinType->getFPMantissaWidth();
unsigned RHSWidth = RHSMinType->getFPMantissaWidth();
diff --git a/llvm/lib/Transforms/InstCombine/InstructionCombining.cpp b/llvm/lib/Transforms/InstCombine/InstructionCombining.cpp
index 4af455c37c78..87c8dca7efed 100644
--- a/llvm/lib/Transforms/InstCombine/InstructionCombining.cpp
+++ b/llvm/lib/Transforms/InstCombine/InstructionCombining.cpp
@@ -2387,6 +2387,20 @@ Value *InstCombiner::getFreelyInvertedImpl(Value *V, bool WillInvertAllUses,
return NonNull;
}
+ if (match(V, m_SExtLike(m_Value(A)))) {
+ if (auto *AV = getFreelyInvertedImpl(A, A->hasOneUse(), Builder,
+ DoesConsume, Depth))
+ return Builder ? Builder->CreateSExt(AV, V->getType()) : NonNull;
+ return nullptr;
+ }
+
+ if (match(V, m_Trunc(m_Value(A)))) {
+ if (auto *AV = getFreelyInvertedImpl(A, A->hasOneUse(), Builder,
+ DoesConsume, Depth))
+ return Builder ? Builder->CreateTrunc(AV, V->getType()) : NonNull;
+ return nullptr;
+ }
+
return nullptr;
}
diff --git a/llvm/lib/Transforms/Instrumentation/HWAddressSanitizer.cpp b/llvm/lib/Transforms/Instrumentation/HWAddressSanitizer.cpp
index 393afc915205..33add6d4cd76 100644
--- a/llvm/lib/Transforms/Instrumentation/HWAddressSanitizer.cpp
+++ b/llvm/lib/Transforms/Instrumentation/HWAddressSanitizer.cpp
@@ -348,7 +348,7 @@ private:
void instrumentGlobals();
Value *getPC(IRBuilder<> &IRB);
- Value *getSP(IRBuilder<> &IRB);
+ Value *getFP(IRBuilder<> &IRB);
Value *getFrameRecordInfo(IRBuilder<> &IRB);
void instrumentPersonalityFunctions();
@@ -1148,7 +1148,7 @@ Value *HWAddressSanitizer::getStackBaseTag(IRBuilder<> &IRB) {
// Extract some entropy from the stack pointer for the tags.
// Take bits 20..28 (ASLR entropy) and xor with bits 0..8 (these differ
// between functions).
- Value *StackPointerLong = getSP(IRB);
+ Value *StackPointerLong = getFP(IRB);
Value *StackTag =
applyTagMask(IRB, IRB.CreateXor(StackPointerLong,
IRB.CreateLShr(StackPointerLong, 20)));
@@ -1165,7 +1165,7 @@ Value *HWAddressSanitizer::getAllocaTag(IRBuilder<> &IRB, Value *StackTag,
}
Value *HWAddressSanitizer::getUARTag(IRBuilder<> &IRB) {
- Value *StackPointerLong = getSP(IRB);
+ Value *StackPointerLong = getFP(IRB);
Value *UARTag =
applyTagMask(IRB, IRB.CreateLShr(StackPointerLong, PointerTagShift));
@@ -1232,7 +1232,7 @@ Value *HWAddressSanitizer::getPC(IRBuilder<> &IRB) {
return IRB.CreatePtrToInt(IRB.GetInsertBlock()->getParent(), IntptrTy);
}
-Value *HWAddressSanitizer::getSP(IRBuilder<> &IRB) {
+Value *HWAddressSanitizer::getFP(IRBuilder<> &IRB) {
if (!CachedSP) {
// FIXME: use addressofreturnaddress (but implement it in aarch64 backend
// first).
@@ -1251,7 +1251,7 @@ Value *HWAddressSanitizer::getSP(IRBuilder<> &IRB) {
Value *HWAddressSanitizer::getFrameRecordInfo(IRBuilder<> &IRB) {
// Prepare ring buffer data.
Value *PC = getPC(IRB);
- Value *SP = getSP(IRB);
+ Value *SP = getFP(IRB);
// Mix SP and PC.
// Assumptions:
diff --git a/llvm/lib/Transforms/Scalar/ConstraintElimination.cpp b/llvm/lib/Transforms/Scalar/ConstraintElimination.cpp
index db05c63f388f..9b6a39e98f5c 100644
--- a/llvm/lib/Transforms/Scalar/ConstraintElimination.cpp
+++ b/llvm/lib/Transforms/Scalar/ConstraintElimination.cpp
@@ -499,6 +499,8 @@ static Decomposition decompose(Value *V,
if (!Ty->isIntegerTy() || Ty->getIntegerBitWidth() > 64)
return V;
+ bool IsKnownNonNegative = false;
+
// Decompose \p V used with a signed predicate.
if (IsSigned) {
if (auto *CI = dyn_cast<ConstantInt>(V)) {
@@ -507,6 +509,14 @@ static Decomposition decompose(Value *V,
}
Value *Op0;
Value *Op1;
+
+ if (match(V, m_SExt(m_Value(Op0))))
+ V = Op0;
+ else if (match(V, m_NNegZExt(m_Value(Op0)))) {
+ V = Op0;
+ IsKnownNonNegative = true;
+ }
+
if (match(V, m_NSWAdd(m_Value(Op0), m_Value(Op1))))
return MergeResults(Op0, Op1, IsSigned);
@@ -529,7 +539,7 @@ static Decomposition decompose(Value *V,
}
}
- return V;
+ return {V, IsKnownNonNegative};
}
if (auto *CI = dyn_cast<ConstantInt>(V)) {
@@ -539,7 +549,6 @@ static Decomposition decompose(Value *V,
}
Value *Op0;
- bool IsKnownNonNegative = false;
if (match(V, m_ZExt(m_Value(Op0)))) {
IsKnownNonNegative = true;
V = Op0;
diff --git a/llvm/lib/Transforms/Scalar/CorrelatedValuePropagation.cpp b/llvm/lib/Transforms/Scalar/CorrelatedValuePropagation.cpp
index 9235850de92f..6ce9eb3656c9 100644
--- a/llvm/lib/Transforms/Scalar/CorrelatedValuePropagation.cpp
+++ b/llvm/lib/Transforms/Scalar/CorrelatedValuePropagation.cpp
@@ -47,11 +47,6 @@ using namespace llvm;
#define DEBUG_TYPE "correlated-value-propagation"
-static cl::opt<bool> CanonicalizeICmpPredicatesToUnsigned(
- "canonicalize-icmp-predicates-to-unsigned", cl::init(true), cl::Hidden,
- cl::desc("Enables canonicalization of signed relational predicates to "
- "unsigned (e.g. sgt => ugt)"));
-
STATISTIC(NumPhis, "Number of phis propagated");
STATISTIC(NumPhiCommon, "Number of phis deleted via common incoming value");
STATISTIC(NumSelects, "Number of selects propagated");
@@ -90,6 +85,8 @@ STATISTIC(NumSaturating,
"Number of saturating arithmetics converted to normal arithmetics");
STATISTIC(NumNonNull, "Number of function pointer arguments marked non-null");
STATISTIC(NumMinMax, "Number of llvm.[us]{min,max} intrinsics removed");
+STATISTIC(NumSMinMax,
+ "Number of llvm.s{min,max} intrinsics simplified to unsigned");
STATISTIC(NumUDivURemsNarrowedExpanded,
"Number of bound udiv's/urem's expanded");
STATISTIC(NumZExt, "Number of non-negative deductions");
@@ -289,9 +286,6 @@ static bool processPHI(PHINode *P, LazyValueInfo *LVI, DominatorTree *DT,
}
static bool processICmp(ICmpInst *Cmp, LazyValueInfo *LVI) {
- if (!CanonicalizeICmpPredicatesToUnsigned)
- return false;
-
// Only for signed relational comparisons of scalar integers.
if (Cmp->getType()->isVectorTy() ||
!Cmp->getOperand(0)->getType()->isIntegerTy())
@@ -528,17 +522,40 @@ static bool processAbsIntrinsic(IntrinsicInst *II, LazyValueInfo *LVI) {
}
// See if this min/max intrinsic always picks its one specific operand.
+// If not, check whether we can canonicalize a signed minmax into its unsigned form.
static bool processMinMaxIntrinsic(MinMaxIntrinsic *MM, LazyValueInfo *LVI) {
CmpInst::Predicate Pred = CmpInst::getNonStrictPredicate(MM->getPredicate());
- LazyValueInfo::Tristate Result = LVI->getPredicateAt(
- Pred, MM->getLHS(), MM->getRHS(), MM, /*UseBlockValue=*/true);
- if (Result == LazyValueInfo::Unknown)
- return false;
+ ConstantRange LHS_CR = LVI->getConstantRangeAtUse(MM->getOperandUse(0),
+ /*UndefAllowed*/ false);
+ ConstantRange RHS_CR = LVI->getConstantRangeAtUse(MM->getOperandUse(1),
+ /*UndefAllowed*/ false);
+ if (LHS_CR.icmp(Pred, RHS_CR)) {
+ ++NumMinMax;
+ MM->replaceAllUsesWith(MM->getLHS());
+ MM->eraseFromParent();
+ return true;
+ }
+ if (RHS_CR.icmp(Pred, LHS_CR)) {
+ ++NumMinMax;
+ MM->replaceAllUsesWith(MM->getRHS());
+ MM->eraseFromParent();
+ return true;
+ }
- ++NumMinMax;
- MM->replaceAllUsesWith(MM->getOperand(!Result));
- MM->eraseFromParent();
- return true;
+ if (MM->isSigned() &&
+ ConstantRange::areInsensitiveToSignednessOfICmpPredicate(LHS_CR,
+ RHS_CR)) {
+ ++NumSMinMax;
+ IRBuilder<> B(MM);
+ MM->replaceAllUsesWith(B.CreateBinaryIntrinsic(
+ MM->getIntrinsicID() == Intrinsic::smin ? Intrinsic::umin
+ : Intrinsic::umax,
+ MM->getLHS(), MM->getRHS()));
+ MM->eraseFromParent();
+ return true;
+ }
+
+ return false;
}
// Rewrite this with.overflow intrinsic as non-overflowing.
diff --git a/llvm/lib/Transforms/Scalar/InferAddressSpaces.cpp b/llvm/lib/Transforms/Scalar/InferAddressSpaces.cpp
index 1bf50d79e533..851eab04c8db 100644
--- a/llvm/lib/Transforms/Scalar/InferAddressSpaces.cpp
+++ b/llvm/lib/Transforms/Scalar/InferAddressSpaces.cpp
@@ -1221,6 +1221,7 @@ bool InferAddressSpacesImpl::rewriteWithNewAddressSpaces(
Value::use_iterator I, E, Next;
for (I = V->use_begin(), E = V->use_end(); I != E;) {
Use &U = *I;
+ User *CurUser = U.getUser();
// Some users may see the same pointer operand in multiple operands. Skip
// to the next instruction.
@@ -1231,11 +1232,10 @@ bool InferAddressSpacesImpl::rewriteWithNewAddressSpaces(
// If V is used as the pointer operand of a compatible memory operation,
// sets the pointer operand to NewV. This replacement does not change
// the element type, so the resultant load/store is still valid.
- U.set(NewV);
+ CurUser->replaceUsesOfWith(V, NewV);
continue;
}
- User *CurUser = U.getUser();
// Skip if the current user is the new value itself.
if (CurUser == NewV)
continue;
@@ -1311,10 +1311,13 @@ bool InferAddressSpacesImpl::rewriteWithNewAddressSpaces(
while (isa<PHINode>(InsertPos))
++InsertPos;
- U.set(new AddrSpaceCastInst(NewV, V->getType(), "", &*InsertPos));
+      // This instruction may contain multiple uses of V; update them all.
+ CurUser->replaceUsesOfWith(
+ V, new AddrSpaceCastInst(NewV, V->getType(), "", &*InsertPos));
} else {
- U.set(ConstantExpr::getAddrSpaceCast(cast<Constant>(NewV),
- V->getType()));
+ CurUser->replaceUsesOfWith(
+ V, ConstantExpr::getAddrSpaceCast(cast<Constant>(NewV),
+ V->getType()));
}
}
}
diff --git a/llvm/lib/Transforms/Scalar/LoopStrengthReduce.cpp b/llvm/lib/Transforms/Scalar/LoopStrengthReduce.cpp
index 627c863f7091..08021f3ba853 100644
--- a/llvm/lib/Transforms/Scalar/LoopStrengthReduce.cpp
+++ b/llvm/lib/Transforms/Scalar/LoopStrengthReduce.cpp
@@ -7033,7 +7033,6 @@ static bool ReduceLoopStrength(Loop *L, IVUsers &IU, ScalarEvolution &SE,
// SCEVExpander for both use in preheader and latch
const DataLayout &DL = L->getHeader()->getModule()->getDataLayout();
SCEVExpander Expander(SE, DL, "lsr_fold_term_cond");
- SCEVExpanderCleaner ExpCleaner(Expander);
assert(Expander.isSafeToExpand(TermValueS) &&
"Terminating value was checked safe in canFoldTerminatingCondition");
@@ -7064,10 +7063,9 @@ static bool ReduceLoopStrength(Loop *L, IVUsers &IU, ScalarEvolution &SE,
BI->setCondition(NewTermCond);
+ Expander.clear();
OldTermCond->eraseFromParent();
DeleteDeadPHIs(L->getHeader(), &TLI, MSSAU.get());
-
- ExpCleaner.markResultUsed();
}
}
diff --git a/llvm/lib/Transforms/Scalar/SpeculativeExecution.cpp b/llvm/lib/Transforms/Scalar/SpeculativeExecution.cpp
index f4f3070d11c7..260f31b59ed2 100644
--- a/llvm/lib/Transforms/Scalar/SpeculativeExecution.cpp
+++ b/llvm/lib/Transforms/Scalar/SpeculativeExecution.cpp
@@ -291,9 +291,9 @@ bool SpeculativeExecutionPass::considerHoistingFromTo(
InstructionCost TotalSpeculationCost = 0;
unsigned NotHoistedInstCount = 0;
for (const auto &I : FromBlock) {
- // Make note of any DPValues that need hoisting.
- for (DbgRecord &DR : I.getDbgValueRange()) {
- DPValue &DPV = cast<DPValue>(DR);
+ // Make note of any DPValues that need hoisting. DPLabels
+ // get left behind just like llvm.dbg.labels.
+ for (DPValue &DPV : DPValue::filter(I.getDbgValueRange())) {
if (HasNoUnhoistedInstr(DPV.location_ops()))
DPValuesToHoist[DPV.getInstruction()].push_back(&DPV);
}
diff --git a/llvm/lib/Transforms/Utils/BasicBlockUtils.cpp b/llvm/lib/Transforms/Utils/BasicBlockUtils.cpp
index 7fd6759a61fb..5bb109a04ff1 100644
--- a/llvm/lib/Transforms/Utils/BasicBlockUtils.cpp
+++ b/llvm/lib/Transforms/Utils/BasicBlockUtils.cpp
@@ -386,7 +386,15 @@ static bool DPValuesRemoveRedundantDbgInstrsUsingBackwardScan(BasicBlock *BB) {
SmallVector<DPValue *, 8> ToBeRemoved;
SmallDenseSet<DebugVariable> VariableSet;
for (auto &I : reverse(*BB)) {
- for (DPValue &DPV : reverse(DPValue::filter(I.getDbgValueRange()))) {
+ for (DbgRecord &DR : reverse(I.getDbgValueRange())) {
+ if (isa<DPLabel>(DR)) {
+ // Emulate existing behaviour (see comment below for dbg.declares).
+ // FIXME: Don't do this.
+ VariableSet.clear();
+ continue;
+ }
+
+ DPValue &DPV = cast<DPValue>(DR);
// Skip declare-type records, as the debug intrinsic method only works
// on dbg.value intrinsics.
if (DPV.getType() == DPValue::LocationType::Declare) {
diff --git a/llvm/lib/Transforms/Utils/CodeExtractor.cpp b/llvm/lib/Transforms/Utils/CodeExtractor.cpp
index 8ebcf0c04fd5..bab065153f3e 100644
--- a/llvm/lib/Transforms/Utils/CodeExtractor.cpp
+++ b/llvm/lib/Transforms/Utils/CodeExtractor.cpp
@@ -1585,8 +1585,30 @@ static void fixupDebugInfoPostExtraction(Function &OldFunc, Function &NewFunc,
return cast<DILocalVariable>(NewVar);
};
- auto UpdateDPValuesOnInst = [&](Instruction &I) -> void {
- for (DPValue &DPV : DPValue::filter(I.getDbgValueRange())) {
+ auto UpdateDbgLabel = [&](auto *LabelRecord) {
+ // Point the label record to a fresh label within the new function if
+ // the record was not inlined from some other function.
+ if (LabelRecord->getDebugLoc().getInlinedAt())
+ return;
+ DILabel *OldLabel = LabelRecord->getLabel();
+ DINode *&NewLabel = RemappedMetadata[OldLabel];
+ if (!NewLabel) {
+ DILocalScope *NewScope = DILocalScope::cloneScopeForSubprogram(
+ *OldLabel->getScope(), *NewSP, Ctx, Cache);
+ NewLabel = DILabel::get(Ctx, NewScope, OldLabel->getName(),
+ OldLabel->getFile(), OldLabel->getLine());
+ }
+ LabelRecord->setLabel(cast<DILabel>(NewLabel));
+ };
+
+ auto UpdateDbgRecordsOnInst = [&](Instruction &I) -> void {
+ for (DbgRecord &DR : I.getDbgValueRange()) {
+ if (DPLabel *DPL = dyn_cast<DPLabel>(&DR)) {
+ UpdateDbgLabel(DPL);
+ continue;
+ }
+
+ DPValue &DPV = cast<DPValue>(DR);
// Apply the two updates that dbg.values get: invalid operands, and
// variable metadata fixup.
if (any_of(DPV.location_ops(), IsInvalidLocation)) {
@@ -1599,13 +1621,11 @@ static void fixupDebugInfoPostExtraction(Function &OldFunc, Function &NewFunc,
}
if (!DPV.getDebugLoc().getInlinedAt())
DPV.setVariable(GetUpdatedDIVariable(DPV.getVariable()));
- DPV.setDebugLoc(DebugLoc::replaceInlinedAtSubprogram(DPV.getDebugLoc(),
- *NewSP, Ctx, Cache));
}
};
for (Instruction &I : instructions(NewFunc)) {
- UpdateDPValuesOnInst(I);
+ UpdateDbgRecordsOnInst(I);
auto *DII = dyn_cast<DbgInfoIntrinsic>(&I);
if (!DII)
@@ -1614,17 +1634,7 @@ static void fixupDebugInfoPostExtraction(Function &OldFunc, Function &NewFunc,
// Point the intrinsic to a fresh label within the new function if the
// intrinsic was not inlined from some other function.
if (auto *DLI = dyn_cast<DbgLabelInst>(&I)) {
- if (DLI->getDebugLoc().getInlinedAt())
- continue;
- DILabel *OldLabel = DLI->getLabel();
- DINode *&NewLabel = RemappedMetadata[OldLabel];
- if (!NewLabel) {
- DILocalScope *NewScope = DILocalScope::cloneScopeForSubprogram(
- *OldLabel->getScope(), *NewSP, Ctx, Cache);
- NewLabel = DILabel::get(Ctx, NewScope, OldLabel->getName(),
- OldLabel->getFile(), OldLabel->getLine());
- }
- DLI->setArgOperand(0, MetadataAsValue::get(Ctx, NewLabel));
+ UpdateDbgLabel(DLI);
continue;
}
@@ -1658,6 +1668,9 @@ static void fixupDebugInfoPostExtraction(Function &OldFunc, Function &NewFunc,
if (const DebugLoc &DL = I.getDebugLoc())
I.setDebugLoc(
DebugLoc::replaceInlinedAtSubprogram(DL, *NewSP, Ctx, Cache));
+ for (DbgRecord &DR : I.getDbgValueRange())
+ DR.setDebugLoc(DebugLoc::replaceInlinedAtSubprogram(DR.getDebugLoc(),
+ *NewSP, Ctx, Cache));
// Loop info metadata may contain line locations. Fix them up.
auto updateLoopInfoLoc = [&Ctx, &Cache, NewSP](Metadata *MD) -> Metadata * {
diff --git a/llvm/lib/Transforms/Utils/MemoryTaggingSupport.cpp b/llvm/lib/Transforms/Utils/MemoryTaggingSupport.cpp
index 08fdd3b75ffc..2ff7c0151076 100644
--- a/llvm/lib/Transforms/Utils/MemoryTaggingSupport.cpp
+++ b/llvm/lib/Transforms/Utils/MemoryTaggingSupport.cpp
@@ -111,8 +111,7 @@ Instruction *getUntagLocationIfFunctionExit(Instruction &Inst) {
void StackInfoBuilder::visit(Instruction &Inst) {
// Visit non-intrinsic debug-info records attached to Inst.
- for (DbgRecord &DR : Inst.getDbgValueRange()) {
- DPValue &DPV = cast<DPValue>(DR);
+ for (DPValue &DPV : DPValue::filter(Inst.getDbgValueRange())) {
auto AddIfInteresting = [&](Value *V) {
if (auto *AI = dyn_cast_or_null<AllocaInst>(V)) {
if (!isInterestingAlloca(*AI))
diff --git a/llvm/lib/Transforms/Utils/ValueMapper.cpp b/llvm/lib/Transforms/Utils/ValueMapper.cpp
index 6e46469f5a60..91ab2795a4b9 100644
--- a/llvm/lib/Transforms/Utils/ValueMapper.cpp
+++ b/llvm/lib/Transforms/Utils/ValueMapper.cpp
@@ -538,6 +538,11 @@ Value *Mapper::mapValue(const Value *V) {
}
void Mapper::remapDPValue(DbgRecord &DR) {
+ if (DPLabel *DPL = dyn_cast<DPLabel>(&DR)) {
+ DPL->setLabel(cast<DILabel>(mapMetadata(DPL->getLabel())));
+ return;
+ }
+
DPValue &V = cast<DPValue>(DR);
// Remap variables and DILocations.
auto *MappedVar = mapMetadata(V.getVariable());
diff --git a/llvm/lib/Transforms/Vectorize/SLPVectorizer.cpp b/llvm/lib/Transforms/Vectorize/SLPVectorizer.cpp
index 4e334748c959..de4e56ff8065 100644
--- a/llvm/lib/Transforms/Vectorize/SLPVectorizer.cpp
+++ b/llvm/lib/Transforms/Vectorize/SLPVectorizer.cpp
@@ -2422,18 +2422,25 @@ private:
/// \param TE Tree entry checked for permutation.
/// \param VL List of scalars (a subset of the TE scalar), checked for
/// permutations. Must form single-register vector.
+  /// \param ForOrder If set, fetches the best candidates for ordering info and
+  /// builds the mask using the original vector values, without relying on the
+  /// potential reordering.
/// \returns ShuffleKind, if gathered values can be represented as shuffles of
/// previous tree entries. \p Part of \p Mask is filled with the shuffle mask.
std::optional<TargetTransformInfo::ShuffleKind>
isGatherShuffledSingleRegisterEntry(
const TreeEntry *TE, ArrayRef<Value *> VL, MutableArrayRef<int> Mask,
- SmallVectorImpl<const TreeEntry *> &Entries, unsigned Part);
+ SmallVectorImpl<const TreeEntry *> &Entries, unsigned Part,
+ bool ForOrder);
/// Checks if the gathered \p VL can be represented as multi-register
/// shuffle(s) of previous tree entries.
/// \param TE Tree entry checked for permutation.
/// \param VL List of scalars (a subset of the TE scalar), checked for
/// permutations.
+  /// \param ForOrder If set, fetches the best candidates for ordering info and
+  /// builds the mask using the original vector values, without relying on the
+  /// potential reordering.
/// \returns per-register series of ShuffleKind, if gathered values can be
/// represented as shuffles of previous tree entries. \p Mask is filled with
/// the shuffle mask (also on per-register base).
@@ -2441,7 +2448,7 @@ private:
isGatherShuffledEntry(
const TreeEntry *TE, ArrayRef<Value *> VL, SmallVectorImpl<int> &Mask,
SmallVectorImpl<SmallVector<const TreeEntry *>> &Entries,
- unsigned NumParts);
+ unsigned NumParts, bool ForOrder = false);
/// \returns the scalarization cost for this list of values. Assuming that
/// this subtree gets vectorized, we may need to extract the values from the
@@ -3788,65 +3795,163 @@ static void reorderOrder(SmallVectorImpl<unsigned> &Order, ArrayRef<int> Mask,
std::optional<BoUpSLP::OrdersType>
BoUpSLP::findReusedOrderedScalars(const BoUpSLP::TreeEntry &TE) {
assert(TE.State == TreeEntry::NeedToGather && "Expected gather node only.");
- unsigned NumScalars = TE.Scalars.size();
+ // Try to find subvector extract/insert patterns and reorder only such
+ // patterns.
+ SmallVector<Value *> GatheredScalars(TE.Scalars.begin(), TE.Scalars.end());
+ Type *ScalarTy = GatheredScalars.front()->getType();
+ int NumScalars = GatheredScalars.size();
+ if (!isValidElementType(ScalarTy))
+ return std::nullopt;
+ auto *VecTy = FixedVectorType::get(ScalarTy, NumScalars);
+ int NumParts = TTI->getNumberOfParts(VecTy);
+ if (NumParts == 0 || NumParts >= NumScalars)
+ NumParts = 1;
+ SmallVector<int> ExtractMask;
+ SmallVector<int> Mask;
+ SmallVector<SmallVector<const TreeEntry *>> Entries;
+ SmallVector<std::optional<TargetTransformInfo::ShuffleKind>> ExtractShuffles =
+ tryToGatherExtractElements(GatheredScalars, ExtractMask, NumParts);
+ SmallVector<std::optional<TargetTransformInfo::ShuffleKind>> GatherShuffles =
+ isGatherShuffledEntry(&TE, GatheredScalars, Mask, Entries, NumParts,
+ /*ForOrder=*/true);
+ // No shuffled operands - ignore.
+ if (GatherShuffles.empty() && ExtractShuffles.empty())
+ return std::nullopt;
OrdersType CurrentOrder(NumScalars, NumScalars);
- SmallVector<int> Positions;
- SmallBitVector UsedPositions(NumScalars);
- const TreeEntry *STE = nullptr;
- // Try to find all gathered scalars that are gets vectorized in other
- // vectorize node. Here we can have only one single tree vector node to
- // correctly identify order of the gathered scalars.
- for (unsigned I = 0; I < NumScalars; ++I) {
- Value *V = TE.Scalars[I];
- if (!isa<LoadInst, ExtractElementInst, ExtractValueInst>(V))
- continue;
- if (const auto *LocalSTE = getTreeEntry(V)) {
- if (!STE)
- STE = LocalSTE;
- else if (STE != LocalSTE)
- // Take the order only from the single vector node.
- return std::nullopt;
- unsigned Lane =
- std::distance(STE->Scalars.begin(), find(STE->Scalars, V));
- if (Lane >= NumScalars)
- return std::nullopt;
- if (CurrentOrder[Lane] != NumScalars) {
- if (Lane != I)
+ if (GatherShuffles.size() == 1 &&
+ *GatherShuffles.front() == TTI::SK_PermuteSingleSrc &&
+ Entries.front().front()->isSame(TE.Scalars)) {
+    // Perfect match in the graph; will reuse the previously vectorized
+    // node. Cost is 0.
+ std::iota(CurrentOrder.begin(), CurrentOrder.end(), 0);
+ return CurrentOrder;
+ }
+ auto IsSplatMask = [](ArrayRef<int> Mask) {
+ int SingleElt = PoisonMaskElem;
+ return all_of(Mask, [&](int I) {
+ if (SingleElt == PoisonMaskElem && I != PoisonMaskElem)
+ SingleElt = I;
+ return I == PoisonMaskElem || I == SingleElt;
+ });
+ };
+ // Exclusive broadcast mask - ignore.
+ if ((ExtractShuffles.empty() && IsSplatMask(Mask) &&
+ (Entries.size() != 1 ||
+ Entries.front().front()->ReorderIndices.empty())) ||
+ (GatherShuffles.empty() && IsSplatMask(ExtractMask)))
+ return std::nullopt;
+ SmallBitVector ShuffledSubMasks(NumParts);
+ auto TransformMaskToOrder = [&](MutableArrayRef<unsigned> CurrentOrder,
+ ArrayRef<int> Mask, int PartSz, int NumParts,
+ function_ref<unsigned(unsigned)> GetVF) {
+ for (int I : seq<int>(0, NumParts)) {
+ if (ShuffledSubMasks.test(I))
+ continue;
+ const int VF = GetVF(I);
+ if (VF == 0)
+ continue;
+ MutableArrayRef<unsigned> Slice = CurrentOrder.slice(I * PartSz, PartSz);
+ // Shuffle of at least 2 vectors - ignore.
+ if (any_of(Slice, [&](int I) { return I != NumScalars; })) {
+ std::fill(Slice.begin(), Slice.end(), NumScalars);
+ ShuffledSubMasks.set(I);
+ continue;
+ }
+      // Try to include as many elements from the mask as possible.
+ int FirstMin = INT_MAX;
+      bool SecondVecFound = false;
+ for (int K : seq<int>(0, PartSz)) {
+ int Idx = Mask[I * PartSz + K];
+ if (Idx == PoisonMaskElem) {
+ Value *V = GatheredScalars[I * PartSz + K];
+ if (isConstant(V) && !isa<PoisonValue>(V)) {
+ SecondVecFound = true;
+ break;
+ }
continue;
- UsedPositions.reset(CurrentOrder[Lane]);
+ }
+ if (Idx < VF) {
+ if (FirstMin > Idx)
+ FirstMin = Idx;
+ } else {
+ SecondVecFound = true;
+ break;
+ }
}
- // The partial identity (where only some elements of the gather node are
- // in the identity order) is good.
- CurrentOrder[Lane] = I;
- UsedPositions.set(I);
- }
- }
- // Need to keep the order if we have a vector entry and at least 2 scalars or
- // the vectorized entry has just 2 scalars.
- if (STE && (UsedPositions.count() > 1 || STE->Scalars.size() == 2)) {
- auto &&IsIdentityOrder = [NumScalars](ArrayRef<unsigned> CurrentOrder) {
- for (unsigned I = 0; I < NumScalars; ++I)
- if (CurrentOrder[I] != I && CurrentOrder[I] != NumScalars)
- return false;
- return true;
- };
- if (IsIdentityOrder(CurrentOrder))
- return OrdersType();
- auto *It = CurrentOrder.begin();
- for (unsigned I = 0; I < NumScalars;) {
- if (UsedPositions.test(I)) {
- ++I;
+ FirstMin = (FirstMin / PartSz) * PartSz;
+ // Shuffle of at least 2 vectors - ignore.
+ if (SecondVecFound) {
+ std::fill(Slice.begin(), Slice.end(), NumScalars);
+ ShuffledSubMasks.set(I);
continue;
}
- if (*It == NumScalars) {
- *It = I;
- ++I;
+ for (int K : seq<int>(0, PartSz)) {
+ int Idx = Mask[I * PartSz + K];
+ if (Idx == PoisonMaskElem)
+ continue;
+ Idx -= FirstMin;
+ if (Idx >= PartSz) {
+ SecondVecFound = true;
+ break;
+ }
+ if (CurrentOrder[I * PartSz + Idx] >
+ static_cast<unsigned>(I * PartSz + K) &&
+ CurrentOrder[I * PartSz + Idx] !=
+ static_cast<unsigned>(I * PartSz + Idx))
+ CurrentOrder[I * PartSz + Idx] = I * PartSz + K;
+ }
+ // Shuffle of at least 2 vectors - ignore.
+ if (SecondVecFound) {
+ std::fill(Slice.begin(), Slice.end(), NumScalars);
+ ShuffledSubMasks.set(I);
+ continue;
}
- ++It;
}
- return std::move(CurrentOrder);
+ };
+ int PartSz = NumScalars / NumParts;
+ if (!ExtractShuffles.empty())
+ TransformMaskToOrder(
+ CurrentOrder, ExtractMask, PartSz, NumParts, [&](unsigned I) {
+ if (!ExtractShuffles[I])
+ return 0U;
+ unsigned VF = 0;
+ for (unsigned Idx : seq<unsigned>(0, PartSz)) {
+ int K = I * PartSz + Idx;
+ if (ExtractMask[K] == PoisonMaskElem)
+ continue;
+ if (!TE.ReuseShuffleIndices.empty())
+ K = TE.ReuseShuffleIndices[K];
+ if (!TE.ReorderIndices.empty())
+ K = std::distance(TE.ReorderIndices.begin(),
+ find(TE.ReorderIndices, K));
+ auto *EI = dyn_cast<ExtractElementInst>(TE.Scalars[K]);
+ if (!EI)
+ continue;
+ VF = std::max(VF, cast<VectorType>(EI->getVectorOperandType())
+ ->getElementCount()
+ .getKnownMinValue());
+ }
+ return VF;
+ });
+ // Check special corner case - single shuffle of the same entry.
+ if (GatherShuffles.size() == 1 && NumParts != 1) {
+ if (ShuffledSubMasks.any())
+ return std::nullopt;
+ PartSz = NumScalars;
+ NumParts = 1;
}
- return std::nullopt;
+ if (!Entries.empty())
+ TransformMaskToOrder(CurrentOrder, Mask, PartSz, NumParts, [&](unsigned I) {
+ if (!GatherShuffles[I])
+ return 0U;
+ return std::max(Entries[I].front()->getVectorFactor(),
+ Entries[I].back()->getVectorFactor());
+ });
+ int NumUndefs =
+ count_if(CurrentOrder, [&](int Idx) { return Idx == NumScalars; });
+ if (ShuffledSubMasks.all() || (NumScalars > 2 && NumUndefs >= NumScalars / 2))
+ return std::nullopt;
+ return std::move(CurrentOrder);
}
namespace {
@@ -4168,9 +4273,59 @@ BoUpSLP::getReorderingData(const TreeEntry &TE, bool TopToBottom) {
// 0, 1, 2, 3, 3, 3, 1, 0 - not clustered, because
// element 3 is used twice in the second submask.
unsigned Sz = TE.Scalars.size();
- if (!ShuffleVectorInst::isOneUseSingleSourceMask(TE.ReuseShuffleIndices,
- Sz))
+ if (TE.State == TreeEntry::NeedToGather) {
+ if (std::optional<OrdersType> CurrentOrder =
+ findReusedOrderedScalars(TE)) {
+ SmallVector<int> Mask;
+ fixupOrderingIndices(*CurrentOrder);
+ inversePermutation(*CurrentOrder, Mask);
+ ::addMask(Mask, TE.ReuseShuffleIndices);
+ OrdersType Res(TE.getVectorFactor(), TE.getVectorFactor());
+ unsigned Sz = TE.Scalars.size();
+ for (int K = 0, E = TE.getVectorFactor() / Sz; K < E; ++K) {
+ for (auto [I, Idx] : enumerate(ArrayRef(Mask).slice(K * Sz, Sz)))
+ if (Idx != PoisonMaskElem)
+ Res[Idx + K * Sz] = I + K * Sz;
+ }
+ return std::move(Res);
+ }
+ }
+ if (Sz == 2 && TE.getVectorFactor() == 4 &&
+ TTI->getNumberOfParts(FixedVectorType::get(
+ TE.Scalars.front()->getType(), 2 * TE.getVectorFactor())) == 1)
return std::nullopt;
+ if (!ShuffleVectorInst::isOneUseSingleSourceMask(TE.ReuseShuffleIndices,
+ Sz)) {
+ SmallVector<int> ReorderMask(Sz, PoisonMaskElem);
+ if (TE.ReorderIndices.empty())
+ std::iota(ReorderMask.begin(), ReorderMask.end(), 0);
+ else
+ inversePermutation(TE.ReorderIndices, ReorderMask);
+ ::addMask(ReorderMask, TE.ReuseShuffleIndices);
+ unsigned VF = ReorderMask.size();
+ OrdersType ResOrder(VF, VF);
+ unsigned NumParts = VF / Sz;
+ SmallBitVector UsedVals(NumParts);
+ for (unsigned I = 0; I < VF; I += Sz) {
+ int Val = PoisonMaskElem;
+ unsigned UndefCnt = 0;
+ if (any_of(ArrayRef(ReorderMask).slice(I, Sz),
+ [&](int Idx) {
+ if (Val == PoisonMaskElem && Idx != PoisonMaskElem)
+ Val = Idx;
+ if (Idx == PoisonMaskElem)
+ ++UndefCnt;
+ return Idx != PoisonMaskElem && Idx != Val;
+ }) ||
+ Val >= static_cast<int>(NumParts) || UsedVals.test(Val) ||
+ UndefCnt > Sz / 2)
+ return std::nullopt;
+ UsedVals.set(Val);
+ for (unsigned K = 0; K < NumParts; ++K)
+ ResOrder[Val + Sz * K] = I + K;
+ }
+ return std::move(ResOrder);
+ }
unsigned VF = TE.getVectorFactor();
// Try build correct order for extractelement instructions.
SmallVector<int> ReusedMask(TE.ReuseShuffleIndices.begin(),
@@ -4208,7 +4363,8 @@ BoUpSLP::getReorderingData(const TreeEntry &TE, bool TopToBottom) {
transform(CurrentOrder, It, [K](unsigned Pos) { return Pos + K; });
std::advance(It, Sz);
}
- if (all_of(enumerate(ResOrder),
+ if (TE.State == TreeEntry::NeedToGather &&
+ all_of(enumerate(ResOrder),
[](const auto &Data) { return Data.index() == Data.value(); }))
return std::nullopt; // No need to reorder.
return std::move(ResOrder);
@@ -4298,11 +4454,8 @@ BoUpSLP::getReorderingData(const TreeEntry &TE, bool TopToBottom) {
OrdersType CurrentOrder;
bool Reuse = canReuseExtract(TE.Scalars, TE.getMainOp(), CurrentOrder,
/*ResizeAllowed=*/true);
- if (Reuse || !CurrentOrder.empty()) {
- if (!CurrentOrder.empty())
- fixupOrderingIndices(CurrentOrder);
+ if (Reuse || !CurrentOrder.empty())
return std::move(CurrentOrder);
- }
}
// If the gather node is <undef, v, .., poison> and
// insertelement poison, v, 0 [+ permute]
@@ -4335,8 +4488,11 @@ BoUpSLP::getReorderingData(const TreeEntry &TE, bool TopToBottom) {
InstructionCost InsertIdxCost = TTI->getVectorInstrCost(
Instruction::InsertElement, Ty, TTI::TCK_RecipThroughput, Idx,
PoisonValue::get(Ty), *It);
- if (InsertFirstCost + PermuteCost < InsertIdxCost)
+ if (InsertFirstCost + PermuteCost < InsertIdxCost) {
+ OrdersType Order(Sz, Sz);
+ Order[Idx] = 0;
return std::move(Order);
+ }
}
}
if (isSplat(TE.Scalars))
@@ -4392,6 +4548,28 @@ void BoUpSLP::reorderNodeWithReuses(TreeEntry &TE, ArrayRef<int> Mask) const {
std::iota(It, std::next(It, Sz), 0);
}
+static void combineOrders(MutableArrayRef<unsigned> Order,
+ ArrayRef<unsigned> SecondaryOrder) {
+ assert((SecondaryOrder.empty() || Order.size() == SecondaryOrder.size()) &&
+ "Expected same size of orders");
+ unsigned Sz = Order.size();
+ SmallBitVector UsedIndices(Sz);
+ for (unsigned Idx : seq<unsigned>(0, Sz)) {
+ if (Order[Idx] != Sz)
+ UsedIndices.set(Order[Idx]);
+ }
+ if (SecondaryOrder.empty()) {
+ for (unsigned Idx : seq<unsigned>(0, Sz))
+ if (Order[Idx] == Sz && !UsedIndices.test(Idx))
+ Order[Idx] = Idx;
+ } else {
+ for (unsigned Idx : seq<unsigned>(0, Sz))
+ if (SecondaryOrder[Idx] != Sz && Order[Idx] == Sz &&
+ !UsedIndices.test(SecondaryOrder[Idx]))
+ Order[Idx] = SecondaryOrder[Idx];
+ }
+}
+
void BoUpSLP::reorderTopToBottom() {
// Maps VF to the graph nodes.
DenseMap<unsigned, SetVector<TreeEntry *>> VFToOrderedEntries;
@@ -4560,18 +4738,46 @@ void BoUpSLP::reorderTopToBottom() {
}
if (OrdersUses.empty())
continue;
+ auto IsIdentityOrder = [](ArrayRef<unsigned> Order) {
+ const unsigned Sz = Order.size();
+ for (unsigned Idx : seq<unsigned>(0, Sz))
+ if (Idx != Order[Idx] && Order[Idx] != Sz)
+ return false;
+ return true;
+ };
// Choose the most used order.
- ArrayRef<unsigned> BestOrder = OrdersUses.front().first;
- unsigned Cnt = OrdersUses.front().second;
- for (const auto &Pair : drop_begin(OrdersUses)) {
- if (Cnt < Pair.second || (Cnt == Pair.second && Pair.first.empty())) {
+ unsigned IdentityCnt = 0;
+ unsigned FilledIdentityCnt = 0;
+ OrdersType IdentityOrder(VF, VF);
+ for (auto &Pair : OrdersUses) {
+ if (Pair.first.empty() || IsIdentityOrder(Pair.first)) {
+ if (!Pair.first.empty())
+ FilledIdentityCnt += Pair.second;
+ IdentityCnt += Pair.second;
+ combineOrders(IdentityOrder, Pair.first);
+ }
+ }
+ MutableArrayRef<unsigned> BestOrder = IdentityOrder;
+ unsigned Cnt = IdentityCnt;
+ for (auto &Pair : OrdersUses) {
+      // Prefer the identity order. But if a filled (non-empty) identity order
+      // was found with the same number of uses as the new candidate order, we
+      // can choose this candidate order instead.
+ if (Cnt < Pair.second ||
+ (Cnt == IdentityCnt && IdentityCnt == FilledIdentityCnt &&
+ Cnt == Pair.second && !BestOrder.empty() &&
+ IsIdentityOrder(BestOrder))) {
+ combineOrders(Pair.first, BestOrder);
BestOrder = Pair.first;
Cnt = Pair.second;
+ } else {
+ combineOrders(BestOrder, Pair.first);
}
}
// Set order of the user node.
- if (BestOrder.empty())
+ if (IsIdentityOrder(BestOrder))
continue;
+ fixupOrderingIndices(BestOrder);
SmallVector<int> Mask;
inversePermutation(BestOrder, Mask);
SmallVector<int> MaskOrder(BestOrder.size(), PoisonMaskElem);
@@ -4685,7 +4891,7 @@ bool BoUpSLP::canReorderOperands(
void BoUpSLP::reorderBottomToTop(bool IgnoreReorder) {
SetVector<TreeEntry *> OrderedEntries;
- DenseMap<const TreeEntry *, OrdersType> GathersToOrders;
+ DenseSet<const TreeEntry *> GathersToOrders;
// Find all reorderable leaf nodes with the given VF.
// Currently these are vectorized loads, extracts without alternate operands +
// some gathering of extracts.
@@ -4700,7 +4906,7 @@ void BoUpSLP::reorderBottomToTop(bool IgnoreReorder) {
if (!(TE->State == TreeEntry::Vectorize ||
TE->State == TreeEntry::StridedVectorize) ||
!TE->ReuseShuffleIndices.empty())
- GathersToOrders.try_emplace(TE.get(), *CurrentOrder);
+ GathersToOrders.insert(TE.get());
}
}
@@ -4718,7 +4924,7 @@ void BoUpSLP::reorderBottomToTop(bool IgnoreReorder) {
if (!(TE->State == TreeEntry::Vectorize ||
TE->State == TreeEntry::StridedVectorize ||
(TE->State == TreeEntry::NeedToGather &&
- GathersToOrders.count(TE))) ||
+ GathersToOrders.contains(TE))) ||
TE->UserTreeIndices.empty() || !TE->ReuseShuffleIndices.empty() ||
!all_of(drop_begin(TE->UserTreeIndices),
[TE](const EdgeInfo &EI) {
@@ -4775,9 +4981,14 @@ void BoUpSLP::reorderBottomToTop(bool IgnoreReorder) {
const auto Order = [&]() -> const OrdersType {
if (OpTE->State == TreeEntry::NeedToGather ||
!OpTE->ReuseShuffleIndices.empty())
- return GathersToOrders.find(OpTE)->second;
+ return getReorderingData(*OpTE, /*TopToBottom=*/false)
+ .value_or(OrdersType(1));
return OpTE->ReorderIndices;
}();
+      // The order is only partial; skip it in favor of fully non-ordered
+      // orders.
+ if (Order.size() == 1)
+ continue;
unsigned NumOps = count_if(
Data.second, [OpTE](const std::pair<unsigned, TreeEntry *> &P) {
return P.second == OpTE;
@@ -4805,9 +5016,10 @@ void BoUpSLP::reorderBottomToTop(bool IgnoreReorder) {
(IgnoreReorder && TE->Idx == 0))
return true;
if (TE->State == TreeEntry::NeedToGather) {
- auto It = GathersToOrders.find(TE);
- if (It != GathersToOrders.end())
- return !It->second.empty();
+ if (GathersToOrders.contains(TE))
+ return !getReorderingData(*TE, /*TopToBottom=*/false)
+ .value_or(OrdersType(1))
+ .empty();
return true;
}
return false;
@@ -4839,21 +5051,49 @@ void BoUpSLP::reorderBottomToTop(bool IgnoreReorder) {
++Res.first->second;
}
}
- // Choose the best order.
- ArrayRef<unsigned> BestOrder = OrdersUses.front().first;
- unsigned Cnt = OrdersUses.front().second;
- for (const auto &Pair : drop_begin(OrdersUses)) {
- if (Cnt < Pair.second || (Cnt == Pair.second && Pair.first.empty())) {
+ if (OrdersUses.empty()) {
+ for (const std::pair<unsigned, TreeEntry *> &Op : Data.second)
+ OrderedEntries.remove(Op.second);
+ continue;
+ }
+ auto IsIdentityOrder = [](ArrayRef<unsigned> Order) {
+ const unsigned Sz = Order.size();
+ for (unsigned Idx : seq<unsigned>(0, Sz))
+ if (Idx != Order[Idx] && Order[Idx] != Sz)
+ return false;
+ return true;
+ };
+ // Choose the most used order.
+ unsigned IdentityCnt = 0;
+ unsigned VF = Data.second.front().second->getVectorFactor();
+ OrdersType IdentityOrder(VF, VF);
+ for (auto &Pair : OrdersUses) {
+ if (Pair.first.empty() || IsIdentityOrder(Pair.first)) {
+ IdentityCnt += Pair.second;
+ combineOrders(IdentityOrder, Pair.first);
+ }
+ }
+ MutableArrayRef<unsigned> BestOrder = IdentityOrder;
+ unsigned Cnt = IdentityCnt;
+ for (auto &Pair : OrdersUses) {
+        // Prefer the identity order. But if a filled (non-empty) identity
+        // order was found with the same number of uses as the new candidate
+        // order, we can choose this candidate order instead.
+ if (Cnt < Pair.second) {
+ combineOrders(Pair.first, BestOrder);
BestOrder = Pair.first;
Cnt = Pair.second;
+ } else {
+ combineOrders(BestOrder, Pair.first);
}
}
- // Set order of the user node (reordering of operands and user nodes).
- if (BestOrder.empty()) {
+ // Set order of the user node.
+ if (IsIdentityOrder(BestOrder)) {
for (const std::pair<unsigned, TreeEntry *> &Op : Data.second)
OrderedEntries.remove(Op.second);
continue;
}
+ fixupOrderingIndices(BestOrder);
// Erase operands from OrderedEntries list and adjust their orders.
VisitedOps.clear();
SmallVector<int> Mask;
@@ -7472,6 +7712,20 @@ class BoUpSLP::ShuffleCostEstimator : public BaseShuffleAnalysis {
}
V1 = Constant::getNullValue(
FixedVectorType::get(E->Scalars.front()->getType(), CommonVF));
+ // Not identity/broadcast? Try to see if the original vector is better.
+ if (!E->ReorderIndices.empty() && CommonVF == E->ReorderIndices.size() &&
+ CommonVF == CommonMask.size() &&
+ any_of(enumerate(CommonMask),
+ [](const auto &&P) {
+ return P.value() != PoisonMaskElem &&
+ static_cast<unsigned>(P.value()) != P.index();
+ }) &&
+ any_of(CommonMask,
+ [](int Idx) { return Idx != PoisonMaskElem && Idx != 0; })) {
+ SmallVector<int> ReorderMask;
+ inversePermutation(E->ReorderIndices, ReorderMask);
+ ::addMask(CommonMask, ReorderMask);
+ }
} else if (V1 && P2.isNull()) {
// Shuffle single vector.
CommonVF = cast<FixedVectorType>(V1->getType())->getNumElements();
@@ -9433,7 +9687,7 @@ BoUpSLP::tryToGatherExtractElements(SmallVectorImpl<Value *> &VL,
std::optional<TargetTransformInfo::ShuffleKind>
BoUpSLP::isGatherShuffledSingleRegisterEntry(
const TreeEntry *TE, ArrayRef<Value *> VL, MutableArrayRef<int> Mask,
- SmallVectorImpl<const TreeEntry *> &Entries, unsigned Part) {
+ SmallVectorImpl<const TreeEntry *> &Entries, unsigned Part, bool ForOrder) {
Entries.clear();
// TODO: currently checking only for Scalars in the tree entry, need to count
// reused elements too for better cost estimation.
@@ -9532,6 +9786,21 @@ BoUpSLP::isGatherShuffledSingleRegisterEntry(
VToTEs.insert(TEPtr);
}
if (const TreeEntry *VTE = getTreeEntry(V)) {
+ if (ForOrder) {
+ if (VTE->State != TreeEntry::Vectorize) {
+ auto It = MultiNodeScalars.find(V);
+ if (It == MultiNodeScalars.end())
+ continue;
+ VTE = *It->getSecond().begin();
+ // Iterate through all vectorized nodes.
+ auto *MIt = find_if(It->getSecond(), [](const TreeEntry *MTE) {
+ return MTE->State == TreeEntry::Vectorize;
+ });
+ if (MIt == It->getSecond().end())
+ continue;
+ VTE = *MIt;
+ }
+ }
Instruction &LastBundleInst = getLastInstructionInBundle(VTE);
if (&LastBundleInst == TEInsertPt || !CheckOrdering(&LastBundleInst))
continue;
@@ -9765,8 +10034,12 @@ BoUpSLP::isGatherShuffledSingleRegisterEntry(
// scalar in the list.
for (const std::pair<unsigned, int> &Pair : EntryLanes) {
unsigned Idx = Part * VL.size() + Pair.second;
- Mask[Idx] = Pair.first * VF +
- Entries[Pair.first]->findLaneForValue(VL[Pair.second]);
+ Mask[Idx] =
+ Pair.first * VF +
+ (ForOrder ? std::distance(
+ Entries[Pair.first]->Scalars.begin(),
+ find(Entries[Pair.first]->Scalars, VL[Pair.second]))
+ : Entries[Pair.first]->findLaneForValue(VL[Pair.second]));
IsIdentity &= Mask[Idx] == Pair.second;
}
switch (Entries.size()) {
@@ -9791,8 +10064,8 @@ BoUpSLP::isGatherShuffledSingleRegisterEntry(
SmallVector<std::optional<TargetTransformInfo::ShuffleKind>>
BoUpSLP::isGatherShuffledEntry(
const TreeEntry *TE, ArrayRef<Value *> VL, SmallVectorImpl<int> &Mask,
- SmallVectorImpl<SmallVector<const TreeEntry *>> &Entries,
- unsigned NumParts) {
+ SmallVectorImpl<SmallVector<const TreeEntry *>> &Entries, unsigned NumParts,
+ bool ForOrder) {
assert(NumParts > 0 && NumParts < VL.size() &&
"Expected positive number of registers.");
Entries.clear();
@@ -9810,7 +10083,8 @@ BoUpSLP::isGatherShuffledEntry(
ArrayRef<Value *> SubVL = VL.slice(Part * SliceSize, SliceSize);
SmallVectorImpl<const TreeEntry *> &SubEntries = Entries.emplace_back();
std::optional<TTI::ShuffleKind> SubRes =
- isGatherShuffledSingleRegisterEntry(TE, SubVL, Mask, SubEntries, Part);
+ isGatherShuffledSingleRegisterEntry(TE, SubVL, Mask, SubEntries, Part,
+ ForOrder);
if (!SubRes)
SubEntries.clear();
Res.push_back(SubRes);
diff --git a/llvm/lib/Transforms/Vectorize/VPlan.h b/llvm/lib/Transforms/Vectorize/VPlan.h
index 240d4bd628b0..a2a203c42005 100644
--- a/llvm/lib/Transforms/Vectorize/VPlan.h
+++ b/llvm/lib/Transforms/Vectorize/VPlan.h
@@ -385,9 +385,6 @@ struct VPTransformState {
VPValue2ValueTy VPValue2Value;
- /// Hold the canonical scalar IV of the vector loop (start=0, step=VF*UF).
- Value *CanonicalIV = nullptr;
-
/// Hold a pointer to InnerLoopVectorizer to reuse its IR generation methods.
InnerLoopVectorizer *ILV;
diff --git a/llvm/runtimes/CMakeLists.txt b/llvm/runtimes/CMakeLists.txt
index 8c48d85a4346..9b5e758b6ede 100644
--- a/llvm/runtimes/CMakeLists.txt
+++ b/llvm/runtimes/CMakeLists.txt
@@ -199,7 +199,7 @@ foreach(entry ${runtimes})
list(APPEND prefixes "LLVM_LIBC")
list(APPEND prefixes "LIBC_")
# The `libc` project may require '-DCUDAToolkit_ROOT' in GPU mode.
- if(LIBC_GPU_BUILD OR LIBC_GPU_ARCHITECTURES)
+ if(LLVM_LIBC_GPU_BUILD)
list(APPEND prefixes "CUDA")
endif()
endif()
@@ -424,7 +424,7 @@ if(runtimes)
endforeach()
endif()
if("libc" IN_LIST LLVM_ENABLE_PROJECTS AND
- (LLVM_LIBC_FULL_BUILD OR LIBC_GPU_BUILD OR LIBC_GPU_ARCHITECTURES))
+ (LLVM_LIBC_FULL_BUILD OR LLVM_LIBC_GPU_BUILD))
if(LIBC_HDRGEN_EXE)
set(hdrgen_exe ${LIBC_HDRGEN_EXE})
else()
@@ -441,7 +441,12 @@ if(runtimes)
set(libc_cmake_args "-DLIBC_HDRGEN_EXE=${hdrgen_exe}"
"-DLLVM_LIBC_FULL_BUILD=ON")
list(APPEND extra_deps ${hdrgen_deps})
- if(LIBC_GPU_BUILD OR LIBC_GPU_ARCHITECTURES)
+ if(LLVM_LIBC_GPU_BUILD)
+ list(APPEND libc_cmake_args "-DLLVM_LIBC_GPU_BUILD=ON")
+ # The `libc` project may require '-DCUDAToolkit_ROOT' in GPU mode.
+ if(CUDAToolkit_ROOT)
+ list(APPEND libc_cmake_args "-DCUDAToolkit_ROOT=${CUDAToolkit_ROOT}")
+ endif()
foreach(dep clang-offload-packager nvptx-arch amdgpu-arch)
if(TARGET ${dep})
list(APPEND extra_deps ${dep})
diff --git a/llvm/test/Analysis/CostModel/AArch64/arith-fp-frem.ll b/llvm/test/Analysis/CostModel/AArch64/arith-fp-frem.ll
index 20e0ef7ea342..63149adfa215 100644
--- a/llvm/test/Analysis/CostModel/AArch64/arith-fp-frem.ll
+++ b/llvm/test/Analysis/CostModel/AArch64/arith-fp-frem.ll
@@ -22,44 +22,44 @@ target triple = "aarch64-unknown-linux-gnu"
define void @frem_f64(ptr noalias %in.ptr, ptr noalias %out.ptr) {
; NEON-NO-VECLIB-LABEL: 'frem_f64'
-; NEON-NO-VECLIB: LV: Found an estimated cost of 2 for VF 1 For instruction: %res = frem double %in, %in
-; NEON-NO-VECLIB: LV: Found an estimated cost of 8 for VF 2 For instruction: %res = frem double %in, %in
+; NEON-NO-VECLIB: LV: Found an estimated cost of 10 for VF 1 For instruction: %res = frem double %in, %in
+; NEON-NO-VECLIB: LV: Found an estimated cost of 24 for VF 2 For instruction: %res = frem double %in, %in
;
; SVE-NO-VECLIB-LABEL: 'frem_f64'
-; SVE-NO-VECLIB: LV: Found an estimated cost of 2 for VF 1 For instruction: %res = frem double %in, %in
-; SVE-NO-VECLIB: LV: Found an estimated cost of 8 for VF 2 For instruction: %res = frem double %in, %in
+; SVE-NO-VECLIB: LV: Found an estimated cost of 10 for VF 1 For instruction: %res = frem double %in, %in
+; SVE-NO-VECLIB: LV: Found an estimated cost of 24 for VF 2 For instruction: %res = frem double %in, %in
; SVE-NO-VECLIB: LV: Found an estimated cost of Invalid for VF vscale x 1 For instruction: %res = frem double %in, %in
; SVE-NO-VECLIB: LV: Found an estimated cost of Invalid for VF vscale x 2 For instruction: %res = frem double %in, %in
;
; NEON-ARMPL-LABEL: 'frem_f64'
-; NEON-ARMPL: LV: Found an estimated cost of 2 for VF 1 For instruction: %res = frem double %in, %in
-; NEON-ARMPL: LV: Found an estimated cost of 8 for VF 2 For instruction: %res = frem double %in, %in
+; NEON-ARMPL: LV: Found an estimated cost of 10 for VF 1 For instruction: %res = frem double %in, %in
+; NEON-ARMPL: LV: Found an estimated cost of 10 for VF 2 For instruction: %res = frem double %in, %in
;
; NEON-SLEEF-LABEL: 'frem_f64'
-; NEON-SLEEF: LV: Found an estimated cost of 2 for VF 1 For instruction: %res = frem double %in, %in
-; NEON-SLEEF: LV: Found an estimated cost of 8 for VF 2 For instruction: %res = frem double %in, %in
+; NEON-SLEEF: LV: Found an estimated cost of 10 for VF 1 For instruction: %res = frem double %in, %in
+; NEON-SLEEF: LV: Found an estimated cost of 10 for VF 2 For instruction: %res = frem double %in, %in
;
; SVE-ARMPL-LABEL: 'frem_f64'
-; SVE-ARMPL: LV: Found an estimated cost of 2 for VF 1 For instruction: %res = frem double %in, %in
-; SVE-ARMPL: LV: Found an estimated cost of 8 for VF 2 For instruction: %res = frem double %in, %in
+; SVE-ARMPL: LV: Found an estimated cost of 10 for VF 1 For instruction: %res = frem double %in, %in
+; SVE-ARMPL: LV: Found an estimated cost of 10 for VF 2 For instruction: %res = frem double %in, %in
; SVE-ARMPL: LV: Found an estimated cost of Invalid for VF vscale x 1 For instruction: %res = frem double %in, %in
; SVE-ARMPL: LV: Found an estimated cost of 10 for VF vscale x 2 For instruction: %res = frem double %in, %in
;
; SVE-SLEEF-LABEL: 'frem_f64'
-; SVE-SLEEF: LV: Found an estimated cost of 2 for VF 1 For instruction: %res = frem double %in, %in
-; SVE-SLEEF: LV: Found an estimated cost of 8 for VF 2 For instruction: %res = frem double %in, %in
+; SVE-SLEEF: LV: Found an estimated cost of 10 for VF 1 For instruction: %res = frem double %in, %in
+; SVE-SLEEF: LV: Found an estimated cost of 10 for VF 2 For instruction: %res = frem double %in, %in
; SVE-SLEEF: LV: Found an estimated cost of Invalid for VF vscale x 1 For instruction: %res = frem double %in, %in
; SVE-SLEEF: LV: Found an estimated cost of 10 for VF vscale x 2 For instruction: %res = frem double %in, %in
;
; SVE-ARMPL-TAILFOLD-LABEL: 'frem_f64'
-; SVE-ARMPL-TAILFOLD: LV: Found an estimated cost of 2 for VF 1 For instruction: %res = frem double %in, %in
-; SVE-ARMPL-TAILFOLD: LV: Found an estimated cost of 8 for VF 2 For instruction: %res = frem double %in, %in
+; SVE-ARMPL-TAILFOLD: LV: Found an estimated cost of 10 for VF 1 For instruction: %res = frem double %in, %in
+; SVE-ARMPL-TAILFOLD: LV: Found an estimated cost of 10 for VF 2 For instruction: %res = frem double %in, %in
; SVE-ARMPL-TAILFOLD: LV: Found an estimated cost of Invalid for VF vscale x 1 For instruction: %res = frem double %in, %in
; SVE-ARMPL-TAILFOLD: LV: Found an estimated cost of 10 for VF vscale x 2 For instruction: %res = frem double %in, %in
;
; SVE-SLEEF-TAILFOLD-LABEL: 'frem_f64'
-; SVE-SLEEF-TAILFOLD: LV: Found an estimated cost of 2 for VF 1 For instruction: %res = frem double %in, %in
-; SVE-SLEEF-TAILFOLD: LV: Found an estimated cost of 8 for VF 2 For instruction: %res = frem double %in, %in
+; SVE-SLEEF-TAILFOLD: LV: Found an estimated cost of 10 for VF 1 For instruction: %res = frem double %in, %in
+; SVE-SLEEF-TAILFOLD: LV: Found an estimated cost of 10 for VF 2 For instruction: %res = frem double %in, %in
; SVE-SLEEF-TAILFOLD: LV: Found an estimated cost of Invalid for VF vscale x 1 For instruction: %res = frem double %in, %in
; SVE-SLEEF-TAILFOLD: LV: Found an estimated cost of 10 for VF vscale x 2 For instruction: %res = frem double %in, %in
;
@@ -83,55 +83,55 @@ define void @frem_f64(ptr noalias %in.ptr, ptr noalias %out.ptr) {
define void @frem_f32(ptr noalias %in.ptr, ptr noalias %out.ptr) {
; NEON-NO-VECLIB-LABEL: 'frem_f32'
-; NEON-NO-VECLIB: LV: Found an estimated cost of 2 for VF 1 For instruction: %res = frem float %in, %in
-; NEON-NO-VECLIB: LV: Found an estimated cost of 8 for VF 2 For instruction: %res = frem float %in, %in
-; NEON-NO-VECLIB: LV: Found an estimated cost of 20 for VF 4 For instruction: %res = frem float %in, %in
+; NEON-NO-VECLIB: LV: Found an estimated cost of 10 for VF 1 For instruction: %res = frem float %in, %in
+; NEON-NO-VECLIB: LV: Found an estimated cost of 24 for VF 2 For instruction: %res = frem float %in, %in
+; NEON-NO-VECLIB: LV: Found an estimated cost of 52 for VF 4 For instruction: %res = frem float %in, %in
;
; SVE-NO-VECLIB-LABEL: 'frem_f32'
-; SVE-NO-VECLIB: LV: Found an estimated cost of 2 for VF 1 For instruction: %res = frem float %in, %in
-; SVE-NO-VECLIB: LV: Found an estimated cost of 8 for VF 2 For instruction: %res = frem float %in, %in
-; SVE-NO-VECLIB: LV: Found an estimated cost of 20 for VF 4 For instruction: %res = frem float %in, %in
+; SVE-NO-VECLIB: LV: Found an estimated cost of 10 for VF 1 For instruction: %res = frem float %in, %in
+; SVE-NO-VECLIB: LV: Found an estimated cost of 24 for VF 2 For instruction: %res = frem float %in, %in
+; SVE-NO-VECLIB: LV: Found an estimated cost of 52 for VF 4 For instruction: %res = frem float %in, %in
; SVE-NO-VECLIB: LV: Found an estimated cost of Invalid for VF vscale x 1 For instruction: %res = frem float %in, %in
; SVE-NO-VECLIB: LV: Found an estimated cost of Invalid for VF vscale x 2 For instruction: %res = frem float %in, %in
; SVE-NO-VECLIB: LV: Found an estimated cost of Invalid for VF vscale x 4 For instruction: %res = frem float %in, %in
;
; NEON-ARMPL-LABEL: 'frem_f32'
-; NEON-ARMPL: LV: Found an estimated cost of 2 for VF 1 For instruction: %res = frem float %in, %in
-; NEON-ARMPL: LV: Found an estimated cost of 8 for VF 2 For instruction: %res = frem float %in, %in
+; NEON-ARMPL: LV: Found an estimated cost of 10 for VF 1 For instruction: %res = frem float %in, %in
+; NEON-ARMPL: LV: Found an estimated cost of 24 for VF 2 For instruction: %res = frem float %in, %in
; NEON-ARMPL: LV: Found an estimated cost of 10 for VF 4 For instruction: %res = frem float %in, %in
;
; NEON-SLEEF-LABEL: 'frem_f32'
-; NEON-SLEEF: LV: Found an estimated cost of 2 for VF 1 For instruction: %res = frem float %in, %in
-; NEON-SLEEF: LV: Found an estimated cost of 8 for VF 2 For instruction: %res = frem float %in, %in
+; NEON-SLEEF: LV: Found an estimated cost of 10 for VF 1 For instruction: %res = frem float %in, %in
+; NEON-SLEEF: LV: Found an estimated cost of 24 for VF 2 For instruction: %res = frem float %in, %in
; NEON-SLEEF: LV: Found an estimated cost of 10 for VF 4 For instruction: %res = frem float %in, %in
;
; SVE-ARMPL-LABEL: 'frem_f32'
-; SVE-ARMPL: LV: Found an estimated cost of 2 for VF 1 For instruction: %res = frem float %in, %in
-; SVE-ARMPL: LV: Found an estimated cost of 8 for VF 2 For instruction: %res = frem float %in, %in
+; SVE-ARMPL: LV: Found an estimated cost of 10 for VF 1 For instruction: %res = frem float %in, %in
+; SVE-ARMPL: LV: Found an estimated cost of 24 for VF 2 For instruction: %res = frem float %in, %in
; SVE-ARMPL: LV: Found an estimated cost of 10 for VF 4 For instruction: %res = frem float %in, %in
; SVE-ARMPL: LV: Found an estimated cost of Invalid for VF vscale x 1 For instruction: %res = frem float %in, %in
; SVE-ARMPL: LV: Found an estimated cost of Invalid for VF vscale x 2 For instruction: %res = frem float %in, %in
; SVE-ARMPL: LV: Found an estimated cost of 10 for VF vscale x 4 For instruction: %res = frem float %in, %in
;
; SVE-SLEEF-LABEL: 'frem_f32'
-; SVE-SLEEF: LV: Found an estimated cost of 2 for VF 1 For instruction: %res = frem float %in, %in
-; SVE-SLEEF: LV: Found an estimated cost of 8 for VF 2 For instruction: %res = frem float %in, %in
+; SVE-SLEEF: LV: Found an estimated cost of 10 for VF 1 For instruction: %res = frem float %in, %in
+; SVE-SLEEF: LV: Found an estimated cost of 24 for VF 2 For instruction: %res = frem float %in, %in
; SVE-SLEEF: LV: Found an estimated cost of 10 for VF 4 For instruction: %res = frem float %in, %in
; SVE-SLEEF: LV: Found an estimated cost of Invalid for VF vscale x 1 For instruction: %res = frem float %in, %in
; SVE-SLEEF: LV: Found an estimated cost of Invalid for VF vscale x 2 For instruction: %res = frem float %in, %in
; SVE-SLEEF: LV: Found an estimated cost of 10 for VF vscale x 4 For instruction: %res = frem float %in, %in
;
; SVE-ARMPL-TAILFOLD-LABEL: 'frem_f32'
-; SVE-ARMPL-TAILFOLD: LV: Found an estimated cost of 2 for VF 1 For instruction: %res = frem float %in, %in
-; SVE-ARMPL-TAILFOLD: LV: Found an estimated cost of 8 for VF 2 For instruction: %res = frem float %in, %in
+; SVE-ARMPL-TAILFOLD: LV: Found an estimated cost of 10 for VF 1 For instruction: %res = frem float %in, %in
+; SVE-ARMPL-TAILFOLD: LV: Found an estimated cost of 24 for VF 2 For instruction: %res = frem float %in, %in
; SVE-ARMPL-TAILFOLD: LV: Found an estimated cost of 10 for VF 4 For instruction: %res = frem float %in, %in
; SVE-ARMPL-TAILFOLD: LV: Found an estimated cost of Invalid for VF vscale x 1 For instruction: %res = frem float %in, %in
; SVE-ARMPL-TAILFOLD: LV: Found an estimated cost of Invalid for VF vscale x 2 For instruction: %res = frem float %in, %in
; SVE-ARMPL-TAILFOLD: LV: Found an estimated cost of 10 for VF vscale x 4 For instruction: %res = frem float %in, %in
;
; SVE-SLEEF-TAILFOLD-LABEL: 'frem_f32'
-; SVE-SLEEF-TAILFOLD: LV: Found an estimated cost of 2 for VF 1 For instruction: %res = frem float %in, %in
-; SVE-SLEEF-TAILFOLD: LV: Found an estimated cost of 8 for VF 2 For instruction: %res = frem float %in, %in
+; SVE-SLEEF-TAILFOLD: LV: Found an estimated cost of 10 for VF 1 For instruction: %res = frem float %in, %in
+; SVE-SLEEF-TAILFOLD: LV: Found an estimated cost of 24 for VF 2 For instruction: %res = frem float %in, %in
; SVE-SLEEF-TAILFOLD: LV: Found an estimated cost of 10 for VF 4 For instruction: %res = frem float %in, %in
; SVE-SLEEF-TAILFOLD: LV: Found an estimated cost of Invalid for VF vscale x 1 For instruction: %res = frem float %in, %in
; SVE-SLEEF-TAILFOLD: LV: Found an estimated cost of Invalid for VF vscale x 2 For instruction: %res = frem float %in, %in
diff --git a/llvm/test/Analysis/CostModel/AArch64/arith-fp.ll b/llvm/test/Analysis/CostModel/AArch64/arith-fp.ll
index c352892354fc..497ade4f2f61 100644
--- a/llvm/test/Analysis/CostModel/AArch64/arith-fp.ll
+++ b/llvm/test/Analysis/CostModel/AArch64/arith-fp.ll
@@ -197,17 +197,17 @@ define i32 @fdiv(i32 %arg) {
define i32 @frem(i32 %arg) {
; CHECK-LABEL: 'frem'
-; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %F16 = frem half undef, undef
-; CHECK-NEXT: Cost Model: Found an estimated cost of 20 for instruction: %V4F16 = frem <4 x half> undef, undef
-; CHECK-NEXT: Cost Model: Found an estimated cost of 44 for instruction: %V8F16 = frem <8 x half> undef, undef
-; CHECK-NEXT: Cost Model: Found an estimated cost of 88 for instruction: %V16F16 = frem <16 x half> undef, undef
-; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %F32 = frem float undef, undef
-; CHECK-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %V2F32 = frem <2 x float> undef, undef
-; CHECK-NEXT: Cost Model: Found an estimated cost of 20 for instruction: %V4F32 = frem <4 x float> undef, undef
-; CHECK-NEXT: Cost Model: Found an estimated cost of 40 for instruction: %V8F32 = frem <8 x float> undef, undef
-; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %F64 = frem double undef, undef
-; CHECK-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %V2F64 = frem <2 x double> undef, undef
-; CHECK-NEXT: Cost Model: Found an estimated cost of 16 for instruction: %V4F64 = frem <4 x double> undef, undef
+; CHECK-NEXT: Cost Model: Found an estimated cost of 10 for instruction: %F16 = frem half undef, undef
+; CHECK-NEXT: Cost Model: Found an estimated cost of 52 for instruction: %V4F16 = frem <4 x half> undef, undef
+; CHECK-NEXT: Cost Model: Found an estimated cost of 108 for instruction: %V8F16 = frem <8 x half> undef, undef
+; CHECK-NEXT: Cost Model: Found an estimated cost of 216 for instruction: %V16F16 = frem <16 x half> undef, undef
+; CHECK-NEXT: Cost Model: Found an estimated cost of 10 for instruction: %F32 = frem float undef, undef
+; CHECK-NEXT: Cost Model: Found an estimated cost of 24 for instruction: %V2F32 = frem <2 x float> undef, undef
+; CHECK-NEXT: Cost Model: Found an estimated cost of 52 for instruction: %V4F32 = frem <4 x float> undef, undef
+; CHECK-NEXT: Cost Model: Found an estimated cost of 104 for instruction: %V8F32 = frem <8 x float> undef, undef
+; CHECK-NEXT: Cost Model: Found an estimated cost of 10 for instruction: %F64 = frem double undef, undef
+; CHECK-NEXT: Cost Model: Found an estimated cost of 24 for instruction: %V2F64 = frem <2 x double> undef, undef
+; CHECK-NEXT: Cost Model: Found an estimated cost of 48 for instruction: %V4F64 = frem <4 x double> undef, undef
; CHECK-NEXT: Cost Model: Found an estimated cost of 0 for instruction: ret i32 undef
;
%F16 = frem half undef, undef
diff --git a/llvm/test/Analysis/ScalarEvolution/exit-count-select-safe.ll b/llvm/test/Analysis/ScalarEvolution/exit-count-select-safe.ll
index 2af1309cab7e..d3cec77982af 100644
--- a/llvm/test/Analysis/ScalarEvolution/exit-count-select-safe.ll
+++ b/llvm/test/Analysis/ScalarEvolution/exit-count-select-safe.ll
@@ -177,9 +177,9 @@ define i32 @logical_or_3ops_redundant_uminseq_operand(i32 %n, i32 %m, i32 %k) {
; CHECK-NEXT: %umin = call i32 @llvm.umin.i32(i32 %n, i32 %m)
; CHECK-NEXT: --> (%n umin %m) U: full-set S: full-set Exits: (%n umin %m) LoopDispositions: { %loop: Invariant }
; CHECK-NEXT: %cond_p3 = select i1 %cond_p0, i1 true, i1 %cond_p1
-; CHECK-NEXT: --> (true + ((true + %cond_p0) umin_seq (true + %cond_p1))) U: full-set S: full-set Exits: <<Unknown>> LoopDispositions: { %loop: Variant }
+; CHECK-NEXT: --> (true + ((true + %cond_p0) umin (true + %cond_p1))) U: full-set S: full-set Exits: <<Unknown>> LoopDispositions: { %loop: Variant }
; CHECK-NEXT: %cond = select i1 %cond_p3, i1 true, i1 %cond_p2
-; CHECK-NEXT: --> (true + ((true + %cond_p0) umin_seq (true + %cond_p1) umin_seq (true + %cond_p2))) U: full-set S: full-set Exits: <<Unknown>> LoopDispositions: { %loop: Variant }
+; CHECK-NEXT: --> (true + (((true + %cond_p0) umin (true + %cond_p1)) umin_seq (true + %cond_p2))) U: full-set S: full-set Exits: <<Unknown>> LoopDispositions: { %loop: Variant }
; CHECK-NEXT: Determining loop execution counts for: @logical_or_3ops_redundant_uminseq_operand
; CHECK-NEXT: Loop %loop: backedge-taken count is ((%n umin %m) umin_seq %k)
; CHECK-NEXT: Loop %loop: constant max backedge-taken count is -1
diff --git a/llvm/test/Bitcode/constant-splat.ll b/llvm/test/Bitcode/constant-splat.ll
new file mode 100644
index 000000000000..2bcc3ddf3e4f
--- /dev/null
+++ b/llvm/test/Bitcode/constant-splat.ll
@@ -0,0 +1,76 @@
+; RUN: llvm-as -use-constant-int-for-fixed-length-splat \
+; RUN: -use-constant-fp-for-fixed-length-splat \
+; RUN: -use-constant-int-for-scalable-splat \
+; RUN: -use-constant-fp-for-scalable-splat \
+; RUN: < %s | llvm-dis -use-constant-int-for-fixed-length-splat \
+; RUN: -use-constant-fp-for-fixed-length-splat \
+; RUN: -use-constant-int-for-scalable-splat \
+; RUN: -use-constant-fp-for-scalable-splat \
+; RUN: | FileCheck %s
+
+; CHECK: @constant.splat.i1 = constant <1 x i1> splat (i1 true)
+@constant.splat.i1 = constant <1 x i1> splat (i1 true)
+
+; CHECK: @constant.splat.i32 = constant <5 x i32> splat (i32 7)
+@constant.splat.i32 = constant <5 x i32> splat (i32 7)
+
+; CHECK: @constant.splat.i128 = constant <7 x i128> splat (i128 85070591730234615870450834276742070272)
+@constant.splat.i128 = constant <7 x i128> splat (i128 85070591730234615870450834276742070272)
+
+; CHECK: @constant.splat.f16 = constant <2 x half> splat (half 0xHBC00)
+@constant.splat.f16 = constant <2 x half> splat (half 0xHBC00)
+
+; CHECK: @constant.splat.f32 = constant <4 x float> splat (float -2.000000e+00)
+@constant.splat.f32 = constant <4 x float> splat (float -2.000000e+00)
+
+; CHECK: @constant.splat.f64 = constant <6 x double> splat (double -3.000000e+00)
+@constant.splat.f64 = constant <6 x double> splat (double -3.000000e+00)
+
+; CHECK: @constant.splat.128 = constant <8 x fp128> splat (fp128 0xL00000000000000018000000000000000)
+@constant.splat.128 = constant <8 x fp128> splat (fp128 0xL00000000000000018000000000000000)
+
+; CHECK: @constant.splat.bf16 = constant <1 x bfloat> splat (bfloat 0xRC0A0)
+@constant.splat.bf16 = constant <1 x bfloat> splat (bfloat 0xRC0A0)
+
+; CHECK: @constant.splat.x86_fp80 = constant <3 x x86_fp80> splat (x86_fp80 0xK4000C8F5C28F5C28F800)
+@constant.splat.x86_fp80 = constant <3 x x86_fp80> splat (x86_fp80 0xK4000C8F5C28F5C28F800)
+
+; CHECK: @constant.splat.ppc_fp128 = constant <7 x ppc_fp128> splat (ppc_fp128 0xM80000000000000000000000000000000)
+@constant.splat.ppc_fp128 = constant <7 x ppc_fp128> splat (ppc_fp128 0xM80000000000000000000000000000000)
+
+define void @add_fixed_length_vector_splat_i32(<4 x i32> %a) {
+; CHECK: %add = add <4 x i32> %a, splat (i32 137)
+ %add = add <4 x i32> %a, splat (i32 137)
+ ret void
+}
+
+define <4 x i32> @ret_fixed_length_vector_splat_i32() {
+; CHECK: ret <4 x i32> splat (i32 56)
+ ret <4 x i32> splat (i32 56)
+}
+
+define void @add_fixed_length_vector_splat_double(<vscale x 2 x double> %a) {
+; CHECK: %add = fadd <vscale x 2 x double> %a, splat (double 5.700000e+00)
+ %add = fadd <vscale x 2 x double> %a, splat (double 5.700000e+00)
+ ret void
+}
+
+define <vscale x 4 x i32> @ret_scalable_vector_splat_i32() {
+; CHECK: ret <vscale x 4 x i32> splat (i32 78)
+ ret <vscale x 4 x i32> splat (i32 78)
+}
+
+define <4 x i32> @canonical_constant_vector() {
+; CHECK: ret <4 x i32> splat (i32 7)
+ ret <4 x i32> <i32 7, i32 7, i32 7, i32 7>
+}
+
+define <4 x i32> @canonical_fixed_length_vector_zero() {
+; CHECK: ret <4 x i32> zeroinitializer
+ ret <4 x i32> zeroinitializer
+}
+
+define <vscale x 4 x i32> @canonical_scalable_length_vector_zero() {
+; CHECK: ret <vscale x 4 x i32> zeroinitializer
+ ret <vscale x 4 x i32> zeroinitializer
+}
diff --git a/llvm/test/CodeGen/AArch64/16bit-float-promotion-with-nofp.ll b/llvm/test/CodeGen/AArch64/16bit-float-promotion-with-nofp.ll
new file mode 100644
index 000000000000..bfe9ab8424bb
--- /dev/null
+++ b/llvm/test/CodeGen/AArch64/16bit-float-promotion-with-nofp.ll
@@ -0,0 +1,31 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 4
+; RUN: llc -mtriple=aarch64 -mattr=-fp-armv8 -o - %s | FileCheck %s
+
+define half @f2h(float %a) {
+; CHECK-LABEL: f2h:
+; CHECK: // %bb.0: // %entry
+; CHECK-NEXT: str x30, [sp, #-16]! // 8-byte Folded Spill
+; CHECK-NEXT: .cfi_def_cfa_offset 16
+; CHECK-NEXT: .cfi_offset w30, -16
+; CHECK-NEXT: bl __gnu_f2h_ieee
+; CHECK-NEXT: ldr x30, [sp], #16 // 8-byte Folded Reload
+; CHECK-NEXT: ret
+entry:
+ %0 = fptrunc float %a to half
+ ret half %0
+}
+
+define bfloat @f2bfloat(float %a) {
+; CHECK-LABEL: f2bfloat:
+; CHECK: // %bb.0: // %entry
+; CHECK-NEXT: str x30, [sp, #-16]! // 8-byte Folded Spill
+; CHECK-NEXT: .cfi_def_cfa_offset 16
+; CHECK-NEXT: .cfi_offset w30, -16
+; CHECK-NEXT: bl __truncsfbf2
+; CHECK-NEXT: ldr x30, [sp], #16 // 8-byte Folded Reload
+; CHECK-NEXT: ret
+entry:
+ %0 = fptrunc float %a to bfloat
+ ret bfloat %0
+}
+
diff --git a/llvm/test/CodeGen/AArch64/addsub.ll b/llvm/test/CodeGen/AArch64/addsub.ll
index 1b86fe6c707c..20215fe91469 100644
--- a/llvm/test/CodeGen/AArch64/addsub.ll
+++ b/llvm/test/CodeGen/AArch64/addsub.ll
@@ -662,17 +662,13 @@ define dso_local i32 @_extract_crng_crng() {
; CHECK-NEXT: cmn x8, #1272
; CHECK-NEXT: b.pl .LBB36_3
; CHECK-NEXT: .LBB36_2: // %if.then
-; CHECK-NEXT: str x30, [sp, #-16]! // 8-byte Folded Spill
-; CHECK-NEXT: .cfi_def_cfa_offset 16
-; CHECK-NEXT: .cfi_offset w30, -16
; CHECK-NEXT: adrp x8, primary_crng
; CHECK-NEXT: ldr w8, [x8, :lo12:primary_crng]
; CHECK-NEXT: cmp w8, #0
; CHECK-NEXT: adrp x8, input_pool
; CHECK-NEXT: add x8, x8, :lo12:input_pool
; CHECK-NEXT: csel x0, xzr, x8, eq
-; CHECK-NEXT: bl crng_reseed
-; CHECK-NEXT: ldr x30, [sp], #16 // 8-byte Folded Reload
+; CHECK-NEXT: b crng_reseed
; CHECK-NEXT: .LBB36_3: // %if.end
; CHECK-NEXT: ret
entry:
diff --git a/llvm/test/CodeGen/AArch64/arm64ec-entry-thunks-local-linkage.ll b/llvm/test/CodeGen/AArch64/arm64ec-entry-thunks-local-linkage.ll
index 00ae34bf4b00..217f08be0521 100644
--- a/llvm/test/CodeGen/AArch64/arm64ec-entry-thunks-local-linkage.ll
+++ b/llvm/test/CodeGen/AArch64/arm64ec-entry-thunks-local-linkage.ll
@@ -2,7 +2,8 @@
; Validates when local linkage functions get a thunk generated.
-; Being called does not cause a thunk to be generated.
+; Being called does not cause a thunk to be generated or the symbol name to be mangled.
+; CHECK-NOT: "#does_not_have_addr_taken":
; CHECK-NOT: $ientry_thunk$cdecl$v$f;
define internal void @does_not_have_addr_taken(float) nounwind {
ret void
@@ -12,7 +13,8 @@ define void @calls_does_not_have_addr_taken() nounwind {
ret void
}
-; Having an address taken does cause a thunk to be generated.
+; Having an address taken does cause a thunk to be generated and the symbol name to be mangled.
+; CHECK: "#has_addr_taken":
; CHECK: $ientry_thunk$cdecl$v$i8;
define internal void @has_addr_taken(i64) nounwind {
ret void
diff --git a/llvm/test/CodeGen/AArch64/callbr-asm-obj-file.ll b/llvm/test/CodeGen/AArch64/callbr-asm-obj-file.ll
index 94041bf00218..e601f03d524a 100644
--- a/llvm/test/CodeGen/AArch64/callbr-asm-obj-file.ll
+++ b/llvm/test/CodeGen/AArch64/callbr-asm-obj-file.ll
@@ -40,7 +40,7 @@ declare dso_local i32 @g(...) local_unnamed_addr
declare dso_local i32 @i(...) local_unnamed_addr
; CHECK-LABEL: <test2>:
-; CHECK: bl {{.*}} <test2+0x18>
+; CHECK: b {{.*}} <test2+0x1c>
; CHECK-LABEL: <$d.5>:
; CHECK-LABEL: <$x.6>:
; CHECK-NEXT: b {{.*}} <test2+0x18>
diff --git a/llvm/test/CodeGen/AArch64/framelayout-sve-calleesaves-fix.mir b/llvm/test/CodeGen/AArch64/framelayout-sve-calleesaves-fix.mir
index 3dba21d59b40..aed314507361 100644
--- a/llvm/test/CodeGen/AArch64/framelayout-sve-calleesaves-fix.mir
+++ b/llvm/test/CodeGen/AArch64/framelayout-sve-calleesaves-fix.mir
@@ -19,8 +19,8 @@
; CHECK-NEXT: // implicit-def: $p4
; CHECK-NEXT: addvl sp, sp, #1
; CHECK-NEXT: .cfi_escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x10, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 16 * VG
- ; CHECK-NEXT: ldr p4, [sp, #7, mul vl] // 2-byte Folded Reload
; CHECK-NEXT: ldr z8, [sp, #1, mul vl] // 16-byte Folded Reload
+ ; CHECK-NEXT: ldr p4, [sp, #7, mul vl] // 2-byte Folded Reload
; CHECK-NEXT: addvl sp, sp, #2
; CHECK-NEXT: .cfi_def_cfa wsp, 16
; CHECK-NEXT: .cfi_restore z8
diff --git a/llvm/test/CodeGen/AArch64/framelayout-sve.mir b/llvm/test/CodeGen/AArch64/framelayout-sve.mir
index 213d7919e4a7..f7920e595e44 100644
--- a/llvm/test/CodeGen/AArch64/framelayout-sve.mir
+++ b/llvm/test/CodeGen/AArch64/framelayout-sve.mir
@@ -772,9 +772,9 @@ body: |
# CHECK: $sp = frame-destroy ADDXri $sp, 32, 0
# CHECK-NEXT: frame-destroy CFI_INSTRUCTION escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x18, 0x92, 0x2e, 0x00, 0x1e, 0x22
-# CHECK-NEXT: $z10 = frame-destroy LDR_ZXI $sp, 0
+# CHECK-NEXT: $z10 = frame-destroy LDR_ZXI $sp, 0
# CHECK-NEXT: $z9 = frame-destroy LDR_ZXI $sp, 1
-# CHECK-NEXT: $z8 = frame-destroy LDR_ZXI $sp, 2
+# CHECK-NEXT: $z8 = frame-destroy LDR_ZXI $sp, 2
# CHECK-NEXT: $sp = frame-destroy ADDVL_XXI $sp, 3
# CHECK-NEXT: frame-destroy CFI_INSTRUCTION def_cfa $wsp, 16
# CHECK-NEXT: frame-destroy CFI_INSTRUCTION restore $z8
@@ -873,14 +873,14 @@ body: |
# CHECK-NEXT: frame-destroy CFI_INSTRUCTION escape 0x0f, 0x0d, 0x8f, 0x00, 0x11, 0x20, 0x22, 0x11, 0x98, 0x01, 0x92, 0x2e, 0x00, 0x1e, 0x22
# CHECK: $sp = frame-destroy ADDVL_XXI $sp, 1
# CHECK-NEXT: frame-destroy CFI_INSTRUCTION escape 0x0f, 0x0d, 0x8f, 0x00, 0x11, 0x20, 0x22, 0x11, 0x90, 0x01, 0x92, 0x2e, 0x00, 0x1e, 0x22
-# CHECK: $p15 = frame-destroy LDR_PXI $sp, 4
-# CHECK: $p14 = frame-destroy LDR_PXI $sp, 5
-# CHECK: $p5 = frame-destroy LDR_PXI $sp, 14
-# CHECK: $p4 = frame-destroy LDR_PXI $sp, 15
# CHECK: $z23 = frame-destroy LDR_ZXI $sp, 2
# CHECK: $z22 = frame-destroy LDR_ZXI $sp, 3
# CHECK: $z9 = frame-destroy LDR_ZXI $sp, 16
# CHECK: $z8 = frame-destroy LDR_ZXI $sp, 17
+# CHECK: $p15 = frame-destroy LDR_PXI $sp, 4
+# CHECK: $p14 = frame-destroy LDR_PXI $sp, 5
+# CHECK: $p5 = frame-destroy LDR_PXI $sp, 14
+# CHECK: $p4 = frame-destroy LDR_PXI $sp, 15
# CHECK: $sp = frame-destroy ADDVL_XXI $sp, 18
# CHECK-NEXT: frame-destroy CFI_INSTRUCTION def_cfa $wsp, 32
# CHECK-NEXT: frame-destroy CFI_INSTRUCTION restore $z8
@@ -1037,14 +1037,14 @@ body: |
# CHECK-NEXT: $sp = frame-setup ANDXri killed $[[TMP]]
# CHECK: $sp = frame-destroy ADDVL_XXI $fp, -18
+# CHECK: $z23 = frame-destroy LDR_ZXI $sp, 2
+# CHECK-NEXT: $z22 = frame-destroy LDR_ZXI $sp, 3
+# CHECK: $z9 = frame-destroy LDR_ZXI $sp, 16
+# CHECK-NEXT: $z8 = frame-destroy LDR_ZXI $sp, 17
# CHECK-NEXT: $p15 = frame-destroy LDR_PXI $sp, 4
# CHECK-NEXT: $p14 = frame-destroy LDR_PXI $sp, 5
# CHECK: $p5 = frame-destroy LDR_PXI $sp, 14
# CHECK-NEXT: $p4 = frame-destroy LDR_PXI $sp, 15
-# CHECK-NEXT: $z23 = frame-destroy LDR_ZXI $sp, 2
-# CHECK-NEXT: $z22 = frame-destroy LDR_ZXI $sp, 3
-# CHECK: $z9 = frame-destroy LDR_ZXI $sp, 16
-# CHECK-NEXT: $z8 = frame-destroy LDR_ZXI $sp, 17
# CHECK-NEXT: frame-destroy CFI_INSTRUCTION restore $z8
# CHECK-NEXT: frame-destroy CFI_INSTRUCTION restore $z9
# CHECK-NEXT: frame-destroy CFI_INSTRUCTION restore $z10
@@ -1198,10 +1198,10 @@ body: |
# CHECK: $sp = frame-destroy ADDVL_XXI $sp, 7
# CHECK-NEXT: frame-destroy CFI_INSTRUCTION escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x18, 0x92, 0x2e, 0x00, 0x1e, 0x22
-# CHECK-NEXT: $p15 = frame-destroy LDR_PXI $sp, 6
-# CHECK-NEXT: $p4 = frame-destroy LDR_PXI $sp, 7
# CHECK-NEXT: $z23 = frame-destroy LDR_ZXI $sp, 1
# CHECK-NEXT: $z8 = frame-destroy LDR_ZXI $sp, 2
+# CHECK-NEXT: $p15 = frame-destroy LDR_PXI $sp, 6
+# CHECK-NEXT: $p4 = frame-destroy LDR_PXI $sp, 7
# CHECK-NEXT: $sp = frame-destroy ADDVL_XXI $sp, 3
# CHECK-NEXT: frame-destroy CFI_INSTRUCTION def_cfa $wsp, 16
# CHECK-NEXT: frame-destroy CFI_INSTRUCTION restore $z8
diff --git a/llvm/test/CodeGen/AArch64/reverse-csr-restore-seq.mir b/llvm/test/CodeGen/AArch64/reverse-csr-restore-seq.mir
deleted file mode 100644
index de4baec50e0c..000000000000
--- a/llvm/test/CodeGen/AArch64/reverse-csr-restore-seq.mir
+++ /dev/null
@@ -1,101 +0,0 @@
-# RUN: llc -run-pass=prologepilog -reverse-csr-restore-seq -o - -mtriple=aarch64-- %s | FileCheck %s --check-prefixes=CHECK
-# RUN: llc -start-before=prologepilog -stop-after=aarch64-ldst-opt -reverse-csr-restore-seq -o - -mtriple=aarch64-- %s | FileCheck %s --check-prefixes=CHECK
-#
---- |
-
- define void @foo() nounwind { entry: unreachable }
-
- define void @bar() nounwind { entry: unreachable }
-
- define void @baz() nounwind { entry: unreachable }
-
-...
----
-name: foo
-# CHECK-LABEL: name: foo
-tracksRegLiveness: true
-body: |
- bb.0:
- $x19 = IMPLICIT_DEF
- $x20 = IMPLICIT_DEF
- $x21 = IMPLICIT_DEF
- $x22 = IMPLICIT_DEF
- $x23 = IMPLICIT_DEF
- $x24 = IMPLICIT_DEF
- $x25 = IMPLICIT_DEF
- $x26 = IMPLICIT_DEF
-
- ; The local stack size is 0, so the last ldp in the sequence will also
- ; restore the stack.
- ; CHECK: $x24, $x23 = frame-destroy LDPXi $sp, 2
- ; CHECK-NEXT: $x22, $x21 = frame-destroy LDPXi $sp, 4
- ; CHECK-NEXT: $x20, $x19 = frame-destroy LDPXi $sp, 6
-
- ; The ldp and the stack increment get merged even before
- ; the load-store optimizer.
- ; CHECK-NEXT: early-clobber $sp, $x26, $x25 = frame-destroy LDPXpost $sp, 8
-
- RET_ReallyLR
-...
----
-name: bar
-# CHECK-LABEL: name: bar
-tracksRegLiveness: true
-stack:
- - { id : 0, size: 8, alignment: 4,
- stack-id: default, callee-saved-register: '', callee-saved-restored: true,
- local-offset: -4, debug-info-variable: '', debug-info-expression: '',
- debug-info-location: '' }
-
-body: |
- bb.0:
- $x19 = IMPLICIT_DEF
- $x20 = IMPLICIT_DEF
- $x21 = IMPLICIT_DEF
- $x22 = IMPLICIT_DEF
- $x23 = IMPLICIT_DEF
- $x24 = IMPLICIT_DEF
- $x25 = IMPLICIT_DEF
- $x26 = IMPLICIT_DEF
-
- ; The local stack size is not 0, and we can combine the CSR stack size with
- ; the local stack size. This results in rewriting the offsets for all the
- ; save/restores and forbids us to merge the stack adjustment and the last pop.
- ; In this case, there is no point of moving the first CSR pair at the end.
- ; We do it anyway, as it's a small price to pay for the resulting
- ; simplification in the epilogue emission code.
- ; CHECK: $x24, $x23 = frame-destroy LDPXi $sp, 4
- ; CHECK-NEXT: $x22, $x21 = frame-destroy LDPXi $sp, 6
- ; CHECK-NEXT: $x20, $x19 = frame-destroy LDPXi $sp, 8
- ; CHECK-NEXT: $x26, $x25 = frame-destroy LDPXi $sp, 2
- ; CHECK-NEXT: $sp = frame-destroy ADDXri $sp, 80, 0
- RET_ReallyLR
-...
----
-# Check that the load from the offset 0 is moved at the end even when hasFP is
-# false.
-name: baz
-# CHECK-LABEL: name: baz
-alignment: 4
-tracksRegLiveness: true
-frameInfo:
- adjustsStack: true
- hasCalls: true
-body: |
- bb.0:
- successors: %bb.1
-
- $x0 = IMPLICIT_DEF
- $x20 = IMPLICIT_DEF
- $x21 = IMPLICIT_DEF
-
- ADJCALLSTACKDOWN 0, 0, implicit-def dead $sp, implicit $sp
- BL @foo, csr_aarch64_aapcs, implicit-def dead $lr, implicit $sp, implicit $x0, implicit-def $sp, implicit-def $x0
- ADJCALLSTACKUP 0, 0, implicit-def dead $sp, implicit $sp
- B %bb.1
-
- bb.1:
- ; CHECK: $x21, $x20 = frame-destroy LDPXi $sp, 2
- ; CHECK-NEXT: early-clobber $sp, $lr = frame-destroy LDRXpost $sp, 32
- RET_ReallyLR
-...
diff --git a/llvm/test/CodeGen/AArch64/sme-streaming-compatible-interface.ll b/llvm/test/CodeGen/AArch64/sme-streaming-compatible-interface.ll
index 296f2be9cfee..6d2abf7e1841 100644
--- a/llvm/test/CodeGen/AArch64/sme-streaming-compatible-interface.ll
+++ b/llvm/test/CodeGen/AArch64/sme-streaming-compatible-interface.ll
@@ -226,30 +226,30 @@ define <vscale x 2 x double> @streaming_compatible_with_scalable_vectors(<vscale
; CHECK-NEXT: ldr z1, [sp] // 16-byte Folded Reload
; CHECK-NEXT: fadd z0.d, z1.d, z0.d
; CHECK-NEXT: addvl sp, sp, #2
-; CHECK-NEXT: ldr p15, [sp, #4, mul vl] // 2-byte Folded Reload
; CHECK-NEXT: ldr z23, [sp, #2, mul vl] // 16-byte Folded Reload
; CHECK-NEXT: ldr z22, [sp, #3, mul vl] // 16-byte Folded Reload
-; CHECK-NEXT: ldr p14, [sp, #5, mul vl] // 2-byte Folded Reload
; CHECK-NEXT: ldr z21, [sp, #4, mul vl] // 16-byte Folded Reload
; CHECK-NEXT: ldr z20, [sp, #5, mul vl] // 16-byte Folded Reload
-; CHECK-NEXT: ldr p13, [sp, #6, mul vl] // 2-byte Folded Reload
; CHECK-NEXT: ldr z19, [sp, #6, mul vl] // 16-byte Folded Reload
; CHECK-NEXT: ldr z18, [sp, #7, mul vl] // 16-byte Folded Reload
-; CHECK-NEXT: ldr p12, [sp, #7, mul vl] // 2-byte Folded Reload
; CHECK-NEXT: ldr z17, [sp, #8, mul vl] // 16-byte Folded Reload
; CHECK-NEXT: ldr z16, [sp, #9, mul vl] // 16-byte Folded Reload
-; CHECK-NEXT: ldr p11, [sp, #8, mul vl] // 2-byte Folded Reload
; CHECK-NEXT: ldr z15, [sp, #10, mul vl] // 16-byte Folded Reload
; CHECK-NEXT: ldr z14, [sp, #11, mul vl] // 16-byte Folded Reload
-; CHECK-NEXT: ldr p10, [sp, #9, mul vl] // 2-byte Folded Reload
; CHECK-NEXT: ldr z13, [sp, #12, mul vl] // 16-byte Folded Reload
; CHECK-NEXT: ldr z12, [sp, #13, mul vl] // 16-byte Folded Reload
-; CHECK-NEXT: ldr p9, [sp, #10, mul vl] // 2-byte Folded Reload
; CHECK-NEXT: ldr z11, [sp, #14, mul vl] // 16-byte Folded Reload
; CHECK-NEXT: ldr z10, [sp, #15, mul vl] // 16-byte Folded Reload
-; CHECK-NEXT: ldr p8, [sp, #11, mul vl] // 2-byte Folded Reload
; CHECK-NEXT: ldr z9, [sp, #16, mul vl] // 16-byte Folded Reload
; CHECK-NEXT: ldr z8, [sp, #17, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT: ldr p15, [sp, #4, mul vl] // 2-byte Folded Reload
+; CHECK-NEXT: ldr p14, [sp, #5, mul vl] // 2-byte Folded Reload
+; CHECK-NEXT: ldr p13, [sp, #6, mul vl] // 2-byte Folded Reload
+; CHECK-NEXT: ldr p12, [sp, #7, mul vl] // 2-byte Folded Reload
+; CHECK-NEXT: ldr p11, [sp, #8, mul vl] // 2-byte Folded Reload
+; CHECK-NEXT: ldr p10, [sp, #9, mul vl] // 2-byte Folded Reload
+; CHECK-NEXT: ldr p9, [sp, #10, mul vl] // 2-byte Folded Reload
+; CHECK-NEXT: ldr p8, [sp, #11, mul vl] // 2-byte Folded Reload
; CHECK-NEXT: ldr p7, [sp, #12, mul vl] // 2-byte Folded Reload
; CHECK-NEXT: ldr p6, [sp, #13, mul vl] // 2-byte Folded Reload
; CHECK-NEXT: ldr p5, [sp, #14, mul vl] // 2-byte Folded Reload
@@ -318,30 +318,30 @@ define <vscale x 2 x i1> @streaming_compatible_with_predicate_vectors(<vscale x
; CHECK-NEXT: ldr p1, [sp, #6, mul vl] // 2-byte Folded Reload
; CHECK-NEXT: and p0.b, p1/z, p1.b, p0.b
; CHECK-NEXT: addvl sp, sp, #1
-; CHECK-NEXT: ldr p15, [sp, #4, mul vl] // 2-byte Folded Reload
; CHECK-NEXT: ldr z23, [sp, #2, mul vl] // 16-byte Folded Reload
; CHECK-NEXT: ldr z22, [sp, #3, mul vl] // 16-byte Folded Reload
-; CHECK-NEXT: ldr p14, [sp, #5, mul vl] // 2-byte Folded Reload
; CHECK-NEXT: ldr z21, [sp, #4, mul vl] // 16-byte Folded Reload
; CHECK-NEXT: ldr z20, [sp, #5, mul vl] // 16-byte Folded Reload
-; CHECK-NEXT: ldr p13, [sp, #6, mul vl] // 2-byte Folded Reload
; CHECK-NEXT: ldr z19, [sp, #6, mul vl] // 16-byte Folded Reload
; CHECK-NEXT: ldr z18, [sp, #7, mul vl] // 16-byte Folded Reload
-; CHECK-NEXT: ldr p12, [sp, #7, mul vl] // 2-byte Folded Reload
; CHECK-NEXT: ldr z17, [sp, #8, mul vl] // 16-byte Folded Reload
; CHECK-NEXT: ldr z16, [sp, #9, mul vl] // 16-byte Folded Reload
-; CHECK-NEXT: ldr p11, [sp, #8, mul vl] // 2-byte Folded Reload
; CHECK-NEXT: ldr z15, [sp, #10, mul vl] // 16-byte Folded Reload
; CHECK-NEXT: ldr z14, [sp, #11, mul vl] // 16-byte Folded Reload
-; CHECK-NEXT: ldr p10, [sp, #9, mul vl] // 2-byte Folded Reload
; CHECK-NEXT: ldr z13, [sp, #12, mul vl] // 16-byte Folded Reload
; CHECK-NEXT: ldr z12, [sp, #13, mul vl] // 16-byte Folded Reload
-; CHECK-NEXT: ldr p9, [sp, #10, mul vl] // 2-byte Folded Reload
; CHECK-NEXT: ldr z11, [sp, #14, mul vl] // 16-byte Folded Reload
; CHECK-NEXT: ldr z10, [sp, #15, mul vl] // 16-byte Folded Reload
-; CHECK-NEXT: ldr p8, [sp, #11, mul vl] // 2-byte Folded Reload
; CHECK-NEXT: ldr z9, [sp, #16, mul vl] // 16-byte Folded Reload
; CHECK-NEXT: ldr z8, [sp, #17, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT: ldr p15, [sp, #4, mul vl] // 2-byte Folded Reload
+; CHECK-NEXT: ldr p14, [sp, #5, mul vl] // 2-byte Folded Reload
+; CHECK-NEXT: ldr p13, [sp, #6, mul vl] // 2-byte Folded Reload
+; CHECK-NEXT: ldr p12, [sp, #7, mul vl] // 2-byte Folded Reload
+; CHECK-NEXT: ldr p11, [sp, #8, mul vl] // 2-byte Folded Reload
+; CHECK-NEXT: ldr p10, [sp, #9, mul vl] // 2-byte Folded Reload
+; CHECK-NEXT: ldr p9, [sp, #10, mul vl] // 2-byte Folded Reload
+; CHECK-NEXT: ldr p8, [sp, #11, mul vl] // 2-byte Folded Reload
; CHECK-NEXT: ldr p7, [sp, #12, mul vl] // 2-byte Folded Reload
; CHECK-NEXT: ldr p6, [sp, #13, mul vl] // 2-byte Folded Reload
; CHECK-NEXT: ldr p5, [sp, #14, mul vl] // 2-byte Folded Reload
diff --git a/llvm/test/CodeGen/AArch64/sme-streaming-interface.ll b/llvm/test/CodeGen/AArch64/sme-streaming-interface.ll
index 86918a59f381..de676ac5e0d2 100644
--- a/llvm/test/CodeGen/AArch64/sme-streaming-interface.ll
+++ b/llvm/test/CodeGen/AArch64/sme-streaming-interface.ll
@@ -187,30 +187,30 @@ define <vscale x 4 x i32> @smstart_clobber_sve(<vscale x 4 x i32> %x) nounwind {
; CHECK-NEXT: smstop sm
; CHECK-NEXT: ldr z0, [sp] // 16-byte Folded Reload
; CHECK-NEXT: addvl sp, sp, #1
-; CHECK-NEXT: ldr p15, [sp, #4, mul vl] // 2-byte Folded Reload
; CHECK-NEXT: ldr z23, [sp, #2, mul vl] // 16-byte Folded Reload
; CHECK-NEXT: ldr z22, [sp, #3, mul vl] // 16-byte Folded Reload
-; CHECK-NEXT: ldr p14, [sp, #5, mul vl] // 2-byte Folded Reload
; CHECK-NEXT: ldr z21, [sp, #4, mul vl] // 16-byte Folded Reload
; CHECK-NEXT: ldr z20, [sp, #5, mul vl] // 16-byte Folded Reload
-; CHECK-NEXT: ldr p13, [sp, #6, mul vl] // 2-byte Folded Reload
; CHECK-NEXT: ldr z19, [sp, #6, mul vl] // 16-byte Folded Reload
; CHECK-NEXT: ldr z18, [sp, #7, mul vl] // 16-byte Folded Reload
-; CHECK-NEXT: ldr p12, [sp, #7, mul vl] // 2-byte Folded Reload
; CHECK-NEXT: ldr z17, [sp, #8, mul vl] // 16-byte Folded Reload
; CHECK-NEXT: ldr z16, [sp, #9, mul vl] // 16-byte Folded Reload
-; CHECK-NEXT: ldr p11, [sp, #8, mul vl] // 2-byte Folded Reload
; CHECK-NEXT: ldr z15, [sp, #10, mul vl] // 16-byte Folded Reload
; CHECK-NEXT: ldr z14, [sp, #11, mul vl] // 16-byte Folded Reload
-; CHECK-NEXT: ldr p10, [sp, #9, mul vl] // 2-byte Folded Reload
; CHECK-NEXT: ldr z13, [sp, #12, mul vl] // 16-byte Folded Reload
; CHECK-NEXT: ldr z12, [sp, #13, mul vl] // 16-byte Folded Reload
-; CHECK-NEXT: ldr p9, [sp, #10, mul vl] // 2-byte Folded Reload
; CHECK-NEXT: ldr z11, [sp, #14, mul vl] // 16-byte Folded Reload
; CHECK-NEXT: ldr z10, [sp, #15, mul vl] // 16-byte Folded Reload
-; CHECK-NEXT: ldr p8, [sp, #11, mul vl] // 2-byte Folded Reload
; CHECK-NEXT: ldr z9, [sp, #16, mul vl] // 16-byte Folded Reload
; CHECK-NEXT: ldr z8, [sp, #17, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT: ldr p15, [sp, #4, mul vl] // 2-byte Folded Reload
+; CHECK-NEXT: ldr p14, [sp, #5, mul vl] // 2-byte Folded Reload
+; CHECK-NEXT: ldr p13, [sp, #6, mul vl] // 2-byte Folded Reload
+; CHECK-NEXT: ldr p12, [sp, #7, mul vl] // 2-byte Folded Reload
+; CHECK-NEXT: ldr p11, [sp, #8, mul vl] // 2-byte Folded Reload
+; CHECK-NEXT: ldr p10, [sp, #9, mul vl] // 2-byte Folded Reload
+; CHECK-NEXT: ldr p9, [sp, #10, mul vl] // 2-byte Folded Reload
+; CHECK-NEXT: ldr p8, [sp, #11, mul vl] // 2-byte Folded Reload
; CHECK-NEXT: ldr p7, [sp, #12, mul vl] // 2-byte Folded Reload
; CHECK-NEXT: ldr p6, [sp, #13, mul vl] // 2-byte Folded Reload
; CHECK-NEXT: ldr p5, [sp, #14, mul vl] // 2-byte Folded Reload
@@ -267,30 +267,30 @@ define <vscale x 4 x i32> @smstart_clobber_sve_duplicate(<vscale x 4 x i32> %x)
; CHECK-NEXT: smstop sm
; CHECK-NEXT: ldr z0, [sp] // 16-byte Folded Reload
; CHECK-NEXT: addvl sp, sp, #1
-; CHECK-NEXT: ldr p15, [sp, #4, mul vl] // 2-byte Folded Reload
; CHECK-NEXT: ldr z23, [sp, #2, mul vl] // 16-byte Folded Reload
; CHECK-NEXT: ldr z22, [sp, #3, mul vl] // 16-byte Folded Reload
-; CHECK-NEXT: ldr p14, [sp, #5, mul vl] // 2-byte Folded Reload
; CHECK-NEXT: ldr z21, [sp, #4, mul vl] // 16-byte Folded Reload
; CHECK-NEXT: ldr z20, [sp, #5, mul vl] // 16-byte Folded Reload
-; CHECK-NEXT: ldr p13, [sp, #6, mul vl] // 2-byte Folded Reload
; CHECK-NEXT: ldr z19, [sp, #6, mul vl] // 16-byte Folded Reload
; CHECK-NEXT: ldr z18, [sp, #7, mul vl] // 16-byte Folded Reload
-; CHECK-NEXT: ldr p12, [sp, #7, mul vl] // 2-byte Folded Reload
; CHECK-NEXT: ldr z17, [sp, #8, mul vl] // 16-byte Folded Reload
; CHECK-NEXT: ldr z16, [sp, #9, mul vl] // 16-byte Folded Reload
-; CHECK-NEXT: ldr p11, [sp, #8, mul vl] // 2-byte Folded Reload
; CHECK-NEXT: ldr z15, [sp, #10, mul vl] // 16-byte Folded Reload
; CHECK-NEXT: ldr z14, [sp, #11, mul vl] // 16-byte Folded Reload
-; CHECK-NEXT: ldr p10, [sp, #9, mul vl] // 2-byte Folded Reload
; CHECK-NEXT: ldr z13, [sp, #12, mul vl] // 16-byte Folded Reload
; CHECK-NEXT: ldr z12, [sp, #13, mul vl] // 16-byte Folded Reload
-; CHECK-NEXT: ldr p9, [sp, #10, mul vl] // 2-byte Folded Reload
; CHECK-NEXT: ldr z11, [sp, #14, mul vl] // 16-byte Folded Reload
; CHECK-NEXT: ldr z10, [sp, #15, mul vl] // 16-byte Folded Reload
-; CHECK-NEXT: ldr p8, [sp, #11, mul vl] // 2-byte Folded Reload
; CHECK-NEXT: ldr z9, [sp, #16, mul vl] // 16-byte Folded Reload
; CHECK-NEXT: ldr z8, [sp, #17, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT: ldr p15, [sp, #4, mul vl] // 2-byte Folded Reload
+; CHECK-NEXT: ldr p14, [sp, #5, mul vl] // 2-byte Folded Reload
+; CHECK-NEXT: ldr p13, [sp, #6, mul vl] // 2-byte Folded Reload
+; CHECK-NEXT: ldr p12, [sp, #7, mul vl] // 2-byte Folded Reload
+; CHECK-NEXT: ldr p11, [sp, #8, mul vl] // 2-byte Folded Reload
+; CHECK-NEXT: ldr p10, [sp, #9, mul vl] // 2-byte Folded Reload
+; CHECK-NEXT: ldr p9, [sp, #10, mul vl] // 2-byte Folded Reload
+; CHECK-NEXT: ldr p8, [sp, #11, mul vl] // 2-byte Folded Reload
; CHECK-NEXT: ldr p7, [sp, #12, mul vl] // 2-byte Folded Reload
; CHECK-NEXT: ldr p6, [sp, #13, mul vl] // 2-byte Folded Reload
; CHECK-NEXT: ldr p5, [sp, #14, mul vl] // 2-byte Folded Reload
diff --git a/llvm/test/CodeGen/AArch64/sme2-intrinsics-ld1.ll b/llvm/test/CodeGen/AArch64/sme2-intrinsics-ld1.ll
index b7119fc08256..ea7808d73093 100644
--- a/llvm/test/CodeGen/AArch64/sme2-intrinsics-ld1.ll
+++ b/llvm/test/CodeGen/AArch64/sme2-intrinsics-ld1.ll
@@ -129,7 +129,6 @@ define <vscale x 32 x i8> @ld1_x2_i8_z0_z8(<vscale x 16 x i8> %unused, <vscale x
; CONTIGUOUS-NEXT: ldr z0, [sp]
; CONTIGUOUS-NEXT: ldr z1, [sp, #1, mul vl]
; CONTIGUOUS-NEXT: addvl sp, sp, #2
-; CONTIGUOUS-NEXT: ldr p8, [sp, #7, mul vl] // 2-byte Folded Reload
; CONTIGUOUS-NEXT: ldr z23, [sp, #1, mul vl] // 16-byte Folded Reload
; CONTIGUOUS-NEXT: ldr z22, [sp, #2, mul vl] // 16-byte Folded Reload
; CONTIGUOUS-NEXT: ldr z21, [sp, #3, mul vl] // 16-byte Folded Reload
@@ -145,6 +144,7 @@ define <vscale x 32 x i8> @ld1_x2_i8_z0_z8(<vscale x 16 x i8> %unused, <vscale x
; CONTIGUOUS-NEXT: ldr z11, [sp, #13, mul vl] // 16-byte Folded Reload
; CONTIGUOUS-NEXT: ldr z10, [sp, #14, mul vl] // 16-byte Folded Reload
; CONTIGUOUS-NEXT: ldr z9, [sp, #15, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT: ldr p8, [sp, #7, mul vl] // 2-byte Folded Reload
; CONTIGUOUS-NEXT: addvl sp, sp, #16
; CONTIGUOUS-NEXT: ldr x29, [sp], #16 // 8-byte Folded Reload
; CONTIGUOUS-NEXT: ret
@@ -284,7 +284,6 @@ define <vscale x 32 x i8> @ld1_x2_i8_z0_z8_scalar(<vscale x 16 x i8> %unused, <v
; CONTIGUOUS-NEXT: ldr z0, [sp]
; CONTIGUOUS-NEXT: ldr z1, [sp, #1, mul vl]
; CONTIGUOUS-NEXT: addvl sp, sp, #2
-; CONTIGUOUS-NEXT: ldr p8, [sp, #7, mul vl] // 2-byte Folded Reload
; CONTIGUOUS-NEXT: ldr z23, [sp, #1, mul vl] // 16-byte Folded Reload
; CONTIGUOUS-NEXT: ldr z22, [sp, #2, mul vl] // 16-byte Folded Reload
; CONTIGUOUS-NEXT: ldr z21, [sp, #3, mul vl] // 16-byte Folded Reload
@@ -300,6 +299,7 @@ define <vscale x 32 x i8> @ld1_x2_i8_z0_z8_scalar(<vscale x 16 x i8> %unused, <v
; CONTIGUOUS-NEXT: ldr z11, [sp, #13, mul vl] // 16-byte Folded Reload
; CONTIGUOUS-NEXT: ldr z10, [sp, #14, mul vl] // 16-byte Folded Reload
; CONTIGUOUS-NEXT: ldr z9, [sp, #15, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT: ldr p8, [sp, #7, mul vl] // 2-byte Folded Reload
; CONTIGUOUS-NEXT: addvl sp, sp, #16
; CONTIGUOUS-NEXT: ldr x29, [sp], #16 // 8-byte Folded Reload
; CONTIGUOUS-NEXT: ret
@@ -440,7 +440,6 @@ define <vscale x 16 x i16> @ld1_x2_i16_z0_z8(<vscale x 8 x i16> %unused, <vscale
; CONTIGUOUS-NEXT: ldr z0, [sp]
; CONTIGUOUS-NEXT: ldr z1, [sp, #1, mul vl]
; CONTIGUOUS-NEXT: addvl sp, sp, #2
-; CONTIGUOUS-NEXT: ldr p8, [sp, #7, mul vl] // 2-byte Folded Reload
; CONTIGUOUS-NEXT: ldr z23, [sp, #1, mul vl] // 16-byte Folded Reload
; CONTIGUOUS-NEXT: ldr z22, [sp, #2, mul vl] // 16-byte Folded Reload
; CONTIGUOUS-NEXT: ldr z21, [sp, #3, mul vl] // 16-byte Folded Reload
@@ -456,6 +455,7 @@ define <vscale x 16 x i16> @ld1_x2_i16_z0_z8(<vscale x 8 x i16> %unused, <vscale
; CONTIGUOUS-NEXT: ldr z11, [sp, #13, mul vl] // 16-byte Folded Reload
; CONTIGUOUS-NEXT: ldr z10, [sp, #14, mul vl] // 16-byte Folded Reload
; CONTIGUOUS-NEXT: ldr z9, [sp, #15, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT: ldr p8, [sp, #7, mul vl] // 2-byte Folded Reload
; CONTIGUOUS-NEXT: addvl sp, sp, #16
; CONTIGUOUS-NEXT: ldr x29, [sp], #16 // 8-byte Folded Reload
; CONTIGUOUS-NEXT: ret
@@ -595,7 +595,6 @@ define <vscale x 16 x i16> @ld1_x2_i16_z0_z8_scalar(<vscale x 8 x i16> %unused,
; CONTIGUOUS-NEXT: ldr z0, [sp]
; CONTIGUOUS-NEXT: ldr z1, [sp, #1, mul vl]
; CONTIGUOUS-NEXT: addvl sp, sp, #2
-; CONTIGUOUS-NEXT: ldr p8, [sp, #7, mul vl] // 2-byte Folded Reload
; CONTIGUOUS-NEXT: ldr z23, [sp, #1, mul vl] // 16-byte Folded Reload
; CONTIGUOUS-NEXT: ldr z22, [sp, #2, mul vl] // 16-byte Folded Reload
; CONTIGUOUS-NEXT: ldr z21, [sp, #3, mul vl] // 16-byte Folded Reload
@@ -611,6 +610,7 @@ define <vscale x 16 x i16> @ld1_x2_i16_z0_z8_scalar(<vscale x 8 x i16> %unused,
; CONTIGUOUS-NEXT: ldr z11, [sp, #13, mul vl] // 16-byte Folded Reload
; CONTIGUOUS-NEXT: ldr z10, [sp, #14, mul vl] // 16-byte Folded Reload
; CONTIGUOUS-NEXT: ldr z9, [sp, #15, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT: ldr p8, [sp, #7, mul vl] // 2-byte Folded Reload
; CONTIGUOUS-NEXT: addvl sp, sp, #16
; CONTIGUOUS-NEXT: ldr x29, [sp], #16 // 8-byte Folded Reload
; CONTIGUOUS-NEXT: ret
@@ -751,7 +751,6 @@ define <vscale x 8 x i32> @ld1_x2_i32_z0_z8(<vscale x 4 x i32> %unused, <vscale
; CONTIGUOUS-NEXT: ldr z0, [sp]
; CONTIGUOUS-NEXT: ldr z1, [sp, #1, mul vl]
; CONTIGUOUS-NEXT: addvl sp, sp, #2
-; CONTIGUOUS-NEXT: ldr p8, [sp, #7, mul vl] // 2-byte Folded Reload
; CONTIGUOUS-NEXT: ldr z23, [sp, #1, mul vl] // 16-byte Folded Reload
; CONTIGUOUS-NEXT: ldr z22, [sp, #2, mul vl] // 16-byte Folded Reload
; CONTIGUOUS-NEXT: ldr z21, [sp, #3, mul vl] // 16-byte Folded Reload
@@ -767,6 +766,7 @@ define <vscale x 8 x i32> @ld1_x2_i32_z0_z8(<vscale x 4 x i32> %unused, <vscale
; CONTIGUOUS-NEXT: ldr z11, [sp, #13, mul vl] // 16-byte Folded Reload
; CONTIGUOUS-NEXT: ldr z10, [sp, #14, mul vl] // 16-byte Folded Reload
; CONTIGUOUS-NEXT: ldr z9, [sp, #15, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT: ldr p8, [sp, #7, mul vl] // 2-byte Folded Reload
; CONTIGUOUS-NEXT: addvl sp, sp, #16
; CONTIGUOUS-NEXT: ldr x29, [sp], #16 // 8-byte Folded Reload
; CONTIGUOUS-NEXT: ret
@@ -906,7 +906,6 @@ define <vscale x 8 x i32> @ld1_x2_i32_z0_z8_scalar(<vscale x 4 x i32> %unused, <
; CONTIGUOUS-NEXT: ldr z0, [sp]
; CONTIGUOUS-NEXT: ldr z1, [sp, #1, mul vl]
; CONTIGUOUS-NEXT: addvl sp, sp, #2
-; CONTIGUOUS-NEXT: ldr p8, [sp, #7, mul vl] // 2-byte Folded Reload
; CONTIGUOUS-NEXT: ldr z23, [sp, #1, mul vl] // 16-byte Folded Reload
; CONTIGUOUS-NEXT: ldr z22, [sp, #2, mul vl] // 16-byte Folded Reload
; CONTIGUOUS-NEXT: ldr z21, [sp, #3, mul vl] // 16-byte Folded Reload
@@ -922,6 +921,7 @@ define <vscale x 8 x i32> @ld1_x2_i32_z0_z8_scalar(<vscale x 4 x i32> %unused, <
; CONTIGUOUS-NEXT: ldr z11, [sp, #13, mul vl] // 16-byte Folded Reload
; CONTIGUOUS-NEXT: ldr z10, [sp, #14, mul vl] // 16-byte Folded Reload
; CONTIGUOUS-NEXT: ldr z9, [sp, #15, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT: ldr p8, [sp, #7, mul vl] // 2-byte Folded Reload
; CONTIGUOUS-NEXT: addvl sp, sp, #16
; CONTIGUOUS-NEXT: ldr x29, [sp], #16 // 8-byte Folded Reload
; CONTIGUOUS-NEXT: ret
@@ -1062,7 +1062,6 @@ define <vscale x 4 x i64> @ld1_x2_i64_z0_z8(<vscale x 2 x i64> %unused, <vscale
; CONTIGUOUS-NEXT: ldr z0, [sp]
; CONTIGUOUS-NEXT: ldr z1, [sp, #1, mul vl]
; CONTIGUOUS-NEXT: addvl sp, sp, #2
-; CONTIGUOUS-NEXT: ldr p8, [sp, #7, mul vl] // 2-byte Folded Reload
; CONTIGUOUS-NEXT: ldr z23, [sp, #1, mul vl] // 16-byte Folded Reload
; CONTIGUOUS-NEXT: ldr z22, [sp, #2, mul vl] // 16-byte Folded Reload
; CONTIGUOUS-NEXT: ldr z21, [sp, #3, mul vl] // 16-byte Folded Reload
@@ -1078,6 +1077,7 @@ define <vscale x 4 x i64> @ld1_x2_i64_z0_z8(<vscale x 2 x i64> %unused, <vscale
; CONTIGUOUS-NEXT: ldr z11, [sp, #13, mul vl] // 16-byte Folded Reload
; CONTIGUOUS-NEXT: ldr z10, [sp, #14, mul vl] // 16-byte Folded Reload
; CONTIGUOUS-NEXT: ldr z9, [sp, #15, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT: ldr p8, [sp, #7, mul vl] // 2-byte Folded Reload
; CONTIGUOUS-NEXT: addvl sp, sp, #16
; CONTIGUOUS-NEXT: ldr x29, [sp], #16 // 8-byte Folded Reload
; CONTIGUOUS-NEXT: ret
@@ -1217,7 +1217,6 @@ define <vscale x 4 x i64> @ld1_x2_i64_z0_z8_scalar(<vscale x 2 x i64> %unused, <
; CONTIGUOUS-NEXT: ldr z0, [sp]
; CONTIGUOUS-NEXT: ldr z1, [sp, #1, mul vl]
; CONTIGUOUS-NEXT: addvl sp, sp, #2
-; CONTIGUOUS-NEXT: ldr p8, [sp, #7, mul vl] // 2-byte Folded Reload
; CONTIGUOUS-NEXT: ldr z23, [sp, #1, mul vl] // 16-byte Folded Reload
; CONTIGUOUS-NEXT: ldr z22, [sp, #2, mul vl] // 16-byte Folded Reload
; CONTIGUOUS-NEXT: ldr z21, [sp, #3, mul vl] // 16-byte Folded Reload
@@ -1233,6 +1232,7 @@ define <vscale x 4 x i64> @ld1_x2_i64_z0_z8_scalar(<vscale x 2 x i64> %unused, <
; CONTIGUOUS-NEXT: ldr z11, [sp, #13, mul vl] // 16-byte Folded Reload
; CONTIGUOUS-NEXT: ldr z10, [sp, #14, mul vl] // 16-byte Folded Reload
; CONTIGUOUS-NEXT: ldr z9, [sp, #15, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT: ldr p8, [sp, #7, mul vl] // 2-byte Folded Reload
; CONTIGUOUS-NEXT: addvl sp, sp, #16
; CONTIGUOUS-NEXT: ldr x29, [sp], #16 // 8-byte Folded Reload
; CONTIGUOUS-NEXT: ret
@@ -1380,7 +1380,6 @@ define <vscale x 64 x i8> @ld1_x4_i8_z0_z4_z8_z12(<vscale x 16 x i8> %unused, <v
; CONTIGUOUS-NEXT: ldr z2, [sp, #2, mul vl]
; CONTIGUOUS-NEXT: ldr z3, [sp, #3, mul vl]
; CONTIGUOUS-NEXT: addvl sp, sp, #4
-; CONTIGUOUS-NEXT: ldr p8, [sp, #7, mul vl] // 2-byte Folded Reload
; CONTIGUOUS-NEXT: ldr z23, [sp, #1, mul vl] // 16-byte Folded Reload
; CONTIGUOUS-NEXT: ldr z22, [sp, #2, mul vl] // 16-byte Folded Reload
; CONTIGUOUS-NEXT: ldr z21, [sp, #3, mul vl] // 16-byte Folded Reload
@@ -1395,6 +1394,7 @@ define <vscale x 64 x i8> @ld1_x4_i8_z0_z4_z8_z12(<vscale x 16 x i8> %unused, <v
; CONTIGUOUS-NEXT: ldr z11, [sp, #12, mul vl] // 16-byte Folded Reload
; CONTIGUOUS-NEXT: ldr z10, [sp, #13, mul vl] // 16-byte Folded Reload
; CONTIGUOUS-NEXT: ldr z9, [sp, #14, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT: ldr p8, [sp, #7, mul vl] // 2-byte Folded Reload
; CONTIGUOUS-NEXT: addvl sp, sp, #15
; CONTIGUOUS-NEXT: ldr x29, [sp], #16 // 8-byte Folded Reload
; CONTIGUOUS-NEXT: ret
@@ -1545,7 +1545,6 @@ define <vscale x 64 x i8> @ld1_x4_i8_z0_z4_z8_z12_scalar(<vscale x 16 x i8> %unu
; CONTIGUOUS-NEXT: ldr z2, [sp, #2, mul vl]
; CONTIGUOUS-NEXT: ldr z3, [sp, #3, mul vl]
; CONTIGUOUS-NEXT: addvl sp, sp, #4
-; CONTIGUOUS-NEXT: ldr p8, [sp, #7, mul vl] // 2-byte Folded Reload
; CONTIGUOUS-NEXT: ldr z23, [sp, #1, mul vl] // 16-byte Folded Reload
; CONTIGUOUS-NEXT: ldr z22, [sp, #2, mul vl] // 16-byte Folded Reload
; CONTIGUOUS-NEXT: ldr z21, [sp, #3, mul vl] // 16-byte Folded Reload
@@ -1560,6 +1559,7 @@ define <vscale x 64 x i8> @ld1_x4_i8_z0_z4_z8_z12_scalar(<vscale x 16 x i8> %unu
; CONTIGUOUS-NEXT: ldr z11, [sp, #12, mul vl] // 16-byte Folded Reload
; CONTIGUOUS-NEXT: ldr z10, [sp, #13, mul vl] // 16-byte Folded Reload
; CONTIGUOUS-NEXT: ldr z9, [sp, #14, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT: ldr p8, [sp, #7, mul vl] // 2-byte Folded Reload
; CONTIGUOUS-NEXT: addvl sp, sp, #15
; CONTIGUOUS-NEXT: ldr x29, [sp], #16 // 8-byte Folded Reload
; CONTIGUOUS-NEXT: ret
@@ -1711,7 +1711,6 @@ define <vscale x 32 x i16> @ld1_x4_i16_z0_z4_z8_z12(<vscale x 8 x i16> %unused,
; CONTIGUOUS-NEXT: ldr z2, [sp, #2, mul vl]
; CONTIGUOUS-NEXT: ldr z3, [sp, #3, mul vl]
; CONTIGUOUS-NEXT: addvl sp, sp, #4
-; CONTIGUOUS-NEXT: ldr p8, [sp, #7, mul vl] // 2-byte Folded Reload
; CONTIGUOUS-NEXT: ldr z23, [sp, #1, mul vl] // 16-byte Folded Reload
; CONTIGUOUS-NEXT: ldr z22, [sp, #2, mul vl] // 16-byte Folded Reload
; CONTIGUOUS-NEXT: ldr z21, [sp, #3, mul vl] // 16-byte Folded Reload
@@ -1726,6 +1725,7 @@ define <vscale x 32 x i16> @ld1_x4_i16_z0_z4_z8_z12(<vscale x 8 x i16> %unused,
; CONTIGUOUS-NEXT: ldr z11, [sp, #12, mul vl] // 16-byte Folded Reload
; CONTIGUOUS-NEXT: ldr z10, [sp, #13, mul vl] // 16-byte Folded Reload
; CONTIGUOUS-NEXT: ldr z9, [sp, #14, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT: ldr p8, [sp, #7, mul vl] // 2-byte Folded Reload
; CONTIGUOUS-NEXT: addvl sp, sp, #15
; CONTIGUOUS-NEXT: ldr x29, [sp], #16 // 8-byte Folded Reload
; CONTIGUOUS-NEXT: ret
@@ -1877,7 +1877,6 @@ define <vscale x 32 x i16> @ld1_x4_i16_z0_z4_z8_z12_scalar(<vscale x 8 x i16> %u
; CONTIGUOUS-NEXT: ldr z2, [sp, #2, mul vl]
; CONTIGUOUS-NEXT: ldr z3, [sp, #3, mul vl]
; CONTIGUOUS-NEXT: addvl sp, sp, #4
-; CONTIGUOUS-NEXT: ldr p8, [sp, #7, mul vl] // 2-byte Folded Reload
; CONTIGUOUS-NEXT: ldr z23, [sp, #1, mul vl] // 16-byte Folded Reload
; CONTIGUOUS-NEXT: ldr z22, [sp, #2, mul vl] // 16-byte Folded Reload
; CONTIGUOUS-NEXT: ldr z21, [sp, #3, mul vl] // 16-byte Folded Reload
@@ -1892,6 +1891,7 @@ define <vscale x 32 x i16> @ld1_x4_i16_z0_z4_z8_z12_scalar(<vscale x 8 x i16> %u
; CONTIGUOUS-NEXT: ldr z11, [sp, #12, mul vl] // 16-byte Folded Reload
; CONTIGUOUS-NEXT: ldr z10, [sp, #13, mul vl] // 16-byte Folded Reload
; CONTIGUOUS-NEXT: ldr z9, [sp, #14, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT: ldr p8, [sp, #7, mul vl] // 2-byte Folded Reload
; CONTIGUOUS-NEXT: addvl sp, sp, #15
; CONTIGUOUS-NEXT: ldr x29, [sp], #16 // 8-byte Folded Reload
; CONTIGUOUS-NEXT: ret
@@ -2043,7 +2043,6 @@ define <vscale x 16 x i32> @ld1_x4_i32_z0_z4_z8_z12(<vscale x 4 x i32> %unused,
; CONTIGUOUS-NEXT: ldr z2, [sp, #2, mul vl]
; CONTIGUOUS-NEXT: ldr z3, [sp, #3, mul vl]
; CONTIGUOUS-NEXT: addvl sp, sp, #4
-; CONTIGUOUS-NEXT: ldr p8, [sp, #7, mul vl] // 2-byte Folded Reload
; CONTIGUOUS-NEXT: ldr z23, [sp, #1, mul vl] // 16-byte Folded Reload
; CONTIGUOUS-NEXT: ldr z22, [sp, #2, mul vl] // 16-byte Folded Reload
; CONTIGUOUS-NEXT: ldr z21, [sp, #3, mul vl] // 16-byte Folded Reload
@@ -2058,6 +2057,7 @@ define <vscale x 16 x i32> @ld1_x4_i32_z0_z4_z8_z12(<vscale x 4 x i32> %unused,
; CONTIGUOUS-NEXT: ldr z11, [sp, #12, mul vl] // 16-byte Folded Reload
; CONTIGUOUS-NEXT: ldr z10, [sp, #13, mul vl] // 16-byte Folded Reload
; CONTIGUOUS-NEXT: ldr z9, [sp, #14, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT: ldr p8, [sp, #7, mul vl] // 2-byte Folded Reload
; CONTIGUOUS-NEXT: addvl sp, sp, #15
; CONTIGUOUS-NEXT: ldr x29, [sp], #16 // 8-byte Folded Reload
; CONTIGUOUS-NEXT: ret
@@ -2209,7 +2209,6 @@ define <vscale x 16 x i32> @ld1_x4_i32_z0_z4_z8_z12_scalar(<vscale x 4 x i32> %u
; CONTIGUOUS-NEXT: ldr z2, [sp, #2, mul vl]
; CONTIGUOUS-NEXT: ldr z3, [sp, #3, mul vl]
; CONTIGUOUS-NEXT: addvl sp, sp, #4
-; CONTIGUOUS-NEXT: ldr p8, [sp, #7, mul vl] // 2-byte Folded Reload
; CONTIGUOUS-NEXT: ldr z23, [sp, #1, mul vl] // 16-byte Folded Reload
; CONTIGUOUS-NEXT: ldr z22, [sp, #2, mul vl] // 16-byte Folded Reload
; CONTIGUOUS-NEXT: ldr z21, [sp, #3, mul vl] // 16-byte Folded Reload
@@ -2224,6 +2223,7 @@ define <vscale x 16 x i32> @ld1_x4_i32_z0_z4_z8_z12_scalar(<vscale x 4 x i32> %u
; CONTIGUOUS-NEXT: ldr z11, [sp, #12, mul vl] // 16-byte Folded Reload
; CONTIGUOUS-NEXT: ldr z10, [sp, #13, mul vl] // 16-byte Folded Reload
; CONTIGUOUS-NEXT: ldr z9, [sp, #14, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT: ldr p8, [sp, #7, mul vl] // 2-byte Folded Reload
; CONTIGUOUS-NEXT: addvl sp, sp, #15
; CONTIGUOUS-NEXT: ldr x29, [sp], #16 // 8-byte Folded Reload
; CONTIGUOUS-NEXT: ret
@@ -2375,7 +2375,6 @@ define <vscale x 8 x i64> @ld1_x4_i64_z0_z4_z8_z12(<vscale x 2 x i64> %unused, <
; CONTIGUOUS-NEXT: ldr z2, [sp, #2, mul vl]
; CONTIGUOUS-NEXT: ldr z3, [sp, #3, mul vl]
; CONTIGUOUS-NEXT: addvl sp, sp, #4
-; CONTIGUOUS-NEXT: ldr p8, [sp, #7, mul vl] // 2-byte Folded Reload
; CONTIGUOUS-NEXT: ldr z23, [sp, #1, mul vl] // 16-byte Folded Reload
; CONTIGUOUS-NEXT: ldr z22, [sp, #2, mul vl] // 16-byte Folded Reload
; CONTIGUOUS-NEXT: ldr z21, [sp, #3, mul vl] // 16-byte Folded Reload
@@ -2390,6 +2389,7 @@ define <vscale x 8 x i64> @ld1_x4_i64_z0_z4_z8_z12(<vscale x 2 x i64> %unused, <
; CONTIGUOUS-NEXT: ldr z11, [sp, #12, mul vl] // 16-byte Folded Reload
; CONTIGUOUS-NEXT: ldr z10, [sp, #13, mul vl] // 16-byte Folded Reload
; CONTIGUOUS-NEXT: ldr z9, [sp, #14, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT: ldr p8, [sp, #7, mul vl] // 2-byte Folded Reload
; CONTIGUOUS-NEXT: addvl sp, sp, #15
; CONTIGUOUS-NEXT: ldr x29, [sp], #16 // 8-byte Folded Reload
; CONTIGUOUS-NEXT: ret
@@ -2541,7 +2541,6 @@ define <vscale x 8 x i64> @ld1_x4_i64_z0_z4_z8_z12_scalar(<vscale x 2 x i64> %un
; CONTIGUOUS-NEXT: ldr z2, [sp, #2, mul vl]
; CONTIGUOUS-NEXT: ldr z3, [sp, #3, mul vl]
; CONTIGUOUS-NEXT: addvl sp, sp, #4
-; CONTIGUOUS-NEXT: ldr p8, [sp, #7, mul vl] // 2-byte Folded Reload
; CONTIGUOUS-NEXT: ldr z23, [sp, #1, mul vl] // 16-byte Folded Reload
; CONTIGUOUS-NEXT: ldr z22, [sp, #2, mul vl] // 16-byte Folded Reload
; CONTIGUOUS-NEXT: ldr z21, [sp, #3, mul vl] // 16-byte Folded Reload
@@ -2556,6 +2555,7 @@ define <vscale x 8 x i64> @ld1_x4_i64_z0_z4_z8_z12_scalar(<vscale x 2 x i64> %un
; CONTIGUOUS-NEXT: ldr z11, [sp, #12, mul vl] // 16-byte Folded Reload
; CONTIGUOUS-NEXT: ldr z10, [sp, #13, mul vl] // 16-byte Folded Reload
; CONTIGUOUS-NEXT: ldr z9, [sp, #14, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT: ldr p8, [sp, #7, mul vl] // 2-byte Folded Reload
; CONTIGUOUS-NEXT: addvl sp, sp, #15
; CONTIGUOUS-NEXT: ldr x29, [sp], #16 // 8-byte Folded Reload
; CONTIGUOUS-NEXT: ret
diff --git a/llvm/test/CodeGen/AArch64/sme2-intrinsics-ldnt1.ll b/llvm/test/CodeGen/AArch64/sme2-intrinsics-ldnt1.ll
index 1fb251a4f628..7e2d28fbf798 100644
--- a/llvm/test/CodeGen/AArch64/sme2-intrinsics-ldnt1.ll
+++ b/llvm/test/CodeGen/AArch64/sme2-intrinsics-ldnt1.ll
@@ -82,7 +82,6 @@ define <vscale x 32 x i8> @ldnt1_x2_i8_z0_z8(<vscale x 16 x i8> %unused, <vscale
; CONTIGUOUS-NEXT: ldr z0, [sp]
; CONTIGUOUS-NEXT: ldr z1, [sp, #1, mul vl]
; CONTIGUOUS-NEXT: addvl sp, sp, #2
-; CONTIGUOUS-NEXT: ldr p8, [sp, #7, mul vl] // 2-byte Folded Reload
; CONTIGUOUS-NEXT: ldr z23, [sp, #1, mul vl] // 16-byte Folded Reload
; CONTIGUOUS-NEXT: ldr z22, [sp, #2, mul vl] // 16-byte Folded Reload
; CONTIGUOUS-NEXT: ldr z21, [sp, #3, mul vl] // 16-byte Folded Reload
@@ -98,6 +97,7 @@ define <vscale x 32 x i8> @ldnt1_x2_i8_z0_z8(<vscale x 16 x i8> %unused, <vscale
; CONTIGUOUS-NEXT: ldr z11, [sp, #13, mul vl] // 16-byte Folded Reload
; CONTIGUOUS-NEXT: ldr z10, [sp, #14, mul vl] // 16-byte Folded Reload
; CONTIGUOUS-NEXT: ldr z9, [sp, #15, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT: ldr p8, [sp, #7, mul vl] // 2-byte Folded Reload
; CONTIGUOUS-NEXT: addvl sp, sp, #16
; CONTIGUOUS-NEXT: ldr x29, [sp], #16 // 8-byte Folded Reload
; CONTIGUOUS-NEXT: ret
@@ -190,7 +190,6 @@ define <vscale x 32 x i8> @ldnt1_x2_i8_z0_z8_scalar(<vscale x 16 x i8> %unused,
; CONTIGUOUS-NEXT: ldr z0, [sp]
; CONTIGUOUS-NEXT: ldr z1, [sp, #1, mul vl]
; CONTIGUOUS-NEXT: addvl sp, sp, #2
-; CONTIGUOUS-NEXT: ldr p8, [sp, #7, mul vl] // 2-byte Folded Reload
; CONTIGUOUS-NEXT: ldr z23, [sp, #1, mul vl] // 16-byte Folded Reload
; CONTIGUOUS-NEXT: ldr z22, [sp, #2, mul vl] // 16-byte Folded Reload
; CONTIGUOUS-NEXT: ldr z21, [sp, #3, mul vl] // 16-byte Folded Reload
@@ -206,6 +205,7 @@ define <vscale x 32 x i8> @ldnt1_x2_i8_z0_z8_scalar(<vscale x 16 x i8> %unused,
; CONTIGUOUS-NEXT: ldr z11, [sp, #13, mul vl] // 16-byte Folded Reload
; CONTIGUOUS-NEXT: ldr z10, [sp, #14, mul vl] // 16-byte Folded Reload
; CONTIGUOUS-NEXT: ldr z9, [sp, #15, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT: ldr p8, [sp, #7, mul vl] // 2-byte Folded Reload
; CONTIGUOUS-NEXT: addvl sp, sp, #16
; CONTIGUOUS-NEXT: ldr x29, [sp], #16 // 8-byte Folded Reload
; CONTIGUOUS-NEXT: ret
@@ -299,7 +299,6 @@ define <vscale x 16 x i16> @ldnt1_x2_i16_z0_z8(<vscale x 8 x i16> %unused, <vsca
; CONTIGUOUS-NEXT: ldr z0, [sp]
; CONTIGUOUS-NEXT: ldr z1, [sp, #1, mul vl]
; CONTIGUOUS-NEXT: addvl sp, sp, #2
-; CONTIGUOUS-NEXT: ldr p8, [sp, #7, mul vl] // 2-byte Folded Reload
; CONTIGUOUS-NEXT: ldr z23, [sp, #1, mul vl] // 16-byte Folded Reload
; CONTIGUOUS-NEXT: ldr z22, [sp, #2, mul vl] // 16-byte Folded Reload
; CONTIGUOUS-NEXT: ldr z21, [sp, #3, mul vl] // 16-byte Folded Reload
@@ -315,6 +314,7 @@ define <vscale x 16 x i16> @ldnt1_x2_i16_z0_z8(<vscale x 8 x i16> %unused, <vsca
; CONTIGUOUS-NEXT: ldr z11, [sp, #13, mul vl] // 16-byte Folded Reload
; CONTIGUOUS-NEXT: ldr z10, [sp, #14, mul vl] // 16-byte Folded Reload
; CONTIGUOUS-NEXT: ldr z9, [sp, #15, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT: ldr p8, [sp, #7, mul vl] // 2-byte Folded Reload
; CONTIGUOUS-NEXT: addvl sp, sp, #16
; CONTIGUOUS-NEXT: ldr x29, [sp], #16 // 8-byte Folded Reload
; CONTIGUOUS-NEXT: ret
@@ -407,7 +407,6 @@ define <vscale x 16 x i16> @ldnt1_x2_i16_z0_z8_scalar(<vscale x 8 x i16> %unused
; CONTIGUOUS-NEXT: ldr z0, [sp]
; CONTIGUOUS-NEXT: ldr z1, [sp, #1, mul vl]
; CONTIGUOUS-NEXT: addvl sp, sp, #2
-; CONTIGUOUS-NEXT: ldr p8, [sp, #7, mul vl] // 2-byte Folded Reload
; CONTIGUOUS-NEXT: ldr z23, [sp, #1, mul vl] // 16-byte Folded Reload
; CONTIGUOUS-NEXT: ldr z22, [sp, #2, mul vl] // 16-byte Folded Reload
; CONTIGUOUS-NEXT: ldr z21, [sp, #3, mul vl] // 16-byte Folded Reload
@@ -423,6 +422,7 @@ define <vscale x 16 x i16> @ldnt1_x2_i16_z0_z8_scalar(<vscale x 8 x i16> %unused
; CONTIGUOUS-NEXT: ldr z11, [sp, #13, mul vl] // 16-byte Folded Reload
; CONTIGUOUS-NEXT: ldr z10, [sp, #14, mul vl] // 16-byte Folded Reload
; CONTIGUOUS-NEXT: ldr z9, [sp, #15, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT: ldr p8, [sp, #7, mul vl] // 2-byte Folded Reload
; CONTIGUOUS-NEXT: addvl sp, sp, #16
; CONTIGUOUS-NEXT: ldr x29, [sp], #16 // 8-byte Folded Reload
; CONTIGUOUS-NEXT: ret
@@ -516,7 +516,6 @@ define <vscale x 8 x i32> @ldnt1_x2_i32_z0_z8(<vscale x 4 x i32> %unused, <vscal
; CONTIGUOUS-NEXT: ldr z0, [sp]
; CONTIGUOUS-NEXT: ldr z1, [sp, #1, mul vl]
; CONTIGUOUS-NEXT: addvl sp, sp, #2
-; CONTIGUOUS-NEXT: ldr p8, [sp, #7, mul vl] // 2-byte Folded Reload
; CONTIGUOUS-NEXT: ldr z23, [sp, #1, mul vl] // 16-byte Folded Reload
; CONTIGUOUS-NEXT: ldr z22, [sp, #2, mul vl] // 16-byte Folded Reload
; CONTIGUOUS-NEXT: ldr z21, [sp, #3, mul vl] // 16-byte Folded Reload
@@ -532,6 +531,7 @@ define <vscale x 8 x i32> @ldnt1_x2_i32_z0_z8(<vscale x 4 x i32> %unused, <vscal
; CONTIGUOUS-NEXT: ldr z11, [sp, #13, mul vl] // 16-byte Folded Reload
; CONTIGUOUS-NEXT: ldr z10, [sp, #14, mul vl] // 16-byte Folded Reload
; CONTIGUOUS-NEXT: ldr z9, [sp, #15, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT: ldr p8, [sp, #7, mul vl] // 2-byte Folded Reload
; CONTIGUOUS-NEXT: addvl sp, sp, #16
; CONTIGUOUS-NEXT: ldr x29, [sp], #16 // 8-byte Folded Reload
; CONTIGUOUS-NEXT: ret
@@ -624,7 +624,6 @@ define <vscale x 8 x i32> @ldnt1_x2_i32_z0_z8_scalar(<vscale x 4 x i32> %unused,
; CONTIGUOUS-NEXT: ldr z0, [sp]
; CONTIGUOUS-NEXT: ldr z1, [sp, #1, mul vl]
; CONTIGUOUS-NEXT: addvl sp, sp, #2
-; CONTIGUOUS-NEXT: ldr p8, [sp, #7, mul vl] // 2-byte Folded Reload
; CONTIGUOUS-NEXT: ldr z23, [sp, #1, mul vl] // 16-byte Folded Reload
; CONTIGUOUS-NEXT: ldr z22, [sp, #2, mul vl] // 16-byte Folded Reload
; CONTIGUOUS-NEXT: ldr z21, [sp, #3, mul vl] // 16-byte Folded Reload
@@ -640,6 +639,7 @@ define <vscale x 8 x i32> @ldnt1_x2_i32_z0_z8_scalar(<vscale x 4 x i32> %unused,
; CONTIGUOUS-NEXT: ldr z11, [sp, #13, mul vl] // 16-byte Folded Reload
; CONTIGUOUS-NEXT: ldr z10, [sp, #14, mul vl] // 16-byte Folded Reload
; CONTIGUOUS-NEXT: ldr z9, [sp, #15, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT: ldr p8, [sp, #7, mul vl] // 2-byte Folded Reload
; CONTIGUOUS-NEXT: addvl sp, sp, #16
; CONTIGUOUS-NEXT: ldr x29, [sp], #16 // 8-byte Folded Reload
; CONTIGUOUS-NEXT: ret
@@ -733,7 +733,6 @@ define <vscale x 4 x i64> @ldnt1_x2_i64_z0_z8(<vscale x 2 x i64> %unused, <vscal
; CONTIGUOUS-NEXT: ldr z0, [sp]
; CONTIGUOUS-NEXT: ldr z1, [sp, #1, mul vl]
; CONTIGUOUS-NEXT: addvl sp, sp, #2
-; CONTIGUOUS-NEXT: ldr p8, [sp, #7, mul vl] // 2-byte Folded Reload
; CONTIGUOUS-NEXT: ldr z23, [sp, #1, mul vl] // 16-byte Folded Reload
; CONTIGUOUS-NEXT: ldr z22, [sp, #2, mul vl] // 16-byte Folded Reload
; CONTIGUOUS-NEXT: ldr z21, [sp, #3, mul vl] // 16-byte Folded Reload
@@ -749,6 +748,7 @@ define <vscale x 4 x i64> @ldnt1_x2_i64_z0_z8(<vscale x 2 x i64> %unused, <vscal
; CONTIGUOUS-NEXT: ldr z11, [sp, #13, mul vl] // 16-byte Folded Reload
; CONTIGUOUS-NEXT: ldr z10, [sp, #14, mul vl] // 16-byte Folded Reload
; CONTIGUOUS-NEXT: ldr z9, [sp, #15, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT: ldr p8, [sp, #7, mul vl] // 2-byte Folded Reload
; CONTIGUOUS-NEXT: addvl sp, sp, #16
; CONTIGUOUS-NEXT: ldr x29, [sp], #16 // 8-byte Folded Reload
; CONTIGUOUS-NEXT: ret
@@ -841,7 +841,6 @@ define <vscale x 4 x i64> @ldnt1_x2_i64_z0_z8_scalar(<vscale x 2 x i64> %unused,
; CONTIGUOUS-NEXT: ldr z0, [sp]
; CONTIGUOUS-NEXT: ldr z1, [sp, #1, mul vl]
; CONTIGUOUS-NEXT: addvl sp, sp, #2
-; CONTIGUOUS-NEXT: ldr p8, [sp, #7, mul vl] // 2-byte Folded Reload
; CONTIGUOUS-NEXT: ldr z23, [sp, #1, mul vl] // 16-byte Folded Reload
; CONTIGUOUS-NEXT: ldr z22, [sp, #2, mul vl] // 16-byte Folded Reload
; CONTIGUOUS-NEXT: ldr z21, [sp, #3, mul vl] // 16-byte Folded Reload
@@ -857,6 +856,7 @@ define <vscale x 4 x i64> @ldnt1_x2_i64_z0_z8_scalar(<vscale x 2 x i64> %unused,
; CONTIGUOUS-NEXT: ldr z11, [sp, #13, mul vl] // 16-byte Folded Reload
; CONTIGUOUS-NEXT: ldr z10, [sp, #14, mul vl] // 16-byte Folded Reload
; CONTIGUOUS-NEXT: ldr z9, [sp, #15, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT: ldr p8, [sp, #7, mul vl] // 2-byte Folded Reload
; CONTIGUOUS-NEXT: addvl sp, sp, #16
; CONTIGUOUS-NEXT: ldr x29, [sp], #16 // 8-byte Folded Reload
; CONTIGUOUS-NEXT: ret
@@ -955,7 +955,6 @@ define <vscale x 64 x i8> @ldnt1_x4_i8_z0_z4_z8_z12(<vscale x 16 x i8> %unused,
; CONTIGUOUS-NEXT: ldr z2, [sp, #2, mul vl]
; CONTIGUOUS-NEXT: ldr z3, [sp, #3, mul vl]
; CONTIGUOUS-NEXT: addvl sp, sp, #4
-; CONTIGUOUS-NEXT: ldr p8, [sp, #7, mul vl] // 2-byte Folded Reload
; CONTIGUOUS-NEXT: ldr z23, [sp, #1, mul vl] // 16-byte Folded Reload
; CONTIGUOUS-NEXT: ldr z22, [sp, #2, mul vl] // 16-byte Folded Reload
; CONTIGUOUS-NEXT: ldr z21, [sp, #3, mul vl] // 16-byte Folded Reload
@@ -970,6 +969,7 @@ define <vscale x 64 x i8> @ldnt1_x4_i8_z0_z4_z8_z12(<vscale x 16 x i8> %unused,
; CONTIGUOUS-NEXT: ldr z11, [sp, #12, mul vl] // 16-byte Folded Reload
; CONTIGUOUS-NEXT: ldr z10, [sp, #13, mul vl] // 16-byte Folded Reload
; CONTIGUOUS-NEXT: ldr z9, [sp, #14, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT: ldr p8, [sp, #7, mul vl] // 2-byte Folded Reload
; CONTIGUOUS-NEXT: addvl sp, sp, #15
; CONTIGUOUS-NEXT: ldr x29, [sp], #16 // 8-byte Folded Reload
; CONTIGUOUS-NEXT: ret
@@ -1071,7 +1071,6 @@ define <vscale x 64 x i8> @ldnt1_x4_i8_z0_z4_z8_z12_scalar(<vscale x 16 x i8> %u
; CONTIGUOUS-NEXT: ldr z2, [sp, #2, mul vl]
; CONTIGUOUS-NEXT: ldr z3, [sp, #3, mul vl]
; CONTIGUOUS-NEXT: addvl sp, sp, #4
-; CONTIGUOUS-NEXT: ldr p8, [sp, #7, mul vl] // 2-byte Folded Reload
; CONTIGUOUS-NEXT: ldr z23, [sp, #1, mul vl] // 16-byte Folded Reload
; CONTIGUOUS-NEXT: ldr z22, [sp, #2, mul vl] // 16-byte Folded Reload
; CONTIGUOUS-NEXT: ldr z21, [sp, #3, mul vl] // 16-byte Folded Reload
@@ -1086,6 +1085,7 @@ define <vscale x 64 x i8> @ldnt1_x4_i8_z0_z4_z8_z12_scalar(<vscale x 16 x i8> %u
; CONTIGUOUS-NEXT: ldr z11, [sp, #12, mul vl] // 16-byte Folded Reload
; CONTIGUOUS-NEXT: ldr z10, [sp, #13, mul vl] // 16-byte Folded Reload
; CONTIGUOUS-NEXT: ldr z9, [sp, #14, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT: ldr p8, [sp, #7, mul vl] // 2-byte Folded Reload
; CONTIGUOUS-NEXT: addvl sp, sp, #15
; CONTIGUOUS-NEXT: ldr x29, [sp], #16 // 8-byte Folded Reload
; CONTIGUOUS-NEXT: ret
@@ -1188,7 +1188,6 @@ define <vscale x 32 x i16> @ldnt1_x4_i16_z0_z4_z8_z12(<vscale x 8 x i16> %unused
; CONTIGUOUS-NEXT: ldr z2, [sp, #2, mul vl]
; CONTIGUOUS-NEXT: ldr z3, [sp, #3, mul vl]
; CONTIGUOUS-NEXT: addvl sp, sp, #4
-; CONTIGUOUS-NEXT: ldr p8, [sp, #7, mul vl] // 2-byte Folded Reload
; CONTIGUOUS-NEXT: ldr z23, [sp, #1, mul vl] // 16-byte Folded Reload
; CONTIGUOUS-NEXT: ldr z22, [sp, #2, mul vl] // 16-byte Folded Reload
; CONTIGUOUS-NEXT: ldr z21, [sp, #3, mul vl] // 16-byte Folded Reload
@@ -1203,6 +1202,7 @@ define <vscale x 32 x i16> @ldnt1_x4_i16_z0_z4_z8_z12(<vscale x 8 x i16> %unused
; CONTIGUOUS-NEXT: ldr z11, [sp, #12, mul vl] // 16-byte Folded Reload
; CONTIGUOUS-NEXT: ldr z10, [sp, #13, mul vl] // 16-byte Folded Reload
; CONTIGUOUS-NEXT: ldr z9, [sp, #14, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT: ldr p8, [sp, #7, mul vl] // 2-byte Folded Reload
; CONTIGUOUS-NEXT: addvl sp, sp, #15
; CONTIGUOUS-NEXT: ldr x29, [sp], #16 // 8-byte Folded Reload
; CONTIGUOUS-NEXT: ret
@@ -1304,7 +1304,6 @@ define <vscale x 32 x i16> @ldnt1_x4_i16_z0_z4_z8_z12_scalar(<vscale x 8 x i16>
; CONTIGUOUS-NEXT: ldr z2, [sp, #2, mul vl]
; CONTIGUOUS-NEXT: ldr z3, [sp, #3, mul vl]
; CONTIGUOUS-NEXT: addvl sp, sp, #4
-; CONTIGUOUS-NEXT: ldr p8, [sp, #7, mul vl] // 2-byte Folded Reload
; CONTIGUOUS-NEXT: ldr z23, [sp, #1, mul vl] // 16-byte Folded Reload
; CONTIGUOUS-NEXT: ldr z22, [sp, #2, mul vl] // 16-byte Folded Reload
; CONTIGUOUS-NEXT: ldr z21, [sp, #3, mul vl] // 16-byte Folded Reload
@@ -1319,6 +1318,7 @@ define <vscale x 32 x i16> @ldnt1_x4_i16_z0_z4_z8_z12_scalar(<vscale x 8 x i16>
; CONTIGUOUS-NEXT: ldr z11, [sp, #12, mul vl] // 16-byte Folded Reload
; CONTIGUOUS-NEXT: ldr z10, [sp, #13, mul vl] // 16-byte Folded Reload
; CONTIGUOUS-NEXT: ldr z9, [sp, #14, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT: ldr p8, [sp, #7, mul vl] // 2-byte Folded Reload
; CONTIGUOUS-NEXT: addvl sp, sp, #15
; CONTIGUOUS-NEXT: ldr x29, [sp], #16 // 8-byte Folded Reload
; CONTIGUOUS-NEXT: ret
@@ -1421,7 +1421,6 @@ define <vscale x 16 x i32> @ldnt1_x4_i32_z0_z4_z8_z12(<vscale x 4 x i32> %unused
; CONTIGUOUS-NEXT: ldr z2, [sp, #2, mul vl]
; CONTIGUOUS-NEXT: ldr z3, [sp, #3, mul vl]
; CONTIGUOUS-NEXT: addvl sp, sp, #4
-; CONTIGUOUS-NEXT: ldr p8, [sp, #7, mul vl] // 2-byte Folded Reload
; CONTIGUOUS-NEXT: ldr z23, [sp, #1, mul vl] // 16-byte Folded Reload
; CONTIGUOUS-NEXT: ldr z22, [sp, #2, mul vl] // 16-byte Folded Reload
; CONTIGUOUS-NEXT: ldr z21, [sp, #3, mul vl] // 16-byte Folded Reload
@@ -1436,6 +1435,7 @@ define <vscale x 16 x i32> @ldnt1_x4_i32_z0_z4_z8_z12(<vscale x 4 x i32> %unused
; CONTIGUOUS-NEXT: ldr z11, [sp, #12, mul vl] // 16-byte Folded Reload
; CONTIGUOUS-NEXT: ldr z10, [sp, #13, mul vl] // 16-byte Folded Reload
; CONTIGUOUS-NEXT: ldr z9, [sp, #14, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT: ldr p8, [sp, #7, mul vl] // 2-byte Folded Reload
; CONTIGUOUS-NEXT: addvl sp, sp, #15
; CONTIGUOUS-NEXT: ldr x29, [sp], #16 // 8-byte Folded Reload
; CONTIGUOUS-NEXT: ret
@@ -1537,7 +1537,6 @@ define <vscale x 16 x i32> @ldnt1_x4_i32_z0_z4_z8_z12_scalar(<vscale x 4 x i32>
; CONTIGUOUS-NEXT: ldr z2, [sp, #2, mul vl]
; CONTIGUOUS-NEXT: ldr z3, [sp, #3, mul vl]
; CONTIGUOUS-NEXT: addvl sp, sp, #4
-; CONTIGUOUS-NEXT: ldr p8, [sp, #7, mul vl] // 2-byte Folded Reload
; CONTIGUOUS-NEXT: ldr z23, [sp, #1, mul vl] // 16-byte Folded Reload
; CONTIGUOUS-NEXT: ldr z22, [sp, #2, mul vl] // 16-byte Folded Reload
; CONTIGUOUS-NEXT: ldr z21, [sp, #3, mul vl] // 16-byte Folded Reload
@@ -1552,6 +1551,7 @@ define <vscale x 16 x i32> @ldnt1_x4_i32_z0_z4_z8_z12_scalar(<vscale x 4 x i32>
; CONTIGUOUS-NEXT: ldr z11, [sp, #12, mul vl] // 16-byte Folded Reload
; CONTIGUOUS-NEXT: ldr z10, [sp, #13, mul vl] // 16-byte Folded Reload
; CONTIGUOUS-NEXT: ldr z9, [sp, #14, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT: ldr p8, [sp, #7, mul vl] // 2-byte Folded Reload
; CONTIGUOUS-NEXT: addvl sp, sp, #15
; CONTIGUOUS-NEXT: ldr x29, [sp], #16 // 8-byte Folded Reload
; CONTIGUOUS-NEXT: ret
@@ -1654,7 +1654,6 @@ define <vscale x 8 x i64> @ldnt1_x4_i64_z0_z4_z8_z12(<vscale x 2 x i64> %unused,
; CONTIGUOUS-NEXT: ldr z2, [sp, #2, mul vl]
; CONTIGUOUS-NEXT: ldr z3, [sp, #3, mul vl]
; CONTIGUOUS-NEXT: addvl sp, sp, #4
-; CONTIGUOUS-NEXT: ldr p8, [sp, #7, mul vl] // 2-byte Folded Reload
; CONTIGUOUS-NEXT: ldr z23, [sp, #1, mul vl] // 16-byte Folded Reload
; CONTIGUOUS-NEXT: ldr z22, [sp, #2, mul vl] // 16-byte Folded Reload
; CONTIGUOUS-NEXT: ldr z21, [sp, #3, mul vl] // 16-byte Folded Reload
@@ -1669,6 +1668,7 @@ define <vscale x 8 x i64> @ldnt1_x4_i64_z0_z4_z8_z12(<vscale x 2 x i64> %unused,
; CONTIGUOUS-NEXT: ldr z11, [sp, #12, mul vl] // 16-byte Folded Reload
; CONTIGUOUS-NEXT: ldr z10, [sp, #13, mul vl] // 16-byte Folded Reload
; CONTIGUOUS-NEXT: ldr z9, [sp, #14, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT: ldr p8, [sp, #7, mul vl] // 2-byte Folded Reload
; CONTIGUOUS-NEXT: addvl sp, sp, #15
; CONTIGUOUS-NEXT: ldr x29, [sp], #16 // 8-byte Folded Reload
; CONTIGUOUS-NEXT: ret
@@ -1770,7 +1770,6 @@ define <vscale x 8 x i64> @ldnt1_x4_i64_z0_z4_z8_z12_scalar(<vscale x 2 x i64> %
; CONTIGUOUS-NEXT: ldr z2, [sp, #2, mul vl]
; CONTIGUOUS-NEXT: ldr z3, [sp, #3, mul vl]
; CONTIGUOUS-NEXT: addvl sp, sp, #4
-; CONTIGUOUS-NEXT: ldr p8, [sp, #7, mul vl] // 2-byte Folded Reload
; CONTIGUOUS-NEXT: ldr z23, [sp, #1, mul vl] // 16-byte Folded Reload
; CONTIGUOUS-NEXT: ldr z22, [sp, #2, mul vl] // 16-byte Folded Reload
; CONTIGUOUS-NEXT: ldr z21, [sp, #3, mul vl] // 16-byte Folded Reload
@@ -1785,6 +1784,7 @@ define <vscale x 8 x i64> @ldnt1_x4_i64_z0_z4_z8_z12_scalar(<vscale x 2 x i64> %
; CONTIGUOUS-NEXT: ldr z11, [sp, #12, mul vl] // 16-byte Folded Reload
; CONTIGUOUS-NEXT: ldr z10, [sp, #13, mul vl] // 16-byte Folded Reload
; CONTIGUOUS-NEXT: ldr z9, [sp, #14, mul vl] // 16-byte Folded Reload
+; CONTIGUOUS-NEXT: ldr p8, [sp, #7, mul vl] // 2-byte Folded Reload
; CONTIGUOUS-NEXT: addvl sp, sp, #15
; CONTIGUOUS-NEXT: ldr x29, [sp], #16 // 8-byte Folded Reload
; CONTIGUOUS-NEXT: ret
diff --git a/llvm/test/CodeGen/AArch64/stack-probing-sve.ll b/llvm/test/CodeGen/AArch64/stack-probing-sve.ll
index 1ad78709d501..56d865ef83e6 100644
--- a/llvm/test/CodeGen/AArch64/stack-probing-sve.ll
+++ b/llvm/test/CodeGen/AArch64/stack-probing-sve.ll
@@ -380,7 +380,6 @@ define void @sve_16v_1p_csr(<vscale x 4 x float> %a) #0 {
; CHECK-NEXT: .cfi_escape 0x10, 0x4f, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x40, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d15 @ cfa - 16 - 64 * VG
; CHECK-NEXT: //APP
; CHECK-NEXT: //NO_APP
-; CHECK-NEXT: ldr p8, [sp, #7, mul vl] // 2-byte Folded Reload
; CHECK-NEXT: ldr z23, [sp, #1, mul vl] // 16-byte Folded Reload
; CHECK-NEXT: ldr z22, [sp, #2, mul vl] // 16-byte Folded Reload
; CHECK-NEXT: ldr z21, [sp, #3, mul vl] // 16-byte Folded Reload
@@ -397,6 +396,7 @@ define void @sve_16v_1p_csr(<vscale x 4 x float> %a) #0 {
; CHECK-NEXT: ldr z10, [sp, #14, mul vl] // 16-byte Folded Reload
; CHECK-NEXT: ldr z9, [sp, #15, mul vl] // 16-byte Folded Reload
; CHECK-NEXT: ldr z8, [sp, #16, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT: ldr p8, [sp, #7, mul vl] // 2-byte Folded Reload
; CHECK-NEXT: addvl sp, sp, #17
; CHECK-NEXT: .cfi_def_cfa wsp, 16
; CHECK-NEXT: .cfi_restore z8
@@ -697,10 +697,10 @@ define void @sve_unprobed_area(<vscale x 4 x float> %a, i32 %n) #0 {
; CHECK-NEXT: //NO_APP
; CHECK-NEXT: addvl sp, sp, #4
; CHECK-NEXT: .cfi_escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x20, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 32 * VG
-; CHECK-NEXT: ldr p9, [sp, #7, mul vl] // 2-byte Folded Reload
; CHECK-NEXT: ldr z10, [sp, #1, mul vl] // 16-byte Folded Reload
; CHECK-NEXT: ldr z9, [sp, #2, mul vl] // 16-byte Folded Reload
; CHECK-NEXT: ldr z8, [sp, #3, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT: ldr p9, [sp, #7, mul vl] // 2-byte Folded Reload
; CHECK-NEXT: addvl sp, sp, #4
; CHECK-NEXT: .cfi_def_cfa wsp, 16
; CHECK-NEXT: .cfi_restore z8
diff --git a/llvm/test/CodeGen/AArch64/strictfp_f16_abi_promote.ll b/llvm/test/CodeGen/AArch64/strictfp_f16_abi_promote.ll
index 37186cf22ccc..9fa5208cc8db 100644
--- a/llvm/test/CodeGen/AArch64/strictfp_f16_abi_promote.ll
+++ b/llvm/test/CodeGen/AArch64/strictfp_f16_abi_promote.ll
@@ -70,22 +70,20 @@ define void @v3f16_arg(<3 x half> %arg, ptr %ptr) #0 {
; NOFP16-NEXT: .cfi_offset w22, -32
; NOFP16-NEXT: .cfi_offset w30, -48
; NOFP16-NEXT: mov w21, w0
-; NOFP16-NEXT: and w0, w2, #0xffff
+; NOFP16-NEXT: and w0, w1, #0xffff
; NOFP16-NEXT: mov x19, x3
-; NOFP16-NEXT: mov w20, w1
+; NOFP16-NEXT: mov w20, w2
; NOFP16-NEXT: bl __gnu_h2f_ieee
; NOFP16-NEXT: mov w22, w0
; NOFP16-NEXT: and w0, w21, #0xffff
; NOFP16-NEXT: bl __gnu_h2f_ieee
-; NOFP16-NEXT: mov w21, w0
+; NOFP16-NEXT: mov w8, w0
; NOFP16-NEXT: and w0, w20, #0xffff
+; NOFP16-NEXT: orr x21, x8, x22, lsl #32
; NOFP16-NEXT: bl __gnu_h2f_ieee
-; NOFP16-NEXT: mov w8, w21
-; NOFP16-NEXT: // kill: def $w0 killed $w0 def $x0
-; NOFP16-NEXT: str w22, [x19, #8]
-; NOFP16-NEXT: orr x8, x8, x0, lsl #32
+; NOFP16-NEXT: str x21, [x19]
; NOFP16-NEXT: ldp x22, x21, [sp, #16] // 16-byte Folded Reload
-; NOFP16-NEXT: str x8, [x19]
+; NOFP16-NEXT: str w0, [x19, #8]
; NOFP16-NEXT: ldp x20, x19, [sp, #32] // 16-byte Folded Reload
; NOFP16-NEXT: ldr x30, [sp], #48 // 8-byte Folded Reload
; NOFP16-NEXT: ret
@@ -133,26 +131,107 @@ define void @v4f16_arg(<4 x half> %arg, ptr %ptr) #0 {
ret void
}
-; FIXME:
-; define half @f16_return(float %arg) #0 {
-; %fptrunc = call half @llvm.experimental.constrained.fptrunc.f16.f32(float %arg, metadata !"round.tonearest", metadata !"fpexcept.strict")
-; ret half %fptrunc
-; }
+ define half @f16_return(float %arg) #0 {
+; NOFP16-LABEL: f16_return:
+; NOFP16: // %bb.0:
+; NOFP16-NEXT: str x30, [sp, #-16]! // 8-byte Folded Spill
+; NOFP16-NEXT: .cfi_def_cfa_offset 16
+; NOFP16-NEXT: .cfi_offset w30, -16
+; NOFP16-NEXT: bl __gnu_f2h_ieee
+; NOFP16-NEXT: ldr x30, [sp], #16 // 8-byte Folded Reload
+; NOFP16-NEXT: ret
+ %fptrunc = call half @llvm.experimental.constrained.fptrunc.f16.f32(float %arg, metadata !"round.tonearest", metadata !"fpexcept.strict")
+ ret half %fptrunc
+ }
-; define <2 x half> @v2f16_return(<2 x float> %arg) #0 {
-; %fptrunc = call <2 x half> @llvm.experimental.constrained.fptrunc.v2f16.v2f32(<2 x float> %arg, metadata !"round.tonearest", metadata !"fpexcept.strict")
-; ret <2 x half> %fptrunc
-; }
+ define <2 x half> @v2f16_return(<2 x float> %arg) #0 {
+; NOFP16-LABEL: v2f16_return:
+; NOFP16: // %bb.0:
+; NOFP16-NEXT: str x30, [sp, #-32]! // 8-byte Folded Spill
+; NOFP16-NEXT: stp x20, x19, [sp, #16] // 16-byte Folded Spill
+; NOFP16-NEXT: .cfi_def_cfa_offset 32
+; NOFP16-NEXT: .cfi_offset w19, -8
+; NOFP16-NEXT: .cfi_offset w20, -16
+; NOFP16-NEXT: .cfi_offset w30, -32
+; NOFP16-NEXT: mov w19, w0
+; NOFP16-NEXT: mov w0, w1
+; NOFP16-NEXT: bl __gnu_f2h_ieee
+; NOFP16-NEXT: mov w20, w0
+; NOFP16-NEXT: mov w0, w19
+; NOFP16-NEXT: bl __gnu_f2h_ieee
+; NOFP16-NEXT: mov w1, w20
+; NOFP16-NEXT: ldp x20, x19, [sp, #16] // 16-byte Folded Reload
+; NOFP16-NEXT: ldr x30, [sp], #32 // 8-byte Folded Reload
+; NOFP16-NEXT: ret
+ %fptrunc = call <2 x half> @llvm.experimental.constrained.fptrunc.v2f16.v2f32(<2 x float> %arg, metadata !"round.tonearest", metadata !"fpexcept.strict")
+ ret <2 x half> %fptrunc
+ }
-; define <3 x half> @v3f16_return(<3 x float> %arg) #0 {
-; %fptrunc = call <3 x half> @llvm.experimental.constrained.fptrunc.v3f16.v3f32(<3 x float> %arg, metadata !"round.tonearest", metadata !"fpexcept.strict")
-; ret <3 x half> %fptrunc
-; }
+ define <3 x half> @v3f16_return(<3 x float> %arg) #0 {
+; NOFP16-LABEL: v3f16_return:
+; NOFP16: // %bb.0:
+; NOFP16-NEXT: stp x30, x21, [sp, #-32]! // 16-byte Folded Spill
+; NOFP16-NEXT: stp x20, x19, [sp, #16] // 16-byte Folded Spill
+; NOFP16-NEXT: .cfi_def_cfa_offset 32
+; NOFP16-NEXT: .cfi_offset w19, -8
+; NOFP16-NEXT: .cfi_offset w20, -16
+; NOFP16-NEXT: .cfi_offset w21, -24
+; NOFP16-NEXT: .cfi_offset w30, -32
+; NOFP16-NEXT: mov w20, w0
+; NOFP16-NEXT: mov w0, w2
+; NOFP16-NEXT: mov w19, w1
+; NOFP16-NEXT: bl __gnu_f2h_ieee
+; NOFP16-NEXT: mov w21, w0
+; NOFP16-NEXT: mov w0, w19
+; NOFP16-NEXT: bl __gnu_f2h_ieee
+; NOFP16-NEXT: mov w19, w0
+; NOFP16-NEXT: mov w0, w20
+; NOFP16-NEXT: bl __gnu_f2h_ieee
+; NOFP16-NEXT: mov w1, w19
+; NOFP16-NEXT: ldp x20, x19, [sp, #16] // 16-byte Folded Reload
+; NOFP16-NEXT: mov w2, w21
+; NOFP16-NEXT: ldp x30, x21, [sp], #32 // 16-byte Folded Reload
+; NOFP16-NEXT: ret
+ %fptrunc = call <3 x half> @llvm.experimental.constrained.fptrunc.v3f16.v3f32(<3 x float> %arg, metadata !"round.tonearest", metadata !"fpexcept.strict")
+ ret <3 x half> %fptrunc
+ }
-; define <4 x half> @v4f16_return(<4 x float> %arg) #0 {
-; %fptrunc = call <4 x half> @llvm.experimental.constrained.fptrunc.v4f16.v4f32(<4 x float> %arg, metadata !"round.tonearest", metadata !"fpexcept.strict")
-; ret <4 x half> %fptrunc
-; }
+ define <4 x half> @v4f16_return(<4 x float> %arg) #0 {
+; NOFP16-LABEL: v4f16_return:
+; NOFP16: // %bb.0:
+; NOFP16-NEXT: str x30, [sp, #-48]! // 8-byte Folded Spill
+; NOFP16-NEXT: stp x22, x21, [sp, #16] // 16-byte Folded Spill
+; NOFP16-NEXT: stp x20, x19, [sp, #32] // 16-byte Folded Spill
+; NOFP16-NEXT: .cfi_def_cfa_offset 48
+; NOFP16-NEXT: .cfi_offset w19, -8
+; NOFP16-NEXT: .cfi_offset w20, -16
+; NOFP16-NEXT: .cfi_offset w21, -24
+; NOFP16-NEXT: .cfi_offset w22, -32
+; NOFP16-NEXT: .cfi_offset w30, -48
+; NOFP16-NEXT: mov w21, w0
+; NOFP16-NEXT: mov w0, w3
+; NOFP16-NEXT: mov w19, w2
+; NOFP16-NEXT: mov w20, w1
+; NOFP16-NEXT: bl __gnu_f2h_ieee
+; NOFP16-NEXT: mov w22, w0
+; NOFP16-NEXT: mov w0, w19
+; NOFP16-NEXT: bl __gnu_f2h_ieee
+; NOFP16-NEXT: mov w19, w0
+; NOFP16-NEXT: mov w0, w20
+; NOFP16-NEXT: bl __gnu_f2h_ieee
+; NOFP16-NEXT: mov w20, w0
+; NOFP16-NEXT: mov w0, w21
+; NOFP16-NEXT: bl __gnu_f2h_ieee
+; NOFP16-NEXT: mov w1, w20
+; NOFP16-NEXT: mov w2, w19
+; NOFP16-NEXT: mov w3, w22
+; NOFP16-NEXT: ldp x20, x19, [sp, #32] // 16-byte Folded Reload
+; NOFP16-NEXT: ldp x22, x21, [sp, #16] // 16-byte Folded Reload
+; NOFP16-NEXT: ldr x30, [sp], #48 // 8-byte Folded Reload
+; NOFP16-NEXT: ret
+ %fptrunc = call <4 x half> @llvm.experimental.constrained.fptrunc.v4f16.v4f32(<4 x float> %arg, metadata !"round.tonearest", metadata !"fpexcept.strict")
+ ret <4 x half> %fptrunc
+ }
; FIXME:
; define void @outgoing_f16_arg(ptr %ptr) #0 {
@@ -182,46 +261,17 @@ define void @v4f16_arg(<4 x half> %arg, ptr %ptr) #0 {
define void @outgoing_v4f16_return(ptr %ptr) #0 {
; NOFP16-LABEL: outgoing_v4f16_return:
; NOFP16: // %bb.0:
-; NOFP16-NEXT: stp x30, x23, [sp, #-48]! // 16-byte Folded Spill
-; NOFP16-NEXT: stp x22, x21, [sp, #16] // 16-byte Folded Spill
-; NOFP16-NEXT: stp x20, x19, [sp, #32] // 16-byte Folded Spill
-; NOFP16-NEXT: .cfi_def_cfa_offset 48
+; NOFP16-NEXT: stp x30, x19, [sp, #-16]! // 16-byte Folded Spill
+; NOFP16-NEXT: .cfi_def_cfa_offset 16
; NOFP16-NEXT: .cfi_offset w19, -8
-; NOFP16-NEXT: .cfi_offset w20, -16
-; NOFP16-NEXT: .cfi_offset w21, -24
-; NOFP16-NEXT: .cfi_offset w22, -32
-; NOFP16-NEXT: .cfi_offset w23, -40
-; NOFP16-NEXT: .cfi_offset w30, -48
+; NOFP16-NEXT: .cfi_offset w30, -16
; NOFP16-NEXT: mov x19, x0
; NOFP16-NEXT: bl v4f16_result
-; NOFP16-NEXT: and w0, w0, #0xffff
-; NOFP16-NEXT: mov w20, w1
-; NOFP16-NEXT: mov w21, w2
-; NOFP16-NEXT: mov w22, w3
-; NOFP16-NEXT: bl __gnu_h2f_ieee
-; NOFP16-NEXT: mov w23, w0
-; NOFP16-NEXT: and w0, w20, #0xffff
-; NOFP16-NEXT: bl __gnu_h2f_ieee
-; NOFP16-NEXT: mov w20, w0
-; NOFP16-NEXT: and w0, w21, #0xffff
-; NOFP16-NEXT: bl __gnu_h2f_ieee
-; NOFP16-NEXT: mov w21, w0
-; NOFP16-NEXT: and w0, w22, #0xffff
-; NOFP16-NEXT: bl __gnu_h2f_ieee
-; NOFP16-NEXT: bl __gnu_f2h_ieee
-; NOFP16-NEXT: strh w0, [x19, #6]
-; NOFP16-NEXT: mov w0, w21
-; NOFP16-NEXT: bl __gnu_f2h_ieee
-; NOFP16-NEXT: strh w0, [x19, #4]
-; NOFP16-NEXT: mov w0, w20
-; NOFP16-NEXT: bl __gnu_f2h_ieee
-; NOFP16-NEXT: strh w0, [x19, #2]
-; NOFP16-NEXT: mov w0, w23
-; NOFP16-NEXT: bl __gnu_f2h_ieee
+; NOFP16-NEXT: strh w2, [x19, #4]
+; NOFP16-NEXT: strh w3, [x19, #6]
+; NOFP16-NEXT: strh w1, [x19, #2]
; NOFP16-NEXT: strh w0, [x19]
-; NOFP16-NEXT: ldp x20, x19, [sp, #32] // 16-byte Folded Reload
-; NOFP16-NEXT: ldp x22, x21, [sp, #16] // 16-byte Folded Reload
-; NOFP16-NEXT: ldp x30, x23, [sp], #48 // 16-byte Folded Reload
+; NOFP16-NEXT: ldp x30, x19, [sp], #16 // 16-byte Folded Reload
; NOFP16-NEXT: ret
%val = call <4 x half> @v4f16_result()
store <4 x half> %val, ptr %ptr
@@ -231,82 +281,21 @@ define void @outgoing_v4f16_return(ptr %ptr) #0 {
define void @outgoing_v8f16_return(ptr %ptr) #0 {
; NOFP16-LABEL: outgoing_v8f16_return:
; NOFP16: // %bb.0:
-; NOFP16-NEXT: stp x30, x27, [sp, #-80]! // 16-byte Folded Spill
-; NOFP16-NEXT: stp x26, x25, [sp, #16] // 16-byte Folded Spill
-; NOFP16-NEXT: stp x24, x23, [sp, #32] // 16-byte Folded Spill
-; NOFP16-NEXT: stp x22, x21, [sp, #48] // 16-byte Folded Spill
-; NOFP16-NEXT: stp x20, x19, [sp, #64] // 16-byte Folded Spill
-; NOFP16-NEXT: .cfi_def_cfa_offset 80
+; NOFP16-NEXT: stp x30, x19, [sp, #-16]! // 16-byte Folded Spill
+; NOFP16-NEXT: .cfi_def_cfa_offset 16
; NOFP16-NEXT: .cfi_offset w19, -8
-; NOFP16-NEXT: .cfi_offset w20, -16
-; NOFP16-NEXT: .cfi_offset w21, -24
-; NOFP16-NEXT: .cfi_offset w22, -32
-; NOFP16-NEXT: .cfi_offset w23, -40
-; NOFP16-NEXT: .cfi_offset w24, -48
-; NOFP16-NEXT: .cfi_offset w25, -56
-; NOFP16-NEXT: .cfi_offset w26, -64
-; NOFP16-NEXT: .cfi_offset w27, -72
-; NOFP16-NEXT: .cfi_offset w30, -80
+; NOFP16-NEXT: .cfi_offset w30, -16
; NOFP16-NEXT: mov x19, x0
; NOFP16-NEXT: bl v8f16_result
-; NOFP16-NEXT: and w0, w0, #0xffff
-; NOFP16-NEXT: mov w21, w1
-; NOFP16-NEXT: mov w22, w2
-; NOFP16-NEXT: mov w23, w3
-; NOFP16-NEXT: mov w24, w4
-; NOFP16-NEXT: mov w25, w5
-; NOFP16-NEXT: mov w26, w6
-; NOFP16-NEXT: mov w27, w7
-; NOFP16-NEXT: bl __gnu_h2f_ieee
-; NOFP16-NEXT: mov w20, w0
-; NOFP16-NEXT: and w0, w21, #0xffff
-; NOFP16-NEXT: bl __gnu_h2f_ieee
-; NOFP16-NEXT: mov w21, w0
-; NOFP16-NEXT: and w0, w22, #0xffff
-; NOFP16-NEXT: bl __gnu_h2f_ieee
-; NOFP16-NEXT: mov w22, w0
-; NOFP16-NEXT: and w0, w23, #0xffff
-; NOFP16-NEXT: bl __gnu_h2f_ieee
-; NOFP16-NEXT: mov w23, w0
-; NOFP16-NEXT: and w0, w24, #0xffff
-; NOFP16-NEXT: bl __gnu_h2f_ieee
-; NOFP16-NEXT: mov w24, w0
-; NOFP16-NEXT: and w0, w25, #0xffff
-; NOFP16-NEXT: bl __gnu_h2f_ieee
-; NOFP16-NEXT: mov w25, w0
-; NOFP16-NEXT: and w0, w26, #0xffff
-; NOFP16-NEXT: bl __gnu_h2f_ieee
-; NOFP16-NEXT: mov w26, w0
-; NOFP16-NEXT: and w0, w27, #0xffff
-; NOFP16-NEXT: bl __gnu_h2f_ieee
-; NOFP16-NEXT: bl __gnu_f2h_ieee
-; NOFP16-NEXT: strh w0, [x19, #14]
-; NOFP16-NEXT: mov w0, w26
-; NOFP16-NEXT: bl __gnu_f2h_ieee
-; NOFP16-NEXT: strh w0, [x19, #12]
-; NOFP16-NEXT: mov w0, w25
-; NOFP16-NEXT: bl __gnu_f2h_ieee
-; NOFP16-NEXT: strh w0, [x19, #10]
-; NOFP16-NEXT: mov w0, w24
-; NOFP16-NEXT: bl __gnu_f2h_ieee
-; NOFP16-NEXT: strh w0, [x19, #8]
-; NOFP16-NEXT: mov w0, w23
-; NOFP16-NEXT: bl __gnu_f2h_ieee
-; NOFP16-NEXT: strh w0, [x19, #6]
-; NOFP16-NEXT: mov w0, w22
-; NOFP16-NEXT: bl __gnu_f2h_ieee
-; NOFP16-NEXT: strh w0, [x19, #4]
-; NOFP16-NEXT: mov w0, w21
-; NOFP16-NEXT: bl __gnu_f2h_ieee
-; NOFP16-NEXT: strh w0, [x19, #2]
-; NOFP16-NEXT: mov w0, w20
-; NOFP16-NEXT: bl __gnu_f2h_ieee
+; NOFP16-NEXT: strh w5, [x19, #10]
+; NOFP16-NEXT: strh w7, [x19, #14]
+; NOFP16-NEXT: strh w6, [x19, #12]
+; NOFP16-NEXT: strh w4, [x19, #8]
+; NOFP16-NEXT: strh w3, [x19, #6]
+; NOFP16-NEXT: strh w2, [x19, #4]
+; NOFP16-NEXT: strh w1, [x19, #2]
; NOFP16-NEXT: strh w0, [x19]
-; NOFP16-NEXT: ldp x20, x19, [sp, #64] // 16-byte Folded Reload
-; NOFP16-NEXT: ldp x22, x21, [sp, #48] // 16-byte Folded Reload
-; NOFP16-NEXT: ldp x24, x23, [sp, #32] // 16-byte Folded Reload
-; NOFP16-NEXT: ldp x26, x25, [sp, #16] // 16-byte Folded Reload
-; NOFP16-NEXT: ldp x30, x27, [sp], #80 // 16-byte Folded Reload
+; NOFP16-NEXT: ldp x30, x19, [sp], #16 // 16-byte Folded Reload
; NOFP16-NEXT: ret
%val = call <8 x half> @v8f16_result()
store <8 x half> %val, ptr %ptr
diff --git a/llvm/test/CodeGen/AArch64/sve-alloca.ll b/llvm/test/CodeGen/AArch64/sve-alloca.ll
index 47e49b84aaaf..d227538043fc 100644
--- a/llvm/test/CodeGen/AArch64/sve-alloca.ll
+++ b/llvm/test/CodeGen/AArch64/sve-alloca.ll
@@ -66,30 +66,30 @@ define void @foo(<vscale x 4 x i64> %dst, i1 %cond) {
; CHECK-NEXT: st1d { z0.d }, p0, [x0]
; CHECK-NEXT: bl bar
; CHECK-NEXT: addvl sp, x29, #-18
-; CHECK-NEXT: ldr p15, [sp, #4, mul vl] // 2-byte Folded Reload
; CHECK-NEXT: ldr z23, [sp, #2, mul vl] // 16-byte Folded Reload
; CHECK-NEXT: ldr z22, [sp, #3, mul vl] // 16-byte Folded Reload
-; CHECK-NEXT: ldr p14, [sp, #5, mul vl] // 2-byte Folded Reload
; CHECK-NEXT: ldr z21, [sp, #4, mul vl] // 16-byte Folded Reload
; CHECK-NEXT: ldr z20, [sp, #5, mul vl] // 16-byte Folded Reload
-; CHECK-NEXT: ldr p13, [sp, #6, mul vl] // 2-byte Folded Reload
; CHECK-NEXT: ldr z19, [sp, #6, mul vl] // 16-byte Folded Reload
; CHECK-NEXT: ldr z18, [sp, #7, mul vl] // 16-byte Folded Reload
-; CHECK-NEXT: ldr p12, [sp, #7, mul vl] // 2-byte Folded Reload
; CHECK-NEXT: ldr z17, [sp, #8, mul vl] // 16-byte Folded Reload
; CHECK-NEXT: ldr z16, [sp, #9, mul vl] // 16-byte Folded Reload
-; CHECK-NEXT: ldr p11, [sp, #8, mul vl] // 2-byte Folded Reload
; CHECK-NEXT: ldr z15, [sp, #10, mul vl] // 16-byte Folded Reload
; CHECK-NEXT: ldr z14, [sp, #11, mul vl] // 16-byte Folded Reload
-; CHECK-NEXT: ldr p10, [sp, #9, mul vl] // 2-byte Folded Reload
; CHECK-NEXT: ldr z13, [sp, #12, mul vl] // 16-byte Folded Reload
; CHECK-NEXT: ldr z12, [sp, #13, mul vl] // 16-byte Folded Reload
-; CHECK-NEXT: ldr p9, [sp, #10, mul vl] // 2-byte Folded Reload
; CHECK-NEXT: ldr z11, [sp, #14, mul vl] // 16-byte Folded Reload
; CHECK-NEXT: ldr z10, [sp, #15, mul vl] // 16-byte Folded Reload
-; CHECK-NEXT: ldr p8, [sp, #11, mul vl] // 2-byte Folded Reload
; CHECK-NEXT: ldr z9, [sp, #16, mul vl] // 16-byte Folded Reload
; CHECK-NEXT: ldr z8, [sp, #17, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT: ldr p15, [sp, #4, mul vl] // 2-byte Folded Reload
+; CHECK-NEXT: ldr p14, [sp, #5, mul vl] // 2-byte Folded Reload
+; CHECK-NEXT: ldr p13, [sp, #6, mul vl] // 2-byte Folded Reload
+; CHECK-NEXT: ldr p12, [sp, #7, mul vl] // 2-byte Folded Reload
+; CHECK-NEXT: ldr p11, [sp, #8, mul vl] // 2-byte Folded Reload
+; CHECK-NEXT: ldr p10, [sp, #9, mul vl] // 2-byte Folded Reload
+; CHECK-NEXT: ldr p9, [sp, #10, mul vl] // 2-byte Folded Reload
+; CHECK-NEXT: ldr p8, [sp, #11, mul vl] // 2-byte Folded Reload
; CHECK-NEXT: ldr p7, [sp, #12, mul vl] // 2-byte Folded Reload
; CHECK-NEXT: ldr p6, [sp, #13, mul vl] // 2-byte Folded Reload
; CHECK-NEXT: ldr p5, [sp, #14, mul vl] // 2-byte Folded Reload
diff --git a/llvm/test/CodeGen/AArch64/sve-calling-convention-mixed.ll b/llvm/test/CodeGen/AArch64/sve-calling-convention-mixed.ll
index 9851583b950e..3965af6a9066 100644
--- a/llvm/test/CodeGen/AArch64/sve-calling-convention-mixed.ll
+++ b/llvm/test/CodeGen/AArch64/sve-calling-convention-mixed.ll
@@ -567,30 +567,30 @@ define <vscale x 4 x float> @sve_caller_non_sve_callee_high_range(<vscale x 4 x
; CHECK-NEXT: bl non_sve_callee_high_range
; CHECK-NEXT: ldr z0, [sp] // 16-byte Folded Reload
; CHECK-NEXT: addvl sp, sp, #3
-; CHECK-NEXT: ldr p15, [sp, #4, mul vl] // 2-byte Folded Reload
; CHECK-NEXT: ldr z23, [sp, #2, mul vl] // 16-byte Folded Reload
; CHECK-NEXT: ldr z22, [sp, #3, mul vl] // 16-byte Folded Reload
-; CHECK-NEXT: ldr p14, [sp, #5, mul vl] // 2-byte Folded Reload
; CHECK-NEXT: ldr z21, [sp, #4, mul vl] // 16-byte Folded Reload
; CHECK-NEXT: ldr z20, [sp, #5, mul vl] // 16-byte Folded Reload
-; CHECK-NEXT: ldr p13, [sp, #6, mul vl] // 2-byte Folded Reload
; CHECK-NEXT: ldr z19, [sp, #6, mul vl] // 16-byte Folded Reload
; CHECK-NEXT: ldr z18, [sp, #7, mul vl] // 16-byte Folded Reload
-; CHECK-NEXT: ldr p12, [sp, #7, mul vl] // 2-byte Folded Reload
; CHECK-NEXT: ldr z17, [sp, #8, mul vl] // 16-byte Folded Reload
; CHECK-NEXT: ldr z16, [sp, #9, mul vl] // 16-byte Folded Reload
-; CHECK-NEXT: ldr p11, [sp, #8, mul vl] // 2-byte Folded Reload
; CHECK-NEXT: ldr z15, [sp, #10, mul vl] // 16-byte Folded Reload
; CHECK-NEXT: ldr z14, [sp, #11, mul vl] // 16-byte Folded Reload
-; CHECK-NEXT: ldr p10, [sp, #9, mul vl] // 2-byte Folded Reload
; CHECK-NEXT: ldr z13, [sp, #12, mul vl] // 16-byte Folded Reload
; CHECK-NEXT: ldr z12, [sp, #13, mul vl] // 16-byte Folded Reload
-; CHECK-NEXT: ldr p9, [sp, #10, mul vl] // 2-byte Folded Reload
; CHECK-NEXT: ldr z11, [sp, #14, mul vl] // 16-byte Folded Reload
; CHECK-NEXT: ldr z10, [sp, #15, mul vl] // 16-byte Folded Reload
-; CHECK-NEXT: ldr p8, [sp, #11, mul vl] // 2-byte Folded Reload
; CHECK-NEXT: ldr z9, [sp, #16, mul vl] // 16-byte Folded Reload
; CHECK-NEXT: ldr z8, [sp, #17, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT: ldr p15, [sp, #4, mul vl] // 2-byte Folded Reload
+; CHECK-NEXT: ldr p14, [sp, #5, mul vl] // 2-byte Folded Reload
+; CHECK-NEXT: ldr p13, [sp, #6, mul vl] // 2-byte Folded Reload
+; CHECK-NEXT: ldr p12, [sp, #7, mul vl] // 2-byte Folded Reload
+; CHECK-NEXT: ldr p11, [sp, #8, mul vl] // 2-byte Folded Reload
+; CHECK-NEXT: ldr p10, [sp, #9, mul vl] // 2-byte Folded Reload
+; CHECK-NEXT: ldr p9, [sp, #10, mul vl] // 2-byte Folded Reload
+; CHECK-NEXT: ldr p8, [sp, #11, mul vl] // 2-byte Folded Reload
; CHECK-NEXT: ldr p7, [sp, #12, mul vl] // 2-byte Folded Reload
; CHECK-NEXT: ldr p6, [sp, #13, mul vl] // 2-byte Folded Reload
; CHECK-NEXT: ldr p5, [sp, #14, mul vl] // 2-byte Folded Reload
@@ -659,30 +659,30 @@ define <vscale x 4 x float> @sve_ret_caller_non_sve_callee_high_range() {
; CHECK-NEXT: fmov s7, #7.00000000
; CHECK-NEXT: bl non_sve_callee_high_range
; CHECK-NEXT: addvl sp, sp, #2
-; CHECK-NEXT: ldr p15, [sp, #4, mul vl] // 2-byte Folded Reload
; CHECK-NEXT: ldr z23, [sp, #2, mul vl] // 16-byte Folded Reload
; CHECK-NEXT: ldr z22, [sp, #3, mul vl] // 16-byte Folded Reload
-; CHECK-NEXT: ldr p14, [sp, #5, mul vl] // 2-byte Folded Reload
; CHECK-NEXT: ldr z21, [sp, #4, mul vl] // 16-byte Folded Reload
; CHECK-NEXT: ldr z20, [sp, #5, mul vl] // 16-byte Folded Reload
-; CHECK-NEXT: ldr p13, [sp, #6, mul vl] // 2-byte Folded Reload
; CHECK-NEXT: ldr z19, [sp, #6, mul vl] // 16-byte Folded Reload
; CHECK-NEXT: ldr z18, [sp, #7, mul vl] // 16-byte Folded Reload
-; CHECK-NEXT: ldr p12, [sp, #7, mul vl] // 2-byte Folded Reload
; CHECK-NEXT: ldr z17, [sp, #8, mul vl] // 16-byte Folded Reload
; CHECK-NEXT: ldr z16, [sp, #9, mul vl] // 16-byte Folded Reload
-; CHECK-NEXT: ldr p11, [sp, #8, mul vl] // 2-byte Folded Reload
; CHECK-NEXT: ldr z15, [sp, #10, mul vl] // 16-byte Folded Reload
; CHECK-NEXT: ldr z14, [sp, #11, mul vl] // 16-byte Folded Reload
-; CHECK-NEXT: ldr p10, [sp, #9, mul vl] // 2-byte Folded Reload
; CHECK-NEXT: ldr z13, [sp, #12, mul vl] // 16-byte Folded Reload
; CHECK-NEXT: ldr z12, [sp, #13, mul vl] // 16-byte Folded Reload
-; CHECK-NEXT: ldr p9, [sp, #10, mul vl] // 2-byte Folded Reload
; CHECK-NEXT: ldr z11, [sp, #14, mul vl] // 16-byte Folded Reload
; CHECK-NEXT: ldr z10, [sp, #15, mul vl] // 16-byte Folded Reload
-; CHECK-NEXT: ldr p8, [sp, #11, mul vl] // 2-byte Folded Reload
; CHECK-NEXT: ldr z9, [sp, #16, mul vl] // 16-byte Folded Reload
; CHECK-NEXT: ldr z8, [sp, #17, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT: ldr p15, [sp, #4, mul vl] // 2-byte Folded Reload
+; CHECK-NEXT: ldr p14, [sp, #5, mul vl] // 2-byte Folded Reload
+; CHECK-NEXT: ldr p13, [sp, #6, mul vl] // 2-byte Folded Reload
+; CHECK-NEXT: ldr p12, [sp, #7, mul vl] // 2-byte Folded Reload
+; CHECK-NEXT: ldr p11, [sp, #8, mul vl] // 2-byte Folded Reload
+; CHECK-NEXT: ldr p10, [sp, #9, mul vl] // 2-byte Folded Reload
+; CHECK-NEXT: ldr p9, [sp, #10, mul vl] // 2-byte Folded Reload
+; CHECK-NEXT: ldr p8, [sp, #11, mul vl] // 2-byte Folded Reload
; CHECK-NEXT: ldr p7, [sp, #12, mul vl] // 2-byte Folded Reload
; CHECK-NEXT: ldr p6, [sp, #13, mul vl] // 2-byte Folded Reload
; CHECK-NEXT: ldr p5, [sp, #14, mul vl] // 2-byte Folded Reload
diff --git a/llvm/test/CodeGen/AArch64/sve-tailcall.ll b/llvm/test/CodeGen/AArch64/sve-tailcall.ll
index f32c80d392b6..4ddf007768fd 100644
--- a/llvm/test/CodeGen/AArch64/sve-tailcall.ll
+++ b/llvm/test/CodeGen/AArch64/sve-tailcall.ll
@@ -83,30 +83,30 @@ define i32 @sve_caller_non_sve_callee(<vscale x 4 x i32> %arg) nounwind {
; CHECK-NEXT: //APP
; CHECK-NEXT: //NO_APP
; CHECK-NEXT: bl non_sve_callee
-; CHECK-NEXT: ldr p15, [sp, #4, mul vl] // 2-byte Folded Reload
; CHECK-NEXT: ldr z23, [sp, #2, mul vl] // 16-byte Folded Reload
; CHECK-NEXT: ldr z22, [sp, #3, mul vl] // 16-byte Folded Reload
-; CHECK-NEXT: ldr p14, [sp, #5, mul vl] // 2-byte Folded Reload
; CHECK-NEXT: ldr z21, [sp, #4, mul vl] // 16-byte Folded Reload
; CHECK-NEXT: ldr z20, [sp, #5, mul vl] // 16-byte Folded Reload
-; CHECK-NEXT: ldr p13, [sp, #6, mul vl] // 2-byte Folded Reload
; CHECK-NEXT: ldr z19, [sp, #6, mul vl] // 16-byte Folded Reload
; CHECK-NEXT: ldr z18, [sp, #7, mul vl] // 16-byte Folded Reload
-; CHECK-NEXT: ldr p12, [sp, #7, mul vl] // 2-byte Folded Reload
; CHECK-NEXT: ldr z17, [sp, #8, mul vl] // 16-byte Folded Reload
; CHECK-NEXT: ldr z16, [sp, #9, mul vl] // 16-byte Folded Reload
-; CHECK-NEXT: ldr p11, [sp, #8, mul vl] // 2-byte Folded Reload
; CHECK-NEXT: ldr z15, [sp, #10, mul vl] // 16-byte Folded Reload
; CHECK-NEXT: ldr z14, [sp, #11, mul vl] // 16-byte Folded Reload
-; CHECK-NEXT: ldr p10, [sp, #9, mul vl] // 2-byte Folded Reload
; CHECK-NEXT: ldr z13, [sp, #12, mul vl] // 16-byte Folded Reload
; CHECK-NEXT: ldr z12, [sp, #13, mul vl] // 16-byte Folded Reload
-; CHECK-NEXT: ldr p9, [sp, #10, mul vl] // 2-byte Folded Reload
; CHECK-NEXT: ldr z11, [sp, #14, mul vl] // 16-byte Folded Reload
; CHECK-NEXT: ldr z10, [sp, #15, mul vl] // 16-byte Folded Reload
-; CHECK-NEXT: ldr p8, [sp, #11, mul vl] // 2-byte Folded Reload
; CHECK-NEXT: ldr z9, [sp, #16, mul vl] // 16-byte Folded Reload
; CHECK-NEXT: ldr z8, [sp, #17, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT: ldr p15, [sp, #4, mul vl] // 2-byte Folded Reload
+; CHECK-NEXT: ldr p14, [sp, #5, mul vl] // 2-byte Folded Reload
+; CHECK-NEXT: ldr p13, [sp, #6, mul vl] // 2-byte Folded Reload
+; CHECK-NEXT: ldr p12, [sp, #7, mul vl] // 2-byte Folded Reload
+; CHECK-NEXT: ldr p11, [sp, #8, mul vl] // 2-byte Folded Reload
+; CHECK-NEXT: ldr p10, [sp, #9, mul vl] // 2-byte Folded Reload
+; CHECK-NEXT: ldr p9, [sp, #10, mul vl] // 2-byte Folded Reload
+; CHECK-NEXT: ldr p8, [sp, #11, mul vl] // 2-byte Folded Reload
; CHECK-NEXT: ldr p7, [sp, #12, mul vl] // 2-byte Folded Reload
; CHECK-NEXT: ldr p6, [sp, #13, mul vl] // 2-byte Folded Reload
; CHECK-NEXT: ldr p5, [sp, #14, mul vl] // 2-byte Folded Reload
@@ -158,30 +158,30 @@ define i32 @sve_caller_non_sve_callee_fastcc(<vscale x 4 x i32> %arg) nounwind {
; CHECK-NEXT: //APP
; CHECK-NEXT: //NO_APP
; CHECK-NEXT: bl non_sve_callee
-; CHECK-NEXT: ldr p15, [sp, #4, mul vl] // 2-byte Folded Reload
; CHECK-NEXT: ldr z23, [sp, #2, mul vl] // 16-byte Folded Reload
; CHECK-NEXT: ldr z22, [sp, #3, mul vl] // 16-byte Folded Reload
-; CHECK-NEXT: ldr p14, [sp, #5, mul vl] // 2-byte Folded Reload
; CHECK-NEXT: ldr z21, [sp, #4, mul vl] // 16-byte Folded Reload
; CHECK-NEXT: ldr z20, [sp, #5, mul vl] // 16-byte Folded Reload
-; CHECK-NEXT: ldr p13, [sp, #6, mul vl] // 2-byte Folded Reload
; CHECK-NEXT: ldr z19, [sp, #6, mul vl] // 16-byte Folded Reload
; CHECK-NEXT: ldr z18, [sp, #7, mul vl] // 16-byte Folded Reload
-; CHECK-NEXT: ldr p12, [sp, #7, mul vl] // 2-byte Folded Reload
; CHECK-NEXT: ldr z17, [sp, #8, mul vl] // 16-byte Folded Reload
; CHECK-NEXT: ldr z16, [sp, #9, mul vl] // 16-byte Folded Reload
-; CHECK-NEXT: ldr p11, [sp, #8, mul vl] // 2-byte Folded Reload
; CHECK-NEXT: ldr z15, [sp, #10, mul vl] // 16-byte Folded Reload
; CHECK-NEXT: ldr z14, [sp, #11, mul vl] // 16-byte Folded Reload
-; CHECK-NEXT: ldr p10, [sp, #9, mul vl] // 2-byte Folded Reload
; CHECK-NEXT: ldr z13, [sp, #12, mul vl] // 16-byte Folded Reload
; CHECK-NEXT: ldr z12, [sp, #13, mul vl] // 16-byte Folded Reload
-; CHECK-NEXT: ldr p9, [sp, #10, mul vl] // 2-byte Folded Reload
; CHECK-NEXT: ldr z11, [sp, #14, mul vl] // 16-byte Folded Reload
; CHECK-NEXT: ldr z10, [sp, #15, mul vl] // 16-byte Folded Reload
-; CHECK-NEXT: ldr p8, [sp, #11, mul vl] // 2-byte Folded Reload
; CHECK-NEXT: ldr z9, [sp, #16, mul vl] // 16-byte Folded Reload
; CHECK-NEXT: ldr z8, [sp, #17, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT: ldr p15, [sp, #4, mul vl] // 2-byte Folded Reload
+; CHECK-NEXT: ldr p14, [sp, #5, mul vl] // 2-byte Folded Reload
+; CHECK-NEXT: ldr p13, [sp, #6, mul vl] // 2-byte Folded Reload
+; CHECK-NEXT: ldr p12, [sp, #7, mul vl] // 2-byte Folded Reload
+; CHECK-NEXT: ldr p11, [sp, #8, mul vl] // 2-byte Folded Reload
+; CHECK-NEXT: ldr p10, [sp, #9, mul vl] // 2-byte Folded Reload
+; CHECK-NEXT: ldr p9, [sp, #10, mul vl] // 2-byte Folded Reload
+; CHECK-NEXT: ldr p8, [sp, #11, mul vl] // 2-byte Folded Reload
; CHECK-NEXT: ldr p7, [sp, #12, mul vl] // 2-byte Folded Reload
; CHECK-NEXT: ldr p6, [sp, #13, mul vl] // 2-byte Folded Reload
; CHECK-NEXT: ldr p5, [sp, #14, mul vl] // 2-byte Folded Reload
diff --git a/llvm/test/CodeGen/AArch64/sve2p1-intrinsics-pmov-to-pred.ll b/llvm/test/CodeGen/AArch64/sve2p1-intrinsics-pmov-to-pred.ll
index 7cae1d2c216b..a592dcd4b8ce 100644
--- a/llvm/test/CodeGen/AArch64/sve2p1-intrinsics-pmov-to-pred.ll
+++ b/llvm/test/CodeGen/AArch64/sve2p1-intrinsics-pmov-to-pred.ll
@@ -4,12 +4,7 @@
define <vscale x 16 x i1> @test_pmov_to_pred_i8(<vscale x 16 x i8> %zn) {
; CHECK-LABEL: test_pmov_to_pred_i8:
; CHECK: // %bb.0: // %entry
-; CHECK-NEXT: str x30, [sp, #-16]! // 8-byte Folded Spill
-; CHECK-NEXT: .cfi_def_cfa_offset 16
-; CHECK-NEXT: .cfi_offset w30, -16
-; CHECK-NEXT: mov w0, wzr
-; CHECK-NEXT: bl llvm.aarch64.sve.pmov.to.pred.lane.nxv16i8
-; CHECK-NEXT: ldr x30, [sp], #16 // 8-byte Folded Reload
+; CHECK-NEXT: pmov p0.b, z0
; CHECK-NEXT: ret
entry:
%res = call <vscale x 16 x i1> @llvm.aarch64.sve.pmov.to.pred.lane.nxv16i8(<vscale x 16 x i8> %zn, i32 0)
@@ -19,27 +14,10 @@ define <vscale x 16 x i1> @test_pmov_to_pred_i8(<vscale x 16 x i8> %zn) {
define <vscale x 8 x i1> @test_pmov_to_pred_i16(<vscale x 8 x i16> %zn) {
; CHECK-LABEL: test_pmov_to_pred_i16:
; CHECK: // %bb.0: // %entry
-; CHECK-NEXT: stp x29, x30, [sp, #-16]! // 16-byte Folded Spill
-; CHECK-NEXT: addvl sp, sp, #-2
-; CHECK-NEXT: str p4, [sp, #7, mul vl] // 2-byte Folded Spill
-; CHECK-NEXT: str z8, [sp, #1, mul vl] // 16-byte Folded Spill
-; CHECK-NEXT: .cfi_escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x10, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 16 * VG
-; CHECK-NEXT: .cfi_offset w30, -8
-; CHECK-NEXT: .cfi_offset w29, -16
-; CHECK-NEXT: .cfi_escape 0x10, 0x48, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x78, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d8 @ cfa - 16 - 8 * VG
-; CHECK-NEXT: mov w0, wzr
-; CHECK-NEXT: mov z8.d, z0.d
-; CHECK-NEXT: bl llvm.aarch64.sve.pmov.to.pred.lane.nxv8i16
-; CHECK-NEXT: mov z0.d, z8.d
-; CHECK-NEXT: mov w0, #1 // =0x1
-; CHECK-NEXT: mov p4.b, p0.b
-; CHECK-NEXT: bl llvm.aarch64.sve.pmov.to.pred.lane.nxv8i16
-; CHECK-NEXT: ptrue p1.h
-; CHECK-NEXT: ldr z8, [sp, #1, mul vl] // 16-byte Folded Reload
-; CHECK-NEXT: eor p0.b, p1/z, p4.b, p0.b
-; CHECK-NEXT: ldr p4, [sp, #7, mul vl] // 2-byte Folded Reload
-; CHECK-NEXT: addvl sp, sp, #2
-; CHECK-NEXT: ldp x29, x30, [sp], #16 // 16-byte Folded Reload
+; CHECK-NEXT: ptrue p0.h
+; CHECK-NEXT: pmov p1.h, z0[0]
+; CHECK-NEXT: pmov p2.h, z0[1]
+; CHECK-NEXT: eor p0.b, p0/z, p1.b, p2.b
; CHECK-NEXT: ret
entry:
%res1 = call <vscale x 8 x i1> @llvm.aarch64.sve.pmov.to.pred.lane.nxv8i16(<vscale x 8 x i16> %zn, i32 0)
@@ -52,27 +30,10 @@ define <vscale x 8 x i1> @test_pmov_to_pred_i16(<vscale x 8 x i16> %zn) {
define <vscale x 4 x i1> @test_pmov_to_pred_i32(<vscale x 4 x i32> %zn) {
; CHECK-LABEL: test_pmov_to_pred_i32:
; CHECK: // %bb.0: // %entry
-; CHECK-NEXT: stp x29, x30, [sp, #-16]! // 16-byte Folded Spill
-; CHECK-NEXT: addvl sp, sp, #-2
-; CHECK-NEXT: str p4, [sp, #7, mul vl] // 2-byte Folded Spill
-; CHECK-NEXT: str z8, [sp, #1, mul vl] // 16-byte Folded Spill
-; CHECK-NEXT: .cfi_escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x10, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 16 * VG
-; CHECK-NEXT: .cfi_offset w30, -8
-; CHECK-NEXT: .cfi_offset w29, -16
-; CHECK-NEXT: .cfi_escape 0x10, 0x48, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x78, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d8 @ cfa - 16 - 8 * VG
-; CHECK-NEXT: mov w0, wzr
-; CHECK-NEXT: mov z8.d, z0.d
-; CHECK-NEXT: bl llvm.aarch64.sve.pmov.to.pred.lane.nxv4i32
-; CHECK-NEXT: mov z0.d, z8.d
-; CHECK-NEXT: mov w0, #3 // =0x3
-; CHECK-NEXT: mov p4.b, p0.b
-; CHECK-NEXT: bl llvm.aarch64.sve.pmov.to.pred.lane.nxv4i32
-; CHECK-NEXT: ptrue p1.s
-; CHECK-NEXT: ldr z8, [sp, #1, mul vl] // 16-byte Folded Reload
-; CHECK-NEXT: eor p0.b, p1/z, p4.b, p0.b
-; CHECK-NEXT: ldr p4, [sp, #7, mul vl] // 2-byte Folded Reload
-; CHECK-NEXT: addvl sp, sp, #2
-; CHECK-NEXT: ldp x29, x30, [sp], #16 // 16-byte Folded Reload
+; CHECK-NEXT: ptrue p0.s
+; CHECK-NEXT: pmov p1.s, z0[0]
+; CHECK-NEXT: pmov p2.s, z0[3]
+; CHECK-NEXT: eor p0.b, p0/z, p1.b, p2.b
; CHECK-NEXT: ret
entry:
%res1 = call <vscale x 4 x i1> @llvm.aarch64.sve.pmov.to.pred.lane.nxv4i32(<vscale x 4 x i32> %zn, i32 0)
@@ -85,27 +46,10 @@ define <vscale x 4 x i1> @test_pmov_to_pred_i32(<vscale x 4 x i32> %zn) {
define <vscale x 2 x i1> @test_pmov_to_pred_i64(<vscale x 2 x i64> %zn) {
; CHECK-LABEL: test_pmov_to_pred_i64:
; CHECK: // %bb.0: // %entry
-; CHECK-NEXT: stp x29, x30, [sp, #-16]! // 16-byte Folded Spill
-; CHECK-NEXT: addvl sp, sp, #-2
-; CHECK-NEXT: str p4, [sp, #7, mul vl] // 2-byte Folded Spill
-; CHECK-NEXT: str z8, [sp, #1, mul vl] // 16-byte Folded Spill
-; CHECK-NEXT: .cfi_escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x10, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 16 * VG
-; CHECK-NEXT: .cfi_offset w30, -8
-; CHECK-NEXT: .cfi_offset w29, -16
-; CHECK-NEXT: .cfi_escape 0x10, 0x48, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x78, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d8 @ cfa - 16 - 8 * VG
-; CHECK-NEXT: mov w0, wzr
-; CHECK-NEXT: mov z8.d, z0.d
-; CHECK-NEXT: bl llvm.aarch64.sve.pmov.to.pred.lane.nxv2i64
-; CHECK-NEXT: mov z0.d, z8.d
-; CHECK-NEXT: mov w0, #7 // =0x7
-; CHECK-NEXT: mov p4.b, p0.b
-; CHECK-NEXT: bl llvm.aarch64.sve.pmov.to.pred.lane.nxv2i64
-; CHECK-NEXT: ptrue p1.d
-; CHECK-NEXT: ldr z8, [sp, #1, mul vl] // 16-byte Folded Reload
-; CHECK-NEXT: eor p0.b, p1/z, p4.b, p0.b
-; CHECK-NEXT: ldr p4, [sp, #7, mul vl] // 2-byte Folded Reload
-; CHECK-NEXT: addvl sp, sp, #2
-; CHECK-NEXT: ldp x29, x30, [sp], #16 // 16-byte Folded Reload
+; CHECK-NEXT: ptrue p0.d
+; CHECK-NEXT: pmov p1.d, z0[0]
+; CHECK-NEXT: pmov p2.d, z0[7]
+; CHECK-NEXT: eor p0.b, p0/z, p1.b, p2.b
; CHECK-NEXT: ret
entry:
%res1 = call <vscale x 2 x i1> @llvm.aarch64.sve.pmov.to.pred.lane.nxv2i64(<vscale x 2 x i64> %zn, i32 0)
diff --git a/llvm/test/CodeGen/AArch64/sve2p1-intrinsics-pmov-to-vector.ll b/llvm/test/CodeGen/AArch64/sve2p1-intrinsics-pmov-to-vector.ll
index 58b240b0fbd6..b7f36c651023 100644
--- a/llvm/test/CodeGen/AArch64/sve2p1-intrinsics-pmov-to-vector.ll
+++ b/llvm/test/CodeGen/AArch64/sve2p1-intrinsics-pmov-to-vector.ll
@@ -6,12 +6,7 @@
define <vscale x 8 x i16> @test_pmov_to_vector_i16(<vscale x 8 x i16> %zn, <vscale x 8 x i1> %pn) {
; CHECK-LABEL: test_pmov_to_vector_i16:
; CHECK: // %bb.0: // %entry
-; CHECK-NEXT: str x30, [sp, #-16]! // 8-byte Folded Spill
-; CHECK-NEXT: .cfi_def_cfa_offset 16
-; CHECK-NEXT: .cfi_offset w30, -16
-; CHECK-NEXT: mov w0, #1 // =0x1
-; CHECK-NEXT: bl llvm.aarch64.sve.pmov.to.vector.lane.merging.nxv8i16
-; CHECK-NEXT: ldr x30, [sp], #16 // 8-byte Folded Reload
+; CHECK-NEXT: pmov z0[1], p0.h
; CHECK-NEXT: ret
entry:
%res = call <vscale x 8 x i16> @llvm.aarch64.sve.pmov.to.vector.lane.merging.nxv8i16(<vscale x 8 x i16> %zn, <vscale x 8 x i1> %pn, i32 1)
@@ -21,12 +16,7 @@ define <vscale x 8 x i16> @test_pmov_to_vector_i16(<vscale x 8 x i16> %zn, <vsca
define <vscale x 4 x i32> @test_pmov_to_vector_i32(<vscale x 4 x i32> %zn, <vscale x 4 x i1> %pn) {
; CHECK-LABEL: test_pmov_to_vector_i32:
; CHECK: // %bb.0: // %entry
-; CHECK-NEXT: str x30, [sp, #-16]! // 8-byte Folded Spill
-; CHECK-NEXT: .cfi_def_cfa_offset 16
-; CHECK-NEXT: .cfi_offset w30, -16
-; CHECK-NEXT: mov w0, #3 // =0x3
-; CHECK-NEXT: bl llvm.aarch64.sve.pmov.to.vector.lane.merging.nxv4i32
-; CHECK-NEXT: ldr x30, [sp], #16 // 8-byte Folded Reload
+; CHECK-NEXT: pmov z0[3], p0.s
; CHECK-NEXT: ret
entry:
%res = call <vscale x 4 x i32> @llvm.aarch64.sve.pmov.to.vector.lane.merging.nxv4i32(<vscale x 4 x i32> %zn, <vscale x 4 x i1> %pn, i32 3)
@@ -36,12 +26,7 @@ define <vscale x 4 x i32> @test_pmov_to_vector_i32(<vscale x 4 x i32> %zn, <vsca
define <vscale x 2 x i64> @test_pmov_to_vector_i64(<vscale x 2 x i64> %zn, <vscale x 2 x i1> %pn) {
; CHECK-LABEL: test_pmov_to_vector_i64:
; CHECK: // %bb.0: // %entry
-; CHECK-NEXT: str x30, [sp, #-16]! // 8-byte Folded Spill
-; CHECK-NEXT: .cfi_def_cfa_offset 16
-; CHECK-NEXT: .cfi_offset w30, -16
-; CHECK-NEXT: mov w0, #7 // =0x7
-; CHECK-NEXT: bl llvm.aarch64.sve.pmov.to.vector.lane.merging.nxv2i64
-; CHECK-NEXT: ldr x30, [sp], #16 // 8-byte Folded Reload
+; CHECK-NEXT: pmov z0[7], p0.d
; CHECK-NEXT: ret
entry:
%res = call <vscale x 2 x i64> @llvm.aarch64.sve.pmov.to.vector.lane.merging.nxv2i64(<vscale x 2 x i64> %zn, <vscale x 2 x i1> %pn, i32 7)
@@ -54,11 +39,7 @@ define <vscale x 2 x i64> @test_pmov_to_vector_i64(<vscale x 2 x i64> %zn, <vsca
define <vscale x 16 x i8> @test_pmov_to_vector_zero_i8(<vscale x 16 x i1> %pn) {
; CHECK-LABEL: test_pmov_to_vector_zero_i8:
; CHECK: // %bb.0: // %entry
-; CHECK-NEXT: str x30, [sp, #-16]! // 8-byte Folded Spill
-; CHECK-NEXT: .cfi_def_cfa_offset 16
-; CHECK-NEXT: .cfi_offset w30, -16
-; CHECK-NEXT: bl llvm.aarch64.sve.pmov.to.vector.lane.zeroing.nxv16i8
-; CHECK-NEXT: ldr x30, [sp], #16 // 8-byte Folded Reload
+; CHECK-NEXT: pmov z0, p0.b
; CHECK-NEXT: ret
entry:
%res = call <vscale x 16 x i8> @llvm.aarch64.sve.pmov.to.vector.lane.zeroing.nxv16i8(<vscale x 16 x i1> %pn)
@@ -68,11 +49,7 @@ define <vscale x 16 x i8> @test_pmov_to_vector_zero_i8(<vscale x 16 x i1> %pn) {
define <vscale x 8 x i16> @test_pmov_to_vector_zero_i16(<vscale x 8 x i1> %pn) {
; CHECK-LABEL: test_pmov_to_vector_zero_i16:
; CHECK: // %bb.0: // %entry
-; CHECK-NEXT: str x30, [sp, #-16]! // 8-byte Folded Spill
-; CHECK-NEXT: .cfi_def_cfa_offset 16
-; CHECK-NEXT: .cfi_offset w30, -16
-; CHECK-NEXT: bl llvm.aarch64.sve.pmov.to.vector.lane.zeroing.nxv8i16
-; CHECK-NEXT: ldr x30, [sp], #16 // 8-byte Folded Reload
+; CHECK-NEXT: pmov z0[0], p0.h
; CHECK-NEXT: ret
entry:
%res = call <vscale x 8 x i16> @llvm.aarch64.sve.pmov.to.vector.lane.zeroing.nxv8i16(<vscale x 8 x i1> %pn)
@@ -82,11 +59,7 @@ define <vscale x 8 x i16> @test_pmov_to_vector_zero_i16(<vscale x 8 x i1> %pn) {
define <vscale x 4 x i32> @test_pmov_to_vector_zero_i32(<vscale x 4 x i1> %pn) {
; CHECK-LABEL: test_pmov_to_vector_zero_i32:
; CHECK: // %bb.0: // %entry
-; CHECK-NEXT: str x30, [sp, #-16]! // 8-byte Folded Spill
-; CHECK-NEXT: .cfi_def_cfa_offset 16
-; CHECK-NEXT: .cfi_offset w30, -16
-; CHECK-NEXT: bl llvm.aarch64.sve.pmov.to.vector.lane.zeroing.nxv4i32
-; CHECK-NEXT: ldr x30, [sp], #16 // 8-byte Folded Reload
+; CHECK-NEXT: pmov z0[0], p0.s
; CHECK-NEXT: ret
entry:
%res = call <vscale x 4 x i32> @llvm.aarch64.sve.pmov.to.vector.lane.zeroing.nxv4i32(<vscale x 4 x i1> %pn)
@@ -96,11 +69,7 @@ define <vscale x 4 x i32> @test_pmov_to_vector_zero_i32(<vscale x 4 x i1> %pn) {
define <vscale x 2 x i64> @test_pmov_to_vector_zero_i64(<vscale x 2 x i1> %pn) {
; CHECK-LABEL: test_pmov_to_vector_zero_i64:
; CHECK: // %bb.0: // %entry
-; CHECK-NEXT: str x30, [sp, #-16]! // 8-byte Folded Spill
-; CHECK-NEXT: .cfi_def_cfa_offset 16
-; CHECK-NEXT: .cfi_offset w30, -16
-; CHECK-NEXT: bl llvm.aarch64.sve.pmov.to.vector.lane.zeroing.nxv2i64
-; CHECK-NEXT: ldr x30, [sp], #16 // 8-byte Folded Reload
+; CHECK-NEXT: pmov z0[0], p0.d
; CHECK-NEXT: ret
entry:
%res = call <vscale x 2 x i64> @llvm.aarch64.sve.pmov.to.vector.lane.zeroing.nxv2i64(<vscale x 2 x i1> %pn)
diff --git a/llvm/test/CodeGen/AArch64/unwind-preserved.ll b/llvm/test/CodeGen/AArch64/unwind-preserved.ll
index f3c4d217e6fc..822be14faaeb 100644
--- a/llvm/test/CodeGen/AArch64/unwind-preserved.ll
+++ b/llvm/test/CodeGen/AArch64/unwind-preserved.ll
@@ -63,18 +63,6 @@ define <vscale x 4 x i32> @invoke_callee_may_throw_sve(<vscale x 4 x i32> %v) uw
; CHECK-NEXT: ldr z0, [sp, #1, mul vl] // 16-byte Folded Reload
; CHECK-NEXT: addvl sp, sp, #2
; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x90, 0x01, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 144 * VG
-; CHECK-NEXT: ldr p15, [sp, #4, mul vl] // 2-byte Folded Reload
-; CHECK-NEXT: ldr p14, [sp, #5, mul vl] // 2-byte Folded Reload
-; CHECK-NEXT: ldr p13, [sp, #6, mul vl] // 2-byte Folded Reload
-; CHECK-NEXT: ldr p12, [sp, #7, mul vl] // 2-byte Folded Reload
-; CHECK-NEXT: ldr p11, [sp, #8, mul vl] // 2-byte Folded Reload
-; CHECK-NEXT: ldr p10, [sp, #9, mul vl] // 2-byte Folded Reload
-; CHECK-NEXT: ldr p9, [sp, #10, mul vl] // 2-byte Folded Reload
-; CHECK-NEXT: ldr p8, [sp, #11, mul vl] // 2-byte Folded Reload
-; CHECK-NEXT: ldr p7, [sp, #12, mul vl] // 2-byte Folded Reload
-; CHECK-NEXT: ldr p6, [sp, #13, mul vl] // 2-byte Folded Reload
-; CHECK-NEXT: ldr p5, [sp, #14, mul vl] // 2-byte Folded Reload
-; CHECK-NEXT: ldr p4, [sp, #15, mul vl] // 2-byte Folded Reload
; CHECK-NEXT: ldr z23, [sp, #2, mul vl] // 16-byte Folded Reload
; CHECK-NEXT: ldr z22, [sp, #3, mul vl] // 16-byte Folded Reload
; CHECK-NEXT: ldr z21, [sp, #4, mul vl] // 16-byte Folded Reload
@@ -91,6 +79,18 @@ define <vscale x 4 x i32> @invoke_callee_may_throw_sve(<vscale x 4 x i32> %v) uw
; CHECK-NEXT: ldr z10, [sp, #15, mul vl] // 16-byte Folded Reload
; CHECK-NEXT: ldr z9, [sp, #16, mul vl] // 16-byte Folded Reload
; CHECK-NEXT: ldr z8, [sp, #17, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT: ldr p15, [sp, #4, mul vl] // 2-byte Folded Reload
+; CHECK-NEXT: ldr p14, [sp, #5, mul vl] // 2-byte Folded Reload
+; CHECK-NEXT: ldr p13, [sp, #6, mul vl] // 2-byte Folded Reload
+; CHECK-NEXT: ldr p12, [sp, #7, mul vl] // 2-byte Folded Reload
+; CHECK-NEXT: ldr p11, [sp, #8, mul vl] // 2-byte Folded Reload
+; CHECK-NEXT: ldr p10, [sp, #9, mul vl] // 2-byte Folded Reload
+; CHECK-NEXT: ldr p9, [sp, #10, mul vl] // 2-byte Folded Reload
+; CHECK-NEXT: ldr p8, [sp, #11, mul vl] // 2-byte Folded Reload
+; CHECK-NEXT: ldr p7, [sp, #12, mul vl] // 2-byte Folded Reload
+; CHECK-NEXT: ldr p6, [sp, #13, mul vl] // 2-byte Folded Reload
+; CHECK-NEXT: ldr p5, [sp, #14, mul vl] // 2-byte Folded Reload
+; CHECK-NEXT: ldr p4, [sp, #15, mul vl] // 2-byte Folded Reload
; CHECK-NEXT: addvl sp, sp, #18
; CHECK-NEXT: .cfi_def_cfa wsp, 16
; CHECK-NEXT: .cfi_restore z8
@@ -112,18 +112,6 @@ define <vscale x 4 x i32> @invoke_callee_may_throw_sve(<vscale x 4 x i32> %v) uw
; CHECK-NEXT: ldr z0, [sp] // 16-byte Folded Reload
; CHECK-NEXT: addvl sp, sp, #2
; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x90, 0x01, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 144 * VG
-; CHECK-NEXT: ldr p15, [sp, #4, mul vl] // 2-byte Folded Reload
-; CHECK-NEXT: ldr p14, [sp, #5, mul vl] // 2-byte Folded Reload
-; CHECK-NEXT: ldr p13, [sp, #6, mul vl] // 2-byte Folded Reload
-; CHECK-NEXT: ldr p12, [sp, #7, mul vl] // 2-byte Folded Reload
-; CHECK-NEXT: ldr p11, [sp, #8, mul vl] // 2-byte Folded Reload
-; CHECK-NEXT: ldr p10, [sp, #9, mul vl] // 2-byte Folded Reload
-; CHECK-NEXT: ldr p9, [sp, #10, mul vl] // 2-byte Folded Reload
-; CHECK-NEXT: ldr p8, [sp, #11, mul vl] // 2-byte Folded Reload
-; CHECK-NEXT: ldr p7, [sp, #12, mul vl] // 2-byte Folded Reload
-; CHECK-NEXT: ldr p6, [sp, #13, mul vl] // 2-byte Folded Reload
-; CHECK-NEXT: ldr p5, [sp, #14, mul vl] // 2-byte Folded Reload
-; CHECK-NEXT: ldr p4, [sp, #15, mul vl] // 2-byte Folded Reload
; CHECK-NEXT: ldr z23, [sp, #2, mul vl] // 16-byte Folded Reload
; CHECK-NEXT: ldr z22, [sp, #3, mul vl] // 16-byte Folded Reload
; CHECK-NEXT: ldr z21, [sp, #4, mul vl] // 16-byte Folded Reload
@@ -140,6 +128,18 @@ define <vscale x 4 x i32> @invoke_callee_may_throw_sve(<vscale x 4 x i32> %v) uw
; CHECK-NEXT: ldr z10, [sp, #15, mul vl] // 16-byte Folded Reload
; CHECK-NEXT: ldr z9, [sp, #16, mul vl] // 16-byte Folded Reload
; CHECK-NEXT: ldr z8, [sp, #17, mul vl] // 16-byte Folded Reload
+; CHECK-NEXT: ldr p15, [sp, #4, mul vl] // 2-byte Folded Reload
+; CHECK-NEXT: ldr p14, [sp, #5, mul vl] // 2-byte Folded Reload
+; CHECK-NEXT: ldr p13, [sp, #6, mul vl] // 2-byte Folded Reload
+; CHECK-NEXT: ldr p12, [sp, #7, mul vl] // 2-byte Folded Reload
+; CHECK-NEXT: ldr p11, [sp, #8, mul vl] // 2-byte Folded Reload
+; CHECK-NEXT: ldr p10, [sp, #9, mul vl] // 2-byte Folded Reload
+; CHECK-NEXT: ldr p9, [sp, #10, mul vl] // 2-byte Folded Reload
+; CHECK-NEXT: ldr p8, [sp, #11, mul vl] // 2-byte Folded Reload
+; CHECK-NEXT: ldr p7, [sp, #12, mul vl] // 2-byte Folded Reload
+; CHECK-NEXT: ldr p6, [sp, #13, mul vl] // 2-byte Folded Reload
+; CHECK-NEXT: ldr p5, [sp, #14, mul vl] // 2-byte Folded Reload
+; CHECK-NEXT: ldr p4, [sp, #15, mul vl] // 2-byte Folded Reload
; CHECK-NEXT: addvl sp, sp, #18
; CHECK-NEXT: .cfi_def_cfa wsp, 16
; CHECK-NEXT: .cfi_restore z8
@@ -215,18 +215,6 @@ define <vscale x 4 x i32> @invoke_callee_may_throw_sve(<vscale x 4 x i32> %v) uw
; GISEL-NEXT: ldr z0, [sp, #1, mul vl] // 16-byte Folded Reload
; GISEL-NEXT: addvl sp, sp, #2
; GISEL-NEXT: .cfi_escape 0x0f, 0x0d, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x90, 0x01, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 144 * VG
-; GISEL-NEXT: ldr p15, [sp, #4, mul vl] // 2-byte Folded Reload
-; GISEL-NEXT: ldr p14, [sp, #5, mul vl] // 2-byte Folded Reload
-; GISEL-NEXT: ldr p13, [sp, #6, mul vl] // 2-byte Folded Reload
-; GISEL-NEXT: ldr p12, [sp, #7, mul vl] // 2-byte Folded Reload
-; GISEL-NEXT: ldr p11, [sp, #8, mul vl] // 2-byte Folded Reload
-; GISEL-NEXT: ldr p10, [sp, #9, mul vl] // 2-byte Folded Reload
-; GISEL-NEXT: ldr p9, [sp, #10, mul vl] // 2-byte Folded Reload
-; GISEL-NEXT: ldr p8, [sp, #11, mul vl] // 2-byte Folded Reload
-; GISEL-NEXT: ldr p7, [sp, #12, mul vl] // 2-byte Folded Reload
-; GISEL-NEXT: ldr p6, [sp, #13, mul vl] // 2-byte Folded Reload
-; GISEL-NEXT: ldr p5, [sp, #14, mul vl] // 2-byte Folded Reload
-; GISEL-NEXT: ldr p4, [sp, #15, mul vl] // 2-byte Folded Reload
; GISEL-NEXT: ldr z23, [sp, #2, mul vl] // 16-byte Folded Reload
; GISEL-NEXT: ldr z22, [sp, #3, mul vl] // 16-byte Folded Reload
; GISEL-NEXT: ldr z21, [sp, #4, mul vl] // 16-byte Folded Reload
@@ -243,6 +231,18 @@ define <vscale x 4 x i32> @invoke_callee_may_throw_sve(<vscale x 4 x i32> %v) uw
; GISEL-NEXT: ldr z10, [sp, #15, mul vl] // 16-byte Folded Reload
; GISEL-NEXT: ldr z9, [sp, #16, mul vl] // 16-byte Folded Reload
; GISEL-NEXT: ldr z8, [sp, #17, mul vl] // 16-byte Folded Reload
+; GISEL-NEXT: ldr p15, [sp, #4, mul vl] // 2-byte Folded Reload
+; GISEL-NEXT: ldr p14, [sp, #5, mul vl] // 2-byte Folded Reload
+; GISEL-NEXT: ldr p13, [sp, #6, mul vl] // 2-byte Folded Reload
+; GISEL-NEXT: ldr p12, [sp, #7, mul vl] // 2-byte Folded Reload
+; GISEL-NEXT: ldr p11, [sp, #8, mul vl] // 2-byte Folded Reload
+; GISEL-NEXT: ldr p10, [sp, #9, mul vl] // 2-byte Folded Reload
+; GISEL-NEXT: ldr p9, [sp, #10, mul vl] // 2-byte Folded Reload
+; GISEL-NEXT: ldr p8, [sp, #11, mul vl] // 2-byte Folded Reload
+; GISEL-NEXT: ldr p7, [sp, #12, mul vl] // 2-byte Folded Reload
+; GISEL-NEXT: ldr p6, [sp, #13, mul vl] // 2-byte Folded Reload
+; GISEL-NEXT: ldr p5, [sp, #14, mul vl] // 2-byte Folded Reload
+; GISEL-NEXT: ldr p4, [sp, #15, mul vl] // 2-byte Folded Reload
; GISEL-NEXT: addvl sp, sp, #18
; GISEL-NEXT: .cfi_def_cfa wsp, 16
; GISEL-NEXT: .cfi_restore z8
@@ -264,18 +264,6 @@ define <vscale x 4 x i32> @invoke_callee_may_throw_sve(<vscale x 4 x i32> %v) uw
; GISEL-NEXT: ldr z0, [sp] // 16-byte Folded Reload
; GISEL-NEXT: addvl sp, sp, #2
; GISEL-NEXT: .cfi_escape 0x0f, 0x0d, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x90, 0x01, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 144 * VG
-; GISEL-NEXT: ldr p15, [sp, #4, mul vl] // 2-byte Folded Reload
-; GISEL-NEXT: ldr p14, [sp, #5, mul vl] // 2-byte Folded Reload
-; GISEL-NEXT: ldr p13, [sp, #6, mul vl] // 2-byte Folded Reload
-; GISEL-NEXT: ldr p12, [sp, #7, mul vl] // 2-byte Folded Reload
-; GISEL-NEXT: ldr p11, [sp, #8, mul vl] // 2-byte Folded Reload
-; GISEL-NEXT: ldr p10, [sp, #9, mul vl] // 2-byte Folded Reload
-; GISEL-NEXT: ldr p9, [sp, #10, mul vl] // 2-byte Folded Reload
-; GISEL-NEXT: ldr p8, [sp, #11, mul vl] // 2-byte Folded Reload
-; GISEL-NEXT: ldr p7, [sp, #12, mul vl] // 2-byte Folded Reload
-; GISEL-NEXT: ldr p6, [sp, #13, mul vl] // 2-byte Folded Reload
-; GISEL-NEXT: ldr p5, [sp, #14, mul vl] // 2-byte Folded Reload
-; GISEL-NEXT: ldr p4, [sp, #15, mul vl] // 2-byte Folded Reload
; GISEL-NEXT: ldr z23, [sp, #2, mul vl] // 16-byte Folded Reload
; GISEL-NEXT: ldr z22, [sp, #3, mul vl] // 16-byte Folded Reload
; GISEL-NEXT: ldr z21, [sp, #4, mul vl] // 16-byte Folded Reload
@@ -292,6 +280,18 @@ define <vscale x 4 x i32> @invoke_callee_may_throw_sve(<vscale x 4 x i32> %v) uw
; GISEL-NEXT: ldr z10, [sp, #15, mul vl] // 16-byte Folded Reload
; GISEL-NEXT: ldr z9, [sp, #16, mul vl] // 16-byte Folded Reload
; GISEL-NEXT: ldr z8, [sp, #17, mul vl] // 16-byte Folded Reload
+; GISEL-NEXT: ldr p15, [sp, #4, mul vl] // 2-byte Folded Reload
+; GISEL-NEXT: ldr p14, [sp, #5, mul vl] // 2-byte Folded Reload
+; GISEL-NEXT: ldr p13, [sp, #6, mul vl] // 2-byte Folded Reload
+; GISEL-NEXT: ldr p12, [sp, #7, mul vl] // 2-byte Folded Reload
+; GISEL-NEXT: ldr p11, [sp, #8, mul vl] // 2-byte Folded Reload
+; GISEL-NEXT: ldr p10, [sp, #9, mul vl] // 2-byte Folded Reload
+; GISEL-NEXT: ldr p9, [sp, #10, mul vl] // 2-byte Folded Reload
+; GISEL-NEXT: ldr p8, [sp, #11, mul vl] // 2-byte Folded Reload
+; GISEL-NEXT: ldr p7, [sp, #12, mul vl] // 2-byte Folded Reload
+; GISEL-NEXT: ldr p6, [sp, #13, mul vl] // 2-byte Folded Reload
+; GISEL-NEXT: ldr p5, [sp, #14, mul vl] // 2-byte Folded Reload
+; GISEL-NEXT: ldr p4, [sp, #15, mul vl] // 2-byte Folded Reload
; GISEL-NEXT: addvl sp, sp, #18
; GISEL-NEXT: .cfi_def_cfa wsp, 16
; GISEL-NEXT: .cfi_restore z8
diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/combine-extract-vector-load.mir b/llvm/test/CodeGen/AMDGPU/GlobalISel/combine-extract-vector-load.mir
new file mode 100644
index 000000000000..aa72a9ec06ed
--- /dev/null
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/combine-extract-vector-load.mir
@@ -0,0 +1,40 @@
+# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
+# RUN: llc -mtriple=amdgcn -run-pass=amdgpu-prelegalizer-combiner -verify-machineinstrs %s -o - | FileCheck %s
+
+# Tries to emit a foldable G_PTR_ADD with (p1, s32) operands.
+---
+name: test_ptradd_crash__offset_smaller
+tracksRegLiveness: true
+body: |
+ bb.0:
+ ; CHECK-LABEL: name: test_ptradd_crash__offset_smaller
+ ; CHECK: [[C:%[0-9]+]]:_(p1) = G_CONSTANT i64 12
+ ; CHECK-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[C]](p1) :: (load (s32), addrspace 1)
+ ; CHECK-NEXT: $sgpr0 = COPY [[LOAD]](s32)
+ ; CHECK-NEXT: SI_RETURN_TO_EPILOG implicit $sgpr0
+ %1:_(p1) = G_CONSTANT i64 0
+ %3:_(s32) = G_CONSTANT i32 3
+ %0:_(<4 x s32>) = G_LOAD %1 :: (load (<4 x s32>) from `ptr addrspace(1) null`, addrspace 1)
+ %2:_(s32) = G_EXTRACT_VECTOR_ELT %0, %3
+ $sgpr0 = COPY %2
+ SI_RETURN_TO_EPILOG implicit $sgpr0
+...
+
+# Tries to emit a foldable G_PTR_ADD with (p1, s128) operands.
+---
+name: test_ptradd_crash__offset_wider
+tracksRegLiveness: true
+body: |
+ bb.0:
+ ; CHECK-LABEL: name: test_ptradd_crash__offset_wider
+ ; CHECK: [[C:%[0-9]+]]:_(p1) = G_CONSTANT i64 12
+ ; CHECK-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[C]](p1) :: (load (s32), addrspace 1)
+ ; CHECK-NEXT: $sgpr0 = COPY [[LOAD]](s32)
+ ; CHECK-NEXT: SI_RETURN_TO_EPILOG implicit $sgpr0
+ %1:_(p1) = G_CONSTANT i64 0
+ %3:_(s128) = G_CONSTANT i128 3
+ %0:_(<4 x s32>) = G_LOAD %1 :: (load (<4 x s32>) from `ptr addrspace(1) null`, addrspace 1)
+ %2:_(s32) = G_EXTRACT_VECTOR_ELT %0, %3
+ $sgpr0 = COPY %2
+ SI_RETURN_TO_EPILOG implicit $sgpr0
+...
diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/combine-fdiv-sqrt-to-rsq.mir b/llvm/test/CodeGen/AMDGPU/GlobalISel/combine-fdiv-sqrt-to-rsq.mir
new file mode 100644
index 000000000000..6c5339e36c77
--- /dev/null
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/combine-fdiv-sqrt-to-rsq.mir
@@ -0,0 +1,584 @@
+# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py UTC_ARGS: --version 4
+# RUN: llc -mtriple=amdgcn -mcpu=gfx1010 -run-pass=amdgpu-postlegalizer-combiner -verify-machineinstrs %s -o - | FileCheck -check-prefix=GCN %s
+
+---
+name: rsq_f16
+tracksRegLiveness: true
+body: |
+ bb.0:
+ liveins: $vgpr0
+
+ ; GCN-LABEL: name: rsq_f16
+ ; GCN: liveins: $vgpr0
+ ; GCN-NEXT: {{ $}}
+ ; GCN-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
+ ; GCN-NEXT: %x:_(s16) = G_TRUNC [[COPY]](s32)
+ ; GCN-NEXT: [[INT:%[0-9]+]]:_(s16) = contract G_INTRINSIC intrinsic(@llvm.amdgcn.rsq), %x(s16)
+ ; GCN-NEXT: %ext:_(s32) = G_ANYEXT [[INT]](s16)
+ ; GCN-NEXT: $vgpr0 = COPY %ext(s32)
+ %0:_(s32) = COPY $vgpr0
+ %x:_(s16) = G_TRUNC %0:_(s32)
+ %sqrt:_(s16) = contract G_FSQRT %x
+ %one:_(s16) = G_FCONSTANT half 1.0
+ %rsq:_(s16) = contract G_FDIV %one, %sqrt
+ %ext:_(s32) = G_ANYEXT %rsq:_(s16)
+ $vgpr0 = COPY %ext
+
+...
+
+---
+name: rsq_f16_missing_contract0
+tracksRegLiveness: true
+body: |
+ bb.0:
+ liveins: $vgpr0
+
+ ; GCN-LABEL: name: rsq_f16_missing_contract0
+ ; GCN: liveins: $vgpr0
+ ; GCN-NEXT: {{ $}}
+ ; GCN-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
+ ; GCN-NEXT: %x:_(s16) = G_TRUNC [[COPY]](s32)
+ ; GCN-NEXT: %sqrt:_(s16) = G_FSQRT %x
+ ; GCN-NEXT: %one:_(s16) = G_FCONSTANT half 0xH3C00
+ ; GCN-NEXT: %rsq:_(s16) = contract G_FDIV %one, %sqrt
+ ; GCN-NEXT: %ext:_(s32) = G_ANYEXT %rsq(s16)
+ ; GCN-NEXT: $vgpr0 = COPY %ext(s32)
+ %0:_(s32) = COPY $vgpr0
+ %x:_(s16) = G_TRUNC %0:_(s32)
+ %sqrt:_(s16) = G_FSQRT %x
+ %one:_(s16) = G_FCONSTANT half 1.0
+ %rsq:_(s16) = contract G_FDIV %one, %sqrt
+ %ext:_(s32) = G_ANYEXT %rsq:_(s16)
+ $vgpr0 = COPY %ext
+
+...
+
+---
+name: rsq_f16_missing_contract1
+tracksRegLiveness: true
+body: |
+ bb.0:
+ liveins: $vgpr0
+
+ ; GCN-LABEL: name: rsq_f16_missing_contract1
+ ; GCN: liveins: $vgpr0
+ ; GCN-NEXT: {{ $}}
+ ; GCN-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
+ ; GCN-NEXT: %x:_(s16) = G_TRUNC [[COPY]](s32)
+ ; GCN-NEXT: %sqrt:_(s16) = contract G_FSQRT %x
+ ; GCN-NEXT: %one:_(s16) = G_FCONSTANT half 0xH3C00
+ ; GCN-NEXT: %rsq:_(s16) = G_FDIV %one, %sqrt
+ ; GCN-NEXT: %ext:_(s32) = G_ANYEXT %rsq(s16)
+ ; GCN-NEXT: $vgpr0 = COPY %ext(s32)
+ %0:_(s32) = COPY $vgpr0
+ %x:_(s16) = G_TRUNC %0:_(s32)
+ %sqrt:_(s16) = contract G_FSQRT %x
+ %one:_(s16) = G_FCONSTANT half 1.0
+ %rsq:_(s16) = G_FDIV %one, %sqrt
+ %ext:_(s32) = G_ANYEXT %rsq:_(s16)
+ $vgpr0 = COPY %ext
+
+...
+
+---
+name: neg_rsq_f16
+tracksRegLiveness: true
+body: |
+ bb.0:
+ liveins: $vgpr0
+
+ ; GCN-LABEL: name: neg_rsq_f16
+ ; GCN: liveins: $vgpr0
+ ; GCN-NEXT: {{ $}}
+ ; GCN-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
+ ; GCN-NEXT: %x:_(s16) = G_TRUNC [[COPY]](s32)
+ ; GCN-NEXT: [[INT:%[0-9]+]]:_(s16) = contract G_INTRINSIC intrinsic(@llvm.amdgcn.rsq), %x(s16)
+ ; GCN-NEXT: %rsq:_(s16) = contract G_FNEG [[INT]]
+ ; GCN-NEXT: %ext:_(s32) = G_ANYEXT %rsq(s16)
+ ; GCN-NEXT: $vgpr0 = COPY %ext(s32)
+ %0:_(s32) = COPY $vgpr0
+ %x:_(s16) = G_TRUNC %0:_(s32)
+ %sqrt:_(s16) = contract G_FSQRT %x
+ %neg_one:_(s16) = G_FCONSTANT half -1.0
+ %rsq:_(s16) = contract G_FDIV %neg_one, %sqrt
+ %ext:_(s32) = G_ANYEXT %rsq:_(s16)
+ $vgpr0 = COPY %ext
+
+...
+
+---
+name: neg_rsq_f16_missing_contract0
+tracksRegLiveness: true
+body: |
+ bb.0:
+ liveins: $vgpr0
+
+ ; GCN-LABEL: name: neg_rsq_f16_missing_contract0
+ ; GCN: liveins: $vgpr0
+ ; GCN-NEXT: {{ $}}
+ ; GCN-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
+ ; GCN-NEXT: %x:_(s16) = G_TRUNC [[COPY]](s32)
+ ; GCN-NEXT: %sqrt:_(s16) = G_FSQRT %x
+ ; GCN-NEXT: %neg_one:_(s16) = G_FCONSTANT half 0xHBC00
+ ; GCN-NEXT: %rsq:_(s16) = contract G_FDIV %neg_one, %sqrt
+ ; GCN-NEXT: %ext:_(s32) = G_ANYEXT %rsq(s16)
+ ; GCN-NEXT: $vgpr0 = COPY %ext(s32)
+ %0:_(s32) = COPY $vgpr0
+ %x:_(s16) = G_TRUNC %0:_(s32)
+ %sqrt:_(s16) = G_FSQRT %x
+ %neg_one:_(s16) = G_FCONSTANT half -1.0
+ %rsq:_(s16) = contract G_FDIV %neg_one, %sqrt
+ %ext:_(s32) = G_ANYEXT %rsq:_(s16)
+ $vgpr0 = COPY %ext
+
+...
+
+---
+name: neg_rsq_f16_missing_contract1
+tracksRegLiveness: true
+body: |
+ bb.0:
+ liveins: $vgpr0
+
+ ; GCN-LABEL: name: neg_rsq_f16_missing_contract1
+ ; GCN: liveins: $vgpr0
+ ; GCN-NEXT: {{ $}}
+ ; GCN-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
+ ; GCN-NEXT: %x:_(s16) = G_TRUNC [[COPY]](s32)
+ ; GCN-NEXT: %sqrt:_(s16) = contract G_FSQRT %x
+ ; GCN-NEXT: %neg_one:_(s16) = G_FCONSTANT half 0xHBC00
+ ; GCN-NEXT: %rsq:_(s16) = G_FDIV %neg_one, %sqrt
+ ; GCN-NEXT: %ext:_(s32) = G_ANYEXT %rsq(s16)
+ ; GCN-NEXT: $vgpr0 = COPY %ext(s32)
+ %0:_(s32) = COPY $vgpr0
+ %x:_(s16) = G_TRUNC %0:_(s32)
+ %sqrt:_(s16) = contract G_FSQRT %x
+ %neg_one:_(s16) = G_FCONSTANT half -1.0
+ %rsq:_(s16) = G_FDIV %neg_one, %sqrt
+ %ext:_(s32) = G_ANYEXT %rsq:_(s16)
+ $vgpr0 = COPY %ext
+
+...
+
+---
+name: rsq_f16_multi_use
+tracksRegLiveness: true
+body: |
+ bb.0:
+ liveins: $vgpr0
+
+ ; GCN-LABEL: name: rsq_f16_multi_use
+ ; GCN: liveins: $vgpr0
+ ; GCN-NEXT: {{ $}}
+ ; GCN-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
+ ; GCN-NEXT: %x:_(s16) = G_TRUNC [[COPY]](s32)
+ ; GCN-NEXT: %sqrt:_(s16) = contract G_FSQRT %x
+ ; GCN-NEXT: %one:_(s16) = G_FCONSTANT half 0xH3C00
+ ; GCN-NEXT: %rsq:_(s16) = contract G_FDIV %one, %sqrt
+ ; GCN-NEXT: %ext:_(s32) = G_ANYEXT %rsq(s16)
+ ; GCN-NEXT: $vgpr0 = COPY %ext(s32)
+ ; GCN-NEXT: S_ENDPGM 0, implicit %sqrt(s16)
+ %0:_(s32) = COPY $vgpr0
+ %x:_(s16) = G_TRUNC %0:_(s32)
+ %sqrt:_(s16) = contract G_FSQRT %x
+ %one:_(s16) = G_FCONSTANT half 1.0
+ %rsq:_(s16) = contract G_FDIV %one, %sqrt
+ %ext:_(s32) = G_ANYEXT %rsq:_(s16)
+ $vgpr0 = COPY %ext
+ S_ENDPGM 0, implicit %sqrt
+
+...
+
+---
+name: rsq_f16_multi_use_missing_contract0
+tracksRegLiveness: true
+body: |
+ bb.0:
+ liveins: $vgpr0
+
+ ; GCN-LABEL: name: rsq_f16_multi_use_missing_contract0
+ ; GCN: liveins: $vgpr0
+ ; GCN-NEXT: {{ $}}
+ ; GCN-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
+ ; GCN-NEXT: %x:_(s16) = G_TRUNC [[COPY]](s32)
+ ; GCN-NEXT: %sqrt:_(s16) = G_FSQRT %x
+ ; GCN-NEXT: %one:_(s16) = G_FCONSTANT half 0xH3C00
+ ; GCN-NEXT: %rsq:_(s16) = contract G_FDIV %one, %sqrt
+ ; GCN-NEXT: %ext:_(s32) = G_ANYEXT %rsq(s16)
+ ; GCN-NEXT: $vgpr0 = COPY %ext(s32)
+ ; GCN-NEXT: S_ENDPGM 0, implicit %sqrt(s16)
+ %0:_(s32) = COPY $vgpr0
+ %x:_(s16) = G_TRUNC %0:_(s32)
+ %sqrt:_(s16) = G_FSQRT %x
+ %one:_(s16) = G_FCONSTANT half 1.0
+ %rsq:_(s16) = contract G_FDIV %one, %sqrt
+ %ext:_(s32) = G_ANYEXT %rsq:_(s16)
+ $vgpr0 = COPY %ext
+ S_ENDPGM 0, implicit %sqrt
+
+...
+
+---
+name: rsq_f16_multi_use_missing_contract1
+tracksRegLiveness: true
+body: |
+ bb.0:
+ liveins: $vgpr0
+
+ ; GCN-LABEL: name: rsq_f16_multi_use_missing_contract1
+ ; GCN: liveins: $vgpr0
+ ; GCN-NEXT: {{ $}}
+ ; GCN-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
+ ; GCN-NEXT: %x:_(s16) = G_TRUNC [[COPY]](s32)
+ ; GCN-NEXT: %sqrt:_(s16) = contract G_FSQRT %x
+ ; GCN-NEXT: %one:_(s16) = G_FCONSTANT half 0xH3C00
+ ; GCN-NEXT: %rsq:_(s16) = G_FDIV %one, %sqrt
+ ; GCN-NEXT: %ext:_(s32) = G_ANYEXT %rsq(s16)
+ ; GCN-NEXT: $vgpr0 = COPY %ext(s32)
+ ; GCN-NEXT: S_ENDPGM 0, implicit %sqrt(s16)
+ %0:_(s32) = COPY $vgpr0
+ %x:_(s16) = G_TRUNC %0:_(s32)
+ %sqrt:_(s16) = contract G_FSQRT %x
+ %one:_(s16) = G_FCONSTANT half 1.0
+ %rsq:_(s16) = G_FDIV %one, %sqrt
+ %ext:_(s32) = G_ANYEXT %rsq:_(s16)
+ $vgpr0 = COPY %ext
+ S_ENDPGM 0, implicit %sqrt
+
+...
+
+---
+name: rsq_f32
+tracksRegLiveness: true
+body: |
+ bb.0:
+ liveins: $vgpr0
+
+ ; GCN-LABEL: name: rsq_f32
+ ; GCN: liveins: $vgpr0
+ ; GCN-NEXT: {{ $}}
+ ; GCN-NEXT: %x:_(s32) = COPY $vgpr0
+ ; GCN-NEXT: %sqrt:_(s32) = contract G_FSQRT %x
+ ; GCN-NEXT: %one:_(s32) = G_FCONSTANT float 1.000000e+00
+ ; GCN-NEXT: %rsq:_(s32) = contract G_FDIV %one, %sqrt
+ ; GCN-NEXT: $vgpr0 = COPY %rsq(s32)
+ %x:_(s32) = COPY $vgpr0
+ %sqrt:_(s32) = contract G_FSQRT %x
+ %one:_(s32) = G_FCONSTANT float 1.0
+ %rsq:_(s32) = contract G_FDIV %one, %sqrt
+ $vgpr0 = COPY %rsq
+
+...
+
+---
+name: neg_rsq_f32
+tracksRegLiveness: true
+body: |
+ bb.0:
+ liveins: $vgpr0
+
+ ; GCN-LABEL: name: neg_rsq_f32
+ ; GCN: liveins: $vgpr0
+ ; GCN-NEXT: {{ $}}
+ ; GCN-NEXT: %x:_(s32) = COPY $vgpr0
+ ; GCN-NEXT: %sqrt:_(s32) = contract G_FSQRT %x
+ ; GCN-NEXT: %neg_one:_(s32) = G_FCONSTANT float -1.000000e+00
+ ; GCN-NEXT: %rsq:_(s32) = contract G_FDIV %neg_one, %sqrt
+ ; GCN-NEXT: $vgpr0 = COPY %rsq(s32)
+ %x:_(s32) = COPY $vgpr0
+ %sqrt:_(s32) = contract G_FSQRT %x
+ %neg_one:_(s32) = G_FCONSTANT float -1.0
+ %rsq:_(s32) = contract G_FDIV %neg_one, %sqrt
+ $vgpr0 = COPY %rsq
+
+...
+
+---
+name: afn_rsq_f32
+tracksRegLiveness: true
+body: |
+ bb.0:
+ liveins: $vgpr0
+
+ ; GCN-LABEL: name: afn_rsq_f32
+ ; GCN: liveins: $vgpr0
+ ; GCN-NEXT: {{ $}}
+ ; GCN-NEXT: %x:_(s32) = COPY $vgpr0
+ ; GCN-NEXT: %sqrt:_(s32) = contract afn G_FSQRT %x
+ ; GCN-NEXT: %one:_(s32) = G_FCONSTANT float 1.000000e+00
+ ; GCN-NEXT: %rsq:_(s32) = contract afn G_FDIV %one, %sqrt
+ ; GCN-NEXT: $vgpr0 = COPY %rsq(s32)
+ %x:_(s32) = COPY $vgpr0
+ %sqrt:_(s32) = contract afn G_FSQRT %x
+ %one:_(s32) = G_FCONSTANT float 1.0
+ %rsq:_(s32) = contract afn G_FDIV %one, %sqrt
+ $vgpr0 = COPY %rsq
+
+...
+
+---
+name: afn_rsq_f32_multi_use
+tracksRegLiveness: true
+body: |
+ bb.0:
+ liveins: $vgpr0
+
+ ; GCN-LABEL: name: afn_rsq_f32_multi_use
+ ; GCN: liveins: $vgpr0
+ ; GCN-NEXT: {{ $}}
+ ; GCN-NEXT: %x:_(s32) = COPY $vgpr0
+ ; GCN-NEXT: %sqrt:_(s32) = contract afn G_FSQRT %x
+ ; GCN-NEXT: %one:_(s32) = G_FCONSTANT float 1.000000e+00
+ ; GCN-NEXT: %rsq:_(s32) = contract afn G_FDIV %one, %sqrt
+ ; GCN-NEXT: %ret:_(s32) = G_FSUB %sqrt, %rsq
+ ; GCN-NEXT: $vgpr0 = COPY %ret(s32)
+ %x:_(s32) = COPY $vgpr0
+ %sqrt:_(s32) = contract afn G_FSQRT %x
+ %one:_(s32) = G_FCONSTANT float 1.0
+ %rsq:_(s32) = contract afn G_FDIV %one, %sqrt
+ %ret:_(s32) = G_FSUB %sqrt, %rsq
+ $vgpr0 = COPY %ret
+
+...
+
+---
+name: afn_neg_rsq_f32
+tracksRegLiveness: true
+body: |
+ bb.0:
+ liveins: $vgpr0
+
+ ; GCN-LABEL: name: afn_neg_rsq_f32
+ ; GCN: liveins: $vgpr0
+ ; GCN-NEXT: {{ $}}
+ ; GCN-NEXT: %x:_(s32) = COPY $vgpr0
+ ; GCN-NEXT: %sqrt:_(s32) = contract afn G_FSQRT %x
+ ; GCN-NEXT: %neg_one:_(s32) = G_FCONSTANT float -1.000000e+00
+ ; GCN-NEXT: %rsq:_(s32) = contract afn G_FDIV %neg_one, %sqrt
+ ; GCN-NEXT: $vgpr0 = COPY %rsq(s32)
+ %x:_(s32) = COPY $vgpr0
+ %sqrt:_(s32) = contract afn G_FSQRT %x
+ %neg_one:_(s32) = G_FCONSTANT float -1.0
+ %rsq:_(s32) = contract afn G_FDIV %neg_one, %sqrt
+ $vgpr0 = COPY %rsq
+
+...
+
+
+---
+name: rsq_f64
+tracksRegLiveness: true
+body: |
+ bb.0:
+ liveins: $vgpr0
+
+ ; GCN-LABEL: name: rsq_f64
+ ; GCN: liveins: $vgpr0
+ ; GCN-NEXT: {{ $}}
+ ; GCN-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
+ ; GCN-NEXT: %x:_(s64) = G_ANYEXT [[COPY]](s32)
+ ; GCN-NEXT: %sqrt:_(s64) = contract G_FSQRT %x
+ ; GCN-NEXT: %one:_(s64) = G_FCONSTANT double 1.000000e+00
+ ; GCN-NEXT: %rsq:_(s64) = contract G_FDIV %one, %sqrt
+ ; GCN-NEXT: %ext:_(s32) = G_TRUNC %rsq(s64)
+ ; GCN-NEXT: $vgpr0 = COPY %ext(s32)
+ %0:_(s32) = COPY $vgpr0
+ %x:_(s64) = G_ANYEXT %0:_(s32)
+ %sqrt:_(s64) = contract G_FSQRT %x
+ %one:_(s64) = G_FCONSTANT double 1.0
+ %rsq:_(s64) = contract G_FDIV %one, %sqrt
+ %ext:_(s32) = G_TRUNC %rsq:_(s64)
+ $vgpr0 = COPY %ext
+
+...
+
+---
+name: neg_rsq_f64
+tracksRegLiveness: true
+body: |
+ bb.0:
+ liveins: $vgpr0
+
+ ; GCN-LABEL: name: neg_rsq_f64
+ ; GCN: liveins: $vgpr0
+ ; GCN-NEXT: {{ $}}
+ ; GCN-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
+ ; GCN-NEXT: %x:_(s64) = G_ANYEXT [[COPY]](s32)
+ ; GCN-NEXT: %sqrt:_(s64) = contract G_FSQRT %x
+ ; GCN-NEXT: %neg_one:_(s64) = G_FCONSTANT double -1.000000e+00
+ ; GCN-NEXT: %rsq:_(s64) = contract G_FDIV %neg_one, %sqrt
+ ; GCN-NEXT: %ext:_(s32) = G_TRUNC %rsq(s64)
+ ; GCN-NEXT: $vgpr0 = COPY %ext(s32)
+ %0:_(s32) = COPY $vgpr0
+ %x:_(s64) = G_ANYEXT %0:_(s32)
+ %sqrt:_(s64) = contract G_FSQRT %x
+ %neg_one:_(s64) = G_FCONSTANT double -1.0
+ %rsq:_(s64) = contract G_FDIV %neg_one, %sqrt
+ %ext:_(s32) = G_TRUNC %rsq:_(s64)
+ $vgpr0 = COPY %ext
+
+...
+
+---
+name: afn_rsq_f64
+tracksRegLiveness: true
+body: |
+ bb.0:
+ liveins: $vgpr0
+
+ ; GCN-LABEL: name: afn_rsq_f64
+ ; GCN: liveins: $vgpr0
+ ; GCN-NEXT: {{ $}}
+ ; GCN-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
+ ; GCN-NEXT: %x:_(s64) = G_ANYEXT [[COPY]](s32)
+ ; GCN-NEXT: %sqrt:_(s64) = contract afn G_FSQRT %x
+ ; GCN-NEXT: %one:_(s64) = G_FCONSTANT double 1.000000e+00
+ ; GCN-NEXT: %rsq:_(s64) = contract afn G_FDIV %one, %sqrt
+ ; GCN-NEXT: %ext:_(s32) = G_TRUNC %rsq(s64)
+ ; GCN-NEXT: $vgpr0 = COPY %ext(s32)
+ %0:_(s32) = COPY $vgpr0
+ %x:_(s64) = G_ANYEXT %0:_(s32)
+ %sqrt:_(s64) = contract afn G_FSQRT %x
+ %one:_(s64) = G_FCONSTANT double 1.0
+ %rsq:_(s64) = contract afn G_FDIV %one, %sqrt
+ %ext:_(s32) = G_TRUNC %rsq:_(s64)
+ $vgpr0 = COPY %ext
+
+...
+
+---
+name: afn_neg_rsq_f64
+tracksRegLiveness: true
+body: |
+ bb.0:
+ liveins: $vgpr0
+
+ ; GCN-LABEL: name: afn_neg_rsq_f64
+ ; GCN: liveins: $vgpr0
+ ; GCN-NEXT: {{ $}}
+ ; GCN-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
+ ; GCN-NEXT: %x:_(s64) = G_ANYEXT [[COPY]](s32)
+ ; GCN-NEXT: %sqrt:_(s64) = contract afn G_FSQRT %x
+ ; GCN-NEXT: %neg_one:_(s64) = G_FCONSTANT double -1.000000e+00
+ ; GCN-NEXT: %rsq:_(s64) = contract afn G_FDIV %neg_one, %sqrt
+ ; GCN-NEXT: %ext:_(s32) = G_TRUNC %rsq(s64)
+ ; GCN-NEXT: $vgpr0 = COPY %ext(s32)
+ %0:_(s32) = COPY $vgpr0
+ %x:_(s64) = G_ANYEXT %0:_(s32)
+ %sqrt:_(s64) = contract afn G_FSQRT %x
+ %neg_one:_(s64) = G_FCONSTANT double -1.0
+ %rsq:_(s64) = contract afn G_FDIV %neg_one, %sqrt
+ %ext:_(s32) = G_TRUNC %rsq:_(s64)
+ $vgpr0 = COPY %ext
+
+...
+
+
+---
+name: rsq_fract_num_f16
+tracksRegLiveness: true
+body: |
+ bb.0:
+ liveins: $vgpr0
+
+ ; GCN-LABEL: name: rsq_fract_num_f16
+ ; GCN: liveins: $vgpr0
+ ; GCN-NEXT: {{ $}}
+ ; GCN-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
+ ; GCN-NEXT: %x:_(s16) = G_TRUNC [[COPY]](s32)
+ ; GCN-NEXT: %fract:_(s16) = G_FCONSTANT half 0xH3800
+ ; GCN-NEXT: [[INT:%[0-9]+]]:_(s16) = contract G_INTRINSIC intrinsic(@llvm.amdgcn.rsq), %x(s16)
+ ; GCN-NEXT: %rsq:_(s16) = contract G_FMUL [[INT]], %fract
+ ; GCN-NEXT: %ext:_(s32) = G_ANYEXT %rsq(s16)
+ ; GCN-NEXT: $vgpr0 = COPY %ext(s32)
+ %0:_(s32) = COPY $vgpr0
+ %x:_(s16) = G_TRUNC %0:_(s32)
+ %sqrt:_(s16) = contract G_FSQRT %x
+ %fract:_(s16) = G_FCONSTANT half 0.5
+ %rsq:_(s16) = contract G_FDIV %fract, %sqrt
+ %ext:_(s32) = G_ANYEXT %rsq:_(s16)
+ $vgpr0 = COPY %ext
+
+...
+
+---
+name: neg_rsq_fract_num_f16
+tracksRegLiveness: true
+body: |
+ bb.0:
+ liveins: $vgpr0
+
+ ; GCN-LABEL: name: neg_rsq_fract_num_f16
+ ; GCN: liveins: $vgpr0
+ ; GCN-NEXT: {{ $}}
+ ; GCN-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
+ ; GCN-NEXT: %x:_(s16) = G_TRUNC [[COPY]](s32)
+ ; GCN-NEXT: %neg_fract:_(s16) = G_FCONSTANT half 0xHB800
+ ; GCN-NEXT: [[INT:%[0-9]+]]:_(s16) = contract G_INTRINSIC intrinsic(@llvm.amdgcn.rsq), %x(s16)
+ ; GCN-NEXT: %rsq:_(s16) = contract G_FMUL [[INT]], %neg_fract
+ ; GCN-NEXT: %ext:_(s32) = G_ANYEXT %rsq(s16)
+ ; GCN-NEXT: $vgpr0 = COPY %ext(s32)
+ %0:_(s32) = COPY $vgpr0
+ %x:_(s16) = G_TRUNC %0:_(s32)
+ %sqrt:_(s16) = contract G_FSQRT %x
+ %neg_fract:_(s16) = G_FCONSTANT half -0.5
+ %rsq:_(s16) = contract G_FDIV %neg_fract, %sqrt
+ %ext:_(s32) = G_ANYEXT %rsq:_(s16)
+ $vgpr0 = COPY %ext
+
+
+...
+
+---
+name: rsq_large_num_f16
+tracksRegLiveness: true
+body: |
+ bb.0:
+ liveins: $vgpr0
+
+ ; GCN-LABEL: name: rsq_large_num_f16
+ ; GCN: liveins: $vgpr0
+ ; GCN-NEXT: {{ $}}
+ ; GCN-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
+ ; GCN-NEXT: %x:_(s16) = G_TRUNC [[COPY]](s32)
+ ; GCN-NEXT: %ten:_(s16) = G_FCONSTANT half 0xH4900
+ ; GCN-NEXT: [[INT:%[0-9]+]]:_(s16) = contract G_INTRINSIC intrinsic(@llvm.amdgcn.rsq), %x(s16)
+ ; GCN-NEXT: %rsq:_(s16) = contract G_FMUL [[INT]], %ten
+ ; GCN-NEXT: %ext:_(s32) = G_ANYEXT %rsq(s16)
+ ; GCN-NEXT: $vgpr0 = COPY %ext(s32)
+ %0:_(s32) = COPY $vgpr0
+ %x:_(s16) = G_TRUNC %0:_(s32)
+ %sqrt:_(s16) = contract G_FSQRT %x
+ %ten:_(s16) = G_FCONSTANT half 10.0
+ %rsq:_(s16) = contract G_FDIV %ten, %sqrt
+ %ext:_(s32) = G_ANYEXT %rsq:_(s16)
+ $vgpr0 = COPY %ext
+
+...
+
+---
+name: neg_rsq_large_num_f16
+tracksRegLiveness: true
+body: |
+ bb.0:
+ liveins: $vgpr0
+
+ ; GCN-LABEL: name: neg_rsq_large_num_f16
+ ; GCN: liveins: $vgpr0
+ ; GCN-NEXT: {{ $}}
+ ; GCN-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
+ ; GCN-NEXT: %x:_(s16) = G_TRUNC [[COPY]](s32)
+ ; GCN-NEXT: %neg_ten:_(s16) = G_FCONSTANT half 0xHC900
+ ; GCN-NEXT: [[INT:%[0-9]+]]:_(s16) = contract G_INTRINSIC intrinsic(@llvm.amdgcn.rsq), %x(s16)
+ ; GCN-NEXT: %rsq:_(s16) = contract G_FMUL [[INT]], %neg_ten
+ ; GCN-NEXT: %ext:_(s32) = G_ANYEXT %rsq(s16)
+ ; GCN-NEXT: $vgpr0 = COPY %ext(s32)
+ %0:_(s32) = COPY $vgpr0
+ %x:_(s16) = G_TRUNC %0:_(s32)
+ %sqrt:_(s16) = contract G_FSQRT %x
+ %neg_ten:_(s16) = G_FCONSTANT half -10.0
+ %rsq:_(s16) = contract G_FDIV %neg_ten, %sqrt
+ %ext:_(s32) = G_ANYEXT %rsq:_(s16)
+ $vgpr0 = COPY %ext
+
+...
diff --git a/llvm/test/CodeGen/AMDGPU/bf16.ll b/llvm/test/CodeGen/AMDGPU/bf16.ll
index 63a09e49e005..8ec7dfd93cd0 100644
--- a/llvm/test/CodeGen/AMDGPU/bf16.ll
+++ b/llvm/test/CodeGen/AMDGPU/bf16.ll
@@ -2182,9 +2182,8 @@ define void @test_load_store_f32_to_bf16(ptr addrspace(1) %in, ptr addrspace(1)
; GFX8-NEXT: s_waitcnt vmcnt(0)
; GFX8-NEXT: v_bfe_u32 v1, v0, 16, 1
; GFX8-NEXT: v_add_u32_e32 v1, vcc, v1, v0
-; GFX8-NEXT: v_and_b32_e32 v4, 0xff800000, v0
; GFX8-NEXT: v_add_u32_e32 v1, vcc, 0x7fff, v1
-; GFX8-NEXT: v_or_b32_e32 v4, 0x400000, v4
+; GFX8-NEXT: v_or_b32_e32 v4, 0x400000, v0
; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v0, v0
; GFX8-NEXT: v_cndmask_b32_e32 v0, v1, v4, vcc
; GFX8-NEXT: v_lshrrev_b32_e32 v0, 16, v0
@@ -2199,9 +2198,8 @@ define void @test_load_store_f32_to_bf16(ptr addrspace(1) %in, ptr addrspace(1)
; GFX9-NEXT: s_movk_i32 s4, 0x7fff
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: v_bfe_u32 v1, v0, 16, 1
-; GFX9-NEXT: v_and_b32_e32 v4, 0xff800000, v0
+; GFX9-NEXT: v_or_b32_e32 v4, 0x400000, v0
; GFX9-NEXT: v_add3_u32 v1, v1, v0, s4
-; GFX9-NEXT: v_or_b32_e32 v4, 0x400000, v4
; GFX9-NEXT: v_cmp_u_f32_e32 vcc, v0, v0
; GFX9-NEXT: v_cndmask_b32_e32 v0, v1, v4, vcc
; GFX9-NEXT: global_store_short_d16_hi v[2:3], v0, off
@@ -2212,10 +2210,9 @@ define void @test_load_store_f32_to_bf16(ptr addrspace(1) %in, ptr addrspace(1)
; GFX10: ; %bb.0:
; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX10-NEXT: global_load_dword v0, v[0:1], off
-; GFX10-NEXT: s_mov_b32 s4, 0xff800000
; GFX10-NEXT: s_waitcnt vmcnt(0)
; GFX10-NEXT: v_bfe_u32 v1, v0, 16, 1
-; GFX10-NEXT: v_and_or_b32 v4, v0, s4, 0x400000
+; GFX10-NEXT: v_or_b32_e32 v4, 0x400000, v0
; GFX10-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0
; GFX10-NEXT: v_add3_u32 v1, v1, v0, 0x7fff
; GFX10-NEXT: v_cndmask_b32_e32 v0, v1, v4, vcc_lo
@@ -2226,10 +2223,9 @@ define void @test_load_store_f32_to_bf16(ptr addrspace(1) %in, ptr addrspace(1)
; GFX11: ; %bb.0:
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-NEXT: global_load_b32 v0, v[0:1], off
-; GFX11-NEXT: s_mov_b32 s0, 0xff800000
; GFX11-NEXT: s_waitcnt vmcnt(0)
; GFX11-NEXT: v_bfe_u32 v1, v0, 16, 1
-; GFX11-NEXT: v_and_or_b32 v4, v0, s0, 0x400000
+; GFX11-NEXT: v_or_b32_e32 v4, 0x400000, v0
; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1)
; GFX11-NEXT: v_add3_u32 v1, v1, v0, 0x7fff
@@ -2294,7 +2290,6 @@ define void @test_load_store_f64_to_bf16(ptr addrspace(1) %in, ptr addrspace(1)
; GFX8-NEXT: v_add_u32_e32 v4, vcc, v4, v5
; GFX8-NEXT: v_add_u32_e32 v4, vcc, 0x7fff, v4
; GFX8-NEXT: v_cmp_u_f64_e32 vcc, v[0:1], v[0:1]
-; GFX8-NEXT: v_and_b32_e32 v5, 0xff800000, v5
; GFX8-NEXT: v_or_b32_e32 v5, 0x400000, v5
; GFX8-NEXT: v_cndmask_b32_e32 v0, v4, v5, vcc
; GFX8-NEXT: v_lshrrev_b32_e32 v0, 16, v0
@@ -2323,7 +2318,6 @@ define void @test_load_store_f64_to_bf16(ptr addrspace(1) %in, ptr addrspace(1)
; GFX9-NEXT: v_and_or_b32 v5, v1, s8, v4
; GFX9-NEXT: v_bfe_u32 v4, v4, 16, 1
; GFX9-NEXT: v_add3_u32 v4, v4, v5, s9
-; GFX9-NEXT: v_and_b32_e32 v5, 0xff800000, v5
; GFX9-NEXT: v_or_b32_e32 v5, 0x400000, v5
; GFX9-NEXT: v_cndmask_b32_e32 v0, v4, v5, vcc
; GFX9-NEXT: global_store_short_d16_hi v[2:3], v0, off
@@ -2343,14 +2337,13 @@ define void @test_load_store_f64_to_bf16(ptr addrspace(1) %in, ptr addrspace(1)
; GFX10-NEXT: v_cmp_nlg_f64_e64 s4, |v[0:1]|, v[4:5]
; GFX10-NEXT: v_cndmask_b32_e64 v4, -1, 1, s5
; GFX10-NEXT: s_or_b32 vcc_lo, s4, vcc_lo
-; GFX10-NEXT: s_mov_b32 s4, 0xff800000
; GFX10-NEXT: v_add_nc_u32_e32 v4, v6, v4
; GFX10-NEXT: v_cndmask_b32_e32 v4, v4, v6, vcc_lo
; GFX10-NEXT: v_cmp_u_f64_e32 vcc_lo, v[0:1], v[0:1]
; GFX10-NEXT: v_and_or_b32 v5, 0x80000000, v1, v4
; GFX10-NEXT: v_bfe_u32 v4, v4, 16, 1
; GFX10-NEXT: v_add3_u32 v4, v4, v5, 0x7fff
-; GFX10-NEXT: v_and_or_b32 v5, v5, s4, 0x400000
+; GFX10-NEXT: v_or_b32_e32 v5, 0x400000, v5
; GFX10-NEXT: v_cndmask_b32_e32 v0, v4, v5, vcc_lo
; GFX10-NEXT: global_store_short_d16_hi v[2:3], v0, off
; GFX10-NEXT: s_setpc_b64 s[30:31]
@@ -2369,9 +2362,8 @@ define void @test_load_store_f64_to_bf16(ptr addrspace(1) %in, ptr addrspace(1)
; GFX11-NEXT: v_cmp_gt_f64_e64 s1, |v[0:1]|, v[4:5]
; GFX11-NEXT: v_cmp_nlg_f64_e64 s0, |v[0:1]|, v[4:5]
; GFX11-NEXT: v_cndmask_b32_e64 v4, -1, 1, s1
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
; GFX11-NEXT: s_or_b32 vcc_lo, s0, vcc_lo
-; GFX11-NEXT: s_mov_b32 s0, 0xff800000
; GFX11-NEXT: v_add_nc_u32_e32 v4, v6, v4
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
; GFX11-NEXT: v_cndmask_b32_e32 v4, v4, v6, vcc_lo
@@ -2380,7 +2372,7 @@ define void @test_load_store_f64_to_bf16(ptr addrspace(1) %in, ptr addrspace(1)
; GFX11-NEXT: v_bfe_u32 v4, v4, 16, 1
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
; GFX11-NEXT: v_add3_u32 v4, v4, v5, 0x7fff
-; GFX11-NEXT: v_and_or_b32 v5, v5, s0, 0x400000
+; GFX11-NEXT: v_or_b32_e32 v5, 0x400000, v5
; GFX11-NEXT: v_cndmask_b32_e32 v0, v4, v5, vcc_lo
; GFX11-NEXT: global_store_d16_hi_b16 v[2:3], v0, off
; GFX11-NEXT: s_setpc_b64 s[30:31]
@@ -8999,8 +8991,7 @@ define bfloat @v_fadd_bf16(bfloat %a, bfloat %b) {
; GFX8-NEXT: v_bfe_u32 v1, v0, 16, 1
; GFX8-NEXT: v_add_u32_e32 v1, vcc, v1, v0
; GFX8-NEXT: v_add_u32_e32 v1, vcc, 0x7fff, v1
-; GFX8-NEXT: v_and_b32_e32 v2, 0xff800000, v0
-; GFX8-NEXT: v_or_b32_e32 v2, 0x400000, v2
+; GFX8-NEXT: v_or_b32_e32 v2, 0x400000, v0
; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v0, v0
; GFX8-NEXT: v_cndmask_b32_e32 v0, v1, v2, vcc
; GFX8-NEXT: v_lshrrev_b32_e32 v0, 16, v0
@@ -9014,9 +9005,8 @@ define bfloat @v_fadd_bf16(bfloat %a, bfloat %b) {
; GFX9-NEXT: v_add_f32_e32 v0, v0, v1
; GFX9-NEXT: v_bfe_u32 v1, v0, 16, 1
; GFX9-NEXT: s_movk_i32 s4, 0x7fff
-; GFX9-NEXT: v_and_b32_e32 v2, 0xff800000, v0
; GFX9-NEXT: v_add3_u32 v1, v1, v0, s4
-; GFX9-NEXT: v_or_b32_e32 v2, 0x400000, v2
+; GFX9-NEXT: v_or_b32_e32 v2, 0x400000, v0
; GFX9-NEXT: v_cmp_u_f32_e32 vcc, v0, v0
; GFX9-NEXT: v_cndmask_b32_e32 v0, v1, v2, vcc
; GFX9-NEXT: v_lshrrev_b32_e32 v0, 16, v0
@@ -9027,10 +9017,9 @@ define bfloat @v_fadd_bf16(bfloat %a, bfloat %b) {
; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX10-NEXT: v_lshlrev_b32_e32 v1, 16, v1
; GFX10-NEXT: v_lshlrev_b32_e32 v0, 16, v0
-; GFX10-NEXT: s_mov_b32 s4, 0xff800000
; GFX10-NEXT: v_add_f32_e32 v0, v0, v1
; GFX10-NEXT: v_bfe_u32 v1, v0, 16, 1
-; GFX10-NEXT: v_and_or_b32 v2, v0, s4, 0x400000
+; GFX10-NEXT: v_or_b32_e32 v2, 0x400000, v0
; GFX10-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0
; GFX10-NEXT: v_add3_u32 v1, v1, v0, 0x7fff
; GFX10-NEXT: v_cndmask_b32_e32 v0, v1, v2, vcc_lo
@@ -9042,11 +9031,10 @@ define bfloat @v_fadd_bf16(bfloat %a, bfloat %b) {
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-NEXT: v_lshlrev_b32_e32 v1, 16, v1
; GFX11-NEXT: v_lshlrev_b32_e32 v0, 16, v0
-; GFX11-NEXT: s_mov_b32 s0, 0xff800000
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
; GFX11-NEXT: v_add_f32_e32 v0, v0, v1
; GFX11-NEXT: v_bfe_u32 v1, v0, 16, 1
-; GFX11-NEXT: v_and_or_b32 v2, v0, s0, 0x400000
+; GFX11-NEXT: v_or_b32_e32 v2, 0x400000, v0
; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1)
; GFX11-NEXT: v_add3_u32 v1, v1, v0, 0x7fff
@@ -9104,16 +9092,14 @@ define <2 x bfloat> @v_fadd_v2bf16(<2 x bfloat> %a, <2 x bfloat> %b) {
; GFX8-NEXT: v_and_b32_e32 v1, 0xffff0000, v1
; GFX8-NEXT: v_and_b32_e32 v0, 0xffff0000, v0
; GFX8-NEXT: v_add_u32_e32 v3, vcc, 0x7fff, v3
-; GFX8-NEXT: v_and_b32_e32 v4, 0xff800000, v2
; GFX8-NEXT: v_add_f32_e32 v0, v0, v1
-; GFX8-NEXT: v_or_b32_e32 v4, 0x400000, v4
+; GFX8-NEXT: v_or_b32_e32 v4, 0x400000, v2
; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v2, v2
; GFX8-NEXT: v_bfe_u32 v1, v0, 16, 1
; GFX8-NEXT: v_cndmask_b32_e32 v2, v3, v4, vcc
; GFX8-NEXT: v_add_u32_e32 v1, vcc, v1, v0
; GFX8-NEXT: v_add_u32_e32 v1, vcc, 0x7fff, v1
-; GFX8-NEXT: v_and_b32_e32 v3, 0xff800000, v0
-; GFX8-NEXT: v_or_b32_e32 v3, 0x400000, v3
+; GFX8-NEXT: v_or_b32_e32 v3, 0x400000, v0
; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v0, v0
; GFX8-NEXT: v_cndmask_b32_e32 v0, v1, v3, vcc
; GFX8-NEXT: v_lshrrev_b32_e32 v0, 16, v0
@@ -9126,20 +9112,18 @@ define <2 x bfloat> @v_fadd_v2bf16(<2 x bfloat> %a, <2 x bfloat> %b) {
; GFX9-NEXT: v_lshlrev_b32_e32 v2, 16, v1
; GFX9-NEXT: v_lshlrev_b32_e32 v3, 16, v0
; GFX9-NEXT: v_add_f32_e32 v2, v3, v2
-; GFX9-NEXT: v_bfe_u32 v3, v2, 16, 1
-; GFX9-NEXT: s_movk_i32 s4, 0x7fff
-; GFX9-NEXT: v_and_b32_e32 v4, 0xff800000, v2
; GFX9-NEXT: v_and_b32_e32 v1, 0xffff0000, v1
; GFX9-NEXT: v_and_b32_e32 v0, 0xffff0000, v0
+; GFX9-NEXT: v_bfe_u32 v3, v2, 16, 1
+; GFX9-NEXT: s_movk_i32 s4, 0x7fff
+; GFX9-NEXT: v_add_f32_e32 v0, v0, v1
; GFX9-NEXT: v_add3_u32 v3, v3, v2, s4
-; GFX9-NEXT: v_or_b32_e32 v4, 0x400000, v4
+; GFX9-NEXT: v_or_b32_e32 v4, 0x400000, v2
; GFX9-NEXT: v_cmp_u_f32_e32 vcc, v2, v2
-; GFX9-NEXT: v_add_f32_e32 v0, v0, v1
-; GFX9-NEXT: v_cndmask_b32_e32 v2, v3, v4, vcc
; GFX9-NEXT: v_bfe_u32 v1, v0, 16, 1
-; GFX9-NEXT: v_and_b32_e32 v3, 0xff800000, v0
+; GFX9-NEXT: v_cndmask_b32_e32 v2, v3, v4, vcc
; GFX9-NEXT: v_add3_u32 v1, v1, v0, s4
-; GFX9-NEXT: v_or_b32_e32 v3, 0x400000, v3
+; GFX9-NEXT: v_or_b32_e32 v3, 0x400000, v0
; GFX9-NEXT: v_cmp_u_f32_e32 vcc, v0, v0
; GFX9-NEXT: v_cndmask_b32_e32 v0, v1, v3, vcc
; GFX9-NEXT: s_mov_b32 s4, 0x7060302
@@ -9153,14 +9137,13 @@ define <2 x bfloat> @v_fadd_v2bf16(<2 x bfloat> %a, <2 x bfloat> %b) {
; GFX10-NEXT: v_lshlrev_b32_e32 v3, 16, v0
; GFX10-NEXT: v_and_b32_e32 v1, 0xffff0000, v1
; GFX10-NEXT: v_and_b32_e32 v0, 0xffff0000, v0
-; GFX10-NEXT: s_mov_b32 s4, 0xff800000
; GFX10-NEXT: v_add_f32_e32 v2, v3, v2
; GFX10-NEXT: v_add_f32_e32 v0, v0, v1
; GFX10-NEXT: v_bfe_u32 v1, v2, 16, 1
-; GFX10-NEXT: v_and_or_b32 v4, v2, s4, 0x400000
+; GFX10-NEXT: v_or_b32_e32 v4, 0x400000, v2
; GFX10-NEXT: v_bfe_u32 v3, v0, 16, 1
; GFX10-NEXT: v_cmp_u_f32_e32 vcc_lo, v2, v2
-; GFX10-NEXT: v_and_or_b32 v5, v0, s4, 0x400000
+; GFX10-NEXT: v_or_b32_e32 v5, 0x400000, v0
; GFX10-NEXT: v_add3_u32 v1, v1, v2, 0x7fff
; GFX10-NEXT: v_add3_u32 v3, v3, v0, 0x7fff
; GFX10-NEXT: v_cndmask_b32_e32 v1, v1, v4, vcc_lo
@@ -9176,16 +9159,15 @@ define <2 x bfloat> @v_fadd_v2bf16(<2 x bfloat> %a, <2 x bfloat> %b) {
; GFX11-NEXT: v_and_b32_e32 v1, 0xffff0000, v1
; GFX11-NEXT: v_lshlrev_b32_e32 v3, 16, v0
; GFX11-NEXT: v_and_b32_e32 v0, 0xffff0000, v0
-; GFX11-NEXT: s_mov_b32 s0, 0xff800000
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_3)
; GFX11-NEXT: v_add_f32_e32 v0, v0, v1
; GFX11-NEXT: v_add_f32_e32 v2, v3, v2
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
; GFX11-NEXT: v_bfe_u32 v3, v0, 16, 1
; GFX11-NEXT: v_bfe_u32 v1, v2, 16, 1
-; GFX11-NEXT: v_and_or_b32 v4, v2, s0, 0x400000
+; GFX11-NEXT: v_or_b32_e32 v4, 0x400000, v2
; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v2, v2
-; GFX11-NEXT: v_and_or_b32 v5, v0, s0, 0x400000
+; GFX11-NEXT: v_or_b32_e32 v5, 0x400000, v0
; GFX11-NEXT: v_add3_u32 v3, v3, v0, 0x7fff
; GFX11-NEXT: v_add3_u32 v1, v1, v2, 0x7fff
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_4)
@@ -9255,8 +9237,7 @@ define <3 x bfloat> @v_fadd_v3bf16(<3 x bfloat> %a, <3 x bfloat> %b) {
; GFX8-NEXT: v_bfe_u32 v3, v1, 16, 1
; GFX8-NEXT: v_add_u32_e32 v3, vcc, v3, v1
; GFX8-NEXT: v_add_u32_e32 v3, vcc, 0x7fff, v3
-; GFX8-NEXT: v_and_b32_e32 v4, 0xff800000, v1
-; GFX8-NEXT: v_or_b32_e32 v4, 0x400000, v4
+; GFX8-NEXT: v_or_b32_e32 v4, 0x400000, v1
; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v1, v1
; GFX8-NEXT: v_cndmask_b32_e32 v1, v3, v4, vcc
; GFX8-NEXT: v_lshlrev_b32_e32 v3, 16, v2
@@ -9268,16 +9249,14 @@ define <3 x bfloat> @v_fadd_v3bf16(<3 x bfloat> %a, <3 x bfloat> %b) {
; GFX8-NEXT: v_and_b32_e32 v2, 0xffff0000, v2
; GFX8-NEXT: v_and_b32_e32 v0, 0xffff0000, v0
; GFX8-NEXT: v_add_u32_e32 v4, vcc, s4, v4
-; GFX8-NEXT: v_and_b32_e32 v5, 0xff800000, v3
; GFX8-NEXT: v_add_f32_e32 v0, v0, v2
-; GFX8-NEXT: v_or_b32_e32 v5, 0x400000, v5
+; GFX8-NEXT: v_or_b32_e32 v5, 0x400000, v3
; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v3, v3
; GFX8-NEXT: v_bfe_u32 v2, v0, 16, 1
; GFX8-NEXT: v_cndmask_b32_e32 v3, v4, v5, vcc
; GFX8-NEXT: v_add_u32_e32 v2, vcc, v2, v0
; GFX8-NEXT: v_add_u32_e32 v2, vcc, 0x7fff, v2
-; GFX8-NEXT: v_and_b32_e32 v4, 0xff800000, v0
-; GFX8-NEXT: v_or_b32_e32 v4, 0x400000, v4
+; GFX8-NEXT: v_or_b32_e32 v4, 0x400000, v0
; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v0, v0
; GFX8-NEXT: v_cndmask_b32_e32 v0, v2, v4, vcc
; GFX8-NEXT: v_lshrrev_b32_e32 v0, 16, v0
@@ -9293,27 +9272,24 @@ define <3 x bfloat> @v_fadd_v3bf16(<3 x bfloat> %a, <3 x bfloat> %b) {
; GFX9-NEXT: v_add_f32_e32 v1, v1, v3
; GFX9-NEXT: v_bfe_u32 v3, v1, 16, 1
; GFX9-NEXT: s_movk_i32 s4, 0x7fff
-; GFX9-NEXT: v_and_b32_e32 v4, 0xff800000, v1
; GFX9-NEXT: v_add3_u32 v3, v3, v1, s4
-; GFX9-NEXT: v_or_b32_e32 v4, 0x400000, v4
+; GFX9-NEXT: v_or_b32_e32 v4, 0x400000, v1
; GFX9-NEXT: v_cmp_u_f32_e32 vcc, v1, v1
; GFX9-NEXT: v_cndmask_b32_e32 v1, v3, v4, vcc
; GFX9-NEXT: v_lshlrev_b32_e32 v3, 16, v2
; GFX9-NEXT: v_lshlrev_b32_e32 v4, 16, v0
; GFX9-NEXT: v_add_f32_e32 v3, v4, v3
-; GFX9-NEXT: v_bfe_u32 v4, v3, 16, 1
-; GFX9-NEXT: v_and_b32_e32 v5, 0xff800000, v3
; GFX9-NEXT: v_and_b32_e32 v2, 0xffff0000, v2
; GFX9-NEXT: v_and_b32_e32 v0, 0xffff0000, v0
+; GFX9-NEXT: v_bfe_u32 v4, v3, 16, 1
+; GFX9-NEXT: v_add_f32_e32 v0, v0, v2
; GFX9-NEXT: v_add3_u32 v4, v4, v3, s4
-; GFX9-NEXT: v_or_b32_e32 v5, 0x400000, v5
+; GFX9-NEXT: v_or_b32_e32 v5, 0x400000, v3
; GFX9-NEXT: v_cmp_u_f32_e32 vcc, v3, v3
-; GFX9-NEXT: v_add_f32_e32 v0, v0, v2
-; GFX9-NEXT: v_cndmask_b32_e32 v3, v4, v5, vcc
; GFX9-NEXT: v_bfe_u32 v2, v0, 16, 1
-; GFX9-NEXT: v_and_b32_e32 v4, 0xff800000, v0
+; GFX9-NEXT: v_cndmask_b32_e32 v3, v4, v5, vcc
; GFX9-NEXT: v_add3_u32 v2, v2, v0, s4
-; GFX9-NEXT: v_or_b32_e32 v4, 0x400000, v4
+; GFX9-NEXT: v_or_b32_e32 v4, 0x400000, v0
; GFX9-NEXT: v_cmp_u_f32_e32 vcc, v0, v0
; GFX9-NEXT: v_cndmask_b32_e32 v0, v2, v4, vcc
; GFX9-NEXT: s_mov_b32 s4, 0x7060302
@@ -9331,18 +9307,17 @@ define <3 x bfloat> @v_fadd_v3bf16(<3 x bfloat> %a, <3 x bfloat> %b) {
; GFX10-NEXT: v_lshlrev_b32_e32 v3, 16, v3
; GFX10-NEXT: v_lshlrev_b32_e32 v1, 16, v1
; GFX10-NEXT: v_add_f32_e32 v4, v5, v4
-; GFX10-NEXT: s_mov_b32 s4, 0xff800000
; GFX10-NEXT: v_add_f32_e32 v0, v0, v2
; GFX10-NEXT: v_add_f32_e32 v1, v1, v3
; GFX10-NEXT: v_bfe_u32 v2, v4, 16, 1
-; GFX10-NEXT: v_and_or_b32 v7, v4, s4, 0x400000
+; GFX10-NEXT: v_or_b32_e32 v7, 0x400000, v4
; GFX10-NEXT: v_bfe_u32 v5, v0, 16, 1
; GFX10-NEXT: v_cmp_u_f32_e32 vcc_lo, v4, v4
; GFX10-NEXT: v_bfe_u32 v3, v1, 16, 1
; GFX10-NEXT: v_add3_u32 v2, v2, v4, 0x7fff
-; GFX10-NEXT: v_and_or_b32 v8, v0, s4, 0x400000
+; GFX10-NEXT: v_or_b32_e32 v8, 0x400000, v0
; GFX10-NEXT: v_add3_u32 v5, v5, v0, 0x7fff
-; GFX10-NEXT: v_and_or_b32 v6, v1, s4, 0x400000
+; GFX10-NEXT: v_or_b32_e32 v6, 0x400000, v1
; GFX10-NEXT: v_add3_u32 v3, v3, v1, 0x7fff
; GFX10-NEXT: v_cndmask_b32_e32 v2, v2, v7, vcc_lo
; GFX10-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0
@@ -9426,17 +9401,15 @@ define <4 x bfloat> @v_fadd_v4bf16(<4 x bfloat> %a, <4 x bfloat> %b) {
; GFX8-NEXT: v_and_b32_e32 v3, 0xffff0000, v3
; GFX8-NEXT: v_and_b32_e32 v1, 0xffff0000, v1
; GFX8-NEXT: v_add_u32_e32 v5, vcc, 0x7fff, v5
-; GFX8-NEXT: v_and_b32_e32 v6, 0xff800000, v4
; GFX8-NEXT: v_add_f32_e32 v1, v1, v3
-; GFX8-NEXT: v_or_b32_e32 v6, 0x400000, v6
+; GFX8-NEXT: v_or_b32_e32 v6, 0x400000, v4
; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v4, v4
; GFX8-NEXT: v_bfe_u32 v3, v1, 16, 1
; GFX8-NEXT: s_movk_i32 s4, 0x7fff
; GFX8-NEXT: v_cndmask_b32_e32 v4, v5, v6, vcc
; GFX8-NEXT: v_add_u32_e32 v3, vcc, v3, v1
; GFX8-NEXT: v_add_u32_e32 v3, vcc, s4, v3
-; GFX8-NEXT: v_and_b32_e32 v5, 0xff800000, v1
-; GFX8-NEXT: v_or_b32_e32 v5, 0x400000, v5
+; GFX8-NEXT: v_or_b32_e32 v5, 0x400000, v1
; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v1, v1
; GFX8-NEXT: v_cndmask_b32_e32 v1, v3, v5, vcc
; GFX8-NEXT: v_lshlrev_b32_e32 v3, 16, v2
@@ -9447,16 +9420,14 @@ define <4 x bfloat> @v_fadd_v4bf16(<4 x bfloat> %a, <4 x bfloat> %b) {
; GFX8-NEXT: v_and_b32_e32 v2, 0xffff0000, v2
; GFX8-NEXT: v_and_b32_e32 v0, 0xffff0000, v0
; GFX8-NEXT: v_add_u32_e32 v5, vcc, s4, v5
-; GFX8-NEXT: v_and_b32_e32 v6, 0xff800000, v3
; GFX8-NEXT: v_add_f32_e32 v0, v0, v2
-; GFX8-NEXT: v_or_b32_e32 v6, 0x400000, v6
+; GFX8-NEXT: v_or_b32_e32 v6, 0x400000, v3
; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v3, v3
; GFX8-NEXT: v_bfe_u32 v2, v0, 16, 1
; GFX8-NEXT: v_cndmask_b32_e32 v3, v5, v6, vcc
; GFX8-NEXT: v_add_u32_e32 v2, vcc, v2, v0
; GFX8-NEXT: v_add_u32_e32 v2, vcc, 0x7fff, v2
-; GFX8-NEXT: v_and_b32_e32 v5, 0xff800000, v0
-; GFX8-NEXT: v_or_b32_e32 v5, 0x400000, v5
+; GFX8-NEXT: v_or_b32_e32 v5, 0x400000, v0
; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v0, v0
; GFX8-NEXT: v_cndmask_b32_e32 v0, v2, v5, vcc
; GFX8-NEXT: v_lshrrev_b32_e32 v1, 16, v1
@@ -9471,38 +9442,34 @@ define <4 x bfloat> @v_fadd_v4bf16(<4 x bfloat> %a, <4 x bfloat> %b) {
; GFX9-NEXT: v_lshlrev_b32_e32 v4, 16, v3
; GFX9-NEXT: v_lshlrev_b32_e32 v5, 16, v1
; GFX9-NEXT: v_add_f32_e32 v4, v5, v4
-; GFX9-NEXT: v_bfe_u32 v5, v4, 16, 1
-; GFX9-NEXT: s_movk_i32 s4, 0x7fff
-; GFX9-NEXT: v_and_b32_e32 v6, 0xff800000, v4
; GFX9-NEXT: v_and_b32_e32 v3, 0xffff0000, v3
; GFX9-NEXT: v_and_b32_e32 v1, 0xffff0000, v1
+; GFX9-NEXT: v_bfe_u32 v5, v4, 16, 1
+; GFX9-NEXT: s_movk_i32 s4, 0x7fff
+; GFX9-NEXT: v_add_f32_e32 v1, v1, v3
; GFX9-NEXT: v_add3_u32 v5, v5, v4, s4
-; GFX9-NEXT: v_or_b32_e32 v6, 0x400000, v6
+; GFX9-NEXT: v_or_b32_e32 v6, 0x400000, v4
; GFX9-NEXT: v_cmp_u_f32_e32 vcc, v4, v4
-; GFX9-NEXT: v_add_f32_e32 v1, v1, v3
-; GFX9-NEXT: v_cndmask_b32_e32 v4, v5, v6, vcc
; GFX9-NEXT: v_bfe_u32 v3, v1, 16, 1
-; GFX9-NEXT: v_and_b32_e32 v5, 0xff800000, v1
+; GFX9-NEXT: v_cndmask_b32_e32 v4, v5, v6, vcc
; GFX9-NEXT: v_add3_u32 v3, v3, v1, s4
-; GFX9-NEXT: v_or_b32_e32 v5, 0x400000, v5
+; GFX9-NEXT: v_or_b32_e32 v5, 0x400000, v1
; GFX9-NEXT: v_cmp_u_f32_e32 vcc, v1, v1
; GFX9-NEXT: v_cndmask_b32_e32 v1, v3, v5, vcc
; GFX9-NEXT: v_lshlrev_b32_e32 v3, 16, v2
; GFX9-NEXT: v_lshlrev_b32_e32 v5, 16, v0
; GFX9-NEXT: v_add_f32_e32 v3, v5, v3
-; GFX9-NEXT: v_bfe_u32 v5, v3, 16, 1
-; GFX9-NEXT: v_and_b32_e32 v6, 0xff800000, v3
; GFX9-NEXT: v_and_b32_e32 v2, 0xffff0000, v2
; GFX9-NEXT: v_and_b32_e32 v0, 0xffff0000, v0
+; GFX9-NEXT: v_bfe_u32 v5, v3, 16, 1
+; GFX9-NEXT: v_add_f32_e32 v0, v0, v2
; GFX9-NEXT: v_add3_u32 v5, v5, v3, s4
-; GFX9-NEXT: v_or_b32_e32 v6, 0x400000, v6
+; GFX9-NEXT: v_or_b32_e32 v6, 0x400000, v3
; GFX9-NEXT: v_cmp_u_f32_e32 vcc, v3, v3
-; GFX9-NEXT: v_add_f32_e32 v0, v0, v2
-; GFX9-NEXT: v_cndmask_b32_e32 v3, v5, v6, vcc
; GFX9-NEXT: v_bfe_u32 v2, v0, 16, 1
-; GFX9-NEXT: v_and_b32_e32 v5, 0xff800000, v0
+; GFX9-NEXT: v_cndmask_b32_e32 v3, v5, v6, vcc
; GFX9-NEXT: v_add3_u32 v2, v2, v0, s4
-; GFX9-NEXT: v_or_b32_e32 v5, 0x400000, v5
+; GFX9-NEXT: v_or_b32_e32 v5, 0x400000, v0
; GFX9-NEXT: v_cmp_u_f32_e32 vcc, v0, v0
; GFX9-NEXT: v_cndmask_b32_e32 v0, v2, v5, vcc
; GFX9-NEXT: s_mov_b32 s4, 0x7060302
@@ -9523,31 +9490,30 @@ define <4 x bfloat> @v_fadd_v4bf16(<4 x bfloat> %a, <4 x bfloat> %b) {
; GFX10-NEXT: v_and_b32_e32 v2, 0xffff0000, v2
; GFX10-NEXT: v_and_b32_e32 v0, 0xffff0000, v0
; GFX10-NEXT: v_add_f32_e32 v1, v1, v3
-; GFX10-NEXT: v_add_f32_e32 v5, v7, v6
-; GFX10-NEXT: v_bfe_u32 v3, v4, 16, 1
-; GFX10-NEXT: s_mov_b32 s4, 0xff800000
+; GFX10-NEXT: v_add_f32_e32 v3, v7, v6
+; GFX10-NEXT: v_bfe_u32 v5, v4, 16, 1
+; GFX10-NEXT: v_or_b32_e32 v7, 0x400000, v4
; GFX10-NEXT: v_add_f32_e32 v0, v0, v2
-; GFX10-NEXT: v_and_or_b32 v6, v4, s4, 0x400000
-; GFX10-NEXT: v_bfe_u32 v7, v5, 16, 1
-; GFX10-NEXT: v_add3_u32 v3, v3, v4, 0x7fff
; GFX10-NEXT: v_cmp_u_f32_e32 vcc_lo, v4, v4
-; GFX10-NEXT: v_bfe_u32 v8, v0, 16, 1
+; GFX10-NEXT: v_bfe_u32 v6, v3, 16, 1
+; GFX10-NEXT: v_add3_u32 v5, v5, v4, 0x7fff
; GFX10-NEXT: v_bfe_u32 v2, v1, 16, 1
-; GFX10-NEXT: v_add3_u32 v4, v7, v5, 0x7fff
-; GFX10-NEXT: v_and_or_b32 v9, v1, s4, 0x400000
-; GFX10-NEXT: v_cndmask_b32_e32 v3, v3, v6, vcc_lo
-; GFX10-NEXT: v_and_or_b32 v6, v5, s4, 0x400000
-; GFX10-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
+; GFX10-NEXT: v_bfe_u32 v8, v0, 16, 1
+; GFX10-NEXT: v_or_b32_e32 v9, 0x400000, v1
+; GFX10-NEXT: v_add3_u32 v6, v6, v3, 0x7fff
+; GFX10-NEXT: v_cndmask_b32_e32 v4, v5, v7, vcc_lo
+; GFX10-NEXT: v_or_b32_e32 v5, 0x400000, v3
+; GFX10-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3
; GFX10-NEXT: v_add3_u32 v7, v8, v0, 0x7fff
-; GFX10-NEXT: v_and_or_b32 v8, v0, s4, 0x400000
+; GFX10-NEXT: v_or_b32_e32 v8, 0x400000, v0
; GFX10-NEXT: v_add3_u32 v2, v2, v1, 0x7fff
-; GFX10-NEXT: v_cndmask_b32_e32 v4, v4, v6, vcc_lo
+; GFX10-NEXT: v_cndmask_b32_e32 v3, v6, v5, vcc_lo
; GFX10-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0
; GFX10-NEXT: v_cndmask_b32_e32 v0, v7, v8, vcc_lo
; GFX10-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
-; GFX10-NEXT: v_perm_b32 v0, v0, v4, 0x7060302
+; GFX10-NEXT: v_perm_b32 v0, v0, v3, 0x7060302
; GFX10-NEXT: v_cndmask_b32_e32 v1, v2, v9, vcc_lo
-; GFX10-NEXT: v_perm_b32 v1, v1, v3, 0x7060302
+; GFX10-NEXT: v_perm_b32 v1, v1, v4, 0x7060302
; GFX10-NEXT: s_setpc_b64 s[30:31]
;
; GFX11-LABEL: v_fadd_v4bf16:
@@ -9555,45 +9521,42 @@ define <4 x bfloat> @v_fadd_v4bf16(<4 x bfloat> %a, <4 x bfloat> %b) {
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-NEXT: v_lshlrev_b32_e32 v6, 16, v2
; GFX11-NEXT: v_lshlrev_b32_e32 v7, 16, v0
-; GFX11-NEXT: v_lshlrev_b32_e32 v4, 16, v3
; GFX11-NEXT: v_and_b32_e32 v2, 0xffff0000, v2
; GFX11-NEXT: v_and_b32_e32 v0, 0xffff0000, v0
+; GFX11-NEXT: v_lshlrev_b32_e32 v4, 16, v3
; GFX11-NEXT: v_lshlrev_b32_e32 v5, 16, v1
-; GFX11-NEXT: v_and_b32_e32 v1, 0xffff0000, v1
-; GFX11-NEXT: s_mov_b32 s0, 0xff800000
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
-; GFX11-NEXT: v_dual_add_f32 v0, v0, v2 :: v_dual_and_b32 v3, 0xffff0000, v3
-; GFX11-NEXT: v_add_f32_e32 v4, v5, v4
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_2)
+; GFX11-NEXT: v_dual_add_f32 v0, v0, v2 :: v_dual_and_b32 v1, 0xffff0000, v1
+; GFX11-NEXT: v_and_b32_e32 v3, 0xffff0000, v3
; GFX11-NEXT: v_bfe_u32 v8, v0, 16, 1
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_2)
; GFX11-NEXT: v_add_f32_e32 v1, v1, v3
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3)
-; GFX11-NEXT: v_bfe_u32 v3, v4, 16, 1
-; GFX11-NEXT: v_add_f32_e32 v5, v7, v6
-; GFX11-NEXT: v_and_or_b32 v6, v4, s0, 0x400000
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v4, v4
+; GFX11-NEXT: v_dual_add_f32 v3, v7, v6 :: v_dual_add_f32 v4, v5, v4
; GFX11-NEXT: v_bfe_u32 v2, v1, 16, 1
-; GFX11-NEXT: v_add3_u32 v3, v3, v4, 0x7fff
-; GFX11-NEXT: v_bfe_u32 v7, v5, 16, 1
-; GFX11-NEXT: v_and_or_b32 v9, v1, s0, 0x400000
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX11-NEXT: v_bfe_u32 v6, v3, 16, 1
+; GFX11-NEXT: v_bfe_u32 v5, v4, 16, 1
+; GFX11-NEXT: v_or_b32_e32 v7, 0x400000, v4
+; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v4, v4
; GFX11-NEXT: v_add3_u32 v2, v2, v1, 0x7fff
-; GFX11-NEXT: v_cndmask_b32_e32 v3, v3, v6, vcc_lo
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_4) | instid1(VALU_DEP_4)
-; GFX11-NEXT: v_add3_u32 v4, v7, v5, 0x7fff
-; GFX11-NEXT: v_and_or_b32 v6, v5, s0, 0x400000
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
+; GFX11-NEXT: v_add3_u32 v6, v6, v3, 0x7fff
+; GFX11-NEXT: v_add3_u32 v5, v5, v4, 0x7fff
+; GFX11-NEXT: v_or_b32_e32 v9, 0x400000, v1
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_4) | instid1(VALU_DEP_4)
+; GFX11-NEXT: v_cndmask_b32_e32 v4, v5, v7, vcc_lo
+; GFX11-NEXT: v_or_b32_e32 v5, 0x400000, v3
+; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3
; GFX11-NEXT: v_add3_u32 v7, v8, v0, 0x7fff
-; GFX11-NEXT: v_and_or_b32 v8, v0, s0, 0x400000
-; GFX11-NEXT: v_cndmask_b32_e32 v4, v4, v6, vcc_lo
+; GFX11-NEXT: v_or_b32_e32 v8, 0x400000, v0
+; GFX11-NEXT: v_cndmask_b32_e32 v3, v6, v5, vcc_lo
; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_2) | instid1(VALU_DEP_3)
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_2)
; GFX11-NEXT: v_cndmask_b32_e32 v0, v7, v8, vcc_lo
; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
+; GFX11-NEXT: v_perm_b32 v0, v0, v3, 0x7060302
; GFX11-NEXT: v_cndmask_b32_e32 v1, v2, v9, vcc_lo
-; GFX11-NEXT: v_perm_b32 v0, v0, v4, 0x7060302
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2)
-; GFX11-NEXT: v_perm_b32 v1, v1, v3, 0x7060302
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-NEXT: v_perm_b32 v1, v1, v4, 0x7060302
; GFX11-NEXT: s_setpc_b64 s[30:31]
%op = fadd <4 x bfloat> %a, %b
ret <4 x bfloat> %op
@@ -9717,17 +9680,15 @@ define <8 x bfloat> @v_fadd_v8bf16(<8 x bfloat> %a, <8 x bfloat> %b) {
; GFX8-NEXT: v_and_b32_e32 v7, 0xffff0000, v7
; GFX8-NEXT: v_and_b32_e32 v3, 0xffff0000, v3
; GFX8-NEXT: v_add_u32_e32 v9, vcc, 0x7fff, v9
-; GFX8-NEXT: v_and_b32_e32 v10, 0xff800000, v8
; GFX8-NEXT: v_add_f32_e32 v3, v3, v7
-; GFX8-NEXT: v_or_b32_e32 v10, 0x400000, v10
+; GFX8-NEXT: v_or_b32_e32 v10, 0x400000, v8
; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v8, v8
; GFX8-NEXT: v_bfe_u32 v7, v3, 16, 1
; GFX8-NEXT: s_movk_i32 s4, 0x7fff
; GFX8-NEXT: v_cndmask_b32_e32 v8, v9, v10, vcc
; GFX8-NEXT: v_add_u32_e32 v7, vcc, v7, v3
; GFX8-NEXT: v_add_u32_e32 v7, vcc, s4, v7
-; GFX8-NEXT: v_and_b32_e32 v9, 0xff800000, v3
-; GFX8-NEXT: v_or_b32_e32 v9, 0x400000, v9
+; GFX8-NEXT: v_or_b32_e32 v9, 0x400000, v3
; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v3, v3
; GFX8-NEXT: v_cndmask_b32_e32 v3, v7, v9, vcc
; GFX8-NEXT: v_lshlrev_b32_e32 v7, 16, v6
@@ -9738,16 +9699,14 @@ define <8 x bfloat> @v_fadd_v8bf16(<8 x bfloat> %a, <8 x bfloat> %b) {
; GFX8-NEXT: v_and_b32_e32 v6, 0xffff0000, v6
; GFX8-NEXT: v_and_b32_e32 v2, 0xffff0000, v2
; GFX8-NEXT: v_add_u32_e32 v9, vcc, s4, v9
-; GFX8-NEXT: v_and_b32_e32 v10, 0xff800000, v7
; GFX8-NEXT: v_add_f32_e32 v2, v2, v6
-; GFX8-NEXT: v_or_b32_e32 v10, 0x400000, v10
+; GFX8-NEXT: v_or_b32_e32 v10, 0x400000, v7
; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v7, v7
; GFX8-NEXT: v_bfe_u32 v6, v2, 16, 1
; GFX8-NEXT: v_cndmask_b32_e32 v7, v9, v10, vcc
; GFX8-NEXT: v_add_u32_e32 v6, vcc, v6, v2
; GFX8-NEXT: v_add_u32_e32 v6, vcc, s4, v6
-; GFX8-NEXT: v_and_b32_e32 v9, 0xff800000, v2
-; GFX8-NEXT: v_or_b32_e32 v9, 0x400000, v9
+; GFX8-NEXT: v_or_b32_e32 v9, 0x400000, v2
; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v2, v2
; GFX8-NEXT: v_cndmask_b32_e32 v2, v6, v9, vcc
; GFX8-NEXT: v_lshlrev_b32_e32 v6, 16, v5
@@ -9758,16 +9717,14 @@ define <8 x bfloat> @v_fadd_v8bf16(<8 x bfloat> %a, <8 x bfloat> %b) {
; GFX8-NEXT: v_and_b32_e32 v5, 0xffff0000, v5
; GFX8-NEXT: v_and_b32_e32 v1, 0xffff0000, v1
; GFX8-NEXT: v_add_u32_e32 v9, vcc, s4, v9
-; GFX8-NEXT: v_and_b32_e32 v10, 0xff800000, v6
; GFX8-NEXT: v_add_f32_e32 v1, v1, v5
-; GFX8-NEXT: v_or_b32_e32 v10, 0x400000, v10
+; GFX8-NEXT: v_or_b32_e32 v10, 0x400000, v6
; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v6, v6
; GFX8-NEXT: v_bfe_u32 v5, v1, 16, 1
; GFX8-NEXT: v_cndmask_b32_e32 v6, v9, v10, vcc
; GFX8-NEXT: v_add_u32_e32 v5, vcc, v5, v1
; GFX8-NEXT: v_add_u32_e32 v5, vcc, s4, v5
-; GFX8-NEXT: v_and_b32_e32 v9, 0xff800000, v1
-; GFX8-NEXT: v_or_b32_e32 v9, 0x400000, v9
+; GFX8-NEXT: v_or_b32_e32 v9, 0x400000, v1
; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v1, v1
; GFX8-NEXT: v_cndmask_b32_e32 v1, v5, v9, vcc
; GFX8-NEXT: v_lshlrev_b32_e32 v5, 16, v4
@@ -9778,16 +9735,14 @@ define <8 x bfloat> @v_fadd_v8bf16(<8 x bfloat> %a, <8 x bfloat> %b) {
; GFX8-NEXT: v_and_b32_e32 v4, 0xffff0000, v4
; GFX8-NEXT: v_and_b32_e32 v0, 0xffff0000, v0
; GFX8-NEXT: v_add_u32_e32 v9, vcc, s4, v9
-; GFX8-NEXT: v_and_b32_e32 v10, 0xff800000, v5
; GFX8-NEXT: v_add_f32_e32 v0, v0, v4
-; GFX8-NEXT: v_or_b32_e32 v10, 0x400000, v10
+; GFX8-NEXT: v_or_b32_e32 v10, 0x400000, v5
; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v5, v5
; GFX8-NEXT: v_bfe_u32 v4, v0, 16, 1
; GFX8-NEXT: v_cndmask_b32_e32 v5, v9, v10, vcc
; GFX8-NEXT: v_add_u32_e32 v4, vcc, v4, v0
; GFX8-NEXT: v_add_u32_e32 v4, vcc, 0x7fff, v4
-; GFX8-NEXT: v_and_b32_e32 v9, 0xff800000, v0
-; GFX8-NEXT: v_or_b32_e32 v9, 0x400000, v9
+; GFX8-NEXT: v_or_b32_e32 v9, 0x400000, v0
; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v0, v0
; GFX8-NEXT: v_cndmask_b32_e32 v0, v4, v9, vcc
; GFX8-NEXT: v_lshrrev_b32_e32 v3, 16, v3
@@ -9806,74 +9761,66 @@ define <8 x bfloat> @v_fadd_v8bf16(<8 x bfloat> %a, <8 x bfloat> %b) {
; GFX9-NEXT: v_lshlrev_b32_e32 v8, 16, v7
; GFX9-NEXT: v_lshlrev_b32_e32 v9, 16, v3
; GFX9-NEXT: v_add_f32_e32 v8, v9, v8
-; GFX9-NEXT: v_bfe_u32 v9, v8, 16, 1
-; GFX9-NEXT: s_movk_i32 s4, 0x7fff
-; GFX9-NEXT: v_and_b32_e32 v10, 0xff800000, v8
; GFX9-NEXT: v_and_b32_e32 v7, 0xffff0000, v7
; GFX9-NEXT: v_and_b32_e32 v3, 0xffff0000, v3
+; GFX9-NEXT: v_bfe_u32 v9, v8, 16, 1
+; GFX9-NEXT: s_movk_i32 s4, 0x7fff
+; GFX9-NEXT: v_add_f32_e32 v3, v3, v7
; GFX9-NEXT: v_add3_u32 v9, v9, v8, s4
-; GFX9-NEXT: v_or_b32_e32 v10, 0x400000, v10
+; GFX9-NEXT: v_or_b32_e32 v10, 0x400000, v8
; GFX9-NEXT: v_cmp_u_f32_e32 vcc, v8, v8
-; GFX9-NEXT: v_add_f32_e32 v3, v3, v7
-; GFX9-NEXT: v_cndmask_b32_e32 v8, v9, v10, vcc
; GFX9-NEXT: v_bfe_u32 v7, v3, 16, 1
-; GFX9-NEXT: v_and_b32_e32 v9, 0xff800000, v3
+; GFX9-NEXT: v_cndmask_b32_e32 v8, v9, v10, vcc
; GFX9-NEXT: v_add3_u32 v7, v7, v3, s4
-; GFX9-NEXT: v_or_b32_e32 v9, 0x400000, v9
+; GFX9-NEXT: v_or_b32_e32 v9, 0x400000, v3
; GFX9-NEXT: v_cmp_u_f32_e32 vcc, v3, v3
; GFX9-NEXT: v_cndmask_b32_e32 v3, v7, v9, vcc
; GFX9-NEXT: v_lshlrev_b32_e32 v7, 16, v6
; GFX9-NEXT: v_lshlrev_b32_e32 v9, 16, v2
; GFX9-NEXT: v_add_f32_e32 v7, v9, v7
-; GFX9-NEXT: v_bfe_u32 v9, v7, 16, 1
-; GFX9-NEXT: v_and_b32_e32 v10, 0xff800000, v7
; GFX9-NEXT: v_and_b32_e32 v6, 0xffff0000, v6
; GFX9-NEXT: v_and_b32_e32 v2, 0xffff0000, v2
+; GFX9-NEXT: v_bfe_u32 v9, v7, 16, 1
+; GFX9-NEXT: v_add_f32_e32 v2, v2, v6
; GFX9-NEXT: v_add3_u32 v9, v9, v7, s4
-; GFX9-NEXT: v_or_b32_e32 v10, 0x400000, v10
+; GFX9-NEXT: v_or_b32_e32 v10, 0x400000, v7
; GFX9-NEXT: v_cmp_u_f32_e32 vcc, v7, v7
-; GFX9-NEXT: v_add_f32_e32 v2, v2, v6
-; GFX9-NEXT: v_cndmask_b32_e32 v7, v9, v10, vcc
; GFX9-NEXT: v_bfe_u32 v6, v2, 16, 1
-; GFX9-NEXT: v_and_b32_e32 v9, 0xff800000, v2
+; GFX9-NEXT: v_cndmask_b32_e32 v7, v9, v10, vcc
; GFX9-NEXT: v_add3_u32 v6, v6, v2, s4
-; GFX9-NEXT: v_or_b32_e32 v9, 0x400000, v9
+; GFX9-NEXT: v_or_b32_e32 v9, 0x400000, v2
; GFX9-NEXT: v_cmp_u_f32_e32 vcc, v2, v2
; GFX9-NEXT: v_cndmask_b32_e32 v2, v6, v9, vcc
; GFX9-NEXT: v_lshlrev_b32_e32 v6, 16, v5
; GFX9-NEXT: v_lshlrev_b32_e32 v9, 16, v1
; GFX9-NEXT: v_add_f32_e32 v6, v9, v6
-; GFX9-NEXT: v_bfe_u32 v9, v6, 16, 1
-; GFX9-NEXT: v_and_b32_e32 v10, 0xff800000, v6
; GFX9-NEXT: v_and_b32_e32 v5, 0xffff0000, v5
; GFX9-NEXT: v_and_b32_e32 v1, 0xffff0000, v1
+; GFX9-NEXT: v_bfe_u32 v9, v6, 16, 1
+; GFX9-NEXT: v_add_f32_e32 v1, v1, v5
; GFX9-NEXT: v_add3_u32 v9, v9, v6, s4
-; GFX9-NEXT: v_or_b32_e32 v10, 0x400000, v10
+; GFX9-NEXT: v_or_b32_e32 v10, 0x400000, v6
; GFX9-NEXT: v_cmp_u_f32_e32 vcc, v6, v6
-; GFX9-NEXT: v_add_f32_e32 v1, v1, v5
-; GFX9-NEXT: v_cndmask_b32_e32 v6, v9, v10, vcc
; GFX9-NEXT: v_bfe_u32 v5, v1, 16, 1
-; GFX9-NEXT: v_and_b32_e32 v9, 0xff800000, v1
+; GFX9-NEXT: v_cndmask_b32_e32 v6, v9, v10, vcc
; GFX9-NEXT: v_add3_u32 v5, v5, v1, s4
-; GFX9-NEXT: v_or_b32_e32 v9, 0x400000, v9
+; GFX9-NEXT: v_or_b32_e32 v9, 0x400000, v1
; GFX9-NEXT: v_cmp_u_f32_e32 vcc, v1, v1
; GFX9-NEXT: v_cndmask_b32_e32 v1, v5, v9, vcc
; GFX9-NEXT: v_lshlrev_b32_e32 v5, 16, v4
; GFX9-NEXT: v_lshlrev_b32_e32 v9, 16, v0
; GFX9-NEXT: v_add_f32_e32 v5, v9, v5
-; GFX9-NEXT: v_bfe_u32 v9, v5, 16, 1
-; GFX9-NEXT: v_and_b32_e32 v10, 0xff800000, v5
; GFX9-NEXT: v_and_b32_e32 v4, 0xffff0000, v4
; GFX9-NEXT: v_and_b32_e32 v0, 0xffff0000, v0
+; GFX9-NEXT: v_bfe_u32 v9, v5, 16, 1
+; GFX9-NEXT: v_add_f32_e32 v0, v0, v4
; GFX9-NEXT: v_add3_u32 v9, v9, v5, s4
-; GFX9-NEXT: v_or_b32_e32 v10, 0x400000, v10
+; GFX9-NEXT: v_or_b32_e32 v10, 0x400000, v5
; GFX9-NEXT: v_cmp_u_f32_e32 vcc, v5, v5
-; GFX9-NEXT: v_add_f32_e32 v0, v0, v4
-; GFX9-NEXT: v_cndmask_b32_e32 v5, v9, v10, vcc
; GFX9-NEXT: v_bfe_u32 v4, v0, 16, 1
-; GFX9-NEXT: v_and_b32_e32 v9, 0xff800000, v0
+; GFX9-NEXT: v_cndmask_b32_e32 v5, v9, v10, vcc
; GFX9-NEXT: v_add3_u32 v4, v4, v0, s4
-; GFX9-NEXT: v_or_b32_e32 v9, 0x400000, v9
+; GFX9-NEXT: v_or_b32_e32 v9, 0x400000, v0
; GFX9-NEXT: v_cmp_u_f32_e32 vcc, v0, v0
; GFX9-NEXT: v_cndmask_b32_e32 v0, v4, v9, vcc
; GFX9-NEXT: s_mov_b32 s4, 0x7060302
@@ -9890,62 +9837,61 @@ define <8 x bfloat> @v_fadd_v8bf16(<8 x bfloat> %a, <8 x bfloat> %b) {
; GFX10-NEXT: v_lshlrev_b32_e32 v9, 16, v3
; GFX10-NEXT: v_and_b32_e32 v7, 0xffff0000, v7
; GFX10-NEXT: v_and_b32_e32 v3, 0xffff0000, v3
-; GFX10-NEXT: v_lshlrev_b32_e32 v11, 16, v2
+; GFX10-NEXT: v_lshlrev_b32_e32 v10, 16, v2
; GFX10-NEXT: v_and_b32_e32 v2, 0xffff0000, v2
; GFX10-NEXT: v_add_f32_e32 v8, v9, v8
; GFX10-NEXT: v_lshlrev_b32_e32 v9, 16, v6
-; GFX10-NEXT: v_add_f32_e32 v3, v3, v7
; GFX10-NEXT: v_and_b32_e32 v6, 0xffff0000, v6
-; GFX10-NEXT: s_mov_b32 s4, 0xff800000
-; GFX10-NEXT: v_bfe_u32 v10, v8, 16, 1
-; GFX10-NEXT: v_and_or_b32 v7, v8, s4, 0x400000
-; GFX10-NEXT: v_add_f32_e32 v9, v11, v9
-; GFX10-NEXT: v_bfe_u32 v11, v3, 16, 1
+; GFX10-NEXT: v_add_f32_e32 v3, v3, v7
+; GFX10-NEXT: v_lshlrev_b32_e32 v15, 16, v0
+; GFX10-NEXT: v_bfe_u32 v11, v8, 16, 1
+; GFX10-NEXT: v_add_f32_e32 v7, v10, v9
+; GFX10-NEXT: v_or_b32_e32 v9, 0x400000, v8
; GFX10-NEXT: v_cmp_u_f32_e32 vcc_lo, v8, v8
-; GFX10-NEXT: v_add3_u32 v10, v10, v8, 0x7fff
; GFX10-NEXT: v_add_f32_e32 v2, v2, v6
-; GFX10-NEXT: v_bfe_u32 v8, v9, 16, 1
+; GFX10-NEXT: v_add3_u32 v10, v11, v8, 0x7fff
+; GFX10-NEXT: v_bfe_u32 v11, v3, 16, 1
+; GFX10-NEXT: v_bfe_u32 v12, v7, 16, 1
; GFX10-NEXT: v_lshlrev_b32_e32 v6, 16, v5
-; GFX10-NEXT: v_and_or_b32 v12, v9, s4, 0x400000
-; GFX10-NEXT: v_cndmask_b32_e32 v7, v10, v7, vcc_lo
-; GFX10-NEXT: v_add3_u32 v10, v11, v3, 0x7fff
-; GFX10-NEXT: v_lshlrev_b32_e32 v11, 16, v1
; GFX10-NEXT: v_bfe_u32 v13, v2, 16, 1
-; GFX10-NEXT: v_add3_u32 v8, v8, v9, 0x7fff
-; GFX10-NEXT: v_cmp_u_f32_e32 vcc_lo, v9, v9
+; GFX10-NEXT: v_cndmask_b32_e32 v8, v10, v9, vcc_lo
+; GFX10-NEXT: v_lshlrev_b32_e32 v10, 16, v1
+; GFX10-NEXT: v_add3_u32 v9, v11, v3, 0x7fff
+; GFX10-NEXT: v_add3_u32 v11, v12, v7, 0x7fff
+; GFX10-NEXT: v_or_b32_e32 v12, 0x400000, v7
+; GFX10-NEXT: v_cmp_u_f32_e32 vcc_lo, v7, v7
+; GFX10-NEXT: v_add_f32_e32 v6, v10, v6
+; GFX10-NEXT: v_add3_u32 v10, v13, v2, 0x7fff
; GFX10-NEXT: v_and_b32_e32 v5, 0xffff0000, v5
-; GFX10-NEXT: v_add_f32_e32 v6, v11, v6
-; GFX10-NEXT: v_add3_u32 v9, v13, v2, 0x7fff
; GFX10-NEXT: v_and_b32_e32 v1, 0xffff0000, v1
; GFX10-NEXT: v_lshlrev_b32_e32 v13, 16, v4
-; GFX10-NEXT: v_lshlrev_b32_e32 v15, 16, v0
-; GFX10-NEXT: v_cndmask_b32_e32 v8, v8, v12, vcc_lo
-; GFX10-NEXT: v_and_or_b32 v11, v2, s4, 0x400000
+; GFX10-NEXT: v_cndmask_b32_e32 v7, v11, v12, vcc_lo
+; GFX10-NEXT: v_or_b32_e32 v11, 0x400000, v2
; GFX10-NEXT: v_bfe_u32 v12, v6, 16, 1
; GFX10-NEXT: v_and_b32_e32 v4, 0xffff0000, v4
; GFX10-NEXT: v_and_b32_e32 v0, 0xffff0000, v0
; GFX10-NEXT: v_cmp_u_f32_e32 vcc_lo, v2, v2
; GFX10-NEXT: v_add_f32_e32 v1, v1, v5
; GFX10-NEXT: v_add_f32_e32 v5, v15, v13
-; GFX10-NEXT: v_and_or_b32 v14, v3, s4, 0x400000
+; GFX10-NEXT: v_or_b32_e32 v14, 0x400000, v3
; GFX10-NEXT: v_add_f32_e32 v0, v0, v4
-; GFX10-NEXT: v_cndmask_b32_e32 v2, v9, v11, vcc_lo
+; GFX10-NEXT: v_cndmask_b32_e32 v2, v10, v11, vcc_lo
; GFX10-NEXT: v_add3_u32 v4, v12, v6, 0x7fff
-; GFX10-NEXT: v_and_or_b32 v9, v6, s4, 0x400000
+; GFX10-NEXT: v_or_b32_e32 v10, 0x400000, v6
; GFX10-NEXT: v_bfe_u32 v11, v1, 16, 1
; GFX10-NEXT: v_bfe_u32 v12, v5, 16, 1
; GFX10-NEXT: v_cmp_u_f32_e32 vcc_lo, v6, v6
; GFX10-NEXT: v_bfe_u32 v13, v0, 16, 1
-; GFX10-NEXT: v_and_or_b32 v15, v1, s4, 0x400000
+; GFX10-NEXT: v_or_b32_e32 v15, 0x400000, v1
; GFX10-NEXT: v_add3_u32 v6, v11, v1, 0x7fff
-; GFX10-NEXT: v_and_or_b32 v11, v5, s4, 0x400000
-; GFX10-NEXT: v_cndmask_b32_e32 v4, v4, v9, vcc_lo
-; GFX10-NEXT: v_add3_u32 v9, v12, v5, 0x7fff
+; GFX10-NEXT: v_or_b32_e32 v11, 0x400000, v5
+; GFX10-NEXT: v_cndmask_b32_e32 v4, v4, v10, vcc_lo
+; GFX10-NEXT: v_add3_u32 v10, v12, v5, 0x7fff
; GFX10-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
; GFX10-NEXT: v_add3_u32 v12, v13, v0, 0x7fff
-; GFX10-NEXT: v_and_or_b32 v13, v0, s4, 0x400000
-; GFX10-NEXT: v_perm_b32 v2, v2, v8, 0x7060302
-; GFX10-NEXT: v_cndmask_b32_e32 v5, v9, v11, vcc_lo
+; GFX10-NEXT: v_or_b32_e32 v13, 0x400000, v0
+; GFX10-NEXT: v_perm_b32 v2, v2, v7, 0x7060302
+; GFX10-NEXT: v_cndmask_b32_e32 v5, v10, v11, vcc_lo
; GFX10-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0
; GFX10-NEXT: v_cndmask_b32_e32 v0, v12, v13, vcc_lo
; GFX10-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
@@ -9953,81 +9899,80 @@ define <8 x bfloat> @v_fadd_v8bf16(<8 x bfloat> %a, <8 x bfloat> %b) {
; GFX10-NEXT: v_cndmask_b32_e32 v1, v6, v15, vcc_lo
; GFX10-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3
; GFX10-NEXT: v_perm_b32 v1, v1, v4, 0x7060302
-; GFX10-NEXT: v_cndmask_b32_e32 v3, v10, v14, vcc_lo
-; GFX10-NEXT: v_perm_b32 v3, v3, v7, 0x7060302
+; GFX10-NEXT: v_cndmask_b32_e32 v3, v9, v14, vcc_lo
+; GFX10-NEXT: v_perm_b32 v3, v3, v8, 0x7060302
; GFX10-NEXT: s_setpc_b64 s[30:31]
;
; GFX11-LABEL: v_fadd_v8bf16:
; GFX11: ; %bb.0:
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: v_lshlrev_b32_e32 v15, 16, v0
-; GFX11-NEXT: v_and_b32_e32 v0, 0xffff0000, v0
-; GFX11-NEXT: v_lshlrev_b32_e32 v11, 16, v2
+; GFX11-NEXT: v_lshlrev_b32_e32 v10, 16, v2
; GFX11-NEXT: v_lshlrev_b32_e32 v8, 16, v7
; GFX11-NEXT: v_and_b32_e32 v2, 0xffff0000, v2
; GFX11-NEXT: v_lshlrev_b32_e32 v9, 16, v3
-; GFX11-NEXT: v_and_b32_e32 v7, 0xffff0000, v7
-; GFX11-NEXT: s_mov_b32 s0, 0xff800000
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_dual_add_f32 v8, v9, v8 :: v_dual_lshlrev_b32 v9, 16, v6
-; GFX11-NEXT: v_bfe_u32 v10, v8, 16, 1
+; GFX11-NEXT: v_lshlrev_b32_e32 v15, 16, v0
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_2)
+; GFX11-NEXT: v_dual_add_f32 v8, v9, v8 :: v_dual_and_b32 v7, 0xffff0000, v7
+; GFX11-NEXT: v_lshlrev_b32_e32 v9, 16, v6
; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v8, v8
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
-; GFX11-NEXT: v_add_f32_e32 v9, v11, v9
-; GFX11-NEXT: v_add3_u32 v10, v10, v8, 0x7fff
-; GFX11-NEXT: v_and_b32_e32 v6, 0xffff0000, v6
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_2)
-; GFX11-NEXT: v_and_or_b32 v12, v9, s0, 0x400000
-; GFX11-NEXT: v_add_f32_e32 v2, v2, v6
-; GFX11-NEXT: v_lshlrev_b32_e32 v6, 16, v5
; GFX11-NEXT: v_and_b32_e32 v3, 0xffff0000, v3
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_2)
-; GFX11-NEXT: v_bfe_u32 v13, v2, 16, 1
+; GFX11-NEXT: v_bfe_u32 v11, v8, 16, 1
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_2) | instid1(VALU_DEP_4)
; GFX11-NEXT: v_add_f32_e32 v3, v3, v7
-; GFX11-NEXT: v_and_or_b32 v7, v8, s0, 0x400000
-; GFX11-NEXT: v_bfe_u32 v8, v9, 16, 1
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
-; GFX11-NEXT: v_cndmask_b32_e32 v7, v10, v7, vcc_lo
-; GFX11-NEXT: v_add3_u32 v8, v8, v9, 0x7fff
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v9, v9
-; GFX11-NEXT: v_add3_u32 v9, v13, v2, 0x7fff
-; GFX11-NEXT: v_lshlrev_b32_e32 v13, 16, v4
-; GFX11-NEXT: v_and_b32_e32 v4, 0xffff0000, v4
+; GFX11-NEXT: v_add_f32_e32 v7, v10, v9
+; GFX11-NEXT: v_or_b32_e32 v9, 0x400000, v8
+; GFX11-NEXT: v_add3_u32 v10, v11, v8, 0x7fff
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
; GFX11-NEXT: v_bfe_u32 v11, v3, 16, 1
-; GFX11-NEXT: v_cndmask_b32_e32 v8, v8, v12, vcc_lo
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v2, v2
-; GFX11-NEXT: v_and_or_b32 v14, v3, s0, 0x400000
-; GFX11-NEXT: v_add_f32_e32 v0, v0, v4
-; GFX11-NEXT: v_add3_u32 v10, v11, v3, 0x7fff
-; GFX11-NEXT: v_lshlrev_b32_e32 v11, 16, v1
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
-; GFX11-NEXT: v_dual_add_f32 v6, v11, v6 :: v_dual_and_b32 v1, 0xffff0000, v1
-; GFX11-NEXT: v_and_or_b32 v11, v2, s0, 0x400000
+; GFX11-NEXT: v_bfe_u32 v12, v7, 16, 1
+; GFX11-NEXT: v_or_b32_e32 v14, 0x400000, v3
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4)
+; GFX11-NEXT: v_cndmask_b32_e32 v8, v10, v9, vcc_lo
+; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v7, v7
+; GFX11-NEXT: v_add3_u32 v9, v11, v3, 0x7fff
+; GFX11-NEXT: v_add3_u32 v11, v12, v7, 0x7fff
+; GFX11-NEXT: v_or_b32_e32 v12, 0x400000, v7
+; GFX11-NEXT: v_and_b32_e32 v6, 0xffff0000, v6
+; GFX11-NEXT: v_lshlrev_b32_e32 v10, 16, v1
+; GFX11-NEXT: v_and_b32_e32 v0, 0xffff0000, v0
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_2) | instid1(VALU_DEP_3)
+; GFX11-NEXT: v_dual_cndmask_b32 v7, v11, v12 :: v_dual_add_f32 v2, v2, v6
+; GFX11-NEXT: v_lshlrev_b32_e32 v6, 16, v5
; GFX11-NEXT: v_and_b32_e32 v5, 0xffff0000, v5
+; GFX11-NEXT: v_bfe_u32 v13, v2, 16, 1
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_2) | instid1(VALU_DEP_4)
+; GFX11-NEXT: v_add_f32_e32 v6, v10, v6
+; GFX11-NEXT: v_or_b32_e32 v11, 0x400000, v2
+; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v2, v2
+; GFX11-NEXT: v_add3_u32 v10, v13, v2, 0x7fff
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_2)
; GFX11-NEXT: v_bfe_u32 v12, v6, 16, 1
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3)
-; GFX11-NEXT: v_cndmask_b32_e32 v2, v9, v11, vcc_lo
-; GFX11-NEXT: v_and_or_b32 v9, v6, s0, 0x400000
+; GFX11-NEXT: v_cndmask_b32_e32 v2, v10, v11, vcc_lo
+; GFX11-NEXT: v_or_b32_e32 v10, 0x400000, v6
; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v6, v6
-; GFX11-NEXT: v_add_f32_e32 v1, v1, v5
+; GFX11-NEXT: v_lshlrev_b32_e32 v13, 16, v4
+; GFX11-NEXT: v_and_b32_e32 v4, 0xffff0000, v4
+; GFX11-NEXT: v_and_b32_e32 v1, 0xffff0000, v1
+; GFX11-NEXT: v_perm_b32 v2, v2, v7, 0x7060302
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX11-NEXT: v_add_f32_e32 v0, v0, v4
; GFX11-NEXT: v_add3_u32 v4, v12, v6, 0x7fff
-; GFX11-NEXT: v_perm_b32 v2, v2, v8, 0x7060302
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
-; GFX11-NEXT: v_bfe_u32 v11, v1, 16, 1
-; GFX11-NEXT: v_cndmask_b32_e32 v4, v4, v9, vcc_lo
+; GFX11-NEXT: v_dual_add_f32 v1, v1, v5 :: v_dual_cndmask_b32 v4, v4, v10
; GFX11-NEXT: v_add_f32_e32 v5, v15, v13
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_2) | instid1(VALU_DEP_4)
+; GFX11-NEXT: v_bfe_u32 v11, v1, 16, 1
; GFX11-NEXT: v_bfe_u32 v13, v0, 16, 1
-; GFX11-NEXT: v_and_or_b32 v15, v1, s0, 0x400000
-; GFX11-NEXT: v_add3_u32 v6, v11, v1, 0x7fff
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_2) | instid1(VALU_DEP_3)
+; GFX11-NEXT: v_or_b32_e32 v15, 0x400000, v1
; GFX11-NEXT: v_bfe_u32 v12, v5, 16, 1
-; GFX11-NEXT: v_and_or_b32 v11, v5, s0, 0x400000
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_2) | instid1(VALU_DEP_4)
+; GFX11-NEXT: v_add3_u32 v6, v11, v1, 0x7fff
+; GFX11-NEXT: v_or_b32_e32 v11, 0x400000, v5
; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
-; GFX11-NEXT: v_add3_u32 v9, v12, v5, 0x7fff
+; GFX11-NEXT: v_add3_u32 v10, v12, v5, 0x7fff
; GFX11-NEXT: v_add3_u32 v12, v13, v0, 0x7fff
-; GFX11-NEXT: v_and_or_b32 v13, v0, s0, 0x400000
+; GFX11-NEXT: v_or_b32_e32 v13, 0x400000, v0
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_3)
-; GFX11-NEXT: v_cndmask_b32_e32 v5, v9, v11, vcc_lo
+; GFX11-NEXT: v_cndmask_b32_e32 v5, v10, v11, vcc_lo
; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0
; GFX11-NEXT: v_cndmask_b32_e32 v0, v12, v13, vcc_lo
; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
@@ -10036,9 +9981,9 @@ define <8 x bfloat> @v_fadd_v8bf16(<8 x bfloat> %a, <8 x bfloat> %b) {
; GFX11-NEXT: v_cndmask_b32_e32 v1, v6, v15, vcc_lo
; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3
; GFX11-NEXT: v_perm_b32 v1, v1, v4, 0x7060302
-; GFX11-NEXT: v_cndmask_b32_e32 v3, v10, v14, vcc_lo
+; GFX11-NEXT: v_cndmask_b32_e32 v3, v9, v14, vcc_lo
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX11-NEXT: v_perm_b32 v3, v3, v7, 0x7060302
+; GFX11-NEXT: v_perm_b32 v3, v3, v8, 0x7060302
; GFX11-NEXT: s_setpc_b64 s[30:31]
%op = fadd <8 x bfloat> %a, %b
ret <8 x bfloat> %op
@@ -10263,16 +10208,14 @@ define <16 x bfloat> @v_fadd_v16bf16(<16 x bfloat> %a, <16 x bfloat> %b) {
; GFX8-NEXT: v_and_b32_e32 v15, 0xffff0000, v15
; GFX8-NEXT: v_and_b32_e32 v7, 0xffff0000, v7
; GFX8-NEXT: v_add_u32_e32 v17, vcc, s4, v17
-; GFX8-NEXT: v_and_b32_e32 v18, 0xff800000, v16
; GFX8-NEXT: v_add_f32_e32 v7, v7, v15
-; GFX8-NEXT: v_or_b32_e32 v18, 0x400000, v18
+; GFX8-NEXT: v_or_b32_e32 v18, 0x400000, v16
; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v16, v16
; GFX8-NEXT: v_bfe_u32 v15, v7, 16, 1
; GFX8-NEXT: v_cndmask_b32_e32 v16, v17, v18, vcc
; GFX8-NEXT: v_add_u32_e32 v15, vcc, v15, v7
; GFX8-NEXT: v_add_u32_e32 v15, vcc, s4, v15
-; GFX8-NEXT: v_and_b32_e32 v17, 0xff800000, v7
-; GFX8-NEXT: v_or_b32_e32 v17, 0x400000, v17
+; GFX8-NEXT: v_or_b32_e32 v17, 0x400000, v7
; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v7, v7
; GFX8-NEXT: v_cndmask_b32_e32 v7, v15, v17, vcc
; GFX8-NEXT: v_lshlrev_b32_e32 v15, 16, v14
@@ -10283,16 +10226,14 @@ define <16 x bfloat> @v_fadd_v16bf16(<16 x bfloat> %a, <16 x bfloat> %b) {
; GFX8-NEXT: v_and_b32_e32 v14, 0xffff0000, v14
; GFX8-NEXT: v_and_b32_e32 v6, 0xffff0000, v6
; GFX8-NEXT: v_add_u32_e32 v17, vcc, s4, v17
-; GFX8-NEXT: v_and_b32_e32 v18, 0xff800000, v15
; GFX8-NEXT: v_add_f32_e32 v6, v6, v14
-; GFX8-NEXT: v_or_b32_e32 v18, 0x400000, v18
+; GFX8-NEXT: v_or_b32_e32 v18, 0x400000, v15
; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v15, v15
; GFX8-NEXT: v_bfe_u32 v14, v6, 16, 1
; GFX8-NEXT: v_cndmask_b32_e32 v15, v17, v18, vcc
; GFX8-NEXT: v_add_u32_e32 v14, vcc, v14, v6
; GFX8-NEXT: v_add_u32_e32 v14, vcc, s4, v14
-; GFX8-NEXT: v_and_b32_e32 v17, 0xff800000, v6
-; GFX8-NEXT: v_or_b32_e32 v17, 0x400000, v17
+; GFX8-NEXT: v_or_b32_e32 v17, 0x400000, v6
; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v6, v6
; GFX8-NEXT: v_cndmask_b32_e32 v6, v14, v17, vcc
; GFX8-NEXT: v_lshlrev_b32_e32 v14, 16, v13
@@ -10303,16 +10244,14 @@ define <16 x bfloat> @v_fadd_v16bf16(<16 x bfloat> %a, <16 x bfloat> %b) {
; GFX8-NEXT: v_and_b32_e32 v13, 0xffff0000, v13
; GFX8-NEXT: v_and_b32_e32 v5, 0xffff0000, v5
; GFX8-NEXT: v_add_u32_e32 v17, vcc, s4, v17
-; GFX8-NEXT: v_and_b32_e32 v18, 0xff800000, v14
; GFX8-NEXT: v_add_f32_e32 v5, v5, v13
-; GFX8-NEXT: v_or_b32_e32 v18, 0x400000, v18
+; GFX8-NEXT: v_or_b32_e32 v18, 0x400000, v14
; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v14, v14
; GFX8-NEXT: v_bfe_u32 v13, v5, 16, 1
; GFX8-NEXT: v_cndmask_b32_e32 v14, v17, v18, vcc
; GFX8-NEXT: v_add_u32_e32 v13, vcc, v13, v5
; GFX8-NEXT: v_add_u32_e32 v13, vcc, s4, v13
-; GFX8-NEXT: v_and_b32_e32 v17, 0xff800000, v5
-; GFX8-NEXT: v_or_b32_e32 v17, 0x400000, v17
+; GFX8-NEXT: v_or_b32_e32 v17, 0x400000, v5
; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v5, v5
; GFX8-NEXT: v_cndmask_b32_e32 v5, v13, v17, vcc
; GFX8-NEXT: v_lshlrev_b32_e32 v13, 16, v12
@@ -10323,16 +10262,14 @@ define <16 x bfloat> @v_fadd_v16bf16(<16 x bfloat> %a, <16 x bfloat> %b) {
; GFX8-NEXT: v_and_b32_e32 v12, 0xffff0000, v12
; GFX8-NEXT: v_and_b32_e32 v4, 0xffff0000, v4
; GFX8-NEXT: v_add_u32_e32 v17, vcc, s4, v17
-; GFX8-NEXT: v_and_b32_e32 v18, 0xff800000, v13
; GFX8-NEXT: v_add_f32_e32 v4, v4, v12
-; GFX8-NEXT: v_or_b32_e32 v18, 0x400000, v18
+; GFX8-NEXT: v_or_b32_e32 v18, 0x400000, v13
; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v13, v13
; GFX8-NEXT: v_bfe_u32 v12, v4, 16, 1
; GFX8-NEXT: v_cndmask_b32_e32 v13, v17, v18, vcc
; GFX8-NEXT: v_add_u32_e32 v12, vcc, v12, v4
; GFX8-NEXT: v_add_u32_e32 v12, vcc, s4, v12
-; GFX8-NEXT: v_and_b32_e32 v17, 0xff800000, v4
-; GFX8-NEXT: v_or_b32_e32 v17, 0x400000, v17
+; GFX8-NEXT: v_or_b32_e32 v17, 0x400000, v4
; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v4, v4
; GFX8-NEXT: v_cndmask_b32_e32 v4, v12, v17, vcc
; GFX8-NEXT: v_lshlrev_b32_e32 v12, 16, v11
@@ -10343,16 +10280,14 @@ define <16 x bfloat> @v_fadd_v16bf16(<16 x bfloat> %a, <16 x bfloat> %b) {
; GFX8-NEXT: v_and_b32_e32 v11, 0xffff0000, v11
; GFX8-NEXT: v_and_b32_e32 v3, 0xffff0000, v3
; GFX8-NEXT: v_add_u32_e32 v17, vcc, s4, v17
-; GFX8-NEXT: v_and_b32_e32 v18, 0xff800000, v12
; GFX8-NEXT: v_add_f32_e32 v3, v3, v11
-; GFX8-NEXT: v_or_b32_e32 v18, 0x400000, v18
+; GFX8-NEXT: v_or_b32_e32 v18, 0x400000, v12
; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v12, v12
; GFX8-NEXT: v_bfe_u32 v11, v3, 16, 1
; GFX8-NEXT: v_cndmask_b32_e32 v12, v17, v18, vcc
; GFX8-NEXT: v_add_u32_e32 v11, vcc, v11, v3
; GFX8-NEXT: v_add_u32_e32 v11, vcc, s4, v11
-; GFX8-NEXT: v_and_b32_e32 v17, 0xff800000, v3
-; GFX8-NEXT: v_or_b32_e32 v17, 0x400000, v17
+; GFX8-NEXT: v_or_b32_e32 v17, 0x400000, v3
; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v3, v3
; GFX8-NEXT: v_cndmask_b32_e32 v3, v11, v17, vcc
; GFX8-NEXT: v_lshlrev_b32_e32 v11, 16, v10
@@ -10363,16 +10298,14 @@ define <16 x bfloat> @v_fadd_v16bf16(<16 x bfloat> %a, <16 x bfloat> %b) {
; GFX8-NEXT: v_and_b32_e32 v10, 0xffff0000, v10
; GFX8-NEXT: v_and_b32_e32 v2, 0xffff0000, v2
; GFX8-NEXT: v_add_u32_e32 v17, vcc, s4, v17
-; GFX8-NEXT: v_and_b32_e32 v18, 0xff800000, v11
; GFX8-NEXT: v_add_f32_e32 v2, v2, v10
-; GFX8-NEXT: v_or_b32_e32 v18, 0x400000, v18
+; GFX8-NEXT: v_or_b32_e32 v18, 0x400000, v11
; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v11, v11
; GFX8-NEXT: v_bfe_u32 v10, v2, 16, 1
; GFX8-NEXT: v_cndmask_b32_e32 v11, v17, v18, vcc
; GFX8-NEXT: v_add_u32_e32 v10, vcc, v10, v2
; GFX8-NEXT: v_add_u32_e32 v10, vcc, s4, v10
-; GFX8-NEXT: v_and_b32_e32 v17, 0xff800000, v2
-; GFX8-NEXT: v_or_b32_e32 v17, 0x400000, v17
+; GFX8-NEXT: v_or_b32_e32 v17, 0x400000, v2
; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v2, v2
; GFX8-NEXT: v_cndmask_b32_e32 v2, v10, v17, vcc
; GFX8-NEXT: v_lshlrev_b32_e32 v10, 16, v9
@@ -10383,16 +10316,14 @@ define <16 x bfloat> @v_fadd_v16bf16(<16 x bfloat> %a, <16 x bfloat> %b) {
; GFX8-NEXT: v_and_b32_e32 v9, 0xffff0000, v9
; GFX8-NEXT: v_and_b32_e32 v1, 0xffff0000, v1
; GFX8-NEXT: v_add_u32_e32 v17, vcc, s4, v17
-; GFX8-NEXT: v_and_b32_e32 v18, 0xff800000, v10
; GFX8-NEXT: v_add_f32_e32 v1, v1, v9
-; GFX8-NEXT: v_or_b32_e32 v18, 0x400000, v18
+; GFX8-NEXT: v_or_b32_e32 v18, 0x400000, v10
; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v10, v10
; GFX8-NEXT: v_bfe_u32 v9, v1, 16, 1
; GFX8-NEXT: v_cndmask_b32_e32 v10, v17, v18, vcc
; GFX8-NEXT: v_add_u32_e32 v9, vcc, v9, v1
; GFX8-NEXT: v_add_u32_e32 v9, vcc, s4, v9
-; GFX8-NEXT: v_and_b32_e32 v17, 0xff800000, v1
-; GFX8-NEXT: v_or_b32_e32 v17, 0x400000, v17
+; GFX8-NEXT: v_or_b32_e32 v17, 0x400000, v1
; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v1, v1
; GFX8-NEXT: v_cndmask_b32_e32 v1, v9, v17, vcc
; GFX8-NEXT: v_lshlrev_b32_e32 v9, 16, v8
@@ -10403,16 +10334,14 @@ define <16 x bfloat> @v_fadd_v16bf16(<16 x bfloat> %a, <16 x bfloat> %b) {
; GFX8-NEXT: v_and_b32_e32 v8, 0xffff0000, v8
; GFX8-NEXT: v_and_b32_e32 v0, 0xffff0000, v0
; GFX8-NEXT: v_add_u32_e32 v17, vcc, s4, v17
-; GFX8-NEXT: v_and_b32_e32 v18, 0xff800000, v9
; GFX8-NEXT: v_add_f32_e32 v0, v0, v8
-; GFX8-NEXT: v_or_b32_e32 v18, 0x400000, v18
+; GFX8-NEXT: v_or_b32_e32 v18, 0x400000, v9
; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v9, v9
; GFX8-NEXT: v_bfe_u32 v8, v0, 16, 1
; GFX8-NEXT: v_cndmask_b32_e32 v9, v17, v18, vcc
; GFX8-NEXT: v_add_u32_e32 v8, vcc, v8, v0
; GFX8-NEXT: v_add_u32_e32 v8, vcc, s4, v8
-; GFX8-NEXT: v_and_b32_e32 v17, 0xff800000, v0
-; GFX8-NEXT: v_or_b32_e32 v17, 0x400000, v17
+; GFX8-NEXT: v_or_b32_e32 v17, 0x400000, v0
; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v0, v0
; GFX8-NEXT: v_cndmask_b32_e32 v0, v8, v17, vcc
; GFX8-NEXT: v_lshrrev_b32_e32 v7, 16, v7
@@ -10439,146 +10368,130 @@ define <16 x bfloat> @v_fadd_v16bf16(<16 x bfloat> %a, <16 x bfloat> %b) {
; GFX9-NEXT: v_lshlrev_b32_e32 v16, 16, v15
; GFX9-NEXT: v_lshlrev_b32_e32 v17, 16, v7
; GFX9-NEXT: v_add_f32_e32 v16, v17, v16
-; GFX9-NEXT: v_bfe_u32 v17, v16, 16, 1
-; GFX9-NEXT: s_movk_i32 s4, 0x7fff
-; GFX9-NEXT: v_and_b32_e32 v18, 0xff800000, v16
; GFX9-NEXT: v_and_b32_e32 v15, 0xffff0000, v15
; GFX9-NEXT: v_and_b32_e32 v7, 0xffff0000, v7
+; GFX9-NEXT: v_bfe_u32 v17, v16, 16, 1
+; GFX9-NEXT: s_movk_i32 s4, 0x7fff
+; GFX9-NEXT: v_add_f32_e32 v7, v7, v15
; GFX9-NEXT: v_add3_u32 v17, v17, v16, s4
-; GFX9-NEXT: v_or_b32_e32 v18, 0x400000, v18
+; GFX9-NEXT: v_or_b32_e32 v18, 0x400000, v16
; GFX9-NEXT: v_cmp_u_f32_e32 vcc, v16, v16
-; GFX9-NEXT: v_add_f32_e32 v7, v7, v15
-; GFX9-NEXT: v_cndmask_b32_e32 v16, v17, v18, vcc
; GFX9-NEXT: v_bfe_u32 v15, v7, 16, 1
-; GFX9-NEXT: v_and_b32_e32 v17, 0xff800000, v7
+; GFX9-NEXT: v_cndmask_b32_e32 v16, v17, v18, vcc
; GFX9-NEXT: v_add3_u32 v15, v15, v7, s4
-; GFX9-NEXT: v_or_b32_e32 v17, 0x400000, v17
+; GFX9-NEXT: v_or_b32_e32 v17, 0x400000, v7
; GFX9-NEXT: v_cmp_u_f32_e32 vcc, v7, v7
; GFX9-NEXT: v_cndmask_b32_e32 v7, v15, v17, vcc
; GFX9-NEXT: v_lshlrev_b32_e32 v15, 16, v14
; GFX9-NEXT: v_lshlrev_b32_e32 v17, 16, v6
; GFX9-NEXT: v_add_f32_e32 v15, v17, v15
-; GFX9-NEXT: v_bfe_u32 v17, v15, 16, 1
-; GFX9-NEXT: v_and_b32_e32 v18, 0xff800000, v15
; GFX9-NEXT: v_and_b32_e32 v14, 0xffff0000, v14
; GFX9-NEXT: v_and_b32_e32 v6, 0xffff0000, v6
+; GFX9-NEXT: v_bfe_u32 v17, v15, 16, 1
+; GFX9-NEXT: v_add_f32_e32 v6, v6, v14
; GFX9-NEXT: v_add3_u32 v17, v17, v15, s4
-; GFX9-NEXT: v_or_b32_e32 v18, 0x400000, v18
+; GFX9-NEXT: v_or_b32_e32 v18, 0x400000, v15
; GFX9-NEXT: v_cmp_u_f32_e32 vcc, v15, v15
-; GFX9-NEXT: v_add_f32_e32 v6, v6, v14
-; GFX9-NEXT: v_cndmask_b32_e32 v15, v17, v18, vcc
; GFX9-NEXT: v_bfe_u32 v14, v6, 16, 1
-; GFX9-NEXT: v_and_b32_e32 v17, 0xff800000, v6
+; GFX9-NEXT: v_cndmask_b32_e32 v15, v17, v18, vcc
; GFX9-NEXT: v_add3_u32 v14, v14, v6, s4
-; GFX9-NEXT: v_or_b32_e32 v17, 0x400000, v17
+; GFX9-NEXT: v_or_b32_e32 v17, 0x400000, v6
; GFX9-NEXT: v_cmp_u_f32_e32 vcc, v6, v6
; GFX9-NEXT: v_cndmask_b32_e32 v6, v14, v17, vcc
; GFX9-NEXT: v_lshlrev_b32_e32 v14, 16, v13
; GFX9-NEXT: v_lshlrev_b32_e32 v17, 16, v5
; GFX9-NEXT: v_add_f32_e32 v14, v17, v14
-; GFX9-NEXT: v_bfe_u32 v17, v14, 16, 1
-; GFX9-NEXT: v_and_b32_e32 v18, 0xff800000, v14
; GFX9-NEXT: v_and_b32_e32 v13, 0xffff0000, v13
; GFX9-NEXT: v_and_b32_e32 v5, 0xffff0000, v5
+; GFX9-NEXT: v_bfe_u32 v17, v14, 16, 1
+; GFX9-NEXT: v_add_f32_e32 v5, v5, v13
; GFX9-NEXT: v_add3_u32 v17, v17, v14, s4
-; GFX9-NEXT: v_or_b32_e32 v18, 0x400000, v18
+; GFX9-NEXT: v_or_b32_e32 v18, 0x400000, v14
; GFX9-NEXT: v_cmp_u_f32_e32 vcc, v14, v14
-; GFX9-NEXT: v_add_f32_e32 v5, v5, v13
-; GFX9-NEXT: v_cndmask_b32_e32 v14, v17, v18, vcc
; GFX9-NEXT: v_bfe_u32 v13, v5, 16, 1
-; GFX9-NEXT: v_and_b32_e32 v17, 0xff800000, v5
+; GFX9-NEXT: v_cndmask_b32_e32 v14, v17, v18, vcc
; GFX9-NEXT: v_add3_u32 v13, v13, v5, s4
-; GFX9-NEXT: v_or_b32_e32 v17, 0x400000, v17
+; GFX9-NEXT: v_or_b32_e32 v17, 0x400000, v5
; GFX9-NEXT: v_cmp_u_f32_e32 vcc, v5, v5
; GFX9-NEXT: v_cndmask_b32_e32 v5, v13, v17, vcc
; GFX9-NEXT: v_lshlrev_b32_e32 v13, 16, v12
; GFX9-NEXT: v_lshlrev_b32_e32 v17, 16, v4
; GFX9-NEXT: v_add_f32_e32 v13, v17, v13
-; GFX9-NEXT: v_bfe_u32 v17, v13, 16, 1
-; GFX9-NEXT: v_and_b32_e32 v18, 0xff800000, v13
; GFX9-NEXT: v_and_b32_e32 v12, 0xffff0000, v12
; GFX9-NEXT: v_and_b32_e32 v4, 0xffff0000, v4
+; GFX9-NEXT: v_bfe_u32 v17, v13, 16, 1
+; GFX9-NEXT: v_add_f32_e32 v4, v4, v12
; GFX9-NEXT: v_add3_u32 v17, v17, v13, s4
-; GFX9-NEXT: v_or_b32_e32 v18, 0x400000, v18
+; GFX9-NEXT: v_or_b32_e32 v18, 0x400000, v13
; GFX9-NEXT: v_cmp_u_f32_e32 vcc, v13, v13
-; GFX9-NEXT: v_add_f32_e32 v4, v4, v12
-; GFX9-NEXT: v_cndmask_b32_e32 v13, v17, v18, vcc
; GFX9-NEXT: v_bfe_u32 v12, v4, 16, 1
-; GFX9-NEXT: v_and_b32_e32 v17, 0xff800000, v4
+; GFX9-NEXT: v_cndmask_b32_e32 v13, v17, v18, vcc
; GFX9-NEXT: v_add3_u32 v12, v12, v4, s4
-; GFX9-NEXT: v_or_b32_e32 v17, 0x400000, v17
+; GFX9-NEXT: v_or_b32_e32 v17, 0x400000, v4
; GFX9-NEXT: v_cmp_u_f32_e32 vcc, v4, v4
; GFX9-NEXT: v_cndmask_b32_e32 v4, v12, v17, vcc
; GFX9-NEXT: v_lshlrev_b32_e32 v12, 16, v11
; GFX9-NEXT: v_lshlrev_b32_e32 v17, 16, v3
; GFX9-NEXT: v_add_f32_e32 v12, v17, v12
-; GFX9-NEXT: v_bfe_u32 v17, v12, 16, 1
-; GFX9-NEXT: v_and_b32_e32 v18, 0xff800000, v12
; GFX9-NEXT: v_and_b32_e32 v11, 0xffff0000, v11
; GFX9-NEXT: v_and_b32_e32 v3, 0xffff0000, v3
+; GFX9-NEXT: v_bfe_u32 v17, v12, 16, 1
+; GFX9-NEXT: v_add_f32_e32 v3, v3, v11
; GFX9-NEXT: v_add3_u32 v17, v17, v12, s4
-; GFX9-NEXT: v_or_b32_e32 v18, 0x400000, v18
+; GFX9-NEXT: v_or_b32_e32 v18, 0x400000, v12
; GFX9-NEXT: v_cmp_u_f32_e32 vcc, v12, v12
-; GFX9-NEXT: v_add_f32_e32 v3, v3, v11
-; GFX9-NEXT: v_cndmask_b32_e32 v12, v17, v18, vcc
; GFX9-NEXT: v_bfe_u32 v11, v3, 16, 1
-; GFX9-NEXT: v_and_b32_e32 v17, 0xff800000, v3
+; GFX9-NEXT: v_cndmask_b32_e32 v12, v17, v18, vcc
; GFX9-NEXT: v_add3_u32 v11, v11, v3, s4
-; GFX9-NEXT: v_or_b32_e32 v17, 0x400000, v17
+; GFX9-NEXT: v_or_b32_e32 v17, 0x400000, v3
; GFX9-NEXT: v_cmp_u_f32_e32 vcc, v3, v3
; GFX9-NEXT: v_cndmask_b32_e32 v3, v11, v17, vcc
; GFX9-NEXT: v_lshlrev_b32_e32 v11, 16, v10
; GFX9-NEXT: v_lshlrev_b32_e32 v17, 16, v2
; GFX9-NEXT: v_add_f32_e32 v11, v17, v11
-; GFX9-NEXT: v_bfe_u32 v17, v11, 16, 1
-; GFX9-NEXT: v_and_b32_e32 v18, 0xff800000, v11
; GFX9-NEXT: v_and_b32_e32 v10, 0xffff0000, v10
; GFX9-NEXT: v_and_b32_e32 v2, 0xffff0000, v2
+; GFX9-NEXT: v_bfe_u32 v17, v11, 16, 1
+; GFX9-NEXT: v_add_f32_e32 v2, v2, v10
; GFX9-NEXT: v_add3_u32 v17, v17, v11, s4
-; GFX9-NEXT: v_or_b32_e32 v18, 0x400000, v18
+; GFX9-NEXT: v_or_b32_e32 v18, 0x400000, v11
; GFX9-NEXT: v_cmp_u_f32_e32 vcc, v11, v11
-; GFX9-NEXT: v_add_f32_e32 v2, v2, v10
-; GFX9-NEXT: v_cndmask_b32_e32 v11, v17, v18, vcc
; GFX9-NEXT: v_bfe_u32 v10, v2, 16, 1
-; GFX9-NEXT: v_and_b32_e32 v17, 0xff800000, v2
+; GFX9-NEXT: v_cndmask_b32_e32 v11, v17, v18, vcc
; GFX9-NEXT: v_add3_u32 v10, v10, v2, s4
-; GFX9-NEXT: v_or_b32_e32 v17, 0x400000, v17
+; GFX9-NEXT: v_or_b32_e32 v17, 0x400000, v2
; GFX9-NEXT: v_cmp_u_f32_e32 vcc, v2, v2
; GFX9-NEXT: v_cndmask_b32_e32 v2, v10, v17, vcc
; GFX9-NEXT: v_lshlrev_b32_e32 v10, 16, v9
; GFX9-NEXT: v_lshlrev_b32_e32 v17, 16, v1
; GFX9-NEXT: v_add_f32_e32 v10, v17, v10
-; GFX9-NEXT: v_bfe_u32 v17, v10, 16, 1
-; GFX9-NEXT: v_and_b32_e32 v18, 0xff800000, v10
; GFX9-NEXT: v_and_b32_e32 v9, 0xffff0000, v9
; GFX9-NEXT: v_and_b32_e32 v1, 0xffff0000, v1
+; GFX9-NEXT: v_bfe_u32 v17, v10, 16, 1
+; GFX9-NEXT: v_add_f32_e32 v1, v1, v9
; GFX9-NEXT: v_add3_u32 v17, v17, v10, s4
-; GFX9-NEXT: v_or_b32_e32 v18, 0x400000, v18
+; GFX9-NEXT: v_or_b32_e32 v18, 0x400000, v10
; GFX9-NEXT: v_cmp_u_f32_e32 vcc, v10, v10
-; GFX9-NEXT: v_add_f32_e32 v1, v1, v9
-; GFX9-NEXT: v_cndmask_b32_e32 v10, v17, v18, vcc
; GFX9-NEXT: v_bfe_u32 v9, v1, 16, 1
-; GFX9-NEXT: v_and_b32_e32 v17, 0xff800000, v1
+; GFX9-NEXT: v_cndmask_b32_e32 v10, v17, v18, vcc
; GFX9-NEXT: v_add3_u32 v9, v9, v1, s4
-; GFX9-NEXT: v_or_b32_e32 v17, 0x400000, v17
+; GFX9-NEXT: v_or_b32_e32 v17, 0x400000, v1
; GFX9-NEXT: v_cmp_u_f32_e32 vcc, v1, v1
; GFX9-NEXT: v_cndmask_b32_e32 v1, v9, v17, vcc
; GFX9-NEXT: v_lshlrev_b32_e32 v9, 16, v8
; GFX9-NEXT: v_lshlrev_b32_e32 v17, 16, v0
; GFX9-NEXT: v_add_f32_e32 v9, v17, v9
-; GFX9-NEXT: v_bfe_u32 v17, v9, 16, 1
-; GFX9-NEXT: v_and_b32_e32 v18, 0xff800000, v9
; GFX9-NEXT: v_and_b32_e32 v8, 0xffff0000, v8
; GFX9-NEXT: v_and_b32_e32 v0, 0xffff0000, v0
+; GFX9-NEXT: v_bfe_u32 v17, v9, 16, 1
+; GFX9-NEXT: v_add_f32_e32 v0, v0, v8
; GFX9-NEXT: v_add3_u32 v17, v17, v9, s4
-; GFX9-NEXT: v_or_b32_e32 v18, 0x400000, v18
+; GFX9-NEXT: v_or_b32_e32 v18, 0x400000, v9
; GFX9-NEXT: v_cmp_u_f32_e32 vcc, v9, v9
-; GFX9-NEXT: v_add_f32_e32 v0, v0, v8
-; GFX9-NEXT: v_cndmask_b32_e32 v9, v17, v18, vcc
; GFX9-NEXT: v_bfe_u32 v8, v0, 16, 1
-; GFX9-NEXT: v_and_b32_e32 v17, 0xff800000, v0
+; GFX9-NEXT: v_cndmask_b32_e32 v9, v17, v18, vcc
; GFX9-NEXT: v_add3_u32 v8, v8, v0, s4
-; GFX9-NEXT: v_or_b32_e32 v17, 0x400000, v17
+; GFX9-NEXT: v_or_b32_e32 v17, 0x400000, v0
; GFX9-NEXT: v_cmp_u_f32_e32 vcc, v0, v0
; GFX9-NEXT: v_cndmask_b32_e32 v0, v8, v17, vcc
; GFX9-NEXT: s_mov_b32 s4, 0x7060302
@@ -10599,27 +10512,26 @@ define <16 x bfloat> @v_fadd_v16bf16(<16 x bfloat> %a, <16 x bfloat> %b) {
; GFX10-NEXT: v_lshlrev_b32_e32 v17, 16, v7
; GFX10-NEXT: v_and_b32_e32 v15, 0xffff0000, v15
; GFX10-NEXT: v_and_b32_e32 v7, 0xffff0000, v7
-; GFX10-NEXT: s_mov_b32 s4, 0xff800000
; GFX10-NEXT: v_lshlrev_b32_e32 v18, 16, v6
+; GFX10-NEXT: v_and_b32_e32 v6, 0xffff0000, v6
; GFX10-NEXT: v_add_f32_e32 v16, v17, v16
; GFX10-NEXT: v_lshlrev_b32_e32 v17, 16, v14
; GFX10-NEXT: v_add_f32_e32 v7, v7, v15
; GFX10-NEXT: v_and_b32_e32 v14, 0xffff0000, v14
-; GFX10-NEXT: v_and_b32_e32 v6, 0xffff0000, v6
; GFX10-NEXT: v_bfe_u32 v15, v16, 16, 1
-; GFX10-NEXT: v_and_or_b32 v20, v16, s4, 0x400000
+; GFX10-NEXT: v_or_b32_e32 v20, 0x400000, v16
; GFX10-NEXT: v_bfe_u32 v19, v7, 16, 1
; GFX10-NEXT: v_cmp_u_f32_e32 vcc_lo, v16, v16
; GFX10-NEXT: v_add_f32_e32 v17, v18, v17
; GFX10-NEXT: v_add3_u32 v15, v15, v16, 0x7fff
; GFX10-NEXT: v_add_f32_e32 v6, v6, v14
; GFX10-NEXT: v_add3_u32 v18, v19, v7, 0x7fff
-; GFX10-NEXT: v_and_or_b32 v19, v7, s4, 0x400000
+; GFX10-NEXT: v_or_b32_e32 v19, 0x400000, v7
; GFX10-NEXT: v_bfe_u32 v21, v17, 16, 1
; GFX10-NEXT: v_cndmask_b32_e32 v15, v15, v20, vcc_lo
; GFX10-NEXT: v_cmp_u_f32_e32 vcc_lo, v7, v7
; GFX10-NEXT: v_lshlrev_b32_e32 v20, 16, v5
-; GFX10-NEXT: v_and_or_b32 v16, v17, s4, 0x400000
+; GFX10-NEXT: v_or_b32_e32 v16, 0x400000, v17
; GFX10-NEXT: v_add3_u32 v14, v21, v17, 0x7fff
; GFX10-NEXT: v_and_b32_e32 v5, 0xffff0000, v5
; GFX10-NEXT: v_cndmask_b32_e32 v7, v18, v19, vcc_lo
@@ -10633,7 +10545,7 @@ define <16 x bfloat> @v_fadd_v16bf16(<16 x bfloat> %a, <16 x bfloat> %b) {
; GFX10-NEXT: v_add_f32_e32 v5, v5, v13
; GFX10-NEXT: v_cndmask_b32_e32 v14, v14, v16, vcc_lo
; GFX10-NEXT: v_add3_u32 v16, v18, v6, 0x7fff
-; GFX10-NEXT: v_and_or_b32 v13, v6, s4, 0x400000
+; GFX10-NEXT: v_or_b32_e32 v13, 0x400000, v6
; GFX10-NEXT: v_lshlrev_b32_e32 v18, 16, v12
; GFX10-NEXT: v_bfe_u32 v20, v17, 16, 1
; GFX10-NEXT: v_cmp_u_f32_e32 vcc_lo, v6, v6
@@ -10643,10 +10555,10 @@ define <16 x bfloat> @v_fadd_v16bf16(<16 x bfloat> %a, <16 x bfloat> %b) {
; GFX10-NEXT: v_cndmask_b32_e32 v6, v16, v13, vcc_lo
; GFX10-NEXT: v_add_f32_e32 v13, v19, v18
; GFX10-NEXT: v_add3_u32 v16, v20, v17, 0x7fff
-; GFX10-NEXT: v_and_or_b32 v18, v17, s4, 0x400000
+; GFX10-NEXT: v_or_b32_e32 v18, 0x400000, v17
; GFX10-NEXT: v_cmp_u_f32_e32 vcc_lo, v17, v17
; GFX10-NEXT: v_add3_u32 v19, v21, v5, 0x7fff
-; GFX10-NEXT: v_and_or_b32 v20, v5, s4, 0x400000
+; GFX10-NEXT: v_or_b32_e32 v20, 0x400000, v5
; GFX10-NEXT: v_bfe_u32 v21, v13, 16, 1
; GFX10-NEXT: v_add_f32_e32 v4, v4, v12
; GFX10-NEXT: v_cndmask_b32_e32 v16, v16, v18, vcc_lo
@@ -10656,14 +10568,14 @@ define <16 x bfloat> @v_fadd_v16bf16(<16 x bfloat> %a, <16 x bfloat> %b) {
; GFX10-NEXT: v_add3_u32 v17, v21, v13, 0x7fff
; GFX10-NEXT: v_and_b32_e32 v11, 0xffff0000, v11
; GFX10-NEXT: v_cndmask_b32_e32 v5, v19, v20, vcc_lo
-; GFX10-NEXT: v_and_or_b32 v19, v13, s4, 0x400000
+; GFX10-NEXT: v_or_b32_e32 v19, 0x400000, v13
; GFX10-NEXT: v_and_b32_e32 v3, 0xffff0000, v3
; GFX10-NEXT: v_add_f32_e32 v12, v18, v12
; GFX10-NEXT: v_cmp_u_f32_e32 vcc_lo, v13, v13
; GFX10-NEXT: v_bfe_u32 v20, v4, 16, 1
; GFX10-NEXT: v_lshlrev_b32_e32 v18, 16, v10
; GFX10-NEXT: v_add_f32_e32 v3, v3, v11
-; GFX10-NEXT: v_and_or_b32 v22, v12, s4, 0x400000
+; GFX10-NEXT: v_or_b32_e32 v22, 0x400000, v12
; GFX10-NEXT: v_cndmask_b32_e32 v13, v17, v19, vcc_lo
; GFX10-NEXT: v_bfe_u32 v17, v12, 16, 1
; GFX10-NEXT: v_lshlrev_b32_e32 v19, 16, v2
@@ -10675,12 +10587,12 @@ define <16 x bfloat> @v_fadd_v16bf16(<16 x bfloat> %a, <16 x bfloat> %b) {
; GFX10-NEXT: v_and_b32_e32 v2, 0xffff0000, v2
; GFX10-NEXT: v_cmp_u_f32_e32 vcc_lo, v12, v12
; GFX10-NEXT: v_add3_u32 v19, v20, v3, 0x7fff
-; GFX10-NEXT: v_and_or_b32 v20, v3, s4, 0x400000
+; GFX10-NEXT: v_or_b32_e32 v20, 0x400000, v3
; GFX10-NEXT: v_bfe_u32 v23, v18, 16, 1
; GFX10-NEXT: v_add_f32_e32 v2, v2, v10
; GFX10-NEXT: v_cndmask_b32_e32 v12, v17, v22, vcc_lo
; GFX10-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3
-; GFX10-NEXT: v_and_or_b32 v17, v18, s4, 0x400000
+; GFX10-NEXT: v_or_b32_e32 v17, 0x400000, v18
; GFX10-NEXT: v_add3_u32 v10, v23, v18, 0x7fff
; GFX10-NEXT: v_lshlrev_b32_e32 v22, 16, v1
; GFX10-NEXT: v_and_b32_e32 v1, 0xffff0000, v1
@@ -10689,8 +10601,8 @@ define <16 x bfloat> @v_fadd_v16bf16(<16 x bfloat> %a, <16 x bfloat> %b) {
; GFX10-NEXT: v_lshlrev_b32_e32 v20, 16, v9
; GFX10-NEXT: v_cmp_u_f32_e32 vcc_lo, v18, v18
; GFX10-NEXT: v_and_b32_e32 v9, 0xffff0000, v9
-; GFX10-NEXT: v_and_or_b32 v18, v2, s4, 0x400000
-; GFX10-NEXT: v_and_or_b32 v21, v4, s4, 0x400000
+; GFX10-NEXT: v_or_b32_e32 v18, 0x400000, v2
+; GFX10-NEXT: v_or_b32_e32 v21, 0x400000, v4
; GFX10-NEXT: v_perm_b32 v3, v3, v12, 0x7060302
; GFX10-NEXT: v_cndmask_b32_e32 v10, v10, v17, vcc_lo
; GFX10-NEXT: v_add3_u32 v17, v19, v2, 0x7fff
@@ -10702,17 +10614,17 @@ define <16 x bfloat> @v_fadd_v16bf16(<16 x bfloat> %a, <16 x bfloat> %b) {
; GFX10-NEXT: v_bfe_u32 v23, v19, 16, 1
; GFX10-NEXT: v_add_f32_e32 v1, v1, v9
; GFX10-NEXT: v_add_f32_e32 v9, v22, v20
-; GFX10-NEXT: v_and_or_b32 v22, v19, s4, 0x400000
+; GFX10-NEXT: v_or_b32_e32 v22, 0x400000, v19
; GFX10-NEXT: v_add_f32_e32 v0, v0, v8
; GFX10-NEXT: v_add3_u32 v20, v23, v19, 0x7fff
; GFX10-NEXT: v_bfe_u32 v8, v1, 16, 1
; GFX10-NEXT: v_cmp_u_f32_e32 vcc_lo, v19, v19
; GFX10-NEXT: v_bfe_u32 v23, v9, 16, 1
-; GFX10-NEXT: v_and_or_b32 v24, v9, s4, 0x400000
-; GFX10-NEXT: v_and_or_b32 v25, v0, s4, 0x400000
+; GFX10-NEXT: v_or_b32_e32 v24, 0x400000, v9
+; GFX10-NEXT: v_or_b32_e32 v25, 0x400000, v0
; GFX10-NEXT: v_add3_u32 v8, v8, v1, 0x7fff
; GFX10-NEXT: v_cndmask_b32_e32 v19, v20, v22, vcc_lo
-; GFX10-NEXT: v_and_or_b32 v22, v1, s4, 0x400000
+; GFX10-NEXT: v_or_b32_e32 v22, 0x400000, v1
; GFX10-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
; GFX10-NEXT: v_bfe_u32 v20, v0, 16, 1
; GFX10-NEXT: v_add3_u32 v23, v23, v9, 0x7fff
@@ -10741,12 +10653,11 @@ define <16 x bfloat> @v_fadd_v16bf16(<16 x bfloat> %a, <16 x bfloat> %b) {
; GFX11-NEXT: v_lshlrev_b32_e32 v16, 16, v15
; GFX11-NEXT: v_and_b32_e32 v6, 0xffff0000, v6
; GFX11-NEXT: v_lshlrev_b32_e32 v17, 16, v7
-; GFX11-NEXT: v_and_b32_e32 v15, 0xffff0000, v15
-; GFX11-NEXT: s_mov_b32 s0, 0xff800000
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_2)
-; GFX11-NEXT: v_dual_add_f32 v16, v17, v16 :: v_dual_lshlrev_b32 v17, 16, v14
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
+; GFX11-NEXT: v_dual_add_f32 v16, v17, v16 :: v_dual_and_b32 v15, 0xffff0000, v15
+; GFX11-NEXT: v_lshlrev_b32_e32 v17, 16, v14
; GFX11-NEXT: v_and_b32_e32 v14, 0xffff0000, v14
-; GFX11-NEXT: v_and_or_b32 v20, v16, s0, 0x400000
+; GFX11-NEXT: v_or_b32_e32 v20, 0x400000, v16
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
; GFX11-NEXT: v_add_f32_e32 v17, v18, v17
; GFX11-NEXT: v_add_f32_e32 v6, v6, v14
@@ -10759,13 +10670,13 @@ define <16 x bfloat> @v_fadd_v16bf16(<16 x bfloat> %a, <16 x bfloat> %b) {
; GFX11-NEXT: v_add_f32_e32 v7, v7, v15
; GFX11-NEXT: v_bfe_u32 v15, v16, 16, 1
; GFX11-NEXT: v_add3_u32 v15, v15, v16, 0x7fff
-; GFX11-NEXT: v_and_or_b32 v16, v17, s0, 0x400000
+; GFX11-NEXT: v_or_b32_e32 v16, 0x400000, v17
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_2) | instid1(VALU_DEP_2)
; GFX11-NEXT: v_dual_cndmask_b32 v15, v15, v20 :: v_dual_lshlrev_b32 v20, 16, v5
; GFX11-NEXT: v_bfe_u32 v19, v7, 16, 1
; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v7, v7
; GFX11-NEXT: v_add3_u32 v18, v19, v7, 0x7fff
-; GFX11-NEXT: v_and_or_b32 v19, v7, s0, 0x400000
+; GFX11-NEXT: v_or_b32_e32 v19, 0x400000, v7
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_3) | instid1(VALU_DEP_4)
; GFX11-NEXT: v_cndmask_b32_e32 v7, v18, v19, vcc_lo
; GFX11-NEXT: v_bfe_u32 v18, v6, 16, 1
@@ -10787,32 +10698,32 @@ define <16 x bfloat> @v_fadd_v16bf16(<16 x bfloat> %a, <16 x bfloat> %b) {
; GFX11-NEXT: v_lshlrev_b32_e32 v12, 16, v11
; GFX11-NEXT: v_and_b32_e32 v5, 0xffff0000, v5
; GFX11-NEXT: v_add_f32_e32 v5, v5, v13
-; GFX11-NEXT: v_and_or_b32 v13, v6, s0, 0x400000
+; GFX11-NEXT: v_or_b32_e32 v13, 0x400000, v6
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_3) | instid1(VALU_DEP_4)
; GFX11-NEXT: v_dual_cndmask_b32 v6, v16, v13 :: v_dual_add_f32 v13, v19, v18
; GFX11-NEXT: v_add3_u32 v16, v20, v17, 0x7fff
-; GFX11-NEXT: v_and_or_b32 v18, v17, s0, 0x400000
+; GFX11-NEXT: v_or_b32_e32 v18, 0x400000, v17
; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v17, v17
; GFX11-NEXT: v_perm_b32 v6, v6, v14, 0x7060302
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_4) | instid1(VALU_DEP_4)
; GFX11-NEXT: v_cndmask_b32_e32 v16, v16, v18, vcc_lo
; GFX11-NEXT: v_lshlrev_b32_e32 v18, 16, v3
; GFX11-NEXT: v_bfe_u32 v21, v5, 16, 1
-; GFX11-NEXT: v_and_or_b32 v20, v5, s0, 0x400000
+; GFX11-NEXT: v_or_b32_e32 v20, 0x400000, v5
; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
; GFX11-NEXT: v_add_f32_e32 v12, v18, v12
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_2) | instid1(VALU_DEP_4)
; GFX11-NEXT: v_add3_u32 v19, v21, v5, 0x7fff
; GFX11-NEXT: v_bfe_u32 v21, v13, 16, 1
; GFX11-NEXT: v_lshlrev_b32_e32 v18, 16, v10
-; GFX11-NEXT: v_and_or_b32 v22, v12, s0, 0x400000
+; GFX11-NEXT: v_or_b32_e32 v22, 0x400000, v12
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
; GFX11-NEXT: v_cndmask_b32_e32 v5, v19, v20, vcc_lo
; GFX11-NEXT: v_add3_u32 v17, v21, v13, 0x7fff
-; GFX11-NEXT: v_and_or_b32 v19, v13, s0, 0x400000
+; GFX11-NEXT: v_or_b32_e32 v19, 0x400000, v13
; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v13, v13
; GFX11-NEXT: v_bfe_u32 v20, v4, 16, 1
-; GFX11-NEXT: v_and_or_b32 v21, v4, s0, 0x400000
+; GFX11-NEXT: v_or_b32_e32 v21, 0x400000, v4
; GFX11-NEXT: v_perm_b32 v5, v5, v16, 0x7060302
; GFX11-NEXT: v_cndmask_b32_e32 v13, v17, v19, vcc_lo
; GFX11-NEXT: v_bfe_u32 v17, v12, 16, 1
@@ -10828,7 +10739,7 @@ define <16 x bfloat> @v_fadd_v16bf16(<16 x bfloat> %a, <16 x bfloat> %b) {
; GFX11-NEXT: v_and_b32_e32 v10, 0xffff0000, v10
; GFX11-NEXT: v_and_b32_e32 v3, 0xffff0000, v3
; GFX11-NEXT: v_bfe_u32 v23, v18, 16, 1
-; GFX11-NEXT: v_and_or_b32 v17, v18, s0, 0x400000
+; GFX11-NEXT: v_or_b32_e32 v17, 0x400000, v18
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
; GFX11-NEXT: v_dual_add_f32 v2, v2, v10 :: v_dual_and_b32 v1, 0xffff0000, v1
; GFX11-NEXT: v_add_f32_e32 v3, v3, v11
@@ -10838,13 +10749,13 @@ define <16 x bfloat> @v_fadd_v16bf16(<16 x bfloat> %a, <16 x bfloat> %b) {
; GFX11-NEXT: v_bfe_u32 v20, v3, 16, 1
; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3
; GFX11-NEXT: v_add3_u32 v19, v20, v3, 0x7fff
-; GFX11-NEXT: v_and_or_b32 v20, v3, s0, 0x400000
+; GFX11-NEXT: v_or_b32_e32 v20, 0x400000, v3
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
; GFX11-NEXT: v_cndmask_b32_e32 v3, v19, v20, vcc_lo
; GFX11-NEXT: v_bfe_u32 v19, v2, 16, 1
; GFX11-NEXT: v_lshlrev_b32_e32 v20, 16, v9
; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v18, v18
-; GFX11-NEXT: v_and_or_b32 v18, v2, s0, 0x400000
+; GFX11-NEXT: v_or_b32_e32 v18, 0x400000, v2
; GFX11-NEXT: v_perm_b32 v3, v3, v12, 0x7060302
; GFX11-NEXT: v_cndmask_b32_e32 v10, v10, v17, vcc_lo
; GFX11-NEXT: v_add3_u32 v17, v19, v2, 0x7fff
@@ -10861,13 +10772,13 @@ define <16 x bfloat> @v_fadd_v16bf16(<16 x bfloat> %a, <16 x bfloat> %b) {
; GFX11-NEXT: v_add_f32_e32 v9, v22, v20
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_4)
; GFX11-NEXT: v_add3_u32 v20, v23, v19, 0x7fff
-; GFX11-NEXT: v_and_or_b32 v22, v19, s0, 0x400000
-; GFX11-NEXT: v_and_or_b32 v25, v0, s0, 0x400000
+; GFX11-NEXT: v_or_b32_e32 v22, 0x400000, v19
+; GFX11-NEXT: v_or_b32_e32 v25, 0x400000, v0
; GFX11-NEXT: v_bfe_u32 v8, v1, 16, 1
; GFX11-NEXT: v_bfe_u32 v23, v9, 16, 1
-; GFX11-NEXT: v_and_or_b32 v24, v9, s0, 0x400000
+; GFX11-NEXT: v_or_b32_e32 v24, 0x400000, v9
; GFX11-NEXT: v_cndmask_b32_e32 v19, v20, v22, vcc_lo
-; GFX11-NEXT: v_and_or_b32 v22, v1, s0, 0x400000
+; GFX11-NEXT: v_or_b32_e32 v22, 0x400000, v1
; GFX11-NEXT: v_add3_u32 v8, v8, v1, 0x7fff
; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
; GFX11-NEXT: v_bfe_u32 v20, v0, 16, 1
@@ -11434,16 +11345,14 @@ define <32 x bfloat> @v_fadd_v32bf16(<32 x bfloat> %a, <32 x bfloat> %b) {
; GFX8-NEXT: v_and_b32_e32 v30, 0xffff0000, v30
; GFX8-NEXT: v_and_b32_e32 v14, 0xffff0000, v14
; GFX8-NEXT: v_add_u32_e32 v32, vcc, s4, v32
-; GFX8-NEXT: v_and_b32_e32 v33, 0xff800000, v31
; GFX8-NEXT: v_add_f32_e32 v14, v14, v30
-; GFX8-NEXT: v_or_b32_e32 v33, 0x400000, v33
+; GFX8-NEXT: v_or_b32_e32 v33, 0x400000, v31
; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v31, v31
; GFX8-NEXT: v_bfe_u32 v30, v14, 16, 1
; GFX8-NEXT: v_cndmask_b32_e32 v31, v32, v33, vcc
; GFX8-NEXT: v_add_u32_e32 v30, vcc, v30, v14
; GFX8-NEXT: v_add_u32_e32 v30, vcc, s4, v30
-; GFX8-NEXT: v_and_b32_e32 v32, 0xff800000, v14
-; GFX8-NEXT: v_or_b32_e32 v32, 0x400000, v32
+; GFX8-NEXT: v_or_b32_e32 v32, 0x400000, v14
; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v14, v14
; GFX8-NEXT: v_cndmask_b32_e32 v14, v30, v32, vcc
; GFX8-NEXT: v_lshlrev_b32_e32 v30, 16, v29
@@ -11465,29 +11374,25 @@ define <32 x bfloat> @v_fadd_v32bf16(<32 x bfloat> %a, <32 x bfloat> %b) {
; GFX8-NEXT: v_add_f32_e32 v30, v15, v30
; GFX8-NEXT: v_bfe_u32 v15, v33, 16, 1
; GFX8-NEXT: v_add_u32_e32 v15, vcc, v15, v33
-; GFX8-NEXT: v_and_b32_e32 v34, 0xff800000, v33
; GFX8-NEXT: v_add_u32_e32 v15, vcc, s4, v15
-; GFX8-NEXT: v_or_b32_e32 v34, 0x400000, v34
+; GFX8-NEXT: v_or_b32_e32 v34, 0x400000, v33
; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v33, v33
; GFX8-NEXT: v_bfe_u32 v33, v30, 16, 1
; GFX8-NEXT: v_cndmask_b32_e32 v15, v15, v34, vcc
; GFX8-NEXT: v_add_u32_e32 v33, vcc, v33, v30
-; GFX8-NEXT: v_and_b32_e32 v34, 0xff800000, v30
; GFX8-NEXT: v_add_u32_e32 v33, vcc, s4, v33
-; GFX8-NEXT: v_or_b32_e32 v34, 0x400000, v34
+; GFX8-NEXT: v_or_b32_e32 v34, 0x400000, v30
; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v30, v30
; GFX8-NEXT: v_cndmask_b32_e32 v30, v33, v34, vcc
; GFX8-NEXT: v_bfe_u32 v33, v32, 16, 1
; GFX8-NEXT: v_add_u32_e32 v33, vcc, v33, v32
; GFX8-NEXT: v_add_u32_e32 v33, vcc, s4, v33
-; GFX8-NEXT: v_and_b32_e32 v34, 0xff800000, v32
-; GFX8-NEXT: v_or_b32_e32 v34, 0x400000, v34
+; GFX8-NEXT: v_or_b32_e32 v34, 0x400000, v32
; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v32, v32
; GFX8-NEXT: v_cndmask_b32_e32 v32, v33, v34, vcc
; GFX8-NEXT: v_add_u32_e32 v29, vcc, v29, v13
; GFX8-NEXT: v_add_u32_e32 v29, vcc, s4, v29
-; GFX8-NEXT: v_and_b32_e32 v33, 0xff800000, v13
-; GFX8-NEXT: v_or_b32_e32 v33, 0x400000, v33
+; GFX8-NEXT: v_or_b32_e32 v33, 0x400000, v13
; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v13, v13
; GFX8-NEXT: v_cndmask_b32_e32 v13, v29, v33, vcc
; GFX8-NEXT: v_lshlrev_b32_e32 v29, 16, v28
@@ -11498,16 +11403,14 @@ define <32 x bfloat> @v_fadd_v32bf16(<32 x bfloat> %a, <32 x bfloat> %b) {
; GFX8-NEXT: v_and_b32_e32 v28, 0xffff0000, v28
; GFX8-NEXT: v_and_b32_e32 v12, 0xffff0000, v12
; GFX8-NEXT: v_add_u32_e32 v33, vcc, s4, v33
-; GFX8-NEXT: v_and_b32_e32 v34, 0xff800000, v29
; GFX8-NEXT: v_add_f32_e32 v12, v12, v28
-; GFX8-NEXT: v_or_b32_e32 v34, 0x400000, v34
+; GFX8-NEXT: v_or_b32_e32 v34, 0x400000, v29
; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v29, v29
; GFX8-NEXT: v_bfe_u32 v28, v12, 16, 1
; GFX8-NEXT: v_cndmask_b32_e32 v29, v33, v34, vcc
; GFX8-NEXT: v_add_u32_e32 v28, vcc, v28, v12
; GFX8-NEXT: v_add_u32_e32 v28, vcc, s4, v28
-; GFX8-NEXT: v_and_b32_e32 v33, 0xff800000, v12
-; GFX8-NEXT: v_or_b32_e32 v33, 0x400000, v33
+; GFX8-NEXT: v_or_b32_e32 v33, 0x400000, v12
; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v12, v12
; GFX8-NEXT: v_cndmask_b32_e32 v12, v28, v33, vcc
; GFX8-NEXT: v_lshlrev_b32_e32 v28, 16, v27
@@ -11518,16 +11421,14 @@ define <32 x bfloat> @v_fadd_v32bf16(<32 x bfloat> %a, <32 x bfloat> %b) {
; GFX8-NEXT: v_and_b32_e32 v27, 0xffff0000, v27
; GFX8-NEXT: v_and_b32_e32 v11, 0xffff0000, v11
; GFX8-NEXT: v_add_u32_e32 v33, vcc, s4, v33
-; GFX8-NEXT: v_and_b32_e32 v34, 0xff800000, v28
; GFX8-NEXT: v_add_f32_e32 v11, v11, v27
-; GFX8-NEXT: v_or_b32_e32 v34, 0x400000, v34
+; GFX8-NEXT: v_or_b32_e32 v34, 0x400000, v28
; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v28, v28
; GFX8-NEXT: v_bfe_u32 v27, v11, 16, 1
; GFX8-NEXT: v_cndmask_b32_e32 v28, v33, v34, vcc
; GFX8-NEXT: v_add_u32_e32 v27, vcc, v27, v11
; GFX8-NEXT: v_add_u32_e32 v27, vcc, s4, v27
-; GFX8-NEXT: v_and_b32_e32 v33, 0xff800000, v11
-; GFX8-NEXT: v_or_b32_e32 v33, 0x400000, v33
+; GFX8-NEXT: v_or_b32_e32 v33, 0x400000, v11
; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v11, v11
; GFX8-NEXT: v_cndmask_b32_e32 v11, v27, v33, vcc
; GFX8-NEXT: v_lshlrev_b32_e32 v27, 16, v26
@@ -11538,16 +11439,14 @@ define <32 x bfloat> @v_fadd_v32bf16(<32 x bfloat> %a, <32 x bfloat> %b) {
; GFX8-NEXT: v_and_b32_e32 v26, 0xffff0000, v26
; GFX8-NEXT: v_and_b32_e32 v10, 0xffff0000, v10
; GFX8-NEXT: v_add_u32_e32 v33, vcc, s4, v33
-; GFX8-NEXT: v_and_b32_e32 v34, 0xff800000, v27
; GFX8-NEXT: v_add_f32_e32 v10, v10, v26
-; GFX8-NEXT: v_or_b32_e32 v34, 0x400000, v34
+; GFX8-NEXT: v_or_b32_e32 v34, 0x400000, v27
; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v27, v27
; GFX8-NEXT: v_bfe_u32 v26, v10, 16, 1
; GFX8-NEXT: v_cndmask_b32_e32 v27, v33, v34, vcc
; GFX8-NEXT: v_add_u32_e32 v26, vcc, v26, v10
; GFX8-NEXT: v_add_u32_e32 v26, vcc, s4, v26
-; GFX8-NEXT: v_and_b32_e32 v33, 0xff800000, v10
-; GFX8-NEXT: v_or_b32_e32 v33, 0x400000, v33
+; GFX8-NEXT: v_or_b32_e32 v33, 0x400000, v10
; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v10, v10
; GFX8-NEXT: v_cndmask_b32_e32 v10, v26, v33, vcc
; GFX8-NEXT: v_lshlrev_b32_e32 v26, 16, v25
@@ -11558,16 +11457,14 @@ define <32 x bfloat> @v_fadd_v32bf16(<32 x bfloat> %a, <32 x bfloat> %b) {
; GFX8-NEXT: v_and_b32_e32 v25, 0xffff0000, v25
; GFX8-NEXT: v_and_b32_e32 v9, 0xffff0000, v9
; GFX8-NEXT: v_add_u32_e32 v33, vcc, s4, v33
-; GFX8-NEXT: v_and_b32_e32 v34, 0xff800000, v26
; GFX8-NEXT: v_add_f32_e32 v9, v9, v25
-; GFX8-NEXT: v_or_b32_e32 v34, 0x400000, v34
+; GFX8-NEXT: v_or_b32_e32 v34, 0x400000, v26
; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v26, v26
; GFX8-NEXT: v_bfe_u32 v25, v9, 16, 1
; GFX8-NEXT: v_cndmask_b32_e32 v26, v33, v34, vcc
; GFX8-NEXT: v_add_u32_e32 v25, vcc, v25, v9
; GFX8-NEXT: v_add_u32_e32 v25, vcc, s4, v25
-; GFX8-NEXT: v_and_b32_e32 v33, 0xff800000, v9
-; GFX8-NEXT: v_or_b32_e32 v33, 0x400000, v33
+; GFX8-NEXT: v_or_b32_e32 v33, 0x400000, v9
; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v9, v9
; GFX8-NEXT: v_cndmask_b32_e32 v9, v25, v33, vcc
; GFX8-NEXT: v_lshlrev_b32_e32 v25, 16, v24
@@ -11578,16 +11475,14 @@ define <32 x bfloat> @v_fadd_v32bf16(<32 x bfloat> %a, <32 x bfloat> %b) {
; GFX8-NEXT: v_and_b32_e32 v24, 0xffff0000, v24
; GFX8-NEXT: v_and_b32_e32 v8, 0xffff0000, v8
; GFX8-NEXT: v_add_u32_e32 v33, vcc, s4, v33
-; GFX8-NEXT: v_and_b32_e32 v34, 0xff800000, v25
; GFX8-NEXT: v_add_f32_e32 v8, v8, v24
-; GFX8-NEXT: v_or_b32_e32 v34, 0x400000, v34
+; GFX8-NEXT: v_or_b32_e32 v34, 0x400000, v25
; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v25, v25
; GFX8-NEXT: v_bfe_u32 v24, v8, 16, 1
; GFX8-NEXT: v_cndmask_b32_e32 v25, v33, v34, vcc
; GFX8-NEXT: v_add_u32_e32 v24, vcc, v24, v8
; GFX8-NEXT: v_add_u32_e32 v24, vcc, s4, v24
-; GFX8-NEXT: v_and_b32_e32 v33, 0xff800000, v8
-; GFX8-NEXT: v_or_b32_e32 v33, 0x400000, v33
+; GFX8-NEXT: v_or_b32_e32 v33, 0x400000, v8
; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v8, v8
; GFX8-NEXT: v_cndmask_b32_e32 v8, v24, v33, vcc
; GFX8-NEXT: v_lshlrev_b32_e32 v24, 16, v23
@@ -11598,16 +11493,14 @@ define <32 x bfloat> @v_fadd_v32bf16(<32 x bfloat> %a, <32 x bfloat> %b) {
; GFX8-NEXT: v_and_b32_e32 v23, 0xffff0000, v23
; GFX8-NEXT: v_and_b32_e32 v7, 0xffff0000, v7
; GFX8-NEXT: v_add_u32_e32 v33, vcc, s4, v33
-; GFX8-NEXT: v_and_b32_e32 v34, 0xff800000, v24
; GFX8-NEXT: v_add_f32_e32 v7, v7, v23
-; GFX8-NEXT: v_or_b32_e32 v34, 0x400000, v34
+; GFX8-NEXT: v_or_b32_e32 v34, 0x400000, v24
; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v24, v24
; GFX8-NEXT: v_bfe_u32 v23, v7, 16, 1
; GFX8-NEXT: v_cndmask_b32_e32 v24, v33, v34, vcc
; GFX8-NEXT: v_add_u32_e32 v23, vcc, v23, v7
; GFX8-NEXT: v_add_u32_e32 v23, vcc, s4, v23
-; GFX8-NEXT: v_and_b32_e32 v33, 0xff800000, v7
-; GFX8-NEXT: v_or_b32_e32 v33, 0x400000, v33
+; GFX8-NEXT: v_or_b32_e32 v33, 0x400000, v7
; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v7, v7
; GFX8-NEXT: v_cndmask_b32_e32 v7, v23, v33, vcc
; GFX8-NEXT: v_lshlrev_b32_e32 v23, 16, v22
@@ -11618,16 +11511,14 @@ define <32 x bfloat> @v_fadd_v32bf16(<32 x bfloat> %a, <32 x bfloat> %b) {
; GFX8-NEXT: v_and_b32_e32 v22, 0xffff0000, v22
; GFX8-NEXT: v_and_b32_e32 v6, 0xffff0000, v6
; GFX8-NEXT: v_add_u32_e32 v33, vcc, s4, v33
-; GFX8-NEXT: v_and_b32_e32 v34, 0xff800000, v23
; GFX8-NEXT: v_add_f32_e32 v6, v6, v22
-; GFX8-NEXT: v_or_b32_e32 v34, 0x400000, v34
+; GFX8-NEXT: v_or_b32_e32 v34, 0x400000, v23
; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v23, v23
; GFX8-NEXT: v_bfe_u32 v22, v6, 16, 1
; GFX8-NEXT: v_cndmask_b32_e32 v23, v33, v34, vcc
; GFX8-NEXT: v_add_u32_e32 v22, vcc, v22, v6
; GFX8-NEXT: v_add_u32_e32 v22, vcc, s4, v22
-; GFX8-NEXT: v_and_b32_e32 v33, 0xff800000, v6
-; GFX8-NEXT: v_or_b32_e32 v33, 0x400000, v33
+; GFX8-NEXT: v_or_b32_e32 v33, 0x400000, v6
; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v6, v6
; GFX8-NEXT: v_cndmask_b32_e32 v6, v22, v33, vcc
; GFX8-NEXT: v_lshlrev_b32_e32 v22, 16, v21
@@ -11638,16 +11529,14 @@ define <32 x bfloat> @v_fadd_v32bf16(<32 x bfloat> %a, <32 x bfloat> %b) {
; GFX8-NEXT: v_and_b32_e32 v21, 0xffff0000, v21
; GFX8-NEXT: v_and_b32_e32 v5, 0xffff0000, v5
; GFX8-NEXT: v_add_u32_e32 v33, vcc, s4, v33
-; GFX8-NEXT: v_and_b32_e32 v34, 0xff800000, v22
; GFX8-NEXT: v_add_f32_e32 v5, v5, v21
-; GFX8-NEXT: v_or_b32_e32 v34, 0x400000, v34
+; GFX8-NEXT: v_or_b32_e32 v34, 0x400000, v22
; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v22, v22
; GFX8-NEXT: v_bfe_u32 v21, v5, 16, 1
; GFX8-NEXT: v_cndmask_b32_e32 v22, v33, v34, vcc
; GFX8-NEXT: v_add_u32_e32 v21, vcc, v21, v5
; GFX8-NEXT: v_add_u32_e32 v21, vcc, s4, v21
-; GFX8-NEXT: v_and_b32_e32 v33, 0xff800000, v5
-; GFX8-NEXT: v_or_b32_e32 v33, 0x400000, v33
+; GFX8-NEXT: v_or_b32_e32 v33, 0x400000, v5
; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v5, v5
; GFX8-NEXT: v_cndmask_b32_e32 v5, v21, v33, vcc
; GFX8-NEXT: v_lshlrev_b32_e32 v21, 16, v20
@@ -11658,16 +11547,14 @@ define <32 x bfloat> @v_fadd_v32bf16(<32 x bfloat> %a, <32 x bfloat> %b) {
; GFX8-NEXT: v_and_b32_e32 v20, 0xffff0000, v20
; GFX8-NEXT: v_and_b32_e32 v4, 0xffff0000, v4
; GFX8-NEXT: v_add_u32_e32 v33, vcc, s4, v33
-; GFX8-NEXT: v_and_b32_e32 v34, 0xff800000, v21
; GFX8-NEXT: v_add_f32_e32 v4, v4, v20
-; GFX8-NEXT: v_or_b32_e32 v34, 0x400000, v34
+; GFX8-NEXT: v_or_b32_e32 v34, 0x400000, v21
; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v21, v21
; GFX8-NEXT: v_bfe_u32 v20, v4, 16, 1
; GFX8-NEXT: v_cndmask_b32_e32 v21, v33, v34, vcc
; GFX8-NEXT: v_add_u32_e32 v20, vcc, v20, v4
; GFX8-NEXT: v_add_u32_e32 v20, vcc, s4, v20
-; GFX8-NEXT: v_and_b32_e32 v33, 0xff800000, v4
-; GFX8-NEXT: v_or_b32_e32 v33, 0x400000, v33
+; GFX8-NEXT: v_or_b32_e32 v33, 0x400000, v4
; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v4, v4
; GFX8-NEXT: v_cndmask_b32_e32 v4, v20, v33, vcc
; GFX8-NEXT: v_lshlrev_b32_e32 v20, 16, v19
@@ -11678,16 +11565,14 @@ define <32 x bfloat> @v_fadd_v32bf16(<32 x bfloat> %a, <32 x bfloat> %b) {
; GFX8-NEXT: v_and_b32_e32 v19, 0xffff0000, v19
; GFX8-NEXT: v_and_b32_e32 v3, 0xffff0000, v3
; GFX8-NEXT: v_add_u32_e32 v33, vcc, s4, v33
-; GFX8-NEXT: v_and_b32_e32 v34, 0xff800000, v20
; GFX8-NEXT: v_add_f32_e32 v3, v3, v19
-; GFX8-NEXT: v_or_b32_e32 v34, 0x400000, v34
+; GFX8-NEXT: v_or_b32_e32 v34, 0x400000, v20
; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v20, v20
; GFX8-NEXT: v_bfe_u32 v19, v3, 16, 1
; GFX8-NEXT: v_cndmask_b32_e32 v20, v33, v34, vcc
; GFX8-NEXT: v_add_u32_e32 v19, vcc, v19, v3
; GFX8-NEXT: v_add_u32_e32 v19, vcc, s4, v19
-; GFX8-NEXT: v_and_b32_e32 v33, 0xff800000, v3
-; GFX8-NEXT: v_or_b32_e32 v33, 0x400000, v33
+; GFX8-NEXT: v_or_b32_e32 v33, 0x400000, v3
; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v3, v3
; GFX8-NEXT: v_cndmask_b32_e32 v3, v19, v33, vcc
; GFX8-NEXT: v_lshlrev_b32_e32 v19, 16, v18
@@ -11698,16 +11583,14 @@ define <32 x bfloat> @v_fadd_v32bf16(<32 x bfloat> %a, <32 x bfloat> %b) {
; GFX8-NEXT: v_and_b32_e32 v18, 0xffff0000, v18
; GFX8-NEXT: v_and_b32_e32 v2, 0xffff0000, v2
; GFX8-NEXT: v_add_u32_e32 v33, vcc, s4, v33
-; GFX8-NEXT: v_and_b32_e32 v34, 0xff800000, v19
; GFX8-NEXT: v_add_f32_e32 v2, v2, v18
-; GFX8-NEXT: v_or_b32_e32 v34, 0x400000, v34
+; GFX8-NEXT: v_or_b32_e32 v34, 0x400000, v19
; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v19, v19
; GFX8-NEXT: v_bfe_u32 v18, v2, 16, 1
; GFX8-NEXT: v_cndmask_b32_e32 v19, v33, v34, vcc
; GFX8-NEXT: v_add_u32_e32 v18, vcc, v18, v2
; GFX8-NEXT: v_add_u32_e32 v18, vcc, s4, v18
-; GFX8-NEXT: v_and_b32_e32 v33, 0xff800000, v2
-; GFX8-NEXT: v_or_b32_e32 v33, 0x400000, v33
+; GFX8-NEXT: v_or_b32_e32 v33, 0x400000, v2
; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v2, v2
; GFX8-NEXT: v_cndmask_b32_e32 v2, v18, v33, vcc
; GFX8-NEXT: v_lshlrev_b32_e32 v18, 16, v17
@@ -11718,16 +11601,14 @@ define <32 x bfloat> @v_fadd_v32bf16(<32 x bfloat> %a, <32 x bfloat> %b) {
; GFX8-NEXT: v_and_b32_e32 v17, 0xffff0000, v17
; GFX8-NEXT: v_and_b32_e32 v1, 0xffff0000, v1
; GFX8-NEXT: v_add_u32_e32 v33, vcc, s4, v33
-; GFX8-NEXT: v_and_b32_e32 v34, 0xff800000, v18
; GFX8-NEXT: v_add_f32_e32 v1, v1, v17
-; GFX8-NEXT: v_or_b32_e32 v34, 0x400000, v34
+; GFX8-NEXT: v_or_b32_e32 v34, 0x400000, v18
; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v18, v18
; GFX8-NEXT: v_bfe_u32 v17, v1, 16, 1
; GFX8-NEXT: v_cndmask_b32_e32 v18, v33, v34, vcc
; GFX8-NEXT: v_add_u32_e32 v17, vcc, v17, v1
; GFX8-NEXT: v_add_u32_e32 v17, vcc, s4, v17
-; GFX8-NEXT: v_and_b32_e32 v33, 0xff800000, v1
-; GFX8-NEXT: v_or_b32_e32 v33, 0x400000, v33
+; GFX8-NEXT: v_or_b32_e32 v33, 0x400000, v1
; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v1, v1
; GFX8-NEXT: v_cndmask_b32_e32 v1, v17, v33, vcc
; GFX8-NEXT: v_lshlrev_b32_e32 v17, 16, v16
@@ -11738,16 +11619,14 @@ define <32 x bfloat> @v_fadd_v32bf16(<32 x bfloat> %a, <32 x bfloat> %b) {
; GFX8-NEXT: v_and_b32_e32 v16, 0xffff0000, v16
; GFX8-NEXT: v_and_b32_e32 v0, 0xffff0000, v0
; GFX8-NEXT: v_add_u32_e32 v33, vcc, s4, v33
-; GFX8-NEXT: v_and_b32_e32 v34, 0xff800000, v17
; GFX8-NEXT: v_add_f32_e32 v0, v0, v16
-; GFX8-NEXT: v_or_b32_e32 v34, 0x400000, v34
+; GFX8-NEXT: v_or_b32_e32 v34, 0x400000, v17
; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v17, v17
; GFX8-NEXT: v_bfe_u32 v16, v0, 16, 1
; GFX8-NEXT: v_cndmask_b32_e32 v17, v33, v34, vcc
; GFX8-NEXT: v_add_u32_e32 v16, vcc, v16, v0
; GFX8-NEXT: v_add_u32_e32 v16, vcc, s4, v16
-; GFX8-NEXT: v_and_b32_e32 v33, 0xff800000, v0
-; GFX8-NEXT: v_or_b32_e32 v33, 0x400000, v33
+; GFX8-NEXT: v_or_b32_e32 v33, 0x400000, v0
; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v0, v0
; GFX8-NEXT: v_cndmask_b32_e32 v0, v16, v33, vcc
; GFX8-NEXT: v_lshrrev_b32_e32 v0, 16, v0
@@ -11788,292 +11667,260 @@ define <32 x bfloat> @v_fadd_v32bf16(<32 x bfloat> %a, <32 x bfloat> %b) {
; GFX9-NEXT: v_lshlrev_b32_e32 v31, 16, v30
; GFX9-NEXT: v_lshlrev_b32_e32 v32, 16, v14
; GFX9-NEXT: v_add_f32_e32 v31, v32, v31
-; GFX9-NEXT: s_movk_i32 s4, 0x7fff
-; GFX9-NEXT: v_bfe_u32 v32, v31, 16, 1
-; GFX9-NEXT: v_and_b32_e32 v33, 0xff800000, v31
; GFX9-NEXT: v_and_b32_e32 v30, 0xffff0000, v30
; GFX9-NEXT: v_and_b32_e32 v14, 0xffff0000, v14
+; GFX9-NEXT: s_movk_i32 s4, 0x7fff
+; GFX9-NEXT: v_bfe_u32 v32, v31, 16, 1
+; GFX9-NEXT: v_add_f32_e32 v14, v14, v30
; GFX9-NEXT: v_add3_u32 v32, v32, v31, s4
-; GFX9-NEXT: v_or_b32_e32 v33, 0x400000, v33
+; GFX9-NEXT: v_or_b32_e32 v33, 0x400000, v31
; GFX9-NEXT: v_cmp_u_f32_e32 vcc, v31, v31
-; GFX9-NEXT: v_add_f32_e32 v14, v14, v30
-; GFX9-NEXT: v_cndmask_b32_e32 v31, v32, v33, vcc
; GFX9-NEXT: v_bfe_u32 v30, v14, 16, 1
-; GFX9-NEXT: v_and_b32_e32 v32, 0xff800000, v14
+; GFX9-NEXT: v_cndmask_b32_e32 v31, v32, v33, vcc
; GFX9-NEXT: v_add3_u32 v30, v30, v14, s4
-; GFX9-NEXT: v_or_b32_e32 v32, 0x400000, v32
+; GFX9-NEXT: v_or_b32_e32 v32, 0x400000, v14
; GFX9-NEXT: v_cmp_u_f32_e32 vcc, v14, v14
; GFX9-NEXT: v_cndmask_b32_e32 v14, v30, v32, vcc
; GFX9-NEXT: v_lshlrev_b32_e32 v30, 16, v29
; GFX9-NEXT: v_lshlrev_b32_e32 v32, 16, v13
+; GFX9-NEXT: v_add_f32_e32 v30, v32, v30
; GFX9-NEXT: v_and_b32_e32 v29, 0xffff0000, v29
; GFX9-NEXT: v_and_b32_e32 v13, 0xffff0000, v13
-; GFX9-NEXT: v_add_f32_e32 v13, v13, v29
-; GFX9-NEXT: buffer_load_dword v29, off, s[0:3], s32
-; GFX9-NEXT: v_add_f32_e32 v30, v32, v30
; GFX9-NEXT: v_bfe_u32 v32, v30, 16, 1
-; GFX9-NEXT: v_and_b32_e32 v33, 0xff800000, v30
+; GFX9-NEXT: v_add_f32_e32 v13, v13, v29
; GFX9-NEXT: v_add3_u32 v32, v32, v30, s4
-; GFX9-NEXT: v_or_b32_e32 v33, 0x400000, v33
+; GFX9-NEXT: v_or_b32_e32 v33, 0x400000, v30
; GFX9-NEXT: v_cmp_u_f32_e32 vcc, v30, v30
+; GFX9-NEXT: v_bfe_u32 v29, v13, 16, 1
; GFX9-NEXT: v_cndmask_b32_e32 v30, v32, v33, vcc
-; GFX9-NEXT: v_lshlrev_b32_e32 v32, 16, v15
+; GFX9-NEXT: v_add3_u32 v29, v29, v13, s4
+; GFX9-NEXT: v_or_b32_e32 v32, 0x400000, v13
+; GFX9-NEXT: v_cmp_u_f32_e32 vcc, v13, v13
+; GFX9-NEXT: v_cndmask_b32_e32 v13, v29, v32, vcc
+; GFX9-NEXT: v_lshlrev_b32_e32 v29, 16, v28
+; GFX9-NEXT: v_lshlrev_b32_e32 v32, 16, v12
+; GFX9-NEXT: v_add_f32_e32 v32, v32, v29
+; GFX9-NEXT: buffer_load_dword v29, off, s[0:3], s32
+; GFX9-NEXT: v_lshlrev_b32_e32 v33, 16, v15
; GFX9-NEXT: v_and_b32_e32 v15, 0xffff0000, v15
+; GFX9-NEXT: v_and_b32_e32 v28, 0xffff0000, v28
+; GFX9-NEXT: v_and_b32_e32 v12, 0xffff0000, v12
+; GFX9-NEXT: v_add_f32_e32 v12, v12, v28
+; GFX9-NEXT: v_bfe_u32 v28, v12, 16, 1
+; GFX9-NEXT: v_add3_u32 v28, v28, v12, s4
; GFX9-NEXT: s_waitcnt vmcnt(0)
-; GFX9-NEXT: v_lshlrev_b32_e32 v33, 16, v29
-; GFX9-NEXT: v_add_f32_e32 v32, v32, v33
+; GFX9-NEXT: v_lshlrev_b32_e32 v34, 16, v29
+; GFX9-NEXT: v_add_f32_e32 v33, v33, v34
; GFX9-NEXT: v_and_b32_e32 v29, 0xffff0000, v29
; GFX9-NEXT: v_add_f32_e32 v29, v15, v29
-; GFX9-NEXT: v_bfe_u32 v15, v32, 16, 1
-; GFX9-NEXT: v_and_b32_e32 v33, 0xff800000, v32
-; GFX9-NEXT: v_add3_u32 v15, v15, v32, s4
-; GFX9-NEXT: v_or_b32_e32 v33, 0x400000, v33
-; GFX9-NEXT: v_cmp_u_f32_e32 vcc, v32, v32
-; GFX9-NEXT: v_cndmask_b32_e32 v15, v15, v33, vcc
-; GFX9-NEXT: v_bfe_u32 v32, v29, 16, 1
-; GFX9-NEXT: v_and_b32_e32 v33, 0xff800000, v29
-; GFX9-NEXT: v_add3_u32 v32, v32, v29, s4
-; GFX9-NEXT: v_or_b32_e32 v33, 0x400000, v33
+; GFX9-NEXT: v_bfe_u32 v15, v33, 16, 1
+; GFX9-NEXT: v_add3_u32 v15, v15, v33, s4
+; GFX9-NEXT: v_or_b32_e32 v34, 0x400000, v33
+; GFX9-NEXT: v_cmp_u_f32_e32 vcc, v33, v33
+; GFX9-NEXT: v_bfe_u32 v33, v29, 16, 1
+; GFX9-NEXT: v_cndmask_b32_e32 v15, v15, v34, vcc
+; GFX9-NEXT: v_add3_u32 v33, v33, v29, s4
+; GFX9-NEXT: v_or_b32_e32 v34, 0x400000, v29
; GFX9-NEXT: v_cmp_u_f32_e32 vcc, v29, v29
-; GFX9-NEXT: v_cndmask_b32_e32 v29, v32, v33, vcc
-; GFX9-NEXT: v_bfe_u32 v32, v13, 16, 1
-; GFX9-NEXT: v_and_b32_e32 v33, 0xff800000, v13
-; GFX9-NEXT: v_add3_u32 v32, v32, v13, s4
-; GFX9-NEXT: v_or_b32_e32 v33, 0x400000, v33
-; GFX9-NEXT: v_cmp_u_f32_e32 vcc, v13, v13
-; GFX9-NEXT: v_cndmask_b32_e32 v13, v32, v33, vcc
-; GFX9-NEXT: v_lshlrev_b32_e32 v32, 16, v28
-; GFX9-NEXT: v_lshlrev_b32_e32 v33, 16, v12
-; GFX9-NEXT: v_add_f32_e32 v32, v33, v32
+; GFX9-NEXT: v_cndmask_b32_e32 v29, v33, v34, vcc
; GFX9-NEXT: v_bfe_u32 v33, v32, 16, 1
-; GFX9-NEXT: v_and_b32_e32 v34, 0xff800000, v32
-; GFX9-NEXT: v_and_b32_e32 v28, 0xffff0000, v28
-; GFX9-NEXT: v_and_b32_e32 v12, 0xffff0000, v12
; GFX9-NEXT: v_add3_u32 v33, v33, v32, s4
-; GFX9-NEXT: v_or_b32_e32 v34, 0x400000, v34
+; GFX9-NEXT: v_or_b32_e32 v34, 0x400000, v32
; GFX9-NEXT: v_cmp_u_f32_e32 vcc, v32, v32
-; GFX9-NEXT: v_add_f32_e32 v12, v12, v28
; GFX9-NEXT: v_cndmask_b32_e32 v32, v33, v34, vcc
-; GFX9-NEXT: v_bfe_u32 v28, v12, 16, 1
-; GFX9-NEXT: v_and_b32_e32 v33, 0xff800000, v12
-; GFX9-NEXT: v_add3_u32 v28, v28, v12, s4
-; GFX9-NEXT: v_or_b32_e32 v33, 0x400000, v33
+; GFX9-NEXT: v_or_b32_e32 v33, 0x400000, v12
; GFX9-NEXT: v_cmp_u_f32_e32 vcc, v12, v12
; GFX9-NEXT: v_cndmask_b32_e32 v12, v28, v33, vcc
; GFX9-NEXT: v_lshlrev_b32_e32 v28, 16, v27
; GFX9-NEXT: v_lshlrev_b32_e32 v33, 16, v11
; GFX9-NEXT: v_add_f32_e32 v28, v33, v28
-; GFX9-NEXT: v_bfe_u32 v33, v28, 16, 1
-; GFX9-NEXT: v_and_b32_e32 v34, 0xff800000, v28
; GFX9-NEXT: v_and_b32_e32 v27, 0xffff0000, v27
; GFX9-NEXT: v_and_b32_e32 v11, 0xffff0000, v11
+; GFX9-NEXT: v_bfe_u32 v33, v28, 16, 1
+; GFX9-NEXT: v_add_f32_e32 v11, v11, v27
; GFX9-NEXT: v_add3_u32 v33, v33, v28, s4
-; GFX9-NEXT: v_or_b32_e32 v34, 0x400000, v34
+; GFX9-NEXT: v_or_b32_e32 v34, 0x400000, v28
; GFX9-NEXT: v_cmp_u_f32_e32 vcc, v28, v28
-; GFX9-NEXT: v_add_f32_e32 v11, v11, v27
-; GFX9-NEXT: v_cndmask_b32_e32 v28, v33, v34, vcc
; GFX9-NEXT: v_bfe_u32 v27, v11, 16, 1
-; GFX9-NEXT: v_and_b32_e32 v33, 0xff800000, v11
+; GFX9-NEXT: v_cndmask_b32_e32 v28, v33, v34, vcc
; GFX9-NEXT: v_add3_u32 v27, v27, v11, s4
-; GFX9-NEXT: v_or_b32_e32 v33, 0x400000, v33
+; GFX9-NEXT: v_or_b32_e32 v33, 0x400000, v11
; GFX9-NEXT: v_cmp_u_f32_e32 vcc, v11, v11
; GFX9-NEXT: v_cndmask_b32_e32 v11, v27, v33, vcc
; GFX9-NEXT: v_lshlrev_b32_e32 v27, 16, v26
; GFX9-NEXT: v_lshlrev_b32_e32 v33, 16, v10
; GFX9-NEXT: v_add_f32_e32 v27, v33, v27
-; GFX9-NEXT: v_bfe_u32 v33, v27, 16, 1
-; GFX9-NEXT: v_and_b32_e32 v34, 0xff800000, v27
; GFX9-NEXT: v_and_b32_e32 v26, 0xffff0000, v26
; GFX9-NEXT: v_and_b32_e32 v10, 0xffff0000, v10
+; GFX9-NEXT: v_bfe_u32 v33, v27, 16, 1
+; GFX9-NEXT: v_add_f32_e32 v10, v10, v26
; GFX9-NEXT: v_add3_u32 v33, v33, v27, s4
-; GFX9-NEXT: v_or_b32_e32 v34, 0x400000, v34
+; GFX9-NEXT: v_or_b32_e32 v34, 0x400000, v27
; GFX9-NEXT: v_cmp_u_f32_e32 vcc, v27, v27
-; GFX9-NEXT: v_add_f32_e32 v10, v10, v26
-; GFX9-NEXT: v_cndmask_b32_e32 v27, v33, v34, vcc
; GFX9-NEXT: v_bfe_u32 v26, v10, 16, 1
-; GFX9-NEXT: v_and_b32_e32 v33, 0xff800000, v10
+; GFX9-NEXT: v_cndmask_b32_e32 v27, v33, v34, vcc
; GFX9-NEXT: v_add3_u32 v26, v26, v10, s4
-; GFX9-NEXT: v_or_b32_e32 v33, 0x400000, v33
+; GFX9-NEXT: v_or_b32_e32 v33, 0x400000, v10
; GFX9-NEXT: v_cmp_u_f32_e32 vcc, v10, v10
; GFX9-NEXT: v_cndmask_b32_e32 v10, v26, v33, vcc
; GFX9-NEXT: v_lshlrev_b32_e32 v26, 16, v25
; GFX9-NEXT: v_lshlrev_b32_e32 v33, 16, v9
; GFX9-NEXT: v_add_f32_e32 v26, v33, v26
-; GFX9-NEXT: v_bfe_u32 v33, v26, 16, 1
-; GFX9-NEXT: v_and_b32_e32 v34, 0xff800000, v26
; GFX9-NEXT: v_and_b32_e32 v25, 0xffff0000, v25
; GFX9-NEXT: v_and_b32_e32 v9, 0xffff0000, v9
+; GFX9-NEXT: v_bfe_u32 v33, v26, 16, 1
+; GFX9-NEXT: v_add_f32_e32 v9, v9, v25
; GFX9-NEXT: v_add3_u32 v33, v33, v26, s4
-; GFX9-NEXT: v_or_b32_e32 v34, 0x400000, v34
+; GFX9-NEXT: v_or_b32_e32 v34, 0x400000, v26
; GFX9-NEXT: v_cmp_u_f32_e32 vcc, v26, v26
-; GFX9-NEXT: v_add_f32_e32 v9, v9, v25
-; GFX9-NEXT: v_cndmask_b32_e32 v26, v33, v34, vcc
; GFX9-NEXT: v_bfe_u32 v25, v9, 16, 1
-; GFX9-NEXT: v_and_b32_e32 v33, 0xff800000, v9
+; GFX9-NEXT: v_cndmask_b32_e32 v26, v33, v34, vcc
; GFX9-NEXT: v_add3_u32 v25, v25, v9, s4
-; GFX9-NEXT: v_or_b32_e32 v33, 0x400000, v33
+; GFX9-NEXT: v_or_b32_e32 v33, 0x400000, v9
; GFX9-NEXT: v_cmp_u_f32_e32 vcc, v9, v9
; GFX9-NEXT: v_cndmask_b32_e32 v9, v25, v33, vcc
; GFX9-NEXT: v_lshlrev_b32_e32 v25, 16, v24
; GFX9-NEXT: v_lshlrev_b32_e32 v33, 16, v8
; GFX9-NEXT: v_add_f32_e32 v25, v33, v25
-; GFX9-NEXT: v_bfe_u32 v33, v25, 16, 1
-; GFX9-NEXT: v_and_b32_e32 v34, 0xff800000, v25
; GFX9-NEXT: v_and_b32_e32 v24, 0xffff0000, v24
; GFX9-NEXT: v_and_b32_e32 v8, 0xffff0000, v8
+; GFX9-NEXT: v_bfe_u32 v33, v25, 16, 1
+; GFX9-NEXT: v_add_f32_e32 v8, v8, v24
; GFX9-NEXT: v_add3_u32 v33, v33, v25, s4
-; GFX9-NEXT: v_or_b32_e32 v34, 0x400000, v34
+; GFX9-NEXT: v_or_b32_e32 v34, 0x400000, v25
; GFX9-NEXT: v_cmp_u_f32_e32 vcc, v25, v25
-; GFX9-NEXT: v_add_f32_e32 v8, v8, v24
-; GFX9-NEXT: v_cndmask_b32_e32 v25, v33, v34, vcc
; GFX9-NEXT: v_bfe_u32 v24, v8, 16, 1
-; GFX9-NEXT: v_and_b32_e32 v33, 0xff800000, v8
+; GFX9-NEXT: v_cndmask_b32_e32 v25, v33, v34, vcc
; GFX9-NEXT: v_add3_u32 v24, v24, v8, s4
-; GFX9-NEXT: v_or_b32_e32 v33, 0x400000, v33
+; GFX9-NEXT: v_or_b32_e32 v33, 0x400000, v8
; GFX9-NEXT: v_cmp_u_f32_e32 vcc, v8, v8
; GFX9-NEXT: v_cndmask_b32_e32 v8, v24, v33, vcc
; GFX9-NEXT: v_lshlrev_b32_e32 v24, 16, v23
; GFX9-NEXT: v_lshlrev_b32_e32 v33, 16, v7
; GFX9-NEXT: v_add_f32_e32 v24, v33, v24
-; GFX9-NEXT: v_bfe_u32 v33, v24, 16, 1
-; GFX9-NEXT: v_and_b32_e32 v34, 0xff800000, v24
; GFX9-NEXT: v_and_b32_e32 v23, 0xffff0000, v23
; GFX9-NEXT: v_and_b32_e32 v7, 0xffff0000, v7
+; GFX9-NEXT: v_bfe_u32 v33, v24, 16, 1
+; GFX9-NEXT: v_add_f32_e32 v7, v7, v23
; GFX9-NEXT: v_add3_u32 v33, v33, v24, s4
-; GFX9-NEXT: v_or_b32_e32 v34, 0x400000, v34
+; GFX9-NEXT: v_or_b32_e32 v34, 0x400000, v24
; GFX9-NEXT: v_cmp_u_f32_e32 vcc, v24, v24
-; GFX9-NEXT: v_add_f32_e32 v7, v7, v23
-; GFX9-NEXT: v_cndmask_b32_e32 v24, v33, v34, vcc
; GFX9-NEXT: v_bfe_u32 v23, v7, 16, 1
-; GFX9-NEXT: v_and_b32_e32 v33, 0xff800000, v7
+; GFX9-NEXT: v_cndmask_b32_e32 v24, v33, v34, vcc
; GFX9-NEXT: v_add3_u32 v23, v23, v7, s4
-; GFX9-NEXT: v_or_b32_e32 v33, 0x400000, v33
+; GFX9-NEXT: v_or_b32_e32 v33, 0x400000, v7
; GFX9-NEXT: v_cmp_u_f32_e32 vcc, v7, v7
; GFX9-NEXT: v_cndmask_b32_e32 v7, v23, v33, vcc
; GFX9-NEXT: v_lshlrev_b32_e32 v23, 16, v22
; GFX9-NEXT: v_lshlrev_b32_e32 v33, 16, v6
; GFX9-NEXT: v_add_f32_e32 v23, v33, v23
-; GFX9-NEXT: v_bfe_u32 v33, v23, 16, 1
-; GFX9-NEXT: v_and_b32_e32 v34, 0xff800000, v23
; GFX9-NEXT: v_and_b32_e32 v22, 0xffff0000, v22
; GFX9-NEXT: v_and_b32_e32 v6, 0xffff0000, v6
+; GFX9-NEXT: v_bfe_u32 v33, v23, 16, 1
+; GFX9-NEXT: v_add_f32_e32 v6, v6, v22
; GFX9-NEXT: v_add3_u32 v33, v33, v23, s4
-; GFX9-NEXT: v_or_b32_e32 v34, 0x400000, v34
+; GFX9-NEXT: v_or_b32_e32 v34, 0x400000, v23
; GFX9-NEXT: v_cmp_u_f32_e32 vcc, v23, v23
-; GFX9-NEXT: v_add_f32_e32 v6, v6, v22
-; GFX9-NEXT: v_cndmask_b32_e32 v23, v33, v34, vcc
; GFX9-NEXT: v_bfe_u32 v22, v6, 16, 1
-; GFX9-NEXT: v_and_b32_e32 v33, 0xff800000, v6
+; GFX9-NEXT: v_cndmask_b32_e32 v23, v33, v34, vcc
; GFX9-NEXT: v_add3_u32 v22, v22, v6, s4
-; GFX9-NEXT: v_or_b32_e32 v33, 0x400000, v33
+; GFX9-NEXT: v_or_b32_e32 v33, 0x400000, v6
; GFX9-NEXT: v_cmp_u_f32_e32 vcc, v6, v6
; GFX9-NEXT: v_cndmask_b32_e32 v6, v22, v33, vcc
; GFX9-NEXT: v_lshlrev_b32_e32 v22, 16, v21
; GFX9-NEXT: v_lshlrev_b32_e32 v33, 16, v5
; GFX9-NEXT: v_add_f32_e32 v22, v33, v22
-; GFX9-NEXT: v_bfe_u32 v33, v22, 16, 1
-; GFX9-NEXT: v_and_b32_e32 v34, 0xff800000, v22
; GFX9-NEXT: v_and_b32_e32 v21, 0xffff0000, v21
; GFX9-NEXT: v_and_b32_e32 v5, 0xffff0000, v5
+; GFX9-NEXT: v_bfe_u32 v33, v22, 16, 1
+; GFX9-NEXT: v_add_f32_e32 v5, v5, v21
; GFX9-NEXT: v_add3_u32 v33, v33, v22, s4
-; GFX9-NEXT: v_or_b32_e32 v34, 0x400000, v34
+; GFX9-NEXT: v_or_b32_e32 v34, 0x400000, v22
; GFX9-NEXT: v_cmp_u_f32_e32 vcc, v22, v22
-; GFX9-NEXT: v_add_f32_e32 v5, v5, v21
-; GFX9-NEXT: v_cndmask_b32_e32 v22, v33, v34, vcc
; GFX9-NEXT: v_bfe_u32 v21, v5, 16, 1
-; GFX9-NEXT: v_and_b32_e32 v33, 0xff800000, v5
+; GFX9-NEXT: v_cndmask_b32_e32 v22, v33, v34, vcc
; GFX9-NEXT: v_add3_u32 v21, v21, v5, s4
-; GFX9-NEXT: v_or_b32_e32 v33, 0x400000, v33
+; GFX9-NEXT: v_or_b32_e32 v33, 0x400000, v5
; GFX9-NEXT: v_cmp_u_f32_e32 vcc, v5, v5
; GFX9-NEXT: v_cndmask_b32_e32 v5, v21, v33, vcc
; GFX9-NEXT: v_lshlrev_b32_e32 v21, 16, v20
; GFX9-NEXT: v_lshlrev_b32_e32 v33, 16, v4
; GFX9-NEXT: v_add_f32_e32 v21, v33, v21
-; GFX9-NEXT: v_bfe_u32 v33, v21, 16, 1
-; GFX9-NEXT: v_and_b32_e32 v34, 0xff800000, v21
; GFX9-NEXT: v_and_b32_e32 v20, 0xffff0000, v20
; GFX9-NEXT: v_and_b32_e32 v4, 0xffff0000, v4
+; GFX9-NEXT: v_bfe_u32 v33, v21, 16, 1
+; GFX9-NEXT: v_add_f32_e32 v4, v4, v20
; GFX9-NEXT: v_add3_u32 v33, v33, v21, s4
-; GFX9-NEXT: v_or_b32_e32 v34, 0x400000, v34
+; GFX9-NEXT: v_or_b32_e32 v34, 0x400000, v21
; GFX9-NEXT: v_cmp_u_f32_e32 vcc, v21, v21
-; GFX9-NEXT: v_add_f32_e32 v4, v4, v20
-; GFX9-NEXT: v_cndmask_b32_e32 v21, v33, v34, vcc
; GFX9-NEXT: v_bfe_u32 v20, v4, 16, 1
-; GFX9-NEXT: v_and_b32_e32 v33, 0xff800000, v4
+; GFX9-NEXT: v_cndmask_b32_e32 v21, v33, v34, vcc
; GFX9-NEXT: v_add3_u32 v20, v20, v4, s4
-; GFX9-NEXT: v_or_b32_e32 v33, 0x400000, v33
+; GFX9-NEXT: v_or_b32_e32 v33, 0x400000, v4
; GFX9-NEXT: v_cmp_u_f32_e32 vcc, v4, v4
; GFX9-NEXT: v_cndmask_b32_e32 v4, v20, v33, vcc
; GFX9-NEXT: v_lshlrev_b32_e32 v20, 16, v19
; GFX9-NEXT: v_lshlrev_b32_e32 v33, 16, v3
; GFX9-NEXT: v_add_f32_e32 v20, v33, v20
-; GFX9-NEXT: v_bfe_u32 v33, v20, 16, 1
-; GFX9-NEXT: v_and_b32_e32 v34, 0xff800000, v20
; GFX9-NEXT: v_and_b32_e32 v19, 0xffff0000, v19
; GFX9-NEXT: v_and_b32_e32 v3, 0xffff0000, v3
+; GFX9-NEXT: v_bfe_u32 v33, v20, 16, 1
+; GFX9-NEXT: v_add_f32_e32 v3, v3, v19
; GFX9-NEXT: v_add3_u32 v33, v33, v20, s4
-; GFX9-NEXT: v_or_b32_e32 v34, 0x400000, v34
+; GFX9-NEXT: v_or_b32_e32 v34, 0x400000, v20
; GFX9-NEXT: v_cmp_u_f32_e32 vcc, v20, v20
-; GFX9-NEXT: v_add_f32_e32 v3, v3, v19
-; GFX9-NEXT: v_cndmask_b32_e32 v20, v33, v34, vcc
; GFX9-NEXT: v_bfe_u32 v19, v3, 16, 1
-; GFX9-NEXT: v_and_b32_e32 v33, 0xff800000, v3
+; GFX9-NEXT: v_cndmask_b32_e32 v20, v33, v34, vcc
; GFX9-NEXT: v_add3_u32 v19, v19, v3, s4
-; GFX9-NEXT: v_or_b32_e32 v33, 0x400000, v33
+; GFX9-NEXT: v_or_b32_e32 v33, 0x400000, v3
; GFX9-NEXT: v_cmp_u_f32_e32 vcc, v3, v3
; GFX9-NEXT: v_cndmask_b32_e32 v3, v19, v33, vcc
; GFX9-NEXT: v_lshlrev_b32_e32 v19, 16, v18
; GFX9-NEXT: v_lshlrev_b32_e32 v33, 16, v2
; GFX9-NEXT: v_add_f32_e32 v19, v33, v19
-; GFX9-NEXT: v_bfe_u32 v33, v19, 16, 1
-; GFX9-NEXT: v_and_b32_e32 v34, 0xff800000, v19
; GFX9-NEXT: v_and_b32_e32 v18, 0xffff0000, v18
; GFX9-NEXT: v_and_b32_e32 v2, 0xffff0000, v2
+; GFX9-NEXT: v_bfe_u32 v33, v19, 16, 1
+; GFX9-NEXT: v_add_f32_e32 v2, v2, v18
; GFX9-NEXT: v_add3_u32 v33, v33, v19, s4
-; GFX9-NEXT: v_or_b32_e32 v34, 0x400000, v34
+; GFX9-NEXT: v_or_b32_e32 v34, 0x400000, v19
; GFX9-NEXT: v_cmp_u_f32_e32 vcc, v19, v19
-; GFX9-NEXT: v_add_f32_e32 v2, v2, v18
-; GFX9-NEXT: v_cndmask_b32_e32 v19, v33, v34, vcc
; GFX9-NEXT: v_bfe_u32 v18, v2, 16, 1
-; GFX9-NEXT: v_and_b32_e32 v33, 0xff800000, v2
+; GFX9-NEXT: v_cndmask_b32_e32 v19, v33, v34, vcc
; GFX9-NEXT: v_add3_u32 v18, v18, v2, s4
-; GFX9-NEXT: v_or_b32_e32 v33, 0x400000, v33
+; GFX9-NEXT: v_or_b32_e32 v33, 0x400000, v2
; GFX9-NEXT: v_cmp_u_f32_e32 vcc, v2, v2
; GFX9-NEXT: v_cndmask_b32_e32 v2, v18, v33, vcc
; GFX9-NEXT: v_lshlrev_b32_e32 v18, 16, v17
; GFX9-NEXT: v_lshlrev_b32_e32 v33, 16, v1
; GFX9-NEXT: v_add_f32_e32 v18, v33, v18
-; GFX9-NEXT: v_bfe_u32 v33, v18, 16, 1
-; GFX9-NEXT: v_and_b32_e32 v34, 0xff800000, v18
; GFX9-NEXT: v_and_b32_e32 v17, 0xffff0000, v17
; GFX9-NEXT: v_and_b32_e32 v1, 0xffff0000, v1
+; GFX9-NEXT: v_bfe_u32 v33, v18, 16, 1
+; GFX9-NEXT: v_add_f32_e32 v1, v1, v17
; GFX9-NEXT: v_add3_u32 v33, v33, v18, s4
-; GFX9-NEXT: v_or_b32_e32 v34, 0x400000, v34
+; GFX9-NEXT: v_or_b32_e32 v34, 0x400000, v18
; GFX9-NEXT: v_cmp_u_f32_e32 vcc, v18, v18
-; GFX9-NEXT: v_add_f32_e32 v1, v1, v17
-; GFX9-NEXT: v_cndmask_b32_e32 v18, v33, v34, vcc
; GFX9-NEXT: v_bfe_u32 v17, v1, 16, 1
-; GFX9-NEXT: v_and_b32_e32 v33, 0xff800000, v1
+; GFX9-NEXT: v_cndmask_b32_e32 v18, v33, v34, vcc
; GFX9-NEXT: v_add3_u32 v17, v17, v1, s4
-; GFX9-NEXT: v_or_b32_e32 v33, 0x400000, v33
+; GFX9-NEXT: v_or_b32_e32 v33, 0x400000, v1
; GFX9-NEXT: v_cmp_u_f32_e32 vcc, v1, v1
; GFX9-NEXT: v_cndmask_b32_e32 v1, v17, v33, vcc
; GFX9-NEXT: v_lshlrev_b32_e32 v17, 16, v16
; GFX9-NEXT: v_lshlrev_b32_e32 v33, 16, v0
; GFX9-NEXT: v_add_f32_e32 v17, v33, v17
-; GFX9-NEXT: v_bfe_u32 v33, v17, 16, 1
-; GFX9-NEXT: v_and_b32_e32 v34, 0xff800000, v17
; GFX9-NEXT: v_and_b32_e32 v16, 0xffff0000, v16
; GFX9-NEXT: v_and_b32_e32 v0, 0xffff0000, v0
+; GFX9-NEXT: v_bfe_u32 v33, v17, 16, 1
+; GFX9-NEXT: v_add_f32_e32 v0, v0, v16
; GFX9-NEXT: v_add3_u32 v33, v33, v17, s4
-; GFX9-NEXT: v_or_b32_e32 v34, 0x400000, v34
+; GFX9-NEXT: v_or_b32_e32 v34, 0x400000, v17
; GFX9-NEXT: v_cmp_u_f32_e32 vcc, v17, v17
-; GFX9-NEXT: v_add_f32_e32 v0, v0, v16
-; GFX9-NEXT: v_cndmask_b32_e32 v17, v33, v34, vcc
; GFX9-NEXT: v_bfe_u32 v16, v0, 16, 1
-; GFX9-NEXT: v_and_b32_e32 v33, 0xff800000, v0
+; GFX9-NEXT: v_cndmask_b32_e32 v17, v33, v34, vcc
; GFX9-NEXT: v_add3_u32 v16, v16, v0, s4
-; GFX9-NEXT: v_or_b32_e32 v33, 0x400000, v33
+; GFX9-NEXT: v_or_b32_e32 v33, 0x400000, v0
; GFX9-NEXT: v_cmp_u_f32_e32 vcc, v0, v0
; GFX9-NEXT: v_cndmask_b32_e32 v0, v16, v33, vcc
; GFX9-NEXT: s_mov_b32 s4, 0x7060302
@@ -12098,7 +11945,7 @@ define <32 x bfloat> @v_fadd_v32bf16(<32 x bfloat> %a, <32 x bfloat> %b) {
; GFX10-LABEL: v_fadd_v32bf16:
; GFX10: ; %bb.0:
; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX10-NEXT: buffer_load_dword v31, off, s[0:3], s32
+; GFX10-NEXT: buffer_load_dword v32, off, s[0:3], s32
; GFX10-NEXT: v_lshlrev_b32_e32 v39, 16, v27
; GFX10-NEXT: v_lshlrev_b32_e32 v48, 16, v11
; GFX10-NEXT: v_and_b32_e32 v27, 0xffff0000, v27
@@ -12163,7 +12010,6 @@ define <32 x bfloat> @v_fadd_v32bf16(<32 x bfloat> %a, <32 x bfloat> %b) {
; GFX10-NEXT: v_bfe_u32 v55, v11, 16, 1
; GFX10-NEXT: v_bfe_u32 v65, v49, 16, 1
; GFX10-NEXT: v_bfe_u32 v67, v10, 16, 1
-; GFX10-NEXT: s_mov_b32 s23, 0xff800000
; GFX10-NEXT: v_lshlrev_b32_e32 v33, 16, v30
; GFX10-NEXT: v_lshlrev_b32_e32 v34, 16, v14
; GFX10-NEXT: v_and_b32_e32 v30, 0xffff0000, v30
@@ -12179,10 +12025,10 @@ define <32 x bfloat> @v_fadd_v32bf16(<32 x bfloat> %a, <32 x bfloat> %b) {
; GFX10-NEXT: v_add_f32_e32 v1, v1, v17
; GFX10-NEXT: v_add_f32_e32 v17, v26, v50
; GFX10-NEXT: v_add_f32_e32 v0, v0, v16
-; GFX10-NEXT: v_and_or_b32 v54, v39, s23, 0x400000
-; GFX10-NEXT: v_and_or_b32 v64, v11, s23, 0x400000
-; GFX10-NEXT: v_and_or_b32 v66, v49, s23, 0x400000
-; GFX10-NEXT: v_and_or_b32 v68, v10, s23, 0x400000
+; GFX10-NEXT: v_or_b32_e32 v54, 0x400000, v39
+; GFX10-NEXT: v_or_b32_e32 v64, 0x400000, v11
+; GFX10-NEXT: v_or_b32_e32 v66, 0x400000, v49
+; GFX10-NEXT: v_or_b32_e32 v68, 0x400000, v10
; GFX10-NEXT: v_cmp_u_f32_e64 s9, v39, v39
; GFX10-NEXT: v_add3_u32 v39, v53, v39, 0x7fff
; GFX10-NEXT: v_cmp_u_f32_e64 s10, v11, v11
@@ -12220,28 +12066,28 @@ define <32 x bfloat> @v_fadd_v32bf16(<32 x bfloat> %a, <32 x bfloat> %b) {
; GFX10-NEXT: v_bfe_u32 v27, v14, 16, 1
; GFX10-NEXT: v_bfe_u32 v29, v35, 16, 1
; GFX10-NEXT: v_bfe_u32 v34, v13, 16, 1
-; GFX10-NEXT: v_and_or_b32 v48, v37, s23, 0x400000
-; GFX10-NEXT: v_and_or_b32 v52, v12, s23, 0x400000
+; GFX10-NEXT: v_or_b32_e32 v48, 0x400000, v37
+; GFX10-NEXT: v_or_b32_e32 v52, 0x400000, v12
; GFX10-NEXT: v_cmp_u_f32_e64 s7, v37, v37
; GFX10-NEXT: v_add3_u32 v37, v38, v37, 0x7fff
; GFX10-NEXT: v_cmp_u_f32_e64 s8, v12, v12
; GFX10-NEXT: v_add3_u32 v12, v50, v12, 0x7fff
; GFX10-NEXT: v_cmp_u_f32_e64 s10, v18, v18
; GFX10-NEXT: v_add3_u32 v54, v54, v18, 0x7fff
-; GFX10-NEXT: v_and_or_b32 v18, v18, s23, 0x400000
+; GFX10-NEXT: v_or_b32_e32 v18, 0x400000, v18
; GFX10-NEXT: v_cmp_u_f32_e64 s11, v1, v1
; GFX10-NEXT: v_add3_u32 v64, v64, v1, 0x7fff
-; GFX10-NEXT: v_and_or_b32 v1, v1, s23, 0x400000
+; GFX10-NEXT: v_or_b32_e32 v1, 0x400000, v1
; GFX10-NEXT: v_cmp_u_f32_e64 s12, v17, v17
; GFX10-NEXT: v_add3_u32 v66, v66, v17, 0x7fff
-; GFX10-NEXT: v_and_or_b32 v17, v17, s23, 0x400000
+; GFX10-NEXT: v_or_b32_e32 v17, 0x400000, v17
; GFX10-NEXT: v_cmp_u_f32_e64 s22, v0, v0
; GFX10-NEXT: v_add3_u32 v68, v68, v0, 0x7fff
-; GFX10-NEXT: v_and_or_b32 v0, v0, s23, 0x400000
-; GFX10-NEXT: v_and_or_b32 v26, v33, s23, 0x400000
-; GFX10-NEXT: v_and_or_b32 v28, v14, s23, 0x400000
-; GFX10-NEXT: v_and_or_b32 v30, v35, s23, 0x400000
-; GFX10-NEXT: v_and_or_b32 v36, v13, s23, 0x400000
+; GFX10-NEXT: v_or_b32_e32 v0, 0x400000, v0
+; GFX10-NEXT: v_or_b32_e32 v26, 0x400000, v33
+; GFX10-NEXT: v_or_b32_e32 v28, 0x400000, v14
+; GFX10-NEXT: v_or_b32_e32 v30, 0x400000, v35
+; GFX10-NEXT: v_or_b32_e32 v36, 0x400000, v13
; GFX10-NEXT: v_cmp_u_f32_e32 vcc_lo, v33, v33
; GFX10-NEXT: v_add3_u32 v16, v16, v33, 0x7fff
; GFX10-NEXT: v_bfe_u32 v33, v51, 16, 1
@@ -12260,12 +12106,12 @@ define <32 x bfloat> @v_fadd_v32bf16(<32 x bfloat> %a, <32 x bfloat> %b) {
; GFX10-NEXT: v_cndmask_b32_e64 v17, v66, v17, s12
; GFX10-NEXT: v_cndmask_b32_e64 v0, v68, v0, s22
; GFX10-NEXT: v_cndmask_b32_e64 v1, v64, v1, s11
-; GFX10-NEXT: v_lshlrev_b32_e32 v32, 16, v15
+; GFX10-NEXT: v_lshlrev_b32_e32 v31, 16, v15
; GFX10-NEXT: v_and_b32_e32 v15, 0xffff0000, v15
-; GFX10-NEXT: v_and_or_b32 v27, v51, s23, 0x400000
+; GFX10-NEXT: v_or_b32_e32 v27, 0x400000, v51
; GFX10-NEXT: v_bfe_u32 v35, v9, 16, 1
; GFX10-NEXT: v_bfe_u32 v38, v25, 16, 1
-; GFX10-NEXT: v_and_or_b32 v67, v24, s23, 0x400000
+; GFX10-NEXT: v_or_b32_e32 v67, 0x400000, v24
; GFX10-NEXT: v_cmp_u_f32_e64 s13, v51, v51
; GFX10-NEXT: v_add3_u32 v33, v33, v51, 0x7fff
; GFX10-NEXT: v_bfe_u32 v51, v7, 16, 1
@@ -12282,51 +12128,51 @@ define <32 x bfloat> @v_fadd_v32bf16(<32 x bfloat> %a, <32 x bfloat> %b) {
; GFX10-NEXT: v_bfe_u32 v36, v3, 16, 1
; GFX10-NEXT: v_cmp_u_f32_e64 s8, v19, v19
; GFX10-NEXT: v_add3_u32 v48, v48, v19, 0x7fff
-; GFX10-NEXT: v_and_or_b32 v19, v19, s23, 0x400000
+; GFX10-NEXT: v_or_b32_e32 v19, 0x400000, v19
; GFX10-NEXT: v_cmp_u_f32_e64 s9, v2, v2
; GFX10-NEXT: v_add3_u32 v52, v52, v2, 0x7fff
-; GFX10-NEXT: v_and_or_b32 v2, v2, s23, 0x400000
+; GFX10-NEXT: v_or_b32_e32 v2, 0x400000, v2
; GFX10-NEXT: v_perm_b32 v0, v0, v17, 0x7060302
; GFX10-NEXT: v_perm_b32 v1, v1, v18, 0x7060302
-; GFX10-NEXT: v_and_or_b32 v34, v9, s23, 0x400000
-; GFX10-NEXT: v_and_or_b32 v50, v25, s23, 0x400000
+; GFX10-NEXT: v_or_b32_e32 v34, 0x400000, v9
+; GFX10-NEXT: v_or_b32_e32 v50, 0x400000, v25
; GFX10-NEXT: v_bfe_u32 v53, v8, 16, 1
; GFX10-NEXT: v_cmp_u_f32_e64 s14, v9, v9
; GFX10-NEXT: v_add3_u32 v9, v35, v9, 0x7fff
-; GFX10-NEXT: v_and_or_b32 v35, v7, s23, 0x400000
+; GFX10-NEXT: v_or_b32_e32 v35, 0x400000, v7
; GFX10-NEXT: v_cmp_u_f32_e64 s15, v25, v25
; GFX10-NEXT: v_add3_u32 v25, v38, v25, 0x7fff
; GFX10-NEXT: v_bfe_u32 v38, v23, 16, 1
; GFX10-NEXT: v_cmp_u_f32_e64 s18, v7, v7
; GFX10-NEXT: v_add3_u32 v7, v51, v7, 0x7fff
-; GFX10-NEXT: v_and_or_b32 v51, v6, s23, 0x400000
+; GFX10-NEXT: v_or_b32_e32 v51, 0x400000, v6
; GFX10-NEXT: v_cmp_u_f32_e64 s20, v6, v6
; GFX10-NEXT: v_add3_u32 v6, v65, v6, 0x7fff
; GFX10-NEXT: v_bfe_u32 v65, v5, 16, 1
; GFX10-NEXT: v_cmp_u_f32_e64 s4, v21, v21
; GFX10-NEXT: v_add3_u32 v26, v26, v21, 0x7fff
-; GFX10-NEXT: v_and_or_b32 v21, v21, s23, 0x400000
+; GFX10-NEXT: v_or_b32_e32 v21, 0x400000, v21
; GFX10-NEXT: v_cmp_u_f32_e64 s5, v4, v4
; GFX10-NEXT: v_add3_u32 v28, v28, v4, 0x7fff
-; GFX10-NEXT: v_and_or_b32 v4, v4, s23, 0x400000
+; GFX10-NEXT: v_or_b32_e32 v4, 0x400000, v4
; GFX10-NEXT: v_cmp_u_f32_e64 s6, v20, v20
; GFX10-NEXT: v_add3_u32 v30, v30, v20, 0x7fff
-; GFX10-NEXT: v_and_or_b32 v20, v20, s23, 0x400000
+; GFX10-NEXT: v_or_b32_e32 v20, 0x400000, v20
; GFX10-NEXT: v_cmp_u_f32_e64 s7, v3, v3
; GFX10-NEXT: v_add3_u32 v36, v36, v3, 0x7fff
-; GFX10-NEXT: v_and_or_b32 v3, v3, s23, 0x400000
+; GFX10-NEXT: v_or_b32_e32 v3, 0x400000, v3
; GFX10-NEXT: v_cndmask_b32_e64 v19, v48, v19, s8
; GFX10-NEXT: v_cndmask_b32_e64 v2, v52, v2, s9
-; GFX10-NEXT: v_and_or_b32 v55, v8, s23, 0x400000
+; GFX10-NEXT: v_or_b32_e32 v55, 0x400000, v8
; GFX10-NEXT: v_cmp_u_f32_e64 s16, v8, v8
; GFX10-NEXT: v_add3_u32 v8, v53, v8, 0x7fff
-; GFX10-NEXT: v_and_or_b32 v53, v23, s23, 0x400000
+; GFX10-NEXT: v_or_b32_e32 v53, 0x400000, v23
; GFX10-NEXT: v_cmp_u_f32_e64 s19, v23, v23
; GFX10-NEXT: v_add3_u32 v23, v38, v23, 0x7fff
; GFX10-NEXT: v_bfe_u32 v38, v22, 16, 1
; GFX10-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
; GFX10-NEXT: v_add3_u32 v65, v65, v5, 0x7fff
-; GFX10-NEXT: v_and_or_b32 v5, v5, s23, 0x400000
+; GFX10-NEXT: v_or_b32_e32 v5, 0x400000, v5
; GFX10-NEXT: v_cndmask_b32_e64 v21, v26, v21, s4
; GFX10-NEXT: v_cndmask_b32_e64 v4, v28, v4, s5
; GFX10-NEXT: v_cndmask_b32_e64 v20, v30, v20, s6
@@ -12334,7 +12180,7 @@ define <32 x bfloat> @v_fadd_v32bf16(<32 x bfloat> %a, <32 x bfloat> %b) {
; GFX10-NEXT: v_perm_b32 v2, v2, v19, 0x7060302
; GFX10-NEXT: v_cmp_u_f32_e64 s21, v22, v22
; GFX10-NEXT: v_add3_u32 v38, v38, v22, 0x7fff
-; GFX10-NEXT: v_and_or_b32 v22, v22, s23, 0x400000
+; GFX10-NEXT: v_or_b32_e32 v22, 0x400000, v22
; GFX10-NEXT: v_cndmask_b32_e32 v5, v65, v5, vcc_lo
; GFX10-NEXT: v_perm_b32 v3, v3, v20, 0x7060302
; GFX10-NEXT: v_perm_b32 v4, v4, v21, 0x7060302
@@ -12358,14 +12204,14 @@ define <32 x bfloat> @v_fadd_v32bf16(<32 x bfloat> %a, <32 x bfloat> %b) {
; GFX10-NEXT: v_perm_b32 v13, v13, v29, 0x7060302
; GFX10-NEXT: v_perm_b32 v14, v14, v16, 0x7060302
; GFX10-NEXT: s_waitcnt vmcnt(0)
-; GFX10-NEXT: v_lshlrev_b32_e32 v17, 16, v31
-; GFX10-NEXT: v_and_b32_e32 v18, 0xffff0000, v31
-; GFX10-NEXT: v_add_f32_e32 v17, v32, v17
+; GFX10-NEXT: v_lshlrev_b32_e32 v17, 16, v32
+; GFX10-NEXT: v_and_b32_e32 v18, 0xffff0000, v32
+; GFX10-NEXT: v_add_f32_e32 v17, v31, v17
; GFX10-NEXT: v_add_f32_e32 v15, v15, v18
; GFX10-NEXT: v_bfe_u32 v18, v17, 16, 1
; GFX10-NEXT: v_bfe_u32 v19, v15, 16, 1
-; GFX10-NEXT: v_and_or_b32 v20, v17, s23, 0x400000
-; GFX10-NEXT: v_and_or_b32 v21, v15, s23, 0x400000
+; GFX10-NEXT: v_or_b32_e32 v20, 0x400000, v17
+; GFX10-NEXT: v_or_b32_e32 v21, 0x400000, v15
; GFX10-NEXT: v_cmp_u_f32_e32 vcc_lo, v17, v17
; GFX10-NEXT: v_cmp_u_f32_e64 s4, v15, v15
; GFX10-NEXT: v_add3_u32 v17, v18, v17, 0x7fff
@@ -12378,212 +12224,219 @@ define <32 x bfloat> @v_fadd_v32bf16(<32 x bfloat> %a, <32 x bfloat> %b) {
; GFX11-LABEL: v_fadd_v32bf16:
; GFX11: ; %bb.0:
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: scratch_load_b32 v31, off, s32
+; GFX11-NEXT: scratch_load_b32 v32, off, s32
+; GFX11-NEXT: v_lshlrev_b32_e32 v67, 16, v21
+; GFX11-NEXT: v_lshlrev_b32_e32 v68, 16, v5
+; GFX11-NEXT: v_and_b32_e32 v21, 0xffff0000, v21
+; GFX11-NEXT: v_and_b32_e32 v5, 0xffff0000, v5
; GFX11-NEXT: v_lshlrev_b32_e32 v83, 16, v17
; GFX11-NEXT: v_lshlrev_b32_e32 v84, 16, v1
; GFX11-NEXT: v_and_b32_e32 v17, 0xffff0000, v17
; GFX11-NEXT: v_and_b32_e32 v1, 0xffff0000, v1
+; GFX11-NEXT: v_lshlrev_b32_e32 v49, 16, v26
+; GFX11-NEXT: v_dual_add_f32 v5, v5, v21 :: v_dual_and_b32 v26, 0xffff0000, v26
; GFX11-NEXT: v_lshlrev_b32_e32 v53, 16, v24
-; GFX11-NEXT: v_and_b32_e32 v24, 0xffff0000, v24
-; GFX11-NEXT: s_mov_b32 s0, 0xff800000
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3)
-; GFX11-NEXT: v_dual_add_f32 v1, v1, v17 :: v_dual_lshlrev_b32 v64, 16, v7
-; GFX11-NEXT: v_and_b32_e32 v7, 0xffff0000, v7
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_1) | instid1(VALU_DEP_4)
+; GFX11-NEXT: v_dual_add_f32 v1, v1, v17 :: v_dual_and_b32 v24, 0xffff0000, v24
+; GFX11-NEXT: v_lshlrev_b32_e32 v71, 16, v19
+; GFX11-NEXT: v_bfe_u32 v103, v5, 16, 1
+; GFX11-NEXT: v_and_b32_e32 v19, 0xffff0000, v19
; GFX11-NEXT: v_lshlrev_b32_e32 v81, 16, v18
-; GFX11-NEXT: v_lshlrev_b32_e32 v85, 16, v16
-; GFX11-NEXT: v_lshlrev_b32_e32 v86, 16, v0
; GFX11-NEXT: v_bfe_u32 v135, v1, 16, 1
-; GFX11-NEXT: v_and_b32_e32 v16, 0xffff0000, v16
-; GFX11-NEXT: v_and_b32_e32 v0, 0xffff0000, v0
-; GFX11-NEXT: v_lshlrev_b32_e32 v55, 16, v23
-; GFX11-NEXT: v_and_b32_e32 v23, 0xffff0000, v23
-; GFX11-NEXT: v_and_or_b32 v144, v1, s0, 0x400000
+; GFX11-NEXT: v_or_b32_e32 v112, 0x400000, v5
+; GFX11-NEXT: v_or_b32_e32 v144, 0x400000, v1
+; GFX11-NEXT: v_add3_u32 v103, v103, v5, 0x7fff
+; GFX11-NEXT: v_lshlrev_b32_e32 v80, 16, v3
+; GFX11-NEXT: v_and_b32_e32 v3, 0xffff0000, v3
; GFX11-NEXT: v_add3_u32 v135, v135, v1, 0x7fff
; GFX11-NEXT: v_lshlrev_b32_e32 v82, 16, v2
-; GFX11-NEXT: v_lshlrev_b32_e32 v54, 16, v8
-; GFX11-NEXT: v_dual_add_f32 v17, v86, v85 :: v_dual_and_b32 v8, 0xffff0000, v8
-; GFX11-NEXT: v_dual_add_f32 v7, v7, v23 :: v_dual_lshlrev_b32 v36, 16, v13
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_2) | instid1(VALU_DEP_4)
-; GFX11-NEXT: v_dual_add_f32 v8, v8, v24 :: v_dual_lshlrev_b32 v39, 16, v27
-; GFX11-NEXT: v_dual_add_f32 v0, v0, v16 :: v_dual_lshlrev_b32 v49, 16, v26
-; GFX11-NEXT: v_add_f32_e32 v24, v64, v55
-; GFX11-NEXT: v_bfe_u32 v87, v7, 16, 1
+; GFX11-NEXT: v_lshlrev_b32_e32 v52, 16, v9
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_1) | instid1(VALU_DEP_4)
+; GFX11-NEXT: v_dual_add_f32 v3, v3, v19 :: v_dual_lshlrev_b32 v54, 16, v8
+; GFX11-NEXT: v_lshlrev_b32_e32 v85, 16, v16
+; GFX11-NEXT: v_dual_add_f32 v19, v82, v81 :: v_dual_lshlrev_b32 v64, 16, v7
+; GFX11-NEXT: v_and_b32_e32 v7, 0xffff0000, v7
; GFX11-NEXT: v_lshlrev_b32_e32 v65, 16, v22
; GFX11-NEXT: v_lshlrev_b32_e32 v66, 16, v6
-; GFX11-NEXT: v_and_b32_e32 v22, 0xffff0000, v22
-; GFX11-NEXT: v_bfe_u32 v85, v24, 16, 1
-; GFX11-NEXT: v_lshlrev_b32_e32 v67, 16, v21
-; GFX11-NEXT: v_lshlrev_b32_e32 v68, 16, v5
-; GFX11-NEXT: v_and_b32_e32 v21, 0xffff0000, v21
-; GFX11-NEXT: v_and_b32_e32 v5, 0xffff0000, v5
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4)
+; GFX11-NEXT: v_bfe_u32 v129, v19, 16, 1
+; GFX11-NEXT: v_or_b32_e32 v130, 0x400000, v19
+; GFX11-NEXT: v_lshlrev_b32_e32 v48, 16, v11
+; GFX11-NEXT: v_bfe_u32 v119, v3, 16, 1
+; GFX11-NEXT: v_lshlrev_b32_e32 v51, 16, v25
+; GFX11-NEXT: v_add3_u32 v129, v129, v19, 0x7fff
+; GFX11-NEXT: v_lshlrev_b32_e32 v86, 16, v0
+; GFX11-NEXT: v_and_b32_e32 v8, 0xffff0000, v8
+; GFX11-NEXT: v_and_b32_e32 v0, 0xffff0000, v0
+; GFX11-NEXT: v_and_b32_e32 v9, 0xffff0000, v9
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
+; GFX11-NEXT: v_dual_add_f32 v17, v86, v85 :: v_dual_and_b32 v2, 0xffff0000, v2
+; GFX11-NEXT: v_dual_add_f32 v8, v8, v24 :: v_dual_lshlrev_b32 v39, 16, v27
+; GFX11-NEXT: v_or_b32_e32 v128, 0x400000, v3
+; GFX11-NEXT: v_add3_u32 v119, v119, v3, 0x7fff
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4)
+; GFX11-NEXT: v_bfe_u32 v145, v17, 16, 1
+; GFX11-NEXT: v_or_b32_e32 v146, 0x400000, v17
+; GFX11-NEXT: v_and_b32_e32 v18, 0xffff0000, v18
+; GFX11-NEXT: v_and_b32_e32 v25, 0xffff0000, v25
; GFX11-NEXT: v_lshlrev_b32_e32 v70, 16, v4
-; GFX11-NEXT: v_and_or_b32 v86, v24, s0, 0x400000
-; GFX11-NEXT: v_and_or_b32 v96, v7, s0, 0x400000
+; GFX11-NEXT: v_add3_u32 v145, v145, v17, 0x7fff
+; GFX11-NEXT: v_and_b32_e32 v16, 0xffff0000, v16
+; GFX11-NEXT: v_lshlrev_b32_e32 v55, 16, v23
+; GFX11-NEXT: v_and_b32_e32 v23, 0xffff0000, v23
+; GFX11-NEXT: v_lshlrev_b32_e32 v50, 16, v10
+; GFX11-NEXT: v_add_f32_e32 v2, v2, v18
+; GFX11-NEXT: v_add_f32_e32 v0, v0, v16
+; GFX11-NEXT: v_dual_add_f32 v24, v64, v55 :: v_dual_lshlrev_b32 v37, 16, v28
+; GFX11-NEXT: v_add_f32_e32 v7, v7, v23
+; GFX11-NEXT: v_dual_add_f32 v23, v66, v65 :: v_dual_add_f32 v18, v84, v83
+; GFX11-NEXT: v_dual_add_f32 v9, v9, v25 :: v_dual_and_b32 v28, 0xffff0000, v28
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX11-NEXT: v_bfe_u32 v85, v24, 16, 1
+; GFX11-NEXT: v_bfe_u32 v97, v23, 16, 1
+; GFX11-NEXT: v_or_b32_e32 v86, 0x400000, v24
+; GFX11-NEXT: v_or_b32_e32 v98, 0x400000, v23
+; GFX11-NEXT: v_bfe_u32 v87, v7, 16, 1
; GFX11-NEXT: v_add3_u32 v85, v85, v24, 0x7fff
; GFX11-NEXT: v_lshlrev_b32_e32 v69, 16, v20
-; GFX11-NEXT: v_add3_u32 v87, v87, v7, 0x7fff
-; GFX11-NEXT: v_and_b32_e32 v6, 0xffff0000, v6
-; GFX11-NEXT: v_dual_add_f32 v23, v66, v65 :: v_dual_lshlrev_b32 v48, 16, v11
-; GFX11-NEXT: v_and_b32_e32 v27, 0xffff0000, v27
-; GFX11-NEXT: v_dual_add_f32 v5, v5, v21 :: v_dual_lshlrev_b32 v50, 16, v10
-; GFX11-NEXT: v_dual_add_f32 v21, v70, v69 :: v_dual_and_b32 v26, 0xffff0000, v26
-; GFX11-NEXT: v_and_b32_e32 v18, 0xffff0000, v18
-; GFX11-NEXT: v_and_b32_e32 v2, 0xffff0000, v2
-; GFX11-NEXT: v_add_f32_e32 v6, v6, v22
-; GFX11-NEXT: v_lshlrev_b32_e32 v52, 16, v9
-; GFX11-NEXT: v_and_b32_e32 v9, 0xffff0000, v9
-; GFX11-NEXT: v_bfe_u32 v97, v23, 16, 1
-; GFX11-NEXT: v_add_f32_e32 v2, v2, v18
-; GFX11-NEXT: v_add_f32_e32 v18, v84, v83
-; GFX11-NEXT: v_bfe_u32 v83, v8, 16, 1
-; GFX11-NEXT: v_bfe_u32 v99, v6, 16, 1
-; GFX11-NEXT: v_bfe_u32 v103, v5, 16, 1
-; GFX11-NEXT: v_bfe_u32 v113, v21, 16, 1
-; GFX11-NEXT: v_lshlrev_b32_e32 v71, 16, v19
-; GFX11-NEXT: v_and_or_b32 v84, v8, s0, 0x400000
-; GFX11-NEXT: v_and_or_b32 v98, v23, s0, 0x400000
-; GFX11-NEXT: v_and_or_b32 v100, v6, s0, 0x400000
-; GFX11-NEXT: v_and_or_b32 v112, v5, s0, 0x400000
-; GFX11-NEXT: v_and_or_b32 v114, v21, s0, 0x400000
-; GFX11-NEXT: v_add3_u32 v83, v83, v8, 0x7fff
-; GFX11-NEXT: v_and_b32_e32 v19, 0xffff0000, v19
-; GFX11-NEXT: v_add3_u32 v97, v97, v23, 0x7fff
; GFX11-NEXT: v_and_b32_e32 v20, 0xffff0000, v20
-; GFX11-NEXT: v_add3_u32 v99, v99, v6, 0x7fff
-; GFX11-NEXT: v_add3_u32 v103, v103, v5, 0x7fff
-; GFX11-NEXT: v_lshlrev_b32_e32 v80, 16, v3
-; GFX11-NEXT: v_and_b32_e32 v3, 0xffff0000, v3
-; GFX11-NEXT: v_add3_u32 v113, v113, v21, 0x7fff
+; GFX11-NEXT: v_add3_u32 v97, v97, v23, 0x7fff
; GFX11-NEXT: v_and_b32_e32 v4, 0xffff0000, v4
-; GFX11-NEXT: v_lshlrev_b32_e32 v38, 16, v12
; GFX11-NEXT: v_and_b32_e32 v11, 0xffff0000, v11
-; GFX11-NEXT: v_dual_add_f32 v3, v3, v19 :: v_dual_and_b32 v10, 0xffff0000, v10
-; GFX11-NEXT: v_dual_add_f32 v22, v68, v67 :: v_dual_lshlrev_b32 v51, 16, v25
-; GFX11-NEXT: v_lshlrev_b32_e32 v37, 16, v28
-; GFX11-NEXT: v_dual_add_f32 v4, v4, v20 :: v_dual_and_b32 v25, 0xffff0000, v25
+; GFX11-NEXT: v_or_b32_e32 v96, 0x400000, v7
+; GFX11-NEXT: v_add3_u32 v87, v87, v7, 0x7fff
+; GFX11-NEXT: v_and_b32_e32 v6, 0xffff0000, v6
+; GFX11-NEXT: v_add_f32_e32 v4, v4, v20
; GFX11-NEXT: v_add_f32_e32 v20, v80, v71
-; GFX11-NEXT: v_dual_add_f32 v19, v82, v81 :: v_dual_and_b32 v28, 0xffff0000, v28
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3)
-; GFX11-NEXT: v_dual_add_f32 v9, v9, v25 :: v_dual_and_b32 v12, 0xffff0000, v12
-; GFX11-NEXT: v_add_f32_e32 v25, v54, v53
+; GFX11-NEXT: v_bfe_u32 v71, v9, 16, 1
+; GFX11-NEXT: v_or_b32_e32 v80, 0x400000, v9
; GFX11-NEXT: v_lshlrev_b32_e32 v35, 16, v29
-; GFX11-NEXT: v_and_b32_e32 v29, 0xffff0000, v29
-; GFX11-NEXT: v_dual_add_f32 v10, v10, v26 :: v_dual_and_b32 v13, 0xffff0000, v13
-; GFX11-NEXT: v_dual_add_f32 v12, v12, v28 :: v_dual_lshlrev_b32 v33, 16, v30
-; GFX11-NEXT: v_add_f32_e32 v28, v48, v39
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3)
-; GFX11-NEXT: v_dual_add_f32 v13, v13, v29 :: v_dual_lshlrev_b32 v34, 16, v14
-; GFX11-NEXT: v_dual_add_f32 v11, v11, v27 :: v_dual_and_b32 v14, 0xffff0000, v14
-; GFX11-NEXT: v_dual_add_f32 v27, v50, v49 :: v_dual_add_f32 v26, v52, v51
-; GFX11-NEXT: v_dual_add_f32 v29, v38, v37 :: v_dual_and_b32 v30, 0xffff0000, v30
-; GFX11-NEXT: v_lshlrev_b32_e32 v32, 16, v15
-; GFX11-NEXT: v_and_b32_e32 v15, 0xffff0000, v15
-; GFX11-NEXT: v_bfe_u32 v39, v13, 16, 1
+; GFX11-NEXT: v_dual_add_f32 v21, v70, v69 :: v_dual_and_b32 v10, 0xffff0000, v10
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_1) | instid1(VALU_DEP_3)
+; GFX11-NEXT: v_add3_u32 v71, v71, v9, 0x7fff
+; GFX11-NEXT: v_and_b32_e32 v22, 0xffff0000, v22
+; GFX11-NEXT: v_dual_add_f32 v10, v10, v26 :: v_dual_and_b32 v29, 0xffff0000, v29
+; GFX11-NEXT: v_and_b32_e32 v27, 0xffff0000, v27
+; GFX11-NEXT: v_add_f32_e32 v26, v52, v51
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4)
+; GFX11-NEXT: v_add_f32_e32 v6, v6, v22
+; GFX11-NEXT: v_lshlrev_b32_e32 v36, 16, v13
+; GFX11-NEXT: v_and_b32_e32 v13, 0xffff0000, v13
+; GFX11-NEXT: v_dual_add_f32 v11, v11, v27 :: v_dual_lshlrev_b32 v34, 16, v14
+; GFX11-NEXT: v_dual_add_f32 v22, v68, v67 :: v_dual_lshlrev_b32 v33, 16, v30
+; GFX11-NEXT: v_dual_add_f32 v27, v50, v49 :: v_dual_lshlrev_b32 v38, 16, v12
+; GFX11-NEXT: v_and_b32_e32 v14, 0xffff0000, v14
+; GFX11-NEXT: v_dual_add_f32 v25, v54, v53 :: v_dual_and_b32 v12, 0xffff0000, v12
+; GFX11-NEXT: v_dual_add_f32 v13, v13, v29 :: v_dual_and_b32 v30, 0xffff0000, v30
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_1) | instid1(VALU_DEP_4)
+; GFX11-NEXT: v_add_f32_e32 v29, v38, v37
+; GFX11-NEXT: v_lshlrev_b32_e32 v31, 16, v15
+; GFX11-NEXT: v_dual_add_f32 v12, v12, v28 :: v_dual_and_b32 v15, 0xffff0000, v15
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_3) | instid1(VALU_DEP_4)
; GFX11-NEXT: v_add_f32_e32 v14, v14, v30
+; GFX11-NEXT: v_add_f32_e32 v28, v48, v39
; GFX11-NEXT: v_dual_add_f32 v30, v36, v35 :: v_dual_add_f32 v33, v34, v33
-; GFX11-NEXT: v_and_or_b32 v48, v13, s0, 0x400000
-; GFX11-NEXT: v_bfe_u32 v49, v29, 16, 1
+; GFX11-NEXT: v_bfe_u32 v39, v13, 16, 1
; GFX11-NEXT: v_bfe_u32 v35, v14, 16, 1
-; GFX11-NEXT: v_and_or_b32 v36, v14, s0, 0x400000
+; GFX11-NEXT: v_or_b32_e32 v36, 0x400000, v14
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4)
+; GFX11-NEXT: v_bfe_u32 v37, v30, 16, 1
; GFX11-NEXT: v_bfe_u32 v16, v33, 16, 1
-; GFX11-NEXT: v_and_or_b32 v34, v33, s0, 0x400000
+; GFX11-NEXT: v_or_b32_e32 v34, 0x400000, v33
; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v33, v33
-; GFX11-NEXT: v_bfe_u32 v37, v30, 16, 1
; GFX11-NEXT: v_add3_u32 v35, v35, v14, 0x7fff
+; GFX11-NEXT: v_or_b32_e32 v38, 0x400000, v30
; GFX11-NEXT: v_add3_u32 v16, v16, v33, 0x7fff
-; GFX11-NEXT: v_and_or_b32 v38, v30, s0, 0x400000
-; GFX11-NEXT: v_add3_u32 v39, v39, v13, 0x7fff
; GFX11-NEXT: v_add3_u32 v37, v37, v30, 0x7fff
-; GFX11-NEXT: v_and_or_b32 v50, v29, s0, 0x400000
+; GFX11-NEXT: v_or_b32_e32 v48, 0x400000, v13
+; GFX11-NEXT: v_bfe_u32 v49, v29, 16, 1
+; GFX11-NEXT: v_add3_u32 v39, v39, v13, 0x7fff
; GFX11-NEXT: v_cndmask_b32_e32 v16, v16, v34, vcc_lo
; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v14, v14
+; GFX11-NEXT: v_or_b32_e32 v50, 0x400000, v29
; GFX11-NEXT: v_bfe_u32 v51, v12, 16, 1
; GFX11-NEXT: v_add3_u32 v49, v49, v29, 0x7fff
-; GFX11-NEXT: v_and_or_b32 v52, v12, s0, 0x400000
-; GFX11-NEXT: v_bfe_u32 v53, v28, 16, 1
+; GFX11-NEXT: v_or_b32_e32 v52, 0x400000, v12
; GFX11-NEXT: v_cndmask_b32_e32 v14, v35, v36, vcc_lo
; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v30, v30
+; GFX11-NEXT: v_bfe_u32 v53, v28, 16, 1
; GFX11-NEXT: v_add3_u32 v51, v51, v12, 0x7fff
-; GFX11-NEXT: v_and_or_b32 v54, v28, s0, 0x400000
+; GFX11-NEXT: v_or_b32_e32 v54, 0x400000, v28
; GFX11-NEXT: v_bfe_u32 v55, v11, 16, 1
-; GFX11-NEXT: v_add3_u32 v53, v53, v28, 0x7fff
; GFX11-NEXT: v_cndmask_b32_e32 v30, v37, v38, vcc_lo
; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v13, v13
-; GFX11-NEXT: v_and_or_b32 v64, v11, s0, 0x400000
+; GFX11-NEXT: v_add3_u32 v53, v53, v28, 0x7fff
+; GFX11-NEXT: v_or_b32_e32 v64, 0x400000, v11
; GFX11-NEXT: v_bfe_u32 v65, v27, 16, 1
; GFX11-NEXT: v_add3_u32 v55, v55, v11, 0x7fff
-; GFX11-NEXT: v_and_or_b32 v66, v27, s0, 0x400000
; GFX11-NEXT: v_cndmask_b32_e32 v13, v39, v48, vcc_lo
; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v29, v29
+; GFX11-NEXT: v_or_b32_e32 v66, 0x400000, v27
; GFX11-NEXT: v_bfe_u32 v67, v10, 16, 1
; GFX11-NEXT: v_add3_u32 v65, v65, v27, 0x7fff
-; GFX11-NEXT: v_and_or_b32 v68, v10, s0, 0x400000
-; GFX11-NEXT: v_bfe_u32 v69, v26, 16, 1
+; GFX11-NEXT: v_or_b32_e32 v68, 0x400000, v10
; GFX11-NEXT: v_cndmask_b32_e32 v29, v49, v50, vcc_lo
; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v12, v12
+; GFX11-NEXT: v_bfe_u32 v69, v26, 16, 1
; GFX11-NEXT: v_add3_u32 v67, v67, v10, 0x7fff
-; GFX11-NEXT: v_and_or_b32 v70, v26, s0, 0x400000
-; GFX11-NEXT: v_bfe_u32 v71, v9, 16, 1
-; GFX11-NEXT: v_add3_u32 v69, v69, v26, 0x7fff
+; GFX11-NEXT: v_or_b32_e32 v70, 0x400000, v26
+; GFX11-NEXT: v_bfe_u32 v81, v25, 16, 1
; GFX11-NEXT: v_cndmask_b32_e32 v12, v51, v52, vcc_lo
; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v28, v28
-; GFX11-NEXT: v_and_or_b32 v80, v9, s0, 0x400000
-; GFX11-NEXT: v_bfe_u32 v81, v25, 16, 1
-; GFX11-NEXT: v_add3_u32 v71, v71, v9, 0x7fff
-; GFX11-NEXT: v_and_or_b32 v82, v25, s0, 0x400000
+; GFX11-NEXT: v_add3_u32 v69, v69, v26, 0x7fff
+; GFX11-NEXT: v_or_b32_e32 v82, 0x400000, v25
+; GFX11-NEXT: v_bfe_u32 v83, v8, 16, 1
+; GFX11-NEXT: v_add3_u32 v81, v81, v25, 0x7fff
; GFX11-NEXT: v_cndmask_b32_e32 v28, v53, v54, vcc_lo
; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v11, v11
-; GFX11-NEXT: v_add3_u32 v81, v81, v25, 0x7fff
-; GFX11-NEXT: v_bfe_u32 v101, v22, 16, 1
-; GFX11-NEXT: v_and_or_b32 v102, v22, s0, 0x400000
-; GFX11-NEXT: v_bfe_u32 v115, v4, 16, 1
+; GFX11-NEXT: v_or_b32_e32 v84, 0x400000, v8
+; GFX11-NEXT: v_add3_u32 v83, v83, v8, 0x7fff
+; GFX11-NEXT: v_bfe_u32 v99, v6, 16, 1
+; GFX11-NEXT: v_or_b32_e32 v100, 0x400000, v6
; GFX11-NEXT: v_cndmask_b32_e32 v11, v55, v64, vcc_lo
; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v27, v27
-; GFX11-NEXT: v_add3_u32 v101, v101, v22, 0x7fff
-; GFX11-NEXT: v_and_or_b32 v116, v4, s0, 0x400000
-; GFX11-NEXT: v_bfe_u32 v117, v20, 16, 1
-; GFX11-NEXT: v_add3_u32 v115, v115, v4, 0x7fff
+; GFX11-NEXT: v_bfe_u32 v101, v22, 16, 1
+; GFX11-NEXT: v_add3_u32 v99, v99, v6, 0x7fff
+; GFX11-NEXT: v_or_b32_e32 v102, 0x400000, v22
+; GFX11-NEXT: v_bfe_u32 v113, v21, 16, 1
; GFX11-NEXT: v_cndmask_b32_e32 v27, v65, v66, vcc_lo
; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v10, v10
-; GFX11-NEXT: v_and_or_b32 v118, v20, s0, 0x400000
-; GFX11-NEXT: v_bfe_u32 v129, v19, 16, 1
-; GFX11-NEXT: v_add3_u32 v117, v117, v20, 0x7fff
-; GFX11-NEXT: v_and_or_b32 v130, v19, s0, 0x400000
+; GFX11-NEXT: v_add3_u32 v101, v101, v22, 0x7fff
+; GFX11-NEXT: v_or_b32_e32 v114, 0x400000, v21
+; GFX11-NEXT: v_bfe_u32 v115, v4, 16, 1
+; GFX11-NEXT: v_add3_u32 v113, v113, v21, 0x7fff
; GFX11-NEXT: v_cndmask_b32_e32 v10, v67, v68, vcc_lo
; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v26, v26
-; GFX11-NEXT: v_bfe_u32 v133, v18, 16, 1
-; GFX11-NEXT: v_add3_u32 v129, v129, v19, 0x7fff
-; GFX11-NEXT: v_and_or_b32 v134, v18, s0, 0x400000
-; GFX11-NEXT: v_bfe_u32 v145, v17, 16, 1
+; GFX11-NEXT: v_or_b32_e32 v116, 0x400000, v4
+; GFX11-NEXT: v_bfe_u32 v117, v20, 16, 1
+; GFX11-NEXT: v_add3_u32 v115, v115, v4, 0x7fff
+; GFX11-NEXT: v_or_b32_e32 v118, 0x400000, v20
; GFX11-NEXT: v_cndmask_b32_e32 v26, v69, v70, vcc_lo
; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v9, v9
-; GFX11-NEXT: v_add3_u32 v133, v133, v18, 0x7fff
-; GFX11-NEXT: v_and_or_b32 v146, v17, s0, 0x400000
+; GFX11-NEXT: v_add3_u32 v117, v117, v20, 0x7fff
+; GFX11-NEXT: v_bfe_u32 v133, v18, 16, 1
+; GFX11-NEXT: v_or_b32_e32 v134, 0x400000, v18
; GFX11-NEXT: v_bfe_u32 v147, v0, 16, 1
-; GFX11-NEXT: v_add3_u32 v145, v145, v17, 0x7fff
; GFX11-NEXT: v_cndmask_b32_e32 v9, v71, v80, vcc_lo
; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v25, v25
-; GFX11-NEXT: v_bfe_u32 v131, v2, 16, 1
-; GFX11-NEXT: v_and_or_b32 v33, v0, s0, 0x400000
+; GFX11-NEXT: v_add3_u32 v133, v133, v18, 0x7fff
+; GFX11-NEXT: v_or_b32_e32 v33, 0x400000, v0
; GFX11-NEXT: v_add3_u32 v147, v147, v0, 0x7fff
-; GFX11-NEXT: v_and_or_b32 v132, v2, s0, 0x400000
+; GFX11-NEXT: v_bfe_u32 v131, v2, 16, 1
; GFX11-NEXT: v_cndmask_b32_e32 v25, v81, v82, vcc_lo
; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v8, v8
-; GFX11-NEXT: v_add3_u32 v131, v131, v2, 0x7fff
-; GFX11-NEXT: v_bfe_u32 v119, v3, 16, 1
-; GFX11-NEXT: v_and_or_b32 v128, v3, s0, 0x400000
+; GFX11-NEXT: v_or_b32_e32 v132, 0x400000, v2
; GFX11-NEXT: v_perm_b32 v9, v9, v26, 0x7060302
+; GFX11-NEXT: v_add3_u32 v131, v131, v2, 0x7fff
+; GFX11-NEXT: v_perm_b32 v10, v10, v27, 0x7060302
; GFX11-NEXT: v_cndmask_b32_e32 v8, v83, v84, vcc_lo
; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v24, v24
-; GFX11-NEXT: v_add3_u32 v119, v119, v3, 0x7fff
-; GFX11-NEXT: v_perm_b32 v10, v10, v27, 0x7060302
; GFX11-NEXT: v_perm_b32 v11, v11, v28, 0x7060302
+; GFX11-NEXT: v_perm_b32 v12, v12, v29, 0x7060302
+; GFX11-NEXT: v_perm_b32 v13, v13, v30, 0x7060302
; GFX11-NEXT: v_perm_b32 v8, v8, v25, 0x7060302
; GFX11-NEXT: v_cndmask_b32_e32 v24, v85, v86, vcc_lo
; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v7, v7
-; GFX11-NEXT: v_perm_b32 v12, v12, v29, 0x7060302
-; GFX11-NEXT: v_perm_b32 v13, v13, v30, 0x7060302
; GFX11-NEXT: v_perm_b32 v14, v14, v16, 0x7060302
; GFX11-NEXT: v_cndmask_b32_e32 v7, v87, v96, vcc_lo
; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v23, v23
@@ -12622,22 +12475,21 @@ define <32 x bfloat> @v_fadd_v32bf16(<32 x bfloat> %a, <32 x bfloat> %b) {
; GFX11-NEXT: v_perm_b32 v0, v0, v17, 0x7060302
; GFX11-NEXT: v_cndmask_b32_e32 v2, v131, v132, vcc_lo
; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX11-NEXT: v_perm_b32 v2, v2, v19, 0x7060302
; GFX11-NEXT: v_cndmask_b32_e32 v3, v119, v128, vcc_lo
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_4) | instid1(VALU_DEP_3)
; GFX11-NEXT: v_perm_b32 v3, v3, v20, 0x7060302
; GFX11-NEXT: s_waitcnt vmcnt(0)
-; GFX11-NEXT: v_lshlrev_b32_e32 v17, 16, v31
-; GFX11-NEXT: v_and_b32_e32 v18, 0xffff0000, v31
-; GFX11-NEXT: v_perm_b32 v2, v2, v19, 0x7060302
-; GFX11-NEXT: v_add_f32_e32 v17, v32, v17
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-NEXT: v_lshlrev_b32_e32 v17, 16, v32
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-NEXT: v_dual_add_f32 v17, v31, v17 :: v_dual_and_b32 v18, 0xffff0000, v32
; GFX11-NEXT: v_add_f32_e32 v15, v15, v18
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
; GFX11-NEXT: v_bfe_u32 v18, v17, 16, 1
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2)
; GFX11-NEXT: v_bfe_u32 v19, v15, 16, 1
-; GFX11-NEXT: v_and_or_b32 v20, v17, s0, 0x400000
+; GFX11-NEXT: v_or_b32_e32 v20, 0x400000, v17
; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v17, v17
-; GFX11-NEXT: v_and_or_b32 v21, v15, s0, 0x400000
+; GFX11-NEXT: v_or_b32_e32 v21, 0x400000, v15
; GFX11-NEXT: v_add3_u32 v18, v18, v17, 0x7fff
; GFX11-NEXT: v_add3_u32 v19, v19, v15, 0x7fff
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_3)
@@ -12678,8 +12530,7 @@ define bfloat @v_fadd_bf16_fpimm_0(bfloat %arg0) {
; GFX8-NEXT: v_bfe_u32 v1, v0, 16, 1
; GFX8-NEXT: v_add_u32_e32 v1, vcc, v1, v0
; GFX8-NEXT: v_add_u32_e32 v1, vcc, 0x7fff, v1
-; GFX8-NEXT: v_and_b32_e32 v2, 0xff800000, v0
-; GFX8-NEXT: v_or_b32_e32 v2, 0x400000, v2
+; GFX8-NEXT: v_or_b32_e32 v2, 0x400000, v0
; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v0, v0
; GFX8-NEXT: v_cndmask_b32_e32 v0, v1, v2, vcc
; GFX8-NEXT: v_lshrrev_b32_e32 v0, 16, v0
@@ -12692,9 +12543,8 @@ define bfloat @v_fadd_bf16_fpimm_0(bfloat %arg0) {
; GFX9-NEXT: v_add_f32_e32 v0, 1.0, v0
; GFX9-NEXT: v_bfe_u32 v1, v0, 16, 1
; GFX9-NEXT: s_movk_i32 s4, 0x7fff
-; GFX9-NEXT: v_and_b32_e32 v2, 0xff800000, v0
; GFX9-NEXT: v_add3_u32 v1, v1, v0, s4
-; GFX9-NEXT: v_or_b32_e32 v2, 0x400000, v2
+; GFX9-NEXT: v_or_b32_e32 v2, 0x400000, v0
; GFX9-NEXT: v_cmp_u_f32_e32 vcc, v0, v0
; GFX9-NEXT: v_cndmask_b32_e32 v0, v1, v2, vcc
; GFX9-NEXT: v_lshrrev_b32_e32 v0, 16, v0
@@ -12704,10 +12554,9 @@ define bfloat @v_fadd_bf16_fpimm_0(bfloat %arg0) {
; GFX10: ; %bb.0:
; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX10-NEXT: v_lshlrev_b32_e32 v0, 16, v0
-; GFX10-NEXT: s_mov_b32 s4, 0xff800000
; GFX10-NEXT: v_add_f32_e32 v0, 1.0, v0
; GFX10-NEXT: v_bfe_u32 v1, v0, 16, 1
-; GFX10-NEXT: v_and_or_b32 v2, v0, s4, 0x400000
+; GFX10-NEXT: v_or_b32_e32 v2, 0x400000, v0
; GFX10-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0
; GFX10-NEXT: v_add3_u32 v1, v1, v0, 0x7fff
; GFX10-NEXT: v_cndmask_b32_e32 v0, v1, v2, vcc_lo
@@ -12718,11 +12567,10 @@ define bfloat @v_fadd_bf16_fpimm_0(bfloat %arg0) {
; GFX11: ; %bb.0:
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-NEXT: v_lshlrev_b32_e32 v0, 16, v0
-; GFX11-NEXT: s_mov_b32 s0, 0xff800000
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
; GFX11-NEXT: v_add_f32_e32 v0, 1.0, v0
; GFX11-NEXT: v_bfe_u32 v1, v0, 16, 1
-; GFX11-NEXT: v_and_or_b32 v2, v0, s0, 0x400000
+; GFX11-NEXT: v_or_b32_e32 v2, 0x400000, v0
; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1)
; GFX11-NEXT: v_add3_u32 v1, v1, v0, 0x7fff
@@ -12761,8 +12609,7 @@ define bfloat @v_fadd_bf16_fpimm_1(bfloat %arg0) {
; GFX8-NEXT: v_bfe_u32 v1, v0, 16, 1
; GFX8-NEXT: v_add_u32_e32 v1, vcc, v1, v0
; GFX8-NEXT: v_add_u32_e32 v1, vcc, 0x7fff, v1
-; GFX8-NEXT: v_and_b32_e32 v2, 0xff800000, v0
-; GFX8-NEXT: v_or_b32_e32 v2, 0x400000, v2
+; GFX8-NEXT: v_or_b32_e32 v2, 0x400000, v0
; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v0, v0
; GFX8-NEXT: v_cndmask_b32_e32 v0, v1, v2, vcc
; GFX8-NEXT: v_lshrrev_b32_e32 v0, 16, v0
@@ -12775,9 +12622,8 @@ define bfloat @v_fadd_bf16_fpimm_1(bfloat %arg0) {
; GFX9-NEXT: v_add_f32_e32 v0, 0x42280000, v0
; GFX9-NEXT: v_bfe_u32 v1, v0, 16, 1
; GFX9-NEXT: s_movk_i32 s4, 0x7fff
-; GFX9-NEXT: v_and_b32_e32 v2, 0xff800000, v0
; GFX9-NEXT: v_add3_u32 v1, v1, v0, s4
-; GFX9-NEXT: v_or_b32_e32 v2, 0x400000, v2
+; GFX9-NEXT: v_or_b32_e32 v2, 0x400000, v0
; GFX9-NEXT: v_cmp_u_f32_e32 vcc, v0, v0
; GFX9-NEXT: v_cndmask_b32_e32 v0, v1, v2, vcc
; GFX9-NEXT: v_lshrrev_b32_e32 v0, 16, v0
@@ -12787,10 +12633,9 @@ define bfloat @v_fadd_bf16_fpimm_1(bfloat %arg0) {
; GFX10: ; %bb.0:
; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX10-NEXT: v_lshlrev_b32_e32 v0, 16, v0
-; GFX10-NEXT: s_mov_b32 s4, 0xff800000
; GFX10-NEXT: v_add_f32_e32 v0, 0x42280000, v0
; GFX10-NEXT: v_bfe_u32 v1, v0, 16, 1
-; GFX10-NEXT: v_and_or_b32 v2, v0, s4, 0x400000
+; GFX10-NEXT: v_or_b32_e32 v2, 0x400000, v0
; GFX10-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0
; GFX10-NEXT: v_add3_u32 v1, v1, v0, 0x7fff
; GFX10-NEXT: v_cndmask_b32_e32 v0, v1, v2, vcc_lo
@@ -12801,11 +12646,10 @@ define bfloat @v_fadd_bf16_fpimm_1(bfloat %arg0) {
; GFX11: ; %bb.0:
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-NEXT: v_lshlrev_b32_e32 v0, 16, v0
-; GFX11-NEXT: s_mov_b32 s0, 0xff800000
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
; GFX11-NEXT: v_add_f32_e32 v0, 0x42280000, v0
; GFX11-NEXT: v_bfe_u32 v1, v0, 16, 1
-; GFX11-NEXT: v_and_or_b32 v2, v0, s0, 0x400000
+; GFX11-NEXT: v_or_b32_e32 v2, 0x400000, v0
; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1)
; GFX11-NEXT: v_add3_u32 v1, v1, v0, 0x7fff
@@ -12849,8 +12693,7 @@ define bfloat @v_fsub_bf16(bfloat %a, bfloat %b) {
; GFX8-NEXT: v_bfe_u32 v1, v0, 16, 1
; GFX8-NEXT: v_add_u32_e32 v1, vcc, v1, v0
; GFX8-NEXT: v_add_u32_e32 v1, vcc, 0x7fff, v1
-; GFX8-NEXT: v_and_b32_e32 v2, 0xff800000, v0
-; GFX8-NEXT: v_or_b32_e32 v2, 0x400000, v2
+; GFX8-NEXT: v_or_b32_e32 v2, 0x400000, v0
; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v0, v0
; GFX8-NEXT: v_cndmask_b32_e32 v0, v1, v2, vcc
; GFX8-NEXT: v_lshrrev_b32_e32 v0, 16, v0
@@ -12864,9 +12707,8 @@ define bfloat @v_fsub_bf16(bfloat %a, bfloat %b) {
; GFX9-NEXT: v_sub_f32_e32 v0, v0, v1
; GFX9-NEXT: v_bfe_u32 v1, v0, 16, 1
; GFX9-NEXT: s_movk_i32 s4, 0x7fff
-; GFX9-NEXT: v_and_b32_e32 v2, 0xff800000, v0
; GFX9-NEXT: v_add3_u32 v1, v1, v0, s4
-; GFX9-NEXT: v_or_b32_e32 v2, 0x400000, v2
+; GFX9-NEXT: v_or_b32_e32 v2, 0x400000, v0
; GFX9-NEXT: v_cmp_u_f32_e32 vcc, v0, v0
; GFX9-NEXT: v_cndmask_b32_e32 v0, v1, v2, vcc
; GFX9-NEXT: v_lshrrev_b32_e32 v0, 16, v0
@@ -12877,10 +12719,9 @@ define bfloat @v_fsub_bf16(bfloat %a, bfloat %b) {
; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX10-NEXT: v_lshlrev_b32_e32 v1, 16, v1
; GFX10-NEXT: v_lshlrev_b32_e32 v0, 16, v0
-; GFX10-NEXT: s_mov_b32 s4, 0xff800000
; GFX10-NEXT: v_sub_f32_e32 v0, v0, v1
; GFX10-NEXT: v_bfe_u32 v1, v0, 16, 1
-; GFX10-NEXT: v_and_or_b32 v2, v0, s4, 0x400000
+; GFX10-NEXT: v_or_b32_e32 v2, 0x400000, v0
; GFX10-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0
; GFX10-NEXT: v_add3_u32 v1, v1, v0, 0x7fff
; GFX10-NEXT: v_cndmask_b32_e32 v0, v1, v2, vcc_lo
@@ -12892,11 +12733,10 @@ define bfloat @v_fsub_bf16(bfloat %a, bfloat %b) {
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-NEXT: v_lshlrev_b32_e32 v1, 16, v1
; GFX11-NEXT: v_lshlrev_b32_e32 v0, 16, v0
-; GFX11-NEXT: s_mov_b32 s0, 0xff800000
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
; GFX11-NEXT: v_sub_f32_e32 v0, v0, v1
; GFX11-NEXT: v_bfe_u32 v1, v0, 16, 1
-; GFX11-NEXT: v_and_or_b32 v2, v0, s0, 0x400000
+; GFX11-NEXT: v_or_b32_e32 v2, 0x400000, v0
; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1)
; GFX11-NEXT: v_add3_u32 v1, v1, v0, 0x7fff
@@ -12954,16 +12794,14 @@ define <2 x bfloat> @v_fsub_v2bf16(<2 x bfloat> %a, <2 x bfloat> %b) {
; GFX8-NEXT: v_and_b32_e32 v1, 0xffff0000, v1
; GFX8-NEXT: v_and_b32_e32 v0, 0xffff0000, v0
; GFX8-NEXT: v_add_u32_e32 v3, vcc, 0x7fff, v3
-; GFX8-NEXT: v_and_b32_e32 v4, 0xff800000, v2
; GFX8-NEXT: v_sub_f32_e32 v0, v0, v1
-; GFX8-NEXT: v_or_b32_e32 v4, 0x400000, v4
+; GFX8-NEXT: v_or_b32_e32 v4, 0x400000, v2
; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v2, v2
; GFX8-NEXT: v_bfe_u32 v1, v0, 16, 1
; GFX8-NEXT: v_cndmask_b32_e32 v2, v3, v4, vcc
; GFX8-NEXT: v_add_u32_e32 v1, vcc, v1, v0
; GFX8-NEXT: v_add_u32_e32 v1, vcc, 0x7fff, v1
-; GFX8-NEXT: v_and_b32_e32 v3, 0xff800000, v0
-; GFX8-NEXT: v_or_b32_e32 v3, 0x400000, v3
+; GFX8-NEXT: v_or_b32_e32 v3, 0x400000, v0
; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v0, v0
; GFX8-NEXT: v_cndmask_b32_e32 v0, v1, v3, vcc
; GFX8-NEXT: v_lshrrev_b32_e32 v0, 16, v0
@@ -12976,20 +12814,18 @@ define <2 x bfloat> @v_fsub_v2bf16(<2 x bfloat> %a, <2 x bfloat> %b) {
; GFX9-NEXT: v_lshlrev_b32_e32 v2, 16, v1
; GFX9-NEXT: v_lshlrev_b32_e32 v3, 16, v0
; GFX9-NEXT: v_sub_f32_e32 v2, v3, v2
-; GFX9-NEXT: v_bfe_u32 v3, v2, 16, 1
-; GFX9-NEXT: s_movk_i32 s4, 0x7fff
-; GFX9-NEXT: v_and_b32_e32 v4, 0xff800000, v2
; GFX9-NEXT: v_and_b32_e32 v1, 0xffff0000, v1
; GFX9-NEXT: v_and_b32_e32 v0, 0xffff0000, v0
+; GFX9-NEXT: v_bfe_u32 v3, v2, 16, 1
+; GFX9-NEXT: s_movk_i32 s4, 0x7fff
+; GFX9-NEXT: v_sub_f32_e32 v0, v0, v1
; GFX9-NEXT: v_add3_u32 v3, v3, v2, s4
-; GFX9-NEXT: v_or_b32_e32 v4, 0x400000, v4
+; GFX9-NEXT: v_or_b32_e32 v4, 0x400000, v2
; GFX9-NEXT: v_cmp_u_f32_e32 vcc, v2, v2
-; GFX9-NEXT: v_sub_f32_e32 v0, v0, v1
-; GFX9-NEXT: v_cndmask_b32_e32 v2, v3, v4, vcc
; GFX9-NEXT: v_bfe_u32 v1, v0, 16, 1
-; GFX9-NEXT: v_and_b32_e32 v3, 0xff800000, v0
+; GFX9-NEXT: v_cndmask_b32_e32 v2, v3, v4, vcc
; GFX9-NEXT: v_add3_u32 v1, v1, v0, s4
-; GFX9-NEXT: v_or_b32_e32 v3, 0x400000, v3
+; GFX9-NEXT: v_or_b32_e32 v3, 0x400000, v0
; GFX9-NEXT: v_cmp_u_f32_e32 vcc, v0, v0
; GFX9-NEXT: v_cndmask_b32_e32 v0, v1, v3, vcc
; GFX9-NEXT: s_mov_b32 s4, 0x7060302
@@ -13003,14 +12839,13 @@ define <2 x bfloat> @v_fsub_v2bf16(<2 x bfloat> %a, <2 x bfloat> %b) {
; GFX10-NEXT: v_lshlrev_b32_e32 v3, 16, v0
; GFX10-NEXT: v_and_b32_e32 v1, 0xffff0000, v1
; GFX10-NEXT: v_and_b32_e32 v0, 0xffff0000, v0
-; GFX10-NEXT: s_mov_b32 s4, 0xff800000
; GFX10-NEXT: v_sub_f32_e32 v2, v3, v2
; GFX10-NEXT: v_sub_f32_e32 v0, v0, v1
; GFX10-NEXT: v_bfe_u32 v1, v2, 16, 1
-; GFX10-NEXT: v_and_or_b32 v4, v2, s4, 0x400000
+; GFX10-NEXT: v_or_b32_e32 v4, 0x400000, v2
; GFX10-NEXT: v_bfe_u32 v3, v0, 16, 1
; GFX10-NEXT: v_cmp_u_f32_e32 vcc_lo, v2, v2
-; GFX10-NEXT: v_and_or_b32 v5, v0, s4, 0x400000
+; GFX10-NEXT: v_or_b32_e32 v5, 0x400000, v0
; GFX10-NEXT: v_add3_u32 v1, v1, v2, 0x7fff
; GFX10-NEXT: v_add3_u32 v3, v3, v0, 0x7fff
; GFX10-NEXT: v_cndmask_b32_e32 v1, v1, v4, vcc_lo
@@ -13026,16 +12861,15 @@ define <2 x bfloat> @v_fsub_v2bf16(<2 x bfloat> %a, <2 x bfloat> %b) {
; GFX11-NEXT: v_and_b32_e32 v1, 0xffff0000, v1
; GFX11-NEXT: v_lshlrev_b32_e32 v3, 16, v0
; GFX11-NEXT: v_and_b32_e32 v0, 0xffff0000, v0
-; GFX11-NEXT: s_mov_b32 s0, 0xff800000
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_3)
; GFX11-NEXT: v_sub_f32_e32 v0, v0, v1
; GFX11-NEXT: v_sub_f32_e32 v2, v3, v2
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
; GFX11-NEXT: v_bfe_u32 v3, v0, 16, 1
; GFX11-NEXT: v_bfe_u32 v1, v2, 16, 1
-; GFX11-NEXT: v_and_or_b32 v4, v2, s0, 0x400000
+; GFX11-NEXT: v_or_b32_e32 v4, 0x400000, v2
; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v2, v2
-; GFX11-NEXT: v_and_or_b32 v5, v0, s0, 0x400000
+; GFX11-NEXT: v_or_b32_e32 v5, 0x400000, v0
; GFX11-NEXT: v_add3_u32 v3, v3, v0, 0x7fff
; GFX11-NEXT: v_add3_u32 v1, v1, v2, 0x7fff
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_4)
@@ -13105,8 +12939,7 @@ define <3 x bfloat> @v_fsub_v3bf16(<3 x bfloat> %a, <3 x bfloat> %b) {
; GFX8-NEXT: v_bfe_u32 v3, v1, 16, 1
; GFX8-NEXT: v_add_u32_e32 v3, vcc, v3, v1
; GFX8-NEXT: v_add_u32_e32 v3, vcc, 0x7fff, v3
-; GFX8-NEXT: v_and_b32_e32 v4, 0xff800000, v1
-; GFX8-NEXT: v_or_b32_e32 v4, 0x400000, v4
+; GFX8-NEXT: v_or_b32_e32 v4, 0x400000, v1
; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v1, v1
; GFX8-NEXT: v_cndmask_b32_e32 v1, v3, v4, vcc
; GFX8-NEXT: v_lshlrev_b32_e32 v3, 16, v2
@@ -13118,16 +12951,14 @@ define <3 x bfloat> @v_fsub_v3bf16(<3 x bfloat> %a, <3 x bfloat> %b) {
; GFX8-NEXT: v_and_b32_e32 v2, 0xffff0000, v2
; GFX8-NEXT: v_and_b32_e32 v0, 0xffff0000, v0
; GFX8-NEXT: v_add_u32_e32 v4, vcc, s4, v4
-; GFX8-NEXT: v_and_b32_e32 v5, 0xff800000, v3
; GFX8-NEXT: v_sub_f32_e32 v0, v0, v2
-; GFX8-NEXT: v_or_b32_e32 v5, 0x400000, v5
+; GFX8-NEXT: v_or_b32_e32 v5, 0x400000, v3
; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v3, v3
; GFX8-NEXT: v_bfe_u32 v2, v0, 16, 1
; GFX8-NEXT: v_cndmask_b32_e32 v3, v4, v5, vcc
; GFX8-NEXT: v_add_u32_e32 v2, vcc, v2, v0
; GFX8-NEXT: v_add_u32_e32 v2, vcc, 0x7fff, v2
-; GFX8-NEXT: v_and_b32_e32 v4, 0xff800000, v0
-; GFX8-NEXT: v_or_b32_e32 v4, 0x400000, v4
+; GFX8-NEXT: v_or_b32_e32 v4, 0x400000, v0
; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v0, v0
; GFX8-NEXT: v_cndmask_b32_e32 v0, v2, v4, vcc
; GFX8-NEXT: v_lshrrev_b32_e32 v0, 16, v0
@@ -13143,27 +12974,24 @@ define <3 x bfloat> @v_fsub_v3bf16(<3 x bfloat> %a, <3 x bfloat> %b) {
; GFX9-NEXT: v_sub_f32_e32 v1, v1, v3
; GFX9-NEXT: v_bfe_u32 v3, v1, 16, 1
; GFX9-NEXT: s_movk_i32 s4, 0x7fff
-; GFX9-NEXT: v_and_b32_e32 v4, 0xff800000, v1
; GFX9-NEXT: v_add3_u32 v3, v3, v1, s4
-; GFX9-NEXT: v_or_b32_e32 v4, 0x400000, v4
+; GFX9-NEXT: v_or_b32_e32 v4, 0x400000, v1
; GFX9-NEXT: v_cmp_u_f32_e32 vcc, v1, v1
; GFX9-NEXT: v_cndmask_b32_e32 v1, v3, v4, vcc
; GFX9-NEXT: v_lshlrev_b32_e32 v3, 16, v2
; GFX9-NEXT: v_lshlrev_b32_e32 v4, 16, v0
; GFX9-NEXT: v_sub_f32_e32 v3, v4, v3
-; GFX9-NEXT: v_bfe_u32 v4, v3, 16, 1
-; GFX9-NEXT: v_and_b32_e32 v5, 0xff800000, v3
; GFX9-NEXT: v_and_b32_e32 v2, 0xffff0000, v2
; GFX9-NEXT: v_and_b32_e32 v0, 0xffff0000, v0
+; GFX9-NEXT: v_bfe_u32 v4, v3, 16, 1
+; GFX9-NEXT: v_sub_f32_e32 v0, v0, v2
; GFX9-NEXT: v_add3_u32 v4, v4, v3, s4
-; GFX9-NEXT: v_or_b32_e32 v5, 0x400000, v5
+; GFX9-NEXT: v_or_b32_e32 v5, 0x400000, v3
; GFX9-NEXT: v_cmp_u_f32_e32 vcc, v3, v3
-; GFX9-NEXT: v_sub_f32_e32 v0, v0, v2
-; GFX9-NEXT: v_cndmask_b32_e32 v3, v4, v5, vcc
; GFX9-NEXT: v_bfe_u32 v2, v0, 16, 1
-; GFX9-NEXT: v_and_b32_e32 v4, 0xff800000, v0
+; GFX9-NEXT: v_cndmask_b32_e32 v3, v4, v5, vcc
; GFX9-NEXT: v_add3_u32 v2, v2, v0, s4
-; GFX9-NEXT: v_or_b32_e32 v4, 0x400000, v4
+; GFX9-NEXT: v_or_b32_e32 v4, 0x400000, v0
; GFX9-NEXT: v_cmp_u_f32_e32 vcc, v0, v0
; GFX9-NEXT: v_cndmask_b32_e32 v0, v2, v4, vcc
; GFX9-NEXT: s_mov_b32 s4, 0x7060302
@@ -13181,18 +13009,17 @@ define <3 x bfloat> @v_fsub_v3bf16(<3 x bfloat> %a, <3 x bfloat> %b) {
; GFX10-NEXT: v_lshlrev_b32_e32 v3, 16, v3
; GFX10-NEXT: v_lshlrev_b32_e32 v1, 16, v1
; GFX10-NEXT: v_sub_f32_e32 v4, v5, v4
-; GFX10-NEXT: s_mov_b32 s4, 0xff800000
; GFX10-NEXT: v_sub_f32_e32 v0, v0, v2
; GFX10-NEXT: v_sub_f32_e32 v1, v1, v3
; GFX10-NEXT: v_bfe_u32 v2, v4, 16, 1
-; GFX10-NEXT: v_and_or_b32 v7, v4, s4, 0x400000
+; GFX10-NEXT: v_or_b32_e32 v7, 0x400000, v4
; GFX10-NEXT: v_bfe_u32 v5, v0, 16, 1
; GFX10-NEXT: v_cmp_u_f32_e32 vcc_lo, v4, v4
; GFX10-NEXT: v_bfe_u32 v3, v1, 16, 1
; GFX10-NEXT: v_add3_u32 v2, v2, v4, 0x7fff
-; GFX10-NEXT: v_and_or_b32 v8, v0, s4, 0x400000
+; GFX10-NEXT: v_or_b32_e32 v8, 0x400000, v0
; GFX10-NEXT: v_add3_u32 v5, v5, v0, 0x7fff
-; GFX10-NEXT: v_and_or_b32 v6, v1, s4, 0x400000
+; GFX10-NEXT: v_or_b32_e32 v6, 0x400000, v1
; GFX10-NEXT: v_add3_u32 v3, v3, v1, 0x7fff
; GFX10-NEXT: v_cndmask_b32_e32 v2, v2, v7, vcc_lo
; GFX10-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0
@@ -13276,17 +13103,15 @@ define <4 x bfloat> @v_fsub_v4bf16(<4 x bfloat> %a, <4 x bfloat> %b) {
; GFX8-NEXT: v_and_b32_e32 v3, 0xffff0000, v3
; GFX8-NEXT: v_and_b32_e32 v1, 0xffff0000, v1
; GFX8-NEXT: v_add_u32_e32 v5, vcc, 0x7fff, v5
-; GFX8-NEXT: v_and_b32_e32 v6, 0xff800000, v4
; GFX8-NEXT: v_sub_f32_e32 v1, v1, v3
-; GFX8-NEXT: v_or_b32_e32 v6, 0x400000, v6
+; GFX8-NEXT: v_or_b32_e32 v6, 0x400000, v4
; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v4, v4
; GFX8-NEXT: v_bfe_u32 v3, v1, 16, 1
; GFX8-NEXT: s_movk_i32 s4, 0x7fff
; GFX8-NEXT: v_cndmask_b32_e32 v4, v5, v6, vcc
; GFX8-NEXT: v_add_u32_e32 v3, vcc, v3, v1
; GFX8-NEXT: v_add_u32_e32 v3, vcc, s4, v3
-; GFX8-NEXT: v_and_b32_e32 v5, 0xff800000, v1
-; GFX8-NEXT: v_or_b32_e32 v5, 0x400000, v5
+; GFX8-NEXT: v_or_b32_e32 v5, 0x400000, v1
; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v1, v1
; GFX8-NEXT: v_cndmask_b32_e32 v1, v3, v5, vcc
; GFX8-NEXT: v_lshlrev_b32_e32 v3, 16, v2
@@ -13297,16 +13122,14 @@ define <4 x bfloat> @v_fsub_v4bf16(<4 x bfloat> %a, <4 x bfloat> %b) {
; GFX8-NEXT: v_and_b32_e32 v2, 0xffff0000, v2
; GFX8-NEXT: v_and_b32_e32 v0, 0xffff0000, v0
; GFX8-NEXT: v_add_u32_e32 v5, vcc, s4, v5
-; GFX8-NEXT: v_and_b32_e32 v6, 0xff800000, v3
; GFX8-NEXT: v_sub_f32_e32 v0, v0, v2
-; GFX8-NEXT: v_or_b32_e32 v6, 0x400000, v6
+; GFX8-NEXT: v_or_b32_e32 v6, 0x400000, v3
; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v3, v3
; GFX8-NEXT: v_bfe_u32 v2, v0, 16, 1
; GFX8-NEXT: v_cndmask_b32_e32 v3, v5, v6, vcc
; GFX8-NEXT: v_add_u32_e32 v2, vcc, v2, v0
; GFX8-NEXT: v_add_u32_e32 v2, vcc, 0x7fff, v2
-; GFX8-NEXT: v_and_b32_e32 v5, 0xff800000, v0
-; GFX8-NEXT: v_or_b32_e32 v5, 0x400000, v5
+; GFX8-NEXT: v_or_b32_e32 v5, 0x400000, v0
; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v0, v0
; GFX8-NEXT: v_cndmask_b32_e32 v0, v2, v5, vcc
; GFX8-NEXT: v_lshrrev_b32_e32 v1, 16, v1
@@ -13321,38 +13144,34 @@ define <4 x bfloat> @v_fsub_v4bf16(<4 x bfloat> %a, <4 x bfloat> %b) {
; GFX9-NEXT: v_lshlrev_b32_e32 v4, 16, v3
; GFX9-NEXT: v_lshlrev_b32_e32 v5, 16, v1
; GFX9-NEXT: v_sub_f32_e32 v4, v5, v4
-; GFX9-NEXT: v_bfe_u32 v5, v4, 16, 1
-; GFX9-NEXT: s_movk_i32 s4, 0x7fff
-; GFX9-NEXT: v_and_b32_e32 v6, 0xff800000, v4
; GFX9-NEXT: v_and_b32_e32 v3, 0xffff0000, v3
; GFX9-NEXT: v_and_b32_e32 v1, 0xffff0000, v1
+; GFX9-NEXT: v_bfe_u32 v5, v4, 16, 1
+; GFX9-NEXT: s_movk_i32 s4, 0x7fff
+; GFX9-NEXT: v_sub_f32_e32 v1, v1, v3
; GFX9-NEXT: v_add3_u32 v5, v5, v4, s4
-; GFX9-NEXT: v_or_b32_e32 v6, 0x400000, v6
+; GFX9-NEXT: v_or_b32_e32 v6, 0x400000, v4
; GFX9-NEXT: v_cmp_u_f32_e32 vcc, v4, v4
-; GFX9-NEXT: v_sub_f32_e32 v1, v1, v3
-; GFX9-NEXT: v_cndmask_b32_e32 v4, v5, v6, vcc
; GFX9-NEXT: v_bfe_u32 v3, v1, 16, 1
-; GFX9-NEXT: v_and_b32_e32 v5, 0xff800000, v1
+; GFX9-NEXT: v_cndmask_b32_e32 v4, v5, v6, vcc
; GFX9-NEXT: v_add3_u32 v3, v3, v1, s4
-; GFX9-NEXT: v_or_b32_e32 v5, 0x400000, v5
+; GFX9-NEXT: v_or_b32_e32 v5, 0x400000, v1
; GFX9-NEXT: v_cmp_u_f32_e32 vcc, v1, v1
; GFX9-NEXT: v_cndmask_b32_e32 v1, v3, v5, vcc
; GFX9-NEXT: v_lshlrev_b32_e32 v3, 16, v2
; GFX9-NEXT: v_lshlrev_b32_e32 v5, 16, v0
; GFX9-NEXT: v_sub_f32_e32 v3, v5, v3
-; GFX9-NEXT: v_bfe_u32 v5, v3, 16, 1
-; GFX9-NEXT: v_and_b32_e32 v6, 0xff800000, v3
; GFX9-NEXT: v_and_b32_e32 v2, 0xffff0000, v2
; GFX9-NEXT: v_and_b32_e32 v0, 0xffff0000, v0
+; GFX9-NEXT: v_bfe_u32 v5, v3, 16, 1
+; GFX9-NEXT: v_sub_f32_e32 v0, v0, v2
; GFX9-NEXT: v_add3_u32 v5, v5, v3, s4
-; GFX9-NEXT: v_or_b32_e32 v6, 0x400000, v6
+; GFX9-NEXT: v_or_b32_e32 v6, 0x400000, v3
; GFX9-NEXT: v_cmp_u_f32_e32 vcc, v3, v3
-; GFX9-NEXT: v_sub_f32_e32 v0, v0, v2
-; GFX9-NEXT: v_cndmask_b32_e32 v3, v5, v6, vcc
; GFX9-NEXT: v_bfe_u32 v2, v0, 16, 1
-; GFX9-NEXT: v_and_b32_e32 v5, 0xff800000, v0
+; GFX9-NEXT: v_cndmask_b32_e32 v3, v5, v6, vcc
; GFX9-NEXT: v_add3_u32 v2, v2, v0, s4
-; GFX9-NEXT: v_or_b32_e32 v5, 0x400000, v5
+; GFX9-NEXT: v_or_b32_e32 v5, 0x400000, v0
; GFX9-NEXT: v_cmp_u_f32_e32 vcc, v0, v0
; GFX9-NEXT: v_cndmask_b32_e32 v0, v2, v5, vcc
; GFX9-NEXT: s_mov_b32 s4, 0x7060302
@@ -13373,31 +13192,30 @@ define <4 x bfloat> @v_fsub_v4bf16(<4 x bfloat> %a, <4 x bfloat> %b) {
; GFX10-NEXT: v_and_b32_e32 v2, 0xffff0000, v2
; GFX10-NEXT: v_and_b32_e32 v0, 0xffff0000, v0
; GFX10-NEXT: v_sub_f32_e32 v1, v1, v3
-; GFX10-NEXT: v_sub_f32_e32 v5, v7, v6
-; GFX10-NEXT: v_bfe_u32 v3, v4, 16, 1
-; GFX10-NEXT: s_mov_b32 s4, 0xff800000
+; GFX10-NEXT: v_sub_f32_e32 v3, v7, v6
+; GFX10-NEXT: v_bfe_u32 v5, v4, 16, 1
+; GFX10-NEXT: v_or_b32_e32 v7, 0x400000, v4
; GFX10-NEXT: v_sub_f32_e32 v0, v0, v2
-; GFX10-NEXT: v_and_or_b32 v6, v4, s4, 0x400000
-; GFX10-NEXT: v_bfe_u32 v7, v5, 16, 1
-; GFX10-NEXT: v_add3_u32 v3, v3, v4, 0x7fff
; GFX10-NEXT: v_cmp_u_f32_e32 vcc_lo, v4, v4
-; GFX10-NEXT: v_bfe_u32 v8, v0, 16, 1
+; GFX10-NEXT: v_bfe_u32 v6, v3, 16, 1
+; GFX10-NEXT: v_add3_u32 v5, v5, v4, 0x7fff
; GFX10-NEXT: v_bfe_u32 v2, v1, 16, 1
-; GFX10-NEXT: v_add3_u32 v4, v7, v5, 0x7fff
-; GFX10-NEXT: v_and_or_b32 v9, v1, s4, 0x400000
-; GFX10-NEXT: v_cndmask_b32_e32 v3, v3, v6, vcc_lo
-; GFX10-NEXT: v_and_or_b32 v6, v5, s4, 0x400000
-; GFX10-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
+; GFX10-NEXT: v_bfe_u32 v8, v0, 16, 1
+; GFX10-NEXT: v_or_b32_e32 v9, 0x400000, v1
+; GFX10-NEXT: v_add3_u32 v6, v6, v3, 0x7fff
+; GFX10-NEXT: v_cndmask_b32_e32 v4, v5, v7, vcc_lo
+; GFX10-NEXT: v_or_b32_e32 v5, 0x400000, v3
+; GFX10-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3
; GFX10-NEXT: v_add3_u32 v7, v8, v0, 0x7fff
-; GFX10-NEXT: v_and_or_b32 v8, v0, s4, 0x400000
+; GFX10-NEXT: v_or_b32_e32 v8, 0x400000, v0
; GFX10-NEXT: v_add3_u32 v2, v2, v1, 0x7fff
-; GFX10-NEXT: v_cndmask_b32_e32 v4, v4, v6, vcc_lo
+; GFX10-NEXT: v_cndmask_b32_e32 v3, v6, v5, vcc_lo
; GFX10-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0
; GFX10-NEXT: v_cndmask_b32_e32 v0, v7, v8, vcc_lo
; GFX10-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
-; GFX10-NEXT: v_perm_b32 v0, v0, v4, 0x7060302
+; GFX10-NEXT: v_perm_b32 v0, v0, v3, 0x7060302
; GFX10-NEXT: v_cndmask_b32_e32 v1, v2, v9, vcc_lo
-; GFX10-NEXT: v_perm_b32 v1, v1, v3, 0x7060302
+; GFX10-NEXT: v_perm_b32 v1, v1, v4, 0x7060302
; GFX10-NEXT: s_setpc_b64 s[30:31]
;
; GFX11-LABEL: v_fsub_v4bf16:
@@ -13405,45 +13223,42 @@ define <4 x bfloat> @v_fsub_v4bf16(<4 x bfloat> %a, <4 x bfloat> %b) {
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-NEXT: v_lshlrev_b32_e32 v6, 16, v2
; GFX11-NEXT: v_lshlrev_b32_e32 v7, 16, v0
-; GFX11-NEXT: v_lshlrev_b32_e32 v4, 16, v3
; GFX11-NEXT: v_and_b32_e32 v2, 0xffff0000, v2
; GFX11-NEXT: v_and_b32_e32 v0, 0xffff0000, v0
+; GFX11-NEXT: v_lshlrev_b32_e32 v4, 16, v3
; GFX11-NEXT: v_lshlrev_b32_e32 v5, 16, v1
-; GFX11-NEXT: v_and_b32_e32 v1, 0xffff0000, v1
-; GFX11-NEXT: s_mov_b32 s0, 0xff800000
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
-; GFX11-NEXT: v_dual_sub_f32 v0, v0, v2 :: v_dual_and_b32 v3, 0xffff0000, v3
-; GFX11-NEXT: v_sub_f32_e32 v4, v5, v4
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_2)
+; GFX11-NEXT: v_dual_sub_f32 v0, v0, v2 :: v_dual_and_b32 v1, 0xffff0000, v1
+; GFX11-NEXT: v_and_b32_e32 v3, 0xffff0000, v3
; GFX11-NEXT: v_bfe_u32 v8, v0, 16, 1
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_2)
; GFX11-NEXT: v_sub_f32_e32 v1, v1, v3
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3)
-; GFX11-NEXT: v_bfe_u32 v3, v4, 16, 1
-; GFX11-NEXT: v_sub_f32_e32 v5, v7, v6
-; GFX11-NEXT: v_and_or_b32 v6, v4, s0, 0x400000
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v4, v4
+; GFX11-NEXT: v_dual_sub_f32 v3, v7, v6 :: v_dual_sub_f32 v4, v5, v4
; GFX11-NEXT: v_bfe_u32 v2, v1, 16, 1
-; GFX11-NEXT: v_add3_u32 v3, v3, v4, 0x7fff
-; GFX11-NEXT: v_bfe_u32 v7, v5, 16, 1
-; GFX11-NEXT: v_and_or_b32 v9, v1, s0, 0x400000
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX11-NEXT: v_bfe_u32 v6, v3, 16, 1
+; GFX11-NEXT: v_bfe_u32 v5, v4, 16, 1
+; GFX11-NEXT: v_or_b32_e32 v7, 0x400000, v4
+; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v4, v4
; GFX11-NEXT: v_add3_u32 v2, v2, v1, 0x7fff
-; GFX11-NEXT: v_cndmask_b32_e32 v3, v3, v6, vcc_lo
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_4) | instid1(VALU_DEP_4)
-; GFX11-NEXT: v_add3_u32 v4, v7, v5, 0x7fff
-; GFX11-NEXT: v_and_or_b32 v6, v5, s0, 0x400000
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
+; GFX11-NEXT: v_add3_u32 v6, v6, v3, 0x7fff
+; GFX11-NEXT: v_add3_u32 v5, v5, v4, 0x7fff
+; GFX11-NEXT: v_or_b32_e32 v9, 0x400000, v1
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_4) | instid1(VALU_DEP_4)
+; GFX11-NEXT: v_cndmask_b32_e32 v4, v5, v7, vcc_lo
+; GFX11-NEXT: v_or_b32_e32 v5, 0x400000, v3
+; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3
; GFX11-NEXT: v_add3_u32 v7, v8, v0, 0x7fff
-; GFX11-NEXT: v_and_or_b32 v8, v0, s0, 0x400000
-; GFX11-NEXT: v_cndmask_b32_e32 v4, v4, v6, vcc_lo
+; GFX11-NEXT: v_or_b32_e32 v8, 0x400000, v0
+; GFX11-NEXT: v_cndmask_b32_e32 v3, v6, v5, vcc_lo
; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_2) | instid1(VALU_DEP_3)
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_2)
; GFX11-NEXT: v_cndmask_b32_e32 v0, v7, v8, vcc_lo
; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
+; GFX11-NEXT: v_perm_b32 v0, v0, v3, 0x7060302
; GFX11-NEXT: v_cndmask_b32_e32 v1, v2, v9, vcc_lo
-; GFX11-NEXT: v_perm_b32 v0, v0, v4, 0x7060302
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2)
-; GFX11-NEXT: v_perm_b32 v1, v1, v3, 0x7060302
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-NEXT: v_perm_b32 v1, v1, v4, 0x7060302
; GFX11-NEXT: s_setpc_b64 s[30:31]
%op = fsub <4 x bfloat> %a, %b
ret <4 x bfloat> %op
@@ -13481,8 +13296,7 @@ define bfloat @v_fmul_bf16(bfloat %a, bfloat %b) {
; GFX8-NEXT: v_bfe_u32 v1, v0, 16, 1
; GFX8-NEXT: v_add_u32_e32 v1, vcc, v1, v0
; GFX8-NEXT: v_add_u32_e32 v1, vcc, 0x7fff, v1
-; GFX8-NEXT: v_and_b32_e32 v2, 0xff800000, v0
-; GFX8-NEXT: v_or_b32_e32 v2, 0x400000, v2
+; GFX8-NEXT: v_or_b32_e32 v2, 0x400000, v0
; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v0, v0
; GFX8-NEXT: v_cndmask_b32_e32 v0, v1, v2, vcc
; GFX8-NEXT: v_lshrrev_b32_e32 v0, 16, v0
@@ -13496,9 +13310,8 @@ define bfloat @v_fmul_bf16(bfloat %a, bfloat %b) {
; GFX9-NEXT: v_mul_f32_e32 v0, v0, v1
; GFX9-NEXT: v_bfe_u32 v1, v0, 16, 1
; GFX9-NEXT: s_movk_i32 s4, 0x7fff
-; GFX9-NEXT: v_and_b32_e32 v2, 0xff800000, v0
; GFX9-NEXT: v_add3_u32 v1, v1, v0, s4
-; GFX9-NEXT: v_or_b32_e32 v2, 0x400000, v2
+; GFX9-NEXT: v_or_b32_e32 v2, 0x400000, v0
; GFX9-NEXT: v_cmp_u_f32_e32 vcc, v0, v0
; GFX9-NEXT: v_cndmask_b32_e32 v0, v1, v2, vcc
; GFX9-NEXT: v_lshrrev_b32_e32 v0, 16, v0
@@ -13509,10 +13322,9 @@ define bfloat @v_fmul_bf16(bfloat %a, bfloat %b) {
; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX10-NEXT: v_lshlrev_b32_e32 v1, 16, v1
; GFX10-NEXT: v_lshlrev_b32_e32 v0, 16, v0
-; GFX10-NEXT: s_mov_b32 s4, 0xff800000
; GFX10-NEXT: v_mul_f32_e32 v0, v0, v1
; GFX10-NEXT: v_bfe_u32 v1, v0, 16, 1
-; GFX10-NEXT: v_and_or_b32 v2, v0, s4, 0x400000
+; GFX10-NEXT: v_or_b32_e32 v2, 0x400000, v0
; GFX10-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0
; GFX10-NEXT: v_add3_u32 v1, v1, v0, 0x7fff
; GFX10-NEXT: v_cndmask_b32_e32 v0, v1, v2, vcc_lo
@@ -13524,11 +13336,10 @@ define bfloat @v_fmul_bf16(bfloat %a, bfloat %b) {
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-NEXT: v_lshlrev_b32_e32 v1, 16, v1
; GFX11-NEXT: v_lshlrev_b32_e32 v0, 16, v0
-; GFX11-NEXT: s_mov_b32 s0, 0xff800000
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
; GFX11-NEXT: v_mul_f32_e32 v0, v0, v1
; GFX11-NEXT: v_bfe_u32 v1, v0, 16, 1
-; GFX11-NEXT: v_and_or_b32 v2, v0, s0, 0x400000
+; GFX11-NEXT: v_or_b32_e32 v2, 0x400000, v0
; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1)
; GFX11-NEXT: v_add3_u32 v1, v1, v0, 0x7fff
@@ -13586,16 +13397,14 @@ define <2 x bfloat> @v_fmul_v2bf16(<2 x bfloat> %a, <2 x bfloat> %b) {
; GFX8-NEXT: v_and_b32_e32 v1, 0xffff0000, v1
; GFX8-NEXT: v_and_b32_e32 v0, 0xffff0000, v0
; GFX8-NEXT: v_add_u32_e32 v3, vcc, 0x7fff, v3
-; GFX8-NEXT: v_and_b32_e32 v4, 0xff800000, v2
; GFX8-NEXT: v_mul_f32_e32 v0, v0, v1
-; GFX8-NEXT: v_or_b32_e32 v4, 0x400000, v4
+; GFX8-NEXT: v_or_b32_e32 v4, 0x400000, v2
; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v2, v2
; GFX8-NEXT: v_bfe_u32 v1, v0, 16, 1
; GFX8-NEXT: v_cndmask_b32_e32 v2, v3, v4, vcc
; GFX8-NEXT: v_add_u32_e32 v1, vcc, v1, v0
; GFX8-NEXT: v_add_u32_e32 v1, vcc, 0x7fff, v1
-; GFX8-NEXT: v_and_b32_e32 v3, 0xff800000, v0
-; GFX8-NEXT: v_or_b32_e32 v3, 0x400000, v3
+; GFX8-NEXT: v_or_b32_e32 v3, 0x400000, v0
; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v0, v0
; GFX8-NEXT: v_cndmask_b32_e32 v0, v1, v3, vcc
; GFX8-NEXT: v_lshrrev_b32_e32 v0, 16, v0
@@ -13608,20 +13417,18 @@ define <2 x bfloat> @v_fmul_v2bf16(<2 x bfloat> %a, <2 x bfloat> %b) {
; GFX9-NEXT: v_lshlrev_b32_e32 v2, 16, v1
; GFX9-NEXT: v_lshlrev_b32_e32 v3, 16, v0
; GFX9-NEXT: v_mul_f32_e32 v2, v3, v2
-; GFX9-NEXT: v_bfe_u32 v3, v2, 16, 1
-; GFX9-NEXT: s_movk_i32 s4, 0x7fff
-; GFX9-NEXT: v_and_b32_e32 v4, 0xff800000, v2
; GFX9-NEXT: v_and_b32_e32 v1, 0xffff0000, v1
; GFX9-NEXT: v_and_b32_e32 v0, 0xffff0000, v0
+; GFX9-NEXT: v_bfe_u32 v3, v2, 16, 1
+; GFX9-NEXT: s_movk_i32 s4, 0x7fff
+; GFX9-NEXT: v_mul_f32_e32 v0, v0, v1
; GFX9-NEXT: v_add3_u32 v3, v3, v2, s4
-; GFX9-NEXT: v_or_b32_e32 v4, 0x400000, v4
+; GFX9-NEXT: v_or_b32_e32 v4, 0x400000, v2
; GFX9-NEXT: v_cmp_u_f32_e32 vcc, v2, v2
-; GFX9-NEXT: v_mul_f32_e32 v0, v0, v1
-; GFX9-NEXT: v_cndmask_b32_e32 v2, v3, v4, vcc
; GFX9-NEXT: v_bfe_u32 v1, v0, 16, 1
-; GFX9-NEXT: v_and_b32_e32 v3, 0xff800000, v0
+; GFX9-NEXT: v_cndmask_b32_e32 v2, v3, v4, vcc
; GFX9-NEXT: v_add3_u32 v1, v1, v0, s4
-; GFX9-NEXT: v_or_b32_e32 v3, 0x400000, v3
+; GFX9-NEXT: v_or_b32_e32 v3, 0x400000, v0
; GFX9-NEXT: v_cmp_u_f32_e32 vcc, v0, v0
; GFX9-NEXT: v_cndmask_b32_e32 v0, v1, v3, vcc
; GFX9-NEXT: s_mov_b32 s4, 0x7060302
@@ -13635,14 +13442,13 @@ define <2 x bfloat> @v_fmul_v2bf16(<2 x bfloat> %a, <2 x bfloat> %b) {
; GFX10-NEXT: v_lshlrev_b32_e32 v3, 16, v0
; GFX10-NEXT: v_and_b32_e32 v1, 0xffff0000, v1
; GFX10-NEXT: v_and_b32_e32 v0, 0xffff0000, v0
-; GFX10-NEXT: s_mov_b32 s4, 0xff800000
; GFX10-NEXT: v_mul_f32_e32 v2, v3, v2
; GFX10-NEXT: v_mul_f32_e32 v0, v0, v1
; GFX10-NEXT: v_bfe_u32 v1, v2, 16, 1
-; GFX10-NEXT: v_and_or_b32 v4, v2, s4, 0x400000
+; GFX10-NEXT: v_or_b32_e32 v4, 0x400000, v2
; GFX10-NEXT: v_bfe_u32 v3, v0, 16, 1
; GFX10-NEXT: v_cmp_u_f32_e32 vcc_lo, v2, v2
-; GFX10-NEXT: v_and_or_b32 v5, v0, s4, 0x400000
+; GFX10-NEXT: v_or_b32_e32 v5, 0x400000, v0
; GFX10-NEXT: v_add3_u32 v1, v1, v2, 0x7fff
; GFX10-NEXT: v_add3_u32 v3, v3, v0, 0x7fff
; GFX10-NEXT: v_cndmask_b32_e32 v1, v1, v4, vcc_lo
@@ -13658,16 +13464,15 @@ define <2 x bfloat> @v_fmul_v2bf16(<2 x bfloat> %a, <2 x bfloat> %b) {
; GFX11-NEXT: v_and_b32_e32 v1, 0xffff0000, v1
; GFX11-NEXT: v_lshlrev_b32_e32 v3, 16, v0
; GFX11-NEXT: v_and_b32_e32 v0, 0xffff0000, v0
-; GFX11-NEXT: s_mov_b32 s0, 0xff800000
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_3)
; GFX11-NEXT: v_mul_f32_e32 v0, v0, v1
; GFX11-NEXT: v_mul_f32_e32 v2, v3, v2
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
; GFX11-NEXT: v_bfe_u32 v3, v0, 16, 1
; GFX11-NEXT: v_bfe_u32 v1, v2, 16, 1
-; GFX11-NEXT: v_and_or_b32 v4, v2, s0, 0x400000
+; GFX11-NEXT: v_or_b32_e32 v4, 0x400000, v2
; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v2, v2
-; GFX11-NEXT: v_and_or_b32 v5, v0, s0, 0x400000
+; GFX11-NEXT: v_or_b32_e32 v5, 0x400000, v0
; GFX11-NEXT: v_add3_u32 v3, v3, v0, 0x7fff
; GFX11-NEXT: v_add3_u32 v1, v1, v2, 0x7fff
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_4)
@@ -13737,8 +13542,7 @@ define <3 x bfloat> @v_fmul_v3bf16(<3 x bfloat> %a, <3 x bfloat> %b) {
; GFX8-NEXT: v_bfe_u32 v3, v1, 16, 1
; GFX8-NEXT: v_add_u32_e32 v3, vcc, v3, v1
; GFX8-NEXT: v_add_u32_e32 v3, vcc, 0x7fff, v3
-; GFX8-NEXT: v_and_b32_e32 v4, 0xff800000, v1
-; GFX8-NEXT: v_or_b32_e32 v4, 0x400000, v4
+; GFX8-NEXT: v_or_b32_e32 v4, 0x400000, v1
; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v1, v1
; GFX8-NEXT: v_cndmask_b32_e32 v1, v3, v4, vcc
; GFX8-NEXT: v_lshlrev_b32_e32 v3, 16, v2
@@ -13750,16 +13554,14 @@ define <3 x bfloat> @v_fmul_v3bf16(<3 x bfloat> %a, <3 x bfloat> %b) {
; GFX8-NEXT: v_and_b32_e32 v2, 0xffff0000, v2
; GFX8-NEXT: v_and_b32_e32 v0, 0xffff0000, v0
; GFX8-NEXT: v_add_u32_e32 v4, vcc, s4, v4
-; GFX8-NEXT: v_and_b32_e32 v5, 0xff800000, v3
; GFX8-NEXT: v_mul_f32_e32 v0, v0, v2
-; GFX8-NEXT: v_or_b32_e32 v5, 0x400000, v5
+; GFX8-NEXT: v_or_b32_e32 v5, 0x400000, v3
; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v3, v3
; GFX8-NEXT: v_bfe_u32 v2, v0, 16, 1
; GFX8-NEXT: v_cndmask_b32_e32 v3, v4, v5, vcc
; GFX8-NEXT: v_add_u32_e32 v2, vcc, v2, v0
; GFX8-NEXT: v_add_u32_e32 v2, vcc, 0x7fff, v2
-; GFX8-NEXT: v_and_b32_e32 v4, 0xff800000, v0
-; GFX8-NEXT: v_or_b32_e32 v4, 0x400000, v4
+; GFX8-NEXT: v_or_b32_e32 v4, 0x400000, v0
; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v0, v0
; GFX8-NEXT: v_cndmask_b32_e32 v0, v2, v4, vcc
; GFX8-NEXT: v_lshrrev_b32_e32 v0, 16, v0
@@ -13775,27 +13577,24 @@ define <3 x bfloat> @v_fmul_v3bf16(<3 x bfloat> %a, <3 x bfloat> %b) {
; GFX9-NEXT: v_mul_f32_e32 v1, v1, v3
; GFX9-NEXT: v_bfe_u32 v3, v1, 16, 1
; GFX9-NEXT: s_movk_i32 s4, 0x7fff
-; GFX9-NEXT: v_and_b32_e32 v4, 0xff800000, v1
; GFX9-NEXT: v_add3_u32 v3, v3, v1, s4
-; GFX9-NEXT: v_or_b32_e32 v4, 0x400000, v4
+; GFX9-NEXT: v_or_b32_e32 v4, 0x400000, v1
; GFX9-NEXT: v_cmp_u_f32_e32 vcc, v1, v1
; GFX9-NEXT: v_cndmask_b32_e32 v1, v3, v4, vcc
; GFX9-NEXT: v_lshlrev_b32_e32 v3, 16, v2
; GFX9-NEXT: v_lshlrev_b32_e32 v4, 16, v0
; GFX9-NEXT: v_mul_f32_e32 v3, v4, v3
-; GFX9-NEXT: v_bfe_u32 v4, v3, 16, 1
-; GFX9-NEXT: v_and_b32_e32 v5, 0xff800000, v3
; GFX9-NEXT: v_and_b32_e32 v2, 0xffff0000, v2
; GFX9-NEXT: v_and_b32_e32 v0, 0xffff0000, v0
+; GFX9-NEXT: v_bfe_u32 v4, v3, 16, 1
+; GFX9-NEXT: v_mul_f32_e32 v0, v0, v2
; GFX9-NEXT: v_add3_u32 v4, v4, v3, s4
-; GFX9-NEXT: v_or_b32_e32 v5, 0x400000, v5
+; GFX9-NEXT: v_or_b32_e32 v5, 0x400000, v3
; GFX9-NEXT: v_cmp_u_f32_e32 vcc, v3, v3
-; GFX9-NEXT: v_mul_f32_e32 v0, v0, v2
-; GFX9-NEXT: v_cndmask_b32_e32 v3, v4, v5, vcc
; GFX9-NEXT: v_bfe_u32 v2, v0, 16, 1
-; GFX9-NEXT: v_and_b32_e32 v4, 0xff800000, v0
+; GFX9-NEXT: v_cndmask_b32_e32 v3, v4, v5, vcc
; GFX9-NEXT: v_add3_u32 v2, v2, v0, s4
-; GFX9-NEXT: v_or_b32_e32 v4, 0x400000, v4
+; GFX9-NEXT: v_or_b32_e32 v4, 0x400000, v0
; GFX9-NEXT: v_cmp_u_f32_e32 vcc, v0, v0
; GFX9-NEXT: v_cndmask_b32_e32 v0, v2, v4, vcc
; GFX9-NEXT: s_mov_b32 s4, 0x7060302
@@ -13813,18 +13612,17 @@ define <3 x bfloat> @v_fmul_v3bf16(<3 x bfloat> %a, <3 x bfloat> %b) {
; GFX10-NEXT: v_lshlrev_b32_e32 v3, 16, v3
; GFX10-NEXT: v_lshlrev_b32_e32 v1, 16, v1
; GFX10-NEXT: v_mul_f32_e32 v4, v5, v4
-; GFX10-NEXT: s_mov_b32 s4, 0xff800000
; GFX10-NEXT: v_mul_f32_e32 v0, v0, v2
; GFX10-NEXT: v_mul_f32_e32 v1, v1, v3
; GFX10-NEXT: v_bfe_u32 v2, v4, 16, 1
-; GFX10-NEXT: v_and_or_b32 v7, v4, s4, 0x400000
+; GFX10-NEXT: v_or_b32_e32 v7, 0x400000, v4
; GFX10-NEXT: v_bfe_u32 v5, v0, 16, 1
; GFX10-NEXT: v_cmp_u_f32_e32 vcc_lo, v4, v4
; GFX10-NEXT: v_bfe_u32 v3, v1, 16, 1
; GFX10-NEXT: v_add3_u32 v2, v2, v4, 0x7fff
-; GFX10-NEXT: v_and_or_b32 v8, v0, s4, 0x400000
+; GFX10-NEXT: v_or_b32_e32 v8, 0x400000, v0
; GFX10-NEXT: v_add3_u32 v5, v5, v0, 0x7fff
-; GFX10-NEXT: v_and_or_b32 v6, v1, s4, 0x400000
+; GFX10-NEXT: v_or_b32_e32 v6, 0x400000, v1
; GFX10-NEXT: v_add3_u32 v3, v3, v1, 0x7fff
; GFX10-NEXT: v_cndmask_b32_e32 v2, v2, v7, vcc_lo
; GFX10-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0
@@ -13908,17 +13706,15 @@ define <4 x bfloat> @v_fmul_v4bf16(<4 x bfloat> %a, <4 x bfloat> %b) {
; GFX8-NEXT: v_and_b32_e32 v3, 0xffff0000, v3
; GFX8-NEXT: v_and_b32_e32 v1, 0xffff0000, v1
; GFX8-NEXT: v_add_u32_e32 v5, vcc, 0x7fff, v5
-; GFX8-NEXT: v_and_b32_e32 v6, 0xff800000, v4
; GFX8-NEXT: v_mul_f32_e32 v1, v1, v3
-; GFX8-NEXT: v_or_b32_e32 v6, 0x400000, v6
+; GFX8-NEXT: v_or_b32_e32 v6, 0x400000, v4
; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v4, v4
; GFX8-NEXT: v_bfe_u32 v3, v1, 16, 1
; GFX8-NEXT: s_movk_i32 s4, 0x7fff
; GFX8-NEXT: v_cndmask_b32_e32 v4, v5, v6, vcc
; GFX8-NEXT: v_add_u32_e32 v3, vcc, v3, v1
; GFX8-NEXT: v_add_u32_e32 v3, vcc, s4, v3
-; GFX8-NEXT: v_and_b32_e32 v5, 0xff800000, v1
-; GFX8-NEXT: v_or_b32_e32 v5, 0x400000, v5
+; GFX8-NEXT: v_or_b32_e32 v5, 0x400000, v1
; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v1, v1
; GFX8-NEXT: v_cndmask_b32_e32 v1, v3, v5, vcc
; GFX8-NEXT: v_lshlrev_b32_e32 v3, 16, v2
@@ -13929,16 +13725,14 @@ define <4 x bfloat> @v_fmul_v4bf16(<4 x bfloat> %a, <4 x bfloat> %b) {
; GFX8-NEXT: v_and_b32_e32 v2, 0xffff0000, v2
; GFX8-NEXT: v_and_b32_e32 v0, 0xffff0000, v0
; GFX8-NEXT: v_add_u32_e32 v5, vcc, s4, v5
-; GFX8-NEXT: v_and_b32_e32 v6, 0xff800000, v3
; GFX8-NEXT: v_mul_f32_e32 v0, v0, v2
-; GFX8-NEXT: v_or_b32_e32 v6, 0x400000, v6
+; GFX8-NEXT: v_or_b32_e32 v6, 0x400000, v3
; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v3, v3
; GFX8-NEXT: v_bfe_u32 v2, v0, 16, 1
; GFX8-NEXT: v_cndmask_b32_e32 v3, v5, v6, vcc
; GFX8-NEXT: v_add_u32_e32 v2, vcc, v2, v0
; GFX8-NEXT: v_add_u32_e32 v2, vcc, 0x7fff, v2
-; GFX8-NEXT: v_and_b32_e32 v5, 0xff800000, v0
-; GFX8-NEXT: v_or_b32_e32 v5, 0x400000, v5
+; GFX8-NEXT: v_or_b32_e32 v5, 0x400000, v0
; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v0, v0
; GFX8-NEXT: v_cndmask_b32_e32 v0, v2, v5, vcc
; GFX8-NEXT: v_lshrrev_b32_e32 v1, 16, v1
@@ -13953,38 +13747,34 @@ define <4 x bfloat> @v_fmul_v4bf16(<4 x bfloat> %a, <4 x bfloat> %b) {
; GFX9-NEXT: v_lshlrev_b32_e32 v4, 16, v3
; GFX9-NEXT: v_lshlrev_b32_e32 v5, 16, v1
; GFX9-NEXT: v_mul_f32_e32 v4, v5, v4
-; GFX9-NEXT: v_bfe_u32 v5, v4, 16, 1
-; GFX9-NEXT: s_movk_i32 s4, 0x7fff
-; GFX9-NEXT: v_and_b32_e32 v6, 0xff800000, v4
; GFX9-NEXT: v_and_b32_e32 v3, 0xffff0000, v3
; GFX9-NEXT: v_and_b32_e32 v1, 0xffff0000, v1
+; GFX9-NEXT: v_bfe_u32 v5, v4, 16, 1
+; GFX9-NEXT: s_movk_i32 s4, 0x7fff
+; GFX9-NEXT: v_mul_f32_e32 v1, v1, v3
; GFX9-NEXT: v_add3_u32 v5, v5, v4, s4
-; GFX9-NEXT: v_or_b32_e32 v6, 0x400000, v6
+; GFX9-NEXT: v_or_b32_e32 v6, 0x400000, v4
; GFX9-NEXT: v_cmp_u_f32_e32 vcc, v4, v4
-; GFX9-NEXT: v_mul_f32_e32 v1, v1, v3
-; GFX9-NEXT: v_cndmask_b32_e32 v4, v5, v6, vcc
; GFX9-NEXT: v_bfe_u32 v3, v1, 16, 1
-; GFX9-NEXT: v_and_b32_e32 v5, 0xff800000, v1
+; GFX9-NEXT: v_cndmask_b32_e32 v4, v5, v6, vcc
; GFX9-NEXT: v_add3_u32 v3, v3, v1, s4
-; GFX9-NEXT: v_or_b32_e32 v5, 0x400000, v5
+; GFX9-NEXT: v_or_b32_e32 v5, 0x400000, v1
; GFX9-NEXT: v_cmp_u_f32_e32 vcc, v1, v1
; GFX9-NEXT: v_cndmask_b32_e32 v1, v3, v5, vcc
; GFX9-NEXT: v_lshlrev_b32_e32 v3, 16, v2
; GFX9-NEXT: v_lshlrev_b32_e32 v5, 16, v0
; GFX9-NEXT: v_mul_f32_e32 v3, v5, v3
-; GFX9-NEXT: v_bfe_u32 v5, v3, 16, 1
-; GFX9-NEXT: v_and_b32_e32 v6, 0xff800000, v3
; GFX9-NEXT: v_and_b32_e32 v2, 0xffff0000, v2
; GFX9-NEXT: v_and_b32_e32 v0, 0xffff0000, v0
+; GFX9-NEXT: v_bfe_u32 v5, v3, 16, 1
+; GFX9-NEXT: v_mul_f32_e32 v0, v0, v2
; GFX9-NEXT: v_add3_u32 v5, v5, v3, s4
-; GFX9-NEXT: v_or_b32_e32 v6, 0x400000, v6
+; GFX9-NEXT: v_or_b32_e32 v6, 0x400000, v3
; GFX9-NEXT: v_cmp_u_f32_e32 vcc, v3, v3
-; GFX9-NEXT: v_mul_f32_e32 v0, v0, v2
-; GFX9-NEXT: v_cndmask_b32_e32 v3, v5, v6, vcc
; GFX9-NEXT: v_bfe_u32 v2, v0, 16, 1
-; GFX9-NEXT: v_and_b32_e32 v5, 0xff800000, v0
+; GFX9-NEXT: v_cndmask_b32_e32 v3, v5, v6, vcc
; GFX9-NEXT: v_add3_u32 v2, v2, v0, s4
-; GFX9-NEXT: v_or_b32_e32 v5, 0x400000, v5
+; GFX9-NEXT: v_or_b32_e32 v5, 0x400000, v0
; GFX9-NEXT: v_cmp_u_f32_e32 vcc, v0, v0
; GFX9-NEXT: v_cndmask_b32_e32 v0, v2, v5, vcc
; GFX9-NEXT: s_mov_b32 s4, 0x7060302
@@ -14005,31 +13795,30 @@ define <4 x bfloat> @v_fmul_v4bf16(<4 x bfloat> %a, <4 x bfloat> %b) {
; GFX10-NEXT: v_and_b32_e32 v2, 0xffff0000, v2
; GFX10-NEXT: v_and_b32_e32 v0, 0xffff0000, v0
; GFX10-NEXT: v_mul_f32_e32 v1, v1, v3
-; GFX10-NEXT: v_mul_f32_e32 v5, v7, v6
-; GFX10-NEXT: v_bfe_u32 v3, v4, 16, 1
-; GFX10-NEXT: s_mov_b32 s4, 0xff800000
+; GFX10-NEXT: v_mul_f32_e32 v3, v7, v6
+; GFX10-NEXT: v_bfe_u32 v5, v4, 16, 1
+; GFX10-NEXT: v_or_b32_e32 v7, 0x400000, v4
; GFX10-NEXT: v_mul_f32_e32 v0, v0, v2
-; GFX10-NEXT: v_and_or_b32 v6, v4, s4, 0x400000
-; GFX10-NEXT: v_bfe_u32 v7, v5, 16, 1
-; GFX10-NEXT: v_add3_u32 v3, v3, v4, 0x7fff
; GFX10-NEXT: v_cmp_u_f32_e32 vcc_lo, v4, v4
-; GFX10-NEXT: v_bfe_u32 v8, v0, 16, 1
+; GFX10-NEXT: v_bfe_u32 v6, v3, 16, 1
+; GFX10-NEXT: v_add3_u32 v5, v5, v4, 0x7fff
; GFX10-NEXT: v_bfe_u32 v2, v1, 16, 1
-; GFX10-NEXT: v_add3_u32 v4, v7, v5, 0x7fff
-; GFX10-NEXT: v_and_or_b32 v9, v1, s4, 0x400000
-; GFX10-NEXT: v_cndmask_b32_e32 v3, v3, v6, vcc_lo
-; GFX10-NEXT: v_and_or_b32 v6, v5, s4, 0x400000
-; GFX10-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
+; GFX10-NEXT: v_bfe_u32 v8, v0, 16, 1
+; GFX10-NEXT: v_or_b32_e32 v9, 0x400000, v1
+; GFX10-NEXT: v_add3_u32 v6, v6, v3, 0x7fff
+; GFX10-NEXT: v_cndmask_b32_e32 v4, v5, v7, vcc_lo
+; GFX10-NEXT: v_or_b32_e32 v5, 0x400000, v3
+; GFX10-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3
; GFX10-NEXT: v_add3_u32 v7, v8, v0, 0x7fff
-; GFX10-NEXT: v_and_or_b32 v8, v0, s4, 0x400000
+; GFX10-NEXT: v_or_b32_e32 v8, 0x400000, v0
; GFX10-NEXT: v_add3_u32 v2, v2, v1, 0x7fff
-; GFX10-NEXT: v_cndmask_b32_e32 v4, v4, v6, vcc_lo
+; GFX10-NEXT: v_cndmask_b32_e32 v3, v6, v5, vcc_lo
; GFX10-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0
; GFX10-NEXT: v_cndmask_b32_e32 v0, v7, v8, vcc_lo
; GFX10-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
-; GFX10-NEXT: v_perm_b32 v0, v0, v4, 0x7060302
+; GFX10-NEXT: v_perm_b32 v0, v0, v3, 0x7060302
; GFX10-NEXT: v_cndmask_b32_e32 v1, v2, v9, vcc_lo
-; GFX10-NEXT: v_perm_b32 v1, v1, v3, 0x7060302
+; GFX10-NEXT: v_perm_b32 v1, v1, v4, 0x7060302
; GFX10-NEXT: s_setpc_b64 s[30:31]
;
; GFX11-LABEL: v_fmul_v4bf16:
@@ -14037,45 +13826,42 @@ define <4 x bfloat> @v_fmul_v4bf16(<4 x bfloat> %a, <4 x bfloat> %b) {
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-NEXT: v_lshlrev_b32_e32 v6, 16, v2
; GFX11-NEXT: v_lshlrev_b32_e32 v7, 16, v0
-; GFX11-NEXT: v_lshlrev_b32_e32 v4, 16, v3
; GFX11-NEXT: v_and_b32_e32 v2, 0xffff0000, v2
; GFX11-NEXT: v_and_b32_e32 v0, 0xffff0000, v0
+; GFX11-NEXT: v_lshlrev_b32_e32 v4, 16, v3
; GFX11-NEXT: v_lshlrev_b32_e32 v5, 16, v1
-; GFX11-NEXT: v_and_b32_e32 v1, 0xffff0000, v1
-; GFX11-NEXT: s_mov_b32 s0, 0xff800000
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
-; GFX11-NEXT: v_dual_mul_f32 v0, v0, v2 :: v_dual_and_b32 v3, 0xffff0000, v3
-; GFX11-NEXT: v_mul_f32_e32 v4, v5, v4
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_2)
+; GFX11-NEXT: v_dual_mul_f32 v0, v0, v2 :: v_dual_and_b32 v1, 0xffff0000, v1
+; GFX11-NEXT: v_and_b32_e32 v3, 0xffff0000, v3
; GFX11-NEXT: v_bfe_u32 v8, v0, 16, 1
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_2)
; GFX11-NEXT: v_mul_f32_e32 v1, v1, v3
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3)
-; GFX11-NEXT: v_bfe_u32 v3, v4, 16, 1
-; GFX11-NEXT: v_mul_f32_e32 v5, v7, v6
-; GFX11-NEXT: v_and_or_b32 v6, v4, s0, 0x400000
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v4, v4
+; GFX11-NEXT: v_dual_mul_f32 v3, v7, v6 :: v_dual_mul_f32 v4, v5, v4
; GFX11-NEXT: v_bfe_u32 v2, v1, 16, 1
-; GFX11-NEXT: v_add3_u32 v3, v3, v4, 0x7fff
-; GFX11-NEXT: v_bfe_u32 v7, v5, 16, 1
-; GFX11-NEXT: v_and_or_b32 v9, v1, s0, 0x400000
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX11-NEXT: v_bfe_u32 v6, v3, 16, 1
+; GFX11-NEXT: v_bfe_u32 v5, v4, 16, 1
+; GFX11-NEXT: v_or_b32_e32 v7, 0x400000, v4
+; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v4, v4
; GFX11-NEXT: v_add3_u32 v2, v2, v1, 0x7fff
-; GFX11-NEXT: v_cndmask_b32_e32 v3, v3, v6, vcc_lo
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_4) | instid1(VALU_DEP_4)
-; GFX11-NEXT: v_add3_u32 v4, v7, v5, 0x7fff
-; GFX11-NEXT: v_and_or_b32 v6, v5, s0, 0x400000
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
+; GFX11-NEXT: v_add3_u32 v6, v6, v3, 0x7fff
+; GFX11-NEXT: v_add3_u32 v5, v5, v4, 0x7fff
+; GFX11-NEXT: v_or_b32_e32 v9, 0x400000, v1
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_4) | instid1(VALU_DEP_4)
+; GFX11-NEXT: v_cndmask_b32_e32 v4, v5, v7, vcc_lo
+; GFX11-NEXT: v_or_b32_e32 v5, 0x400000, v3
+; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3
; GFX11-NEXT: v_add3_u32 v7, v8, v0, 0x7fff
-; GFX11-NEXT: v_and_or_b32 v8, v0, s0, 0x400000
-; GFX11-NEXT: v_cndmask_b32_e32 v4, v4, v6, vcc_lo
+; GFX11-NEXT: v_or_b32_e32 v8, 0x400000, v0
+; GFX11-NEXT: v_cndmask_b32_e32 v3, v6, v5, vcc_lo
; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_2) | instid1(VALU_DEP_3)
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_2)
; GFX11-NEXT: v_cndmask_b32_e32 v0, v7, v8, vcc_lo
; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
+; GFX11-NEXT: v_perm_b32 v0, v0, v3, 0x7060302
; GFX11-NEXT: v_cndmask_b32_e32 v1, v2, v9, vcc_lo
-; GFX11-NEXT: v_perm_b32 v0, v0, v4, 0x7060302
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2)
-; GFX11-NEXT: v_perm_b32 v1, v1, v3, 0x7060302
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-NEXT: v_perm_b32 v1, v1, v4, 0x7060302
; GFX11-NEXT: s_setpc_b64 s[30:31]
%op = fmul <4 x bfloat> %a, %b
ret <4 x bfloat> %op
@@ -14199,17 +13985,15 @@ define <8 x bfloat> @v_fmul_v8bf16(<8 x bfloat> %a, <8 x bfloat> %b) {
; GFX8-NEXT: v_and_b32_e32 v7, 0xffff0000, v7
; GFX8-NEXT: v_and_b32_e32 v3, 0xffff0000, v3
; GFX8-NEXT: v_add_u32_e32 v9, vcc, 0x7fff, v9
-; GFX8-NEXT: v_and_b32_e32 v10, 0xff800000, v8
; GFX8-NEXT: v_mul_f32_e32 v3, v3, v7
-; GFX8-NEXT: v_or_b32_e32 v10, 0x400000, v10
+; GFX8-NEXT: v_or_b32_e32 v10, 0x400000, v8
; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v8, v8
; GFX8-NEXT: v_bfe_u32 v7, v3, 16, 1
; GFX8-NEXT: s_movk_i32 s4, 0x7fff
; GFX8-NEXT: v_cndmask_b32_e32 v8, v9, v10, vcc
; GFX8-NEXT: v_add_u32_e32 v7, vcc, v7, v3
; GFX8-NEXT: v_add_u32_e32 v7, vcc, s4, v7
-; GFX8-NEXT: v_and_b32_e32 v9, 0xff800000, v3
-; GFX8-NEXT: v_or_b32_e32 v9, 0x400000, v9
+; GFX8-NEXT: v_or_b32_e32 v9, 0x400000, v3
; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v3, v3
; GFX8-NEXT: v_cndmask_b32_e32 v3, v7, v9, vcc
; GFX8-NEXT: v_lshlrev_b32_e32 v7, 16, v6
@@ -14220,16 +14004,14 @@ define <8 x bfloat> @v_fmul_v8bf16(<8 x bfloat> %a, <8 x bfloat> %b) {
; GFX8-NEXT: v_and_b32_e32 v6, 0xffff0000, v6
; GFX8-NEXT: v_and_b32_e32 v2, 0xffff0000, v2
; GFX8-NEXT: v_add_u32_e32 v9, vcc, s4, v9
-; GFX8-NEXT: v_and_b32_e32 v10, 0xff800000, v7
; GFX8-NEXT: v_mul_f32_e32 v2, v2, v6
-; GFX8-NEXT: v_or_b32_e32 v10, 0x400000, v10
+; GFX8-NEXT: v_or_b32_e32 v10, 0x400000, v7
; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v7, v7
; GFX8-NEXT: v_bfe_u32 v6, v2, 16, 1
; GFX8-NEXT: v_cndmask_b32_e32 v7, v9, v10, vcc
; GFX8-NEXT: v_add_u32_e32 v6, vcc, v6, v2
; GFX8-NEXT: v_add_u32_e32 v6, vcc, s4, v6
-; GFX8-NEXT: v_and_b32_e32 v9, 0xff800000, v2
-; GFX8-NEXT: v_or_b32_e32 v9, 0x400000, v9
+; GFX8-NEXT: v_or_b32_e32 v9, 0x400000, v2
; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v2, v2
; GFX8-NEXT: v_cndmask_b32_e32 v2, v6, v9, vcc
; GFX8-NEXT: v_lshlrev_b32_e32 v6, 16, v5
@@ -14240,16 +14022,14 @@ define <8 x bfloat> @v_fmul_v8bf16(<8 x bfloat> %a, <8 x bfloat> %b) {
; GFX8-NEXT: v_and_b32_e32 v5, 0xffff0000, v5
; GFX8-NEXT: v_and_b32_e32 v1, 0xffff0000, v1
; GFX8-NEXT: v_add_u32_e32 v9, vcc, s4, v9
-; GFX8-NEXT: v_and_b32_e32 v10, 0xff800000, v6
; GFX8-NEXT: v_mul_f32_e32 v1, v1, v5
-; GFX8-NEXT: v_or_b32_e32 v10, 0x400000, v10
+; GFX8-NEXT: v_or_b32_e32 v10, 0x400000, v6
; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v6, v6
; GFX8-NEXT: v_bfe_u32 v5, v1, 16, 1
; GFX8-NEXT: v_cndmask_b32_e32 v6, v9, v10, vcc
; GFX8-NEXT: v_add_u32_e32 v5, vcc, v5, v1
; GFX8-NEXT: v_add_u32_e32 v5, vcc, s4, v5
-; GFX8-NEXT: v_and_b32_e32 v9, 0xff800000, v1
-; GFX8-NEXT: v_or_b32_e32 v9, 0x400000, v9
+; GFX8-NEXT: v_or_b32_e32 v9, 0x400000, v1
; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v1, v1
; GFX8-NEXT: v_cndmask_b32_e32 v1, v5, v9, vcc
; GFX8-NEXT: v_lshlrev_b32_e32 v5, 16, v4
@@ -14260,16 +14040,14 @@ define <8 x bfloat> @v_fmul_v8bf16(<8 x bfloat> %a, <8 x bfloat> %b) {
; GFX8-NEXT: v_and_b32_e32 v4, 0xffff0000, v4
; GFX8-NEXT: v_and_b32_e32 v0, 0xffff0000, v0
; GFX8-NEXT: v_add_u32_e32 v9, vcc, s4, v9
-; GFX8-NEXT: v_and_b32_e32 v10, 0xff800000, v5
; GFX8-NEXT: v_mul_f32_e32 v0, v0, v4
-; GFX8-NEXT: v_or_b32_e32 v10, 0x400000, v10
+; GFX8-NEXT: v_or_b32_e32 v10, 0x400000, v5
; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v5, v5
; GFX8-NEXT: v_bfe_u32 v4, v0, 16, 1
; GFX8-NEXT: v_cndmask_b32_e32 v5, v9, v10, vcc
; GFX8-NEXT: v_add_u32_e32 v4, vcc, v4, v0
; GFX8-NEXT: v_add_u32_e32 v4, vcc, 0x7fff, v4
-; GFX8-NEXT: v_and_b32_e32 v9, 0xff800000, v0
-; GFX8-NEXT: v_or_b32_e32 v9, 0x400000, v9
+; GFX8-NEXT: v_or_b32_e32 v9, 0x400000, v0
; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v0, v0
; GFX8-NEXT: v_cndmask_b32_e32 v0, v4, v9, vcc
; GFX8-NEXT: v_lshrrev_b32_e32 v3, 16, v3
@@ -14288,74 +14066,66 @@ define <8 x bfloat> @v_fmul_v8bf16(<8 x bfloat> %a, <8 x bfloat> %b) {
; GFX9-NEXT: v_lshlrev_b32_e32 v8, 16, v7
; GFX9-NEXT: v_lshlrev_b32_e32 v9, 16, v3
; GFX9-NEXT: v_mul_f32_e32 v8, v9, v8
-; GFX9-NEXT: v_bfe_u32 v9, v8, 16, 1
-; GFX9-NEXT: s_movk_i32 s4, 0x7fff
-; GFX9-NEXT: v_and_b32_e32 v10, 0xff800000, v8
; GFX9-NEXT: v_and_b32_e32 v7, 0xffff0000, v7
; GFX9-NEXT: v_and_b32_e32 v3, 0xffff0000, v3
+; GFX9-NEXT: v_bfe_u32 v9, v8, 16, 1
+; GFX9-NEXT: s_movk_i32 s4, 0x7fff
+; GFX9-NEXT: v_mul_f32_e32 v3, v3, v7
; GFX9-NEXT: v_add3_u32 v9, v9, v8, s4
-; GFX9-NEXT: v_or_b32_e32 v10, 0x400000, v10
+; GFX9-NEXT: v_or_b32_e32 v10, 0x400000, v8
; GFX9-NEXT: v_cmp_u_f32_e32 vcc, v8, v8
-; GFX9-NEXT: v_mul_f32_e32 v3, v3, v7
-; GFX9-NEXT: v_cndmask_b32_e32 v8, v9, v10, vcc
; GFX9-NEXT: v_bfe_u32 v7, v3, 16, 1
-; GFX9-NEXT: v_and_b32_e32 v9, 0xff800000, v3
+; GFX9-NEXT: v_cndmask_b32_e32 v8, v9, v10, vcc
; GFX9-NEXT: v_add3_u32 v7, v7, v3, s4
-; GFX9-NEXT: v_or_b32_e32 v9, 0x400000, v9
+; GFX9-NEXT: v_or_b32_e32 v9, 0x400000, v3
; GFX9-NEXT: v_cmp_u_f32_e32 vcc, v3, v3
; GFX9-NEXT: v_cndmask_b32_e32 v3, v7, v9, vcc
; GFX9-NEXT: v_lshlrev_b32_e32 v7, 16, v6
; GFX9-NEXT: v_lshlrev_b32_e32 v9, 16, v2
; GFX9-NEXT: v_mul_f32_e32 v7, v9, v7
-; GFX9-NEXT: v_bfe_u32 v9, v7, 16, 1
-; GFX9-NEXT: v_and_b32_e32 v10, 0xff800000, v7
; GFX9-NEXT: v_and_b32_e32 v6, 0xffff0000, v6
; GFX9-NEXT: v_and_b32_e32 v2, 0xffff0000, v2
+; GFX9-NEXT: v_bfe_u32 v9, v7, 16, 1
+; GFX9-NEXT: v_mul_f32_e32 v2, v2, v6
; GFX9-NEXT: v_add3_u32 v9, v9, v7, s4
-; GFX9-NEXT: v_or_b32_e32 v10, 0x400000, v10
+; GFX9-NEXT: v_or_b32_e32 v10, 0x400000, v7
; GFX9-NEXT: v_cmp_u_f32_e32 vcc, v7, v7
-; GFX9-NEXT: v_mul_f32_e32 v2, v2, v6
-; GFX9-NEXT: v_cndmask_b32_e32 v7, v9, v10, vcc
; GFX9-NEXT: v_bfe_u32 v6, v2, 16, 1
-; GFX9-NEXT: v_and_b32_e32 v9, 0xff800000, v2
+; GFX9-NEXT: v_cndmask_b32_e32 v7, v9, v10, vcc
; GFX9-NEXT: v_add3_u32 v6, v6, v2, s4
-; GFX9-NEXT: v_or_b32_e32 v9, 0x400000, v9
+; GFX9-NEXT: v_or_b32_e32 v9, 0x400000, v2
; GFX9-NEXT: v_cmp_u_f32_e32 vcc, v2, v2
; GFX9-NEXT: v_cndmask_b32_e32 v2, v6, v9, vcc
; GFX9-NEXT: v_lshlrev_b32_e32 v6, 16, v5
; GFX9-NEXT: v_lshlrev_b32_e32 v9, 16, v1
; GFX9-NEXT: v_mul_f32_e32 v6, v9, v6
-; GFX9-NEXT: v_bfe_u32 v9, v6, 16, 1
-; GFX9-NEXT: v_and_b32_e32 v10, 0xff800000, v6
; GFX9-NEXT: v_and_b32_e32 v5, 0xffff0000, v5
; GFX9-NEXT: v_and_b32_e32 v1, 0xffff0000, v1
+; GFX9-NEXT: v_bfe_u32 v9, v6, 16, 1
+; GFX9-NEXT: v_mul_f32_e32 v1, v1, v5
; GFX9-NEXT: v_add3_u32 v9, v9, v6, s4
-; GFX9-NEXT: v_or_b32_e32 v10, 0x400000, v10
+; GFX9-NEXT: v_or_b32_e32 v10, 0x400000, v6
; GFX9-NEXT: v_cmp_u_f32_e32 vcc, v6, v6
-; GFX9-NEXT: v_mul_f32_e32 v1, v1, v5
-; GFX9-NEXT: v_cndmask_b32_e32 v6, v9, v10, vcc
; GFX9-NEXT: v_bfe_u32 v5, v1, 16, 1
-; GFX9-NEXT: v_and_b32_e32 v9, 0xff800000, v1
+; GFX9-NEXT: v_cndmask_b32_e32 v6, v9, v10, vcc
; GFX9-NEXT: v_add3_u32 v5, v5, v1, s4
-; GFX9-NEXT: v_or_b32_e32 v9, 0x400000, v9
+; GFX9-NEXT: v_or_b32_e32 v9, 0x400000, v1
; GFX9-NEXT: v_cmp_u_f32_e32 vcc, v1, v1
; GFX9-NEXT: v_cndmask_b32_e32 v1, v5, v9, vcc
; GFX9-NEXT: v_lshlrev_b32_e32 v5, 16, v4
; GFX9-NEXT: v_lshlrev_b32_e32 v9, 16, v0
; GFX9-NEXT: v_mul_f32_e32 v5, v9, v5
-; GFX9-NEXT: v_bfe_u32 v9, v5, 16, 1
-; GFX9-NEXT: v_and_b32_e32 v10, 0xff800000, v5
; GFX9-NEXT: v_and_b32_e32 v4, 0xffff0000, v4
; GFX9-NEXT: v_and_b32_e32 v0, 0xffff0000, v0
+; GFX9-NEXT: v_bfe_u32 v9, v5, 16, 1
+; GFX9-NEXT: v_mul_f32_e32 v0, v0, v4
; GFX9-NEXT: v_add3_u32 v9, v9, v5, s4
-; GFX9-NEXT: v_or_b32_e32 v10, 0x400000, v10
+; GFX9-NEXT: v_or_b32_e32 v10, 0x400000, v5
; GFX9-NEXT: v_cmp_u_f32_e32 vcc, v5, v5
-; GFX9-NEXT: v_mul_f32_e32 v0, v0, v4
-; GFX9-NEXT: v_cndmask_b32_e32 v5, v9, v10, vcc
; GFX9-NEXT: v_bfe_u32 v4, v0, 16, 1
-; GFX9-NEXT: v_and_b32_e32 v9, 0xff800000, v0
+; GFX9-NEXT: v_cndmask_b32_e32 v5, v9, v10, vcc
; GFX9-NEXT: v_add3_u32 v4, v4, v0, s4
-; GFX9-NEXT: v_or_b32_e32 v9, 0x400000, v9
+; GFX9-NEXT: v_or_b32_e32 v9, 0x400000, v0
; GFX9-NEXT: v_cmp_u_f32_e32 vcc, v0, v0
; GFX9-NEXT: v_cndmask_b32_e32 v0, v4, v9, vcc
; GFX9-NEXT: s_mov_b32 s4, 0x7060302
@@ -14372,62 +14142,61 @@ define <8 x bfloat> @v_fmul_v8bf16(<8 x bfloat> %a, <8 x bfloat> %b) {
; GFX10-NEXT: v_lshlrev_b32_e32 v9, 16, v3
; GFX10-NEXT: v_and_b32_e32 v7, 0xffff0000, v7
; GFX10-NEXT: v_and_b32_e32 v3, 0xffff0000, v3
-; GFX10-NEXT: v_lshlrev_b32_e32 v11, 16, v2
+; GFX10-NEXT: v_lshlrev_b32_e32 v10, 16, v2
; GFX10-NEXT: v_and_b32_e32 v2, 0xffff0000, v2
; GFX10-NEXT: v_mul_f32_e32 v8, v9, v8
; GFX10-NEXT: v_lshlrev_b32_e32 v9, 16, v6
-; GFX10-NEXT: v_mul_f32_e32 v3, v3, v7
; GFX10-NEXT: v_and_b32_e32 v6, 0xffff0000, v6
-; GFX10-NEXT: s_mov_b32 s4, 0xff800000
-; GFX10-NEXT: v_bfe_u32 v10, v8, 16, 1
-; GFX10-NEXT: v_and_or_b32 v7, v8, s4, 0x400000
-; GFX10-NEXT: v_mul_f32_e32 v9, v11, v9
-; GFX10-NEXT: v_bfe_u32 v11, v3, 16, 1
+; GFX10-NEXT: v_mul_f32_e32 v3, v3, v7
+; GFX10-NEXT: v_lshlrev_b32_e32 v15, 16, v0
+; GFX10-NEXT: v_bfe_u32 v11, v8, 16, 1
+; GFX10-NEXT: v_mul_f32_e32 v7, v10, v9
+; GFX10-NEXT: v_or_b32_e32 v9, 0x400000, v8
; GFX10-NEXT: v_cmp_u_f32_e32 vcc_lo, v8, v8
-; GFX10-NEXT: v_add3_u32 v10, v10, v8, 0x7fff
; GFX10-NEXT: v_mul_f32_e32 v2, v2, v6
-; GFX10-NEXT: v_bfe_u32 v8, v9, 16, 1
+; GFX10-NEXT: v_add3_u32 v10, v11, v8, 0x7fff
+; GFX10-NEXT: v_bfe_u32 v11, v3, 16, 1
+; GFX10-NEXT: v_bfe_u32 v12, v7, 16, 1
; GFX10-NEXT: v_lshlrev_b32_e32 v6, 16, v5
-; GFX10-NEXT: v_and_or_b32 v12, v9, s4, 0x400000
-; GFX10-NEXT: v_cndmask_b32_e32 v7, v10, v7, vcc_lo
-; GFX10-NEXT: v_add3_u32 v10, v11, v3, 0x7fff
-; GFX10-NEXT: v_lshlrev_b32_e32 v11, 16, v1
; GFX10-NEXT: v_bfe_u32 v13, v2, 16, 1
-; GFX10-NEXT: v_add3_u32 v8, v8, v9, 0x7fff
-; GFX10-NEXT: v_cmp_u_f32_e32 vcc_lo, v9, v9
+; GFX10-NEXT: v_cndmask_b32_e32 v8, v10, v9, vcc_lo
+; GFX10-NEXT: v_lshlrev_b32_e32 v10, 16, v1
+; GFX10-NEXT: v_add3_u32 v9, v11, v3, 0x7fff
+; GFX10-NEXT: v_add3_u32 v11, v12, v7, 0x7fff
+; GFX10-NEXT: v_or_b32_e32 v12, 0x400000, v7
+; GFX10-NEXT: v_cmp_u_f32_e32 vcc_lo, v7, v7
+; GFX10-NEXT: v_mul_f32_e32 v6, v10, v6
+; GFX10-NEXT: v_add3_u32 v10, v13, v2, 0x7fff
; GFX10-NEXT: v_and_b32_e32 v5, 0xffff0000, v5
-; GFX10-NEXT: v_mul_f32_e32 v6, v11, v6
-; GFX10-NEXT: v_add3_u32 v9, v13, v2, 0x7fff
; GFX10-NEXT: v_and_b32_e32 v1, 0xffff0000, v1
; GFX10-NEXT: v_lshlrev_b32_e32 v13, 16, v4
-; GFX10-NEXT: v_lshlrev_b32_e32 v15, 16, v0
-; GFX10-NEXT: v_cndmask_b32_e32 v8, v8, v12, vcc_lo
-; GFX10-NEXT: v_and_or_b32 v11, v2, s4, 0x400000
+; GFX10-NEXT: v_cndmask_b32_e32 v7, v11, v12, vcc_lo
+; GFX10-NEXT: v_or_b32_e32 v11, 0x400000, v2
; GFX10-NEXT: v_bfe_u32 v12, v6, 16, 1
; GFX10-NEXT: v_and_b32_e32 v4, 0xffff0000, v4
; GFX10-NEXT: v_and_b32_e32 v0, 0xffff0000, v0
; GFX10-NEXT: v_cmp_u_f32_e32 vcc_lo, v2, v2
; GFX10-NEXT: v_mul_f32_e32 v1, v1, v5
; GFX10-NEXT: v_mul_f32_e32 v5, v15, v13
-; GFX10-NEXT: v_and_or_b32 v14, v3, s4, 0x400000
+; GFX10-NEXT: v_or_b32_e32 v14, 0x400000, v3
; GFX10-NEXT: v_mul_f32_e32 v0, v0, v4
-; GFX10-NEXT: v_cndmask_b32_e32 v2, v9, v11, vcc_lo
+; GFX10-NEXT: v_cndmask_b32_e32 v2, v10, v11, vcc_lo
; GFX10-NEXT: v_add3_u32 v4, v12, v6, 0x7fff
-; GFX10-NEXT: v_and_or_b32 v9, v6, s4, 0x400000
+; GFX10-NEXT: v_or_b32_e32 v10, 0x400000, v6
; GFX10-NEXT: v_bfe_u32 v11, v1, 16, 1
; GFX10-NEXT: v_bfe_u32 v12, v5, 16, 1
; GFX10-NEXT: v_cmp_u_f32_e32 vcc_lo, v6, v6
; GFX10-NEXT: v_bfe_u32 v13, v0, 16, 1
-; GFX10-NEXT: v_and_or_b32 v15, v1, s4, 0x400000
+; GFX10-NEXT: v_or_b32_e32 v15, 0x400000, v1
; GFX10-NEXT: v_add3_u32 v6, v11, v1, 0x7fff
-; GFX10-NEXT: v_and_or_b32 v11, v5, s4, 0x400000
-; GFX10-NEXT: v_cndmask_b32_e32 v4, v4, v9, vcc_lo
-; GFX10-NEXT: v_add3_u32 v9, v12, v5, 0x7fff
+; GFX10-NEXT: v_or_b32_e32 v11, 0x400000, v5
+; GFX10-NEXT: v_cndmask_b32_e32 v4, v4, v10, vcc_lo
+; GFX10-NEXT: v_add3_u32 v10, v12, v5, 0x7fff
; GFX10-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
; GFX10-NEXT: v_add3_u32 v12, v13, v0, 0x7fff
-; GFX10-NEXT: v_and_or_b32 v13, v0, s4, 0x400000
-; GFX10-NEXT: v_perm_b32 v2, v2, v8, 0x7060302
-; GFX10-NEXT: v_cndmask_b32_e32 v5, v9, v11, vcc_lo
+; GFX10-NEXT: v_or_b32_e32 v13, 0x400000, v0
+; GFX10-NEXT: v_perm_b32 v2, v2, v7, 0x7060302
+; GFX10-NEXT: v_cndmask_b32_e32 v5, v10, v11, vcc_lo
; GFX10-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0
; GFX10-NEXT: v_cndmask_b32_e32 v0, v12, v13, vcc_lo
; GFX10-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
@@ -14435,81 +14204,80 @@ define <8 x bfloat> @v_fmul_v8bf16(<8 x bfloat> %a, <8 x bfloat> %b) {
; GFX10-NEXT: v_cndmask_b32_e32 v1, v6, v15, vcc_lo
; GFX10-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3
; GFX10-NEXT: v_perm_b32 v1, v1, v4, 0x7060302
-; GFX10-NEXT: v_cndmask_b32_e32 v3, v10, v14, vcc_lo
-; GFX10-NEXT: v_perm_b32 v3, v3, v7, 0x7060302
+; GFX10-NEXT: v_cndmask_b32_e32 v3, v9, v14, vcc_lo
+; GFX10-NEXT: v_perm_b32 v3, v3, v8, 0x7060302
; GFX10-NEXT: s_setpc_b64 s[30:31]
;
; GFX11-LABEL: v_fmul_v8bf16:
; GFX11: ; %bb.0:
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: v_lshlrev_b32_e32 v15, 16, v0
-; GFX11-NEXT: v_and_b32_e32 v0, 0xffff0000, v0
-; GFX11-NEXT: v_lshlrev_b32_e32 v11, 16, v2
+; GFX11-NEXT: v_lshlrev_b32_e32 v10, 16, v2
; GFX11-NEXT: v_lshlrev_b32_e32 v8, 16, v7
; GFX11-NEXT: v_and_b32_e32 v2, 0xffff0000, v2
; GFX11-NEXT: v_lshlrev_b32_e32 v9, 16, v3
-; GFX11-NEXT: v_and_b32_e32 v7, 0xffff0000, v7
-; GFX11-NEXT: s_mov_b32 s0, 0xff800000
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_dual_mul_f32 v8, v9, v8 :: v_dual_lshlrev_b32 v9, 16, v6
-; GFX11-NEXT: v_bfe_u32 v10, v8, 16, 1
+; GFX11-NEXT: v_lshlrev_b32_e32 v15, 16, v0
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_2)
+; GFX11-NEXT: v_dual_mul_f32 v8, v9, v8 :: v_dual_and_b32 v7, 0xffff0000, v7
+; GFX11-NEXT: v_lshlrev_b32_e32 v9, 16, v6
; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v8, v8
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
-; GFX11-NEXT: v_mul_f32_e32 v9, v11, v9
-; GFX11-NEXT: v_add3_u32 v10, v10, v8, 0x7fff
-; GFX11-NEXT: v_and_b32_e32 v6, 0xffff0000, v6
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_2)
-; GFX11-NEXT: v_and_or_b32 v12, v9, s0, 0x400000
-; GFX11-NEXT: v_mul_f32_e32 v2, v2, v6
-; GFX11-NEXT: v_lshlrev_b32_e32 v6, 16, v5
; GFX11-NEXT: v_and_b32_e32 v3, 0xffff0000, v3
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_2)
-; GFX11-NEXT: v_bfe_u32 v13, v2, 16, 1
+; GFX11-NEXT: v_bfe_u32 v11, v8, 16, 1
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_2) | instid1(VALU_DEP_4)
; GFX11-NEXT: v_mul_f32_e32 v3, v3, v7
-; GFX11-NEXT: v_and_or_b32 v7, v8, s0, 0x400000
-; GFX11-NEXT: v_bfe_u32 v8, v9, 16, 1
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
-; GFX11-NEXT: v_cndmask_b32_e32 v7, v10, v7, vcc_lo
-; GFX11-NEXT: v_add3_u32 v8, v8, v9, 0x7fff
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v9, v9
-; GFX11-NEXT: v_add3_u32 v9, v13, v2, 0x7fff
-; GFX11-NEXT: v_lshlrev_b32_e32 v13, 16, v4
-; GFX11-NEXT: v_and_b32_e32 v4, 0xffff0000, v4
+; GFX11-NEXT: v_mul_f32_e32 v7, v10, v9
+; GFX11-NEXT: v_or_b32_e32 v9, 0x400000, v8
+; GFX11-NEXT: v_add3_u32 v10, v11, v8, 0x7fff
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
; GFX11-NEXT: v_bfe_u32 v11, v3, 16, 1
-; GFX11-NEXT: v_cndmask_b32_e32 v8, v8, v12, vcc_lo
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v2, v2
-; GFX11-NEXT: v_and_or_b32 v14, v3, s0, 0x400000
-; GFX11-NEXT: v_mul_f32_e32 v0, v0, v4
-; GFX11-NEXT: v_add3_u32 v10, v11, v3, 0x7fff
-; GFX11-NEXT: v_lshlrev_b32_e32 v11, 16, v1
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
-; GFX11-NEXT: v_dual_mul_f32 v6, v11, v6 :: v_dual_and_b32 v1, 0xffff0000, v1
-; GFX11-NEXT: v_and_or_b32 v11, v2, s0, 0x400000
+; GFX11-NEXT: v_bfe_u32 v12, v7, 16, 1
+; GFX11-NEXT: v_or_b32_e32 v14, 0x400000, v3
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4)
+; GFX11-NEXT: v_cndmask_b32_e32 v8, v10, v9, vcc_lo
+; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v7, v7
+; GFX11-NEXT: v_add3_u32 v9, v11, v3, 0x7fff
+; GFX11-NEXT: v_add3_u32 v11, v12, v7, 0x7fff
+; GFX11-NEXT: v_or_b32_e32 v12, 0x400000, v7
+; GFX11-NEXT: v_and_b32_e32 v6, 0xffff0000, v6
+; GFX11-NEXT: v_lshlrev_b32_e32 v10, 16, v1
+; GFX11-NEXT: v_and_b32_e32 v0, 0xffff0000, v0
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_2) | instid1(VALU_DEP_3)
+; GFX11-NEXT: v_dual_cndmask_b32 v7, v11, v12 :: v_dual_mul_f32 v2, v2, v6
+; GFX11-NEXT: v_lshlrev_b32_e32 v6, 16, v5
; GFX11-NEXT: v_and_b32_e32 v5, 0xffff0000, v5
+; GFX11-NEXT: v_bfe_u32 v13, v2, 16, 1
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_2) | instid1(VALU_DEP_4)
+; GFX11-NEXT: v_mul_f32_e32 v6, v10, v6
+; GFX11-NEXT: v_or_b32_e32 v11, 0x400000, v2
+; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v2, v2
+; GFX11-NEXT: v_add3_u32 v10, v13, v2, 0x7fff
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_2)
; GFX11-NEXT: v_bfe_u32 v12, v6, 16, 1
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3)
-; GFX11-NEXT: v_cndmask_b32_e32 v2, v9, v11, vcc_lo
-; GFX11-NEXT: v_and_or_b32 v9, v6, s0, 0x400000
+; GFX11-NEXT: v_cndmask_b32_e32 v2, v10, v11, vcc_lo
+; GFX11-NEXT: v_or_b32_e32 v10, 0x400000, v6
; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v6, v6
-; GFX11-NEXT: v_mul_f32_e32 v1, v1, v5
+; GFX11-NEXT: v_lshlrev_b32_e32 v13, 16, v4
+; GFX11-NEXT: v_and_b32_e32 v4, 0xffff0000, v4
+; GFX11-NEXT: v_and_b32_e32 v1, 0xffff0000, v1
+; GFX11-NEXT: v_perm_b32 v2, v2, v7, 0x7060302
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX11-NEXT: v_mul_f32_e32 v0, v0, v4
; GFX11-NEXT: v_add3_u32 v4, v12, v6, 0x7fff
-; GFX11-NEXT: v_perm_b32 v2, v2, v8, 0x7060302
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
-; GFX11-NEXT: v_bfe_u32 v11, v1, 16, 1
-; GFX11-NEXT: v_cndmask_b32_e32 v4, v4, v9, vcc_lo
+; GFX11-NEXT: v_dual_mul_f32 v1, v1, v5 :: v_dual_cndmask_b32 v4, v4, v10
; GFX11-NEXT: v_mul_f32_e32 v5, v15, v13
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_2) | instid1(VALU_DEP_4)
+; GFX11-NEXT: v_bfe_u32 v11, v1, 16, 1
; GFX11-NEXT: v_bfe_u32 v13, v0, 16, 1
-; GFX11-NEXT: v_and_or_b32 v15, v1, s0, 0x400000
-; GFX11-NEXT: v_add3_u32 v6, v11, v1, 0x7fff
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_2) | instid1(VALU_DEP_3)
+; GFX11-NEXT: v_or_b32_e32 v15, 0x400000, v1
; GFX11-NEXT: v_bfe_u32 v12, v5, 16, 1
-; GFX11-NEXT: v_and_or_b32 v11, v5, s0, 0x400000
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_2) | instid1(VALU_DEP_4)
+; GFX11-NEXT: v_add3_u32 v6, v11, v1, 0x7fff
+; GFX11-NEXT: v_or_b32_e32 v11, 0x400000, v5
; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
-; GFX11-NEXT: v_add3_u32 v9, v12, v5, 0x7fff
+; GFX11-NEXT: v_add3_u32 v10, v12, v5, 0x7fff
; GFX11-NEXT: v_add3_u32 v12, v13, v0, 0x7fff
-; GFX11-NEXT: v_and_or_b32 v13, v0, s0, 0x400000
+; GFX11-NEXT: v_or_b32_e32 v13, 0x400000, v0
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_3)
-; GFX11-NEXT: v_cndmask_b32_e32 v5, v9, v11, vcc_lo
+; GFX11-NEXT: v_cndmask_b32_e32 v5, v10, v11, vcc_lo
; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0
; GFX11-NEXT: v_cndmask_b32_e32 v0, v12, v13, vcc_lo
; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
@@ -14518,9 +14286,9 @@ define <8 x bfloat> @v_fmul_v8bf16(<8 x bfloat> %a, <8 x bfloat> %b) {
; GFX11-NEXT: v_cndmask_b32_e32 v1, v6, v15, vcc_lo
; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3
; GFX11-NEXT: v_perm_b32 v1, v1, v4, 0x7060302
-; GFX11-NEXT: v_cndmask_b32_e32 v3, v10, v14, vcc_lo
+; GFX11-NEXT: v_cndmask_b32_e32 v3, v9, v14, vcc_lo
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX11-NEXT: v_perm_b32 v3, v3, v7, 0x7060302
+; GFX11-NEXT: v_perm_b32 v3, v3, v8, 0x7060302
; GFX11-NEXT: s_setpc_b64 s[30:31]
%op = fmul <8 x bfloat> %a, %b
ret <8 x bfloat> %op
@@ -14745,16 +14513,14 @@ define <16 x bfloat> @v_fmul_v16bf16(<16 x bfloat> %a, <16 x bfloat> %b) {
; GFX8-NEXT: v_and_b32_e32 v15, 0xffff0000, v15
; GFX8-NEXT: v_and_b32_e32 v7, 0xffff0000, v7
; GFX8-NEXT: v_add_u32_e32 v17, vcc, s4, v17
-; GFX8-NEXT: v_and_b32_e32 v18, 0xff800000, v16
; GFX8-NEXT: v_mul_f32_e32 v7, v7, v15
-; GFX8-NEXT: v_or_b32_e32 v18, 0x400000, v18
+; GFX8-NEXT: v_or_b32_e32 v18, 0x400000, v16
; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v16, v16
; GFX8-NEXT: v_bfe_u32 v15, v7, 16, 1
; GFX8-NEXT: v_cndmask_b32_e32 v16, v17, v18, vcc
; GFX8-NEXT: v_add_u32_e32 v15, vcc, v15, v7
; GFX8-NEXT: v_add_u32_e32 v15, vcc, s4, v15
-; GFX8-NEXT: v_and_b32_e32 v17, 0xff800000, v7
-; GFX8-NEXT: v_or_b32_e32 v17, 0x400000, v17
+; GFX8-NEXT: v_or_b32_e32 v17, 0x400000, v7
; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v7, v7
; GFX8-NEXT: v_cndmask_b32_e32 v7, v15, v17, vcc
; GFX8-NEXT: v_lshlrev_b32_e32 v15, 16, v14
@@ -14765,16 +14531,14 @@ define <16 x bfloat> @v_fmul_v16bf16(<16 x bfloat> %a, <16 x bfloat> %b) {
; GFX8-NEXT: v_and_b32_e32 v14, 0xffff0000, v14
; GFX8-NEXT: v_and_b32_e32 v6, 0xffff0000, v6
; GFX8-NEXT: v_add_u32_e32 v17, vcc, s4, v17
-; GFX8-NEXT: v_and_b32_e32 v18, 0xff800000, v15
; GFX8-NEXT: v_mul_f32_e32 v6, v6, v14
-; GFX8-NEXT: v_or_b32_e32 v18, 0x400000, v18
+; GFX8-NEXT: v_or_b32_e32 v18, 0x400000, v15
; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v15, v15
; GFX8-NEXT: v_bfe_u32 v14, v6, 16, 1
; GFX8-NEXT: v_cndmask_b32_e32 v15, v17, v18, vcc
; GFX8-NEXT: v_add_u32_e32 v14, vcc, v14, v6
; GFX8-NEXT: v_add_u32_e32 v14, vcc, s4, v14
-; GFX8-NEXT: v_and_b32_e32 v17, 0xff800000, v6
-; GFX8-NEXT: v_or_b32_e32 v17, 0x400000, v17
+; GFX8-NEXT: v_or_b32_e32 v17, 0x400000, v6
; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v6, v6
; GFX8-NEXT: v_cndmask_b32_e32 v6, v14, v17, vcc
; GFX8-NEXT: v_lshlrev_b32_e32 v14, 16, v13
@@ -14785,16 +14549,14 @@ define <16 x bfloat> @v_fmul_v16bf16(<16 x bfloat> %a, <16 x bfloat> %b) {
; GFX8-NEXT: v_and_b32_e32 v13, 0xffff0000, v13
; GFX8-NEXT: v_and_b32_e32 v5, 0xffff0000, v5
; GFX8-NEXT: v_add_u32_e32 v17, vcc, s4, v17
-; GFX8-NEXT: v_and_b32_e32 v18, 0xff800000, v14
; GFX8-NEXT: v_mul_f32_e32 v5, v5, v13
-; GFX8-NEXT: v_or_b32_e32 v18, 0x400000, v18
+; GFX8-NEXT: v_or_b32_e32 v18, 0x400000, v14
; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v14, v14
; GFX8-NEXT: v_bfe_u32 v13, v5, 16, 1
; GFX8-NEXT: v_cndmask_b32_e32 v14, v17, v18, vcc
; GFX8-NEXT: v_add_u32_e32 v13, vcc, v13, v5
; GFX8-NEXT: v_add_u32_e32 v13, vcc, s4, v13
-; GFX8-NEXT: v_and_b32_e32 v17, 0xff800000, v5
-; GFX8-NEXT: v_or_b32_e32 v17, 0x400000, v17
+; GFX8-NEXT: v_or_b32_e32 v17, 0x400000, v5
; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v5, v5
; GFX8-NEXT: v_cndmask_b32_e32 v5, v13, v17, vcc
; GFX8-NEXT: v_lshlrev_b32_e32 v13, 16, v12
@@ -14805,16 +14567,14 @@ define <16 x bfloat> @v_fmul_v16bf16(<16 x bfloat> %a, <16 x bfloat> %b) {
; GFX8-NEXT: v_and_b32_e32 v12, 0xffff0000, v12
; GFX8-NEXT: v_and_b32_e32 v4, 0xffff0000, v4
; GFX8-NEXT: v_add_u32_e32 v17, vcc, s4, v17
-; GFX8-NEXT: v_and_b32_e32 v18, 0xff800000, v13
; GFX8-NEXT: v_mul_f32_e32 v4, v4, v12
-; GFX8-NEXT: v_or_b32_e32 v18, 0x400000, v18
+; GFX8-NEXT: v_or_b32_e32 v18, 0x400000, v13
; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v13, v13
; GFX8-NEXT: v_bfe_u32 v12, v4, 16, 1
; GFX8-NEXT: v_cndmask_b32_e32 v13, v17, v18, vcc
; GFX8-NEXT: v_add_u32_e32 v12, vcc, v12, v4
; GFX8-NEXT: v_add_u32_e32 v12, vcc, s4, v12
-; GFX8-NEXT: v_and_b32_e32 v17, 0xff800000, v4
-; GFX8-NEXT: v_or_b32_e32 v17, 0x400000, v17
+; GFX8-NEXT: v_or_b32_e32 v17, 0x400000, v4
; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v4, v4
; GFX8-NEXT: v_cndmask_b32_e32 v4, v12, v17, vcc
; GFX8-NEXT: v_lshlrev_b32_e32 v12, 16, v11
@@ -14825,16 +14585,14 @@ define <16 x bfloat> @v_fmul_v16bf16(<16 x bfloat> %a, <16 x bfloat> %b) {
; GFX8-NEXT: v_and_b32_e32 v11, 0xffff0000, v11
; GFX8-NEXT: v_and_b32_e32 v3, 0xffff0000, v3
; GFX8-NEXT: v_add_u32_e32 v17, vcc, s4, v17
-; GFX8-NEXT: v_and_b32_e32 v18, 0xff800000, v12
; GFX8-NEXT: v_mul_f32_e32 v3, v3, v11
-; GFX8-NEXT: v_or_b32_e32 v18, 0x400000, v18
+; GFX8-NEXT: v_or_b32_e32 v18, 0x400000, v12
; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v12, v12
; GFX8-NEXT: v_bfe_u32 v11, v3, 16, 1
; GFX8-NEXT: v_cndmask_b32_e32 v12, v17, v18, vcc
; GFX8-NEXT: v_add_u32_e32 v11, vcc, v11, v3
; GFX8-NEXT: v_add_u32_e32 v11, vcc, s4, v11
-; GFX8-NEXT: v_and_b32_e32 v17, 0xff800000, v3
-; GFX8-NEXT: v_or_b32_e32 v17, 0x400000, v17
+; GFX8-NEXT: v_or_b32_e32 v17, 0x400000, v3
; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v3, v3
; GFX8-NEXT: v_cndmask_b32_e32 v3, v11, v17, vcc
; GFX8-NEXT: v_lshlrev_b32_e32 v11, 16, v10
@@ -14845,16 +14603,14 @@ define <16 x bfloat> @v_fmul_v16bf16(<16 x bfloat> %a, <16 x bfloat> %b) {
; GFX8-NEXT: v_and_b32_e32 v10, 0xffff0000, v10
; GFX8-NEXT: v_and_b32_e32 v2, 0xffff0000, v2
; GFX8-NEXT: v_add_u32_e32 v17, vcc, s4, v17
-; GFX8-NEXT: v_and_b32_e32 v18, 0xff800000, v11
; GFX8-NEXT: v_mul_f32_e32 v2, v2, v10
-; GFX8-NEXT: v_or_b32_e32 v18, 0x400000, v18
+; GFX8-NEXT: v_or_b32_e32 v18, 0x400000, v11
; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v11, v11
; GFX8-NEXT: v_bfe_u32 v10, v2, 16, 1
; GFX8-NEXT: v_cndmask_b32_e32 v11, v17, v18, vcc
; GFX8-NEXT: v_add_u32_e32 v10, vcc, v10, v2
; GFX8-NEXT: v_add_u32_e32 v10, vcc, s4, v10
-; GFX8-NEXT: v_and_b32_e32 v17, 0xff800000, v2
-; GFX8-NEXT: v_or_b32_e32 v17, 0x400000, v17
+; GFX8-NEXT: v_or_b32_e32 v17, 0x400000, v2
; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v2, v2
; GFX8-NEXT: v_cndmask_b32_e32 v2, v10, v17, vcc
; GFX8-NEXT: v_lshlrev_b32_e32 v10, 16, v9
@@ -14865,16 +14621,14 @@ define <16 x bfloat> @v_fmul_v16bf16(<16 x bfloat> %a, <16 x bfloat> %b) {
; GFX8-NEXT: v_and_b32_e32 v9, 0xffff0000, v9
; GFX8-NEXT: v_and_b32_e32 v1, 0xffff0000, v1
; GFX8-NEXT: v_add_u32_e32 v17, vcc, s4, v17
-; GFX8-NEXT: v_and_b32_e32 v18, 0xff800000, v10
; GFX8-NEXT: v_mul_f32_e32 v1, v1, v9
-; GFX8-NEXT: v_or_b32_e32 v18, 0x400000, v18
+; GFX8-NEXT: v_or_b32_e32 v18, 0x400000, v10
; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v10, v10
; GFX8-NEXT: v_bfe_u32 v9, v1, 16, 1
; GFX8-NEXT: v_cndmask_b32_e32 v10, v17, v18, vcc
; GFX8-NEXT: v_add_u32_e32 v9, vcc, v9, v1
; GFX8-NEXT: v_add_u32_e32 v9, vcc, s4, v9
-; GFX8-NEXT: v_and_b32_e32 v17, 0xff800000, v1
-; GFX8-NEXT: v_or_b32_e32 v17, 0x400000, v17
+; GFX8-NEXT: v_or_b32_e32 v17, 0x400000, v1
; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v1, v1
; GFX8-NEXT: v_cndmask_b32_e32 v1, v9, v17, vcc
; GFX8-NEXT: v_lshlrev_b32_e32 v9, 16, v8
@@ -14885,16 +14639,14 @@ define <16 x bfloat> @v_fmul_v16bf16(<16 x bfloat> %a, <16 x bfloat> %b) {
; GFX8-NEXT: v_and_b32_e32 v8, 0xffff0000, v8
; GFX8-NEXT: v_and_b32_e32 v0, 0xffff0000, v0
; GFX8-NEXT: v_add_u32_e32 v17, vcc, s4, v17
-; GFX8-NEXT: v_and_b32_e32 v18, 0xff800000, v9
; GFX8-NEXT: v_mul_f32_e32 v0, v0, v8
-; GFX8-NEXT: v_or_b32_e32 v18, 0x400000, v18
+; GFX8-NEXT: v_or_b32_e32 v18, 0x400000, v9
; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v9, v9
; GFX8-NEXT: v_bfe_u32 v8, v0, 16, 1
; GFX8-NEXT: v_cndmask_b32_e32 v9, v17, v18, vcc
; GFX8-NEXT: v_add_u32_e32 v8, vcc, v8, v0
; GFX8-NEXT: v_add_u32_e32 v8, vcc, s4, v8
-; GFX8-NEXT: v_and_b32_e32 v17, 0xff800000, v0
-; GFX8-NEXT: v_or_b32_e32 v17, 0x400000, v17
+; GFX8-NEXT: v_or_b32_e32 v17, 0x400000, v0
; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v0, v0
; GFX8-NEXT: v_cndmask_b32_e32 v0, v8, v17, vcc
; GFX8-NEXT: v_lshrrev_b32_e32 v7, 16, v7
@@ -14921,146 +14673,130 @@ define <16 x bfloat> @v_fmul_v16bf16(<16 x bfloat> %a, <16 x bfloat> %b) {
; GFX9-NEXT: v_lshlrev_b32_e32 v16, 16, v15
; GFX9-NEXT: v_lshlrev_b32_e32 v17, 16, v7
; GFX9-NEXT: v_mul_f32_e32 v16, v17, v16
-; GFX9-NEXT: v_bfe_u32 v17, v16, 16, 1
-; GFX9-NEXT: s_movk_i32 s4, 0x7fff
-; GFX9-NEXT: v_and_b32_e32 v18, 0xff800000, v16
; GFX9-NEXT: v_and_b32_e32 v15, 0xffff0000, v15
; GFX9-NEXT: v_and_b32_e32 v7, 0xffff0000, v7
+; GFX9-NEXT: v_bfe_u32 v17, v16, 16, 1
+; GFX9-NEXT: s_movk_i32 s4, 0x7fff
+; GFX9-NEXT: v_mul_f32_e32 v7, v7, v15
; GFX9-NEXT: v_add3_u32 v17, v17, v16, s4
-; GFX9-NEXT: v_or_b32_e32 v18, 0x400000, v18
+; GFX9-NEXT: v_or_b32_e32 v18, 0x400000, v16
; GFX9-NEXT: v_cmp_u_f32_e32 vcc, v16, v16
-; GFX9-NEXT: v_mul_f32_e32 v7, v7, v15
-; GFX9-NEXT: v_cndmask_b32_e32 v16, v17, v18, vcc
; GFX9-NEXT: v_bfe_u32 v15, v7, 16, 1
-; GFX9-NEXT: v_and_b32_e32 v17, 0xff800000, v7
+; GFX9-NEXT: v_cndmask_b32_e32 v16, v17, v18, vcc
; GFX9-NEXT: v_add3_u32 v15, v15, v7, s4
-; GFX9-NEXT: v_or_b32_e32 v17, 0x400000, v17
+; GFX9-NEXT: v_or_b32_e32 v17, 0x400000, v7
; GFX9-NEXT: v_cmp_u_f32_e32 vcc, v7, v7
; GFX9-NEXT: v_cndmask_b32_e32 v7, v15, v17, vcc
; GFX9-NEXT: v_lshlrev_b32_e32 v15, 16, v14
; GFX9-NEXT: v_lshlrev_b32_e32 v17, 16, v6
; GFX9-NEXT: v_mul_f32_e32 v15, v17, v15
-; GFX9-NEXT: v_bfe_u32 v17, v15, 16, 1
-; GFX9-NEXT: v_and_b32_e32 v18, 0xff800000, v15
; GFX9-NEXT: v_and_b32_e32 v14, 0xffff0000, v14
; GFX9-NEXT: v_and_b32_e32 v6, 0xffff0000, v6
+; GFX9-NEXT: v_bfe_u32 v17, v15, 16, 1
+; GFX9-NEXT: v_mul_f32_e32 v6, v6, v14
; GFX9-NEXT: v_add3_u32 v17, v17, v15, s4
-; GFX9-NEXT: v_or_b32_e32 v18, 0x400000, v18
+; GFX9-NEXT: v_or_b32_e32 v18, 0x400000, v15
; GFX9-NEXT: v_cmp_u_f32_e32 vcc, v15, v15
-; GFX9-NEXT: v_mul_f32_e32 v6, v6, v14
-; GFX9-NEXT: v_cndmask_b32_e32 v15, v17, v18, vcc
; GFX9-NEXT: v_bfe_u32 v14, v6, 16, 1
-; GFX9-NEXT: v_and_b32_e32 v17, 0xff800000, v6
+; GFX9-NEXT: v_cndmask_b32_e32 v15, v17, v18, vcc
; GFX9-NEXT: v_add3_u32 v14, v14, v6, s4
-; GFX9-NEXT: v_or_b32_e32 v17, 0x400000, v17
+; GFX9-NEXT: v_or_b32_e32 v17, 0x400000, v6
; GFX9-NEXT: v_cmp_u_f32_e32 vcc, v6, v6
; GFX9-NEXT: v_cndmask_b32_e32 v6, v14, v17, vcc
; GFX9-NEXT: v_lshlrev_b32_e32 v14, 16, v13
; GFX9-NEXT: v_lshlrev_b32_e32 v17, 16, v5
; GFX9-NEXT: v_mul_f32_e32 v14, v17, v14
-; GFX9-NEXT: v_bfe_u32 v17, v14, 16, 1
-; GFX9-NEXT: v_and_b32_e32 v18, 0xff800000, v14
; GFX9-NEXT: v_and_b32_e32 v13, 0xffff0000, v13
; GFX9-NEXT: v_and_b32_e32 v5, 0xffff0000, v5
+; GFX9-NEXT: v_bfe_u32 v17, v14, 16, 1
+; GFX9-NEXT: v_mul_f32_e32 v5, v5, v13
; GFX9-NEXT: v_add3_u32 v17, v17, v14, s4
-; GFX9-NEXT: v_or_b32_e32 v18, 0x400000, v18
+; GFX9-NEXT: v_or_b32_e32 v18, 0x400000, v14
; GFX9-NEXT: v_cmp_u_f32_e32 vcc, v14, v14
-; GFX9-NEXT: v_mul_f32_e32 v5, v5, v13
-; GFX9-NEXT: v_cndmask_b32_e32 v14, v17, v18, vcc
; GFX9-NEXT: v_bfe_u32 v13, v5, 16, 1
-; GFX9-NEXT: v_and_b32_e32 v17, 0xff800000, v5
+; GFX9-NEXT: v_cndmask_b32_e32 v14, v17, v18, vcc
; GFX9-NEXT: v_add3_u32 v13, v13, v5, s4
-; GFX9-NEXT: v_or_b32_e32 v17, 0x400000, v17
+; GFX9-NEXT: v_or_b32_e32 v17, 0x400000, v5
; GFX9-NEXT: v_cmp_u_f32_e32 vcc, v5, v5
; GFX9-NEXT: v_cndmask_b32_e32 v5, v13, v17, vcc
; GFX9-NEXT: v_lshlrev_b32_e32 v13, 16, v12
; GFX9-NEXT: v_lshlrev_b32_e32 v17, 16, v4
; GFX9-NEXT: v_mul_f32_e32 v13, v17, v13
-; GFX9-NEXT: v_bfe_u32 v17, v13, 16, 1
-; GFX9-NEXT: v_and_b32_e32 v18, 0xff800000, v13
; GFX9-NEXT: v_and_b32_e32 v12, 0xffff0000, v12
; GFX9-NEXT: v_and_b32_e32 v4, 0xffff0000, v4
+; GFX9-NEXT: v_bfe_u32 v17, v13, 16, 1
+; GFX9-NEXT: v_mul_f32_e32 v4, v4, v12
; GFX9-NEXT: v_add3_u32 v17, v17, v13, s4
-; GFX9-NEXT: v_or_b32_e32 v18, 0x400000, v18
+; GFX9-NEXT: v_or_b32_e32 v18, 0x400000, v13
; GFX9-NEXT: v_cmp_u_f32_e32 vcc, v13, v13
-; GFX9-NEXT: v_mul_f32_e32 v4, v4, v12
-; GFX9-NEXT: v_cndmask_b32_e32 v13, v17, v18, vcc
; GFX9-NEXT: v_bfe_u32 v12, v4, 16, 1
-; GFX9-NEXT: v_and_b32_e32 v17, 0xff800000, v4
+; GFX9-NEXT: v_cndmask_b32_e32 v13, v17, v18, vcc
; GFX9-NEXT: v_add3_u32 v12, v12, v4, s4
-; GFX9-NEXT: v_or_b32_e32 v17, 0x400000, v17
+; GFX9-NEXT: v_or_b32_e32 v17, 0x400000, v4
; GFX9-NEXT: v_cmp_u_f32_e32 vcc, v4, v4
; GFX9-NEXT: v_cndmask_b32_e32 v4, v12, v17, vcc
; GFX9-NEXT: v_lshlrev_b32_e32 v12, 16, v11
; GFX9-NEXT: v_lshlrev_b32_e32 v17, 16, v3
; GFX9-NEXT: v_mul_f32_e32 v12, v17, v12
-; GFX9-NEXT: v_bfe_u32 v17, v12, 16, 1
-; GFX9-NEXT: v_and_b32_e32 v18, 0xff800000, v12
; GFX9-NEXT: v_and_b32_e32 v11, 0xffff0000, v11
; GFX9-NEXT: v_and_b32_e32 v3, 0xffff0000, v3
+; GFX9-NEXT: v_bfe_u32 v17, v12, 16, 1
+; GFX9-NEXT: v_mul_f32_e32 v3, v3, v11
; GFX9-NEXT: v_add3_u32 v17, v17, v12, s4
-; GFX9-NEXT: v_or_b32_e32 v18, 0x400000, v18
+; GFX9-NEXT: v_or_b32_e32 v18, 0x400000, v12
; GFX9-NEXT: v_cmp_u_f32_e32 vcc, v12, v12
-; GFX9-NEXT: v_mul_f32_e32 v3, v3, v11
-; GFX9-NEXT: v_cndmask_b32_e32 v12, v17, v18, vcc
; GFX9-NEXT: v_bfe_u32 v11, v3, 16, 1
-; GFX9-NEXT: v_and_b32_e32 v17, 0xff800000, v3
+; GFX9-NEXT: v_cndmask_b32_e32 v12, v17, v18, vcc
; GFX9-NEXT: v_add3_u32 v11, v11, v3, s4
-; GFX9-NEXT: v_or_b32_e32 v17, 0x400000, v17
+; GFX9-NEXT: v_or_b32_e32 v17, 0x400000, v3
; GFX9-NEXT: v_cmp_u_f32_e32 vcc, v3, v3
; GFX9-NEXT: v_cndmask_b32_e32 v3, v11, v17, vcc
; GFX9-NEXT: v_lshlrev_b32_e32 v11, 16, v10
; GFX9-NEXT: v_lshlrev_b32_e32 v17, 16, v2
; GFX9-NEXT: v_mul_f32_e32 v11, v17, v11
-; GFX9-NEXT: v_bfe_u32 v17, v11, 16, 1
-; GFX9-NEXT: v_and_b32_e32 v18, 0xff800000, v11
; GFX9-NEXT: v_and_b32_e32 v10, 0xffff0000, v10
; GFX9-NEXT: v_and_b32_e32 v2, 0xffff0000, v2
+; GFX9-NEXT: v_bfe_u32 v17, v11, 16, 1
+; GFX9-NEXT: v_mul_f32_e32 v2, v2, v10
; GFX9-NEXT: v_add3_u32 v17, v17, v11, s4
-; GFX9-NEXT: v_or_b32_e32 v18, 0x400000, v18
+; GFX9-NEXT: v_or_b32_e32 v18, 0x400000, v11
; GFX9-NEXT: v_cmp_u_f32_e32 vcc, v11, v11
-; GFX9-NEXT: v_mul_f32_e32 v2, v2, v10
-; GFX9-NEXT: v_cndmask_b32_e32 v11, v17, v18, vcc
; GFX9-NEXT: v_bfe_u32 v10, v2, 16, 1
-; GFX9-NEXT: v_and_b32_e32 v17, 0xff800000, v2
+; GFX9-NEXT: v_cndmask_b32_e32 v11, v17, v18, vcc
; GFX9-NEXT: v_add3_u32 v10, v10, v2, s4
-; GFX9-NEXT: v_or_b32_e32 v17, 0x400000, v17
+; GFX9-NEXT: v_or_b32_e32 v17, 0x400000, v2
; GFX9-NEXT: v_cmp_u_f32_e32 vcc, v2, v2
; GFX9-NEXT: v_cndmask_b32_e32 v2, v10, v17, vcc
; GFX9-NEXT: v_lshlrev_b32_e32 v10, 16, v9
; GFX9-NEXT: v_lshlrev_b32_e32 v17, 16, v1
; GFX9-NEXT: v_mul_f32_e32 v10, v17, v10
-; GFX9-NEXT: v_bfe_u32 v17, v10, 16, 1
-; GFX9-NEXT: v_and_b32_e32 v18, 0xff800000, v10
; GFX9-NEXT: v_and_b32_e32 v9, 0xffff0000, v9
; GFX9-NEXT: v_and_b32_e32 v1, 0xffff0000, v1
+; GFX9-NEXT: v_bfe_u32 v17, v10, 16, 1
+; GFX9-NEXT: v_mul_f32_e32 v1, v1, v9
; GFX9-NEXT: v_add3_u32 v17, v17, v10, s4
-; GFX9-NEXT: v_or_b32_e32 v18, 0x400000, v18
+; GFX9-NEXT: v_or_b32_e32 v18, 0x400000, v10
; GFX9-NEXT: v_cmp_u_f32_e32 vcc, v10, v10
-; GFX9-NEXT: v_mul_f32_e32 v1, v1, v9
-; GFX9-NEXT: v_cndmask_b32_e32 v10, v17, v18, vcc
; GFX9-NEXT: v_bfe_u32 v9, v1, 16, 1
-; GFX9-NEXT: v_and_b32_e32 v17, 0xff800000, v1
+; GFX9-NEXT: v_cndmask_b32_e32 v10, v17, v18, vcc
; GFX9-NEXT: v_add3_u32 v9, v9, v1, s4
-; GFX9-NEXT: v_or_b32_e32 v17, 0x400000, v17
+; GFX9-NEXT: v_or_b32_e32 v17, 0x400000, v1
; GFX9-NEXT: v_cmp_u_f32_e32 vcc, v1, v1
; GFX9-NEXT: v_cndmask_b32_e32 v1, v9, v17, vcc
; GFX9-NEXT: v_lshlrev_b32_e32 v9, 16, v8
; GFX9-NEXT: v_lshlrev_b32_e32 v17, 16, v0
; GFX9-NEXT: v_mul_f32_e32 v9, v17, v9
-; GFX9-NEXT: v_bfe_u32 v17, v9, 16, 1
-; GFX9-NEXT: v_and_b32_e32 v18, 0xff800000, v9
; GFX9-NEXT: v_and_b32_e32 v8, 0xffff0000, v8
; GFX9-NEXT: v_and_b32_e32 v0, 0xffff0000, v0
+; GFX9-NEXT: v_bfe_u32 v17, v9, 16, 1
+; GFX9-NEXT: v_mul_f32_e32 v0, v0, v8
; GFX9-NEXT: v_add3_u32 v17, v17, v9, s4
-; GFX9-NEXT: v_or_b32_e32 v18, 0x400000, v18
+; GFX9-NEXT: v_or_b32_e32 v18, 0x400000, v9
; GFX9-NEXT: v_cmp_u_f32_e32 vcc, v9, v9
-; GFX9-NEXT: v_mul_f32_e32 v0, v0, v8
-; GFX9-NEXT: v_cndmask_b32_e32 v9, v17, v18, vcc
; GFX9-NEXT: v_bfe_u32 v8, v0, 16, 1
-; GFX9-NEXT: v_and_b32_e32 v17, 0xff800000, v0
+; GFX9-NEXT: v_cndmask_b32_e32 v9, v17, v18, vcc
; GFX9-NEXT: v_add3_u32 v8, v8, v0, s4
-; GFX9-NEXT: v_or_b32_e32 v17, 0x400000, v17
+; GFX9-NEXT: v_or_b32_e32 v17, 0x400000, v0
; GFX9-NEXT: v_cmp_u_f32_e32 vcc, v0, v0
; GFX9-NEXT: v_cndmask_b32_e32 v0, v8, v17, vcc
; GFX9-NEXT: s_mov_b32 s4, 0x7060302
@@ -15081,27 +14817,26 @@ define <16 x bfloat> @v_fmul_v16bf16(<16 x bfloat> %a, <16 x bfloat> %b) {
; GFX10-NEXT: v_lshlrev_b32_e32 v17, 16, v7
; GFX10-NEXT: v_and_b32_e32 v15, 0xffff0000, v15
; GFX10-NEXT: v_and_b32_e32 v7, 0xffff0000, v7
-; GFX10-NEXT: s_mov_b32 s4, 0xff800000
; GFX10-NEXT: v_lshlrev_b32_e32 v18, 16, v6
+; GFX10-NEXT: v_and_b32_e32 v6, 0xffff0000, v6
; GFX10-NEXT: v_mul_f32_e32 v16, v17, v16
; GFX10-NEXT: v_lshlrev_b32_e32 v17, 16, v14
; GFX10-NEXT: v_mul_f32_e32 v7, v7, v15
; GFX10-NEXT: v_and_b32_e32 v14, 0xffff0000, v14
-; GFX10-NEXT: v_and_b32_e32 v6, 0xffff0000, v6
; GFX10-NEXT: v_bfe_u32 v15, v16, 16, 1
-; GFX10-NEXT: v_and_or_b32 v20, v16, s4, 0x400000
+; GFX10-NEXT: v_or_b32_e32 v20, 0x400000, v16
; GFX10-NEXT: v_bfe_u32 v19, v7, 16, 1
; GFX10-NEXT: v_cmp_u_f32_e32 vcc_lo, v16, v16
; GFX10-NEXT: v_mul_f32_e32 v17, v18, v17
; GFX10-NEXT: v_add3_u32 v15, v15, v16, 0x7fff
; GFX10-NEXT: v_mul_f32_e32 v6, v6, v14
; GFX10-NEXT: v_add3_u32 v18, v19, v7, 0x7fff
-; GFX10-NEXT: v_and_or_b32 v19, v7, s4, 0x400000
+; GFX10-NEXT: v_or_b32_e32 v19, 0x400000, v7
; GFX10-NEXT: v_bfe_u32 v21, v17, 16, 1
; GFX10-NEXT: v_cndmask_b32_e32 v15, v15, v20, vcc_lo
; GFX10-NEXT: v_cmp_u_f32_e32 vcc_lo, v7, v7
; GFX10-NEXT: v_lshlrev_b32_e32 v20, 16, v5
-; GFX10-NEXT: v_and_or_b32 v16, v17, s4, 0x400000
+; GFX10-NEXT: v_or_b32_e32 v16, 0x400000, v17
; GFX10-NEXT: v_add3_u32 v14, v21, v17, 0x7fff
; GFX10-NEXT: v_and_b32_e32 v5, 0xffff0000, v5
; GFX10-NEXT: v_cndmask_b32_e32 v7, v18, v19, vcc_lo
@@ -15115,7 +14850,7 @@ define <16 x bfloat> @v_fmul_v16bf16(<16 x bfloat> %a, <16 x bfloat> %b) {
; GFX10-NEXT: v_mul_f32_e32 v5, v5, v13
; GFX10-NEXT: v_cndmask_b32_e32 v14, v14, v16, vcc_lo
; GFX10-NEXT: v_add3_u32 v16, v18, v6, 0x7fff
-; GFX10-NEXT: v_and_or_b32 v13, v6, s4, 0x400000
+; GFX10-NEXT: v_or_b32_e32 v13, 0x400000, v6
; GFX10-NEXT: v_lshlrev_b32_e32 v18, 16, v12
; GFX10-NEXT: v_bfe_u32 v20, v17, 16, 1
; GFX10-NEXT: v_cmp_u_f32_e32 vcc_lo, v6, v6
@@ -15125,10 +14860,10 @@ define <16 x bfloat> @v_fmul_v16bf16(<16 x bfloat> %a, <16 x bfloat> %b) {
; GFX10-NEXT: v_cndmask_b32_e32 v6, v16, v13, vcc_lo
; GFX10-NEXT: v_mul_f32_e32 v13, v19, v18
; GFX10-NEXT: v_add3_u32 v16, v20, v17, 0x7fff
-; GFX10-NEXT: v_and_or_b32 v18, v17, s4, 0x400000
+; GFX10-NEXT: v_or_b32_e32 v18, 0x400000, v17
; GFX10-NEXT: v_cmp_u_f32_e32 vcc_lo, v17, v17
; GFX10-NEXT: v_add3_u32 v19, v21, v5, 0x7fff
-; GFX10-NEXT: v_and_or_b32 v20, v5, s4, 0x400000
+; GFX10-NEXT: v_or_b32_e32 v20, 0x400000, v5
; GFX10-NEXT: v_bfe_u32 v21, v13, 16, 1
; GFX10-NEXT: v_mul_f32_e32 v4, v4, v12
; GFX10-NEXT: v_cndmask_b32_e32 v16, v16, v18, vcc_lo
@@ -15138,14 +14873,14 @@ define <16 x bfloat> @v_fmul_v16bf16(<16 x bfloat> %a, <16 x bfloat> %b) {
; GFX10-NEXT: v_add3_u32 v17, v21, v13, 0x7fff
; GFX10-NEXT: v_and_b32_e32 v11, 0xffff0000, v11
; GFX10-NEXT: v_cndmask_b32_e32 v5, v19, v20, vcc_lo
-; GFX10-NEXT: v_and_or_b32 v19, v13, s4, 0x400000
+; GFX10-NEXT: v_or_b32_e32 v19, 0x400000, v13
; GFX10-NEXT: v_and_b32_e32 v3, 0xffff0000, v3
; GFX10-NEXT: v_mul_f32_e32 v12, v18, v12
; GFX10-NEXT: v_cmp_u_f32_e32 vcc_lo, v13, v13
; GFX10-NEXT: v_bfe_u32 v20, v4, 16, 1
; GFX10-NEXT: v_lshlrev_b32_e32 v18, 16, v10
; GFX10-NEXT: v_mul_f32_e32 v3, v3, v11
-; GFX10-NEXT: v_and_or_b32 v22, v12, s4, 0x400000
+; GFX10-NEXT: v_or_b32_e32 v22, 0x400000, v12
; GFX10-NEXT: v_cndmask_b32_e32 v13, v17, v19, vcc_lo
; GFX10-NEXT: v_bfe_u32 v17, v12, 16, 1
; GFX10-NEXT: v_lshlrev_b32_e32 v19, 16, v2
@@ -15157,12 +14892,12 @@ define <16 x bfloat> @v_fmul_v16bf16(<16 x bfloat> %a, <16 x bfloat> %b) {
; GFX10-NEXT: v_and_b32_e32 v2, 0xffff0000, v2
; GFX10-NEXT: v_cmp_u_f32_e32 vcc_lo, v12, v12
; GFX10-NEXT: v_add3_u32 v19, v20, v3, 0x7fff
-; GFX10-NEXT: v_and_or_b32 v20, v3, s4, 0x400000
+; GFX10-NEXT: v_or_b32_e32 v20, 0x400000, v3
; GFX10-NEXT: v_bfe_u32 v23, v18, 16, 1
; GFX10-NEXT: v_mul_f32_e32 v2, v2, v10
; GFX10-NEXT: v_cndmask_b32_e32 v12, v17, v22, vcc_lo
; GFX10-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3
-; GFX10-NEXT: v_and_or_b32 v17, v18, s4, 0x400000
+; GFX10-NEXT: v_or_b32_e32 v17, 0x400000, v18
; GFX10-NEXT: v_add3_u32 v10, v23, v18, 0x7fff
; GFX10-NEXT: v_lshlrev_b32_e32 v22, 16, v1
; GFX10-NEXT: v_and_b32_e32 v1, 0xffff0000, v1
@@ -15171,8 +14906,8 @@ define <16 x bfloat> @v_fmul_v16bf16(<16 x bfloat> %a, <16 x bfloat> %b) {
; GFX10-NEXT: v_lshlrev_b32_e32 v20, 16, v9
; GFX10-NEXT: v_cmp_u_f32_e32 vcc_lo, v18, v18
; GFX10-NEXT: v_and_b32_e32 v9, 0xffff0000, v9
-; GFX10-NEXT: v_and_or_b32 v18, v2, s4, 0x400000
-; GFX10-NEXT: v_and_or_b32 v21, v4, s4, 0x400000
+; GFX10-NEXT: v_or_b32_e32 v18, 0x400000, v2
+; GFX10-NEXT: v_or_b32_e32 v21, 0x400000, v4
; GFX10-NEXT: v_perm_b32 v3, v3, v12, 0x7060302
; GFX10-NEXT: v_cndmask_b32_e32 v10, v10, v17, vcc_lo
; GFX10-NEXT: v_add3_u32 v17, v19, v2, 0x7fff
@@ -15184,17 +14919,17 @@ define <16 x bfloat> @v_fmul_v16bf16(<16 x bfloat> %a, <16 x bfloat> %b) {
; GFX10-NEXT: v_bfe_u32 v23, v19, 16, 1
; GFX10-NEXT: v_mul_f32_e32 v1, v1, v9
; GFX10-NEXT: v_mul_f32_e32 v9, v22, v20
-; GFX10-NEXT: v_and_or_b32 v22, v19, s4, 0x400000
+; GFX10-NEXT: v_or_b32_e32 v22, 0x400000, v19
; GFX10-NEXT: v_mul_f32_e32 v0, v0, v8
; GFX10-NEXT: v_add3_u32 v20, v23, v19, 0x7fff
; GFX10-NEXT: v_bfe_u32 v8, v1, 16, 1
; GFX10-NEXT: v_cmp_u_f32_e32 vcc_lo, v19, v19
; GFX10-NEXT: v_bfe_u32 v23, v9, 16, 1
-; GFX10-NEXT: v_and_or_b32 v24, v9, s4, 0x400000
-; GFX10-NEXT: v_and_or_b32 v25, v0, s4, 0x400000
+; GFX10-NEXT: v_or_b32_e32 v24, 0x400000, v9
+; GFX10-NEXT: v_or_b32_e32 v25, 0x400000, v0
; GFX10-NEXT: v_add3_u32 v8, v8, v1, 0x7fff
; GFX10-NEXT: v_cndmask_b32_e32 v19, v20, v22, vcc_lo
-; GFX10-NEXT: v_and_or_b32 v22, v1, s4, 0x400000
+; GFX10-NEXT: v_or_b32_e32 v22, 0x400000, v1
; GFX10-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
; GFX10-NEXT: v_bfe_u32 v20, v0, 16, 1
; GFX10-NEXT: v_add3_u32 v23, v23, v9, 0x7fff
@@ -15223,12 +14958,11 @@ define <16 x bfloat> @v_fmul_v16bf16(<16 x bfloat> %a, <16 x bfloat> %b) {
; GFX11-NEXT: v_lshlrev_b32_e32 v16, 16, v15
; GFX11-NEXT: v_and_b32_e32 v6, 0xffff0000, v6
; GFX11-NEXT: v_lshlrev_b32_e32 v17, 16, v7
-; GFX11-NEXT: v_and_b32_e32 v15, 0xffff0000, v15
-; GFX11-NEXT: s_mov_b32 s0, 0xff800000
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_2)
-; GFX11-NEXT: v_dual_mul_f32 v16, v17, v16 :: v_dual_lshlrev_b32 v17, 16, v14
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
+; GFX11-NEXT: v_dual_mul_f32 v16, v17, v16 :: v_dual_and_b32 v15, 0xffff0000, v15
+; GFX11-NEXT: v_lshlrev_b32_e32 v17, 16, v14
; GFX11-NEXT: v_and_b32_e32 v14, 0xffff0000, v14
-; GFX11-NEXT: v_and_or_b32 v20, v16, s0, 0x400000
+; GFX11-NEXT: v_or_b32_e32 v20, 0x400000, v16
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
; GFX11-NEXT: v_mul_f32_e32 v17, v18, v17
; GFX11-NEXT: v_mul_f32_e32 v6, v6, v14
@@ -15241,13 +14975,13 @@ define <16 x bfloat> @v_fmul_v16bf16(<16 x bfloat> %a, <16 x bfloat> %b) {
; GFX11-NEXT: v_mul_f32_e32 v7, v7, v15
; GFX11-NEXT: v_bfe_u32 v15, v16, 16, 1
; GFX11-NEXT: v_add3_u32 v15, v15, v16, 0x7fff
-; GFX11-NEXT: v_and_or_b32 v16, v17, s0, 0x400000
+; GFX11-NEXT: v_or_b32_e32 v16, 0x400000, v17
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_2) | instid1(VALU_DEP_2)
; GFX11-NEXT: v_dual_cndmask_b32 v15, v15, v20 :: v_dual_lshlrev_b32 v20, 16, v5
; GFX11-NEXT: v_bfe_u32 v19, v7, 16, 1
; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v7, v7
; GFX11-NEXT: v_add3_u32 v18, v19, v7, 0x7fff
-; GFX11-NEXT: v_and_or_b32 v19, v7, s0, 0x400000
+; GFX11-NEXT: v_or_b32_e32 v19, 0x400000, v7
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_3) | instid1(VALU_DEP_4)
; GFX11-NEXT: v_cndmask_b32_e32 v7, v18, v19, vcc_lo
; GFX11-NEXT: v_bfe_u32 v18, v6, 16, 1
@@ -15269,32 +15003,32 @@ define <16 x bfloat> @v_fmul_v16bf16(<16 x bfloat> %a, <16 x bfloat> %b) {
; GFX11-NEXT: v_lshlrev_b32_e32 v12, 16, v11
; GFX11-NEXT: v_and_b32_e32 v5, 0xffff0000, v5
; GFX11-NEXT: v_mul_f32_e32 v5, v5, v13
-; GFX11-NEXT: v_and_or_b32 v13, v6, s0, 0x400000
+; GFX11-NEXT: v_or_b32_e32 v13, 0x400000, v6
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_3) | instid1(VALU_DEP_4)
; GFX11-NEXT: v_dual_cndmask_b32 v6, v16, v13 :: v_dual_mul_f32 v13, v19, v18
; GFX11-NEXT: v_add3_u32 v16, v20, v17, 0x7fff
-; GFX11-NEXT: v_and_or_b32 v18, v17, s0, 0x400000
+; GFX11-NEXT: v_or_b32_e32 v18, 0x400000, v17
; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v17, v17
; GFX11-NEXT: v_perm_b32 v6, v6, v14, 0x7060302
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_4) | instid1(VALU_DEP_4)
; GFX11-NEXT: v_cndmask_b32_e32 v16, v16, v18, vcc_lo
; GFX11-NEXT: v_lshlrev_b32_e32 v18, 16, v3
; GFX11-NEXT: v_bfe_u32 v21, v5, 16, 1
-; GFX11-NEXT: v_and_or_b32 v20, v5, s0, 0x400000
+; GFX11-NEXT: v_or_b32_e32 v20, 0x400000, v5
; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
; GFX11-NEXT: v_mul_f32_e32 v12, v18, v12
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_2) | instid1(VALU_DEP_4)
; GFX11-NEXT: v_add3_u32 v19, v21, v5, 0x7fff
; GFX11-NEXT: v_bfe_u32 v21, v13, 16, 1
; GFX11-NEXT: v_lshlrev_b32_e32 v18, 16, v10
-; GFX11-NEXT: v_and_or_b32 v22, v12, s0, 0x400000
+; GFX11-NEXT: v_or_b32_e32 v22, 0x400000, v12
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
; GFX11-NEXT: v_cndmask_b32_e32 v5, v19, v20, vcc_lo
; GFX11-NEXT: v_add3_u32 v17, v21, v13, 0x7fff
-; GFX11-NEXT: v_and_or_b32 v19, v13, s0, 0x400000
+; GFX11-NEXT: v_or_b32_e32 v19, 0x400000, v13
; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v13, v13
; GFX11-NEXT: v_bfe_u32 v20, v4, 16, 1
-; GFX11-NEXT: v_and_or_b32 v21, v4, s0, 0x400000
+; GFX11-NEXT: v_or_b32_e32 v21, 0x400000, v4
; GFX11-NEXT: v_perm_b32 v5, v5, v16, 0x7060302
; GFX11-NEXT: v_cndmask_b32_e32 v13, v17, v19, vcc_lo
; GFX11-NEXT: v_bfe_u32 v17, v12, 16, 1
@@ -15310,7 +15044,7 @@ define <16 x bfloat> @v_fmul_v16bf16(<16 x bfloat> %a, <16 x bfloat> %b) {
; GFX11-NEXT: v_and_b32_e32 v10, 0xffff0000, v10
; GFX11-NEXT: v_and_b32_e32 v3, 0xffff0000, v3
; GFX11-NEXT: v_bfe_u32 v23, v18, 16, 1
-; GFX11-NEXT: v_and_or_b32 v17, v18, s0, 0x400000
+; GFX11-NEXT: v_or_b32_e32 v17, 0x400000, v18
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
; GFX11-NEXT: v_dual_mul_f32 v2, v2, v10 :: v_dual_and_b32 v1, 0xffff0000, v1
; GFX11-NEXT: v_mul_f32_e32 v3, v3, v11
@@ -15320,13 +15054,13 @@ define <16 x bfloat> @v_fmul_v16bf16(<16 x bfloat> %a, <16 x bfloat> %b) {
; GFX11-NEXT: v_bfe_u32 v20, v3, 16, 1
; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3
; GFX11-NEXT: v_add3_u32 v19, v20, v3, 0x7fff
-; GFX11-NEXT: v_and_or_b32 v20, v3, s0, 0x400000
+; GFX11-NEXT: v_or_b32_e32 v20, 0x400000, v3
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
; GFX11-NEXT: v_cndmask_b32_e32 v3, v19, v20, vcc_lo
; GFX11-NEXT: v_bfe_u32 v19, v2, 16, 1
; GFX11-NEXT: v_lshlrev_b32_e32 v20, 16, v9
; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v18, v18
-; GFX11-NEXT: v_and_or_b32 v18, v2, s0, 0x400000
+; GFX11-NEXT: v_or_b32_e32 v18, 0x400000, v2
; GFX11-NEXT: v_perm_b32 v3, v3, v12, 0x7060302
; GFX11-NEXT: v_cndmask_b32_e32 v10, v10, v17, vcc_lo
; GFX11-NEXT: v_add3_u32 v17, v19, v2, 0x7fff
@@ -15343,13 +15077,13 @@ define <16 x bfloat> @v_fmul_v16bf16(<16 x bfloat> %a, <16 x bfloat> %b) {
; GFX11-NEXT: v_mul_f32_e32 v9, v22, v20
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_4)
; GFX11-NEXT: v_add3_u32 v20, v23, v19, 0x7fff
-; GFX11-NEXT: v_and_or_b32 v22, v19, s0, 0x400000
-; GFX11-NEXT: v_and_or_b32 v25, v0, s0, 0x400000
+; GFX11-NEXT: v_or_b32_e32 v22, 0x400000, v19
+; GFX11-NEXT: v_or_b32_e32 v25, 0x400000, v0
; GFX11-NEXT: v_bfe_u32 v8, v1, 16, 1
; GFX11-NEXT: v_bfe_u32 v23, v9, 16, 1
-; GFX11-NEXT: v_and_or_b32 v24, v9, s0, 0x400000
+; GFX11-NEXT: v_or_b32_e32 v24, 0x400000, v9
; GFX11-NEXT: v_cndmask_b32_e32 v19, v20, v22, vcc_lo
-; GFX11-NEXT: v_and_or_b32 v22, v1, s0, 0x400000
+; GFX11-NEXT: v_or_b32_e32 v22, 0x400000, v1
; GFX11-NEXT: v_add3_u32 v8, v8, v1, 0x7fff
; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
; GFX11-NEXT: v_bfe_u32 v20, v0, 16, 1
@@ -15916,16 +15650,14 @@ define <32 x bfloat> @v_fmul_v32bf16(<32 x bfloat> %a, <32 x bfloat> %b) {
; GFX8-NEXT: v_and_b32_e32 v30, 0xffff0000, v30
; GFX8-NEXT: v_and_b32_e32 v14, 0xffff0000, v14
; GFX8-NEXT: v_add_u32_e32 v32, vcc, s4, v32
-; GFX8-NEXT: v_and_b32_e32 v33, 0xff800000, v31
; GFX8-NEXT: v_mul_f32_e32 v14, v14, v30
-; GFX8-NEXT: v_or_b32_e32 v33, 0x400000, v33
+; GFX8-NEXT: v_or_b32_e32 v33, 0x400000, v31
; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v31, v31
; GFX8-NEXT: v_bfe_u32 v30, v14, 16, 1
; GFX8-NEXT: v_cndmask_b32_e32 v31, v32, v33, vcc
; GFX8-NEXT: v_add_u32_e32 v30, vcc, v30, v14
; GFX8-NEXT: v_add_u32_e32 v30, vcc, s4, v30
-; GFX8-NEXT: v_and_b32_e32 v32, 0xff800000, v14
-; GFX8-NEXT: v_or_b32_e32 v32, 0x400000, v32
+; GFX8-NEXT: v_or_b32_e32 v32, 0x400000, v14
; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v14, v14
; GFX8-NEXT: v_cndmask_b32_e32 v14, v30, v32, vcc
; GFX8-NEXT: v_lshlrev_b32_e32 v30, 16, v29
@@ -15947,29 +15679,25 @@ define <32 x bfloat> @v_fmul_v32bf16(<32 x bfloat> %a, <32 x bfloat> %b) {
; GFX8-NEXT: v_mul_f32_e32 v30, v15, v30
; GFX8-NEXT: v_bfe_u32 v15, v33, 16, 1
; GFX8-NEXT: v_add_u32_e32 v15, vcc, v15, v33
-; GFX8-NEXT: v_and_b32_e32 v34, 0xff800000, v33
; GFX8-NEXT: v_add_u32_e32 v15, vcc, s4, v15
-; GFX8-NEXT: v_or_b32_e32 v34, 0x400000, v34
+; GFX8-NEXT: v_or_b32_e32 v34, 0x400000, v33
; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v33, v33
; GFX8-NEXT: v_bfe_u32 v33, v30, 16, 1
; GFX8-NEXT: v_cndmask_b32_e32 v15, v15, v34, vcc
; GFX8-NEXT: v_add_u32_e32 v33, vcc, v33, v30
-; GFX8-NEXT: v_and_b32_e32 v34, 0xff800000, v30
; GFX8-NEXT: v_add_u32_e32 v33, vcc, s4, v33
-; GFX8-NEXT: v_or_b32_e32 v34, 0x400000, v34
+; GFX8-NEXT: v_or_b32_e32 v34, 0x400000, v30
; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v30, v30
; GFX8-NEXT: v_cndmask_b32_e32 v30, v33, v34, vcc
; GFX8-NEXT: v_bfe_u32 v33, v32, 16, 1
; GFX8-NEXT: v_add_u32_e32 v33, vcc, v33, v32
; GFX8-NEXT: v_add_u32_e32 v33, vcc, s4, v33
-; GFX8-NEXT: v_and_b32_e32 v34, 0xff800000, v32
-; GFX8-NEXT: v_or_b32_e32 v34, 0x400000, v34
+; GFX8-NEXT: v_or_b32_e32 v34, 0x400000, v32
; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v32, v32
; GFX8-NEXT: v_cndmask_b32_e32 v32, v33, v34, vcc
; GFX8-NEXT: v_add_u32_e32 v29, vcc, v29, v13
; GFX8-NEXT: v_add_u32_e32 v29, vcc, s4, v29
-; GFX8-NEXT: v_and_b32_e32 v33, 0xff800000, v13
-; GFX8-NEXT: v_or_b32_e32 v33, 0x400000, v33
+; GFX8-NEXT: v_or_b32_e32 v33, 0x400000, v13
; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v13, v13
; GFX8-NEXT: v_cndmask_b32_e32 v13, v29, v33, vcc
; GFX8-NEXT: v_lshlrev_b32_e32 v29, 16, v28
@@ -15980,16 +15708,14 @@ define <32 x bfloat> @v_fmul_v32bf16(<32 x bfloat> %a, <32 x bfloat> %b) {
; GFX8-NEXT: v_and_b32_e32 v28, 0xffff0000, v28
; GFX8-NEXT: v_and_b32_e32 v12, 0xffff0000, v12
; GFX8-NEXT: v_add_u32_e32 v33, vcc, s4, v33
-; GFX8-NEXT: v_and_b32_e32 v34, 0xff800000, v29
; GFX8-NEXT: v_mul_f32_e32 v12, v12, v28
-; GFX8-NEXT: v_or_b32_e32 v34, 0x400000, v34
+; GFX8-NEXT: v_or_b32_e32 v34, 0x400000, v29
; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v29, v29
; GFX8-NEXT: v_bfe_u32 v28, v12, 16, 1
; GFX8-NEXT: v_cndmask_b32_e32 v29, v33, v34, vcc
; GFX8-NEXT: v_add_u32_e32 v28, vcc, v28, v12
; GFX8-NEXT: v_add_u32_e32 v28, vcc, s4, v28
-; GFX8-NEXT: v_and_b32_e32 v33, 0xff800000, v12
-; GFX8-NEXT: v_or_b32_e32 v33, 0x400000, v33
+; GFX8-NEXT: v_or_b32_e32 v33, 0x400000, v12
; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v12, v12
; GFX8-NEXT: v_cndmask_b32_e32 v12, v28, v33, vcc
; GFX8-NEXT: v_lshlrev_b32_e32 v28, 16, v27
@@ -16000,16 +15726,14 @@ define <32 x bfloat> @v_fmul_v32bf16(<32 x bfloat> %a, <32 x bfloat> %b) {
; GFX8-NEXT: v_and_b32_e32 v27, 0xffff0000, v27
; GFX8-NEXT: v_and_b32_e32 v11, 0xffff0000, v11
; GFX8-NEXT: v_add_u32_e32 v33, vcc, s4, v33
-; GFX8-NEXT: v_and_b32_e32 v34, 0xff800000, v28
; GFX8-NEXT: v_mul_f32_e32 v11, v11, v27
-; GFX8-NEXT: v_or_b32_e32 v34, 0x400000, v34
+; GFX8-NEXT: v_or_b32_e32 v34, 0x400000, v28
; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v28, v28
; GFX8-NEXT: v_bfe_u32 v27, v11, 16, 1
; GFX8-NEXT: v_cndmask_b32_e32 v28, v33, v34, vcc
; GFX8-NEXT: v_add_u32_e32 v27, vcc, v27, v11
; GFX8-NEXT: v_add_u32_e32 v27, vcc, s4, v27
-; GFX8-NEXT: v_and_b32_e32 v33, 0xff800000, v11
-; GFX8-NEXT: v_or_b32_e32 v33, 0x400000, v33
+; GFX8-NEXT: v_or_b32_e32 v33, 0x400000, v11
; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v11, v11
; GFX8-NEXT: v_cndmask_b32_e32 v11, v27, v33, vcc
; GFX8-NEXT: v_lshlrev_b32_e32 v27, 16, v26
@@ -16020,16 +15744,14 @@ define <32 x bfloat> @v_fmul_v32bf16(<32 x bfloat> %a, <32 x bfloat> %b) {
; GFX8-NEXT: v_and_b32_e32 v26, 0xffff0000, v26
; GFX8-NEXT: v_and_b32_e32 v10, 0xffff0000, v10
; GFX8-NEXT: v_add_u32_e32 v33, vcc, s4, v33
-; GFX8-NEXT: v_and_b32_e32 v34, 0xff800000, v27
; GFX8-NEXT: v_mul_f32_e32 v10, v10, v26
-; GFX8-NEXT: v_or_b32_e32 v34, 0x400000, v34
+; GFX8-NEXT: v_or_b32_e32 v34, 0x400000, v27
; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v27, v27
; GFX8-NEXT: v_bfe_u32 v26, v10, 16, 1
; GFX8-NEXT: v_cndmask_b32_e32 v27, v33, v34, vcc
; GFX8-NEXT: v_add_u32_e32 v26, vcc, v26, v10
; GFX8-NEXT: v_add_u32_e32 v26, vcc, s4, v26
-; GFX8-NEXT: v_and_b32_e32 v33, 0xff800000, v10
-; GFX8-NEXT: v_or_b32_e32 v33, 0x400000, v33
+; GFX8-NEXT: v_or_b32_e32 v33, 0x400000, v10
; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v10, v10
; GFX8-NEXT: v_cndmask_b32_e32 v10, v26, v33, vcc
; GFX8-NEXT: v_lshlrev_b32_e32 v26, 16, v25
@@ -16040,16 +15762,14 @@ define <32 x bfloat> @v_fmul_v32bf16(<32 x bfloat> %a, <32 x bfloat> %b) {
; GFX8-NEXT: v_and_b32_e32 v25, 0xffff0000, v25
; GFX8-NEXT: v_and_b32_e32 v9, 0xffff0000, v9
; GFX8-NEXT: v_add_u32_e32 v33, vcc, s4, v33
-; GFX8-NEXT: v_and_b32_e32 v34, 0xff800000, v26
; GFX8-NEXT: v_mul_f32_e32 v9, v9, v25
-; GFX8-NEXT: v_or_b32_e32 v34, 0x400000, v34
+; GFX8-NEXT: v_or_b32_e32 v34, 0x400000, v26
; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v26, v26
; GFX8-NEXT: v_bfe_u32 v25, v9, 16, 1
; GFX8-NEXT: v_cndmask_b32_e32 v26, v33, v34, vcc
; GFX8-NEXT: v_add_u32_e32 v25, vcc, v25, v9
; GFX8-NEXT: v_add_u32_e32 v25, vcc, s4, v25
-; GFX8-NEXT: v_and_b32_e32 v33, 0xff800000, v9
-; GFX8-NEXT: v_or_b32_e32 v33, 0x400000, v33
+; GFX8-NEXT: v_or_b32_e32 v33, 0x400000, v9
; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v9, v9
; GFX8-NEXT: v_cndmask_b32_e32 v9, v25, v33, vcc
; GFX8-NEXT: v_lshlrev_b32_e32 v25, 16, v24
@@ -16060,16 +15780,14 @@ define <32 x bfloat> @v_fmul_v32bf16(<32 x bfloat> %a, <32 x bfloat> %b) {
; GFX8-NEXT: v_and_b32_e32 v24, 0xffff0000, v24
; GFX8-NEXT: v_and_b32_e32 v8, 0xffff0000, v8
; GFX8-NEXT: v_add_u32_e32 v33, vcc, s4, v33
-; GFX8-NEXT: v_and_b32_e32 v34, 0xff800000, v25
; GFX8-NEXT: v_mul_f32_e32 v8, v8, v24
-; GFX8-NEXT: v_or_b32_e32 v34, 0x400000, v34
+; GFX8-NEXT: v_or_b32_e32 v34, 0x400000, v25
; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v25, v25
; GFX8-NEXT: v_bfe_u32 v24, v8, 16, 1
; GFX8-NEXT: v_cndmask_b32_e32 v25, v33, v34, vcc
; GFX8-NEXT: v_add_u32_e32 v24, vcc, v24, v8
; GFX8-NEXT: v_add_u32_e32 v24, vcc, s4, v24
-; GFX8-NEXT: v_and_b32_e32 v33, 0xff800000, v8
-; GFX8-NEXT: v_or_b32_e32 v33, 0x400000, v33
+; GFX8-NEXT: v_or_b32_e32 v33, 0x400000, v8
; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v8, v8
; GFX8-NEXT: v_cndmask_b32_e32 v8, v24, v33, vcc
; GFX8-NEXT: v_lshlrev_b32_e32 v24, 16, v23
@@ -16080,16 +15798,14 @@ define <32 x bfloat> @v_fmul_v32bf16(<32 x bfloat> %a, <32 x bfloat> %b) {
; GFX8-NEXT: v_and_b32_e32 v23, 0xffff0000, v23
; GFX8-NEXT: v_and_b32_e32 v7, 0xffff0000, v7
; GFX8-NEXT: v_add_u32_e32 v33, vcc, s4, v33
-; GFX8-NEXT: v_and_b32_e32 v34, 0xff800000, v24
; GFX8-NEXT: v_mul_f32_e32 v7, v7, v23
-; GFX8-NEXT: v_or_b32_e32 v34, 0x400000, v34
+; GFX8-NEXT: v_or_b32_e32 v34, 0x400000, v24
; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v24, v24
; GFX8-NEXT: v_bfe_u32 v23, v7, 16, 1
; GFX8-NEXT: v_cndmask_b32_e32 v24, v33, v34, vcc
; GFX8-NEXT: v_add_u32_e32 v23, vcc, v23, v7
; GFX8-NEXT: v_add_u32_e32 v23, vcc, s4, v23
-; GFX8-NEXT: v_and_b32_e32 v33, 0xff800000, v7
-; GFX8-NEXT: v_or_b32_e32 v33, 0x400000, v33
+; GFX8-NEXT: v_or_b32_e32 v33, 0x400000, v7
; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v7, v7
; GFX8-NEXT: v_cndmask_b32_e32 v7, v23, v33, vcc
; GFX8-NEXT: v_lshlrev_b32_e32 v23, 16, v22
@@ -16100,16 +15816,14 @@ define <32 x bfloat> @v_fmul_v32bf16(<32 x bfloat> %a, <32 x bfloat> %b) {
; GFX8-NEXT: v_and_b32_e32 v22, 0xffff0000, v22
; GFX8-NEXT: v_and_b32_e32 v6, 0xffff0000, v6
; GFX8-NEXT: v_add_u32_e32 v33, vcc, s4, v33
-; GFX8-NEXT: v_and_b32_e32 v34, 0xff800000, v23
; GFX8-NEXT: v_mul_f32_e32 v6, v6, v22
-; GFX8-NEXT: v_or_b32_e32 v34, 0x400000, v34
+; GFX8-NEXT: v_or_b32_e32 v34, 0x400000, v23
; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v23, v23
; GFX8-NEXT: v_bfe_u32 v22, v6, 16, 1
; GFX8-NEXT: v_cndmask_b32_e32 v23, v33, v34, vcc
; GFX8-NEXT: v_add_u32_e32 v22, vcc, v22, v6
; GFX8-NEXT: v_add_u32_e32 v22, vcc, s4, v22
-; GFX8-NEXT: v_and_b32_e32 v33, 0xff800000, v6
-; GFX8-NEXT: v_or_b32_e32 v33, 0x400000, v33
+; GFX8-NEXT: v_or_b32_e32 v33, 0x400000, v6
; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v6, v6
; GFX8-NEXT: v_cndmask_b32_e32 v6, v22, v33, vcc
; GFX8-NEXT: v_lshlrev_b32_e32 v22, 16, v21
@@ -16120,16 +15834,14 @@ define <32 x bfloat> @v_fmul_v32bf16(<32 x bfloat> %a, <32 x bfloat> %b) {
; GFX8-NEXT: v_and_b32_e32 v21, 0xffff0000, v21
; GFX8-NEXT: v_and_b32_e32 v5, 0xffff0000, v5
; GFX8-NEXT: v_add_u32_e32 v33, vcc, s4, v33
-; GFX8-NEXT: v_and_b32_e32 v34, 0xff800000, v22
; GFX8-NEXT: v_mul_f32_e32 v5, v5, v21
-; GFX8-NEXT: v_or_b32_e32 v34, 0x400000, v34
+; GFX8-NEXT: v_or_b32_e32 v34, 0x400000, v22
; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v22, v22
; GFX8-NEXT: v_bfe_u32 v21, v5, 16, 1
; GFX8-NEXT: v_cndmask_b32_e32 v22, v33, v34, vcc
; GFX8-NEXT: v_add_u32_e32 v21, vcc, v21, v5
; GFX8-NEXT: v_add_u32_e32 v21, vcc, s4, v21
-; GFX8-NEXT: v_and_b32_e32 v33, 0xff800000, v5
-; GFX8-NEXT: v_or_b32_e32 v33, 0x400000, v33
+; GFX8-NEXT: v_or_b32_e32 v33, 0x400000, v5
; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v5, v5
; GFX8-NEXT: v_cndmask_b32_e32 v5, v21, v33, vcc
; GFX8-NEXT: v_lshlrev_b32_e32 v21, 16, v20
@@ -16140,16 +15852,14 @@ define <32 x bfloat> @v_fmul_v32bf16(<32 x bfloat> %a, <32 x bfloat> %b) {
; GFX8-NEXT: v_and_b32_e32 v20, 0xffff0000, v20
; GFX8-NEXT: v_and_b32_e32 v4, 0xffff0000, v4
; GFX8-NEXT: v_add_u32_e32 v33, vcc, s4, v33
-; GFX8-NEXT: v_and_b32_e32 v34, 0xff800000, v21
; GFX8-NEXT: v_mul_f32_e32 v4, v4, v20
-; GFX8-NEXT: v_or_b32_e32 v34, 0x400000, v34
+; GFX8-NEXT: v_or_b32_e32 v34, 0x400000, v21
; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v21, v21
; GFX8-NEXT: v_bfe_u32 v20, v4, 16, 1
; GFX8-NEXT: v_cndmask_b32_e32 v21, v33, v34, vcc
; GFX8-NEXT: v_add_u32_e32 v20, vcc, v20, v4
; GFX8-NEXT: v_add_u32_e32 v20, vcc, s4, v20
-; GFX8-NEXT: v_and_b32_e32 v33, 0xff800000, v4
-; GFX8-NEXT: v_or_b32_e32 v33, 0x400000, v33
+; GFX8-NEXT: v_or_b32_e32 v33, 0x400000, v4
; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v4, v4
; GFX8-NEXT: v_cndmask_b32_e32 v4, v20, v33, vcc
; GFX8-NEXT: v_lshlrev_b32_e32 v20, 16, v19
@@ -16160,16 +15870,14 @@ define <32 x bfloat> @v_fmul_v32bf16(<32 x bfloat> %a, <32 x bfloat> %b) {
; GFX8-NEXT: v_and_b32_e32 v19, 0xffff0000, v19
; GFX8-NEXT: v_and_b32_e32 v3, 0xffff0000, v3
; GFX8-NEXT: v_add_u32_e32 v33, vcc, s4, v33
-; GFX8-NEXT: v_and_b32_e32 v34, 0xff800000, v20
; GFX8-NEXT: v_mul_f32_e32 v3, v3, v19
-; GFX8-NEXT: v_or_b32_e32 v34, 0x400000, v34
+; GFX8-NEXT: v_or_b32_e32 v34, 0x400000, v20
; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v20, v20
; GFX8-NEXT: v_bfe_u32 v19, v3, 16, 1
; GFX8-NEXT: v_cndmask_b32_e32 v20, v33, v34, vcc
; GFX8-NEXT: v_add_u32_e32 v19, vcc, v19, v3
; GFX8-NEXT: v_add_u32_e32 v19, vcc, s4, v19
-; GFX8-NEXT: v_and_b32_e32 v33, 0xff800000, v3
-; GFX8-NEXT: v_or_b32_e32 v33, 0x400000, v33
+; GFX8-NEXT: v_or_b32_e32 v33, 0x400000, v3
; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v3, v3
; GFX8-NEXT: v_cndmask_b32_e32 v3, v19, v33, vcc
; GFX8-NEXT: v_lshlrev_b32_e32 v19, 16, v18
@@ -16180,16 +15888,14 @@ define <32 x bfloat> @v_fmul_v32bf16(<32 x bfloat> %a, <32 x bfloat> %b) {
; GFX8-NEXT: v_and_b32_e32 v18, 0xffff0000, v18
; GFX8-NEXT: v_and_b32_e32 v2, 0xffff0000, v2
; GFX8-NEXT: v_add_u32_e32 v33, vcc, s4, v33
-; GFX8-NEXT: v_and_b32_e32 v34, 0xff800000, v19
; GFX8-NEXT: v_mul_f32_e32 v2, v2, v18
-; GFX8-NEXT: v_or_b32_e32 v34, 0x400000, v34
+; GFX8-NEXT: v_or_b32_e32 v34, 0x400000, v19
; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v19, v19
; GFX8-NEXT: v_bfe_u32 v18, v2, 16, 1
; GFX8-NEXT: v_cndmask_b32_e32 v19, v33, v34, vcc
; GFX8-NEXT: v_add_u32_e32 v18, vcc, v18, v2
; GFX8-NEXT: v_add_u32_e32 v18, vcc, s4, v18
-; GFX8-NEXT: v_and_b32_e32 v33, 0xff800000, v2
-; GFX8-NEXT: v_or_b32_e32 v33, 0x400000, v33
+; GFX8-NEXT: v_or_b32_e32 v33, 0x400000, v2
; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v2, v2
; GFX8-NEXT: v_cndmask_b32_e32 v2, v18, v33, vcc
; GFX8-NEXT: v_lshlrev_b32_e32 v18, 16, v17
@@ -16200,16 +15906,14 @@ define <32 x bfloat> @v_fmul_v32bf16(<32 x bfloat> %a, <32 x bfloat> %b) {
; GFX8-NEXT: v_and_b32_e32 v17, 0xffff0000, v17
; GFX8-NEXT: v_and_b32_e32 v1, 0xffff0000, v1
; GFX8-NEXT: v_add_u32_e32 v33, vcc, s4, v33
-; GFX8-NEXT: v_and_b32_e32 v34, 0xff800000, v18
; GFX8-NEXT: v_mul_f32_e32 v1, v1, v17
-; GFX8-NEXT: v_or_b32_e32 v34, 0x400000, v34
+; GFX8-NEXT: v_or_b32_e32 v34, 0x400000, v18
; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v18, v18
; GFX8-NEXT: v_bfe_u32 v17, v1, 16, 1
; GFX8-NEXT: v_cndmask_b32_e32 v18, v33, v34, vcc
; GFX8-NEXT: v_add_u32_e32 v17, vcc, v17, v1
; GFX8-NEXT: v_add_u32_e32 v17, vcc, s4, v17
-; GFX8-NEXT: v_and_b32_e32 v33, 0xff800000, v1
-; GFX8-NEXT: v_or_b32_e32 v33, 0x400000, v33
+; GFX8-NEXT: v_or_b32_e32 v33, 0x400000, v1
; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v1, v1
; GFX8-NEXT: v_cndmask_b32_e32 v1, v17, v33, vcc
; GFX8-NEXT: v_lshlrev_b32_e32 v17, 16, v16
@@ -16220,16 +15924,14 @@ define <32 x bfloat> @v_fmul_v32bf16(<32 x bfloat> %a, <32 x bfloat> %b) {
; GFX8-NEXT: v_and_b32_e32 v16, 0xffff0000, v16
; GFX8-NEXT: v_and_b32_e32 v0, 0xffff0000, v0
; GFX8-NEXT: v_add_u32_e32 v33, vcc, s4, v33
-; GFX8-NEXT: v_and_b32_e32 v34, 0xff800000, v17
; GFX8-NEXT: v_mul_f32_e32 v0, v0, v16
-; GFX8-NEXT: v_or_b32_e32 v34, 0x400000, v34
+; GFX8-NEXT: v_or_b32_e32 v34, 0x400000, v17
; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v17, v17
; GFX8-NEXT: v_bfe_u32 v16, v0, 16, 1
; GFX8-NEXT: v_cndmask_b32_e32 v17, v33, v34, vcc
; GFX8-NEXT: v_add_u32_e32 v16, vcc, v16, v0
; GFX8-NEXT: v_add_u32_e32 v16, vcc, s4, v16
-; GFX8-NEXT: v_and_b32_e32 v33, 0xff800000, v0
-; GFX8-NEXT: v_or_b32_e32 v33, 0x400000, v33
+; GFX8-NEXT: v_or_b32_e32 v33, 0x400000, v0
; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v0, v0
; GFX8-NEXT: v_cndmask_b32_e32 v0, v16, v33, vcc
; GFX8-NEXT: v_lshrrev_b32_e32 v0, 16, v0
@@ -16270,292 +15972,260 @@ define <32 x bfloat> @v_fmul_v32bf16(<32 x bfloat> %a, <32 x bfloat> %b) {
; GFX9-NEXT: v_lshlrev_b32_e32 v31, 16, v30
; GFX9-NEXT: v_lshlrev_b32_e32 v32, 16, v14
; GFX9-NEXT: v_mul_f32_e32 v31, v32, v31
-; GFX9-NEXT: s_movk_i32 s4, 0x7fff
-; GFX9-NEXT: v_bfe_u32 v32, v31, 16, 1
-; GFX9-NEXT: v_and_b32_e32 v33, 0xff800000, v31
; GFX9-NEXT: v_and_b32_e32 v30, 0xffff0000, v30
; GFX9-NEXT: v_and_b32_e32 v14, 0xffff0000, v14
+; GFX9-NEXT: s_movk_i32 s4, 0x7fff
+; GFX9-NEXT: v_bfe_u32 v32, v31, 16, 1
+; GFX9-NEXT: v_mul_f32_e32 v14, v14, v30
; GFX9-NEXT: v_add3_u32 v32, v32, v31, s4
-; GFX9-NEXT: v_or_b32_e32 v33, 0x400000, v33
+; GFX9-NEXT: v_or_b32_e32 v33, 0x400000, v31
; GFX9-NEXT: v_cmp_u_f32_e32 vcc, v31, v31
-; GFX9-NEXT: v_mul_f32_e32 v14, v14, v30
-; GFX9-NEXT: v_cndmask_b32_e32 v31, v32, v33, vcc
; GFX9-NEXT: v_bfe_u32 v30, v14, 16, 1
-; GFX9-NEXT: v_and_b32_e32 v32, 0xff800000, v14
+; GFX9-NEXT: v_cndmask_b32_e32 v31, v32, v33, vcc
; GFX9-NEXT: v_add3_u32 v30, v30, v14, s4
-; GFX9-NEXT: v_or_b32_e32 v32, 0x400000, v32
+; GFX9-NEXT: v_or_b32_e32 v32, 0x400000, v14
; GFX9-NEXT: v_cmp_u_f32_e32 vcc, v14, v14
; GFX9-NEXT: v_cndmask_b32_e32 v14, v30, v32, vcc
; GFX9-NEXT: v_lshlrev_b32_e32 v30, 16, v29
; GFX9-NEXT: v_lshlrev_b32_e32 v32, 16, v13
+; GFX9-NEXT: v_mul_f32_e32 v30, v32, v30
; GFX9-NEXT: v_and_b32_e32 v29, 0xffff0000, v29
; GFX9-NEXT: v_and_b32_e32 v13, 0xffff0000, v13
-; GFX9-NEXT: v_mul_f32_e32 v13, v13, v29
-; GFX9-NEXT: buffer_load_dword v29, off, s[0:3], s32
-; GFX9-NEXT: v_mul_f32_e32 v30, v32, v30
; GFX9-NEXT: v_bfe_u32 v32, v30, 16, 1
-; GFX9-NEXT: v_and_b32_e32 v33, 0xff800000, v30
+; GFX9-NEXT: v_mul_f32_e32 v13, v13, v29
; GFX9-NEXT: v_add3_u32 v32, v32, v30, s4
-; GFX9-NEXT: v_or_b32_e32 v33, 0x400000, v33
+; GFX9-NEXT: v_or_b32_e32 v33, 0x400000, v30
; GFX9-NEXT: v_cmp_u_f32_e32 vcc, v30, v30
+; GFX9-NEXT: v_bfe_u32 v29, v13, 16, 1
; GFX9-NEXT: v_cndmask_b32_e32 v30, v32, v33, vcc
-; GFX9-NEXT: v_lshlrev_b32_e32 v32, 16, v15
+; GFX9-NEXT: v_add3_u32 v29, v29, v13, s4
+; GFX9-NEXT: v_or_b32_e32 v32, 0x400000, v13
+; GFX9-NEXT: v_cmp_u_f32_e32 vcc, v13, v13
+; GFX9-NEXT: v_cndmask_b32_e32 v13, v29, v32, vcc
+; GFX9-NEXT: v_lshlrev_b32_e32 v29, 16, v28
+; GFX9-NEXT: v_lshlrev_b32_e32 v32, 16, v12
+; GFX9-NEXT: v_mul_f32_e32 v32, v32, v29
+; GFX9-NEXT: buffer_load_dword v29, off, s[0:3], s32
+; GFX9-NEXT: v_lshlrev_b32_e32 v33, 16, v15
; GFX9-NEXT: v_and_b32_e32 v15, 0xffff0000, v15
+; GFX9-NEXT: v_and_b32_e32 v28, 0xffff0000, v28
+; GFX9-NEXT: v_and_b32_e32 v12, 0xffff0000, v12
+; GFX9-NEXT: v_mul_f32_e32 v12, v12, v28
+; GFX9-NEXT: v_bfe_u32 v28, v12, 16, 1
+; GFX9-NEXT: v_add3_u32 v28, v28, v12, s4
; GFX9-NEXT: s_waitcnt vmcnt(0)
-; GFX9-NEXT: v_lshlrev_b32_e32 v33, 16, v29
-; GFX9-NEXT: v_mul_f32_e32 v32, v32, v33
+; GFX9-NEXT: v_lshlrev_b32_e32 v34, 16, v29
+; GFX9-NEXT: v_mul_f32_e32 v33, v33, v34
; GFX9-NEXT: v_and_b32_e32 v29, 0xffff0000, v29
; GFX9-NEXT: v_mul_f32_e32 v29, v15, v29
-; GFX9-NEXT: v_bfe_u32 v15, v32, 16, 1
-; GFX9-NEXT: v_and_b32_e32 v33, 0xff800000, v32
-; GFX9-NEXT: v_add3_u32 v15, v15, v32, s4
-; GFX9-NEXT: v_or_b32_e32 v33, 0x400000, v33
-; GFX9-NEXT: v_cmp_u_f32_e32 vcc, v32, v32
-; GFX9-NEXT: v_cndmask_b32_e32 v15, v15, v33, vcc
-; GFX9-NEXT: v_bfe_u32 v32, v29, 16, 1
-; GFX9-NEXT: v_and_b32_e32 v33, 0xff800000, v29
-; GFX9-NEXT: v_add3_u32 v32, v32, v29, s4
-; GFX9-NEXT: v_or_b32_e32 v33, 0x400000, v33
+; GFX9-NEXT: v_bfe_u32 v15, v33, 16, 1
+; GFX9-NEXT: v_add3_u32 v15, v15, v33, s4
+; GFX9-NEXT: v_or_b32_e32 v34, 0x400000, v33
+; GFX9-NEXT: v_cmp_u_f32_e32 vcc, v33, v33
+; GFX9-NEXT: v_bfe_u32 v33, v29, 16, 1
+; GFX9-NEXT: v_cndmask_b32_e32 v15, v15, v34, vcc
+; GFX9-NEXT: v_add3_u32 v33, v33, v29, s4
+; GFX9-NEXT: v_or_b32_e32 v34, 0x400000, v29
; GFX9-NEXT: v_cmp_u_f32_e32 vcc, v29, v29
-; GFX9-NEXT: v_cndmask_b32_e32 v29, v32, v33, vcc
-; GFX9-NEXT: v_bfe_u32 v32, v13, 16, 1
-; GFX9-NEXT: v_and_b32_e32 v33, 0xff800000, v13
-; GFX9-NEXT: v_add3_u32 v32, v32, v13, s4
-; GFX9-NEXT: v_or_b32_e32 v33, 0x400000, v33
-; GFX9-NEXT: v_cmp_u_f32_e32 vcc, v13, v13
-; GFX9-NEXT: v_cndmask_b32_e32 v13, v32, v33, vcc
-; GFX9-NEXT: v_lshlrev_b32_e32 v32, 16, v28
-; GFX9-NEXT: v_lshlrev_b32_e32 v33, 16, v12
-; GFX9-NEXT: v_mul_f32_e32 v32, v33, v32
+; GFX9-NEXT: v_cndmask_b32_e32 v29, v33, v34, vcc
; GFX9-NEXT: v_bfe_u32 v33, v32, 16, 1
-; GFX9-NEXT: v_and_b32_e32 v34, 0xff800000, v32
-; GFX9-NEXT: v_and_b32_e32 v28, 0xffff0000, v28
-; GFX9-NEXT: v_and_b32_e32 v12, 0xffff0000, v12
; GFX9-NEXT: v_add3_u32 v33, v33, v32, s4
-; GFX9-NEXT: v_or_b32_e32 v34, 0x400000, v34
+; GFX9-NEXT: v_or_b32_e32 v34, 0x400000, v32
; GFX9-NEXT: v_cmp_u_f32_e32 vcc, v32, v32
-; GFX9-NEXT: v_mul_f32_e32 v12, v12, v28
; GFX9-NEXT: v_cndmask_b32_e32 v32, v33, v34, vcc
-; GFX9-NEXT: v_bfe_u32 v28, v12, 16, 1
-; GFX9-NEXT: v_and_b32_e32 v33, 0xff800000, v12
-; GFX9-NEXT: v_add3_u32 v28, v28, v12, s4
-; GFX9-NEXT: v_or_b32_e32 v33, 0x400000, v33
+; GFX9-NEXT: v_or_b32_e32 v33, 0x400000, v12
; GFX9-NEXT: v_cmp_u_f32_e32 vcc, v12, v12
; GFX9-NEXT: v_cndmask_b32_e32 v12, v28, v33, vcc
; GFX9-NEXT: v_lshlrev_b32_e32 v28, 16, v27
; GFX9-NEXT: v_lshlrev_b32_e32 v33, 16, v11
; GFX9-NEXT: v_mul_f32_e32 v28, v33, v28
-; GFX9-NEXT: v_bfe_u32 v33, v28, 16, 1
-; GFX9-NEXT: v_and_b32_e32 v34, 0xff800000, v28
; GFX9-NEXT: v_and_b32_e32 v27, 0xffff0000, v27
; GFX9-NEXT: v_and_b32_e32 v11, 0xffff0000, v11
+; GFX9-NEXT: v_bfe_u32 v33, v28, 16, 1
+; GFX9-NEXT: v_mul_f32_e32 v11, v11, v27
; GFX9-NEXT: v_add3_u32 v33, v33, v28, s4
-; GFX9-NEXT: v_or_b32_e32 v34, 0x400000, v34
+; GFX9-NEXT: v_or_b32_e32 v34, 0x400000, v28
; GFX9-NEXT: v_cmp_u_f32_e32 vcc, v28, v28
-; GFX9-NEXT: v_mul_f32_e32 v11, v11, v27
-; GFX9-NEXT: v_cndmask_b32_e32 v28, v33, v34, vcc
; GFX9-NEXT: v_bfe_u32 v27, v11, 16, 1
-; GFX9-NEXT: v_and_b32_e32 v33, 0xff800000, v11
+; GFX9-NEXT: v_cndmask_b32_e32 v28, v33, v34, vcc
; GFX9-NEXT: v_add3_u32 v27, v27, v11, s4
-; GFX9-NEXT: v_or_b32_e32 v33, 0x400000, v33
+; GFX9-NEXT: v_or_b32_e32 v33, 0x400000, v11
; GFX9-NEXT: v_cmp_u_f32_e32 vcc, v11, v11
; GFX9-NEXT: v_cndmask_b32_e32 v11, v27, v33, vcc
; GFX9-NEXT: v_lshlrev_b32_e32 v27, 16, v26
; GFX9-NEXT: v_lshlrev_b32_e32 v33, 16, v10
; GFX9-NEXT: v_mul_f32_e32 v27, v33, v27
-; GFX9-NEXT: v_bfe_u32 v33, v27, 16, 1
-; GFX9-NEXT: v_and_b32_e32 v34, 0xff800000, v27
; GFX9-NEXT: v_and_b32_e32 v26, 0xffff0000, v26
; GFX9-NEXT: v_and_b32_e32 v10, 0xffff0000, v10
+; GFX9-NEXT: v_bfe_u32 v33, v27, 16, 1
+; GFX9-NEXT: v_mul_f32_e32 v10, v10, v26
; GFX9-NEXT: v_add3_u32 v33, v33, v27, s4
-; GFX9-NEXT: v_or_b32_e32 v34, 0x400000, v34
+; GFX9-NEXT: v_or_b32_e32 v34, 0x400000, v27
; GFX9-NEXT: v_cmp_u_f32_e32 vcc, v27, v27
-; GFX9-NEXT: v_mul_f32_e32 v10, v10, v26
-; GFX9-NEXT: v_cndmask_b32_e32 v27, v33, v34, vcc
; GFX9-NEXT: v_bfe_u32 v26, v10, 16, 1
-; GFX9-NEXT: v_and_b32_e32 v33, 0xff800000, v10
+; GFX9-NEXT: v_cndmask_b32_e32 v27, v33, v34, vcc
; GFX9-NEXT: v_add3_u32 v26, v26, v10, s4
-; GFX9-NEXT: v_or_b32_e32 v33, 0x400000, v33
+; GFX9-NEXT: v_or_b32_e32 v33, 0x400000, v10
; GFX9-NEXT: v_cmp_u_f32_e32 vcc, v10, v10
; GFX9-NEXT: v_cndmask_b32_e32 v10, v26, v33, vcc
; GFX9-NEXT: v_lshlrev_b32_e32 v26, 16, v25
; GFX9-NEXT: v_lshlrev_b32_e32 v33, 16, v9
; GFX9-NEXT: v_mul_f32_e32 v26, v33, v26
-; GFX9-NEXT: v_bfe_u32 v33, v26, 16, 1
-; GFX9-NEXT: v_and_b32_e32 v34, 0xff800000, v26
; GFX9-NEXT: v_and_b32_e32 v25, 0xffff0000, v25
; GFX9-NEXT: v_and_b32_e32 v9, 0xffff0000, v9
+; GFX9-NEXT: v_bfe_u32 v33, v26, 16, 1
+; GFX9-NEXT: v_mul_f32_e32 v9, v9, v25
; GFX9-NEXT: v_add3_u32 v33, v33, v26, s4
-; GFX9-NEXT: v_or_b32_e32 v34, 0x400000, v34
+; GFX9-NEXT: v_or_b32_e32 v34, 0x400000, v26
; GFX9-NEXT: v_cmp_u_f32_e32 vcc, v26, v26
-; GFX9-NEXT: v_mul_f32_e32 v9, v9, v25
-; GFX9-NEXT: v_cndmask_b32_e32 v26, v33, v34, vcc
; GFX9-NEXT: v_bfe_u32 v25, v9, 16, 1
-; GFX9-NEXT: v_and_b32_e32 v33, 0xff800000, v9
+; GFX9-NEXT: v_cndmask_b32_e32 v26, v33, v34, vcc
; GFX9-NEXT: v_add3_u32 v25, v25, v9, s4
-; GFX9-NEXT: v_or_b32_e32 v33, 0x400000, v33
+; GFX9-NEXT: v_or_b32_e32 v33, 0x400000, v9
; GFX9-NEXT: v_cmp_u_f32_e32 vcc, v9, v9
; GFX9-NEXT: v_cndmask_b32_e32 v9, v25, v33, vcc
; GFX9-NEXT: v_lshlrev_b32_e32 v25, 16, v24
; GFX9-NEXT: v_lshlrev_b32_e32 v33, 16, v8
; GFX9-NEXT: v_mul_f32_e32 v25, v33, v25
-; GFX9-NEXT: v_bfe_u32 v33, v25, 16, 1
-; GFX9-NEXT: v_and_b32_e32 v34, 0xff800000, v25
; GFX9-NEXT: v_and_b32_e32 v24, 0xffff0000, v24
; GFX9-NEXT: v_and_b32_e32 v8, 0xffff0000, v8
+; GFX9-NEXT: v_bfe_u32 v33, v25, 16, 1
+; GFX9-NEXT: v_mul_f32_e32 v8, v8, v24
; GFX9-NEXT: v_add3_u32 v33, v33, v25, s4
-; GFX9-NEXT: v_or_b32_e32 v34, 0x400000, v34
+; GFX9-NEXT: v_or_b32_e32 v34, 0x400000, v25
; GFX9-NEXT: v_cmp_u_f32_e32 vcc, v25, v25
-; GFX9-NEXT: v_mul_f32_e32 v8, v8, v24
-; GFX9-NEXT: v_cndmask_b32_e32 v25, v33, v34, vcc
; GFX9-NEXT: v_bfe_u32 v24, v8, 16, 1
-; GFX9-NEXT: v_and_b32_e32 v33, 0xff800000, v8
+; GFX9-NEXT: v_cndmask_b32_e32 v25, v33, v34, vcc
; GFX9-NEXT: v_add3_u32 v24, v24, v8, s4
-; GFX9-NEXT: v_or_b32_e32 v33, 0x400000, v33
+; GFX9-NEXT: v_or_b32_e32 v33, 0x400000, v8
; GFX9-NEXT: v_cmp_u_f32_e32 vcc, v8, v8
; GFX9-NEXT: v_cndmask_b32_e32 v8, v24, v33, vcc
; GFX9-NEXT: v_lshlrev_b32_e32 v24, 16, v23
; GFX9-NEXT: v_lshlrev_b32_e32 v33, 16, v7
; GFX9-NEXT: v_mul_f32_e32 v24, v33, v24
-; GFX9-NEXT: v_bfe_u32 v33, v24, 16, 1
-; GFX9-NEXT: v_and_b32_e32 v34, 0xff800000, v24
; GFX9-NEXT: v_and_b32_e32 v23, 0xffff0000, v23
; GFX9-NEXT: v_and_b32_e32 v7, 0xffff0000, v7
+; GFX9-NEXT: v_bfe_u32 v33, v24, 16, 1
+; GFX9-NEXT: v_mul_f32_e32 v7, v7, v23
; GFX9-NEXT: v_add3_u32 v33, v33, v24, s4
-; GFX9-NEXT: v_or_b32_e32 v34, 0x400000, v34
+; GFX9-NEXT: v_or_b32_e32 v34, 0x400000, v24
; GFX9-NEXT: v_cmp_u_f32_e32 vcc, v24, v24
-; GFX9-NEXT: v_mul_f32_e32 v7, v7, v23
-; GFX9-NEXT: v_cndmask_b32_e32 v24, v33, v34, vcc
; GFX9-NEXT: v_bfe_u32 v23, v7, 16, 1
-; GFX9-NEXT: v_and_b32_e32 v33, 0xff800000, v7
+; GFX9-NEXT: v_cndmask_b32_e32 v24, v33, v34, vcc
; GFX9-NEXT: v_add3_u32 v23, v23, v7, s4
-; GFX9-NEXT: v_or_b32_e32 v33, 0x400000, v33
+; GFX9-NEXT: v_or_b32_e32 v33, 0x400000, v7
; GFX9-NEXT: v_cmp_u_f32_e32 vcc, v7, v7
; GFX9-NEXT: v_cndmask_b32_e32 v7, v23, v33, vcc
; GFX9-NEXT: v_lshlrev_b32_e32 v23, 16, v22
; GFX9-NEXT: v_lshlrev_b32_e32 v33, 16, v6
; GFX9-NEXT: v_mul_f32_e32 v23, v33, v23
-; GFX9-NEXT: v_bfe_u32 v33, v23, 16, 1
-; GFX9-NEXT: v_and_b32_e32 v34, 0xff800000, v23
; GFX9-NEXT: v_and_b32_e32 v22, 0xffff0000, v22
; GFX9-NEXT: v_and_b32_e32 v6, 0xffff0000, v6
+; GFX9-NEXT: v_bfe_u32 v33, v23, 16, 1
+; GFX9-NEXT: v_mul_f32_e32 v6, v6, v22
; GFX9-NEXT: v_add3_u32 v33, v33, v23, s4
-; GFX9-NEXT: v_or_b32_e32 v34, 0x400000, v34
+; GFX9-NEXT: v_or_b32_e32 v34, 0x400000, v23
; GFX9-NEXT: v_cmp_u_f32_e32 vcc, v23, v23
-; GFX9-NEXT: v_mul_f32_e32 v6, v6, v22
-; GFX9-NEXT: v_cndmask_b32_e32 v23, v33, v34, vcc
; GFX9-NEXT: v_bfe_u32 v22, v6, 16, 1
-; GFX9-NEXT: v_and_b32_e32 v33, 0xff800000, v6
+; GFX9-NEXT: v_cndmask_b32_e32 v23, v33, v34, vcc
; GFX9-NEXT: v_add3_u32 v22, v22, v6, s4
-; GFX9-NEXT: v_or_b32_e32 v33, 0x400000, v33
+; GFX9-NEXT: v_or_b32_e32 v33, 0x400000, v6
; GFX9-NEXT: v_cmp_u_f32_e32 vcc, v6, v6
; GFX9-NEXT: v_cndmask_b32_e32 v6, v22, v33, vcc
; GFX9-NEXT: v_lshlrev_b32_e32 v22, 16, v21
; GFX9-NEXT: v_lshlrev_b32_e32 v33, 16, v5
; GFX9-NEXT: v_mul_f32_e32 v22, v33, v22
-; GFX9-NEXT: v_bfe_u32 v33, v22, 16, 1
-; GFX9-NEXT: v_and_b32_e32 v34, 0xff800000, v22
; GFX9-NEXT: v_and_b32_e32 v21, 0xffff0000, v21
; GFX9-NEXT: v_and_b32_e32 v5, 0xffff0000, v5
+; GFX9-NEXT: v_bfe_u32 v33, v22, 16, 1
+; GFX9-NEXT: v_mul_f32_e32 v5, v5, v21
; GFX9-NEXT: v_add3_u32 v33, v33, v22, s4
-; GFX9-NEXT: v_or_b32_e32 v34, 0x400000, v34
+; GFX9-NEXT: v_or_b32_e32 v34, 0x400000, v22
; GFX9-NEXT: v_cmp_u_f32_e32 vcc, v22, v22
-; GFX9-NEXT: v_mul_f32_e32 v5, v5, v21
-; GFX9-NEXT: v_cndmask_b32_e32 v22, v33, v34, vcc
; GFX9-NEXT: v_bfe_u32 v21, v5, 16, 1
-; GFX9-NEXT: v_and_b32_e32 v33, 0xff800000, v5
+; GFX9-NEXT: v_cndmask_b32_e32 v22, v33, v34, vcc
; GFX9-NEXT: v_add3_u32 v21, v21, v5, s4
-; GFX9-NEXT: v_or_b32_e32 v33, 0x400000, v33
+; GFX9-NEXT: v_or_b32_e32 v33, 0x400000, v5
; GFX9-NEXT: v_cmp_u_f32_e32 vcc, v5, v5
; GFX9-NEXT: v_cndmask_b32_e32 v5, v21, v33, vcc
; GFX9-NEXT: v_lshlrev_b32_e32 v21, 16, v20
; GFX9-NEXT: v_lshlrev_b32_e32 v33, 16, v4
; GFX9-NEXT: v_mul_f32_e32 v21, v33, v21
-; GFX9-NEXT: v_bfe_u32 v33, v21, 16, 1
-; GFX9-NEXT: v_and_b32_e32 v34, 0xff800000, v21
; GFX9-NEXT: v_and_b32_e32 v20, 0xffff0000, v20
; GFX9-NEXT: v_and_b32_e32 v4, 0xffff0000, v4
+; GFX9-NEXT: v_bfe_u32 v33, v21, 16, 1
+; GFX9-NEXT: v_mul_f32_e32 v4, v4, v20
; GFX9-NEXT: v_add3_u32 v33, v33, v21, s4
-; GFX9-NEXT: v_or_b32_e32 v34, 0x400000, v34
+; GFX9-NEXT: v_or_b32_e32 v34, 0x400000, v21
; GFX9-NEXT: v_cmp_u_f32_e32 vcc, v21, v21
-; GFX9-NEXT: v_mul_f32_e32 v4, v4, v20
-; GFX9-NEXT: v_cndmask_b32_e32 v21, v33, v34, vcc
; GFX9-NEXT: v_bfe_u32 v20, v4, 16, 1
-; GFX9-NEXT: v_and_b32_e32 v33, 0xff800000, v4
+; GFX9-NEXT: v_cndmask_b32_e32 v21, v33, v34, vcc
; GFX9-NEXT: v_add3_u32 v20, v20, v4, s4
-; GFX9-NEXT: v_or_b32_e32 v33, 0x400000, v33
+; GFX9-NEXT: v_or_b32_e32 v33, 0x400000, v4
; GFX9-NEXT: v_cmp_u_f32_e32 vcc, v4, v4
; GFX9-NEXT: v_cndmask_b32_e32 v4, v20, v33, vcc
; GFX9-NEXT: v_lshlrev_b32_e32 v20, 16, v19
; GFX9-NEXT: v_lshlrev_b32_e32 v33, 16, v3
; GFX9-NEXT: v_mul_f32_e32 v20, v33, v20
-; GFX9-NEXT: v_bfe_u32 v33, v20, 16, 1
-; GFX9-NEXT: v_and_b32_e32 v34, 0xff800000, v20
; GFX9-NEXT: v_and_b32_e32 v19, 0xffff0000, v19
; GFX9-NEXT: v_and_b32_e32 v3, 0xffff0000, v3
+; GFX9-NEXT: v_bfe_u32 v33, v20, 16, 1
+; GFX9-NEXT: v_mul_f32_e32 v3, v3, v19
; GFX9-NEXT: v_add3_u32 v33, v33, v20, s4
-; GFX9-NEXT: v_or_b32_e32 v34, 0x400000, v34
+; GFX9-NEXT: v_or_b32_e32 v34, 0x400000, v20
; GFX9-NEXT: v_cmp_u_f32_e32 vcc, v20, v20
-; GFX9-NEXT: v_mul_f32_e32 v3, v3, v19
-; GFX9-NEXT: v_cndmask_b32_e32 v20, v33, v34, vcc
; GFX9-NEXT: v_bfe_u32 v19, v3, 16, 1
-; GFX9-NEXT: v_and_b32_e32 v33, 0xff800000, v3
+; GFX9-NEXT: v_cndmask_b32_e32 v20, v33, v34, vcc
; GFX9-NEXT: v_add3_u32 v19, v19, v3, s4
-; GFX9-NEXT: v_or_b32_e32 v33, 0x400000, v33
+; GFX9-NEXT: v_or_b32_e32 v33, 0x400000, v3
; GFX9-NEXT: v_cmp_u_f32_e32 vcc, v3, v3
; GFX9-NEXT: v_cndmask_b32_e32 v3, v19, v33, vcc
; GFX9-NEXT: v_lshlrev_b32_e32 v19, 16, v18
; GFX9-NEXT: v_lshlrev_b32_e32 v33, 16, v2
; GFX9-NEXT: v_mul_f32_e32 v19, v33, v19
-; GFX9-NEXT: v_bfe_u32 v33, v19, 16, 1
-; GFX9-NEXT: v_and_b32_e32 v34, 0xff800000, v19
; GFX9-NEXT: v_and_b32_e32 v18, 0xffff0000, v18
; GFX9-NEXT: v_and_b32_e32 v2, 0xffff0000, v2
+; GFX9-NEXT: v_bfe_u32 v33, v19, 16, 1
+; GFX9-NEXT: v_mul_f32_e32 v2, v2, v18
; GFX9-NEXT: v_add3_u32 v33, v33, v19, s4
-; GFX9-NEXT: v_or_b32_e32 v34, 0x400000, v34
+; GFX9-NEXT: v_or_b32_e32 v34, 0x400000, v19
; GFX9-NEXT: v_cmp_u_f32_e32 vcc, v19, v19
-; GFX9-NEXT: v_mul_f32_e32 v2, v2, v18
-; GFX9-NEXT: v_cndmask_b32_e32 v19, v33, v34, vcc
; GFX9-NEXT: v_bfe_u32 v18, v2, 16, 1
-; GFX9-NEXT: v_and_b32_e32 v33, 0xff800000, v2
+; GFX9-NEXT: v_cndmask_b32_e32 v19, v33, v34, vcc
; GFX9-NEXT: v_add3_u32 v18, v18, v2, s4
-; GFX9-NEXT: v_or_b32_e32 v33, 0x400000, v33
+; GFX9-NEXT: v_or_b32_e32 v33, 0x400000, v2
; GFX9-NEXT: v_cmp_u_f32_e32 vcc, v2, v2
; GFX9-NEXT: v_cndmask_b32_e32 v2, v18, v33, vcc
; GFX9-NEXT: v_lshlrev_b32_e32 v18, 16, v17
; GFX9-NEXT: v_lshlrev_b32_e32 v33, 16, v1
; GFX9-NEXT: v_mul_f32_e32 v18, v33, v18
-; GFX9-NEXT: v_bfe_u32 v33, v18, 16, 1
-; GFX9-NEXT: v_and_b32_e32 v34, 0xff800000, v18
; GFX9-NEXT: v_and_b32_e32 v17, 0xffff0000, v17
; GFX9-NEXT: v_and_b32_e32 v1, 0xffff0000, v1
+; GFX9-NEXT: v_bfe_u32 v33, v18, 16, 1
+; GFX9-NEXT: v_mul_f32_e32 v1, v1, v17
; GFX9-NEXT: v_add3_u32 v33, v33, v18, s4
-; GFX9-NEXT: v_or_b32_e32 v34, 0x400000, v34
+; GFX9-NEXT: v_or_b32_e32 v34, 0x400000, v18
; GFX9-NEXT: v_cmp_u_f32_e32 vcc, v18, v18
-; GFX9-NEXT: v_mul_f32_e32 v1, v1, v17
-; GFX9-NEXT: v_cndmask_b32_e32 v18, v33, v34, vcc
; GFX9-NEXT: v_bfe_u32 v17, v1, 16, 1
-; GFX9-NEXT: v_and_b32_e32 v33, 0xff800000, v1
+; GFX9-NEXT: v_cndmask_b32_e32 v18, v33, v34, vcc
; GFX9-NEXT: v_add3_u32 v17, v17, v1, s4
-; GFX9-NEXT: v_or_b32_e32 v33, 0x400000, v33
+; GFX9-NEXT: v_or_b32_e32 v33, 0x400000, v1
; GFX9-NEXT: v_cmp_u_f32_e32 vcc, v1, v1
; GFX9-NEXT: v_cndmask_b32_e32 v1, v17, v33, vcc
; GFX9-NEXT: v_lshlrev_b32_e32 v17, 16, v16
; GFX9-NEXT: v_lshlrev_b32_e32 v33, 16, v0
; GFX9-NEXT: v_mul_f32_e32 v17, v33, v17
-; GFX9-NEXT: v_bfe_u32 v33, v17, 16, 1
-; GFX9-NEXT: v_and_b32_e32 v34, 0xff800000, v17
; GFX9-NEXT: v_and_b32_e32 v16, 0xffff0000, v16
; GFX9-NEXT: v_and_b32_e32 v0, 0xffff0000, v0
+; GFX9-NEXT: v_bfe_u32 v33, v17, 16, 1
+; GFX9-NEXT: v_mul_f32_e32 v0, v0, v16
; GFX9-NEXT: v_add3_u32 v33, v33, v17, s4
-; GFX9-NEXT: v_or_b32_e32 v34, 0x400000, v34
+; GFX9-NEXT: v_or_b32_e32 v34, 0x400000, v17
; GFX9-NEXT: v_cmp_u_f32_e32 vcc, v17, v17
-; GFX9-NEXT: v_mul_f32_e32 v0, v0, v16
-; GFX9-NEXT: v_cndmask_b32_e32 v17, v33, v34, vcc
; GFX9-NEXT: v_bfe_u32 v16, v0, 16, 1
-; GFX9-NEXT: v_and_b32_e32 v33, 0xff800000, v0
+; GFX9-NEXT: v_cndmask_b32_e32 v17, v33, v34, vcc
; GFX9-NEXT: v_add3_u32 v16, v16, v0, s4
-; GFX9-NEXT: v_or_b32_e32 v33, 0x400000, v33
+; GFX9-NEXT: v_or_b32_e32 v33, 0x400000, v0
; GFX9-NEXT: v_cmp_u_f32_e32 vcc, v0, v0
; GFX9-NEXT: v_cndmask_b32_e32 v0, v16, v33, vcc
; GFX9-NEXT: s_mov_b32 s4, 0x7060302
@@ -16580,7 +16250,7 @@ define <32 x bfloat> @v_fmul_v32bf16(<32 x bfloat> %a, <32 x bfloat> %b) {
; GFX10-LABEL: v_fmul_v32bf16:
; GFX10: ; %bb.0:
; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX10-NEXT: buffer_load_dword v31, off, s[0:3], s32
+; GFX10-NEXT: buffer_load_dword v32, off, s[0:3], s32
; GFX10-NEXT: v_lshlrev_b32_e32 v39, 16, v27
; GFX10-NEXT: v_lshlrev_b32_e32 v48, 16, v11
; GFX10-NEXT: v_and_b32_e32 v27, 0xffff0000, v27
@@ -16645,7 +16315,6 @@ define <32 x bfloat> @v_fmul_v32bf16(<32 x bfloat> %a, <32 x bfloat> %b) {
; GFX10-NEXT: v_bfe_u32 v55, v11, 16, 1
; GFX10-NEXT: v_bfe_u32 v65, v49, 16, 1
; GFX10-NEXT: v_bfe_u32 v67, v10, 16, 1
-; GFX10-NEXT: s_mov_b32 s23, 0xff800000
; GFX10-NEXT: v_lshlrev_b32_e32 v33, 16, v30
; GFX10-NEXT: v_lshlrev_b32_e32 v34, 16, v14
; GFX10-NEXT: v_and_b32_e32 v30, 0xffff0000, v30
@@ -16661,10 +16330,10 @@ define <32 x bfloat> @v_fmul_v32bf16(<32 x bfloat> %a, <32 x bfloat> %b) {
; GFX10-NEXT: v_mul_f32_e32 v1, v1, v17
; GFX10-NEXT: v_mul_f32_e32 v17, v26, v50
; GFX10-NEXT: v_mul_f32_e32 v0, v0, v16
-; GFX10-NEXT: v_and_or_b32 v54, v39, s23, 0x400000
-; GFX10-NEXT: v_and_or_b32 v64, v11, s23, 0x400000
-; GFX10-NEXT: v_and_or_b32 v66, v49, s23, 0x400000
-; GFX10-NEXT: v_and_or_b32 v68, v10, s23, 0x400000
+; GFX10-NEXT: v_or_b32_e32 v54, 0x400000, v39
+; GFX10-NEXT: v_or_b32_e32 v64, 0x400000, v11
+; GFX10-NEXT: v_or_b32_e32 v66, 0x400000, v49
+; GFX10-NEXT: v_or_b32_e32 v68, 0x400000, v10
; GFX10-NEXT: v_cmp_u_f32_e64 s9, v39, v39
; GFX10-NEXT: v_add3_u32 v39, v53, v39, 0x7fff
; GFX10-NEXT: v_cmp_u_f32_e64 s10, v11, v11
@@ -16702,28 +16371,28 @@ define <32 x bfloat> @v_fmul_v32bf16(<32 x bfloat> %a, <32 x bfloat> %b) {
; GFX10-NEXT: v_bfe_u32 v27, v14, 16, 1
; GFX10-NEXT: v_bfe_u32 v29, v35, 16, 1
; GFX10-NEXT: v_bfe_u32 v34, v13, 16, 1
-; GFX10-NEXT: v_and_or_b32 v48, v37, s23, 0x400000
-; GFX10-NEXT: v_and_or_b32 v52, v12, s23, 0x400000
+; GFX10-NEXT: v_or_b32_e32 v48, 0x400000, v37
+; GFX10-NEXT: v_or_b32_e32 v52, 0x400000, v12
; GFX10-NEXT: v_cmp_u_f32_e64 s7, v37, v37
; GFX10-NEXT: v_add3_u32 v37, v38, v37, 0x7fff
; GFX10-NEXT: v_cmp_u_f32_e64 s8, v12, v12
; GFX10-NEXT: v_add3_u32 v12, v50, v12, 0x7fff
; GFX10-NEXT: v_cmp_u_f32_e64 s10, v18, v18
; GFX10-NEXT: v_add3_u32 v54, v54, v18, 0x7fff
-; GFX10-NEXT: v_and_or_b32 v18, v18, s23, 0x400000
+; GFX10-NEXT: v_or_b32_e32 v18, 0x400000, v18
; GFX10-NEXT: v_cmp_u_f32_e64 s11, v1, v1
; GFX10-NEXT: v_add3_u32 v64, v64, v1, 0x7fff
-; GFX10-NEXT: v_and_or_b32 v1, v1, s23, 0x400000
+; GFX10-NEXT: v_or_b32_e32 v1, 0x400000, v1
; GFX10-NEXT: v_cmp_u_f32_e64 s12, v17, v17
; GFX10-NEXT: v_add3_u32 v66, v66, v17, 0x7fff
-; GFX10-NEXT: v_and_or_b32 v17, v17, s23, 0x400000
+; GFX10-NEXT: v_or_b32_e32 v17, 0x400000, v17
; GFX10-NEXT: v_cmp_u_f32_e64 s22, v0, v0
; GFX10-NEXT: v_add3_u32 v68, v68, v0, 0x7fff
-; GFX10-NEXT: v_and_or_b32 v0, v0, s23, 0x400000
-; GFX10-NEXT: v_and_or_b32 v26, v33, s23, 0x400000
-; GFX10-NEXT: v_and_or_b32 v28, v14, s23, 0x400000
-; GFX10-NEXT: v_and_or_b32 v30, v35, s23, 0x400000
-; GFX10-NEXT: v_and_or_b32 v36, v13, s23, 0x400000
+; GFX10-NEXT: v_or_b32_e32 v0, 0x400000, v0
+; GFX10-NEXT: v_or_b32_e32 v26, 0x400000, v33
+; GFX10-NEXT: v_or_b32_e32 v28, 0x400000, v14
+; GFX10-NEXT: v_or_b32_e32 v30, 0x400000, v35
+; GFX10-NEXT: v_or_b32_e32 v36, 0x400000, v13
; GFX10-NEXT: v_cmp_u_f32_e32 vcc_lo, v33, v33
; GFX10-NEXT: v_add3_u32 v16, v16, v33, 0x7fff
; GFX10-NEXT: v_bfe_u32 v33, v51, 16, 1
@@ -16742,12 +16411,12 @@ define <32 x bfloat> @v_fmul_v32bf16(<32 x bfloat> %a, <32 x bfloat> %b) {
; GFX10-NEXT: v_cndmask_b32_e64 v17, v66, v17, s12
; GFX10-NEXT: v_cndmask_b32_e64 v0, v68, v0, s22
; GFX10-NEXT: v_cndmask_b32_e64 v1, v64, v1, s11
-; GFX10-NEXT: v_lshlrev_b32_e32 v32, 16, v15
+; GFX10-NEXT: v_lshlrev_b32_e32 v31, 16, v15
; GFX10-NEXT: v_and_b32_e32 v15, 0xffff0000, v15
-; GFX10-NEXT: v_and_or_b32 v27, v51, s23, 0x400000
+; GFX10-NEXT: v_or_b32_e32 v27, 0x400000, v51
; GFX10-NEXT: v_bfe_u32 v35, v9, 16, 1
; GFX10-NEXT: v_bfe_u32 v38, v25, 16, 1
-; GFX10-NEXT: v_and_or_b32 v67, v24, s23, 0x400000
+; GFX10-NEXT: v_or_b32_e32 v67, 0x400000, v24
; GFX10-NEXT: v_cmp_u_f32_e64 s13, v51, v51
; GFX10-NEXT: v_add3_u32 v33, v33, v51, 0x7fff
; GFX10-NEXT: v_bfe_u32 v51, v7, 16, 1
@@ -16764,51 +16433,51 @@ define <32 x bfloat> @v_fmul_v32bf16(<32 x bfloat> %a, <32 x bfloat> %b) {
; GFX10-NEXT: v_bfe_u32 v36, v3, 16, 1
; GFX10-NEXT: v_cmp_u_f32_e64 s8, v19, v19
; GFX10-NEXT: v_add3_u32 v48, v48, v19, 0x7fff
-; GFX10-NEXT: v_and_or_b32 v19, v19, s23, 0x400000
+; GFX10-NEXT: v_or_b32_e32 v19, 0x400000, v19
; GFX10-NEXT: v_cmp_u_f32_e64 s9, v2, v2
; GFX10-NEXT: v_add3_u32 v52, v52, v2, 0x7fff
-; GFX10-NEXT: v_and_or_b32 v2, v2, s23, 0x400000
+; GFX10-NEXT: v_or_b32_e32 v2, 0x400000, v2
; GFX10-NEXT: v_perm_b32 v0, v0, v17, 0x7060302
; GFX10-NEXT: v_perm_b32 v1, v1, v18, 0x7060302
-; GFX10-NEXT: v_and_or_b32 v34, v9, s23, 0x400000
-; GFX10-NEXT: v_and_or_b32 v50, v25, s23, 0x400000
+; GFX10-NEXT: v_or_b32_e32 v34, 0x400000, v9
+; GFX10-NEXT: v_or_b32_e32 v50, 0x400000, v25
; GFX10-NEXT: v_bfe_u32 v53, v8, 16, 1
; GFX10-NEXT: v_cmp_u_f32_e64 s14, v9, v9
; GFX10-NEXT: v_add3_u32 v9, v35, v9, 0x7fff
-; GFX10-NEXT: v_and_or_b32 v35, v7, s23, 0x400000
+; GFX10-NEXT: v_or_b32_e32 v35, 0x400000, v7
; GFX10-NEXT: v_cmp_u_f32_e64 s15, v25, v25
; GFX10-NEXT: v_add3_u32 v25, v38, v25, 0x7fff
; GFX10-NEXT: v_bfe_u32 v38, v23, 16, 1
; GFX10-NEXT: v_cmp_u_f32_e64 s18, v7, v7
; GFX10-NEXT: v_add3_u32 v7, v51, v7, 0x7fff
-; GFX10-NEXT: v_and_or_b32 v51, v6, s23, 0x400000
+; GFX10-NEXT: v_or_b32_e32 v51, 0x400000, v6
; GFX10-NEXT: v_cmp_u_f32_e64 s20, v6, v6
; GFX10-NEXT: v_add3_u32 v6, v65, v6, 0x7fff
; GFX10-NEXT: v_bfe_u32 v65, v5, 16, 1
; GFX10-NEXT: v_cmp_u_f32_e64 s4, v21, v21
; GFX10-NEXT: v_add3_u32 v26, v26, v21, 0x7fff
-; GFX10-NEXT: v_and_or_b32 v21, v21, s23, 0x400000
+; GFX10-NEXT: v_or_b32_e32 v21, 0x400000, v21
; GFX10-NEXT: v_cmp_u_f32_e64 s5, v4, v4
; GFX10-NEXT: v_add3_u32 v28, v28, v4, 0x7fff
-; GFX10-NEXT: v_and_or_b32 v4, v4, s23, 0x400000
+; GFX10-NEXT: v_or_b32_e32 v4, 0x400000, v4
; GFX10-NEXT: v_cmp_u_f32_e64 s6, v20, v20
; GFX10-NEXT: v_add3_u32 v30, v30, v20, 0x7fff
-; GFX10-NEXT: v_and_or_b32 v20, v20, s23, 0x400000
+; GFX10-NEXT: v_or_b32_e32 v20, 0x400000, v20
; GFX10-NEXT: v_cmp_u_f32_e64 s7, v3, v3
; GFX10-NEXT: v_add3_u32 v36, v36, v3, 0x7fff
-; GFX10-NEXT: v_and_or_b32 v3, v3, s23, 0x400000
+; GFX10-NEXT: v_or_b32_e32 v3, 0x400000, v3
; GFX10-NEXT: v_cndmask_b32_e64 v19, v48, v19, s8
; GFX10-NEXT: v_cndmask_b32_e64 v2, v52, v2, s9
-; GFX10-NEXT: v_and_or_b32 v55, v8, s23, 0x400000
+; GFX10-NEXT: v_or_b32_e32 v55, 0x400000, v8
; GFX10-NEXT: v_cmp_u_f32_e64 s16, v8, v8
; GFX10-NEXT: v_add3_u32 v8, v53, v8, 0x7fff
-; GFX10-NEXT: v_and_or_b32 v53, v23, s23, 0x400000
+; GFX10-NEXT: v_or_b32_e32 v53, 0x400000, v23
; GFX10-NEXT: v_cmp_u_f32_e64 s19, v23, v23
; GFX10-NEXT: v_add3_u32 v23, v38, v23, 0x7fff
; GFX10-NEXT: v_bfe_u32 v38, v22, 16, 1
; GFX10-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
; GFX10-NEXT: v_add3_u32 v65, v65, v5, 0x7fff
-; GFX10-NEXT: v_and_or_b32 v5, v5, s23, 0x400000
+; GFX10-NEXT: v_or_b32_e32 v5, 0x400000, v5
; GFX10-NEXT: v_cndmask_b32_e64 v21, v26, v21, s4
; GFX10-NEXT: v_cndmask_b32_e64 v4, v28, v4, s5
; GFX10-NEXT: v_cndmask_b32_e64 v20, v30, v20, s6
@@ -16816,7 +16485,7 @@ define <32 x bfloat> @v_fmul_v32bf16(<32 x bfloat> %a, <32 x bfloat> %b) {
; GFX10-NEXT: v_perm_b32 v2, v2, v19, 0x7060302
; GFX10-NEXT: v_cmp_u_f32_e64 s21, v22, v22
; GFX10-NEXT: v_add3_u32 v38, v38, v22, 0x7fff
-; GFX10-NEXT: v_and_or_b32 v22, v22, s23, 0x400000
+; GFX10-NEXT: v_or_b32_e32 v22, 0x400000, v22
; GFX10-NEXT: v_cndmask_b32_e32 v5, v65, v5, vcc_lo
; GFX10-NEXT: v_perm_b32 v3, v3, v20, 0x7060302
; GFX10-NEXT: v_perm_b32 v4, v4, v21, 0x7060302
@@ -16840,14 +16509,14 @@ define <32 x bfloat> @v_fmul_v32bf16(<32 x bfloat> %a, <32 x bfloat> %b) {
; GFX10-NEXT: v_perm_b32 v13, v13, v29, 0x7060302
; GFX10-NEXT: v_perm_b32 v14, v14, v16, 0x7060302
; GFX10-NEXT: s_waitcnt vmcnt(0)
-; GFX10-NEXT: v_lshlrev_b32_e32 v17, 16, v31
-; GFX10-NEXT: v_and_b32_e32 v18, 0xffff0000, v31
-; GFX10-NEXT: v_mul_f32_e32 v17, v32, v17
+; GFX10-NEXT: v_lshlrev_b32_e32 v17, 16, v32
+; GFX10-NEXT: v_and_b32_e32 v18, 0xffff0000, v32
+; GFX10-NEXT: v_mul_f32_e32 v17, v31, v17
; GFX10-NEXT: v_mul_f32_e32 v15, v15, v18
; GFX10-NEXT: v_bfe_u32 v18, v17, 16, 1
; GFX10-NEXT: v_bfe_u32 v19, v15, 16, 1
-; GFX10-NEXT: v_and_or_b32 v20, v17, s23, 0x400000
-; GFX10-NEXT: v_and_or_b32 v21, v15, s23, 0x400000
+; GFX10-NEXT: v_or_b32_e32 v20, 0x400000, v17
+; GFX10-NEXT: v_or_b32_e32 v21, 0x400000, v15
; GFX10-NEXT: v_cmp_u_f32_e32 vcc_lo, v17, v17
; GFX10-NEXT: v_cmp_u_f32_e64 s4, v15, v15
; GFX10-NEXT: v_add3_u32 v17, v18, v17, 0x7fff
@@ -16860,212 +16529,219 @@ define <32 x bfloat> @v_fmul_v32bf16(<32 x bfloat> %a, <32 x bfloat> %b) {
; GFX11-LABEL: v_fmul_v32bf16:
; GFX11: ; %bb.0:
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: scratch_load_b32 v31, off, s32
+; GFX11-NEXT: scratch_load_b32 v32, off, s32
+; GFX11-NEXT: v_lshlrev_b32_e32 v67, 16, v21
+; GFX11-NEXT: v_lshlrev_b32_e32 v68, 16, v5
+; GFX11-NEXT: v_and_b32_e32 v21, 0xffff0000, v21
+; GFX11-NEXT: v_and_b32_e32 v5, 0xffff0000, v5
; GFX11-NEXT: v_lshlrev_b32_e32 v83, 16, v17
; GFX11-NEXT: v_lshlrev_b32_e32 v84, 16, v1
; GFX11-NEXT: v_and_b32_e32 v17, 0xffff0000, v17
; GFX11-NEXT: v_and_b32_e32 v1, 0xffff0000, v1
+; GFX11-NEXT: v_lshlrev_b32_e32 v49, 16, v26
+; GFX11-NEXT: v_dual_mul_f32 v5, v5, v21 :: v_dual_and_b32 v26, 0xffff0000, v26
; GFX11-NEXT: v_lshlrev_b32_e32 v53, 16, v24
-; GFX11-NEXT: v_and_b32_e32 v24, 0xffff0000, v24
-; GFX11-NEXT: s_mov_b32 s0, 0xff800000
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3)
-; GFX11-NEXT: v_dual_mul_f32 v1, v1, v17 :: v_dual_lshlrev_b32 v64, 16, v7
-; GFX11-NEXT: v_and_b32_e32 v7, 0xffff0000, v7
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_1) | instid1(VALU_DEP_4)
+; GFX11-NEXT: v_dual_mul_f32 v1, v1, v17 :: v_dual_and_b32 v24, 0xffff0000, v24
+; GFX11-NEXT: v_lshlrev_b32_e32 v71, 16, v19
+; GFX11-NEXT: v_bfe_u32 v103, v5, 16, 1
+; GFX11-NEXT: v_and_b32_e32 v19, 0xffff0000, v19
; GFX11-NEXT: v_lshlrev_b32_e32 v81, 16, v18
-; GFX11-NEXT: v_lshlrev_b32_e32 v85, 16, v16
-; GFX11-NEXT: v_lshlrev_b32_e32 v86, 16, v0
; GFX11-NEXT: v_bfe_u32 v135, v1, 16, 1
-; GFX11-NEXT: v_and_b32_e32 v16, 0xffff0000, v16
-; GFX11-NEXT: v_and_b32_e32 v0, 0xffff0000, v0
-; GFX11-NEXT: v_lshlrev_b32_e32 v55, 16, v23
-; GFX11-NEXT: v_and_b32_e32 v23, 0xffff0000, v23
-; GFX11-NEXT: v_and_or_b32 v144, v1, s0, 0x400000
+; GFX11-NEXT: v_or_b32_e32 v112, 0x400000, v5
+; GFX11-NEXT: v_or_b32_e32 v144, 0x400000, v1
+; GFX11-NEXT: v_add3_u32 v103, v103, v5, 0x7fff
+; GFX11-NEXT: v_lshlrev_b32_e32 v80, 16, v3
+; GFX11-NEXT: v_and_b32_e32 v3, 0xffff0000, v3
; GFX11-NEXT: v_add3_u32 v135, v135, v1, 0x7fff
; GFX11-NEXT: v_lshlrev_b32_e32 v82, 16, v2
-; GFX11-NEXT: v_lshlrev_b32_e32 v54, 16, v8
-; GFX11-NEXT: v_dual_mul_f32 v17, v86, v85 :: v_dual_and_b32 v8, 0xffff0000, v8
-; GFX11-NEXT: v_dual_mul_f32 v7, v7, v23 :: v_dual_lshlrev_b32 v36, 16, v13
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_2) | instid1(VALU_DEP_4)
-; GFX11-NEXT: v_dual_mul_f32 v8, v8, v24 :: v_dual_lshlrev_b32 v39, 16, v27
-; GFX11-NEXT: v_dual_mul_f32 v0, v0, v16 :: v_dual_lshlrev_b32 v49, 16, v26
-; GFX11-NEXT: v_mul_f32_e32 v24, v64, v55
-; GFX11-NEXT: v_bfe_u32 v87, v7, 16, 1
+; GFX11-NEXT: v_lshlrev_b32_e32 v52, 16, v9
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_1) | instid1(VALU_DEP_4)
+; GFX11-NEXT: v_dual_mul_f32 v3, v3, v19 :: v_dual_lshlrev_b32 v54, 16, v8
+; GFX11-NEXT: v_lshlrev_b32_e32 v85, 16, v16
+; GFX11-NEXT: v_dual_mul_f32 v19, v82, v81 :: v_dual_lshlrev_b32 v64, 16, v7
+; GFX11-NEXT: v_and_b32_e32 v7, 0xffff0000, v7
; GFX11-NEXT: v_lshlrev_b32_e32 v65, 16, v22
; GFX11-NEXT: v_lshlrev_b32_e32 v66, 16, v6
-; GFX11-NEXT: v_and_b32_e32 v22, 0xffff0000, v22
-; GFX11-NEXT: v_bfe_u32 v85, v24, 16, 1
-; GFX11-NEXT: v_lshlrev_b32_e32 v67, 16, v21
-; GFX11-NEXT: v_lshlrev_b32_e32 v68, 16, v5
-; GFX11-NEXT: v_and_b32_e32 v21, 0xffff0000, v21
-; GFX11-NEXT: v_and_b32_e32 v5, 0xffff0000, v5
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4)
+; GFX11-NEXT: v_bfe_u32 v129, v19, 16, 1
+; GFX11-NEXT: v_or_b32_e32 v130, 0x400000, v19
+; GFX11-NEXT: v_lshlrev_b32_e32 v48, 16, v11
+; GFX11-NEXT: v_bfe_u32 v119, v3, 16, 1
+; GFX11-NEXT: v_lshlrev_b32_e32 v51, 16, v25
+; GFX11-NEXT: v_add3_u32 v129, v129, v19, 0x7fff
+; GFX11-NEXT: v_lshlrev_b32_e32 v86, 16, v0
+; GFX11-NEXT: v_and_b32_e32 v8, 0xffff0000, v8
+; GFX11-NEXT: v_and_b32_e32 v0, 0xffff0000, v0
+; GFX11-NEXT: v_and_b32_e32 v9, 0xffff0000, v9
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
+; GFX11-NEXT: v_dual_mul_f32 v17, v86, v85 :: v_dual_and_b32 v2, 0xffff0000, v2
+; GFX11-NEXT: v_dual_mul_f32 v8, v8, v24 :: v_dual_lshlrev_b32 v39, 16, v27
+; GFX11-NEXT: v_or_b32_e32 v128, 0x400000, v3
+; GFX11-NEXT: v_add3_u32 v119, v119, v3, 0x7fff
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4)
+; GFX11-NEXT: v_bfe_u32 v145, v17, 16, 1
+; GFX11-NEXT: v_or_b32_e32 v146, 0x400000, v17
+; GFX11-NEXT: v_and_b32_e32 v18, 0xffff0000, v18
+; GFX11-NEXT: v_and_b32_e32 v25, 0xffff0000, v25
; GFX11-NEXT: v_lshlrev_b32_e32 v70, 16, v4
-; GFX11-NEXT: v_and_or_b32 v86, v24, s0, 0x400000
-; GFX11-NEXT: v_and_or_b32 v96, v7, s0, 0x400000
+; GFX11-NEXT: v_add3_u32 v145, v145, v17, 0x7fff
+; GFX11-NEXT: v_and_b32_e32 v16, 0xffff0000, v16
+; GFX11-NEXT: v_lshlrev_b32_e32 v55, 16, v23
+; GFX11-NEXT: v_and_b32_e32 v23, 0xffff0000, v23
+; GFX11-NEXT: v_lshlrev_b32_e32 v50, 16, v10
+; GFX11-NEXT: v_mul_f32_e32 v2, v2, v18
+; GFX11-NEXT: v_mul_f32_e32 v0, v0, v16
+; GFX11-NEXT: v_dual_mul_f32 v24, v64, v55 :: v_dual_lshlrev_b32 v37, 16, v28
+; GFX11-NEXT: v_mul_f32_e32 v7, v7, v23
+; GFX11-NEXT: v_dual_mul_f32 v23, v66, v65 :: v_dual_mul_f32 v18, v84, v83
+; GFX11-NEXT: v_dual_mul_f32 v9, v9, v25 :: v_dual_and_b32 v28, 0xffff0000, v28
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX11-NEXT: v_bfe_u32 v85, v24, 16, 1
+; GFX11-NEXT: v_bfe_u32 v97, v23, 16, 1
+; GFX11-NEXT: v_or_b32_e32 v86, 0x400000, v24
+; GFX11-NEXT: v_or_b32_e32 v98, 0x400000, v23
+; GFX11-NEXT: v_bfe_u32 v87, v7, 16, 1
; GFX11-NEXT: v_add3_u32 v85, v85, v24, 0x7fff
; GFX11-NEXT: v_lshlrev_b32_e32 v69, 16, v20
-; GFX11-NEXT: v_add3_u32 v87, v87, v7, 0x7fff
-; GFX11-NEXT: v_and_b32_e32 v6, 0xffff0000, v6
-; GFX11-NEXT: v_dual_mul_f32 v23, v66, v65 :: v_dual_lshlrev_b32 v48, 16, v11
-; GFX11-NEXT: v_and_b32_e32 v27, 0xffff0000, v27
-; GFX11-NEXT: v_dual_mul_f32 v5, v5, v21 :: v_dual_lshlrev_b32 v50, 16, v10
-; GFX11-NEXT: v_dual_mul_f32 v21, v70, v69 :: v_dual_and_b32 v26, 0xffff0000, v26
-; GFX11-NEXT: v_and_b32_e32 v18, 0xffff0000, v18
-; GFX11-NEXT: v_and_b32_e32 v2, 0xffff0000, v2
-; GFX11-NEXT: v_mul_f32_e32 v6, v6, v22
-; GFX11-NEXT: v_lshlrev_b32_e32 v52, 16, v9
-; GFX11-NEXT: v_and_b32_e32 v9, 0xffff0000, v9
-; GFX11-NEXT: v_bfe_u32 v97, v23, 16, 1
-; GFX11-NEXT: v_mul_f32_e32 v2, v2, v18
-; GFX11-NEXT: v_mul_f32_e32 v18, v84, v83
-; GFX11-NEXT: v_bfe_u32 v83, v8, 16, 1
-; GFX11-NEXT: v_bfe_u32 v99, v6, 16, 1
-; GFX11-NEXT: v_bfe_u32 v103, v5, 16, 1
-; GFX11-NEXT: v_bfe_u32 v113, v21, 16, 1
-; GFX11-NEXT: v_lshlrev_b32_e32 v71, 16, v19
-; GFX11-NEXT: v_and_or_b32 v84, v8, s0, 0x400000
-; GFX11-NEXT: v_and_or_b32 v98, v23, s0, 0x400000
-; GFX11-NEXT: v_and_or_b32 v100, v6, s0, 0x400000
-; GFX11-NEXT: v_and_or_b32 v112, v5, s0, 0x400000
-; GFX11-NEXT: v_and_or_b32 v114, v21, s0, 0x400000
-; GFX11-NEXT: v_add3_u32 v83, v83, v8, 0x7fff
-; GFX11-NEXT: v_and_b32_e32 v19, 0xffff0000, v19
-; GFX11-NEXT: v_add3_u32 v97, v97, v23, 0x7fff
; GFX11-NEXT: v_and_b32_e32 v20, 0xffff0000, v20
-; GFX11-NEXT: v_add3_u32 v99, v99, v6, 0x7fff
-; GFX11-NEXT: v_add3_u32 v103, v103, v5, 0x7fff
-; GFX11-NEXT: v_lshlrev_b32_e32 v80, 16, v3
-; GFX11-NEXT: v_and_b32_e32 v3, 0xffff0000, v3
-; GFX11-NEXT: v_add3_u32 v113, v113, v21, 0x7fff
+; GFX11-NEXT: v_add3_u32 v97, v97, v23, 0x7fff
; GFX11-NEXT: v_and_b32_e32 v4, 0xffff0000, v4
-; GFX11-NEXT: v_lshlrev_b32_e32 v38, 16, v12
; GFX11-NEXT: v_and_b32_e32 v11, 0xffff0000, v11
-; GFX11-NEXT: v_dual_mul_f32 v3, v3, v19 :: v_dual_and_b32 v10, 0xffff0000, v10
-; GFX11-NEXT: v_dual_mul_f32 v22, v68, v67 :: v_dual_lshlrev_b32 v51, 16, v25
-; GFX11-NEXT: v_lshlrev_b32_e32 v37, 16, v28
-; GFX11-NEXT: v_dual_mul_f32 v4, v4, v20 :: v_dual_and_b32 v25, 0xffff0000, v25
+; GFX11-NEXT: v_or_b32_e32 v96, 0x400000, v7
+; GFX11-NEXT: v_add3_u32 v87, v87, v7, 0x7fff
+; GFX11-NEXT: v_and_b32_e32 v6, 0xffff0000, v6
+; GFX11-NEXT: v_mul_f32_e32 v4, v4, v20
; GFX11-NEXT: v_mul_f32_e32 v20, v80, v71
-; GFX11-NEXT: v_dual_mul_f32 v19, v82, v81 :: v_dual_and_b32 v28, 0xffff0000, v28
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3)
-; GFX11-NEXT: v_dual_mul_f32 v9, v9, v25 :: v_dual_and_b32 v12, 0xffff0000, v12
-; GFX11-NEXT: v_mul_f32_e32 v25, v54, v53
+; GFX11-NEXT: v_bfe_u32 v71, v9, 16, 1
+; GFX11-NEXT: v_or_b32_e32 v80, 0x400000, v9
; GFX11-NEXT: v_lshlrev_b32_e32 v35, 16, v29
-; GFX11-NEXT: v_and_b32_e32 v29, 0xffff0000, v29
-; GFX11-NEXT: v_dual_mul_f32 v10, v10, v26 :: v_dual_and_b32 v13, 0xffff0000, v13
-; GFX11-NEXT: v_dual_mul_f32 v12, v12, v28 :: v_dual_lshlrev_b32 v33, 16, v30
-; GFX11-NEXT: v_mul_f32_e32 v28, v48, v39
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3)
-; GFX11-NEXT: v_dual_mul_f32 v13, v13, v29 :: v_dual_lshlrev_b32 v34, 16, v14
-; GFX11-NEXT: v_dual_mul_f32 v11, v11, v27 :: v_dual_and_b32 v14, 0xffff0000, v14
-; GFX11-NEXT: v_dual_mul_f32 v27, v50, v49 :: v_dual_mul_f32 v26, v52, v51
-; GFX11-NEXT: v_dual_mul_f32 v29, v38, v37 :: v_dual_and_b32 v30, 0xffff0000, v30
-; GFX11-NEXT: v_lshlrev_b32_e32 v32, 16, v15
-; GFX11-NEXT: v_and_b32_e32 v15, 0xffff0000, v15
-; GFX11-NEXT: v_bfe_u32 v39, v13, 16, 1
+; GFX11-NEXT: v_dual_mul_f32 v21, v70, v69 :: v_dual_and_b32 v10, 0xffff0000, v10
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_1) | instid1(VALU_DEP_3)
+; GFX11-NEXT: v_add3_u32 v71, v71, v9, 0x7fff
+; GFX11-NEXT: v_and_b32_e32 v22, 0xffff0000, v22
+; GFX11-NEXT: v_dual_mul_f32 v10, v10, v26 :: v_dual_and_b32 v29, 0xffff0000, v29
+; GFX11-NEXT: v_and_b32_e32 v27, 0xffff0000, v27
+; GFX11-NEXT: v_mul_f32_e32 v26, v52, v51
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4)
+; GFX11-NEXT: v_mul_f32_e32 v6, v6, v22
+; GFX11-NEXT: v_lshlrev_b32_e32 v36, 16, v13
+; GFX11-NEXT: v_and_b32_e32 v13, 0xffff0000, v13
+; GFX11-NEXT: v_dual_mul_f32 v11, v11, v27 :: v_dual_lshlrev_b32 v34, 16, v14
+; GFX11-NEXT: v_dual_mul_f32 v22, v68, v67 :: v_dual_lshlrev_b32 v33, 16, v30
+; GFX11-NEXT: v_dual_mul_f32 v27, v50, v49 :: v_dual_lshlrev_b32 v38, 16, v12
+; GFX11-NEXT: v_and_b32_e32 v14, 0xffff0000, v14
+; GFX11-NEXT: v_dual_mul_f32 v25, v54, v53 :: v_dual_and_b32 v12, 0xffff0000, v12
+; GFX11-NEXT: v_dual_mul_f32 v13, v13, v29 :: v_dual_and_b32 v30, 0xffff0000, v30
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_1) | instid1(VALU_DEP_4)
+; GFX11-NEXT: v_mul_f32_e32 v29, v38, v37
+; GFX11-NEXT: v_lshlrev_b32_e32 v31, 16, v15
+; GFX11-NEXT: v_dual_mul_f32 v12, v12, v28 :: v_dual_and_b32 v15, 0xffff0000, v15
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_3) | instid1(VALU_DEP_4)
; GFX11-NEXT: v_mul_f32_e32 v14, v14, v30
+; GFX11-NEXT: v_mul_f32_e32 v28, v48, v39
; GFX11-NEXT: v_dual_mul_f32 v30, v36, v35 :: v_dual_mul_f32 v33, v34, v33
-; GFX11-NEXT: v_and_or_b32 v48, v13, s0, 0x400000
-; GFX11-NEXT: v_bfe_u32 v49, v29, 16, 1
+; GFX11-NEXT: v_bfe_u32 v39, v13, 16, 1
; GFX11-NEXT: v_bfe_u32 v35, v14, 16, 1
-; GFX11-NEXT: v_and_or_b32 v36, v14, s0, 0x400000
+; GFX11-NEXT: v_or_b32_e32 v36, 0x400000, v14
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4)
+; GFX11-NEXT: v_bfe_u32 v37, v30, 16, 1
; GFX11-NEXT: v_bfe_u32 v16, v33, 16, 1
-; GFX11-NEXT: v_and_or_b32 v34, v33, s0, 0x400000
+; GFX11-NEXT: v_or_b32_e32 v34, 0x400000, v33
; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v33, v33
-; GFX11-NEXT: v_bfe_u32 v37, v30, 16, 1
; GFX11-NEXT: v_add3_u32 v35, v35, v14, 0x7fff
+; GFX11-NEXT: v_or_b32_e32 v38, 0x400000, v30
; GFX11-NEXT: v_add3_u32 v16, v16, v33, 0x7fff
-; GFX11-NEXT: v_and_or_b32 v38, v30, s0, 0x400000
-; GFX11-NEXT: v_add3_u32 v39, v39, v13, 0x7fff
; GFX11-NEXT: v_add3_u32 v37, v37, v30, 0x7fff
-; GFX11-NEXT: v_and_or_b32 v50, v29, s0, 0x400000
+; GFX11-NEXT: v_or_b32_e32 v48, 0x400000, v13
+; GFX11-NEXT: v_bfe_u32 v49, v29, 16, 1
+; GFX11-NEXT: v_add3_u32 v39, v39, v13, 0x7fff
; GFX11-NEXT: v_cndmask_b32_e32 v16, v16, v34, vcc_lo
; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v14, v14
+; GFX11-NEXT: v_or_b32_e32 v50, 0x400000, v29
; GFX11-NEXT: v_bfe_u32 v51, v12, 16, 1
; GFX11-NEXT: v_add3_u32 v49, v49, v29, 0x7fff
-; GFX11-NEXT: v_and_or_b32 v52, v12, s0, 0x400000
-; GFX11-NEXT: v_bfe_u32 v53, v28, 16, 1
+; GFX11-NEXT: v_or_b32_e32 v52, 0x400000, v12
; GFX11-NEXT: v_cndmask_b32_e32 v14, v35, v36, vcc_lo
; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v30, v30
+; GFX11-NEXT: v_bfe_u32 v53, v28, 16, 1
; GFX11-NEXT: v_add3_u32 v51, v51, v12, 0x7fff
-; GFX11-NEXT: v_and_or_b32 v54, v28, s0, 0x400000
+; GFX11-NEXT: v_or_b32_e32 v54, 0x400000, v28
; GFX11-NEXT: v_bfe_u32 v55, v11, 16, 1
-; GFX11-NEXT: v_add3_u32 v53, v53, v28, 0x7fff
; GFX11-NEXT: v_cndmask_b32_e32 v30, v37, v38, vcc_lo
; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v13, v13
-; GFX11-NEXT: v_and_or_b32 v64, v11, s0, 0x400000
+; GFX11-NEXT: v_add3_u32 v53, v53, v28, 0x7fff
+; GFX11-NEXT: v_or_b32_e32 v64, 0x400000, v11
; GFX11-NEXT: v_bfe_u32 v65, v27, 16, 1
; GFX11-NEXT: v_add3_u32 v55, v55, v11, 0x7fff
-; GFX11-NEXT: v_and_or_b32 v66, v27, s0, 0x400000
; GFX11-NEXT: v_cndmask_b32_e32 v13, v39, v48, vcc_lo
; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v29, v29
+; GFX11-NEXT: v_or_b32_e32 v66, 0x400000, v27
; GFX11-NEXT: v_bfe_u32 v67, v10, 16, 1
; GFX11-NEXT: v_add3_u32 v65, v65, v27, 0x7fff
-; GFX11-NEXT: v_and_or_b32 v68, v10, s0, 0x400000
-; GFX11-NEXT: v_bfe_u32 v69, v26, 16, 1
+; GFX11-NEXT: v_or_b32_e32 v68, 0x400000, v10
; GFX11-NEXT: v_cndmask_b32_e32 v29, v49, v50, vcc_lo
; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v12, v12
+; GFX11-NEXT: v_bfe_u32 v69, v26, 16, 1
; GFX11-NEXT: v_add3_u32 v67, v67, v10, 0x7fff
-; GFX11-NEXT: v_and_or_b32 v70, v26, s0, 0x400000
-; GFX11-NEXT: v_bfe_u32 v71, v9, 16, 1
-; GFX11-NEXT: v_add3_u32 v69, v69, v26, 0x7fff
+; GFX11-NEXT: v_or_b32_e32 v70, 0x400000, v26
+; GFX11-NEXT: v_bfe_u32 v81, v25, 16, 1
; GFX11-NEXT: v_cndmask_b32_e32 v12, v51, v52, vcc_lo
; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v28, v28
-; GFX11-NEXT: v_and_or_b32 v80, v9, s0, 0x400000
-; GFX11-NEXT: v_bfe_u32 v81, v25, 16, 1
-; GFX11-NEXT: v_add3_u32 v71, v71, v9, 0x7fff
-; GFX11-NEXT: v_and_or_b32 v82, v25, s0, 0x400000
+; GFX11-NEXT: v_add3_u32 v69, v69, v26, 0x7fff
+; GFX11-NEXT: v_or_b32_e32 v82, 0x400000, v25
+; GFX11-NEXT: v_bfe_u32 v83, v8, 16, 1
+; GFX11-NEXT: v_add3_u32 v81, v81, v25, 0x7fff
; GFX11-NEXT: v_cndmask_b32_e32 v28, v53, v54, vcc_lo
; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v11, v11
-; GFX11-NEXT: v_add3_u32 v81, v81, v25, 0x7fff
-; GFX11-NEXT: v_bfe_u32 v101, v22, 16, 1
-; GFX11-NEXT: v_and_or_b32 v102, v22, s0, 0x400000
-; GFX11-NEXT: v_bfe_u32 v115, v4, 16, 1
+; GFX11-NEXT: v_or_b32_e32 v84, 0x400000, v8
+; GFX11-NEXT: v_add3_u32 v83, v83, v8, 0x7fff
+; GFX11-NEXT: v_bfe_u32 v99, v6, 16, 1
+; GFX11-NEXT: v_or_b32_e32 v100, 0x400000, v6
; GFX11-NEXT: v_cndmask_b32_e32 v11, v55, v64, vcc_lo
; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v27, v27
-; GFX11-NEXT: v_add3_u32 v101, v101, v22, 0x7fff
-; GFX11-NEXT: v_and_or_b32 v116, v4, s0, 0x400000
-; GFX11-NEXT: v_bfe_u32 v117, v20, 16, 1
-; GFX11-NEXT: v_add3_u32 v115, v115, v4, 0x7fff
+; GFX11-NEXT: v_bfe_u32 v101, v22, 16, 1
+; GFX11-NEXT: v_add3_u32 v99, v99, v6, 0x7fff
+; GFX11-NEXT: v_or_b32_e32 v102, 0x400000, v22
+; GFX11-NEXT: v_bfe_u32 v113, v21, 16, 1
; GFX11-NEXT: v_cndmask_b32_e32 v27, v65, v66, vcc_lo
; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v10, v10
-; GFX11-NEXT: v_and_or_b32 v118, v20, s0, 0x400000
-; GFX11-NEXT: v_bfe_u32 v129, v19, 16, 1
-; GFX11-NEXT: v_add3_u32 v117, v117, v20, 0x7fff
-; GFX11-NEXT: v_and_or_b32 v130, v19, s0, 0x400000
+; GFX11-NEXT: v_add3_u32 v101, v101, v22, 0x7fff
+; GFX11-NEXT: v_or_b32_e32 v114, 0x400000, v21
+; GFX11-NEXT: v_bfe_u32 v115, v4, 16, 1
+; GFX11-NEXT: v_add3_u32 v113, v113, v21, 0x7fff
; GFX11-NEXT: v_cndmask_b32_e32 v10, v67, v68, vcc_lo
; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v26, v26
-; GFX11-NEXT: v_bfe_u32 v133, v18, 16, 1
-; GFX11-NEXT: v_add3_u32 v129, v129, v19, 0x7fff
-; GFX11-NEXT: v_and_or_b32 v134, v18, s0, 0x400000
-; GFX11-NEXT: v_bfe_u32 v145, v17, 16, 1
+; GFX11-NEXT: v_or_b32_e32 v116, 0x400000, v4
+; GFX11-NEXT: v_bfe_u32 v117, v20, 16, 1
+; GFX11-NEXT: v_add3_u32 v115, v115, v4, 0x7fff
+; GFX11-NEXT: v_or_b32_e32 v118, 0x400000, v20
; GFX11-NEXT: v_cndmask_b32_e32 v26, v69, v70, vcc_lo
; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v9, v9
-; GFX11-NEXT: v_add3_u32 v133, v133, v18, 0x7fff
-; GFX11-NEXT: v_and_or_b32 v146, v17, s0, 0x400000
+; GFX11-NEXT: v_add3_u32 v117, v117, v20, 0x7fff
+; GFX11-NEXT: v_bfe_u32 v133, v18, 16, 1
+; GFX11-NEXT: v_or_b32_e32 v134, 0x400000, v18
; GFX11-NEXT: v_bfe_u32 v147, v0, 16, 1
-; GFX11-NEXT: v_add3_u32 v145, v145, v17, 0x7fff
; GFX11-NEXT: v_cndmask_b32_e32 v9, v71, v80, vcc_lo
; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v25, v25
-; GFX11-NEXT: v_bfe_u32 v131, v2, 16, 1
-; GFX11-NEXT: v_and_or_b32 v33, v0, s0, 0x400000
+; GFX11-NEXT: v_add3_u32 v133, v133, v18, 0x7fff
+; GFX11-NEXT: v_or_b32_e32 v33, 0x400000, v0
; GFX11-NEXT: v_add3_u32 v147, v147, v0, 0x7fff
-; GFX11-NEXT: v_and_or_b32 v132, v2, s0, 0x400000
+; GFX11-NEXT: v_bfe_u32 v131, v2, 16, 1
; GFX11-NEXT: v_cndmask_b32_e32 v25, v81, v82, vcc_lo
; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v8, v8
-; GFX11-NEXT: v_add3_u32 v131, v131, v2, 0x7fff
-; GFX11-NEXT: v_bfe_u32 v119, v3, 16, 1
-; GFX11-NEXT: v_and_or_b32 v128, v3, s0, 0x400000
+; GFX11-NEXT: v_or_b32_e32 v132, 0x400000, v2
; GFX11-NEXT: v_perm_b32 v9, v9, v26, 0x7060302
+; GFX11-NEXT: v_add3_u32 v131, v131, v2, 0x7fff
+; GFX11-NEXT: v_perm_b32 v10, v10, v27, 0x7060302
; GFX11-NEXT: v_cndmask_b32_e32 v8, v83, v84, vcc_lo
; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v24, v24
-; GFX11-NEXT: v_add3_u32 v119, v119, v3, 0x7fff
-; GFX11-NEXT: v_perm_b32 v10, v10, v27, 0x7060302
; GFX11-NEXT: v_perm_b32 v11, v11, v28, 0x7060302
+; GFX11-NEXT: v_perm_b32 v12, v12, v29, 0x7060302
+; GFX11-NEXT: v_perm_b32 v13, v13, v30, 0x7060302
; GFX11-NEXT: v_perm_b32 v8, v8, v25, 0x7060302
; GFX11-NEXT: v_cndmask_b32_e32 v24, v85, v86, vcc_lo
; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v7, v7
-; GFX11-NEXT: v_perm_b32 v12, v12, v29, 0x7060302
-; GFX11-NEXT: v_perm_b32 v13, v13, v30, 0x7060302
; GFX11-NEXT: v_perm_b32 v14, v14, v16, 0x7060302
; GFX11-NEXT: v_cndmask_b32_e32 v7, v87, v96, vcc_lo
; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v23, v23
@@ -17104,22 +16780,21 @@ define <32 x bfloat> @v_fmul_v32bf16(<32 x bfloat> %a, <32 x bfloat> %b) {
; GFX11-NEXT: v_perm_b32 v0, v0, v17, 0x7060302
; GFX11-NEXT: v_cndmask_b32_e32 v2, v131, v132, vcc_lo
; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX11-NEXT: v_perm_b32 v2, v2, v19, 0x7060302
; GFX11-NEXT: v_cndmask_b32_e32 v3, v119, v128, vcc_lo
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_4) | instid1(VALU_DEP_3)
; GFX11-NEXT: v_perm_b32 v3, v3, v20, 0x7060302
; GFX11-NEXT: s_waitcnt vmcnt(0)
-; GFX11-NEXT: v_lshlrev_b32_e32 v17, 16, v31
-; GFX11-NEXT: v_and_b32_e32 v18, 0xffff0000, v31
-; GFX11-NEXT: v_perm_b32 v2, v2, v19, 0x7060302
-; GFX11-NEXT: v_mul_f32_e32 v17, v32, v17
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-NEXT: v_lshlrev_b32_e32 v17, 16, v32
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-NEXT: v_dual_mul_f32 v17, v31, v17 :: v_dual_and_b32 v18, 0xffff0000, v32
; GFX11-NEXT: v_mul_f32_e32 v15, v15, v18
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
; GFX11-NEXT: v_bfe_u32 v18, v17, 16, 1
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2)
; GFX11-NEXT: v_bfe_u32 v19, v15, 16, 1
-; GFX11-NEXT: v_and_or_b32 v20, v17, s0, 0x400000
+; GFX11-NEXT: v_or_b32_e32 v20, 0x400000, v17
; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v17, v17
-; GFX11-NEXT: v_and_or_b32 v21, v15, s0, 0x400000
+; GFX11-NEXT: v_or_b32_e32 v21, 0x400000, v15
; GFX11-NEXT: v_add3_u32 v18, v18, v17, 0x7fff
; GFX11-NEXT: v_add3_u32 v19, v19, v15, 0x7fff
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_3)
@@ -17194,9 +16869,8 @@ define bfloat @v_fdiv_bf16(bfloat %a, bfloat %b) {
; GFX8-NEXT: v_div_fixup_f32 v0, v2, v1, v0
; GFX8-NEXT: v_bfe_u32 v1, v0, 16, 1
; GFX8-NEXT: v_add_u32_e32 v1, vcc, v1, v0
-; GFX8-NEXT: v_and_b32_e32 v2, 0xff800000, v0
; GFX8-NEXT: v_add_u32_e32 v1, vcc, 0x7fff, v1
-; GFX8-NEXT: v_or_b32_e32 v2, 0x400000, v2
+; GFX8-NEXT: v_or_b32_e32 v2, 0x400000, v0
; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v0, v0
; GFX8-NEXT: v_cndmask_b32_e32 v0, v1, v2, vcc
; GFX8-NEXT: v_lshrrev_b32_e32 v0, 16, v0
@@ -17220,9 +16894,8 @@ define bfloat @v_fdiv_bf16(bfloat %a, bfloat %b) {
; GFX9-NEXT: v_div_fmas_f32 v2, v2, v4, v5
; GFX9-NEXT: v_div_fixup_f32 v0, v2, v1, v0
; GFX9-NEXT: v_bfe_u32 v1, v0, 16, 1
-; GFX9-NEXT: v_and_b32_e32 v2, 0xff800000, v0
+; GFX9-NEXT: v_or_b32_e32 v2, 0x400000, v0
; GFX9-NEXT: v_add3_u32 v1, v1, v0, s4
-; GFX9-NEXT: v_or_b32_e32 v2, 0x400000, v2
; GFX9-NEXT: v_cmp_u_f32_e32 vcc, v0, v0
; GFX9-NEXT: v_cndmask_b32_e32 v0, v1, v2, vcc
; GFX9-NEXT: v_lshrrev_b32_e32 v0, 16, v0
@@ -17235,7 +16908,6 @@ define bfloat @v_fdiv_bf16(bfloat %a, bfloat %b) {
; GFX10-NEXT: v_lshlrev_b32_e32 v1, 16, v1
; GFX10-NEXT: v_div_scale_f32 v2, s4, v1, v1, v0
; GFX10-NEXT: v_div_scale_f32 v5, vcc_lo, v0, v1, v0
-; GFX10-NEXT: s_mov_b32 s4, 0xff800000
; GFX10-NEXT: v_rcp_f32_e32 v3, v2
; GFX10-NEXT: v_fma_f32 v4, -v2, v3, 1.0
; GFX10-NEXT: v_fmac_f32_e32 v3, v4, v3
@@ -17246,7 +16918,7 @@ define bfloat @v_fdiv_bf16(bfloat %a, bfloat %b) {
; GFX10-NEXT: v_div_fmas_f32 v2, v2, v3, v4
; GFX10-NEXT: v_div_fixup_f32 v0, v2, v1, v0
; GFX10-NEXT: v_bfe_u32 v1, v0, 16, 1
-; GFX10-NEXT: v_and_or_b32 v2, v0, s4, 0x400000
+; GFX10-NEXT: v_or_b32_e32 v2, 0x400000, v0
; GFX10-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0
; GFX10-NEXT: v_add3_u32 v1, v1, v0, 0x7fff
; GFX10-NEXT: v_cndmask_b32_e32 v0, v1, v2, vcc_lo
@@ -17258,7 +16930,6 @@ define bfloat @v_fdiv_bf16(bfloat %a, bfloat %b) {
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-NEXT: v_lshlrev_b32_e32 v0, 16, v0
; GFX11-NEXT: v_lshlrev_b32_e32 v1, 16, v1
-; GFX11-NEXT: s_mov_b32 s0, 0xff800000
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
; GFX11-NEXT: v_div_scale_f32 v2, null, v1, v1, v0
; GFX11-NEXT: v_rcp_f32_e32 v3, v2
@@ -17277,7 +16948,7 @@ define bfloat @v_fdiv_bf16(bfloat %a, bfloat %b) {
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
; GFX11-NEXT: v_div_fixup_f32 v0, v2, v1, v0
; GFX11-NEXT: v_bfe_u32 v1, v0, 16, 1
-; GFX11-NEXT: v_and_or_b32 v2, v0, s0, 0x400000
+; GFX11-NEXT: v_or_b32_e32 v2, 0x400000, v0
; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1)
; GFX11-NEXT: v_add3_u32 v1, v1, v0, 0x7fff
@@ -17637,8 +17308,7 @@ define bfloat @v_minnum_bf16(bfloat %a, bfloat %b) {
; GFX8-NEXT: v_bfe_u32 v1, v0, 16, 1
; GFX8-NEXT: v_add_u32_e32 v1, vcc, v1, v0
; GFX8-NEXT: v_add_u32_e32 v1, vcc, 0x7fff, v1
-; GFX8-NEXT: v_and_b32_e32 v2, 0xff800000, v0
-; GFX8-NEXT: v_or_b32_e32 v2, 0x400000, v2
+; GFX8-NEXT: v_or_b32_e32 v2, 0x400000, v0
; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v0, v0
; GFX8-NEXT: v_cndmask_b32_e32 v0, v1, v2, vcc
; GFX8-NEXT: v_lshrrev_b32_e32 v0, 16, v0
@@ -17652,9 +17322,8 @@ define bfloat @v_minnum_bf16(bfloat %a, bfloat %b) {
; GFX9-NEXT: v_min_f32_e32 v0, v0, v1
; GFX9-NEXT: v_bfe_u32 v1, v0, 16, 1
; GFX9-NEXT: s_movk_i32 s4, 0x7fff
-; GFX9-NEXT: v_and_b32_e32 v2, 0xff800000, v0
; GFX9-NEXT: v_add3_u32 v1, v1, v0, s4
-; GFX9-NEXT: v_or_b32_e32 v2, 0x400000, v2
+; GFX9-NEXT: v_or_b32_e32 v2, 0x400000, v0
; GFX9-NEXT: v_cmp_u_f32_e32 vcc, v0, v0
; GFX9-NEXT: v_cndmask_b32_e32 v0, v1, v2, vcc
; GFX9-NEXT: v_lshrrev_b32_e32 v0, 16, v0
@@ -17665,10 +17334,9 @@ define bfloat @v_minnum_bf16(bfloat %a, bfloat %b) {
; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX10-NEXT: v_lshlrev_b32_e32 v1, 16, v1
; GFX10-NEXT: v_lshlrev_b32_e32 v0, 16, v0
-; GFX10-NEXT: s_mov_b32 s4, 0xff800000
; GFX10-NEXT: v_min_f32_e32 v0, v0, v1
; GFX10-NEXT: v_bfe_u32 v1, v0, 16, 1
-; GFX10-NEXT: v_and_or_b32 v2, v0, s4, 0x400000
+; GFX10-NEXT: v_or_b32_e32 v2, 0x400000, v0
; GFX10-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0
; GFX10-NEXT: v_add3_u32 v1, v1, v0, 0x7fff
; GFX10-NEXT: v_cndmask_b32_e32 v0, v1, v2, vcc_lo
@@ -17680,11 +17348,10 @@ define bfloat @v_minnum_bf16(bfloat %a, bfloat %b) {
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-NEXT: v_lshlrev_b32_e32 v1, 16, v1
; GFX11-NEXT: v_lshlrev_b32_e32 v0, 16, v0
-; GFX11-NEXT: s_mov_b32 s0, 0xff800000
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
; GFX11-NEXT: v_min_f32_e32 v0, v0, v1
; GFX11-NEXT: v_bfe_u32 v1, v0, 16, 1
-; GFX11-NEXT: v_and_or_b32 v2, v0, s0, 0x400000
+; GFX11-NEXT: v_or_b32_e32 v2, 0x400000, v0
; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1)
; GFX11-NEXT: v_add3_u32 v1, v1, v0, 0x7fff
@@ -17750,16 +17417,14 @@ define <2 x bfloat> @v_minnum_v2bf16(<2 x bfloat> %a, <2 x bfloat> %b) {
; GFX8-NEXT: v_and_b32_e32 v1, 0xffff0000, v1
; GFX8-NEXT: v_and_b32_e32 v0, 0xffff0000, v0
; GFX8-NEXT: v_add_u32_e32 v3, vcc, 0x7fff, v3
-; GFX8-NEXT: v_and_b32_e32 v4, 0xff800000, v2
; GFX8-NEXT: v_min_f32_e32 v0, v0, v1
-; GFX8-NEXT: v_or_b32_e32 v4, 0x400000, v4
+; GFX8-NEXT: v_or_b32_e32 v4, 0x400000, v2
; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v2, v2
; GFX8-NEXT: v_bfe_u32 v1, v0, 16, 1
; GFX8-NEXT: v_cndmask_b32_e32 v2, v3, v4, vcc
; GFX8-NEXT: v_add_u32_e32 v1, vcc, v1, v0
; GFX8-NEXT: v_add_u32_e32 v1, vcc, 0x7fff, v1
-; GFX8-NEXT: v_and_b32_e32 v3, 0xff800000, v0
-; GFX8-NEXT: v_or_b32_e32 v3, 0x400000, v3
+; GFX8-NEXT: v_or_b32_e32 v3, 0x400000, v0
; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v0, v0
; GFX8-NEXT: v_cndmask_b32_e32 v0, v1, v3, vcc
; GFX8-NEXT: v_lshrrev_b32_e32 v0, 16, v0
@@ -17772,20 +17437,18 @@ define <2 x bfloat> @v_minnum_v2bf16(<2 x bfloat> %a, <2 x bfloat> %b) {
; GFX9-NEXT: v_lshlrev_b32_e32 v2, 16, v1
; GFX9-NEXT: v_lshlrev_b32_e32 v3, 16, v0
; GFX9-NEXT: v_min_f32_e32 v2, v3, v2
-; GFX9-NEXT: v_bfe_u32 v3, v2, 16, 1
-; GFX9-NEXT: s_movk_i32 s4, 0x7fff
-; GFX9-NEXT: v_and_b32_e32 v4, 0xff800000, v2
; GFX9-NEXT: v_and_b32_e32 v1, 0xffff0000, v1
; GFX9-NEXT: v_and_b32_e32 v0, 0xffff0000, v0
+; GFX9-NEXT: v_bfe_u32 v3, v2, 16, 1
+; GFX9-NEXT: s_movk_i32 s4, 0x7fff
+; GFX9-NEXT: v_min_f32_e32 v0, v0, v1
; GFX9-NEXT: v_add3_u32 v3, v3, v2, s4
-; GFX9-NEXT: v_or_b32_e32 v4, 0x400000, v4
+; GFX9-NEXT: v_or_b32_e32 v4, 0x400000, v2
; GFX9-NEXT: v_cmp_u_f32_e32 vcc, v2, v2
-; GFX9-NEXT: v_min_f32_e32 v0, v0, v1
-; GFX9-NEXT: v_cndmask_b32_e32 v2, v3, v4, vcc
; GFX9-NEXT: v_bfe_u32 v1, v0, 16, 1
-; GFX9-NEXT: v_and_b32_e32 v3, 0xff800000, v0
+; GFX9-NEXT: v_cndmask_b32_e32 v2, v3, v4, vcc
; GFX9-NEXT: v_add3_u32 v1, v1, v0, s4
-; GFX9-NEXT: v_or_b32_e32 v3, 0x400000, v3
+; GFX9-NEXT: v_or_b32_e32 v3, 0x400000, v0
; GFX9-NEXT: v_cmp_u_f32_e32 vcc, v0, v0
; GFX9-NEXT: v_cndmask_b32_e32 v0, v1, v3, vcc
; GFX9-NEXT: s_mov_b32 s4, 0x7060302
@@ -17799,14 +17462,13 @@ define <2 x bfloat> @v_minnum_v2bf16(<2 x bfloat> %a, <2 x bfloat> %b) {
; GFX10-NEXT: v_lshlrev_b32_e32 v3, 16, v0
; GFX10-NEXT: v_and_b32_e32 v1, 0xffff0000, v1
; GFX10-NEXT: v_and_b32_e32 v0, 0xffff0000, v0
-; GFX10-NEXT: s_mov_b32 s4, 0xff800000
; GFX10-NEXT: v_min_f32_e32 v2, v3, v2
; GFX10-NEXT: v_min_f32_e32 v0, v0, v1
; GFX10-NEXT: v_bfe_u32 v1, v2, 16, 1
-; GFX10-NEXT: v_and_or_b32 v4, v2, s4, 0x400000
+; GFX10-NEXT: v_or_b32_e32 v4, 0x400000, v2
; GFX10-NEXT: v_bfe_u32 v3, v0, 16, 1
; GFX10-NEXT: v_cmp_u_f32_e32 vcc_lo, v2, v2
-; GFX10-NEXT: v_and_or_b32 v5, v0, s4, 0x400000
+; GFX10-NEXT: v_or_b32_e32 v5, 0x400000, v0
; GFX10-NEXT: v_add3_u32 v1, v1, v2, 0x7fff
; GFX10-NEXT: v_add3_u32 v3, v3, v0, 0x7fff
; GFX10-NEXT: v_cndmask_b32_e32 v1, v1, v4, vcc_lo
@@ -17822,16 +17484,15 @@ define <2 x bfloat> @v_minnum_v2bf16(<2 x bfloat> %a, <2 x bfloat> %b) {
; GFX11-NEXT: v_and_b32_e32 v1, 0xffff0000, v1
; GFX11-NEXT: v_lshlrev_b32_e32 v3, 16, v0
; GFX11-NEXT: v_and_b32_e32 v0, 0xffff0000, v0
-; GFX11-NEXT: s_mov_b32 s0, 0xff800000
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_3)
; GFX11-NEXT: v_min_f32_e32 v0, v0, v1
; GFX11-NEXT: v_min_f32_e32 v2, v3, v2
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
; GFX11-NEXT: v_bfe_u32 v3, v0, 16, 1
; GFX11-NEXT: v_bfe_u32 v1, v2, 16, 1
-; GFX11-NEXT: v_and_or_b32 v4, v2, s0, 0x400000
+; GFX11-NEXT: v_or_b32_e32 v4, 0x400000, v2
; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v2, v2
-; GFX11-NEXT: v_and_or_b32 v5, v0, s0, 0x400000
+; GFX11-NEXT: v_or_b32_e32 v5, 0x400000, v0
; GFX11-NEXT: v_add3_u32 v3, v3, v0, 0x7fff
; GFX11-NEXT: v_add3_u32 v1, v1, v2, 0x7fff
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_4)
@@ -17913,8 +17574,7 @@ define <3 x bfloat> @v_minnum_v3bf16(<3 x bfloat> %a, <3 x bfloat> %b) {
; GFX8-NEXT: v_bfe_u32 v3, v1, 16, 1
; GFX8-NEXT: v_add_u32_e32 v3, vcc, v3, v1
; GFX8-NEXT: v_add_u32_e32 v3, vcc, 0x7fff, v3
-; GFX8-NEXT: v_and_b32_e32 v4, 0xff800000, v1
-; GFX8-NEXT: v_or_b32_e32 v4, 0x400000, v4
+; GFX8-NEXT: v_or_b32_e32 v4, 0x400000, v1
; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v1, v1
; GFX8-NEXT: v_cndmask_b32_e32 v1, v3, v4, vcc
; GFX8-NEXT: v_lshlrev_b32_e32 v3, 16, v2
@@ -17926,16 +17586,14 @@ define <3 x bfloat> @v_minnum_v3bf16(<3 x bfloat> %a, <3 x bfloat> %b) {
; GFX8-NEXT: v_and_b32_e32 v2, 0xffff0000, v2
; GFX8-NEXT: v_and_b32_e32 v0, 0xffff0000, v0
; GFX8-NEXT: v_add_u32_e32 v4, vcc, s4, v4
-; GFX8-NEXT: v_and_b32_e32 v5, 0xff800000, v3
; GFX8-NEXT: v_min_f32_e32 v0, v0, v2
-; GFX8-NEXT: v_or_b32_e32 v5, 0x400000, v5
+; GFX8-NEXT: v_or_b32_e32 v5, 0x400000, v3
; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v3, v3
; GFX8-NEXT: v_bfe_u32 v2, v0, 16, 1
; GFX8-NEXT: v_cndmask_b32_e32 v3, v4, v5, vcc
; GFX8-NEXT: v_add_u32_e32 v2, vcc, v2, v0
; GFX8-NEXT: v_add_u32_e32 v2, vcc, 0x7fff, v2
-; GFX8-NEXT: v_and_b32_e32 v4, 0xff800000, v0
-; GFX8-NEXT: v_or_b32_e32 v4, 0x400000, v4
+; GFX8-NEXT: v_or_b32_e32 v4, 0x400000, v0
; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v0, v0
; GFX8-NEXT: v_cndmask_b32_e32 v0, v2, v4, vcc
; GFX8-NEXT: v_lshrrev_b32_e32 v0, 16, v0
@@ -17951,27 +17609,24 @@ define <3 x bfloat> @v_minnum_v3bf16(<3 x bfloat> %a, <3 x bfloat> %b) {
; GFX9-NEXT: v_min_f32_e32 v1, v1, v3
; GFX9-NEXT: v_bfe_u32 v3, v1, 16, 1
; GFX9-NEXT: s_movk_i32 s4, 0x7fff
-; GFX9-NEXT: v_and_b32_e32 v4, 0xff800000, v1
; GFX9-NEXT: v_add3_u32 v3, v3, v1, s4
-; GFX9-NEXT: v_or_b32_e32 v4, 0x400000, v4
+; GFX9-NEXT: v_or_b32_e32 v4, 0x400000, v1
; GFX9-NEXT: v_cmp_u_f32_e32 vcc, v1, v1
; GFX9-NEXT: v_cndmask_b32_e32 v1, v3, v4, vcc
; GFX9-NEXT: v_lshlrev_b32_e32 v3, 16, v2
; GFX9-NEXT: v_lshlrev_b32_e32 v4, 16, v0
; GFX9-NEXT: v_min_f32_e32 v3, v4, v3
-; GFX9-NEXT: v_bfe_u32 v4, v3, 16, 1
-; GFX9-NEXT: v_and_b32_e32 v5, 0xff800000, v3
; GFX9-NEXT: v_and_b32_e32 v2, 0xffff0000, v2
; GFX9-NEXT: v_and_b32_e32 v0, 0xffff0000, v0
+; GFX9-NEXT: v_bfe_u32 v4, v3, 16, 1
+; GFX9-NEXT: v_min_f32_e32 v0, v0, v2
; GFX9-NEXT: v_add3_u32 v4, v4, v3, s4
-; GFX9-NEXT: v_or_b32_e32 v5, 0x400000, v5
+; GFX9-NEXT: v_or_b32_e32 v5, 0x400000, v3
; GFX9-NEXT: v_cmp_u_f32_e32 vcc, v3, v3
-; GFX9-NEXT: v_min_f32_e32 v0, v0, v2
-; GFX9-NEXT: v_cndmask_b32_e32 v3, v4, v5, vcc
; GFX9-NEXT: v_bfe_u32 v2, v0, 16, 1
-; GFX9-NEXT: v_and_b32_e32 v4, 0xff800000, v0
+; GFX9-NEXT: v_cndmask_b32_e32 v3, v4, v5, vcc
; GFX9-NEXT: v_add3_u32 v2, v2, v0, s4
-; GFX9-NEXT: v_or_b32_e32 v4, 0x400000, v4
+; GFX9-NEXT: v_or_b32_e32 v4, 0x400000, v0
; GFX9-NEXT: v_cmp_u_f32_e32 vcc, v0, v0
; GFX9-NEXT: v_cndmask_b32_e32 v0, v2, v4, vcc
; GFX9-NEXT: s_mov_b32 s4, 0x7060302
@@ -17989,18 +17644,17 @@ define <3 x bfloat> @v_minnum_v3bf16(<3 x bfloat> %a, <3 x bfloat> %b) {
; GFX10-NEXT: v_lshlrev_b32_e32 v3, 16, v3
; GFX10-NEXT: v_lshlrev_b32_e32 v1, 16, v1
; GFX10-NEXT: v_min_f32_e32 v4, v5, v4
-; GFX10-NEXT: s_mov_b32 s4, 0xff800000
; GFX10-NEXT: v_min_f32_e32 v0, v0, v2
; GFX10-NEXT: v_min_f32_e32 v1, v1, v3
; GFX10-NEXT: v_bfe_u32 v2, v4, 16, 1
-; GFX10-NEXT: v_and_or_b32 v7, v4, s4, 0x400000
+; GFX10-NEXT: v_or_b32_e32 v7, 0x400000, v4
; GFX10-NEXT: v_bfe_u32 v5, v0, 16, 1
; GFX10-NEXT: v_cmp_u_f32_e32 vcc_lo, v4, v4
; GFX10-NEXT: v_bfe_u32 v3, v1, 16, 1
; GFX10-NEXT: v_add3_u32 v2, v2, v4, 0x7fff
-; GFX10-NEXT: v_and_or_b32 v8, v0, s4, 0x400000
+; GFX10-NEXT: v_or_b32_e32 v8, 0x400000, v0
; GFX10-NEXT: v_add3_u32 v5, v5, v0, 0x7fff
-; GFX10-NEXT: v_and_or_b32 v6, v1, s4, 0x400000
+; GFX10-NEXT: v_or_b32_e32 v6, 0x400000, v1
; GFX10-NEXT: v_add3_u32 v3, v3, v1, 0x7fff
; GFX10-NEXT: v_cndmask_b32_e32 v2, v2, v7, vcc_lo
; GFX10-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0
@@ -18100,17 +17754,15 @@ define <4 x bfloat> @v_minnum_v4bf16(<4 x bfloat> %a, <4 x bfloat> %b) {
; GFX8-NEXT: v_and_b32_e32 v3, 0xffff0000, v3
; GFX8-NEXT: v_and_b32_e32 v1, 0xffff0000, v1
; GFX8-NEXT: v_add_u32_e32 v5, vcc, 0x7fff, v5
-; GFX8-NEXT: v_and_b32_e32 v6, 0xff800000, v4
; GFX8-NEXT: v_min_f32_e32 v1, v1, v3
-; GFX8-NEXT: v_or_b32_e32 v6, 0x400000, v6
+; GFX8-NEXT: v_or_b32_e32 v6, 0x400000, v4
; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v4, v4
; GFX8-NEXT: v_bfe_u32 v3, v1, 16, 1
; GFX8-NEXT: s_movk_i32 s4, 0x7fff
; GFX8-NEXT: v_cndmask_b32_e32 v4, v5, v6, vcc
; GFX8-NEXT: v_add_u32_e32 v3, vcc, v3, v1
; GFX8-NEXT: v_add_u32_e32 v3, vcc, s4, v3
-; GFX8-NEXT: v_and_b32_e32 v5, 0xff800000, v1
-; GFX8-NEXT: v_or_b32_e32 v5, 0x400000, v5
+; GFX8-NEXT: v_or_b32_e32 v5, 0x400000, v1
; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v1, v1
; GFX8-NEXT: v_cndmask_b32_e32 v1, v3, v5, vcc
; GFX8-NEXT: v_lshlrev_b32_e32 v3, 16, v2
@@ -18121,16 +17773,14 @@ define <4 x bfloat> @v_minnum_v4bf16(<4 x bfloat> %a, <4 x bfloat> %b) {
; GFX8-NEXT: v_and_b32_e32 v2, 0xffff0000, v2
; GFX8-NEXT: v_and_b32_e32 v0, 0xffff0000, v0
; GFX8-NEXT: v_add_u32_e32 v5, vcc, s4, v5
-; GFX8-NEXT: v_and_b32_e32 v6, 0xff800000, v3
; GFX8-NEXT: v_min_f32_e32 v0, v0, v2
-; GFX8-NEXT: v_or_b32_e32 v6, 0x400000, v6
+; GFX8-NEXT: v_or_b32_e32 v6, 0x400000, v3
; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v3, v3
; GFX8-NEXT: v_bfe_u32 v2, v0, 16, 1
; GFX8-NEXT: v_cndmask_b32_e32 v3, v5, v6, vcc
; GFX8-NEXT: v_add_u32_e32 v2, vcc, v2, v0
; GFX8-NEXT: v_add_u32_e32 v2, vcc, 0x7fff, v2
-; GFX8-NEXT: v_and_b32_e32 v5, 0xff800000, v0
-; GFX8-NEXT: v_or_b32_e32 v5, 0x400000, v5
+; GFX8-NEXT: v_or_b32_e32 v5, 0x400000, v0
; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v0, v0
; GFX8-NEXT: v_cndmask_b32_e32 v0, v2, v5, vcc
; GFX8-NEXT: v_lshrrev_b32_e32 v1, 16, v1
@@ -18145,38 +17795,34 @@ define <4 x bfloat> @v_minnum_v4bf16(<4 x bfloat> %a, <4 x bfloat> %b) {
; GFX9-NEXT: v_lshlrev_b32_e32 v4, 16, v3
; GFX9-NEXT: v_lshlrev_b32_e32 v5, 16, v1
; GFX9-NEXT: v_min_f32_e32 v4, v5, v4
-; GFX9-NEXT: v_bfe_u32 v5, v4, 16, 1
-; GFX9-NEXT: s_movk_i32 s4, 0x7fff
-; GFX9-NEXT: v_and_b32_e32 v6, 0xff800000, v4
; GFX9-NEXT: v_and_b32_e32 v3, 0xffff0000, v3
; GFX9-NEXT: v_and_b32_e32 v1, 0xffff0000, v1
+; GFX9-NEXT: v_bfe_u32 v5, v4, 16, 1
+; GFX9-NEXT: s_movk_i32 s4, 0x7fff
+; GFX9-NEXT: v_min_f32_e32 v1, v1, v3
; GFX9-NEXT: v_add3_u32 v5, v5, v4, s4
-; GFX9-NEXT: v_or_b32_e32 v6, 0x400000, v6
+; GFX9-NEXT: v_or_b32_e32 v6, 0x400000, v4
; GFX9-NEXT: v_cmp_u_f32_e32 vcc, v4, v4
-; GFX9-NEXT: v_min_f32_e32 v1, v1, v3
-; GFX9-NEXT: v_cndmask_b32_e32 v4, v5, v6, vcc
; GFX9-NEXT: v_bfe_u32 v3, v1, 16, 1
-; GFX9-NEXT: v_and_b32_e32 v5, 0xff800000, v1
+; GFX9-NEXT: v_cndmask_b32_e32 v4, v5, v6, vcc
; GFX9-NEXT: v_add3_u32 v3, v3, v1, s4
-; GFX9-NEXT: v_or_b32_e32 v5, 0x400000, v5
+; GFX9-NEXT: v_or_b32_e32 v5, 0x400000, v1
; GFX9-NEXT: v_cmp_u_f32_e32 vcc, v1, v1
; GFX9-NEXT: v_cndmask_b32_e32 v1, v3, v5, vcc
; GFX9-NEXT: v_lshlrev_b32_e32 v3, 16, v2
; GFX9-NEXT: v_lshlrev_b32_e32 v5, 16, v0
; GFX9-NEXT: v_min_f32_e32 v3, v5, v3
-; GFX9-NEXT: v_bfe_u32 v5, v3, 16, 1
-; GFX9-NEXT: v_and_b32_e32 v6, 0xff800000, v3
; GFX9-NEXT: v_and_b32_e32 v2, 0xffff0000, v2
; GFX9-NEXT: v_and_b32_e32 v0, 0xffff0000, v0
+; GFX9-NEXT: v_bfe_u32 v5, v3, 16, 1
+; GFX9-NEXT: v_min_f32_e32 v0, v0, v2
; GFX9-NEXT: v_add3_u32 v5, v5, v3, s4
-; GFX9-NEXT: v_or_b32_e32 v6, 0x400000, v6
+; GFX9-NEXT: v_or_b32_e32 v6, 0x400000, v3
; GFX9-NEXT: v_cmp_u_f32_e32 vcc, v3, v3
-; GFX9-NEXT: v_min_f32_e32 v0, v0, v2
-; GFX9-NEXT: v_cndmask_b32_e32 v3, v5, v6, vcc
; GFX9-NEXT: v_bfe_u32 v2, v0, 16, 1
-; GFX9-NEXT: v_and_b32_e32 v5, 0xff800000, v0
+; GFX9-NEXT: v_cndmask_b32_e32 v3, v5, v6, vcc
; GFX9-NEXT: v_add3_u32 v2, v2, v0, s4
-; GFX9-NEXT: v_or_b32_e32 v5, 0x400000, v5
+; GFX9-NEXT: v_or_b32_e32 v5, 0x400000, v0
; GFX9-NEXT: v_cmp_u_f32_e32 vcc, v0, v0
; GFX9-NEXT: v_cndmask_b32_e32 v0, v2, v5, vcc
; GFX9-NEXT: s_mov_b32 s4, 0x7060302
@@ -18197,31 +17843,30 @@ define <4 x bfloat> @v_minnum_v4bf16(<4 x bfloat> %a, <4 x bfloat> %b) {
; GFX10-NEXT: v_and_b32_e32 v2, 0xffff0000, v2
; GFX10-NEXT: v_and_b32_e32 v0, 0xffff0000, v0
; GFX10-NEXT: v_min_f32_e32 v1, v1, v3
-; GFX10-NEXT: v_min_f32_e32 v5, v7, v6
-; GFX10-NEXT: v_bfe_u32 v3, v4, 16, 1
-; GFX10-NEXT: s_mov_b32 s4, 0xff800000
+; GFX10-NEXT: v_min_f32_e32 v3, v7, v6
+; GFX10-NEXT: v_bfe_u32 v5, v4, 16, 1
+; GFX10-NEXT: v_or_b32_e32 v7, 0x400000, v4
; GFX10-NEXT: v_min_f32_e32 v0, v0, v2
-; GFX10-NEXT: v_and_or_b32 v6, v4, s4, 0x400000
-; GFX10-NEXT: v_bfe_u32 v7, v5, 16, 1
-; GFX10-NEXT: v_add3_u32 v3, v3, v4, 0x7fff
; GFX10-NEXT: v_cmp_u_f32_e32 vcc_lo, v4, v4
-; GFX10-NEXT: v_bfe_u32 v8, v0, 16, 1
+; GFX10-NEXT: v_bfe_u32 v6, v3, 16, 1
+; GFX10-NEXT: v_add3_u32 v5, v5, v4, 0x7fff
; GFX10-NEXT: v_bfe_u32 v2, v1, 16, 1
-; GFX10-NEXT: v_add3_u32 v4, v7, v5, 0x7fff
-; GFX10-NEXT: v_and_or_b32 v9, v1, s4, 0x400000
-; GFX10-NEXT: v_cndmask_b32_e32 v3, v3, v6, vcc_lo
-; GFX10-NEXT: v_and_or_b32 v6, v5, s4, 0x400000
-; GFX10-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
+; GFX10-NEXT: v_bfe_u32 v8, v0, 16, 1
+; GFX10-NEXT: v_or_b32_e32 v9, 0x400000, v1
+; GFX10-NEXT: v_add3_u32 v6, v6, v3, 0x7fff
+; GFX10-NEXT: v_cndmask_b32_e32 v4, v5, v7, vcc_lo
+; GFX10-NEXT: v_or_b32_e32 v5, 0x400000, v3
+; GFX10-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3
; GFX10-NEXT: v_add3_u32 v7, v8, v0, 0x7fff
-; GFX10-NEXT: v_and_or_b32 v8, v0, s4, 0x400000
+; GFX10-NEXT: v_or_b32_e32 v8, 0x400000, v0
; GFX10-NEXT: v_add3_u32 v2, v2, v1, 0x7fff
-; GFX10-NEXT: v_cndmask_b32_e32 v4, v4, v6, vcc_lo
+; GFX10-NEXT: v_cndmask_b32_e32 v3, v6, v5, vcc_lo
; GFX10-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0
; GFX10-NEXT: v_cndmask_b32_e32 v0, v7, v8, vcc_lo
; GFX10-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
-; GFX10-NEXT: v_perm_b32 v0, v0, v4, 0x7060302
+; GFX10-NEXT: v_perm_b32 v0, v0, v3, 0x7060302
; GFX10-NEXT: v_cndmask_b32_e32 v1, v2, v9, vcc_lo
-; GFX10-NEXT: v_perm_b32 v1, v1, v3, 0x7060302
+; GFX10-NEXT: v_perm_b32 v1, v1, v4, 0x7060302
; GFX10-NEXT: s_setpc_b64 s[30:31]
;
; GFX11-LABEL: v_minnum_v4bf16:
@@ -18229,45 +17874,42 @@ define <4 x bfloat> @v_minnum_v4bf16(<4 x bfloat> %a, <4 x bfloat> %b) {
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-NEXT: v_lshlrev_b32_e32 v6, 16, v2
; GFX11-NEXT: v_lshlrev_b32_e32 v7, 16, v0
-; GFX11-NEXT: v_lshlrev_b32_e32 v4, 16, v3
; GFX11-NEXT: v_and_b32_e32 v2, 0xffff0000, v2
; GFX11-NEXT: v_and_b32_e32 v0, 0xffff0000, v0
+; GFX11-NEXT: v_lshlrev_b32_e32 v4, 16, v3
; GFX11-NEXT: v_lshlrev_b32_e32 v5, 16, v1
-; GFX11-NEXT: v_and_b32_e32 v1, 0xffff0000, v1
-; GFX11-NEXT: s_mov_b32 s0, 0xff800000
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
-; GFX11-NEXT: v_dual_min_f32 v0, v0, v2 :: v_dual_and_b32 v3, 0xffff0000, v3
-; GFX11-NEXT: v_min_f32_e32 v4, v5, v4
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_2)
+; GFX11-NEXT: v_dual_min_f32 v0, v0, v2 :: v_dual_and_b32 v1, 0xffff0000, v1
+; GFX11-NEXT: v_and_b32_e32 v3, 0xffff0000, v3
; GFX11-NEXT: v_bfe_u32 v8, v0, 16, 1
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_2)
; GFX11-NEXT: v_min_f32_e32 v1, v1, v3
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3)
-; GFX11-NEXT: v_bfe_u32 v3, v4, 16, 1
-; GFX11-NEXT: v_min_f32_e32 v5, v7, v6
-; GFX11-NEXT: v_and_or_b32 v6, v4, s0, 0x400000
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v4, v4
+; GFX11-NEXT: v_dual_min_f32 v3, v7, v6 :: v_dual_min_f32 v4, v5, v4
; GFX11-NEXT: v_bfe_u32 v2, v1, 16, 1
-; GFX11-NEXT: v_add3_u32 v3, v3, v4, 0x7fff
-; GFX11-NEXT: v_bfe_u32 v7, v5, 16, 1
-; GFX11-NEXT: v_and_or_b32 v9, v1, s0, 0x400000
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX11-NEXT: v_bfe_u32 v6, v3, 16, 1
+; GFX11-NEXT: v_bfe_u32 v5, v4, 16, 1
+; GFX11-NEXT: v_or_b32_e32 v7, 0x400000, v4
+; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v4, v4
; GFX11-NEXT: v_add3_u32 v2, v2, v1, 0x7fff
-; GFX11-NEXT: v_cndmask_b32_e32 v3, v3, v6, vcc_lo
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_4) | instid1(VALU_DEP_4)
-; GFX11-NEXT: v_add3_u32 v4, v7, v5, 0x7fff
-; GFX11-NEXT: v_and_or_b32 v6, v5, s0, 0x400000
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
+; GFX11-NEXT: v_add3_u32 v6, v6, v3, 0x7fff
+; GFX11-NEXT: v_add3_u32 v5, v5, v4, 0x7fff
+; GFX11-NEXT: v_or_b32_e32 v9, 0x400000, v1
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_4) | instid1(VALU_DEP_4)
+; GFX11-NEXT: v_cndmask_b32_e32 v4, v5, v7, vcc_lo
+; GFX11-NEXT: v_or_b32_e32 v5, 0x400000, v3
+; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3
; GFX11-NEXT: v_add3_u32 v7, v8, v0, 0x7fff
-; GFX11-NEXT: v_and_or_b32 v8, v0, s0, 0x400000
-; GFX11-NEXT: v_cndmask_b32_e32 v4, v4, v6, vcc_lo
+; GFX11-NEXT: v_or_b32_e32 v8, 0x400000, v0
+; GFX11-NEXT: v_cndmask_b32_e32 v3, v6, v5, vcc_lo
; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_2) | instid1(VALU_DEP_3)
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_2)
; GFX11-NEXT: v_cndmask_b32_e32 v0, v7, v8, vcc_lo
; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
+; GFX11-NEXT: v_perm_b32 v0, v0, v3, 0x7060302
; GFX11-NEXT: v_cndmask_b32_e32 v1, v2, v9, vcc_lo
-; GFX11-NEXT: v_perm_b32 v0, v0, v4, 0x7060302
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2)
-; GFX11-NEXT: v_perm_b32 v1, v1, v3, 0x7060302
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-NEXT: v_perm_b32 v1, v1, v4, 0x7060302
; GFX11-NEXT: s_setpc_b64 s[30:31]
%op = call <4 x bfloat> @llvm.minnum.v4bf16(<4 x bfloat> %a, <4 x bfloat> %b)
ret <4 x bfloat> %op
@@ -18423,17 +18065,15 @@ define <8 x bfloat> @v_minnum_v8bf16(<8 x bfloat> %a, <8 x bfloat> %b) {
; GFX8-NEXT: v_and_b32_e32 v7, 0xffff0000, v7
; GFX8-NEXT: v_and_b32_e32 v3, 0xffff0000, v3
; GFX8-NEXT: v_add_u32_e32 v9, vcc, 0x7fff, v9
-; GFX8-NEXT: v_and_b32_e32 v10, 0xff800000, v8
; GFX8-NEXT: v_min_f32_e32 v3, v3, v7
-; GFX8-NEXT: v_or_b32_e32 v10, 0x400000, v10
+; GFX8-NEXT: v_or_b32_e32 v10, 0x400000, v8
; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v8, v8
; GFX8-NEXT: v_bfe_u32 v7, v3, 16, 1
; GFX8-NEXT: s_movk_i32 s4, 0x7fff
; GFX8-NEXT: v_cndmask_b32_e32 v8, v9, v10, vcc
; GFX8-NEXT: v_add_u32_e32 v7, vcc, v7, v3
; GFX8-NEXT: v_add_u32_e32 v7, vcc, s4, v7
-; GFX8-NEXT: v_and_b32_e32 v9, 0xff800000, v3
-; GFX8-NEXT: v_or_b32_e32 v9, 0x400000, v9
+; GFX8-NEXT: v_or_b32_e32 v9, 0x400000, v3
; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v3, v3
; GFX8-NEXT: v_cndmask_b32_e32 v3, v7, v9, vcc
; GFX8-NEXT: v_lshlrev_b32_e32 v7, 16, v6
@@ -18444,16 +18084,14 @@ define <8 x bfloat> @v_minnum_v8bf16(<8 x bfloat> %a, <8 x bfloat> %b) {
; GFX8-NEXT: v_and_b32_e32 v6, 0xffff0000, v6
; GFX8-NEXT: v_and_b32_e32 v2, 0xffff0000, v2
; GFX8-NEXT: v_add_u32_e32 v9, vcc, s4, v9
-; GFX8-NEXT: v_and_b32_e32 v10, 0xff800000, v7
; GFX8-NEXT: v_min_f32_e32 v2, v2, v6
-; GFX8-NEXT: v_or_b32_e32 v10, 0x400000, v10
+; GFX8-NEXT: v_or_b32_e32 v10, 0x400000, v7
; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v7, v7
; GFX8-NEXT: v_bfe_u32 v6, v2, 16, 1
; GFX8-NEXT: v_cndmask_b32_e32 v7, v9, v10, vcc
; GFX8-NEXT: v_add_u32_e32 v6, vcc, v6, v2
; GFX8-NEXT: v_add_u32_e32 v6, vcc, s4, v6
-; GFX8-NEXT: v_and_b32_e32 v9, 0xff800000, v2
-; GFX8-NEXT: v_or_b32_e32 v9, 0x400000, v9
+; GFX8-NEXT: v_or_b32_e32 v9, 0x400000, v2
; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v2, v2
; GFX8-NEXT: v_cndmask_b32_e32 v2, v6, v9, vcc
; GFX8-NEXT: v_lshlrev_b32_e32 v6, 16, v5
@@ -18464,16 +18102,14 @@ define <8 x bfloat> @v_minnum_v8bf16(<8 x bfloat> %a, <8 x bfloat> %b) {
; GFX8-NEXT: v_and_b32_e32 v5, 0xffff0000, v5
; GFX8-NEXT: v_and_b32_e32 v1, 0xffff0000, v1
; GFX8-NEXT: v_add_u32_e32 v9, vcc, s4, v9
-; GFX8-NEXT: v_and_b32_e32 v10, 0xff800000, v6
; GFX8-NEXT: v_min_f32_e32 v1, v1, v5
-; GFX8-NEXT: v_or_b32_e32 v10, 0x400000, v10
+; GFX8-NEXT: v_or_b32_e32 v10, 0x400000, v6
; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v6, v6
; GFX8-NEXT: v_bfe_u32 v5, v1, 16, 1
; GFX8-NEXT: v_cndmask_b32_e32 v6, v9, v10, vcc
; GFX8-NEXT: v_add_u32_e32 v5, vcc, v5, v1
; GFX8-NEXT: v_add_u32_e32 v5, vcc, s4, v5
-; GFX8-NEXT: v_and_b32_e32 v9, 0xff800000, v1
-; GFX8-NEXT: v_or_b32_e32 v9, 0x400000, v9
+; GFX8-NEXT: v_or_b32_e32 v9, 0x400000, v1
; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v1, v1
; GFX8-NEXT: v_cndmask_b32_e32 v1, v5, v9, vcc
; GFX8-NEXT: v_lshlrev_b32_e32 v5, 16, v4
@@ -18484,16 +18120,14 @@ define <8 x bfloat> @v_minnum_v8bf16(<8 x bfloat> %a, <8 x bfloat> %b) {
; GFX8-NEXT: v_and_b32_e32 v4, 0xffff0000, v4
; GFX8-NEXT: v_and_b32_e32 v0, 0xffff0000, v0
; GFX8-NEXT: v_add_u32_e32 v9, vcc, s4, v9
-; GFX8-NEXT: v_and_b32_e32 v10, 0xff800000, v5
; GFX8-NEXT: v_min_f32_e32 v0, v0, v4
-; GFX8-NEXT: v_or_b32_e32 v10, 0x400000, v10
+; GFX8-NEXT: v_or_b32_e32 v10, 0x400000, v5
; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v5, v5
; GFX8-NEXT: v_bfe_u32 v4, v0, 16, 1
; GFX8-NEXT: v_cndmask_b32_e32 v5, v9, v10, vcc
; GFX8-NEXT: v_add_u32_e32 v4, vcc, v4, v0
; GFX8-NEXT: v_add_u32_e32 v4, vcc, 0x7fff, v4
-; GFX8-NEXT: v_and_b32_e32 v9, 0xff800000, v0
-; GFX8-NEXT: v_or_b32_e32 v9, 0x400000, v9
+; GFX8-NEXT: v_or_b32_e32 v9, 0x400000, v0
; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v0, v0
; GFX8-NEXT: v_cndmask_b32_e32 v0, v4, v9, vcc
; GFX8-NEXT: v_lshrrev_b32_e32 v3, 16, v3
@@ -18512,74 +18146,66 @@ define <8 x bfloat> @v_minnum_v8bf16(<8 x bfloat> %a, <8 x bfloat> %b) {
; GFX9-NEXT: v_lshlrev_b32_e32 v8, 16, v7
; GFX9-NEXT: v_lshlrev_b32_e32 v9, 16, v3
; GFX9-NEXT: v_min_f32_e32 v8, v9, v8
-; GFX9-NEXT: v_bfe_u32 v9, v8, 16, 1
-; GFX9-NEXT: s_movk_i32 s4, 0x7fff
-; GFX9-NEXT: v_and_b32_e32 v10, 0xff800000, v8
; GFX9-NEXT: v_and_b32_e32 v7, 0xffff0000, v7
; GFX9-NEXT: v_and_b32_e32 v3, 0xffff0000, v3
+; GFX9-NEXT: v_bfe_u32 v9, v8, 16, 1
+; GFX9-NEXT: s_movk_i32 s4, 0x7fff
+; GFX9-NEXT: v_min_f32_e32 v3, v3, v7
; GFX9-NEXT: v_add3_u32 v9, v9, v8, s4
-; GFX9-NEXT: v_or_b32_e32 v10, 0x400000, v10
+; GFX9-NEXT: v_or_b32_e32 v10, 0x400000, v8
; GFX9-NEXT: v_cmp_u_f32_e32 vcc, v8, v8
-; GFX9-NEXT: v_min_f32_e32 v3, v3, v7
-; GFX9-NEXT: v_cndmask_b32_e32 v8, v9, v10, vcc
; GFX9-NEXT: v_bfe_u32 v7, v3, 16, 1
-; GFX9-NEXT: v_and_b32_e32 v9, 0xff800000, v3
+; GFX9-NEXT: v_cndmask_b32_e32 v8, v9, v10, vcc
; GFX9-NEXT: v_add3_u32 v7, v7, v3, s4
-; GFX9-NEXT: v_or_b32_e32 v9, 0x400000, v9
+; GFX9-NEXT: v_or_b32_e32 v9, 0x400000, v3
; GFX9-NEXT: v_cmp_u_f32_e32 vcc, v3, v3
; GFX9-NEXT: v_cndmask_b32_e32 v3, v7, v9, vcc
; GFX9-NEXT: v_lshlrev_b32_e32 v7, 16, v6
; GFX9-NEXT: v_lshlrev_b32_e32 v9, 16, v2
; GFX9-NEXT: v_min_f32_e32 v7, v9, v7
-; GFX9-NEXT: v_bfe_u32 v9, v7, 16, 1
-; GFX9-NEXT: v_and_b32_e32 v10, 0xff800000, v7
; GFX9-NEXT: v_and_b32_e32 v6, 0xffff0000, v6
; GFX9-NEXT: v_and_b32_e32 v2, 0xffff0000, v2
+; GFX9-NEXT: v_bfe_u32 v9, v7, 16, 1
+; GFX9-NEXT: v_min_f32_e32 v2, v2, v6
; GFX9-NEXT: v_add3_u32 v9, v9, v7, s4
-; GFX9-NEXT: v_or_b32_e32 v10, 0x400000, v10
+; GFX9-NEXT: v_or_b32_e32 v10, 0x400000, v7
; GFX9-NEXT: v_cmp_u_f32_e32 vcc, v7, v7
-; GFX9-NEXT: v_min_f32_e32 v2, v2, v6
-; GFX9-NEXT: v_cndmask_b32_e32 v7, v9, v10, vcc
; GFX9-NEXT: v_bfe_u32 v6, v2, 16, 1
-; GFX9-NEXT: v_and_b32_e32 v9, 0xff800000, v2
+; GFX9-NEXT: v_cndmask_b32_e32 v7, v9, v10, vcc
; GFX9-NEXT: v_add3_u32 v6, v6, v2, s4
-; GFX9-NEXT: v_or_b32_e32 v9, 0x400000, v9
+; GFX9-NEXT: v_or_b32_e32 v9, 0x400000, v2
; GFX9-NEXT: v_cmp_u_f32_e32 vcc, v2, v2
; GFX9-NEXT: v_cndmask_b32_e32 v2, v6, v9, vcc
; GFX9-NEXT: v_lshlrev_b32_e32 v6, 16, v5
; GFX9-NEXT: v_lshlrev_b32_e32 v9, 16, v1
; GFX9-NEXT: v_min_f32_e32 v6, v9, v6
-; GFX9-NEXT: v_bfe_u32 v9, v6, 16, 1
-; GFX9-NEXT: v_and_b32_e32 v10, 0xff800000, v6
; GFX9-NEXT: v_and_b32_e32 v5, 0xffff0000, v5
; GFX9-NEXT: v_and_b32_e32 v1, 0xffff0000, v1
+; GFX9-NEXT: v_bfe_u32 v9, v6, 16, 1
+; GFX9-NEXT: v_min_f32_e32 v1, v1, v5
; GFX9-NEXT: v_add3_u32 v9, v9, v6, s4
-; GFX9-NEXT: v_or_b32_e32 v10, 0x400000, v10
+; GFX9-NEXT: v_or_b32_e32 v10, 0x400000, v6
; GFX9-NEXT: v_cmp_u_f32_e32 vcc, v6, v6
-; GFX9-NEXT: v_min_f32_e32 v1, v1, v5
-; GFX9-NEXT: v_cndmask_b32_e32 v6, v9, v10, vcc
; GFX9-NEXT: v_bfe_u32 v5, v1, 16, 1
-; GFX9-NEXT: v_and_b32_e32 v9, 0xff800000, v1
+; GFX9-NEXT: v_cndmask_b32_e32 v6, v9, v10, vcc
; GFX9-NEXT: v_add3_u32 v5, v5, v1, s4
-; GFX9-NEXT: v_or_b32_e32 v9, 0x400000, v9
+; GFX9-NEXT: v_or_b32_e32 v9, 0x400000, v1
; GFX9-NEXT: v_cmp_u_f32_e32 vcc, v1, v1
; GFX9-NEXT: v_cndmask_b32_e32 v1, v5, v9, vcc
; GFX9-NEXT: v_lshlrev_b32_e32 v5, 16, v4
; GFX9-NEXT: v_lshlrev_b32_e32 v9, 16, v0
; GFX9-NEXT: v_min_f32_e32 v5, v9, v5
-; GFX9-NEXT: v_bfe_u32 v9, v5, 16, 1
-; GFX9-NEXT: v_and_b32_e32 v10, 0xff800000, v5
; GFX9-NEXT: v_and_b32_e32 v4, 0xffff0000, v4
; GFX9-NEXT: v_and_b32_e32 v0, 0xffff0000, v0
+; GFX9-NEXT: v_bfe_u32 v9, v5, 16, 1
+; GFX9-NEXT: v_min_f32_e32 v0, v0, v4
; GFX9-NEXT: v_add3_u32 v9, v9, v5, s4
-; GFX9-NEXT: v_or_b32_e32 v10, 0x400000, v10
+; GFX9-NEXT: v_or_b32_e32 v10, 0x400000, v5
; GFX9-NEXT: v_cmp_u_f32_e32 vcc, v5, v5
-; GFX9-NEXT: v_min_f32_e32 v0, v0, v4
-; GFX9-NEXT: v_cndmask_b32_e32 v5, v9, v10, vcc
; GFX9-NEXT: v_bfe_u32 v4, v0, 16, 1
-; GFX9-NEXT: v_and_b32_e32 v9, 0xff800000, v0
+; GFX9-NEXT: v_cndmask_b32_e32 v5, v9, v10, vcc
; GFX9-NEXT: v_add3_u32 v4, v4, v0, s4
-; GFX9-NEXT: v_or_b32_e32 v9, 0x400000, v9
+; GFX9-NEXT: v_or_b32_e32 v9, 0x400000, v0
; GFX9-NEXT: v_cmp_u_f32_e32 vcc, v0, v0
; GFX9-NEXT: v_cndmask_b32_e32 v0, v4, v9, vcc
; GFX9-NEXT: s_mov_b32 s4, 0x7060302
@@ -18596,62 +18222,61 @@ define <8 x bfloat> @v_minnum_v8bf16(<8 x bfloat> %a, <8 x bfloat> %b) {
; GFX10-NEXT: v_lshlrev_b32_e32 v9, 16, v3
; GFX10-NEXT: v_and_b32_e32 v7, 0xffff0000, v7
; GFX10-NEXT: v_and_b32_e32 v3, 0xffff0000, v3
-; GFX10-NEXT: v_lshlrev_b32_e32 v11, 16, v2
+; GFX10-NEXT: v_lshlrev_b32_e32 v10, 16, v2
; GFX10-NEXT: v_and_b32_e32 v2, 0xffff0000, v2
; GFX10-NEXT: v_min_f32_e32 v8, v9, v8
; GFX10-NEXT: v_lshlrev_b32_e32 v9, 16, v6
-; GFX10-NEXT: v_min_f32_e32 v3, v3, v7
; GFX10-NEXT: v_and_b32_e32 v6, 0xffff0000, v6
-; GFX10-NEXT: s_mov_b32 s4, 0xff800000
-; GFX10-NEXT: v_bfe_u32 v10, v8, 16, 1
-; GFX10-NEXT: v_and_or_b32 v7, v8, s4, 0x400000
-; GFX10-NEXT: v_min_f32_e32 v9, v11, v9
-; GFX10-NEXT: v_bfe_u32 v11, v3, 16, 1
+; GFX10-NEXT: v_min_f32_e32 v3, v3, v7
+; GFX10-NEXT: v_lshlrev_b32_e32 v15, 16, v0
+; GFX10-NEXT: v_bfe_u32 v11, v8, 16, 1
+; GFX10-NEXT: v_min_f32_e32 v7, v10, v9
+; GFX10-NEXT: v_or_b32_e32 v9, 0x400000, v8
; GFX10-NEXT: v_cmp_u_f32_e32 vcc_lo, v8, v8
-; GFX10-NEXT: v_add3_u32 v10, v10, v8, 0x7fff
; GFX10-NEXT: v_min_f32_e32 v2, v2, v6
-; GFX10-NEXT: v_bfe_u32 v8, v9, 16, 1
+; GFX10-NEXT: v_add3_u32 v10, v11, v8, 0x7fff
+; GFX10-NEXT: v_bfe_u32 v11, v3, 16, 1
+; GFX10-NEXT: v_bfe_u32 v12, v7, 16, 1
; GFX10-NEXT: v_lshlrev_b32_e32 v6, 16, v5
-; GFX10-NEXT: v_and_or_b32 v12, v9, s4, 0x400000
-; GFX10-NEXT: v_cndmask_b32_e32 v7, v10, v7, vcc_lo
-; GFX10-NEXT: v_add3_u32 v10, v11, v3, 0x7fff
-; GFX10-NEXT: v_lshlrev_b32_e32 v11, 16, v1
; GFX10-NEXT: v_bfe_u32 v13, v2, 16, 1
-; GFX10-NEXT: v_add3_u32 v8, v8, v9, 0x7fff
-; GFX10-NEXT: v_cmp_u_f32_e32 vcc_lo, v9, v9
+; GFX10-NEXT: v_cndmask_b32_e32 v8, v10, v9, vcc_lo
+; GFX10-NEXT: v_lshlrev_b32_e32 v10, 16, v1
+; GFX10-NEXT: v_add3_u32 v9, v11, v3, 0x7fff
+; GFX10-NEXT: v_add3_u32 v11, v12, v7, 0x7fff
+; GFX10-NEXT: v_or_b32_e32 v12, 0x400000, v7
+; GFX10-NEXT: v_cmp_u_f32_e32 vcc_lo, v7, v7
+; GFX10-NEXT: v_min_f32_e32 v6, v10, v6
+; GFX10-NEXT: v_add3_u32 v10, v13, v2, 0x7fff
; GFX10-NEXT: v_and_b32_e32 v5, 0xffff0000, v5
-; GFX10-NEXT: v_min_f32_e32 v6, v11, v6
-; GFX10-NEXT: v_add3_u32 v9, v13, v2, 0x7fff
; GFX10-NEXT: v_and_b32_e32 v1, 0xffff0000, v1
; GFX10-NEXT: v_lshlrev_b32_e32 v13, 16, v4
-; GFX10-NEXT: v_lshlrev_b32_e32 v15, 16, v0
-; GFX10-NEXT: v_cndmask_b32_e32 v8, v8, v12, vcc_lo
-; GFX10-NEXT: v_and_or_b32 v11, v2, s4, 0x400000
+; GFX10-NEXT: v_cndmask_b32_e32 v7, v11, v12, vcc_lo
+; GFX10-NEXT: v_or_b32_e32 v11, 0x400000, v2
; GFX10-NEXT: v_bfe_u32 v12, v6, 16, 1
; GFX10-NEXT: v_and_b32_e32 v4, 0xffff0000, v4
; GFX10-NEXT: v_and_b32_e32 v0, 0xffff0000, v0
; GFX10-NEXT: v_cmp_u_f32_e32 vcc_lo, v2, v2
; GFX10-NEXT: v_min_f32_e32 v1, v1, v5
; GFX10-NEXT: v_min_f32_e32 v5, v15, v13
-; GFX10-NEXT: v_and_or_b32 v14, v3, s4, 0x400000
+; GFX10-NEXT: v_or_b32_e32 v14, 0x400000, v3
; GFX10-NEXT: v_min_f32_e32 v0, v0, v4
-; GFX10-NEXT: v_cndmask_b32_e32 v2, v9, v11, vcc_lo
+; GFX10-NEXT: v_cndmask_b32_e32 v2, v10, v11, vcc_lo
; GFX10-NEXT: v_add3_u32 v4, v12, v6, 0x7fff
-; GFX10-NEXT: v_and_or_b32 v9, v6, s4, 0x400000
+; GFX10-NEXT: v_or_b32_e32 v10, 0x400000, v6
; GFX10-NEXT: v_bfe_u32 v11, v1, 16, 1
; GFX10-NEXT: v_bfe_u32 v12, v5, 16, 1
; GFX10-NEXT: v_cmp_u_f32_e32 vcc_lo, v6, v6
; GFX10-NEXT: v_bfe_u32 v13, v0, 16, 1
-; GFX10-NEXT: v_and_or_b32 v15, v1, s4, 0x400000
+; GFX10-NEXT: v_or_b32_e32 v15, 0x400000, v1
; GFX10-NEXT: v_add3_u32 v6, v11, v1, 0x7fff
-; GFX10-NEXT: v_and_or_b32 v11, v5, s4, 0x400000
-; GFX10-NEXT: v_cndmask_b32_e32 v4, v4, v9, vcc_lo
-; GFX10-NEXT: v_add3_u32 v9, v12, v5, 0x7fff
+; GFX10-NEXT: v_or_b32_e32 v11, 0x400000, v5
+; GFX10-NEXT: v_cndmask_b32_e32 v4, v4, v10, vcc_lo
+; GFX10-NEXT: v_add3_u32 v10, v12, v5, 0x7fff
; GFX10-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
; GFX10-NEXT: v_add3_u32 v12, v13, v0, 0x7fff
-; GFX10-NEXT: v_and_or_b32 v13, v0, s4, 0x400000
-; GFX10-NEXT: v_perm_b32 v2, v2, v8, 0x7060302
-; GFX10-NEXT: v_cndmask_b32_e32 v5, v9, v11, vcc_lo
+; GFX10-NEXT: v_or_b32_e32 v13, 0x400000, v0
+; GFX10-NEXT: v_perm_b32 v2, v2, v7, 0x7060302
+; GFX10-NEXT: v_cndmask_b32_e32 v5, v10, v11, vcc_lo
; GFX10-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0
; GFX10-NEXT: v_cndmask_b32_e32 v0, v12, v13, vcc_lo
; GFX10-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
@@ -18659,81 +18284,80 @@ define <8 x bfloat> @v_minnum_v8bf16(<8 x bfloat> %a, <8 x bfloat> %b) {
; GFX10-NEXT: v_cndmask_b32_e32 v1, v6, v15, vcc_lo
; GFX10-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3
; GFX10-NEXT: v_perm_b32 v1, v1, v4, 0x7060302
-; GFX10-NEXT: v_cndmask_b32_e32 v3, v10, v14, vcc_lo
-; GFX10-NEXT: v_perm_b32 v3, v3, v7, 0x7060302
+; GFX10-NEXT: v_cndmask_b32_e32 v3, v9, v14, vcc_lo
+; GFX10-NEXT: v_perm_b32 v3, v3, v8, 0x7060302
; GFX10-NEXT: s_setpc_b64 s[30:31]
;
; GFX11-LABEL: v_minnum_v8bf16:
; GFX11: ; %bb.0:
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: v_lshlrev_b32_e32 v15, 16, v0
-; GFX11-NEXT: v_and_b32_e32 v0, 0xffff0000, v0
-; GFX11-NEXT: v_lshlrev_b32_e32 v11, 16, v2
+; GFX11-NEXT: v_lshlrev_b32_e32 v10, 16, v2
; GFX11-NEXT: v_lshlrev_b32_e32 v8, 16, v7
; GFX11-NEXT: v_and_b32_e32 v2, 0xffff0000, v2
; GFX11-NEXT: v_lshlrev_b32_e32 v9, 16, v3
-; GFX11-NEXT: v_and_b32_e32 v7, 0xffff0000, v7
-; GFX11-NEXT: s_mov_b32 s0, 0xff800000
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_dual_min_f32 v8, v9, v8 :: v_dual_lshlrev_b32 v9, 16, v6
-; GFX11-NEXT: v_bfe_u32 v10, v8, 16, 1
+; GFX11-NEXT: v_lshlrev_b32_e32 v15, 16, v0
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_2)
+; GFX11-NEXT: v_dual_min_f32 v8, v9, v8 :: v_dual_and_b32 v7, 0xffff0000, v7
+; GFX11-NEXT: v_lshlrev_b32_e32 v9, 16, v6
; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v8, v8
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
-; GFX11-NEXT: v_min_f32_e32 v9, v11, v9
-; GFX11-NEXT: v_add3_u32 v10, v10, v8, 0x7fff
-; GFX11-NEXT: v_and_b32_e32 v6, 0xffff0000, v6
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_2)
-; GFX11-NEXT: v_and_or_b32 v12, v9, s0, 0x400000
-; GFX11-NEXT: v_min_f32_e32 v2, v2, v6
-; GFX11-NEXT: v_lshlrev_b32_e32 v6, 16, v5
; GFX11-NEXT: v_and_b32_e32 v3, 0xffff0000, v3
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_2)
-; GFX11-NEXT: v_bfe_u32 v13, v2, 16, 1
+; GFX11-NEXT: v_bfe_u32 v11, v8, 16, 1
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_2) | instid1(VALU_DEP_4)
; GFX11-NEXT: v_min_f32_e32 v3, v3, v7
-; GFX11-NEXT: v_and_or_b32 v7, v8, s0, 0x400000
-; GFX11-NEXT: v_bfe_u32 v8, v9, 16, 1
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
-; GFX11-NEXT: v_cndmask_b32_e32 v7, v10, v7, vcc_lo
-; GFX11-NEXT: v_add3_u32 v8, v8, v9, 0x7fff
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v9, v9
-; GFX11-NEXT: v_add3_u32 v9, v13, v2, 0x7fff
-; GFX11-NEXT: v_lshlrev_b32_e32 v13, 16, v4
-; GFX11-NEXT: v_and_b32_e32 v4, 0xffff0000, v4
+; GFX11-NEXT: v_min_f32_e32 v7, v10, v9
+; GFX11-NEXT: v_or_b32_e32 v9, 0x400000, v8
+; GFX11-NEXT: v_add3_u32 v10, v11, v8, 0x7fff
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
; GFX11-NEXT: v_bfe_u32 v11, v3, 16, 1
-; GFX11-NEXT: v_cndmask_b32_e32 v8, v8, v12, vcc_lo
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v2, v2
-; GFX11-NEXT: v_and_or_b32 v14, v3, s0, 0x400000
-; GFX11-NEXT: v_min_f32_e32 v0, v0, v4
-; GFX11-NEXT: v_add3_u32 v10, v11, v3, 0x7fff
-; GFX11-NEXT: v_lshlrev_b32_e32 v11, 16, v1
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
-; GFX11-NEXT: v_dual_min_f32 v6, v11, v6 :: v_dual_and_b32 v1, 0xffff0000, v1
-; GFX11-NEXT: v_and_or_b32 v11, v2, s0, 0x400000
+; GFX11-NEXT: v_bfe_u32 v12, v7, 16, 1
+; GFX11-NEXT: v_or_b32_e32 v14, 0x400000, v3
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4)
+; GFX11-NEXT: v_cndmask_b32_e32 v8, v10, v9, vcc_lo
+; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v7, v7
+; GFX11-NEXT: v_add3_u32 v9, v11, v3, 0x7fff
+; GFX11-NEXT: v_add3_u32 v11, v12, v7, 0x7fff
+; GFX11-NEXT: v_or_b32_e32 v12, 0x400000, v7
+; GFX11-NEXT: v_and_b32_e32 v6, 0xffff0000, v6
+; GFX11-NEXT: v_lshlrev_b32_e32 v10, 16, v1
+; GFX11-NEXT: v_and_b32_e32 v0, 0xffff0000, v0
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_2) | instid1(VALU_DEP_3)
+; GFX11-NEXT: v_dual_cndmask_b32 v7, v11, v12 :: v_dual_min_f32 v2, v2, v6
+; GFX11-NEXT: v_lshlrev_b32_e32 v6, 16, v5
; GFX11-NEXT: v_and_b32_e32 v5, 0xffff0000, v5
+; GFX11-NEXT: v_bfe_u32 v13, v2, 16, 1
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_2) | instid1(VALU_DEP_4)
+; GFX11-NEXT: v_min_f32_e32 v6, v10, v6
+; GFX11-NEXT: v_or_b32_e32 v11, 0x400000, v2
+; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v2, v2
+; GFX11-NEXT: v_add3_u32 v10, v13, v2, 0x7fff
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_2)
; GFX11-NEXT: v_bfe_u32 v12, v6, 16, 1
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3)
-; GFX11-NEXT: v_cndmask_b32_e32 v2, v9, v11, vcc_lo
-; GFX11-NEXT: v_and_or_b32 v9, v6, s0, 0x400000
+; GFX11-NEXT: v_cndmask_b32_e32 v2, v10, v11, vcc_lo
+; GFX11-NEXT: v_or_b32_e32 v10, 0x400000, v6
; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v6, v6
-; GFX11-NEXT: v_min_f32_e32 v1, v1, v5
+; GFX11-NEXT: v_lshlrev_b32_e32 v13, 16, v4
+; GFX11-NEXT: v_and_b32_e32 v4, 0xffff0000, v4
+; GFX11-NEXT: v_and_b32_e32 v1, 0xffff0000, v1
+; GFX11-NEXT: v_perm_b32 v2, v2, v7, 0x7060302
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX11-NEXT: v_min_f32_e32 v0, v0, v4
; GFX11-NEXT: v_add3_u32 v4, v12, v6, 0x7fff
-; GFX11-NEXT: v_perm_b32 v2, v2, v8, 0x7060302
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
-; GFX11-NEXT: v_bfe_u32 v11, v1, 16, 1
-; GFX11-NEXT: v_cndmask_b32_e32 v4, v4, v9, vcc_lo
+; GFX11-NEXT: v_dual_min_f32 v1, v1, v5 :: v_dual_cndmask_b32 v4, v4, v10
; GFX11-NEXT: v_min_f32_e32 v5, v15, v13
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_2) | instid1(VALU_DEP_4)
+; GFX11-NEXT: v_bfe_u32 v11, v1, 16, 1
; GFX11-NEXT: v_bfe_u32 v13, v0, 16, 1
-; GFX11-NEXT: v_and_or_b32 v15, v1, s0, 0x400000
-; GFX11-NEXT: v_add3_u32 v6, v11, v1, 0x7fff
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_2) | instid1(VALU_DEP_3)
+; GFX11-NEXT: v_or_b32_e32 v15, 0x400000, v1
; GFX11-NEXT: v_bfe_u32 v12, v5, 16, 1
-; GFX11-NEXT: v_and_or_b32 v11, v5, s0, 0x400000
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_2) | instid1(VALU_DEP_4)
+; GFX11-NEXT: v_add3_u32 v6, v11, v1, 0x7fff
+; GFX11-NEXT: v_or_b32_e32 v11, 0x400000, v5
; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
-; GFX11-NEXT: v_add3_u32 v9, v12, v5, 0x7fff
+; GFX11-NEXT: v_add3_u32 v10, v12, v5, 0x7fff
; GFX11-NEXT: v_add3_u32 v12, v13, v0, 0x7fff
-; GFX11-NEXT: v_and_or_b32 v13, v0, s0, 0x400000
+; GFX11-NEXT: v_or_b32_e32 v13, 0x400000, v0
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_3)
-; GFX11-NEXT: v_cndmask_b32_e32 v5, v9, v11, vcc_lo
+; GFX11-NEXT: v_cndmask_b32_e32 v5, v10, v11, vcc_lo
; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0
; GFX11-NEXT: v_cndmask_b32_e32 v0, v12, v13, vcc_lo
; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
@@ -18742,9 +18366,9 @@ define <8 x bfloat> @v_minnum_v8bf16(<8 x bfloat> %a, <8 x bfloat> %b) {
; GFX11-NEXT: v_cndmask_b32_e32 v1, v6, v15, vcc_lo
; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3
; GFX11-NEXT: v_perm_b32 v1, v1, v4, 0x7060302
-; GFX11-NEXT: v_cndmask_b32_e32 v3, v10, v14, vcc_lo
+; GFX11-NEXT: v_cndmask_b32_e32 v3, v9, v14, vcc_lo
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX11-NEXT: v_perm_b32 v3, v3, v7, 0x7060302
+; GFX11-NEXT: v_perm_b32 v3, v3, v8, 0x7060302
; GFX11-NEXT: s_setpc_b64 s[30:31]
%op = call <8 x bfloat> @llvm.minnum.v8bf16(<8 x bfloat> %a, <8 x bfloat> %b)
ret <8 x bfloat> %op
@@ -19033,16 +18657,14 @@ define <16 x bfloat> @v_minnum_v16bf16(<16 x bfloat> %a, <16 x bfloat> %b) {
; GFX8-NEXT: v_and_b32_e32 v15, 0xffff0000, v15
; GFX8-NEXT: v_and_b32_e32 v7, 0xffff0000, v7
; GFX8-NEXT: v_add_u32_e32 v17, vcc, s4, v17
-; GFX8-NEXT: v_and_b32_e32 v18, 0xff800000, v16
; GFX8-NEXT: v_min_f32_e32 v7, v7, v15
-; GFX8-NEXT: v_or_b32_e32 v18, 0x400000, v18
+; GFX8-NEXT: v_or_b32_e32 v18, 0x400000, v16
; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v16, v16
; GFX8-NEXT: v_bfe_u32 v15, v7, 16, 1
; GFX8-NEXT: v_cndmask_b32_e32 v16, v17, v18, vcc
; GFX8-NEXT: v_add_u32_e32 v15, vcc, v15, v7
; GFX8-NEXT: v_add_u32_e32 v15, vcc, s4, v15
-; GFX8-NEXT: v_and_b32_e32 v17, 0xff800000, v7
-; GFX8-NEXT: v_or_b32_e32 v17, 0x400000, v17
+; GFX8-NEXT: v_or_b32_e32 v17, 0x400000, v7
; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v7, v7
; GFX8-NEXT: v_cndmask_b32_e32 v7, v15, v17, vcc
; GFX8-NEXT: v_lshlrev_b32_e32 v15, 16, v14
@@ -19053,16 +18675,14 @@ define <16 x bfloat> @v_minnum_v16bf16(<16 x bfloat> %a, <16 x bfloat> %b) {
; GFX8-NEXT: v_and_b32_e32 v14, 0xffff0000, v14
; GFX8-NEXT: v_and_b32_e32 v6, 0xffff0000, v6
; GFX8-NEXT: v_add_u32_e32 v17, vcc, s4, v17
-; GFX8-NEXT: v_and_b32_e32 v18, 0xff800000, v15
; GFX8-NEXT: v_min_f32_e32 v6, v6, v14
-; GFX8-NEXT: v_or_b32_e32 v18, 0x400000, v18
+; GFX8-NEXT: v_or_b32_e32 v18, 0x400000, v15
; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v15, v15
; GFX8-NEXT: v_bfe_u32 v14, v6, 16, 1
; GFX8-NEXT: v_cndmask_b32_e32 v15, v17, v18, vcc
; GFX8-NEXT: v_add_u32_e32 v14, vcc, v14, v6
; GFX8-NEXT: v_add_u32_e32 v14, vcc, s4, v14
-; GFX8-NEXT: v_and_b32_e32 v17, 0xff800000, v6
-; GFX8-NEXT: v_or_b32_e32 v17, 0x400000, v17
+; GFX8-NEXT: v_or_b32_e32 v17, 0x400000, v6
; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v6, v6
; GFX8-NEXT: v_cndmask_b32_e32 v6, v14, v17, vcc
; GFX8-NEXT: v_lshlrev_b32_e32 v14, 16, v13
@@ -19073,16 +18693,14 @@ define <16 x bfloat> @v_minnum_v16bf16(<16 x bfloat> %a, <16 x bfloat> %b) {
; GFX8-NEXT: v_and_b32_e32 v13, 0xffff0000, v13
; GFX8-NEXT: v_and_b32_e32 v5, 0xffff0000, v5
; GFX8-NEXT: v_add_u32_e32 v17, vcc, s4, v17
-; GFX8-NEXT: v_and_b32_e32 v18, 0xff800000, v14
; GFX8-NEXT: v_min_f32_e32 v5, v5, v13
-; GFX8-NEXT: v_or_b32_e32 v18, 0x400000, v18
+; GFX8-NEXT: v_or_b32_e32 v18, 0x400000, v14
; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v14, v14
; GFX8-NEXT: v_bfe_u32 v13, v5, 16, 1
; GFX8-NEXT: v_cndmask_b32_e32 v14, v17, v18, vcc
; GFX8-NEXT: v_add_u32_e32 v13, vcc, v13, v5
; GFX8-NEXT: v_add_u32_e32 v13, vcc, s4, v13
-; GFX8-NEXT: v_and_b32_e32 v17, 0xff800000, v5
-; GFX8-NEXT: v_or_b32_e32 v17, 0x400000, v17
+; GFX8-NEXT: v_or_b32_e32 v17, 0x400000, v5
; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v5, v5
; GFX8-NEXT: v_cndmask_b32_e32 v5, v13, v17, vcc
; GFX8-NEXT: v_lshlrev_b32_e32 v13, 16, v12
@@ -19093,16 +18711,14 @@ define <16 x bfloat> @v_minnum_v16bf16(<16 x bfloat> %a, <16 x bfloat> %b) {
; GFX8-NEXT: v_and_b32_e32 v12, 0xffff0000, v12
; GFX8-NEXT: v_and_b32_e32 v4, 0xffff0000, v4
; GFX8-NEXT: v_add_u32_e32 v17, vcc, s4, v17
-; GFX8-NEXT: v_and_b32_e32 v18, 0xff800000, v13
; GFX8-NEXT: v_min_f32_e32 v4, v4, v12
-; GFX8-NEXT: v_or_b32_e32 v18, 0x400000, v18
+; GFX8-NEXT: v_or_b32_e32 v18, 0x400000, v13
; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v13, v13
; GFX8-NEXT: v_bfe_u32 v12, v4, 16, 1
; GFX8-NEXT: v_cndmask_b32_e32 v13, v17, v18, vcc
; GFX8-NEXT: v_add_u32_e32 v12, vcc, v12, v4
; GFX8-NEXT: v_add_u32_e32 v12, vcc, s4, v12
-; GFX8-NEXT: v_and_b32_e32 v17, 0xff800000, v4
-; GFX8-NEXT: v_or_b32_e32 v17, 0x400000, v17
+; GFX8-NEXT: v_or_b32_e32 v17, 0x400000, v4
; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v4, v4
; GFX8-NEXT: v_cndmask_b32_e32 v4, v12, v17, vcc
; GFX8-NEXT: v_lshlrev_b32_e32 v12, 16, v11
@@ -19113,16 +18729,14 @@ define <16 x bfloat> @v_minnum_v16bf16(<16 x bfloat> %a, <16 x bfloat> %b) {
; GFX8-NEXT: v_and_b32_e32 v11, 0xffff0000, v11
; GFX8-NEXT: v_and_b32_e32 v3, 0xffff0000, v3
; GFX8-NEXT: v_add_u32_e32 v17, vcc, s4, v17
-; GFX8-NEXT: v_and_b32_e32 v18, 0xff800000, v12
; GFX8-NEXT: v_min_f32_e32 v3, v3, v11
-; GFX8-NEXT: v_or_b32_e32 v18, 0x400000, v18
+; GFX8-NEXT: v_or_b32_e32 v18, 0x400000, v12
; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v12, v12
; GFX8-NEXT: v_bfe_u32 v11, v3, 16, 1
; GFX8-NEXT: v_cndmask_b32_e32 v12, v17, v18, vcc
; GFX8-NEXT: v_add_u32_e32 v11, vcc, v11, v3
; GFX8-NEXT: v_add_u32_e32 v11, vcc, s4, v11
-; GFX8-NEXT: v_and_b32_e32 v17, 0xff800000, v3
-; GFX8-NEXT: v_or_b32_e32 v17, 0x400000, v17
+; GFX8-NEXT: v_or_b32_e32 v17, 0x400000, v3
; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v3, v3
; GFX8-NEXT: v_cndmask_b32_e32 v3, v11, v17, vcc
; GFX8-NEXT: v_lshlrev_b32_e32 v11, 16, v10
@@ -19133,16 +18747,14 @@ define <16 x bfloat> @v_minnum_v16bf16(<16 x bfloat> %a, <16 x bfloat> %b) {
; GFX8-NEXT: v_and_b32_e32 v10, 0xffff0000, v10
; GFX8-NEXT: v_and_b32_e32 v2, 0xffff0000, v2
; GFX8-NEXT: v_add_u32_e32 v17, vcc, s4, v17
-; GFX8-NEXT: v_and_b32_e32 v18, 0xff800000, v11
; GFX8-NEXT: v_min_f32_e32 v2, v2, v10
-; GFX8-NEXT: v_or_b32_e32 v18, 0x400000, v18
+; GFX8-NEXT: v_or_b32_e32 v18, 0x400000, v11
; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v11, v11
; GFX8-NEXT: v_bfe_u32 v10, v2, 16, 1
; GFX8-NEXT: v_cndmask_b32_e32 v11, v17, v18, vcc
; GFX8-NEXT: v_add_u32_e32 v10, vcc, v10, v2
; GFX8-NEXT: v_add_u32_e32 v10, vcc, s4, v10
-; GFX8-NEXT: v_and_b32_e32 v17, 0xff800000, v2
-; GFX8-NEXT: v_or_b32_e32 v17, 0x400000, v17
+; GFX8-NEXT: v_or_b32_e32 v17, 0x400000, v2
; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v2, v2
; GFX8-NEXT: v_cndmask_b32_e32 v2, v10, v17, vcc
; GFX8-NEXT: v_lshlrev_b32_e32 v10, 16, v9
@@ -19153,16 +18765,14 @@ define <16 x bfloat> @v_minnum_v16bf16(<16 x bfloat> %a, <16 x bfloat> %b) {
; GFX8-NEXT: v_and_b32_e32 v9, 0xffff0000, v9
; GFX8-NEXT: v_and_b32_e32 v1, 0xffff0000, v1
; GFX8-NEXT: v_add_u32_e32 v17, vcc, s4, v17
-; GFX8-NEXT: v_and_b32_e32 v18, 0xff800000, v10
; GFX8-NEXT: v_min_f32_e32 v1, v1, v9
-; GFX8-NEXT: v_or_b32_e32 v18, 0x400000, v18
+; GFX8-NEXT: v_or_b32_e32 v18, 0x400000, v10
; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v10, v10
; GFX8-NEXT: v_bfe_u32 v9, v1, 16, 1
; GFX8-NEXT: v_cndmask_b32_e32 v10, v17, v18, vcc
; GFX8-NEXT: v_add_u32_e32 v9, vcc, v9, v1
; GFX8-NEXT: v_add_u32_e32 v9, vcc, s4, v9
-; GFX8-NEXT: v_and_b32_e32 v17, 0xff800000, v1
-; GFX8-NEXT: v_or_b32_e32 v17, 0x400000, v17
+; GFX8-NEXT: v_or_b32_e32 v17, 0x400000, v1
; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v1, v1
; GFX8-NEXT: v_cndmask_b32_e32 v1, v9, v17, vcc
; GFX8-NEXT: v_lshlrev_b32_e32 v9, 16, v8
@@ -19173,16 +18783,14 @@ define <16 x bfloat> @v_minnum_v16bf16(<16 x bfloat> %a, <16 x bfloat> %b) {
; GFX8-NEXT: v_and_b32_e32 v8, 0xffff0000, v8
; GFX8-NEXT: v_and_b32_e32 v0, 0xffff0000, v0
; GFX8-NEXT: v_add_u32_e32 v17, vcc, s4, v17
-; GFX8-NEXT: v_and_b32_e32 v18, 0xff800000, v9
; GFX8-NEXT: v_min_f32_e32 v0, v0, v8
-; GFX8-NEXT: v_or_b32_e32 v18, 0x400000, v18
+; GFX8-NEXT: v_or_b32_e32 v18, 0x400000, v9
; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v9, v9
; GFX8-NEXT: v_bfe_u32 v8, v0, 16, 1
; GFX8-NEXT: v_cndmask_b32_e32 v9, v17, v18, vcc
; GFX8-NEXT: v_add_u32_e32 v8, vcc, v8, v0
; GFX8-NEXT: v_add_u32_e32 v8, vcc, s4, v8
-; GFX8-NEXT: v_and_b32_e32 v17, 0xff800000, v0
-; GFX8-NEXT: v_or_b32_e32 v17, 0x400000, v17
+; GFX8-NEXT: v_or_b32_e32 v17, 0x400000, v0
; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v0, v0
; GFX8-NEXT: v_cndmask_b32_e32 v0, v8, v17, vcc
; GFX8-NEXT: v_lshrrev_b32_e32 v7, 16, v7
@@ -19209,146 +18817,130 @@ define <16 x bfloat> @v_minnum_v16bf16(<16 x bfloat> %a, <16 x bfloat> %b) {
; GFX9-NEXT: v_lshlrev_b32_e32 v16, 16, v15
; GFX9-NEXT: v_lshlrev_b32_e32 v17, 16, v7
; GFX9-NEXT: v_min_f32_e32 v16, v17, v16
-; GFX9-NEXT: v_bfe_u32 v17, v16, 16, 1
-; GFX9-NEXT: s_movk_i32 s4, 0x7fff
-; GFX9-NEXT: v_and_b32_e32 v18, 0xff800000, v16
; GFX9-NEXT: v_and_b32_e32 v15, 0xffff0000, v15
; GFX9-NEXT: v_and_b32_e32 v7, 0xffff0000, v7
+; GFX9-NEXT: v_bfe_u32 v17, v16, 16, 1
+; GFX9-NEXT: s_movk_i32 s4, 0x7fff
+; GFX9-NEXT: v_min_f32_e32 v7, v7, v15
; GFX9-NEXT: v_add3_u32 v17, v17, v16, s4
-; GFX9-NEXT: v_or_b32_e32 v18, 0x400000, v18
+; GFX9-NEXT: v_or_b32_e32 v18, 0x400000, v16
; GFX9-NEXT: v_cmp_u_f32_e32 vcc, v16, v16
-; GFX9-NEXT: v_min_f32_e32 v7, v7, v15
-; GFX9-NEXT: v_cndmask_b32_e32 v16, v17, v18, vcc
; GFX9-NEXT: v_bfe_u32 v15, v7, 16, 1
-; GFX9-NEXT: v_and_b32_e32 v17, 0xff800000, v7
+; GFX9-NEXT: v_cndmask_b32_e32 v16, v17, v18, vcc
; GFX9-NEXT: v_add3_u32 v15, v15, v7, s4
-; GFX9-NEXT: v_or_b32_e32 v17, 0x400000, v17
+; GFX9-NEXT: v_or_b32_e32 v17, 0x400000, v7
; GFX9-NEXT: v_cmp_u_f32_e32 vcc, v7, v7
; GFX9-NEXT: v_cndmask_b32_e32 v7, v15, v17, vcc
; GFX9-NEXT: v_lshlrev_b32_e32 v15, 16, v14
; GFX9-NEXT: v_lshlrev_b32_e32 v17, 16, v6
; GFX9-NEXT: v_min_f32_e32 v15, v17, v15
-; GFX9-NEXT: v_bfe_u32 v17, v15, 16, 1
-; GFX9-NEXT: v_and_b32_e32 v18, 0xff800000, v15
; GFX9-NEXT: v_and_b32_e32 v14, 0xffff0000, v14
; GFX9-NEXT: v_and_b32_e32 v6, 0xffff0000, v6
+; GFX9-NEXT: v_bfe_u32 v17, v15, 16, 1
+; GFX9-NEXT: v_min_f32_e32 v6, v6, v14
; GFX9-NEXT: v_add3_u32 v17, v17, v15, s4
-; GFX9-NEXT: v_or_b32_e32 v18, 0x400000, v18
+; GFX9-NEXT: v_or_b32_e32 v18, 0x400000, v15
; GFX9-NEXT: v_cmp_u_f32_e32 vcc, v15, v15
-; GFX9-NEXT: v_min_f32_e32 v6, v6, v14
-; GFX9-NEXT: v_cndmask_b32_e32 v15, v17, v18, vcc
; GFX9-NEXT: v_bfe_u32 v14, v6, 16, 1
-; GFX9-NEXT: v_and_b32_e32 v17, 0xff800000, v6
+; GFX9-NEXT: v_cndmask_b32_e32 v15, v17, v18, vcc
; GFX9-NEXT: v_add3_u32 v14, v14, v6, s4
-; GFX9-NEXT: v_or_b32_e32 v17, 0x400000, v17
+; GFX9-NEXT: v_or_b32_e32 v17, 0x400000, v6
; GFX9-NEXT: v_cmp_u_f32_e32 vcc, v6, v6
; GFX9-NEXT: v_cndmask_b32_e32 v6, v14, v17, vcc
; GFX9-NEXT: v_lshlrev_b32_e32 v14, 16, v13
; GFX9-NEXT: v_lshlrev_b32_e32 v17, 16, v5
; GFX9-NEXT: v_min_f32_e32 v14, v17, v14
-; GFX9-NEXT: v_bfe_u32 v17, v14, 16, 1
-; GFX9-NEXT: v_and_b32_e32 v18, 0xff800000, v14
; GFX9-NEXT: v_and_b32_e32 v13, 0xffff0000, v13
; GFX9-NEXT: v_and_b32_e32 v5, 0xffff0000, v5
+; GFX9-NEXT: v_bfe_u32 v17, v14, 16, 1
+; GFX9-NEXT: v_min_f32_e32 v5, v5, v13
; GFX9-NEXT: v_add3_u32 v17, v17, v14, s4
-; GFX9-NEXT: v_or_b32_e32 v18, 0x400000, v18
+; GFX9-NEXT: v_or_b32_e32 v18, 0x400000, v14
; GFX9-NEXT: v_cmp_u_f32_e32 vcc, v14, v14
-; GFX9-NEXT: v_min_f32_e32 v5, v5, v13
-; GFX9-NEXT: v_cndmask_b32_e32 v14, v17, v18, vcc
; GFX9-NEXT: v_bfe_u32 v13, v5, 16, 1
-; GFX9-NEXT: v_and_b32_e32 v17, 0xff800000, v5
+; GFX9-NEXT: v_cndmask_b32_e32 v14, v17, v18, vcc
; GFX9-NEXT: v_add3_u32 v13, v13, v5, s4
-; GFX9-NEXT: v_or_b32_e32 v17, 0x400000, v17
+; GFX9-NEXT: v_or_b32_e32 v17, 0x400000, v5
; GFX9-NEXT: v_cmp_u_f32_e32 vcc, v5, v5
; GFX9-NEXT: v_cndmask_b32_e32 v5, v13, v17, vcc
; GFX9-NEXT: v_lshlrev_b32_e32 v13, 16, v12
; GFX9-NEXT: v_lshlrev_b32_e32 v17, 16, v4
; GFX9-NEXT: v_min_f32_e32 v13, v17, v13
-; GFX9-NEXT: v_bfe_u32 v17, v13, 16, 1
-; GFX9-NEXT: v_and_b32_e32 v18, 0xff800000, v13
; GFX9-NEXT: v_and_b32_e32 v12, 0xffff0000, v12
; GFX9-NEXT: v_and_b32_e32 v4, 0xffff0000, v4
+; GFX9-NEXT: v_bfe_u32 v17, v13, 16, 1
+; GFX9-NEXT: v_min_f32_e32 v4, v4, v12
; GFX9-NEXT: v_add3_u32 v17, v17, v13, s4
-; GFX9-NEXT: v_or_b32_e32 v18, 0x400000, v18
+; GFX9-NEXT: v_or_b32_e32 v18, 0x400000, v13
; GFX9-NEXT: v_cmp_u_f32_e32 vcc, v13, v13
-; GFX9-NEXT: v_min_f32_e32 v4, v4, v12
-; GFX9-NEXT: v_cndmask_b32_e32 v13, v17, v18, vcc
; GFX9-NEXT: v_bfe_u32 v12, v4, 16, 1
-; GFX9-NEXT: v_and_b32_e32 v17, 0xff800000, v4
+; GFX9-NEXT: v_cndmask_b32_e32 v13, v17, v18, vcc
; GFX9-NEXT: v_add3_u32 v12, v12, v4, s4
-; GFX9-NEXT: v_or_b32_e32 v17, 0x400000, v17
+; GFX9-NEXT: v_or_b32_e32 v17, 0x400000, v4
; GFX9-NEXT: v_cmp_u_f32_e32 vcc, v4, v4
; GFX9-NEXT: v_cndmask_b32_e32 v4, v12, v17, vcc
; GFX9-NEXT: v_lshlrev_b32_e32 v12, 16, v11
; GFX9-NEXT: v_lshlrev_b32_e32 v17, 16, v3
; GFX9-NEXT: v_min_f32_e32 v12, v17, v12
-; GFX9-NEXT: v_bfe_u32 v17, v12, 16, 1
-; GFX9-NEXT: v_and_b32_e32 v18, 0xff800000, v12
; GFX9-NEXT: v_and_b32_e32 v11, 0xffff0000, v11
; GFX9-NEXT: v_and_b32_e32 v3, 0xffff0000, v3
+; GFX9-NEXT: v_bfe_u32 v17, v12, 16, 1
+; GFX9-NEXT: v_min_f32_e32 v3, v3, v11
; GFX9-NEXT: v_add3_u32 v17, v17, v12, s4
-; GFX9-NEXT: v_or_b32_e32 v18, 0x400000, v18
+; GFX9-NEXT: v_or_b32_e32 v18, 0x400000, v12
; GFX9-NEXT: v_cmp_u_f32_e32 vcc, v12, v12
-; GFX9-NEXT: v_min_f32_e32 v3, v3, v11
-; GFX9-NEXT: v_cndmask_b32_e32 v12, v17, v18, vcc
; GFX9-NEXT: v_bfe_u32 v11, v3, 16, 1
-; GFX9-NEXT: v_and_b32_e32 v17, 0xff800000, v3
+; GFX9-NEXT: v_cndmask_b32_e32 v12, v17, v18, vcc
; GFX9-NEXT: v_add3_u32 v11, v11, v3, s4
-; GFX9-NEXT: v_or_b32_e32 v17, 0x400000, v17
+; GFX9-NEXT: v_or_b32_e32 v17, 0x400000, v3
; GFX9-NEXT: v_cmp_u_f32_e32 vcc, v3, v3
; GFX9-NEXT: v_cndmask_b32_e32 v3, v11, v17, vcc
; GFX9-NEXT: v_lshlrev_b32_e32 v11, 16, v10
; GFX9-NEXT: v_lshlrev_b32_e32 v17, 16, v2
; GFX9-NEXT: v_min_f32_e32 v11, v17, v11
-; GFX9-NEXT: v_bfe_u32 v17, v11, 16, 1
-; GFX9-NEXT: v_and_b32_e32 v18, 0xff800000, v11
; GFX9-NEXT: v_and_b32_e32 v10, 0xffff0000, v10
; GFX9-NEXT: v_and_b32_e32 v2, 0xffff0000, v2
+; GFX9-NEXT: v_bfe_u32 v17, v11, 16, 1
+; GFX9-NEXT: v_min_f32_e32 v2, v2, v10
; GFX9-NEXT: v_add3_u32 v17, v17, v11, s4
-; GFX9-NEXT: v_or_b32_e32 v18, 0x400000, v18
+; GFX9-NEXT: v_or_b32_e32 v18, 0x400000, v11
; GFX9-NEXT: v_cmp_u_f32_e32 vcc, v11, v11
-; GFX9-NEXT: v_min_f32_e32 v2, v2, v10
-; GFX9-NEXT: v_cndmask_b32_e32 v11, v17, v18, vcc
; GFX9-NEXT: v_bfe_u32 v10, v2, 16, 1
-; GFX9-NEXT: v_and_b32_e32 v17, 0xff800000, v2
+; GFX9-NEXT: v_cndmask_b32_e32 v11, v17, v18, vcc
; GFX9-NEXT: v_add3_u32 v10, v10, v2, s4
-; GFX9-NEXT: v_or_b32_e32 v17, 0x400000, v17
+; GFX9-NEXT: v_or_b32_e32 v17, 0x400000, v2
; GFX9-NEXT: v_cmp_u_f32_e32 vcc, v2, v2
; GFX9-NEXT: v_cndmask_b32_e32 v2, v10, v17, vcc
; GFX9-NEXT: v_lshlrev_b32_e32 v10, 16, v9
; GFX9-NEXT: v_lshlrev_b32_e32 v17, 16, v1
; GFX9-NEXT: v_min_f32_e32 v10, v17, v10
-; GFX9-NEXT: v_bfe_u32 v17, v10, 16, 1
-; GFX9-NEXT: v_and_b32_e32 v18, 0xff800000, v10
; GFX9-NEXT: v_and_b32_e32 v9, 0xffff0000, v9
; GFX9-NEXT: v_and_b32_e32 v1, 0xffff0000, v1
+; GFX9-NEXT: v_bfe_u32 v17, v10, 16, 1
+; GFX9-NEXT: v_min_f32_e32 v1, v1, v9
; GFX9-NEXT: v_add3_u32 v17, v17, v10, s4
-; GFX9-NEXT: v_or_b32_e32 v18, 0x400000, v18
+; GFX9-NEXT: v_or_b32_e32 v18, 0x400000, v10
; GFX9-NEXT: v_cmp_u_f32_e32 vcc, v10, v10
-; GFX9-NEXT: v_min_f32_e32 v1, v1, v9
-; GFX9-NEXT: v_cndmask_b32_e32 v10, v17, v18, vcc
; GFX9-NEXT: v_bfe_u32 v9, v1, 16, 1
-; GFX9-NEXT: v_and_b32_e32 v17, 0xff800000, v1
+; GFX9-NEXT: v_cndmask_b32_e32 v10, v17, v18, vcc
; GFX9-NEXT: v_add3_u32 v9, v9, v1, s4
-; GFX9-NEXT: v_or_b32_e32 v17, 0x400000, v17
+; GFX9-NEXT: v_or_b32_e32 v17, 0x400000, v1
; GFX9-NEXT: v_cmp_u_f32_e32 vcc, v1, v1
; GFX9-NEXT: v_cndmask_b32_e32 v1, v9, v17, vcc
; GFX9-NEXT: v_lshlrev_b32_e32 v9, 16, v8
; GFX9-NEXT: v_lshlrev_b32_e32 v17, 16, v0
; GFX9-NEXT: v_min_f32_e32 v9, v17, v9
-; GFX9-NEXT: v_bfe_u32 v17, v9, 16, 1
-; GFX9-NEXT: v_and_b32_e32 v18, 0xff800000, v9
; GFX9-NEXT: v_and_b32_e32 v8, 0xffff0000, v8
; GFX9-NEXT: v_and_b32_e32 v0, 0xffff0000, v0
+; GFX9-NEXT: v_bfe_u32 v17, v9, 16, 1
+; GFX9-NEXT: v_min_f32_e32 v0, v0, v8
; GFX9-NEXT: v_add3_u32 v17, v17, v9, s4
-; GFX9-NEXT: v_or_b32_e32 v18, 0x400000, v18
+; GFX9-NEXT: v_or_b32_e32 v18, 0x400000, v9
; GFX9-NEXT: v_cmp_u_f32_e32 vcc, v9, v9
-; GFX9-NEXT: v_min_f32_e32 v0, v0, v8
-; GFX9-NEXT: v_cndmask_b32_e32 v9, v17, v18, vcc
; GFX9-NEXT: v_bfe_u32 v8, v0, 16, 1
-; GFX9-NEXT: v_and_b32_e32 v17, 0xff800000, v0
+; GFX9-NEXT: v_cndmask_b32_e32 v9, v17, v18, vcc
; GFX9-NEXT: v_add3_u32 v8, v8, v0, s4
-; GFX9-NEXT: v_or_b32_e32 v17, 0x400000, v17
+; GFX9-NEXT: v_or_b32_e32 v17, 0x400000, v0
; GFX9-NEXT: v_cmp_u_f32_e32 vcc, v0, v0
; GFX9-NEXT: v_cndmask_b32_e32 v0, v8, v17, vcc
; GFX9-NEXT: s_mov_b32 s4, 0x7060302
@@ -19369,27 +18961,26 @@ define <16 x bfloat> @v_minnum_v16bf16(<16 x bfloat> %a, <16 x bfloat> %b) {
; GFX10-NEXT: v_lshlrev_b32_e32 v17, 16, v7
; GFX10-NEXT: v_and_b32_e32 v15, 0xffff0000, v15
; GFX10-NEXT: v_and_b32_e32 v7, 0xffff0000, v7
-; GFX10-NEXT: s_mov_b32 s4, 0xff800000
; GFX10-NEXT: v_lshlrev_b32_e32 v18, 16, v6
+; GFX10-NEXT: v_and_b32_e32 v6, 0xffff0000, v6
; GFX10-NEXT: v_min_f32_e32 v16, v17, v16
; GFX10-NEXT: v_lshlrev_b32_e32 v17, 16, v14
; GFX10-NEXT: v_min_f32_e32 v7, v7, v15
; GFX10-NEXT: v_and_b32_e32 v14, 0xffff0000, v14
-; GFX10-NEXT: v_and_b32_e32 v6, 0xffff0000, v6
; GFX10-NEXT: v_bfe_u32 v15, v16, 16, 1
-; GFX10-NEXT: v_and_or_b32 v20, v16, s4, 0x400000
+; GFX10-NEXT: v_or_b32_e32 v20, 0x400000, v16
; GFX10-NEXT: v_bfe_u32 v19, v7, 16, 1
; GFX10-NEXT: v_cmp_u_f32_e32 vcc_lo, v16, v16
; GFX10-NEXT: v_min_f32_e32 v17, v18, v17
; GFX10-NEXT: v_add3_u32 v15, v15, v16, 0x7fff
; GFX10-NEXT: v_min_f32_e32 v6, v6, v14
; GFX10-NEXT: v_add3_u32 v18, v19, v7, 0x7fff
-; GFX10-NEXT: v_and_or_b32 v19, v7, s4, 0x400000
+; GFX10-NEXT: v_or_b32_e32 v19, 0x400000, v7
; GFX10-NEXT: v_bfe_u32 v21, v17, 16, 1
; GFX10-NEXT: v_cndmask_b32_e32 v15, v15, v20, vcc_lo
; GFX10-NEXT: v_cmp_u_f32_e32 vcc_lo, v7, v7
; GFX10-NEXT: v_lshlrev_b32_e32 v20, 16, v5
-; GFX10-NEXT: v_and_or_b32 v16, v17, s4, 0x400000
+; GFX10-NEXT: v_or_b32_e32 v16, 0x400000, v17
; GFX10-NEXT: v_add3_u32 v14, v21, v17, 0x7fff
; GFX10-NEXT: v_and_b32_e32 v5, 0xffff0000, v5
; GFX10-NEXT: v_cndmask_b32_e32 v7, v18, v19, vcc_lo
@@ -19403,7 +18994,7 @@ define <16 x bfloat> @v_minnum_v16bf16(<16 x bfloat> %a, <16 x bfloat> %b) {
; GFX10-NEXT: v_min_f32_e32 v5, v5, v13
; GFX10-NEXT: v_cndmask_b32_e32 v14, v14, v16, vcc_lo
; GFX10-NEXT: v_add3_u32 v16, v18, v6, 0x7fff
-; GFX10-NEXT: v_and_or_b32 v13, v6, s4, 0x400000
+; GFX10-NEXT: v_or_b32_e32 v13, 0x400000, v6
; GFX10-NEXT: v_lshlrev_b32_e32 v18, 16, v12
; GFX10-NEXT: v_bfe_u32 v20, v17, 16, 1
; GFX10-NEXT: v_cmp_u_f32_e32 vcc_lo, v6, v6
@@ -19413,10 +19004,10 @@ define <16 x bfloat> @v_minnum_v16bf16(<16 x bfloat> %a, <16 x bfloat> %b) {
; GFX10-NEXT: v_cndmask_b32_e32 v6, v16, v13, vcc_lo
; GFX10-NEXT: v_min_f32_e32 v13, v19, v18
; GFX10-NEXT: v_add3_u32 v16, v20, v17, 0x7fff
-; GFX10-NEXT: v_and_or_b32 v18, v17, s4, 0x400000
+; GFX10-NEXT: v_or_b32_e32 v18, 0x400000, v17
; GFX10-NEXT: v_cmp_u_f32_e32 vcc_lo, v17, v17
; GFX10-NEXT: v_add3_u32 v19, v21, v5, 0x7fff
-; GFX10-NEXT: v_and_or_b32 v20, v5, s4, 0x400000
+; GFX10-NEXT: v_or_b32_e32 v20, 0x400000, v5
; GFX10-NEXT: v_bfe_u32 v21, v13, 16, 1
; GFX10-NEXT: v_min_f32_e32 v4, v4, v12
; GFX10-NEXT: v_cndmask_b32_e32 v16, v16, v18, vcc_lo
@@ -19426,14 +19017,14 @@ define <16 x bfloat> @v_minnum_v16bf16(<16 x bfloat> %a, <16 x bfloat> %b) {
; GFX10-NEXT: v_add3_u32 v17, v21, v13, 0x7fff
; GFX10-NEXT: v_and_b32_e32 v11, 0xffff0000, v11
; GFX10-NEXT: v_cndmask_b32_e32 v5, v19, v20, vcc_lo
-; GFX10-NEXT: v_and_or_b32 v19, v13, s4, 0x400000
+; GFX10-NEXT: v_or_b32_e32 v19, 0x400000, v13
; GFX10-NEXT: v_and_b32_e32 v3, 0xffff0000, v3
; GFX10-NEXT: v_min_f32_e32 v12, v18, v12
; GFX10-NEXT: v_cmp_u_f32_e32 vcc_lo, v13, v13
; GFX10-NEXT: v_bfe_u32 v20, v4, 16, 1
; GFX10-NEXT: v_lshlrev_b32_e32 v18, 16, v10
; GFX10-NEXT: v_min_f32_e32 v3, v3, v11
-; GFX10-NEXT: v_and_or_b32 v22, v12, s4, 0x400000
+; GFX10-NEXT: v_or_b32_e32 v22, 0x400000, v12
; GFX10-NEXT: v_cndmask_b32_e32 v13, v17, v19, vcc_lo
; GFX10-NEXT: v_bfe_u32 v17, v12, 16, 1
; GFX10-NEXT: v_lshlrev_b32_e32 v19, 16, v2
@@ -19445,12 +19036,12 @@ define <16 x bfloat> @v_minnum_v16bf16(<16 x bfloat> %a, <16 x bfloat> %b) {
; GFX10-NEXT: v_and_b32_e32 v2, 0xffff0000, v2
; GFX10-NEXT: v_cmp_u_f32_e32 vcc_lo, v12, v12
; GFX10-NEXT: v_add3_u32 v19, v20, v3, 0x7fff
-; GFX10-NEXT: v_and_or_b32 v20, v3, s4, 0x400000
+; GFX10-NEXT: v_or_b32_e32 v20, 0x400000, v3
; GFX10-NEXT: v_bfe_u32 v23, v18, 16, 1
; GFX10-NEXT: v_min_f32_e32 v2, v2, v10
; GFX10-NEXT: v_cndmask_b32_e32 v12, v17, v22, vcc_lo
; GFX10-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3
-; GFX10-NEXT: v_and_or_b32 v17, v18, s4, 0x400000
+; GFX10-NEXT: v_or_b32_e32 v17, 0x400000, v18
; GFX10-NEXT: v_add3_u32 v10, v23, v18, 0x7fff
; GFX10-NEXT: v_lshlrev_b32_e32 v22, 16, v1
; GFX10-NEXT: v_and_b32_e32 v1, 0xffff0000, v1
@@ -19459,8 +19050,8 @@ define <16 x bfloat> @v_minnum_v16bf16(<16 x bfloat> %a, <16 x bfloat> %b) {
; GFX10-NEXT: v_lshlrev_b32_e32 v20, 16, v9
; GFX10-NEXT: v_cmp_u_f32_e32 vcc_lo, v18, v18
; GFX10-NEXT: v_and_b32_e32 v9, 0xffff0000, v9
-; GFX10-NEXT: v_and_or_b32 v18, v2, s4, 0x400000
-; GFX10-NEXT: v_and_or_b32 v21, v4, s4, 0x400000
+; GFX10-NEXT: v_or_b32_e32 v18, 0x400000, v2
+; GFX10-NEXT: v_or_b32_e32 v21, 0x400000, v4
; GFX10-NEXT: v_perm_b32 v3, v3, v12, 0x7060302
; GFX10-NEXT: v_cndmask_b32_e32 v10, v10, v17, vcc_lo
; GFX10-NEXT: v_add3_u32 v17, v19, v2, 0x7fff
@@ -19472,17 +19063,17 @@ define <16 x bfloat> @v_minnum_v16bf16(<16 x bfloat> %a, <16 x bfloat> %b) {
; GFX10-NEXT: v_bfe_u32 v23, v19, 16, 1
; GFX10-NEXT: v_min_f32_e32 v1, v1, v9
; GFX10-NEXT: v_min_f32_e32 v9, v22, v20
-; GFX10-NEXT: v_and_or_b32 v22, v19, s4, 0x400000
+; GFX10-NEXT: v_or_b32_e32 v22, 0x400000, v19
; GFX10-NEXT: v_min_f32_e32 v0, v0, v8
; GFX10-NEXT: v_add3_u32 v20, v23, v19, 0x7fff
; GFX10-NEXT: v_bfe_u32 v8, v1, 16, 1
; GFX10-NEXT: v_cmp_u_f32_e32 vcc_lo, v19, v19
; GFX10-NEXT: v_bfe_u32 v23, v9, 16, 1
-; GFX10-NEXT: v_and_or_b32 v24, v9, s4, 0x400000
-; GFX10-NEXT: v_and_or_b32 v25, v0, s4, 0x400000
+; GFX10-NEXT: v_or_b32_e32 v24, 0x400000, v9
+; GFX10-NEXT: v_or_b32_e32 v25, 0x400000, v0
; GFX10-NEXT: v_add3_u32 v8, v8, v1, 0x7fff
; GFX10-NEXT: v_cndmask_b32_e32 v19, v20, v22, vcc_lo
-; GFX10-NEXT: v_and_or_b32 v22, v1, s4, 0x400000
+; GFX10-NEXT: v_or_b32_e32 v22, 0x400000, v1
; GFX10-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
; GFX10-NEXT: v_bfe_u32 v20, v0, 16, 1
; GFX10-NEXT: v_add3_u32 v23, v23, v9, 0x7fff
@@ -19511,12 +19102,11 @@ define <16 x bfloat> @v_minnum_v16bf16(<16 x bfloat> %a, <16 x bfloat> %b) {
; GFX11-NEXT: v_lshlrev_b32_e32 v16, 16, v15
; GFX11-NEXT: v_and_b32_e32 v6, 0xffff0000, v6
; GFX11-NEXT: v_lshlrev_b32_e32 v17, 16, v7
-; GFX11-NEXT: v_and_b32_e32 v15, 0xffff0000, v15
-; GFX11-NEXT: s_mov_b32 s0, 0xff800000
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_2)
-; GFX11-NEXT: v_dual_min_f32 v16, v17, v16 :: v_dual_lshlrev_b32 v17, 16, v14
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
+; GFX11-NEXT: v_dual_min_f32 v16, v17, v16 :: v_dual_and_b32 v15, 0xffff0000, v15
+; GFX11-NEXT: v_lshlrev_b32_e32 v17, 16, v14
; GFX11-NEXT: v_and_b32_e32 v14, 0xffff0000, v14
-; GFX11-NEXT: v_and_or_b32 v20, v16, s0, 0x400000
+; GFX11-NEXT: v_or_b32_e32 v20, 0x400000, v16
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
; GFX11-NEXT: v_min_f32_e32 v17, v18, v17
; GFX11-NEXT: v_min_f32_e32 v6, v6, v14
@@ -19529,13 +19119,13 @@ define <16 x bfloat> @v_minnum_v16bf16(<16 x bfloat> %a, <16 x bfloat> %b) {
; GFX11-NEXT: v_min_f32_e32 v7, v7, v15
; GFX11-NEXT: v_bfe_u32 v15, v16, 16, 1
; GFX11-NEXT: v_add3_u32 v15, v15, v16, 0x7fff
-; GFX11-NEXT: v_and_or_b32 v16, v17, s0, 0x400000
+; GFX11-NEXT: v_or_b32_e32 v16, 0x400000, v17
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_2) | instid1(VALU_DEP_2)
; GFX11-NEXT: v_dual_cndmask_b32 v15, v15, v20 :: v_dual_lshlrev_b32 v20, 16, v5
; GFX11-NEXT: v_bfe_u32 v19, v7, 16, 1
; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v7, v7
; GFX11-NEXT: v_add3_u32 v18, v19, v7, 0x7fff
-; GFX11-NEXT: v_and_or_b32 v19, v7, s0, 0x400000
+; GFX11-NEXT: v_or_b32_e32 v19, 0x400000, v7
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_3) | instid1(VALU_DEP_4)
; GFX11-NEXT: v_cndmask_b32_e32 v7, v18, v19, vcc_lo
; GFX11-NEXT: v_bfe_u32 v18, v6, 16, 1
@@ -19557,32 +19147,32 @@ define <16 x bfloat> @v_minnum_v16bf16(<16 x bfloat> %a, <16 x bfloat> %b) {
; GFX11-NEXT: v_lshlrev_b32_e32 v12, 16, v11
; GFX11-NEXT: v_and_b32_e32 v5, 0xffff0000, v5
; GFX11-NEXT: v_min_f32_e32 v5, v5, v13
-; GFX11-NEXT: v_and_or_b32 v13, v6, s0, 0x400000
+; GFX11-NEXT: v_or_b32_e32 v13, 0x400000, v6
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_3) | instid1(VALU_DEP_4)
; GFX11-NEXT: v_dual_cndmask_b32 v6, v16, v13 :: v_dual_min_f32 v13, v19, v18
; GFX11-NEXT: v_add3_u32 v16, v20, v17, 0x7fff
-; GFX11-NEXT: v_and_or_b32 v18, v17, s0, 0x400000
+; GFX11-NEXT: v_or_b32_e32 v18, 0x400000, v17
; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v17, v17
; GFX11-NEXT: v_perm_b32 v6, v6, v14, 0x7060302
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_4) | instid1(VALU_DEP_4)
; GFX11-NEXT: v_cndmask_b32_e32 v16, v16, v18, vcc_lo
; GFX11-NEXT: v_lshlrev_b32_e32 v18, 16, v3
; GFX11-NEXT: v_bfe_u32 v21, v5, 16, 1
-; GFX11-NEXT: v_and_or_b32 v20, v5, s0, 0x400000
+; GFX11-NEXT: v_or_b32_e32 v20, 0x400000, v5
; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
; GFX11-NEXT: v_min_f32_e32 v12, v18, v12
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_2) | instid1(VALU_DEP_4)
; GFX11-NEXT: v_add3_u32 v19, v21, v5, 0x7fff
; GFX11-NEXT: v_bfe_u32 v21, v13, 16, 1
; GFX11-NEXT: v_lshlrev_b32_e32 v18, 16, v10
-; GFX11-NEXT: v_and_or_b32 v22, v12, s0, 0x400000
+; GFX11-NEXT: v_or_b32_e32 v22, 0x400000, v12
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
; GFX11-NEXT: v_cndmask_b32_e32 v5, v19, v20, vcc_lo
; GFX11-NEXT: v_add3_u32 v17, v21, v13, 0x7fff
-; GFX11-NEXT: v_and_or_b32 v19, v13, s0, 0x400000
+; GFX11-NEXT: v_or_b32_e32 v19, 0x400000, v13
; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v13, v13
; GFX11-NEXT: v_bfe_u32 v20, v4, 16, 1
-; GFX11-NEXT: v_and_or_b32 v21, v4, s0, 0x400000
+; GFX11-NEXT: v_or_b32_e32 v21, 0x400000, v4
; GFX11-NEXT: v_perm_b32 v5, v5, v16, 0x7060302
; GFX11-NEXT: v_cndmask_b32_e32 v13, v17, v19, vcc_lo
; GFX11-NEXT: v_bfe_u32 v17, v12, 16, 1
@@ -19598,7 +19188,7 @@ define <16 x bfloat> @v_minnum_v16bf16(<16 x bfloat> %a, <16 x bfloat> %b) {
; GFX11-NEXT: v_and_b32_e32 v10, 0xffff0000, v10
; GFX11-NEXT: v_and_b32_e32 v3, 0xffff0000, v3
; GFX11-NEXT: v_bfe_u32 v23, v18, 16, 1
-; GFX11-NEXT: v_and_or_b32 v17, v18, s0, 0x400000
+; GFX11-NEXT: v_or_b32_e32 v17, 0x400000, v18
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
; GFX11-NEXT: v_dual_min_f32 v2, v2, v10 :: v_dual_and_b32 v1, 0xffff0000, v1
; GFX11-NEXT: v_min_f32_e32 v3, v3, v11
@@ -19608,13 +19198,13 @@ define <16 x bfloat> @v_minnum_v16bf16(<16 x bfloat> %a, <16 x bfloat> %b) {
; GFX11-NEXT: v_bfe_u32 v20, v3, 16, 1
; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3
; GFX11-NEXT: v_add3_u32 v19, v20, v3, 0x7fff
-; GFX11-NEXT: v_and_or_b32 v20, v3, s0, 0x400000
+; GFX11-NEXT: v_or_b32_e32 v20, 0x400000, v3
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
; GFX11-NEXT: v_cndmask_b32_e32 v3, v19, v20, vcc_lo
; GFX11-NEXT: v_bfe_u32 v19, v2, 16, 1
; GFX11-NEXT: v_lshlrev_b32_e32 v20, 16, v9
; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v18, v18
-; GFX11-NEXT: v_and_or_b32 v18, v2, s0, 0x400000
+; GFX11-NEXT: v_or_b32_e32 v18, 0x400000, v2
; GFX11-NEXT: v_perm_b32 v3, v3, v12, 0x7060302
; GFX11-NEXT: v_cndmask_b32_e32 v10, v10, v17, vcc_lo
; GFX11-NEXT: v_add3_u32 v17, v19, v2, 0x7fff
@@ -19631,13 +19221,13 @@ define <16 x bfloat> @v_minnum_v16bf16(<16 x bfloat> %a, <16 x bfloat> %b) {
; GFX11-NEXT: v_min_f32_e32 v9, v22, v20
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_4)
; GFX11-NEXT: v_add3_u32 v20, v23, v19, 0x7fff
-; GFX11-NEXT: v_and_or_b32 v22, v19, s0, 0x400000
-; GFX11-NEXT: v_and_or_b32 v25, v0, s0, 0x400000
+; GFX11-NEXT: v_or_b32_e32 v22, 0x400000, v19
+; GFX11-NEXT: v_or_b32_e32 v25, 0x400000, v0
; GFX11-NEXT: v_bfe_u32 v8, v1, 16, 1
; GFX11-NEXT: v_bfe_u32 v23, v9, 16, 1
-; GFX11-NEXT: v_and_or_b32 v24, v9, s0, 0x400000
+; GFX11-NEXT: v_or_b32_e32 v24, 0x400000, v9
; GFX11-NEXT: v_cndmask_b32_e32 v19, v20, v22, vcc_lo
-; GFX11-NEXT: v_and_or_b32 v22, v1, s0, 0x400000
+; GFX11-NEXT: v_or_b32_e32 v22, 0x400000, v1
; GFX11-NEXT: v_add3_u32 v8, v8, v1, 0x7fff
; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
; GFX11-NEXT: v_bfe_u32 v20, v0, 16, 1
@@ -20332,16 +19922,14 @@ define <32 x bfloat> @v_minnum_v32bf16(<32 x bfloat> %a, <32 x bfloat> %b) {
; GFX8-NEXT: v_and_b32_e32 v30, 0xffff0000, v30
; GFX8-NEXT: v_and_b32_e32 v14, 0xffff0000, v14
; GFX8-NEXT: v_add_u32_e32 v32, vcc, s4, v32
-; GFX8-NEXT: v_and_b32_e32 v33, 0xff800000, v31
; GFX8-NEXT: v_min_f32_e32 v14, v14, v30
-; GFX8-NEXT: v_or_b32_e32 v33, 0x400000, v33
+; GFX8-NEXT: v_or_b32_e32 v33, 0x400000, v31
; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v31, v31
; GFX8-NEXT: v_bfe_u32 v30, v14, 16, 1
; GFX8-NEXT: v_cndmask_b32_e32 v31, v32, v33, vcc
; GFX8-NEXT: v_add_u32_e32 v30, vcc, v30, v14
; GFX8-NEXT: v_add_u32_e32 v30, vcc, s4, v30
-; GFX8-NEXT: v_and_b32_e32 v32, 0xff800000, v14
-; GFX8-NEXT: v_or_b32_e32 v32, 0x400000, v32
+; GFX8-NEXT: v_or_b32_e32 v32, 0x400000, v14
; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v14, v14
; GFX8-NEXT: v_cndmask_b32_e32 v14, v30, v32, vcc
; GFX8-NEXT: v_lshlrev_b32_e32 v30, 16, v29
@@ -20363,29 +19951,25 @@ define <32 x bfloat> @v_minnum_v32bf16(<32 x bfloat> %a, <32 x bfloat> %b) {
; GFX8-NEXT: v_min_f32_e32 v30, v15, v30
; GFX8-NEXT: v_bfe_u32 v15, v33, 16, 1
; GFX8-NEXT: v_add_u32_e32 v15, vcc, v15, v33
-; GFX8-NEXT: v_and_b32_e32 v34, 0xff800000, v33
; GFX8-NEXT: v_add_u32_e32 v15, vcc, s4, v15
-; GFX8-NEXT: v_or_b32_e32 v34, 0x400000, v34
+; GFX8-NEXT: v_or_b32_e32 v34, 0x400000, v33
; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v33, v33
; GFX8-NEXT: v_bfe_u32 v33, v30, 16, 1
; GFX8-NEXT: v_cndmask_b32_e32 v15, v15, v34, vcc
; GFX8-NEXT: v_add_u32_e32 v33, vcc, v33, v30
-; GFX8-NEXT: v_and_b32_e32 v34, 0xff800000, v30
; GFX8-NEXT: v_add_u32_e32 v33, vcc, s4, v33
-; GFX8-NEXT: v_or_b32_e32 v34, 0x400000, v34
+; GFX8-NEXT: v_or_b32_e32 v34, 0x400000, v30
; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v30, v30
; GFX8-NEXT: v_cndmask_b32_e32 v30, v33, v34, vcc
; GFX8-NEXT: v_bfe_u32 v33, v32, 16, 1
; GFX8-NEXT: v_add_u32_e32 v33, vcc, v33, v32
; GFX8-NEXT: v_add_u32_e32 v33, vcc, s4, v33
-; GFX8-NEXT: v_and_b32_e32 v34, 0xff800000, v32
-; GFX8-NEXT: v_or_b32_e32 v34, 0x400000, v34
+; GFX8-NEXT: v_or_b32_e32 v34, 0x400000, v32
; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v32, v32
; GFX8-NEXT: v_cndmask_b32_e32 v32, v33, v34, vcc
; GFX8-NEXT: v_add_u32_e32 v29, vcc, v29, v13
; GFX8-NEXT: v_add_u32_e32 v29, vcc, s4, v29
-; GFX8-NEXT: v_and_b32_e32 v33, 0xff800000, v13
-; GFX8-NEXT: v_or_b32_e32 v33, 0x400000, v33
+; GFX8-NEXT: v_or_b32_e32 v33, 0x400000, v13
; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v13, v13
; GFX8-NEXT: v_cndmask_b32_e32 v13, v29, v33, vcc
; GFX8-NEXT: v_lshlrev_b32_e32 v29, 16, v28
@@ -20396,16 +19980,14 @@ define <32 x bfloat> @v_minnum_v32bf16(<32 x bfloat> %a, <32 x bfloat> %b) {
; GFX8-NEXT: v_and_b32_e32 v28, 0xffff0000, v28
; GFX8-NEXT: v_and_b32_e32 v12, 0xffff0000, v12
; GFX8-NEXT: v_add_u32_e32 v33, vcc, s4, v33
-; GFX8-NEXT: v_and_b32_e32 v34, 0xff800000, v29
; GFX8-NEXT: v_min_f32_e32 v12, v12, v28
-; GFX8-NEXT: v_or_b32_e32 v34, 0x400000, v34
+; GFX8-NEXT: v_or_b32_e32 v34, 0x400000, v29
; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v29, v29
; GFX8-NEXT: v_bfe_u32 v28, v12, 16, 1
; GFX8-NEXT: v_cndmask_b32_e32 v29, v33, v34, vcc
; GFX8-NEXT: v_add_u32_e32 v28, vcc, v28, v12
; GFX8-NEXT: v_add_u32_e32 v28, vcc, s4, v28
-; GFX8-NEXT: v_and_b32_e32 v33, 0xff800000, v12
-; GFX8-NEXT: v_or_b32_e32 v33, 0x400000, v33
+; GFX8-NEXT: v_or_b32_e32 v33, 0x400000, v12
; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v12, v12
; GFX8-NEXT: v_cndmask_b32_e32 v12, v28, v33, vcc
; GFX8-NEXT: v_lshlrev_b32_e32 v28, 16, v27
@@ -20416,16 +19998,14 @@ define <32 x bfloat> @v_minnum_v32bf16(<32 x bfloat> %a, <32 x bfloat> %b) {
; GFX8-NEXT: v_and_b32_e32 v27, 0xffff0000, v27
; GFX8-NEXT: v_and_b32_e32 v11, 0xffff0000, v11
; GFX8-NEXT: v_add_u32_e32 v33, vcc, s4, v33
-; GFX8-NEXT: v_and_b32_e32 v34, 0xff800000, v28
; GFX8-NEXT: v_min_f32_e32 v11, v11, v27
-; GFX8-NEXT: v_or_b32_e32 v34, 0x400000, v34
+; GFX8-NEXT: v_or_b32_e32 v34, 0x400000, v28
; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v28, v28
; GFX8-NEXT: v_bfe_u32 v27, v11, 16, 1
; GFX8-NEXT: v_cndmask_b32_e32 v28, v33, v34, vcc
; GFX8-NEXT: v_add_u32_e32 v27, vcc, v27, v11
; GFX8-NEXT: v_add_u32_e32 v27, vcc, s4, v27
-; GFX8-NEXT: v_and_b32_e32 v33, 0xff800000, v11
-; GFX8-NEXT: v_or_b32_e32 v33, 0x400000, v33
+; GFX8-NEXT: v_or_b32_e32 v33, 0x400000, v11
; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v11, v11
; GFX8-NEXT: v_cndmask_b32_e32 v11, v27, v33, vcc
; GFX8-NEXT: v_lshlrev_b32_e32 v27, 16, v26
@@ -20436,16 +20016,14 @@ define <32 x bfloat> @v_minnum_v32bf16(<32 x bfloat> %a, <32 x bfloat> %b) {
; GFX8-NEXT: v_and_b32_e32 v26, 0xffff0000, v26
; GFX8-NEXT: v_and_b32_e32 v10, 0xffff0000, v10
; GFX8-NEXT: v_add_u32_e32 v33, vcc, s4, v33
-; GFX8-NEXT: v_and_b32_e32 v34, 0xff800000, v27
; GFX8-NEXT: v_min_f32_e32 v10, v10, v26
-; GFX8-NEXT: v_or_b32_e32 v34, 0x400000, v34
+; GFX8-NEXT: v_or_b32_e32 v34, 0x400000, v27
; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v27, v27
; GFX8-NEXT: v_bfe_u32 v26, v10, 16, 1
; GFX8-NEXT: v_cndmask_b32_e32 v27, v33, v34, vcc
; GFX8-NEXT: v_add_u32_e32 v26, vcc, v26, v10
; GFX8-NEXT: v_add_u32_e32 v26, vcc, s4, v26
-; GFX8-NEXT: v_and_b32_e32 v33, 0xff800000, v10
-; GFX8-NEXT: v_or_b32_e32 v33, 0x400000, v33
+; GFX8-NEXT: v_or_b32_e32 v33, 0x400000, v10
; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v10, v10
; GFX8-NEXT: v_cndmask_b32_e32 v10, v26, v33, vcc
; GFX8-NEXT: v_lshlrev_b32_e32 v26, 16, v25
@@ -20456,16 +20034,14 @@ define <32 x bfloat> @v_minnum_v32bf16(<32 x bfloat> %a, <32 x bfloat> %b) {
; GFX8-NEXT: v_and_b32_e32 v25, 0xffff0000, v25
; GFX8-NEXT: v_and_b32_e32 v9, 0xffff0000, v9
; GFX8-NEXT: v_add_u32_e32 v33, vcc, s4, v33
-; GFX8-NEXT: v_and_b32_e32 v34, 0xff800000, v26
; GFX8-NEXT: v_min_f32_e32 v9, v9, v25
-; GFX8-NEXT: v_or_b32_e32 v34, 0x400000, v34
+; GFX8-NEXT: v_or_b32_e32 v34, 0x400000, v26
; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v26, v26
; GFX8-NEXT: v_bfe_u32 v25, v9, 16, 1
; GFX8-NEXT: v_cndmask_b32_e32 v26, v33, v34, vcc
; GFX8-NEXT: v_add_u32_e32 v25, vcc, v25, v9
; GFX8-NEXT: v_add_u32_e32 v25, vcc, s4, v25
-; GFX8-NEXT: v_and_b32_e32 v33, 0xff800000, v9
-; GFX8-NEXT: v_or_b32_e32 v33, 0x400000, v33
+; GFX8-NEXT: v_or_b32_e32 v33, 0x400000, v9
; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v9, v9
; GFX8-NEXT: v_cndmask_b32_e32 v9, v25, v33, vcc
; GFX8-NEXT: v_lshlrev_b32_e32 v25, 16, v24
@@ -20476,16 +20052,14 @@ define <32 x bfloat> @v_minnum_v32bf16(<32 x bfloat> %a, <32 x bfloat> %b) {
; GFX8-NEXT: v_and_b32_e32 v24, 0xffff0000, v24
; GFX8-NEXT: v_and_b32_e32 v8, 0xffff0000, v8
; GFX8-NEXT: v_add_u32_e32 v33, vcc, s4, v33
-; GFX8-NEXT: v_and_b32_e32 v34, 0xff800000, v25
; GFX8-NEXT: v_min_f32_e32 v8, v8, v24
-; GFX8-NEXT: v_or_b32_e32 v34, 0x400000, v34
+; GFX8-NEXT: v_or_b32_e32 v34, 0x400000, v25
; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v25, v25
; GFX8-NEXT: v_bfe_u32 v24, v8, 16, 1
; GFX8-NEXT: v_cndmask_b32_e32 v25, v33, v34, vcc
; GFX8-NEXT: v_add_u32_e32 v24, vcc, v24, v8
; GFX8-NEXT: v_add_u32_e32 v24, vcc, s4, v24
-; GFX8-NEXT: v_and_b32_e32 v33, 0xff800000, v8
-; GFX8-NEXT: v_or_b32_e32 v33, 0x400000, v33
+; GFX8-NEXT: v_or_b32_e32 v33, 0x400000, v8
; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v8, v8
; GFX8-NEXT: v_cndmask_b32_e32 v8, v24, v33, vcc
; GFX8-NEXT: v_lshlrev_b32_e32 v24, 16, v23
@@ -20496,16 +20070,14 @@ define <32 x bfloat> @v_minnum_v32bf16(<32 x bfloat> %a, <32 x bfloat> %b) {
; GFX8-NEXT: v_and_b32_e32 v23, 0xffff0000, v23
; GFX8-NEXT: v_and_b32_e32 v7, 0xffff0000, v7
; GFX8-NEXT: v_add_u32_e32 v33, vcc, s4, v33
-; GFX8-NEXT: v_and_b32_e32 v34, 0xff800000, v24
; GFX8-NEXT: v_min_f32_e32 v7, v7, v23
-; GFX8-NEXT: v_or_b32_e32 v34, 0x400000, v34
+; GFX8-NEXT: v_or_b32_e32 v34, 0x400000, v24
; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v24, v24
; GFX8-NEXT: v_bfe_u32 v23, v7, 16, 1
; GFX8-NEXT: v_cndmask_b32_e32 v24, v33, v34, vcc
; GFX8-NEXT: v_add_u32_e32 v23, vcc, v23, v7
; GFX8-NEXT: v_add_u32_e32 v23, vcc, s4, v23
-; GFX8-NEXT: v_and_b32_e32 v33, 0xff800000, v7
-; GFX8-NEXT: v_or_b32_e32 v33, 0x400000, v33
+; GFX8-NEXT: v_or_b32_e32 v33, 0x400000, v7
; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v7, v7
; GFX8-NEXT: v_cndmask_b32_e32 v7, v23, v33, vcc
; GFX8-NEXT: v_lshlrev_b32_e32 v23, 16, v22
@@ -20516,16 +20088,14 @@ define <32 x bfloat> @v_minnum_v32bf16(<32 x bfloat> %a, <32 x bfloat> %b) {
; GFX8-NEXT: v_and_b32_e32 v22, 0xffff0000, v22
; GFX8-NEXT: v_and_b32_e32 v6, 0xffff0000, v6
; GFX8-NEXT: v_add_u32_e32 v33, vcc, s4, v33
-; GFX8-NEXT: v_and_b32_e32 v34, 0xff800000, v23
; GFX8-NEXT: v_min_f32_e32 v6, v6, v22
-; GFX8-NEXT: v_or_b32_e32 v34, 0x400000, v34
+; GFX8-NEXT: v_or_b32_e32 v34, 0x400000, v23
; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v23, v23
; GFX8-NEXT: v_bfe_u32 v22, v6, 16, 1
; GFX8-NEXT: v_cndmask_b32_e32 v23, v33, v34, vcc
; GFX8-NEXT: v_add_u32_e32 v22, vcc, v22, v6
; GFX8-NEXT: v_add_u32_e32 v22, vcc, s4, v22
-; GFX8-NEXT: v_and_b32_e32 v33, 0xff800000, v6
-; GFX8-NEXT: v_or_b32_e32 v33, 0x400000, v33
+; GFX8-NEXT: v_or_b32_e32 v33, 0x400000, v6
; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v6, v6
; GFX8-NEXT: v_cndmask_b32_e32 v6, v22, v33, vcc
; GFX8-NEXT: v_lshlrev_b32_e32 v22, 16, v21
@@ -20536,16 +20106,14 @@ define <32 x bfloat> @v_minnum_v32bf16(<32 x bfloat> %a, <32 x bfloat> %b) {
; GFX8-NEXT: v_and_b32_e32 v21, 0xffff0000, v21
; GFX8-NEXT: v_and_b32_e32 v5, 0xffff0000, v5
; GFX8-NEXT: v_add_u32_e32 v33, vcc, s4, v33
-; GFX8-NEXT: v_and_b32_e32 v34, 0xff800000, v22
; GFX8-NEXT: v_min_f32_e32 v5, v5, v21
-; GFX8-NEXT: v_or_b32_e32 v34, 0x400000, v34
+; GFX8-NEXT: v_or_b32_e32 v34, 0x400000, v22
; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v22, v22
; GFX8-NEXT: v_bfe_u32 v21, v5, 16, 1
; GFX8-NEXT: v_cndmask_b32_e32 v22, v33, v34, vcc
; GFX8-NEXT: v_add_u32_e32 v21, vcc, v21, v5
; GFX8-NEXT: v_add_u32_e32 v21, vcc, s4, v21
-; GFX8-NEXT: v_and_b32_e32 v33, 0xff800000, v5
-; GFX8-NEXT: v_or_b32_e32 v33, 0x400000, v33
+; GFX8-NEXT: v_or_b32_e32 v33, 0x400000, v5
; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v5, v5
; GFX8-NEXT: v_cndmask_b32_e32 v5, v21, v33, vcc
; GFX8-NEXT: v_lshlrev_b32_e32 v21, 16, v20
@@ -20556,16 +20124,14 @@ define <32 x bfloat> @v_minnum_v32bf16(<32 x bfloat> %a, <32 x bfloat> %b) {
; GFX8-NEXT: v_and_b32_e32 v20, 0xffff0000, v20
; GFX8-NEXT: v_and_b32_e32 v4, 0xffff0000, v4
; GFX8-NEXT: v_add_u32_e32 v33, vcc, s4, v33
-; GFX8-NEXT: v_and_b32_e32 v34, 0xff800000, v21
; GFX8-NEXT: v_min_f32_e32 v4, v4, v20
-; GFX8-NEXT: v_or_b32_e32 v34, 0x400000, v34
+; GFX8-NEXT: v_or_b32_e32 v34, 0x400000, v21
; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v21, v21
; GFX8-NEXT: v_bfe_u32 v20, v4, 16, 1
; GFX8-NEXT: v_cndmask_b32_e32 v21, v33, v34, vcc
; GFX8-NEXT: v_add_u32_e32 v20, vcc, v20, v4
; GFX8-NEXT: v_add_u32_e32 v20, vcc, s4, v20
-; GFX8-NEXT: v_and_b32_e32 v33, 0xff800000, v4
-; GFX8-NEXT: v_or_b32_e32 v33, 0x400000, v33
+; GFX8-NEXT: v_or_b32_e32 v33, 0x400000, v4
; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v4, v4
; GFX8-NEXT: v_cndmask_b32_e32 v4, v20, v33, vcc
; GFX8-NEXT: v_lshlrev_b32_e32 v20, 16, v19
@@ -20576,16 +20142,14 @@ define <32 x bfloat> @v_minnum_v32bf16(<32 x bfloat> %a, <32 x bfloat> %b) {
; GFX8-NEXT: v_and_b32_e32 v19, 0xffff0000, v19
; GFX8-NEXT: v_and_b32_e32 v3, 0xffff0000, v3
; GFX8-NEXT: v_add_u32_e32 v33, vcc, s4, v33
-; GFX8-NEXT: v_and_b32_e32 v34, 0xff800000, v20
; GFX8-NEXT: v_min_f32_e32 v3, v3, v19
-; GFX8-NEXT: v_or_b32_e32 v34, 0x400000, v34
+; GFX8-NEXT: v_or_b32_e32 v34, 0x400000, v20
; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v20, v20
; GFX8-NEXT: v_bfe_u32 v19, v3, 16, 1
; GFX8-NEXT: v_cndmask_b32_e32 v20, v33, v34, vcc
; GFX8-NEXT: v_add_u32_e32 v19, vcc, v19, v3
; GFX8-NEXT: v_add_u32_e32 v19, vcc, s4, v19
-; GFX8-NEXT: v_and_b32_e32 v33, 0xff800000, v3
-; GFX8-NEXT: v_or_b32_e32 v33, 0x400000, v33
+; GFX8-NEXT: v_or_b32_e32 v33, 0x400000, v3
; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v3, v3
; GFX8-NEXT: v_cndmask_b32_e32 v3, v19, v33, vcc
; GFX8-NEXT: v_lshlrev_b32_e32 v19, 16, v18
@@ -20596,16 +20160,14 @@ define <32 x bfloat> @v_minnum_v32bf16(<32 x bfloat> %a, <32 x bfloat> %b) {
; GFX8-NEXT: v_and_b32_e32 v18, 0xffff0000, v18
; GFX8-NEXT: v_and_b32_e32 v2, 0xffff0000, v2
; GFX8-NEXT: v_add_u32_e32 v33, vcc, s4, v33
-; GFX8-NEXT: v_and_b32_e32 v34, 0xff800000, v19
; GFX8-NEXT: v_min_f32_e32 v2, v2, v18
-; GFX8-NEXT: v_or_b32_e32 v34, 0x400000, v34
+; GFX8-NEXT: v_or_b32_e32 v34, 0x400000, v19
; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v19, v19
; GFX8-NEXT: v_bfe_u32 v18, v2, 16, 1
; GFX8-NEXT: v_cndmask_b32_e32 v19, v33, v34, vcc
; GFX8-NEXT: v_add_u32_e32 v18, vcc, v18, v2
; GFX8-NEXT: v_add_u32_e32 v18, vcc, s4, v18
-; GFX8-NEXT: v_and_b32_e32 v33, 0xff800000, v2
-; GFX8-NEXT: v_or_b32_e32 v33, 0x400000, v33
+; GFX8-NEXT: v_or_b32_e32 v33, 0x400000, v2
; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v2, v2
; GFX8-NEXT: v_cndmask_b32_e32 v2, v18, v33, vcc
; GFX8-NEXT: v_lshlrev_b32_e32 v18, 16, v17
@@ -20616,16 +20178,14 @@ define <32 x bfloat> @v_minnum_v32bf16(<32 x bfloat> %a, <32 x bfloat> %b) {
; GFX8-NEXT: v_and_b32_e32 v17, 0xffff0000, v17
; GFX8-NEXT: v_and_b32_e32 v1, 0xffff0000, v1
; GFX8-NEXT: v_add_u32_e32 v33, vcc, s4, v33
-; GFX8-NEXT: v_and_b32_e32 v34, 0xff800000, v18
; GFX8-NEXT: v_min_f32_e32 v1, v1, v17
-; GFX8-NEXT: v_or_b32_e32 v34, 0x400000, v34
+; GFX8-NEXT: v_or_b32_e32 v34, 0x400000, v18
; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v18, v18
; GFX8-NEXT: v_bfe_u32 v17, v1, 16, 1
; GFX8-NEXT: v_cndmask_b32_e32 v18, v33, v34, vcc
; GFX8-NEXT: v_add_u32_e32 v17, vcc, v17, v1
; GFX8-NEXT: v_add_u32_e32 v17, vcc, s4, v17
-; GFX8-NEXT: v_and_b32_e32 v33, 0xff800000, v1
-; GFX8-NEXT: v_or_b32_e32 v33, 0x400000, v33
+; GFX8-NEXT: v_or_b32_e32 v33, 0x400000, v1
; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v1, v1
; GFX8-NEXT: v_cndmask_b32_e32 v1, v17, v33, vcc
; GFX8-NEXT: v_lshlrev_b32_e32 v17, 16, v16
@@ -20636,16 +20196,14 @@ define <32 x bfloat> @v_minnum_v32bf16(<32 x bfloat> %a, <32 x bfloat> %b) {
; GFX8-NEXT: v_and_b32_e32 v16, 0xffff0000, v16
; GFX8-NEXT: v_and_b32_e32 v0, 0xffff0000, v0
; GFX8-NEXT: v_add_u32_e32 v33, vcc, s4, v33
-; GFX8-NEXT: v_and_b32_e32 v34, 0xff800000, v17
; GFX8-NEXT: v_min_f32_e32 v0, v0, v16
-; GFX8-NEXT: v_or_b32_e32 v34, 0x400000, v34
+; GFX8-NEXT: v_or_b32_e32 v34, 0x400000, v17
; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v17, v17
; GFX8-NEXT: v_bfe_u32 v16, v0, 16, 1
; GFX8-NEXT: v_cndmask_b32_e32 v17, v33, v34, vcc
; GFX8-NEXT: v_add_u32_e32 v16, vcc, v16, v0
; GFX8-NEXT: v_add_u32_e32 v16, vcc, s4, v16
-; GFX8-NEXT: v_and_b32_e32 v33, 0xff800000, v0
-; GFX8-NEXT: v_or_b32_e32 v33, 0x400000, v33
+; GFX8-NEXT: v_or_b32_e32 v33, 0x400000, v0
; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v0, v0
; GFX8-NEXT: v_cndmask_b32_e32 v0, v16, v33, vcc
; GFX8-NEXT: v_lshrrev_b32_e32 v0, 16, v0
@@ -20686,292 +20244,260 @@ define <32 x bfloat> @v_minnum_v32bf16(<32 x bfloat> %a, <32 x bfloat> %b) {
; GFX9-NEXT: v_lshlrev_b32_e32 v31, 16, v30
; GFX9-NEXT: v_lshlrev_b32_e32 v32, 16, v14
; GFX9-NEXT: v_min_f32_e32 v31, v32, v31
-; GFX9-NEXT: s_movk_i32 s4, 0x7fff
-; GFX9-NEXT: v_bfe_u32 v32, v31, 16, 1
-; GFX9-NEXT: v_and_b32_e32 v33, 0xff800000, v31
; GFX9-NEXT: v_and_b32_e32 v30, 0xffff0000, v30
; GFX9-NEXT: v_and_b32_e32 v14, 0xffff0000, v14
+; GFX9-NEXT: s_movk_i32 s4, 0x7fff
+; GFX9-NEXT: v_bfe_u32 v32, v31, 16, 1
+; GFX9-NEXT: v_min_f32_e32 v14, v14, v30
; GFX9-NEXT: v_add3_u32 v32, v32, v31, s4
-; GFX9-NEXT: v_or_b32_e32 v33, 0x400000, v33
+; GFX9-NEXT: v_or_b32_e32 v33, 0x400000, v31
; GFX9-NEXT: v_cmp_u_f32_e32 vcc, v31, v31
-; GFX9-NEXT: v_min_f32_e32 v14, v14, v30
-; GFX9-NEXT: v_cndmask_b32_e32 v31, v32, v33, vcc
; GFX9-NEXT: v_bfe_u32 v30, v14, 16, 1
-; GFX9-NEXT: v_and_b32_e32 v32, 0xff800000, v14
+; GFX9-NEXT: v_cndmask_b32_e32 v31, v32, v33, vcc
; GFX9-NEXT: v_add3_u32 v30, v30, v14, s4
-; GFX9-NEXT: v_or_b32_e32 v32, 0x400000, v32
+; GFX9-NEXT: v_or_b32_e32 v32, 0x400000, v14
; GFX9-NEXT: v_cmp_u_f32_e32 vcc, v14, v14
; GFX9-NEXT: v_cndmask_b32_e32 v14, v30, v32, vcc
; GFX9-NEXT: v_lshlrev_b32_e32 v30, 16, v29
; GFX9-NEXT: v_lshlrev_b32_e32 v32, 16, v13
+; GFX9-NEXT: v_min_f32_e32 v30, v32, v30
; GFX9-NEXT: v_and_b32_e32 v29, 0xffff0000, v29
; GFX9-NEXT: v_and_b32_e32 v13, 0xffff0000, v13
-; GFX9-NEXT: v_min_f32_e32 v13, v13, v29
-; GFX9-NEXT: buffer_load_dword v29, off, s[0:3], s32
-; GFX9-NEXT: v_min_f32_e32 v30, v32, v30
; GFX9-NEXT: v_bfe_u32 v32, v30, 16, 1
-; GFX9-NEXT: v_and_b32_e32 v33, 0xff800000, v30
+; GFX9-NEXT: v_min_f32_e32 v13, v13, v29
; GFX9-NEXT: v_add3_u32 v32, v32, v30, s4
-; GFX9-NEXT: v_or_b32_e32 v33, 0x400000, v33
+; GFX9-NEXT: v_or_b32_e32 v33, 0x400000, v30
; GFX9-NEXT: v_cmp_u_f32_e32 vcc, v30, v30
+; GFX9-NEXT: v_bfe_u32 v29, v13, 16, 1
; GFX9-NEXT: v_cndmask_b32_e32 v30, v32, v33, vcc
-; GFX9-NEXT: v_lshlrev_b32_e32 v32, 16, v15
+; GFX9-NEXT: v_add3_u32 v29, v29, v13, s4
+; GFX9-NEXT: v_or_b32_e32 v32, 0x400000, v13
+; GFX9-NEXT: v_cmp_u_f32_e32 vcc, v13, v13
+; GFX9-NEXT: v_cndmask_b32_e32 v13, v29, v32, vcc
+; GFX9-NEXT: v_lshlrev_b32_e32 v29, 16, v28
+; GFX9-NEXT: v_lshlrev_b32_e32 v32, 16, v12
+; GFX9-NEXT: v_min_f32_e32 v32, v32, v29
+; GFX9-NEXT: buffer_load_dword v29, off, s[0:3], s32
+; GFX9-NEXT: v_lshlrev_b32_e32 v33, 16, v15
; GFX9-NEXT: v_and_b32_e32 v15, 0xffff0000, v15
+; GFX9-NEXT: v_and_b32_e32 v28, 0xffff0000, v28
+; GFX9-NEXT: v_and_b32_e32 v12, 0xffff0000, v12
+; GFX9-NEXT: v_min_f32_e32 v12, v12, v28
+; GFX9-NEXT: v_bfe_u32 v28, v12, 16, 1
+; GFX9-NEXT: v_add3_u32 v28, v28, v12, s4
; GFX9-NEXT: s_waitcnt vmcnt(0)
-; GFX9-NEXT: v_lshlrev_b32_e32 v33, 16, v29
-; GFX9-NEXT: v_min_f32_e32 v32, v32, v33
+; GFX9-NEXT: v_lshlrev_b32_e32 v34, 16, v29
+; GFX9-NEXT: v_min_f32_e32 v33, v33, v34
; GFX9-NEXT: v_and_b32_e32 v29, 0xffff0000, v29
; GFX9-NEXT: v_min_f32_e32 v29, v15, v29
-; GFX9-NEXT: v_bfe_u32 v15, v32, 16, 1
-; GFX9-NEXT: v_and_b32_e32 v33, 0xff800000, v32
-; GFX9-NEXT: v_add3_u32 v15, v15, v32, s4
-; GFX9-NEXT: v_or_b32_e32 v33, 0x400000, v33
-; GFX9-NEXT: v_cmp_u_f32_e32 vcc, v32, v32
-; GFX9-NEXT: v_cndmask_b32_e32 v15, v15, v33, vcc
-; GFX9-NEXT: v_bfe_u32 v32, v29, 16, 1
-; GFX9-NEXT: v_and_b32_e32 v33, 0xff800000, v29
-; GFX9-NEXT: v_add3_u32 v32, v32, v29, s4
-; GFX9-NEXT: v_or_b32_e32 v33, 0x400000, v33
+; GFX9-NEXT: v_bfe_u32 v15, v33, 16, 1
+; GFX9-NEXT: v_add3_u32 v15, v15, v33, s4
+; GFX9-NEXT: v_or_b32_e32 v34, 0x400000, v33
+; GFX9-NEXT: v_cmp_u_f32_e32 vcc, v33, v33
+; GFX9-NEXT: v_bfe_u32 v33, v29, 16, 1
+; GFX9-NEXT: v_cndmask_b32_e32 v15, v15, v34, vcc
+; GFX9-NEXT: v_add3_u32 v33, v33, v29, s4
+; GFX9-NEXT: v_or_b32_e32 v34, 0x400000, v29
; GFX9-NEXT: v_cmp_u_f32_e32 vcc, v29, v29
-; GFX9-NEXT: v_cndmask_b32_e32 v29, v32, v33, vcc
-; GFX9-NEXT: v_bfe_u32 v32, v13, 16, 1
-; GFX9-NEXT: v_and_b32_e32 v33, 0xff800000, v13
-; GFX9-NEXT: v_add3_u32 v32, v32, v13, s4
-; GFX9-NEXT: v_or_b32_e32 v33, 0x400000, v33
-; GFX9-NEXT: v_cmp_u_f32_e32 vcc, v13, v13
-; GFX9-NEXT: v_cndmask_b32_e32 v13, v32, v33, vcc
-; GFX9-NEXT: v_lshlrev_b32_e32 v32, 16, v28
-; GFX9-NEXT: v_lshlrev_b32_e32 v33, 16, v12
-; GFX9-NEXT: v_min_f32_e32 v32, v33, v32
+; GFX9-NEXT: v_cndmask_b32_e32 v29, v33, v34, vcc
; GFX9-NEXT: v_bfe_u32 v33, v32, 16, 1
-; GFX9-NEXT: v_and_b32_e32 v34, 0xff800000, v32
-; GFX9-NEXT: v_and_b32_e32 v28, 0xffff0000, v28
-; GFX9-NEXT: v_and_b32_e32 v12, 0xffff0000, v12
; GFX9-NEXT: v_add3_u32 v33, v33, v32, s4
-; GFX9-NEXT: v_or_b32_e32 v34, 0x400000, v34
+; GFX9-NEXT: v_or_b32_e32 v34, 0x400000, v32
; GFX9-NEXT: v_cmp_u_f32_e32 vcc, v32, v32
-; GFX9-NEXT: v_min_f32_e32 v12, v12, v28
; GFX9-NEXT: v_cndmask_b32_e32 v32, v33, v34, vcc
-; GFX9-NEXT: v_bfe_u32 v28, v12, 16, 1
-; GFX9-NEXT: v_and_b32_e32 v33, 0xff800000, v12
-; GFX9-NEXT: v_add3_u32 v28, v28, v12, s4
-; GFX9-NEXT: v_or_b32_e32 v33, 0x400000, v33
+; GFX9-NEXT: v_or_b32_e32 v33, 0x400000, v12
; GFX9-NEXT: v_cmp_u_f32_e32 vcc, v12, v12
; GFX9-NEXT: v_cndmask_b32_e32 v12, v28, v33, vcc
; GFX9-NEXT: v_lshlrev_b32_e32 v28, 16, v27
; GFX9-NEXT: v_lshlrev_b32_e32 v33, 16, v11
; GFX9-NEXT: v_min_f32_e32 v28, v33, v28
-; GFX9-NEXT: v_bfe_u32 v33, v28, 16, 1
-; GFX9-NEXT: v_and_b32_e32 v34, 0xff800000, v28
; GFX9-NEXT: v_and_b32_e32 v27, 0xffff0000, v27
; GFX9-NEXT: v_and_b32_e32 v11, 0xffff0000, v11
+; GFX9-NEXT: v_bfe_u32 v33, v28, 16, 1
+; GFX9-NEXT: v_min_f32_e32 v11, v11, v27
; GFX9-NEXT: v_add3_u32 v33, v33, v28, s4
-; GFX9-NEXT: v_or_b32_e32 v34, 0x400000, v34
+; GFX9-NEXT: v_or_b32_e32 v34, 0x400000, v28
; GFX9-NEXT: v_cmp_u_f32_e32 vcc, v28, v28
-; GFX9-NEXT: v_min_f32_e32 v11, v11, v27
-; GFX9-NEXT: v_cndmask_b32_e32 v28, v33, v34, vcc
; GFX9-NEXT: v_bfe_u32 v27, v11, 16, 1
-; GFX9-NEXT: v_and_b32_e32 v33, 0xff800000, v11
+; GFX9-NEXT: v_cndmask_b32_e32 v28, v33, v34, vcc
; GFX9-NEXT: v_add3_u32 v27, v27, v11, s4
-; GFX9-NEXT: v_or_b32_e32 v33, 0x400000, v33
+; GFX9-NEXT: v_or_b32_e32 v33, 0x400000, v11
; GFX9-NEXT: v_cmp_u_f32_e32 vcc, v11, v11
; GFX9-NEXT: v_cndmask_b32_e32 v11, v27, v33, vcc
; GFX9-NEXT: v_lshlrev_b32_e32 v27, 16, v26
; GFX9-NEXT: v_lshlrev_b32_e32 v33, 16, v10
; GFX9-NEXT: v_min_f32_e32 v27, v33, v27
-; GFX9-NEXT: v_bfe_u32 v33, v27, 16, 1
-; GFX9-NEXT: v_and_b32_e32 v34, 0xff800000, v27
; GFX9-NEXT: v_and_b32_e32 v26, 0xffff0000, v26
; GFX9-NEXT: v_and_b32_e32 v10, 0xffff0000, v10
+; GFX9-NEXT: v_bfe_u32 v33, v27, 16, 1
+; GFX9-NEXT: v_min_f32_e32 v10, v10, v26
; GFX9-NEXT: v_add3_u32 v33, v33, v27, s4
-; GFX9-NEXT: v_or_b32_e32 v34, 0x400000, v34
+; GFX9-NEXT: v_or_b32_e32 v34, 0x400000, v27
; GFX9-NEXT: v_cmp_u_f32_e32 vcc, v27, v27
-; GFX9-NEXT: v_min_f32_e32 v10, v10, v26
-; GFX9-NEXT: v_cndmask_b32_e32 v27, v33, v34, vcc
; GFX9-NEXT: v_bfe_u32 v26, v10, 16, 1
-; GFX9-NEXT: v_and_b32_e32 v33, 0xff800000, v10
+; GFX9-NEXT: v_cndmask_b32_e32 v27, v33, v34, vcc
; GFX9-NEXT: v_add3_u32 v26, v26, v10, s4
-; GFX9-NEXT: v_or_b32_e32 v33, 0x400000, v33
+; GFX9-NEXT: v_or_b32_e32 v33, 0x400000, v10
; GFX9-NEXT: v_cmp_u_f32_e32 vcc, v10, v10
; GFX9-NEXT: v_cndmask_b32_e32 v10, v26, v33, vcc
; GFX9-NEXT: v_lshlrev_b32_e32 v26, 16, v25
; GFX9-NEXT: v_lshlrev_b32_e32 v33, 16, v9
; GFX9-NEXT: v_min_f32_e32 v26, v33, v26
-; GFX9-NEXT: v_bfe_u32 v33, v26, 16, 1
-; GFX9-NEXT: v_and_b32_e32 v34, 0xff800000, v26
; GFX9-NEXT: v_and_b32_e32 v25, 0xffff0000, v25
; GFX9-NEXT: v_and_b32_e32 v9, 0xffff0000, v9
+; GFX9-NEXT: v_bfe_u32 v33, v26, 16, 1
+; GFX9-NEXT: v_min_f32_e32 v9, v9, v25
; GFX9-NEXT: v_add3_u32 v33, v33, v26, s4
-; GFX9-NEXT: v_or_b32_e32 v34, 0x400000, v34
+; GFX9-NEXT: v_or_b32_e32 v34, 0x400000, v26
; GFX9-NEXT: v_cmp_u_f32_e32 vcc, v26, v26
-; GFX9-NEXT: v_min_f32_e32 v9, v9, v25
-; GFX9-NEXT: v_cndmask_b32_e32 v26, v33, v34, vcc
; GFX9-NEXT: v_bfe_u32 v25, v9, 16, 1
-; GFX9-NEXT: v_and_b32_e32 v33, 0xff800000, v9
+; GFX9-NEXT: v_cndmask_b32_e32 v26, v33, v34, vcc
; GFX9-NEXT: v_add3_u32 v25, v25, v9, s4
-; GFX9-NEXT: v_or_b32_e32 v33, 0x400000, v33
+; GFX9-NEXT: v_or_b32_e32 v33, 0x400000, v9
; GFX9-NEXT: v_cmp_u_f32_e32 vcc, v9, v9
; GFX9-NEXT: v_cndmask_b32_e32 v9, v25, v33, vcc
; GFX9-NEXT: v_lshlrev_b32_e32 v25, 16, v24
; GFX9-NEXT: v_lshlrev_b32_e32 v33, 16, v8
; GFX9-NEXT: v_min_f32_e32 v25, v33, v25
-; GFX9-NEXT: v_bfe_u32 v33, v25, 16, 1
-; GFX9-NEXT: v_and_b32_e32 v34, 0xff800000, v25
; GFX9-NEXT: v_and_b32_e32 v24, 0xffff0000, v24
; GFX9-NEXT: v_and_b32_e32 v8, 0xffff0000, v8
+; GFX9-NEXT: v_bfe_u32 v33, v25, 16, 1
+; GFX9-NEXT: v_min_f32_e32 v8, v8, v24
; GFX9-NEXT: v_add3_u32 v33, v33, v25, s4
-; GFX9-NEXT: v_or_b32_e32 v34, 0x400000, v34
+; GFX9-NEXT: v_or_b32_e32 v34, 0x400000, v25
; GFX9-NEXT: v_cmp_u_f32_e32 vcc, v25, v25
-; GFX9-NEXT: v_min_f32_e32 v8, v8, v24
-; GFX9-NEXT: v_cndmask_b32_e32 v25, v33, v34, vcc
; GFX9-NEXT: v_bfe_u32 v24, v8, 16, 1
-; GFX9-NEXT: v_and_b32_e32 v33, 0xff800000, v8
+; GFX9-NEXT: v_cndmask_b32_e32 v25, v33, v34, vcc
; GFX9-NEXT: v_add3_u32 v24, v24, v8, s4
-; GFX9-NEXT: v_or_b32_e32 v33, 0x400000, v33
+; GFX9-NEXT: v_or_b32_e32 v33, 0x400000, v8
; GFX9-NEXT: v_cmp_u_f32_e32 vcc, v8, v8
; GFX9-NEXT: v_cndmask_b32_e32 v8, v24, v33, vcc
; GFX9-NEXT: v_lshlrev_b32_e32 v24, 16, v23
; GFX9-NEXT: v_lshlrev_b32_e32 v33, 16, v7
; GFX9-NEXT: v_min_f32_e32 v24, v33, v24
-; GFX9-NEXT: v_bfe_u32 v33, v24, 16, 1
-; GFX9-NEXT: v_and_b32_e32 v34, 0xff800000, v24
; GFX9-NEXT: v_and_b32_e32 v23, 0xffff0000, v23
; GFX9-NEXT: v_and_b32_e32 v7, 0xffff0000, v7
+; GFX9-NEXT: v_bfe_u32 v33, v24, 16, 1
+; GFX9-NEXT: v_min_f32_e32 v7, v7, v23
; GFX9-NEXT: v_add3_u32 v33, v33, v24, s4
-; GFX9-NEXT: v_or_b32_e32 v34, 0x400000, v34
+; GFX9-NEXT: v_or_b32_e32 v34, 0x400000, v24
; GFX9-NEXT: v_cmp_u_f32_e32 vcc, v24, v24
-; GFX9-NEXT: v_min_f32_e32 v7, v7, v23
-; GFX9-NEXT: v_cndmask_b32_e32 v24, v33, v34, vcc
; GFX9-NEXT: v_bfe_u32 v23, v7, 16, 1
-; GFX9-NEXT: v_and_b32_e32 v33, 0xff800000, v7
+; GFX9-NEXT: v_cndmask_b32_e32 v24, v33, v34, vcc
; GFX9-NEXT: v_add3_u32 v23, v23, v7, s4
-; GFX9-NEXT: v_or_b32_e32 v33, 0x400000, v33
+; GFX9-NEXT: v_or_b32_e32 v33, 0x400000, v7
; GFX9-NEXT: v_cmp_u_f32_e32 vcc, v7, v7
; GFX9-NEXT: v_cndmask_b32_e32 v7, v23, v33, vcc
; GFX9-NEXT: v_lshlrev_b32_e32 v23, 16, v22
; GFX9-NEXT: v_lshlrev_b32_e32 v33, 16, v6
; GFX9-NEXT: v_min_f32_e32 v23, v33, v23
-; GFX9-NEXT: v_bfe_u32 v33, v23, 16, 1
-; GFX9-NEXT: v_and_b32_e32 v34, 0xff800000, v23
; GFX9-NEXT: v_and_b32_e32 v22, 0xffff0000, v22
; GFX9-NEXT: v_and_b32_e32 v6, 0xffff0000, v6
+; GFX9-NEXT: v_bfe_u32 v33, v23, 16, 1
+; GFX9-NEXT: v_min_f32_e32 v6, v6, v22
; GFX9-NEXT: v_add3_u32 v33, v33, v23, s4
-; GFX9-NEXT: v_or_b32_e32 v34, 0x400000, v34
+; GFX9-NEXT: v_or_b32_e32 v34, 0x400000, v23
; GFX9-NEXT: v_cmp_u_f32_e32 vcc, v23, v23
-; GFX9-NEXT: v_min_f32_e32 v6, v6, v22
-; GFX9-NEXT: v_cndmask_b32_e32 v23, v33, v34, vcc
; GFX9-NEXT: v_bfe_u32 v22, v6, 16, 1
-; GFX9-NEXT: v_and_b32_e32 v33, 0xff800000, v6
+; GFX9-NEXT: v_cndmask_b32_e32 v23, v33, v34, vcc
; GFX9-NEXT: v_add3_u32 v22, v22, v6, s4
-; GFX9-NEXT: v_or_b32_e32 v33, 0x400000, v33
+; GFX9-NEXT: v_or_b32_e32 v33, 0x400000, v6
; GFX9-NEXT: v_cmp_u_f32_e32 vcc, v6, v6
; GFX9-NEXT: v_cndmask_b32_e32 v6, v22, v33, vcc
; GFX9-NEXT: v_lshlrev_b32_e32 v22, 16, v21
; GFX9-NEXT: v_lshlrev_b32_e32 v33, 16, v5
; GFX9-NEXT: v_min_f32_e32 v22, v33, v22
-; GFX9-NEXT: v_bfe_u32 v33, v22, 16, 1
-; GFX9-NEXT: v_and_b32_e32 v34, 0xff800000, v22
; GFX9-NEXT: v_and_b32_e32 v21, 0xffff0000, v21
; GFX9-NEXT: v_and_b32_e32 v5, 0xffff0000, v5
+; GFX9-NEXT: v_bfe_u32 v33, v22, 16, 1
+; GFX9-NEXT: v_min_f32_e32 v5, v5, v21
; GFX9-NEXT: v_add3_u32 v33, v33, v22, s4
-; GFX9-NEXT: v_or_b32_e32 v34, 0x400000, v34
+; GFX9-NEXT: v_or_b32_e32 v34, 0x400000, v22
; GFX9-NEXT: v_cmp_u_f32_e32 vcc, v22, v22
-; GFX9-NEXT: v_min_f32_e32 v5, v5, v21
-; GFX9-NEXT: v_cndmask_b32_e32 v22, v33, v34, vcc
; GFX9-NEXT: v_bfe_u32 v21, v5, 16, 1
-; GFX9-NEXT: v_and_b32_e32 v33, 0xff800000, v5
+; GFX9-NEXT: v_cndmask_b32_e32 v22, v33, v34, vcc
; GFX9-NEXT: v_add3_u32 v21, v21, v5, s4
-; GFX9-NEXT: v_or_b32_e32 v33, 0x400000, v33
+; GFX9-NEXT: v_or_b32_e32 v33, 0x400000, v5
; GFX9-NEXT: v_cmp_u_f32_e32 vcc, v5, v5
; GFX9-NEXT: v_cndmask_b32_e32 v5, v21, v33, vcc
; GFX9-NEXT: v_lshlrev_b32_e32 v21, 16, v20
; GFX9-NEXT: v_lshlrev_b32_e32 v33, 16, v4
; GFX9-NEXT: v_min_f32_e32 v21, v33, v21
-; GFX9-NEXT: v_bfe_u32 v33, v21, 16, 1
-; GFX9-NEXT: v_and_b32_e32 v34, 0xff800000, v21
; GFX9-NEXT: v_and_b32_e32 v20, 0xffff0000, v20
; GFX9-NEXT: v_and_b32_e32 v4, 0xffff0000, v4
+; GFX9-NEXT: v_bfe_u32 v33, v21, 16, 1
+; GFX9-NEXT: v_min_f32_e32 v4, v4, v20
; GFX9-NEXT: v_add3_u32 v33, v33, v21, s4
-; GFX9-NEXT: v_or_b32_e32 v34, 0x400000, v34
+; GFX9-NEXT: v_or_b32_e32 v34, 0x400000, v21
; GFX9-NEXT: v_cmp_u_f32_e32 vcc, v21, v21
-; GFX9-NEXT: v_min_f32_e32 v4, v4, v20
-; GFX9-NEXT: v_cndmask_b32_e32 v21, v33, v34, vcc
; GFX9-NEXT: v_bfe_u32 v20, v4, 16, 1
-; GFX9-NEXT: v_and_b32_e32 v33, 0xff800000, v4
+; GFX9-NEXT: v_cndmask_b32_e32 v21, v33, v34, vcc
; GFX9-NEXT: v_add3_u32 v20, v20, v4, s4
-; GFX9-NEXT: v_or_b32_e32 v33, 0x400000, v33
+; GFX9-NEXT: v_or_b32_e32 v33, 0x400000, v4
; GFX9-NEXT: v_cmp_u_f32_e32 vcc, v4, v4
; GFX9-NEXT: v_cndmask_b32_e32 v4, v20, v33, vcc
; GFX9-NEXT: v_lshlrev_b32_e32 v20, 16, v19
; GFX9-NEXT: v_lshlrev_b32_e32 v33, 16, v3
; GFX9-NEXT: v_min_f32_e32 v20, v33, v20
-; GFX9-NEXT: v_bfe_u32 v33, v20, 16, 1
-; GFX9-NEXT: v_and_b32_e32 v34, 0xff800000, v20
; GFX9-NEXT: v_and_b32_e32 v19, 0xffff0000, v19
; GFX9-NEXT: v_and_b32_e32 v3, 0xffff0000, v3
+; GFX9-NEXT: v_bfe_u32 v33, v20, 16, 1
+; GFX9-NEXT: v_min_f32_e32 v3, v3, v19
; GFX9-NEXT: v_add3_u32 v33, v33, v20, s4
-; GFX9-NEXT: v_or_b32_e32 v34, 0x400000, v34
+; GFX9-NEXT: v_or_b32_e32 v34, 0x400000, v20
; GFX9-NEXT: v_cmp_u_f32_e32 vcc, v20, v20
-; GFX9-NEXT: v_min_f32_e32 v3, v3, v19
-; GFX9-NEXT: v_cndmask_b32_e32 v20, v33, v34, vcc
; GFX9-NEXT: v_bfe_u32 v19, v3, 16, 1
-; GFX9-NEXT: v_and_b32_e32 v33, 0xff800000, v3
+; GFX9-NEXT: v_cndmask_b32_e32 v20, v33, v34, vcc
; GFX9-NEXT: v_add3_u32 v19, v19, v3, s4
-; GFX9-NEXT: v_or_b32_e32 v33, 0x400000, v33
+; GFX9-NEXT: v_or_b32_e32 v33, 0x400000, v3
; GFX9-NEXT: v_cmp_u_f32_e32 vcc, v3, v3
; GFX9-NEXT: v_cndmask_b32_e32 v3, v19, v33, vcc
; GFX9-NEXT: v_lshlrev_b32_e32 v19, 16, v18
; GFX9-NEXT: v_lshlrev_b32_e32 v33, 16, v2
; GFX9-NEXT: v_min_f32_e32 v19, v33, v19
-; GFX9-NEXT: v_bfe_u32 v33, v19, 16, 1
-; GFX9-NEXT: v_and_b32_e32 v34, 0xff800000, v19
; GFX9-NEXT: v_and_b32_e32 v18, 0xffff0000, v18
; GFX9-NEXT: v_and_b32_e32 v2, 0xffff0000, v2
+; GFX9-NEXT: v_bfe_u32 v33, v19, 16, 1
+; GFX9-NEXT: v_min_f32_e32 v2, v2, v18
; GFX9-NEXT: v_add3_u32 v33, v33, v19, s4
-; GFX9-NEXT: v_or_b32_e32 v34, 0x400000, v34
+; GFX9-NEXT: v_or_b32_e32 v34, 0x400000, v19
; GFX9-NEXT: v_cmp_u_f32_e32 vcc, v19, v19
-; GFX9-NEXT: v_min_f32_e32 v2, v2, v18
-; GFX9-NEXT: v_cndmask_b32_e32 v19, v33, v34, vcc
; GFX9-NEXT: v_bfe_u32 v18, v2, 16, 1
-; GFX9-NEXT: v_and_b32_e32 v33, 0xff800000, v2
+; GFX9-NEXT: v_cndmask_b32_e32 v19, v33, v34, vcc
; GFX9-NEXT: v_add3_u32 v18, v18, v2, s4
-; GFX9-NEXT: v_or_b32_e32 v33, 0x400000, v33
+; GFX9-NEXT: v_or_b32_e32 v33, 0x400000, v2
; GFX9-NEXT: v_cmp_u_f32_e32 vcc, v2, v2
; GFX9-NEXT: v_cndmask_b32_e32 v2, v18, v33, vcc
; GFX9-NEXT: v_lshlrev_b32_e32 v18, 16, v17
; GFX9-NEXT: v_lshlrev_b32_e32 v33, 16, v1
; GFX9-NEXT: v_min_f32_e32 v18, v33, v18
-; GFX9-NEXT: v_bfe_u32 v33, v18, 16, 1
-; GFX9-NEXT: v_and_b32_e32 v34, 0xff800000, v18
; GFX9-NEXT: v_and_b32_e32 v17, 0xffff0000, v17
; GFX9-NEXT: v_and_b32_e32 v1, 0xffff0000, v1
+; GFX9-NEXT: v_bfe_u32 v33, v18, 16, 1
+; GFX9-NEXT: v_min_f32_e32 v1, v1, v17
; GFX9-NEXT: v_add3_u32 v33, v33, v18, s4
-; GFX9-NEXT: v_or_b32_e32 v34, 0x400000, v34
+; GFX9-NEXT: v_or_b32_e32 v34, 0x400000, v18
; GFX9-NEXT: v_cmp_u_f32_e32 vcc, v18, v18
-; GFX9-NEXT: v_min_f32_e32 v1, v1, v17
-; GFX9-NEXT: v_cndmask_b32_e32 v18, v33, v34, vcc
; GFX9-NEXT: v_bfe_u32 v17, v1, 16, 1
-; GFX9-NEXT: v_and_b32_e32 v33, 0xff800000, v1
+; GFX9-NEXT: v_cndmask_b32_e32 v18, v33, v34, vcc
; GFX9-NEXT: v_add3_u32 v17, v17, v1, s4
-; GFX9-NEXT: v_or_b32_e32 v33, 0x400000, v33
+; GFX9-NEXT: v_or_b32_e32 v33, 0x400000, v1
; GFX9-NEXT: v_cmp_u_f32_e32 vcc, v1, v1
; GFX9-NEXT: v_cndmask_b32_e32 v1, v17, v33, vcc
; GFX9-NEXT: v_lshlrev_b32_e32 v17, 16, v16
; GFX9-NEXT: v_lshlrev_b32_e32 v33, 16, v0
; GFX9-NEXT: v_min_f32_e32 v17, v33, v17
-; GFX9-NEXT: v_bfe_u32 v33, v17, 16, 1
-; GFX9-NEXT: v_and_b32_e32 v34, 0xff800000, v17
; GFX9-NEXT: v_and_b32_e32 v16, 0xffff0000, v16
; GFX9-NEXT: v_and_b32_e32 v0, 0xffff0000, v0
+; GFX9-NEXT: v_bfe_u32 v33, v17, 16, 1
+; GFX9-NEXT: v_min_f32_e32 v0, v0, v16
; GFX9-NEXT: v_add3_u32 v33, v33, v17, s4
-; GFX9-NEXT: v_or_b32_e32 v34, 0x400000, v34
+; GFX9-NEXT: v_or_b32_e32 v34, 0x400000, v17
; GFX9-NEXT: v_cmp_u_f32_e32 vcc, v17, v17
-; GFX9-NEXT: v_min_f32_e32 v0, v0, v16
-; GFX9-NEXT: v_cndmask_b32_e32 v17, v33, v34, vcc
; GFX9-NEXT: v_bfe_u32 v16, v0, 16, 1
-; GFX9-NEXT: v_and_b32_e32 v33, 0xff800000, v0
+; GFX9-NEXT: v_cndmask_b32_e32 v17, v33, v34, vcc
; GFX9-NEXT: v_add3_u32 v16, v16, v0, s4
-; GFX9-NEXT: v_or_b32_e32 v33, 0x400000, v33
+; GFX9-NEXT: v_or_b32_e32 v33, 0x400000, v0
; GFX9-NEXT: v_cmp_u_f32_e32 vcc, v0, v0
; GFX9-NEXT: v_cndmask_b32_e32 v0, v16, v33, vcc
; GFX9-NEXT: s_mov_b32 s4, 0x7060302
@@ -20996,7 +20522,7 @@ define <32 x bfloat> @v_minnum_v32bf16(<32 x bfloat> %a, <32 x bfloat> %b) {
; GFX10-LABEL: v_minnum_v32bf16:
; GFX10: ; %bb.0:
; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX10-NEXT: buffer_load_dword v31, off, s[0:3], s32
+; GFX10-NEXT: buffer_load_dword v32, off, s[0:3], s32
; GFX10-NEXT: v_lshlrev_b32_e32 v39, 16, v27
; GFX10-NEXT: v_lshlrev_b32_e32 v48, 16, v11
; GFX10-NEXT: v_and_b32_e32 v27, 0xffff0000, v27
@@ -21061,7 +20587,6 @@ define <32 x bfloat> @v_minnum_v32bf16(<32 x bfloat> %a, <32 x bfloat> %b) {
; GFX10-NEXT: v_bfe_u32 v55, v11, 16, 1
; GFX10-NEXT: v_bfe_u32 v65, v49, 16, 1
; GFX10-NEXT: v_bfe_u32 v67, v10, 16, 1
-; GFX10-NEXT: s_mov_b32 s23, 0xff800000
; GFX10-NEXT: v_lshlrev_b32_e32 v33, 16, v30
; GFX10-NEXT: v_lshlrev_b32_e32 v34, 16, v14
; GFX10-NEXT: v_and_b32_e32 v30, 0xffff0000, v30
@@ -21077,10 +20602,10 @@ define <32 x bfloat> @v_minnum_v32bf16(<32 x bfloat> %a, <32 x bfloat> %b) {
; GFX10-NEXT: v_min_f32_e32 v1, v1, v17
; GFX10-NEXT: v_min_f32_e32 v17, v26, v50
; GFX10-NEXT: v_min_f32_e32 v0, v0, v16
-; GFX10-NEXT: v_and_or_b32 v54, v39, s23, 0x400000
-; GFX10-NEXT: v_and_or_b32 v64, v11, s23, 0x400000
-; GFX10-NEXT: v_and_or_b32 v66, v49, s23, 0x400000
-; GFX10-NEXT: v_and_or_b32 v68, v10, s23, 0x400000
+; GFX10-NEXT: v_or_b32_e32 v54, 0x400000, v39
+; GFX10-NEXT: v_or_b32_e32 v64, 0x400000, v11
+; GFX10-NEXT: v_or_b32_e32 v66, 0x400000, v49
+; GFX10-NEXT: v_or_b32_e32 v68, 0x400000, v10
; GFX10-NEXT: v_cmp_u_f32_e64 s9, v39, v39
; GFX10-NEXT: v_add3_u32 v39, v53, v39, 0x7fff
; GFX10-NEXT: v_cmp_u_f32_e64 s10, v11, v11
@@ -21118,28 +20643,28 @@ define <32 x bfloat> @v_minnum_v32bf16(<32 x bfloat> %a, <32 x bfloat> %b) {
; GFX10-NEXT: v_bfe_u32 v27, v14, 16, 1
; GFX10-NEXT: v_bfe_u32 v29, v35, 16, 1
; GFX10-NEXT: v_bfe_u32 v34, v13, 16, 1
-; GFX10-NEXT: v_and_or_b32 v48, v37, s23, 0x400000
-; GFX10-NEXT: v_and_or_b32 v52, v12, s23, 0x400000
+; GFX10-NEXT: v_or_b32_e32 v48, 0x400000, v37
+; GFX10-NEXT: v_or_b32_e32 v52, 0x400000, v12
; GFX10-NEXT: v_cmp_u_f32_e64 s7, v37, v37
; GFX10-NEXT: v_add3_u32 v37, v38, v37, 0x7fff
; GFX10-NEXT: v_cmp_u_f32_e64 s8, v12, v12
; GFX10-NEXT: v_add3_u32 v12, v50, v12, 0x7fff
; GFX10-NEXT: v_cmp_u_f32_e64 s10, v18, v18
; GFX10-NEXT: v_add3_u32 v54, v54, v18, 0x7fff
-; GFX10-NEXT: v_and_or_b32 v18, v18, s23, 0x400000
+; GFX10-NEXT: v_or_b32_e32 v18, 0x400000, v18
; GFX10-NEXT: v_cmp_u_f32_e64 s11, v1, v1
; GFX10-NEXT: v_add3_u32 v64, v64, v1, 0x7fff
-; GFX10-NEXT: v_and_or_b32 v1, v1, s23, 0x400000
+; GFX10-NEXT: v_or_b32_e32 v1, 0x400000, v1
; GFX10-NEXT: v_cmp_u_f32_e64 s12, v17, v17
; GFX10-NEXT: v_add3_u32 v66, v66, v17, 0x7fff
-; GFX10-NEXT: v_and_or_b32 v17, v17, s23, 0x400000
+; GFX10-NEXT: v_or_b32_e32 v17, 0x400000, v17
; GFX10-NEXT: v_cmp_u_f32_e64 s22, v0, v0
; GFX10-NEXT: v_add3_u32 v68, v68, v0, 0x7fff
-; GFX10-NEXT: v_and_or_b32 v0, v0, s23, 0x400000
-; GFX10-NEXT: v_and_or_b32 v26, v33, s23, 0x400000
-; GFX10-NEXT: v_and_or_b32 v28, v14, s23, 0x400000
-; GFX10-NEXT: v_and_or_b32 v30, v35, s23, 0x400000
-; GFX10-NEXT: v_and_or_b32 v36, v13, s23, 0x400000
+; GFX10-NEXT: v_or_b32_e32 v0, 0x400000, v0
+; GFX10-NEXT: v_or_b32_e32 v26, 0x400000, v33
+; GFX10-NEXT: v_or_b32_e32 v28, 0x400000, v14
+; GFX10-NEXT: v_or_b32_e32 v30, 0x400000, v35
+; GFX10-NEXT: v_or_b32_e32 v36, 0x400000, v13
; GFX10-NEXT: v_cmp_u_f32_e32 vcc_lo, v33, v33
; GFX10-NEXT: v_add3_u32 v16, v16, v33, 0x7fff
; GFX10-NEXT: v_bfe_u32 v33, v51, 16, 1
@@ -21158,12 +20683,12 @@ define <32 x bfloat> @v_minnum_v32bf16(<32 x bfloat> %a, <32 x bfloat> %b) {
; GFX10-NEXT: v_cndmask_b32_e64 v17, v66, v17, s12
; GFX10-NEXT: v_cndmask_b32_e64 v0, v68, v0, s22
; GFX10-NEXT: v_cndmask_b32_e64 v1, v64, v1, s11
-; GFX10-NEXT: v_lshlrev_b32_e32 v32, 16, v15
+; GFX10-NEXT: v_lshlrev_b32_e32 v31, 16, v15
; GFX10-NEXT: v_and_b32_e32 v15, 0xffff0000, v15
-; GFX10-NEXT: v_and_or_b32 v27, v51, s23, 0x400000
+; GFX10-NEXT: v_or_b32_e32 v27, 0x400000, v51
; GFX10-NEXT: v_bfe_u32 v35, v9, 16, 1
; GFX10-NEXT: v_bfe_u32 v38, v25, 16, 1
-; GFX10-NEXT: v_and_or_b32 v67, v24, s23, 0x400000
+; GFX10-NEXT: v_or_b32_e32 v67, 0x400000, v24
; GFX10-NEXT: v_cmp_u_f32_e64 s13, v51, v51
; GFX10-NEXT: v_add3_u32 v33, v33, v51, 0x7fff
; GFX10-NEXT: v_bfe_u32 v51, v7, 16, 1
@@ -21180,51 +20705,51 @@ define <32 x bfloat> @v_minnum_v32bf16(<32 x bfloat> %a, <32 x bfloat> %b) {
; GFX10-NEXT: v_bfe_u32 v36, v3, 16, 1
; GFX10-NEXT: v_cmp_u_f32_e64 s8, v19, v19
; GFX10-NEXT: v_add3_u32 v48, v48, v19, 0x7fff
-; GFX10-NEXT: v_and_or_b32 v19, v19, s23, 0x400000
+; GFX10-NEXT: v_or_b32_e32 v19, 0x400000, v19
; GFX10-NEXT: v_cmp_u_f32_e64 s9, v2, v2
; GFX10-NEXT: v_add3_u32 v52, v52, v2, 0x7fff
-; GFX10-NEXT: v_and_or_b32 v2, v2, s23, 0x400000
+; GFX10-NEXT: v_or_b32_e32 v2, 0x400000, v2
; GFX10-NEXT: v_perm_b32 v0, v0, v17, 0x7060302
; GFX10-NEXT: v_perm_b32 v1, v1, v18, 0x7060302
-; GFX10-NEXT: v_and_or_b32 v34, v9, s23, 0x400000
-; GFX10-NEXT: v_and_or_b32 v50, v25, s23, 0x400000
+; GFX10-NEXT: v_or_b32_e32 v34, 0x400000, v9
+; GFX10-NEXT: v_or_b32_e32 v50, 0x400000, v25
; GFX10-NEXT: v_bfe_u32 v53, v8, 16, 1
; GFX10-NEXT: v_cmp_u_f32_e64 s14, v9, v9
; GFX10-NEXT: v_add3_u32 v9, v35, v9, 0x7fff
-; GFX10-NEXT: v_and_or_b32 v35, v7, s23, 0x400000
+; GFX10-NEXT: v_or_b32_e32 v35, 0x400000, v7
; GFX10-NEXT: v_cmp_u_f32_e64 s15, v25, v25
; GFX10-NEXT: v_add3_u32 v25, v38, v25, 0x7fff
; GFX10-NEXT: v_bfe_u32 v38, v23, 16, 1
; GFX10-NEXT: v_cmp_u_f32_e64 s18, v7, v7
; GFX10-NEXT: v_add3_u32 v7, v51, v7, 0x7fff
-; GFX10-NEXT: v_and_or_b32 v51, v6, s23, 0x400000
+; GFX10-NEXT: v_or_b32_e32 v51, 0x400000, v6
; GFX10-NEXT: v_cmp_u_f32_e64 s20, v6, v6
; GFX10-NEXT: v_add3_u32 v6, v65, v6, 0x7fff
; GFX10-NEXT: v_bfe_u32 v65, v5, 16, 1
; GFX10-NEXT: v_cmp_u_f32_e64 s4, v21, v21
; GFX10-NEXT: v_add3_u32 v26, v26, v21, 0x7fff
-; GFX10-NEXT: v_and_or_b32 v21, v21, s23, 0x400000
+; GFX10-NEXT: v_or_b32_e32 v21, 0x400000, v21
; GFX10-NEXT: v_cmp_u_f32_e64 s5, v4, v4
; GFX10-NEXT: v_add3_u32 v28, v28, v4, 0x7fff
-; GFX10-NEXT: v_and_or_b32 v4, v4, s23, 0x400000
+; GFX10-NEXT: v_or_b32_e32 v4, 0x400000, v4
; GFX10-NEXT: v_cmp_u_f32_e64 s6, v20, v20
; GFX10-NEXT: v_add3_u32 v30, v30, v20, 0x7fff
-; GFX10-NEXT: v_and_or_b32 v20, v20, s23, 0x400000
+; GFX10-NEXT: v_or_b32_e32 v20, 0x400000, v20
; GFX10-NEXT: v_cmp_u_f32_e64 s7, v3, v3
; GFX10-NEXT: v_add3_u32 v36, v36, v3, 0x7fff
-; GFX10-NEXT: v_and_or_b32 v3, v3, s23, 0x400000
+; GFX10-NEXT: v_or_b32_e32 v3, 0x400000, v3
; GFX10-NEXT: v_cndmask_b32_e64 v19, v48, v19, s8
; GFX10-NEXT: v_cndmask_b32_e64 v2, v52, v2, s9
-; GFX10-NEXT: v_and_or_b32 v55, v8, s23, 0x400000
+; GFX10-NEXT: v_or_b32_e32 v55, 0x400000, v8
; GFX10-NEXT: v_cmp_u_f32_e64 s16, v8, v8
; GFX10-NEXT: v_add3_u32 v8, v53, v8, 0x7fff
-; GFX10-NEXT: v_and_or_b32 v53, v23, s23, 0x400000
+; GFX10-NEXT: v_or_b32_e32 v53, 0x400000, v23
; GFX10-NEXT: v_cmp_u_f32_e64 s19, v23, v23
; GFX10-NEXT: v_add3_u32 v23, v38, v23, 0x7fff
; GFX10-NEXT: v_bfe_u32 v38, v22, 16, 1
; GFX10-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
; GFX10-NEXT: v_add3_u32 v65, v65, v5, 0x7fff
-; GFX10-NEXT: v_and_or_b32 v5, v5, s23, 0x400000
+; GFX10-NEXT: v_or_b32_e32 v5, 0x400000, v5
; GFX10-NEXT: v_cndmask_b32_e64 v21, v26, v21, s4
; GFX10-NEXT: v_cndmask_b32_e64 v4, v28, v4, s5
; GFX10-NEXT: v_cndmask_b32_e64 v20, v30, v20, s6
@@ -21232,7 +20757,7 @@ define <32 x bfloat> @v_minnum_v32bf16(<32 x bfloat> %a, <32 x bfloat> %b) {
; GFX10-NEXT: v_perm_b32 v2, v2, v19, 0x7060302
; GFX10-NEXT: v_cmp_u_f32_e64 s21, v22, v22
; GFX10-NEXT: v_add3_u32 v38, v38, v22, 0x7fff
-; GFX10-NEXT: v_and_or_b32 v22, v22, s23, 0x400000
+; GFX10-NEXT: v_or_b32_e32 v22, 0x400000, v22
; GFX10-NEXT: v_cndmask_b32_e32 v5, v65, v5, vcc_lo
; GFX10-NEXT: v_perm_b32 v3, v3, v20, 0x7060302
; GFX10-NEXT: v_perm_b32 v4, v4, v21, 0x7060302
@@ -21256,14 +20781,14 @@ define <32 x bfloat> @v_minnum_v32bf16(<32 x bfloat> %a, <32 x bfloat> %b) {
; GFX10-NEXT: v_perm_b32 v13, v13, v29, 0x7060302
; GFX10-NEXT: v_perm_b32 v14, v14, v16, 0x7060302
; GFX10-NEXT: s_waitcnt vmcnt(0)
-; GFX10-NEXT: v_lshlrev_b32_e32 v17, 16, v31
-; GFX10-NEXT: v_and_b32_e32 v18, 0xffff0000, v31
-; GFX10-NEXT: v_min_f32_e32 v17, v32, v17
+; GFX10-NEXT: v_lshlrev_b32_e32 v17, 16, v32
+; GFX10-NEXT: v_and_b32_e32 v18, 0xffff0000, v32
+; GFX10-NEXT: v_min_f32_e32 v17, v31, v17
; GFX10-NEXT: v_min_f32_e32 v15, v15, v18
; GFX10-NEXT: v_bfe_u32 v18, v17, 16, 1
; GFX10-NEXT: v_bfe_u32 v19, v15, 16, 1
-; GFX10-NEXT: v_and_or_b32 v20, v17, s23, 0x400000
-; GFX10-NEXT: v_and_or_b32 v21, v15, s23, 0x400000
+; GFX10-NEXT: v_or_b32_e32 v20, 0x400000, v17
+; GFX10-NEXT: v_or_b32_e32 v21, 0x400000, v15
; GFX10-NEXT: v_cmp_u_f32_e32 vcc_lo, v17, v17
; GFX10-NEXT: v_cmp_u_f32_e64 s4, v15, v15
; GFX10-NEXT: v_add3_u32 v17, v18, v17, 0x7fff
@@ -21276,212 +20801,219 @@ define <32 x bfloat> @v_minnum_v32bf16(<32 x bfloat> %a, <32 x bfloat> %b) {
; GFX11-LABEL: v_minnum_v32bf16:
; GFX11: ; %bb.0:
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: scratch_load_b32 v31, off, s32
+; GFX11-NEXT: scratch_load_b32 v32, off, s32
+; GFX11-NEXT: v_lshlrev_b32_e32 v67, 16, v21
+; GFX11-NEXT: v_lshlrev_b32_e32 v68, 16, v5
+; GFX11-NEXT: v_and_b32_e32 v21, 0xffff0000, v21
+; GFX11-NEXT: v_and_b32_e32 v5, 0xffff0000, v5
; GFX11-NEXT: v_lshlrev_b32_e32 v83, 16, v17
; GFX11-NEXT: v_lshlrev_b32_e32 v84, 16, v1
; GFX11-NEXT: v_and_b32_e32 v17, 0xffff0000, v17
; GFX11-NEXT: v_and_b32_e32 v1, 0xffff0000, v1
+; GFX11-NEXT: v_lshlrev_b32_e32 v49, 16, v26
+; GFX11-NEXT: v_dual_min_f32 v5, v5, v21 :: v_dual_and_b32 v26, 0xffff0000, v26
; GFX11-NEXT: v_lshlrev_b32_e32 v53, 16, v24
-; GFX11-NEXT: v_and_b32_e32 v24, 0xffff0000, v24
-; GFX11-NEXT: s_mov_b32 s0, 0xff800000
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3)
-; GFX11-NEXT: v_dual_min_f32 v1, v1, v17 :: v_dual_lshlrev_b32 v64, 16, v7
-; GFX11-NEXT: v_and_b32_e32 v7, 0xffff0000, v7
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_1) | instid1(VALU_DEP_4)
+; GFX11-NEXT: v_dual_min_f32 v1, v1, v17 :: v_dual_and_b32 v24, 0xffff0000, v24
+; GFX11-NEXT: v_lshlrev_b32_e32 v71, 16, v19
+; GFX11-NEXT: v_bfe_u32 v103, v5, 16, 1
+; GFX11-NEXT: v_and_b32_e32 v19, 0xffff0000, v19
; GFX11-NEXT: v_lshlrev_b32_e32 v81, 16, v18
-; GFX11-NEXT: v_lshlrev_b32_e32 v85, 16, v16
-; GFX11-NEXT: v_lshlrev_b32_e32 v86, 16, v0
; GFX11-NEXT: v_bfe_u32 v135, v1, 16, 1
-; GFX11-NEXT: v_and_b32_e32 v16, 0xffff0000, v16
-; GFX11-NEXT: v_and_b32_e32 v0, 0xffff0000, v0
-; GFX11-NEXT: v_lshlrev_b32_e32 v55, 16, v23
-; GFX11-NEXT: v_and_b32_e32 v23, 0xffff0000, v23
-; GFX11-NEXT: v_and_or_b32 v144, v1, s0, 0x400000
+; GFX11-NEXT: v_or_b32_e32 v112, 0x400000, v5
+; GFX11-NEXT: v_or_b32_e32 v144, 0x400000, v1
+; GFX11-NEXT: v_add3_u32 v103, v103, v5, 0x7fff
+; GFX11-NEXT: v_lshlrev_b32_e32 v80, 16, v3
+; GFX11-NEXT: v_and_b32_e32 v3, 0xffff0000, v3
; GFX11-NEXT: v_add3_u32 v135, v135, v1, 0x7fff
; GFX11-NEXT: v_lshlrev_b32_e32 v82, 16, v2
-; GFX11-NEXT: v_lshlrev_b32_e32 v54, 16, v8
-; GFX11-NEXT: v_dual_min_f32 v17, v86, v85 :: v_dual_and_b32 v8, 0xffff0000, v8
-; GFX11-NEXT: v_dual_min_f32 v7, v7, v23 :: v_dual_lshlrev_b32 v36, 16, v13
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_2) | instid1(VALU_DEP_4)
-; GFX11-NEXT: v_dual_min_f32 v8, v8, v24 :: v_dual_lshlrev_b32 v39, 16, v27
-; GFX11-NEXT: v_dual_min_f32 v0, v0, v16 :: v_dual_lshlrev_b32 v49, 16, v26
-; GFX11-NEXT: v_min_f32_e32 v24, v64, v55
-; GFX11-NEXT: v_bfe_u32 v87, v7, 16, 1
+; GFX11-NEXT: v_lshlrev_b32_e32 v52, 16, v9
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_1) | instid1(VALU_DEP_4)
+; GFX11-NEXT: v_dual_min_f32 v3, v3, v19 :: v_dual_lshlrev_b32 v54, 16, v8
+; GFX11-NEXT: v_lshlrev_b32_e32 v85, 16, v16
+; GFX11-NEXT: v_dual_min_f32 v19, v82, v81 :: v_dual_lshlrev_b32 v64, 16, v7
+; GFX11-NEXT: v_and_b32_e32 v7, 0xffff0000, v7
; GFX11-NEXT: v_lshlrev_b32_e32 v65, 16, v22
; GFX11-NEXT: v_lshlrev_b32_e32 v66, 16, v6
-; GFX11-NEXT: v_and_b32_e32 v22, 0xffff0000, v22
-; GFX11-NEXT: v_bfe_u32 v85, v24, 16, 1
-; GFX11-NEXT: v_lshlrev_b32_e32 v67, 16, v21
-; GFX11-NEXT: v_lshlrev_b32_e32 v68, 16, v5
-; GFX11-NEXT: v_and_b32_e32 v21, 0xffff0000, v21
-; GFX11-NEXT: v_and_b32_e32 v5, 0xffff0000, v5
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4)
+; GFX11-NEXT: v_bfe_u32 v129, v19, 16, 1
+; GFX11-NEXT: v_or_b32_e32 v130, 0x400000, v19
+; GFX11-NEXT: v_lshlrev_b32_e32 v48, 16, v11
+; GFX11-NEXT: v_bfe_u32 v119, v3, 16, 1
+; GFX11-NEXT: v_lshlrev_b32_e32 v51, 16, v25
+; GFX11-NEXT: v_add3_u32 v129, v129, v19, 0x7fff
+; GFX11-NEXT: v_lshlrev_b32_e32 v86, 16, v0
+; GFX11-NEXT: v_and_b32_e32 v8, 0xffff0000, v8
+; GFX11-NEXT: v_and_b32_e32 v0, 0xffff0000, v0
+; GFX11-NEXT: v_and_b32_e32 v9, 0xffff0000, v9
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
+; GFX11-NEXT: v_dual_min_f32 v17, v86, v85 :: v_dual_and_b32 v2, 0xffff0000, v2
+; GFX11-NEXT: v_dual_min_f32 v8, v8, v24 :: v_dual_lshlrev_b32 v39, 16, v27
+; GFX11-NEXT: v_or_b32_e32 v128, 0x400000, v3
+; GFX11-NEXT: v_add3_u32 v119, v119, v3, 0x7fff
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4)
+; GFX11-NEXT: v_bfe_u32 v145, v17, 16, 1
+; GFX11-NEXT: v_or_b32_e32 v146, 0x400000, v17
+; GFX11-NEXT: v_and_b32_e32 v18, 0xffff0000, v18
+; GFX11-NEXT: v_and_b32_e32 v25, 0xffff0000, v25
; GFX11-NEXT: v_lshlrev_b32_e32 v70, 16, v4
-; GFX11-NEXT: v_and_or_b32 v86, v24, s0, 0x400000
-; GFX11-NEXT: v_and_or_b32 v96, v7, s0, 0x400000
+; GFX11-NEXT: v_add3_u32 v145, v145, v17, 0x7fff
+; GFX11-NEXT: v_and_b32_e32 v16, 0xffff0000, v16
+; GFX11-NEXT: v_lshlrev_b32_e32 v55, 16, v23
+; GFX11-NEXT: v_and_b32_e32 v23, 0xffff0000, v23
+; GFX11-NEXT: v_lshlrev_b32_e32 v50, 16, v10
+; GFX11-NEXT: v_min_f32_e32 v2, v2, v18
+; GFX11-NEXT: v_min_f32_e32 v0, v0, v16
+; GFX11-NEXT: v_dual_min_f32 v24, v64, v55 :: v_dual_lshlrev_b32 v37, 16, v28
+; GFX11-NEXT: v_min_f32_e32 v7, v7, v23
+; GFX11-NEXT: v_dual_min_f32 v23, v66, v65 :: v_dual_min_f32 v18, v84, v83
+; GFX11-NEXT: v_dual_min_f32 v9, v9, v25 :: v_dual_and_b32 v28, 0xffff0000, v28
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX11-NEXT: v_bfe_u32 v85, v24, 16, 1
+; GFX11-NEXT: v_bfe_u32 v97, v23, 16, 1
+; GFX11-NEXT: v_or_b32_e32 v86, 0x400000, v24
+; GFX11-NEXT: v_or_b32_e32 v98, 0x400000, v23
+; GFX11-NEXT: v_bfe_u32 v87, v7, 16, 1
; GFX11-NEXT: v_add3_u32 v85, v85, v24, 0x7fff
; GFX11-NEXT: v_lshlrev_b32_e32 v69, 16, v20
-; GFX11-NEXT: v_add3_u32 v87, v87, v7, 0x7fff
-; GFX11-NEXT: v_and_b32_e32 v6, 0xffff0000, v6
-; GFX11-NEXT: v_dual_min_f32 v23, v66, v65 :: v_dual_lshlrev_b32 v48, 16, v11
-; GFX11-NEXT: v_and_b32_e32 v27, 0xffff0000, v27
-; GFX11-NEXT: v_dual_min_f32 v5, v5, v21 :: v_dual_lshlrev_b32 v50, 16, v10
-; GFX11-NEXT: v_dual_min_f32 v21, v70, v69 :: v_dual_and_b32 v26, 0xffff0000, v26
-; GFX11-NEXT: v_and_b32_e32 v18, 0xffff0000, v18
-; GFX11-NEXT: v_and_b32_e32 v2, 0xffff0000, v2
-; GFX11-NEXT: v_min_f32_e32 v6, v6, v22
-; GFX11-NEXT: v_lshlrev_b32_e32 v52, 16, v9
-; GFX11-NEXT: v_and_b32_e32 v9, 0xffff0000, v9
-; GFX11-NEXT: v_bfe_u32 v97, v23, 16, 1
-; GFX11-NEXT: v_min_f32_e32 v2, v2, v18
-; GFX11-NEXT: v_min_f32_e32 v18, v84, v83
-; GFX11-NEXT: v_bfe_u32 v83, v8, 16, 1
-; GFX11-NEXT: v_bfe_u32 v99, v6, 16, 1
-; GFX11-NEXT: v_bfe_u32 v103, v5, 16, 1
-; GFX11-NEXT: v_bfe_u32 v113, v21, 16, 1
-; GFX11-NEXT: v_lshlrev_b32_e32 v71, 16, v19
-; GFX11-NEXT: v_and_or_b32 v84, v8, s0, 0x400000
-; GFX11-NEXT: v_and_or_b32 v98, v23, s0, 0x400000
-; GFX11-NEXT: v_and_or_b32 v100, v6, s0, 0x400000
-; GFX11-NEXT: v_and_or_b32 v112, v5, s0, 0x400000
-; GFX11-NEXT: v_and_or_b32 v114, v21, s0, 0x400000
-; GFX11-NEXT: v_add3_u32 v83, v83, v8, 0x7fff
-; GFX11-NEXT: v_and_b32_e32 v19, 0xffff0000, v19
-; GFX11-NEXT: v_add3_u32 v97, v97, v23, 0x7fff
; GFX11-NEXT: v_and_b32_e32 v20, 0xffff0000, v20
-; GFX11-NEXT: v_add3_u32 v99, v99, v6, 0x7fff
-; GFX11-NEXT: v_add3_u32 v103, v103, v5, 0x7fff
-; GFX11-NEXT: v_lshlrev_b32_e32 v80, 16, v3
-; GFX11-NEXT: v_and_b32_e32 v3, 0xffff0000, v3
-; GFX11-NEXT: v_add3_u32 v113, v113, v21, 0x7fff
+; GFX11-NEXT: v_add3_u32 v97, v97, v23, 0x7fff
; GFX11-NEXT: v_and_b32_e32 v4, 0xffff0000, v4
-; GFX11-NEXT: v_lshlrev_b32_e32 v38, 16, v12
; GFX11-NEXT: v_and_b32_e32 v11, 0xffff0000, v11
-; GFX11-NEXT: v_dual_min_f32 v3, v3, v19 :: v_dual_and_b32 v10, 0xffff0000, v10
-; GFX11-NEXT: v_dual_min_f32 v22, v68, v67 :: v_dual_lshlrev_b32 v51, 16, v25
-; GFX11-NEXT: v_lshlrev_b32_e32 v37, 16, v28
-; GFX11-NEXT: v_dual_min_f32 v4, v4, v20 :: v_dual_and_b32 v25, 0xffff0000, v25
+; GFX11-NEXT: v_or_b32_e32 v96, 0x400000, v7
+; GFX11-NEXT: v_add3_u32 v87, v87, v7, 0x7fff
+; GFX11-NEXT: v_and_b32_e32 v6, 0xffff0000, v6
+; GFX11-NEXT: v_min_f32_e32 v4, v4, v20
; GFX11-NEXT: v_min_f32_e32 v20, v80, v71
-; GFX11-NEXT: v_dual_min_f32 v19, v82, v81 :: v_dual_and_b32 v28, 0xffff0000, v28
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3)
-; GFX11-NEXT: v_dual_min_f32 v9, v9, v25 :: v_dual_and_b32 v12, 0xffff0000, v12
-; GFX11-NEXT: v_min_f32_e32 v25, v54, v53
+; GFX11-NEXT: v_bfe_u32 v71, v9, 16, 1
+; GFX11-NEXT: v_or_b32_e32 v80, 0x400000, v9
; GFX11-NEXT: v_lshlrev_b32_e32 v35, 16, v29
-; GFX11-NEXT: v_and_b32_e32 v29, 0xffff0000, v29
-; GFX11-NEXT: v_dual_min_f32 v10, v10, v26 :: v_dual_and_b32 v13, 0xffff0000, v13
-; GFX11-NEXT: v_dual_min_f32 v12, v12, v28 :: v_dual_lshlrev_b32 v33, 16, v30
-; GFX11-NEXT: v_min_f32_e32 v28, v48, v39
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3)
-; GFX11-NEXT: v_dual_min_f32 v13, v13, v29 :: v_dual_lshlrev_b32 v34, 16, v14
-; GFX11-NEXT: v_dual_min_f32 v11, v11, v27 :: v_dual_and_b32 v14, 0xffff0000, v14
-; GFX11-NEXT: v_dual_min_f32 v27, v50, v49 :: v_dual_min_f32 v26, v52, v51
-; GFX11-NEXT: v_dual_min_f32 v29, v38, v37 :: v_dual_and_b32 v30, 0xffff0000, v30
-; GFX11-NEXT: v_lshlrev_b32_e32 v32, 16, v15
-; GFX11-NEXT: v_and_b32_e32 v15, 0xffff0000, v15
-; GFX11-NEXT: v_bfe_u32 v39, v13, 16, 1
+; GFX11-NEXT: v_dual_min_f32 v21, v70, v69 :: v_dual_and_b32 v10, 0xffff0000, v10
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_1) | instid1(VALU_DEP_3)
+; GFX11-NEXT: v_add3_u32 v71, v71, v9, 0x7fff
+; GFX11-NEXT: v_and_b32_e32 v22, 0xffff0000, v22
+; GFX11-NEXT: v_dual_min_f32 v10, v10, v26 :: v_dual_and_b32 v29, 0xffff0000, v29
+; GFX11-NEXT: v_and_b32_e32 v27, 0xffff0000, v27
+; GFX11-NEXT: v_min_f32_e32 v26, v52, v51
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4)
+; GFX11-NEXT: v_min_f32_e32 v6, v6, v22
+; GFX11-NEXT: v_lshlrev_b32_e32 v36, 16, v13
+; GFX11-NEXT: v_and_b32_e32 v13, 0xffff0000, v13
+; GFX11-NEXT: v_dual_min_f32 v11, v11, v27 :: v_dual_lshlrev_b32 v34, 16, v14
+; GFX11-NEXT: v_dual_min_f32 v22, v68, v67 :: v_dual_lshlrev_b32 v33, 16, v30
+; GFX11-NEXT: v_dual_min_f32 v27, v50, v49 :: v_dual_lshlrev_b32 v38, 16, v12
+; GFX11-NEXT: v_and_b32_e32 v14, 0xffff0000, v14
+; GFX11-NEXT: v_dual_min_f32 v25, v54, v53 :: v_dual_and_b32 v12, 0xffff0000, v12
+; GFX11-NEXT: v_dual_min_f32 v13, v13, v29 :: v_dual_and_b32 v30, 0xffff0000, v30
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_1) | instid1(VALU_DEP_4)
+; GFX11-NEXT: v_min_f32_e32 v29, v38, v37
+; GFX11-NEXT: v_lshlrev_b32_e32 v31, 16, v15
+; GFX11-NEXT: v_dual_min_f32 v12, v12, v28 :: v_dual_and_b32 v15, 0xffff0000, v15
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_3) | instid1(VALU_DEP_4)
; GFX11-NEXT: v_min_f32_e32 v14, v14, v30
+; GFX11-NEXT: v_min_f32_e32 v28, v48, v39
; GFX11-NEXT: v_dual_min_f32 v30, v36, v35 :: v_dual_min_f32 v33, v34, v33
-; GFX11-NEXT: v_and_or_b32 v48, v13, s0, 0x400000
-; GFX11-NEXT: v_bfe_u32 v49, v29, 16, 1
+; GFX11-NEXT: v_bfe_u32 v39, v13, 16, 1
; GFX11-NEXT: v_bfe_u32 v35, v14, 16, 1
-; GFX11-NEXT: v_and_or_b32 v36, v14, s0, 0x400000
+; GFX11-NEXT: v_or_b32_e32 v36, 0x400000, v14
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4)
+; GFX11-NEXT: v_bfe_u32 v37, v30, 16, 1
; GFX11-NEXT: v_bfe_u32 v16, v33, 16, 1
-; GFX11-NEXT: v_and_or_b32 v34, v33, s0, 0x400000
+; GFX11-NEXT: v_or_b32_e32 v34, 0x400000, v33
; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v33, v33
-; GFX11-NEXT: v_bfe_u32 v37, v30, 16, 1
; GFX11-NEXT: v_add3_u32 v35, v35, v14, 0x7fff
+; GFX11-NEXT: v_or_b32_e32 v38, 0x400000, v30
; GFX11-NEXT: v_add3_u32 v16, v16, v33, 0x7fff
-; GFX11-NEXT: v_and_or_b32 v38, v30, s0, 0x400000
-; GFX11-NEXT: v_add3_u32 v39, v39, v13, 0x7fff
; GFX11-NEXT: v_add3_u32 v37, v37, v30, 0x7fff
-; GFX11-NEXT: v_and_or_b32 v50, v29, s0, 0x400000
+; GFX11-NEXT: v_or_b32_e32 v48, 0x400000, v13
+; GFX11-NEXT: v_bfe_u32 v49, v29, 16, 1
+; GFX11-NEXT: v_add3_u32 v39, v39, v13, 0x7fff
; GFX11-NEXT: v_cndmask_b32_e32 v16, v16, v34, vcc_lo
; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v14, v14
+; GFX11-NEXT: v_or_b32_e32 v50, 0x400000, v29
; GFX11-NEXT: v_bfe_u32 v51, v12, 16, 1
; GFX11-NEXT: v_add3_u32 v49, v49, v29, 0x7fff
-; GFX11-NEXT: v_and_or_b32 v52, v12, s0, 0x400000
-; GFX11-NEXT: v_bfe_u32 v53, v28, 16, 1
+; GFX11-NEXT: v_or_b32_e32 v52, 0x400000, v12
; GFX11-NEXT: v_cndmask_b32_e32 v14, v35, v36, vcc_lo
; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v30, v30
+; GFX11-NEXT: v_bfe_u32 v53, v28, 16, 1
; GFX11-NEXT: v_add3_u32 v51, v51, v12, 0x7fff
-; GFX11-NEXT: v_and_or_b32 v54, v28, s0, 0x400000
+; GFX11-NEXT: v_or_b32_e32 v54, 0x400000, v28
; GFX11-NEXT: v_bfe_u32 v55, v11, 16, 1
-; GFX11-NEXT: v_add3_u32 v53, v53, v28, 0x7fff
; GFX11-NEXT: v_cndmask_b32_e32 v30, v37, v38, vcc_lo
; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v13, v13
-; GFX11-NEXT: v_and_or_b32 v64, v11, s0, 0x400000
+; GFX11-NEXT: v_add3_u32 v53, v53, v28, 0x7fff
+; GFX11-NEXT: v_or_b32_e32 v64, 0x400000, v11
; GFX11-NEXT: v_bfe_u32 v65, v27, 16, 1
; GFX11-NEXT: v_add3_u32 v55, v55, v11, 0x7fff
-; GFX11-NEXT: v_and_or_b32 v66, v27, s0, 0x400000
; GFX11-NEXT: v_cndmask_b32_e32 v13, v39, v48, vcc_lo
; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v29, v29
+; GFX11-NEXT: v_or_b32_e32 v66, 0x400000, v27
; GFX11-NEXT: v_bfe_u32 v67, v10, 16, 1
; GFX11-NEXT: v_add3_u32 v65, v65, v27, 0x7fff
-; GFX11-NEXT: v_and_or_b32 v68, v10, s0, 0x400000
-; GFX11-NEXT: v_bfe_u32 v69, v26, 16, 1
+; GFX11-NEXT: v_or_b32_e32 v68, 0x400000, v10
; GFX11-NEXT: v_cndmask_b32_e32 v29, v49, v50, vcc_lo
; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v12, v12
+; GFX11-NEXT: v_bfe_u32 v69, v26, 16, 1
; GFX11-NEXT: v_add3_u32 v67, v67, v10, 0x7fff
-; GFX11-NEXT: v_and_or_b32 v70, v26, s0, 0x400000
-; GFX11-NEXT: v_bfe_u32 v71, v9, 16, 1
-; GFX11-NEXT: v_add3_u32 v69, v69, v26, 0x7fff
+; GFX11-NEXT: v_or_b32_e32 v70, 0x400000, v26
+; GFX11-NEXT: v_bfe_u32 v81, v25, 16, 1
; GFX11-NEXT: v_cndmask_b32_e32 v12, v51, v52, vcc_lo
; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v28, v28
-; GFX11-NEXT: v_and_or_b32 v80, v9, s0, 0x400000
-; GFX11-NEXT: v_bfe_u32 v81, v25, 16, 1
-; GFX11-NEXT: v_add3_u32 v71, v71, v9, 0x7fff
-; GFX11-NEXT: v_and_or_b32 v82, v25, s0, 0x400000
+; GFX11-NEXT: v_add3_u32 v69, v69, v26, 0x7fff
+; GFX11-NEXT: v_or_b32_e32 v82, 0x400000, v25
+; GFX11-NEXT: v_bfe_u32 v83, v8, 16, 1
+; GFX11-NEXT: v_add3_u32 v81, v81, v25, 0x7fff
; GFX11-NEXT: v_cndmask_b32_e32 v28, v53, v54, vcc_lo
; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v11, v11
-; GFX11-NEXT: v_add3_u32 v81, v81, v25, 0x7fff
-; GFX11-NEXT: v_bfe_u32 v101, v22, 16, 1
-; GFX11-NEXT: v_and_or_b32 v102, v22, s0, 0x400000
-; GFX11-NEXT: v_bfe_u32 v115, v4, 16, 1
+; GFX11-NEXT: v_or_b32_e32 v84, 0x400000, v8
+; GFX11-NEXT: v_add3_u32 v83, v83, v8, 0x7fff
+; GFX11-NEXT: v_bfe_u32 v99, v6, 16, 1
+; GFX11-NEXT: v_or_b32_e32 v100, 0x400000, v6
; GFX11-NEXT: v_cndmask_b32_e32 v11, v55, v64, vcc_lo
; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v27, v27
-; GFX11-NEXT: v_add3_u32 v101, v101, v22, 0x7fff
-; GFX11-NEXT: v_and_or_b32 v116, v4, s0, 0x400000
-; GFX11-NEXT: v_bfe_u32 v117, v20, 16, 1
-; GFX11-NEXT: v_add3_u32 v115, v115, v4, 0x7fff
+; GFX11-NEXT: v_bfe_u32 v101, v22, 16, 1
+; GFX11-NEXT: v_add3_u32 v99, v99, v6, 0x7fff
+; GFX11-NEXT: v_or_b32_e32 v102, 0x400000, v22
+; GFX11-NEXT: v_bfe_u32 v113, v21, 16, 1
; GFX11-NEXT: v_cndmask_b32_e32 v27, v65, v66, vcc_lo
; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v10, v10
-; GFX11-NEXT: v_and_or_b32 v118, v20, s0, 0x400000
-; GFX11-NEXT: v_bfe_u32 v129, v19, 16, 1
-; GFX11-NEXT: v_add3_u32 v117, v117, v20, 0x7fff
-; GFX11-NEXT: v_and_or_b32 v130, v19, s0, 0x400000
+; GFX11-NEXT: v_add3_u32 v101, v101, v22, 0x7fff
+; GFX11-NEXT: v_or_b32_e32 v114, 0x400000, v21
+; GFX11-NEXT: v_bfe_u32 v115, v4, 16, 1
+; GFX11-NEXT: v_add3_u32 v113, v113, v21, 0x7fff
; GFX11-NEXT: v_cndmask_b32_e32 v10, v67, v68, vcc_lo
; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v26, v26
-; GFX11-NEXT: v_bfe_u32 v133, v18, 16, 1
-; GFX11-NEXT: v_add3_u32 v129, v129, v19, 0x7fff
-; GFX11-NEXT: v_and_or_b32 v134, v18, s0, 0x400000
-; GFX11-NEXT: v_bfe_u32 v145, v17, 16, 1
+; GFX11-NEXT: v_or_b32_e32 v116, 0x400000, v4
+; GFX11-NEXT: v_bfe_u32 v117, v20, 16, 1
+; GFX11-NEXT: v_add3_u32 v115, v115, v4, 0x7fff
+; GFX11-NEXT: v_or_b32_e32 v118, 0x400000, v20
; GFX11-NEXT: v_cndmask_b32_e32 v26, v69, v70, vcc_lo
; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v9, v9
-; GFX11-NEXT: v_add3_u32 v133, v133, v18, 0x7fff
-; GFX11-NEXT: v_and_or_b32 v146, v17, s0, 0x400000
+; GFX11-NEXT: v_add3_u32 v117, v117, v20, 0x7fff
+; GFX11-NEXT: v_bfe_u32 v133, v18, 16, 1
+; GFX11-NEXT: v_or_b32_e32 v134, 0x400000, v18
; GFX11-NEXT: v_bfe_u32 v147, v0, 16, 1
-; GFX11-NEXT: v_add3_u32 v145, v145, v17, 0x7fff
; GFX11-NEXT: v_cndmask_b32_e32 v9, v71, v80, vcc_lo
; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v25, v25
-; GFX11-NEXT: v_bfe_u32 v131, v2, 16, 1
-; GFX11-NEXT: v_and_or_b32 v33, v0, s0, 0x400000
+; GFX11-NEXT: v_add3_u32 v133, v133, v18, 0x7fff
+; GFX11-NEXT: v_or_b32_e32 v33, 0x400000, v0
; GFX11-NEXT: v_add3_u32 v147, v147, v0, 0x7fff
-; GFX11-NEXT: v_and_or_b32 v132, v2, s0, 0x400000
+; GFX11-NEXT: v_bfe_u32 v131, v2, 16, 1
; GFX11-NEXT: v_cndmask_b32_e32 v25, v81, v82, vcc_lo
; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v8, v8
-; GFX11-NEXT: v_add3_u32 v131, v131, v2, 0x7fff
-; GFX11-NEXT: v_bfe_u32 v119, v3, 16, 1
-; GFX11-NEXT: v_and_or_b32 v128, v3, s0, 0x400000
+; GFX11-NEXT: v_or_b32_e32 v132, 0x400000, v2
; GFX11-NEXT: v_perm_b32 v9, v9, v26, 0x7060302
+; GFX11-NEXT: v_add3_u32 v131, v131, v2, 0x7fff
+; GFX11-NEXT: v_perm_b32 v10, v10, v27, 0x7060302
; GFX11-NEXT: v_cndmask_b32_e32 v8, v83, v84, vcc_lo
; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v24, v24
-; GFX11-NEXT: v_add3_u32 v119, v119, v3, 0x7fff
-; GFX11-NEXT: v_perm_b32 v10, v10, v27, 0x7060302
; GFX11-NEXT: v_perm_b32 v11, v11, v28, 0x7060302
+; GFX11-NEXT: v_perm_b32 v12, v12, v29, 0x7060302
+; GFX11-NEXT: v_perm_b32 v13, v13, v30, 0x7060302
; GFX11-NEXT: v_perm_b32 v8, v8, v25, 0x7060302
; GFX11-NEXT: v_cndmask_b32_e32 v24, v85, v86, vcc_lo
; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v7, v7
-; GFX11-NEXT: v_perm_b32 v12, v12, v29, 0x7060302
-; GFX11-NEXT: v_perm_b32 v13, v13, v30, 0x7060302
; GFX11-NEXT: v_perm_b32 v14, v14, v16, 0x7060302
; GFX11-NEXT: v_cndmask_b32_e32 v7, v87, v96, vcc_lo
; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v23, v23
@@ -21520,22 +21052,21 @@ define <32 x bfloat> @v_minnum_v32bf16(<32 x bfloat> %a, <32 x bfloat> %b) {
; GFX11-NEXT: v_perm_b32 v0, v0, v17, 0x7060302
; GFX11-NEXT: v_cndmask_b32_e32 v2, v131, v132, vcc_lo
; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX11-NEXT: v_perm_b32 v2, v2, v19, 0x7060302
; GFX11-NEXT: v_cndmask_b32_e32 v3, v119, v128, vcc_lo
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_4) | instid1(VALU_DEP_3)
; GFX11-NEXT: v_perm_b32 v3, v3, v20, 0x7060302
; GFX11-NEXT: s_waitcnt vmcnt(0)
-; GFX11-NEXT: v_lshlrev_b32_e32 v17, 16, v31
-; GFX11-NEXT: v_and_b32_e32 v18, 0xffff0000, v31
-; GFX11-NEXT: v_perm_b32 v2, v2, v19, 0x7060302
-; GFX11-NEXT: v_min_f32_e32 v17, v32, v17
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-NEXT: v_lshlrev_b32_e32 v17, 16, v32
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-NEXT: v_dual_min_f32 v17, v31, v17 :: v_dual_and_b32 v18, 0xffff0000, v32
; GFX11-NEXT: v_min_f32_e32 v15, v15, v18
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
; GFX11-NEXT: v_bfe_u32 v18, v17, 16, 1
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2)
; GFX11-NEXT: v_bfe_u32 v19, v15, 16, 1
-; GFX11-NEXT: v_and_or_b32 v20, v17, s0, 0x400000
+; GFX11-NEXT: v_or_b32_e32 v20, 0x400000, v17
; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v17, v17
-; GFX11-NEXT: v_and_or_b32 v21, v15, s0, 0x400000
+; GFX11-NEXT: v_or_b32_e32 v21, 0x400000, v15
; GFX11-NEXT: v_add3_u32 v18, v18, v17, 0x7fff
; GFX11-NEXT: v_add3_u32 v19, v19, v15, 0x7fff
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_3)
@@ -21594,8 +21125,7 @@ define bfloat @v_maxnum_bf16(bfloat %a, bfloat %b) {
; GFX8-NEXT: v_bfe_u32 v1, v0, 16, 1
; GFX8-NEXT: v_add_u32_e32 v1, vcc, v1, v0
; GFX8-NEXT: v_add_u32_e32 v1, vcc, 0x7fff, v1
-; GFX8-NEXT: v_and_b32_e32 v2, 0xff800000, v0
-; GFX8-NEXT: v_or_b32_e32 v2, 0x400000, v2
+; GFX8-NEXT: v_or_b32_e32 v2, 0x400000, v0
; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v0, v0
; GFX8-NEXT: v_cndmask_b32_e32 v0, v1, v2, vcc
; GFX8-NEXT: v_lshrrev_b32_e32 v0, 16, v0
@@ -21609,9 +21139,8 @@ define bfloat @v_maxnum_bf16(bfloat %a, bfloat %b) {
; GFX9-NEXT: v_max_f32_e32 v0, v0, v1
; GFX9-NEXT: v_bfe_u32 v1, v0, 16, 1
; GFX9-NEXT: s_movk_i32 s4, 0x7fff
-; GFX9-NEXT: v_and_b32_e32 v2, 0xff800000, v0
; GFX9-NEXT: v_add3_u32 v1, v1, v0, s4
-; GFX9-NEXT: v_or_b32_e32 v2, 0x400000, v2
+; GFX9-NEXT: v_or_b32_e32 v2, 0x400000, v0
; GFX9-NEXT: v_cmp_u_f32_e32 vcc, v0, v0
; GFX9-NEXT: v_cndmask_b32_e32 v0, v1, v2, vcc
; GFX9-NEXT: v_lshrrev_b32_e32 v0, 16, v0
@@ -21622,10 +21151,9 @@ define bfloat @v_maxnum_bf16(bfloat %a, bfloat %b) {
; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX10-NEXT: v_lshlrev_b32_e32 v1, 16, v1
; GFX10-NEXT: v_lshlrev_b32_e32 v0, 16, v0
-; GFX10-NEXT: s_mov_b32 s4, 0xff800000
; GFX10-NEXT: v_max_f32_e32 v0, v0, v1
; GFX10-NEXT: v_bfe_u32 v1, v0, 16, 1
-; GFX10-NEXT: v_and_or_b32 v2, v0, s4, 0x400000
+; GFX10-NEXT: v_or_b32_e32 v2, 0x400000, v0
; GFX10-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0
; GFX10-NEXT: v_add3_u32 v1, v1, v0, 0x7fff
; GFX10-NEXT: v_cndmask_b32_e32 v0, v1, v2, vcc_lo
@@ -21637,11 +21165,10 @@ define bfloat @v_maxnum_bf16(bfloat %a, bfloat %b) {
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-NEXT: v_lshlrev_b32_e32 v1, 16, v1
; GFX11-NEXT: v_lshlrev_b32_e32 v0, 16, v0
-; GFX11-NEXT: s_mov_b32 s0, 0xff800000
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
; GFX11-NEXT: v_max_f32_e32 v0, v0, v1
; GFX11-NEXT: v_bfe_u32 v1, v0, 16, 1
-; GFX11-NEXT: v_and_or_b32 v2, v0, s0, 0x400000
+; GFX11-NEXT: v_or_b32_e32 v2, 0x400000, v0
; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1)
; GFX11-NEXT: v_add3_u32 v1, v1, v0, 0x7fff
@@ -21707,16 +21234,14 @@ define <2 x bfloat> @v_maxnum_v2bf16(<2 x bfloat> %a, <2 x bfloat> %b) {
; GFX8-NEXT: v_and_b32_e32 v1, 0xffff0000, v1
; GFX8-NEXT: v_and_b32_e32 v0, 0xffff0000, v0
; GFX8-NEXT: v_add_u32_e32 v3, vcc, 0x7fff, v3
-; GFX8-NEXT: v_and_b32_e32 v4, 0xff800000, v2
; GFX8-NEXT: v_max_f32_e32 v0, v0, v1
-; GFX8-NEXT: v_or_b32_e32 v4, 0x400000, v4
+; GFX8-NEXT: v_or_b32_e32 v4, 0x400000, v2
; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v2, v2
; GFX8-NEXT: v_bfe_u32 v1, v0, 16, 1
; GFX8-NEXT: v_cndmask_b32_e32 v2, v3, v4, vcc
; GFX8-NEXT: v_add_u32_e32 v1, vcc, v1, v0
; GFX8-NEXT: v_add_u32_e32 v1, vcc, 0x7fff, v1
-; GFX8-NEXT: v_and_b32_e32 v3, 0xff800000, v0
-; GFX8-NEXT: v_or_b32_e32 v3, 0x400000, v3
+; GFX8-NEXT: v_or_b32_e32 v3, 0x400000, v0
; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v0, v0
; GFX8-NEXT: v_cndmask_b32_e32 v0, v1, v3, vcc
; GFX8-NEXT: v_lshrrev_b32_e32 v0, 16, v0
@@ -21729,20 +21254,18 @@ define <2 x bfloat> @v_maxnum_v2bf16(<2 x bfloat> %a, <2 x bfloat> %b) {
; GFX9-NEXT: v_lshlrev_b32_e32 v2, 16, v1
; GFX9-NEXT: v_lshlrev_b32_e32 v3, 16, v0
; GFX9-NEXT: v_max_f32_e32 v2, v3, v2
-; GFX9-NEXT: v_bfe_u32 v3, v2, 16, 1
-; GFX9-NEXT: s_movk_i32 s4, 0x7fff
-; GFX9-NEXT: v_and_b32_e32 v4, 0xff800000, v2
; GFX9-NEXT: v_and_b32_e32 v1, 0xffff0000, v1
; GFX9-NEXT: v_and_b32_e32 v0, 0xffff0000, v0
+; GFX9-NEXT: v_bfe_u32 v3, v2, 16, 1
+; GFX9-NEXT: s_movk_i32 s4, 0x7fff
+; GFX9-NEXT: v_max_f32_e32 v0, v0, v1
; GFX9-NEXT: v_add3_u32 v3, v3, v2, s4
-; GFX9-NEXT: v_or_b32_e32 v4, 0x400000, v4
+; GFX9-NEXT: v_or_b32_e32 v4, 0x400000, v2
; GFX9-NEXT: v_cmp_u_f32_e32 vcc, v2, v2
-; GFX9-NEXT: v_max_f32_e32 v0, v0, v1
-; GFX9-NEXT: v_cndmask_b32_e32 v2, v3, v4, vcc
; GFX9-NEXT: v_bfe_u32 v1, v0, 16, 1
-; GFX9-NEXT: v_and_b32_e32 v3, 0xff800000, v0
+; GFX9-NEXT: v_cndmask_b32_e32 v2, v3, v4, vcc
; GFX9-NEXT: v_add3_u32 v1, v1, v0, s4
-; GFX9-NEXT: v_or_b32_e32 v3, 0x400000, v3
+; GFX9-NEXT: v_or_b32_e32 v3, 0x400000, v0
; GFX9-NEXT: v_cmp_u_f32_e32 vcc, v0, v0
; GFX9-NEXT: v_cndmask_b32_e32 v0, v1, v3, vcc
; GFX9-NEXT: s_mov_b32 s4, 0x7060302
@@ -21756,14 +21279,13 @@ define <2 x bfloat> @v_maxnum_v2bf16(<2 x bfloat> %a, <2 x bfloat> %b) {
; GFX10-NEXT: v_lshlrev_b32_e32 v3, 16, v0
; GFX10-NEXT: v_and_b32_e32 v1, 0xffff0000, v1
; GFX10-NEXT: v_and_b32_e32 v0, 0xffff0000, v0
-; GFX10-NEXT: s_mov_b32 s4, 0xff800000
; GFX10-NEXT: v_max_f32_e32 v2, v3, v2
; GFX10-NEXT: v_max_f32_e32 v0, v0, v1
; GFX10-NEXT: v_bfe_u32 v1, v2, 16, 1
-; GFX10-NEXT: v_and_or_b32 v4, v2, s4, 0x400000
+; GFX10-NEXT: v_or_b32_e32 v4, 0x400000, v2
; GFX10-NEXT: v_bfe_u32 v3, v0, 16, 1
; GFX10-NEXT: v_cmp_u_f32_e32 vcc_lo, v2, v2
-; GFX10-NEXT: v_and_or_b32 v5, v0, s4, 0x400000
+; GFX10-NEXT: v_or_b32_e32 v5, 0x400000, v0
; GFX10-NEXT: v_add3_u32 v1, v1, v2, 0x7fff
; GFX10-NEXT: v_add3_u32 v3, v3, v0, 0x7fff
; GFX10-NEXT: v_cndmask_b32_e32 v1, v1, v4, vcc_lo
@@ -21779,16 +21301,15 @@ define <2 x bfloat> @v_maxnum_v2bf16(<2 x bfloat> %a, <2 x bfloat> %b) {
; GFX11-NEXT: v_and_b32_e32 v1, 0xffff0000, v1
; GFX11-NEXT: v_lshlrev_b32_e32 v3, 16, v0
; GFX11-NEXT: v_and_b32_e32 v0, 0xffff0000, v0
-; GFX11-NEXT: s_mov_b32 s0, 0xff800000
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_3)
; GFX11-NEXT: v_max_f32_e32 v0, v0, v1
; GFX11-NEXT: v_max_f32_e32 v2, v3, v2
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
; GFX11-NEXT: v_bfe_u32 v3, v0, 16, 1
; GFX11-NEXT: v_bfe_u32 v1, v2, 16, 1
-; GFX11-NEXT: v_and_or_b32 v4, v2, s0, 0x400000
+; GFX11-NEXT: v_or_b32_e32 v4, 0x400000, v2
; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v2, v2
-; GFX11-NEXT: v_and_or_b32 v5, v0, s0, 0x400000
+; GFX11-NEXT: v_or_b32_e32 v5, 0x400000, v0
; GFX11-NEXT: v_add3_u32 v3, v3, v0, 0x7fff
; GFX11-NEXT: v_add3_u32 v1, v1, v2, 0x7fff
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_4)
@@ -21870,8 +21391,7 @@ define <3 x bfloat> @v_maxnum_v3bf16(<3 x bfloat> %a, <3 x bfloat> %b) {
; GFX8-NEXT: v_bfe_u32 v3, v1, 16, 1
; GFX8-NEXT: v_add_u32_e32 v3, vcc, v3, v1
; GFX8-NEXT: v_add_u32_e32 v3, vcc, 0x7fff, v3
-; GFX8-NEXT: v_and_b32_e32 v4, 0xff800000, v1
-; GFX8-NEXT: v_or_b32_e32 v4, 0x400000, v4
+; GFX8-NEXT: v_or_b32_e32 v4, 0x400000, v1
; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v1, v1
; GFX8-NEXT: v_cndmask_b32_e32 v1, v3, v4, vcc
; GFX8-NEXT: v_lshlrev_b32_e32 v3, 16, v2
@@ -21883,16 +21403,14 @@ define <3 x bfloat> @v_maxnum_v3bf16(<3 x bfloat> %a, <3 x bfloat> %b) {
; GFX8-NEXT: v_and_b32_e32 v2, 0xffff0000, v2
; GFX8-NEXT: v_and_b32_e32 v0, 0xffff0000, v0
; GFX8-NEXT: v_add_u32_e32 v4, vcc, s4, v4
-; GFX8-NEXT: v_and_b32_e32 v5, 0xff800000, v3
; GFX8-NEXT: v_max_f32_e32 v0, v0, v2
-; GFX8-NEXT: v_or_b32_e32 v5, 0x400000, v5
+; GFX8-NEXT: v_or_b32_e32 v5, 0x400000, v3
; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v3, v3
; GFX8-NEXT: v_bfe_u32 v2, v0, 16, 1
; GFX8-NEXT: v_cndmask_b32_e32 v3, v4, v5, vcc
; GFX8-NEXT: v_add_u32_e32 v2, vcc, v2, v0
; GFX8-NEXT: v_add_u32_e32 v2, vcc, 0x7fff, v2
-; GFX8-NEXT: v_and_b32_e32 v4, 0xff800000, v0
-; GFX8-NEXT: v_or_b32_e32 v4, 0x400000, v4
+; GFX8-NEXT: v_or_b32_e32 v4, 0x400000, v0
; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v0, v0
; GFX8-NEXT: v_cndmask_b32_e32 v0, v2, v4, vcc
; GFX8-NEXT: v_lshrrev_b32_e32 v0, 16, v0
@@ -21908,27 +21426,24 @@ define <3 x bfloat> @v_maxnum_v3bf16(<3 x bfloat> %a, <3 x bfloat> %b) {
; GFX9-NEXT: v_max_f32_e32 v1, v1, v3
; GFX9-NEXT: v_bfe_u32 v3, v1, 16, 1
; GFX9-NEXT: s_movk_i32 s4, 0x7fff
-; GFX9-NEXT: v_and_b32_e32 v4, 0xff800000, v1
; GFX9-NEXT: v_add3_u32 v3, v3, v1, s4
-; GFX9-NEXT: v_or_b32_e32 v4, 0x400000, v4
+; GFX9-NEXT: v_or_b32_e32 v4, 0x400000, v1
; GFX9-NEXT: v_cmp_u_f32_e32 vcc, v1, v1
; GFX9-NEXT: v_cndmask_b32_e32 v1, v3, v4, vcc
; GFX9-NEXT: v_lshlrev_b32_e32 v3, 16, v2
; GFX9-NEXT: v_lshlrev_b32_e32 v4, 16, v0
; GFX9-NEXT: v_max_f32_e32 v3, v4, v3
-; GFX9-NEXT: v_bfe_u32 v4, v3, 16, 1
-; GFX9-NEXT: v_and_b32_e32 v5, 0xff800000, v3
; GFX9-NEXT: v_and_b32_e32 v2, 0xffff0000, v2
; GFX9-NEXT: v_and_b32_e32 v0, 0xffff0000, v0
+; GFX9-NEXT: v_bfe_u32 v4, v3, 16, 1
+; GFX9-NEXT: v_max_f32_e32 v0, v0, v2
; GFX9-NEXT: v_add3_u32 v4, v4, v3, s4
-; GFX9-NEXT: v_or_b32_e32 v5, 0x400000, v5
+; GFX9-NEXT: v_or_b32_e32 v5, 0x400000, v3
; GFX9-NEXT: v_cmp_u_f32_e32 vcc, v3, v3
-; GFX9-NEXT: v_max_f32_e32 v0, v0, v2
-; GFX9-NEXT: v_cndmask_b32_e32 v3, v4, v5, vcc
; GFX9-NEXT: v_bfe_u32 v2, v0, 16, 1
-; GFX9-NEXT: v_and_b32_e32 v4, 0xff800000, v0
+; GFX9-NEXT: v_cndmask_b32_e32 v3, v4, v5, vcc
; GFX9-NEXT: v_add3_u32 v2, v2, v0, s4
-; GFX9-NEXT: v_or_b32_e32 v4, 0x400000, v4
+; GFX9-NEXT: v_or_b32_e32 v4, 0x400000, v0
; GFX9-NEXT: v_cmp_u_f32_e32 vcc, v0, v0
; GFX9-NEXT: v_cndmask_b32_e32 v0, v2, v4, vcc
; GFX9-NEXT: s_mov_b32 s4, 0x7060302
@@ -21946,18 +21461,17 @@ define <3 x bfloat> @v_maxnum_v3bf16(<3 x bfloat> %a, <3 x bfloat> %b) {
; GFX10-NEXT: v_lshlrev_b32_e32 v3, 16, v3
; GFX10-NEXT: v_lshlrev_b32_e32 v1, 16, v1
; GFX10-NEXT: v_max_f32_e32 v4, v5, v4
-; GFX10-NEXT: s_mov_b32 s4, 0xff800000
; GFX10-NEXT: v_max_f32_e32 v0, v0, v2
; GFX10-NEXT: v_max_f32_e32 v1, v1, v3
; GFX10-NEXT: v_bfe_u32 v2, v4, 16, 1
-; GFX10-NEXT: v_and_or_b32 v7, v4, s4, 0x400000
+; GFX10-NEXT: v_or_b32_e32 v7, 0x400000, v4
; GFX10-NEXT: v_bfe_u32 v5, v0, 16, 1
; GFX10-NEXT: v_cmp_u_f32_e32 vcc_lo, v4, v4
; GFX10-NEXT: v_bfe_u32 v3, v1, 16, 1
; GFX10-NEXT: v_add3_u32 v2, v2, v4, 0x7fff
-; GFX10-NEXT: v_and_or_b32 v8, v0, s4, 0x400000
+; GFX10-NEXT: v_or_b32_e32 v8, 0x400000, v0
; GFX10-NEXT: v_add3_u32 v5, v5, v0, 0x7fff
-; GFX10-NEXT: v_and_or_b32 v6, v1, s4, 0x400000
+; GFX10-NEXT: v_or_b32_e32 v6, 0x400000, v1
; GFX10-NEXT: v_add3_u32 v3, v3, v1, 0x7fff
; GFX10-NEXT: v_cndmask_b32_e32 v2, v2, v7, vcc_lo
; GFX10-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0
@@ -22057,17 +21571,15 @@ define <4 x bfloat> @v_maxnum_v4bf16(<4 x bfloat> %a, <4 x bfloat> %b) {
; GFX8-NEXT: v_and_b32_e32 v3, 0xffff0000, v3
; GFX8-NEXT: v_and_b32_e32 v1, 0xffff0000, v1
; GFX8-NEXT: v_add_u32_e32 v5, vcc, 0x7fff, v5
-; GFX8-NEXT: v_and_b32_e32 v6, 0xff800000, v4
; GFX8-NEXT: v_max_f32_e32 v1, v1, v3
-; GFX8-NEXT: v_or_b32_e32 v6, 0x400000, v6
+; GFX8-NEXT: v_or_b32_e32 v6, 0x400000, v4
; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v4, v4
; GFX8-NEXT: v_bfe_u32 v3, v1, 16, 1
; GFX8-NEXT: s_movk_i32 s4, 0x7fff
; GFX8-NEXT: v_cndmask_b32_e32 v4, v5, v6, vcc
; GFX8-NEXT: v_add_u32_e32 v3, vcc, v3, v1
; GFX8-NEXT: v_add_u32_e32 v3, vcc, s4, v3
-; GFX8-NEXT: v_and_b32_e32 v5, 0xff800000, v1
-; GFX8-NEXT: v_or_b32_e32 v5, 0x400000, v5
+; GFX8-NEXT: v_or_b32_e32 v5, 0x400000, v1
; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v1, v1
; GFX8-NEXT: v_cndmask_b32_e32 v1, v3, v5, vcc
; GFX8-NEXT: v_lshlrev_b32_e32 v3, 16, v2
@@ -22078,16 +21590,14 @@ define <4 x bfloat> @v_maxnum_v4bf16(<4 x bfloat> %a, <4 x bfloat> %b) {
; GFX8-NEXT: v_and_b32_e32 v2, 0xffff0000, v2
; GFX8-NEXT: v_and_b32_e32 v0, 0xffff0000, v0
; GFX8-NEXT: v_add_u32_e32 v5, vcc, s4, v5
-; GFX8-NEXT: v_and_b32_e32 v6, 0xff800000, v3
; GFX8-NEXT: v_max_f32_e32 v0, v0, v2
-; GFX8-NEXT: v_or_b32_e32 v6, 0x400000, v6
+; GFX8-NEXT: v_or_b32_e32 v6, 0x400000, v3
; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v3, v3
; GFX8-NEXT: v_bfe_u32 v2, v0, 16, 1
; GFX8-NEXT: v_cndmask_b32_e32 v3, v5, v6, vcc
; GFX8-NEXT: v_add_u32_e32 v2, vcc, v2, v0
; GFX8-NEXT: v_add_u32_e32 v2, vcc, 0x7fff, v2
-; GFX8-NEXT: v_and_b32_e32 v5, 0xff800000, v0
-; GFX8-NEXT: v_or_b32_e32 v5, 0x400000, v5
+; GFX8-NEXT: v_or_b32_e32 v5, 0x400000, v0
; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v0, v0
; GFX8-NEXT: v_cndmask_b32_e32 v0, v2, v5, vcc
; GFX8-NEXT: v_lshrrev_b32_e32 v1, 16, v1
@@ -22102,38 +21612,34 @@ define <4 x bfloat> @v_maxnum_v4bf16(<4 x bfloat> %a, <4 x bfloat> %b) {
; GFX9-NEXT: v_lshlrev_b32_e32 v4, 16, v3
; GFX9-NEXT: v_lshlrev_b32_e32 v5, 16, v1
; GFX9-NEXT: v_max_f32_e32 v4, v5, v4
-; GFX9-NEXT: v_bfe_u32 v5, v4, 16, 1
-; GFX9-NEXT: s_movk_i32 s4, 0x7fff
-; GFX9-NEXT: v_and_b32_e32 v6, 0xff800000, v4
; GFX9-NEXT: v_and_b32_e32 v3, 0xffff0000, v3
; GFX9-NEXT: v_and_b32_e32 v1, 0xffff0000, v1
+; GFX9-NEXT: v_bfe_u32 v5, v4, 16, 1
+; GFX9-NEXT: s_movk_i32 s4, 0x7fff
+; GFX9-NEXT: v_max_f32_e32 v1, v1, v3
; GFX9-NEXT: v_add3_u32 v5, v5, v4, s4
-; GFX9-NEXT: v_or_b32_e32 v6, 0x400000, v6
+; GFX9-NEXT: v_or_b32_e32 v6, 0x400000, v4
; GFX9-NEXT: v_cmp_u_f32_e32 vcc, v4, v4
-; GFX9-NEXT: v_max_f32_e32 v1, v1, v3
-; GFX9-NEXT: v_cndmask_b32_e32 v4, v5, v6, vcc
; GFX9-NEXT: v_bfe_u32 v3, v1, 16, 1
-; GFX9-NEXT: v_and_b32_e32 v5, 0xff800000, v1
+; GFX9-NEXT: v_cndmask_b32_e32 v4, v5, v6, vcc
; GFX9-NEXT: v_add3_u32 v3, v3, v1, s4
-; GFX9-NEXT: v_or_b32_e32 v5, 0x400000, v5
+; GFX9-NEXT: v_or_b32_e32 v5, 0x400000, v1
; GFX9-NEXT: v_cmp_u_f32_e32 vcc, v1, v1
; GFX9-NEXT: v_cndmask_b32_e32 v1, v3, v5, vcc
; GFX9-NEXT: v_lshlrev_b32_e32 v3, 16, v2
; GFX9-NEXT: v_lshlrev_b32_e32 v5, 16, v0
; GFX9-NEXT: v_max_f32_e32 v3, v5, v3
-; GFX9-NEXT: v_bfe_u32 v5, v3, 16, 1
-; GFX9-NEXT: v_and_b32_e32 v6, 0xff800000, v3
; GFX9-NEXT: v_and_b32_e32 v2, 0xffff0000, v2
; GFX9-NEXT: v_and_b32_e32 v0, 0xffff0000, v0
+; GFX9-NEXT: v_bfe_u32 v5, v3, 16, 1
+; GFX9-NEXT: v_max_f32_e32 v0, v0, v2
; GFX9-NEXT: v_add3_u32 v5, v5, v3, s4
-; GFX9-NEXT: v_or_b32_e32 v6, 0x400000, v6
+; GFX9-NEXT: v_or_b32_e32 v6, 0x400000, v3
; GFX9-NEXT: v_cmp_u_f32_e32 vcc, v3, v3
-; GFX9-NEXT: v_max_f32_e32 v0, v0, v2
-; GFX9-NEXT: v_cndmask_b32_e32 v3, v5, v6, vcc
; GFX9-NEXT: v_bfe_u32 v2, v0, 16, 1
-; GFX9-NEXT: v_and_b32_e32 v5, 0xff800000, v0
+; GFX9-NEXT: v_cndmask_b32_e32 v3, v5, v6, vcc
; GFX9-NEXT: v_add3_u32 v2, v2, v0, s4
-; GFX9-NEXT: v_or_b32_e32 v5, 0x400000, v5
+; GFX9-NEXT: v_or_b32_e32 v5, 0x400000, v0
; GFX9-NEXT: v_cmp_u_f32_e32 vcc, v0, v0
; GFX9-NEXT: v_cndmask_b32_e32 v0, v2, v5, vcc
; GFX9-NEXT: s_mov_b32 s4, 0x7060302
@@ -22154,31 +21660,30 @@ define <4 x bfloat> @v_maxnum_v4bf16(<4 x bfloat> %a, <4 x bfloat> %b) {
; GFX10-NEXT: v_and_b32_e32 v2, 0xffff0000, v2
; GFX10-NEXT: v_and_b32_e32 v0, 0xffff0000, v0
; GFX10-NEXT: v_max_f32_e32 v1, v1, v3
-; GFX10-NEXT: v_max_f32_e32 v5, v7, v6
-; GFX10-NEXT: v_bfe_u32 v3, v4, 16, 1
-; GFX10-NEXT: s_mov_b32 s4, 0xff800000
+; GFX10-NEXT: v_max_f32_e32 v3, v7, v6
+; GFX10-NEXT: v_bfe_u32 v5, v4, 16, 1
+; GFX10-NEXT: v_or_b32_e32 v7, 0x400000, v4
; GFX10-NEXT: v_max_f32_e32 v0, v0, v2
-; GFX10-NEXT: v_and_or_b32 v6, v4, s4, 0x400000
-; GFX10-NEXT: v_bfe_u32 v7, v5, 16, 1
-; GFX10-NEXT: v_add3_u32 v3, v3, v4, 0x7fff
; GFX10-NEXT: v_cmp_u_f32_e32 vcc_lo, v4, v4
-; GFX10-NEXT: v_bfe_u32 v8, v0, 16, 1
+; GFX10-NEXT: v_bfe_u32 v6, v3, 16, 1
+; GFX10-NEXT: v_add3_u32 v5, v5, v4, 0x7fff
; GFX10-NEXT: v_bfe_u32 v2, v1, 16, 1
-; GFX10-NEXT: v_add3_u32 v4, v7, v5, 0x7fff
-; GFX10-NEXT: v_and_or_b32 v9, v1, s4, 0x400000
-; GFX10-NEXT: v_cndmask_b32_e32 v3, v3, v6, vcc_lo
-; GFX10-NEXT: v_and_or_b32 v6, v5, s4, 0x400000
-; GFX10-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
+; GFX10-NEXT: v_bfe_u32 v8, v0, 16, 1
+; GFX10-NEXT: v_or_b32_e32 v9, 0x400000, v1
+; GFX10-NEXT: v_add3_u32 v6, v6, v3, 0x7fff
+; GFX10-NEXT: v_cndmask_b32_e32 v4, v5, v7, vcc_lo
+; GFX10-NEXT: v_or_b32_e32 v5, 0x400000, v3
+; GFX10-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3
; GFX10-NEXT: v_add3_u32 v7, v8, v0, 0x7fff
-; GFX10-NEXT: v_and_or_b32 v8, v0, s4, 0x400000
+; GFX10-NEXT: v_or_b32_e32 v8, 0x400000, v0
; GFX10-NEXT: v_add3_u32 v2, v2, v1, 0x7fff
-; GFX10-NEXT: v_cndmask_b32_e32 v4, v4, v6, vcc_lo
+; GFX10-NEXT: v_cndmask_b32_e32 v3, v6, v5, vcc_lo
; GFX10-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0
; GFX10-NEXT: v_cndmask_b32_e32 v0, v7, v8, vcc_lo
; GFX10-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
-; GFX10-NEXT: v_perm_b32 v0, v0, v4, 0x7060302
+; GFX10-NEXT: v_perm_b32 v0, v0, v3, 0x7060302
; GFX10-NEXT: v_cndmask_b32_e32 v1, v2, v9, vcc_lo
-; GFX10-NEXT: v_perm_b32 v1, v1, v3, 0x7060302
+; GFX10-NEXT: v_perm_b32 v1, v1, v4, 0x7060302
; GFX10-NEXT: s_setpc_b64 s[30:31]
;
; GFX11-LABEL: v_maxnum_v4bf16:
@@ -22186,45 +21691,42 @@ define <4 x bfloat> @v_maxnum_v4bf16(<4 x bfloat> %a, <4 x bfloat> %b) {
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-NEXT: v_lshlrev_b32_e32 v6, 16, v2
; GFX11-NEXT: v_lshlrev_b32_e32 v7, 16, v0
-; GFX11-NEXT: v_lshlrev_b32_e32 v4, 16, v3
; GFX11-NEXT: v_and_b32_e32 v2, 0xffff0000, v2
; GFX11-NEXT: v_and_b32_e32 v0, 0xffff0000, v0
+; GFX11-NEXT: v_lshlrev_b32_e32 v4, 16, v3
; GFX11-NEXT: v_lshlrev_b32_e32 v5, 16, v1
-; GFX11-NEXT: v_and_b32_e32 v1, 0xffff0000, v1
-; GFX11-NEXT: s_mov_b32 s0, 0xff800000
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
-; GFX11-NEXT: v_dual_max_f32 v0, v0, v2 :: v_dual_and_b32 v3, 0xffff0000, v3
-; GFX11-NEXT: v_max_f32_e32 v4, v5, v4
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_2)
+; GFX11-NEXT: v_dual_max_f32 v0, v0, v2 :: v_dual_and_b32 v1, 0xffff0000, v1
+; GFX11-NEXT: v_and_b32_e32 v3, 0xffff0000, v3
; GFX11-NEXT: v_bfe_u32 v8, v0, 16, 1
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_2)
; GFX11-NEXT: v_max_f32_e32 v1, v1, v3
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3)
-; GFX11-NEXT: v_bfe_u32 v3, v4, 16, 1
-; GFX11-NEXT: v_max_f32_e32 v5, v7, v6
-; GFX11-NEXT: v_and_or_b32 v6, v4, s0, 0x400000
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v4, v4
+; GFX11-NEXT: v_dual_max_f32 v3, v7, v6 :: v_dual_max_f32 v4, v5, v4
; GFX11-NEXT: v_bfe_u32 v2, v1, 16, 1
-; GFX11-NEXT: v_add3_u32 v3, v3, v4, 0x7fff
-; GFX11-NEXT: v_bfe_u32 v7, v5, 16, 1
-; GFX11-NEXT: v_and_or_b32 v9, v1, s0, 0x400000
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX11-NEXT: v_bfe_u32 v6, v3, 16, 1
+; GFX11-NEXT: v_bfe_u32 v5, v4, 16, 1
+; GFX11-NEXT: v_or_b32_e32 v7, 0x400000, v4
+; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v4, v4
; GFX11-NEXT: v_add3_u32 v2, v2, v1, 0x7fff
-; GFX11-NEXT: v_cndmask_b32_e32 v3, v3, v6, vcc_lo
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_4) | instid1(VALU_DEP_4)
-; GFX11-NEXT: v_add3_u32 v4, v7, v5, 0x7fff
-; GFX11-NEXT: v_and_or_b32 v6, v5, s0, 0x400000
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
+; GFX11-NEXT: v_add3_u32 v6, v6, v3, 0x7fff
+; GFX11-NEXT: v_add3_u32 v5, v5, v4, 0x7fff
+; GFX11-NEXT: v_or_b32_e32 v9, 0x400000, v1
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_4) | instid1(VALU_DEP_4)
+; GFX11-NEXT: v_cndmask_b32_e32 v4, v5, v7, vcc_lo
+; GFX11-NEXT: v_or_b32_e32 v5, 0x400000, v3
+; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3
; GFX11-NEXT: v_add3_u32 v7, v8, v0, 0x7fff
-; GFX11-NEXT: v_and_or_b32 v8, v0, s0, 0x400000
-; GFX11-NEXT: v_cndmask_b32_e32 v4, v4, v6, vcc_lo
+; GFX11-NEXT: v_or_b32_e32 v8, 0x400000, v0
+; GFX11-NEXT: v_cndmask_b32_e32 v3, v6, v5, vcc_lo
; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_2) | instid1(VALU_DEP_3)
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_2)
; GFX11-NEXT: v_cndmask_b32_e32 v0, v7, v8, vcc_lo
; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
+; GFX11-NEXT: v_perm_b32 v0, v0, v3, 0x7060302
; GFX11-NEXT: v_cndmask_b32_e32 v1, v2, v9, vcc_lo
-; GFX11-NEXT: v_perm_b32 v0, v0, v4, 0x7060302
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2)
-; GFX11-NEXT: v_perm_b32 v1, v1, v3, 0x7060302
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-NEXT: v_perm_b32 v1, v1, v4, 0x7060302
; GFX11-NEXT: s_setpc_b64 s[30:31]
%op = call <4 x bfloat> @llvm.maxnum.v4bf16(<4 x bfloat> %a, <4 x bfloat> %b)
ret <4 x bfloat> %op
@@ -22380,17 +21882,15 @@ define <8 x bfloat> @v_maxnum_v8bf16(<8 x bfloat> %a, <8 x bfloat> %b) {
; GFX8-NEXT: v_and_b32_e32 v7, 0xffff0000, v7
; GFX8-NEXT: v_and_b32_e32 v3, 0xffff0000, v3
; GFX8-NEXT: v_add_u32_e32 v9, vcc, 0x7fff, v9
-; GFX8-NEXT: v_and_b32_e32 v10, 0xff800000, v8
; GFX8-NEXT: v_max_f32_e32 v3, v3, v7
-; GFX8-NEXT: v_or_b32_e32 v10, 0x400000, v10
+; GFX8-NEXT: v_or_b32_e32 v10, 0x400000, v8
; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v8, v8
; GFX8-NEXT: v_bfe_u32 v7, v3, 16, 1
; GFX8-NEXT: s_movk_i32 s4, 0x7fff
; GFX8-NEXT: v_cndmask_b32_e32 v8, v9, v10, vcc
; GFX8-NEXT: v_add_u32_e32 v7, vcc, v7, v3
; GFX8-NEXT: v_add_u32_e32 v7, vcc, s4, v7
-; GFX8-NEXT: v_and_b32_e32 v9, 0xff800000, v3
-; GFX8-NEXT: v_or_b32_e32 v9, 0x400000, v9
+; GFX8-NEXT: v_or_b32_e32 v9, 0x400000, v3
; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v3, v3
; GFX8-NEXT: v_cndmask_b32_e32 v3, v7, v9, vcc
; GFX8-NEXT: v_lshlrev_b32_e32 v7, 16, v6
@@ -22401,16 +21901,14 @@ define <8 x bfloat> @v_maxnum_v8bf16(<8 x bfloat> %a, <8 x bfloat> %b) {
; GFX8-NEXT: v_and_b32_e32 v6, 0xffff0000, v6
; GFX8-NEXT: v_and_b32_e32 v2, 0xffff0000, v2
; GFX8-NEXT: v_add_u32_e32 v9, vcc, s4, v9
-; GFX8-NEXT: v_and_b32_e32 v10, 0xff800000, v7
; GFX8-NEXT: v_max_f32_e32 v2, v2, v6
-; GFX8-NEXT: v_or_b32_e32 v10, 0x400000, v10
+; GFX8-NEXT: v_or_b32_e32 v10, 0x400000, v7
; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v7, v7
; GFX8-NEXT: v_bfe_u32 v6, v2, 16, 1
; GFX8-NEXT: v_cndmask_b32_e32 v7, v9, v10, vcc
; GFX8-NEXT: v_add_u32_e32 v6, vcc, v6, v2
; GFX8-NEXT: v_add_u32_e32 v6, vcc, s4, v6
-; GFX8-NEXT: v_and_b32_e32 v9, 0xff800000, v2
-; GFX8-NEXT: v_or_b32_e32 v9, 0x400000, v9
+; GFX8-NEXT: v_or_b32_e32 v9, 0x400000, v2
; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v2, v2
; GFX8-NEXT: v_cndmask_b32_e32 v2, v6, v9, vcc
; GFX8-NEXT: v_lshlrev_b32_e32 v6, 16, v5
@@ -22421,16 +21919,14 @@ define <8 x bfloat> @v_maxnum_v8bf16(<8 x bfloat> %a, <8 x bfloat> %b) {
; GFX8-NEXT: v_and_b32_e32 v5, 0xffff0000, v5
; GFX8-NEXT: v_and_b32_e32 v1, 0xffff0000, v1
; GFX8-NEXT: v_add_u32_e32 v9, vcc, s4, v9
-; GFX8-NEXT: v_and_b32_e32 v10, 0xff800000, v6
; GFX8-NEXT: v_max_f32_e32 v1, v1, v5
-; GFX8-NEXT: v_or_b32_e32 v10, 0x400000, v10
+; GFX8-NEXT: v_or_b32_e32 v10, 0x400000, v6
; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v6, v6
; GFX8-NEXT: v_bfe_u32 v5, v1, 16, 1
; GFX8-NEXT: v_cndmask_b32_e32 v6, v9, v10, vcc
; GFX8-NEXT: v_add_u32_e32 v5, vcc, v5, v1
; GFX8-NEXT: v_add_u32_e32 v5, vcc, s4, v5
-; GFX8-NEXT: v_and_b32_e32 v9, 0xff800000, v1
-; GFX8-NEXT: v_or_b32_e32 v9, 0x400000, v9
+; GFX8-NEXT: v_or_b32_e32 v9, 0x400000, v1
; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v1, v1
; GFX8-NEXT: v_cndmask_b32_e32 v1, v5, v9, vcc
; GFX8-NEXT: v_lshlrev_b32_e32 v5, 16, v4
@@ -22441,16 +21937,14 @@ define <8 x bfloat> @v_maxnum_v8bf16(<8 x bfloat> %a, <8 x bfloat> %b) {
; GFX8-NEXT: v_and_b32_e32 v4, 0xffff0000, v4
; GFX8-NEXT: v_and_b32_e32 v0, 0xffff0000, v0
; GFX8-NEXT: v_add_u32_e32 v9, vcc, s4, v9
-; GFX8-NEXT: v_and_b32_e32 v10, 0xff800000, v5
; GFX8-NEXT: v_max_f32_e32 v0, v0, v4
-; GFX8-NEXT: v_or_b32_e32 v10, 0x400000, v10
+; GFX8-NEXT: v_or_b32_e32 v10, 0x400000, v5
; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v5, v5
; GFX8-NEXT: v_bfe_u32 v4, v0, 16, 1
; GFX8-NEXT: v_cndmask_b32_e32 v5, v9, v10, vcc
; GFX8-NEXT: v_add_u32_e32 v4, vcc, v4, v0
; GFX8-NEXT: v_add_u32_e32 v4, vcc, 0x7fff, v4
-; GFX8-NEXT: v_and_b32_e32 v9, 0xff800000, v0
-; GFX8-NEXT: v_or_b32_e32 v9, 0x400000, v9
+; GFX8-NEXT: v_or_b32_e32 v9, 0x400000, v0
; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v0, v0
; GFX8-NEXT: v_cndmask_b32_e32 v0, v4, v9, vcc
; GFX8-NEXT: v_lshrrev_b32_e32 v3, 16, v3
@@ -22469,74 +21963,66 @@ define <8 x bfloat> @v_maxnum_v8bf16(<8 x bfloat> %a, <8 x bfloat> %b) {
; GFX9-NEXT: v_lshlrev_b32_e32 v8, 16, v7
; GFX9-NEXT: v_lshlrev_b32_e32 v9, 16, v3
; GFX9-NEXT: v_max_f32_e32 v8, v9, v8
-; GFX9-NEXT: v_bfe_u32 v9, v8, 16, 1
-; GFX9-NEXT: s_movk_i32 s4, 0x7fff
-; GFX9-NEXT: v_and_b32_e32 v10, 0xff800000, v8
; GFX9-NEXT: v_and_b32_e32 v7, 0xffff0000, v7
; GFX9-NEXT: v_and_b32_e32 v3, 0xffff0000, v3
+; GFX9-NEXT: v_bfe_u32 v9, v8, 16, 1
+; GFX9-NEXT: s_movk_i32 s4, 0x7fff
+; GFX9-NEXT: v_max_f32_e32 v3, v3, v7
; GFX9-NEXT: v_add3_u32 v9, v9, v8, s4
-; GFX9-NEXT: v_or_b32_e32 v10, 0x400000, v10
+; GFX9-NEXT: v_or_b32_e32 v10, 0x400000, v8
; GFX9-NEXT: v_cmp_u_f32_e32 vcc, v8, v8
-; GFX9-NEXT: v_max_f32_e32 v3, v3, v7
-; GFX9-NEXT: v_cndmask_b32_e32 v8, v9, v10, vcc
; GFX9-NEXT: v_bfe_u32 v7, v3, 16, 1
-; GFX9-NEXT: v_and_b32_e32 v9, 0xff800000, v3
+; GFX9-NEXT: v_cndmask_b32_e32 v8, v9, v10, vcc
; GFX9-NEXT: v_add3_u32 v7, v7, v3, s4
-; GFX9-NEXT: v_or_b32_e32 v9, 0x400000, v9
+; GFX9-NEXT: v_or_b32_e32 v9, 0x400000, v3
; GFX9-NEXT: v_cmp_u_f32_e32 vcc, v3, v3
; GFX9-NEXT: v_cndmask_b32_e32 v3, v7, v9, vcc
; GFX9-NEXT: v_lshlrev_b32_e32 v7, 16, v6
; GFX9-NEXT: v_lshlrev_b32_e32 v9, 16, v2
; GFX9-NEXT: v_max_f32_e32 v7, v9, v7
-; GFX9-NEXT: v_bfe_u32 v9, v7, 16, 1
-; GFX9-NEXT: v_and_b32_e32 v10, 0xff800000, v7
; GFX9-NEXT: v_and_b32_e32 v6, 0xffff0000, v6
; GFX9-NEXT: v_and_b32_e32 v2, 0xffff0000, v2
+; GFX9-NEXT: v_bfe_u32 v9, v7, 16, 1
+; GFX9-NEXT: v_max_f32_e32 v2, v2, v6
; GFX9-NEXT: v_add3_u32 v9, v9, v7, s4
-; GFX9-NEXT: v_or_b32_e32 v10, 0x400000, v10
+; GFX9-NEXT: v_or_b32_e32 v10, 0x400000, v7
; GFX9-NEXT: v_cmp_u_f32_e32 vcc, v7, v7
-; GFX9-NEXT: v_max_f32_e32 v2, v2, v6
-; GFX9-NEXT: v_cndmask_b32_e32 v7, v9, v10, vcc
; GFX9-NEXT: v_bfe_u32 v6, v2, 16, 1
-; GFX9-NEXT: v_and_b32_e32 v9, 0xff800000, v2
+; GFX9-NEXT: v_cndmask_b32_e32 v7, v9, v10, vcc
; GFX9-NEXT: v_add3_u32 v6, v6, v2, s4
-; GFX9-NEXT: v_or_b32_e32 v9, 0x400000, v9
+; GFX9-NEXT: v_or_b32_e32 v9, 0x400000, v2
; GFX9-NEXT: v_cmp_u_f32_e32 vcc, v2, v2
; GFX9-NEXT: v_cndmask_b32_e32 v2, v6, v9, vcc
; GFX9-NEXT: v_lshlrev_b32_e32 v6, 16, v5
; GFX9-NEXT: v_lshlrev_b32_e32 v9, 16, v1
; GFX9-NEXT: v_max_f32_e32 v6, v9, v6
-; GFX9-NEXT: v_bfe_u32 v9, v6, 16, 1
-; GFX9-NEXT: v_and_b32_e32 v10, 0xff800000, v6
; GFX9-NEXT: v_and_b32_e32 v5, 0xffff0000, v5
; GFX9-NEXT: v_and_b32_e32 v1, 0xffff0000, v1
+; GFX9-NEXT: v_bfe_u32 v9, v6, 16, 1
+; GFX9-NEXT: v_max_f32_e32 v1, v1, v5
; GFX9-NEXT: v_add3_u32 v9, v9, v6, s4
-; GFX9-NEXT: v_or_b32_e32 v10, 0x400000, v10
+; GFX9-NEXT: v_or_b32_e32 v10, 0x400000, v6
; GFX9-NEXT: v_cmp_u_f32_e32 vcc, v6, v6
-; GFX9-NEXT: v_max_f32_e32 v1, v1, v5
-; GFX9-NEXT: v_cndmask_b32_e32 v6, v9, v10, vcc
; GFX9-NEXT: v_bfe_u32 v5, v1, 16, 1
-; GFX9-NEXT: v_and_b32_e32 v9, 0xff800000, v1
+; GFX9-NEXT: v_cndmask_b32_e32 v6, v9, v10, vcc
; GFX9-NEXT: v_add3_u32 v5, v5, v1, s4
-; GFX9-NEXT: v_or_b32_e32 v9, 0x400000, v9
+; GFX9-NEXT: v_or_b32_e32 v9, 0x400000, v1
; GFX9-NEXT: v_cmp_u_f32_e32 vcc, v1, v1
; GFX9-NEXT: v_cndmask_b32_e32 v1, v5, v9, vcc
; GFX9-NEXT: v_lshlrev_b32_e32 v5, 16, v4
; GFX9-NEXT: v_lshlrev_b32_e32 v9, 16, v0
; GFX9-NEXT: v_max_f32_e32 v5, v9, v5
-; GFX9-NEXT: v_bfe_u32 v9, v5, 16, 1
-; GFX9-NEXT: v_and_b32_e32 v10, 0xff800000, v5
; GFX9-NEXT: v_and_b32_e32 v4, 0xffff0000, v4
; GFX9-NEXT: v_and_b32_e32 v0, 0xffff0000, v0
+; GFX9-NEXT: v_bfe_u32 v9, v5, 16, 1
+; GFX9-NEXT: v_max_f32_e32 v0, v0, v4
; GFX9-NEXT: v_add3_u32 v9, v9, v5, s4
-; GFX9-NEXT: v_or_b32_e32 v10, 0x400000, v10
+; GFX9-NEXT: v_or_b32_e32 v10, 0x400000, v5
; GFX9-NEXT: v_cmp_u_f32_e32 vcc, v5, v5
-; GFX9-NEXT: v_max_f32_e32 v0, v0, v4
-; GFX9-NEXT: v_cndmask_b32_e32 v5, v9, v10, vcc
; GFX9-NEXT: v_bfe_u32 v4, v0, 16, 1
-; GFX9-NEXT: v_and_b32_e32 v9, 0xff800000, v0
+; GFX9-NEXT: v_cndmask_b32_e32 v5, v9, v10, vcc
; GFX9-NEXT: v_add3_u32 v4, v4, v0, s4
-; GFX9-NEXT: v_or_b32_e32 v9, 0x400000, v9
+; GFX9-NEXT: v_or_b32_e32 v9, 0x400000, v0
; GFX9-NEXT: v_cmp_u_f32_e32 vcc, v0, v0
; GFX9-NEXT: v_cndmask_b32_e32 v0, v4, v9, vcc
; GFX9-NEXT: s_mov_b32 s4, 0x7060302
@@ -22553,62 +22039,61 @@ define <8 x bfloat> @v_maxnum_v8bf16(<8 x bfloat> %a, <8 x bfloat> %b) {
; GFX10-NEXT: v_lshlrev_b32_e32 v9, 16, v3
; GFX10-NEXT: v_and_b32_e32 v7, 0xffff0000, v7
; GFX10-NEXT: v_and_b32_e32 v3, 0xffff0000, v3
-; GFX10-NEXT: v_lshlrev_b32_e32 v11, 16, v2
+; GFX10-NEXT: v_lshlrev_b32_e32 v10, 16, v2
; GFX10-NEXT: v_and_b32_e32 v2, 0xffff0000, v2
; GFX10-NEXT: v_max_f32_e32 v8, v9, v8
; GFX10-NEXT: v_lshlrev_b32_e32 v9, 16, v6
-; GFX10-NEXT: v_max_f32_e32 v3, v3, v7
; GFX10-NEXT: v_and_b32_e32 v6, 0xffff0000, v6
-; GFX10-NEXT: s_mov_b32 s4, 0xff800000
-; GFX10-NEXT: v_bfe_u32 v10, v8, 16, 1
-; GFX10-NEXT: v_and_or_b32 v7, v8, s4, 0x400000
-; GFX10-NEXT: v_max_f32_e32 v9, v11, v9
-; GFX10-NEXT: v_bfe_u32 v11, v3, 16, 1
+; GFX10-NEXT: v_max_f32_e32 v3, v3, v7
+; GFX10-NEXT: v_lshlrev_b32_e32 v15, 16, v0
+; GFX10-NEXT: v_bfe_u32 v11, v8, 16, 1
+; GFX10-NEXT: v_max_f32_e32 v7, v10, v9
+; GFX10-NEXT: v_or_b32_e32 v9, 0x400000, v8
; GFX10-NEXT: v_cmp_u_f32_e32 vcc_lo, v8, v8
-; GFX10-NEXT: v_add3_u32 v10, v10, v8, 0x7fff
; GFX10-NEXT: v_max_f32_e32 v2, v2, v6
-; GFX10-NEXT: v_bfe_u32 v8, v9, 16, 1
+; GFX10-NEXT: v_add3_u32 v10, v11, v8, 0x7fff
+; GFX10-NEXT: v_bfe_u32 v11, v3, 16, 1
+; GFX10-NEXT: v_bfe_u32 v12, v7, 16, 1
; GFX10-NEXT: v_lshlrev_b32_e32 v6, 16, v5
-; GFX10-NEXT: v_and_or_b32 v12, v9, s4, 0x400000
-; GFX10-NEXT: v_cndmask_b32_e32 v7, v10, v7, vcc_lo
-; GFX10-NEXT: v_add3_u32 v10, v11, v3, 0x7fff
-; GFX10-NEXT: v_lshlrev_b32_e32 v11, 16, v1
; GFX10-NEXT: v_bfe_u32 v13, v2, 16, 1
-; GFX10-NEXT: v_add3_u32 v8, v8, v9, 0x7fff
-; GFX10-NEXT: v_cmp_u_f32_e32 vcc_lo, v9, v9
+; GFX10-NEXT: v_cndmask_b32_e32 v8, v10, v9, vcc_lo
+; GFX10-NEXT: v_lshlrev_b32_e32 v10, 16, v1
+; GFX10-NEXT: v_add3_u32 v9, v11, v3, 0x7fff
+; GFX10-NEXT: v_add3_u32 v11, v12, v7, 0x7fff
+; GFX10-NEXT: v_or_b32_e32 v12, 0x400000, v7
+; GFX10-NEXT: v_cmp_u_f32_e32 vcc_lo, v7, v7
+; GFX10-NEXT: v_max_f32_e32 v6, v10, v6
+; GFX10-NEXT: v_add3_u32 v10, v13, v2, 0x7fff
; GFX10-NEXT: v_and_b32_e32 v5, 0xffff0000, v5
-; GFX10-NEXT: v_max_f32_e32 v6, v11, v6
-; GFX10-NEXT: v_add3_u32 v9, v13, v2, 0x7fff
; GFX10-NEXT: v_and_b32_e32 v1, 0xffff0000, v1
; GFX10-NEXT: v_lshlrev_b32_e32 v13, 16, v4
-; GFX10-NEXT: v_lshlrev_b32_e32 v15, 16, v0
-; GFX10-NEXT: v_cndmask_b32_e32 v8, v8, v12, vcc_lo
-; GFX10-NEXT: v_and_or_b32 v11, v2, s4, 0x400000
+; GFX10-NEXT: v_cndmask_b32_e32 v7, v11, v12, vcc_lo
+; GFX10-NEXT: v_or_b32_e32 v11, 0x400000, v2
; GFX10-NEXT: v_bfe_u32 v12, v6, 16, 1
; GFX10-NEXT: v_and_b32_e32 v4, 0xffff0000, v4
; GFX10-NEXT: v_and_b32_e32 v0, 0xffff0000, v0
; GFX10-NEXT: v_cmp_u_f32_e32 vcc_lo, v2, v2
; GFX10-NEXT: v_max_f32_e32 v1, v1, v5
; GFX10-NEXT: v_max_f32_e32 v5, v15, v13
-; GFX10-NEXT: v_and_or_b32 v14, v3, s4, 0x400000
+; GFX10-NEXT: v_or_b32_e32 v14, 0x400000, v3
; GFX10-NEXT: v_max_f32_e32 v0, v0, v4
-; GFX10-NEXT: v_cndmask_b32_e32 v2, v9, v11, vcc_lo
+; GFX10-NEXT: v_cndmask_b32_e32 v2, v10, v11, vcc_lo
; GFX10-NEXT: v_add3_u32 v4, v12, v6, 0x7fff
-; GFX10-NEXT: v_and_or_b32 v9, v6, s4, 0x400000
+; GFX10-NEXT: v_or_b32_e32 v10, 0x400000, v6
; GFX10-NEXT: v_bfe_u32 v11, v1, 16, 1
; GFX10-NEXT: v_bfe_u32 v12, v5, 16, 1
; GFX10-NEXT: v_cmp_u_f32_e32 vcc_lo, v6, v6
; GFX10-NEXT: v_bfe_u32 v13, v0, 16, 1
-; GFX10-NEXT: v_and_or_b32 v15, v1, s4, 0x400000
+; GFX10-NEXT: v_or_b32_e32 v15, 0x400000, v1
; GFX10-NEXT: v_add3_u32 v6, v11, v1, 0x7fff
-; GFX10-NEXT: v_and_or_b32 v11, v5, s4, 0x400000
-; GFX10-NEXT: v_cndmask_b32_e32 v4, v4, v9, vcc_lo
-; GFX10-NEXT: v_add3_u32 v9, v12, v5, 0x7fff
+; GFX10-NEXT: v_or_b32_e32 v11, 0x400000, v5
+; GFX10-NEXT: v_cndmask_b32_e32 v4, v4, v10, vcc_lo
+; GFX10-NEXT: v_add3_u32 v10, v12, v5, 0x7fff
; GFX10-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
; GFX10-NEXT: v_add3_u32 v12, v13, v0, 0x7fff
-; GFX10-NEXT: v_and_or_b32 v13, v0, s4, 0x400000
-; GFX10-NEXT: v_perm_b32 v2, v2, v8, 0x7060302
-; GFX10-NEXT: v_cndmask_b32_e32 v5, v9, v11, vcc_lo
+; GFX10-NEXT: v_or_b32_e32 v13, 0x400000, v0
+; GFX10-NEXT: v_perm_b32 v2, v2, v7, 0x7060302
+; GFX10-NEXT: v_cndmask_b32_e32 v5, v10, v11, vcc_lo
; GFX10-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0
; GFX10-NEXT: v_cndmask_b32_e32 v0, v12, v13, vcc_lo
; GFX10-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
@@ -22616,81 +22101,80 @@ define <8 x bfloat> @v_maxnum_v8bf16(<8 x bfloat> %a, <8 x bfloat> %b) {
; GFX10-NEXT: v_cndmask_b32_e32 v1, v6, v15, vcc_lo
; GFX10-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3
; GFX10-NEXT: v_perm_b32 v1, v1, v4, 0x7060302
-; GFX10-NEXT: v_cndmask_b32_e32 v3, v10, v14, vcc_lo
-; GFX10-NEXT: v_perm_b32 v3, v3, v7, 0x7060302
+; GFX10-NEXT: v_cndmask_b32_e32 v3, v9, v14, vcc_lo
+; GFX10-NEXT: v_perm_b32 v3, v3, v8, 0x7060302
; GFX10-NEXT: s_setpc_b64 s[30:31]
;
; GFX11-LABEL: v_maxnum_v8bf16:
; GFX11: ; %bb.0:
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: v_lshlrev_b32_e32 v15, 16, v0
-; GFX11-NEXT: v_and_b32_e32 v0, 0xffff0000, v0
-; GFX11-NEXT: v_lshlrev_b32_e32 v11, 16, v2
+; GFX11-NEXT: v_lshlrev_b32_e32 v10, 16, v2
; GFX11-NEXT: v_lshlrev_b32_e32 v8, 16, v7
; GFX11-NEXT: v_and_b32_e32 v2, 0xffff0000, v2
; GFX11-NEXT: v_lshlrev_b32_e32 v9, 16, v3
-; GFX11-NEXT: v_and_b32_e32 v7, 0xffff0000, v7
-; GFX11-NEXT: s_mov_b32 s0, 0xff800000
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_dual_max_f32 v8, v9, v8 :: v_dual_lshlrev_b32 v9, 16, v6
-; GFX11-NEXT: v_bfe_u32 v10, v8, 16, 1
+; GFX11-NEXT: v_lshlrev_b32_e32 v15, 16, v0
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_2)
+; GFX11-NEXT: v_dual_max_f32 v8, v9, v8 :: v_dual_and_b32 v7, 0xffff0000, v7
+; GFX11-NEXT: v_lshlrev_b32_e32 v9, 16, v6
; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v8, v8
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
-; GFX11-NEXT: v_max_f32_e32 v9, v11, v9
-; GFX11-NEXT: v_add3_u32 v10, v10, v8, 0x7fff
-; GFX11-NEXT: v_and_b32_e32 v6, 0xffff0000, v6
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_2)
-; GFX11-NEXT: v_and_or_b32 v12, v9, s0, 0x400000
-; GFX11-NEXT: v_max_f32_e32 v2, v2, v6
-; GFX11-NEXT: v_lshlrev_b32_e32 v6, 16, v5
; GFX11-NEXT: v_and_b32_e32 v3, 0xffff0000, v3
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_2)
-; GFX11-NEXT: v_bfe_u32 v13, v2, 16, 1
+; GFX11-NEXT: v_bfe_u32 v11, v8, 16, 1
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_2) | instid1(VALU_DEP_4)
; GFX11-NEXT: v_max_f32_e32 v3, v3, v7
-; GFX11-NEXT: v_and_or_b32 v7, v8, s0, 0x400000
-; GFX11-NEXT: v_bfe_u32 v8, v9, 16, 1
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
-; GFX11-NEXT: v_cndmask_b32_e32 v7, v10, v7, vcc_lo
-; GFX11-NEXT: v_add3_u32 v8, v8, v9, 0x7fff
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v9, v9
-; GFX11-NEXT: v_add3_u32 v9, v13, v2, 0x7fff
-; GFX11-NEXT: v_lshlrev_b32_e32 v13, 16, v4
-; GFX11-NEXT: v_and_b32_e32 v4, 0xffff0000, v4
+; GFX11-NEXT: v_max_f32_e32 v7, v10, v9
+; GFX11-NEXT: v_or_b32_e32 v9, 0x400000, v8
+; GFX11-NEXT: v_add3_u32 v10, v11, v8, 0x7fff
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
; GFX11-NEXT: v_bfe_u32 v11, v3, 16, 1
-; GFX11-NEXT: v_cndmask_b32_e32 v8, v8, v12, vcc_lo
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v2, v2
-; GFX11-NEXT: v_and_or_b32 v14, v3, s0, 0x400000
-; GFX11-NEXT: v_max_f32_e32 v0, v0, v4
-; GFX11-NEXT: v_add3_u32 v10, v11, v3, 0x7fff
-; GFX11-NEXT: v_lshlrev_b32_e32 v11, 16, v1
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
-; GFX11-NEXT: v_dual_max_f32 v6, v11, v6 :: v_dual_and_b32 v1, 0xffff0000, v1
-; GFX11-NEXT: v_and_or_b32 v11, v2, s0, 0x400000
+; GFX11-NEXT: v_bfe_u32 v12, v7, 16, 1
+; GFX11-NEXT: v_or_b32_e32 v14, 0x400000, v3
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4)
+; GFX11-NEXT: v_cndmask_b32_e32 v8, v10, v9, vcc_lo
+; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v7, v7
+; GFX11-NEXT: v_add3_u32 v9, v11, v3, 0x7fff
+; GFX11-NEXT: v_add3_u32 v11, v12, v7, 0x7fff
+; GFX11-NEXT: v_or_b32_e32 v12, 0x400000, v7
+; GFX11-NEXT: v_and_b32_e32 v6, 0xffff0000, v6
+; GFX11-NEXT: v_lshlrev_b32_e32 v10, 16, v1
+; GFX11-NEXT: v_and_b32_e32 v0, 0xffff0000, v0
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_2) | instid1(VALU_DEP_3)
+; GFX11-NEXT: v_dual_cndmask_b32 v7, v11, v12 :: v_dual_max_f32 v2, v2, v6
+; GFX11-NEXT: v_lshlrev_b32_e32 v6, 16, v5
; GFX11-NEXT: v_and_b32_e32 v5, 0xffff0000, v5
+; GFX11-NEXT: v_bfe_u32 v13, v2, 16, 1
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_2) | instid1(VALU_DEP_4)
+; GFX11-NEXT: v_max_f32_e32 v6, v10, v6
+; GFX11-NEXT: v_or_b32_e32 v11, 0x400000, v2
+; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v2, v2
+; GFX11-NEXT: v_add3_u32 v10, v13, v2, 0x7fff
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_2)
; GFX11-NEXT: v_bfe_u32 v12, v6, 16, 1
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3)
-; GFX11-NEXT: v_cndmask_b32_e32 v2, v9, v11, vcc_lo
-; GFX11-NEXT: v_and_or_b32 v9, v6, s0, 0x400000
+; GFX11-NEXT: v_cndmask_b32_e32 v2, v10, v11, vcc_lo
+; GFX11-NEXT: v_or_b32_e32 v10, 0x400000, v6
; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v6, v6
-; GFX11-NEXT: v_max_f32_e32 v1, v1, v5
+; GFX11-NEXT: v_lshlrev_b32_e32 v13, 16, v4
+; GFX11-NEXT: v_and_b32_e32 v4, 0xffff0000, v4
+; GFX11-NEXT: v_and_b32_e32 v1, 0xffff0000, v1
+; GFX11-NEXT: v_perm_b32 v2, v2, v7, 0x7060302
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX11-NEXT: v_max_f32_e32 v0, v0, v4
; GFX11-NEXT: v_add3_u32 v4, v12, v6, 0x7fff
-; GFX11-NEXT: v_perm_b32 v2, v2, v8, 0x7060302
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
-; GFX11-NEXT: v_bfe_u32 v11, v1, 16, 1
-; GFX11-NEXT: v_cndmask_b32_e32 v4, v4, v9, vcc_lo
+; GFX11-NEXT: v_dual_max_f32 v1, v1, v5 :: v_dual_cndmask_b32 v4, v4, v10
; GFX11-NEXT: v_max_f32_e32 v5, v15, v13
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_2) | instid1(VALU_DEP_4)
+; GFX11-NEXT: v_bfe_u32 v11, v1, 16, 1
; GFX11-NEXT: v_bfe_u32 v13, v0, 16, 1
-; GFX11-NEXT: v_and_or_b32 v15, v1, s0, 0x400000
-; GFX11-NEXT: v_add3_u32 v6, v11, v1, 0x7fff
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_2) | instid1(VALU_DEP_3)
+; GFX11-NEXT: v_or_b32_e32 v15, 0x400000, v1
; GFX11-NEXT: v_bfe_u32 v12, v5, 16, 1
-; GFX11-NEXT: v_and_or_b32 v11, v5, s0, 0x400000
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_2) | instid1(VALU_DEP_4)
+; GFX11-NEXT: v_add3_u32 v6, v11, v1, 0x7fff
+; GFX11-NEXT: v_or_b32_e32 v11, 0x400000, v5
; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
-; GFX11-NEXT: v_add3_u32 v9, v12, v5, 0x7fff
+; GFX11-NEXT: v_add3_u32 v10, v12, v5, 0x7fff
; GFX11-NEXT: v_add3_u32 v12, v13, v0, 0x7fff
-; GFX11-NEXT: v_and_or_b32 v13, v0, s0, 0x400000
+; GFX11-NEXT: v_or_b32_e32 v13, 0x400000, v0
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_3)
-; GFX11-NEXT: v_cndmask_b32_e32 v5, v9, v11, vcc_lo
+; GFX11-NEXT: v_cndmask_b32_e32 v5, v10, v11, vcc_lo
; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0
; GFX11-NEXT: v_cndmask_b32_e32 v0, v12, v13, vcc_lo
; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
@@ -22699,9 +22183,9 @@ define <8 x bfloat> @v_maxnum_v8bf16(<8 x bfloat> %a, <8 x bfloat> %b) {
; GFX11-NEXT: v_cndmask_b32_e32 v1, v6, v15, vcc_lo
; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3
; GFX11-NEXT: v_perm_b32 v1, v1, v4, 0x7060302
-; GFX11-NEXT: v_cndmask_b32_e32 v3, v10, v14, vcc_lo
+; GFX11-NEXT: v_cndmask_b32_e32 v3, v9, v14, vcc_lo
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX11-NEXT: v_perm_b32 v3, v3, v7, 0x7060302
+; GFX11-NEXT: v_perm_b32 v3, v3, v8, 0x7060302
; GFX11-NEXT: s_setpc_b64 s[30:31]
%op = call <8 x bfloat> @llvm.maxnum.v8bf16(<8 x bfloat> %a, <8 x bfloat> %b)
ret <8 x bfloat> %op
@@ -22990,16 +22474,14 @@ define <16 x bfloat> @v_maxnum_v16bf16(<16 x bfloat> %a, <16 x bfloat> %b) {
; GFX8-NEXT: v_and_b32_e32 v15, 0xffff0000, v15
; GFX8-NEXT: v_and_b32_e32 v7, 0xffff0000, v7
; GFX8-NEXT: v_add_u32_e32 v17, vcc, s4, v17
-; GFX8-NEXT: v_and_b32_e32 v18, 0xff800000, v16
; GFX8-NEXT: v_max_f32_e32 v7, v7, v15
-; GFX8-NEXT: v_or_b32_e32 v18, 0x400000, v18
+; GFX8-NEXT: v_or_b32_e32 v18, 0x400000, v16
; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v16, v16
; GFX8-NEXT: v_bfe_u32 v15, v7, 16, 1
; GFX8-NEXT: v_cndmask_b32_e32 v16, v17, v18, vcc
; GFX8-NEXT: v_add_u32_e32 v15, vcc, v15, v7
; GFX8-NEXT: v_add_u32_e32 v15, vcc, s4, v15
-; GFX8-NEXT: v_and_b32_e32 v17, 0xff800000, v7
-; GFX8-NEXT: v_or_b32_e32 v17, 0x400000, v17
+; GFX8-NEXT: v_or_b32_e32 v17, 0x400000, v7
; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v7, v7
; GFX8-NEXT: v_cndmask_b32_e32 v7, v15, v17, vcc
; GFX8-NEXT: v_lshlrev_b32_e32 v15, 16, v14
@@ -23010,16 +22492,14 @@ define <16 x bfloat> @v_maxnum_v16bf16(<16 x bfloat> %a, <16 x bfloat> %b) {
; GFX8-NEXT: v_and_b32_e32 v14, 0xffff0000, v14
; GFX8-NEXT: v_and_b32_e32 v6, 0xffff0000, v6
; GFX8-NEXT: v_add_u32_e32 v17, vcc, s4, v17
-; GFX8-NEXT: v_and_b32_e32 v18, 0xff800000, v15
; GFX8-NEXT: v_max_f32_e32 v6, v6, v14
-; GFX8-NEXT: v_or_b32_e32 v18, 0x400000, v18
+; GFX8-NEXT: v_or_b32_e32 v18, 0x400000, v15
; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v15, v15
; GFX8-NEXT: v_bfe_u32 v14, v6, 16, 1
; GFX8-NEXT: v_cndmask_b32_e32 v15, v17, v18, vcc
; GFX8-NEXT: v_add_u32_e32 v14, vcc, v14, v6
; GFX8-NEXT: v_add_u32_e32 v14, vcc, s4, v14
-; GFX8-NEXT: v_and_b32_e32 v17, 0xff800000, v6
-; GFX8-NEXT: v_or_b32_e32 v17, 0x400000, v17
+; GFX8-NEXT: v_or_b32_e32 v17, 0x400000, v6
; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v6, v6
; GFX8-NEXT: v_cndmask_b32_e32 v6, v14, v17, vcc
; GFX8-NEXT: v_lshlrev_b32_e32 v14, 16, v13
@@ -23030,16 +22510,14 @@ define <16 x bfloat> @v_maxnum_v16bf16(<16 x bfloat> %a, <16 x bfloat> %b) {
; GFX8-NEXT: v_and_b32_e32 v13, 0xffff0000, v13
; GFX8-NEXT: v_and_b32_e32 v5, 0xffff0000, v5
; GFX8-NEXT: v_add_u32_e32 v17, vcc, s4, v17
-; GFX8-NEXT: v_and_b32_e32 v18, 0xff800000, v14
; GFX8-NEXT: v_max_f32_e32 v5, v5, v13
-; GFX8-NEXT: v_or_b32_e32 v18, 0x400000, v18
+; GFX8-NEXT: v_or_b32_e32 v18, 0x400000, v14
; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v14, v14
; GFX8-NEXT: v_bfe_u32 v13, v5, 16, 1
; GFX8-NEXT: v_cndmask_b32_e32 v14, v17, v18, vcc
; GFX8-NEXT: v_add_u32_e32 v13, vcc, v13, v5
; GFX8-NEXT: v_add_u32_e32 v13, vcc, s4, v13
-; GFX8-NEXT: v_and_b32_e32 v17, 0xff800000, v5
-; GFX8-NEXT: v_or_b32_e32 v17, 0x400000, v17
+; GFX8-NEXT: v_or_b32_e32 v17, 0x400000, v5
; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v5, v5
; GFX8-NEXT: v_cndmask_b32_e32 v5, v13, v17, vcc
; GFX8-NEXT: v_lshlrev_b32_e32 v13, 16, v12
@@ -23050,16 +22528,14 @@ define <16 x bfloat> @v_maxnum_v16bf16(<16 x bfloat> %a, <16 x bfloat> %b) {
; GFX8-NEXT: v_and_b32_e32 v12, 0xffff0000, v12
; GFX8-NEXT: v_and_b32_e32 v4, 0xffff0000, v4
; GFX8-NEXT: v_add_u32_e32 v17, vcc, s4, v17
-; GFX8-NEXT: v_and_b32_e32 v18, 0xff800000, v13
; GFX8-NEXT: v_max_f32_e32 v4, v4, v12
-; GFX8-NEXT: v_or_b32_e32 v18, 0x400000, v18
+; GFX8-NEXT: v_or_b32_e32 v18, 0x400000, v13
; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v13, v13
; GFX8-NEXT: v_bfe_u32 v12, v4, 16, 1
; GFX8-NEXT: v_cndmask_b32_e32 v13, v17, v18, vcc
; GFX8-NEXT: v_add_u32_e32 v12, vcc, v12, v4
; GFX8-NEXT: v_add_u32_e32 v12, vcc, s4, v12
-; GFX8-NEXT: v_and_b32_e32 v17, 0xff800000, v4
-; GFX8-NEXT: v_or_b32_e32 v17, 0x400000, v17
+; GFX8-NEXT: v_or_b32_e32 v17, 0x400000, v4
; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v4, v4
; GFX8-NEXT: v_cndmask_b32_e32 v4, v12, v17, vcc
; GFX8-NEXT: v_lshlrev_b32_e32 v12, 16, v11
@@ -23070,16 +22546,14 @@ define <16 x bfloat> @v_maxnum_v16bf16(<16 x bfloat> %a, <16 x bfloat> %b) {
; GFX8-NEXT: v_and_b32_e32 v11, 0xffff0000, v11
; GFX8-NEXT: v_and_b32_e32 v3, 0xffff0000, v3
; GFX8-NEXT: v_add_u32_e32 v17, vcc, s4, v17
-; GFX8-NEXT: v_and_b32_e32 v18, 0xff800000, v12
; GFX8-NEXT: v_max_f32_e32 v3, v3, v11
-; GFX8-NEXT: v_or_b32_e32 v18, 0x400000, v18
+; GFX8-NEXT: v_or_b32_e32 v18, 0x400000, v12
; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v12, v12
; GFX8-NEXT: v_bfe_u32 v11, v3, 16, 1
; GFX8-NEXT: v_cndmask_b32_e32 v12, v17, v18, vcc
; GFX8-NEXT: v_add_u32_e32 v11, vcc, v11, v3
; GFX8-NEXT: v_add_u32_e32 v11, vcc, s4, v11
-; GFX8-NEXT: v_and_b32_e32 v17, 0xff800000, v3
-; GFX8-NEXT: v_or_b32_e32 v17, 0x400000, v17
+; GFX8-NEXT: v_or_b32_e32 v17, 0x400000, v3
; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v3, v3
; GFX8-NEXT: v_cndmask_b32_e32 v3, v11, v17, vcc
; GFX8-NEXT: v_lshlrev_b32_e32 v11, 16, v10
@@ -23090,16 +22564,14 @@ define <16 x bfloat> @v_maxnum_v16bf16(<16 x bfloat> %a, <16 x bfloat> %b) {
; GFX8-NEXT: v_and_b32_e32 v10, 0xffff0000, v10
; GFX8-NEXT: v_and_b32_e32 v2, 0xffff0000, v2
; GFX8-NEXT: v_add_u32_e32 v17, vcc, s4, v17
-; GFX8-NEXT: v_and_b32_e32 v18, 0xff800000, v11
; GFX8-NEXT: v_max_f32_e32 v2, v2, v10
-; GFX8-NEXT: v_or_b32_e32 v18, 0x400000, v18
+; GFX8-NEXT: v_or_b32_e32 v18, 0x400000, v11
; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v11, v11
; GFX8-NEXT: v_bfe_u32 v10, v2, 16, 1
; GFX8-NEXT: v_cndmask_b32_e32 v11, v17, v18, vcc
; GFX8-NEXT: v_add_u32_e32 v10, vcc, v10, v2
; GFX8-NEXT: v_add_u32_e32 v10, vcc, s4, v10
-; GFX8-NEXT: v_and_b32_e32 v17, 0xff800000, v2
-; GFX8-NEXT: v_or_b32_e32 v17, 0x400000, v17
+; GFX8-NEXT: v_or_b32_e32 v17, 0x400000, v2
; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v2, v2
; GFX8-NEXT: v_cndmask_b32_e32 v2, v10, v17, vcc
; GFX8-NEXT: v_lshlrev_b32_e32 v10, 16, v9
@@ -23110,16 +22582,14 @@ define <16 x bfloat> @v_maxnum_v16bf16(<16 x bfloat> %a, <16 x bfloat> %b) {
; GFX8-NEXT: v_and_b32_e32 v9, 0xffff0000, v9
; GFX8-NEXT: v_and_b32_e32 v1, 0xffff0000, v1
; GFX8-NEXT: v_add_u32_e32 v17, vcc, s4, v17
-; GFX8-NEXT: v_and_b32_e32 v18, 0xff800000, v10
; GFX8-NEXT: v_max_f32_e32 v1, v1, v9
-; GFX8-NEXT: v_or_b32_e32 v18, 0x400000, v18
+; GFX8-NEXT: v_or_b32_e32 v18, 0x400000, v10
; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v10, v10
; GFX8-NEXT: v_bfe_u32 v9, v1, 16, 1
; GFX8-NEXT: v_cndmask_b32_e32 v10, v17, v18, vcc
; GFX8-NEXT: v_add_u32_e32 v9, vcc, v9, v1
; GFX8-NEXT: v_add_u32_e32 v9, vcc, s4, v9
-; GFX8-NEXT: v_and_b32_e32 v17, 0xff800000, v1
-; GFX8-NEXT: v_or_b32_e32 v17, 0x400000, v17
+; GFX8-NEXT: v_or_b32_e32 v17, 0x400000, v1
; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v1, v1
; GFX8-NEXT: v_cndmask_b32_e32 v1, v9, v17, vcc
; GFX8-NEXT: v_lshlrev_b32_e32 v9, 16, v8
@@ -23130,16 +22600,14 @@ define <16 x bfloat> @v_maxnum_v16bf16(<16 x bfloat> %a, <16 x bfloat> %b) {
; GFX8-NEXT: v_and_b32_e32 v8, 0xffff0000, v8
; GFX8-NEXT: v_and_b32_e32 v0, 0xffff0000, v0
; GFX8-NEXT: v_add_u32_e32 v17, vcc, s4, v17
-; GFX8-NEXT: v_and_b32_e32 v18, 0xff800000, v9
; GFX8-NEXT: v_max_f32_e32 v0, v0, v8
-; GFX8-NEXT: v_or_b32_e32 v18, 0x400000, v18
+; GFX8-NEXT: v_or_b32_e32 v18, 0x400000, v9
; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v9, v9
; GFX8-NEXT: v_bfe_u32 v8, v0, 16, 1
; GFX8-NEXT: v_cndmask_b32_e32 v9, v17, v18, vcc
; GFX8-NEXT: v_add_u32_e32 v8, vcc, v8, v0
; GFX8-NEXT: v_add_u32_e32 v8, vcc, s4, v8
-; GFX8-NEXT: v_and_b32_e32 v17, 0xff800000, v0
-; GFX8-NEXT: v_or_b32_e32 v17, 0x400000, v17
+; GFX8-NEXT: v_or_b32_e32 v17, 0x400000, v0
; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v0, v0
; GFX8-NEXT: v_cndmask_b32_e32 v0, v8, v17, vcc
; GFX8-NEXT: v_lshrrev_b32_e32 v7, 16, v7
@@ -23166,146 +22634,130 @@ define <16 x bfloat> @v_maxnum_v16bf16(<16 x bfloat> %a, <16 x bfloat> %b) {
; GFX9-NEXT: v_lshlrev_b32_e32 v16, 16, v15
; GFX9-NEXT: v_lshlrev_b32_e32 v17, 16, v7
; GFX9-NEXT: v_max_f32_e32 v16, v17, v16
-; GFX9-NEXT: v_bfe_u32 v17, v16, 16, 1
-; GFX9-NEXT: s_movk_i32 s4, 0x7fff
-; GFX9-NEXT: v_and_b32_e32 v18, 0xff800000, v16
; GFX9-NEXT: v_and_b32_e32 v15, 0xffff0000, v15
; GFX9-NEXT: v_and_b32_e32 v7, 0xffff0000, v7
+; GFX9-NEXT: v_bfe_u32 v17, v16, 16, 1
+; GFX9-NEXT: s_movk_i32 s4, 0x7fff
+; GFX9-NEXT: v_max_f32_e32 v7, v7, v15
; GFX9-NEXT: v_add3_u32 v17, v17, v16, s4
-; GFX9-NEXT: v_or_b32_e32 v18, 0x400000, v18
+; GFX9-NEXT: v_or_b32_e32 v18, 0x400000, v16
; GFX9-NEXT: v_cmp_u_f32_e32 vcc, v16, v16
-; GFX9-NEXT: v_max_f32_e32 v7, v7, v15
-; GFX9-NEXT: v_cndmask_b32_e32 v16, v17, v18, vcc
; GFX9-NEXT: v_bfe_u32 v15, v7, 16, 1
-; GFX9-NEXT: v_and_b32_e32 v17, 0xff800000, v7
+; GFX9-NEXT: v_cndmask_b32_e32 v16, v17, v18, vcc
; GFX9-NEXT: v_add3_u32 v15, v15, v7, s4
-; GFX9-NEXT: v_or_b32_e32 v17, 0x400000, v17
+; GFX9-NEXT: v_or_b32_e32 v17, 0x400000, v7
; GFX9-NEXT: v_cmp_u_f32_e32 vcc, v7, v7
; GFX9-NEXT: v_cndmask_b32_e32 v7, v15, v17, vcc
; GFX9-NEXT: v_lshlrev_b32_e32 v15, 16, v14
; GFX9-NEXT: v_lshlrev_b32_e32 v17, 16, v6
; GFX9-NEXT: v_max_f32_e32 v15, v17, v15
-; GFX9-NEXT: v_bfe_u32 v17, v15, 16, 1
-; GFX9-NEXT: v_and_b32_e32 v18, 0xff800000, v15
; GFX9-NEXT: v_and_b32_e32 v14, 0xffff0000, v14
; GFX9-NEXT: v_and_b32_e32 v6, 0xffff0000, v6
+; GFX9-NEXT: v_bfe_u32 v17, v15, 16, 1
+; GFX9-NEXT: v_max_f32_e32 v6, v6, v14
; GFX9-NEXT: v_add3_u32 v17, v17, v15, s4
-; GFX9-NEXT: v_or_b32_e32 v18, 0x400000, v18
+; GFX9-NEXT: v_or_b32_e32 v18, 0x400000, v15
; GFX9-NEXT: v_cmp_u_f32_e32 vcc, v15, v15
-; GFX9-NEXT: v_max_f32_e32 v6, v6, v14
-; GFX9-NEXT: v_cndmask_b32_e32 v15, v17, v18, vcc
; GFX9-NEXT: v_bfe_u32 v14, v6, 16, 1
-; GFX9-NEXT: v_and_b32_e32 v17, 0xff800000, v6
+; GFX9-NEXT: v_cndmask_b32_e32 v15, v17, v18, vcc
; GFX9-NEXT: v_add3_u32 v14, v14, v6, s4
-; GFX9-NEXT: v_or_b32_e32 v17, 0x400000, v17
+; GFX9-NEXT: v_or_b32_e32 v17, 0x400000, v6
; GFX9-NEXT: v_cmp_u_f32_e32 vcc, v6, v6
; GFX9-NEXT: v_cndmask_b32_e32 v6, v14, v17, vcc
; GFX9-NEXT: v_lshlrev_b32_e32 v14, 16, v13
; GFX9-NEXT: v_lshlrev_b32_e32 v17, 16, v5
; GFX9-NEXT: v_max_f32_e32 v14, v17, v14
-; GFX9-NEXT: v_bfe_u32 v17, v14, 16, 1
-; GFX9-NEXT: v_and_b32_e32 v18, 0xff800000, v14
; GFX9-NEXT: v_and_b32_e32 v13, 0xffff0000, v13
; GFX9-NEXT: v_and_b32_e32 v5, 0xffff0000, v5
+; GFX9-NEXT: v_bfe_u32 v17, v14, 16, 1
+; GFX9-NEXT: v_max_f32_e32 v5, v5, v13
; GFX9-NEXT: v_add3_u32 v17, v17, v14, s4
-; GFX9-NEXT: v_or_b32_e32 v18, 0x400000, v18
+; GFX9-NEXT: v_or_b32_e32 v18, 0x400000, v14
; GFX9-NEXT: v_cmp_u_f32_e32 vcc, v14, v14
-; GFX9-NEXT: v_max_f32_e32 v5, v5, v13
-; GFX9-NEXT: v_cndmask_b32_e32 v14, v17, v18, vcc
; GFX9-NEXT: v_bfe_u32 v13, v5, 16, 1
-; GFX9-NEXT: v_and_b32_e32 v17, 0xff800000, v5
+; GFX9-NEXT: v_cndmask_b32_e32 v14, v17, v18, vcc
; GFX9-NEXT: v_add3_u32 v13, v13, v5, s4
-; GFX9-NEXT: v_or_b32_e32 v17, 0x400000, v17
+; GFX9-NEXT: v_or_b32_e32 v17, 0x400000, v5
; GFX9-NEXT: v_cmp_u_f32_e32 vcc, v5, v5
; GFX9-NEXT: v_cndmask_b32_e32 v5, v13, v17, vcc
; GFX9-NEXT: v_lshlrev_b32_e32 v13, 16, v12
; GFX9-NEXT: v_lshlrev_b32_e32 v17, 16, v4
; GFX9-NEXT: v_max_f32_e32 v13, v17, v13
-; GFX9-NEXT: v_bfe_u32 v17, v13, 16, 1
-; GFX9-NEXT: v_and_b32_e32 v18, 0xff800000, v13
; GFX9-NEXT: v_and_b32_e32 v12, 0xffff0000, v12
; GFX9-NEXT: v_and_b32_e32 v4, 0xffff0000, v4
+; GFX9-NEXT: v_bfe_u32 v17, v13, 16, 1
+; GFX9-NEXT: v_max_f32_e32 v4, v4, v12
; GFX9-NEXT: v_add3_u32 v17, v17, v13, s4
-; GFX9-NEXT: v_or_b32_e32 v18, 0x400000, v18
+; GFX9-NEXT: v_or_b32_e32 v18, 0x400000, v13
; GFX9-NEXT: v_cmp_u_f32_e32 vcc, v13, v13
-; GFX9-NEXT: v_max_f32_e32 v4, v4, v12
-; GFX9-NEXT: v_cndmask_b32_e32 v13, v17, v18, vcc
; GFX9-NEXT: v_bfe_u32 v12, v4, 16, 1
-; GFX9-NEXT: v_and_b32_e32 v17, 0xff800000, v4
+; GFX9-NEXT: v_cndmask_b32_e32 v13, v17, v18, vcc
; GFX9-NEXT: v_add3_u32 v12, v12, v4, s4
-; GFX9-NEXT: v_or_b32_e32 v17, 0x400000, v17
+; GFX9-NEXT: v_or_b32_e32 v17, 0x400000, v4
; GFX9-NEXT: v_cmp_u_f32_e32 vcc, v4, v4
; GFX9-NEXT: v_cndmask_b32_e32 v4, v12, v17, vcc
; GFX9-NEXT: v_lshlrev_b32_e32 v12, 16, v11
; GFX9-NEXT: v_lshlrev_b32_e32 v17, 16, v3
; GFX9-NEXT: v_max_f32_e32 v12, v17, v12
-; GFX9-NEXT: v_bfe_u32 v17, v12, 16, 1
-; GFX9-NEXT: v_and_b32_e32 v18, 0xff800000, v12
; GFX9-NEXT: v_and_b32_e32 v11, 0xffff0000, v11
; GFX9-NEXT: v_and_b32_e32 v3, 0xffff0000, v3
+; GFX9-NEXT: v_bfe_u32 v17, v12, 16, 1
+; GFX9-NEXT: v_max_f32_e32 v3, v3, v11
; GFX9-NEXT: v_add3_u32 v17, v17, v12, s4
-; GFX9-NEXT: v_or_b32_e32 v18, 0x400000, v18
+; GFX9-NEXT: v_or_b32_e32 v18, 0x400000, v12
; GFX9-NEXT: v_cmp_u_f32_e32 vcc, v12, v12
-; GFX9-NEXT: v_max_f32_e32 v3, v3, v11
-; GFX9-NEXT: v_cndmask_b32_e32 v12, v17, v18, vcc
; GFX9-NEXT: v_bfe_u32 v11, v3, 16, 1
-; GFX9-NEXT: v_and_b32_e32 v17, 0xff800000, v3
+; GFX9-NEXT: v_cndmask_b32_e32 v12, v17, v18, vcc
; GFX9-NEXT: v_add3_u32 v11, v11, v3, s4
-; GFX9-NEXT: v_or_b32_e32 v17, 0x400000, v17
+; GFX9-NEXT: v_or_b32_e32 v17, 0x400000, v3
; GFX9-NEXT: v_cmp_u_f32_e32 vcc, v3, v3
; GFX9-NEXT: v_cndmask_b32_e32 v3, v11, v17, vcc
; GFX9-NEXT: v_lshlrev_b32_e32 v11, 16, v10
; GFX9-NEXT: v_lshlrev_b32_e32 v17, 16, v2
; GFX9-NEXT: v_max_f32_e32 v11, v17, v11
-; GFX9-NEXT: v_bfe_u32 v17, v11, 16, 1
-; GFX9-NEXT: v_and_b32_e32 v18, 0xff800000, v11
; GFX9-NEXT: v_and_b32_e32 v10, 0xffff0000, v10
; GFX9-NEXT: v_and_b32_e32 v2, 0xffff0000, v2
+; GFX9-NEXT: v_bfe_u32 v17, v11, 16, 1
+; GFX9-NEXT: v_max_f32_e32 v2, v2, v10
; GFX9-NEXT: v_add3_u32 v17, v17, v11, s4
-; GFX9-NEXT: v_or_b32_e32 v18, 0x400000, v18
+; GFX9-NEXT: v_or_b32_e32 v18, 0x400000, v11
; GFX9-NEXT: v_cmp_u_f32_e32 vcc, v11, v11
-; GFX9-NEXT: v_max_f32_e32 v2, v2, v10
-; GFX9-NEXT: v_cndmask_b32_e32 v11, v17, v18, vcc
; GFX9-NEXT: v_bfe_u32 v10, v2, 16, 1
-; GFX9-NEXT: v_and_b32_e32 v17, 0xff800000, v2
+; GFX9-NEXT: v_cndmask_b32_e32 v11, v17, v18, vcc
; GFX9-NEXT: v_add3_u32 v10, v10, v2, s4
-; GFX9-NEXT: v_or_b32_e32 v17, 0x400000, v17
+; GFX9-NEXT: v_or_b32_e32 v17, 0x400000, v2
; GFX9-NEXT: v_cmp_u_f32_e32 vcc, v2, v2
; GFX9-NEXT: v_cndmask_b32_e32 v2, v10, v17, vcc
; GFX9-NEXT: v_lshlrev_b32_e32 v10, 16, v9
; GFX9-NEXT: v_lshlrev_b32_e32 v17, 16, v1
; GFX9-NEXT: v_max_f32_e32 v10, v17, v10
-; GFX9-NEXT: v_bfe_u32 v17, v10, 16, 1
-; GFX9-NEXT: v_and_b32_e32 v18, 0xff800000, v10
; GFX9-NEXT: v_and_b32_e32 v9, 0xffff0000, v9
; GFX9-NEXT: v_and_b32_e32 v1, 0xffff0000, v1
+; GFX9-NEXT: v_bfe_u32 v17, v10, 16, 1
+; GFX9-NEXT: v_max_f32_e32 v1, v1, v9
; GFX9-NEXT: v_add3_u32 v17, v17, v10, s4
-; GFX9-NEXT: v_or_b32_e32 v18, 0x400000, v18
+; GFX9-NEXT: v_or_b32_e32 v18, 0x400000, v10
; GFX9-NEXT: v_cmp_u_f32_e32 vcc, v10, v10
-; GFX9-NEXT: v_max_f32_e32 v1, v1, v9
-; GFX9-NEXT: v_cndmask_b32_e32 v10, v17, v18, vcc
; GFX9-NEXT: v_bfe_u32 v9, v1, 16, 1
-; GFX9-NEXT: v_and_b32_e32 v17, 0xff800000, v1
+; GFX9-NEXT: v_cndmask_b32_e32 v10, v17, v18, vcc
; GFX9-NEXT: v_add3_u32 v9, v9, v1, s4
-; GFX9-NEXT: v_or_b32_e32 v17, 0x400000, v17
+; GFX9-NEXT: v_or_b32_e32 v17, 0x400000, v1
; GFX9-NEXT: v_cmp_u_f32_e32 vcc, v1, v1
; GFX9-NEXT: v_cndmask_b32_e32 v1, v9, v17, vcc
; GFX9-NEXT: v_lshlrev_b32_e32 v9, 16, v8
; GFX9-NEXT: v_lshlrev_b32_e32 v17, 16, v0
; GFX9-NEXT: v_max_f32_e32 v9, v17, v9
-; GFX9-NEXT: v_bfe_u32 v17, v9, 16, 1
-; GFX9-NEXT: v_and_b32_e32 v18, 0xff800000, v9
; GFX9-NEXT: v_and_b32_e32 v8, 0xffff0000, v8
; GFX9-NEXT: v_and_b32_e32 v0, 0xffff0000, v0
+; GFX9-NEXT: v_bfe_u32 v17, v9, 16, 1
+; GFX9-NEXT: v_max_f32_e32 v0, v0, v8
; GFX9-NEXT: v_add3_u32 v17, v17, v9, s4
-; GFX9-NEXT: v_or_b32_e32 v18, 0x400000, v18
+; GFX9-NEXT: v_or_b32_e32 v18, 0x400000, v9
; GFX9-NEXT: v_cmp_u_f32_e32 vcc, v9, v9
-; GFX9-NEXT: v_max_f32_e32 v0, v0, v8
-; GFX9-NEXT: v_cndmask_b32_e32 v9, v17, v18, vcc
; GFX9-NEXT: v_bfe_u32 v8, v0, 16, 1
-; GFX9-NEXT: v_and_b32_e32 v17, 0xff800000, v0
+; GFX9-NEXT: v_cndmask_b32_e32 v9, v17, v18, vcc
; GFX9-NEXT: v_add3_u32 v8, v8, v0, s4
-; GFX9-NEXT: v_or_b32_e32 v17, 0x400000, v17
+; GFX9-NEXT: v_or_b32_e32 v17, 0x400000, v0
; GFX9-NEXT: v_cmp_u_f32_e32 vcc, v0, v0
; GFX9-NEXT: v_cndmask_b32_e32 v0, v8, v17, vcc
; GFX9-NEXT: s_mov_b32 s4, 0x7060302
@@ -23326,27 +22778,26 @@ define <16 x bfloat> @v_maxnum_v16bf16(<16 x bfloat> %a, <16 x bfloat> %b) {
; GFX10-NEXT: v_lshlrev_b32_e32 v17, 16, v7
; GFX10-NEXT: v_and_b32_e32 v15, 0xffff0000, v15
; GFX10-NEXT: v_and_b32_e32 v7, 0xffff0000, v7
-; GFX10-NEXT: s_mov_b32 s4, 0xff800000
; GFX10-NEXT: v_lshlrev_b32_e32 v18, 16, v6
+; GFX10-NEXT: v_and_b32_e32 v6, 0xffff0000, v6
; GFX10-NEXT: v_max_f32_e32 v16, v17, v16
; GFX10-NEXT: v_lshlrev_b32_e32 v17, 16, v14
; GFX10-NEXT: v_max_f32_e32 v7, v7, v15
; GFX10-NEXT: v_and_b32_e32 v14, 0xffff0000, v14
-; GFX10-NEXT: v_and_b32_e32 v6, 0xffff0000, v6
; GFX10-NEXT: v_bfe_u32 v15, v16, 16, 1
-; GFX10-NEXT: v_and_or_b32 v20, v16, s4, 0x400000
+; GFX10-NEXT: v_or_b32_e32 v20, 0x400000, v16
; GFX10-NEXT: v_bfe_u32 v19, v7, 16, 1
; GFX10-NEXT: v_cmp_u_f32_e32 vcc_lo, v16, v16
; GFX10-NEXT: v_max_f32_e32 v17, v18, v17
; GFX10-NEXT: v_add3_u32 v15, v15, v16, 0x7fff
; GFX10-NEXT: v_max_f32_e32 v6, v6, v14
; GFX10-NEXT: v_add3_u32 v18, v19, v7, 0x7fff
-; GFX10-NEXT: v_and_or_b32 v19, v7, s4, 0x400000
+; GFX10-NEXT: v_or_b32_e32 v19, 0x400000, v7
; GFX10-NEXT: v_bfe_u32 v21, v17, 16, 1
; GFX10-NEXT: v_cndmask_b32_e32 v15, v15, v20, vcc_lo
; GFX10-NEXT: v_cmp_u_f32_e32 vcc_lo, v7, v7
; GFX10-NEXT: v_lshlrev_b32_e32 v20, 16, v5
-; GFX10-NEXT: v_and_or_b32 v16, v17, s4, 0x400000
+; GFX10-NEXT: v_or_b32_e32 v16, 0x400000, v17
; GFX10-NEXT: v_add3_u32 v14, v21, v17, 0x7fff
; GFX10-NEXT: v_and_b32_e32 v5, 0xffff0000, v5
; GFX10-NEXT: v_cndmask_b32_e32 v7, v18, v19, vcc_lo
@@ -23360,7 +22811,7 @@ define <16 x bfloat> @v_maxnum_v16bf16(<16 x bfloat> %a, <16 x bfloat> %b) {
; GFX10-NEXT: v_max_f32_e32 v5, v5, v13
; GFX10-NEXT: v_cndmask_b32_e32 v14, v14, v16, vcc_lo
; GFX10-NEXT: v_add3_u32 v16, v18, v6, 0x7fff
-; GFX10-NEXT: v_and_or_b32 v13, v6, s4, 0x400000
+; GFX10-NEXT: v_or_b32_e32 v13, 0x400000, v6
; GFX10-NEXT: v_lshlrev_b32_e32 v18, 16, v12
; GFX10-NEXT: v_bfe_u32 v20, v17, 16, 1
; GFX10-NEXT: v_cmp_u_f32_e32 vcc_lo, v6, v6
@@ -23370,10 +22821,10 @@ define <16 x bfloat> @v_maxnum_v16bf16(<16 x bfloat> %a, <16 x bfloat> %b) {
; GFX10-NEXT: v_cndmask_b32_e32 v6, v16, v13, vcc_lo
; GFX10-NEXT: v_max_f32_e32 v13, v19, v18
; GFX10-NEXT: v_add3_u32 v16, v20, v17, 0x7fff
-; GFX10-NEXT: v_and_or_b32 v18, v17, s4, 0x400000
+; GFX10-NEXT: v_or_b32_e32 v18, 0x400000, v17
; GFX10-NEXT: v_cmp_u_f32_e32 vcc_lo, v17, v17
; GFX10-NEXT: v_add3_u32 v19, v21, v5, 0x7fff
-; GFX10-NEXT: v_and_or_b32 v20, v5, s4, 0x400000
+; GFX10-NEXT: v_or_b32_e32 v20, 0x400000, v5
; GFX10-NEXT: v_bfe_u32 v21, v13, 16, 1
; GFX10-NEXT: v_max_f32_e32 v4, v4, v12
; GFX10-NEXT: v_cndmask_b32_e32 v16, v16, v18, vcc_lo
@@ -23383,14 +22834,14 @@ define <16 x bfloat> @v_maxnum_v16bf16(<16 x bfloat> %a, <16 x bfloat> %b) {
; GFX10-NEXT: v_add3_u32 v17, v21, v13, 0x7fff
; GFX10-NEXT: v_and_b32_e32 v11, 0xffff0000, v11
; GFX10-NEXT: v_cndmask_b32_e32 v5, v19, v20, vcc_lo
-; GFX10-NEXT: v_and_or_b32 v19, v13, s4, 0x400000
+; GFX10-NEXT: v_or_b32_e32 v19, 0x400000, v13
; GFX10-NEXT: v_and_b32_e32 v3, 0xffff0000, v3
; GFX10-NEXT: v_max_f32_e32 v12, v18, v12
; GFX10-NEXT: v_cmp_u_f32_e32 vcc_lo, v13, v13
; GFX10-NEXT: v_bfe_u32 v20, v4, 16, 1
; GFX10-NEXT: v_lshlrev_b32_e32 v18, 16, v10
; GFX10-NEXT: v_max_f32_e32 v3, v3, v11
-; GFX10-NEXT: v_and_or_b32 v22, v12, s4, 0x400000
+; GFX10-NEXT: v_or_b32_e32 v22, 0x400000, v12
; GFX10-NEXT: v_cndmask_b32_e32 v13, v17, v19, vcc_lo
; GFX10-NEXT: v_bfe_u32 v17, v12, 16, 1
; GFX10-NEXT: v_lshlrev_b32_e32 v19, 16, v2
@@ -23402,12 +22853,12 @@ define <16 x bfloat> @v_maxnum_v16bf16(<16 x bfloat> %a, <16 x bfloat> %b) {
; GFX10-NEXT: v_and_b32_e32 v2, 0xffff0000, v2
; GFX10-NEXT: v_cmp_u_f32_e32 vcc_lo, v12, v12
; GFX10-NEXT: v_add3_u32 v19, v20, v3, 0x7fff
-; GFX10-NEXT: v_and_or_b32 v20, v3, s4, 0x400000
+; GFX10-NEXT: v_or_b32_e32 v20, 0x400000, v3
; GFX10-NEXT: v_bfe_u32 v23, v18, 16, 1
; GFX10-NEXT: v_max_f32_e32 v2, v2, v10
; GFX10-NEXT: v_cndmask_b32_e32 v12, v17, v22, vcc_lo
; GFX10-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3
-; GFX10-NEXT: v_and_or_b32 v17, v18, s4, 0x400000
+; GFX10-NEXT: v_or_b32_e32 v17, 0x400000, v18
; GFX10-NEXT: v_add3_u32 v10, v23, v18, 0x7fff
; GFX10-NEXT: v_lshlrev_b32_e32 v22, 16, v1
; GFX10-NEXT: v_and_b32_e32 v1, 0xffff0000, v1
@@ -23416,8 +22867,8 @@ define <16 x bfloat> @v_maxnum_v16bf16(<16 x bfloat> %a, <16 x bfloat> %b) {
; GFX10-NEXT: v_lshlrev_b32_e32 v20, 16, v9
; GFX10-NEXT: v_cmp_u_f32_e32 vcc_lo, v18, v18
; GFX10-NEXT: v_and_b32_e32 v9, 0xffff0000, v9
-; GFX10-NEXT: v_and_or_b32 v18, v2, s4, 0x400000
-; GFX10-NEXT: v_and_or_b32 v21, v4, s4, 0x400000
+; GFX10-NEXT: v_or_b32_e32 v18, 0x400000, v2
+; GFX10-NEXT: v_or_b32_e32 v21, 0x400000, v4
; GFX10-NEXT: v_perm_b32 v3, v3, v12, 0x7060302
; GFX10-NEXT: v_cndmask_b32_e32 v10, v10, v17, vcc_lo
; GFX10-NEXT: v_add3_u32 v17, v19, v2, 0x7fff
@@ -23429,17 +22880,17 @@ define <16 x bfloat> @v_maxnum_v16bf16(<16 x bfloat> %a, <16 x bfloat> %b) {
; GFX10-NEXT: v_bfe_u32 v23, v19, 16, 1
; GFX10-NEXT: v_max_f32_e32 v1, v1, v9
; GFX10-NEXT: v_max_f32_e32 v9, v22, v20
-; GFX10-NEXT: v_and_or_b32 v22, v19, s4, 0x400000
+; GFX10-NEXT: v_or_b32_e32 v22, 0x400000, v19
; GFX10-NEXT: v_max_f32_e32 v0, v0, v8
; GFX10-NEXT: v_add3_u32 v20, v23, v19, 0x7fff
; GFX10-NEXT: v_bfe_u32 v8, v1, 16, 1
; GFX10-NEXT: v_cmp_u_f32_e32 vcc_lo, v19, v19
; GFX10-NEXT: v_bfe_u32 v23, v9, 16, 1
-; GFX10-NEXT: v_and_or_b32 v24, v9, s4, 0x400000
-; GFX10-NEXT: v_and_or_b32 v25, v0, s4, 0x400000
+; GFX10-NEXT: v_or_b32_e32 v24, 0x400000, v9
+; GFX10-NEXT: v_or_b32_e32 v25, 0x400000, v0
; GFX10-NEXT: v_add3_u32 v8, v8, v1, 0x7fff
; GFX10-NEXT: v_cndmask_b32_e32 v19, v20, v22, vcc_lo
-; GFX10-NEXT: v_and_or_b32 v22, v1, s4, 0x400000
+; GFX10-NEXT: v_or_b32_e32 v22, 0x400000, v1
; GFX10-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
; GFX10-NEXT: v_bfe_u32 v20, v0, 16, 1
; GFX10-NEXT: v_add3_u32 v23, v23, v9, 0x7fff
@@ -23468,12 +22919,11 @@ define <16 x bfloat> @v_maxnum_v16bf16(<16 x bfloat> %a, <16 x bfloat> %b) {
; GFX11-NEXT: v_lshlrev_b32_e32 v16, 16, v15
; GFX11-NEXT: v_and_b32_e32 v6, 0xffff0000, v6
; GFX11-NEXT: v_lshlrev_b32_e32 v17, 16, v7
-; GFX11-NEXT: v_and_b32_e32 v15, 0xffff0000, v15
-; GFX11-NEXT: s_mov_b32 s0, 0xff800000
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_2)
-; GFX11-NEXT: v_dual_max_f32 v16, v17, v16 :: v_dual_lshlrev_b32 v17, 16, v14
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
+; GFX11-NEXT: v_dual_max_f32 v16, v17, v16 :: v_dual_and_b32 v15, 0xffff0000, v15
+; GFX11-NEXT: v_lshlrev_b32_e32 v17, 16, v14
; GFX11-NEXT: v_and_b32_e32 v14, 0xffff0000, v14
-; GFX11-NEXT: v_and_or_b32 v20, v16, s0, 0x400000
+; GFX11-NEXT: v_or_b32_e32 v20, 0x400000, v16
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
; GFX11-NEXT: v_max_f32_e32 v17, v18, v17
; GFX11-NEXT: v_max_f32_e32 v6, v6, v14
@@ -23486,13 +22936,13 @@ define <16 x bfloat> @v_maxnum_v16bf16(<16 x bfloat> %a, <16 x bfloat> %b) {
; GFX11-NEXT: v_max_f32_e32 v7, v7, v15
; GFX11-NEXT: v_bfe_u32 v15, v16, 16, 1
; GFX11-NEXT: v_add3_u32 v15, v15, v16, 0x7fff
-; GFX11-NEXT: v_and_or_b32 v16, v17, s0, 0x400000
+; GFX11-NEXT: v_or_b32_e32 v16, 0x400000, v17
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_2) | instid1(VALU_DEP_2)
; GFX11-NEXT: v_dual_cndmask_b32 v15, v15, v20 :: v_dual_lshlrev_b32 v20, 16, v5
; GFX11-NEXT: v_bfe_u32 v19, v7, 16, 1
; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v7, v7
; GFX11-NEXT: v_add3_u32 v18, v19, v7, 0x7fff
-; GFX11-NEXT: v_and_or_b32 v19, v7, s0, 0x400000
+; GFX11-NEXT: v_or_b32_e32 v19, 0x400000, v7
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_3) | instid1(VALU_DEP_4)
; GFX11-NEXT: v_cndmask_b32_e32 v7, v18, v19, vcc_lo
; GFX11-NEXT: v_bfe_u32 v18, v6, 16, 1
@@ -23514,32 +22964,32 @@ define <16 x bfloat> @v_maxnum_v16bf16(<16 x bfloat> %a, <16 x bfloat> %b) {
; GFX11-NEXT: v_lshlrev_b32_e32 v12, 16, v11
; GFX11-NEXT: v_and_b32_e32 v5, 0xffff0000, v5
; GFX11-NEXT: v_max_f32_e32 v5, v5, v13
-; GFX11-NEXT: v_and_or_b32 v13, v6, s0, 0x400000
+; GFX11-NEXT: v_or_b32_e32 v13, 0x400000, v6
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_3) | instid1(VALU_DEP_4)
; GFX11-NEXT: v_dual_cndmask_b32 v6, v16, v13 :: v_dual_max_f32 v13, v19, v18
; GFX11-NEXT: v_add3_u32 v16, v20, v17, 0x7fff
-; GFX11-NEXT: v_and_or_b32 v18, v17, s0, 0x400000
+; GFX11-NEXT: v_or_b32_e32 v18, 0x400000, v17
; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v17, v17
; GFX11-NEXT: v_perm_b32 v6, v6, v14, 0x7060302
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_4) | instid1(VALU_DEP_4)
; GFX11-NEXT: v_cndmask_b32_e32 v16, v16, v18, vcc_lo
; GFX11-NEXT: v_lshlrev_b32_e32 v18, 16, v3
; GFX11-NEXT: v_bfe_u32 v21, v5, 16, 1
-; GFX11-NEXT: v_and_or_b32 v20, v5, s0, 0x400000
+; GFX11-NEXT: v_or_b32_e32 v20, 0x400000, v5
; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
; GFX11-NEXT: v_max_f32_e32 v12, v18, v12
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_2) | instid1(VALU_DEP_4)
; GFX11-NEXT: v_add3_u32 v19, v21, v5, 0x7fff
; GFX11-NEXT: v_bfe_u32 v21, v13, 16, 1
; GFX11-NEXT: v_lshlrev_b32_e32 v18, 16, v10
-; GFX11-NEXT: v_and_or_b32 v22, v12, s0, 0x400000
+; GFX11-NEXT: v_or_b32_e32 v22, 0x400000, v12
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
; GFX11-NEXT: v_cndmask_b32_e32 v5, v19, v20, vcc_lo
; GFX11-NEXT: v_add3_u32 v17, v21, v13, 0x7fff
-; GFX11-NEXT: v_and_or_b32 v19, v13, s0, 0x400000
+; GFX11-NEXT: v_or_b32_e32 v19, 0x400000, v13
; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v13, v13
; GFX11-NEXT: v_bfe_u32 v20, v4, 16, 1
-; GFX11-NEXT: v_and_or_b32 v21, v4, s0, 0x400000
+; GFX11-NEXT: v_or_b32_e32 v21, 0x400000, v4
; GFX11-NEXT: v_perm_b32 v5, v5, v16, 0x7060302
; GFX11-NEXT: v_cndmask_b32_e32 v13, v17, v19, vcc_lo
; GFX11-NEXT: v_bfe_u32 v17, v12, 16, 1
@@ -23555,7 +23005,7 @@ define <16 x bfloat> @v_maxnum_v16bf16(<16 x bfloat> %a, <16 x bfloat> %b) {
; GFX11-NEXT: v_and_b32_e32 v10, 0xffff0000, v10
; GFX11-NEXT: v_and_b32_e32 v3, 0xffff0000, v3
; GFX11-NEXT: v_bfe_u32 v23, v18, 16, 1
-; GFX11-NEXT: v_and_or_b32 v17, v18, s0, 0x400000
+; GFX11-NEXT: v_or_b32_e32 v17, 0x400000, v18
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
; GFX11-NEXT: v_dual_max_f32 v2, v2, v10 :: v_dual_and_b32 v1, 0xffff0000, v1
; GFX11-NEXT: v_max_f32_e32 v3, v3, v11
@@ -23565,13 +23015,13 @@ define <16 x bfloat> @v_maxnum_v16bf16(<16 x bfloat> %a, <16 x bfloat> %b) {
; GFX11-NEXT: v_bfe_u32 v20, v3, 16, 1
; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3
; GFX11-NEXT: v_add3_u32 v19, v20, v3, 0x7fff
-; GFX11-NEXT: v_and_or_b32 v20, v3, s0, 0x400000
+; GFX11-NEXT: v_or_b32_e32 v20, 0x400000, v3
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
; GFX11-NEXT: v_cndmask_b32_e32 v3, v19, v20, vcc_lo
; GFX11-NEXT: v_bfe_u32 v19, v2, 16, 1
; GFX11-NEXT: v_lshlrev_b32_e32 v20, 16, v9
; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v18, v18
-; GFX11-NEXT: v_and_or_b32 v18, v2, s0, 0x400000
+; GFX11-NEXT: v_or_b32_e32 v18, 0x400000, v2
; GFX11-NEXT: v_perm_b32 v3, v3, v12, 0x7060302
; GFX11-NEXT: v_cndmask_b32_e32 v10, v10, v17, vcc_lo
; GFX11-NEXT: v_add3_u32 v17, v19, v2, 0x7fff
@@ -23588,13 +23038,13 @@ define <16 x bfloat> @v_maxnum_v16bf16(<16 x bfloat> %a, <16 x bfloat> %b) {
; GFX11-NEXT: v_max_f32_e32 v9, v22, v20
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_4)
; GFX11-NEXT: v_add3_u32 v20, v23, v19, 0x7fff
-; GFX11-NEXT: v_and_or_b32 v22, v19, s0, 0x400000
-; GFX11-NEXT: v_and_or_b32 v25, v0, s0, 0x400000
+; GFX11-NEXT: v_or_b32_e32 v22, 0x400000, v19
+; GFX11-NEXT: v_or_b32_e32 v25, 0x400000, v0
; GFX11-NEXT: v_bfe_u32 v8, v1, 16, 1
; GFX11-NEXT: v_bfe_u32 v23, v9, 16, 1
-; GFX11-NEXT: v_and_or_b32 v24, v9, s0, 0x400000
+; GFX11-NEXT: v_or_b32_e32 v24, 0x400000, v9
; GFX11-NEXT: v_cndmask_b32_e32 v19, v20, v22, vcc_lo
-; GFX11-NEXT: v_and_or_b32 v22, v1, s0, 0x400000
+; GFX11-NEXT: v_or_b32_e32 v22, 0x400000, v1
; GFX11-NEXT: v_add3_u32 v8, v8, v1, 0x7fff
; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
; GFX11-NEXT: v_bfe_u32 v20, v0, 16, 1
@@ -24289,16 +23739,14 @@ define <32 x bfloat> @v_maxnum_v32bf16(<32 x bfloat> %a, <32 x bfloat> %b) {
; GFX8-NEXT: v_and_b32_e32 v30, 0xffff0000, v30
; GFX8-NEXT: v_and_b32_e32 v14, 0xffff0000, v14
; GFX8-NEXT: v_add_u32_e32 v32, vcc, s4, v32
-; GFX8-NEXT: v_and_b32_e32 v33, 0xff800000, v31
; GFX8-NEXT: v_max_f32_e32 v14, v14, v30
-; GFX8-NEXT: v_or_b32_e32 v33, 0x400000, v33
+; GFX8-NEXT: v_or_b32_e32 v33, 0x400000, v31
; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v31, v31
; GFX8-NEXT: v_bfe_u32 v30, v14, 16, 1
; GFX8-NEXT: v_cndmask_b32_e32 v31, v32, v33, vcc
; GFX8-NEXT: v_add_u32_e32 v30, vcc, v30, v14
; GFX8-NEXT: v_add_u32_e32 v30, vcc, s4, v30
-; GFX8-NEXT: v_and_b32_e32 v32, 0xff800000, v14
-; GFX8-NEXT: v_or_b32_e32 v32, 0x400000, v32
+; GFX8-NEXT: v_or_b32_e32 v32, 0x400000, v14
; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v14, v14
; GFX8-NEXT: v_cndmask_b32_e32 v14, v30, v32, vcc
; GFX8-NEXT: v_lshlrev_b32_e32 v30, 16, v29
@@ -24320,29 +23768,25 @@ define <32 x bfloat> @v_maxnum_v32bf16(<32 x bfloat> %a, <32 x bfloat> %b) {
; GFX8-NEXT: v_max_f32_e32 v30, v15, v30
; GFX8-NEXT: v_bfe_u32 v15, v33, 16, 1
; GFX8-NEXT: v_add_u32_e32 v15, vcc, v15, v33
-; GFX8-NEXT: v_and_b32_e32 v34, 0xff800000, v33
; GFX8-NEXT: v_add_u32_e32 v15, vcc, s4, v15
-; GFX8-NEXT: v_or_b32_e32 v34, 0x400000, v34
+; GFX8-NEXT: v_or_b32_e32 v34, 0x400000, v33
; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v33, v33
; GFX8-NEXT: v_bfe_u32 v33, v30, 16, 1
; GFX8-NEXT: v_cndmask_b32_e32 v15, v15, v34, vcc
; GFX8-NEXT: v_add_u32_e32 v33, vcc, v33, v30
-; GFX8-NEXT: v_and_b32_e32 v34, 0xff800000, v30
; GFX8-NEXT: v_add_u32_e32 v33, vcc, s4, v33
-; GFX8-NEXT: v_or_b32_e32 v34, 0x400000, v34
+; GFX8-NEXT: v_or_b32_e32 v34, 0x400000, v30
; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v30, v30
; GFX8-NEXT: v_cndmask_b32_e32 v30, v33, v34, vcc
; GFX8-NEXT: v_bfe_u32 v33, v32, 16, 1
; GFX8-NEXT: v_add_u32_e32 v33, vcc, v33, v32
; GFX8-NEXT: v_add_u32_e32 v33, vcc, s4, v33
-; GFX8-NEXT: v_and_b32_e32 v34, 0xff800000, v32
-; GFX8-NEXT: v_or_b32_e32 v34, 0x400000, v34
+; GFX8-NEXT: v_or_b32_e32 v34, 0x400000, v32
; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v32, v32
; GFX8-NEXT: v_cndmask_b32_e32 v32, v33, v34, vcc
; GFX8-NEXT: v_add_u32_e32 v29, vcc, v29, v13
; GFX8-NEXT: v_add_u32_e32 v29, vcc, s4, v29
-; GFX8-NEXT: v_and_b32_e32 v33, 0xff800000, v13
-; GFX8-NEXT: v_or_b32_e32 v33, 0x400000, v33
+; GFX8-NEXT: v_or_b32_e32 v33, 0x400000, v13
; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v13, v13
; GFX8-NEXT: v_cndmask_b32_e32 v13, v29, v33, vcc
; GFX8-NEXT: v_lshlrev_b32_e32 v29, 16, v28
@@ -24353,16 +23797,14 @@ define <32 x bfloat> @v_maxnum_v32bf16(<32 x bfloat> %a, <32 x bfloat> %b) {
; GFX8-NEXT: v_and_b32_e32 v28, 0xffff0000, v28
; GFX8-NEXT: v_and_b32_e32 v12, 0xffff0000, v12
; GFX8-NEXT: v_add_u32_e32 v33, vcc, s4, v33
-; GFX8-NEXT: v_and_b32_e32 v34, 0xff800000, v29
; GFX8-NEXT: v_max_f32_e32 v12, v12, v28
-; GFX8-NEXT: v_or_b32_e32 v34, 0x400000, v34
+; GFX8-NEXT: v_or_b32_e32 v34, 0x400000, v29
; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v29, v29
; GFX8-NEXT: v_bfe_u32 v28, v12, 16, 1
; GFX8-NEXT: v_cndmask_b32_e32 v29, v33, v34, vcc
; GFX8-NEXT: v_add_u32_e32 v28, vcc, v28, v12
; GFX8-NEXT: v_add_u32_e32 v28, vcc, s4, v28
-; GFX8-NEXT: v_and_b32_e32 v33, 0xff800000, v12
-; GFX8-NEXT: v_or_b32_e32 v33, 0x400000, v33
+; GFX8-NEXT: v_or_b32_e32 v33, 0x400000, v12
; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v12, v12
; GFX8-NEXT: v_cndmask_b32_e32 v12, v28, v33, vcc
; GFX8-NEXT: v_lshlrev_b32_e32 v28, 16, v27
@@ -24373,16 +23815,14 @@ define <32 x bfloat> @v_maxnum_v32bf16(<32 x bfloat> %a, <32 x bfloat> %b) {
; GFX8-NEXT: v_and_b32_e32 v27, 0xffff0000, v27
; GFX8-NEXT: v_and_b32_e32 v11, 0xffff0000, v11
; GFX8-NEXT: v_add_u32_e32 v33, vcc, s4, v33
-; GFX8-NEXT: v_and_b32_e32 v34, 0xff800000, v28
; GFX8-NEXT: v_max_f32_e32 v11, v11, v27
-; GFX8-NEXT: v_or_b32_e32 v34, 0x400000, v34
+; GFX8-NEXT: v_or_b32_e32 v34, 0x400000, v28
; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v28, v28
; GFX8-NEXT: v_bfe_u32 v27, v11, 16, 1
; GFX8-NEXT: v_cndmask_b32_e32 v28, v33, v34, vcc
; GFX8-NEXT: v_add_u32_e32 v27, vcc, v27, v11
; GFX8-NEXT: v_add_u32_e32 v27, vcc, s4, v27
-; GFX8-NEXT: v_and_b32_e32 v33, 0xff800000, v11
-; GFX8-NEXT: v_or_b32_e32 v33, 0x400000, v33
+; GFX8-NEXT: v_or_b32_e32 v33, 0x400000, v11
; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v11, v11
; GFX8-NEXT: v_cndmask_b32_e32 v11, v27, v33, vcc
; GFX8-NEXT: v_lshlrev_b32_e32 v27, 16, v26
@@ -24393,16 +23833,14 @@ define <32 x bfloat> @v_maxnum_v32bf16(<32 x bfloat> %a, <32 x bfloat> %b) {
; GFX8-NEXT: v_and_b32_e32 v26, 0xffff0000, v26
; GFX8-NEXT: v_and_b32_e32 v10, 0xffff0000, v10
; GFX8-NEXT: v_add_u32_e32 v33, vcc, s4, v33
-; GFX8-NEXT: v_and_b32_e32 v34, 0xff800000, v27
; GFX8-NEXT: v_max_f32_e32 v10, v10, v26
-; GFX8-NEXT: v_or_b32_e32 v34, 0x400000, v34
+; GFX8-NEXT: v_or_b32_e32 v34, 0x400000, v27
; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v27, v27
; GFX8-NEXT: v_bfe_u32 v26, v10, 16, 1
; GFX8-NEXT: v_cndmask_b32_e32 v27, v33, v34, vcc
; GFX8-NEXT: v_add_u32_e32 v26, vcc, v26, v10
; GFX8-NEXT: v_add_u32_e32 v26, vcc, s4, v26
-; GFX8-NEXT: v_and_b32_e32 v33, 0xff800000, v10
-; GFX8-NEXT: v_or_b32_e32 v33, 0x400000, v33
+; GFX8-NEXT: v_or_b32_e32 v33, 0x400000, v10
; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v10, v10
; GFX8-NEXT: v_cndmask_b32_e32 v10, v26, v33, vcc
; GFX8-NEXT: v_lshlrev_b32_e32 v26, 16, v25
@@ -24413,16 +23851,14 @@ define <32 x bfloat> @v_maxnum_v32bf16(<32 x bfloat> %a, <32 x bfloat> %b) {
; GFX8-NEXT: v_and_b32_e32 v25, 0xffff0000, v25
; GFX8-NEXT: v_and_b32_e32 v9, 0xffff0000, v9
; GFX8-NEXT: v_add_u32_e32 v33, vcc, s4, v33
-; GFX8-NEXT: v_and_b32_e32 v34, 0xff800000, v26
; GFX8-NEXT: v_max_f32_e32 v9, v9, v25
-; GFX8-NEXT: v_or_b32_e32 v34, 0x400000, v34
+; GFX8-NEXT: v_or_b32_e32 v34, 0x400000, v26
; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v26, v26
; GFX8-NEXT: v_bfe_u32 v25, v9, 16, 1
; GFX8-NEXT: v_cndmask_b32_e32 v26, v33, v34, vcc
; GFX8-NEXT: v_add_u32_e32 v25, vcc, v25, v9
; GFX8-NEXT: v_add_u32_e32 v25, vcc, s4, v25
-; GFX8-NEXT: v_and_b32_e32 v33, 0xff800000, v9
-; GFX8-NEXT: v_or_b32_e32 v33, 0x400000, v33
+; GFX8-NEXT: v_or_b32_e32 v33, 0x400000, v9
; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v9, v9
; GFX8-NEXT: v_cndmask_b32_e32 v9, v25, v33, vcc
; GFX8-NEXT: v_lshlrev_b32_e32 v25, 16, v24
@@ -24433,16 +23869,14 @@ define <32 x bfloat> @v_maxnum_v32bf16(<32 x bfloat> %a, <32 x bfloat> %b) {
; GFX8-NEXT: v_and_b32_e32 v24, 0xffff0000, v24
; GFX8-NEXT: v_and_b32_e32 v8, 0xffff0000, v8
; GFX8-NEXT: v_add_u32_e32 v33, vcc, s4, v33
-; GFX8-NEXT: v_and_b32_e32 v34, 0xff800000, v25
; GFX8-NEXT: v_max_f32_e32 v8, v8, v24
-; GFX8-NEXT: v_or_b32_e32 v34, 0x400000, v34
+; GFX8-NEXT: v_or_b32_e32 v34, 0x400000, v25
; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v25, v25
; GFX8-NEXT: v_bfe_u32 v24, v8, 16, 1
; GFX8-NEXT: v_cndmask_b32_e32 v25, v33, v34, vcc
; GFX8-NEXT: v_add_u32_e32 v24, vcc, v24, v8
; GFX8-NEXT: v_add_u32_e32 v24, vcc, s4, v24
-; GFX8-NEXT: v_and_b32_e32 v33, 0xff800000, v8
-; GFX8-NEXT: v_or_b32_e32 v33, 0x400000, v33
+; GFX8-NEXT: v_or_b32_e32 v33, 0x400000, v8
; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v8, v8
; GFX8-NEXT: v_cndmask_b32_e32 v8, v24, v33, vcc
; GFX8-NEXT: v_lshlrev_b32_e32 v24, 16, v23
@@ -24453,16 +23887,14 @@ define <32 x bfloat> @v_maxnum_v32bf16(<32 x bfloat> %a, <32 x bfloat> %b) {
; GFX8-NEXT: v_and_b32_e32 v23, 0xffff0000, v23
; GFX8-NEXT: v_and_b32_e32 v7, 0xffff0000, v7
; GFX8-NEXT: v_add_u32_e32 v33, vcc, s4, v33
-; GFX8-NEXT: v_and_b32_e32 v34, 0xff800000, v24
; GFX8-NEXT: v_max_f32_e32 v7, v7, v23
-; GFX8-NEXT: v_or_b32_e32 v34, 0x400000, v34
+; GFX8-NEXT: v_or_b32_e32 v34, 0x400000, v24
; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v24, v24
; GFX8-NEXT: v_bfe_u32 v23, v7, 16, 1
; GFX8-NEXT: v_cndmask_b32_e32 v24, v33, v34, vcc
; GFX8-NEXT: v_add_u32_e32 v23, vcc, v23, v7
; GFX8-NEXT: v_add_u32_e32 v23, vcc, s4, v23
-; GFX8-NEXT: v_and_b32_e32 v33, 0xff800000, v7
-; GFX8-NEXT: v_or_b32_e32 v33, 0x400000, v33
+; GFX8-NEXT: v_or_b32_e32 v33, 0x400000, v7
; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v7, v7
; GFX8-NEXT: v_cndmask_b32_e32 v7, v23, v33, vcc
; GFX8-NEXT: v_lshlrev_b32_e32 v23, 16, v22
@@ -24473,16 +23905,14 @@ define <32 x bfloat> @v_maxnum_v32bf16(<32 x bfloat> %a, <32 x bfloat> %b) {
; GFX8-NEXT: v_and_b32_e32 v22, 0xffff0000, v22
; GFX8-NEXT: v_and_b32_e32 v6, 0xffff0000, v6
; GFX8-NEXT: v_add_u32_e32 v33, vcc, s4, v33
-; GFX8-NEXT: v_and_b32_e32 v34, 0xff800000, v23
; GFX8-NEXT: v_max_f32_e32 v6, v6, v22
-; GFX8-NEXT: v_or_b32_e32 v34, 0x400000, v34
+; GFX8-NEXT: v_or_b32_e32 v34, 0x400000, v23
; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v23, v23
; GFX8-NEXT: v_bfe_u32 v22, v6, 16, 1
; GFX8-NEXT: v_cndmask_b32_e32 v23, v33, v34, vcc
; GFX8-NEXT: v_add_u32_e32 v22, vcc, v22, v6
; GFX8-NEXT: v_add_u32_e32 v22, vcc, s4, v22
-; GFX8-NEXT: v_and_b32_e32 v33, 0xff800000, v6
-; GFX8-NEXT: v_or_b32_e32 v33, 0x400000, v33
+; GFX8-NEXT: v_or_b32_e32 v33, 0x400000, v6
; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v6, v6
; GFX8-NEXT: v_cndmask_b32_e32 v6, v22, v33, vcc
; GFX8-NEXT: v_lshlrev_b32_e32 v22, 16, v21
@@ -24493,16 +23923,14 @@ define <32 x bfloat> @v_maxnum_v32bf16(<32 x bfloat> %a, <32 x bfloat> %b) {
; GFX8-NEXT: v_and_b32_e32 v21, 0xffff0000, v21
; GFX8-NEXT: v_and_b32_e32 v5, 0xffff0000, v5
; GFX8-NEXT: v_add_u32_e32 v33, vcc, s4, v33
-; GFX8-NEXT: v_and_b32_e32 v34, 0xff800000, v22
; GFX8-NEXT: v_max_f32_e32 v5, v5, v21
-; GFX8-NEXT: v_or_b32_e32 v34, 0x400000, v34
+; GFX8-NEXT: v_or_b32_e32 v34, 0x400000, v22
; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v22, v22
; GFX8-NEXT: v_bfe_u32 v21, v5, 16, 1
; GFX8-NEXT: v_cndmask_b32_e32 v22, v33, v34, vcc
; GFX8-NEXT: v_add_u32_e32 v21, vcc, v21, v5
; GFX8-NEXT: v_add_u32_e32 v21, vcc, s4, v21
-; GFX8-NEXT: v_and_b32_e32 v33, 0xff800000, v5
-; GFX8-NEXT: v_or_b32_e32 v33, 0x400000, v33
+; GFX8-NEXT: v_or_b32_e32 v33, 0x400000, v5
; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v5, v5
; GFX8-NEXT: v_cndmask_b32_e32 v5, v21, v33, vcc
; GFX8-NEXT: v_lshlrev_b32_e32 v21, 16, v20
@@ -24513,16 +23941,14 @@ define <32 x bfloat> @v_maxnum_v32bf16(<32 x bfloat> %a, <32 x bfloat> %b) {
; GFX8-NEXT: v_and_b32_e32 v20, 0xffff0000, v20
; GFX8-NEXT: v_and_b32_e32 v4, 0xffff0000, v4
; GFX8-NEXT: v_add_u32_e32 v33, vcc, s4, v33
-; GFX8-NEXT: v_and_b32_e32 v34, 0xff800000, v21
; GFX8-NEXT: v_max_f32_e32 v4, v4, v20
-; GFX8-NEXT: v_or_b32_e32 v34, 0x400000, v34
+; GFX8-NEXT: v_or_b32_e32 v34, 0x400000, v21
; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v21, v21
; GFX8-NEXT: v_bfe_u32 v20, v4, 16, 1
; GFX8-NEXT: v_cndmask_b32_e32 v21, v33, v34, vcc
; GFX8-NEXT: v_add_u32_e32 v20, vcc, v20, v4
; GFX8-NEXT: v_add_u32_e32 v20, vcc, s4, v20
-; GFX8-NEXT: v_and_b32_e32 v33, 0xff800000, v4
-; GFX8-NEXT: v_or_b32_e32 v33, 0x400000, v33
+; GFX8-NEXT: v_or_b32_e32 v33, 0x400000, v4
; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v4, v4
; GFX8-NEXT: v_cndmask_b32_e32 v4, v20, v33, vcc
; GFX8-NEXT: v_lshlrev_b32_e32 v20, 16, v19
@@ -24533,16 +23959,14 @@ define <32 x bfloat> @v_maxnum_v32bf16(<32 x bfloat> %a, <32 x bfloat> %b) {
; GFX8-NEXT: v_and_b32_e32 v19, 0xffff0000, v19
; GFX8-NEXT: v_and_b32_e32 v3, 0xffff0000, v3
; GFX8-NEXT: v_add_u32_e32 v33, vcc, s4, v33
-; GFX8-NEXT: v_and_b32_e32 v34, 0xff800000, v20
; GFX8-NEXT: v_max_f32_e32 v3, v3, v19
-; GFX8-NEXT: v_or_b32_e32 v34, 0x400000, v34
+; GFX8-NEXT: v_or_b32_e32 v34, 0x400000, v20
; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v20, v20
; GFX8-NEXT: v_bfe_u32 v19, v3, 16, 1
; GFX8-NEXT: v_cndmask_b32_e32 v20, v33, v34, vcc
; GFX8-NEXT: v_add_u32_e32 v19, vcc, v19, v3
; GFX8-NEXT: v_add_u32_e32 v19, vcc, s4, v19
-; GFX8-NEXT: v_and_b32_e32 v33, 0xff800000, v3
-; GFX8-NEXT: v_or_b32_e32 v33, 0x400000, v33
+; GFX8-NEXT: v_or_b32_e32 v33, 0x400000, v3
; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v3, v3
; GFX8-NEXT: v_cndmask_b32_e32 v3, v19, v33, vcc
; GFX8-NEXT: v_lshlrev_b32_e32 v19, 16, v18
@@ -24553,16 +23977,14 @@ define <32 x bfloat> @v_maxnum_v32bf16(<32 x bfloat> %a, <32 x bfloat> %b) {
; GFX8-NEXT: v_and_b32_e32 v18, 0xffff0000, v18
; GFX8-NEXT: v_and_b32_e32 v2, 0xffff0000, v2
; GFX8-NEXT: v_add_u32_e32 v33, vcc, s4, v33
-; GFX8-NEXT: v_and_b32_e32 v34, 0xff800000, v19
; GFX8-NEXT: v_max_f32_e32 v2, v2, v18
-; GFX8-NEXT: v_or_b32_e32 v34, 0x400000, v34
+; GFX8-NEXT: v_or_b32_e32 v34, 0x400000, v19
; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v19, v19
; GFX8-NEXT: v_bfe_u32 v18, v2, 16, 1
; GFX8-NEXT: v_cndmask_b32_e32 v19, v33, v34, vcc
; GFX8-NEXT: v_add_u32_e32 v18, vcc, v18, v2
; GFX8-NEXT: v_add_u32_e32 v18, vcc, s4, v18
-; GFX8-NEXT: v_and_b32_e32 v33, 0xff800000, v2
-; GFX8-NEXT: v_or_b32_e32 v33, 0x400000, v33
+; GFX8-NEXT: v_or_b32_e32 v33, 0x400000, v2
; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v2, v2
; GFX8-NEXT: v_cndmask_b32_e32 v2, v18, v33, vcc
; GFX8-NEXT: v_lshlrev_b32_e32 v18, 16, v17
@@ -24573,16 +23995,14 @@ define <32 x bfloat> @v_maxnum_v32bf16(<32 x bfloat> %a, <32 x bfloat> %b) {
; GFX8-NEXT: v_and_b32_e32 v17, 0xffff0000, v17
; GFX8-NEXT: v_and_b32_e32 v1, 0xffff0000, v1
; GFX8-NEXT: v_add_u32_e32 v33, vcc, s4, v33
-; GFX8-NEXT: v_and_b32_e32 v34, 0xff800000, v18
; GFX8-NEXT: v_max_f32_e32 v1, v1, v17
-; GFX8-NEXT: v_or_b32_e32 v34, 0x400000, v34
+; GFX8-NEXT: v_or_b32_e32 v34, 0x400000, v18
; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v18, v18
; GFX8-NEXT: v_bfe_u32 v17, v1, 16, 1
; GFX8-NEXT: v_cndmask_b32_e32 v18, v33, v34, vcc
; GFX8-NEXT: v_add_u32_e32 v17, vcc, v17, v1
; GFX8-NEXT: v_add_u32_e32 v17, vcc, s4, v17
-; GFX8-NEXT: v_and_b32_e32 v33, 0xff800000, v1
-; GFX8-NEXT: v_or_b32_e32 v33, 0x400000, v33
+; GFX8-NEXT: v_or_b32_e32 v33, 0x400000, v1
; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v1, v1
; GFX8-NEXT: v_cndmask_b32_e32 v1, v17, v33, vcc
; GFX8-NEXT: v_lshlrev_b32_e32 v17, 16, v16
@@ -24593,16 +24013,14 @@ define <32 x bfloat> @v_maxnum_v32bf16(<32 x bfloat> %a, <32 x bfloat> %b) {
; GFX8-NEXT: v_and_b32_e32 v16, 0xffff0000, v16
; GFX8-NEXT: v_and_b32_e32 v0, 0xffff0000, v0
; GFX8-NEXT: v_add_u32_e32 v33, vcc, s4, v33
-; GFX8-NEXT: v_and_b32_e32 v34, 0xff800000, v17
; GFX8-NEXT: v_max_f32_e32 v0, v0, v16
-; GFX8-NEXT: v_or_b32_e32 v34, 0x400000, v34
+; GFX8-NEXT: v_or_b32_e32 v34, 0x400000, v17
; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v17, v17
; GFX8-NEXT: v_bfe_u32 v16, v0, 16, 1
; GFX8-NEXT: v_cndmask_b32_e32 v17, v33, v34, vcc
; GFX8-NEXT: v_add_u32_e32 v16, vcc, v16, v0
; GFX8-NEXT: v_add_u32_e32 v16, vcc, s4, v16
-; GFX8-NEXT: v_and_b32_e32 v33, 0xff800000, v0
-; GFX8-NEXT: v_or_b32_e32 v33, 0x400000, v33
+; GFX8-NEXT: v_or_b32_e32 v33, 0x400000, v0
; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v0, v0
; GFX8-NEXT: v_cndmask_b32_e32 v0, v16, v33, vcc
; GFX8-NEXT: v_lshrrev_b32_e32 v0, 16, v0
@@ -24643,292 +24061,260 @@ define <32 x bfloat> @v_maxnum_v32bf16(<32 x bfloat> %a, <32 x bfloat> %b) {
; GFX9-NEXT: v_lshlrev_b32_e32 v31, 16, v30
; GFX9-NEXT: v_lshlrev_b32_e32 v32, 16, v14
; GFX9-NEXT: v_max_f32_e32 v31, v32, v31
-; GFX9-NEXT: s_movk_i32 s4, 0x7fff
-; GFX9-NEXT: v_bfe_u32 v32, v31, 16, 1
-; GFX9-NEXT: v_and_b32_e32 v33, 0xff800000, v31
; GFX9-NEXT: v_and_b32_e32 v30, 0xffff0000, v30
; GFX9-NEXT: v_and_b32_e32 v14, 0xffff0000, v14
+; GFX9-NEXT: s_movk_i32 s4, 0x7fff
+; GFX9-NEXT: v_bfe_u32 v32, v31, 16, 1
+; GFX9-NEXT: v_max_f32_e32 v14, v14, v30
; GFX9-NEXT: v_add3_u32 v32, v32, v31, s4
-; GFX9-NEXT: v_or_b32_e32 v33, 0x400000, v33
+; GFX9-NEXT: v_or_b32_e32 v33, 0x400000, v31
; GFX9-NEXT: v_cmp_u_f32_e32 vcc, v31, v31
-; GFX9-NEXT: v_max_f32_e32 v14, v14, v30
-; GFX9-NEXT: v_cndmask_b32_e32 v31, v32, v33, vcc
; GFX9-NEXT: v_bfe_u32 v30, v14, 16, 1
-; GFX9-NEXT: v_and_b32_e32 v32, 0xff800000, v14
+; GFX9-NEXT: v_cndmask_b32_e32 v31, v32, v33, vcc
; GFX9-NEXT: v_add3_u32 v30, v30, v14, s4
-; GFX9-NEXT: v_or_b32_e32 v32, 0x400000, v32
+; GFX9-NEXT: v_or_b32_e32 v32, 0x400000, v14
; GFX9-NEXT: v_cmp_u_f32_e32 vcc, v14, v14
; GFX9-NEXT: v_cndmask_b32_e32 v14, v30, v32, vcc
; GFX9-NEXT: v_lshlrev_b32_e32 v30, 16, v29
; GFX9-NEXT: v_lshlrev_b32_e32 v32, 16, v13
+; GFX9-NEXT: v_max_f32_e32 v30, v32, v30
; GFX9-NEXT: v_and_b32_e32 v29, 0xffff0000, v29
; GFX9-NEXT: v_and_b32_e32 v13, 0xffff0000, v13
-; GFX9-NEXT: v_max_f32_e32 v13, v13, v29
-; GFX9-NEXT: buffer_load_dword v29, off, s[0:3], s32
-; GFX9-NEXT: v_max_f32_e32 v30, v32, v30
; GFX9-NEXT: v_bfe_u32 v32, v30, 16, 1
-; GFX9-NEXT: v_and_b32_e32 v33, 0xff800000, v30
+; GFX9-NEXT: v_max_f32_e32 v13, v13, v29
; GFX9-NEXT: v_add3_u32 v32, v32, v30, s4
-; GFX9-NEXT: v_or_b32_e32 v33, 0x400000, v33
+; GFX9-NEXT: v_or_b32_e32 v33, 0x400000, v30
; GFX9-NEXT: v_cmp_u_f32_e32 vcc, v30, v30
+; GFX9-NEXT: v_bfe_u32 v29, v13, 16, 1
; GFX9-NEXT: v_cndmask_b32_e32 v30, v32, v33, vcc
-; GFX9-NEXT: v_lshlrev_b32_e32 v32, 16, v15
+; GFX9-NEXT: v_add3_u32 v29, v29, v13, s4
+; GFX9-NEXT: v_or_b32_e32 v32, 0x400000, v13
+; GFX9-NEXT: v_cmp_u_f32_e32 vcc, v13, v13
+; GFX9-NEXT: v_cndmask_b32_e32 v13, v29, v32, vcc
+; GFX9-NEXT: v_lshlrev_b32_e32 v29, 16, v28
+; GFX9-NEXT: v_lshlrev_b32_e32 v32, 16, v12
+; GFX9-NEXT: v_max_f32_e32 v32, v32, v29
+; GFX9-NEXT: buffer_load_dword v29, off, s[0:3], s32
+; GFX9-NEXT: v_lshlrev_b32_e32 v33, 16, v15
; GFX9-NEXT: v_and_b32_e32 v15, 0xffff0000, v15
+; GFX9-NEXT: v_and_b32_e32 v28, 0xffff0000, v28
+; GFX9-NEXT: v_and_b32_e32 v12, 0xffff0000, v12
+; GFX9-NEXT: v_max_f32_e32 v12, v12, v28
+; GFX9-NEXT: v_bfe_u32 v28, v12, 16, 1
+; GFX9-NEXT: v_add3_u32 v28, v28, v12, s4
; GFX9-NEXT: s_waitcnt vmcnt(0)
-; GFX9-NEXT: v_lshlrev_b32_e32 v33, 16, v29
-; GFX9-NEXT: v_max_f32_e32 v32, v32, v33
+; GFX9-NEXT: v_lshlrev_b32_e32 v34, 16, v29
+; GFX9-NEXT: v_max_f32_e32 v33, v33, v34
; GFX9-NEXT: v_and_b32_e32 v29, 0xffff0000, v29
; GFX9-NEXT: v_max_f32_e32 v29, v15, v29
-; GFX9-NEXT: v_bfe_u32 v15, v32, 16, 1
-; GFX9-NEXT: v_and_b32_e32 v33, 0xff800000, v32
-; GFX9-NEXT: v_add3_u32 v15, v15, v32, s4
-; GFX9-NEXT: v_or_b32_e32 v33, 0x400000, v33
-; GFX9-NEXT: v_cmp_u_f32_e32 vcc, v32, v32
-; GFX9-NEXT: v_cndmask_b32_e32 v15, v15, v33, vcc
-; GFX9-NEXT: v_bfe_u32 v32, v29, 16, 1
-; GFX9-NEXT: v_and_b32_e32 v33, 0xff800000, v29
-; GFX9-NEXT: v_add3_u32 v32, v32, v29, s4
-; GFX9-NEXT: v_or_b32_e32 v33, 0x400000, v33
+; GFX9-NEXT: v_bfe_u32 v15, v33, 16, 1
+; GFX9-NEXT: v_add3_u32 v15, v15, v33, s4
+; GFX9-NEXT: v_or_b32_e32 v34, 0x400000, v33
+; GFX9-NEXT: v_cmp_u_f32_e32 vcc, v33, v33
+; GFX9-NEXT: v_bfe_u32 v33, v29, 16, 1
+; GFX9-NEXT: v_cndmask_b32_e32 v15, v15, v34, vcc
+; GFX9-NEXT: v_add3_u32 v33, v33, v29, s4
+; GFX9-NEXT: v_or_b32_e32 v34, 0x400000, v29
; GFX9-NEXT: v_cmp_u_f32_e32 vcc, v29, v29
-; GFX9-NEXT: v_cndmask_b32_e32 v29, v32, v33, vcc
-; GFX9-NEXT: v_bfe_u32 v32, v13, 16, 1
-; GFX9-NEXT: v_and_b32_e32 v33, 0xff800000, v13
-; GFX9-NEXT: v_add3_u32 v32, v32, v13, s4
-; GFX9-NEXT: v_or_b32_e32 v33, 0x400000, v33
-; GFX9-NEXT: v_cmp_u_f32_e32 vcc, v13, v13
-; GFX9-NEXT: v_cndmask_b32_e32 v13, v32, v33, vcc
-; GFX9-NEXT: v_lshlrev_b32_e32 v32, 16, v28
-; GFX9-NEXT: v_lshlrev_b32_e32 v33, 16, v12
-; GFX9-NEXT: v_max_f32_e32 v32, v33, v32
+; GFX9-NEXT: v_cndmask_b32_e32 v29, v33, v34, vcc
; GFX9-NEXT: v_bfe_u32 v33, v32, 16, 1
-; GFX9-NEXT: v_and_b32_e32 v34, 0xff800000, v32
-; GFX9-NEXT: v_and_b32_e32 v28, 0xffff0000, v28
-; GFX9-NEXT: v_and_b32_e32 v12, 0xffff0000, v12
; GFX9-NEXT: v_add3_u32 v33, v33, v32, s4
-; GFX9-NEXT: v_or_b32_e32 v34, 0x400000, v34
+; GFX9-NEXT: v_or_b32_e32 v34, 0x400000, v32
; GFX9-NEXT: v_cmp_u_f32_e32 vcc, v32, v32
-; GFX9-NEXT: v_max_f32_e32 v12, v12, v28
; GFX9-NEXT: v_cndmask_b32_e32 v32, v33, v34, vcc
-; GFX9-NEXT: v_bfe_u32 v28, v12, 16, 1
-; GFX9-NEXT: v_and_b32_e32 v33, 0xff800000, v12
-; GFX9-NEXT: v_add3_u32 v28, v28, v12, s4
-; GFX9-NEXT: v_or_b32_e32 v33, 0x400000, v33
+; GFX9-NEXT: v_or_b32_e32 v33, 0x400000, v12
; GFX9-NEXT: v_cmp_u_f32_e32 vcc, v12, v12
; GFX9-NEXT: v_cndmask_b32_e32 v12, v28, v33, vcc
; GFX9-NEXT: v_lshlrev_b32_e32 v28, 16, v27
; GFX9-NEXT: v_lshlrev_b32_e32 v33, 16, v11
; GFX9-NEXT: v_max_f32_e32 v28, v33, v28
-; GFX9-NEXT: v_bfe_u32 v33, v28, 16, 1
-; GFX9-NEXT: v_and_b32_e32 v34, 0xff800000, v28
; GFX9-NEXT: v_and_b32_e32 v27, 0xffff0000, v27
; GFX9-NEXT: v_and_b32_e32 v11, 0xffff0000, v11
+; GFX9-NEXT: v_bfe_u32 v33, v28, 16, 1
+; GFX9-NEXT: v_max_f32_e32 v11, v11, v27
; GFX9-NEXT: v_add3_u32 v33, v33, v28, s4
-; GFX9-NEXT: v_or_b32_e32 v34, 0x400000, v34
+; GFX9-NEXT: v_or_b32_e32 v34, 0x400000, v28
; GFX9-NEXT: v_cmp_u_f32_e32 vcc, v28, v28
-; GFX9-NEXT: v_max_f32_e32 v11, v11, v27
-; GFX9-NEXT: v_cndmask_b32_e32 v28, v33, v34, vcc
; GFX9-NEXT: v_bfe_u32 v27, v11, 16, 1
-; GFX9-NEXT: v_and_b32_e32 v33, 0xff800000, v11
+; GFX9-NEXT: v_cndmask_b32_e32 v28, v33, v34, vcc
; GFX9-NEXT: v_add3_u32 v27, v27, v11, s4
-; GFX9-NEXT: v_or_b32_e32 v33, 0x400000, v33
+; GFX9-NEXT: v_or_b32_e32 v33, 0x400000, v11
; GFX9-NEXT: v_cmp_u_f32_e32 vcc, v11, v11
; GFX9-NEXT: v_cndmask_b32_e32 v11, v27, v33, vcc
; GFX9-NEXT: v_lshlrev_b32_e32 v27, 16, v26
; GFX9-NEXT: v_lshlrev_b32_e32 v33, 16, v10
; GFX9-NEXT: v_max_f32_e32 v27, v33, v27
-; GFX9-NEXT: v_bfe_u32 v33, v27, 16, 1
-; GFX9-NEXT: v_and_b32_e32 v34, 0xff800000, v27
; GFX9-NEXT: v_and_b32_e32 v26, 0xffff0000, v26
; GFX9-NEXT: v_and_b32_e32 v10, 0xffff0000, v10
+; GFX9-NEXT: v_bfe_u32 v33, v27, 16, 1
+; GFX9-NEXT: v_max_f32_e32 v10, v10, v26
; GFX9-NEXT: v_add3_u32 v33, v33, v27, s4
-; GFX9-NEXT: v_or_b32_e32 v34, 0x400000, v34
+; GFX9-NEXT: v_or_b32_e32 v34, 0x400000, v27
; GFX9-NEXT: v_cmp_u_f32_e32 vcc, v27, v27
-; GFX9-NEXT: v_max_f32_e32 v10, v10, v26
-; GFX9-NEXT: v_cndmask_b32_e32 v27, v33, v34, vcc
; GFX9-NEXT: v_bfe_u32 v26, v10, 16, 1
-; GFX9-NEXT: v_and_b32_e32 v33, 0xff800000, v10
+; GFX9-NEXT: v_cndmask_b32_e32 v27, v33, v34, vcc
; GFX9-NEXT: v_add3_u32 v26, v26, v10, s4
-; GFX9-NEXT: v_or_b32_e32 v33, 0x400000, v33
+; GFX9-NEXT: v_or_b32_e32 v33, 0x400000, v10
; GFX9-NEXT: v_cmp_u_f32_e32 vcc, v10, v10
; GFX9-NEXT: v_cndmask_b32_e32 v10, v26, v33, vcc
; GFX9-NEXT: v_lshlrev_b32_e32 v26, 16, v25
; GFX9-NEXT: v_lshlrev_b32_e32 v33, 16, v9
; GFX9-NEXT: v_max_f32_e32 v26, v33, v26
-; GFX9-NEXT: v_bfe_u32 v33, v26, 16, 1
-; GFX9-NEXT: v_and_b32_e32 v34, 0xff800000, v26
; GFX9-NEXT: v_and_b32_e32 v25, 0xffff0000, v25
; GFX9-NEXT: v_and_b32_e32 v9, 0xffff0000, v9
+; GFX9-NEXT: v_bfe_u32 v33, v26, 16, 1
+; GFX9-NEXT: v_max_f32_e32 v9, v9, v25
; GFX9-NEXT: v_add3_u32 v33, v33, v26, s4
-; GFX9-NEXT: v_or_b32_e32 v34, 0x400000, v34
+; GFX9-NEXT: v_or_b32_e32 v34, 0x400000, v26
; GFX9-NEXT: v_cmp_u_f32_e32 vcc, v26, v26
-; GFX9-NEXT: v_max_f32_e32 v9, v9, v25
-; GFX9-NEXT: v_cndmask_b32_e32 v26, v33, v34, vcc
; GFX9-NEXT: v_bfe_u32 v25, v9, 16, 1
-; GFX9-NEXT: v_and_b32_e32 v33, 0xff800000, v9
+; GFX9-NEXT: v_cndmask_b32_e32 v26, v33, v34, vcc
; GFX9-NEXT: v_add3_u32 v25, v25, v9, s4
-; GFX9-NEXT: v_or_b32_e32 v33, 0x400000, v33
+; GFX9-NEXT: v_or_b32_e32 v33, 0x400000, v9
; GFX9-NEXT: v_cmp_u_f32_e32 vcc, v9, v9
; GFX9-NEXT: v_cndmask_b32_e32 v9, v25, v33, vcc
; GFX9-NEXT: v_lshlrev_b32_e32 v25, 16, v24
; GFX9-NEXT: v_lshlrev_b32_e32 v33, 16, v8
; GFX9-NEXT: v_max_f32_e32 v25, v33, v25
-; GFX9-NEXT: v_bfe_u32 v33, v25, 16, 1
-; GFX9-NEXT: v_and_b32_e32 v34, 0xff800000, v25
; GFX9-NEXT: v_and_b32_e32 v24, 0xffff0000, v24
; GFX9-NEXT: v_and_b32_e32 v8, 0xffff0000, v8
+; GFX9-NEXT: v_bfe_u32 v33, v25, 16, 1
+; GFX9-NEXT: v_max_f32_e32 v8, v8, v24
; GFX9-NEXT: v_add3_u32 v33, v33, v25, s4
-; GFX9-NEXT: v_or_b32_e32 v34, 0x400000, v34
+; GFX9-NEXT: v_or_b32_e32 v34, 0x400000, v25
; GFX9-NEXT: v_cmp_u_f32_e32 vcc, v25, v25
-; GFX9-NEXT: v_max_f32_e32 v8, v8, v24
-; GFX9-NEXT: v_cndmask_b32_e32 v25, v33, v34, vcc
; GFX9-NEXT: v_bfe_u32 v24, v8, 16, 1
-; GFX9-NEXT: v_and_b32_e32 v33, 0xff800000, v8
+; GFX9-NEXT: v_cndmask_b32_e32 v25, v33, v34, vcc
; GFX9-NEXT: v_add3_u32 v24, v24, v8, s4
-; GFX9-NEXT: v_or_b32_e32 v33, 0x400000, v33
+; GFX9-NEXT: v_or_b32_e32 v33, 0x400000, v8
; GFX9-NEXT: v_cmp_u_f32_e32 vcc, v8, v8
; GFX9-NEXT: v_cndmask_b32_e32 v8, v24, v33, vcc
; GFX9-NEXT: v_lshlrev_b32_e32 v24, 16, v23
; GFX9-NEXT: v_lshlrev_b32_e32 v33, 16, v7
; GFX9-NEXT: v_max_f32_e32 v24, v33, v24
-; GFX9-NEXT: v_bfe_u32 v33, v24, 16, 1
-; GFX9-NEXT: v_and_b32_e32 v34, 0xff800000, v24
; GFX9-NEXT: v_and_b32_e32 v23, 0xffff0000, v23
; GFX9-NEXT: v_and_b32_e32 v7, 0xffff0000, v7
+; GFX9-NEXT: v_bfe_u32 v33, v24, 16, 1
+; GFX9-NEXT: v_max_f32_e32 v7, v7, v23
; GFX9-NEXT: v_add3_u32 v33, v33, v24, s4
-; GFX9-NEXT: v_or_b32_e32 v34, 0x400000, v34
+; GFX9-NEXT: v_or_b32_e32 v34, 0x400000, v24
; GFX9-NEXT: v_cmp_u_f32_e32 vcc, v24, v24
-; GFX9-NEXT: v_max_f32_e32 v7, v7, v23
-; GFX9-NEXT: v_cndmask_b32_e32 v24, v33, v34, vcc
; GFX9-NEXT: v_bfe_u32 v23, v7, 16, 1
-; GFX9-NEXT: v_and_b32_e32 v33, 0xff800000, v7
+; GFX9-NEXT: v_cndmask_b32_e32 v24, v33, v34, vcc
; GFX9-NEXT: v_add3_u32 v23, v23, v7, s4
-; GFX9-NEXT: v_or_b32_e32 v33, 0x400000, v33
+; GFX9-NEXT: v_or_b32_e32 v33, 0x400000, v7
; GFX9-NEXT: v_cmp_u_f32_e32 vcc, v7, v7
; GFX9-NEXT: v_cndmask_b32_e32 v7, v23, v33, vcc
; GFX9-NEXT: v_lshlrev_b32_e32 v23, 16, v22
; GFX9-NEXT: v_lshlrev_b32_e32 v33, 16, v6
; GFX9-NEXT: v_max_f32_e32 v23, v33, v23
-; GFX9-NEXT: v_bfe_u32 v33, v23, 16, 1
-; GFX9-NEXT: v_and_b32_e32 v34, 0xff800000, v23
; GFX9-NEXT: v_and_b32_e32 v22, 0xffff0000, v22
; GFX9-NEXT: v_and_b32_e32 v6, 0xffff0000, v6
+; GFX9-NEXT: v_bfe_u32 v33, v23, 16, 1
+; GFX9-NEXT: v_max_f32_e32 v6, v6, v22
; GFX9-NEXT: v_add3_u32 v33, v33, v23, s4
-; GFX9-NEXT: v_or_b32_e32 v34, 0x400000, v34
+; GFX9-NEXT: v_or_b32_e32 v34, 0x400000, v23
; GFX9-NEXT: v_cmp_u_f32_e32 vcc, v23, v23
-; GFX9-NEXT: v_max_f32_e32 v6, v6, v22
-; GFX9-NEXT: v_cndmask_b32_e32 v23, v33, v34, vcc
; GFX9-NEXT: v_bfe_u32 v22, v6, 16, 1
-; GFX9-NEXT: v_and_b32_e32 v33, 0xff800000, v6
+; GFX9-NEXT: v_cndmask_b32_e32 v23, v33, v34, vcc
; GFX9-NEXT: v_add3_u32 v22, v22, v6, s4
-; GFX9-NEXT: v_or_b32_e32 v33, 0x400000, v33
+; GFX9-NEXT: v_or_b32_e32 v33, 0x400000, v6
; GFX9-NEXT: v_cmp_u_f32_e32 vcc, v6, v6
; GFX9-NEXT: v_cndmask_b32_e32 v6, v22, v33, vcc
; GFX9-NEXT: v_lshlrev_b32_e32 v22, 16, v21
; GFX9-NEXT: v_lshlrev_b32_e32 v33, 16, v5
; GFX9-NEXT: v_max_f32_e32 v22, v33, v22
-; GFX9-NEXT: v_bfe_u32 v33, v22, 16, 1
-; GFX9-NEXT: v_and_b32_e32 v34, 0xff800000, v22
; GFX9-NEXT: v_and_b32_e32 v21, 0xffff0000, v21
; GFX9-NEXT: v_and_b32_e32 v5, 0xffff0000, v5
+; GFX9-NEXT: v_bfe_u32 v33, v22, 16, 1
+; GFX9-NEXT: v_max_f32_e32 v5, v5, v21
; GFX9-NEXT: v_add3_u32 v33, v33, v22, s4
-; GFX9-NEXT: v_or_b32_e32 v34, 0x400000, v34
+; GFX9-NEXT: v_or_b32_e32 v34, 0x400000, v22
; GFX9-NEXT: v_cmp_u_f32_e32 vcc, v22, v22
-; GFX9-NEXT: v_max_f32_e32 v5, v5, v21
-; GFX9-NEXT: v_cndmask_b32_e32 v22, v33, v34, vcc
; GFX9-NEXT: v_bfe_u32 v21, v5, 16, 1
-; GFX9-NEXT: v_and_b32_e32 v33, 0xff800000, v5
+; GFX9-NEXT: v_cndmask_b32_e32 v22, v33, v34, vcc
; GFX9-NEXT: v_add3_u32 v21, v21, v5, s4
-; GFX9-NEXT: v_or_b32_e32 v33, 0x400000, v33
+; GFX9-NEXT: v_or_b32_e32 v33, 0x400000, v5
; GFX9-NEXT: v_cmp_u_f32_e32 vcc, v5, v5
; GFX9-NEXT: v_cndmask_b32_e32 v5, v21, v33, vcc
; GFX9-NEXT: v_lshlrev_b32_e32 v21, 16, v20
; GFX9-NEXT: v_lshlrev_b32_e32 v33, 16, v4
; GFX9-NEXT: v_max_f32_e32 v21, v33, v21
-; GFX9-NEXT: v_bfe_u32 v33, v21, 16, 1
-; GFX9-NEXT: v_and_b32_e32 v34, 0xff800000, v21
; GFX9-NEXT: v_and_b32_e32 v20, 0xffff0000, v20
; GFX9-NEXT: v_and_b32_e32 v4, 0xffff0000, v4
+; GFX9-NEXT: v_bfe_u32 v33, v21, 16, 1
+; GFX9-NEXT: v_max_f32_e32 v4, v4, v20
; GFX9-NEXT: v_add3_u32 v33, v33, v21, s4
-; GFX9-NEXT: v_or_b32_e32 v34, 0x400000, v34
+; GFX9-NEXT: v_or_b32_e32 v34, 0x400000, v21
; GFX9-NEXT: v_cmp_u_f32_e32 vcc, v21, v21
-; GFX9-NEXT: v_max_f32_e32 v4, v4, v20
-; GFX9-NEXT: v_cndmask_b32_e32 v21, v33, v34, vcc
; GFX9-NEXT: v_bfe_u32 v20, v4, 16, 1
-; GFX9-NEXT: v_and_b32_e32 v33, 0xff800000, v4
+; GFX9-NEXT: v_cndmask_b32_e32 v21, v33, v34, vcc
; GFX9-NEXT: v_add3_u32 v20, v20, v4, s4
-; GFX9-NEXT: v_or_b32_e32 v33, 0x400000, v33
+; GFX9-NEXT: v_or_b32_e32 v33, 0x400000, v4
; GFX9-NEXT: v_cmp_u_f32_e32 vcc, v4, v4
; GFX9-NEXT: v_cndmask_b32_e32 v4, v20, v33, vcc
; GFX9-NEXT: v_lshlrev_b32_e32 v20, 16, v19
; GFX9-NEXT: v_lshlrev_b32_e32 v33, 16, v3
; GFX9-NEXT: v_max_f32_e32 v20, v33, v20
-; GFX9-NEXT: v_bfe_u32 v33, v20, 16, 1
-; GFX9-NEXT: v_and_b32_e32 v34, 0xff800000, v20
; GFX9-NEXT: v_and_b32_e32 v19, 0xffff0000, v19
; GFX9-NEXT: v_and_b32_e32 v3, 0xffff0000, v3
+; GFX9-NEXT: v_bfe_u32 v33, v20, 16, 1
+; GFX9-NEXT: v_max_f32_e32 v3, v3, v19
; GFX9-NEXT: v_add3_u32 v33, v33, v20, s4
-; GFX9-NEXT: v_or_b32_e32 v34, 0x400000, v34
+; GFX9-NEXT: v_or_b32_e32 v34, 0x400000, v20
; GFX9-NEXT: v_cmp_u_f32_e32 vcc, v20, v20
-; GFX9-NEXT: v_max_f32_e32 v3, v3, v19
-; GFX9-NEXT: v_cndmask_b32_e32 v20, v33, v34, vcc
; GFX9-NEXT: v_bfe_u32 v19, v3, 16, 1
-; GFX9-NEXT: v_and_b32_e32 v33, 0xff800000, v3
+; GFX9-NEXT: v_cndmask_b32_e32 v20, v33, v34, vcc
; GFX9-NEXT: v_add3_u32 v19, v19, v3, s4
-; GFX9-NEXT: v_or_b32_e32 v33, 0x400000, v33
+; GFX9-NEXT: v_or_b32_e32 v33, 0x400000, v3
; GFX9-NEXT: v_cmp_u_f32_e32 vcc, v3, v3
; GFX9-NEXT: v_cndmask_b32_e32 v3, v19, v33, vcc
; GFX9-NEXT: v_lshlrev_b32_e32 v19, 16, v18
; GFX9-NEXT: v_lshlrev_b32_e32 v33, 16, v2
; GFX9-NEXT: v_max_f32_e32 v19, v33, v19
-; GFX9-NEXT: v_bfe_u32 v33, v19, 16, 1
-; GFX9-NEXT: v_and_b32_e32 v34, 0xff800000, v19
; GFX9-NEXT: v_and_b32_e32 v18, 0xffff0000, v18
; GFX9-NEXT: v_and_b32_e32 v2, 0xffff0000, v2
+; GFX9-NEXT: v_bfe_u32 v33, v19, 16, 1
+; GFX9-NEXT: v_max_f32_e32 v2, v2, v18
; GFX9-NEXT: v_add3_u32 v33, v33, v19, s4
-; GFX9-NEXT: v_or_b32_e32 v34, 0x400000, v34
+; GFX9-NEXT: v_or_b32_e32 v34, 0x400000, v19
; GFX9-NEXT: v_cmp_u_f32_e32 vcc, v19, v19
-; GFX9-NEXT: v_max_f32_e32 v2, v2, v18
-; GFX9-NEXT: v_cndmask_b32_e32 v19, v33, v34, vcc
; GFX9-NEXT: v_bfe_u32 v18, v2, 16, 1
-; GFX9-NEXT: v_and_b32_e32 v33, 0xff800000, v2
+; GFX9-NEXT: v_cndmask_b32_e32 v19, v33, v34, vcc
; GFX9-NEXT: v_add3_u32 v18, v18, v2, s4
-; GFX9-NEXT: v_or_b32_e32 v33, 0x400000, v33
+; GFX9-NEXT: v_or_b32_e32 v33, 0x400000, v2
; GFX9-NEXT: v_cmp_u_f32_e32 vcc, v2, v2
; GFX9-NEXT: v_cndmask_b32_e32 v2, v18, v33, vcc
; GFX9-NEXT: v_lshlrev_b32_e32 v18, 16, v17
; GFX9-NEXT: v_lshlrev_b32_e32 v33, 16, v1
; GFX9-NEXT: v_max_f32_e32 v18, v33, v18
-; GFX9-NEXT: v_bfe_u32 v33, v18, 16, 1
-; GFX9-NEXT: v_and_b32_e32 v34, 0xff800000, v18
; GFX9-NEXT: v_and_b32_e32 v17, 0xffff0000, v17
; GFX9-NEXT: v_and_b32_e32 v1, 0xffff0000, v1
+; GFX9-NEXT: v_bfe_u32 v33, v18, 16, 1
+; GFX9-NEXT: v_max_f32_e32 v1, v1, v17
; GFX9-NEXT: v_add3_u32 v33, v33, v18, s4
-; GFX9-NEXT: v_or_b32_e32 v34, 0x400000, v34
+; GFX9-NEXT: v_or_b32_e32 v34, 0x400000, v18
; GFX9-NEXT: v_cmp_u_f32_e32 vcc, v18, v18
-; GFX9-NEXT: v_max_f32_e32 v1, v1, v17
-; GFX9-NEXT: v_cndmask_b32_e32 v18, v33, v34, vcc
; GFX9-NEXT: v_bfe_u32 v17, v1, 16, 1
-; GFX9-NEXT: v_and_b32_e32 v33, 0xff800000, v1
+; GFX9-NEXT: v_cndmask_b32_e32 v18, v33, v34, vcc
; GFX9-NEXT: v_add3_u32 v17, v17, v1, s4
-; GFX9-NEXT: v_or_b32_e32 v33, 0x400000, v33
+; GFX9-NEXT: v_or_b32_e32 v33, 0x400000, v1
; GFX9-NEXT: v_cmp_u_f32_e32 vcc, v1, v1
; GFX9-NEXT: v_cndmask_b32_e32 v1, v17, v33, vcc
; GFX9-NEXT: v_lshlrev_b32_e32 v17, 16, v16
; GFX9-NEXT: v_lshlrev_b32_e32 v33, 16, v0
; GFX9-NEXT: v_max_f32_e32 v17, v33, v17
-; GFX9-NEXT: v_bfe_u32 v33, v17, 16, 1
-; GFX9-NEXT: v_and_b32_e32 v34, 0xff800000, v17
; GFX9-NEXT: v_and_b32_e32 v16, 0xffff0000, v16
; GFX9-NEXT: v_and_b32_e32 v0, 0xffff0000, v0
+; GFX9-NEXT: v_bfe_u32 v33, v17, 16, 1
+; GFX9-NEXT: v_max_f32_e32 v0, v0, v16
; GFX9-NEXT: v_add3_u32 v33, v33, v17, s4
-; GFX9-NEXT: v_or_b32_e32 v34, 0x400000, v34
+; GFX9-NEXT: v_or_b32_e32 v34, 0x400000, v17
; GFX9-NEXT: v_cmp_u_f32_e32 vcc, v17, v17
-; GFX9-NEXT: v_max_f32_e32 v0, v0, v16
-; GFX9-NEXT: v_cndmask_b32_e32 v17, v33, v34, vcc
; GFX9-NEXT: v_bfe_u32 v16, v0, 16, 1
-; GFX9-NEXT: v_and_b32_e32 v33, 0xff800000, v0
+; GFX9-NEXT: v_cndmask_b32_e32 v17, v33, v34, vcc
; GFX9-NEXT: v_add3_u32 v16, v16, v0, s4
-; GFX9-NEXT: v_or_b32_e32 v33, 0x400000, v33
+; GFX9-NEXT: v_or_b32_e32 v33, 0x400000, v0
; GFX9-NEXT: v_cmp_u_f32_e32 vcc, v0, v0
; GFX9-NEXT: v_cndmask_b32_e32 v0, v16, v33, vcc
; GFX9-NEXT: s_mov_b32 s4, 0x7060302
@@ -24953,7 +24339,7 @@ define <32 x bfloat> @v_maxnum_v32bf16(<32 x bfloat> %a, <32 x bfloat> %b) {
; GFX10-LABEL: v_maxnum_v32bf16:
; GFX10: ; %bb.0:
; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX10-NEXT: buffer_load_dword v31, off, s[0:3], s32
+; GFX10-NEXT: buffer_load_dword v32, off, s[0:3], s32
; GFX10-NEXT: v_lshlrev_b32_e32 v39, 16, v27
; GFX10-NEXT: v_lshlrev_b32_e32 v48, 16, v11
; GFX10-NEXT: v_and_b32_e32 v27, 0xffff0000, v27
@@ -25018,7 +24404,6 @@ define <32 x bfloat> @v_maxnum_v32bf16(<32 x bfloat> %a, <32 x bfloat> %b) {
; GFX10-NEXT: v_bfe_u32 v55, v11, 16, 1
; GFX10-NEXT: v_bfe_u32 v65, v49, 16, 1
; GFX10-NEXT: v_bfe_u32 v67, v10, 16, 1
-; GFX10-NEXT: s_mov_b32 s23, 0xff800000
; GFX10-NEXT: v_lshlrev_b32_e32 v33, 16, v30
; GFX10-NEXT: v_lshlrev_b32_e32 v34, 16, v14
; GFX10-NEXT: v_and_b32_e32 v30, 0xffff0000, v30
@@ -25034,10 +24419,10 @@ define <32 x bfloat> @v_maxnum_v32bf16(<32 x bfloat> %a, <32 x bfloat> %b) {
; GFX10-NEXT: v_max_f32_e32 v1, v1, v17
; GFX10-NEXT: v_max_f32_e32 v17, v26, v50
; GFX10-NEXT: v_max_f32_e32 v0, v0, v16
-; GFX10-NEXT: v_and_or_b32 v54, v39, s23, 0x400000
-; GFX10-NEXT: v_and_or_b32 v64, v11, s23, 0x400000
-; GFX10-NEXT: v_and_or_b32 v66, v49, s23, 0x400000
-; GFX10-NEXT: v_and_or_b32 v68, v10, s23, 0x400000
+; GFX10-NEXT: v_or_b32_e32 v54, 0x400000, v39
+; GFX10-NEXT: v_or_b32_e32 v64, 0x400000, v11
+; GFX10-NEXT: v_or_b32_e32 v66, 0x400000, v49
+; GFX10-NEXT: v_or_b32_e32 v68, 0x400000, v10
; GFX10-NEXT: v_cmp_u_f32_e64 s9, v39, v39
; GFX10-NEXT: v_add3_u32 v39, v53, v39, 0x7fff
; GFX10-NEXT: v_cmp_u_f32_e64 s10, v11, v11
@@ -25075,28 +24460,28 @@ define <32 x bfloat> @v_maxnum_v32bf16(<32 x bfloat> %a, <32 x bfloat> %b) {
; GFX10-NEXT: v_bfe_u32 v27, v14, 16, 1
; GFX10-NEXT: v_bfe_u32 v29, v35, 16, 1
; GFX10-NEXT: v_bfe_u32 v34, v13, 16, 1
-; GFX10-NEXT: v_and_or_b32 v48, v37, s23, 0x400000
-; GFX10-NEXT: v_and_or_b32 v52, v12, s23, 0x400000
+; GFX10-NEXT: v_or_b32_e32 v48, 0x400000, v37
+; GFX10-NEXT: v_or_b32_e32 v52, 0x400000, v12
; GFX10-NEXT: v_cmp_u_f32_e64 s7, v37, v37
; GFX10-NEXT: v_add3_u32 v37, v38, v37, 0x7fff
; GFX10-NEXT: v_cmp_u_f32_e64 s8, v12, v12
; GFX10-NEXT: v_add3_u32 v12, v50, v12, 0x7fff
; GFX10-NEXT: v_cmp_u_f32_e64 s10, v18, v18
; GFX10-NEXT: v_add3_u32 v54, v54, v18, 0x7fff
-; GFX10-NEXT: v_and_or_b32 v18, v18, s23, 0x400000
+; GFX10-NEXT: v_or_b32_e32 v18, 0x400000, v18
; GFX10-NEXT: v_cmp_u_f32_e64 s11, v1, v1
; GFX10-NEXT: v_add3_u32 v64, v64, v1, 0x7fff
-; GFX10-NEXT: v_and_or_b32 v1, v1, s23, 0x400000
+; GFX10-NEXT: v_or_b32_e32 v1, 0x400000, v1
; GFX10-NEXT: v_cmp_u_f32_e64 s12, v17, v17
; GFX10-NEXT: v_add3_u32 v66, v66, v17, 0x7fff
-; GFX10-NEXT: v_and_or_b32 v17, v17, s23, 0x400000
+; GFX10-NEXT: v_or_b32_e32 v17, 0x400000, v17
; GFX10-NEXT: v_cmp_u_f32_e64 s22, v0, v0
; GFX10-NEXT: v_add3_u32 v68, v68, v0, 0x7fff
-; GFX10-NEXT: v_and_or_b32 v0, v0, s23, 0x400000
-; GFX10-NEXT: v_and_or_b32 v26, v33, s23, 0x400000
-; GFX10-NEXT: v_and_or_b32 v28, v14, s23, 0x400000
-; GFX10-NEXT: v_and_or_b32 v30, v35, s23, 0x400000
-; GFX10-NEXT: v_and_or_b32 v36, v13, s23, 0x400000
+; GFX10-NEXT: v_or_b32_e32 v0, 0x400000, v0
+; GFX10-NEXT: v_or_b32_e32 v26, 0x400000, v33
+; GFX10-NEXT: v_or_b32_e32 v28, 0x400000, v14
+; GFX10-NEXT: v_or_b32_e32 v30, 0x400000, v35
+; GFX10-NEXT: v_or_b32_e32 v36, 0x400000, v13
; GFX10-NEXT: v_cmp_u_f32_e32 vcc_lo, v33, v33
; GFX10-NEXT: v_add3_u32 v16, v16, v33, 0x7fff
; GFX10-NEXT: v_bfe_u32 v33, v51, 16, 1
@@ -25115,12 +24500,12 @@ define <32 x bfloat> @v_maxnum_v32bf16(<32 x bfloat> %a, <32 x bfloat> %b) {
; GFX10-NEXT: v_cndmask_b32_e64 v17, v66, v17, s12
; GFX10-NEXT: v_cndmask_b32_e64 v0, v68, v0, s22
; GFX10-NEXT: v_cndmask_b32_e64 v1, v64, v1, s11
-; GFX10-NEXT: v_lshlrev_b32_e32 v32, 16, v15
+; GFX10-NEXT: v_lshlrev_b32_e32 v31, 16, v15
; GFX10-NEXT: v_and_b32_e32 v15, 0xffff0000, v15
-; GFX10-NEXT: v_and_or_b32 v27, v51, s23, 0x400000
+; GFX10-NEXT: v_or_b32_e32 v27, 0x400000, v51
; GFX10-NEXT: v_bfe_u32 v35, v9, 16, 1
; GFX10-NEXT: v_bfe_u32 v38, v25, 16, 1
-; GFX10-NEXT: v_and_or_b32 v67, v24, s23, 0x400000
+; GFX10-NEXT: v_or_b32_e32 v67, 0x400000, v24
; GFX10-NEXT: v_cmp_u_f32_e64 s13, v51, v51
; GFX10-NEXT: v_add3_u32 v33, v33, v51, 0x7fff
; GFX10-NEXT: v_bfe_u32 v51, v7, 16, 1
@@ -25137,51 +24522,51 @@ define <32 x bfloat> @v_maxnum_v32bf16(<32 x bfloat> %a, <32 x bfloat> %b) {
; GFX10-NEXT: v_bfe_u32 v36, v3, 16, 1
; GFX10-NEXT: v_cmp_u_f32_e64 s8, v19, v19
; GFX10-NEXT: v_add3_u32 v48, v48, v19, 0x7fff
-; GFX10-NEXT: v_and_or_b32 v19, v19, s23, 0x400000
+; GFX10-NEXT: v_or_b32_e32 v19, 0x400000, v19
; GFX10-NEXT: v_cmp_u_f32_e64 s9, v2, v2
; GFX10-NEXT: v_add3_u32 v52, v52, v2, 0x7fff
-; GFX10-NEXT: v_and_or_b32 v2, v2, s23, 0x400000
+; GFX10-NEXT: v_or_b32_e32 v2, 0x400000, v2
; GFX10-NEXT: v_perm_b32 v0, v0, v17, 0x7060302
; GFX10-NEXT: v_perm_b32 v1, v1, v18, 0x7060302
-; GFX10-NEXT: v_and_or_b32 v34, v9, s23, 0x400000
-; GFX10-NEXT: v_and_or_b32 v50, v25, s23, 0x400000
+; GFX10-NEXT: v_or_b32_e32 v34, 0x400000, v9
+; GFX10-NEXT: v_or_b32_e32 v50, 0x400000, v25
; GFX10-NEXT: v_bfe_u32 v53, v8, 16, 1
; GFX10-NEXT: v_cmp_u_f32_e64 s14, v9, v9
; GFX10-NEXT: v_add3_u32 v9, v35, v9, 0x7fff
-; GFX10-NEXT: v_and_or_b32 v35, v7, s23, 0x400000
+; GFX10-NEXT: v_or_b32_e32 v35, 0x400000, v7
; GFX10-NEXT: v_cmp_u_f32_e64 s15, v25, v25
; GFX10-NEXT: v_add3_u32 v25, v38, v25, 0x7fff
; GFX10-NEXT: v_bfe_u32 v38, v23, 16, 1
; GFX10-NEXT: v_cmp_u_f32_e64 s18, v7, v7
; GFX10-NEXT: v_add3_u32 v7, v51, v7, 0x7fff
-; GFX10-NEXT: v_and_or_b32 v51, v6, s23, 0x400000
+; GFX10-NEXT: v_or_b32_e32 v51, 0x400000, v6
; GFX10-NEXT: v_cmp_u_f32_e64 s20, v6, v6
; GFX10-NEXT: v_add3_u32 v6, v65, v6, 0x7fff
; GFX10-NEXT: v_bfe_u32 v65, v5, 16, 1
; GFX10-NEXT: v_cmp_u_f32_e64 s4, v21, v21
; GFX10-NEXT: v_add3_u32 v26, v26, v21, 0x7fff
-; GFX10-NEXT: v_and_or_b32 v21, v21, s23, 0x400000
+; GFX10-NEXT: v_or_b32_e32 v21, 0x400000, v21
; GFX10-NEXT: v_cmp_u_f32_e64 s5, v4, v4
; GFX10-NEXT: v_add3_u32 v28, v28, v4, 0x7fff
-; GFX10-NEXT: v_and_or_b32 v4, v4, s23, 0x400000
+; GFX10-NEXT: v_or_b32_e32 v4, 0x400000, v4
; GFX10-NEXT: v_cmp_u_f32_e64 s6, v20, v20
; GFX10-NEXT: v_add3_u32 v30, v30, v20, 0x7fff
-; GFX10-NEXT: v_and_or_b32 v20, v20, s23, 0x400000
+; GFX10-NEXT: v_or_b32_e32 v20, 0x400000, v20
; GFX10-NEXT: v_cmp_u_f32_e64 s7, v3, v3
; GFX10-NEXT: v_add3_u32 v36, v36, v3, 0x7fff
-; GFX10-NEXT: v_and_or_b32 v3, v3, s23, 0x400000
+; GFX10-NEXT: v_or_b32_e32 v3, 0x400000, v3
; GFX10-NEXT: v_cndmask_b32_e64 v19, v48, v19, s8
; GFX10-NEXT: v_cndmask_b32_e64 v2, v52, v2, s9
-; GFX10-NEXT: v_and_or_b32 v55, v8, s23, 0x400000
+; GFX10-NEXT: v_or_b32_e32 v55, 0x400000, v8
; GFX10-NEXT: v_cmp_u_f32_e64 s16, v8, v8
; GFX10-NEXT: v_add3_u32 v8, v53, v8, 0x7fff
-; GFX10-NEXT: v_and_or_b32 v53, v23, s23, 0x400000
+; GFX10-NEXT: v_or_b32_e32 v53, 0x400000, v23
; GFX10-NEXT: v_cmp_u_f32_e64 s19, v23, v23
; GFX10-NEXT: v_add3_u32 v23, v38, v23, 0x7fff
; GFX10-NEXT: v_bfe_u32 v38, v22, 16, 1
; GFX10-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
; GFX10-NEXT: v_add3_u32 v65, v65, v5, 0x7fff
-; GFX10-NEXT: v_and_or_b32 v5, v5, s23, 0x400000
+; GFX10-NEXT: v_or_b32_e32 v5, 0x400000, v5
; GFX10-NEXT: v_cndmask_b32_e64 v21, v26, v21, s4
; GFX10-NEXT: v_cndmask_b32_e64 v4, v28, v4, s5
; GFX10-NEXT: v_cndmask_b32_e64 v20, v30, v20, s6
@@ -25189,7 +24574,7 @@ define <32 x bfloat> @v_maxnum_v32bf16(<32 x bfloat> %a, <32 x bfloat> %b) {
; GFX10-NEXT: v_perm_b32 v2, v2, v19, 0x7060302
; GFX10-NEXT: v_cmp_u_f32_e64 s21, v22, v22
; GFX10-NEXT: v_add3_u32 v38, v38, v22, 0x7fff
-; GFX10-NEXT: v_and_or_b32 v22, v22, s23, 0x400000
+; GFX10-NEXT: v_or_b32_e32 v22, 0x400000, v22
; GFX10-NEXT: v_cndmask_b32_e32 v5, v65, v5, vcc_lo
; GFX10-NEXT: v_perm_b32 v3, v3, v20, 0x7060302
; GFX10-NEXT: v_perm_b32 v4, v4, v21, 0x7060302
@@ -25213,14 +24598,14 @@ define <32 x bfloat> @v_maxnum_v32bf16(<32 x bfloat> %a, <32 x bfloat> %b) {
; GFX10-NEXT: v_perm_b32 v13, v13, v29, 0x7060302
; GFX10-NEXT: v_perm_b32 v14, v14, v16, 0x7060302
; GFX10-NEXT: s_waitcnt vmcnt(0)
-; GFX10-NEXT: v_lshlrev_b32_e32 v17, 16, v31
-; GFX10-NEXT: v_and_b32_e32 v18, 0xffff0000, v31
-; GFX10-NEXT: v_max_f32_e32 v17, v32, v17
+; GFX10-NEXT: v_lshlrev_b32_e32 v17, 16, v32
+; GFX10-NEXT: v_and_b32_e32 v18, 0xffff0000, v32
+; GFX10-NEXT: v_max_f32_e32 v17, v31, v17
; GFX10-NEXT: v_max_f32_e32 v15, v15, v18
; GFX10-NEXT: v_bfe_u32 v18, v17, 16, 1
; GFX10-NEXT: v_bfe_u32 v19, v15, 16, 1
-; GFX10-NEXT: v_and_or_b32 v20, v17, s23, 0x400000
-; GFX10-NEXT: v_and_or_b32 v21, v15, s23, 0x400000
+; GFX10-NEXT: v_or_b32_e32 v20, 0x400000, v17
+; GFX10-NEXT: v_or_b32_e32 v21, 0x400000, v15
; GFX10-NEXT: v_cmp_u_f32_e32 vcc_lo, v17, v17
; GFX10-NEXT: v_cmp_u_f32_e64 s4, v15, v15
; GFX10-NEXT: v_add3_u32 v17, v18, v17, 0x7fff
@@ -25233,212 +24618,219 @@ define <32 x bfloat> @v_maxnum_v32bf16(<32 x bfloat> %a, <32 x bfloat> %b) {
; GFX11-LABEL: v_maxnum_v32bf16:
; GFX11: ; %bb.0:
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: scratch_load_b32 v31, off, s32
+; GFX11-NEXT: scratch_load_b32 v32, off, s32
+; GFX11-NEXT: v_lshlrev_b32_e32 v67, 16, v21
+; GFX11-NEXT: v_lshlrev_b32_e32 v68, 16, v5
+; GFX11-NEXT: v_and_b32_e32 v21, 0xffff0000, v21
+; GFX11-NEXT: v_and_b32_e32 v5, 0xffff0000, v5
; GFX11-NEXT: v_lshlrev_b32_e32 v83, 16, v17
; GFX11-NEXT: v_lshlrev_b32_e32 v84, 16, v1
; GFX11-NEXT: v_and_b32_e32 v17, 0xffff0000, v17
; GFX11-NEXT: v_and_b32_e32 v1, 0xffff0000, v1
+; GFX11-NEXT: v_lshlrev_b32_e32 v49, 16, v26
+; GFX11-NEXT: v_dual_max_f32 v5, v5, v21 :: v_dual_and_b32 v26, 0xffff0000, v26
; GFX11-NEXT: v_lshlrev_b32_e32 v53, 16, v24
-; GFX11-NEXT: v_and_b32_e32 v24, 0xffff0000, v24
-; GFX11-NEXT: s_mov_b32 s0, 0xff800000
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3)
-; GFX11-NEXT: v_dual_max_f32 v1, v1, v17 :: v_dual_lshlrev_b32 v64, 16, v7
-; GFX11-NEXT: v_and_b32_e32 v7, 0xffff0000, v7
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_1) | instid1(VALU_DEP_4)
+; GFX11-NEXT: v_dual_max_f32 v1, v1, v17 :: v_dual_and_b32 v24, 0xffff0000, v24
+; GFX11-NEXT: v_lshlrev_b32_e32 v71, 16, v19
+; GFX11-NEXT: v_bfe_u32 v103, v5, 16, 1
+; GFX11-NEXT: v_and_b32_e32 v19, 0xffff0000, v19
; GFX11-NEXT: v_lshlrev_b32_e32 v81, 16, v18
-; GFX11-NEXT: v_lshlrev_b32_e32 v85, 16, v16
-; GFX11-NEXT: v_lshlrev_b32_e32 v86, 16, v0
; GFX11-NEXT: v_bfe_u32 v135, v1, 16, 1
-; GFX11-NEXT: v_and_b32_e32 v16, 0xffff0000, v16
-; GFX11-NEXT: v_and_b32_e32 v0, 0xffff0000, v0
-; GFX11-NEXT: v_lshlrev_b32_e32 v55, 16, v23
-; GFX11-NEXT: v_and_b32_e32 v23, 0xffff0000, v23
-; GFX11-NEXT: v_and_or_b32 v144, v1, s0, 0x400000
+; GFX11-NEXT: v_or_b32_e32 v112, 0x400000, v5
+; GFX11-NEXT: v_or_b32_e32 v144, 0x400000, v1
+; GFX11-NEXT: v_add3_u32 v103, v103, v5, 0x7fff
+; GFX11-NEXT: v_lshlrev_b32_e32 v80, 16, v3
+; GFX11-NEXT: v_and_b32_e32 v3, 0xffff0000, v3
; GFX11-NEXT: v_add3_u32 v135, v135, v1, 0x7fff
; GFX11-NEXT: v_lshlrev_b32_e32 v82, 16, v2
-; GFX11-NEXT: v_lshlrev_b32_e32 v54, 16, v8
-; GFX11-NEXT: v_dual_max_f32 v17, v86, v85 :: v_dual_and_b32 v8, 0xffff0000, v8
-; GFX11-NEXT: v_dual_max_f32 v7, v7, v23 :: v_dual_lshlrev_b32 v36, 16, v13
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_2) | instid1(VALU_DEP_4)
-; GFX11-NEXT: v_dual_max_f32 v8, v8, v24 :: v_dual_lshlrev_b32 v39, 16, v27
-; GFX11-NEXT: v_dual_max_f32 v0, v0, v16 :: v_dual_lshlrev_b32 v49, 16, v26
-; GFX11-NEXT: v_max_f32_e32 v24, v64, v55
-; GFX11-NEXT: v_bfe_u32 v87, v7, 16, 1
+; GFX11-NEXT: v_lshlrev_b32_e32 v52, 16, v9
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_1) | instid1(VALU_DEP_4)
+; GFX11-NEXT: v_dual_max_f32 v3, v3, v19 :: v_dual_lshlrev_b32 v54, 16, v8
+; GFX11-NEXT: v_lshlrev_b32_e32 v85, 16, v16
+; GFX11-NEXT: v_dual_max_f32 v19, v82, v81 :: v_dual_lshlrev_b32 v64, 16, v7
+; GFX11-NEXT: v_and_b32_e32 v7, 0xffff0000, v7
; GFX11-NEXT: v_lshlrev_b32_e32 v65, 16, v22
; GFX11-NEXT: v_lshlrev_b32_e32 v66, 16, v6
-; GFX11-NEXT: v_and_b32_e32 v22, 0xffff0000, v22
-; GFX11-NEXT: v_bfe_u32 v85, v24, 16, 1
-; GFX11-NEXT: v_lshlrev_b32_e32 v67, 16, v21
-; GFX11-NEXT: v_lshlrev_b32_e32 v68, 16, v5
-; GFX11-NEXT: v_and_b32_e32 v21, 0xffff0000, v21
-; GFX11-NEXT: v_and_b32_e32 v5, 0xffff0000, v5
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4)
+; GFX11-NEXT: v_bfe_u32 v129, v19, 16, 1
+; GFX11-NEXT: v_or_b32_e32 v130, 0x400000, v19
+; GFX11-NEXT: v_lshlrev_b32_e32 v48, 16, v11
+; GFX11-NEXT: v_bfe_u32 v119, v3, 16, 1
+; GFX11-NEXT: v_lshlrev_b32_e32 v51, 16, v25
+; GFX11-NEXT: v_add3_u32 v129, v129, v19, 0x7fff
+; GFX11-NEXT: v_lshlrev_b32_e32 v86, 16, v0
+; GFX11-NEXT: v_and_b32_e32 v8, 0xffff0000, v8
+; GFX11-NEXT: v_and_b32_e32 v0, 0xffff0000, v0
+; GFX11-NEXT: v_and_b32_e32 v9, 0xffff0000, v9
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
+; GFX11-NEXT: v_dual_max_f32 v17, v86, v85 :: v_dual_and_b32 v2, 0xffff0000, v2
+; GFX11-NEXT: v_dual_max_f32 v8, v8, v24 :: v_dual_lshlrev_b32 v39, 16, v27
+; GFX11-NEXT: v_or_b32_e32 v128, 0x400000, v3
+; GFX11-NEXT: v_add3_u32 v119, v119, v3, 0x7fff
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4)
+; GFX11-NEXT: v_bfe_u32 v145, v17, 16, 1
+; GFX11-NEXT: v_or_b32_e32 v146, 0x400000, v17
+; GFX11-NEXT: v_and_b32_e32 v18, 0xffff0000, v18
+; GFX11-NEXT: v_and_b32_e32 v25, 0xffff0000, v25
; GFX11-NEXT: v_lshlrev_b32_e32 v70, 16, v4
-; GFX11-NEXT: v_and_or_b32 v86, v24, s0, 0x400000
-; GFX11-NEXT: v_and_or_b32 v96, v7, s0, 0x400000
+; GFX11-NEXT: v_add3_u32 v145, v145, v17, 0x7fff
+; GFX11-NEXT: v_and_b32_e32 v16, 0xffff0000, v16
+; GFX11-NEXT: v_lshlrev_b32_e32 v55, 16, v23
+; GFX11-NEXT: v_and_b32_e32 v23, 0xffff0000, v23
+; GFX11-NEXT: v_lshlrev_b32_e32 v50, 16, v10
+; GFX11-NEXT: v_max_f32_e32 v2, v2, v18
+; GFX11-NEXT: v_max_f32_e32 v0, v0, v16
+; GFX11-NEXT: v_dual_max_f32 v24, v64, v55 :: v_dual_lshlrev_b32 v37, 16, v28
+; GFX11-NEXT: v_max_f32_e32 v7, v7, v23
+; GFX11-NEXT: v_dual_max_f32 v23, v66, v65 :: v_dual_max_f32 v18, v84, v83
+; GFX11-NEXT: v_dual_max_f32 v9, v9, v25 :: v_dual_and_b32 v28, 0xffff0000, v28
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX11-NEXT: v_bfe_u32 v85, v24, 16, 1
+; GFX11-NEXT: v_bfe_u32 v97, v23, 16, 1
+; GFX11-NEXT: v_or_b32_e32 v86, 0x400000, v24
+; GFX11-NEXT: v_or_b32_e32 v98, 0x400000, v23
+; GFX11-NEXT: v_bfe_u32 v87, v7, 16, 1
; GFX11-NEXT: v_add3_u32 v85, v85, v24, 0x7fff
; GFX11-NEXT: v_lshlrev_b32_e32 v69, 16, v20
-; GFX11-NEXT: v_add3_u32 v87, v87, v7, 0x7fff
-; GFX11-NEXT: v_and_b32_e32 v6, 0xffff0000, v6
-; GFX11-NEXT: v_dual_max_f32 v23, v66, v65 :: v_dual_lshlrev_b32 v48, 16, v11
-; GFX11-NEXT: v_and_b32_e32 v27, 0xffff0000, v27
-; GFX11-NEXT: v_dual_max_f32 v5, v5, v21 :: v_dual_lshlrev_b32 v50, 16, v10
-; GFX11-NEXT: v_dual_max_f32 v21, v70, v69 :: v_dual_and_b32 v26, 0xffff0000, v26
-; GFX11-NEXT: v_and_b32_e32 v18, 0xffff0000, v18
-; GFX11-NEXT: v_and_b32_e32 v2, 0xffff0000, v2
-; GFX11-NEXT: v_max_f32_e32 v6, v6, v22
-; GFX11-NEXT: v_lshlrev_b32_e32 v52, 16, v9
-; GFX11-NEXT: v_and_b32_e32 v9, 0xffff0000, v9
-; GFX11-NEXT: v_bfe_u32 v97, v23, 16, 1
-; GFX11-NEXT: v_max_f32_e32 v2, v2, v18
-; GFX11-NEXT: v_max_f32_e32 v18, v84, v83
-; GFX11-NEXT: v_bfe_u32 v83, v8, 16, 1
-; GFX11-NEXT: v_bfe_u32 v99, v6, 16, 1
-; GFX11-NEXT: v_bfe_u32 v103, v5, 16, 1
-; GFX11-NEXT: v_bfe_u32 v113, v21, 16, 1
-; GFX11-NEXT: v_lshlrev_b32_e32 v71, 16, v19
-; GFX11-NEXT: v_and_or_b32 v84, v8, s0, 0x400000
-; GFX11-NEXT: v_and_or_b32 v98, v23, s0, 0x400000
-; GFX11-NEXT: v_and_or_b32 v100, v6, s0, 0x400000
-; GFX11-NEXT: v_and_or_b32 v112, v5, s0, 0x400000
-; GFX11-NEXT: v_and_or_b32 v114, v21, s0, 0x400000
-; GFX11-NEXT: v_add3_u32 v83, v83, v8, 0x7fff
-; GFX11-NEXT: v_and_b32_e32 v19, 0xffff0000, v19
-; GFX11-NEXT: v_add3_u32 v97, v97, v23, 0x7fff
; GFX11-NEXT: v_and_b32_e32 v20, 0xffff0000, v20
-; GFX11-NEXT: v_add3_u32 v99, v99, v6, 0x7fff
-; GFX11-NEXT: v_add3_u32 v103, v103, v5, 0x7fff
-; GFX11-NEXT: v_lshlrev_b32_e32 v80, 16, v3
-; GFX11-NEXT: v_and_b32_e32 v3, 0xffff0000, v3
-; GFX11-NEXT: v_add3_u32 v113, v113, v21, 0x7fff
+; GFX11-NEXT: v_add3_u32 v97, v97, v23, 0x7fff
; GFX11-NEXT: v_and_b32_e32 v4, 0xffff0000, v4
-; GFX11-NEXT: v_lshlrev_b32_e32 v38, 16, v12
; GFX11-NEXT: v_and_b32_e32 v11, 0xffff0000, v11
-; GFX11-NEXT: v_dual_max_f32 v3, v3, v19 :: v_dual_and_b32 v10, 0xffff0000, v10
-; GFX11-NEXT: v_dual_max_f32 v22, v68, v67 :: v_dual_lshlrev_b32 v51, 16, v25
-; GFX11-NEXT: v_lshlrev_b32_e32 v37, 16, v28
-; GFX11-NEXT: v_dual_max_f32 v4, v4, v20 :: v_dual_and_b32 v25, 0xffff0000, v25
+; GFX11-NEXT: v_or_b32_e32 v96, 0x400000, v7
+; GFX11-NEXT: v_add3_u32 v87, v87, v7, 0x7fff
+; GFX11-NEXT: v_and_b32_e32 v6, 0xffff0000, v6
+; GFX11-NEXT: v_max_f32_e32 v4, v4, v20
; GFX11-NEXT: v_max_f32_e32 v20, v80, v71
-; GFX11-NEXT: v_dual_max_f32 v19, v82, v81 :: v_dual_and_b32 v28, 0xffff0000, v28
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3)
-; GFX11-NEXT: v_dual_max_f32 v9, v9, v25 :: v_dual_and_b32 v12, 0xffff0000, v12
-; GFX11-NEXT: v_max_f32_e32 v25, v54, v53
+; GFX11-NEXT: v_bfe_u32 v71, v9, 16, 1
+; GFX11-NEXT: v_or_b32_e32 v80, 0x400000, v9
; GFX11-NEXT: v_lshlrev_b32_e32 v35, 16, v29
-; GFX11-NEXT: v_and_b32_e32 v29, 0xffff0000, v29
-; GFX11-NEXT: v_dual_max_f32 v10, v10, v26 :: v_dual_and_b32 v13, 0xffff0000, v13
-; GFX11-NEXT: v_dual_max_f32 v12, v12, v28 :: v_dual_lshlrev_b32 v33, 16, v30
-; GFX11-NEXT: v_max_f32_e32 v28, v48, v39
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3)
-; GFX11-NEXT: v_dual_max_f32 v13, v13, v29 :: v_dual_lshlrev_b32 v34, 16, v14
-; GFX11-NEXT: v_dual_max_f32 v11, v11, v27 :: v_dual_and_b32 v14, 0xffff0000, v14
-; GFX11-NEXT: v_dual_max_f32 v27, v50, v49 :: v_dual_max_f32 v26, v52, v51
-; GFX11-NEXT: v_dual_max_f32 v29, v38, v37 :: v_dual_and_b32 v30, 0xffff0000, v30
-; GFX11-NEXT: v_lshlrev_b32_e32 v32, 16, v15
-; GFX11-NEXT: v_and_b32_e32 v15, 0xffff0000, v15
-; GFX11-NEXT: v_bfe_u32 v39, v13, 16, 1
+; GFX11-NEXT: v_dual_max_f32 v21, v70, v69 :: v_dual_and_b32 v10, 0xffff0000, v10
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_1) | instid1(VALU_DEP_3)
+; GFX11-NEXT: v_add3_u32 v71, v71, v9, 0x7fff
+; GFX11-NEXT: v_and_b32_e32 v22, 0xffff0000, v22
+; GFX11-NEXT: v_dual_max_f32 v10, v10, v26 :: v_dual_and_b32 v29, 0xffff0000, v29
+; GFX11-NEXT: v_and_b32_e32 v27, 0xffff0000, v27
+; GFX11-NEXT: v_max_f32_e32 v26, v52, v51
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4)
+; GFX11-NEXT: v_max_f32_e32 v6, v6, v22
+; GFX11-NEXT: v_lshlrev_b32_e32 v36, 16, v13
+; GFX11-NEXT: v_and_b32_e32 v13, 0xffff0000, v13
+; GFX11-NEXT: v_dual_max_f32 v11, v11, v27 :: v_dual_lshlrev_b32 v34, 16, v14
+; GFX11-NEXT: v_dual_max_f32 v22, v68, v67 :: v_dual_lshlrev_b32 v33, 16, v30
+; GFX11-NEXT: v_dual_max_f32 v27, v50, v49 :: v_dual_lshlrev_b32 v38, 16, v12
+; GFX11-NEXT: v_and_b32_e32 v14, 0xffff0000, v14
+; GFX11-NEXT: v_dual_max_f32 v25, v54, v53 :: v_dual_and_b32 v12, 0xffff0000, v12
+; GFX11-NEXT: v_dual_max_f32 v13, v13, v29 :: v_dual_and_b32 v30, 0xffff0000, v30
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_1) | instid1(VALU_DEP_4)
+; GFX11-NEXT: v_max_f32_e32 v29, v38, v37
+; GFX11-NEXT: v_lshlrev_b32_e32 v31, 16, v15
+; GFX11-NEXT: v_dual_max_f32 v12, v12, v28 :: v_dual_and_b32 v15, 0xffff0000, v15
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_3) | instid1(VALU_DEP_4)
; GFX11-NEXT: v_max_f32_e32 v14, v14, v30
+; GFX11-NEXT: v_max_f32_e32 v28, v48, v39
; GFX11-NEXT: v_dual_max_f32 v30, v36, v35 :: v_dual_max_f32 v33, v34, v33
-; GFX11-NEXT: v_and_or_b32 v48, v13, s0, 0x400000
-; GFX11-NEXT: v_bfe_u32 v49, v29, 16, 1
+; GFX11-NEXT: v_bfe_u32 v39, v13, 16, 1
; GFX11-NEXT: v_bfe_u32 v35, v14, 16, 1
-; GFX11-NEXT: v_and_or_b32 v36, v14, s0, 0x400000
+; GFX11-NEXT: v_or_b32_e32 v36, 0x400000, v14
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4)
+; GFX11-NEXT: v_bfe_u32 v37, v30, 16, 1
; GFX11-NEXT: v_bfe_u32 v16, v33, 16, 1
-; GFX11-NEXT: v_and_or_b32 v34, v33, s0, 0x400000
+; GFX11-NEXT: v_or_b32_e32 v34, 0x400000, v33
; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v33, v33
-; GFX11-NEXT: v_bfe_u32 v37, v30, 16, 1
; GFX11-NEXT: v_add3_u32 v35, v35, v14, 0x7fff
+; GFX11-NEXT: v_or_b32_e32 v38, 0x400000, v30
; GFX11-NEXT: v_add3_u32 v16, v16, v33, 0x7fff
-; GFX11-NEXT: v_and_or_b32 v38, v30, s0, 0x400000
-; GFX11-NEXT: v_add3_u32 v39, v39, v13, 0x7fff
; GFX11-NEXT: v_add3_u32 v37, v37, v30, 0x7fff
-; GFX11-NEXT: v_and_or_b32 v50, v29, s0, 0x400000
+; GFX11-NEXT: v_or_b32_e32 v48, 0x400000, v13
+; GFX11-NEXT: v_bfe_u32 v49, v29, 16, 1
+; GFX11-NEXT: v_add3_u32 v39, v39, v13, 0x7fff
; GFX11-NEXT: v_cndmask_b32_e32 v16, v16, v34, vcc_lo
; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v14, v14
+; GFX11-NEXT: v_or_b32_e32 v50, 0x400000, v29
; GFX11-NEXT: v_bfe_u32 v51, v12, 16, 1
; GFX11-NEXT: v_add3_u32 v49, v49, v29, 0x7fff
-; GFX11-NEXT: v_and_or_b32 v52, v12, s0, 0x400000
-; GFX11-NEXT: v_bfe_u32 v53, v28, 16, 1
+; GFX11-NEXT: v_or_b32_e32 v52, 0x400000, v12
; GFX11-NEXT: v_cndmask_b32_e32 v14, v35, v36, vcc_lo
; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v30, v30
+; GFX11-NEXT: v_bfe_u32 v53, v28, 16, 1
; GFX11-NEXT: v_add3_u32 v51, v51, v12, 0x7fff
-; GFX11-NEXT: v_and_or_b32 v54, v28, s0, 0x400000
+; GFX11-NEXT: v_or_b32_e32 v54, 0x400000, v28
; GFX11-NEXT: v_bfe_u32 v55, v11, 16, 1
-; GFX11-NEXT: v_add3_u32 v53, v53, v28, 0x7fff
; GFX11-NEXT: v_cndmask_b32_e32 v30, v37, v38, vcc_lo
; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v13, v13
-; GFX11-NEXT: v_and_or_b32 v64, v11, s0, 0x400000
+; GFX11-NEXT: v_add3_u32 v53, v53, v28, 0x7fff
+; GFX11-NEXT: v_or_b32_e32 v64, 0x400000, v11
; GFX11-NEXT: v_bfe_u32 v65, v27, 16, 1
; GFX11-NEXT: v_add3_u32 v55, v55, v11, 0x7fff
-; GFX11-NEXT: v_and_or_b32 v66, v27, s0, 0x400000
; GFX11-NEXT: v_cndmask_b32_e32 v13, v39, v48, vcc_lo
; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v29, v29
+; GFX11-NEXT: v_or_b32_e32 v66, 0x400000, v27
; GFX11-NEXT: v_bfe_u32 v67, v10, 16, 1
; GFX11-NEXT: v_add3_u32 v65, v65, v27, 0x7fff
-; GFX11-NEXT: v_and_or_b32 v68, v10, s0, 0x400000
-; GFX11-NEXT: v_bfe_u32 v69, v26, 16, 1
+; GFX11-NEXT: v_or_b32_e32 v68, 0x400000, v10
; GFX11-NEXT: v_cndmask_b32_e32 v29, v49, v50, vcc_lo
; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v12, v12
+; GFX11-NEXT: v_bfe_u32 v69, v26, 16, 1
; GFX11-NEXT: v_add3_u32 v67, v67, v10, 0x7fff
-; GFX11-NEXT: v_and_or_b32 v70, v26, s0, 0x400000
-; GFX11-NEXT: v_bfe_u32 v71, v9, 16, 1
-; GFX11-NEXT: v_add3_u32 v69, v69, v26, 0x7fff
+; GFX11-NEXT: v_or_b32_e32 v70, 0x400000, v26
+; GFX11-NEXT: v_bfe_u32 v81, v25, 16, 1
; GFX11-NEXT: v_cndmask_b32_e32 v12, v51, v52, vcc_lo
; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v28, v28
-; GFX11-NEXT: v_and_or_b32 v80, v9, s0, 0x400000
-; GFX11-NEXT: v_bfe_u32 v81, v25, 16, 1
-; GFX11-NEXT: v_add3_u32 v71, v71, v9, 0x7fff
-; GFX11-NEXT: v_and_or_b32 v82, v25, s0, 0x400000
+; GFX11-NEXT: v_add3_u32 v69, v69, v26, 0x7fff
+; GFX11-NEXT: v_or_b32_e32 v82, 0x400000, v25
+; GFX11-NEXT: v_bfe_u32 v83, v8, 16, 1
+; GFX11-NEXT: v_add3_u32 v81, v81, v25, 0x7fff
; GFX11-NEXT: v_cndmask_b32_e32 v28, v53, v54, vcc_lo
; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v11, v11
-; GFX11-NEXT: v_add3_u32 v81, v81, v25, 0x7fff
-; GFX11-NEXT: v_bfe_u32 v101, v22, 16, 1
-; GFX11-NEXT: v_and_or_b32 v102, v22, s0, 0x400000
-; GFX11-NEXT: v_bfe_u32 v115, v4, 16, 1
+; GFX11-NEXT: v_or_b32_e32 v84, 0x400000, v8
+; GFX11-NEXT: v_add3_u32 v83, v83, v8, 0x7fff
+; GFX11-NEXT: v_bfe_u32 v99, v6, 16, 1
+; GFX11-NEXT: v_or_b32_e32 v100, 0x400000, v6
; GFX11-NEXT: v_cndmask_b32_e32 v11, v55, v64, vcc_lo
; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v27, v27
-; GFX11-NEXT: v_add3_u32 v101, v101, v22, 0x7fff
-; GFX11-NEXT: v_and_or_b32 v116, v4, s0, 0x400000
-; GFX11-NEXT: v_bfe_u32 v117, v20, 16, 1
-; GFX11-NEXT: v_add3_u32 v115, v115, v4, 0x7fff
+; GFX11-NEXT: v_bfe_u32 v101, v22, 16, 1
+; GFX11-NEXT: v_add3_u32 v99, v99, v6, 0x7fff
+; GFX11-NEXT: v_or_b32_e32 v102, 0x400000, v22
+; GFX11-NEXT: v_bfe_u32 v113, v21, 16, 1
; GFX11-NEXT: v_cndmask_b32_e32 v27, v65, v66, vcc_lo
; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v10, v10
-; GFX11-NEXT: v_and_or_b32 v118, v20, s0, 0x400000
-; GFX11-NEXT: v_bfe_u32 v129, v19, 16, 1
-; GFX11-NEXT: v_add3_u32 v117, v117, v20, 0x7fff
-; GFX11-NEXT: v_and_or_b32 v130, v19, s0, 0x400000
+; GFX11-NEXT: v_add3_u32 v101, v101, v22, 0x7fff
+; GFX11-NEXT: v_or_b32_e32 v114, 0x400000, v21
+; GFX11-NEXT: v_bfe_u32 v115, v4, 16, 1
+; GFX11-NEXT: v_add3_u32 v113, v113, v21, 0x7fff
; GFX11-NEXT: v_cndmask_b32_e32 v10, v67, v68, vcc_lo
; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v26, v26
-; GFX11-NEXT: v_bfe_u32 v133, v18, 16, 1
-; GFX11-NEXT: v_add3_u32 v129, v129, v19, 0x7fff
-; GFX11-NEXT: v_and_or_b32 v134, v18, s0, 0x400000
-; GFX11-NEXT: v_bfe_u32 v145, v17, 16, 1
+; GFX11-NEXT: v_or_b32_e32 v116, 0x400000, v4
+; GFX11-NEXT: v_bfe_u32 v117, v20, 16, 1
+; GFX11-NEXT: v_add3_u32 v115, v115, v4, 0x7fff
+; GFX11-NEXT: v_or_b32_e32 v118, 0x400000, v20
; GFX11-NEXT: v_cndmask_b32_e32 v26, v69, v70, vcc_lo
; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v9, v9
-; GFX11-NEXT: v_add3_u32 v133, v133, v18, 0x7fff
-; GFX11-NEXT: v_and_or_b32 v146, v17, s0, 0x400000
+; GFX11-NEXT: v_add3_u32 v117, v117, v20, 0x7fff
+; GFX11-NEXT: v_bfe_u32 v133, v18, 16, 1
+; GFX11-NEXT: v_or_b32_e32 v134, 0x400000, v18
; GFX11-NEXT: v_bfe_u32 v147, v0, 16, 1
-; GFX11-NEXT: v_add3_u32 v145, v145, v17, 0x7fff
; GFX11-NEXT: v_cndmask_b32_e32 v9, v71, v80, vcc_lo
; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v25, v25
-; GFX11-NEXT: v_bfe_u32 v131, v2, 16, 1
-; GFX11-NEXT: v_and_or_b32 v33, v0, s0, 0x400000
+; GFX11-NEXT: v_add3_u32 v133, v133, v18, 0x7fff
+; GFX11-NEXT: v_or_b32_e32 v33, 0x400000, v0
; GFX11-NEXT: v_add3_u32 v147, v147, v0, 0x7fff
-; GFX11-NEXT: v_and_or_b32 v132, v2, s0, 0x400000
+; GFX11-NEXT: v_bfe_u32 v131, v2, 16, 1
; GFX11-NEXT: v_cndmask_b32_e32 v25, v81, v82, vcc_lo
; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v8, v8
-; GFX11-NEXT: v_add3_u32 v131, v131, v2, 0x7fff
-; GFX11-NEXT: v_bfe_u32 v119, v3, 16, 1
-; GFX11-NEXT: v_and_or_b32 v128, v3, s0, 0x400000
+; GFX11-NEXT: v_or_b32_e32 v132, 0x400000, v2
; GFX11-NEXT: v_perm_b32 v9, v9, v26, 0x7060302
+; GFX11-NEXT: v_add3_u32 v131, v131, v2, 0x7fff
+; GFX11-NEXT: v_perm_b32 v10, v10, v27, 0x7060302
; GFX11-NEXT: v_cndmask_b32_e32 v8, v83, v84, vcc_lo
; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v24, v24
-; GFX11-NEXT: v_add3_u32 v119, v119, v3, 0x7fff
-; GFX11-NEXT: v_perm_b32 v10, v10, v27, 0x7060302
; GFX11-NEXT: v_perm_b32 v11, v11, v28, 0x7060302
+; GFX11-NEXT: v_perm_b32 v12, v12, v29, 0x7060302
+; GFX11-NEXT: v_perm_b32 v13, v13, v30, 0x7060302
; GFX11-NEXT: v_perm_b32 v8, v8, v25, 0x7060302
; GFX11-NEXT: v_cndmask_b32_e32 v24, v85, v86, vcc_lo
; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v7, v7
-; GFX11-NEXT: v_perm_b32 v12, v12, v29, 0x7060302
-; GFX11-NEXT: v_perm_b32 v13, v13, v30, 0x7060302
; GFX11-NEXT: v_perm_b32 v14, v14, v16, 0x7060302
; GFX11-NEXT: v_cndmask_b32_e32 v7, v87, v96, vcc_lo
; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v23, v23
@@ -25477,22 +24869,21 @@ define <32 x bfloat> @v_maxnum_v32bf16(<32 x bfloat> %a, <32 x bfloat> %b) {
; GFX11-NEXT: v_perm_b32 v0, v0, v17, 0x7060302
; GFX11-NEXT: v_cndmask_b32_e32 v2, v131, v132, vcc_lo
; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX11-NEXT: v_perm_b32 v2, v2, v19, 0x7060302
; GFX11-NEXT: v_cndmask_b32_e32 v3, v119, v128, vcc_lo
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_4) | instid1(VALU_DEP_3)
; GFX11-NEXT: v_perm_b32 v3, v3, v20, 0x7060302
; GFX11-NEXT: s_waitcnt vmcnt(0)
-; GFX11-NEXT: v_lshlrev_b32_e32 v17, 16, v31
-; GFX11-NEXT: v_and_b32_e32 v18, 0xffff0000, v31
-; GFX11-NEXT: v_perm_b32 v2, v2, v19, 0x7060302
-; GFX11-NEXT: v_max_f32_e32 v17, v32, v17
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-NEXT: v_lshlrev_b32_e32 v17, 16, v32
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-NEXT: v_dual_max_f32 v17, v31, v17 :: v_dual_and_b32 v18, 0xffff0000, v32
; GFX11-NEXT: v_max_f32_e32 v15, v15, v18
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
; GFX11-NEXT: v_bfe_u32 v18, v17, 16, 1
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2)
; GFX11-NEXT: v_bfe_u32 v19, v15, 16, 1
-; GFX11-NEXT: v_and_or_b32 v20, v17, s0, 0x400000
+; GFX11-NEXT: v_or_b32_e32 v20, 0x400000, v17
; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v17, v17
-; GFX11-NEXT: v_and_or_b32 v21, v15, s0, 0x400000
+; GFX11-NEXT: v_or_b32_e32 v21, 0x400000, v15
; GFX11-NEXT: v_add3_u32 v18, v18, v17, 0x7fff
; GFX11-NEXT: v_add3_u32 v19, v19, v15, 0x7fff
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_3)
@@ -25586,8 +24977,7 @@ define bfloat @v_sqrt_bf16(bfloat %a) {
; GFX8-NEXT: v_bfe_u32 v1, v0, 16, 1
; GFX8-NEXT: v_add_u32_e32 v1, vcc, v1, v0
; GFX8-NEXT: v_add_u32_e32 v1, vcc, 0x7fff, v1
-; GFX8-NEXT: v_and_b32_e32 v2, 0xff800000, v0
-; GFX8-NEXT: v_or_b32_e32 v2, 0x400000, v2
+; GFX8-NEXT: v_or_b32_e32 v2, 0x400000, v0
; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v0, v0
; GFX8-NEXT: v_cndmask_b32_e32 v0, v1, v2, vcc
; GFX8-NEXT: v_lshrrev_b32_e32 v0, 16, v0
@@ -25617,9 +25007,8 @@ define bfloat @v_sqrt_bf16(bfloat %a) {
; GFX9-NEXT: v_cndmask_b32_e32 v0, v1, v0, vcc
; GFX9-NEXT: v_bfe_u32 v1, v0, 16, 1
; GFX9-NEXT: s_movk_i32 s4, 0x7fff
-; GFX9-NEXT: v_and_b32_e32 v2, 0xff800000, v0
; GFX9-NEXT: v_add3_u32 v1, v1, v0, s4
-; GFX9-NEXT: v_or_b32_e32 v2, 0x400000, v2
+; GFX9-NEXT: v_or_b32_e32 v2, 0x400000, v0
; GFX9-NEXT: v_cmp_u_f32_e32 vcc, v0, v0
; GFX9-NEXT: v_cndmask_b32_e32 v0, v1, v2, vcc
; GFX9-NEXT: v_lshrrev_b32_e32 v0, 16, v0
@@ -25641,13 +25030,12 @@ define bfloat @v_sqrt_bf16(bfloat %a) {
; GFX10-NEXT: v_cndmask_b32_e64 v1, v1, v2, s4
; GFX10-NEXT: v_cmp_lt_f32_e64 s4, 0, v5
; GFX10-NEXT: v_cndmask_b32_e64 v1, v1, v3, s4
-; GFX10-NEXT: s_mov_b32 s4, 0xff800000
; GFX10-NEXT: v_mul_f32_e32 v2, 0x37800000, v1
; GFX10-NEXT: v_cndmask_b32_e32 v1, v1, v2, vcc_lo
; GFX10-NEXT: v_cmp_class_f32_e64 vcc_lo, v0, 0x260
; GFX10-NEXT: v_cndmask_b32_e32 v0, v1, v0, vcc_lo
; GFX10-NEXT: v_bfe_u32 v1, v0, 16, 1
-; GFX10-NEXT: v_and_or_b32 v2, v0, s4, 0x400000
+; GFX10-NEXT: v_or_b32_e32 v2, 0x400000, v0
; GFX10-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0
; GFX10-NEXT: v_add3_u32 v1, v1, v0, 0x7fff
; GFX10-NEXT: v_cndmask_b32_e32 v0, v1, v2, vcc_lo
@@ -25674,9 +25062,8 @@ define bfloat @v_sqrt_bf16(bfloat %a) {
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_3)
; GFX11-NEXT: v_cndmask_b32_e64 v1, v1, v2, s0
; GFX11-NEXT: v_cmp_lt_f32_e64 s0, 0, v5
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
; GFX11-NEXT: v_cndmask_b32_e64 v1, v1, v3, s0
-; GFX11-NEXT: s_mov_b32 s0, 0xff800000
; GFX11-NEXT: v_mul_f32_e32 v2, 0x37800000, v1
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
; GFX11-NEXT: v_cndmask_b32_e32 v1, v1, v2, vcc_lo
@@ -25684,7 +25071,7 @@ define bfloat @v_sqrt_bf16(bfloat %a) {
; GFX11-NEXT: v_cndmask_b32_e32 v0, v1, v0, vcc_lo
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
; GFX11-NEXT: v_bfe_u32 v1, v0, 16, 1
-; GFX11-NEXT: v_and_or_b32 v2, v0, s0, 0x400000
+; GFX11-NEXT: v_or_b32_e32 v2, 0x400000, v0
; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0
; GFX11-NEXT: v_add3_u32 v1, v1, v0, 0x7fff
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
@@ -25724,8 +25111,7 @@ define bfloat @v_ldexp_bf16_i32(bfloat %a, i32 %b) {
; GFX8-NEXT: v_bfe_u32 v1, v0, 16, 1
; GFX8-NEXT: v_add_u32_e32 v1, vcc, v1, v0
; GFX8-NEXT: v_add_u32_e32 v1, vcc, 0x7fff, v1
-; GFX8-NEXT: v_and_b32_e32 v2, 0xff800000, v0
-; GFX8-NEXT: v_or_b32_e32 v2, 0x400000, v2
+; GFX8-NEXT: v_or_b32_e32 v2, 0x400000, v0
; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v0, v0
; GFX8-NEXT: v_cndmask_b32_e32 v0, v1, v2, vcc
; GFX8-NEXT: v_lshrrev_b32_e32 v0, 16, v0
@@ -25738,9 +25124,8 @@ define bfloat @v_ldexp_bf16_i32(bfloat %a, i32 %b) {
; GFX9-NEXT: v_ldexp_f32 v0, v0, v1
; GFX9-NEXT: v_bfe_u32 v1, v0, 16, 1
; GFX9-NEXT: s_movk_i32 s4, 0x7fff
-; GFX9-NEXT: v_and_b32_e32 v2, 0xff800000, v0
; GFX9-NEXT: v_add3_u32 v1, v1, v0, s4
-; GFX9-NEXT: v_or_b32_e32 v2, 0x400000, v2
+; GFX9-NEXT: v_or_b32_e32 v2, 0x400000, v0
; GFX9-NEXT: v_cmp_u_f32_e32 vcc, v0, v0
; GFX9-NEXT: v_cndmask_b32_e32 v0, v1, v2, vcc
; GFX9-NEXT: v_lshrrev_b32_e32 v0, 16, v0
@@ -25750,10 +25135,9 @@ define bfloat @v_ldexp_bf16_i32(bfloat %a, i32 %b) {
; GFX10: ; %bb.0:
; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX10-NEXT: v_lshlrev_b32_e32 v0, 16, v0
-; GFX10-NEXT: s_mov_b32 s4, 0xff800000
; GFX10-NEXT: v_ldexp_f32 v0, v0, v1
; GFX10-NEXT: v_bfe_u32 v1, v0, 16, 1
-; GFX10-NEXT: v_and_or_b32 v2, v0, s4, 0x400000
+; GFX10-NEXT: v_or_b32_e32 v2, 0x400000, v0
; GFX10-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0
; GFX10-NEXT: v_add3_u32 v1, v1, v0, 0x7fff
; GFX10-NEXT: v_cndmask_b32_e32 v0, v1, v2, vcc_lo
@@ -25764,11 +25148,10 @@ define bfloat @v_ldexp_bf16_i32(bfloat %a, i32 %b) {
; GFX11: ; %bb.0:
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-NEXT: v_lshlrev_b32_e32 v0, 16, v0
-; GFX11-NEXT: s_mov_b32 s0, 0xff800000
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
; GFX11-NEXT: v_ldexp_f32 v0, v0, v1
; GFX11-NEXT: v_bfe_u32 v1, v0, 16, 1
-; GFX11-NEXT: v_and_or_b32 v2, v0, s0, 0x400000
+; GFX11-NEXT: v_or_b32_e32 v2, 0x400000, v0
; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1)
; GFX11-NEXT: v_add3_u32 v1, v1, v0, 0x7fff
@@ -25816,8 +25199,7 @@ define { bfloat, i16 } @v_frexp_bf16_i16(bfloat %a) {
; GFX8-NEXT: v_bfe_u32 v2, v0, 16, 1
; GFX8-NEXT: v_add_u32_e32 v2, vcc, v2, v0
; GFX8-NEXT: v_add_u32_e32 v2, vcc, 0x7fff, v2
-; GFX8-NEXT: v_and_b32_e32 v3, 0xff800000, v0
-; GFX8-NEXT: v_or_b32_e32 v3, 0x400000, v3
+; GFX8-NEXT: v_or_b32_e32 v3, 0x400000, v0
; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v0, v0
; GFX8-NEXT: v_cndmask_b32_e32 v0, v2, v3, vcc
; GFX8-NEXT: v_lshrrev_b32_e32 v0, 16, v0
@@ -25831,9 +25213,8 @@ define { bfloat, i16 } @v_frexp_bf16_i16(bfloat %a) {
; GFX9-NEXT: v_frexp_mant_f32_e32 v0, v1
; GFX9-NEXT: v_bfe_u32 v2, v0, 16, 1
; GFX9-NEXT: s_movk_i32 s4, 0x7fff
-; GFX9-NEXT: v_and_b32_e32 v3, 0xff800000, v0
; GFX9-NEXT: v_add3_u32 v2, v2, v0, s4
-; GFX9-NEXT: v_or_b32_e32 v3, 0x400000, v3
+; GFX9-NEXT: v_or_b32_e32 v3, 0x400000, v0
; GFX9-NEXT: v_cmp_u_f32_e32 vcc, v0, v0
; GFX9-NEXT: v_cndmask_b32_e32 v0, v2, v3, vcc
; GFX9-NEXT: v_lshrrev_b32_e32 v0, 16, v0
@@ -25844,11 +25225,10 @@ define { bfloat, i16 } @v_frexp_bf16_i16(bfloat %a) {
; GFX10: ; %bb.0:
; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX10-NEXT: v_lshlrev_b32_e32 v1, 16, v0
-; GFX10-NEXT: s_mov_b32 s4, 0xff800000
; GFX10-NEXT: v_frexp_mant_f32_e32 v0, v1
; GFX10-NEXT: v_frexp_exp_i32_f32_e32 v1, v1
; GFX10-NEXT: v_bfe_u32 v2, v0, 16, 1
-; GFX10-NEXT: v_and_or_b32 v3, v0, s4, 0x400000
+; GFX10-NEXT: v_or_b32_e32 v3, 0x400000, v0
; GFX10-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0
; GFX10-NEXT: v_add3_u32 v2, v2, v0, 0x7fff
; GFX10-NEXT: v_cndmask_b32_e32 v0, v2, v3, vcc_lo
@@ -25947,8 +25327,7 @@ define bfloat @v_log_bf16(bfloat %a) {
; GFX8-NEXT: v_bfe_u32 v1, v0, 16, 1
; GFX8-NEXT: v_add_u32_e32 v1, vcc, v1, v0
; GFX8-NEXT: v_add_u32_e32 v1, vcc, 0x7fff, v1
-; GFX8-NEXT: v_and_b32_e32 v2, 0xff800000, v0
-; GFX8-NEXT: v_or_b32_e32 v2, 0x400000, v2
+; GFX8-NEXT: v_or_b32_e32 v2, 0x400000, v0
; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v0, v0
; GFX8-NEXT: v_cndmask_b32_e32 v0, v1, v2, vcc
; GFX8-NEXT: v_lshrrev_b32_e32 v0, 16, v0
@@ -25978,9 +25357,8 @@ define bfloat @v_log_bf16(bfloat %a) {
; GFX9-NEXT: v_sub_f32_e32 v0, v0, v1
; GFX9-NEXT: v_bfe_u32 v1, v0, 16, 1
; GFX9-NEXT: s_movk_i32 s4, 0x7fff
-; GFX9-NEXT: v_and_b32_e32 v2, 0xff800000, v0
; GFX9-NEXT: v_add3_u32 v1, v1, v0, s4
-; GFX9-NEXT: v_or_b32_e32 v2, 0x400000, v2
+; GFX9-NEXT: v_or_b32_e32 v2, 0x400000, v0
; GFX9-NEXT: v_cmp_u_f32_e32 vcc, v0, v0
; GFX9-NEXT: v_cndmask_b32_e32 v0, v1, v2, vcc
; GFX9-NEXT: v_lshrrev_b32_e32 v0, 16, v0
@@ -25990,7 +25368,6 @@ define bfloat @v_log_bf16(bfloat %a) {
; GFX10: ; %bb.0:
; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX10-NEXT: v_lshlrev_b32_e32 v0, 16, v0
-; GFX10-NEXT: s_mov_b32 s4, 0xff800000
; GFX10-NEXT: v_cmp_gt_f32_e32 vcc_lo, 0x800000, v0
; GFX10-NEXT: v_cndmask_b32_e64 v1, 1.0, 0x4f800000, vcc_lo
; GFX10-NEXT: v_mul_f32_e32 v0, v0, v1
@@ -26004,7 +25381,7 @@ define bfloat @v_log_bf16(bfloat %a) {
; GFX10-NEXT: v_cndmask_b32_e32 v0, v0, v1, vcc_lo
; GFX10-NEXT: v_sub_f32_e32 v0, v0, v2
; GFX10-NEXT: v_bfe_u32 v1, v0, 16, 1
-; GFX10-NEXT: v_and_or_b32 v2, v0, s4, 0x400000
+; GFX10-NEXT: v_or_b32_e32 v2, 0x400000, v0
; GFX10-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0
; GFX10-NEXT: v_add3_u32 v1, v1, v0, 0x7fff
; GFX10-NEXT: v_cndmask_b32_e32 v0, v1, v2, vcc_lo
@@ -26015,7 +25392,6 @@ define bfloat @v_log_bf16(bfloat %a) {
; GFX11: ; %bb.0:
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-NEXT: v_lshlrev_b32_e32 v0, 16, v0
-; GFX11-NEXT: s_mov_b32 s0, 0xff800000
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
; GFX11-NEXT: v_cmp_gt_f32_e32 vcc_lo, 0x800000, v0
; GFX11-NEXT: v_cndmask_b32_e64 v1, 1.0, 0x4f800000, vcc_lo
@@ -26035,7 +25411,7 @@ define bfloat @v_log_bf16(bfloat %a) {
; GFX11-NEXT: v_sub_f32_e32 v0, v0, v2
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
; GFX11-NEXT: v_bfe_u32 v1, v0, 16, 1
-; GFX11-NEXT: v_and_or_b32 v2, v0, s0, 0x400000
+; GFX11-NEXT: v_or_b32_e32 v2, 0x400000, v0
; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0
; GFX11-NEXT: v_add3_u32 v1, v1, v0, 0x7fff
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
@@ -26097,8 +25473,7 @@ define bfloat @v_log2_bf16(bfloat %a) {
; GFX8-NEXT: v_bfe_u32 v1, v0, 16, 1
; GFX8-NEXT: v_add_u32_e32 v1, vcc, v1, v0
; GFX8-NEXT: v_add_u32_e32 v1, vcc, 0x7fff, v1
-; GFX8-NEXT: v_and_b32_e32 v2, 0xff800000, v0
-; GFX8-NEXT: v_or_b32_e32 v2, 0x400000, v2
+; GFX8-NEXT: v_or_b32_e32 v2, 0x400000, v0
; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v0, v0
; GFX8-NEXT: v_cndmask_b32_e32 v0, v1, v2, vcc
; GFX8-NEXT: v_lshrrev_b32_e32 v0, 16, v0
@@ -26119,9 +25494,8 @@ define bfloat @v_log2_bf16(bfloat %a) {
; GFX9-NEXT: s_movk_i32 s4, 0x7fff
; GFX9-NEXT: v_sub_f32_e32 v0, v0, v1
; GFX9-NEXT: v_bfe_u32 v1, v0, 16, 1
-; GFX9-NEXT: v_and_b32_e32 v2, 0xff800000, v0
; GFX9-NEXT: v_add3_u32 v1, v1, v0, s4
-; GFX9-NEXT: v_or_b32_e32 v2, 0x400000, v2
+; GFX9-NEXT: v_or_b32_e32 v2, 0x400000, v0
; GFX9-NEXT: v_cmp_u_f32_e32 vcc, v0, v0
; GFX9-NEXT: v_cndmask_b32_e32 v0, v1, v2, vcc
; GFX9-NEXT: v_lshrrev_b32_e32 v0, 16, v0
@@ -26131,7 +25505,6 @@ define bfloat @v_log2_bf16(bfloat %a) {
; GFX10: ; %bb.0:
; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX10-NEXT: v_lshlrev_b32_e32 v0, 16, v0
-; GFX10-NEXT: s_mov_b32 s4, 0xff800000
; GFX10-NEXT: v_cmp_gt_f32_e32 vcc_lo, 0x800000, v0
; GFX10-NEXT: v_cndmask_b32_e64 v2, 1.0, 0x4f800000, vcc_lo
; GFX10-NEXT: v_cndmask_b32_e64 v1, 0, 0x42000000, vcc_lo
@@ -26139,7 +25512,7 @@ define bfloat @v_log2_bf16(bfloat %a) {
; GFX10-NEXT: v_log_f32_e32 v0, v0
; GFX10-NEXT: v_sub_f32_e32 v0, v0, v1
; GFX10-NEXT: v_bfe_u32 v1, v0, 16, 1
-; GFX10-NEXT: v_and_or_b32 v2, v0, s4, 0x400000
+; GFX10-NEXT: v_or_b32_e32 v2, 0x400000, v0
; GFX10-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0
; GFX10-NEXT: v_add3_u32 v1, v1, v0, 0x7fff
; GFX10-NEXT: v_cndmask_b32_e32 v0, v1, v2, vcc_lo
@@ -26150,7 +25523,6 @@ define bfloat @v_log2_bf16(bfloat %a) {
; GFX11: ; %bb.0:
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-NEXT: v_lshlrev_b32_e32 v0, 16, v0
-; GFX11-NEXT: s_mov_b32 s0, 0xff800000
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_2)
; GFX11-NEXT: v_cmp_gt_f32_e32 vcc_lo, 0x800000, v0
; GFX11-NEXT: v_cndmask_b32_e64 v2, 1.0, 0x4f800000, vcc_lo
@@ -26161,7 +25533,7 @@ define bfloat @v_log2_bf16(bfloat %a) {
; GFX11-NEXT: s_waitcnt_depctr 0xfff
; GFX11-NEXT: v_sub_f32_e32 v0, v0, v1
; GFX11-NEXT: v_bfe_u32 v1, v0, 16, 1
-; GFX11-NEXT: v_and_or_b32 v2, v0, s0, 0x400000
+; GFX11-NEXT: v_or_b32_e32 v2, 0x400000, v0
; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1)
; GFX11-NEXT: v_add3_u32 v1, v1, v0, 0x7fff
@@ -26257,8 +25629,7 @@ define bfloat @v_log10_bf16(bfloat %a) {
; GFX8-NEXT: v_bfe_u32 v1, v0, 16, 1
; GFX8-NEXT: v_add_u32_e32 v1, vcc, v1, v0
; GFX8-NEXT: v_add_u32_e32 v1, vcc, 0x7fff, v1
-; GFX8-NEXT: v_and_b32_e32 v2, 0xff800000, v0
-; GFX8-NEXT: v_or_b32_e32 v2, 0x400000, v2
+; GFX8-NEXT: v_or_b32_e32 v2, 0x400000, v0
; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v0, v0
; GFX8-NEXT: v_cndmask_b32_e32 v0, v1, v2, vcc
; GFX8-NEXT: v_lshrrev_b32_e32 v0, 16, v0
@@ -26288,9 +25659,8 @@ define bfloat @v_log10_bf16(bfloat %a) {
; GFX9-NEXT: v_sub_f32_e32 v0, v0, v1
; GFX9-NEXT: v_bfe_u32 v1, v0, 16, 1
; GFX9-NEXT: s_movk_i32 s4, 0x7fff
-; GFX9-NEXT: v_and_b32_e32 v2, 0xff800000, v0
; GFX9-NEXT: v_add3_u32 v1, v1, v0, s4
-; GFX9-NEXT: v_or_b32_e32 v2, 0x400000, v2
+; GFX9-NEXT: v_or_b32_e32 v2, 0x400000, v0
; GFX9-NEXT: v_cmp_u_f32_e32 vcc, v0, v0
; GFX9-NEXT: v_cndmask_b32_e32 v0, v1, v2, vcc
; GFX9-NEXT: v_lshrrev_b32_e32 v0, 16, v0
@@ -26300,7 +25670,6 @@ define bfloat @v_log10_bf16(bfloat %a) {
; GFX10: ; %bb.0:
; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX10-NEXT: v_lshlrev_b32_e32 v0, 16, v0
-; GFX10-NEXT: s_mov_b32 s4, 0xff800000
; GFX10-NEXT: v_cmp_gt_f32_e32 vcc_lo, 0x800000, v0
; GFX10-NEXT: v_cndmask_b32_e64 v1, 1.0, 0x4f800000, vcc_lo
; GFX10-NEXT: v_mul_f32_e32 v0, v0, v1
@@ -26314,7 +25683,7 @@ define bfloat @v_log10_bf16(bfloat %a) {
; GFX10-NEXT: v_cndmask_b32_e32 v0, v0, v1, vcc_lo
; GFX10-NEXT: v_sub_f32_e32 v0, v0, v2
; GFX10-NEXT: v_bfe_u32 v1, v0, 16, 1
-; GFX10-NEXT: v_and_or_b32 v2, v0, s4, 0x400000
+; GFX10-NEXT: v_or_b32_e32 v2, 0x400000, v0
; GFX10-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0
; GFX10-NEXT: v_add3_u32 v1, v1, v0, 0x7fff
; GFX10-NEXT: v_cndmask_b32_e32 v0, v1, v2, vcc_lo
@@ -26325,7 +25694,6 @@ define bfloat @v_log10_bf16(bfloat %a) {
; GFX11: ; %bb.0:
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-NEXT: v_lshlrev_b32_e32 v0, 16, v0
-; GFX11-NEXT: s_mov_b32 s0, 0xff800000
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
; GFX11-NEXT: v_cmp_gt_f32_e32 vcc_lo, 0x800000, v0
; GFX11-NEXT: v_cndmask_b32_e64 v1, 1.0, 0x4f800000, vcc_lo
@@ -26345,7 +25713,7 @@ define bfloat @v_log10_bf16(bfloat %a) {
; GFX11-NEXT: v_sub_f32_e32 v0, v0, v2
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
; GFX11-NEXT: v_bfe_u32 v1, v0, 16, 1
-; GFX11-NEXT: v_and_or_b32 v2, v0, s0, 0x400000
+; GFX11-NEXT: v_or_b32_e32 v2, 0x400000, v0
; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0
; GFX11-NEXT: v_add3_u32 v1, v1, v0, 0x7fff
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
@@ -26442,8 +25810,7 @@ define bfloat @v_exp_bf16(bfloat %a) {
; GFX8-NEXT: v_bfe_u32 v1, v0, 16, 1
; GFX8-NEXT: v_add_u32_e32 v1, vcc, v1, v0
; GFX8-NEXT: v_add_u32_e32 v1, vcc, 0x7fff, v1
-; GFX8-NEXT: v_and_b32_e32 v2, 0xff800000, v0
-; GFX8-NEXT: v_or_b32_e32 v2, 0x400000, v2
+; GFX8-NEXT: v_or_b32_e32 v2, 0x400000, v0
; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v0, v0
; GFX8-NEXT: v_cndmask_b32_e32 v0, v1, v2, vcc
; GFX8-NEXT: v_lshrrev_b32_e32 v0, 16, v0
@@ -26473,9 +25840,8 @@ define bfloat @v_exp_bf16(bfloat %a) {
; GFX9-NEXT: v_cndmask_b32_e32 v0, v2, v1, vcc
; GFX9-NEXT: v_bfe_u32 v1, v0, 16, 1
; GFX9-NEXT: s_movk_i32 s4, 0x7fff
-; GFX9-NEXT: v_and_b32_e32 v2, 0xff800000, v0
; GFX9-NEXT: v_add3_u32 v1, v1, v0, s4
-; GFX9-NEXT: v_or_b32_e32 v2, 0x400000, v2
+; GFX9-NEXT: v_or_b32_e32 v2, 0x400000, v0
; GFX9-NEXT: v_cmp_u_f32_e32 vcc, v0, v0
; GFX9-NEXT: v_cndmask_b32_e32 v0, v1, v2, vcc
; GFX9-NEXT: v_lshrrev_b32_e32 v0, 16, v0
@@ -26485,7 +25851,6 @@ define bfloat @v_exp_bf16(bfloat %a) {
; GFX10: ; %bb.0:
; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX10-NEXT: v_lshlrev_b32_e32 v0, 16, v0
-; GFX10-NEXT: s_mov_b32 s4, 0xff800000
; GFX10-NEXT: v_mul_f32_e32 v1, 0x3fb8aa3b, v0
; GFX10-NEXT: v_cmp_ngt_f32_e32 vcc_lo, 0xc2ce8ed0, v0
; GFX10-NEXT: v_rndne_f32_e32 v2, v1
@@ -26500,7 +25865,7 @@ define bfloat @v_exp_bf16(bfloat %a) {
; GFX10-NEXT: v_cmp_nlt_f32_e32 vcc_lo, 0x42b17218, v0
; GFX10-NEXT: v_cndmask_b32_e32 v0, 0x7f800000, v1, vcc_lo
; GFX10-NEXT: v_bfe_u32 v1, v0, 16, 1
-; GFX10-NEXT: v_and_or_b32 v2, v0, s4, 0x400000
+; GFX10-NEXT: v_or_b32_e32 v2, 0x400000, v0
; GFX10-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0
; GFX10-NEXT: v_add3_u32 v1, v1, v0, 0x7fff
; GFX10-NEXT: v_cndmask_b32_e32 v0, v1, v2, vcc_lo
@@ -26511,7 +25876,6 @@ define bfloat @v_exp_bf16(bfloat %a) {
; GFX11: ; %bb.0:
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-NEXT: v_lshlrev_b32_e32 v0, 16, v0
-; GFX11-NEXT: s_mov_b32 s0, 0xff800000
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
; GFX11-NEXT: v_mul_f32_e32 v1, 0x3fb8aa3b, v0
; GFX11-NEXT: v_rndne_f32_e32 v2, v1
@@ -26532,7 +25896,7 @@ define bfloat @v_exp_bf16(bfloat %a) {
; GFX11-NEXT: v_cndmask_b32_e32 v0, 0x7f800000, v1, vcc_lo
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
; GFX11-NEXT: v_bfe_u32 v1, v0, 16, 1
-; GFX11-NEXT: v_and_or_b32 v2, v0, s0, 0x400000
+; GFX11-NEXT: v_or_b32_e32 v2, 0x400000, v0
; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0
; GFX11-NEXT: v_add3_u32 v1, v1, v0, 0x7fff
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
@@ -26594,8 +25958,7 @@ define bfloat @v_exp2_bf16(bfloat %a) {
; GFX8-NEXT: v_bfe_u32 v1, v0, 16, 1
; GFX8-NEXT: v_add_u32_e32 v1, vcc, v1, v0
; GFX8-NEXT: v_add_u32_e32 v1, vcc, 0x7fff, v1
-; GFX8-NEXT: v_and_b32_e32 v2, 0xff800000, v0
-; GFX8-NEXT: v_or_b32_e32 v2, 0x400000, v2
+; GFX8-NEXT: v_or_b32_e32 v2, 0x400000, v0
; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v0, v0
; GFX8-NEXT: v_cndmask_b32_e32 v0, v1, v2, vcc
; GFX8-NEXT: v_lshrrev_b32_e32 v0, 16, v0
@@ -26616,9 +25979,8 @@ define bfloat @v_exp2_bf16(bfloat %a) {
; GFX9-NEXT: s_movk_i32 s4, 0x7fff
; GFX9-NEXT: v_mul_f32_e32 v0, v0, v1
; GFX9-NEXT: v_bfe_u32 v1, v0, 16, 1
-; GFX9-NEXT: v_and_b32_e32 v2, 0xff800000, v0
; GFX9-NEXT: v_add3_u32 v1, v1, v0, s4
-; GFX9-NEXT: v_or_b32_e32 v2, 0x400000, v2
+; GFX9-NEXT: v_or_b32_e32 v2, 0x400000, v0
; GFX9-NEXT: v_cmp_u_f32_e32 vcc, v0, v0
; GFX9-NEXT: v_cndmask_b32_e32 v0, v1, v2, vcc
; GFX9-NEXT: v_lshrrev_b32_e32 v0, 16, v0
@@ -26628,7 +25990,6 @@ define bfloat @v_exp2_bf16(bfloat %a) {
; GFX10: ; %bb.0:
; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX10-NEXT: v_lshlrev_b32_e32 v0, 16, v0
-; GFX10-NEXT: s_mov_b32 s4, 0xff800000
; GFX10-NEXT: v_cmp_gt_f32_e32 vcc_lo, 0xc2fc0000, v0
; GFX10-NEXT: v_cndmask_b32_e64 v2, 0, 0x42800000, vcc_lo
; GFX10-NEXT: v_cndmask_b32_e64 v1, 1.0, 0x1f800000, vcc_lo
@@ -26636,7 +25997,7 @@ define bfloat @v_exp2_bf16(bfloat %a) {
; GFX10-NEXT: v_exp_f32_e32 v0, v0
; GFX10-NEXT: v_mul_f32_e32 v0, v0, v1
; GFX10-NEXT: v_bfe_u32 v1, v0, 16, 1
-; GFX10-NEXT: v_and_or_b32 v2, v0, s4, 0x400000
+; GFX10-NEXT: v_or_b32_e32 v2, 0x400000, v0
; GFX10-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0
; GFX10-NEXT: v_add3_u32 v1, v1, v0, 0x7fff
; GFX10-NEXT: v_cndmask_b32_e32 v0, v1, v2, vcc_lo
@@ -26647,7 +26008,6 @@ define bfloat @v_exp2_bf16(bfloat %a) {
; GFX11: ; %bb.0:
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-NEXT: v_lshlrev_b32_e32 v0, 16, v0
-; GFX11-NEXT: s_mov_b32 s0, 0xff800000
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_2)
; GFX11-NEXT: v_cmp_gt_f32_e32 vcc_lo, 0xc2fc0000, v0
; GFX11-NEXT: v_cndmask_b32_e64 v2, 0, 0x42800000, vcc_lo
@@ -26658,7 +26018,7 @@ define bfloat @v_exp2_bf16(bfloat %a) {
; GFX11-NEXT: s_waitcnt_depctr 0xfff
; GFX11-NEXT: v_mul_f32_e32 v0, v0, v1
; GFX11-NEXT: v_bfe_u32 v1, v0, 16, 1
-; GFX11-NEXT: v_and_or_b32 v2, v0, s0, 0x400000
+; GFX11-NEXT: v_or_b32_e32 v2, 0x400000, v0
; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1)
; GFX11-NEXT: v_add3_u32 v1, v1, v0, 0x7fff
@@ -26752,8 +26112,7 @@ define bfloat @v_exp10_bf16(bfloat %a) {
; GFX8-NEXT: v_bfe_u32 v1, v0, 16, 1
; GFX8-NEXT: v_add_u32_e32 v1, vcc, v1, v0
; GFX8-NEXT: v_add_u32_e32 v1, vcc, 0x7fff, v1
-; GFX8-NEXT: v_and_b32_e32 v2, 0xff800000, v0
-; GFX8-NEXT: v_or_b32_e32 v2, 0x400000, v2
+; GFX8-NEXT: v_or_b32_e32 v2, 0x400000, v0
; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v0, v0
; GFX8-NEXT: v_cndmask_b32_e32 v0, v1, v2, vcc
; GFX8-NEXT: v_lshrrev_b32_e32 v0, 16, v0
@@ -26783,9 +26142,8 @@ define bfloat @v_exp10_bf16(bfloat %a) {
; GFX9-NEXT: v_cndmask_b32_e32 v0, v2, v1, vcc
; GFX9-NEXT: v_bfe_u32 v1, v0, 16, 1
; GFX9-NEXT: s_movk_i32 s4, 0x7fff
-; GFX9-NEXT: v_and_b32_e32 v2, 0xff800000, v0
; GFX9-NEXT: v_add3_u32 v1, v1, v0, s4
-; GFX9-NEXT: v_or_b32_e32 v2, 0x400000, v2
+; GFX9-NEXT: v_or_b32_e32 v2, 0x400000, v0
; GFX9-NEXT: v_cmp_u_f32_e32 vcc, v0, v0
; GFX9-NEXT: v_cndmask_b32_e32 v0, v1, v2, vcc
; GFX9-NEXT: v_lshrrev_b32_e32 v0, 16, v0
@@ -26795,7 +26153,6 @@ define bfloat @v_exp10_bf16(bfloat %a) {
; GFX10: ; %bb.0:
; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX10-NEXT: v_lshlrev_b32_e32 v0, 16, v0
-; GFX10-NEXT: s_mov_b32 s4, 0xff800000
; GFX10-NEXT: v_mul_f32_e32 v1, 0x40549a78, v0
; GFX10-NEXT: v_cmp_ngt_f32_e32 vcc_lo, 0xc23369f4, v0
; GFX10-NEXT: v_rndne_f32_e32 v2, v1
@@ -26810,7 +26167,7 @@ define bfloat @v_exp10_bf16(bfloat %a) {
; GFX10-NEXT: v_cmp_nlt_f32_e32 vcc_lo, 0x421a209b, v0
; GFX10-NEXT: v_cndmask_b32_e32 v0, 0x7f800000, v1, vcc_lo
; GFX10-NEXT: v_bfe_u32 v1, v0, 16, 1
-; GFX10-NEXT: v_and_or_b32 v2, v0, s4, 0x400000
+; GFX10-NEXT: v_or_b32_e32 v2, 0x400000, v0
; GFX10-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0
; GFX10-NEXT: v_add3_u32 v1, v1, v0, 0x7fff
; GFX10-NEXT: v_cndmask_b32_e32 v0, v1, v2, vcc_lo
@@ -26821,7 +26178,6 @@ define bfloat @v_exp10_bf16(bfloat %a) {
; GFX11: ; %bb.0:
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-NEXT: v_lshlrev_b32_e32 v0, 16, v0
-; GFX11-NEXT: s_mov_b32 s0, 0xff800000
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
; GFX11-NEXT: v_mul_f32_e32 v1, 0x40549a78, v0
; GFX11-NEXT: v_rndne_f32_e32 v2, v1
@@ -26842,7 +26198,7 @@ define bfloat @v_exp10_bf16(bfloat %a) {
; GFX11-NEXT: v_cndmask_b32_e32 v0, 0x7f800000, v1, vcc_lo
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
; GFX11-NEXT: v_bfe_u32 v1, v0, 16, 1
-; GFX11-NEXT: v_and_or_b32 v2, v0, s0, 0x400000
+; GFX11-NEXT: v_or_b32_e32 v2, 0x400000, v0
; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0
; GFX11-NEXT: v_add3_u32 v1, v1, v0, 0x7fff
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
@@ -26882,8 +26238,7 @@ define bfloat @v_ceil_bf16(bfloat %a) {
; GFX8-NEXT: v_bfe_u32 v1, v0, 16, 1
; GFX8-NEXT: v_add_u32_e32 v1, vcc, v1, v0
; GFX8-NEXT: v_add_u32_e32 v1, vcc, 0x7fff, v1
-; GFX8-NEXT: v_and_b32_e32 v2, 0xff800000, v0
-; GFX8-NEXT: v_or_b32_e32 v2, 0x400000, v2
+; GFX8-NEXT: v_or_b32_e32 v2, 0x400000, v0
; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v0, v0
; GFX8-NEXT: v_cndmask_b32_e32 v0, v1, v2, vcc
; GFX8-NEXT: v_lshrrev_b32_e32 v0, 16, v0
@@ -26896,9 +26251,8 @@ define bfloat @v_ceil_bf16(bfloat %a) {
; GFX9-NEXT: v_ceil_f32_e32 v0, v0
; GFX9-NEXT: v_bfe_u32 v1, v0, 16, 1
; GFX9-NEXT: s_movk_i32 s4, 0x7fff
-; GFX9-NEXT: v_and_b32_e32 v2, 0xff800000, v0
; GFX9-NEXT: v_add3_u32 v1, v1, v0, s4
-; GFX9-NEXT: v_or_b32_e32 v2, 0x400000, v2
+; GFX9-NEXT: v_or_b32_e32 v2, 0x400000, v0
; GFX9-NEXT: v_cmp_u_f32_e32 vcc, v0, v0
; GFX9-NEXT: v_cndmask_b32_e32 v0, v1, v2, vcc
; GFX9-NEXT: v_lshrrev_b32_e32 v0, 16, v0
@@ -26908,10 +26262,9 @@ define bfloat @v_ceil_bf16(bfloat %a) {
; GFX10: ; %bb.0:
; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX10-NEXT: v_lshlrev_b32_e32 v0, 16, v0
-; GFX10-NEXT: s_mov_b32 s4, 0xff800000
; GFX10-NEXT: v_ceil_f32_e32 v0, v0
; GFX10-NEXT: v_bfe_u32 v1, v0, 16, 1
-; GFX10-NEXT: v_and_or_b32 v2, v0, s4, 0x400000
+; GFX10-NEXT: v_or_b32_e32 v2, 0x400000, v0
; GFX10-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0
; GFX10-NEXT: v_add3_u32 v1, v1, v0, 0x7fff
; GFX10-NEXT: v_cndmask_b32_e32 v0, v1, v2, vcc_lo
@@ -26922,11 +26275,10 @@ define bfloat @v_ceil_bf16(bfloat %a) {
; GFX11: ; %bb.0:
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-NEXT: v_lshlrev_b32_e32 v0, 16, v0
-; GFX11-NEXT: s_mov_b32 s0, 0xff800000
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
; GFX11-NEXT: v_ceil_f32_e32 v0, v0
; GFX11-NEXT: v_bfe_u32 v1, v0, 16, 1
-; GFX11-NEXT: v_and_or_b32 v2, v0, s0, 0x400000
+; GFX11-NEXT: v_or_b32_e32 v2, 0x400000, v0
; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1)
; GFX11-NEXT: v_add3_u32 v1, v1, v0, 0x7fff
@@ -26967,8 +26319,7 @@ define bfloat @v_trunc_bf16(bfloat %a) {
; GFX8-NEXT: v_bfe_u32 v1, v0, 16, 1
; GFX8-NEXT: v_add_u32_e32 v1, vcc, v1, v0
; GFX8-NEXT: v_add_u32_e32 v1, vcc, 0x7fff, v1
-; GFX8-NEXT: v_and_b32_e32 v2, 0xff800000, v0
-; GFX8-NEXT: v_or_b32_e32 v2, 0x400000, v2
+; GFX8-NEXT: v_or_b32_e32 v2, 0x400000, v0
; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v0, v0
; GFX8-NEXT: v_cndmask_b32_e32 v0, v1, v2, vcc
; GFX8-NEXT: v_lshrrev_b32_e32 v0, 16, v0
@@ -26981,9 +26332,8 @@ define bfloat @v_trunc_bf16(bfloat %a) {
; GFX9-NEXT: v_trunc_f32_e32 v0, v0
; GFX9-NEXT: v_bfe_u32 v1, v0, 16, 1
; GFX9-NEXT: s_movk_i32 s4, 0x7fff
-; GFX9-NEXT: v_and_b32_e32 v2, 0xff800000, v0
; GFX9-NEXT: v_add3_u32 v1, v1, v0, s4
-; GFX9-NEXT: v_or_b32_e32 v2, 0x400000, v2
+; GFX9-NEXT: v_or_b32_e32 v2, 0x400000, v0
; GFX9-NEXT: v_cmp_u_f32_e32 vcc, v0, v0
; GFX9-NEXT: v_cndmask_b32_e32 v0, v1, v2, vcc
; GFX9-NEXT: v_lshrrev_b32_e32 v0, 16, v0
@@ -26993,10 +26343,9 @@ define bfloat @v_trunc_bf16(bfloat %a) {
; GFX10: ; %bb.0:
; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX10-NEXT: v_lshlrev_b32_e32 v0, 16, v0
-; GFX10-NEXT: s_mov_b32 s4, 0xff800000
; GFX10-NEXT: v_trunc_f32_e32 v0, v0
; GFX10-NEXT: v_bfe_u32 v1, v0, 16, 1
-; GFX10-NEXT: v_and_or_b32 v2, v0, s4, 0x400000
+; GFX10-NEXT: v_or_b32_e32 v2, 0x400000, v0
; GFX10-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0
; GFX10-NEXT: v_add3_u32 v1, v1, v0, 0x7fff
; GFX10-NEXT: v_cndmask_b32_e32 v0, v1, v2, vcc_lo
@@ -27007,11 +26356,10 @@ define bfloat @v_trunc_bf16(bfloat %a) {
; GFX11: ; %bb.0:
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-NEXT: v_lshlrev_b32_e32 v0, 16, v0
-; GFX11-NEXT: s_mov_b32 s0, 0xff800000
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
; GFX11-NEXT: v_trunc_f32_e32 v0, v0
; GFX11-NEXT: v_bfe_u32 v1, v0, 16, 1
-; GFX11-NEXT: v_and_or_b32 v2, v0, s0, 0x400000
+; GFX11-NEXT: v_or_b32_e32 v2, 0x400000, v0
; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1)
; GFX11-NEXT: v_add3_u32 v1, v1, v0, 0x7fff
@@ -27052,8 +26400,7 @@ define bfloat @v_rint_bf16(bfloat %a) {
; GFX8-NEXT: v_bfe_u32 v1, v0, 16, 1
; GFX8-NEXT: v_add_u32_e32 v1, vcc, v1, v0
; GFX8-NEXT: v_add_u32_e32 v1, vcc, 0x7fff, v1
-; GFX8-NEXT: v_and_b32_e32 v2, 0xff800000, v0
-; GFX8-NEXT: v_or_b32_e32 v2, 0x400000, v2
+; GFX8-NEXT: v_or_b32_e32 v2, 0x400000, v0
; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v0, v0
; GFX8-NEXT: v_cndmask_b32_e32 v0, v1, v2, vcc
; GFX8-NEXT: v_lshrrev_b32_e32 v0, 16, v0
@@ -27066,9 +26413,8 @@ define bfloat @v_rint_bf16(bfloat %a) {
; GFX9-NEXT: v_rndne_f32_e32 v0, v0
; GFX9-NEXT: v_bfe_u32 v1, v0, 16, 1
; GFX9-NEXT: s_movk_i32 s4, 0x7fff
-; GFX9-NEXT: v_and_b32_e32 v2, 0xff800000, v0
; GFX9-NEXT: v_add3_u32 v1, v1, v0, s4
-; GFX9-NEXT: v_or_b32_e32 v2, 0x400000, v2
+; GFX9-NEXT: v_or_b32_e32 v2, 0x400000, v0
; GFX9-NEXT: v_cmp_u_f32_e32 vcc, v0, v0
; GFX9-NEXT: v_cndmask_b32_e32 v0, v1, v2, vcc
; GFX9-NEXT: v_lshrrev_b32_e32 v0, 16, v0
@@ -27078,10 +26424,9 @@ define bfloat @v_rint_bf16(bfloat %a) {
; GFX10: ; %bb.0:
; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX10-NEXT: v_lshlrev_b32_e32 v0, 16, v0
-; GFX10-NEXT: s_mov_b32 s4, 0xff800000
; GFX10-NEXT: v_rndne_f32_e32 v0, v0
; GFX10-NEXT: v_bfe_u32 v1, v0, 16, 1
-; GFX10-NEXT: v_and_or_b32 v2, v0, s4, 0x400000
+; GFX10-NEXT: v_or_b32_e32 v2, 0x400000, v0
; GFX10-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0
; GFX10-NEXT: v_add3_u32 v1, v1, v0, 0x7fff
; GFX10-NEXT: v_cndmask_b32_e32 v0, v1, v2, vcc_lo
@@ -27092,11 +26437,10 @@ define bfloat @v_rint_bf16(bfloat %a) {
; GFX11: ; %bb.0:
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-NEXT: v_lshlrev_b32_e32 v0, 16, v0
-; GFX11-NEXT: s_mov_b32 s0, 0xff800000
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
; GFX11-NEXT: v_rndne_f32_e32 v0, v0
; GFX11-NEXT: v_bfe_u32 v1, v0, 16, 1
-; GFX11-NEXT: v_and_or_b32 v2, v0, s0, 0x400000
+; GFX11-NEXT: v_or_b32_e32 v2, 0x400000, v0
; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1)
; GFX11-NEXT: v_add3_u32 v1, v1, v0, 0x7fff
@@ -27137,8 +26481,7 @@ define bfloat @v_nearbyint_bf16(bfloat %a) {
; GFX8-NEXT: v_bfe_u32 v1, v0, 16, 1
; GFX8-NEXT: v_add_u32_e32 v1, vcc, v1, v0
; GFX8-NEXT: v_add_u32_e32 v1, vcc, 0x7fff, v1
-; GFX8-NEXT: v_and_b32_e32 v2, 0xff800000, v0
-; GFX8-NEXT: v_or_b32_e32 v2, 0x400000, v2
+; GFX8-NEXT: v_or_b32_e32 v2, 0x400000, v0
; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v0, v0
; GFX8-NEXT: v_cndmask_b32_e32 v0, v1, v2, vcc
; GFX8-NEXT: v_lshrrev_b32_e32 v0, 16, v0
@@ -27151,9 +26494,8 @@ define bfloat @v_nearbyint_bf16(bfloat %a) {
; GFX9-NEXT: v_rndne_f32_e32 v0, v0
; GFX9-NEXT: v_bfe_u32 v1, v0, 16, 1
; GFX9-NEXT: s_movk_i32 s4, 0x7fff
-; GFX9-NEXT: v_and_b32_e32 v2, 0xff800000, v0
; GFX9-NEXT: v_add3_u32 v1, v1, v0, s4
-; GFX9-NEXT: v_or_b32_e32 v2, 0x400000, v2
+; GFX9-NEXT: v_or_b32_e32 v2, 0x400000, v0
; GFX9-NEXT: v_cmp_u_f32_e32 vcc, v0, v0
; GFX9-NEXT: v_cndmask_b32_e32 v0, v1, v2, vcc
; GFX9-NEXT: v_lshrrev_b32_e32 v0, 16, v0
@@ -27163,10 +26505,9 @@ define bfloat @v_nearbyint_bf16(bfloat %a) {
; GFX10: ; %bb.0:
; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX10-NEXT: v_lshlrev_b32_e32 v0, 16, v0
-; GFX10-NEXT: s_mov_b32 s4, 0xff800000
; GFX10-NEXT: v_rndne_f32_e32 v0, v0
; GFX10-NEXT: v_bfe_u32 v1, v0, 16, 1
-; GFX10-NEXT: v_and_or_b32 v2, v0, s4, 0x400000
+; GFX10-NEXT: v_or_b32_e32 v2, 0x400000, v0
; GFX10-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0
; GFX10-NEXT: v_add3_u32 v1, v1, v0, 0x7fff
; GFX10-NEXT: v_cndmask_b32_e32 v0, v1, v2, vcc_lo
@@ -27177,11 +26518,10 @@ define bfloat @v_nearbyint_bf16(bfloat %a) {
; GFX11: ; %bb.0:
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-NEXT: v_lshlrev_b32_e32 v0, 16, v0
-; GFX11-NEXT: s_mov_b32 s0, 0xff800000
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
; GFX11-NEXT: v_rndne_f32_e32 v0, v0
; GFX11-NEXT: v_bfe_u32 v1, v0, 16, 1
-; GFX11-NEXT: v_and_or_b32 v2, v0, s0, 0x400000
+; GFX11-NEXT: v_or_b32_e32 v2, 0x400000, v0
; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1)
; GFX11-NEXT: v_add3_u32 v1, v1, v0, 0x7fff
@@ -27240,8 +26580,7 @@ define bfloat @v_round_bf16(bfloat %a) {
; GFX8-NEXT: v_bfe_u32 v1, v0, 16, 1
; GFX8-NEXT: v_add_u32_e32 v1, vcc, v1, v0
; GFX8-NEXT: v_add_u32_e32 v1, vcc, 0x7fff, v1
-; GFX8-NEXT: v_and_b32_e32 v2, 0xff800000, v0
-; GFX8-NEXT: v_or_b32_e32 v2, 0x400000, v2
+; GFX8-NEXT: v_or_b32_e32 v2, 0x400000, v0
; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v0, v0
; GFX8-NEXT: v_cndmask_b32_e32 v0, v1, v2, vcc
; GFX8-NEXT: v_lshrrev_b32_e32 v0, 16, v0
@@ -27260,9 +26599,8 @@ define bfloat @v_round_bf16(bfloat %a) {
; GFX9-NEXT: v_add_f32_e32 v0, v1, v0
; GFX9-NEXT: v_bfe_u32 v1, v0, 16, 1
; GFX9-NEXT: s_movk_i32 s4, 0x7fff
-; GFX9-NEXT: v_and_b32_e32 v2, 0xff800000, v0
; GFX9-NEXT: v_add3_u32 v1, v1, v0, s4
-; GFX9-NEXT: v_or_b32_e32 v2, 0x400000, v2
+; GFX9-NEXT: v_or_b32_e32 v2, 0x400000, v0
; GFX9-NEXT: v_cmp_u_f32_e32 vcc, v0, v0
; GFX9-NEXT: v_cndmask_b32_e32 v0, v1, v2, vcc
; GFX9-NEXT: v_lshrrev_b32_e32 v0, 16, v0
@@ -27276,11 +26614,10 @@ define bfloat @v_round_bf16(bfloat %a) {
; GFX10-NEXT: v_sub_f32_e32 v2, v0, v1
; GFX10-NEXT: v_cmp_ge_f32_e64 s4, |v2|, 0.5
; GFX10-NEXT: v_cndmask_b32_e64 v2, 0, 1.0, s4
-; GFX10-NEXT: s_mov_b32 s4, 0xff800000
; GFX10-NEXT: v_bfi_b32 v0, 0x7fffffff, v2, v0
; GFX10-NEXT: v_add_f32_e32 v0, v1, v0
; GFX10-NEXT: v_bfe_u32 v1, v0, 16, 1
-; GFX10-NEXT: v_and_or_b32 v2, v0, s4, 0x400000
+; GFX10-NEXT: v_or_b32_e32 v2, 0x400000, v0
; GFX10-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0
; GFX10-NEXT: v_add3_u32 v1, v1, v0, 0x7fff
; GFX10-NEXT: v_cndmask_b32_e32 v0, v1, v2, vcc_lo
@@ -27297,13 +26634,12 @@ define bfloat @v_round_bf16(bfloat %a) {
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
; GFX11-NEXT: v_cmp_ge_f32_e64 s0, |v2|, 0.5
; GFX11-NEXT: v_cndmask_b32_e64 v2, 0, 1.0, s0
-; GFX11-NEXT: s_mov_b32 s0, 0xff800000
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
; GFX11-NEXT: v_bfi_b32 v0, 0x7fffffff, v2, v0
; GFX11-NEXT: v_add_f32_e32 v0, v1, v0
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
; GFX11-NEXT: v_bfe_u32 v1, v0, 16, 1
-; GFX11-NEXT: v_and_or_b32 v2, v0, s0, 0x400000
+; GFX11-NEXT: v_or_b32_e32 v2, 0x400000, v0
; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0
; GFX11-NEXT: v_add3_u32 v1, v1, v0, 0x7fff
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
@@ -27343,8 +26679,7 @@ define bfloat @v_roundeven_bf16(bfloat %a) {
; GFX8-NEXT: v_bfe_u32 v1, v0, 16, 1
; GFX8-NEXT: v_add_u32_e32 v1, vcc, v1, v0
; GFX8-NEXT: v_add_u32_e32 v1, vcc, 0x7fff, v1
-; GFX8-NEXT: v_and_b32_e32 v2, 0xff800000, v0
-; GFX8-NEXT: v_or_b32_e32 v2, 0x400000, v2
+; GFX8-NEXT: v_or_b32_e32 v2, 0x400000, v0
; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v0, v0
; GFX8-NEXT: v_cndmask_b32_e32 v0, v1, v2, vcc
; GFX8-NEXT: v_lshrrev_b32_e32 v0, 16, v0
@@ -27357,9 +26692,8 @@ define bfloat @v_roundeven_bf16(bfloat %a) {
; GFX9-NEXT: v_rndne_f32_e32 v0, v0
; GFX9-NEXT: v_bfe_u32 v1, v0, 16, 1
; GFX9-NEXT: s_movk_i32 s4, 0x7fff
-; GFX9-NEXT: v_and_b32_e32 v2, 0xff800000, v0
; GFX9-NEXT: v_add3_u32 v1, v1, v0, s4
-; GFX9-NEXT: v_or_b32_e32 v2, 0x400000, v2
+; GFX9-NEXT: v_or_b32_e32 v2, 0x400000, v0
; GFX9-NEXT: v_cmp_u_f32_e32 vcc, v0, v0
; GFX9-NEXT: v_cndmask_b32_e32 v0, v1, v2, vcc
; GFX9-NEXT: v_lshrrev_b32_e32 v0, 16, v0
@@ -27369,10 +26703,9 @@ define bfloat @v_roundeven_bf16(bfloat %a) {
; GFX10: ; %bb.0:
; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX10-NEXT: v_lshlrev_b32_e32 v0, 16, v0
-; GFX10-NEXT: s_mov_b32 s4, 0xff800000
; GFX10-NEXT: v_rndne_f32_e32 v0, v0
; GFX10-NEXT: v_bfe_u32 v1, v0, 16, 1
-; GFX10-NEXT: v_and_or_b32 v2, v0, s4, 0x400000
+; GFX10-NEXT: v_or_b32_e32 v2, 0x400000, v0
; GFX10-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0
; GFX10-NEXT: v_add3_u32 v1, v1, v0, 0x7fff
; GFX10-NEXT: v_cndmask_b32_e32 v0, v1, v2, vcc_lo
@@ -27383,11 +26716,10 @@ define bfloat @v_roundeven_bf16(bfloat %a) {
; GFX11: ; %bb.0:
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-NEXT: v_lshlrev_b32_e32 v0, 16, v0
-; GFX11-NEXT: s_mov_b32 s0, 0xff800000
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
; GFX11-NEXT: v_rndne_f32_e32 v0, v0
; GFX11-NEXT: v_bfe_u32 v1, v0, 16, 1
-; GFX11-NEXT: v_and_or_b32 v2, v0, s0, 0x400000
+; GFX11-NEXT: v_or_b32_e32 v2, 0x400000, v0
; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1)
; GFX11-NEXT: v_add3_u32 v1, v1, v0, 0x7fff
@@ -27428,8 +26760,7 @@ define bfloat @v_floor_bf16(bfloat %a) {
; GFX8-NEXT: v_bfe_u32 v1, v0, 16, 1
; GFX8-NEXT: v_add_u32_e32 v1, vcc, v1, v0
; GFX8-NEXT: v_add_u32_e32 v1, vcc, 0x7fff, v1
-; GFX8-NEXT: v_and_b32_e32 v2, 0xff800000, v0
-; GFX8-NEXT: v_or_b32_e32 v2, 0x400000, v2
+; GFX8-NEXT: v_or_b32_e32 v2, 0x400000, v0
; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v0, v0
; GFX8-NEXT: v_cndmask_b32_e32 v0, v1, v2, vcc
; GFX8-NEXT: v_lshrrev_b32_e32 v0, 16, v0
@@ -27442,9 +26773,8 @@ define bfloat @v_floor_bf16(bfloat %a) {
; GFX9-NEXT: v_floor_f32_e32 v0, v0
; GFX9-NEXT: v_bfe_u32 v1, v0, 16, 1
; GFX9-NEXT: s_movk_i32 s4, 0x7fff
-; GFX9-NEXT: v_and_b32_e32 v2, 0xff800000, v0
; GFX9-NEXT: v_add3_u32 v1, v1, v0, s4
-; GFX9-NEXT: v_or_b32_e32 v2, 0x400000, v2
+; GFX9-NEXT: v_or_b32_e32 v2, 0x400000, v0
; GFX9-NEXT: v_cmp_u_f32_e32 vcc, v0, v0
; GFX9-NEXT: v_cndmask_b32_e32 v0, v1, v2, vcc
; GFX9-NEXT: v_lshrrev_b32_e32 v0, 16, v0
@@ -27454,10 +26784,9 @@ define bfloat @v_floor_bf16(bfloat %a) {
; GFX10: ; %bb.0:
; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX10-NEXT: v_lshlrev_b32_e32 v0, 16, v0
-; GFX10-NEXT: s_mov_b32 s4, 0xff800000
; GFX10-NEXT: v_floor_f32_e32 v0, v0
; GFX10-NEXT: v_bfe_u32 v1, v0, 16, 1
-; GFX10-NEXT: v_and_or_b32 v2, v0, s4, 0x400000
+; GFX10-NEXT: v_or_b32_e32 v2, 0x400000, v0
; GFX10-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0
; GFX10-NEXT: v_add3_u32 v1, v1, v0, 0x7fff
; GFX10-NEXT: v_cndmask_b32_e32 v0, v1, v2, vcc_lo
@@ -27468,11 +26797,10 @@ define bfloat @v_floor_bf16(bfloat %a) {
; GFX11: ; %bb.0:
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-NEXT: v_lshlrev_b32_e32 v0, 16, v0
-; GFX11-NEXT: s_mov_b32 s0, 0xff800000
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
; GFX11-NEXT: v_floor_f32_e32 v0, v0
; GFX11-NEXT: v_bfe_u32 v1, v0, 16, 1
-; GFX11-NEXT: v_and_or_b32 v2, v0, s0, 0x400000
+; GFX11-NEXT: v_or_b32_e32 v2, 0x400000, v0
; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1)
; GFX11-NEXT: v_add3_u32 v1, v1, v0, 0x7fff
@@ -27505,8 +26833,7 @@ define bfloat @v_canonicalize_bf16(bfloat %a) {
; GFX8-NEXT: v_bfe_u32 v1, v0, 16, 1
; GFX8-NEXT: v_add_u32_e32 v1, vcc, v1, v0
; GFX8-NEXT: v_add_u32_e32 v1, vcc, 0x7fff, v1
-; GFX8-NEXT: v_and_b32_e32 v2, 0xff800000, v0
-; GFX8-NEXT: v_or_b32_e32 v2, 0x400000, v2
+; GFX8-NEXT: v_or_b32_e32 v2, 0x400000, v0
; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v0, v0
; GFX8-NEXT: v_cndmask_b32_e32 v0, v1, v2, vcc
; GFX8-NEXT: v_lshrrev_b32_e32 v0, 16, v0
@@ -27519,9 +26846,8 @@ define bfloat @v_canonicalize_bf16(bfloat %a) {
; GFX9-NEXT: v_max_f32_e32 v0, v0, v0
; GFX9-NEXT: v_bfe_u32 v1, v0, 16, 1
; GFX9-NEXT: s_movk_i32 s4, 0x7fff
-; GFX9-NEXT: v_and_b32_e32 v2, 0xff800000, v0
; GFX9-NEXT: v_add3_u32 v1, v1, v0, s4
-; GFX9-NEXT: v_or_b32_e32 v2, 0x400000, v2
+; GFX9-NEXT: v_or_b32_e32 v2, 0x400000, v0
; GFX9-NEXT: v_cmp_u_f32_e32 vcc, v0, v0
; GFX9-NEXT: v_cndmask_b32_e32 v0, v1, v2, vcc
; GFX9-NEXT: v_lshrrev_b32_e32 v0, 16, v0
@@ -27531,10 +26857,9 @@ define bfloat @v_canonicalize_bf16(bfloat %a) {
; GFX10: ; %bb.0:
; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX10-NEXT: v_lshlrev_b32_e32 v0, 16, v0
-; GFX10-NEXT: s_mov_b32 s4, 0xff800000
; GFX10-NEXT: v_max_f32_e32 v0, v0, v0
; GFX10-NEXT: v_bfe_u32 v1, v0, 16, 1
-; GFX10-NEXT: v_and_or_b32 v2, v0, s4, 0x400000
+; GFX10-NEXT: v_or_b32_e32 v2, 0x400000, v0
; GFX10-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0
; GFX10-NEXT: v_add3_u32 v1, v1, v0, 0x7fff
; GFX10-NEXT: v_cndmask_b32_e32 v0, v1, v2, vcc_lo
@@ -27545,11 +26870,10 @@ define bfloat @v_canonicalize_bf16(bfloat %a) {
; GFX11: ; %bb.0:
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-NEXT: v_lshlrev_b32_e32 v0, 16, v0
-; GFX11-NEXT: s_mov_b32 s0, 0xff800000
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
; GFX11-NEXT: v_max_f32_e32 v0, v0, v0
; GFX11-NEXT: v_bfe_u32 v1, v0, 16, 1
-; GFX11-NEXT: v_and_or_b32 v2, v0, s0, 0x400000
+; GFX11-NEXT: v_or_b32_e32 v2, 0x400000, v0
; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1)
; GFX11-NEXT: v_add3_u32 v1, v1, v0, 0x7fff
@@ -31058,9 +30382,8 @@ define bfloat @v_sitofp_i16_to_bf16(i16 %x) {
; GFX8-NEXT: v_cvt_f32_i32_sdwa v0, sext(v0) dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0
; GFX8-NEXT: v_bfe_u32 v2, v0, 16, 1
; GFX8-NEXT: v_add_u32_e32 v2, vcc, v2, v0
-; GFX8-NEXT: v_and_b32_e32 v1, 0xff800000, v0
; GFX8-NEXT: v_add_u32_e32 v2, vcc, 0x7fff, v2
-; GFX8-NEXT: v_or_b32_e32 v1, 0x400000, v1
+; GFX8-NEXT: v_or_b32_e32 v1, 0x400000, v0
; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v0, v0
; GFX8-NEXT: v_cndmask_b32_e32 v0, v2, v1, vcc
; GFX8-NEXT: v_lshrrev_b32_e32 v0, 16, v0
@@ -31072,9 +30395,8 @@ define bfloat @v_sitofp_i16_to_bf16(i16 %x) {
; GFX9-NEXT: v_cvt_f32_i32_sdwa v0, sext(v0) dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0
; GFX9-NEXT: s_movk_i32 s4, 0x7fff
; GFX9-NEXT: v_bfe_u32 v1, v0, 16, 1
-; GFX9-NEXT: v_and_b32_e32 v2, 0xff800000, v0
+; GFX9-NEXT: v_or_b32_e32 v2, 0x400000, v0
; GFX9-NEXT: v_add3_u32 v1, v1, v0, s4
-; GFX9-NEXT: v_or_b32_e32 v2, 0x400000, v2
; GFX9-NEXT: v_cmp_u_f32_e32 vcc, v0, v0
; GFX9-NEXT: v_cndmask_b32_e32 v0, v1, v2, vcc
; GFX9-NEXT: v_lshrrev_b32_e32 v0, 16, v0
@@ -31084,9 +30406,8 @@ define bfloat @v_sitofp_i16_to_bf16(i16 %x) {
; GFX10: ; %bb.0:
; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX10-NEXT: v_cvt_f32_i32_sdwa v0, sext(v0) dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0
-; GFX10-NEXT: s_mov_b32 s4, 0xff800000
; GFX10-NEXT: v_bfe_u32 v1, v0, 16, 1
-; GFX10-NEXT: v_and_or_b32 v2, v0, s4, 0x400000
+; GFX10-NEXT: v_or_b32_e32 v2, 0x400000, v0
; GFX10-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0
; GFX10-NEXT: v_add3_u32 v1, v1, v0, 0x7fff
; GFX10-NEXT: v_cndmask_b32_e32 v0, v1, v2, vcc_lo
@@ -31097,11 +30418,10 @@ define bfloat @v_sitofp_i16_to_bf16(i16 %x) {
; GFX11: ; %bb.0:
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-NEXT: v_bfe_i32 v0, v0, 0, 16
-; GFX11-NEXT: s_mov_b32 s0, 0xff800000
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
; GFX11-NEXT: v_cvt_f32_i32_e32 v0, v0
; GFX11-NEXT: v_bfe_u32 v1, v0, 16, 1
-; GFX11-NEXT: v_and_or_b32 v2, v0, s0, 0x400000
+; GFX11-NEXT: v_or_b32_e32 v2, 0x400000, v0
; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1)
; GFX11-NEXT: v_add3_u32 v1, v1, v0, 0x7fff
@@ -31143,16 +30463,14 @@ define <2 x bfloat> @v_sitofp_v2i16_to_v2bf16(<2 x i16> %x) {
; GFX8-NEXT: v_cvt_f32_i32_sdwa v0, sext(v0) dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1
; GFX8-NEXT: v_bfe_u32 v3, v1, 16, 1
; GFX8-NEXT: v_add_u32_e32 v3, vcc, v3, v1
-; GFX8-NEXT: v_and_b32_e32 v2, 0xff800000, v1
; GFX8-NEXT: v_add_u32_e32 v3, vcc, 0x7fff, v3
-; GFX8-NEXT: v_or_b32_e32 v2, 0x400000, v2
+; GFX8-NEXT: v_or_b32_e32 v2, 0x400000, v1
; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v1, v1
; GFX8-NEXT: v_cndmask_b32_e32 v1, v3, v2, vcc
; GFX8-NEXT: v_bfe_u32 v2, v0, 16, 1
; GFX8-NEXT: v_add_u32_e32 v2, vcc, v2, v0
; GFX8-NEXT: v_add_u32_e32 v2, vcc, 0x7fff, v2
-; GFX8-NEXT: v_and_b32_e32 v3, 0xff800000, v0
-; GFX8-NEXT: v_or_b32_e32 v3, 0x400000, v3
+; GFX8-NEXT: v_or_b32_e32 v3, 0x400000, v0
; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v0, v0
; GFX8-NEXT: v_cndmask_b32_e32 v0, v2, v3, vcc
; GFX8-NEXT: v_lshrrev_b32_e32 v0, 16, v0
@@ -31166,15 +30484,13 @@ define <2 x bfloat> @v_sitofp_v2i16_to_v2bf16(<2 x i16> %x) {
; GFX9-NEXT: v_cvt_f32_i32_sdwa v0, sext(v0) dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1
; GFX9-NEXT: s_movk_i32 s4, 0x7fff
; GFX9-NEXT: v_bfe_u32 v2, v1, 16, 1
-; GFX9-NEXT: v_and_b32_e32 v3, 0xff800000, v1
+; GFX9-NEXT: v_or_b32_e32 v3, 0x400000, v1
; GFX9-NEXT: v_add3_u32 v2, v2, v1, s4
-; GFX9-NEXT: v_or_b32_e32 v3, 0x400000, v3
; GFX9-NEXT: v_cmp_u_f32_e32 vcc, v1, v1
; GFX9-NEXT: v_cndmask_b32_e32 v1, v2, v3, vcc
; GFX9-NEXT: v_bfe_u32 v2, v0, 16, 1
-; GFX9-NEXT: v_and_b32_e32 v3, 0xff800000, v0
; GFX9-NEXT: v_add3_u32 v2, v2, v0, s4
-; GFX9-NEXT: v_or_b32_e32 v3, 0x400000, v3
+; GFX9-NEXT: v_or_b32_e32 v3, 0x400000, v0
; GFX9-NEXT: v_cmp_u_f32_e32 vcc, v0, v0
; GFX9-NEXT: v_cndmask_b32_e32 v0, v2, v3, vcc
; GFX9-NEXT: s_mov_b32 s4, 0x7060302
@@ -31186,12 +30502,11 @@ define <2 x bfloat> @v_sitofp_v2i16_to_v2bf16(<2 x i16> %x) {
; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX10-NEXT: v_cvt_f32_i32_sdwa v1, sext(v0) dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0
; GFX10-NEXT: v_cvt_f32_i32_sdwa v0, sext(v0) dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1
-; GFX10-NEXT: s_mov_b32 s4, 0xff800000
; GFX10-NEXT: v_bfe_u32 v2, v1, 16, 1
; GFX10-NEXT: v_bfe_u32 v3, v0, 16, 1
-; GFX10-NEXT: v_and_or_b32 v4, v1, s4, 0x400000
+; GFX10-NEXT: v_or_b32_e32 v4, 0x400000, v1
; GFX10-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
-; GFX10-NEXT: v_and_or_b32 v5, v0, s4, 0x400000
+; GFX10-NEXT: v_or_b32_e32 v5, 0x400000, v0
; GFX10-NEXT: v_add3_u32 v2, v2, v1, 0x7fff
; GFX10-NEXT: v_add3_u32 v3, v3, v0, 0x7fff
; GFX10-NEXT: v_cndmask_b32_e32 v1, v2, v4, vcc_lo
@@ -31205,16 +30520,15 @@ define <2 x bfloat> @v_sitofp_v2i16_to_v2bf16(<2 x i16> %x) {
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-NEXT: v_bfe_i32 v1, v0, 0, 16
; GFX11-NEXT: v_ashrrev_i32_e32 v0, 16, v0
-; GFX11-NEXT: s_mov_b32 s0, 0xff800000
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
; GFX11-NEXT: v_cvt_f32_i32_e32 v1, v1
; GFX11-NEXT: v_cvt_f32_i32_e32 v0, v0
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
; GFX11-NEXT: v_bfe_u32 v2, v1, 16, 1
; GFX11-NEXT: v_bfe_u32 v3, v0, 16, 1
-; GFX11-NEXT: v_and_or_b32 v4, v1, s0, 0x400000
+; GFX11-NEXT: v_or_b32_e32 v4, 0x400000, v1
; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
-; GFX11-NEXT: v_and_or_b32 v5, v0, s0, 0x400000
+; GFX11-NEXT: v_or_b32_e32 v5, 0x400000, v0
; GFX11-NEXT: v_add3_u32 v2, v2, v1, 0x7fff
; GFX11-NEXT: v_add3_u32 v3, v3, v0, 0x7fff
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_3)
@@ -31263,26 +30577,22 @@ define <3 x bfloat> @v_sitofp_v3i16_to_v3bf16(<3 x i16> %x) {
; GFX8-NEXT: v_cvt_f32_i32_sdwa v1, sext(v1) dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0
; GFX8-NEXT: v_cvt_f32_i32_sdwa v4, sext(v0) dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0
; GFX8-NEXT: v_cvt_f32_i32_sdwa v0, sext(v0) dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1
-; GFX8-NEXT: s_movk_i32 s4, 0x7fff
; GFX8-NEXT: v_bfe_u32 v3, v1, 16, 1
; GFX8-NEXT: v_add_u32_e32 v3, vcc, v3, v1
-; GFX8-NEXT: v_and_b32_e32 v2, 0xff800000, v1
; GFX8-NEXT: v_add_u32_e32 v3, vcc, 0x7fff, v3
-; GFX8-NEXT: v_or_b32_e32 v2, 0x400000, v2
+; GFX8-NEXT: v_or_b32_e32 v2, 0x400000, v1
; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v1, v1
; GFX8-NEXT: v_cndmask_b32_e32 v1, v3, v2, vcc
; GFX8-NEXT: v_bfe_u32 v3, v4, 16, 1
; GFX8-NEXT: v_add_u32_e32 v3, vcc, v3, v4
-; GFX8-NEXT: v_and_b32_e32 v2, 0xff800000, v4
-; GFX8-NEXT: v_add_u32_e32 v3, vcc, s4, v3
-; GFX8-NEXT: v_or_b32_e32 v2, 0x400000, v2
+; GFX8-NEXT: v_add_u32_e32 v3, vcc, 0x7fff, v3
+; GFX8-NEXT: v_or_b32_e32 v2, 0x400000, v4
; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v4, v4
; GFX8-NEXT: v_cndmask_b32_e32 v2, v3, v2, vcc
; GFX8-NEXT: v_bfe_u32 v3, v0, 16, 1
; GFX8-NEXT: v_add_u32_e32 v3, vcc, v3, v0
; GFX8-NEXT: v_add_u32_e32 v3, vcc, 0x7fff, v3
-; GFX8-NEXT: v_and_b32_e32 v4, 0xff800000, v0
-; GFX8-NEXT: v_or_b32_e32 v4, 0x400000, v4
+; GFX8-NEXT: v_or_b32_e32 v4, 0x400000, v0
; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v0, v0
; GFX8-NEXT: v_cndmask_b32_e32 v0, v3, v4, vcc
; GFX8-NEXT: v_lshrrev_b32_e32 v0, 16, v0
@@ -31294,25 +30604,22 @@ define <3 x bfloat> @v_sitofp_v3i16_to_v3bf16(<3 x i16> %x) {
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: v_cvt_f32_i32_sdwa v1, sext(v1) dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0
-; GFX9-NEXT: v_cvt_f32_i32_sdwa v4, sext(v0) dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0
+; GFX9-NEXT: v_cvt_f32_i32_sdwa v2, sext(v0) dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0
; GFX9-NEXT: s_movk_i32 s4, 0x7fff
; GFX9-NEXT: v_cvt_f32_i32_sdwa v0, sext(v0) dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1
-; GFX9-NEXT: v_bfe_u32 v2, v1, 16, 1
-; GFX9-NEXT: v_and_b32_e32 v3, 0xff800000, v1
-; GFX9-NEXT: v_add3_u32 v2, v2, v1, s4
-; GFX9-NEXT: v_or_b32_e32 v3, 0x400000, v3
+; GFX9-NEXT: v_bfe_u32 v3, v1, 16, 1
+; GFX9-NEXT: v_add3_u32 v3, v3, v1, s4
+; GFX9-NEXT: v_or_b32_e32 v4, 0x400000, v1
; GFX9-NEXT: v_cmp_u_f32_e32 vcc, v1, v1
-; GFX9-NEXT: v_cndmask_b32_e32 v1, v2, v3, vcc
-; GFX9-NEXT: v_bfe_u32 v2, v4, 16, 1
-; GFX9-NEXT: v_and_b32_e32 v3, 0xff800000, v4
-; GFX9-NEXT: v_add3_u32 v2, v2, v4, s4
-; GFX9-NEXT: v_or_b32_e32 v3, 0x400000, v3
-; GFX9-NEXT: v_cmp_u_f32_e32 vcc, v4, v4
-; GFX9-NEXT: v_cndmask_b32_e32 v2, v2, v3, vcc
+; GFX9-NEXT: v_cndmask_b32_e32 v1, v3, v4, vcc
+; GFX9-NEXT: v_bfe_u32 v3, v2, 16, 1
+; GFX9-NEXT: v_add3_u32 v3, v3, v2, s4
+; GFX9-NEXT: v_or_b32_e32 v4, 0x400000, v2
+; GFX9-NEXT: v_cmp_u_f32_e32 vcc, v2, v2
+; GFX9-NEXT: v_cndmask_b32_e32 v2, v3, v4, vcc
; GFX9-NEXT: v_bfe_u32 v3, v0, 16, 1
-; GFX9-NEXT: v_and_b32_e32 v4, 0xff800000, v0
; GFX9-NEXT: v_add3_u32 v3, v3, v0, s4
-; GFX9-NEXT: v_or_b32_e32 v4, 0x400000, v4
+; GFX9-NEXT: v_or_b32_e32 v4, 0x400000, v0
; GFX9-NEXT: v_cmp_u_f32_e32 vcc, v0, v0
; GFX9-NEXT: v_cndmask_b32_e32 v0, v3, v4, vcc
; GFX9-NEXT: s_mov_b32 s4, 0x7060302
@@ -31325,24 +30632,23 @@ define <3 x bfloat> @v_sitofp_v3i16_to_v3bf16(<3 x i16> %x) {
; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX10-NEXT: v_cvt_f32_i32_sdwa v2, sext(v0) dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0
; GFX10-NEXT: v_cvt_f32_i32_sdwa v0, sext(v0) dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1
-; GFX10-NEXT: s_mov_b32 s4, 0xff800000
; GFX10-NEXT: v_cvt_f32_i32_sdwa v1, sext(v1) dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0
; GFX10-NEXT: v_bfe_u32 v3, v2, 16, 1
; GFX10-NEXT: v_bfe_u32 v5, v0, 16, 1
-; GFX10-NEXT: v_and_or_b32 v7, v2, s4, 0x400000
+; GFX10-NEXT: v_or_b32_e32 v7, 0x400000, v2
; GFX10-NEXT: v_cmp_u_f32_e32 vcc_lo, v2, v2
-; GFX10-NEXT: v_bfe_u32 v6, v1, 16, 1
+; GFX10-NEXT: v_bfe_u32 v4, v1, 16, 1
; GFX10-NEXT: v_add3_u32 v3, v3, v2, 0x7fff
-; GFX10-NEXT: v_and_or_b32 v8, v0, s4, 0x400000
+; GFX10-NEXT: v_or_b32_e32 v8, 0x400000, v0
; GFX10-NEXT: v_add3_u32 v5, v5, v0, 0x7fff
-; GFX10-NEXT: v_and_or_b32 v4, v1, s4, 0x400000
-; GFX10-NEXT: v_add3_u32 v6, v6, v1, 0x7fff
+; GFX10-NEXT: v_or_b32_e32 v6, 0x400000, v1
+; GFX10-NEXT: v_add3_u32 v4, v4, v1, 0x7fff
; GFX10-NEXT: v_cndmask_b32_e32 v2, v3, v7, vcc_lo
; GFX10-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0
; GFX10-NEXT: v_cndmask_b32_e32 v0, v5, v8, vcc_lo
; GFX10-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
; GFX10-NEXT: v_perm_b32 v0, v0, v2, 0x7060302
-; GFX10-NEXT: v_cndmask_b32_e32 v1, v6, v4, vcc_lo
+; GFX10-NEXT: v_cndmask_b32_e32 v1, v4, v6, vcc_lo
; GFX10-NEXT: v_alignbit_b32 v1, s4, v1, 16
; GFX10-NEXT: s_setpc_b64 s[30:31]
%op = sitofp <3 x i16> %x to <3 x bfloat>
@@ -31393,31 +30699,27 @@ define <4 x bfloat> @v_sitofp_v4i16_to_v4bf16(<4 x i16> %x) {
; GFX8-NEXT: s_movk_i32 s4, 0x7fff
; GFX8-NEXT: v_bfe_u32 v4, v2, 16, 1
; GFX8-NEXT: v_add_u32_e32 v4, vcc, v4, v2
-; GFX8-NEXT: v_and_b32_e32 v3, 0xff800000, v2
; GFX8-NEXT: v_add_u32_e32 v4, vcc, 0x7fff, v4
-; GFX8-NEXT: v_or_b32_e32 v3, 0x400000, v3
+; GFX8-NEXT: v_or_b32_e32 v3, 0x400000, v2
; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v2, v2
; GFX8-NEXT: v_cndmask_b32_e32 v2, v4, v3, vcc
; GFX8-NEXT: v_bfe_u32 v3, v1, 16, 1
; GFX8-NEXT: v_add_u32_e32 v3, vcc, v3, v1
; GFX8-NEXT: v_add_u32_e32 v3, vcc, s4, v3
-; GFX8-NEXT: v_and_b32_e32 v4, 0xff800000, v1
-; GFX8-NEXT: v_or_b32_e32 v4, 0x400000, v4
+; GFX8-NEXT: v_or_b32_e32 v4, 0x400000, v1
; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v1, v1
; GFX8-NEXT: v_cndmask_b32_e32 v1, v3, v4, vcc
; GFX8-NEXT: v_bfe_u32 v4, v5, 16, 1
; GFX8-NEXT: v_cvt_f32_i32_sdwa v0, sext(v0) dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1
; GFX8-NEXT: v_add_u32_e32 v4, vcc, v4, v5
-; GFX8-NEXT: v_and_b32_e32 v3, 0xff800000, v5
; GFX8-NEXT: v_add_u32_e32 v4, vcc, s4, v4
-; GFX8-NEXT: v_or_b32_e32 v3, 0x400000, v3
+; GFX8-NEXT: v_or_b32_e32 v3, 0x400000, v5
; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v5, v5
; GFX8-NEXT: v_cndmask_b32_e32 v3, v4, v3, vcc
; GFX8-NEXT: v_bfe_u32 v4, v0, 16, 1
; GFX8-NEXT: v_add_u32_e32 v4, vcc, v4, v0
; GFX8-NEXT: v_add_u32_e32 v4, vcc, 0x7fff, v4
-; GFX8-NEXT: v_and_b32_e32 v5, 0xff800000, v0
-; GFX8-NEXT: v_or_b32_e32 v5, 0x400000, v5
+; GFX8-NEXT: v_or_b32_e32 v5, 0x400000, v0
; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v0, v0
; GFX8-NEXT: v_cndmask_b32_e32 v0, v4, v5, vcc
; GFX8-NEXT: v_lshrrev_b32_e32 v1, 16, v1
@@ -31430,32 +30732,28 @@ define <4 x bfloat> @v_sitofp_v4i16_to_v4bf16(<4 x i16> %x) {
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: v_cvt_f32_i32_sdwa v2, sext(v1) dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0
-; GFX9-NEXT: v_cvt_f32_i32_sdwa v1, sext(v1) dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1
; GFX9-NEXT: s_movk_i32 s4, 0x7fff
-; GFX9-NEXT: v_cvt_f32_i32_sdwa v5, sext(v0) dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0
+; GFX9-NEXT: v_cvt_f32_i32_sdwa v1, sext(v1) dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1
; GFX9-NEXT: v_bfe_u32 v3, v2, 16, 1
-; GFX9-NEXT: v_and_b32_e32 v4, 0xff800000, v2
; GFX9-NEXT: v_add3_u32 v3, v3, v2, s4
-; GFX9-NEXT: v_or_b32_e32 v4, 0x400000, v4
+; GFX9-NEXT: v_or_b32_e32 v4, 0x400000, v2
; GFX9-NEXT: v_cmp_u_f32_e32 vcc, v2, v2
; GFX9-NEXT: v_cndmask_b32_e32 v2, v3, v4, vcc
-; GFX9-NEXT: v_bfe_u32 v3, v1, 16, 1
-; GFX9-NEXT: v_and_b32_e32 v4, 0xff800000, v1
+; GFX9-NEXT: v_cvt_f32_i32_sdwa v4, sext(v0) dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0
; GFX9-NEXT: v_cvt_f32_i32_sdwa v0, sext(v0) dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1
+; GFX9-NEXT: v_bfe_u32 v3, v1, 16, 1
; GFX9-NEXT: v_add3_u32 v3, v3, v1, s4
-; GFX9-NEXT: v_or_b32_e32 v4, 0x400000, v4
+; GFX9-NEXT: v_or_b32_e32 v5, 0x400000, v1
; GFX9-NEXT: v_cmp_u_f32_e32 vcc, v1, v1
-; GFX9-NEXT: v_cndmask_b32_e32 v1, v3, v4, vcc
-; GFX9-NEXT: v_bfe_u32 v3, v5, 16, 1
-; GFX9-NEXT: v_and_b32_e32 v4, 0xff800000, v5
-; GFX9-NEXT: v_add3_u32 v3, v3, v5, s4
-; GFX9-NEXT: v_or_b32_e32 v4, 0x400000, v4
-; GFX9-NEXT: v_cmp_u_f32_e32 vcc, v5, v5
-; GFX9-NEXT: v_cndmask_b32_e32 v3, v3, v4, vcc
+; GFX9-NEXT: v_cndmask_b32_e32 v1, v3, v5, vcc
+; GFX9-NEXT: v_bfe_u32 v3, v4, 16, 1
+; GFX9-NEXT: v_add3_u32 v3, v3, v4, s4
+; GFX9-NEXT: v_or_b32_e32 v5, 0x400000, v4
+; GFX9-NEXT: v_cmp_u_f32_e32 vcc, v4, v4
; GFX9-NEXT: v_bfe_u32 v4, v0, 16, 1
-; GFX9-NEXT: v_and_b32_e32 v5, 0xff800000, v0
+; GFX9-NEXT: v_cndmask_b32_e32 v3, v3, v5, vcc
; GFX9-NEXT: v_add3_u32 v4, v4, v0, s4
-; GFX9-NEXT: v_or_b32_e32 v5, 0x400000, v5
+; GFX9-NEXT: v_or_b32_e32 v5, 0x400000, v0
; GFX9-NEXT: v_cmp_u_f32_e32 vcc, v0, v0
; GFX9-NEXT: v_cndmask_b32_e32 v0, v4, v5, vcc
; GFX9-NEXT: s_mov_b32 s4, 0x7060302
@@ -31468,30 +30766,29 @@ define <4 x bfloat> @v_sitofp_v4i16_to_v4bf16(<4 x i16> %x) {
; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX10-NEXT: v_cvt_f32_i32_sdwa v2, sext(v1) dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0
; GFX10-NEXT: v_cvt_f32_i32_sdwa v3, sext(v0) dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0
-; GFX10-NEXT: s_mov_b32 s4, 0xff800000
; GFX10-NEXT: v_cvt_f32_i32_sdwa v0, sext(v0) dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1
; GFX10-NEXT: v_cvt_f32_i32_sdwa v1, sext(v1) dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1
-; GFX10-NEXT: v_bfe_u32 v5, v2, 16, 1
-; GFX10-NEXT: v_and_or_b32 v4, v2, s4, 0x400000
-; GFX10-NEXT: v_bfe_u32 v7, v3, 16, 1
+; GFX10-NEXT: v_bfe_u32 v4, v2, 16, 1
+; GFX10-NEXT: v_or_b32_e32 v5, 0x400000, v2
+; GFX10-NEXT: v_bfe_u32 v8, v3, 16, 1
; GFX10-NEXT: v_cmp_u_f32_e32 vcc_lo, v2, v2
-; GFX10-NEXT: v_and_or_b32 v6, v3, s4, 0x400000
-; GFX10-NEXT: v_add3_u32 v5, v5, v2, 0x7fff
+; GFX10-NEXT: v_or_b32_e32 v9, 0x400000, v3
+; GFX10-NEXT: v_add3_u32 v4, v4, v2, 0x7fff
; GFX10-NEXT: v_bfe_u32 v10, v0, 16, 1
-; GFX10-NEXT: v_add3_u32 v7, v7, v3, 0x7fff
-; GFX10-NEXT: v_bfe_u32 v8, v1, 16, 1
-; GFX10-NEXT: v_and_or_b32 v11, v0, s4, 0x400000
-; GFX10-NEXT: v_cndmask_b32_e32 v2, v5, v4, vcc_lo
+; GFX10-NEXT: v_add3_u32 v8, v8, v3, 0x7fff
+; GFX10-NEXT: v_bfe_u32 v6, v1, 16, 1
+; GFX10-NEXT: v_or_b32_e32 v11, 0x400000, v0
+; GFX10-NEXT: v_cndmask_b32_e32 v2, v4, v5, vcc_lo
; GFX10-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3
; GFX10-NEXT: v_add3_u32 v10, v10, v0, 0x7fff
-; GFX10-NEXT: v_and_or_b32 v9, v1, s4, 0x400000
-; GFX10-NEXT: v_add3_u32 v8, v8, v1, 0x7fff
-; GFX10-NEXT: v_cndmask_b32_e32 v3, v7, v6, vcc_lo
+; GFX10-NEXT: v_or_b32_e32 v7, 0x400000, v1
+; GFX10-NEXT: v_add3_u32 v6, v6, v1, 0x7fff
+; GFX10-NEXT: v_cndmask_b32_e32 v3, v8, v9, vcc_lo
; GFX10-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0
; GFX10-NEXT: v_cndmask_b32_e32 v0, v10, v11, vcc_lo
; GFX10-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
; GFX10-NEXT: v_perm_b32 v0, v0, v3, 0x7060302
-; GFX10-NEXT: v_cndmask_b32_e32 v1, v8, v9, vcc_lo
+; GFX10-NEXT: v_cndmask_b32_e32 v1, v6, v7, vcc_lo
; GFX10-NEXT: v_perm_b32 v1, v1, v2, 0x7060302
; GFX10-NEXT: s_setpc_b64 s[30:31]
;
@@ -31499,37 +30796,39 @@ define <4 x bfloat> @v_sitofp_v4i16_to_v4bf16(<4 x i16> %x) {
; GFX11: ; %bb.0:
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-NEXT: v_bfe_i32 v2, v1, 0, 16
+; GFX11-NEXT: v_ashrrev_i32_e32 v1, 16, v1
; GFX11-NEXT: v_bfe_i32 v3, v0, 0, 16
; GFX11-NEXT: v_ashrrev_i32_e32 v0, 16, v0
-; GFX11-NEXT: s_mov_b32 s0, 0xff800000
-; GFX11-NEXT: v_ashrrev_i32_e32 v1, 16, v1
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
; GFX11-NEXT: v_cvt_f32_i32_e32 v2, v2
+; GFX11-NEXT: v_cvt_f32_i32_e32 v1, v1
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
; GFX11-NEXT: v_cvt_f32_i32_e32 v3, v3
; GFX11-NEXT: v_cvt_f32_i32_e32 v0, v0
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
-; GFX11-NEXT: v_cvt_f32_i32_e32 v1, v1
-; GFX11-NEXT: v_bfe_u32 v5, v2, 16, 1
-; GFX11-NEXT: v_and_or_b32 v4, v2, s0, 0x400000
-; GFX11-NEXT: v_bfe_u32 v7, v3, 16, 1
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4)
+; GFX11-NEXT: v_bfe_u32 v4, v2, 16, 1
+; GFX11-NEXT: v_or_b32_e32 v5, 0x400000, v2
+; GFX11-NEXT: v_bfe_u32 v6, v1, 16, 1
+; GFX11-NEXT: v_bfe_u32 v8, v3, 16, 1
; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v2, v2
-; GFX11-NEXT: v_and_or_b32 v6, v3, s0, 0x400000
-; GFX11-NEXT: v_add3_u32 v5, v5, v2, 0x7fff
+; GFX11-NEXT: v_add3_u32 v4, v4, v2, 0x7fff
+; GFX11-NEXT: v_or_b32_e32 v7, 0x400000, v1
+; GFX11-NEXT: v_or_b32_e32 v9, 0x400000, v3
; GFX11-NEXT: v_bfe_u32 v10, v0, 16, 1
-; GFX11-NEXT: v_add3_u32 v7, v7, v3, 0x7fff
-; GFX11-NEXT: v_bfe_u32 v8, v1, 16, 1
-; GFX11-NEXT: v_and_or_b32 v11, v0, s0, 0x400000
-; GFX11-NEXT: v_cndmask_b32_e32 v2, v5, v4, vcc_lo
+; GFX11-NEXT: v_add3_u32 v6, v6, v1, 0x7fff
+; GFX11-NEXT: v_add3_u32 v8, v8, v3, 0x7fff
+; GFX11-NEXT: v_cndmask_b32_e32 v2, v4, v5, vcc_lo
; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3
+; GFX11-NEXT: v_or_b32_e32 v11, 0x400000, v0
; GFX11-NEXT: v_add3_u32 v10, v10, v0, 0x7fff
-; GFX11-NEXT: v_and_or_b32 v9, v1, s0, 0x400000
-; GFX11-NEXT: v_add3_u32 v8, v8, v1, 0x7fff
-; GFX11-NEXT: v_cndmask_b32_e32 v3, v7, v6, vcc_lo
+; GFX11-NEXT: v_cndmask_b32_e32 v3, v8, v9, vcc_lo
; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_2)
; GFX11-NEXT: v_cndmask_b32_e32 v0, v10, v11, vcc_lo
; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
-; GFX11-NEXT: v_cndmask_b32_e32 v1, v8, v9, vcc_lo
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_2)
; GFX11-NEXT: v_perm_b32 v0, v0, v3, 0x7060302
+; GFX11-NEXT: v_cndmask_b32_e32 v1, v6, v7, vcc_lo
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
; GFX11-NEXT: v_perm_b32 v1, v1, v2, 0x7060302
; GFX11-NEXT: s_setpc_b64 s[30:31]
%op = sitofp <4 x i16> %x to <4 x bfloat>
@@ -31557,9 +30856,8 @@ define bfloat @v_sitofp_i32_to_bf16(i32 %x) {
; GFX8-NEXT: v_cvt_f32_i32_e32 v0, v0
; GFX8-NEXT: v_bfe_u32 v2, v0, 16, 1
; GFX8-NEXT: v_add_u32_e32 v2, vcc, v2, v0
-; GFX8-NEXT: v_and_b32_e32 v1, 0xff800000, v0
; GFX8-NEXT: v_add_u32_e32 v2, vcc, 0x7fff, v2
-; GFX8-NEXT: v_or_b32_e32 v1, 0x400000, v1
+; GFX8-NEXT: v_or_b32_e32 v1, 0x400000, v0
; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v0, v0
; GFX8-NEXT: v_cndmask_b32_e32 v0, v2, v1, vcc
; GFX8-NEXT: v_lshrrev_b32_e32 v0, 16, v0
@@ -31571,9 +30869,8 @@ define bfloat @v_sitofp_i32_to_bf16(i32 %x) {
; GFX9-NEXT: v_cvt_f32_i32_e32 v0, v0
; GFX9-NEXT: s_movk_i32 s4, 0x7fff
; GFX9-NEXT: v_bfe_u32 v1, v0, 16, 1
-; GFX9-NEXT: v_and_b32_e32 v2, 0xff800000, v0
+; GFX9-NEXT: v_or_b32_e32 v2, 0x400000, v0
; GFX9-NEXT: v_add3_u32 v1, v1, v0, s4
-; GFX9-NEXT: v_or_b32_e32 v2, 0x400000, v2
; GFX9-NEXT: v_cmp_u_f32_e32 vcc, v0, v0
; GFX9-NEXT: v_cndmask_b32_e32 v0, v1, v2, vcc
; GFX9-NEXT: v_lshrrev_b32_e32 v0, 16, v0
@@ -31583,9 +30880,8 @@ define bfloat @v_sitofp_i32_to_bf16(i32 %x) {
; GFX10: ; %bb.0:
; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX10-NEXT: v_cvt_f32_i32_e32 v0, v0
-; GFX10-NEXT: s_mov_b32 s4, 0xff800000
; GFX10-NEXT: v_bfe_u32 v1, v0, 16, 1
-; GFX10-NEXT: v_and_or_b32 v2, v0, s4, 0x400000
+; GFX10-NEXT: v_or_b32_e32 v2, 0x400000, v0
; GFX10-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0
; GFX10-NEXT: v_add3_u32 v1, v1, v0, 0x7fff
; GFX10-NEXT: v_cndmask_b32_e32 v0, v1, v2, vcc_lo
@@ -31596,10 +30892,9 @@ define bfloat @v_sitofp_i32_to_bf16(i32 %x) {
; GFX11: ; %bb.0:
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-NEXT: v_cvt_f32_i32_e32 v0, v0
-; GFX11-NEXT: s_mov_b32 s0, 0xff800000
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
; GFX11-NEXT: v_bfe_u32 v1, v0, 16, 1
-; GFX11-NEXT: v_and_or_b32 v2, v0, s0, 0x400000
+; GFX11-NEXT: v_or_b32_e32 v2, 0x400000, v0
; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0
; GFX11-NEXT: v_add3_u32 v1, v1, v0, 0x7fff
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
@@ -31636,16 +30931,14 @@ define <2 x bfloat> @v_sitofp_v2i32_to_v2bf16(<2 x i32> %x) {
; GFX8-NEXT: v_cvt_f32_i32_e32 v1, v1
; GFX8-NEXT: v_bfe_u32 v3, v0, 16, 1
; GFX8-NEXT: v_add_u32_e32 v3, vcc, v3, v0
-; GFX8-NEXT: v_and_b32_e32 v2, 0xff800000, v0
; GFX8-NEXT: v_add_u32_e32 v3, vcc, 0x7fff, v3
-; GFX8-NEXT: v_or_b32_e32 v2, 0x400000, v2
+; GFX8-NEXT: v_or_b32_e32 v2, 0x400000, v0
; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v0, v0
; GFX8-NEXT: v_cndmask_b32_e32 v0, v3, v2, vcc
; GFX8-NEXT: v_bfe_u32 v3, v1, 16, 1
; GFX8-NEXT: v_add_u32_e32 v3, vcc, v3, v1
-; GFX8-NEXT: v_and_b32_e32 v2, 0xff800000, v1
; GFX8-NEXT: v_add_u32_e32 v3, vcc, 0x7fff, v3
-; GFX8-NEXT: v_or_b32_e32 v2, 0x400000, v2
+; GFX8-NEXT: v_or_b32_e32 v2, 0x400000, v1
; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v1, v1
; GFX8-NEXT: v_cndmask_b32_e32 v1, v3, v2, vcc
; GFX8-NEXT: v_lshrrev_b32_e32 v1, 16, v1
@@ -31659,15 +30952,13 @@ define <2 x bfloat> @v_sitofp_v2i32_to_v2bf16(<2 x i32> %x) {
; GFX9-NEXT: v_cvt_f32_i32_e32 v1, v1
; GFX9-NEXT: s_movk_i32 s4, 0x7fff
; GFX9-NEXT: v_bfe_u32 v2, v0, 16, 1
-; GFX9-NEXT: v_and_b32_e32 v3, 0xff800000, v0
; GFX9-NEXT: v_add3_u32 v2, v2, v0, s4
-; GFX9-NEXT: v_or_b32_e32 v3, 0x400000, v3
+; GFX9-NEXT: v_or_b32_e32 v3, 0x400000, v0
; GFX9-NEXT: v_cmp_u_f32_e32 vcc, v0, v0
; GFX9-NEXT: v_cndmask_b32_e32 v0, v2, v3, vcc
; GFX9-NEXT: v_bfe_u32 v2, v1, 16, 1
-; GFX9-NEXT: v_and_b32_e32 v3, 0xff800000, v1
; GFX9-NEXT: v_add3_u32 v2, v2, v1, s4
-; GFX9-NEXT: v_or_b32_e32 v3, 0x400000, v3
+; GFX9-NEXT: v_or_b32_e32 v3, 0x400000, v1
; GFX9-NEXT: v_cmp_u_f32_e32 vcc, v1, v1
; GFX9-NEXT: v_cndmask_b32_e32 v1, v2, v3, vcc
; GFX9-NEXT: s_mov_b32 s4, 0x7060302
@@ -31679,12 +30970,11 @@ define <2 x bfloat> @v_sitofp_v2i32_to_v2bf16(<2 x i32> %x) {
; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX10-NEXT: v_cvt_f32_i32_e32 v0, v0
; GFX10-NEXT: v_cvt_f32_i32_e32 v1, v1
-; GFX10-NEXT: s_mov_b32 s4, 0xff800000
; GFX10-NEXT: v_bfe_u32 v2, v0, 16, 1
; GFX10-NEXT: v_bfe_u32 v3, v1, 16, 1
-; GFX10-NEXT: v_and_or_b32 v4, v0, s4, 0x400000
+; GFX10-NEXT: v_or_b32_e32 v4, 0x400000, v0
; GFX10-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0
-; GFX10-NEXT: v_and_or_b32 v5, v1, s4, 0x400000
+; GFX10-NEXT: v_or_b32_e32 v5, 0x400000, v1
; GFX10-NEXT: v_add3_u32 v2, v2, v0, 0x7fff
; GFX10-NEXT: v_add3_u32 v3, v3, v1, 0x7fff
; GFX10-NEXT: v_cndmask_b32_e32 v0, v2, v4, vcc_lo
@@ -31698,13 +30988,12 @@ define <2 x bfloat> @v_sitofp_v2i32_to_v2bf16(<2 x i32> %x) {
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-NEXT: v_cvt_f32_i32_e32 v0, v0
; GFX11-NEXT: v_cvt_f32_i32_e32 v1, v1
-; GFX11-NEXT: s_mov_b32 s0, 0xff800000
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
; GFX11-NEXT: v_bfe_u32 v2, v0, 16, 1
; GFX11-NEXT: v_bfe_u32 v3, v1, 16, 1
-; GFX11-NEXT: v_and_or_b32 v4, v0, s0, 0x400000
+; GFX11-NEXT: v_or_b32_e32 v4, 0x400000, v0
; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0
-; GFX11-NEXT: v_and_or_b32 v5, v1, s0, 0x400000
+; GFX11-NEXT: v_or_b32_e32 v5, 0x400000, v1
; GFX11-NEXT: v_add3_u32 v2, v2, v0, 0x7fff
; GFX11-NEXT: v_add3_u32 v3, v3, v1, 0x7fff
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_3)
@@ -31749,23 +31038,20 @@ define <3 x bfloat> @v_sitofp_v3i32_to_v3bf16(<3 x i32> %x) {
; GFX8-NEXT: v_cvt_f32_i32_e32 v1, v1
; GFX8-NEXT: v_bfe_u32 v4, v2, 16, 1
; GFX8-NEXT: v_add_u32_e32 v4, vcc, v4, v2
-; GFX8-NEXT: v_and_b32_e32 v3, 0xff800000, v2
; GFX8-NEXT: v_add_u32_e32 v4, vcc, 0x7fff, v4
-; GFX8-NEXT: v_or_b32_e32 v3, 0x400000, v3
+; GFX8-NEXT: v_or_b32_e32 v3, 0x400000, v2
; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v2, v2
; GFX8-NEXT: v_cndmask_b32_e32 v2, v4, v3, vcc
; GFX8-NEXT: v_bfe_u32 v4, v0, 16, 1
; GFX8-NEXT: v_add_u32_e32 v4, vcc, v4, v0
-; GFX8-NEXT: v_and_b32_e32 v3, 0xff800000, v0
; GFX8-NEXT: v_add_u32_e32 v4, vcc, 0x7fff, v4
-; GFX8-NEXT: v_or_b32_e32 v3, 0x400000, v3
+; GFX8-NEXT: v_or_b32_e32 v3, 0x400000, v0
; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v0, v0
; GFX8-NEXT: v_cndmask_b32_e32 v0, v4, v3, vcc
; GFX8-NEXT: v_bfe_u32 v4, v1, 16, 1
; GFX8-NEXT: v_add_u32_e32 v4, vcc, v4, v1
-; GFX8-NEXT: v_and_b32_e32 v3, 0xff800000, v1
; GFX8-NEXT: v_add_u32_e32 v4, vcc, 0x7fff, v4
-; GFX8-NEXT: v_or_b32_e32 v3, 0x400000, v3
+; GFX8-NEXT: v_or_b32_e32 v3, 0x400000, v1
; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v1, v1
; GFX8-NEXT: v_cndmask_b32_e32 v1, v4, v3, vcc
; GFX8-NEXT: v_lshrrev_b32_e32 v2, 16, v2
@@ -31782,21 +31068,18 @@ define <3 x bfloat> @v_sitofp_v3i32_to_v3bf16(<3 x i32> %x) {
; GFX9-NEXT: s_movk_i32 s4, 0x7fff
; GFX9-NEXT: v_cvt_f32_i32_e32 v1, v1
; GFX9-NEXT: v_bfe_u32 v3, v2, 16, 1
-; GFX9-NEXT: v_and_b32_e32 v4, 0xff800000, v2
; GFX9-NEXT: v_add3_u32 v3, v3, v2, s4
-; GFX9-NEXT: v_or_b32_e32 v4, 0x400000, v4
+; GFX9-NEXT: v_or_b32_e32 v4, 0x400000, v2
; GFX9-NEXT: v_cmp_u_f32_e32 vcc, v2, v2
; GFX9-NEXT: v_cndmask_b32_e32 v2, v3, v4, vcc
; GFX9-NEXT: v_bfe_u32 v3, v0, 16, 1
-; GFX9-NEXT: v_and_b32_e32 v4, 0xff800000, v0
; GFX9-NEXT: v_add3_u32 v3, v3, v0, s4
-; GFX9-NEXT: v_or_b32_e32 v4, 0x400000, v4
+; GFX9-NEXT: v_or_b32_e32 v4, 0x400000, v0
; GFX9-NEXT: v_cmp_u_f32_e32 vcc, v0, v0
; GFX9-NEXT: v_cndmask_b32_e32 v0, v3, v4, vcc
; GFX9-NEXT: v_bfe_u32 v3, v1, 16, 1
-; GFX9-NEXT: v_and_b32_e32 v4, 0xff800000, v1
; GFX9-NEXT: v_add3_u32 v3, v3, v1, s4
-; GFX9-NEXT: v_or_b32_e32 v4, 0x400000, v4
+; GFX9-NEXT: v_or_b32_e32 v4, 0x400000, v1
; GFX9-NEXT: v_cmp_u_f32_e32 vcc, v1, v1
; GFX9-NEXT: v_cndmask_b32_e32 v1, v3, v4, vcc
; GFX9-NEXT: s_mov_b32 s4, 0x7060302
@@ -31809,24 +31092,23 @@ define <3 x bfloat> @v_sitofp_v3i32_to_v3bf16(<3 x i32> %x) {
; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX10-NEXT: v_cvt_f32_i32_e32 v0, v0
; GFX10-NEXT: v_cvt_f32_i32_e32 v1, v1
-; GFX10-NEXT: s_mov_b32 s4, 0xff800000
; GFX10-NEXT: v_cvt_f32_i32_e32 v2, v2
; GFX10-NEXT: v_bfe_u32 v3, v0, 16, 1
; GFX10-NEXT: v_bfe_u32 v5, v1, 16, 1
-; GFX10-NEXT: v_and_or_b32 v7, v0, s4, 0x400000
+; GFX10-NEXT: v_or_b32_e32 v7, 0x400000, v0
; GFX10-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0
-; GFX10-NEXT: v_bfe_u32 v6, v2, 16, 1
+; GFX10-NEXT: v_bfe_u32 v4, v2, 16, 1
; GFX10-NEXT: v_add3_u32 v3, v3, v0, 0x7fff
-; GFX10-NEXT: v_and_or_b32 v8, v1, s4, 0x400000
+; GFX10-NEXT: v_or_b32_e32 v8, 0x400000, v1
; GFX10-NEXT: v_add3_u32 v5, v5, v1, 0x7fff
-; GFX10-NEXT: v_and_or_b32 v4, v2, s4, 0x400000
-; GFX10-NEXT: v_add3_u32 v6, v6, v2, 0x7fff
+; GFX10-NEXT: v_or_b32_e32 v6, 0x400000, v2
+; GFX10-NEXT: v_add3_u32 v4, v4, v2, 0x7fff
; GFX10-NEXT: v_cndmask_b32_e32 v0, v3, v7, vcc_lo
; GFX10-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
; GFX10-NEXT: v_cndmask_b32_e32 v1, v5, v8, vcc_lo
; GFX10-NEXT: v_cmp_u_f32_e32 vcc_lo, v2, v2
; GFX10-NEXT: v_perm_b32 v0, v1, v0, 0x7060302
-; GFX10-NEXT: v_cndmask_b32_e32 v2, v6, v4, vcc_lo
+; GFX10-NEXT: v_cndmask_b32_e32 v2, v4, v6, vcc_lo
; GFX10-NEXT: v_alignbit_b32 v1, s4, v2, 16
; GFX10-NEXT: s_setpc_b64 s[30:31]
%op = sitofp <3 x i32> %x to <3 x bfloat>
@@ -31869,31 +31151,27 @@ define <4 x bfloat> @v_sitofp_v4i32_to_v4bf16(<4 x i32> %x) {
; GFX8-NEXT: s_movk_i32 s4, 0x7fff
; GFX8-NEXT: v_bfe_u32 v5, v2, 16, 1
; GFX8-NEXT: v_add_u32_e32 v5, vcc, v5, v2
-; GFX8-NEXT: v_and_b32_e32 v4, 0xff800000, v2
; GFX8-NEXT: v_add_u32_e32 v5, vcc, 0x7fff, v5
-; GFX8-NEXT: v_or_b32_e32 v4, 0x400000, v4
+; GFX8-NEXT: v_or_b32_e32 v4, 0x400000, v2
; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v2, v2
; GFX8-NEXT: v_cndmask_b32_e32 v2, v5, v4, vcc
; GFX8-NEXT: v_bfe_u32 v5, v3, 16, 1
; GFX8-NEXT: v_add_u32_e32 v5, vcc, v5, v3
-; GFX8-NEXT: v_and_b32_e32 v4, 0xff800000, v3
; GFX8-NEXT: v_add_u32_e32 v5, vcc, s4, v5
-; GFX8-NEXT: v_or_b32_e32 v4, 0x400000, v4
+; GFX8-NEXT: v_or_b32_e32 v4, 0x400000, v3
; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v3, v3
; GFX8-NEXT: v_cndmask_b32_e32 v3, v5, v4, vcc
; GFX8-NEXT: v_bfe_u32 v5, v0, 16, 1
; GFX8-NEXT: v_cvt_f32_i32_e32 v1, v1
; GFX8-NEXT: v_add_u32_e32 v5, vcc, v5, v0
-; GFX8-NEXT: v_and_b32_e32 v4, 0xff800000, v0
-; GFX8-NEXT: v_add_u32_e32 v5, vcc, s4, v5
-; GFX8-NEXT: v_or_b32_e32 v4, 0x400000, v4
+; GFX8-NEXT: v_add_u32_e32 v5, vcc, 0x7fff, v5
+; GFX8-NEXT: v_or_b32_e32 v4, 0x400000, v0
; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v0, v0
; GFX8-NEXT: v_cndmask_b32_e32 v0, v5, v4, vcc
; GFX8-NEXT: v_bfe_u32 v5, v1, 16, 1
; GFX8-NEXT: v_add_u32_e32 v5, vcc, v5, v1
-; GFX8-NEXT: v_and_b32_e32 v4, 0xff800000, v1
; GFX8-NEXT: v_add_u32_e32 v5, vcc, 0x7fff, v5
-; GFX8-NEXT: v_or_b32_e32 v4, 0x400000, v4
+; GFX8-NEXT: v_or_b32_e32 v4, 0x400000, v1
; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v1, v1
; GFX8-NEXT: v_cndmask_b32_e32 v1, v5, v4, vcc
; GFX8-NEXT: v_lshrrev_b32_e32 v3, 16, v3
@@ -31910,28 +31188,24 @@ define <4 x bfloat> @v_sitofp_v4i32_to_v4bf16(<4 x i32> %x) {
; GFX9-NEXT: s_movk_i32 s4, 0x7fff
; GFX9-NEXT: v_cvt_f32_i32_e32 v0, v0
; GFX9-NEXT: v_bfe_u32 v4, v2, 16, 1
-; GFX9-NEXT: v_and_b32_e32 v5, 0xff800000, v2
; GFX9-NEXT: v_add3_u32 v4, v4, v2, s4
-; GFX9-NEXT: v_or_b32_e32 v5, 0x400000, v5
+; GFX9-NEXT: v_or_b32_e32 v5, 0x400000, v2
; GFX9-NEXT: v_cmp_u_f32_e32 vcc, v2, v2
; GFX9-NEXT: v_cndmask_b32_e32 v2, v4, v5, vcc
; GFX9-NEXT: v_bfe_u32 v4, v3, 16, 1
-; GFX9-NEXT: v_and_b32_e32 v5, 0xff800000, v3
; GFX9-NEXT: v_cvt_f32_i32_e32 v1, v1
; GFX9-NEXT: v_add3_u32 v4, v4, v3, s4
-; GFX9-NEXT: v_or_b32_e32 v5, 0x400000, v5
+; GFX9-NEXT: v_or_b32_e32 v5, 0x400000, v3
; GFX9-NEXT: v_cmp_u_f32_e32 vcc, v3, v3
; GFX9-NEXT: v_cndmask_b32_e32 v3, v4, v5, vcc
; GFX9-NEXT: v_bfe_u32 v4, v0, 16, 1
-; GFX9-NEXT: v_and_b32_e32 v5, 0xff800000, v0
; GFX9-NEXT: v_add3_u32 v4, v4, v0, s4
-; GFX9-NEXT: v_or_b32_e32 v5, 0x400000, v5
+; GFX9-NEXT: v_or_b32_e32 v5, 0x400000, v0
; GFX9-NEXT: v_cmp_u_f32_e32 vcc, v0, v0
; GFX9-NEXT: v_cndmask_b32_e32 v0, v4, v5, vcc
; GFX9-NEXT: v_bfe_u32 v4, v1, 16, 1
-; GFX9-NEXT: v_and_b32_e32 v5, 0xff800000, v1
; GFX9-NEXT: v_add3_u32 v4, v4, v1, s4
-; GFX9-NEXT: v_or_b32_e32 v5, 0x400000, v5
+; GFX9-NEXT: v_or_b32_e32 v5, 0x400000, v1
; GFX9-NEXT: v_cmp_u_f32_e32 vcc, v1, v1
; GFX9-NEXT: v_cndmask_b32_e32 v1, v4, v5, vcc
; GFX9-NEXT: s_mov_b32 s4, 0x7060302
@@ -31944,30 +31218,29 @@ define <4 x bfloat> @v_sitofp_v4i32_to_v4bf16(<4 x i32> %x) {
; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX10-NEXT: v_cvt_f32_i32_e32 v2, v2
; GFX10-NEXT: v_cvt_f32_i32_e32 v0, v0
-; GFX10-NEXT: s_mov_b32 s4, 0xff800000
; GFX10-NEXT: v_cvt_f32_i32_e32 v1, v1
; GFX10-NEXT: v_cvt_f32_i32_e32 v3, v3
-; GFX10-NEXT: v_bfe_u32 v5, v2, 16, 1
-; GFX10-NEXT: v_and_or_b32 v4, v2, s4, 0x400000
-; GFX10-NEXT: v_bfe_u32 v8, v0, 16, 1
+; GFX10-NEXT: v_bfe_u32 v4, v2, 16, 1
+; GFX10-NEXT: v_or_b32_e32 v5, 0x400000, v2
+; GFX10-NEXT: v_bfe_u32 v7, v0, 16, 1
; GFX10-NEXT: v_cmp_u_f32_e32 vcc_lo, v2, v2
-; GFX10-NEXT: v_and_or_b32 v9, v0, s4, 0x400000
-; GFX10-NEXT: v_add3_u32 v5, v5, v2, 0x7fff
-; GFX10-NEXT: v_bfe_u32 v10, v1, 16, 1
-; GFX10-NEXT: v_add3_u32 v8, v8, v0, 0x7fff
-; GFX10-NEXT: v_bfe_u32 v7, v3, 16, 1
-; GFX10-NEXT: v_and_or_b32 v11, v1, s4, 0x400000
-; GFX10-NEXT: v_cndmask_b32_e32 v2, v5, v4, vcc_lo
+; GFX10-NEXT: v_or_b32_e32 v8, 0x400000, v0
+; GFX10-NEXT: v_add3_u32 v4, v4, v2, 0x7fff
+; GFX10-NEXT: v_bfe_u32 v9, v1, 16, 1
+; GFX10-NEXT: v_add3_u32 v7, v7, v0, 0x7fff
+; GFX10-NEXT: v_bfe_u32 v6, v3, 16, 1
+; GFX10-NEXT: v_or_b32_e32 v10, 0x400000, v1
+; GFX10-NEXT: v_cndmask_b32_e32 v2, v4, v5, vcc_lo
; GFX10-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0
-; GFX10-NEXT: v_add3_u32 v4, v10, v1, 0x7fff
-; GFX10-NEXT: v_and_or_b32 v6, v3, s4, 0x400000
-; GFX10-NEXT: v_add3_u32 v5, v7, v3, 0x7fff
-; GFX10-NEXT: v_cndmask_b32_e32 v0, v8, v9, vcc_lo
+; GFX10-NEXT: v_add3_u32 v9, v9, v1, 0x7fff
+; GFX10-NEXT: v_add3_u32 v6, v6, v3, 0x7fff
+; GFX10-NEXT: v_or_b32_e32 v4, 0x400000, v3
+; GFX10-NEXT: v_cndmask_b32_e32 v0, v7, v8, vcc_lo
; GFX10-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
-; GFX10-NEXT: v_cndmask_b32_e32 v1, v4, v11, vcc_lo
+; GFX10-NEXT: v_cndmask_b32_e32 v1, v9, v10, vcc_lo
; GFX10-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3
; GFX10-NEXT: v_perm_b32 v0, v1, v0, 0x7060302
-; GFX10-NEXT: v_cndmask_b32_e32 v3, v5, v6, vcc_lo
+; GFX10-NEXT: v_cndmask_b32_e32 v3, v6, v4, vcc_lo
; GFX10-NEXT: v_perm_b32 v1, v3, v2, 0x7060302
; GFX10-NEXT: s_setpc_b64 s[30:31]
;
@@ -31976,32 +31249,32 @@ define <4 x bfloat> @v_sitofp_v4i32_to_v4bf16(<4 x i32> %x) {
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-NEXT: v_cvt_f32_i32_e32 v2, v2
; GFX11-NEXT: v_cvt_f32_i32_e32 v0, v0
-; GFX11-NEXT: s_mov_b32 s0, 0xff800000
-; GFX11-NEXT: v_cvt_f32_i32_e32 v3, v3
; GFX11-NEXT: v_cvt_f32_i32_e32 v1, v1
-; GFX11-NEXT: v_bfe_u32 v5, v2, 16, 1
-; GFX11-NEXT: v_and_or_b32 v4, v2, s0, 0x400000
-; GFX11-NEXT: v_bfe_u32 v8, v0, 16, 1
+; GFX11-NEXT: v_cvt_f32_i32_e32 v3, v3
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4)
+; GFX11-NEXT: v_bfe_u32 v4, v2, 16, 1
+; GFX11-NEXT: v_or_b32_e32 v5, 0x400000, v2
+; GFX11-NEXT: v_bfe_u32 v7, v0, 16, 1
+; GFX11-NEXT: v_bfe_u32 v9, v1, 16, 1
; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v2, v2
-; GFX11-NEXT: v_bfe_u32 v7, v3, 16, 1
-; GFX11-NEXT: v_add3_u32 v5, v5, v2, 0x7fff
-; GFX11-NEXT: v_and_or_b32 v9, v0, s0, 0x400000
-; GFX11-NEXT: v_bfe_u32 v10, v1, 16, 1
-; GFX11-NEXT: v_and_or_b32 v11, v1, s0, 0x400000
-; GFX11-NEXT: v_add3_u32 v8, v8, v0, 0x7fff
-; GFX11-NEXT: v_cndmask_b32_e32 v2, v5, v4, vcc_lo
+; GFX11-NEXT: v_add3_u32 v4, v4, v2, 0x7fff
+; GFX11-NEXT: v_or_b32_e32 v8, 0x400000, v0
+; GFX11-NEXT: v_or_b32_e32 v10, 0x400000, v1
+; GFX11-NEXT: v_add3_u32 v7, v7, v0, 0x7fff
+; GFX11-NEXT: v_add3_u32 v9, v9, v1, 0x7fff
+; GFX11-NEXT: v_cndmask_b32_e32 v2, v4, v5, vcc_lo
; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0
-; GFX11-NEXT: v_and_or_b32 v6, v3, s0, 0x400000
-; GFX11-NEXT: v_add3_u32 v4, v10, v1, 0x7fff
-; GFX11-NEXT: v_add3_u32 v5, v7, v3, 0x7fff
-; GFX11-NEXT: v_cndmask_b32_e32 v0, v8, v9, vcc_lo
+; GFX11-NEXT: v_bfe_u32 v6, v3, 16, 1
+; GFX11-NEXT: v_or_b32_e32 v4, 0x400000, v3
+; GFX11-NEXT: v_cndmask_b32_e32 v0, v7, v8, vcc_lo
; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_2) | instid1(VALU_DEP_3)
-; GFX11-NEXT: v_cndmask_b32_e32 v1, v4, v11, vcc_lo
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_2) | instid1(VALU_DEP_2)
+; GFX11-NEXT: v_add3_u32 v6, v6, v3, 0x7fff
+; GFX11-NEXT: v_cndmask_b32_e32 v1, v9, v10, vcc_lo
; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3
-; GFX11-NEXT: v_cndmask_b32_e32 v3, v5, v6, vcc_lo
; GFX11-NEXT: v_perm_b32 v0, v1, v0, 0x7060302
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2)
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-NEXT: v_cndmask_b32_e32 v3, v6, v4, vcc_lo
; GFX11-NEXT: v_perm_b32 v1, v3, v2, 0x7060302
; GFX11-NEXT: s_setpc_b64 s[30:31]
%op = sitofp <4 x i32> %x to <4 x bfloat>
@@ -32063,8 +31336,7 @@ define bfloat @v_sitofp_i64_to_bf16(i64 %x) {
; GFX8-NEXT: v_bfe_u32 v1, v0, 16, 1
; GFX8-NEXT: v_add_u32_e32 v1, vcc, v1, v0
; GFX8-NEXT: v_add_u32_e32 v1, vcc, 0x7fff, v1
-; GFX8-NEXT: v_and_b32_e32 v2, 0xff800000, v0
-; GFX8-NEXT: v_or_b32_e32 v2, 0x400000, v2
+; GFX8-NEXT: v_or_b32_e32 v2, 0x400000, v0
; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v0, v0
; GFX8-NEXT: v_cndmask_b32_e32 v0, v1, v2, vcc
; GFX8-NEXT: v_lshrrev_b32_e32 v0, 16, v0
@@ -32087,9 +31359,8 @@ define bfloat @v_sitofp_i64_to_bf16(i64 %x) {
; GFX9-NEXT: v_sub_u32_e32 v1, 32, v2
; GFX9-NEXT: v_ldexp_f32 v0, v0, v1
; GFX9-NEXT: v_bfe_u32 v1, v0, 16, 1
-; GFX9-NEXT: v_and_b32_e32 v2, 0xff800000, v0
; GFX9-NEXT: v_add3_u32 v1, v1, v0, s4
-; GFX9-NEXT: v_or_b32_e32 v2, 0x400000, v2
+; GFX9-NEXT: v_or_b32_e32 v2, 0x400000, v0
; GFX9-NEXT: v_cmp_u_f32_e32 vcc, v0, v0
; GFX9-NEXT: v_cndmask_b32_e32 v0, v1, v2, vcc
; GFX9-NEXT: v_lshrrev_b32_e32 v0, 16, v0
@@ -32100,7 +31371,6 @@ define bfloat @v_sitofp_i64_to_bf16(i64 %x) {
; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX10-NEXT: v_xor_b32_e32 v2, v0, v1
; GFX10-NEXT: v_ffbh_i32_e32 v3, v1
-; GFX10-NEXT: s_mov_b32 s4, 0xff800000
; GFX10-NEXT: v_ashrrev_i32_e32 v2, 31, v2
; GFX10-NEXT: v_add_nc_u32_e32 v3, -1, v3
; GFX10-NEXT: v_add_nc_u32_e32 v2, 32, v2
@@ -32112,7 +31382,7 @@ define bfloat @v_sitofp_i64_to_bf16(i64 %x) {
; GFX10-NEXT: v_cvt_f32_i32_e32 v0, v0
; GFX10-NEXT: v_ldexp_f32 v0, v0, v1
; GFX10-NEXT: v_bfe_u32 v1, v0, 16, 1
-; GFX10-NEXT: v_and_or_b32 v2, v0, s4, 0x400000
+; GFX10-NEXT: v_or_b32_e32 v2, 0x400000, v0
; GFX10-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0
; GFX10-NEXT: v_add3_u32 v1, v1, v0, 0x7fff
; GFX10-NEXT: v_cndmask_b32_e32 v0, v1, v2, vcc_lo
@@ -32124,7 +31394,6 @@ define bfloat @v_sitofp_i64_to_bf16(i64 %x) {
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-NEXT: v_xor_b32_e32 v2, v0, v1
; GFX11-NEXT: v_cls_i32_e32 v3, v1
-; GFX11-NEXT: s_mov_b32 s0, 0xff800000
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
; GFX11-NEXT: v_ashrrev_i32_e32 v2, 31, v2
; GFX11-NEXT: v_add_nc_u32_e32 v3, -1, v3
@@ -32141,7 +31410,7 @@ define bfloat @v_sitofp_i64_to_bf16(i64 %x) {
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
; GFX11-NEXT: v_ldexp_f32 v0, v0, v1
; GFX11-NEXT: v_bfe_u32 v1, v0, 16, 1
-; GFX11-NEXT: v_and_or_b32 v2, v0, s0, 0x400000
+; GFX11-NEXT: v_or_b32_e32 v2, 0x400000, v0
; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1)
; GFX11-NEXT: v_add3_u32 v1, v1, v0, 0x7fff
@@ -32240,22 +31509,20 @@ define <2 x bfloat> @v_sitofp_v2i64_to_v2bf16(<2 x i64> %x) {
; GFX8-NEXT: v_ashrrev_i32_e32 v1, 31, v1
; GFX8-NEXT: v_add_u32_e32 v0, vcc, -1, v0
; GFX8-NEXT: v_add_u32_e32 v1, vcc, 32, v1
-; GFX8-NEXT: v_min_u32_e32 v7, v0, v1
-; GFX8-NEXT: v_lshlrev_b64 v[0:1], v7, v[2:3]
-; GFX8-NEXT: v_and_b32_e32 v6, 0xff800000, v4
+; GFX8-NEXT: v_min_u32_e32 v6, v0, v1
+; GFX8-NEXT: v_lshlrev_b64 v[0:1], v6, v[2:3]
+; GFX8-NEXT: v_or_b32_e32 v2, 0x400000, v4
; GFX8-NEXT: v_min_u32_e32 v0, 1, v0
; GFX8-NEXT: v_or_b32_e32 v0, v1, v0
; GFX8-NEXT: v_cvt_f32_i32_e32 v0, v0
-; GFX8-NEXT: v_or_b32_e32 v2, 0x400000, v6
; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v4, v4
; GFX8-NEXT: v_cndmask_b32_e32 v1, v5, v2, vcc
-; GFX8-NEXT: v_sub_u32_e32 v2, vcc, 32, v7
+; GFX8-NEXT: v_sub_u32_e32 v2, vcc, 32, v6
; GFX8-NEXT: v_ldexp_f32 v0, v0, v2
; GFX8-NEXT: v_bfe_u32 v2, v0, 16, 1
; GFX8-NEXT: v_add_u32_e32 v2, vcc, v2, v0
; GFX8-NEXT: v_add_u32_e32 v2, vcc, 0x7fff, v2
-; GFX8-NEXT: v_and_b32_e32 v3, 0xff800000, v0
-; GFX8-NEXT: v_or_b32_e32 v3, 0x400000, v3
+; GFX8-NEXT: v_or_b32_e32 v3, 0x400000, v0
; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v0, v0
; GFX8-NEXT: v_cndmask_b32_e32 v0, v2, v3, vcc
; GFX8-NEXT: v_lshrrev_b32_e32 v0, 16, v0
@@ -32285,21 +31552,19 @@ define <2 x bfloat> @v_sitofp_v2i64_to_v2bf16(<2 x i64> %x) {
; GFX9-NEXT: v_ashrrev_i32_e32 v1, 31, v1
; GFX9-NEXT: v_add_u32_e32 v0, -1, v0
; GFX9-NEXT: v_add_u32_e32 v1, 32, v1
-; GFX9-NEXT: v_min_u32_e32 v7, v0, v1
-; GFX9-NEXT: v_lshlrev_b64 v[0:1], v7, v[2:3]
-; GFX9-NEXT: v_and_b32_e32 v6, 0xff800000, v4
+; GFX9-NEXT: v_min_u32_e32 v6, v0, v1
+; GFX9-NEXT: v_lshlrev_b64 v[0:1], v6, v[2:3]
+; GFX9-NEXT: v_or_b32_e32 v2, 0x400000, v4
; GFX9-NEXT: v_min_u32_e32 v0, 1, v0
; GFX9-NEXT: v_or_b32_e32 v0, v1, v0
; GFX9-NEXT: v_cvt_f32_i32_e32 v0, v0
-; GFX9-NEXT: v_or_b32_e32 v2, 0x400000, v6
; GFX9-NEXT: v_cmp_u_f32_e32 vcc, v4, v4
; GFX9-NEXT: v_cndmask_b32_e32 v1, v5, v2, vcc
-; GFX9-NEXT: v_sub_u32_e32 v2, 32, v7
+; GFX9-NEXT: v_sub_u32_e32 v2, 32, v6
; GFX9-NEXT: v_ldexp_f32 v0, v0, v2
; GFX9-NEXT: v_bfe_u32 v2, v0, 16, 1
-; GFX9-NEXT: v_and_b32_e32 v3, 0xff800000, v0
; GFX9-NEXT: v_add3_u32 v2, v2, v0, s4
-; GFX9-NEXT: v_or_b32_e32 v3, 0x400000, v3
+; GFX9-NEXT: v_or_b32_e32 v3, 0x400000, v0
; GFX9-NEXT: v_cmp_u_f32_e32 vcc, v0, v0
; GFX9-NEXT: v_cndmask_b32_e32 v0, v2, v3, vcc
; GFX9-NEXT: s_mov_b32 s4, 0x7060302
@@ -32313,7 +31578,6 @@ define <2 x bfloat> @v_sitofp_v2i64_to_v2bf16(<2 x i64> %x) {
; GFX10-NEXT: v_xor_b32_e32 v5, v2, v3
; GFX10-NEXT: v_ffbh_i32_e32 v6, v1
; GFX10-NEXT: v_ffbh_i32_e32 v7, v3
-; GFX10-NEXT: s_mov_b32 s4, 0xff800000
; GFX10-NEXT: v_ashrrev_i32_e32 v4, 31, v4
; GFX10-NEXT: v_ashrrev_i32_e32 v5, 31, v5
; GFX10-NEXT: v_add_nc_u32_e32 v6, -1, v6
@@ -32336,9 +31600,9 @@ define <2 x bfloat> @v_sitofp_v2i64_to_v2bf16(<2 x i64> %x) {
; GFX10-NEXT: v_ldexp_f32 v1, v1, v3
; GFX10-NEXT: v_bfe_u32 v2, v0, 16, 1
; GFX10-NEXT: v_bfe_u32 v3, v1, 16, 1
-; GFX10-NEXT: v_and_or_b32 v4, v0, s4, 0x400000
+; GFX10-NEXT: v_or_b32_e32 v4, 0x400000, v0
; GFX10-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0
-; GFX10-NEXT: v_and_or_b32 v5, v1, s4, 0x400000
+; GFX10-NEXT: v_or_b32_e32 v5, 0x400000, v1
; GFX10-NEXT: v_add3_u32 v2, v2, v0, 0x7fff
; GFX10-NEXT: v_add3_u32 v3, v3, v1, 0x7fff
; GFX10-NEXT: v_cndmask_b32_e32 v0, v2, v4, vcc_lo
@@ -32354,9 +31618,10 @@ define <2 x bfloat> @v_sitofp_v2i64_to_v2bf16(<2 x i64> %x) {
; GFX11-NEXT: v_xor_b32_e32 v5, v2, v3
; GFX11-NEXT: v_cls_i32_e32 v6, v1
; GFX11-NEXT: v_cls_i32_e32 v7, v3
-; GFX11-NEXT: s_mov_b32 s0, 0xff800000
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
; GFX11-NEXT: v_ashrrev_i32_e32 v4, 31, v4
; GFX11-NEXT: v_ashrrev_i32_e32 v5, 31, v5
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
; GFX11-NEXT: v_add_nc_u32_e32 v6, -1, v6
; GFX11-NEXT: v_add_nc_u32_e32 v7, -1, v7
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
@@ -32385,9 +31650,9 @@ define <2 x bfloat> @v_sitofp_v2i64_to_v2bf16(<2 x i64> %x) {
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
; GFX11-NEXT: v_bfe_u32 v2, v0, 16, 1
; GFX11-NEXT: v_bfe_u32 v3, v1, 16, 1
-; GFX11-NEXT: v_and_or_b32 v4, v0, s0, 0x400000
+; GFX11-NEXT: v_or_b32_e32 v4, 0x400000, v0
; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0
-; GFX11-NEXT: v_and_or_b32 v5, v1, s0, 0x400000
+; GFX11-NEXT: v_or_b32_e32 v5, 0x400000, v1
; GFX11-NEXT: v_add3_u32 v2, v2, v0, 0x7fff
; GFX11-NEXT: v_add3_u32 v3, v3, v1, 0x7fff
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_3)
@@ -32515,23 +31780,22 @@ define <3 x bfloat> @v_sitofp_v3i64_to_v3bf16(<3 x i64> %x) {
; GFX8-NEXT: s_movk_i32 s4, 0x7fff
; GFX8-NEXT: v_min_u32_e32 v7, v7, v8
; GFX8-NEXT: v_add_u32_e32 v5, vcc, s4, v5
-; GFX8-NEXT: v_and_b32_e32 v6, 0xff800000, v4
; GFX8-NEXT: v_lshlrev_b64 v[0:1], v7, v[0:1]
-; GFX8-NEXT: v_or_b32_e32 v6, 0x400000, v6
+; GFX8-NEXT: v_or_b32_e32 v6, 0x400000, v4
; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v4, v4
; GFX8-NEXT: v_cndmask_b32_e32 v4, v5, v6, vcc
; GFX8-NEXT: v_min_u32_e32 v0, 1, v0
+; GFX8-NEXT: v_xor_b32_e32 v6, v2, v3
; GFX8-NEXT: v_or_b32_e32 v0, v1, v0
+; GFX8-NEXT: v_ffbh_i32_e32 v5, v3
+; GFX8-NEXT: v_ashrrev_i32_e32 v6, 31, v6
+; GFX8-NEXT: v_cvt_f32_i32_e32 v0, v0
+; GFX8-NEXT: v_add_u32_e32 v5, vcc, -1, v5
+; GFX8-NEXT: v_add_u32_e32 v6, vcc, 32, v6
+; GFX8-NEXT: v_min_u32_e32 v5, v5, v6
+; GFX8-NEXT: v_lshlrev_b64 v[2:3], v5, v[2:3]
; GFX8-NEXT: v_lshrrev_b32_e32 v1, 16, v4
; GFX8-NEXT: v_sub_u32_e32 v4, vcc, 32, v7
-; GFX8-NEXT: v_xor_b32_e32 v7, v2, v3
-; GFX8-NEXT: v_ffbh_i32_e32 v6, v3
-; GFX8-NEXT: v_ashrrev_i32_e32 v7, 31, v7
-; GFX8-NEXT: v_cvt_f32_i32_e32 v0, v0
-; GFX8-NEXT: v_add_u32_e32 v6, vcc, -1, v6
-; GFX8-NEXT: v_add_u32_e32 v7, vcc, 32, v7
-; GFX8-NEXT: v_min_u32_e32 v6, v6, v7
-; GFX8-NEXT: v_lshlrev_b64 v[2:3], v6, v[2:3]
; GFX8-NEXT: v_ldexp_f32 v0, v0, v4
; GFX8-NEXT: v_min_u32_e32 v2, 1, v2
; GFX8-NEXT: v_bfe_u32 v4, v0, 16, 1
@@ -32539,17 +31803,15 @@ define <3 x bfloat> @v_sitofp_v3i64_to_v3bf16(<3 x i64> %x) {
; GFX8-NEXT: v_add_u32_e32 v4, vcc, v4, v0
; GFX8-NEXT: v_cvt_f32_i32_e32 v2, v2
; GFX8-NEXT: v_add_u32_e32 v4, vcc, s4, v4
-; GFX8-NEXT: v_and_b32_e32 v5, 0xff800000, v0
-; GFX8-NEXT: v_or_b32_e32 v5, 0x400000, v5
+; GFX8-NEXT: v_or_b32_e32 v6, 0x400000, v0
; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v0, v0
-; GFX8-NEXT: v_cndmask_b32_e32 v0, v4, v5, vcc
-; GFX8-NEXT: v_sub_u32_e32 v3, vcc, 32, v6
+; GFX8-NEXT: v_cndmask_b32_e32 v0, v4, v6, vcc
+; GFX8-NEXT: v_sub_u32_e32 v3, vcc, 32, v5
; GFX8-NEXT: v_ldexp_f32 v2, v2, v3
; GFX8-NEXT: v_bfe_u32 v3, v2, 16, 1
; GFX8-NEXT: v_add_u32_e32 v3, vcc, v3, v2
; GFX8-NEXT: v_add_u32_e32 v3, vcc, 0x7fff, v3
-; GFX8-NEXT: v_and_b32_e32 v4, 0xff800000, v2
-; GFX8-NEXT: v_or_b32_e32 v4, 0x400000, v4
+; GFX8-NEXT: v_or_b32_e32 v4, 0x400000, v2
; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v2, v2
; GFX8-NEXT: v_cndmask_b32_e32 v2, v3, v4, vcc
; GFX8-NEXT: v_lshrrev_b32_e32 v2, 16, v2
@@ -32565,30 +31827,29 @@ define <3 x bfloat> @v_sitofp_v3i64_to_v3bf16(<3 x i64> %x) {
; GFX9-NEXT: v_add_u32_e32 v6, -1, v6
; GFX9-NEXT: v_add_u32_e32 v7, 32, v7
; GFX9-NEXT: v_min_u32_e32 v6, v6, v7
-; GFX9-NEXT: v_xor_b32_e32 v8, v0, v1
; GFX9-NEXT: v_lshlrev_b64 v[4:5], v6, v[4:5]
-; GFX9-NEXT: v_ffbh_i32_e32 v7, v1
-; GFX9-NEXT: v_ashrrev_i32_e32 v8, 31, v8
-; GFX9-NEXT: v_add_u32_e32 v7, -1, v7
-; GFX9-NEXT: v_add_u32_e32 v8, 32, v8
+; GFX9-NEXT: v_xor_b32_e32 v7, v0, v1
; GFX9-NEXT: v_min_u32_e32 v4, 1, v4
-; GFX9-NEXT: v_min_u32_e32 v7, v7, v8
; GFX9-NEXT: v_or_b32_e32 v4, v5, v4
-; GFX9-NEXT: v_lshlrev_b64 v[0:1], v7, v[0:1]
+; GFX9-NEXT: v_sub_u32_e32 v5, 32, v6
+; GFX9-NEXT: v_ffbh_i32_e32 v6, v1
+; GFX9-NEXT: v_ashrrev_i32_e32 v7, 31, v7
+; GFX9-NEXT: v_add_u32_e32 v6, -1, v6
+; GFX9-NEXT: v_add_u32_e32 v7, 32, v7
+; GFX9-NEXT: v_min_u32_e32 v6, v6, v7
+; GFX9-NEXT: v_lshlrev_b64 v[0:1], v6, v[0:1]
; GFX9-NEXT: v_cvt_f32_i32_e32 v4, v4
; GFX9-NEXT: v_min_u32_e32 v0, 1, v0
; GFX9-NEXT: v_or_b32_e32 v0, v1, v0
-; GFX9-NEXT: v_sub_u32_e32 v5, 32, v6
; GFX9-NEXT: v_cvt_f32_i32_e32 v0, v0
; GFX9-NEXT: v_ldexp_f32 v4, v4, v5
; GFX9-NEXT: v_bfe_u32 v5, v4, 16, 1
; GFX9-NEXT: s_movk_i32 s4, 0x7fff
-; GFX9-NEXT: v_and_b32_e32 v6, 0xff800000, v4
; GFX9-NEXT: v_add3_u32 v5, v5, v4, s4
-; GFX9-NEXT: v_or_b32_e32 v6, 0x400000, v6
+; GFX9-NEXT: v_or_b32_e32 v7, 0x400000, v4
; GFX9-NEXT: v_cmp_u_f32_e32 vcc, v4, v4
-; GFX9-NEXT: v_sub_u32_e32 v1, 32, v7
-; GFX9-NEXT: v_cndmask_b32_e32 v4, v5, v6, vcc
+; GFX9-NEXT: v_sub_u32_e32 v1, 32, v6
+; GFX9-NEXT: v_cndmask_b32_e32 v4, v5, v7, vcc
; GFX9-NEXT: v_ldexp_f32 v5, v0, v1
; GFX9-NEXT: v_bfe_u32 v0, v5, 16, 1
; GFX9-NEXT: v_xor_b32_e32 v1, v2, v3
@@ -32597,21 +31858,19 @@ define <3 x bfloat> @v_sitofp_v3i64_to_v3bf16(<3 x i64> %x) {
; GFX9-NEXT: v_ashrrev_i32_e32 v1, 31, v1
; GFX9-NEXT: v_add_u32_e32 v0, -1, v0
; GFX9-NEXT: v_add_u32_e32 v1, 32, v1
-; GFX9-NEXT: v_min_u32_e32 v8, v0, v1
-; GFX9-NEXT: v_lshlrev_b64 v[0:1], v8, v[2:3]
-; GFX9-NEXT: v_and_b32_e32 v7, 0xff800000, v5
+; GFX9-NEXT: v_min_u32_e32 v7, v0, v1
+; GFX9-NEXT: v_lshlrev_b64 v[0:1], v7, v[2:3]
+; GFX9-NEXT: v_or_b32_e32 v2, 0x400000, v5
; GFX9-NEXT: v_min_u32_e32 v0, 1, v0
; GFX9-NEXT: v_or_b32_e32 v0, v1, v0
; GFX9-NEXT: v_cvt_f32_i32_e32 v0, v0
-; GFX9-NEXT: v_or_b32_e32 v2, 0x400000, v7
; GFX9-NEXT: v_cmp_u_f32_e32 vcc, v5, v5
; GFX9-NEXT: v_cndmask_b32_e32 v1, v6, v2, vcc
-; GFX9-NEXT: v_sub_u32_e32 v2, 32, v8
+; GFX9-NEXT: v_sub_u32_e32 v2, 32, v7
; GFX9-NEXT: v_ldexp_f32 v0, v0, v2
; GFX9-NEXT: v_bfe_u32 v2, v0, 16, 1
-; GFX9-NEXT: v_and_b32_e32 v3, 0xff800000, v0
; GFX9-NEXT: v_add3_u32 v2, v2, v0, s4
-; GFX9-NEXT: v_or_b32_e32 v3, 0x400000, v3
+; GFX9-NEXT: v_or_b32_e32 v3, 0x400000, v0
; GFX9-NEXT: v_cmp_u_f32_e32 vcc, v0, v0
; GFX9-NEXT: v_cndmask_b32_e32 v0, v2, v3, vcc
; GFX9-NEXT: s_mov_b32 s4, 0x7060302
@@ -32638,7 +31897,6 @@ define <3 x bfloat> @v_sitofp_v3i64_to_v3bf16(<3 x i64> %x) {
; GFX10-NEXT: v_add_nc_u32_e32 v11, -1, v11
; GFX10-NEXT: v_add_nc_u32_e32 v9, 32, v9
; GFX10-NEXT: v_min_u32_e32 v8, v10, v8
-; GFX10-NEXT: s_mov_b32 s4, 0xff800000
; GFX10-NEXT: v_min_u32_e32 v6, v6, v7
; GFX10-NEXT: v_min_u32_e32 v7, v11, v9
; GFX10-NEXT: v_lshlrev_b64 v[0:1], v8, v[0:1]
@@ -32660,13 +31918,13 @@ define <3 x bfloat> @v_sitofp_v3i64_to_v3bf16(<3 x i64> %x) {
; GFX10-NEXT: v_ldexp_f32 v1, v1, v6
; GFX10-NEXT: v_ldexp_f32 v2, v2, v4
; GFX10-NEXT: v_bfe_u32 v3, v0, 16, 1
-; GFX10-NEXT: v_and_or_b32 v7, v0, s4, 0x400000
+; GFX10-NEXT: v_or_b32_e32 v7, 0x400000, v0
; GFX10-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0
; GFX10-NEXT: v_bfe_u32 v5, v2, 16, 1
; GFX10-NEXT: v_bfe_u32 v4, v1, 16, 1
; GFX10-NEXT: v_add3_u32 v3, v3, v0, 0x7fff
-; GFX10-NEXT: v_and_or_b32 v8, v2, s4, 0x400000
-; GFX10-NEXT: v_and_or_b32 v6, v1, s4, 0x400000
+; GFX10-NEXT: v_or_b32_e32 v8, 0x400000, v2
+; GFX10-NEXT: v_or_b32_e32 v6, 0x400000, v1
; GFX10-NEXT: v_add3_u32 v5, v5, v2, 0x7fff
; GFX10-NEXT: v_add3_u32 v4, v4, v1, 0x7fff
; GFX10-NEXT: v_cndmask_b32_e32 v0, v3, v7, vcc_lo
@@ -32820,10 +32078,9 @@ define <4 x bfloat> @v_sitofp_v4i64_to_v4bf16(<4 x i64> %x) {
; GFX8-NEXT: v_ashrrev_i32_e32 v5, 31, v5
; GFX8-NEXT: v_add_u32_e32 v4, vcc, -1, v4
; GFX8-NEXT: v_add_u32_e32 v5, vcc, 32, v5
-; GFX8-NEXT: v_min_u32_e32 v11, v4, v5
-; GFX8-NEXT: v_lshlrev_b64 v[4:5], v11, v[6:7]
-; GFX8-NEXT: v_and_b32_e32 v10, 0xff800000, v8
-; GFX8-NEXT: v_or_b32_e32 v6, 0x400000, v10
+; GFX8-NEXT: v_min_u32_e32 v10, v4, v5
+; GFX8-NEXT: v_lshlrev_b64 v[4:5], v10, v[6:7]
+; GFX8-NEXT: v_or_b32_e32 v6, 0x400000, v8
; GFX8-NEXT: v_min_u32_e32 v4, 1, v4
; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v8, v8
; GFX8-NEXT: v_or_b32_e32 v4, v5, v4
@@ -32836,7 +32093,7 @@ define <4 x bfloat> @v_sitofp_v4i64_to_v4bf16(<4 x i64> %x) {
; GFX8-NEXT: v_add_u32_e32 v9, vcc, 32, v9
; GFX8-NEXT: v_min_u32_e32 v8, v8, v9
; GFX8-NEXT: v_lshlrev_b64 v[0:1], v8, v[0:1]
-; GFX8-NEXT: v_sub_u32_e32 v6, vcc, 32, v11
+; GFX8-NEXT: v_sub_u32_e32 v6, vcc, 32, v10
; GFX8-NEXT: v_ldexp_f32 v4, v4, v6
; GFX8-NEXT: v_min_u32_e32 v0, 1, v0
; GFX8-NEXT: v_bfe_u32 v6, v4, 16, 1
@@ -32844,8 +32101,7 @@ define <4 x bfloat> @v_sitofp_v4i64_to_v4bf16(<4 x i64> %x) {
; GFX8-NEXT: v_add_u32_e32 v6, vcc, v6, v4
; GFX8-NEXT: v_cvt_f32_i32_e32 v0, v0
; GFX8-NEXT: v_add_u32_e32 v6, vcc, s4, v6
-; GFX8-NEXT: v_and_b32_e32 v7, 0xff800000, v4
-; GFX8-NEXT: v_or_b32_e32 v7, 0x400000, v7
+; GFX8-NEXT: v_or_b32_e32 v7, 0x400000, v4
; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v4, v4
; GFX8-NEXT: v_cndmask_b32_e32 v4, v6, v7, vcc
; GFX8-NEXT: v_sub_u32_e32 v1, vcc, 32, v8
@@ -32858,22 +32114,20 @@ define <4 x bfloat> @v_sitofp_v4i64_to_v4bf16(<4 x i64> %x) {
; GFX8-NEXT: v_ashrrev_i32_e32 v1, 31, v1
; GFX8-NEXT: v_add_u32_e32 v0, vcc, -1, v0
; GFX8-NEXT: v_add_u32_e32 v1, vcc, 32, v1
-; GFX8-NEXT: v_min_u32_e32 v9, v0, v1
-; GFX8-NEXT: v_lshlrev_b64 v[0:1], v9, v[2:3]
-; GFX8-NEXT: v_and_b32_e32 v8, 0xff800000, v6
+; GFX8-NEXT: v_min_u32_e32 v8, v0, v1
+; GFX8-NEXT: v_lshlrev_b64 v[0:1], v8, v[2:3]
+; GFX8-NEXT: v_or_b32_e32 v2, 0x400000, v6
; GFX8-NEXT: v_min_u32_e32 v0, 1, v0
; GFX8-NEXT: v_or_b32_e32 v0, v1, v0
; GFX8-NEXT: v_cvt_f32_i32_e32 v0, v0
-; GFX8-NEXT: v_or_b32_e32 v2, 0x400000, v8
; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v6, v6
; GFX8-NEXT: v_cndmask_b32_e32 v1, v7, v2, vcc
-; GFX8-NEXT: v_sub_u32_e32 v2, vcc, 32, v9
+; GFX8-NEXT: v_sub_u32_e32 v2, vcc, 32, v8
; GFX8-NEXT: v_ldexp_f32 v0, v0, v2
; GFX8-NEXT: v_bfe_u32 v2, v0, 16, 1
; GFX8-NEXT: v_add_u32_e32 v2, vcc, v2, v0
; GFX8-NEXT: v_add_u32_e32 v2, vcc, 0x7fff, v2
-; GFX8-NEXT: v_and_b32_e32 v3, 0xff800000, v0
-; GFX8-NEXT: v_or_b32_e32 v3, 0x400000, v3
+; GFX8-NEXT: v_or_b32_e32 v3, 0x400000, v0
; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v0, v0
; GFX8-NEXT: v_cndmask_b32_e32 v0, v2, v3, vcc
; GFX8-NEXT: v_lshrrev_b32_e32 v4, 16, v4
@@ -32905,34 +32159,32 @@ define <4 x bfloat> @v_sitofp_v4i64_to_v4bf16(<4 x i64> %x) {
; GFX9-NEXT: v_ashrrev_i32_e32 v5, 31, v5
; GFX9-NEXT: v_add_u32_e32 v4, -1, v4
; GFX9-NEXT: v_add_u32_e32 v5, 32, v5
-; GFX9-NEXT: v_min_u32_e32 v11, v4, v5
-; GFX9-NEXT: v_lshlrev_b64 v[4:5], v11, v[6:7]
-; GFX9-NEXT: v_and_b32_e32 v10, 0xff800000, v8
-; GFX9-NEXT: v_or_b32_e32 v6, 0x400000, v10
-; GFX9-NEXT: v_min_u32_e32 v4, 1, v4
+; GFX9-NEXT: v_min_u32_e32 v10, v4, v5
+; GFX9-NEXT: v_lshlrev_b64 v[4:5], v10, v[6:7]
+; GFX9-NEXT: v_or_b32_e32 v6, 0x400000, v8
; GFX9-NEXT: v_cmp_u_f32_e32 vcc, v8, v8
+; GFX9-NEXT: v_xor_b32_e32 v8, v0, v1
+; GFX9-NEXT: v_ffbh_i32_e32 v7, v1
+; GFX9-NEXT: v_ashrrev_i32_e32 v8, 31, v8
+; GFX9-NEXT: v_add_u32_e32 v7, -1, v7
+; GFX9-NEXT: v_add_u32_e32 v8, 32, v8
+; GFX9-NEXT: v_min_u32_e32 v4, 1, v4
+; GFX9-NEXT: v_min_u32_e32 v7, v7, v8
; GFX9-NEXT: v_or_b32_e32 v4, v5, v4
-; GFX9-NEXT: v_cndmask_b32_e32 v5, v9, v6, vcc
-; GFX9-NEXT: v_xor_b32_e32 v9, v0, v1
-; GFX9-NEXT: v_ffbh_i32_e32 v8, v1
-; GFX9-NEXT: v_ashrrev_i32_e32 v9, 31, v9
-; GFX9-NEXT: v_add_u32_e32 v8, -1, v8
-; GFX9-NEXT: v_add_u32_e32 v9, 32, v9
-; GFX9-NEXT: v_min_u32_e32 v8, v8, v9
-; GFX9-NEXT: v_lshlrev_b64 v[0:1], v8, v[0:1]
+; GFX9-NEXT: v_lshlrev_b64 v[0:1], v7, v[0:1]
; GFX9-NEXT: v_cvt_f32_i32_e32 v4, v4
; GFX9-NEXT: v_min_u32_e32 v0, 1, v0
; GFX9-NEXT: v_or_b32_e32 v0, v1, v0
-; GFX9-NEXT: v_sub_u32_e32 v6, 32, v11
+; GFX9-NEXT: v_cndmask_b32_e32 v5, v9, v6, vcc
+; GFX9-NEXT: v_sub_u32_e32 v6, 32, v10
; GFX9-NEXT: v_cvt_f32_i32_e32 v0, v0
; GFX9-NEXT: v_ldexp_f32 v4, v4, v6
; GFX9-NEXT: v_bfe_u32 v6, v4, 16, 1
-; GFX9-NEXT: v_and_b32_e32 v7, 0xff800000, v4
; GFX9-NEXT: v_add3_u32 v6, v6, v4, s4
-; GFX9-NEXT: v_or_b32_e32 v7, 0x400000, v7
+; GFX9-NEXT: v_or_b32_e32 v8, 0x400000, v4
; GFX9-NEXT: v_cmp_u_f32_e32 vcc, v4, v4
-; GFX9-NEXT: v_sub_u32_e32 v1, 32, v8
-; GFX9-NEXT: v_cndmask_b32_e32 v4, v6, v7, vcc
+; GFX9-NEXT: v_sub_u32_e32 v1, 32, v7
+; GFX9-NEXT: v_cndmask_b32_e32 v4, v6, v8, vcc
; GFX9-NEXT: v_ldexp_f32 v6, v0, v1
; GFX9-NEXT: v_bfe_u32 v0, v6, 16, 1
; GFX9-NEXT: v_xor_b32_e32 v1, v2, v3
@@ -32941,21 +32193,19 @@ define <4 x bfloat> @v_sitofp_v4i64_to_v4bf16(<4 x i64> %x) {
; GFX9-NEXT: v_ashrrev_i32_e32 v1, 31, v1
; GFX9-NEXT: v_add_u32_e32 v0, -1, v0
; GFX9-NEXT: v_add_u32_e32 v1, 32, v1
-; GFX9-NEXT: v_min_u32_e32 v9, v0, v1
-; GFX9-NEXT: v_lshlrev_b64 v[0:1], v9, v[2:3]
-; GFX9-NEXT: v_and_b32_e32 v8, 0xff800000, v6
+; GFX9-NEXT: v_min_u32_e32 v8, v0, v1
+; GFX9-NEXT: v_lshlrev_b64 v[0:1], v8, v[2:3]
+; GFX9-NEXT: v_or_b32_e32 v2, 0x400000, v6
; GFX9-NEXT: v_min_u32_e32 v0, 1, v0
; GFX9-NEXT: v_or_b32_e32 v0, v1, v0
; GFX9-NEXT: v_cvt_f32_i32_e32 v0, v0
-; GFX9-NEXT: v_or_b32_e32 v2, 0x400000, v8
; GFX9-NEXT: v_cmp_u_f32_e32 vcc, v6, v6
; GFX9-NEXT: v_cndmask_b32_e32 v1, v7, v2, vcc
-; GFX9-NEXT: v_sub_u32_e32 v2, 32, v9
+; GFX9-NEXT: v_sub_u32_e32 v2, 32, v8
; GFX9-NEXT: v_ldexp_f32 v0, v0, v2
; GFX9-NEXT: v_bfe_u32 v2, v0, 16, 1
-; GFX9-NEXT: v_and_b32_e32 v3, 0xff800000, v0
; GFX9-NEXT: v_add3_u32 v2, v2, v0, s4
-; GFX9-NEXT: v_or_b32_e32 v3, 0x400000, v3
+; GFX9-NEXT: v_or_b32_e32 v3, 0x400000, v0
; GFX9-NEXT: v_cmp_u_f32_e32 vcc, v0, v0
; GFX9-NEXT: v_cndmask_b32_e32 v0, v2, v3, vcc
; GFX9-NEXT: s_mov_b32 s4, 0x7060302
@@ -32989,7 +32239,6 @@ define <4 x bfloat> @v_sitofp_v4i64_to_v4bf16(<4 x i64> %x) {
; GFX10-NEXT: v_lshlrev_b64 v[4:5], v8, v[4:5]
; GFX10-NEXT: v_add_nc_u32_e32 v9, 32, v9
; GFX10-NEXT: v_add_nc_u32_e32 v13, -1, v13
-; GFX10-NEXT: s_mov_b32 s4, 0xff800000
; GFX10-NEXT: v_lshlrev_b64 v[6:7], v10, v[6:7]
; GFX10-NEXT: v_min_u32_e32 v9, v12, v9
; GFX10-NEXT: v_min_u32_e32 v11, v13, v14
@@ -33015,21 +32264,21 @@ define <4 x bfloat> @v_sitofp_v4i64_to_v4bf16(<4 x i64> %x) {
; GFX10-NEXT: v_bfe_u32 v7, v2, 16, 1
; GFX10-NEXT: v_ldexp_f32 v3, v3, v4
; GFX10-NEXT: v_ldexp_f32 v0, v0, v5
-; GFX10-NEXT: v_and_or_b32 v5, v2, s4, 0x400000
+; GFX10-NEXT: v_or_b32_e32 v5, 0x400000, v2
; GFX10-NEXT: v_ldexp_f32 v1, v1, v6
; GFX10-NEXT: v_add3_u32 v4, v7, v2, 0x7fff
; GFX10-NEXT: v_bfe_u32 v6, v3, 16, 1
; GFX10-NEXT: v_bfe_u32 v7, v0, 16, 1
; GFX10-NEXT: v_cmp_u_f32_e32 vcc_lo, v2, v2
; GFX10-NEXT: v_bfe_u32 v8, v1, 16, 1
-; GFX10-NEXT: v_and_or_b32 v9, v3, s4, 0x400000
+; GFX10-NEXT: v_or_b32_e32 v9, 0x400000, v3
; GFX10-NEXT: v_cndmask_b32_e32 v2, v4, v5, vcc_lo
; GFX10-NEXT: v_add3_u32 v4, v6, v3, 0x7fff
; GFX10-NEXT: v_add3_u32 v5, v7, v0, 0x7fff
-; GFX10-NEXT: v_and_or_b32 v6, v0, s4, 0x400000
+; GFX10-NEXT: v_or_b32_e32 v6, 0x400000, v0
; GFX10-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0
; GFX10-NEXT: v_add3_u32 v7, v8, v1, 0x7fff
-; GFX10-NEXT: v_and_or_b32 v8, v1, s4, 0x400000
+; GFX10-NEXT: v_or_b32_e32 v8, 0x400000, v1
; GFX10-NEXT: v_cndmask_b32_e32 v0, v5, v6, vcc_lo
; GFX10-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
; GFX10-NEXT: v_cndmask_b32_e32 v1, v7, v8, vcc_lo
@@ -33065,16 +32314,15 @@ define <4 x bfloat> @v_sitofp_v4i64_to_v4bf16(<4 x i64> %x) {
; GFX11-NEXT: v_lshlrev_b64 v[4:5], v8, v[4:5]
; GFX11-NEXT: v_add_nc_u32_e32 v9, 32, v9
; GFX11-NEXT: v_add_nc_u32_e32 v13, -1, v13
-; GFX11-NEXT: s_mov_b32 s0, 0xff800000
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_3)
; GFX11-NEXT: v_lshlrev_b64 v[6:7], v10, v[6:7]
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
; GFX11-NEXT: v_min_u32_e32 v9, v12, v9
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_3)
; GFX11-NEXT: v_min_u32_e32 v11, v13, v14
; GFX11-NEXT: v_min_u32_e32 v4, 1, v4
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
; GFX11-NEXT: v_lshlrev_b64 v[0:1], v9, v[0:1]
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
; GFX11-NEXT: v_lshlrev_b64 v[2:3], v11, v[2:3]
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3)
; GFX11-NEXT: v_or_b32_e32 v4, v5, v4
; GFX11-NEXT: v_min_u32_e32 v5, 1, v6
; GFX11-NEXT: v_sub_nc_u32_e32 v6, 32, v8
@@ -33096,21 +32344,21 @@ define <4 x bfloat> @v_sitofp_v4i64_to_v4bf16(<4 x i64> %x) {
; GFX11-NEXT: v_bfe_u32 v7, v2, 16, 1
; GFX11-NEXT: v_ldexp_f32 v3, v3, v4
; GFX11-NEXT: v_ldexp_f32 v0, v0, v5
-; GFX11-NEXT: v_and_or_b32 v5, v2, s0, 0x400000
+; GFX11-NEXT: v_or_b32_e32 v5, 0x400000, v2
; GFX11-NEXT: v_ldexp_f32 v1, v1, v6
; GFX11-NEXT: v_add3_u32 v4, v7, v2, 0x7fff
; GFX11-NEXT: v_bfe_u32 v6, v3, 16, 1
; GFX11-NEXT: v_bfe_u32 v7, v0, 16, 1
; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v2, v2
; GFX11-NEXT: v_bfe_u32 v8, v1, 16, 1
-; GFX11-NEXT: v_and_or_b32 v9, v3, s0, 0x400000
+; GFX11-NEXT: v_or_b32_e32 v9, 0x400000, v3
; GFX11-NEXT: v_cndmask_b32_e32 v2, v4, v5, vcc_lo
; GFX11-NEXT: v_add3_u32 v4, v6, v3, 0x7fff
; GFX11-NEXT: v_add3_u32 v5, v7, v0, 0x7fff
-; GFX11-NEXT: v_and_or_b32 v6, v0, s0, 0x400000
+; GFX11-NEXT: v_or_b32_e32 v6, 0x400000, v0
; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0
; GFX11-NEXT: v_add3_u32 v7, v8, v1, 0x7fff
-; GFX11-NEXT: v_and_or_b32 v8, v1, s0, 0x400000
+; GFX11-NEXT: v_or_b32_e32 v8, 0x400000, v1
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_1) | instid1(VALU_DEP_3)
; GFX11-NEXT: v_cndmask_b32_e32 v0, v5, v6, vcc_lo
; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
@@ -33148,9 +32396,8 @@ define bfloat @v_uitofp_i16_to_bf16(i16 %x) {
; GFX8-NEXT: v_cvt_f32_u32_sdwa v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0
; GFX8-NEXT: v_bfe_u32 v1, v0, 16, 1
; GFX8-NEXT: v_add_u32_e32 v1, vcc, v1, v0
-; GFX8-NEXT: v_and_b32_e32 v2, 0xff800000, v0
; GFX8-NEXT: v_add_u32_e32 v1, vcc, 0x7fff, v1
-; GFX8-NEXT: v_or_b32_e32 v2, 0x400000, v2
+; GFX8-NEXT: v_or_b32_e32 v2, 0x400000, v0
; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v0, v0
; GFX8-NEXT: v_cndmask_b32_e32 v0, v1, v2, vcc
; GFX8-NEXT: v_lshrrev_b32_e32 v0, 16, v0
@@ -33162,9 +32409,8 @@ define bfloat @v_uitofp_i16_to_bf16(i16 %x) {
; GFX9-NEXT: v_cvt_f32_u32_sdwa v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0
; GFX9-NEXT: s_movk_i32 s4, 0x7fff
; GFX9-NEXT: v_bfe_u32 v1, v0, 16, 1
-; GFX9-NEXT: v_and_b32_e32 v2, 0xff800000, v0
+; GFX9-NEXT: v_or_b32_e32 v2, 0x400000, v0
; GFX9-NEXT: v_add3_u32 v1, v1, v0, s4
-; GFX9-NEXT: v_or_b32_e32 v2, 0x400000, v2
; GFX9-NEXT: v_cmp_u_f32_e32 vcc, v0, v0
; GFX9-NEXT: v_cndmask_b32_e32 v0, v1, v2, vcc
; GFX9-NEXT: v_lshrrev_b32_e32 v0, 16, v0
@@ -33174,9 +32420,8 @@ define bfloat @v_uitofp_i16_to_bf16(i16 %x) {
; GFX10: ; %bb.0:
; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX10-NEXT: v_cvt_f32_u32_sdwa v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0
-; GFX10-NEXT: s_mov_b32 s4, 0xff800000
; GFX10-NEXT: v_bfe_u32 v1, v0, 16, 1
-; GFX10-NEXT: v_and_or_b32 v2, v0, s4, 0x400000
+; GFX10-NEXT: v_or_b32_e32 v2, 0x400000, v0
; GFX10-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0
; GFX10-NEXT: v_add3_u32 v1, v1, v0, 0x7fff
; GFX10-NEXT: v_cndmask_b32_e32 v0, v1, v2, vcc_lo
@@ -33187,11 +32432,10 @@ define bfloat @v_uitofp_i16_to_bf16(i16 %x) {
; GFX11: ; %bb.0:
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-NEXT: v_and_b32_e32 v0, 0xffff, v0
-; GFX11-NEXT: s_mov_b32 s0, 0xff800000
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
; GFX11-NEXT: v_cvt_f32_u32_e32 v0, v0
; GFX11-NEXT: v_bfe_u32 v1, v0, 16, 1
-; GFX11-NEXT: v_and_or_b32 v2, v0, s0, 0x400000
+; GFX11-NEXT: v_or_b32_e32 v2, 0x400000, v0
; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1)
; GFX11-NEXT: v_add3_u32 v1, v1, v0, 0x7fff
@@ -33233,16 +32477,14 @@ define <2 x bfloat> @v_uitofp_v2i16_to_v2bf16(<2 x i16> %x) {
; GFX8-NEXT: v_cvt_f32_u32_sdwa v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1
; GFX8-NEXT: v_bfe_u32 v2, v1, 16, 1
; GFX8-NEXT: v_add_u32_e32 v2, vcc, v2, v1
-; GFX8-NEXT: v_and_b32_e32 v3, 0xff800000, v1
; GFX8-NEXT: v_add_u32_e32 v2, vcc, 0x7fff, v2
-; GFX8-NEXT: v_or_b32_e32 v3, 0x400000, v3
+; GFX8-NEXT: v_or_b32_e32 v3, 0x400000, v1
; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v1, v1
; GFX8-NEXT: v_cndmask_b32_e32 v1, v2, v3, vcc
; GFX8-NEXT: v_bfe_u32 v2, v0, 16, 1
; GFX8-NEXT: v_add_u32_e32 v2, vcc, v2, v0
; GFX8-NEXT: v_add_u32_e32 v2, vcc, 0x7fff, v2
-; GFX8-NEXT: v_and_b32_e32 v3, 0xff800000, v0
-; GFX8-NEXT: v_or_b32_e32 v3, 0x400000, v3
+; GFX8-NEXT: v_or_b32_e32 v3, 0x400000, v0
; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v0, v0
; GFX8-NEXT: v_cndmask_b32_e32 v0, v2, v3, vcc
; GFX8-NEXT: v_lshrrev_b32_e32 v0, 16, v0
@@ -33256,15 +32498,13 @@ define <2 x bfloat> @v_uitofp_v2i16_to_v2bf16(<2 x i16> %x) {
; GFX9-NEXT: v_cvt_f32_u32_sdwa v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1
; GFX9-NEXT: s_movk_i32 s4, 0x7fff
; GFX9-NEXT: v_bfe_u32 v2, v1, 16, 1
-; GFX9-NEXT: v_and_b32_e32 v3, 0xff800000, v1
+; GFX9-NEXT: v_or_b32_e32 v3, 0x400000, v1
; GFX9-NEXT: v_add3_u32 v2, v2, v1, s4
-; GFX9-NEXT: v_or_b32_e32 v3, 0x400000, v3
; GFX9-NEXT: v_cmp_u_f32_e32 vcc, v1, v1
; GFX9-NEXT: v_cndmask_b32_e32 v1, v2, v3, vcc
; GFX9-NEXT: v_bfe_u32 v2, v0, 16, 1
-; GFX9-NEXT: v_and_b32_e32 v3, 0xff800000, v0
; GFX9-NEXT: v_add3_u32 v2, v2, v0, s4
-; GFX9-NEXT: v_or_b32_e32 v3, 0x400000, v3
+; GFX9-NEXT: v_or_b32_e32 v3, 0x400000, v0
; GFX9-NEXT: v_cmp_u_f32_e32 vcc, v0, v0
; GFX9-NEXT: v_cndmask_b32_e32 v0, v2, v3, vcc
; GFX9-NEXT: s_mov_b32 s4, 0x7060302
@@ -33276,12 +32516,11 @@ define <2 x bfloat> @v_uitofp_v2i16_to_v2bf16(<2 x i16> %x) {
; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX10-NEXT: v_cvt_f32_u32_sdwa v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0
; GFX10-NEXT: v_cvt_f32_u32_sdwa v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1
-; GFX10-NEXT: s_mov_b32 s4, 0xff800000
; GFX10-NEXT: v_bfe_u32 v2, v1, 16, 1
; GFX10-NEXT: v_bfe_u32 v3, v0, 16, 1
-; GFX10-NEXT: v_and_or_b32 v4, v1, s4, 0x400000
+; GFX10-NEXT: v_or_b32_e32 v4, 0x400000, v1
; GFX10-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
-; GFX10-NEXT: v_and_or_b32 v5, v0, s4, 0x400000
+; GFX10-NEXT: v_or_b32_e32 v5, 0x400000, v0
; GFX10-NEXT: v_add3_u32 v2, v2, v1, 0x7fff
; GFX10-NEXT: v_add3_u32 v3, v3, v0, 0x7fff
; GFX10-NEXT: v_cndmask_b32_e32 v1, v2, v4, vcc_lo
@@ -33295,16 +32534,15 @@ define <2 x bfloat> @v_uitofp_v2i16_to_v2bf16(<2 x i16> %x) {
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-NEXT: v_and_b32_e32 v1, 0xffff, v0
; GFX11-NEXT: v_lshrrev_b32_e32 v0, 16, v0
-; GFX11-NEXT: s_mov_b32 s0, 0xff800000
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
; GFX11-NEXT: v_cvt_f32_u32_e32 v1, v1
; GFX11-NEXT: v_cvt_f32_u32_e32 v0, v0
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
; GFX11-NEXT: v_bfe_u32 v2, v1, 16, 1
; GFX11-NEXT: v_bfe_u32 v3, v0, 16, 1
-; GFX11-NEXT: v_and_or_b32 v4, v1, s0, 0x400000
+; GFX11-NEXT: v_or_b32_e32 v4, 0x400000, v1
; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
-; GFX11-NEXT: v_and_or_b32 v5, v0, s0, 0x400000
+; GFX11-NEXT: v_or_b32_e32 v5, 0x400000, v0
; GFX11-NEXT: v_add3_u32 v2, v2, v1, 0x7fff
; GFX11-NEXT: v_add3_u32 v3, v3, v0, 0x7fff
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_3)
@@ -33357,22 +32595,19 @@ define <3 x bfloat> @v_uitofp_v3i16_to_v3bf16(<3 x i16> %x) {
; GFX8-NEXT: v_bfe_u32 v2, v1, 16, 1
; GFX8-NEXT: v_add_u32_e32 v2, vcc, v2, v1
; GFX8-NEXT: v_add_u32_e32 v2, vcc, 0x7fff, v2
-; GFX8-NEXT: v_and_b32_e32 v3, 0xff800000, v1
-; GFX8-NEXT: v_or_b32_e32 v3, 0x400000, v3
+; GFX8-NEXT: v_or_b32_e32 v3, 0x400000, v1
; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v1, v1
; GFX8-NEXT: v_cndmask_b32_e32 v1, v2, v3, vcc
; GFX8-NEXT: v_bfe_u32 v2, v4, 16, 1
; GFX8-NEXT: v_add_u32_e32 v2, vcc, v2, v4
; GFX8-NEXT: v_add_u32_e32 v2, vcc, s4, v2
-; GFX8-NEXT: v_and_b32_e32 v3, 0xff800000, v4
-; GFX8-NEXT: v_or_b32_e32 v3, 0x400000, v3
+; GFX8-NEXT: v_or_b32_e32 v3, 0x400000, v4
; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v4, v4
; GFX8-NEXT: v_cndmask_b32_e32 v2, v2, v3, vcc
; GFX8-NEXT: v_bfe_u32 v3, v0, 16, 1
; GFX8-NEXT: v_add_u32_e32 v3, vcc, v3, v0
; GFX8-NEXT: v_add_u32_e32 v3, vcc, 0x7fff, v3
-; GFX8-NEXT: v_and_b32_e32 v4, 0xff800000, v0
-; GFX8-NEXT: v_or_b32_e32 v4, 0x400000, v4
+; GFX8-NEXT: v_or_b32_e32 v4, 0x400000, v0
; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v0, v0
; GFX8-NEXT: v_cndmask_b32_e32 v0, v3, v4, vcc
; GFX8-NEXT: v_lshrrev_b32_e32 v0, 16, v0
@@ -33384,25 +32619,22 @@ define <3 x bfloat> @v_uitofp_v3i16_to_v3bf16(<3 x i16> %x) {
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: v_cvt_f32_u32_sdwa v1, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0
-; GFX9-NEXT: v_cvt_f32_u32_sdwa v4, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0
+; GFX9-NEXT: v_cvt_f32_u32_sdwa v2, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0
; GFX9-NEXT: s_movk_i32 s4, 0x7fff
; GFX9-NEXT: v_cvt_f32_u32_sdwa v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1
-; GFX9-NEXT: v_bfe_u32 v2, v1, 16, 1
-; GFX9-NEXT: v_and_b32_e32 v3, 0xff800000, v1
-; GFX9-NEXT: v_add3_u32 v2, v2, v1, s4
-; GFX9-NEXT: v_or_b32_e32 v3, 0x400000, v3
+; GFX9-NEXT: v_bfe_u32 v3, v1, 16, 1
+; GFX9-NEXT: v_add3_u32 v3, v3, v1, s4
+; GFX9-NEXT: v_or_b32_e32 v4, 0x400000, v1
; GFX9-NEXT: v_cmp_u_f32_e32 vcc, v1, v1
-; GFX9-NEXT: v_cndmask_b32_e32 v1, v2, v3, vcc
-; GFX9-NEXT: v_bfe_u32 v2, v4, 16, 1
-; GFX9-NEXT: v_and_b32_e32 v3, 0xff800000, v4
-; GFX9-NEXT: v_add3_u32 v2, v2, v4, s4
-; GFX9-NEXT: v_or_b32_e32 v3, 0x400000, v3
-; GFX9-NEXT: v_cmp_u_f32_e32 vcc, v4, v4
-; GFX9-NEXT: v_cndmask_b32_e32 v2, v2, v3, vcc
+; GFX9-NEXT: v_cndmask_b32_e32 v1, v3, v4, vcc
+; GFX9-NEXT: v_bfe_u32 v3, v2, 16, 1
+; GFX9-NEXT: v_add3_u32 v3, v3, v2, s4
+; GFX9-NEXT: v_or_b32_e32 v4, 0x400000, v2
+; GFX9-NEXT: v_cmp_u_f32_e32 vcc, v2, v2
+; GFX9-NEXT: v_cndmask_b32_e32 v2, v3, v4, vcc
; GFX9-NEXT: v_bfe_u32 v3, v0, 16, 1
-; GFX9-NEXT: v_and_b32_e32 v4, 0xff800000, v0
; GFX9-NEXT: v_add3_u32 v3, v3, v0, s4
-; GFX9-NEXT: v_or_b32_e32 v4, 0x400000, v4
+; GFX9-NEXT: v_or_b32_e32 v4, 0x400000, v0
; GFX9-NEXT: v_cmp_u_f32_e32 vcc, v0, v0
; GFX9-NEXT: v_cndmask_b32_e32 v0, v3, v4, vcc
; GFX9-NEXT: s_mov_b32 s4, 0x7060302
@@ -33415,17 +32647,16 @@ define <3 x bfloat> @v_uitofp_v3i16_to_v3bf16(<3 x i16> %x) {
; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX10-NEXT: v_cvt_f32_u32_sdwa v2, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0
; GFX10-NEXT: v_cvt_f32_u32_sdwa v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1
-; GFX10-NEXT: s_mov_b32 s4, 0xff800000
; GFX10-NEXT: v_cvt_f32_u32_sdwa v1, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0
; GFX10-NEXT: v_bfe_u32 v3, v2, 16, 1
; GFX10-NEXT: v_bfe_u32 v5, v0, 16, 1
-; GFX10-NEXT: v_and_or_b32 v7, v2, s4, 0x400000
+; GFX10-NEXT: v_or_b32_e32 v7, 0x400000, v2
; GFX10-NEXT: v_cmp_u_f32_e32 vcc_lo, v2, v2
; GFX10-NEXT: v_bfe_u32 v4, v1, 16, 1
; GFX10-NEXT: v_add3_u32 v3, v3, v2, 0x7fff
-; GFX10-NEXT: v_and_or_b32 v8, v0, s4, 0x400000
+; GFX10-NEXT: v_or_b32_e32 v8, 0x400000, v0
; GFX10-NEXT: v_add3_u32 v5, v5, v0, 0x7fff
-; GFX10-NEXT: v_and_or_b32 v6, v1, s4, 0x400000
+; GFX10-NEXT: v_or_b32_e32 v6, 0x400000, v1
; GFX10-NEXT: v_add3_u32 v4, v4, v1, 0x7fff
; GFX10-NEXT: v_cndmask_b32_e32 v2, v3, v7, vcc_lo
; GFX10-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0
@@ -33484,30 +32715,26 @@ define <4 x bfloat> @v_uitofp_v4i16_to_v4bf16(<4 x i16> %x) {
; GFX8-NEXT: v_bfe_u32 v3, v2, 16, 1
; GFX8-NEXT: v_add_u32_e32 v3, vcc, v3, v2
; GFX8-NEXT: v_add_u32_e32 v3, vcc, 0x7fff, v3
-; GFX8-NEXT: v_and_b32_e32 v4, 0xff800000, v2
-; GFX8-NEXT: v_or_b32_e32 v4, 0x400000, v4
+; GFX8-NEXT: v_or_b32_e32 v4, 0x400000, v2
; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v2, v2
; GFX8-NEXT: v_cndmask_b32_e32 v2, v3, v4, vcc
; GFX8-NEXT: v_bfe_u32 v3, v1, 16, 1
; GFX8-NEXT: v_add_u32_e32 v3, vcc, v3, v1
; GFX8-NEXT: v_add_u32_e32 v3, vcc, s4, v3
-; GFX8-NEXT: v_and_b32_e32 v4, 0xff800000, v1
-; GFX8-NEXT: v_or_b32_e32 v4, 0x400000, v4
+; GFX8-NEXT: v_or_b32_e32 v4, 0x400000, v1
; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v1, v1
; GFX8-NEXT: v_cndmask_b32_e32 v1, v3, v4, vcc
; GFX8-NEXT: v_bfe_u32 v3, v5, 16, 1
; GFX8-NEXT: v_cvt_f32_u32_sdwa v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1
; GFX8-NEXT: v_add_u32_e32 v3, vcc, v3, v5
; GFX8-NEXT: v_add_u32_e32 v3, vcc, s4, v3
-; GFX8-NEXT: v_and_b32_e32 v4, 0xff800000, v5
-; GFX8-NEXT: v_or_b32_e32 v4, 0x400000, v4
+; GFX8-NEXT: v_or_b32_e32 v4, 0x400000, v5
; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v5, v5
; GFX8-NEXT: v_cndmask_b32_e32 v3, v3, v4, vcc
; GFX8-NEXT: v_bfe_u32 v4, v0, 16, 1
; GFX8-NEXT: v_add_u32_e32 v4, vcc, v4, v0
; GFX8-NEXT: v_add_u32_e32 v4, vcc, 0x7fff, v4
-; GFX8-NEXT: v_and_b32_e32 v5, 0xff800000, v0
-; GFX8-NEXT: v_or_b32_e32 v5, 0x400000, v5
+; GFX8-NEXT: v_or_b32_e32 v5, 0x400000, v0
; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v0, v0
; GFX8-NEXT: v_cndmask_b32_e32 v0, v4, v5, vcc
; GFX8-NEXT: v_lshrrev_b32_e32 v1, 16, v1
@@ -33520,32 +32747,28 @@ define <4 x bfloat> @v_uitofp_v4i16_to_v4bf16(<4 x i16> %x) {
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: v_cvt_f32_u32_sdwa v2, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0
-; GFX9-NEXT: v_cvt_f32_u32_sdwa v1, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1
; GFX9-NEXT: s_movk_i32 s4, 0x7fff
-; GFX9-NEXT: v_cvt_f32_u32_sdwa v5, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0
+; GFX9-NEXT: v_cvt_f32_u32_sdwa v1, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1
; GFX9-NEXT: v_bfe_u32 v3, v2, 16, 1
-; GFX9-NEXT: v_and_b32_e32 v4, 0xff800000, v2
; GFX9-NEXT: v_add3_u32 v3, v3, v2, s4
-; GFX9-NEXT: v_or_b32_e32 v4, 0x400000, v4
+; GFX9-NEXT: v_or_b32_e32 v4, 0x400000, v2
; GFX9-NEXT: v_cmp_u_f32_e32 vcc, v2, v2
; GFX9-NEXT: v_cndmask_b32_e32 v2, v3, v4, vcc
-; GFX9-NEXT: v_bfe_u32 v3, v1, 16, 1
-; GFX9-NEXT: v_and_b32_e32 v4, 0xff800000, v1
+; GFX9-NEXT: v_cvt_f32_u32_sdwa v4, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0
; GFX9-NEXT: v_cvt_f32_u32_sdwa v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1
+; GFX9-NEXT: v_bfe_u32 v3, v1, 16, 1
; GFX9-NEXT: v_add3_u32 v3, v3, v1, s4
-; GFX9-NEXT: v_or_b32_e32 v4, 0x400000, v4
+; GFX9-NEXT: v_or_b32_e32 v5, 0x400000, v1
; GFX9-NEXT: v_cmp_u_f32_e32 vcc, v1, v1
-; GFX9-NEXT: v_cndmask_b32_e32 v1, v3, v4, vcc
-; GFX9-NEXT: v_bfe_u32 v3, v5, 16, 1
-; GFX9-NEXT: v_and_b32_e32 v4, 0xff800000, v5
-; GFX9-NEXT: v_add3_u32 v3, v3, v5, s4
-; GFX9-NEXT: v_or_b32_e32 v4, 0x400000, v4
-; GFX9-NEXT: v_cmp_u_f32_e32 vcc, v5, v5
-; GFX9-NEXT: v_cndmask_b32_e32 v3, v3, v4, vcc
+; GFX9-NEXT: v_cndmask_b32_e32 v1, v3, v5, vcc
+; GFX9-NEXT: v_bfe_u32 v3, v4, 16, 1
+; GFX9-NEXT: v_add3_u32 v3, v3, v4, s4
+; GFX9-NEXT: v_or_b32_e32 v5, 0x400000, v4
+; GFX9-NEXT: v_cmp_u_f32_e32 vcc, v4, v4
; GFX9-NEXT: v_bfe_u32 v4, v0, 16, 1
-; GFX9-NEXT: v_and_b32_e32 v5, 0xff800000, v0
+; GFX9-NEXT: v_cndmask_b32_e32 v3, v3, v5, vcc
; GFX9-NEXT: v_add3_u32 v4, v4, v0, s4
-; GFX9-NEXT: v_or_b32_e32 v5, 0x400000, v5
+; GFX9-NEXT: v_or_b32_e32 v5, 0x400000, v0
; GFX9-NEXT: v_cmp_u_f32_e32 vcc, v0, v0
; GFX9-NEXT: v_cndmask_b32_e32 v0, v4, v5, vcc
; GFX9-NEXT: s_mov_b32 s4, 0x7060302
@@ -33558,23 +32781,22 @@ define <4 x bfloat> @v_uitofp_v4i16_to_v4bf16(<4 x i16> %x) {
; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX10-NEXT: v_cvt_f32_u32_sdwa v2, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0
; GFX10-NEXT: v_cvt_f32_u32_sdwa v3, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0
-; GFX10-NEXT: s_mov_b32 s4, 0xff800000
; GFX10-NEXT: v_cvt_f32_u32_sdwa v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1
; GFX10-NEXT: v_cvt_f32_u32_sdwa v1, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1
; GFX10-NEXT: v_bfe_u32 v4, v2, 16, 1
-; GFX10-NEXT: v_and_or_b32 v5, v2, s4, 0x400000
+; GFX10-NEXT: v_or_b32_e32 v5, 0x400000, v2
; GFX10-NEXT: v_bfe_u32 v8, v3, 16, 1
; GFX10-NEXT: v_cmp_u_f32_e32 vcc_lo, v2, v2
-; GFX10-NEXT: v_and_or_b32 v9, v3, s4, 0x400000
+; GFX10-NEXT: v_or_b32_e32 v9, 0x400000, v3
; GFX10-NEXT: v_add3_u32 v4, v4, v2, 0x7fff
; GFX10-NEXT: v_bfe_u32 v10, v0, 16, 1
; GFX10-NEXT: v_add3_u32 v8, v8, v3, 0x7fff
; GFX10-NEXT: v_bfe_u32 v6, v1, 16, 1
-; GFX10-NEXT: v_and_or_b32 v11, v0, s4, 0x400000
+; GFX10-NEXT: v_or_b32_e32 v11, 0x400000, v0
; GFX10-NEXT: v_cndmask_b32_e32 v2, v4, v5, vcc_lo
; GFX10-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3
; GFX10-NEXT: v_add3_u32 v10, v10, v0, 0x7fff
-; GFX10-NEXT: v_and_or_b32 v7, v1, s4, 0x400000
+; GFX10-NEXT: v_or_b32_e32 v7, 0x400000, v1
; GFX10-NEXT: v_add3_u32 v6, v6, v1, 0x7fff
; GFX10-NEXT: v_cndmask_b32_e32 v3, v8, v9, vcc_lo
; GFX10-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0
@@ -33589,17 +32811,16 @@ define <4 x bfloat> @v_uitofp_v4i16_to_v4bf16(<4 x i16> %x) {
; GFX11: ; %bb.0:
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-NEXT: v_and_b32_e32 v2, 0xffff, v1
-; GFX11-NEXT: s_mov_b32 s0, 0xff800000
; GFX11-NEXT: v_lshrrev_b32_e32 v1, 16, v1
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
; GFX11-NEXT: v_cvt_f32_u32_e32 v2, v2
; GFX11-NEXT: v_cvt_f32_u32_e32 v1, v1
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_2) | instid1(VALU_DEP_4)
; GFX11-NEXT: v_bfe_u32 v4, v2, 16, 1
-; GFX11-NEXT: v_and_or_b32 v5, v2, s0, 0x400000
+; GFX11-NEXT: v_or_b32_e32 v5, 0x400000, v2
; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v2, v2
; GFX11-NEXT: v_bfe_u32 v6, v1, 16, 1
-; GFX11-NEXT: v_and_or_b32 v7, v1, s0, 0x400000
+; GFX11-NEXT: v_or_b32_e32 v7, 0x400000, v1
; GFX11-NEXT: v_add3_u32 v4, v4, v2, 0x7fff
; GFX11-NEXT: v_and_b32_e32 v3, 0xffff, v0
; GFX11-NEXT: v_lshrrev_b32_e32 v0, 16, v0
@@ -33610,11 +32831,11 @@ define <4 x bfloat> @v_uitofp_v4i16_to_v4bf16(<4 x i16> %x) {
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_2)
; GFX11-NEXT: v_cvt_f32_u32_e32 v0, v0
; GFX11-NEXT: v_bfe_u32 v8, v3, 16, 1
-; GFX11-NEXT: v_and_or_b32 v9, v3, s0, 0x400000
+; GFX11-NEXT: v_or_b32_e32 v9, 0x400000, v3
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_3) | instid1(VALU_DEP_4)
; GFX11-NEXT: v_bfe_u32 v10, v0, 16, 1
; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3
-; GFX11-NEXT: v_and_or_b32 v11, v0, s0, 0x400000
+; GFX11-NEXT: v_or_b32_e32 v11, 0x400000, v0
; GFX11-NEXT: v_add3_u32 v8, v8, v3, 0x7fff
; GFX11-NEXT: v_add3_u32 v10, v10, v0, 0x7fff
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_3)
@@ -33652,9 +32873,8 @@ define bfloat @v_uitofp_i32_to_bf16(i32 %x) {
; GFX8-NEXT: v_cvt_f32_u32_e32 v0, v0
; GFX8-NEXT: v_bfe_u32 v2, v0, 16, 1
; GFX8-NEXT: v_add_u32_e32 v2, vcc, v2, v0
-; GFX8-NEXT: v_and_b32_e32 v1, 0xff800000, v0
; GFX8-NEXT: v_add_u32_e32 v2, vcc, 0x7fff, v2
-; GFX8-NEXT: v_or_b32_e32 v1, 0x400000, v1
+; GFX8-NEXT: v_or_b32_e32 v1, 0x400000, v0
; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v0, v0
; GFX8-NEXT: v_cndmask_b32_e32 v0, v2, v1, vcc
; GFX8-NEXT: v_lshrrev_b32_e32 v0, 16, v0
@@ -33666,9 +32886,8 @@ define bfloat @v_uitofp_i32_to_bf16(i32 %x) {
; GFX9-NEXT: v_cvt_f32_u32_e32 v0, v0
; GFX9-NEXT: s_movk_i32 s4, 0x7fff
; GFX9-NEXT: v_bfe_u32 v1, v0, 16, 1
-; GFX9-NEXT: v_and_b32_e32 v2, 0xff800000, v0
+; GFX9-NEXT: v_or_b32_e32 v2, 0x400000, v0
; GFX9-NEXT: v_add3_u32 v1, v1, v0, s4
-; GFX9-NEXT: v_or_b32_e32 v2, 0x400000, v2
; GFX9-NEXT: v_cmp_u_f32_e32 vcc, v0, v0
; GFX9-NEXT: v_cndmask_b32_e32 v0, v1, v2, vcc
; GFX9-NEXT: v_lshrrev_b32_e32 v0, 16, v0
@@ -33678,9 +32897,8 @@ define bfloat @v_uitofp_i32_to_bf16(i32 %x) {
; GFX10: ; %bb.0:
; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX10-NEXT: v_cvt_f32_u32_e32 v0, v0
-; GFX10-NEXT: s_mov_b32 s4, 0xff800000
; GFX10-NEXT: v_bfe_u32 v1, v0, 16, 1
-; GFX10-NEXT: v_and_or_b32 v2, v0, s4, 0x400000
+; GFX10-NEXT: v_or_b32_e32 v2, 0x400000, v0
; GFX10-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0
; GFX10-NEXT: v_add3_u32 v1, v1, v0, 0x7fff
; GFX10-NEXT: v_cndmask_b32_e32 v0, v1, v2, vcc_lo
@@ -33691,10 +32909,9 @@ define bfloat @v_uitofp_i32_to_bf16(i32 %x) {
; GFX11: ; %bb.0:
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-NEXT: v_cvt_f32_u32_e32 v0, v0
-; GFX11-NEXT: s_mov_b32 s0, 0xff800000
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
; GFX11-NEXT: v_bfe_u32 v1, v0, 16, 1
-; GFX11-NEXT: v_and_or_b32 v2, v0, s0, 0x400000
+; GFX11-NEXT: v_or_b32_e32 v2, 0x400000, v0
; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0
; GFX11-NEXT: v_add3_u32 v1, v1, v0, 0x7fff
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
@@ -33731,16 +32948,14 @@ define <2 x bfloat> @v_uitofp_v2i32_to_v2bf16(<2 x i32> %x) {
; GFX8-NEXT: v_cvt_f32_u32_e32 v1, v1
; GFX8-NEXT: v_bfe_u32 v3, v0, 16, 1
; GFX8-NEXT: v_add_u32_e32 v3, vcc, v3, v0
-; GFX8-NEXT: v_and_b32_e32 v2, 0xff800000, v0
; GFX8-NEXT: v_add_u32_e32 v3, vcc, 0x7fff, v3
-; GFX8-NEXT: v_or_b32_e32 v2, 0x400000, v2
+; GFX8-NEXT: v_or_b32_e32 v2, 0x400000, v0
; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v0, v0
; GFX8-NEXT: v_cndmask_b32_e32 v0, v3, v2, vcc
; GFX8-NEXT: v_bfe_u32 v3, v1, 16, 1
; GFX8-NEXT: v_add_u32_e32 v3, vcc, v3, v1
-; GFX8-NEXT: v_and_b32_e32 v2, 0xff800000, v1
; GFX8-NEXT: v_add_u32_e32 v3, vcc, 0x7fff, v3
-; GFX8-NEXT: v_or_b32_e32 v2, 0x400000, v2
+; GFX8-NEXT: v_or_b32_e32 v2, 0x400000, v1
; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v1, v1
; GFX8-NEXT: v_cndmask_b32_e32 v1, v3, v2, vcc
; GFX8-NEXT: v_lshrrev_b32_e32 v1, 16, v1
@@ -33754,15 +32969,13 @@ define <2 x bfloat> @v_uitofp_v2i32_to_v2bf16(<2 x i32> %x) {
; GFX9-NEXT: v_cvt_f32_u32_e32 v1, v1
; GFX9-NEXT: s_movk_i32 s4, 0x7fff
; GFX9-NEXT: v_bfe_u32 v2, v0, 16, 1
-; GFX9-NEXT: v_and_b32_e32 v3, 0xff800000, v0
; GFX9-NEXT: v_add3_u32 v2, v2, v0, s4
-; GFX9-NEXT: v_or_b32_e32 v3, 0x400000, v3
+; GFX9-NEXT: v_or_b32_e32 v3, 0x400000, v0
; GFX9-NEXT: v_cmp_u_f32_e32 vcc, v0, v0
; GFX9-NEXT: v_cndmask_b32_e32 v0, v2, v3, vcc
; GFX9-NEXT: v_bfe_u32 v2, v1, 16, 1
-; GFX9-NEXT: v_and_b32_e32 v3, 0xff800000, v1
; GFX9-NEXT: v_add3_u32 v2, v2, v1, s4
-; GFX9-NEXT: v_or_b32_e32 v3, 0x400000, v3
+; GFX9-NEXT: v_or_b32_e32 v3, 0x400000, v1
; GFX9-NEXT: v_cmp_u_f32_e32 vcc, v1, v1
; GFX9-NEXT: v_cndmask_b32_e32 v1, v2, v3, vcc
; GFX9-NEXT: s_mov_b32 s4, 0x7060302
@@ -33774,12 +32987,11 @@ define <2 x bfloat> @v_uitofp_v2i32_to_v2bf16(<2 x i32> %x) {
; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX10-NEXT: v_cvt_f32_u32_e32 v0, v0
; GFX10-NEXT: v_cvt_f32_u32_e32 v1, v1
-; GFX10-NEXT: s_mov_b32 s4, 0xff800000
; GFX10-NEXT: v_bfe_u32 v2, v0, 16, 1
; GFX10-NEXT: v_bfe_u32 v3, v1, 16, 1
-; GFX10-NEXT: v_and_or_b32 v4, v0, s4, 0x400000
+; GFX10-NEXT: v_or_b32_e32 v4, 0x400000, v0
; GFX10-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0
-; GFX10-NEXT: v_and_or_b32 v5, v1, s4, 0x400000
+; GFX10-NEXT: v_or_b32_e32 v5, 0x400000, v1
; GFX10-NEXT: v_add3_u32 v2, v2, v0, 0x7fff
; GFX10-NEXT: v_add3_u32 v3, v3, v1, 0x7fff
; GFX10-NEXT: v_cndmask_b32_e32 v0, v2, v4, vcc_lo
@@ -33793,13 +33005,12 @@ define <2 x bfloat> @v_uitofp_v2i32_to_v2bf16(<2 x i32> %x) {
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-NEXT: v_cvt_f32_u32_e32 v0, v0
; GFX11-NEXT: v_cvt_f32_u32_e32 v1, v1
-; GFX11-NEXT: s_mov_b32 s0, 0xff800000
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
; GFX11-NEXT: v_bfe_u32 v2, v0, 16, 1
; GFX11-NEXT: v_bfe_u32 v3, v1, 16, 1
-; GFX11-NEXT: v_and_or_b32 v4, v0, s0, 0x400000
+; GFX11-NEXT: v_or_b32_e32 v4, 0x400000, v0
; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0
-; GFX11-NEXT: v_and_or_b32 v5, v1, s0, 0x400000
+; GFX11-NEXT: v_or_b32_e32 v5, 0x400000, v1
; GFX11-NEXT: v_add3_u32 v2, v2, v0, 0x7fff
; GFX11-NEXT: v_add3_u32 v3, v3, v1, 0x7fff
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_3)
@@ -33844,23 +33055,20 @@ define <3 x bfloat> @v_uitofp_v3i32_to_v3bf16(<3 x i32> %x) {
; GFX8-NEXT: v_cvt_f32_u32_e32 v1, v1
; GFX8-NEXT: v_bfe_u32 v4, v2, 16, 1
; GFX8-NEXT: v_add_u32_e32 v4, vcc, v4, v2
-; GFX8-NEXT: v_and_b32_e32 v3, 0xff800000, v2
; GFX8-NEXT: v_add_u32_e32 v4, vcc, 0x7fff, v4
-; GFX8-NEXT: v_or_b32_e32 v3, 0x400000, v3
+; GFX8-NEXT: v_or_b32_e32 v3, 0x400000, v2
; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v2, v2
; GFX8-NEXT: v_cndmask_b32_e32 v2, v4, v3, vcc
; GFX8-NEXT: v_bfe_u32 v4, v0, 16, 1
; GFX8-NEXT: v_add_u32_e32 v4, vcc, v4, v0
-; GFX8-NEXT: v_and_b32_e32 v3, 0xff800000, v0
; GFX8-NEXT: v_add_u32_e32 v4, vcc, 0x7fff, v4
-; GFX8-NEXT: v_or_b32_e32 v3, 0x400000, v3
+; GFX8-NEXT: v_or_b32_e32 v3, 0x400000, v0
; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v0, v0
; GFX8-NEXT: v_cndmask_b32_e32 v0, v4, v3, vcc
; GFX8-NEXT: v_bfe_u32 v4, v1, 16, 1
; GFX8-NEXT: v_add_u32_e32 v4, vcc, v4, v1
-; GFX8-NEXT: v_and_b32_e32 v3, 0xff800000, v1
; GFX8-NEXT: v_add_u32_e32 v4, vcc, 0x7fff, v4
-; GFX8-NEXT: v_or_b32_e32 v3, 0x400000, v3
+; GFX8-NEXT: v_or_b32_e32 v3, 0x400000, v1
; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v1, v1
; GFX8-NEXT: v_cndmask_b32_e32 v1, v4, v3, vcc
; GFX8-NEXT: v_lshrrev_b32_e32 v2, 16, v2
@@ -33877,21 +33085,18 @@ define <3 x bfloat> @v_uitofp_v3i32_to_v3bf16(<3 x i32> %x) {
; GFX9-NEXT: s_movk_i32 s4, 0x7fff
; GFX9-NEXT: v_cvt_f32_u32_e32 v1, v1
; GFX9-NEXT: v_bfe_u32 v3, v2, 16, 1
-; GFX9-NEXT: v_and_b32_e32 v4, 0xff800000, v2
; GFX9-NEXT: v_add3_u32 v3, v3, v2, s4
-; GFX9-NEXT: v_or_b32_e32 v4, 0x400000, v4
+; GFX9-NEXT: v_or_b32_e32 v4, 0x400000, v2
; GFX9-NEXT: v_cmp_u_f32_e32 vcc, v2, v2
; GFX9-NEXT: v_cndmask_b32_e32 v2, v3, v4, vcc
; GFX9-NEXT: v_bfe_u32 v3, v0, 16, 1
-; GFX9-NEXT: v_and_b32_e32 v4, 0xff800000, v0
; GFX9-NEXT: v_add3_u32 v3, v3, v0, s4
-; GFX9-NEXT: v_or_b32_e32 v4, 0x400000, v4
+; GFX9-NEXT: v_or_b32_e32 v4, 0x400000, v0
; GFX9-NEXT: v_cmp_u_f32_e32 vcc, v0, v0
; GFX9-NEXT: v_cndmask_b32_e32 v0, v3, v4, vcc
; GFX9-NEXT: v_bfe_u32 v3, v1, 16, 1
-; GFX9-NEXT: v_and_b32_e32 v4, 0xff800000, v1
; GFX9-NEXT: v_add3_u32 v3, v3, v1, s4
-; GFX9-NEXT: v_or_b32_e32 v4, 0x400000, v4
+; GFX9-NEXT: v_or_b32_e32 v4, 0x400000, v1
; GFX9-NEXT: v_cmp_u_f32_e32 vcc, v1, v1
; GFX9-NEXT: v_cndmask_b32_e32 v1, v3, v4, vcc
; GFX9-NEXT: s_mov_b32 s4, 0x7060302
@@ -33904,24 +33109,23 @@ define <3 x bfloat> @v_uitofp_v3i32_to_v3bf16(<3 x i32> %x) {
; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX10-NEXT: v_cvt_f32_u32_e32 v0, v0
; GFX10-NEXT: v_cvt_f32_u32_e32 v1, v1
-; GFX10-NEXT: s_mov_b32 s4, 0xff800000
; GFX10-NEXT: v_cvt_f32_u32_e32 v2, v2
; GFX10-NEXT: v_bfe_u32 v3, v0, 16, 1
; GFX10-NEXT: v_bfe_u32 v5, v1, 16, 1
-; GFX10-NEXT: v_and_or_b32 v7, v0, s4, 0x400000
+; GFX10-NEXT: v_or_b32_e32 v7, 0x400000, v0
; GFX10-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0
-; GFX10-NEXT: v_bfe_u32 v6, v2, 16, 1
+; GFX10-NEXT: v_bfe_u32 v4, v2, 16, 1
; GFX10-NEXT: v_add3_u32 v3, v3, v0, 0x7fff
-; GFX10-NEXT: v_and_or_b32 v8, v1, s4, 0x400000
+; GFX10-NEXT: v_or_b32_e32 v8, 0x400000, v1
; GFX10-NEXT: v_add3_u32 v5, v5, v1, 0x7fff
-; GFX10-NEXT: v_and_or_b32 v4, v2, s4, 0x400000
-; GFX10-NEXT: v_add3_u32 v6, v6, v2, 0x7fff
+; GFX10-NEXT: v_or_b32_e32 v6, 0x400000, v2
+; GFX10-NEXT: v_add3_u32 v4, v4, v2, 0x7fff
; GFX10-NEXT: v_cndmask_b32_e32 v0, v3, v7, vcc_lo
; GFX10-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
; GFX10-NEXT: v_cndmask_b32_e32 v1, v5, v8, vcc_lo
; GFX10-NEXT: v_cmp_u_f32_e32 vcc_lo, v2, v2
; GFX10-NEXT: v_perm_b32 v0, v1, v0, 0x7060302
-; GFX10-NEXT: v_cndmask_b32_e32 v2, v6, v4, vcc_lo
+; GFX10-NEXT: v_cndmask_b32_e32 v2, v4, v6, vcc_lo
; GFX10-NEXT: v_alignbit_b32 v1, s4, v2, 16
; GFX10-NEXT: s_setpc_b64 s[30:31]
%op = uitofp <3 x i32> %x to <3 x bfloat>
@@ -33964,31 +33168,27 @@ define <4 x bfloat> @v_uitofp_v4i32_to_v4bf16(<4 x i32> %x) {
; GFX8-NEXT: s_movk_i32 s4, 0x7fff
; GFX8-NEXT: v_bfe_u32 v5, v2, 16, 1
; GFX8-NEXT: v_add_u32_e32 v5, vcc, v5, v2
-; GFX8-NEXT: v_and_b32_e32 v4, 0xff800000, v2
; GFX8-NEXT: v_add_u32_e32 v5, vcc, 0x7fff, v5
-; GFX8-NEXT: v_or_b32_e32 v4, 0x400000, v4
+; GFX8-NEXT: v_or_b32_e32 v4, 0x400000, v2
; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v2, v2
; GFX8-NEXT: v_cndmask_b32_e32 v2, v5, v4, vcc
; GFX8-NEXT: v_bfe_u32 v5, v3, 16, 1
; GFX8-NEXT: v_add_u32_e32 v5, vcc, v5, v3
-; GFX8-NEXT: v_and_b32_e32 v4, 0xff800000, v3
; GFX8-NEXT: v_add_u32_e32 v5, vcc, s4, v5
-; GFX8-NEXT: v_or_b32_e32 v4, 0x400000, v4
+; GFX8-NEXT: v_or_b32_e32 v4, 0x400000, v3
; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v3, v3
; GFX8-NEXT: v_cndmask_b32_e32 v3, v5, v4, vcc
; GFX8-NEXT: v_bfe_u32 v5, v0, 16, 1
; GFX8-NEXT: v_cvt_f32_u32_e32 v1, v1
; GFX8-NEXT: v_add_u32_e32 v5, vcc, v5, v0
-; GFX8-NEXT: v_and_b32_e32 v4, 0xff800000, v0
-; GFX8-NEXT: v_add_u32_e32 v5, vcc, s4, v5
-; GFX8-NEXT: v_or_b32_e32 v4, 0x400000, v4
+; GFX8-NEXT: v_add_u32_e32 v5, vcc, 0x7fff, v5
+; GFX8-NEXT: v_or_b32_e32 v4, 0x400000, v0
; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v0, v0
; GFX8-NEXT: v_cndmask_b32_e32 v0, v5, v4, vcc
; GFX8-NEXT: v_bfe_u32 v5, v1, 16, 1
; GFX8-NEXT: v_add_u32_e32 v5, vcc, v5, v1
-; GFX8-NEXT: v_and_b32_e32 v4, 0xff800000, v1
; GFX8-NEXT: v_add_u32_e32 v5, vcc, 0x7fff, v5
-; GFX8-NEXT: v_or_b32_e32 v4, 0x400000, v4
+; GFX8-NEXT: v_or_b32_e32 v4, 0x400000, v1
; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v1, v1
; GFX8-NEXT: v_cndmask_b32_e32 v1, v5, v4, vcc
; GFX8-NEXT: v_lshrrev_b32_e32 v3, 16, v3
@@ -34005,28 +33205,24 @@ define <4 x bfloat> @v_uitofp_v4i32_to_v4bf16(<4 x i32> %x) {
; GFX9-NEXT: s_movk_i32 s4, 0x7fff
; GFX9-NEXT: v_cvt_f32_u32_e32 v0, v0
; GFX9-NEXT: v_bfe_u32 v4, v2, 16, 1
-; GFX9-NEXT: v_and_b32_e32 v5, 0xff800000, v2
; GFX9-NEXT: v_add3_u32 v4, v4, v2, s4
-; GFX9-NEXT: v_or_b32_e32 v5, 0x400000, v5
+; GFX9-NEXT: v_or_b32_e32 v5, 0x400000, v2
; GFX9-NEXT: v_cmp_u_f32_e32 vcc, v2, v2
; GFX9-NEXT: v_cndmask_b32_e32 v2, v4, v5, vcc
; GFX9-NEXT: v_bfe_u32 v4, v3, 16, 1
-; GFX9-NEXT: v_and_b32_e32 v5, 0xff800000, v3
; GFX9-NEXT: v_cvt_f32_u32_e32 v1, v1
; GFX9-NEXT: v_add3_u32 v4, v4, v3, s4
-; GFX9-NEXT: v_or_b32_e32 v5, 0x400000, v5
+; GFX9-NEXT: v_or_b32_e32 v5, 0x400000, v3
; GFX9-NEXT: v_cmp_u_f32_e32 vcc, v3, v3
; GFX9-NEXT: v_cndmask_b32_e32 v3, v4, v5, vcc
; GFX9-NEXT: v_bfe_u32 v4, v0, 16, 1
-; GFX9-NEXT: v_and_b32_e32 v5, 0xff800000, v0
; GFX9-NEXT: v_add3_u32 v4, v4, v0, s4
-; GFX9-NEXT: v_or_b32_e32 v5, 0x400000, v5
+; GFX9-NEXT: v_or_b32_e32 v5, 0x400000, v0
; GFX9-NEXT: v_cmp_u_f32_e32 vcc, v0, v0
; GFX9-NEXT: v_cndmask_b32_e32 v0, v4, v5, vcc
; GFX9-NEXT: v_bfe_u32 v4, v1, 16, 1
-; GFX9-NEXT: v_and_b32_e32 v5, 0xff800000, v1
; GFX9-NEXT: v_add3_u32 v4, v4, v1, s4
-; GFX9-NEXT: v_or_b32_e32 v5, 0x400000, v5
+; GFX9-NEXT: v_or_b32_e32 v5, 0x400000, v1
; GFX9-NEXT: v_cmp_u_f32_e32 vcc, v1, v1
; GFX9-NEXT: v_cndmask_b32_e32 v1, v4, v5, vcc
; GFX9-NEXT: s_mov_b32 s4, 0x7060302
@@ -34039,30 +33235,29 @@ define <4 x bfloat> @v_uitofp_v4i32_to_v4bf16(<4 x i32> %x) {
; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX10-NEXT: v_cvt_f32_u32_e32 v2, v2
; GFX10-NEXT: v_cvt_f32_u32_e32 v0, v0
-; GFX10-NEXT: s_mov_b32 s4, 0xff800000
; GFX10-NEXT: v_cvt_f32_u32_e32 v1, v1
; GFX10-NEXT: v_cvt_f32_u32_e32 v3, v3
-; GFX10-NEXT: v_bfe_u32 v5, v2, 16, 1
-; GFX10-NEXT: v_and_or_b32 v4, v2, s4, 0x400000
-; GFX10-NEXT: v_bfe_u32 v8, v0, 16, 1
+; GFX10-NEXT: v_bfe_u32 v4, v2, 16, 1
+; GFX10-NEXT: v_or_b32_e32 v5, 0x400000, v2
+; GFX10-NEXT: v_bfe_u32 v7, v0, 16, 1
; GFX10-NEXT: v_cmp_u_f32_e32 vcc_lo, v2, v2
-; GFX10-NEXT: v_and_or_b32 v9, v0, s4, 0x400000
-; GFX10-NEXT: v_add3_u32 v5, v5, v2, 0x7fff
-; GFX10-NEXT: v_bfe_u32 v10, v1, 16, 1
-; GFX10-NEXT: v_add3_u32 v8, v8, v0, 0x7fff
-; GFX10-NEXT: v_bfe_u32 v7, v3, 16, 1
-; GFX10-NEXT: v_and_or_b32 v11, v1, s4, 0x400000
-; GFX10-NEXT: v_cndmask_b32_e32 v2, v5, v4, vcc_lo
+; GFX10-NEXT: v_or_b32_e32 v8, 0x400000, v0
+; GFX10-NEXT: v_add3_u32 v4, v4, v2, 0x7fff
+; GFX10-NEXT: v_bfe_u32 v9, v1, 16, 1
+; GFX10-NEXT: v_add3_u32 v7, v7, v0, 0x7fff
+; GFX10-NEXT: v_bfe_u32 v6, v3, 16, 1
+; GFX10-NEXT: v_or_b32_e32 v10, 0x400000, v1
+; GFX10-NEXT: v_cndmask_b32_e32 v2, v4, v5, vcc_lo
; GFX10-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0
-; GFX10-NEXT: v_add3_u32 v4, v10, v1, 0x7fff
-; GFX10-NEXT: v_and_or_b32 v6, v3, s4, 0x400000
-; GFX10-NEXT: v_add3_u32 v5, v7, v3, 0x7fff
-; GFX10-NEXT: v_cndmask_b32_e32 v0, v8, v9, vcc_lo
+; GFX10-NEXT: v_add3_u32 v9, v9, v1, 0x7fff
+; GFX10-NEXT: v_add3_u32 v6, v6, v3, 0x7fff
+; GFX10-NEXT: v_or_b32_e32 v4, 0x400000, v3
+; GFX10-NEXT: v_cndmask_b32_e32 v0, v7, v8, vcc_lo
; GFX10-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
-; GFX10-NEXT: v_cndmask_b32_e32 v1, v4, v11, vcc_lo
+; GFX10-NEXT: v_cndmask_b32_e32 v1, v9, v10, vcc_lo
; GFX10-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3
; GFX10-NEXT: v_perm_b32 v0, v1, v0, 0x7060302
-; GFX10-NEXT: v_cndmask_b32_e32 v3, v5, v6, vcc_lo
+; GFX10-NEXT: v_cndmask_b32_e32 v3, v6, v4, vcc_lo
; GFX10-NEXT: v_perm_b32 v1, v3, v2, 0x7060302
; GFX10-NEXT: s_setpc_b64 s[30:31]
;
@@ -34071,32 +33266,32 @@ define <4 x bfloat> @v_uitofp_v4i32_to_v4bf16(<4 x i32> %x) {
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-NEXT: v_cvt_f32_u32_e32 v2, v2
; GFX11-NEXT: v_cvt_f32_u32_e32 v0, v0
-; GFX11-NEXT: s_mov_b32 s0, 0xff800000
-; GFX11-NEXT: v_cvt_f32_u32_e32 v3, v3
; GFX11-NEXT: v_cvt_f32_u32_e32 v1, v1
-; GFX11-NEXT: v_bfe_u32 v5, v2, 16, 1
-; GFX11-NEXT: v_and_or_b32 v4, v2, s0, 0x400000
-; GFX11-NEXT: v_bfe_u32 v8, v0, 16, 1
+; GFX11-NEXT: v_cvt_f32_u32_e32 v3, v3
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4)
+; GFX11-NEXT: v_bfe_u32 v4, v2, 16, 1
+; GFX11-NEXT: v_or_b32_e32 v5, 0x400000, v2
+; GFX11-NEXT: v_bfe_u32 v7, v0, 16, 1
+; GFX11-NEXT: v_bfe_u32 v9, v1, 16, 1
; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v2, v2
-; GFX11-NEXT: v_bfe_u32 v7, v3, 16, 1
-; GFX11-NEXT: v_add3_u32 v5, v5, v2, 0x7fff
-; GFX11-NEXT: v_and_or_b32 v9, v0, s0, 0x400000
-; GFX11-NEXT: v_bfe_u32 v10, v1, 16, 1
-; GFX11-NEXT: v_and_or_b32 v11, v1, s0, 0x400000
-; GFX11-NEXT: v_add3_u32 v8, v8, v0, 0x7fff
-; GFX11-NEXT: v_cndmask_b32_e32 v2, v5, v4, vcc_lo
+; GFX11-NEXT: v_add3_u32 v4, v4, v2, 0x7fff
+; GFX11-NEXT: v_or_b32_e32 v8, 0x400000, v0
+; GFX11-NEXT: v_or_b32_e32 v10, 0x400000, v1
+; GFX11-NEXT: v_add3_u32 v7, v7, v0, 0x7fff
+; GFX11-NEXT: v_add3_u32 v9, v9, v1, 0x7fff
+; GFX11-NEXT: v_cndmask_b32_e32 v2, v4, v5, vcc_lo
; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0
-; GFX11-NEXT: v_and_or_b32 v6, v3, s0, 0x400000
-; GFX11-NEXT: v_add3_u32 v4, v10, v1, 0x7fff
-; GFX11-NEXT: v_add3_u32 v5, v7, v3, 0x7fff
-; GFX11-NEXT: v_cndmask_b32_e32 v0, v8, v9, vcc_lo
+; GFX11-NEXT: v_bfe_u32 v6, v3, 16, 1
+; GFX11-NEXT: v_or_b32_e32 v4, 0x400000, v3
+; GFX11-NEXT: v_cndmask_b32_e32 v0, v7, v8, vcc_lo
; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_2) | instid1(VALU_DEP_3)
-; GFX11-NEXT: v_cndmask_b32_e32 v1, v4, v11, vcc_lo
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_2) | instid1(VALU_DEP_2)
+; GFX11-NEXT: v_add3_u32 v6, v6, v3, 0x7fff
+; GFX11-NEXT: v_cndmask_b32_e32 v1, v9, v10, vcc_lo
; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3
-; GFX11-NEXT: v_cndmask_b32_e32 v3, v5, v6, vcc_lo
; GFX11-NEXT: v_perm_b32 v0, v1, v0, 0x7060302
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2)
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-NEXT: v_cndmask_b32_e32 v3, v6, v4, vcc_lo
; GFX11-NEXT: v_perm_b32 v1, v3, v2, 0x7060302
; GFX11-NEXT: s_setpc_b64 s[30:31]
%op = uitofp <4 x i32> %x to <4 x bfloat>
@@ -34146,8 +33341,7 @@ define bfloat @v_uitofp_i64_to_bf16(i64 %x) {
; GFX8-NEXT: v_bfe_u32 v1, v0, 16, 1
; GFX8-NEXT: v_add_u32_e32 v1, vcc, v1, v0
; GFX8-NEXT: v_add_u32_e32 v1, vcc, 0x7fff, v1
-; GFX8-NEXT: v_and_b32_e32 v2, 0xff800000, v0
-; GFX8-NEXT: v_or_b32_e32 v2, 0x400000, v2
+; GFX8-NEXT: v_or_b32_e32 v2, 0x400000, v0
; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v0, v0
; GFX8-NEXT: v_cndmask_b32_e32 v0, v1, v2, vcc
; GFX8-NEXT: v_lshrrev_b32_e32 v0, 16, v0
@@ -34166,9 +33360,8 @@ define bfloat @v_uitofp_i64_to_bf16(i64 %x) {
; GFX9-NEXT: v_sub_u32_e32 v1, 32, v2
; GFX9-NEXT: v_ldexp_f32 v0, v0, v1
; GFX9-NEXT: v_bfe_u32 v1, v0, 16, 1
-; GFX9-NEXT: v_and_b32_e32 v2, 0xff800000, v0
+; GFX9-NEXT: v_or_b32_e32 v2, 0x400000, v0
; GFX9-NEXT: v_add3_u32 v1, v1, v0, s4
-; GFX9-NEXT: v_or_b32_e32 v2, 0x400000, v2
; GFX9-NEXT: v_cmp_u_f32_e32 vcc, v0, v0
; GFX9-NEXT: v_cndmask_b32_e32 v0, v1, v2, vcc
; GFX9-NEXT: v_lshrrev_b32_e32 v0, 16, v0
@@ -34178,7 +33371,6 @@ define bfloat @v_uitofp_i64_to_bf16(i64 %x) {
; GFX10: ; %bb.0:
; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX10-NEXT: v_ffbh_u32_e32 v2, v1
-; GFX10-NEXT: s_mov_b32 s4, 0xff800000
; GFX10-NEXT: v_min_u32_e32 v2, 32, v2
; GFX10-NEXT: v_lshlrev_b64 v[0:1], v2, v[0:1]
; GFX10-NEXT: v_min_u32_e32 v0, 1, v0
@@ -34187,7 +33379,7 @@ define bfloat @v_uitofp_i64_to_bf16(i64 %x) {
; GFX10-NEXT: v_cvt_f32_u32_e32 v0, v0
; GFX10-NEXT: v_ldexp_f32 v0, v0, v1
; GFX10-NEXT: v_bfe_u32 v1, v0, 16, 1
-; GFX10-NEXT: v_and_or_b32 v2, v0, s4, 0x400000
+; GFX10-NEXT: v_or_b32_e32 v2, 0x400000, v0
; GFX10-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0
; GFX10-NEXT: v_add3_u32 v1, v1, v0, 0x7fff
; GFX10-NEXT: v_cndmask_b32_e32 v0, v1, v2, vcc_lo
@@ -34198,7 +33390,6 @@ define bfloat @v_uitofp_i64_to_bf16(i64 %x) {
; GFX11: ; %bb.0:
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-NEXT: v_clz_i32_u32_e32 v2, v1
-; GFX11-NEXT: s_mov_b32 s0, 0xff800000
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
; GFX11-NEXT: v_min_u32_e32 v2, 32, v2
; GFX11-NEXT: v_lshlrev_b64 v[0:1], v2, v[0:1]
@@ -34211,7 +33402,7 @@ define bfloat @v_uitofp_i64_to_bf16(i64 %x) {
; GFX11-NEXT: v_ldexp_f32 v0, v0, v1
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
; GFX11-NEXT: v_bfe_u32 v1, v0, 16, 1
-; GFX11-NEXT: v_and_or_b32 v2, v0, s0, 0x400000
+; GFX11-NEXT: v_or_b32_e32 v2, 0x400000, v0
; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0
; GFX11-NEXT: v_add3_u32 v1, v1, v0, 0x7fff
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
@@ -34284,22 +33475,20 @@ define <2 x bfloat> @v_uitofp_v2i64_to_v2bf16(<2 x i64> %x) {
; GFX8-NEXT: v_add_u32_e32 v0, vcc, v0, v4
; GFX8-NEXT: v_add_u32_e32 v5, vcc, 0x7fff, v0
; GFX8-NEXT: v_ffbh_u32_e32 v0, v3
-; GFX8-NEXT: v_min_u32_e32 v7, 32, v0
-; GFX8-NEXT: v_lshlrev_b64 v[0:1], v7, v[2:3]
-; GFX8-NEXT: v_and_b32_e32 v6, 0xff800000, v4
+; GFX8-NEXT: v_min_u32_e32 v6, 32, v0
+; GFX8-NEXT: v_lshlrev_b64 v[0:1], v6, v[2:3]
+; GFX8-NEXT: v_or_b32_e32 v2, 0x400000, v4
; GFX8-NEXT: v_min_u32_e32 v0, 1, v0
; GFX8-NEXT: v_or_b32_e32 v0, v1, v0
; GFX8-NEXT: v_cvt_f32_u32_e32 v0, v0
-; GFX8-NEXT: v_or_b32_e32 v2, 0x400000, v6
; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v4, v4
; GFX8-NEXT: v_cndmask_b32_e32 v1, v5, v2, vcc
-; GFX8-NEXT: v_sub_u32_e32 v2, vcc, 32, v7
+; GFX8-NEXT: v_sub_u32_e32 v2, vcc, 32, v6
; GFX8-NEXT: v_ldexp_f32 v0, v0, v2
; GFX8-NEXT: v_bfe_u32 v2, v0, 16, 1
; GFX8-NEXT: v_add_u32_e32 v2, vcc, v2, v0
; GFX8-NEXT: v_add_u32_e32 v2, vcc, 0x7fff, v2
-; GFX8-NEXT: v_and_b32_e32 v3, 0xff800000, v0
-; GFX8-NEXT: v_or_b32_e32 v3, 0x400000, v3
+; GFX8-NEXT: v_or_b32_e32 v3, 0x400000, v0
; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v0, v0
; GFX8-NEXT: v_cndmask_b32_e32 v0, v2, v3, vcc
; GFX8-NEXT: v_lshrrev_b32_e32 v0, 16, v0
@@ -34321,21 +33510,19 @@ define <2 x bfloat> @v_uitofp_v2i64_to_v2bf16(<2 x i64> %x) {
; GFX9-NEXT: v_bfe_u32 v0, v4, 16, 1
; GFX9-NEXT: v_add3_u32 v5, v0, v4, s4
; GFX9-NEXT: v_ffbh_u32_e32 v0, v3
-; GFX9-NEXT: v_min_u32_e32 v7, 32, v0
-; GFX9-NEXT: v_lshlrev_b64 v[0:1], v7, v[2:3]
-; GFX9-NEXT: v_and_b32_e32 v6, 0xff800000, v4
+; GFX9-NEXT: v_min_u32_e32 v6, 32, v0
+; GFX9-NEXT: v_lshlrev_b64 v[0:1], v6, v[2:3]
+; GFX9-NEXT: v_or_b32_e32 v2, 0x400000, v4
; GFX9-NEXT: v_min_u32_e32 v0, 1, v0
; GFX9-NEXT: v_or_b32_e32 v0, v1, v0
; GFX9-NEXT: v_cvt_f32_u32_e32 v0, v0
-; GFX9-NEXT: v_or_b32_e32 v2, 0x400000, v6
; GFX9-NEXT: v_cmp_u_f32_e32 vcc, v4, v4
; GFX9-NEXT: v_cndmask_b32_e32 v1, v5, v2, vcc
-; GFX9-NEXT: v_sub_u32_e32 v2, 32, v7
+; GFX9-NEXT: v_sub_u32_e32 v2, 32, v6
; GFX9-NEXT: v_ldexp_f32 v0, v0, v2
; GFX9-NEXT: v_bfe_u32 v2, v0, 16, 1
-; GFX9-NEXT: v_and_b32_e32 v3, 0xff800000, v0
; GFX9-NEXT: v_add3_u32 v2, v2, v0, s4
-; GFX9-NEXT: v_or_b32_e32 v3, 0x400000, v3
+; GFX9-NEXT: v_or_b32_e32 v3, 0x400000, v0
; GFX9-NEXT: v_cmp_u_f32_e32 vcc, v0, v0
; GFX9-NEXT: v_cndmask_b32_e32 v0, v2, v3, vcc
; GFX9-NEXT: s_mov_b32 s4, 0x7060302
@@ -34347,7 +33534,6 @@ define <2 x bfloat> @v_uitofp_v2i64_to_v2bf16(<2 x i64> %x) {
; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX10-NEXT: v_ffbh_u32_e32 v4, v1
; GFX10-NEXT: v_ffbh_u32_e32 v5, v3
-; GFX10-NEXT: s_mov_b32 s4, 0xff800000
; GFX10-NEXT: v_min_u32_e32 v4, 32, v4
; GFX10-NEXT: v_min_u32_e32 v5, 32, v5
; GFX10-NEXT: v_lshlrev_b64 v[0:1], v4, v[0:1]
@@ -34364,9 +33550,9 @@ define <2 x bfloat> @v_uitofp_v2i64_to_v2bf16(<2 x i64> %x) {
; GFX10-NEXT: v_ldexp_f32 v1, v1, v3
; GFX10-NEXT: v_bfe_u32 v2, v0, 16, 1
; GFX10-NEXT: v_bfe_u32 v3, v1, 16, 1
-; GFX10-NEXT: v_and_or_b32 v4, v0, s4, 0x400000
+; GFX10-NEXT: v_or_b32_e32 v4, 0x400000, v0
; GFX10-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0
-; GFX10-NEXT: v_and_or_b32 v5, v1, s4, 0x400000
+; GFX10-NEXT: v_or_b32_e32 v5, 0x400000, v1
; GFX10-NEXT: v_add3_u32 v2, v2, v0, 0x7fff
; GFX10-NEXT: v_add3_u32 v3, v3, v1, 0x7fff
; GFX10-NEXT: v_cndmask_b32_e32 v0, v2, v4, vcc_lo
@@ -34380,7 +33566,6 @@ define <2 x bfloat> @v_uitofp_v2i64_to_v2bf16(<2 x i64> %x) {
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-NEXT: v_clz_i32_u32_e32 v4, v1
; GFX11-NEXT: v_clz_i32_u32_e32 v5, v3
-; GFX11-NEXT: s_mov_b32 s0, 0xff800000
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
; GFX11-NEXT: v_min_u32_e32 v4, 32, v4
; GFX11-NEXT: v_min_u32_e32 v5, 32, v5
@@ -34404,9 +33589,9 @@ define <2 x bfloat> @v_uitofp_v2i64_to_v2bf16(<2 x i64> %x) {
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
; GFX11-NEXT: v_bfe_u32 v2, v0, 16, 1
; GFX11-NEXT: v_bfe_u32 v3, v1, 16, 1
-; GFX11-NEXT: v_and_or_b32 v4, v0, s0, 0x400000
+; GFX11-NEXT: v_or_b32_e32 v4, 0x400000, v0
; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0
-; GFX11-NEXT: v_and_or_b32 v5, v1, s0, 0x400000
+; GFX11-NEXT: v_or_b32_e32 v5, 0x400000, v1
; GFX11-NEXT: v_add3_u32 v2, v2, v0, 0x7fff
; GFX11-NEXT: v_add3_u32 v3, v3, v1, 0x7fff
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_3)
@@ -34503,16 +33688,15 @@ define <3 x bfloat> @v_uitofp_v3i64_to_v3bf16(<3 x i64> %x) {
; GFX8-NEXT: v_add_u32_e32 v5, vcc, v5, v4
; GFX8-NEXT: s_movk_i32 s4, 0x7fff
; GFX8-NEXT: v_add_u32_e32 v5, vcc, s4, v5
-; GFX8-NEXT: v_and_b32_e32 v6, 0xff800000, v4
; GFX8-NEXT: v_min_u32_e32 v0, 1, v0
-; GFX8-NEXT: v_or_b32_e32 v6, 0x400000, v6
+; GFX8-NEXT: v_or_b32_e32 v6, 0x400000, v4
; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v4, v4
; GFX8-NEXT: v_or_b32_e32 v0, v1, v0
; GFX8-NEXT: v_cndmask_b32_e32 v4, v5, v6, vcc
; GFX8-NEXT: v_cvt_f32_u32_e32 v0, v0
-; GFX8-NEXT: v_ffbh_u32_e32 v6, v3
-; GFX8-NEXT: v_min_u32_e32 v6, 32, v6
-; GFX8-NEXT: v_lshlrev_b64 v[2:3], v6, v[2:3]
+; GFX8-NEXT: v_ffbh_u32_e32 v5, v3
+; GFX8-NEXT: v_min_u32_e32 v5, 32, v5
+; GFX8-NEXT: v_lshlrev_b64 v[2:3], v5, v[2:3]
; GFX8-NEXT: v_lshrrev_b32_e32 v1, 16, v4
; GFX8-NEXT: v_sub_u32_e32 v4, vcc, 32, v7
; GFX8-NEXT: v_ldexp_f32 v0, v0, v4
@@ -34522,17 +33706,15 @@ define <3 x bfloat> @v_uitofp_v3i64_to_v3bf16(<3 x i64> %x) {
; GFX8-NEXT: v_add_u32_e32 v4, vcc, v4, v0
; GFX8-NEXT: v_cvt_f32_u32_e32 v2, v2
; GFX8-NEXT: v_add_u32_e32 v4, vcc, s4, v4
-; GFX8-NEXT: v_and_b32_e32 v5, 0xff800000, v0
-; GFX8-NEXT: v_or_b32_e32 v5, 0x400000, v5
+; GFX8-NEXT: v_or_b32_e32 v6, 0x400000, v0
; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v0, v0
-; GFX8-NEXT: v_cndmask_b32_e32 v0, v4, v5, vcc
-; GFX8-NEXT: v_sub_u32_e32 v3, vcc, 32, v6
+; GFX8-NEXT: v_cndmask_b32_e32 v0, v4, v6, vcc
+; GFX8-NEXT: v_sub_u32_e32 v3, vcc, 32, v5
; GFX8-NEXT: v_ldexp_f32 v2, v2, v3
; GFX8-NEXT: v_bfe_u32 v3, v2, 16, 1
; GFX8-NEXT: v_add_u32_e32 v3, vcc, v3, v2
; GFX8-NEXT: v_add_u32_e32 v3, vcc, 0x7fff, v3
-; GFX8-NEXT: v_and_b32_e32 v4, 0xff800000, v2
-; GFX8-NEXT: v_or_b32_e32 v4, 0x400000, v4
+; GFX8-NEXT: v_or_b32_e32 v4, 0x400000, v2
; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v2, v2
; GFX8-NEXT: v_cndmask_b32_e32 v2, v3, v4, vcc
; GFX8-NEXT: v_lshrrev_b32_e32 v2, 16, v2
@@ -34545,44 +33727,41 @@ define <3 x bfloat> @v_uitofp_v3i64_to_v3bf16(<3 x i64> %x) {
; GFX9-NEXT: v_ffbh_u32_e32 v6, v5
; GFX9-NEXT: v_min_u32_e32 v6, 32, v6
; GFX9-NEXT: v_lshlrev_b64 v[4:5], v6, v[4:5]
-; GFX9-NEXT: v_ffbh_u32_e32 v7, v1
+; GFX9-NEXT: s_movk_i32 s4, 0x7fff
; GFX9-NEXT: v_min_u32_e32 v4, 1, v4
-; GFX9-NEXT: v_min_u32_e32 v7, 32, v7
; GFX9-NEXT: v_or_b32_e32 v4, v5, v4
-; GFX9-NEXT: v_lshlrev_b64 v[0:1], v7, v[0:1]
+; GFX9-NEXT: v_sub_u32_e32 v5, 32, v6
+; GFX9-NEXT: v_ffbh_u32_e32 v6, v1
+; GFX9-NEXT: v_min_u32_e32 v6, 32, v6
+; GFX9-NEXT: v_lshlrev_b64 v[0:1], v6, v[0:1]
; GFX9-NEXT: v_cvt_f32_u32_e32 v4, v4
; GFX9-NEXT: v_min_u32_e32 v0, 1, v0
; GFX9-NEXT: v_or_b32_e32 v0, v1, v0
-; GFX9-NEXT: v_sub_u32_e32 v5, 32, v6
; GFX9-NEXT: v_cvt_f32_u32_e32 v0, v0
; GFX9-NEXT: v_ldexp_f32 v4, v4, v5
; GFX9-NEXT: v_bfe_u32 v5, v4, 16, 1
-; GFX9-NEXT: s_movk_i32 s4, 0x7fff
-; GFX9-NEXT: v_and_b32_e32 v6, 0xff800000, v4
; GFX9-NEXT: v_add3_u32 v5, v5, v4, s4
-; GFX9-NEXT: v_or_b32_e32 v6, 0x400000, v6
+; GFX9-NEXT: v_or_b32_e32 v7, 0x400000, v4
; GFX9-NEXT: v_cmp_u_f32_e32 vcc, v4, v4
-; GFX9-NEXT: v_sub_u32_e32 v1, 32, v7
-; GFX9-NEXT: v_cndmask_b32_e32 v4, v5, v6, vcc
+; GFX9-NEXT: v_sub_u32_e32 v1, 32, v6
+; GFX9-NEXT: v_cndmask_b32_e32 v4, v5, v7, vcc
; GFX9-NEXT: v_ldexp_f32 v5, v0, v1
; GFX9-NEXT: v_bfe_u32 v0, v5, 16, 1
; GFX9-NEXT: v_add3_u32 v6, v0, v5, s4
; GFX9-NEXT: v_ffbh_u32_e32 v0, v3
-; GFX9-NEXT: v_min_u32_e32 v8, 32, v0
-; GFX9-NEXT: v_lshlrev_b64 v[0:1], v8, v[2:3]
-; GFX9-NEXT: v_and_b32_e32 v7, 0xff800000, v5
+; GFX9-NEXT: v_min_u32_e32 v7, 32, v0
+; GFX9-NEXT: v_lshlrev_b64 v[0:1], v7, v[2:3]
+; GFX9-NEXT: v_or_b32_e32 v2, 0x400000, v5
; GFX9-NEXT: v_min_u32_e32 v0, 1, v0
; GFX9-NEXT: v_or_b32_e32 v0, v1, v0
; GFX9-NEXT: v_cvt_f32_u32_e32 v0, v0
-; GFX9-NEXT: v_or_b32_e32 v2, 0x400000, v7
; GFX9-NEXT: v_cmp_u_f32_e32 vcc, v5, v5
; GFX9-NEXT: v_cndmask_b32_e32 v1, v6, v2, vcc
-; GFX9-NEXT: v_sub_u32_e32 v2, 32, v8
+; GFX9-NEXT: v_sub_u32_e32 v2, 32, v7
; GFX9-NEXT: v_ldexp_f32 v0, v0, v2
; GFX9-NEXT: v_bfe_u32 v2, v0, 16, 1
-; GFX9-NEXT: v_and_b32_e32 v3, 0xff800000, v0
; GFX9-NEXT: v_add3_u32 v2, v2, v0, s4
-; GFX9-NEXT: v_or_b32_e32 v3, 0x400000, v3
+; GFX9-NEXT: v_or_b32_e32 v3, 0x400000, v0
; GFX9-NEXT: v_cmp_u_f32_e32 vcc, v0, v0
; GFX9-NEXT: v_cndmask_b32_e32 v0, v2, v3, vcc
; GFX9-NEXT: s_mov_b32 s4, 0x7060302
@@ -34596,7 +33775,6 @@ define <3 x bfloat> @v_uitofp_v3i64_to_v3bf16(<3 x i64> %x) {
; GFX10-NEXT: v_ffbh_u32_e32 v6, v1
; GFX10-NEXT: v_ffbh_u32_e32 v8, v3
; GFX10-NEXT: v_ffbh_u32_e32 v7, v5
-; GFX10-NEXT: s_mov_b32 s4, 0xff800000
; GFX10-NEXT: v_min_u32_e32 v6, 32, v6
; GFX10-NEXT: v_min_u32_e32 v8, 32, v8
; GFX10-NEXT: v_min_u32_e32 v7, 32, v7
@@ -34620,13 +33798,13 @@ define <3 x bfloat> @v_uitofp_v3i64_to_v3bf16(<3 x i64> %x) {
; GFX10-NEXT: v_ldexp_f32 v1, v1, v7
; GFX10-NEXT: v_bfe_u32 v3, v0, 16, 1
; GFX10-NEXT: v_bfe_u32 v5, v2, 16, 1
-; GFX10-NEXT: v_and_or_b32 v7, v0, s4, 0x400000
+; GFX10-NEXT: v_or_b32_e32 v7, 0x400000, v0
; GFX10-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0
; GFX10-NEXT: v_bfe_u32 v4, v1, 16, 1
; GFX10-NEXT: v_add3_u32 v3, v3, v0, 0x7fff
-; GFX10-NEXT: v_and_or_b32 v8, v2, s4, 0x400000
+; GFX10-NEXT: v_or_b32_e32 v8, 0x400000, v2
; GFX10-NEXT: v_add3_u32 v5, v5, v2, 0x7fff
-; GFX10-NEXT: v_and_or_b32 v6, v1, s4, 0x400000
+; GFX10-NEXT: v_or_b32_e32 v6, 0x400000, v1
; GFX10-NEXT: v_add3_u32 v4, v4, v1, 0x7fff
; GFX10-NEXT: v_cndmask_b32_e32 v0, v3, v7, vcc_lo
; GFX10-NEXT: v_cmp_u_f32_e32 vcc_lo, v2, v2
@@ -34739,19 +33917,18 @@ define <4 x bfloat> @v_uitofp_v4i64_to_v4bf16(<4 x i64> %x) {
; GFX8-NEXT: v_add_u32_e32 v4, vcc, v4, v8
; GFX8-NEXT: v_add_u32_e32 v9, vcc, s4, v4
; GFX8-NEXT: v_ffbh_u32_e32 v4, v7
-; GFX8-NEXT: v_min_u32_e32 v11, 32, v4
-; GFX8-NEXT: v_lshlrev_b64 v[4:5], v11, v[6:7]
-; GFX8-NEXT: v_and_b32_e32 v10, 0xff800000, v8
+; GFX8-NEXT: v_min_u32_e32 v10, 32, v4
+; GFX8-NEXT: v_lshlrev_b64 v[4:5], v10, v[6:7]
+; GFX8-NEXT: v_or_b32_e32 v6, 0x400000, v8
; GFX8-NEXT: v_min_u32_e32 v4, 1, v4
; GFX8-NEXT: v_or_b32_e32 v4, v5, v4
; GFX8-NEXT: v_cvt_f32_u32_e32 v4, v4
; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v8, v8
; GFX8-NEXT: v_ffbh_u32_e32 v8, v1
; GFX8-NEXT: v_min_u32_e32 v8, 32, v8
-; GFX8-NEXT: v_or_b32_e32 v6, 0x400000, v10
; GFX8-NEXT: v_lshlrev_b64 v[0:1], v8, v[0:1]
; GFX8-NEXT: v_cndmask_b32_e32 v5, v9, v6, vcc
-; GFX8-NEXT: v_sub_u32_e32 v6, vcc, 32, v11
+; GFX8-NEXT: v_sub_u32_e32 v6, vcc, 32, v10
; GFX8-NEXT: v_ldexp_f32 v4, v4, v6
; GFX8-NEXT: v_min_u32_e32 v0, 1, v0
; GFX8-NEXT: v_bfe_u32 v6, v4, 16, 1
@@ -34759,8 +33936,7 @@ define <4 x bfloat> @v_uitofp_v4i64_to_v4bf16(<4 x i64> %x) {
; GFX8-NEXT: v_add_u32_e32 v6, vcc, v6, v4
; GFX8-NEXT: v_cvt_f32_u32_e32 v0, v0
; GFX8-NEXT: v_add_u32_e32 v6, vcc, s4, v6
-; GFX8-NEXT: v_and_b32_e32 v7, 0xff800000, v4
-; GFX8-NEXT: v_or_b32_e32 v7, 0x400000, v7
+; GFX8-NEXT: v_or_b32_e32 v7, 0x400000, v4
; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v4, v4
; GFX8-NEXT: v_cndmask_b32_e32 v4, v6, v7, vcc
; GFX8-NEXT: v_sub_u32_e32 v1, vcc, 32, v8
@@ -34769,22 +33945,20 @@ define <4 x bfloat> @v_uitofp_v4i64_to_v4bf16(<4 x i64> %x) {
; GFX8-NEXT: v_add_u32_e32 v0, vcc, v0, v6
; GFX8-NEXT: v_add_u32_e32 v7, vcc, s4, v0
; GFX8-NEXT: v_ffbh_u32_e32 v0, v3
-; GFX8-NEXT: v_min_u32_e32 v9, 32, v0
-; GFX8-NEXT: v_lshlrev_b64 v[0:1], v9, v[2:3]
-; GFX8-NEXT: v_and_b32_e32 v8, 0xff800000, v6
+; GFX8-NEXT: v_min_u32_e32 v8, 32, v0
+; GFX8-NEXT: v_lshlrev_b64 v[0:1], v8, v[2:3]
+; GFX8-NEXT: v_or_b32_e32 v2, 0x400000, v6
; GFX8-NEXT: v_min_u32_e32 v0, 1, v0
; GFX8-NEXT: v_or_b32_e32 v0, v1, v0
; GFX8-NEXT: v_cvt_f32_u32_e32 v0, v0
-; GFX8-NEXT: v_or_b32_e32 v2, 0x400000, v8
; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v6, v6
; GFX8-NEXT: v_cndmask_b32_e32 v1, v7, v2, vcc
-; GFX8-NEXT: v_sub_u32_e32 v2, vcc, 32, v9
+; GFX8-NEXT: v_sub_u32_e32 v2, vcc, 32, v8
; GFX8-NEXT: v_ldexp_f32 v0, v0, v2
; GFX8-NEXT: v_bfe_u32 v2, v0, 16, 1
; GFX8-NEXT: v_add_u32_e32 v2, vcc, v2, v0
; GFX8-NEXT: v_add_u32_e32 v2, vcc, 0x7fff, v2
-; GFX8-NEXT: v_and_b32_e32 v3, 0xff800000, v0
-; GFX8-NEXT: v_or_b32_e32 v3, 0x400000, v3
+; GFX8-NEXT: v_or_b32_e32 v3, 0x400000, v0
; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v0, v0
; GFX8-NEXT: v_cndmask_b32_e32 v0, v2, v3, vcc
; GFX8-NEXT: v_lshrrev_b32_e32 v4, 16, v4
@@ -34808,49 +33982,45 @@ define <4 x bfloat> @v_uitofp_v4i64_to_v4bf16(<4 x i64> %x) {
; GFX9-NEXT: v_bfe_u32 v4, v8, 16, 1
; GFX9-NEXT: v_add3_u32 v9, v4, v8, s4
; GFX9-NEXT: v_ffbh_u32_e32 v4, v7
-; GFX9-NEXT: v_min_u32_e32 v11, 32, v4
-; GFX9-NEXT: v_lshlrev_b64 v[4:5], v11, v[6:7]
-; GFX9-NEXT: v_and_b32_e32 v10, 0xff800000, v8
-; GFX9-NEXT: v_cmp_u_f32_e32 vcc, v8, v8
-; GFX9-NEXT: v_ffbh_u32_e32 v8, v1
+; GFX9-NEXT: v_min_u32_e32 v10, 32, v4
+; GFX9-NEXT: v_lshlrev_b64 v[4:5], v10, v[6:7]
+; GFX9-NEXT: v_ffbh_u32_e32 v7, v1
; GFX9-NEXT: v_min_u32_e32 v4, 1, v4
-; GFX9-NEXT: v_min_u32_e32 v8, 32, v8
+; GFX9-NEXT: v_min_u32_e32 v7, 32, v7
; GFX9-NEXT: v_or_b32_e32 v4, v5, v4
-; GFX9-NEXT: v_lshlrev_b64 v[0:1], v8, v[0:1]
+; GFX9-NEXT: v_lshlrev_b64 v[0:1], v7, v[0:1]
; GFX9-NEXT: v_cvt_f32_u32_e32 v4, v4
; GFX9-NEXT: v_min_u32_e32 v0, 1, v0
-; GFX9-NEXT: v_or_b32_e32 v6, 0x400000, v10
+; GFX9-NEXT: v_or_b32_e32 v6, 0x400000, v8
+; GFX9-NEXT: v_cmp_u_f32_e32 vcc, v8, v8
; GFX9-NEXT: v_or_b32_e32 v0, v1, v0
; GFX9-NEXT: v_cndmask_b32_e32 v5, v9, v6, vcc
-; GFX9-NEXT: v_sub_u32_e32 v6, 32, v11
+; GFX9-NEXT: v_sub_u32_e32 v6, 32, v10
; GFX9-NEXT: v_cvt_f32_u32_e32 v0, v0
; GFX9-NEXT: v_ldexp_f32 v4, v4, v6
; GFX9-NEXT: v_bfe_u32 v6, v4, 16, 1
-; GFX9-NEXT: v_and_b32_e32 v7, 0xff800000, v4
; GFX9-NEXT: v_add3_u32 v6, v6, v4, s4
-; GFX9-NEXT: v_or_b32_e32 v7, 0x400000, v7
+; GFX9-NEXT: v_or_b32_e32 v8, 0x400000, v4
; GFX9-NEXT: v_cmp_u_f32_e32 vcc, v4, v4
-; GFX9-NEXT: v_sub_u32_e32 v1, 32, v8
-; GFX9-NEXT: v_cndmask_b32_e32 v4, v6, v7, vcc
+; GFX9-NEXT: v_sub_u32_e32 v1, 32, v7
+; GFX9-NEXT: v_cndmask_b32_e32 v4, v6, v8, vcc
; GFX9-NEXT: v_ldexp_f32 v6, v0, v1
; GFX9-NEXT: v_bfe_u32 v0, v6, 16, 1
; GFX9-NEXT: v_add3_u32 v7, v0, v6, s4
; GFX9-NEXT: v_ffbh_u32_e32 v0, v3
-; GFX9-NEXT: v_min_u32_e32 v9, 32, v0
-; GFX9-NEXT: v_lshlrev_b64 v[0:1], v9, v[2:3]
-; GFX9-NEXT: v_and_b32_e32 v8, 0xff800000, v6
+; GFX9-NEXT: v_min_u32_e32 v8, 32, v0
+; GFX9-NEXT: v_lshlrev_b64 v[0:1], v8, v[2:3]
+; GFX9-NEXT: v_or_b32_e32 v2, 0x400000, v6
; GFX9-NEXT: v_min_u32_e32 v0, 1, v0
; GFX9-NEXT: v_or_b32_e32 v0, v1, v0
; GFX9-NEXT: v_cvt_f32_u32_e32 v0, v0
-; GFX9-NEXT: v_or_b32_e32 v2, 0x400000, v8
; GFX9-NEXT: v_cmp_u_f32_e32 vcc, v6, v6
; GFX9-NEXT: v_cndmask_b32_e32 v1, v7, v2, vcc
-; GFX9-NEXT: v_sub_u32_e32 v2, 32, v9
+; GFX9-NEXT: v_sub_u32_e32 v2, 32, v8
; GFX9-NEXT: v_ldexp_f32 v0, v0, v2
; GFX9-NEXT: v_bfe_u32 v2, v0, 16, 1
-; GFX9-NEXT: v_and_b32_e32 v3, 0xff800000, v0
; GFX9-NEXT: v_add3_u32 v2, v2, v0, s4
-; GFX9-NEXT: v_or_b32_e32 v3, 0x400000, v3
+; GFX9-NEXT: v_or_b32_e32 v3, 0x400000, v0
; GFX9-NEXT: v_cmp_u_f32_e32 vcc, v0, v0
; GFX9-NEXT: v_cndmask_b32_e32 v0, v2, v3, vcc
; GFX9-NEXT: s_mov_b32 s4, 0x7060302
@@ -34865,7 +34035,6 @@ define <4 x bfloat> @v_uitofp_v4i64_to_v4bf16(<4 x i64> %x) {
; GFX10-NEXT: v_ffbh_u32_e32 v10, v1
; GFX10-NEXT: v_ffbh_u32_e32 v11, v3
; GFX10-NEXT: v_ffbh_u32_e32 v9, v7
-; GFX10-NEXT: s_mov_b32 s4, 0xff800000
; GFX10-NEXT: v_min_u32_e32 v8, 32, v8
; GFX10-NEXT: v_min_u32_e32 v10, 32, v10
; GFX10-NEXT: v_min_u32_e32 v11, 32, v11
@@ -34894,27 +34063,27 @@ define <4 x bfloat> @v_uitofp_v4i64_to_v4bf16(<4 x i64> %x) {
; GFX10-NEXT: v_ldexp_f32 v1, v1, v3
; GFX10-NEXT: v_cvt_f32_u32_e32 v4, v6
; GFX10-NEXT: v_bfe_u32 v3, v2, 16, 1
-; GFX10-NEXT: v_and_or_b32 v5, v2, s4, 0x400000
+; GFX10-NEXT: v_or_b32_e32 v5, 0x400000, v2
; GFX10-NEXT: v_bfe_u32 v7, v0, 16, 1
; GFX10-NEXT: v_cmp_u_f32_e32 vcc_lo, v2, v2
; GFX10-NEXT: v_ldexp_f32 v4, v4, v9
; GFX10-NEXT: v_add3_u32 v3, v3, v2, 0x7fff
-; GFX10-NEXT: v_and_or_b32 v8, v0, s4, 0x400000
-; GFX10-NEXT: v_bfe_u32 v9, v1, 16, 1
+; GFX10-NEXT: v_bfe_u32 v8, v1, 16, 1
+; GFX10-NEXT: v_or_b32_e32 v9, 0x400000, v0
; GFX10-NEXT: v_add3_u32 v7, v7, v0, 0x7fff
; GFX10-NEXT: v_bfe_u32 v6, v4, 16, 1
; GFX10-NEXT: v_cndmask_b32_e32 v2, v3, v5, vcc_lo
; GFX10-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0
-; GFX10-NEXT: v_add3_u32 v3, v9, v1, 0x7fff
-; GFX10-NEXT: v_and_or_b32 v5, v1, s4, 0x400000
+; GFX10-NEXT: v_add3_u32 v8, v8, v1, 0x7fff
+; GFX10-NEXT: v_or_b32_e32 v3, 0x400000, v1
; GFX10-NEXT: v_add3_u32 v6, v6, v4, 0x7fff
-; GFX10-NEXT: v_and_or_b32 v9, v4, s4, 0x400000
-; GFX10-NEXT: v_cndmask_b32_e32 v0, v7, v8, vcc_lo
+; GFX10-NEXT: v_or_b32_e32 v5, 0x400000, v4
+; GFX10-NEXT: v_cndmask_b32_e32 v0, v7, v9, vcc_lo
; GFX10-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
-; GFX10-NEXT: v_cndmask_b32_e32 v1, v3, v5, vcc_lo
+; GFX10-NEXT: v_cndmask_b32_e32 v1, v8, v3, vcc_lo
; GFX10-NEXT: v_cmp_u_f32_e32 vcc_lo, v4, v4
; GFX10-NEXT: v_perm_b32 v0, v1, v0, 0x7060302
-; GFX10-NEXT: v_cndmask_b32_e32 v3, v6, v9, vcc_lo
+; GFX10-NEXT: v_cndmask_b32_e32 v3, v6, v5, vcc_lo
; GFX10-NEXT: v_perm_b32 v1, v3, v2, 0x7060302
; GFX10-NEXT: s_setpc_b64 s[30:31]
;
@@ -34925,9 +34094,10 @@ define <4 x bfloat> @v_uitofp_v4i64_to_v4bf16(<4 x i64> %x) {
; GFX11-NEXT: v_clz_i32_u32_e32 v10, v1
; GFX11-NEXT: v_clz_i32_u32_e32 v11, v3
; GFX11-NEXT: v_clz_i32_u32_e32 v9, v7
-; GFX11-NEXT: s_mov_b32 s0, 0xff800000
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
; GFX11-NEXT: v_min_u32_e32 v8, 32, v8
; GFX11-NEXT: v_min_u32_e32 v10, 32, v10
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
; GFX11-NEXT: v_min_u32_e32 v11, 32, v11
; GFX11-NEXT: v_min_u32_e32 v9, 32, v9
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
@@ -34948,41 +34118,42 @@ define <4 x bfloat> @v_uitofp_v4i64_to_v4bf16(<4 x i64> %x) {
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4)
; GFX11-NEXT: v_or_b32_e32 v1, v3, v2
; GFX11-NEXT: v_sub_nc_u32_e32 v5, 32, v10
-; GFX11-NEXT: v_or_b32_e32 v6, v7, v6
-; GFX11-NEXT: v_cvt_f32_u32_e32 v2, v4
; GFX11-NEXT: v_sub_nc_u32_e32 v3, 32, v11
+; GFX11-NEXT: v_cvt_f32_u32_e32 v2, v4
; GFX11-NEXT: v_cvt_f32_u32_e32 v0, v0
; GFX11-NEXT: v_cvt_f32_u32_e32 v1, v1
-; GFX11-NEXT: v_cvt_f32_u32_e32 v4, v6
-; GFX11-NEXT: v_ldexp_f32 v2, v2, v8
+; GFX11-NEXT: v_or_b32_e32 v6, v7, v6
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
+; GFX11-NEXT: v_ldexp_f32 v2, v2, v8
; GFX11-NEXT: v_ldexp_f32 v0, v0, v5
-; GFX11-NEXT: v_ldexp_f32 v1, v1, v3
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
-; GFX11-NEXT: v_ldexp_f32 v4, v4, v9
+; GFX11-NEXT: v_ldexp_f32 v1, v1, v3
+; GFX11-NEXT: v_cvt_f32_u32_e32 v4, v6
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4)
; GFX11-NEXT: v_bfe_u32 v3, v2, 16, 1
-; GFX11-NEXT: v_and_or_b32 v5, v2, s0, 0x400000
+; GFX11-NEXT: v_or_b32_e32 v5, 0x400000, v2
; GFX11-NEXT: v_bfe_u32 v7, v0, 16, 1
+; GFX11-NEXT: v_bfe_u32 v8, v1, 16, 1
; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v2, v2
-; GFX11-NEXT: v_bfe_u32 v6, v4, 16, 1
; GFX11-NEXT: v_add3_u32 v3, v3, v2, 0x7fff
-; GFX11-NEXT: v_and_or_b32 v8, v0, s0, 0x400000
-; GFX11-NEXT: v_bfe_u32 v9, v1, 16, 1
+; GFX11-NEXT: v_ldexp_f32 v4, v4, v9
+; GFX11-NEXT: v_or_b32_e32 v9, 0x400000, v0
; GFX11-NEXT: v_add3_u32 v7, v7, v0, 0x7fff
-; GFX11-NEXT: v_add3_u32 v6, v6, v4, 0x7fff
+; GFX11-NEXT: v_add3_u32 v8, v8, v1, 0x7fff
; GFX11-NEXT: v_cndmask_b32_e32 v2, v3, v5, vcc_lo
; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0
-; GFX11-NEXT: v_add3_u32 v3, v9, v1, 0x7fff
-; GFX11-NEXT: v_and_or_b32 v5, v1, s0, 0x400000
-; GFX11-NEXT: v_and_or_b32 v9, v4, s0, 0x400000
-; GFX11-NEXT: v_cndmask_b32_e32 v0, v7, v8, vcc_lo
+; GFX11-NEXT: v_bfe_u32 v6, v4, 16, 1
+; GFX11-NEXT: v_or_b32_e32 v3, 0x400000, v1
+; GFX11-NEXT: v_or_b32_e32 v5, 0x400000, v4
+; GFX11-NEXT: v_cndmask_b32_e32 v0, v7, v9, vcc_lo
; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_2) | instid1(VALU_DEP_3)
-; GFX11-NEXT: v_cndmask_b32_e32 v1, v3, v5, vcc_lo
+; GFX11-NEXT: v_add3_u32 v6, v6, v4, 0x7fff
+; GFX11-NEXT: v_cndmask_b32_e32 v1, v8, v3, vcc_lo
; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v4, v4
-; GFX11-NEXT: v_cndmask_b32_e32 v3, v6, v9, vcc_lo
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_4)
; GFX11-NEXT: v_perm_b32 v0, v1, v0, 0x7060302
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2)
+; GFX11-NEXT: v_cndmask_b32_e32 v3, v6, v5, vcc_lo
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
; GFX11-NEXT: v_perm_b32 v1, v3, v2, 0x7060302
; GFX11-NEXT: s_setpc_b64 s[30:31]
%op = uitofp <4 x i64> %x to <4 x bfloat>
@@ -40088,8 +39259,7 @@ define bfloat @v_fma_bf16(bfloat %a, bfloat %b, bfloat %c) {
; GFX8-NEXT: v_bfe_u32 v1, v0, 16, 1
; GFX8-NEXT: v_add_u32_e32 v1, vcc, v1, v0
; GFX8-NEXT: v_add_u32_e32 v1, vcc, 0x7fff, v1
-; GFX8-NEXT: v_and_b32_e32 v2, 0xff800000, v0
-; GFX8-NEXT: v_or_b32_e32 v2, 0x400000, v2
+; GFX8-NEXT: v_or_b32_e32 v2, 0x400000, v0
; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v0, v0
; GFX8-NEXT: v_cndmask_b32_e32 v0, v1, v2, vcc
; GFX8-NEXT: v_lshrrev_b32_e32 v0, 16, v0
@@ -40104,9 +39274,8 @@ define bfloat @v_fma_bf16(bfloat %a, bfloat %b, bfloat %c) {
; GFX9-NEXT: v_fma_f32 v0, v0, v1, v2
; GFX9-NEXT: v_bfe_u32 v1, v0, 16, 1
; GFX9-NEXT: s_movk_i32 s4, 0x7fff
-; GFX9-NEXT: v_and_b32_e32 v2, 0xff800000, v0
; GFX9-NEXT: v_add3_u32 v1, v1, v0, s4
-; GFX9-NEXT: v_or_b32_e32 v2, 0x400000, v2
+; GFX9-NEXT: v_or_b32_e32 v2, 0x400000, v0
; GFX9-NEXT: v_cmp_u_f32_e32 vcc, v0, v0
; GFX9-NEXT: v_cndmask_b32_e32 v0, v1, v2, vcc
; GFX9-NEXT: v_lshrrev_b32_e32 v0, 16, v0
@@ -40118,10 +39287,9 @@ define bfloat @v_fma_bf16(bfloat %a, bfloat %b, bfloat %c) {
; GFX10-NEXT: v_lshlrev_b32_e32 v2, 16, v2
; GFX10-NEXT: v_lshlrev_b32_e32 v1, 16, v1
; GFX10-NEXT: v_lshlrev_b32_e32 v0, 16, v0
-; GFX10-NEXT: s_mov_b32 s4, 0xff800000
; GFX10-NEXT: v_fmac_f32_e32 v2, v0, v1
; GFX10-NEXT: v_bfe_u32 v0, v2, 16, 1
-; GFX10-NEXT: v_and_or_b32 v1, v2, s4, 0x400000
+; GFX10-NEXT: v_or_b32_e32 v1, 0x400000, v2
; GFX10-NEXT: v_cmp_u_f32_e32 vcc_lo, v2, v2
; GFX10-NEXT: v_add3_u32 v0, v0, v2, 0x7fff
; GFX10-NEXT: v_cndmask_b32_e32 v0, v0, v1, vcc_lo
@@ -40134,11 +39302,10 @@ define bfloat @v_fma_bf16(bfloat %a, bfloat %b, bfloat %c) {
; GFX11-NEXT: v_lshlrev_b32_e32 v2, 16, v2
; GFX11-NEXT: v_lshlrev_b32_e32 v1, 16, v1
; GFX11-NEXT: v_lshlrev_b32_e32 v0, 16, v0
-; GFX11-NEXT: s_mov_b32 s0, 0xff800000
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
; GFX11-NEXT: v_fmac_f32_e32 v2, v0, v1
; GFX11-NEXT: v_bfe_u32 v0, v2, 16, 1
-; GFX11-NEXT: v_and_or_b32 v1, v2, s0, 0x400000
+; GFX11-NEXT: v_or_b32_e32 v1, 0x400000, v2
; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v2, v2
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1)
; GFX11-NEXT: v_add3_u32 v0, v0, v2, 0x7fff
@@ -40206,16 +39373,14 @@ define <2 x bfloat> @v_fma_v2bf16(<2 x bfloat> %a, <2 x bfloat> %b, <2 x bfloat>
; GFX8-NEXT: v_and_b32_e32 v1, 0xffff0000, v1
; GFX8-NEXT: v_and_b32_e32 v0, 0xffff0000, v0
; GFX8-NEXT: v_add_u32_e32 v4, vcc, 0x7fff, v4
-; GFX8-NEXT: v_and_b32_e32 v5, 0xff800000, v3
; GFX8-NEXT: v_fma_f32 v0, v0, v1, v2
-; GFX8-NEXT: v_or_b32_e32 v5, 0x400000, v5
+; GFX8-NEXT: v_or_b32_e32 v5, 0x400000, v3
; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v3, v3
; GFX8-NEXT: v_bfe_u32 v1, v0, 16, 1
; GFX8-NEXT: v_cndmask_b32_e32 v3, v4, v5, vcc
; GFX8-NEXT: v_add_u32_e32 v1, vcc, v1, v0
; GFX8-NEXT: v_add_u32_e32 v1, vcc, 0x7fff, v1
-; GFX8-NEXT: v_and_b32_e32 v2, 0xff800000, v0
-; GFX8-NEXT: v_or_b32_e32 v2, 0x400000, v2
+; GFX8-NEXT: v_or_b32_e32 v2, 0x400000, v0
; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v0, v0
; GFX8-NEXT: v_cndmask_b32_e32 v0, v1, v2, vcc
; GFX8-NEXT: v_lshrrev_b32_e32 v0, 16, v0
@@ -40234,16 +39399,14 @@ define <2 x bfloat> @v_fma_v2bf16(<2 x bfloat> %a, <2 x bfloat> %b, <2 x bfloat>
; GFX9-NEXT: v_and_b32_e32 v0, 0xffff0000, v0
; GFX9-NEXT: v_bfe_u32 v4, v3, 16, 1
; GFX9-NEXT: s_movk_i32 s4, 0x7fff
-; GFX9-NEXT: v_and_b32_e32 v5, 0xff800000, v3
; GFX9-NEXT: v_fma_f32 v0, v0, v1, v2
; GFX9-NEXT: v_add3_u32 v4, v4, v3, s4
-; GFX9-NEXT: v_or_b32_e32 v5, 0x400000, v5
+; GFX9-NEXT: v_or_b32_e32 v5, 0x400000, v3
; GFX9-NEXT: v_cmp_u_f32_e32 vcc, v3, v3
; GFX9-NEXT: v_bfe_u32 v1, v0, 16, 1
-; GFX9-NEXT: v_and_b32_e32 v2, 0xff800000, v0
; GFX9-NEXT: v_cndmask_b32_e32 v3, v4, v5, vcc
; GFX9-NEXT: v_add3_u32 v1, v1, v0, s4
-; GFX9-NEXT: v_or_b32_e32 v2, 0x400000, v2
+; GFX9-NEXT: v_or_b32_e32 v2, 0x400000, v0
; GFX9-NEXT: v_cmp_u_f32_e32 vcc, v0, v0
; GFX9-NEXT: v_cndmask_b32_e32 v0, v1, v2, vcc
; GFX9-NEXT: s_mov_b32 s4, 0x7060302
@@ -40259,14 +39422,13 @@ define <2 x bfloat> @v_fma_v2bf16(<2 x bfloat> %a, <2 x bfloat> %b, <2 x bfloat>
; GFX10-NEXT: v_and_b32_e32 v2, 0xffff0000, v2
; GFX10-NEXT: v_and_b32_e32 v1, 0xffff0000, v1
; GFX10-NEXT: v_and_b32_e32 v0, 0xffff0000, v0
-; GFX10-NEXT: s_mov_b32 s4, 0xff800000
; GFX10-NEXT: v_fmac_f32_e32 v3, v5, v4
; GFX10-NEXT: v_fmac_f32_e32 v2, v0, v1
; GFX10-NEXT: v_bfe_u32 v0, v3, 16, 1
-; GFX10-NEXT: v_and_or_b32 v4, v3, s4, 0x400000
+; GFX10-NEXT: v_or_b32_e32 v4, 0x400000, v3
; GFX10-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3
; GFX10-NEXT: v_bfe_u32 v1, v2, 16, 1
-; GFX10-NEXT: v_and_or_b32 v5, v2, s4, 0x400000
+; GFX10-NEXT: v_or_b32_e32 v5, 0x400000, v2
; GFX10-NEXT: v_add3_u32 v0, v0, v3, 0x7fff
; GFX10-NEXT: v_add3_u32 v1, v1, v2, 0x7fff
; GFX10-NEXT: v_cndmask_b32_e32 v0, v0, v4, vcc_lo
@@ -40284,15 +39446,14 @@ define <2 x bfloat> @v_fma_v2bf16(<2 x bfloat> %a, <2 x bfloat> %b, <2 x bfloat>
; GFX11-NEXT: v_and_b32_e32 v0, 0xffff0000, v0
; GFX11-NEXT: v_lshlrev_b32_e32 v3, 16, v2
; GFX11-NEXT: v_and_b32_e32 v2, 0xffff0000, v2
-; GFX11-NEXT: s_mov_b32 s0, 0xff800000
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
; GFX11-NEXT: v_dual_fmac_f32 v2, v0, v1 :: v_dual_fmac_f32 v3, v5, v4
; GFX11-NEXT: v_bfe_u32 v1, v2, 16, 1
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2)
; GFX11-NEXT: v_bfe_u32 v0, v3, 16, 1
-; GFX11-NEXT: v_and_or_b32 v4, v3, s0, 0x400000
+; GFX11-NEXT: v_or_b32_e32 v4, 0x400000, v3
; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3
-; GFX11-NEXT: v_and_or_b32 v5, v2, s0, 0x400000
+; GFX11-NEXT: v_or_b32_e32 v5, 0x400000, v2
; GFX11-NEXT: v_add3_u32 v1, v1, v2, 0x7fff
; GFX11-NEXT: v_add3_u32 v0, v0, v3, 0x7fff
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_4)
@@ -40375,8 +39536,7 @@ define <3 x bfloat> @v_fma_v3bf16(<3 x bfloat> %a, <3 x bfloat> %b, <3 x bfloat>
; GFX8-NEXT: v_bfe_u32 v3, v1, 16, 1
; GFX8-NEXT: v_add_u32_e32 v3, vcc, v3, v1
; GFX8-NEXT: v_add_u32_e32 v3, vcc, 0x7fff, v3
-; GFX8-NEXT: v_and_b32_e32 v5, 0xff800000, v1
-; GFX8-NEXT: v_or_b32_e32 v5, 0x400000, v5
+; GFX8-NEXT: v_or_b32_e32 v5, 0x400000, v1
; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v1, v1
; GFX8-NEXT: v_cndmask_b32_e32 v1, v3, v5, vcc
; GFX8-NEXT: v_lshlrev_b32_e32 v3, 16, v4
@@ -40390,16 +39550,14 @@ define <3 x bfloat> @v_fma_v3bf16(<3 x bfloat> %a, <3 x bfloat> %b, <3 x bfloat>
; GFX8-NEXT: v_and_b32_e32 v2, 0xffff0000, v2
; GFX8-NEXT: v_and_b32_e32 v0, 0xffff0000, v0
; GFX8-NEXT: v_add_u32_e32 v5, vcc, s4, v5
-; GFX8-NEXT: v_and_b32_e32 v6, 0xff800000, v3
; GFX8-NEXT: v_fma_f32 v0, v0, v2, v4
-; GFX8-NEXT: v_or_b32_e32 v6, 0x400000, v6
+; GFX8-NEXT: v_or_b32_e32 v6, 0x400000, v3
; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v3, v3
; GFX8-NEXT: v_bfe_u32 v2, v0, 16, 1
; GFX8-NEXT: v_cndmask_b32_e32 v3, v5, v6, vcc
; GFX8-NEXT: v_add_u32_e32 v2, vcc, v2, v0
; GFX8-NEXT: v_add_u32_e32 v2, vcc, 0x7fff, v2
-; GFX8-NEXT: v_and_b32_e32 v4, 0xff800000, v0
-; GFX8-NEXT: v_or_b32_e32 v4, 0x400000, v4
+; GFX8-NEXT: v_or_b32_e32 v4, 0x400000, v0
; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v0, v0
; GFX8-NEXT: v_cndmask_b32_e32 v0, v2, v4, vcc
; GFX8-NEXT: v_lshrrev_b32_e32 v0, 16, v0
@@ -40416,9 +39574,8 @@ define <3 x bfloat> @v_fma_v3bf16(<3 x bfloat> %a, <3 x bfloat> %b, <3 x bfloat>
; GFX9-NEXT: v_fma_f32 v1, v1, v3, v5
; GFX9-NEXT: v_bfe_u32 v3, v1, 16, 1
; GFX9-NEXT: s_movk_i32 s4, 0x7fff
-; GFX9-NEXT: v_and_b32_e32 v5, 0xff800000, v1
; GFX9-NEXT: v_add3_u32 v3, v3, v1, s4
-; GFX9-NEXT: v_or_b32_e32 v5, 0x400000, v5
+; GFX9-NEXT: v_or_b32_e32 v5, 0x400000, v1
; GFX9-NEXT: v_cmp_u_f32_e32 vcc, v1, v1
; GFX9-NEXT: v_cndmask_b32_e32 v1, v3, v5, vcc
; GFX9-NEXT: v_lshlrev_b32_e32 v3, 16, v4
@@ -40429,16 +39586,14 @@ define <3 x bfloat> @v_fma_v3bf16(<3 x bfloat> %a, <3 x bfloat> %b, <3 x bfloat>
; GFX9-NEXT: v_and_b32_e32 v2, 0xffff0000, v2
; GFX9-NEXT: v_and_b32_e32 v0, 0xffff0000, v0
; GFX9-NEXT: v_bfe_u32 v5, v3, 16, 1
-; GFX9-NEXT: v_and_b32_e32 v6, 0xff800000, v3
; GFX9-NEXT: v_fma_f32 v0, v0, v2, v4
; GFX9-NEXT: v_add3_u32 v5, v5, v3, s4
-; GFX9-NEXT: v_or_b32_e32 v6, 0x400000, v6
+; GFX9-NEXT: v_or_b32_e32 v6, 0x400000, v3
; GFX9-NEXT: v_cmp_u_f32_e32 vcc, v3, v3
; GFX9-NEXT: v_bfe_u32 v2, v0, 16, 1
-; GFX9-NEXT: v_and_b32_e32 v4, 0xff800000, v0
; GFX9-NEXT: v_cndmask_b32_e32 v3, v5, v6, vcc
; GFX9-NEXT: v_add3_u32 v2, v2, v0, s4
-; GFX9-NEXT: v_or_b32_e32 v4, 0x400000, v4
+; GFX9-NEXT: v_or_b32_e32 v4, 0x400000, v0
; GFX9-NEXT: v_cmp_u_f32_e32 vcc, v0, v0
; GFX9-NEXT: v_cndmask_b32_e32 v0, v2, v4, vcc
; GFX9-NEXT: s_mov_b32 s4, 0x7060302
@@ -40460,16 +39615,15 @@ define <3 x bfloat> @v_fma_v3bf16(<3 x bfloat> %a, <3 x bfloat> %b, <3 x bfloat>
; GFX10-NEXT: v_and_b32_e32 v0, 0xffff0000, v0
; GFX10-NEXT: v_fmac_f32_e32 v6, v8, v7
; GFX10-NEXT: v_fmac_f32_e32 v5, v1, v3
-; GFX10-NEXT: s_mov_b32 s4, 0xff800000
; GFX10-NEXT: v_fmac_f32_e32 v4, v0, v2
; GFX10-NEXT: v_bfe_u32 v1, v6, 16, 1
-; GFX10-NEXT: v_and_or_b32 v3, v6, s4, 0x400000
+; GFX10-NEXT: v_or_b32_e32 v3, 0x400000, v6
; GFX10-NEXT: v_cmp_u_f32_e32 vcc_lo, v6, v6
; GFX10-NEXT: v_bfe_u32 v0, v5, 16, 1
; GFX10-NEXT: v_bfe_u32 v2, v4, 16, 1
; GFX10-NEXT: v_add3_u32 v1, v1, v6, 0x7fff
-; GFX10-NEXT: v_and_or_b32 v7, v4, s4, 0x400000
-; GFX10-NEXT: v_and_or_b32 v8, v5, s4, 0x400000
+; GFX10-NEXT: v_or_b32_e32 v7, 0x400000, v4
+; GFX10-NEXT: v_or_b32_e32 v8, 0x400000, v5
; GFX10-NEXT: v_add3_u32 v0, v0, v5, 0x7fff
; GFX10-NEXT: v_add3_u32 v2, v2, v4, 0x7fff
; GFX10-NEXT: v_cndmask_b32_e32 v1, v1, v3, vcc_lo
@@ -40572,17 +39726,15 @@ define <4 x bfloat> @v_fma_v4bf16(<4 x bfloat> %a, <4 x bfloat> %b, <4 x bfloat>
; GFX8-NEXT: v_and_b32_e32 v3, 0xffff0000, v3
; GFX8-NEXT: v_and_b32_e32 v1, 0xffff0000, v1
; GFX8-NEXT: v_add_u32_e32 v7, vcc, 0x7fff, v7
-; GFX8-NEXT: v_and_b32_e32 v8, 0xff800000, v6
; GFX8-NEXT: v_fma_f32 v1, v1, v3, v5
-; GFX8-NEXT: v_or_b32_e32 v8, 0x400000, v8
+; GFX8-NEXT: v_or_b32_e32 v8, 0x400000, v6
; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v6, v6
; GFX8-NEXT: v_bfe_u32 v3, v1, 16, 1
; GFX8-NEXT: s_movk_i32 s4, 0x7fff
; GFX8-NEXT: v_cndmask_b32_e32 v6, v7, v8, vcc
; GFX8-NEXT: v_add_u32_e32 v3, vcc, v3, v1
; GFX8-NEXT: v_add_u32_e32 v3, vcc, s4, v3
-; GFX8-NEXT: v_and_b32_e32 v5, 0xff800000, v1
-; GFX8-NEXT: v_or_b32_e32 v5, 0x400000, v5
+; GFX8-NEXT: v_or_b32_e32 v5, 0x400000, v1
; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v1, v1
; GFX8-NEXT: v_cndmask_b32_e32 v1, v3, v5, vcc
; GFX8-NEXT: v_lshlrev_b32_e32 v3, 16, v4
@@ -40595,16 +39747,14 @@ define <4 x bfloat> @v_fma_v4bf16(<4 x bfloat> %a, <4 x bfloat> %b, <4 x bfloat>
; GFX8-NEXT: v_and_b32_e32 v2, 0xffff0000, v2
; GFX8-NEXT: v_and_b32_e32 v0, 0xffff0000, v0
; GFX8-NEXT: v_add_u32_e32 v5, vcc, s4, v5
-; GFX8-NEXT: v_and_b32_e32 v7, 0xff800000, v3
; GFX8-NEXT: v_fma_f32 v0, v0, v2, v4
-; GFX8-NEXT: v_or_b32_e32 v7, 0x400000, v7
+; GFX8-NEXT: v_or_b32_e32 v7, 0x400000, v3
; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v3, v3
; GFX8-NEXT: v_bfe_u32 v2, v0, 16, 1
; GFX8-NEXT: v_cndmask_b32_e32 v3, v5, v7, vcc
; GFX8-NEXT: v_add_u32_e32 v2, vcc, v2, v0
; GFX8-NEXT: v_add_u32_e32 v2, vcc, 0x7fff, v2
-; GFX8-NEXT: v_and_b32_e32 v4, 0xff800000, v0
-; GFX8-NEXT: v_or_b32_e32 v4, 0x400000, v4
+; GFX8-NEXT: v_or_b32_e32 v4, 0x400000, v0
; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v0, v0
; GFX8-NEXT: v_cndmask_b32_e32 v0, v2, v4, vcc
; GFX8-NEXT: v_lshrrev_b32_e32 v1, 16, v1
@@ -40625,16 +39775,14 @@ define <4 x bfloat> @v_fma_v4bf16(<4 x bfloat> %a, <4 x bfloat> %b, <4 x bfloat>
; GFX9-NEXT: v_and_b32_e32 v1, 0xffff0000, v1
; GFX9-NEXT: v_bfe_u32 v7, v6, 16, 1
; GFX9-NEXT: s_movk_i32 s4, 0x7fff
-; GFX9-NEXT: v_and_b32_e32 v8, 0xff800000, v6
; GFX9-NEXT: v_fma_f32 v1, v1, v3, v5
; GFX9-NEXT: v_add3_u32 v7, v7, v6, s4
-; GFX9-NEXT: v_or_b32_e32 v8, 0x400000, v8
+; GFX9-NEXT: v_or_b32_e32 v8, 0x400000, v6
; GFX9-NEXT: v_cmp_u_f32_e32 vcc, v6, v6
; GFX9-NEXT: v_bfe_u32 v3, v1, 16, 1
-; GFX9-NEXT: v_and_b32_e32 v5, 0xff800000, v1
; GFX9-NEXT: v_cndmask_b32_e32 v6, v7, v8, vcc
; GFX9-NEXT: v_add3_u32 v3, v3, v1, s4
-; GFX9-NEXT: v_or_b32_e32 v5, 0x400000, v5
+; GFX9-NEXT: v_or_b32_e32 v5, 0x400000, v1
; GFX9-NEXT: v_cmp_u_f32_e32 vcc, v1, v1
; GFX9-NEXT: v_cndmask_b32_e32 v1, v3, v5, vcc
; GFX9-NEXT: v_lshlrev_b32_e32 v3, 16, v4
@@ -40645,16 +39793,14 @@ define <4 x bfloat> @v_fma_v4bf16(<4 x bfloat> %a, <4 x bfloat> %b, <4 x bfloat>
; GFX9-NEXT: v_and_b32_e32 v2, 0xffff0000, v2
; GFX9-NEXT: v_and_b32_e32 v0, 0xffff0000, v0
; GFX9-NEXT: v_bfe_u32 v5, v3, 16, 1
-; GFX9-NEXT: v_and_b32_e32 v7, 0xff800000, v3
; GFX9-NEXT: v_fma_f32 v0, v0, v2, v4
; GFX9-NEXT: v_add3_u32 v5, v5, v3, s4
-; GFX9-NEXT: v_or_b32_e32 v7, 0x400000, v7
+; GFX9-NEXT: v_or_b32_e32 v7, 0x400000, v3
; GFX9-NEXT: v_cmp_u_f32_e32 vcc, v3, v3
; GFX9-NEXT: v_bfe_u32 v2, v0, 16, 1
-; GFX9-NEXT: v_and_b32_e32 v4, 0xff800000, v0
; GFX9-NEXT: v_cndmask_b32_e32 v3, v5, v7, vcc
; GFX9-NEXT: v_add3_u32 v2, v2, v0, s4
-; GFX9-NEXT: v_or_b32_e32 v4, 0x400000, v4
+; GFX9-NEXT: v_or_b32_e32 v4, 0x400000, v0
; GFX9-NEXT: v_cmp_u_f32_e32 vcc, v0, v0
; GFX9-NEXT: v_cndmask_b32_e32 v0, v2, v4, vcc
; GFX9-NEXT: s_mov_b32 s4, 0x7060302
@@ -40681,22 +39827,21 @@ define <4 x bfloat> @v_fma_v4bf16(<4 x bfloat> %a, <4 x bfloat> %b, <4 x bfloat>
; GFX10-NEXT: v_bfe_u32 v10, v6, 16, 1
; GFX10-NEXT: v_fmac_f32_e32 v5, v1, v3
; GFX10-NEXT: v_fmac_f32_e32 v7, v9, v8
-; GFX10-NEXT: s_mov_b32 s4, 0xff800000
+; GFX10-NEXT: v_or_b32_e32 v1, 0x400000, v6
; GFX10-NEXT: v_fmac_f32_e32 v4, v0, v2
; GFX10-NEXT: v_add3_u32 v0, v10, v6, 0x7fff
-; GFX10-NEXT: v_and_or_b32 v1, v6, s4, 0x400000
; GFX10-NEXT: v_bfe_u32 v2, v5, 16, 1
; GFX10-NEXT: v_bfe_u32 v3, v7, 16, 1
; GFX10-NEXT: v_cmp_u_f32_e32 vcc_lo, v6, v6
; GFX10-NEXT: v_bfe_u32 v8, v4, 16, 1
-; GFX10-NEXT: v_and_or_b32 v9, v5, s4, 0x400000
+; GFX10-NEXT: v_or_b32_e32 v9, 0x400000, v5
; GFX10-NEXT: v_cndmask_b32_e32 v1, v0, v1, vcc_lo
; GFX10-NEXT: v_add3_u32 v0, v2, v5, 0x7fff
; GFX10-NEXT: v_add3_u32 v2, v3, v7, 0x7fff
-; GFX10-NEXT: v_and_or_b32 v3, v7, s4, 0x400000
+; GFX10-NEXT: v_or_b32_e32 v3, 0x400000, v7
; GFX10-NEXT: v_cmp_u_f32_e32 vcc_lo, v7, v7
; GFX10-NEXT: v_add3_u32 v6, v8, v4, 0x7fff
-; GFX10-NEXT: v_and_or_b32 v8, v4, s4, 0x400000
+; GFX10-NEXT: v_or_b32_e32 v8, 0x400000, v4
; GFX10-NEXT: v_cndmask_b32_e32 v2, v2, v3, vcc_lo
; GFX10-NEXT: v_cmp_u_f32_e32 vcc_lo, v4, v4
; GFX10-NEXT: v_cndmask_b32_e32 v3, v6, v8, vcc_lo
@@ -40717,14 +39862,13 @@ define <4 x bfloat> @v_fma_v4bf16(<4 x bfloat> %a, <4 x bfloat> %b, <4 x bfloat>
; GFX11-NEXT: v_and_b32_e32 v3, 0xffff0000, v3
; GFX11-NEXT: v_lshlrev_b32_e32 v6, 16, v5
; GFX11-NEXT: v_and_b32_e32 v5, 0xffff0000, v5
-; GFX11-NEXT: s_mov_b32 s0, 0xff800000
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_3)
; GFX11-NEXT: v_fmac_f32_e32 v5, v1, v3
; GFX11-NEXT: v_dual_fmac_f32 v6, v8, v7 :: v_dual_lshlrev_b32 v7, 16, v4
; GFX11-NEXT: v_and_b32_e32 v4, 0xffff0000, v4
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_4) | instid1(VALU_DEP_1)
; GFX11-NEXT: v_bfe_u32 v10, v6, 16, 1
-; GFX11-NEXT: v_and_or_b32 v1, v6, s0, 0x400000
+; GFX11-NEXT: v_or_b32_e32 v1, 0x400000, v6
; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v6, v6
; GFX11-NEXT: v_lshlrev_b32_e32 v8, 16, v2
; GFX11-NEXT: v_and_b32_e32 v2, 0xffff0000, v2
@@ -40736,14 +39880,14 @@ define <4 x bfloat> @v_fma_v4bf16(<4 x bfloat> %a, <4 x bfloat> %b, <4 x bfloat>
; GFX11-NEXT: v_fmac_f32_e32 v7, v9, v8
; GFX11-NEXT: v_bfe_u32 v8, v4, 16, 1
; GFX11-NEXT: v_add3_u32 v0, v2, v5, 0x7fff
-; GFX11-NEXT: v_and_or_b32 v9, v5, s0, 0x400000
+; GFX11-NEXT: v_or_b32_e32 v9, 0x400000, v5
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_3) | instid1(VALU_DEP_4)
; GFX11-NEXT: v_bfe_u32 v3, v7, 16, 1
; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v7, v7
; GFX11-NEXT: v_add3_u32 v6, v8, v4, 0x7fff
-; GFX11-NEXT: v_and_or_b32 v8, v4, s0, 0x400000
+; GFX11-NEXT: v_or_b32_e32 v8, 0x400000, v4
; GFX11-NEXT: v_add3_u32 v2, v3, v7, 0x7fff
-; GFX11-NEXT: v_and_or_b32 v3, v7, s0, 0x400000
+; GFX11-NEXT: v_or_b32_e32 v3, 0x400000, v7
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_4) | instid1(VALU_DEP_3)
; GFX11-NEXT: v_cndmask_b32_e32 v2, v2, v3, vcc_lo
; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v4, v4
@@ -40803,8 +39947,7 @@ define bfloat @v_fmuladd_bf16(bfloat %a, bfloat %b, bfloat %c) {
; GFX8-NEXT: v_bfe_u32 v1, v0, 16, 1
; GFX8-NEXT: v_add_u32_e32 v1, vcc, v1, v0
; GFX8-NEXT: v_add_u32_e32 v1, vcc, 0x7fff, v1
-; GFX8-NEXT: v_and_b32_e32 v3, 0xff800000, v0
-; GFX8-NEXT: v_or_b32_e32 v3, 0x400000, v3
+; GFX8-NEXT: v_or_b32_e32 v3, 0x400000, v0
; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v0, v0
; GFX8-NEXT: v_cndmask_b32_e32 v0, v1, v3, vcc
; GFX8-NEXT: v_and_b32_e32 v0, 0xffff0000, v0
@@ -40813,8 +39956,7 @@ define bfloat @v_fmuladd_bf16(bfloat %a, bfloat %b, bfloat %c) {
; GFX8-NEXT: v_bfe_u32 v1, v0, 16, 1
; GFX8-NEXT: v_add_u32_e32 v1, vcc, v1, v0
; GFX8-NEXT: v_add_u32_e32 v1, vcc, 0x7fff, v1
-; GFX8-NEXT: v_and_b32_e32 v2, 0xff800000, v0
-; GFX8-NEXT: v_or_b32_e32 v2, 0x400000, v2
+; GFX8-NEXT: v_or_b32_e32 v2, 0x400000, v0
; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v0, v0
; GFX8-NEXT: v_cndmask_b32_e32 v0, v1, v2, vcc
; GFX8-NEXT: v_lshrrev_b32_e32 v0, 16, v0
@@ -40828,18 +39970,16 @@ define bfloat @v_fmuladd_bf16(bfloat %a, bfloat %b, bfloat %c) {
; GFX9-NEXT: v_mul_f32_e32 v0, v0, v1
; GFX9-NEXT: v_bfe_u32 v1, v0, 16, 1
; GFX9-NEXT: s_movk_i32 s4, 0x7fff
-; GFX9-NEXT: v_and_b32_e32 v3, 0xff800000, v0
; GFX9-NEXT: v_add3_u32 v1, v1, v0, s4
-; GFX9-NEXT: v_or_b32_e32 v3, 0x400000, v3
+; GFX9-NEXT: v_or_b32_e32 v3, 0x400000, v0
; GFX9-NEXT: v_cmp_u_f32_e32 vcc, v0, v0
; GFX9-NEXT: v_cndmask_b32_e32 v0, v1, v3, vcc
; GFX9-NEXT: v_and_b32_e32 v0, 0xffff0000, v0
; GFX9-NEXT: v_lshlrev_b32_e32 v1, 16, v2
; GFX9-NEXT: v_add_f32_e32 v0, v0, v1
; GFX9-NEXT: v_bfe_u32 v1, v0, 16, 1
-; GFX9-NEXT: v_and_b32_e32 v2, 0xff800000, v0
; GFX9-NEXT: v_add3_u32 v1, v1, v0, s4
-; GFX9-NEXT: v_or_b32_e32 v2, 0x400000, v2
+; GFX9-NEXT: v_or_b32_e32 v2, 0x400000, v0
; GFX9-NEXT: v_cmp_u_f32_e32 vcc, v0, v0
; GFX9-NEXT: v_cndmask_b32_e32 v0, v1, v2, vcc
; GFX9-NEXT: v_lshrrev_b32_e32 v0, 16, v0
@@ -40850,10 +39990,9 @@ define bfloat @v_fmuladd_bf16(bfloat %a, bfloat %b, bfloat %c) {
; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX10-NEXT: v_lshlrev_b32_e32 v1, 16, v1
; GFX10-NEXT: v_lshlrev_b32_e32 v0, 16, v0
-; GFX10-NEXT: s_mov_b32 s4, 0xff800000
; GFX10-NEXT: v_mul_f32_e32 v0, v0, v1
; GFX10-NEXT: v_bfe_u32 v1, v0, 16, 1
-; GFX10-NEXT: v_and_or_b32 v3, v0, s4, 0x400000
+; GFX10-NEXT: v_or_b32_e32 v3, 0x400000, v0
; GFX10-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0
; GFX10-NEXT: v_add3_u32 v1, v1, v0, 0x7fff
; GFX10-NEXT: v_cndmask_b32_e32 v0, v1, v3, vcc_lo
@@ -40861,7 +40000,7 @@ define bfloat @v_fmuladd_bf16(bfloat %a, bfloat %b, bfloat %c) {
; GFX10-NEXT: v_and_b32_e32 v0, 0xffff0000, v0
; GFX10-NEXT: v_add_f32_e32 v0, v0, v1
; GFX10-NEXT: v_bfe_u32 v1, v0, 16, 1
-; GFX10-NEXT: v_and_or_b32 v2, v0, s4, 0x400000
+; GFX10-NEXT: v_or_b32_e32 v2, 0x400000, v0
; GFX10-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0
; GFX10-NEXT: v_add3_u32 v1, v1, v0, 0x7fff
; GFX10-NEXT: v_cndmask_b32_e32 v0, v1, v2, vcc_lo
@@ -40873,11 +40012,10 @@ define bfloat @v_fmuladd_bf16(bfloat %a, bfloat %b, bfloat %c) {
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-NEXT: v_lshlrev_b32_e32 v1, 16, v1
; GFX11-NEXT: v_lshlrev_b32_e32 v0, 16, v0
-; GFX11-NEXT: s_mov_b32 s0, 0xff800000
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
; GFX11-NEXT: v_mul_f32_e32 v0, v0, v1
; GFX11-NEXT: v_bfe_u32 v1, v0, 16, 1
-; GFX11-NEXT: v_and_or_b32 v3, v0, s0, 0x400000
+; GFX11-NEXT: v_or_b32_e32 v3, 0x400000, v0
; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1)
; GFX11-NEXT: v_add3_u32 v1, v1, v0, 0x7fff
@@ -40887,7 +40025,7 @@ define bfloat @v_fmuladd_bf16(bfloat %a, bfloat %b, bfloat %c) {
; GFX11-NEXT: v_add_f32_e32 v0, v0, v1
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
; GFX11-NEXT: v_bfe_u32 v1, v0, 16, 1
-; GFX11-NEXT: v_and_or_b32 v2, v0, s0, 0x400000
+; GFX11-NEXT: v_or_b32_e32 v2, 0x400000, v0
; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0
; GFX11-NEXT: v_add3_u32 v1, v1, v0, 0x7fff
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
@@ -40958,8 +40096,7 @@ define <2 x bfloat> @v_fmuladd_v2bf16(<2 x bfloat> %a, <2 x bfloat> %b, <2 x bfl
; GFX8-NEXT: v_bfe_u32 v4, v3, 16, 1
; GFX8-NEXT: v_add_u32_e32 v4, vcc, v4, v3
; GFX8-NEXT: v_add_u32_e32 v4, vcc, 0x7fff, v4
-; GFX8-NEXT: v_and_b32_e32 v5, 0xff800000, v3
-; GFX8-NEXT: v_or_b32_e32 v5, 0x400000, v5
+; GFX8-NEXT: v_or_b32_e32 v5, 0x400000, v3
; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v3, v3
; GFX8-NEXT: v_cndmask_b32_e32 v3, v4, v5, vcc
; GFX8-NEXT: v_and_b32_e32 v3, 0xffff0000, v3
@@ -40971,16 +40108,14 @@ define <2 x bfloat> @v_fmuladd_v2bf16(<2 x bfloat> %a, <2 x bfloat> %b, <2 x bfl
; GFX8-NEXT: v_and_b32_e32 v1, 0xffff0000, v1
; GFX8-NEXT: v_and_b32_e32 v0, 0xffff0000, v0
; GFX8-NEXT: v_add_u32_e32 v4, vcc, s4, v4
-; GFX8-NEXT: v_and_b32_e32 v5, 0xff800000, v3
; GFX8-NEXT: v_mul_f32_e32 v0, v0, v1
-; GFX8-NEXT: v_or_b32_e32 v5, 0x400000, v5
+; GFX8-NEXT: v_or_b32_e32 v5, 0x400000, v3
; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v3, v3
; GFX8-NEXT: v_bfe_u32 v1, v0, 16, 1
; GFX8-NEXT: v_cndmask_b32_e32 v3, v4, v5, vcc
; GFX8-NEXT: v_add_u32_e32 v1, vcc, v1, v0
; GFX8-NEXT: v_add_u32_e32 v1, vcc, s4, v1
-; GFX8-NEXT: v_and_b32_e32 v4, 0xff800000, v0
-; GFX8-NEXT: v_or_b32_e32 v4, 0x400000, v4
+; GFX8-NEXT: v_or_b32_e32 v4, 0x400000, v0
; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v0, v0
; GFX8-NEXT: v_cndmask_b32_e32 v0, v1, v4, vcc
; GFX8-NEXT: v_and_b32_e32 v0, 0xffff0000, v0
@@ -40989,8 +40124,7 @@ define <2 x bfloat> @v_fmuladd_v2bf16(<2 x bfloat> %a, <2 x bfloat> %b, <2 x bfl
; GFX8-NEXT: v_bfe_u32 v1, v0, 16, 1
; GFX8-NEXT: v_add_u32_e32 v1, vcc, v1, v0
; GFX8-NEXT: v_add_u32_e32 v1, vcc, 0x7fff, v1
-; GFX8-NEXT: v_and_b32_e32 v2, 0xff800000, v0
-; GFX8-NEXT: v_or_b32_e32 v2, 0x400000, v2
+; GFX8-NEXT: v_or_b32_e32 v2, 0x400000, v0
; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v0, v0
; GFX8-NEXT: v_cndmask_b32_e32 v0, v1, v2, vcc
; GFX8-NEXT: v_lshrrev_b32_e32 v0, 16, v0
@@ -41005,36 +40139,32 @@ define <2 x bfloat> @v_fmuladd_v2bf16(<2 x bfloat> %a, <2 x bfloat> %b, <2 x bfl
; GFX9-NEXT: v_mul_f32_e32 v3, v4, v3
; GFX9-NEXT: v_bfe_u32 v4, v3, 16, 1
; GFX9-NEXT: s_movk_i32 s4, 0x7fff
-; GFX9-NEXT: v_and_b32_e32 v5, 0xff800000, v3
; GFX9-NEXT: v_add3_u32 v4, v4, v3, s4
-; GFX9-NEXT: v_or_b32_e32 v5, 0x400000, v5
+; GFX9-NEXT: v_or_b32_e32 v5, 0x400000, v3
; GFX9-NEXT: v_cmp_u_f32_e32 vcc, v3, v3
; GFX9-NEXT: v_cndmask_b32_e32 v3, v4, v5, vcc
; GFX9-NEXT: v_and_b32_e32 v3, 0xffff0000, v3
; GFX9-NEXT: v_lshlrev_b32_e32 v4, 16, v2
; GFX9-NEXT: v_add_f32_e32 v3, v3, v4
-; GFX9-NEXT: v_bfe_u32 v4, v3, 16, 1
-; GFX9-NEXT: v_and_b32_e32 v5, 0xff800000, v3
; GFX9-NEXT: v_and_b32_e32 v1, 0xffff0000, v1
; GFX9-NEXT: v_and_b32_e32 v0, 0xffff0000, v0
+; GFX9-NEXT: v_bfe_u32 v4, v3, 16, 1
+; GFX9-NEXT: v_mul_f32_e32 v0, v0, v1
; GFX9-NEXT: v_add3_u32 v4, v4, v3, s4
-; GFX9-NEXT: v_or_b32_e32 v5, 0x400000, v5
+; GFX9-NEXT: v_or_b32_e32 v5, 0x400000, v3
; GFX9-NEXT: v_cmp_u_f32_e32 vcc, v3, v3
-; GFX9-NEXT: v_mul_f32_e32 v0, v0, v1
-; GFX9-NEXT: v_cndmask_b32_e32 v3, v4, v5, vcc
; GFX9-NEXT: v_bfe_u32 v1, v0, 16, 1
-; GFX9-NEXT: v_and_b32_e32 v4, 0xff800000, v0
+; GFX9-NEXT: v_cndmask_b32_e32 v3, v4, v5, vcc
; GFX9-NEXT: v_add3_u32 v1, v1, v0, s4
-; GFX9-NEXT: v_or_b32_e32 v4, 0x400000, v4
+; GFX9-NEXT: v_or_b32_e32 v4, 0x400000, v0
; GFX9-NEXT: v_cmp_u_f32_e32 vcc, v0, v0
; GFX9-NEXT: v_cndmask_b32_e32 v0, v1, v4, vcc
; GFX9-NEXT: v_and_b32_e32 v0, 0xffff0000, v0
; GFX9-NEXT: v_and_b32_e32 v1, 0xffff0000, v2
; GFX9-NEXT: v_add_f32_e32 v0, v0, v1
; GFX9-NEXT: v_bfe_u32 v1, v0, 16, 1
-; GFX9-NEXT: v_and_b32_e32 v2, 0xff800000, v0
; GFX9-NEXT: v_add3_u32 v1, v1, v0, s4
-; GFX9-NEXT: v_or_b32_e32 v2, 0x400000, v2
+; GFX9-NEXT: v_or_b32_e32 v2, 0x400000, v0
; GFX9-NEXT: v_cmp_u_f32_e32 vcc, v0, v0
; GFX9-NEXT: v_cndmask_b32_e32 v0, v1, v2, vcc
; GFX9-NEXT: s_mov_b32 s4, 0x7060302
@@ -41048,14 +40178,13 @@ define <2 x bfloat> @v_fmuladd_v2bf16(<2 x bfloat> %a, <2 x bfloat> %b, <2 x bfl
; GFX10-NEXT: v_lshlrev_b32_e32 v4, 16, v0
; GFX10-NEXT: v_and_b32_e32 v1, 0xffff0000, v1
; GFX10-NEXT: v_and_b32_e32 v0, 0xffff0000, v0
-; GFX10-NEXT: s_mov_b32 s4, 0xff800000
; GFX10-NEXT: v_mul_f32_e32 v3, v4, v3
; GFX10-NEXT: v_mul_f32_e32 v0, v0, v1
; GFX10-NEXT: v_bfe_u32 v1, v3, 16, 1
-; GFX10-NEXT: v_and_or_b32 v5, v3, s4, 0x400000
+; GFX10-NEXT: v_or_b32_e32 v5, 0x400000, v3
; GFX10-NEXT: v_bfe_u32 v4, v0, 16, 1
; GFX10-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3
-; GFX10-NEXT: v_and_or_b32 v6, v0, s4, 0x400000
+; GFX10-NEXT: v_or_b32_e32 v6, 0x400000, v0
; GFX10-NEXT: v_add3_u32 v1, v1, v3, 0x7fff
; GFX10-NEXT: v_lshlrev_b32_e32 v3, 16, v2
; GFX10-NEXT: v_add3_u32 v4, v4, v0, 0x7fff
@@ -41066,13 +40195,13 @@ define <2 x bfloat> @v_fmuladd_v2bf16(<2 x bfloat> %a, <2 x bfloat> %b, <2 x bfl
; GFX10-NEXT: v_cndmask_b32_e32 v0, v4, v6, vcc_lo
; GFX10-NEXT: v_add_f32_e32 v1, v1, v3
; GFX10-NEXT: v_and_b32_e32 v0, 0xffff0000, v0
-; GFX10-NEXT: v_and_or_b32 v4, v1, s4, 0x400000
+; GFX10-NEXT: v_or_b32_e32 v4, 0x400000, v1
; GFX10-NEXT: v_add_f32_e32 v0, v0, v2
; GFX10-NEXT: v_bfe_u32 v2, v1, 16, 1
; GFX10-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
; GFX10-NEXT: v_bfe_u32 v3, v0, 16, 1
; GFX10-NEXT: v_add3_u32 v2, v2, v1, 0x7fff
-; GFX10-NEXT: v_and_or_b32 v5, v0, s4, 0x400000
+; GFX10-NEXT: v_or_b32_e32 v5, 0x400000, v0
; GFX10-NEXT: v_add3_u32 v3, v3, v0, 0x7fff
; GFX10-NEXT: v_cndmask_b32_e32 v1, v2, v4, vcc_lo
; GFX10-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0
@@ -41086,19 +40215,17 @@ define <2 x bfloat> @v_fmuladd_v2bf16(<2 x bfloat> %a, <2 x bfloat> %b, <2 x bfl
; GFX11-NEXT: v_lshlrev_b32_e32 v3, 16, v1
; GFX11-NEXT: v_lshlrev_b32_e32 v4, 16, v0
; GFX11-NEXT: v_and_b32_e32 v1, 0xffff0000, v1
-; GFX11-NEXT: v_and_b32_e32 v0, 0xffff0000, v0
-; GFX11-NEXT: s_mov_b32 s0, 0xff800000
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_2)
-; GFX11-NEXT: v_mul_f32_e32 v3, v4, v3
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-NEXT: v_dual_mul_f32 v3, v4, v3 :: v_dual_and_b32 v0, 0xffff0000, v0
; GFX11-NEXT: v_mul_f32_e32 v0, v0, v1
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_2) | instid1(VALU_DEP_3)
; GFX11-NEXT: v_bfe_u32 v1, v3, 16, 1
-; GFX11-NEXT: v_and_or_b32 v5, v3, s0, 0x400000
+; GFX11-NEXT: v_or_b32_e32 v5, 0x400000, v3
; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3
; GFX11-NEXT: v_add3_u32 v1, v1, v3, 0x7fff
; GFX11-NEXT: v_lshlrev_b32_e32 v3, 16, v2
; GFX11-NEXT: v_bfe_u32 v4, v0, 16, 1
-; GFX11-NEXT: v_and_or_b32 v6, v0, s0, 0x400000
+; GFX11-NEXT: v_or_b32_e32 v6, 0x400000, v0
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_1) | instid1(VALU_DEP_4)
; GFX11-NEXT: v_cndmask_b32_e32 v1, v1, v5, vcc_lo
; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0
@@ -41109,7 +40236,7 @@ define <2 x bfloat> @v_fmuladd_v2bf16(<2 x bfloat> %a, <2 x bfloat> %b, <2 x bfl
; GFX11-NEXT: v_dual_cndmask_b32 v0, v4, v6 :: v_dual_add_f32 v1, v1, v3
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_2)
; GFX11-NEXT: v_and_b32_e32 v0, 0xffff0000, v0
-; GFX11-NEXT: v_and_or_b32 v4, v1, s0, 0x400000
+; GFX11-NEXT: v_or_b32_e32 v4, 0x400000, v1
; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_2)
; GFX11-NEXT: v_add_f32_e32 v0, v0, v2
@@ -41117,7 +40244,7 @@ define <2 x bfloat> @v_fmuladd_v2bf16(<2 x bfloat> %a, <2 x bfloat> %b, <2 x bfl
; GFX11-NEXT: v_bfe_u32 v3, v0, 16, 1
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_3)
; GFX11-NEXT: v_add3_u32 v2, v2, v1, 0x7fff
-; GFX11-NEXT: v_and_or_b32 v5, v0, s0, 0x400000
+; GFX11-NEXT: v_or_b32_e32 v5, 0x400000, v0
; GFX11-NEXT: v_add3_u32 v3, v3, v0, 0x7fff
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_3)
; GFX11-NEXT: v_cndmask_b32_e32 v1, v2, v4, vcc_lo
@@ -41210,8 +40337,7 @@ define <3 x bfloat> @v_fmuladd_v3bf16(<3 x bfloat> %a, <3 x bfloat> %b, <3 x bfl
; GFX8-NEXT: v_bfe_u32 v3, v1, 16, 1
; GFX8-NEXT: v_add_u32_e32 v3, vcc, v3, v1
; GFX8-NEXT: v_add_u32_e32 v3, vcc, 0x7fff, v3
-; GFX8-NEXT: v_and_b32_e32 v6, 0xff800000, v1
-; GFX8-NEXT: v_or_b32_e32 v6, 0x400000, v6
+; GFX8-NEXT: v_or_b32_e32 v6, 0x400000, v1
; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v1, v1
; GFX8-NEXT: v_cndmask_b32_e32 v1, v3, v6, vcc
; GFX8-NEXT: v_and_b32_e32 v1, 0xffff0000, v1
@@ -41221,8 +40347,7 @@ define <3 x bfloat> @v_fmuladd_v3bf16(<3 x bfloat> %a, <3 x bfloat> %b, <3 x bfl
; GFX8-NEXT: s_movk_i32 s4, 0x7fff
; GFX8-NEXT: v_add_u32_e32 v3, vcc, v3, v1
; GFX8-NEXT: v_add_u32_e32 v3, vcc, s4, v3
-; GFX8-NEXT: v_and_b32_e32 v5, 0xff800000, v1
-; GFX8-NEXT: v_or_b32_e32 v5, 0x400000, v5
+; GFX8-NEXT: v_or_b32_e32 v5, 0x400000, v1
; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v1, v1
; GFX8-NEXT: v_cndmask_b32_e32 v1, v3, v5, vcc
; GFX8-NEXT: v_lshlrev_b32_e32 v3, 16, v2
@@ -41231,8 +40356,7 @@ define <3 x bfloat> @v_fmuladd_v3bf16(<3 x bfloat> %a, <3 x bfloat> %b, <3 x bfl
; GFX8-NEXT: v_bfe_u32 v5, v3, 16, 1
; GFX8-NEXT: v_add_u32_e32 v5, vcc, v5, v3
; GFX8-NEXT: v_add_u32_e32 v5, vcc, s4, v5
-; GFX8-NEXT: v_and_b32_e32 v6, 0xff800000, v3
-; GFX8-NEXT: v_or_b32_e32 v6, 0x400000, v6
+; GFX8-NEXT: v_or_b32_e32 v6, 0x400000, v3
; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v3, v3
; GFX8-NEXT: v_cndmask_b32_e32 v3, v5, v6, vcc
; GFX8-NEXT: v_and_b32_e32 v3, 0xffff0000, v3
@@ -41243,16 +40367,14 @@ define <3 x bfloat> @v_fmuladd_v3bf16(<3 x bfloat> %a, <3 x bfloat> %b, <3 x bfl
; GFX8-NEXT: v_and_b32_e32 v2, 0xffff0000, v2
; GFX8-NEXT: v_and_b32_e32 v0, 0xffff0000, v0
; GFX8-NEXT: v_add_u32_e32 v5, vcc, s4, v5
-; GFX8-NEXT: v_and_b32_e32 v6, 0xff800000, v3
; GFX8-NEXT: v_mul_f32_e32 v0, v0, v2
-; GFX8-NEXT: v_or_b32_e32 v6, 0x400000, v6
+; GFX8-NEXT: v_or_b32_e32 v6, 0x400000, v3
; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v3, v3
; GFX8-NEXT: v_bfe_u32 v2, v0, 16, 1
; GFX8-NEXT: v_cndmask_b32_e32 v3, v5, v6, vcc
; GFX8-NEXT: v_add_u32_e32 v2, vcc, v2, v0
; GFX8-NEXT: v_add_u32_e32 v2, vcc, s4, v2
-; GFX8-NEXT: v_and_b32_e32 v5, 0xff800000, v0
-; GFX8-NEXT: v_or_b32_e32 v5, 0x400000, v5
+; GFX8-NEXT: v_or_b32_e32 v5, 0x400000, v0
; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v0, v0
; GFX8-NEXT: v_cndmask_b32_e32 v0, v2, v5, vcc
; GFX8-NEXT: v_and_b32_e32 v0, 0xffff0000, v0
@@ -41261,8 +40383,7 @@ define <3 x bfloat> @v_fmuladd_v3bf16(<3 x bfloat> %a, <3 x bfloat> %b, <3 x bfl
; GFX8-NEXT: v_bfe_u32 v2, v0, 16, 1
; GFX8-NEXT: v_add_u32_e32 v2, vcc, v2, v0
; GFX8-NEXT: v_add_u32_e32 v2, vcc, 0x7fff, v2
-; GFX8-NEXT: v_and_b32_e32 v4, 0xff800000, v0
-; GFX8-NEXT: v_or_b32_e32 v4, 0x400000, v4
+; GFX8-NEXT: v_or_b32_e32 v4, 0x400000, v0
; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v0, v0
; GFX8-NEXT: v_cndmask_b32_e32 v0, v2, v4, vcc
; GFX8-NEXT: v_lshrrev_b32_e32 v0, 16, v0
@@ -41278,54 +40399,48 @@ define <3 x bfloat> @v_fmuladd_v3bf16(<3 x bfloat> %a, <3 x bfloat> %b, <3 x bfl
; GFX9-NEXT: v_mul_f32_e32 v1, v1, v3
; GFX9-NEXT: v_bfe_u32 v3, v1, 16, 1
; GFX9-NEXT: s_movk_i32 s4, 0x7fff
-; GFX9-NEXT: v_and_b32_e32 v6, 0xff800000, v1
; GFX9-NEXT: v_add3_u32 v3, v3, v1, s4
-; GFX9-NEXT: v_or_b32_e32 v6, 0x400000, v6
+; GFX9-NEXT: v_or_b32_e32 v6, 0x400000, v1
; GFX9-NEXT: v_cmp_u_f32_e32 vcc, v1, v1
; GFX9-NEXT: v_cndmask_b32_e32 v1, v3, v6, vcc
; GFX9-NEXT: v_and_b32_e32 v1, 0xffff0000, v1
; GFX9-NEXT: v_lshlrev_b32_e32 v3, 16, v5
; GFX9-NEXT: v_add_f32_e32 v1, v1, v3
; GFX9-NEXT: v_bfe_u32 v3, v1, 16, 1
-; GFX9-NEXT: v_and_b32_e32 v5, 0xff800000, v1
; GFX9-NEXT: v_add3_u32 v3, v3, v1, s4
-; GFX9-NEXT: v_or_b32_e32 v5, 0x400000, v5
+; GFX9-NEXT: v_or_b32_e32 v5, 0x400000, v1
; GFX9-NEXT: v_cmp_u_f32_e32 vcc, v1, v1
; GFX9-NEXT: v_cndmask_b32_e32 v1, v3, v5, vcc
; GFX9-NEXT: v_lshlrev_b32_e32 v3, 16, v2
; GFX9-NEXT: v_lshlrev_b32_e32 v5, 16, v0
; GFX9-NEXT: v_mul_f32_e32 v3, v5, v3
; GFX9-NEXT: v_bfe_u32 v5, v3, 16, 1
-; GFX9-NEXT: v_and_b32_e32 v6, 0xff800000, v3
; GFX9-NEXT: v_add3_u32 v5, v5, v3, s4
-; GFX9-NEXT: v_or_b32_e32 v6, 0x400000, v6
+; GFX9-NEXT: v_or_b32_e32 v6, 0x400000, v3
; GFX9-NEXT: v_cmp_u_f32_e32 vcc, v3, v3
; GFX9-NEXT: v_cndmask_b32_e32 v3, v5, v6, vcc
; GFX9-NEXT: v_and_b32_e32 v3, 0xffff0000, v3
; GFX9-NEXT: v_lshlrev_b32_e32 v5, 16, v4
; GFX9-NEXT: v_add_f32_e32 v3, v3, v5
-; GFX9-NEXT: v_bfe_u32 v5, v3, 16, 1
-; GFX9-NEXT: v_and_b32_e32 v6, 0xff800000, v3
; GFX9-NEXT: v_and_b32_e32 v2, 0xffff0000, v2
; GFX9-NEXT: v_and_b32_e32 v0, 0xffff0000, v0
+; GFX9-NEXT: v_bfe_u32 v5, v3, 16, 1
+; GFX9-NEXT: v_mul_f32_e32 v0, v0, v2
; GFX9-NEXT: v_add3_u32 v5, v5, v3, s4
-; GFX9-NEXT: v_or_b32_e32 v6, 0x400000, v6
+; GFX9-NEXT: v_or_b32_e32 v6, 0x400000, v3
; GFX9-NEXT: v_cmp_u_f32_e32 vcc, v3, v3
-; GFX9-NEXT: v_mul_f32_e32 v0, v0, v2
-; GFX9-NEXT: v_cndmask_b32_e32 v3, v5, v6, vcc
; GFX9-NEXT: v_bfe_u32 v2, v0, 16, 1
-; GFX9-NEXT: v_and_b32_e32 v5, 0xff800000, v0
+; GFX9-NEXT: v_cndmask_b32_e32 v3, v5, v6, vcc
; GFX9-NEXT: v_add3_u32 v2, v2, v0, s4
-; GFX9-NEXT: v_or_b32_e32 v5, 0x400000, v5
+; GFX9-NEXT: v_or_b32_e32 v5, 0x400000, v0
; GFX9-NEXT: v_cmp_u_f32_e32 vcc, v0, v0
; GFX9-NEXT: v_cndmask_b32_e32 v0, v2, v5, vcc
; GFX9-NEXT: v_and_b32_e32 v0, 0xffff0000, v0
; GFX9-NEXT: v_and_b32_e32 v2, 0xffff0000, v4
; GFX9-NEXT: v_add_f32_e32 v0, v0, v2
; GFX9-NEXT: v_bfe_u32 v2, v0, 16, 1
-; GFX9-NEXT: v_and_b32_e32 v4, 0xff800000, v0
; GFX9-NEXT: v_add3_u32 v2, v2, v0, s4
-; GFX9-NEXT: v_or_b32_e32 v4, 0x400000, v4
+; GFX9-NEXT: v_or_b32_e32 v4, 0x400000, v0
; GFX9-NEXT: v_cmp_u_f32_e32 vcc, v0, v0
; GFX9-NEXT: v_cndmask_b32_e32 v0, v2, v4, vcc
; GFX9-NEXT: s_mov_b32 s4, 0x7060302
@@ -41343,41 +40458,40 @@ define <3 x bfloat> @v_fmuladd_v3bf16(<3 x bfloat> %a, <3 x bfloat> %b, <3 x bfl
; GFX10-NEXT: v_and_b32_e32 v2, 0xffff0000, v2
; GFX10-NEXT: v_and_b32_e32 v0, 0xffff0000, v0
; GFX10-NEXT: v_mul_f32_e32 v1, v1, v3
-; GFX10-NEXT: s_mov_b32 s4, 0xff800000
; GFX10-NEXT: v_mul_f32_e32 v3, v7, v6
; GFX10-NEXT: v_mul_f32_e32 v0, v0, v2
; GFX10-NEXT: v_bfe_u32 v2, v1, 16, 1
-; GFX10-NEXT: v_and_or_b32 v8, v1, s4, 0x400000
-; GFX10-NEXT: v_bfe_u32 v6, v3, 16, 1
+; GFX10-NEXT: v_or_b32_e32 v6, 0x400000, v1
+; GFX10-NEXT: v_bfe_u32 v7, v3, 16, 1
; GFX10-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
-; GFX10-NEXT: v_bfe_u32 v7, v0, 16, 1
+; GFX10-NEXT: v_bfe_u32 v8, v0, 16, 1
; GFX10-NEXT: v_add3_u32 v2, v2, v1, 0x7fff
-; GFX10-NEXT: v_and_or_b32 v9, v3, s4, 0x400000
-; GFX10-NEXT: v_add3_u32 v6, v6, v3, 0x7fff
-; GFX10-NEXT: v_and_or_b32 v10, v0, s4, 0x400000
-; GFX10-NEXT: v_add3_u32 v7, v7, v0, 0x7fff
-; GFX10-NEXT: v_cndmask_b32_e32 v1, v2, v8, vcc_lo
+; GFX10-NEXT: v_or_b32_e32 v9, 0x400000, v3
+; GFX10-NEXT: v_add3_u32 v7, v7, v3, 0x7fff
+; GFX10-NEXT: v_or_b32_e32 v10, 0x400000, v0
+; GFX10-NEXT: v_add3_u32 v8, v8, v0, 0x7fff
+; GFX10-NEXT: v_cndmask_b32_e32 v1, v2, v6, vcc_lo
; GFX10-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3
; GFX10-NEXT: v_lshlrev_b32_e32 v3, 16, v5
; GFX10-NEXT: v_lshlrev_b32_e32 v5, 16, v4
; GFX10-NEXT: v_and_b32_e32 v4, 0xffff0000, v4
; GFX10-NEXT: v_and_b32_e32 v1, 0xffff0000, v1
-; GFX10-NEXT: v_cndmask_b32_e32 v2, v6, v9, vcc_lo
+; GFX10-NEXT: v_cndmask_b32_e32 v2, v7, v9, vcc_lo
; GFX10-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0
; GFX10-NEXT: v_add_f32_e32 v1, v1, v3
; GFX10-NEXT: v_and_b32_e32 v2, 0xffff0000, v2
-; GFX10-NEXT: v_cndmask_b32_e32 v0, v7, v10, vcc_lo
-; GFX10-NEXT: v_and_or_b32 v6, v1, s4, 0x400000
+; GFX10-NEXT: v_cndmask_b32_e32 v0, v8, v10, vcc_lo
+; GFX10-NEXT: v_or_b32_e32 v6, 0x400000, v1
; GFX10-NEXT: v_add_f32_e32 v2, v2, v5
; GFX10-NEXT: v_and_b32_e32 v0, 0xffff0000, v0
; GFX10-NEXT: v_bfe_u32 v3, v2, 16, 1
; GFX10-NEXT: v_add_f32_e32 v0, v0, v4
-; GFX10-NEXT: v_and_or_b32 v7, v2, s4, 0x400000
+; GFX10-NEXT: v_or_b32_e32 v7, 0x400000, v2
; GFX10-NEXT: v_cmp_u_f32_e32 vcc_lo, v2, v2
; GFX10-NEXT: v_bfe_u32 v4, v1, 16, 1
; GFX10-NEXT: v_add3_u32 v3, v3, v2, 0x7fff
; GFX10-NEXT: v_bfe_u32 v5, v0, 16, 1
-; GFX10-NEXT: v_and_or_b32 v8, v0, s4, 0x400000
+; GFX10-NEXT: v_or_b32_e32 v8, 0x400000, v0
; GFX10-NEXT: v_add3_u32 v4, v4, v1, 0x7fff
; GFX10-NEXT: v_cndmask_b32_e32 v2, v3, v7, vcc_lo
; GFX10-NEXT: v_add3_u32 v5, v5, v0, 0x7fff
@@ -41492,8 +40606,7 @@ define <4 x bfloat> @v_fmuladd_v4bf16(<4 x bfloat> %a, <4 x bfloat> %b, <4 x bfl
; GFX8-NEXT: v_bfe_u32 v7, v6, 16, 1
; GFX8-NEXT: v_add_u32_e32 v7, vcc, v7, v6
; GFX8-NEXT: v_add_u32_e32 v7, vcc, 0x7fff, v7
-; GFX8-NEXT: v_and_b32_e32 v8, 0xff800000, v6
-; GFX8-NEXT: v_or_b32_e32 v8, 0x400000, v8
+; GFX8-NEXT: v_or_b32_e32 v8, 0x400000, v6
; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v6, v6
; GFX8-NEXT: v_cndmask_b32_e32 v6, v7, v8, vcc
; GFX8-NEXT: v_and_b32_e32 v6, 0xffff0000, v6
@@ -41505,16 +40618,14 @@ define <4 x bfloat> @v_fmuladd_v4bf16(<4 x bfloat> %a, <4 x bfloat> %b, <4 x bfl
; GFX8-NEXT: v_and_b32_e32 v3, 0xffff0000, v3
; GFX8-NEXT: v_and_b32_e32 v1, 0xffff0000, v1
; GFX8-NEXT: v_add_u32_e32 v7, vcc, s4, v7
-; GFX8-NEXT: v_and_b32_e32 v8, 0xff800000, v6
; GFX8-NEXT: v_mul_f32_e32 v1, v1, v3
-; GFX8-NEXT: v_or_b32_e32 v8, 0x400000, v8
+; GFX8-NEXT: v_or_b32_e32 v8, 0x400000, v6
; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v6, v6
; GFX8-NEXT: v_bfe_u32 v3, v1, 16, 1
; GFX8-NEXT: v_cndmask_b32_e32 v6, v7, v8, vcc
; GFX8-NEXT: v_add_u32_e32 v3, vcc, v3, v1
; GFX8-NEXT: v_add_u32_e32 v3, vcc, s4, v3
-; GFX8-NEXT: v_and_b32_e32 v7, 0xff800000, v1
-; GFX8-NEXT: v_or_b32_e32 v7, 0x400000, v7
+; GFX8-NEXT: v_or_b32_e32 v7, 0x400000, v1
; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v1, v1
; GFX8-NEXT: v_cndmask_b32_e32 v1, v3, v7, vcc
; GFX8-NEXT: v_and_b32_e32 v1, 0xffff0000, v1
@@ -41523,8 +40634,7 @@ define <4 x bfloat> @v_fmuladd_v4bf16(<4 x bfloat> %a, <4 x bfloat> %b, <4 x bfl
; GFX8-NEXT: v_bfe_u32 v3, v1, 16, 1
; GFX8-NEXT: v_add_u32_e32 v3, vcc, v3, v1
; GFX8-NEXT: v_add_u32_e32 v3, vcc, s4, v3
-; GFX8-NEXT: v_and_b32_e32 v5, 0xff800000, v1
-; GFX8-NEXT: v_or_b32_e32 v5, 0x400000, v5
+; GFX8-NEXT: v_or_b32_e32 v5, 0x400000, v1
; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v1, v1
; GFX8-NEXT: v_cndmask_b32_e32 v1, v3, v5, vcc
; GFX8-NEXT: v_lshlrev_b32_e32 v3, 16, v2
@@ -41533,8 +40643,7 @@ define <4 x bfloat> @v_fmuladd_v4bf16(<4 x bfloat> %a, <4 x bfloat> %b, <4 x bfl
; GFX8-NEXT: v_bfe_u32 v5, v3, 16, 1
; GFX8-NEXT: v_add_u32_e32 v5, vcc, v5, v3
; GFX8-NEXT: v_add_u32_e32 v5, vcc, s4, v5
-; GFX8-NEXT: v_and_b32_e32 v7, 0xff800000, v3
-; GFX8-NEXT: v_or_b32_e32 v7, 0x400000, v7
+; GFX8-NEXT: v_or_b32_e32 v7, 0x400000, v3
; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v3, v3
; GFX8-NEXT: v_cndmask_b32_e32 v3, v5, v7, vcc
; GFX8-NEXT: v_and_b32_e32 v3, 0xffff0000, v3
@@ -41545,16 +40654,14 @@ define <4 x bfloat> @v_fmuladd_v4bf16(<4 x bfloat> %a, <4 x bfloat> %b, <4 x bfl
; GFX8-NEXT: v_and_b32_e32 v2, 0xffff0000, v2
; GFX8-NEXT: v_and_b32_e32 v0, 0xffff0000, v0
; GFX8-NEXT: v_add_u32_e32 v5, vcc, s4, v5
-; GFX8-NEXT: v_and_b32_e32 v7, 0xff800000, v3
; GFX8-NEXT: v_mul_f32_e32 v0, v0, v2
-; GFX8-NEXT: v_or_b32_e32 v7, 0x400000, v7
+; GFX8-NEXT: v_or_b32_e32 v7, 0x400000, v3
; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v3, v3
; GFX8-NEXT: v_bfe_u32 v2, v0, 16, 1
; GFX8-NEXT: v_cndmask_b32_e32 v3, v5, v7, vcc
; GFX8-NEXT: v_add_u32_e32 v2, vcc, v2, v0
; GFX8-NEXT: v_add_u32_e32 v2, vcc, s4, v2
-; GFX8-NEXT: v_and_b32_e32 v5, 0xff800000, v0
-; GFX8-NEXT: v_or_b32_e32 v5, 0x400000, v5
+; GFX8-NEXT: v_or_b32_e32 v5, 0x400000, v0
; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v0, v0
; GFX8-NEXT: v_cndmask_b32_e32 v0, v2, v5, vcc
; GFX8-NEXT: v_and_b32_e32 v0, 0xffff0000, v0
@@ -41563,8 +40670,7 @@ define <4 x bfloat> @v_fmuladd_v4bf16(<4 x bfloat> %a, <4 x bfloat> %b, <4 x bfl
; GFX8-NEXT: v_bfe_u32 v2, v0, 16, 1
; GFX8-NEXT: v_add_u32_e32 v2, vcc, v2, v0
; GFX8-NEXT: v_add_u32_e32 v2, vcc, 0x7fff, v2
-; GFX8-NEXT: v_and_b32_e32 v4, 0xff800000, v0
-; GFX8-NEXT: v_or_b32_e32 v4, 0x400000, v4
+; GFX8-NEXT: v_or_b32_e32 v4, 0x400000, v0
; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v0, v0
; GFX8-NEXT: v_cndmask_b32_e32 v0, v2, v4, vcc
; GFX8-NEXT: v_lshrrev_b32_e32 v1, 16, v1
@@ -41581,72 +40687,64 @@ define <4 x bfloat> @v_fmuladd_v4bf16(<4 x bfloat> %a, <4 x bfloat> %b, <4 x bfl
; GFX9-NEXT: v_mul_f32_e32 v6, v7, v6
; GFX9-NEXT: v_bfe_u32 v7, v6, 16, 1
; GFX9-NEXT: s_movk_i32 s4, 0x7fff
-; GFX9-NEXT: v_and_b32_e32 v8, 0xff800000, v6
; GFX9-NEXT: v_add3_u32 v7, v7, v6, s4
-; GFX9-NEXT: v_or_b32_e32 v8, 0x400000, v8
+; GFX9-NEXT: v_or_b32_e32 v8, 0x400000, v6
; GFX9-NEXT: v_cmp_u_f32_e32 vcc, v6, v6
; GFX9-NEXT: v_cndmask_b32_e32 v6, v7, v8, vcc
; GFX9-NEXT: v_and_b32_e32 v6, 0xffff0000, v6
; GFX9-NEXT: v_lshlrev_b32_e32 v7, 16, v5
; GFX9-NEXT: v_add_f32_e32 v6, v6, v7
-; GFX9-NEXT: v_bfe_u32 v7, v6, 16, 1
-; GFX9-NEXT: v_and_b32_e32 v8, 0xff800000, v6
; GFX9-NEXT: v_and_b32_e32 v3, 0xffff0000, v3
; GFX9-NEXT: v_and_b32_e32 v1, 0xffff0000, v1
+; GFX9-NEXT: v_bfe_u32 v7, v6, 16, 1
+; GFX9-NEXT: v_mul_f32_e32 v1, v1, v3
; GFX9-NEXT: v_add3_u32 v7, v7, v6, s4
-; GFX9-NEXT: v_or_b32_e32 v8, 0x400000, v8
+; GFX9-NEXT: v_or_b32_e32 v8, 0x400000, v6
; GFX9-NEXT: v_cmp_u_f32_e32 vcc, v6, v6
-; GFX9-NEXT: v_mul_f32_e32 v1, v1, v3
-; GFX9-NEXT: v_cndmask_b32_e32 v6, v7, v8, vcc
; GFX9-NEXT: v_bfe_u32 v3, v1, 16, 1
-; GFX9-NEXT: v_and_b32_e32 v7, 0xff800000, v1
+; GFX9-NEXT: v_cndmask_b32_e32 v6, v7, v8, vcc
; GFX9-NEXT: v_add3_u32 v3, v3, v1, s4
-; GFX9-NEXT: v_or_b32_e32 v7, 0x400000, v7
+; GFX9-NEXT: v_or_b32_e32 v7, 0x400000, v1
; GFX9-NEXT: v_cmp_u_f32_e32 vcc, v1, v1
; GFX9-NEXT: v_cndmask_b32_e32 v1, v3, v7, vcc
; GFX9-NEXT: v_and_b32_e32 v1, 0xffff0000, v1
; GFX9-NEXT: v_and_b32_e32 v3, 0xffff0000, v5
; GFX9-NEXT: v_add_f32_e32 v1, v1, v3
; GFX9-NEXT: v_bfe_u32 v3, v1, 16, 1
-; GFX9-NEXT: v_and_b32_e32 v5, 0xff800000, v1
; GFX9-NEXT: v_add3_u32 v3, v3, v1, s4
-; GFX9-NEXT: v_or_b32_e32 v5, 0x400000, v5
+; GFX9-NEXT: v_or_b32_e32 v5, 0x400000, v1
; GFX9-NEXT: v_cmp_u_f32_e32 vcc, v1, v1
; GFX9-NEXT: v_cndmask_b32_e32 v1, v3, v5, vcc
; GFX9-NEXT: v_lshlrev_b32_e32 v3, 16, v2
; GFX9-NEXT: v_lshlrev_b32_e32 v5, 16, v0
; GFX9-NEXT: v_mul_f32_e32 v3, v5, v3
; GFX9-NEXT: v_bfe_u32 v5, v3, 16, 1
-; GFX9-NEXT: v_and_b32_e32 v7, 0xff800000, v3
; GFX9-NEXT: v_add3_u32 v5, v5, v3, s4
-; GFX9-NEXT: v_or_b32_e32 v7, 0x400000, v7
+; GFX9-NEXT: v_or_b32_e32 v7, 0x400000, v3
; GFX9-NEXT: v_cmp_u_f32_e32 vcc, v3, v3
; GFX9-NEXT: v_cndmask_b32_e32 v3, v5, v7, vcc
; GFX9-NEXT: v_and_b32_e32 v3, 0xffff0000, v3
; GFX9-NEXT: v_lshlrev_b32_e32 v5, 16, v4
; GFX9-NEXT: v_add_f32_e32 v3, v3, v5
-; GFX9-NEXT: v_bfe_u32 v5, v3, 16, 1
-; GFX9-NEXT: v_and_b32_e32 v7, 0xff800000, v3
; GFX9-NEXT: v_and_b32_e32 v2, 0xffff0000, v2
; GFX9-NEXT: v_and_b32_e32 v0, 0xffff0000, v0
+; GFX9-NEXT: v_bfe_u32 v5, v3, 16, 1
+; GFX9-NEXT: v_mul_f32_e32 v0, v0, v2
; GFX9-NEXT: v_add3_u32 v5, v5, v3, s4
-; GFX9-NEXT: v_or_b32_e32 v7, 0x400000, v7
+; GFX9-NEXT: v_or_b32_e32 v7, 0x400000, v3
; GFX9-NEXT: v_cmp_u_f32_e32 vcc, v3, v3
-; GFX9-NEXT: v_mul_f32_e32 v0, v0, v2
-; GFX9-NEXT: v_cndmask_b32_e32 v3, v5, v7, vcc
; GFX9-NEXT: v_bfe_u32 v2, v0, 16, 1
-; GFX9-NEXT: v_and_b32_e32 v5, 0xff800000, v0
+; GFX9-NEXT: v_cndmask_b32_e32 v3, v5, v7, vcc
; GFX9-NEXT: v_add3_u32 v2, v2, v0, s4
-; GFX9-NEXT: v_or_b32_e32 v5, 0x400000, v5
+; GFX9-NEXT: v_or_b32_e32 v5, 0x400000, v0
; GFX9-NEXT: v_cmp_u_f32_e32 vcc, v0, v0
; GFX9-NEXT: v_cndmask_b32_e32 v0, v2, v5, vcc
; GFX9-NEXT: v_and_b32_e32 v0, 0xffff0000, v0
; GFX9-NEXT: v_and_b32_e32 v2, 0xffff0000, v4
; GFX9-NEXT: v_add_f32_e32 v0, v0, v2
; GFX9-NEXT: v_bfe_u32 v2, v0, 16, 1
-; GFX9-NEXT: v_and_b32_e32 v4, 0xff800000, v0
; GFX9-NEXT: v_add3_u32 v2, v2, v0, s4
-; GFX9-NEXT: v_or_b32_e32 v4, 0x400000, v4
+; GFX9-NEXT: v_or_b32_e32 v4, 0x400000, v0
; GFX9-NEXT: v_cmp_u_f32_e32 vcc, v0, v0
; GFX9-NEXT: v_cndmask_b32_e32 v0, v2, v4, vcc
; GFX9-NEXT: s_mov_b32 s4, 0x7060302
@@ -41667,45 +40765,44 @@ define <4 x bfloat> @v_fmuladd_v4bf16(<4 x bfloat> %a, <4 x bfloat> %b, <4 x bfl
; GFX10-NEXT: v_lshlrev_b32_e32 v7, 16, v2
; GFX10-NEXT: v_and_b32_e32 v2, 0xffff0000, v2
; GFX10-NEXT: v_mul_f32_e32 v1, v1, v3
-; GFX10-NEXT: s_mov_b32 s4, 0xff800000
+; GFX10-NEXT: v_lshlrev_b32_e32 v8, 16, v5
; GFX10-NEXT: v_bfe_u32 v10, v6, 16, 1
-; GFX10-NEXT: v_and_or_b32 v3, v6, s4, 0x400000
+; GFX10-NEXT: v_or_b32_e32 v3, 0x400000, v6
; GFX10-NEXT: v_mul_f32_e32 v7, v9, v7
; GFX10-NEXT: v_mul_f32_e32 v0, v0, v2
; GFX10-NEXT: v_bfe_u32 v2, v1, 16, 1
; GFX10-NEXT: v_add3_u32 v10, v10, v6, 0x7fff
; GFX10-NEXT: v_cmp_u_f32_e32 vcc_lo, v6, v6
-; GFX10-NEXT: v_and_or_b32 v6, v1, s4, 0x400000
+; GFX10-NEXT: v_or_b32_e32 v6, 0x400000, v1
; GFX10-NEXT: v_bfe_u32 v9, v7, 16, 1
; GFX10-NEXT: v_add3_u32 v2, v2, v1, 0x7fff
; GFX10-NEXT: v_bfe_u32 v11, v0, 16, 1
; GFX10-NEXT: v_cndmask_b32_e32 v3, v10, v3, vcc_lo
; GFX10-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
-; GFX10-NEXT: v_and_or_b32 v10, v7, s4, 0x400000
+; GFX10-NEXT: v_or_b32_e32 v10, 0x400000, v7
; GFX10-NEXT: v_add3_u32 v9, v9, v7, 0x7fff
-; GFX10-NEXT: v_lshlrev_b32_e32 v8, 16, v5
+; GFX10-NEXT: v_or_b32_e32 v12, 0x400000, v0
; GFX10-NEXT: v_and_b32_e32 v3, 0xffff0000, v3
; GFX10-NEXT: v_cndmask_b32_e32 v1, v2, v6, vcc_lo
; GFX10-NEXT: v_cmp_u_f32_e32 vcc_lo, v7, v7
-; GFX10-NEXT: v_and_or_b32 v12, v0, s4, 0x400000
; GFX10-NEXT: v_add3_u32 v11, v11, v0, 0x7fff
+; GFX10-NEXT: v_and_b32_e32 v5, 0xffff0000, v5
; GFX10-NEXT: v_add_f32_e32 v3, v3, v8
; GFX10-NEXT: v_and_b32_e32 v1, 0xffff0000, v1
; GFX10-NEXT: v_cndmask_b32_e32 v2, v9, v10, vcc_lo
; GFX10-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0
-; GFX10-NEXT: v_and_b32_e32 v5, 0xffff0000, v5
; GFX10-NEXT: v_lshlrev_b32_e32 v6, 16, v4
; GFX10-NEXT: v_and_b32_e32 v4, 0xffff0000, v4
+; GFX10-NEXT: v_bfe_u32 v7, v3, 16, 1
; GFX10-NEXT: v_and_b32_e32 v2, 0xffff0000, v2
; GFX10-NEXT: v_cndmask_b32_e32 v0, v11, v12, vcc_lo
-; GFX10-NEXT: v_bfe_u32 v7, v3, 16, 1
; GFX10-NEXT: v_add_f32_e32 v1, v1, v5
-; GFX10-NEXT: v_and_or_b32 v5, v3, s4, 0x400000
+; GFX10-NEXT: v_or_b32_e32 v5, 0x400000, v3
+; GFX10-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3
; GFX10-NEXT: v_add_f32_e32 v2, v2, v6
; GFX10-NEXT: v_and_b32_e32 v0, 0xffff0000, v0
-; GFX10-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3
; GFX10-NEXT: v_bfe_u32 v6, v1, 16, 1
-; GFX10-NEXT: v_and_or_b32 v9, v1, s4, 0x400000
+; GFX10-NEXT: v_or_b32_e32 v9, 0x400000, v1
; GFX10-NEXT: v_add_f32_e32 v0, v0, v4
; GFX10-NEXT: v_add3_u32 v4, v7, v3, 0x7fff
; GFX10-NEXT: v_bfe_u32 v7, v2, 16, 1
@@ -41713,10 +40810,10 @@ define <4 x bfloat> @v_fmuladd_v4bf16(<4 x bfloat> %a, <4 x bfloat> %b, <4 x bfl
; GFX10-NEXT: v_cndmask_b32_e32 v3, v4, v5, vcc_lo
; GFX10-NEXT: v_add3_u32 v4, v6, v1, 0x7fff
; GFX10-NEXT: v_add3_u32 v5, v7, v2, 0x7fff
-; GFX10-NEXT: v_and_or_b32 v6, v2, s4, 0x400000
+; GFX10-NEXT: v_or_b32_e32 v6, 0x400000, v2
; GFX10-NEXT: v_cmp_u_f32_e32 vcc_lo, v2, v2
; GFX10-NEXT: v_add3_u32 v7, v8, v0, 0x7fff
-; GFX10-NEXT: v_and_or_b32 v8, v0, s4, 0x400000
+; GFX10-NEXT: v_or_b32_e32 v8, 0x400000, v0
; GFX10-NEXT: v_cndmask_b32_e32 v2, v5, v6, vcc_lo
; GFX10-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0
; GFX10-NEXT: v_cndmask_b32_e32 v0, v7, v8, vcc_lo
@@ -41736,7 +40833,6 @@ define <4 x bfloat> @v_fmuladd_v4bf16(<4 x bfloat> %a, <4 x bfloat> %b, <4 x bfl
; GFX11-NEXT: v_lshlrev_b32_e32 v8, 16, v5
; GFX11-NEXT: v_lshlrev_b32_e32 v6, 16, v3
; GFX11-NEXT: v_and_b32_e32 v3, 0xffff0000, v3
-; GFX11-NEXT: s_mov_b32 s0, 0xff800000
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_3)
; GFX11-NEXT: v_dual_mul_f32 v6, v7, v6 :: v_dual_and_b32 v5, 0xffff0000, v5
; GFX11-NEXT: v_lshlrev_b32_e32 v7, 16, v2
@@ -41744,20 +40840,20 @@ define <4 x bfloat> @v_fmuladd_v4bf16(<4 x bfloat> %a, <4 x bfloat> %b, <4 x bfl
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
; GFX11-NEXT: v_bfe_u32 v10, v6, 16, 1
; GFX11-NEXT: v_mul_f32_e32 v7, v9, v7
-; GFX11-NEXT: v_and_or_b32 v3, v6, s0, 0x400000
+; GFX11-NEXT: v_or_b32_e32 v3, 0x400000, v6
; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v6, v6
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_2) | instid1(VALU_DEP_3)
; GFX11-NEXT: v_add3_u32 v10, v10, v6, 0x7fff
-; GFX11-NEXT: v_and_or_b32 v6, v1, s0, 0x400000
+; GFX11-NEXT: v_or_b32_e32 v6, 0x400000, v1
; GFX11-NEXT: v_bfe_u32 v9, v7, 16, 1
; GFX11-NEXT: v_dual_cndmask_b32 v3, v10, v3 :: v_dual_mul_f32 v0, v0, v2
; GFX11-NEXT: v_bfe_u32 v2, v1, 16, 1
; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
-; GFX11-NEXT: v_and_or_b32 v10, v7, s0, 0x400000
+; GFX11-NEXT: v_or_b32_e32 v10, 0x400000, v7
; GFX11-NEXT: v_add3_u32 v9, v9, v7, 0x7fff
; GFX11-NEXT: v_bfe_u32 v11, v0, 16, 1
; GFX11-NEXT: v_add3_u32 v2, v2, v1, 0x7fff
-; GFX11-NEXT: v_and_or_b32 v12, v0, s0, 0x400000
+; GFX11-NEXT: v_or_b32_e32 v12, 0x400000, v0
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
; GFX11-NEXT: v_add3_u32 v11, v11, v0, 0x7fff
; GFX11-NEXT: v_dual_cndmask_b32 v1, v2, v6 :: v_dual_lshlrev_b32 v6, 16, v4
@@ -41769,7 +40865,7 @@ define <4 x bfloat> @v_fmuladd_v4bf16(<4 x bfloat> %a, <4 x bfloat> %b, <4 x bfl
; GFX11-NEXT: v_dual_add_f32 v1, v1, v5 :: v_dual_and_b32 v2, 0xffff0000, v2
; GFX11-NEXT: v_cndmask_b32_e32 v0, v11, v12, vcc_lo
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_3)
-; GFX11-NEXT: v_and_or_b32 v9, v1, s0, 0x400000
+; GFX11-NEXT: v_or_b32_e32 v9, 0x400000, v1
; GFX11-NEXT: v_add_f32_e32 v2, v2, v6
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_2) | instid1(VALU_DEP_3)
; GFX11-NEXT: v_and_b32_e32 v0, 0xffff0000, v0
@@ -41781,7 +40877,7 @@ define <4 x bfloat> @v_fmuladd_v4bf16(<4 x bfloat> %a, <4 x bfloat> %b, <4 x bfl
; GFX11-NEXT: v_bfe_u32 v8, v0, 16, 1
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_2) | instid1(VALU_DEP_3)
; GFX11-NEXT: v_bfe_u32 v7, v3, 16, 1
-; GFX11-NEXT: v_and_or_b32 v5, v3, s0, 0x400000
+; GFX11-NEXT: v_or_b32_e32 v5, 0x400000, v3
; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3
; GFX11-NEXT: v_add3_u32 v4, v7, v3, 0x7fff
; GFX11-NEXT: v_bfe_u32 v7, v2, 16, 1
@@ -41789,10 +40885,10 @@ define <4 x bfloat> @v_fmuladd_v4bf16(<4 x bfloat> %a, <4 x bfloat> %b, <4 x bfl
; GFX11-NEXT: v_cndmask_b32_e32 v3, v4, v5, vcc_lo
; GFX11-NEXT: v_add3_u32 v4, v6, v1, 0x7fff
; GFX11-NEXT: v_add3_u32 v5, v7, v2, 0x7fff
-; GFX11-NEXT: v_and_or_b32 v6, v2, s0, 0x400000
+; GFX11-NEXT: v_or_b32_e32 v6, 0x400000, v2
; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v2, v2
; GFX11-NEXT: v_add3_u32 v7, v8, v0, 0x7fff
-; GFX11-NEXT: v_and_or_b32 v8, v0, s0, 0x400000
+; GFX11-NEXT: v_or_b32_e32 v8, 0x400000, v0
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_1) | instid1(VALU_DEP_3)
; GFX11-NEXT: v_cndmask_b32_e32 v2, v5, v6, vcc_lo
; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0
diff --git a/llvm/test/CodeGen/AMDGPU/fmed3-cast-combine.ll b/llvm/test/CodeGen/AMDGPU/fmed3-cast-combine.ll
index d35871e3774d..99b163dc9753 100644
--- a/llvm/test/CodeGen/AMDGPU/fmed3-cast-combine.ll
+++ b/llvm/test/CodeGen/AMDGPU/fmed3-cast-combine.ll
@@ -790,8 +790,7 @@ define bfloat @fmed3_f32_fpext_f16_fptrunc_bf16(half %arg0, half %arg1, half %ar
; GFX8-NEXT: v_bfe_u32 v1, v0, 16, 1
; GFX8-NEXT: v_add_u32_e32 v1, vcc, v1, v0
; GFX8-NEXT: v_add_u32_e32 v1, vcc, 0x7fff, v1
-; GFX8-NEXT: v_and_b32_e32 v2, 0xff800000, v0
-; GFX8-NEXT: v_or_b32_e32 v2, 0x400000, v2
+; GFX8-NEXT: v_or_b32_e32 v2, 0x400000, v0
; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v0, v0
; GFX8-NEXT: v_cndmask_b32_e32 v0, v1, v2, vcc
; GFX8-NEXT: v_lshrrev_b32_e32 v0, 16, v0
@@ -806,9 +805,8 @@ define bfloat @fmed3_f32_fpext_f16_fptrunc_bf16(half %arg0, half %arg1, half %ar
; GFX9-NEXT: s_movk_i32 s4, 0x7fff
; GFX9-NEXT: v_med3_f32 v0, v0, v1, v2
; GFX9-NEXT: v_bfe_u32 v1, v0, 16, 1
-; GFX9-NEXT: v_and_b32_e32 v2, 0xff800000, v0
; GFX9-NEXT: v_add3_u32 v1, v1, v0, s4
-; GFX9-NEXT: v_or_b32_e32 v2, 0x400000, v2
+; GFX9-NEXT: v_or_b32_e32 v2, 0x400000, v0
; GFX9-NEXT: v_cmp_u_f32_e32 vcc, v0, v0
; GFX9-NEXT: v_cndmask_b32_e32 v0, v1, v2, vcc
; GFX9-NEXT: v_lshrrev_b32_e32 v0, 16, v0
diff --git a/llvm/test/CodeGen/AMDGPU/global-atomics-fp.ll b/llvm/test/CodeGen/AMDGPU/global-atomics-fp.ll
index 9142858806f1..5889de7faf3e 100644
--- a/llvm/test/CodeGen/AMDGPU/global-atomics-fp.ll
+++ b/llvm/test/CodeGen/AMDGPU/global-atomics-fp.ll
@@ -1524,9 +1524,8 @@ define amdgpu_kernel void @global_atomic_fadd_ret_bf16_agent(ptr addrspace(1) %p
; GFX900-NEXT: v_lshrrev_b32_sdwa v1, s5, v2 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
; GFX900-NEXT: v_add_f32_e32 v1, 4.0, v1
; GFX900-NEXT: v_bfe_u32 v3, v1, 16, 1
-; GFX900-NEXT: v_and_b32_e32 v4, 0xff800000, v1
+; GFX900-NEXT: v_or_b32_e32 v4, 0x400000, v1
; GFX900-NEXT: v_add3_u32 v3, v3, v1, s4
-; GFX900-NEXT: v_or_b32_e32 v4, 0x400000, v4
; GFX900-NEXT: v_cmp_u_f32_e32 vcc, v1, v1
; GFX900-NEXT: v_cndmask_b32_e32 v1, v3, v4, vcc
; GFX900-NEXT: v_lshlrev_b32_sdwa v1, s5, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
@@ -1566,9 +1565,8 @@ define amdgpu_kernel void @global_atomic_fadd_ret_bf16_agent(ptr addrspace(1) %p
; GFX908-NEXT: v_lshrrev_b32_sdwa v1, s5, v2 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
; GFX908-NEXT: v_add_f32_e32 v1, 4.0, v1
; GFX908-NEXT: v_bfe_u32 v3, v1, 16, 1
-; GFX908-NEXT: v_and_b32_e32 v4, 0xff800000, v1
+; GFX908-NEXT: v_or_b32_e32 v4, 0x400000, v1
; GFX908-NEXT: v_add3_u32 v3, v3, v1, s4
-; GFX908-NEXT: v_or_b32_e32 v4, 0x400000, v4
; GFX908-NEXT: v_cmp_u_f32_e32 vcc, v1, v1
; GFX908-NEXT: v_cndmask_b32_e32 v1, v3, v4, vcc
; GFX908-NEXT: v_lshlrev_b32_sdwa v1, s5, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
@@ -1608,9 +1606,8 @@ define amdgpu_kernel void @global_atomic_fadd_ret_bf16_agent(ptr addrspace(1) %p
; GFX90A-NEXT: v_lshrrev_b32_sdwa v1, s5, v3 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
; GFX90A-NEXT: v_add_f32_e32 v1, 4.0, v1
; GFX90A-NEXT: v_bfe_u32 v2, v1, 16, 1
-; GFX90A-NEXT: v_and_b32_e32 v4, 0xff800000, v1
+; GFX90A-NEXT: v_or_b32_e32 v4, 0x400000, v1
; GFX90A-NEXT: v_add3_u32 v2, v2, v1, s4
-; GFX90A-NEXT: v_or_b32_e32 v4, 0x400000, v4
; GFX90A-NEXT: v_cmp_u_f32_e32 vcc, v1, v1
; GFX90A-NEXT: v_cndmask_b32_e32 v1, v2, v4, vcc
; GFX90A-NEXT: v_lshlrev_b32_sdwa v1, s5, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
@@ -1632,7 +1629,6 @@ define amdgpu_kernel void @global_atomic_fadd_ret_bf16_agent(ptr addrspace(1) %p
; GFX10: ; %bb.0:
; GFX10-NEXT: s_load_dwordx2 s[2:3], s[0:1], 0x24
; GFX10-NEXT: v_mov_b32_e32 v0, 0
-; GFX10-NEXT: s_mov_b32 s5, 0xff800000
; GFX10-NEXT: s_waitcnt lgkmcnt(0)
; GFX10-NEXT: s_and_b32 s0, s2, -4
; GFX10-NEXT: s_mov_b32 s1, s3
@@ -1650,7 +1646,7 @@ define amdgpu_kernel void @global_atomic_fadd_ret_bf16_agent(ptr addrspace(1) %p
; GFX10-NEXT: v_lshrrev_b32_sdwa v1, s2, v2 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
; GFX10-NEXT: v_add_f32_e32 v1, 4.0, v1
; GFX10-NEXT: v_bfe_u32 v3, v1, 16, 1
-; GFX10-NEXT: v_and_or_b32 v4, v1, s5, 0x400000
+; GFX10-NEXT: v_or_b32_e32 v4, 0x400000, v1
; GFX10-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
; GFX10-NEXT: v_add3_u32 v3, v3, v1, 0x7fff
; GFX10-NEXT: v_cndmask_b32_e32 v1, v3, v4, vcc_lo
@@ -1673,7 +1669,6 @@ define amdgpu_kernel void @global_atomic_fadd_ret_bf16_agent(ptr addrspace(1) %p
; GFX11-LABEL: global_atomic_fadd_ret_bf16_agent:
; GFX11: ; %bb.0:
; GFX11-NEXT: s_load_b64 s[2:3], s[0:1], 0x24
-; GFX11-NEXT: s_mov_b32 s5, 0xff800000
; GFX11-NEXT: v_mov_b32_e32 v0, 0
; GFX11-NEXT: s_waitcnt lgkmcnt(0)
; GFX11-NEXT: s_and_b32 s0, s2, -4
@@ -1694,7 +1689,7 @@ define amdgpu_kernel void @global_atomic_fadd_ret_bf16_agent(ptr addrspace(1) %p
; GFX11-NEXT: v_lshlrev_b32_e32 v1, 16, v1
; GFX11-NEXT: v_add_f32_e32 v1, 4.0, v1
; GFX11-NEXT: v_bfe_u32 v3, v1, 16, 1
-; GFX11-NEXT: v_and_or_b32 v4, v1, s5, 0x400000
+; GFX11-NEXT: v_or_b32_e32 v4, 0x400000, v1
; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
; GFX11-NEXT: v_add3_u32 v3, v3, v1, 0x7fff
; GFX11-NEXT: v_cndmask_b32_e32 v1, v3, v4, vcc_lo
@@ -1744,9 +1739,8 @@ define amdgpu_kernel void @global_atomic_fadd_ret_bf16_system(ptr addrspace(1) %
; GFX900-NEXT: v_lshrrev_b32_sdwa v1, s5, v2 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
; GFX900-NEXT: v_add_f32_e32 v1, 4.0, v1
; GFX900-NEXT: v_bfe_u32 v3, v1, 16, 1
-; GFX900-NEXT: v_and_b32_e32 v4, 0xff800000, v1
+; GFX900-NEXT: v_or_b32_e32 v4, 0x400000, v1
; GFX900-NEXT: v_add3_u32 v3, v3, v1, s4
-; GFX900-NEXT: v_or_b32_e32 v4, 0x400000, v4
; GFX900-NEXT: v_cmp_u_f32_e32 vcc, v1, v1
; GFX900-NEXT: v_cndmask_b32_e32 v1, v3, v4, vcc
; GFX900-NEXT: v_lshlrev_b32_sdwa v1, s5, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
@@ -1786,9 +1780,8 @@ define amdgpu_kernel void @global_atomic_fadd_ret_bf16_system(ptr addrspace(1) %
; GFX908-NEXT: v_lshrrev_b32_sdwa v1, s5, v2 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
; GFX908-NEXT: v_add_f32_e32 v1, 4.0, v1
; GFX908-NEXT: v_bfe_u32 v3, v1, 16, 1
-; GFX908-NEXT: v_and_b32_e32 v4, 0xff800000, v1
+; GFX908-NEXT: v_or_b32_e32 v4, 0x400000, v1
; GFX908-NEXT: v_add3_u32 v3, v3, v1, s4
-; GFX908-NEXT: v_or_b32_e32 v4, 0x400000, v4
; GFX908-NEXT: v_cmp_u_f32_e32 vcc, v1, v1
; GFX908-NEXT: v_cndmask_b32_e32 v1, v3, v4, vcc
; GFX908-NEXT: v_lshlrev_b32_sdwa v1, s5, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
@@ -1828,9 +1821,8 @@ define amdgpu_kernel void @global_atomic_fadd_ret_bf16_system(ptr addrspace(1) %
; GFX90A-NEXT: v_lshrrev_b32_sdwa v1, s5, v3 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
; GFX90A-NEXT: v_add_f32_e32 v1, 4.0, v1
; GFX90A-NEXT: v_bfe_u32 v2, v1, 16, 1
-; GFX90A-NEXT: v_and_b32_e32 v4, 0xff800000, v1
+; GFX90A-NEXT: v_or_b32_e32 v4, 0x400000, v1
; GFX90A-NEXT: v_add3_u32 v2, v2, v1, s4
-; GFX90A-NEXT: v_or_b32_e32 v4, 0x400000, v4
; GFX90A-NEXT: v_cmp_u_f32_e32 vcc, v1, v1
; GFX90A-NEXT: v_cndmask_b32_e32 v1, v2, v4, vcc
; GFX90A-NEXT: v_lshlrev_b32_sdwa v1, s5, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
@@ -1854,7 +1846,6 @@ define amdgpu_kernel void @global_atomic_fadd_ret_bf16_system(ptr addrspace(1) %
; GFX10: ; %bb.0:
; GFX10-NEXT: s_load_dwordx2 s[2:3], s[0:1], 0x24
; GFX10-NEXT: v_mov_b32_e32 v0, 0
-; GFX10-NEXT: s_mov_b32 s5, 0xff800000
; GFX10-NEXT: s_waitcnt lgkmcnt(0)
; GFX10-NEXT: s_and_b32 s0, s2, -4
; GFX10-NEXT: s_mov_b32 s1, s3
@@ -1872,7 +1863,7 @@ define amdgpu_kernel void @global_atomic_fadd_ret_bf16_system(ptr addrspace(1) %
; GFX10-NEXT: v_lshrrev_b32_sdwa v1, s2, v2 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
; GFX10-NEXT: v_add_f32_e32 v1, 4.0, v1
; GFX10-NEXT: v_bfe_u32 v3, v1, 16, 1
-; GFX10-NEXT: v_and_or_b32 v4, v1, s5, 0x400000
+; GFX10-NEXT: v_or_b32_e32 v4, 0x400000, v1
; GFX10-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
; GFX10-NEXT: v_add3_u32 v3, v3, v1, 0x7fff
; GFX10-NEXT: v_cndmask_b32_e32 v1, v3, v4, vcc_lo
@@ -1895,7 +1886,6 @@ define amdgpu_kernel void @global_atomic_fadd_ret_bf16_system(ptr addrspace(1) %
; GFX11-LABEL: global_atomic_fadd_ret_bf16_system:
; GFX11: ; %bb.0:
; GFX11-NEXT: s_load_b64 s[2:3], s[0:1], 0x24
-; GFX11-NEXT: s_mov_b32 s5, 0xff800000
; GFX11-NEXT: v_mov_b32_e32 v0, 0
; GFX11-NEXT: s_waitcnt lgkmcnt(0)
; GFX11-NEXT: s_and_b32 s0, s2, -4
@@ -1916,7 +1906,7 @@ define amdgpu_kernel void @global_atomic_fadd_ret_bf16_system(ptr addrspace(1) %
; GFX11-NEXT: v_lshlrev_b32_e32 v1, 16, v1
; GFX11-NEXT: v_add_f32_e32 v1, 4.0, v1
; GFX11-NEXT: v_bfe_u32 v3, v1, 16, 1
-; GFX11-NEXT: v_and_or_b32 v4, v1, s5, 0x400000
+; GFX11-NEXT: v_or_b32_e32 v4, 0x400000, v1
; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
; GFX11-NEXT: v_add3_u32 v3, v3, v1, 0x7fff
; GFX11-NEXT: v_cndmask_b32_e32 v1, v3, v4, vcc_lo
diff --git a/llvm/test/CodeGen/AMDGPU/isel-amdgpu-cs-chain-preserve-cc.ll b/llvm/test/CodeGen/AMDGPU/isel-amdgpu-cs-chain-preserve-cc.ll
index 6a7fb7142c29..ba946fe00a8c 100644
--- a/llvm/test/CodeGen/AMDGPU/isel-amdgpu-cs-chain-preserve-cc.ll
+++ b/llvm/test/CodeGen/AMDGPU/isel-amdgpu-cs-chain-preserve-cc.ll
@@ -912,10 +912,9 @@ define amdgpu_cs_chain_preserve void @amdgpu_cs_chain_cc_bfloat(bfloat inreg %a,
; DAGISEL-GFX11-WF32-NEXT: [[S_MOV_B32_:%[0-9]+]]:sreg_32 = S_MOV_B32 32767
; DAGISEL-GFX11-WF32-NEXT: [[V_ADD3_U32_e64_:%[0-9]+]]:vgpr_32 = V_ADD3_U32_e64 killed [[V_BFE_U32_e64_]], [[V_ADD_F32_e64_]], killed [[S_MOV_B32_]], implicit $exec
; DAGISEL-GFX11-WF32-NEXT: [[S_MOV_B32_1:%[0-9]+]]:sreg_32 = S_MOV_B32 4194304
- ; DAGISEL-GFX11-WF32-NEXT: [[S_MOV_B32_2:%[0-9]+]]:sreg_32 = S_MOV_B32 -8388608
- ; DAGISEL-GFX11-WF32-NEXT: [[V_AND_OR_B32_e64_:%[0-9]+]]:vgpr_32 = V_AND_OR_B32_e64 [[V_ADD_F32_e64_]], killed [[S_MOV_B32_2]], killed [[S_MOV_B32_1]], implicit $exec
+ ; DAGISEL-GFX11-WF32-NEXT: [[V_OR_B32_e64_:%[0-9]+]]:vgpr_32 = V_OR_B32_e64 [[V_ADD_F32_e64_]], killed [[S_MOV_B32_1]], implicit $exec
; DAGISEL-GFX11-WF32-NEXT: [[V_CMP_U_F32_e64_:%[0-9]+]]:sreg_32_xm0_xexec = nofpexcept V_CMP_U_F32_e64 0, [[V_ADD_F32_e64_]], 0, [[V_ADD_F32_e64_]], 0, implicit $mode, implicit $exec
- ; DAGISEL-GFX11-WF32-NEXT: [[V_CNDMASK_B32_e64_:%[0-9]+]]:vgpr_32 = V_CNDMASK_B32_e64 0, killed [[V_ADD3_U32_e64_]], 0, killed [[V_AND_OR_B32_e64_]], killed [[V_CMP_U_F32_e64_]], implicit $exec
+ ; DAGISEL-GFX11-WF32-NEXT: [[V_CNDMASK_B32_e64_:%[0-9]+]]:vgpr_32 = V_CNDMASK_B32_e64 0, killed [[V_ADD3_U32_e64_]], 0, killed [[V_OR_B32_e64_]], killed [[V_CMP_U_F32_e64_]], implicit $exec
; DAGISEL-GFX11-WF32-NEXT: [[DEF:%[0-9]+]]:sreg_64 = IMPLICIT_DEF
; DAGISEL-GFX11-WF32-NEXT: [[COPY2:%[0-9]+]]:vreg_64 = COPY [[DEF]]
; DAGISEL-GFX11-WF32-NEXT: FLAT_STORE_SHORT_D16_HI killed [[COPY2]], killed [[V_CNDMASK_B32_e64_]], 0, 0, implicit $exec, implicit $flat_scr :: (store (s16) into `ptr poison`)
@@ -934,10 +933,9 @@ define amdgpu_cs_chain_preserve void @amdgpu_cs_chain_cc_bfloat(bfloat inreg %a,
; DAGISEL-GFX11-WF64-NEXT: [[S_MOV_B32_:%[0-9]+]]:sreg_32 = S_MOV_B32 32767
; DAGISEL-GFX11-WF64-NEXT: [[V_ADD3_U32_e64_:%[0-9]+]]:vgpr_32 = V_ADD3_U32_e64 killed [[V_BFE_U32_e64_]], [[V_ADD_F32_e64_]], killed [[S_MOV_B32_]], implicit $exec
; DAGISEL-GFX11-WF64-NEXT: [[S_MOV_B32_1:%[0-9]+]]:sreg_32 = S_MOV_B32 4194304
- ; DAGISEL-GFX11-WF64-NEXT: [[S_MOV_B32_2:%[0-9]+]]:sreg_32 = S_MOV_B32 -8388608
- ; DAGISEL-GFX11-WF64-NEXT: [[V_AND_OR_B32_e64_:%[0-9]+]]:vgpr_32 = V_AND_OR_B32_e64 [[V_ADD_F32_e64_]], killed [[S_MOV_B32_2]], killed [[S_MOV_B32_1]], implicit $exec
+ ; DAGISEL-GFX11-WF64-NEXT: [[V_OR_B32_e64_:%[0-9]+]]:vgpr_32 = V_OR_B32_e64 [[V_ADD_F32_e64_]], killed [[S_MOV_B32_1]], implicit $exec
; DAGISEL-GFX11-WF64-NEXT: [[V_CMP_U_F32_e64_:%[0-9]+]]:sreg_64_xexec = nofpexcept V_CMP_U_F32_e64 0, [[V_ADD_F32_e64_]], 0, [[V_ADD_F32_e64_]], 0, implicit $mode, implicit $exec
- ; DAGISEL-GFX11-WF64-NEXT: [[V_CNDMASK_B32_e64_:%[0-9]+]]:vgpr_32 = V_CNDMASK_B32_e64 0, killed [[V_ADD3_U32_e64_]], 0, killed [[V_AND_OR_B32_e64_]], killed [[V_CMP_U_F32_e64_]], implicit $exec
+ ; DAGISEL-GFX11-WF64-NEXT: [[V_CNDMASK_B32_e64_:%[0-9]+]]:vgpr_32 = V_CNDMASK_B32_e64 0, killed [[V_ADD3_U32_e64_]], 0, killed [[V_OR_B32_e64_]], killed [[V_CMP_U_F32_e64_]], implicit $exec
; DAGISEL-GFX11-WF64-NEXT: [[DEF:%[0-9]+]]:sreg_64 = IMPLICIT_DEF
; DAGISEL-GFX11-WF64-NEXT: [[COPY2:%[0-9]+]]:vreg_64 = COPY [[DEF]]
; DAGISEL-GFX11-WF64-NEXT: FLAT_STORE_SHORT_D16_HI killed [[COPY2]], killed [[V_CNDMASK_B32_e64_]], 0, 0, implicit $exec, implicit $flat_scr :: (store (s16) into `ptr poison`)
@@ -956,10 +954,9 @@ define amdgpu_cs_chain_preserve void @amdgpu_cs_chain_cc_bfloat(bfloat inreg %a,
; DAGISEL-GFX10-WF32-NEXT: [[S_MOV_B32_:%[0-9]+]]:sreg_32 = S_MOV_B32 32767
; DAGISEL-GFX10-WF32-NEXT: [[V_ADD3_U32_e64_:%[0-9]+]]:vgpr_32 = V_ADD3_U32_e64 killed [[V_BFE_U32_e64_]], [[V_ADD_F32_e64_]], killed [[S_MOV_B32_]], implicit $exec
; DAGISEL-GFX10-WF32-NEXT: [[S_MOV_B32_1:%[0-9]+]]:sreg_32 = S_MOV_B32 4194304
- ; DAGISEL-GFX10-WF32-NEXT: [[S_MOV_B32_2:%[0-9]+]]:sreg_32 = S_MOV_B32 -8388608
- ; DAGISEL-GFX10-WF32-NEXT: [[V_AND_OR_B32_e64_:%[0-9]+]]:vgpr_32 = V_AND_OR_B32_e64 [[V_ADD_F32_e64_]], killed [[S_MOV_B32_2]], killed [[S_MOV_B32_1]], implicit $exec
+ ; DAGISEL-GFX10-WF32-NEXT: [[V_OR_B32_e64_:%[0-9]+]]:vgpr_32 = V_OR_B32_e64 [[V_ADD_F32_e64_]], killed [[S_MOV_B32_1]], implicit $exec
; DAGISEL-GFX10-WF32-NEXT: [[V_CMP_U_F32_e64_:%[0-9]+]]:sreg_32_xm0_xexec = nofpexcept V_CMP_U_F32_e64 0, [[V_ADD_F32_e64_]], 0, [[V_ADD_F32_e64_]], 0, implicit $mode, implicit $exec
- ; DAGISEL-GFX10-WF32-NEXT: [[V_CNDMASK_B32_e64_:%[0-9]+]]:vgpr_32 = V_CNDMASK_B32_e64 0, killed [[V_ADD3_U32_e64_]], 0, killed [[V_AND_OR_B32_e64_]], killed [[V_CMP_U_F32_e64_]], implicit $exec
+ ; DAGISEL-GFX10-WF32-NEXT: [[V_CNDMASK_B32_e64_:%[0-9]+]]:vgpr_32 = V_CNDMASK_B32_e64 0, killed [[V_ADD3_U32_e64_]], 0, killed [[V_OR_B32_e64_]], killed [[V_CMP_U_F32_e64_]], implicit $exec
; DAGISEL-GFX10-WF32-NEXT: [[DEF:%[0-9]+]]:sreg_64 = IMPLICIT_DEF
; DAGISEL-GFX10-WF32-NEXT: [[COPY2:%[0-9]+]]:vreg_64 = COPY [[DEF]]
; DAGISEL-GFX10-WF32-NEXT: FLAT_STORE_SHORT_D16_HI killed [[COPY2]], killed [[V_CNDMASK_B32_e64_]], 0, 0, implicit $exec, implicit $flat_scr :: (store (s16) into `ptr poison`)
@@ -978,10 +975,9 @@ define amdgpu_cs_chain_preserve void @amdgpu_cs_chain_cc_bfloat(bfloat inreg %a,
; DAGISEL-GFX10-WF64-NEXT: [[S_MOV_B32_:%[0-9]+]]:sreg_32 = S_MOV_B32 32767
; DAGISEL-GFX10-WF64-NEXT: [[V_ADD3_U32_e64_:%[0-9]+]]:vgpr_32 = V_ADD3_U32_e64 killed [[V_BFE_U32_e64_]], [[V_ADD_F32_e64_]], killed [[S_MOV_B32_]], implicit $exec
; DAGISEL-GFX10-WF64-NEXT: [[S_MOV_B32_1:%[0-9]+]]:sreg_32 = S_MOV_B32 4194304
- ; DAGISEL-GFX10-WF64-NEXT: [[S_MOV_B32_2:%[0-9]+]]:sreg_32 = S_MOV_B32 -8388608
- ; DAGISEL-GFX10-WF64-NEXT: [[V_AND_OR_B32_e64_:%[0-9]+]]:vgpr_32 = V_AND_OR_B32_e64 [[V_ADD_F32_e64_]], killed [[S_MOV_B32_2]], killed [[S_MOV_B32_1]], implicit $exec
+ ; DAGISEL-GFX10-WF64-NEXT: [[V_OR_B32_e64_:%[0-9]+]]:vgpr_32 = V_OR_B32_e64 [[V_ADD_F32_e64_]], killed [[S_MOV_B32_1]], implicit $exec
; DAGISEL-GFX10-WF64-NEXT: [[V_CMP_U_F32_e64_:%[0-9]+]]:sreg_64_xexec = nofpexcept V_CMP_U_F32_e64 0, [[V_ADD_F32_e64_]], 0, [[V_ADD_F32_e64_]], 0, implicit $mode, implicit $exec
- ; DAGISEL-GFX10-WF64-NEXT: [[V_CNDMASK_B32_e64_:%[0-9]+]]:vgpr_32 = V_CNDMASK_B32_e64 0, killed [[V_ADD3_U32_e64_]], 0, killed [[V_AND_OR_B32_e64_]], killed [[V_CMP_U_F32_e64_]], implicit $exec
+ ; DAGISEL-GFX10-WF64-NEXT: [[V_CNDMASK_B32_e64_:%[0-9]+]]:vgpr_32 = V_CNDMASK_B32_e64 0, killed [[V_ADD3_U32_e64_]], 0, killed [[V_OR_B32_e64_]], killed [[V_CMP_U_F32_e64_]], implicit $exec
; DAGISEL-GFX10-WF64-NEXT: [[DEF:%[0-9]+]]:sreg_64 = IMPLICIT_DEF
; DAGISEL-GFX10-WF64-NEXT: [[COPY2:%[0-9]+]]:vreg_64 = COPY [[DEF]]
; DAGISEL-GFX10-WF64-NEXT: FLAT_STORE_SHORT_D16_HI killed [[COPY2]], killed [[V_CNDMASK_B32_e64_]], 0, 0, implicit $exec, implicit $flat_scr :: (store (s16) into `ptr poison`)
diff --git a/llvm/test/CodeGen/AMDGPU/local-atomics-fp.ll b/llvm/test/CodeGen/AMDGPU/local-atomics-fp.ll
index 48ae98f125bf..5e76dfd9bddd 100644
--- a/llvm/test/CodeGen/AMDGPU/local-atomics-fp.ll
+++ b/llvm/test/CodeGen/AMDGPU/local-atomics-fp.ll
@@ -1413,9 +1413,8 @@ define bfloat @lds_atomic_fadd_ret_bf16(ptr addrspace(3) %ptr) nounwind {
; VI-NEXT: v_add_f32_e32 v3, 4.0, v3
; VI-NEXT: v_bfe_u32 v6, v3, 16, 1
; VI-NEXT: v_add_u32_e32 v6, vcc, v6, v3
-; VI-NEXT: v_and_b32_e32 v7, 0xff800000, v3
; VI-NEXT: v_add_u32_e32 v6, vcc, 0x7fff, v6
-; VI-NEXT: v_or_b32_e32 v7, 0x400000, v7
+; VI-NEXT: v_or_b32_e32 v7, 0x400000, v3
; VI-NEXT: v_cmp_u_f32_e32 vcc, v3, v3
; VI-NEXT: v_cndmask_b32_e32 v3, v6, v7, vcc
; VI-NEXT: v_and_b32_e32 v5, v4, v2
@@ -1451,9 +1450,8 @@ define bfloat @lds_atomic_fadd_ret_bf16(ptr addrspace(3) %ptr) nounwind {
; GFX9-NEXT: v_lshrrev_b32_sdwa v3, v0, v4 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
; GFX9-NEXT: v_add_f32_e32 v3, 4.0, v3
; GFX9-NEXT: v_bfe_u32 v5, v3, 16, 1
-; GFX9-NEXT: v_and_b32_e32 v6, 0xff800000, v3
+; GFX9-NEXT: v_or_b32_e32 v6, 0x400000, v3
; GFX9-NEXT: v_add3_u32 v5, v5, v3, s6
-; GFX9-NEXT: v_or_b32_e32 v6, 0x400000, v6
; GFX9-NEXT: v_cmp_u_f32_e32 vcc, v3, v3
; GFX9-NEXT: v_cndmask_b32_e32 v3, v5, v6, vcc
; GFX9-NEXT: v_lshlrev_b32_sdwa v3, v0, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
@@ -1560,9 +1558,8 @@ define void @lds_atomic_fadd_noret_bf16(ptr addrspace(3) %ptr) nounwind {
; VI-NEXT: v_add_f32_e32 v4, 4.0, v4
; VI-NEXT: v_bfe_u32 v6, v4, 16, 1
; VI-NEXT: v_add_u32_e32 v6, vcc, v6, v4
-; VI-NEXT: v_and_b32_e32 v7, 0xff800000, v4
; VI-NEXT: v_add_u32_e32 v6, vcc, 0x7fff, v6
-; VI-NEXT: v_or_b32_e32 v7, 0x400000, v7
+; VI-NEXT: v_or_b32_e32 v7, 0x400000, v4
; VI-NEXT: v_cmp_u_f32_e32 vcc, v4, v4
; VI-NEXT: v_cndmask_b32_e32 v4, v6, v7, vcc
; VI-NEXT: v_and_b32_e32 v5, v3, v2
@@ -1597,9 +1594,8 @@ define void @lds_atomic_fadd_noret_bf16(ptr addrspace(3) %ptr) nounwind {
; GFX9-NEXT: v_lshrrev_b32_sdwa v4, v0, v3 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
; GFX9-NEXT: v_add_f32_e32 v4, 4.0, v4
; GFX9-NEXT: v_bfe_u32 v5, v4, 16, 1
-; GFX9-NEXT: v_and_b32_e32 v6, 0xff800000, v4
+; GFX9-NEXT: v_or_b32_e32 v6, 0x400000, v4
; GFX9-NEXT: v_add3_u32 v5, v5, v4, s6
-; GFX9-NEXT: v_or_b32_e32 v6, 0x400000, v6
; GFX9-NEXT: v_cmp_u_f32_e32 vcc, v4, v4
; GFX9-NEXT: v_cndmask_b32_e32 v4, v5, v6, vcc
; GFX9-NEXT: v_lshlrev_b32_sdwa v4, v0, v4 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
diff --git a/llvm/test/CodeGen/AMDGPU/vector_shuffle.packed.ll b/llvm/test/CodeGen/AMDGPU/vector_shuffle.packed.ll
index 672c93b6adf7..66c49ba8b734 100644
--- a/llvm/test/CodeGen/AMDGPU/vector_shuffle.packed.ll
+++ b/llvm/test/CodeGen/AMDGPU/vector_shuffle.packed.ll
@@ -4259,65 +4259,57 @@ define amdgpu_kernel void @fma_shuffle_v2bf16(ptr addrspace(1) nocapture readonl
; GFX9-NEXT: v_lshlrev_b32_e32 v2, 16, v2
; GFX9-NEXT: v_fma_f32 v7, v8, v9, v7
; GFX9-NEXT: v_fma_f32 v1, v8, v5, v1
-; GFX9-NEXT: v_fma_f32 v8, v12, v9, v11
; GFX9-NEXT: v_fma_f32 v2, v12, v5, v2
; GFX9-NEXT: v_bfe_u32 v5, v7, 16, 1
-; GFX9-NEXT: v_and_b32_e32 v9, 0xff800000, v7
+; GFX9-NEXT: v_fma_f32 v8, v12, v9, v11
+; GFX9-NEXT: v_or_b32_e32 v9, 0x400000, v7
; GFX9-NEXT: v_bfe_u32 v11, v1, 16, 1
-; GFX9-NEXT: v_and_b32_e32 v12, 0xff800000, v1
; GFX9-NEXT: v_add3_u32 v5, v5, v7, s2
-; GFX9-NEXT: v_or_b32_e32 v9, 0x400000, v9
; GFX9-NEXT: v_cmp_u_f32_e32 vcc, v7, v7
+; GFX9-NEXT: v_or_b32_e32 v12, 0x400000, v1
; GFX9-NEXT: v_bfe_u32 v13, v8, 16, 1
-; GFX9-NEXT: v_and_b32_e32 v14, 0xff800000, v8
; GFX9-NEXT: v_add3_u32 v11, v11, v1, s2
-; GFX9-NEXT: v_or_b32_e32 v12, 0x400000, v12
; GFX9-NEXT: v_cndmask_b32_e32 v5, v5, v9, vcc
; GFX9-NEXT: v_cmp_u_f32_e32 vcc, v1, v1
+; GFX9-NEXT: v_or_b32_e32 v14, 0x400000, v8
; GFX9-NEXT: v_bfe_u32 v15, v2, 16, 1
-; GFX9-NEXT: v_and_b32_e32 v16, 0xff800000, v2
; GFX9-NEXT: v_add3_u32 v13, v13, v8, s2
-; GFX9-NEXT: v_or_b32_e32 v14, 0x400000, v14
; GFX9-NEXT: v_cndmask_b32_e32 v1, v11, v12, vcc
; GFX9-NEXT: v_cmp_u_f32_e32 vcc, v8, v8
+; GFX9-NEXT: v_and_b32_e32 v3, 0xffff0000, v3
+; GFX9-NEXT: v_lshlrev_b32_e32 v10, 16, v6
+; GFX9-NEXT: v_or_b32_e32 v16, 0x400000, v2
; GFX9-NEXT: v_add3_u32 v15, v15, v2, s2
-; GFX9-NEXT: v_or_b32_e32 v16, 0x400000, v16
; GFX9-NEXT: v_cndmask_b32_e32 v7, v13, v14, vcc
; GFX9-NEXT: v_cmp_u_f32_e32 vcc, v2, v2
-; GFX9-NEXT: v_and_b32_e32 v3, 0xffff0000, v3
-; GFX9-NEXT: v_lshlrev_b32_e32 v10, 16, v6
-; GFX9-NEXT: v_cndmask_b32_e32 v2, v15, v16, vcc
; GFX9-NEXT: v_and_b32_e32 v1, 0xffff0000, v1
; GFX9-NEXT: v_and_b32_e32 v6, 0xffff0000, v6
-; GFX9-NEXT: v_and_b32_e32 v4, 0xffff0000, v4
+; GFX9-NEXT: v_cndmask_b32_e32 v2, v15, v16, vcc
; GFX9-NEXT: v_and_b32_e32 v5, 0xffff0000, v5
+; GFX9-NEXT: v_fma_f32 v1, v3, v10, v1
+; GFX9-NEXT: v_and_b32_e32 v4, 0xffff0000, v4
; GFX9-NEXT: v_and_b32_e32 v2, 0xffff0000, v2
; GFX9-NEXT: v_and_b32_e32 v7, 0xffff0000, v7
-; GFX9-NEXT: v_fma_f32 v1, v3, v10, v1
; GFX9-NEXT: v_fma_f32 v3, v3, v6, v5
+; GFX9-NEXT: v_bfe_u32 v5, v1, 16, 1
; GFX9-NEXT: v_fma_f32 v2, v4, v10, v2
; GFX9-NEXT: v_fma_f32 v4, v4, v6, v7
-; GFX9-NEXT: v_bfe_u32 v5, v1, 16, 1
-; GFX9-NEXT: v_and_b32_e32 v6, 0xff800000, v1
+; GFX9-NEXT: v_or_b32_e32 v6, 0x400000, v1
; GFX9-NEXT: v_bfe_u32 v7, v3, 16, 1
-; GFX9-NEXT: v_and_b32_e32 v8, 0xff800000, v3
; GFX9-NEXT: v_add3_u32 v5, v5, v1, s2
-; GFX9-NEXT: v_or_b32_e32 v6, 0x400000, v6
; GFX9-NEXT: v_cmp_u_f32_e32 vcc, v1, v1
+; GFX9-NEXT: v_or_b32_e32 v8, 0x400000, v3
; GFX9-NEXT: v_bfe_u32 v9, v2, 16, 1
-; GFX9-NEXT: v_and_b32_e32 v10, 0xff800000, v2
; GFX9-NEXT: v_add3_u32 v7, v7, v3, s2
-; GFX9-NEXT: v_or_b32_e32 v8, 0x400000, v8
; GFX9-NEXT: v_cndmask_b32_e32 v1, v5, v6, vcc
; GFX9-NEXT: v_cmp_u_f32_e32 vcc, v3, v3
+; GFX9-NEXT: v_or_b32_e32 v10, 0x400000, v2
; GFX9-NEXT: v_bfe_u32 v11, v4, 16, 1
-; GFX9-NEXT: v_and_b32_e32 v12, 0xff800000, v4
; GFX9-NEXT: v_add3_u32 v9, v9, v2, s2
-; GFX9-NEXT: v_or_b32_e32 v10, 0x400000, v10
; GFX9-NEXT: v_cndmask_b32_e32 v3, v7, v8, vcc
; GFX9-NEXT: v_cmp_u_f32_e32 vcc, v2, v2
+; GFX9-NEXT: v_or_b32_e32 v12, 0x400000, v4
; GFX9-NEXT: v_add3_u32 v11, v11, v4, s2
-; GFX9-NEXT: v_or_b32_e32 v12, 0x400000, v12
; GFX9-NEXT: v_cndmask_b32_e32 v2, v9, v10, vcc
; GFX9-NEXT: v_cmp_u_f32_e32 vcc, v4, v4
; GFX9-NEXT: v_cndmask_b32_e32 v4, v11, v12, vcc
@@ -4332,7 +4324,6 @@ define amdgpu_kernel void @fma_shuffle_v2bf16(ptr addrspace(1) nocapture readonl
; GFX10-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x10
; GFX10-NEXT: s_load_dwordx4 s[8:11], s[4:5], 0x0
; GFX10-NEXT: v_lshlrev_b32_e32 v6, 3, v0
-; GFX10-NEXT: s_mov_b32 s2, 0xff800000
; GFX10-NEXT: s_waitcnt lgkmcnt(0)
; GFX10-NEXT: s_clause 0x2
; GFX10-NEXT: global_load_dwordx2 v[0:1], v6, s[0:1]
@@ -4355,20 +4346,20 @@ define amdgpu_kernel void @fma_shuffle_v2bf16(ptr addrspace(1) nocapture readonl
; GFX10-NEXT: v_fmac_f32_e32 v11, v12, v9
; GFX10-NEXT: v_fmac_f32_e32 v1, v12, v4
; GFX10-NEXT: v_bfe_u32 v4, v7, 16, 1
-; GFX10-NEXT: v_and_or_b32 v8, v7, s2, 0x400000
+; GFX10-NEXT: v_or_b32_e32 v8, 0x400000, v7
; GFX10-NEXT: v_bfe_u32 v9, v0, 16, 1
; GFX10-NEXT: v_cmp_u_f32_e32 vcc_lo, v7, v7
-; GFX10-NEXT: v_and_or_b32 v12, v0, s2, 0x400000
+; GFX10-NEXT: v_or_b32_e32 v12, 0x400000, v0
; GFX10-NEXT: v_add3_u32 v4, v4, v7, 0x7fff
; GFX10-NEXT: v_bfe_u32 v15, v1, 16, 1
; GFX10-NEXT: v_add3_u32 v9, v9, v0, 0x7fff
; GFX10-NEXT: v_bfe_u32 v13, v11, 16, 1
-; GFX10-NEXT: v_and_or_b32 v16, v1, s2, 0x400000
+; GFX10-NEXT: v_or_b32_e32 v16, 0x400000, v1
; GFX10-NEXT: v_cndmask_b32_e32 v4, v4, v8, vcc_lo
; GFX10-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0
; GFX10-NEXT: v_add3_u32 v15, v15, v1, 0x7fff
; GFX10-NEXT: v_lshlrev_b32_e32 v10, 16, v5
-; GFX10-NEXT: v_and_or_b32 v14, v11, s2, 0x400000
+; GFX10-NEXT: v_or_b32_e32 v14, 0x400000, v11
; GFX10-NEXT: v_add3_u32 v13, v13, v11, 0x7fff
; GFX10-NEXT: v_cndmask_b32_e32 v0, v9, v12, vcc_lo
; GFX10-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
@@ -4382,7 +4373,7 @@ define amdgpu_kernel void @fma_shuffle_v2bf16(ptr addrspace(1) nocapture readonl
; GFX10-NEXT: v_fmac_f32_e32 v0, v2, v10
; GFX10-NEXT: v_and_b32_e32 v1, 0xffff0000, v1
; GFX10-NEXT: v_cndmask_b32_e32 v7, v13, v14, vcc_lo
-; GFX10-NEXT: v_and_or_b32 v8, v4, s2, 0x400000
+; GFX10-NEXT: v_or_b32_e32 v8, 0x400000, v4
; GFX10-NEXT: v_bfe_u32 v2, v0, 16, 1
; GFX10-NEXT: v_fmac_f32_e32 v1, v3, v10
; GFX10-NEXT: v_and_b32_e32 v7, 0xffff0000, v7
@@ -4390,14 +4381,14 @@ define amdgpu_kernel void @fma_shuffle_v2bf16(ptr addrspace(1) nocapture readonl
; GFX10-NEXT: v_add3_u32 v2, v2, v0, 0x7fff
; GFX10-NEXT: v_bfe_u32 v9, v1, 16, 1
; GFX10-NEXT: v_fmac_f32_e32 v7, v3, v5
-; GFX10-NEXT: v_and_or_b32 v3, v0, s2, 0x400000
-; GFX10-NEXT: v_and_or_b32 v10, v1, s2, 0x400000
+; GFX10-NEXT: v_or_b32_e32 v3, 0x400000, v0
+; GFX10-NEXT: v_or_b32_e32 v10, 0x400000, v1
; GFX10-NEXT: v_bfe_u32 v5, v4, 16, 1
; GFX10-NEXT: v_add3_u32 v9, v9, v1, 0x7fff
; GFX10-NEXT: v_bfe_u32 v11, v7, 16, 1
; GFX10-NEXT: v_cndmask_b32_e32 v0, v2, v3, vcc_lo
; GFX10-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
-; GFX10-NEXT: v_and_or_b32 v12, v7, s2, 0x400000
+; GFX10-NEXT: v_or_b32_e32 v12, 0x400000, v7
; GFX10-NEXT: v_add3_u32 v5, v5, v4, 0x7fff
; GFX10-NEXT: v_add3_u32 v11, v11, v7, 0x7fff
; GFX10-NEXT: v_cndmask_b32_e32 v1, v9, v10, vcc_lo
@@ -4416,7 +4407,6 @@ define amdgpu_kernel void @fma_shuffle_v2bf16(ptr addrspace(1) nocapture readonl
; GFX11-NEXT: s_load_b64 s[2:3], s[0:1], 0x10
; GFX11-NEXT: s_load_b128 s[4:7], s[0:1], 0x0
; GFX11-NEXT: v_lshlrev_b32_e32 v6, 3, v0
-; GFX11-NEXT: s_mov_b32 s0, 0xff800000
; GFX11-NEXT: s_waitcnt lgkmcnt(0)
; GFX11-NEXT: s_clause 0x2
; GFX11-NEXT: global_load_b64 v[0:1], v6, s[2:3]
@@ -4438,11 +4428,11 @@ define amdgpu_kernel void @fma_shuffle_v2bf16(ptr addrspace(1) nocapture readonl
; GFX11-NEXT: v_and_b32_e32 v7, 0xffff0000, v0
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_4)
; GFX11-NEXT: v_bfe_u32 v13, v11, 16, 1
-; GFX11-NEXT: v_and_or_b32 v14, v11, s0, 0x400000
+; GFX11-NEXT: v_or_b32_e32 v14, 0x400000, v11
; GFX11-NEXT: v_bfe_u32 v15, v1, 16, 1
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4)
; GFX11-NEXT: v_fmac_f32_e32 v7, v8, v9
-; GFX11-NEXT: v_and_or_b32 v16, v1, s0, 0x400000
+; GFX11-NEXT: v_or_b32_e32 v16, 0x400000, v1
; GFX11-NEXT: v_add3_u32 v13, v13, v11, 0x7fff
; GFX11-NEXT: v_lshlrev_b32_e32 v0, 16, v0
; GFX11-NEXT: v_add3_u32 v15, v15, v1, 0x7fff
@@ -4450,11 +4440,11 @@ define amdgpu_kernel void @fma_shuffle_v2bf16(ptr addrspace(1) nocapture readonl
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_2) | instid1(VALU_DEP_3)
; GFX11-NEXT: v_fmac_f32_e32 v0, v8, v4
; GFX11-NEXT: v_bfe_u32 v4, v7, 16, 1
-; GFX11-NEXT: v_and_or_b32 v8, v7, s0, 0x400000
+; GFX11-NEXT: v_or_b32_e32 v8, 0x400000, v7
; GFX11-NEXT: v_bfe_u32 v9, v0, 16, 1
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_3)
; GFX11-NEXT: v_add3_u32 v4, v4, v7, 0x7fff
-; GFX11-NEXT: v_and_or_b32 v12, v0, s0, 0x400000
+; GFX11-NEXT: v_or_b32_e32 v12, 0x400000, v0
; GFX11-NEXT: v_add3_u32 v9, v9, v0, 0x7fff
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_2)
; GFX11-NEXT: v_cndmask_b32_e32 v4, v4, v8, vcc_lo
@@ -4466,7 +4456,7 @@ define amdgpu_kernel void @fma_shuffle_v2bf16(ptr addrspace(1) nocapture readonl
; GFX11-NEXT: v_dual_fmac_f32 v4, v2, v5 :: v_dual_cndmask_b32 v1, v15, v16
; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v11, v11
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_3)
-; GFX11-NEXT: v_and_or_b32 v8, v4, s0, 0x400000
+; GFX11-NEXT: v_or_b32_e32 v8, 0x400000, v4
; GFX11-NEXT: v_and_b32_e32 v1, 0xffff0000, v1
; GFX11-NEXT: v_cndmask_b32_e32 v7, v13, v14, vcc_lo
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
@@ -4480,14 +4470,14 @@ define amdgpu_kernel void @fma_shuffle_v2bf16(ptr addrspace(1) nocapture readonl
; GFX11-NEXT: v_add3_u32 v9, v9, v1, 0x7fff
; GFX11-NEXT: v_and_b32_e32 v0, 0xffff0000, v0
; GFX11-NEXT: v_bfe_u32 v11, v7, 16, 1
-; GFX11-NEXT: v_and_or_b32 v12, v7, s0, 0x400000
+; GFX11-NEXT: v_or_b32_e32 v12, 0x400000, v7
; GFX11-NEXT: v_add3_u32 v5, v5, v4, 0x7fff
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_2) | instid1(VALU_DEP_3)
; GFX11-NEXT: v_fmac_f32_e32 v0, v2, v10
-; GFX11-NEXT: v_and_or_b32 v10, v1, s0, 0x400000
+; GFX11-NEXT: v_or_b32_e32 v10, 0x400000, v1
; GFX11-NEXT: v_add3_u32 v11, v11, v7, 0x7fff
; GFX11-NEXT: v_bfe_u32 v2, v0, 16, 1
-; GFX11-NEXT: v_and_or_b32 v3, v0, s0, 0x400000
+; GFX11-NEXT: v_or_b32_e32 v3, 0x400000, v0
; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1)
; GFX11-NEXT: v_add3_u32 v2, v2, v0, 0x7fff
diff --git a/llvm/test/CodeGen/Hexagon/atomicrmw-uinc-udec-wrap.ll b/llvm/test/CodeGen/Hexagon/atomicrmw-uinc-udec-wrap.ll
index 9d7570b9a929..d51c9554a022 100644
--- a/llvm/test/CodeGen/Hexagon/atomicrmw-uinc-udec-wrap.ll
+++ b/llvm/test/CodeGen/Hexagon/atomicrmw-uinc-udec-wrap.ll
@@ -160,10 +160,8 @@ define i64 @atomicrmw_uinc_wrap_i64(ptr %ptr, i64 %val) {
; CHECK-NEXT: }
; CHECK-NEXT: {
; CHECK-NEXT: p0 = cmp.gtu(r3:2,r5:4)
-; CHECK-NEXT: }
-; CHECK-NEXT: {
-; CHECK-NEXT: r8 = mux(p0,r8,r1)
-; CHECK-NEXT: r9 = mux(p0,r9,r1)
+; CHECK-NEXT: if (!p0.new) r8 = add(r1,#0)
+; CHECK-NEXT: if (!p0.new) r9 = add(r1,#0)
; CHECK-NEXT: }
; CHECK-NEXT: {
; CHECK-NEXT: memd_locked(r0,p0) = r9:8
diff --git a/llvm/test/CodeGen/Hexagon/bit-store-upper-sub-hi.mir b/llvm/test/CodeGen/Hexagon/bit-store-upper-sub-hi.mir
new file mode 100644
index 000000000000..ef84043cf502
--- /dev/null
+++ b/llvm/test/CodeGen/Hexagon/bit-store-upper-sub-hi.mir
@@ -0,0 +1,21 @@
+# RUN: llc -march=hexagon -run-pass=hexagon-bit-simplify -o - %s | FileCheck %s
+
+# This test checks that the HexagonBitSimplify pass correctly replaces an
+# S2_storerh_io with an S2_storerf_io that stores the upper halfword
+# of a high subregister, using appropriate subregister boundaries.
+
+# CHECK: S2_storerf_io %0, 28, %{{[0-9]+}}.isub_hi
+# CHECK-NOT: S2_storerf_io %0, 28, %{{[0-9]+}}.isub_lo
+
+---
+name: test_store
+tracksRegLiveness: true
+body: |
+ bb.0:
+ liveins: $r0
+ %0:intregs = COPY $r0
+ %1:doubleregs = IMPLICIT_DEF
+ %2:doubleregs = IMPLICIT_DEF
+ %3:doubleregs = S2_shuffoh %2, %1
+ S2_storerh_io %0, 28, %3.isub_hi
+...
diff --git a/llvm/test/CodeGen/Hexagon/isel/select-vec.ll b/llvm/test/CodeGen/Hexagon/isel/select-vec.ll
index 4e54aa421224..7073c1a2a609 100644
--- a/llvm/test/CodeGen/Hexagon/isel/select-vec.ll
+++ b/llvm/test/CodeGen/Hexagon/isel/select-vec.ll
@@ -1,5 +1,5 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -march=hexagon < %s | FileCheck %s
+; RUN: llc -march=hexagon -hexagon-expand-condsets=0 < %s | FileCheck %s
define <4 x i8> @f0(<4 x i8> %a0, <4 x i8> %a1, i32 %a2) #0 {
; CHECK-LABEL: f0:
diff --git a/llvm/test/CodeGen/Hexagon/load-const-extend-opt.ll b/llvm/test/CodeGen/Hexagon/load-const-extend-opt.ll
new file mode 100644
index 000000000000..6f9e83c23ab3
--- /dev/null
+++ b/llvm/test/CodeGen/Hexagon/load-const-extend-opt.ll
@@ -0,0 +1,68 @@
+; RUN: llc -march=hexagon -O3 -hexagon-small-data-threshold=0 < %s | FileCheck %s
+; This test checks that if there are more than two uses of a constant address, the
+; value is moved into a register and all instances of the constant are replaced with
+; the register. The GenMemAbsolute pass generates an absolute-set instruction if there
+; are more than two uses of this register.
+
+; CHECK: loadi32_3
+; CHECK-NOT: r{{[0-9]+}} = memw(##441652)
+; CHECK-NOT: r{{[0-9]+}} = memw(r{{[0-9]+}}+#0)
+; CHECK:r{{[0-9]+}} = memw(r[[REG:[0-9]+]]=##441652)
+; CHECK-NOT: r{{[0-9]+}} = memw(##441652)
+; CHECK:r{{[0-9]+}} = memw(r[[REG]]+#0)
+; CHECK-NOT: r{{[0-9]+}} = memw(##441652)
+; CHECK:r{{[0-9]+}} = memw(r[[REG]]+#0)
+; CHECK-NOT: r{{[0-9]+}} = memw(##441652)
+
+define void @loadi32_3() #0 {
+entry:
+ %0 = load volatile i32, ptr inttoptr (i32 441652 to ptr), align 4
+ %1 = load volatile i32, ptr inttoptr (i32 441652 to ptr), align 4
+ %2 = load volatile i32, ptr inttoptr (i32 441652 to ptr), align 4
+ ret void
+}
+
+; CHECK: loadi32_2
+; CHECK-NOT: r{{[0-9]+}} = ##441652
+; CHECK: r{{[0-9]+}} = memw(##441652)
+; CHECK: r{{[0-9]+}} = memw(##441652)
+
+define void @loadi32_2() #0 {
+entry:
+ %0 = load volatile i32, ptr inttoptr (i32 441652 to ptr), align 4
+ %1 = load volatile i32, ptr inttoptr (i32 441652 to ptr), align 4
+ ret void
+}
+
+; CHECK: loadi32_abs_global_3
+; CHECK-NOT: r{{[0-9]+}} = memw(##globalInt)
+; CHECK-NOT: r{{[0-9]+}} = memw(r{{[0-9]+}}+#0)
+; CHECK:r{{[0-9]+}} = memw(r[[REG:[0-9]+]]=##globalInt)
+; CHECK-NOT: r{{[0-9]+}} = memw(##globalInt)
+; CHECK:r{{[0-9]+}} = memw(r[[REG]]+#0)
+; CHECK-NOT: r{{[0-9]+}} = memw(##globalInt)
+; CHECK:r{{[0-9]+}} = memw(r[[REG]]+#0)
+; CHECK-NOT: r{{[0-9]+}} = memw(##globalInt)
+
+@globalInt = external global i32, align 8
+define void @loadi32_abs_global_3() #0 {
+entry:
+ %0 = load volatile i32, ptr @globalInt, align 4
+ %1 = load volatile i32, ptr @globalInt, align 4
+ %2 = load volatile i32, ptr @globalInt, align 4
+ ret void
+}
+
+; CHECK: loadi32_abs_global_2
+; CHECK-NOT:r[[REG:[0-9]+]] = ##globalInt
+; CHECK:r{{[0-9]+}} = memw(##globalInt)
+; CHECK:r{{[0-9]+}} = memw(##globalInt)
+
+define void @loadi32_abs_global_2() #0 {
+entry:
+ %0 = load volatile i32, ptr @globalInt, align 4
+ %1 = load volatile i32, ptr @globalInt, align 4
+ ret void
+}
+
+attributes #0 = { nounwind }
diff --git a/llvm/test/CodeGen/Hexagon/post-inc-vec.mir b/llvm/test/CodeGen/Hexagon/post-inc-vec.mir
new file mode 100644
index 000000000000..3788dc3fecd8
--- /dev/null
+++ b/llvm/test/CodeGen/Hexagon/post-inc-vec.mir
@@ -0,0 +1,413 @@
+# RUN: llc -march=hexagon -run-pass hexagon-postincopt %s -o - | FileCheck %s
+
+# Test that we do not generate two post-increment vector loads/stores
+# in the loop.
+# CHECK: J2_loop0r
+# CHECK: V6_vS32b_pi
+# CHECK-NOT: = V6_vL32b_pi
+# CHECK: V6_vL32b_ai
+# CHECK: V6_vL32b_ai
+# CHECK: V6_vS32b_ai
+# CHECK: ENDLOOP0
+
+--- |
+ ; Function Attrs: nocallback nofree nosync nounwind willreturn memory(none)
+ declare <1024 x i1> @llvm.hexagon.V6.pred.scalar2v2.128B(i32) #0
+
+ ; Function Attrs: nocallback nofree nosync nounwind willreturn memory(write)
+ declare void @llvm.hexagon.V6.vS32b.qpred.ai.128B(<1024 x i1>, ptr, <32 x i32>) #1
+
+ ; Function Attrs: nocallback nofree nosync nounwind willreturn memory(none)
+ declare <64 x i32> @llvm.hexagon.V6.vcombine.128B(<32 x i32>, <32 x i32>) #0
+
+ ; Function Attrs: nocallback nofree nosync nounwind willreturn memory(none)
+ declare <32 x i32> @llvm.hexagon.V6.hi.128B(<64 x i32>) #0
+
+ ; Function Attrs: nocallback nofree nosync nounwind willreturn memory(none)
+ declare <32 x i32> @llvm.hexagon.V6.lo.128B(<64 x i32>) #0
+
+ ; Function Attrs: nocallback nofree nosync nounwind willreturn memory(none)
+ declare <32 x i32> @llvm.hexagon.V6.lvsplath.128B(i32) #0
+
+ ; Function Attrs: nocallback nofree nosync nounwind willreturn memory(none)
+ declare <32 x i32> @llvm.hexagon.V6.vasrhbsat.128B(<32 x i32>, <32 x i32>, i32) #0
+
+ ; Function Attrs: nocallback nofree nosync nounwind willreturn memory(none)
+ declare <32 x i32> @llvm.hexagon.V6.vaddhsat.128B(<32 x i32>, <32 x i32>) #0
+
+ ; Function Attrs: nocallback nofree nosync nounwind willreturn memory(none)
+ declare <32 x i32> @llvm.hexagon.V6.vasrh.128B(<32 x i32>, i32) #0
+
+ ; Function Attrs: nocallback nofree nosync nounwind willreturn memory(none)
+ declare <32 x i32> @llvm.hexagon.V6.vavgh.128B(<32 x i32>, <32 x i32>) #0
+
+ ; Function Attrs: nocallback nofree nosync nounwind willreturn memory(none)
+ declare <64 x i32> @llvm.hexagon.V6.vmpabusv.128B(<64 x i32>, <64 x i32>) #0
+
+ ; Function Attrs: nocallback nofree nosync nounwind willreturn memory(inaccessiblemem: write)
+ declare void @llvm.assume(i1 noundef) #2
+
+ ; Function Attrs: noinline nounwind
+ define void @blah(i32 %0, i32 %1, ptr noalias %2, ptr noalias nocapture readonly %3, ptr noalias nocapture readonly %4, ptr nocapture readnone %5, ptr nocapture readnone %6, i32 %7, i32 %8, ptr nocapture readonly %9, ptr nocapture readonly %10) local_unnamed_addr #3 {
+ entry:
+ %11 = call i32 @llvm.hexagon.S2.extractu(i32 %0, i32 23, i32 9)
+ %12 = shl i32 %11, 7
+ %mul16.i = mul nsw i32 %12, %1
+ %add.i = add nsw i32 %1, 1
+ %mul17.i = mul nsw i32 %add.i, %12
+ %cmp184.i = icmp slt i32 %mul16.i, %mul17.i
+ br i1 %cmp184.i, label %for.body.lr.ph.i, label %for.end.i
+
+ for.body.lr.ph.i: ; preds = %entry
+ %13 = tail call <64 x i32> @llvm.hexagon.V6.vcombine.128B(<32 x i32> <i32 1077952576, i32 1077952576, i32 1077952576, i32 1077952576, i32 1077952576, i32 1077952576, i32 1077952576, i32 1077952576, i32 1077952576, i32 1077952576, i32 1077952576, i32 1077952576, i32 1077952576, i32 1077952576, i32 1077952576, i32 1077952576, i32 1077952576, i32 1077952576, i32 1077952576, i32 1077952576, i32 1077952576, i32 1077952576, i32 1077952576, i32 1077952576, i32 1077952576, i32 1077952576, i32 1077952576, i32 1077952576, i32 1077952576, i32 1077952576, i32 1077952576, i32 1077952576>, <32 x i32> <i32 269488144, i32 269488144, i32 269488144, i32 269488144, i32 269488144, i32 269488144, i32 269488144, i32 269488144, i32 269488144, i32 269488144, i32 269488144, i32 269488144, i32 269488144, i32 269488144, i32 269488144, i32 269488144, i32 269488144, i32 269488144, i32 269488144, i32 269488144, i32 269488144, i32 269488144, i32 269488144, i32 269488144, i32 269488144, i32 269488144, i32 269488144, i32 269488144, i32 269488144, i32 269488144, i32 269488144, i32 269488144>) #5
+ %14 = tail call <64 x i32> @llvm.hexagon.V6.vcombine.128B(<32 x i32> zeroinitializer, <32 x i32> zeroinitializer) #5
+ %15 = tail call <32 x i32> @llvm.hexagon.V6.lvsplath.128B(i32 32) #5
+ %cgep = getelementptr i8, ptr %2, i32 %mul16.i
+ %cgep8 = getelementptr i8, ptr %4, i32 %mul16.i
+ %cgep9 = getelementptr i8, ptr %3, i32 %mul16.i
+ br label %for.body.i
+
+ for.body.i: ; preds = %for.body.i, %for.body.lr.ph.i
+ %lsr.iv6 = phi ptr [ %cgep12, %for.body.i ], [ %cgep9, %for.body.lr.ph.i ]
+ %lsr.iv3 = phi ptr [ %cgep11, %for.body.i ], [ %cgep8, %for.body.lr.ph.i ]
+ %lsr.iv = phi ptr [ %cgep10, %for.body.i ], [ %cgep, %for.body.lr.ph.i ]
+ %elemIdx.05.i = phi i32 [ %mul16.i, %for.body.lr.ph.i ], [ %add19.i, %for.body.i ]
+ %16 = load <32 x i32>, ptr %lsr.iv6, align 128
+ %17 = load <32 x i32>, ptr %lsr.iv3, align 128
+ %18 = tail call <64 x i32> @llvm.hexagon.V6.vcombine.128B(<32 x i32> %17, <32 x i32> %16) #5
+ %19 = tail call <64 x i32> @llvm.hexagon.V6.vmpabusv.128B(<64 x i32> %13, <64 x i32> %18) #5
+ %20 = tail call <64 x i32> @llvm.hexagon.V6.vmpabusv.128B(<64 x i32> %14, <64 x i32> %18) #5
+ %21 = tail call <32 x i32> @llvm.hexagon.V6.hi.128B(<64 x i32> %19) #5
+ %22 = tail call <32 x i32> @llvm.hexagon.V6.hi.128B(<64 x i32> %20) #5
+ %23 = tail call <32 x i32> @llvm.hexagon.V6.vasrh.128B(<32 x i32> %22, i32 7) #5
+ %24 = tail call <32 x i32> @llvm.hexagon.V6.vavgh.128B(<32 x i32> %21, <32 x i32> %23) #5
+ %25 = tail call <32 x i32> @llvm.hexagon.V6.lo.128B(<64 x i32> %19) #5
+ %26 = tail call <32 x i32> @llvm.hexagon.V6.lo.128B(<64 x i32> %20) #5
+ %27 = tail call <32 x i32> @llvm.hexagon.V6.vasrh.128B(<32 x i32> %26, i32 7) #5
+ %28 = tail call <32 x i32> @llvm.hexagon.V6.vavgh.128B(<32 x i32> %25, <32 x i32> %27) #5
+ %29 = tail call <32 x i32> @llvm.hexagon.V6.vaddhsat.128B(<32 x i32> %24, <32 x i32> %15) #5
+ %30 = tail call <32 x i32> @llvm.hexagon.V6.vaddhsat.128B(<32 x i32> %28, <32 x i32> %15) #5
+ %31 = tail call <32 x i32> @llvm.hexagon.V6.vasrhbsat.128B(<32 x i32> %29, <32 x i32> %30, i32 4) #5
+ store <32 x i32> %31, ptr %lsr.iv, align 128
+ %add19.i = add nsw i32 %elemIdx.05.i, 128
+ %cmp18.i = icmp slt i32 %add19.i, %mul17.i
+ %cgep10 = getelementptr i8, ptr %lsr.iv, i32 128
+ %cgep11 = getelementptr i8, ptr %lsr.iv3, i32 128
+ %cgep12 = getelementptr i8, ptr %lsr.iv6, i32 128
+ br i1 %cmp18.i, label %for.body.i, label %for.end.i
+
+ for.end.i: ; preds = %for.body.i, %entry
+ ret void
+ }
+
+ ; Function Attrs: nocallback nofree nosync nounwind willreturn memory(argmem: readwrite)
+ declare void @llvm.lifetime.start.p0(i64 immarg, ptr nocapture) #4
+
+ ; Function Attrs: nocallback nofree nosync nounwind willreturn memory(none)
+ declare i32 @llvm.hexagon.S2.extractu(i32, i32 immarg, i32 immarg) #0
+
+ attributes #0 = { nocallback nofree nosync nounwind willreturn memory(none) }
+ attributes #1 = { nocallback nofree nosync nounwind willreturn memory(write) }
+ attributes #2 = { nocallback nofree nosync nounwind willreturn memory(inaccessiblemem: write) }
+ attributes #3 = { noinline nounwind "correctly-rounded-divide-sqrt-fp-math"="false" "disable-tail-calls"="false" "frame-pointer"="all" "less-precise-fpmad"="false" "no-infs-fp-math"="false" "no-jump-tables"="false" "no-nans-fp-math"="false" "no-signed-zeros-fp-math"="true" "no-trapping-math"="true" "stack-protector-buffer-size"="8" "target-cpu"="hexagonv68" "target-features"="+hvx-length128b,+hvxv68,+v68,-long-calls,-small-data" "unsafe-fp-math"="false" "use-soft-float"="false" }
+ attributes #4 = { nocallback nofree nosync nounwind willreturn memory(argmem: readwrite) }
+ attributes #5 = { nounwind }
+
+...
+---
+name: blah
+alignment: 16
+exposesReturnsTwice: false
+legalized: false
+regBankSelected: false
+selected: false
+failedISel: false
+tracksRegLiveness: true
+hasWinCFI: false
+callsEHReturn: false
+callsUnwindInit: false
+hasEHCatchret: false
+hasEHScopes: false
+hasEHFunclets: false
+isOutlined: false
+debugInstrRef: false
+failsVerification: false
+tracksDebugUserValues: false
+registers:
+ - { id: 0, class: intregs, preferred-register: '' }
+ - { id: 1, class: intregs, preferred-register: '' }
+ - { id: 2, class: hvxwr, preferred-register: '' }
+ - { id: 3, class: hvxwr, preferred-register: '' }
+ - { id: 4, class: hvxvr, preferred-register: '' }
+ - { id: 5, class: intregs, preferred-register: '' }
+ - { id: 6, class: intregs, preferred-register: '' }
+ - { id: 7, class: intregs, preferred-register: '' }
+ - { id: 8, class: intregs, preferred-register: '' }
+ - { id: 9, class: intregs, preferred-register: '' }
+ - { id: 10, class: intregs, preferred-register: '' }
+ - { id: 11, class: intregs, preferred-register: '' }
+ - { id: 12, class: intregs, preferred-register: '' }
+ - { id: 13, class: intregs, preferred-register: '' }
+ - { id: 14, class: intregs, preferred-register: '' }
+ - { id: 15, class: intregs, preferred-register: '' }
+ - { id: 16, class: intregs, preferred-register: '' }
+ - { id: 17, class: intregs, preferred-register: '' }
+ - { id: 18, class: intregs, preferred-register: '' }
+ - { id: 19, class: intregs, preferred-register: '' }
+ - { id: 20, class: intregs, preferred-register: '' }
+ - { id: 21, class: intregs, preferred-register: '' }
+ - { id: 22, class: intregs, preferred-register: '' }
+ - { id: 23, class: intregs, preferred-register: '' }
+ - { id: 24, class: intregs, preferred-register: '' }
+ - { id: 25, class: predregs, preferred-register: '' }
+ - { id: 26, class: predregs, preferred-register: '' }
+ - { id: 27, class: hvxvr, preferred-register: '' }
+ - { id: 28, class: intregs, preferred-register: '' }
+ - { id: 29, class: hvxvr, preferred-register: '' }
+ - { id: 30, class: intregs, preferred-register: '' }
+ - { id: 31, class: hvxvr, preferred-register: '' }
+ - { id: 32, class: intregs, preferred-register: '' }
+ - { id: 33, class: hvxvr, preferred-register: '' }
+ - { id: 34, class: hvxvr, preferred-register: '' }
+ - { id: 35, class: hvxwr, preferred-register: '' }
+ - { id: 36, class: hvxwr, preferred-register: '' }
+ - { id: 37, class: hvxwr, preferred-register: '' }
+ - { id: 38, class: hvxvr, preferred-register: '' }
+ - { id: 39, class: hvxvr, preferred-register: '' }
+ - { id: 40, class: intregs, preferred-register: '' }
+ - { id: 41, class: hvxvr, preferred-register: '' }
+ - { id: 42, class: hvxvr, preferred-register: '' }
+ - { id: 43, class: hvxvr, preferred-register: '' }
+ - { id: 44, class: hvxvr, preferred-register: '' }
+ - { id: 45, class: hvxvr, preferred-register: '' }
+ - { id: 46, class: hvxvr, preferred-register: '' }
+ - { id: 47, class: hvxvr, preferred-register: '' }
+ - { id: 48, class: hvxvr, preferred-register: '' }
+ - { id: 49, class: intregslow8, preferred-register: '' }
+ - { id: 50, class: hvxvr, preferred-register: '' }
+ - { id: 51, class: predregs, preferred-register: '' }
+ - { id: 52, class: intregs, preferred-register: '' }
+ - { id: 53, class: intregs, preferred-register: '' }
+ - { id: 54, class: intregs, preferred-register: '' }
+ - { id: 55, class: intregs, preferred-register: '' }
+ - { id: 56, class: intregs, preferred-register: '' }
+ - { id: 57, class: intregs, preferred-register: '' }
+ - { id: 58, class: intregs, preferred-register: '' }
+ - { id: 59, class: intregs, preferred-register: '' }
+ - { id: 60, class: intregs, preferred-register: '' }
+ - { id: 61, class: hvxvr, preferred-register: '' }
+ - { id: 62, class: intregs, preferred-register: '' }
+ - { id: 63, class: hvxvr, preferred-register: '' }
+ - { id: 64, class: intregs, preferred-register: '' }
+ - { id: 65, class: hvxwr, preferred-register: '' }
+ - { id: 66, class: hvxwr, preferred-register: '' }
+ - { id: 67, class: hvxwr, preferred-register: '' }
+ - { id: 68, class: hvxvr, preferred-register: '' }
+ - { id: 69, class: hvxvr, preferred-register: '' }
+ - { id: 70, class: hvxvr, preferred-register: '' }
+ - { id: 71, class: hvxvr, preferred-register: '' }
+ - { id: 72, class: hvxvr, preferred-register: '' }
+ - { id: 73, class: hvxvr, preferred-register: '' }
+ - { id: 74, class: hvxvr, preferred-register: '' }
+ - { id: 75, class: intregs, preferred-register: '' }
+ - { id: 76, class: intregs, preferred-register: '' }
+ - { id: 77, class: intregs, preferred-register: '' }
+ - { id: 78, class: intregs, preferred-register: '' }
+ - { id: 79, class: hvxvr, preferred-register: '' }
+ - { id: 80, class: intregs, preferred-register: '' }
+ - { id: 81, class: hvxvr, preferred-register: '' }
+ - { id: 82, class: intregs, preferred-register: '' }
+ - { id: 83, class: hvxwr, preferred-register: '' }
+ - { id: 84, class: hvxwr, preferred-register: '' }
+ - { id: 85, class: hvxwr, preferred-register: '' }
+ - { id: 86, class: hvxvr, preferred-register: '' }
+ - { id: 87, class: hvxvr, preferred-register: '' }
+ - { id: 88, class: hvxvr, preferred-register: '' }
+ - { id: 89, class: hvxvr, preferred-register: '' }
+ - { id: 90, class: hvxvr, preferred-register: '' }
+ - { id: 91, class: hvxvr, preferred-register: '' }
+ - { id: 92, class: hvxvr, preferred-register: '' }
+ - { id: 93, class: intregs, preferred-register: '' }
+ - { id: 94, class: intregs, preferred-register: '' }
+ - { id: 95, class: intregs, preferred-register: '' }
+ - { id: 96, class: intregs, preferred-register: '' }
+ - { id: 97, class: predregs, preferred-register: '' }
+ - { id: 98, class: predregs, preferred-register: '' }
+liveins:
+ - { reg: '$r0', virtual-reg: '%16' }
+ - { reg: '$r1', virtual-reg: '%17' }
+ - { reg: '$r2', virtual-reg: '%18' }
+ - { reg: '$r3', virtual-reg: '%19' }
+ - { reg: '$r4', virtual-reg: '%20' }
+frameInfo:
+ isFrameAddressTaken: false
+ isReturnAddressTaken: false
+ hasStackMap: false
+ hasPatchPoint: false
+ stackSize: 0
+ offsetAdjustment: 0
+ maxAlignment: 4
+ adjustsStack: false
+ hasCalls: false
+ stackProtector: ''
+ functionContext: ''
+ maxCallFrameSize: 4294967295
+ cvBytesOfCalleeSavedRegisters: 0
+ hasOpaqueSPAdjustment: false
+ hasVAStart: false
+ hasMustTailInVarArgFunc: false
+ hasTailCall: false
+ localFrameSize: 0
+ savePoint: ''
+ restorePoint: ''
+fixedStack:
+ - { id: 0, type: default, offset: 24, size: 4, alignment: 8, stack-id: default,
+ isImmutable: true, isAliased: false, callee-saved-register: '', callee-saved-restored: true,
+ debug-info-variable: '', debug-info-expression: '', debug-info-location: '' }
+ - { id: 1, type: default, offset: 20, size: 4, alignment: 4, stack-id: default,
+ isImmutable: true, isAliased: false, callee-saved-register: '', callee-saved-restored: true,
+ debug-info-variable: '', debug-info-expression: '', debug-info-location: '' }
+ - { id: 2, type: default, offset: 16, size: 4, alignment: 8, stack-id: default,
+ isImmutable: true, isAliased: false, callee-saved-register: '', callee-saved-restored: true,
+ debug-info-variable: '', debug-info-expression: '', debug-info-location: '' }
+ - { id: 3, type: default, offset: 12, size: 4, alignment: 4, stack-id: default,
+ isImmutable: true, isAliased: false, callee-saved-register: '', callee-saved-restored: true,
+ debug-info-variable: '', debug-info-expression: '', debug-info-location: '' }
+ - { id: 4, type: default, offset: 8, size: 4, alignment: 8, stack-id: default,
+ isImmutable: true, isAliased: false, callee-saved-register: '', callee-saved-restored: true,
+ debug-info-variable: '', debug-info-expression: '', debug-info-location: '' }
+stack: []
+entry_values: []
+callSites: []
+debugValueSubstitutions: []
+constants: []
+machineFunctionInfo: {}
+body: |
+ bb.0.entry:
+ successors: %bb.1(0x40000000), %bb.3(0x40000000)
+ liveins: $r0, $r1, $r2, $r3, $r4
+
+ %20:intregs = COPY $r4
+ %19:intregs = COPY $r3
+ %18:intregs = COPY $r2
+ %17:intregs = COPY $r1
+ %16:intregs = COPY $r0
+ %22:intregs = S2_extractu %16, 23, 9
+ %23:intregs = S2_asl_i_r %22, 7
+ %0:intregs = nsw M2_mpyi %23, %17
+ %24:intregs = nsw A2_addi %17, 1
+ %1:intregs = nsw M2_mpyi %24, %23
+ %25:predregs = C2_cmpgt %1, %0
+ J2_jumpf %25, %bb.3, implicit-def dead $pc
+ J2_jump %bb.1, implicit-def dead $pc
+
+ bb.1.for.body.lr.ph.i:
+ successors: %bb.4(0x40000000), %bb.6(0x40000000)
+
+ %28:intregs = A2_tfrsi 269488144
+ %27:hvxvr = V6_lvsplatw %28
+ %30:intregs = A2_tfrsi 1077952576
+ %29:hvxvr = V6_lvsplatw %30
+ %2:hvxwr = REG_SEQUENCE %29, %subreg.vsub_hi, %27, %subreg.vsub_lo
+ %31:hvxvr = V6_vd0
+ %3:hvxwr = REG_SEQUENCE %31, %subreg.vsub_hi, %31, %subreg.vsub_lo
+ %32:intregs = A2_tfrsi 32
+ %4:hvxvr = V6_lvsplath %32
+ %5:intregs = A2_add %18, %0
+ %6:intregs = A2_add %20, %0
+ %7:intregs = A2_add %19, %0
+ %40:intregs = A2_tfrsi 7
+ %49:intregslow8 = A2_tfrsi 4
+ %52:intregs = A2_sub %1, %0
+ %53:intregs = A2_addi %52, 127
+ %54:intregs = S2_lsr_i_r %53, 7
+ %55:intregs = COPY %54
+ %56:intregs = S2_lsr_i_r %55, 1
+ %57:intregs = A2_andir %55, 1
+ %97:predregs = C2_cmpgtui %56, 0
+ J2_jumpf %97, %bb.6, implicit-def $pc
+ J2_jump %bb.4, implicit-def $pc
+
+ bb.4:
+ successors: %bb.5(0x80000000)
+
+ J2_loop0r %bb.5, %56, implicit-def $lc0, implicit-def $sa0, implicit-def $usr
+ J2_jump %bb.5, implicit-def $pc
+
+ bb.5:
+ successors: %bb.5(0x40000000), %bb.6(0x40000000)
+
+ %58:intregs = PHI %7, %bb.4, %80, %bb.5
+ %59:intregs = PHI %6, %bb.4, %82, %bb.5
+ %60:intregs = PHI %5, %bb.4, %93, %bb.5
+ %61:hvxvr, %62:intregs = V6_vL32b_pi %58, 128 :: (load (s1024) from %ir.lsr.iv6)
+ %63:hvxvr, %64:intregs = V6_vL32b_pi %59, 128 :: (load (s1024) from %ir.lsr.iv3)
+ %65:hvxwr = REG_SEQUENCE %63, %subreg.vsub_hi, %61, %subreg.vsub_lo
+ %66:hvxwr = V6_vmpabusv %2, %65
+ %67:hvxwr = V6_vmpabusv %3, %65
+ %68:hvxvr = V6_vasrh %67.vsub_hi, %40
+ %69:hvxvr = V6_vavgh %66.vsub_hi, %68
+ %70:hvxvr = V6_vasrh %67.vsub_lo, %40
+ %71:hvxvr = V6_vavgh %66.vsub_lo, %70
+ %72:hvxvr = V6_vaddhsat %69, %4
+ %73:hvxvr = V6_vaddhsat %71, %4
+ %74:hvxvr = V6_vasrhbsat %72, %73, %49
+ %75:intregs = V6_vS32b_pi %60, 128, %74 :: (store (s1024) into %ir.lsr.iv)
+ %79:hvxvr, %80:intregs = V6_vL32b_pi %62, 128 :: (load (s1024) from %ir.lsr.iv6 + 128)
+ %81:hvxvr, %82:intregs = V6_vL32b_pi %64, 128 :: (load (s1024) from %ir.lsr.iv3 + 128)
+ %83:hvxwr = REG_SEQUENCE %81, %subreg.vsub_hi, %79, %subreg.vsub_lo
+ %84:hvxwr = V6_vmpabusv %2, %83
+ %85:hvxwr = V6_vmpabusv %3, %83
+ %86:hvxvr = V6_vasrh %85.vsub_hi, %40
+ %87:hvxvr = V6_vavgh %84.vsub_hi, %86
+ %88:hvxvr = V6_vasrh %85.vsub_lo, %40
+ %89:hvxvr = V6_vavgh %84.vsub_lo, %88
+ %90:hvxvr = V6_vaddhsat %87, %4
+ %91:hvxvr = V6_vaddhsat %89, %4
+ %92:hvxvr = V6_vasrhbsat %90, %91, %49
+ %93:intregs = V6_vS32b_pi %75, 128, %92 :: (store (s1024) into %ir.lsr.iv + 128)
+ ENDLOOP0 %bb.5, implicit-def $pc, implicit-def $lc0, implicit $sa0, implicit $lc0
+ J2_jump %bb.6, implicit-def $pc
+
+ bb.6:
+ successors: %bb.7(0x40000000), %bb.8(0x40000000)
+
+ %94:intregs = PHI %7, %bb.1, %80, %bb.5
+ %95:intregs = PHI %6, %bb.1, %82, %bb.5
+ %96:intregs = PHI %5, %bb.1, %93, %bb.5
+ %98:predregs = C2_cmpgtui %57, 0
+ J2_jumpf %98, %bb.8, implicit-def $pc
+ J2_jump %bb.7, implicit-def $pc
+
+ bb.7:
+ successors: %bb.2(0x80000000)
+
+ J2_jump %bb.2, implicit-def $pc
+
+ bb.2.for.body.i (machine-block-address-taken):
+ successors: %bb.8(0x04000000)
+
+ %33:hvxvr, %15:intregs = V6_vL32b_pi %94, 128 :: (load (s1024) from %ir.lsr.iv6)
+ %34:hvxvr, %14:intregs = V6_vL32b_pi %95, 128 :: (load (s1024) from %ir.lsr.iv3)
+ %35:hvxwr = REG_SEQUENCE %34, %subreg.vsub_hi, %33, %subreg.vsub_lo
+ %36:hvxwr = V6_vmpabusv %2, %35
+ %37:hvxwr = V6_vmpabusv %3, %35
+ %41:hvxvr = V6_vasrh %37.vsub_hi, %40
+ %42:hvxvr = V6_vavgh %36.vsub_hi, %41
+ %45:hvxvr = V6_vasrh %37.vsub_lo, %40
+ %46:hvxvr = V6_vavgh %36.vsub_lo, %45
+ %47:hvxvr = V6_vaddhsat %42, %4
+ %48:hvxvr = V6_vaddhsat %46, %4
+ %50:hvxvr = V6_vasrhbsat %47, %48, %49
+ %13:intregs = V6_vS32b_pi %96, 128, %50 :: (store (s1024) into %ir.lsr.iv)
+ J2_jump %bb.8, implicit-def $pc
+
+ bb.8:
+ successors: %bb.3(0x80000000)
+
+ J2_jump %bb.3, implicit-def $pc
+
+ bb.3.for.end.i:
+ PS_jmpret $r31, implicit-def dead $pc
+
+...
diff --git a/llvm/test/CodeGen/Hexagon/post_inc_store.mir b/llvm/test/CodeGen/Hexagon/post_inc_store.mir
new file mode 100644
index 000000000000..3e3f51ac9114
--- /dev/null
+++ b/llvm/test/CodeGen/Hexagon/post_inc_store.mir
@@ -0,0 +1,168 @@
+# RUN: llc -march=hexagon -run-pass hexagon-postincopt %s -o - | FileCheck %s
+
+# Test that we convert a post-inc load and store to a regular load and post-inc
+# store.
+# CHECK: J2_loop0r
+# CHECK-NOT: = L2_loadruh_pi
+# CHECK: L2_loadruh_io
+# CHECK: S2_storerh_pi
+# CHECK: ENDLOOP0
+
+--- |
+ ; Function Attrs: nofree norecurse nounwind
+ define dso_local void @blam(i32 %arg, ptr nocapture %arg1, i16 signext %arg2) local_unnamed_addr #0 {
+ bb:
+ %icmp = icmp eq i32 %arg, 0
+ br i1 %icmp, label %bb13, label %bb3
+
+ bb3: ; preds = %bb, %bb10
+ %phi = phi i32 [ %add11, %bb10 ], [ 0, %bb ]
+ %mul = mul i32 %phi, %arg
+ %cgep = getelementptr i16, ptr %arg1, i32 %mul
+ br label %bb4
+
+ bb4: ; preds = %bb4, %bb3
+ %lsr.iv = phi i32 [ %lsr.iv.next, %bb4 ], [ %arg, %bb3 ]
+ %phi5 = phi ptr [ %cgep, %bb3 ], [ %cgep1, %bb4 ]
+ %load = load i16, ptr %phi5, align 2
+ %add = add i16 %load, %arg2
+ store i16 %add, ptr %phi5, align 2
+ %lsr.iv.next = add i32 %lsr.iv, -1
+ %icmp8 = icmp eq i32 %lsr.iv.next, 0
+ %cgep1 = getelementptr i16, ptr %phi5, i32 1
+ br i1 %icmp8, label %bb10, label %bb4
+
+ bb10: ; preds = %bb4
+ %add11 = add nuw i32 %phi, 1
+ %icmp12 = icmp eq i32 %add11, %arg
+ br i1 %icmp12, label %bb13, label %bb3
+
+ bb13: ; preds = %bb10, %bb
+ ret void
+ }
+
+ attributes #0 = { nofree norecurse nounwind "correctly-rounded-divide-sqrt-fp-math"="false" "disable-tail-calls"="false" "frame-pointer"="all" "less-precise-fpmad"="false" "min-legal-vector-width"="0" "no-infs-fp-math"="false" "no-jump-tables"="false" "no-nans-fp-math"="false" "no-signed-zeros-fp-math"="false" "no-trapping-math"="true" "stack-protector-buffer-size"="8" "target-cpu"="hexagonv68" "target-features"="+v68,-long-calls" "unsafe-fp-math"="false" "use-soft-float"="false" }
+
+...
+---
+name: blam
+alignment: 16
+exposesReturnsTwice: false
+legalized: false
+regBankSelected: false
+selected: false
+failedISel: false
+tracksRegLiveness: true
+hasWinCFI: false
+callsEHReturn: false
+callsUnwindInit: false
+hasEHCatchret: false
+hasEHScopes: false
+hasEHFunclets: false
+isOutlined: false
+debugInstrRef: false
+failsVerification: false
+tracksDebugUserValues: false
+registers:
+ - { id: 0, class: intregs, preferred-register: '' }
+ - { id: 1, class: intregs, preferred-register: '' }
+ - { id: 2, class: intregs, preferred-register: '' }
+ - { id: 3, class: intregs, preferred-register: '' }
+ - { id: 4, class: intregs, preferred-register: '' }
+ - { id: 5, class: intregs, preferred-register: '' }
+ - { id: 6, class: intregs, preferred-register: '' }
+ - { id: 7, class: intregs, preferred-register: '' }
+ - { id: 8, class: intregs, preferred-register: '' }
+ - { id: 9, class: intregs, preferred-register: '' }
+ - { id: 10, class: intregs, preferred-register: '' }
+ - { id: 11, class: intregs, preferred-register: '' }
+ - { id: 12, class: predregs, preferred-register: '' }
+ - { id: 13, class: intregs, preferred-register: '' }
+ - { id: 14, class: intregs, preferred-register: '' }
+ - { id: 15, class: intregs, preferred-register: '' }
+ - { id: 16, class: predregs, preferred-register: '' }
+ - { id: 17, class: predregs, preferred-register: '' }
+ - { id: 18, class: predregs, preferred-register: '' }
+ - { id: 19, class: predregs, preferred-register: '' }
+ - { id: 20, class: intregs, preferred-register: '' }
+ - { id: 21, class: intregs, preferred-register: '' }
+liveins:
+ - { reg: '$r0', virtual-reg: '%7' }
+ - { reg: '$r1', virtual-reg: '%8' }
+ - { reg: '$r2', virtual-reg: '%9' }
+frameInfo:
+ isFrameAddressTaken: false
+ isReturnAddressTaken: false
+ hasStackMap: false
+ hasPatchPoint: false
+ stackSize: 0
+ offsetAdjustment: 0
+ maxAlignment: 1
+ adjustsStack: false
+ hasCalls: false
+ stackProtector: ''
+ functionContext: ''
+ maxCallFrameSize: 4294967295
+ cvBytesOfCalleeSavedRegisters: 0
+ hasOpaqueSPAdjustment: false
+ hasVAStart: false
+ hasMustTailInVarArgFunc: false
+ hasTailCall: false
+ localFrameSize: 0
+ savePoint: ''
+ restorePoint: ''
+fixedStack: []
+stack: []
+entry_values: []
+callSites: []
+debugValueSubstitutions: []
+constants: []
+machineFunctionInfo: {}
+body: |
+ bb.0.bb:
+ successors: %bb.4(0x30000000), %bb.5(0x50000000)
+ liveins: $r0, $r1, $r2
+
+ %9:intregs = COPY $r2
+ %8:intregs = COPY $r1
+ %7:intregs = COPY $r0
+ %21:intregs = COPY %7
+ %20:intregs = COPY %7
+ %12:predregs = C2_cmpeqi %7, 0
+ J2_jumpt %12, %bb.4, implicit-def $pc
+
+ bb.5:
+ successors: %bb.1(0x80000000)
+
+ %11:intregs = A2_tfrsi 0
+ J2_loop1r %bb.1, %21, implicit-def $lc1, implicit-def $sa1
+
+ bb.1.bb3 (machine-block-address-taken):
+ successors: %bb.2(0x80000000)
+
+ %0:intregs = PHI %11, %bb.5, %6, %bb.3
+ %13:intregs = M2_mpyi %0, %7
+ %1:intregs = S2_addasl_rrri %8, %13, 1
+ J2_loop0r %bb.2, %20, implicit-def $lc0, implicit-def $sa0, implicit-def $usr
+
+ bb.2.bb4 (machine-block-address-taken):
+ successors: %bb.3(0x04000000), %bb.2(0x7c000000)
+
+ %3:intregs = PHI %1, %bb.1, %5, %bb.2
+ %14:intregs = L2_loadruh_io %3, 0 :: (load (s16) from %ir.phi5)
+ %15:intregs = A2_add %14, %9
+ %5:intregs = S2_storerh_pi %3, 2, %15 :: (store (s16) into %ir.phi5)
+ ENDLOOP0 %bb.2, implicit-def $pc, implicit-def $lc0, implicit $sa0, implicit $lc0
+ J2_jump %bb.3, implicit-def dead $pc
+
+ bb.3.bb10:
+ successors: %bb.4(0x04000000), %bb.1(0x7c000000)
+
+ %6:intregs = nuw A2_addi %0, 1
+ ENDLOOP1 %bb.1, implicit-def $pc, implicit-def $lc1, implicit $sa1, implicit $lc1
+ J2_jump %bb.4, implicit-def dead $pc
+
+ bb.4.bb13:
+ PS_jmpret $r31, implicit-def dead $pc
+
+...
diff --git a/llvm/test/CodeGen/Hexagon/postincopt-crash.mir b/llvm/test/CodeGen/Hexagon/postincopt-crash.mir
new file mode 100644
index 000000000000..e22053421791
--- /dev/null
+++ b/llvm/test/CodeGen/Hexagon/postincopt-crash.mir
@@ -0,0 +1,58 @@
+# RUN: llc -march=hexagon -run-pass=hexagon-postincopt %s -o /dev/null
+# REQUIRES: asserts
+# Test that we do not hit unreachable code when dealing with L4_ior_memoph_io.
+
+...
+---
+name: foo
+alignment: 4
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ successors: %bb.4(0x30000000), %bb.5(0x50000000)
+ liveins: $r0, $r1, $r2
+
+ %9:intregs = COPY $r2
+ %8:intregs = COPY $r1
+ %7:intregs = COPY $r0
+ %21:intregs = COPY %7
+ %20:intregs = COPY %7
+ %12:predregs = C2_cmpeqi %7, 0
+ J2_jumpt %12, %bb.4, implicit-def $pc
+
+ bb.5:
+ successors: %bb.1(0x80000000)
+
+ %11:intregs = A2_tfrsi 0
+ J2_loop1r %bb.1, %21, implicit-def $lc1, implicit-def $sa1
+
+ bb.1:
+ successors: %bb.2(0x80000000)
+
+ %0:intregs = PHI %11, %bb.5, %6, %bb.3
+ %13:intregs = M2_mpyi %0, %7
+ %1:intregs = S2_addasl_rrri %8, %13, 1
+ J2_loop0r %bb.2, %20, implicit-def $lc0, implicit-def $sa0, implicit-def $usr
+
+ bb.2:
+ successors: %bb.3(0x04000000), %bb.2(0x7c000000)
+
+ %3:intregs = PHI %1, %bb.1, %5, %bb.2
+ %14:intregs = L2_loadruh_io %3, 0
+ L4_ior_memoph_io %3:intregs, 0, 21
+ %15:intregs = A2_add %14, %9
+ %5:intregs = S2_storerh_pi %3, 2, %15
+ ENDLOOP0 %bb.2, implicit-def $pc, implicit-def $lc0, implicit $sa0, implicit $lc0
+ J2_jump %bb.3, implicit-def dead $pc
+
+ bb.3:
+ successors: %bb.4(0x04000000), %bb.1(0x7c000000)
+
+ %6:intregs = nuw A2_addi %0, 1
+ ENDLOOP1 %bb.1, implicit-def $pc, implicit-def $lc1, implicit $sa1, implicit $lc1
+ J2_jump %bb.4, implicit-def dead $pc
+
+ bb.4:
+ PS_jmpret $r31, implicit-def dead $pc
+
+...
diff --git a/llvm/test/CodeGen/Hexagon/postincopt-dcfetch.mir b/llvm/test/CodeGen/Hexagon/postincopt-dcfetch.mir
new file mode 100644
index 000000000000..27d653c99f7b
--- /dev/null
+++ b/llvm/test/CodeGen/Hexagon/postincopt-dcfetch.mir
@@ -0,0 +1,19 @@
+# RUN: llc -march=hexagon -run-pass hexagon-postincopt %s -o - | FileCheck %s
+# Check that this doesn't crash.
+# CHECK: Y2_dcfetchbo
+
+name: fred
+tracksRegLiveness: true
+body: |
+ bb.0:
+ successors: %bb.1
+ %0:intregs = IMPLICIT_DEF
+
+ bb.1:
+ successors: %bb.1
+
+ %1:intregs = PHI %0:intregs, %bb.0, %2:intregs, %bb.1
+ Y2_dcfetchbo %1:intregs, 0
+ %2:intregs = A2_addi %1:intregs, 1
+ J2_jump %bb.1, implicit-def dead $pc
+...
diff --git a/llvm/test/CodeGen/Hexagon/reg-by-name.ll b/llvm/test/CodeGen/Hexagon/reg-by-name.ll
index 4abea83ba6dd..cc8807e4f4d6 100644
--- a/llvm/test/CodeGen/Hexagon/reg-by-name.ll
+++ b/llvm/test/CodeGen/Hexagon/reg-by-name.ll
@@ -1,4 +1,4 @@
-; RUN: llc -march=hexagon < %s | FileCheck %s
+; RUN: llc -march=hexagon -hexagon-tfr-cleanup=0 < %s | FileCheck %s
target triple = "hexagon"
@@ -647,7 +647,7 @@ entry:
ret i32 %1
}
-attributes #0 = { noinline nounwind optnone "target-cpu"="hexagonv62" }
+attributes #0 = { noinline nounwind optnone "target-cpu"="hexagonv73" }
attributes #1 = { nounwind }
attributes #2 = { nounwind readonly }
diff --git a/llvm/test/CodeGen/Hexagon/store-const-extend-opt.ll b/llvm/test/CodeGen/Hexagon/store-const-extend-opt.ll
new file mode 100644
index 000000000000..dccf176f3bd0
--- /dev/null
+++ b/llvm/test/CodeGen/Hexagon/store-const-extend-opt.ll
@@ -0,0 +1,72 @@
+; RUN: llc -march=hexagon -O3 -hexagon-small-data-threshold=0 < %s | FileCheck %s
+; This test checks that if there are more than two uses of a constant address, the
+; value is moved into a register and all instances of the constant are replaced with
+; the register. The GenMemAbsolute pass generates an absolute-set instruction if there
+; are more than two uses of this register.
+
+; CHECK: storetrunci32_3
+; CHECK-NOT: memw(##441652) = r{{[0-9]+}}
+; CHECK-NOT: memw(r{{[0-9]+}}+#0) = r{{[0-9]+}}
+; CHECK:memw(r[[REG:[0-9]+]]=##441652) = r{{[0-9]+}}
+; CHECK-NOT: memw(##441652) = r{{[0-9]+}}
+; CHECK:memw(r[[REG]]+#0) = r{{[0-9]+}}
+; CHECK-NOT: memw(##441652) = r{{[0-9]+}}
+; CHECK:memw(r[[REG]]+#0) = r{{[0-9]+}}
+; CHECK-NOT: memw(##441652) = r{{[0-9]+}}
+
+define void @storetrunci32_3(i64 %descr_addr, i32 %rpm_or_sys, i32 %kkr) #0 {
+entry:
+ %conv = trunc i64 %descr_addr to i32
+ store volatile i32 %conv, ptr inttoptr (i32 441652 to ptr), align 4
+ store volatile i32 %rpm_or_sys, ptr inttoptr (i32 441652 to ptr), align 4
+ store volatile i32 %kkr, ptr inttoptr (i32 441652 to ptr), align 4
+ ret void
+}
+
+; CHECK: storetrunci32_2
+; CHECK-NOT: r{{[0-9]+}} = ##441652
+; CHECK: memw(##441652) = r{{[0-9]+}}
+; CHECK: memw(##441652) = r{{[0-9]+}}
+
+define void @storetrunci32_2(i64 %descr_addr, i32 %rpm_or_sys) #0 {
+entry:
+ %conv = trunc i64 %descr_addr to i32
+ store volatile i32 %conv, ptr inttoptr (i32 441652 to ptr), align 4
+ store volatile i32 %rpm_or_sys, ptr inttoptr (i32 441652 to ptr), align 4
+ ret void
+}
+
+; CHECK: storetrunci32_abs_global_3
+; CHECK-NOT: memw(##globalInt) = r{{[0-9]+}}
+; CHECK-NOT: memw(r{{[0-9]+}}+#0) = r{{[0-9]+}}
+; CHECK:memw(r[[REG:[0-9]+]]=##globalInt) = r{{[0-9]+}}
+; CHECK-NOT: memw(##globalInt) = r{{[0-9]+}}
+; CHECK:memw(r[[REG]]+#0) = r{{[0-9]+}}
+; CHECK-NOT: memw(##globalInt) = r{{[0-9]+}}
+; CHECK:memw(r[[REG]]+#0) = r{{[0-9]+}}
+; CHECK-NOT: memw(##globalInt) = r{{[0-9]+}}
+
+@globalInt = external global i32, align 8
+define void @storetrunci32_abs_global_3(i64 %descr_addr, i32 %rpm_or_sys, i32 %kkr) #0 {
+entry:
+ %conv = trunc i64 %descr_addr to i32
+ store volatile i32 %conv, ptr @globalInt, align 4
+ store volatile i32 %rpm_or_sys, ptr @globalInt, align 4
+ store volatile i32 %kkr, ptr @globalInt, align 4
+ ret void
+}
+
+; CHECK: storetrunci32_abs_global_2
+; CHECK-NOT:r[[REG:[0-9]+]] = ##globalInt
+; CHECK:memw(##globalInt) = r{{[0-9]+}}
+; CHECK:memw(##globalInt) = r{{[0-9]+}}
+
+define void @storetrunci32_abs_global_2(i64 %descr_addr, i32 %rpm_or_sys) #0 {
+entry:
+ %conv = trunc i64 %descr_addr to i32
+ store volatile i32 %conv, ptr @globalInt, align 4
+ store volatile i32 %rpm_or_sys, ptr @globalInt, align 4
+ ret void
+}
+
+attributes #0 = { nounwind }
diff --git a/llvm/test/CodeGen/Hexagon/tfr-slotindex.ll b/llvm/test/CodeGen/Hexagon/tfr-slotindex.ll
new file mode 100644
index 000000000000..cebba9476c68
--- /dev/null
+++ b/llvm/test/CodeGen/Hexagon/tfr-slotindex.ll
@@ -0,0 +1,26 @@
+; Check that after tfr-cleanup the COPY to $r0 is converted to a tfrsi instruction.
+; The tfrsi instruction must use the same slot index as the COPY instruction
+; to avoid breaking live interval information.
+; Check that there is no machine verifier crash.
+
+; RUN: llc -stop-after=tfr-cleanup -verify-machineinstrs %s -o - | FileCheck %s
+
+; CHECK: $r0 = A2_tfrsi 34767
+
+target datalayout = "e-m:e-p:32:32:32-a:0-n16:32-i64:64:64-i32:32:32-i16:16:16-i1:8:8-f32:32:32-f64:64:64-v32:32:32-v64:64:64-v512:512:512-v1024:1024:1024-v2048:2048:2048"
+target triple = "hexagon"
+
+; Function Attrs: nounwind optsize
+define dso_local i32 @foo() local_unnamed_addr #0 {
+entry:
+ call void @bar(i32 34767) #1
+ call void @baz(i32 34767) #1
+ ret i32 15
+}
+
+declare void @bar(i32) local_unnamed_addr
+
+declare void @baz(i32) local_unnamed_addr
+
+attributes #0 = { nounwind optsize "frame-pointer"="all" "no-trapping-math"="true" "stack-protector-buffer-size"="8" "target-cpu"="hexagonv68" "target-features"="+v68,-long-calls" }
+attributes #1 = { noduplicate nomerge nounwind }
diff --git a/llvm/test/CodeGen/Hexagon/valid-offset-loadbsw4.mir b/llvm/test/CodeGen/Hexagon/valid-offset-loadbsw4.mir
new file mode 100644
index 000000000000..fca42d547dfb
--- /dev/null
+++ b/llvm/test/CodeGen/Hexagon/valid-offset-loadbsw4.mir
@@ -0,0 +1,32 @@
+# RUN: llc -march=hexagon -run-pass hexagon-postincopt -o - %s | FileCheck %s
+# REQUIRES: asserts
+
+# Check that this doesn't crash:
+# CHECK: L2_loadbsw4_io
+
+---
+name: fred
+tracksRegLiveness: true
+liveins:
+ - { reg: '$r0', virtual-reg: '%0' }
+body: |
+ bb.0:
+ successors: %bb.1(0x80000000)
+ liveins: $r0
+
+ %0:intregs = COPY $r0
+ %1:intregs = A2_tfrsi 240
+ %2:doubleregs = IMPLICIT_DEF
+ %3:doubleregs = IMPLICIT_DEF
+
+ bb.1:
+ successors: %bb.1(0x80000000)
+
+ %4:intregs = PHI %1, %bb.0, %5, %bb.1
+ %6:doubleregs = L2_loadbsw4_io %4, 0
+ %7:doubleregs = M2_vrmac_s0 %2, %6, %3
+ S2_storeri_io %0, 0, %7.isub_lo
+ %5:intregs = nuw A2_addi %4, 256
+ J2_jump %bb.1, implicit-def dead $pc
+
+...
diff --git a/llvm/test/CodeGen/LoongArch/intrinsic-memcpy.ll b/llvm/test/CodeGen/LoongArch/intrinsic-memcpy.ll
new file mode 100644
index 000000000000..09453004dcef
--- /dev/null
+++ b/llvm/test/CodeGen/LoongArch/intrinsic-memcpy.ll
@@ -0,0 +1,34 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc --mtriple=loongarch64 < %s | FileCheck %s
+
+%Box = type [6 x i64]
+
+define void @box(ptr noalias nocapture noundef writeonly sret(%Box) align 16 dereferenceable(48) %b, i64 %i) {
+; CHECK-LABEL: box:
+; CHECK: # %bb.0:
+; CHECK-NEXT: addi.d $sp, $sp, -96
+; CHECK-NEXT: .cfi_def_cfa_offset 96
+; CHECK-NEXT: slli.d $a2, $a1, 5
+; CHECK-NEXT: alsl.d $a1, $a1, $a2, 4
+; CHECK-NEXT: addi.d $a2, $sp, 0
+; CHECK-NEXT: add.d $a3, $a2, $a1
+; CHECK-NEXT: ldx.d $a1, $a1, $a2
+; CHECK-NEXT: st.d $a1, $a0, 0
+; CHECK-NEXT: ld.d $a1, $a3, 40
+; CHECK-NEXT: st.d $a1, $a0, 40
+; CHECK-NEXT: ld.d $a1, $a3, 32
+; CHECK-NEXT: st.d $a1, $a0, 32
+; CHECK-NEXT: ld.d $a1, $a3, 24
+; CHECK-NEXT: st.d $a1, $a0, 24
+; CHECK-NEXT: ld.d $a1, $a3, 16
+; CHECK-NEXT: st.d $a1, $a0, 16
+; CHECK-NEXT: ori $a1, $a3, 8
+; CHECK-NEXT: ld.d $a1, $a1, 0
+; CHECK-NEXT: st.d $a1, $a0, 8
+; CHECK-NEXT: addi.d $sp, $sp, 96
+; CHECK-NEXT: ret
+ %1 = alloca [2 x %Box], align 16
+ %2 = getelementptr inbounds [2 x %Box], ptr %1, i64 0, i64 %i
+ call void @llvm.memcpy.p0.p0.i64(ptr noundef nonnull align 16 dereferenceable(48) %b, ptr noundef nonnull align 16 dereferenceable(48) %2, i64 48, i1 false)
+ ret void
+}
diff --git a/llvm/test/CodeGen/NVPTX/param-load-store.ll b/llvm/test/CodeGen/NVPTX/param-load-store.ll
index c14dc88431d3..a29d4e1875cd 100644
--- a/llvm/test/CodeGen/NVPTX/param-load-store.ll
+++ b/llvm/test/CodeGen/NVPTX/param-load-store.ll
@@ -1135,31 +1135,86 @@ define %s_i8i32x4 @test_s_i1i32x4(%s_i8i32x4 %a) {
; CHECK-DAG: ld.param.u8 %r{{.*}}, [test_s_i1i32x4p_param_0+2];
; CHECK-DAG: ld.param.u8 %r{{.*}}, [test_s_i1i32x4p_param_0+1];
; CHECK-DAG: ld.param.u8 %r{{.*}}, [test_s_i1i32x4p_param_0];
-; --- TODO
-; --- Unaligned parameter store/ return value load is broken in both nvcc
-; --- and llvm and needs to be fixed.
; CHECK: .param .align 1 .b8 param0[25];
-; CHECK-DAG: st.param.b32 [param0+0],
-; CHECK-DAG: st.param.b32 [param0+4],
+; CHECK-DAG: st.param.b8 [param0+0],
+; CHECK-DAG: st.param.b8 [param0+1],
+; CHECK-DAG: st.param.b8 [param0+2],
+; CHECK-DAG: st.param.b8 [param0+3],
+; CHECK-DAG: st.param.b8 [param0+4],
+; CHECK-DAG: st.param.b8 [param0+5],
+; CHECK-DAG: st.param.b8 [param0+6],
+; CHECK-DAG: st.param.b8 [param0+7],
; CHECK-DAG: st.param.b8 [param0+8],
-; CHECK-DAG: st.param.b32 [param0+9],
-; CHECK-DAG: st.param.b32 [param0+13],
-; CHECK-DAG: st.param.b64 [param0+17],
+; CHECK-DAG: st.param.b8 [param0+9],
+; CHECK-DAG: st.param.b8 [param0+10],
+; CHECK-DAG: st.param.b8 [param0+11],
+; CHECK-DAG: st.param.b8 [param0+12],
+; CHECK-DAG: st.param.b8 [param0+13],
+; CHECK-DAG: st.param.b8 [param0+14],
+; CHECK-DAG: st.param.b8 [param0+15],
+; CHECK-DAG: st.param.b8 [param0+16],
+; CHECK-DAG: st.param.b8 [param0+17],
+; CHECK-DAG: st.param.b8 [param0+18],
+; CHECK-DAG: st.param.b8 [param0+19],
+; CHECK-DAG: st.param.b8 [param0+20],
+; CHECK-DAG: st.param.b8 [param0+21],
+; CHECK-DAG: st.param.b8 [param0+22],
+; CHECK-DAG: st.param.b8 [param0+23],
+; CHECK-DAG: st.param.b8 [param0+24],
; CHECK: .param .align 1 .b8 retval0[25];
; CHECK: call.uni (retval0),
; CHECK-NEXT: test_s_i1i32x4p,
-; CHECK-DAG: ld.param.b32 %r41, [retval0+0];
-; CHECK-DAG: ld.param.b32 %r42, [retval0+4];
-; CHECK-DAG: ld.param.b8 %rs2, [retval0+8];
-; CHECK-DAG: ld.param.b32 %r43, [retval0+9];
-; CHECK-DAG: ld.param.b32 %r44, [retval0+13];
-; CHECK-DAG: ld.param.b64 %rd23, [retval0+17];
-; CHECK-DAG: st.param.b32 [func_retval0+0],
-; CHECK-DAG: st.param.b32 [func_retval0+4],
+; CHECK-DAG: ld.param.b8 %rs{{[0-9]+}}, [retval0+0];
+; CHECK-DAG: ld.param.b8 %rs{{[0-9]+}}, [retval0+1];
+; CHECK-DAG: ld.param.b8 %rs{{[0-9]+}}, [retval0+2];
+; CHECK-DAG: ld.param.b8 %rs{{[0-9]+}}, [retval0+3];
+; CHECK-DAG: ld.param.b8 %rs{{[0-9]+}}, [retval0+4];
+; CHECK-DAG: ld.param.b8 %rs{{[0-9]+}}, [retval0+5];
+; CHECK-DAG: ld.param.b8 %rs{{[0-9]+}}, [retval0+6];
+; CHECK-DAG: ld.param.b8 %rs{{[0-9]+}}, [retval0+7];
+; CHECK-DAG: ld.param.b8 %rs{{[0-9]+}}, [retval0+8];
+; CHECK-DAG: ld.param.b8 %rs{{[0-9]+}}, [retval0+9];
+; CHECK-DAG: ld.param.b8 %rs{{[0-9]+}}, [retval0+10];
+; CHECK-DAG: ld.param.b8 %rs{{[0-9]+}}, [retval0+11];
+; CHECK-DAG: ld.param.b8 %rs{{[0-9]+}}, [retval0+12];
+; CHECK-DAG: ld.param.b8 %rs{{[0-9]+}}, [retval0+13];
+; CHECK-DAG: ld.param.b8 %rs{{[0-9]+}}, [retval0+14];
+; CHECK-DAG: ld.param.b8 %rs{{[0-9]+}}, [retval0+15];
+; CHECK-DAG: ld.param.b8 %rs{{[0-9]+}}, [retval0+16];
+; CHECK-DAG: ld.param.b8 %rs{{[0-9]+}}, [retval0+17];
+; CHECK-DAG: ld.param.b8 %rs{{[0-9]+}}, [retval0+18];
+; CHECK-DAG: ld.param.b8 %rs{{[0-9]+}}, [retval0+19];
+; CHECK-DAG: ld.param.b8 %rs{{[0-9]+}}, [retval0+20];
+; CHECK-DAG: ld.param.b8 %rs{{[0-9]+}}, [retval0+21];
+; CHECK-DAG: ld.param.b8 %rs{{[0-9]+}}, [retval0+22];
+; CHECK-DAG: ld.param.b8 %rs{{[0-9]+}}, [retval0+23];
+; CHECK-DAG: ld.param.b8 %rs{{[0-9]+}}, [retval0+24];
+; CHECK: } // callseq
+; CHECK-DAG: st.param.b8 [func_retval0+0],
+; CHECK-DAG: st.param.b8 [func_retval0+1],
+; CHECK-DAG: st.param.b8 [func_retval0+2],
+; CHECK-DAG: st.param.b8 [func_retval0+3],
+; CHECK-DAG: st.param.b8 [func_retval0+4],
+; CHECK-DAG: st.param.b8 [func_retval0+5],
+; CHECK-DAG: st.param.b8 [func_retval0+6],
+; CHECK-DAG: st.param.b8 [func_retval0+7],
; CHECK-DAG: st.param.b8 [func_retval0+8],
-; CHECK-DAG: st.param.b32 [func_retval0+9],
-; CHECK-DAG: st.param.b32 [func_retval0+13],
-; CHECK-DAG: st.param.b64 [func_retval0+17],
+; CHECK-DAG: st.param.b8 [func_retval0+9],
+; CHECK-DAG: st.param.b8 [func_retval0+10],
+; CHECK-DAG: st.param.b8 [func_retval0+11],
+; CHECK-DAG: st.param.b8 [func_retval0+12],
+; CHECK-DAG: st.param.b8 [func_retval0+13],
+; CHECK-DAG: st.param.b8 [func_retval0+14],
+; CHECK-DAG: st.param.b8 [func_retval0+15],
+; CHECK-DAG: st.param.b8 [func_retval0+16],
+; CHECK-DAG: st.param.b8 [func_retval0+17],
+; CHECK-DAG: st.param.b8 [func_retval0+18],
+; CHECK-DAG: st.param.b8 [func_retval0+19],
+; CHECK-DAG: st.param.b8 [func_retval0+20],
+; CHECK-DAG: st.param.b8 [func_retval0+21],
+; CHECK-DAG: st.param.b8 [func_retval0+22],
+; CHECK-DAG: st.param.b8 [func_retval0+23],
+; CHECK-DAG: st.param.b8 [func_retval0+24],
define %s_i8i32x4p @test_s_i1i32x4p(%s_i8i32x4p %a) {
%r = tail call %s_i8i32x4p @test_s_i1i32x4p(%s_i8i32x4p %a);
diff --git a/llvm/test/CodeGen/NVPTX/unaligned-param-load-store.ll b/llvm/test/CodeGen/NVPTX/unaligned-param-load-store.ll
new file mode 100644
index 000000000000..40a3e9e945a2
--- /dev/null
+++ b/llvm/test/CodeGen/NVPTX/unaligned-param-load-store.ll
@@ -0,0 +1,385 @@
+; Verifies correct load/store of unaligned parameters and return values.
+; RUN: llc < %s -march=nvptx64 -mcpu=sm_35 -O0 -verify-machineinstrs | FileCheck -allow-deprecated-dag-overlap %s
+; RUN: %if ptxas %{ llc < %s -march=nvptx64 -mcpu=sm_35 -O0 -verify-machineinstrs | %ptxas-verify %}
+
+%s_i8i16p = type { <{ i16, i8, i16 }>, i64 }
+%s_i8i32p = type { <{ i32, i8, i32 }>, i64 }
+%s_i8i64p = type { <{ i64, i8, i64 }>, i64 }
+%s_i8f16p = type { <{ half, i8, half }>, i64 }
+%s_i8f16x2p = type { <{ <2 x half>, i8, <2 x half> }>, i64 }
+%s_i8f32p = type { <{ float, i8, float }>, i64 }
+%s_i8f64p = type { <{ double, i8, double }>, i64 }
+
+; -- All loads/stores from parameters aligned by one must be done one
+; byte at a time.
+; -- Notes:
+; -- There are two fields of interest in the packed part of the struct: one
+; at a properly aligned offset and one at a misaligned offset. The former
+; should be loaded or stored as a whole, and the latter byte by byte.
+; -- To keep the following series of tests concise, only the loads and
+; stores of those two fields are checked.
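+;
+; For illustration only (not exercised by this test; the names below are
+; made up, and __attribute__((packed)) is a GCC/Clang extension): a C
+; analogue of the packed part of %s_i8i16p. The second i16 lands at byte
+; offset 3, which is not a multiple of its 2-byte natural alignment, and
+; that is what forces the byte-wise st.param.b8/ld.param.b8 accesses at
+; +3 and +4 checked below, while the leading i16 at offset 0 stays a
+; single 2-byte access.
+;
+;   #include <assert.h>
+;   #include <stddef.h>
+;
+;   struct __attribute__((packed)) packed_part {
+;     short a; /* offset 0: naturally aligned, one 2-byte access */
+;     char  b; /* offset 2 */
+;     short c; /* offset 3: misaligned, accessed one byte at a time */
+;   };
+;
+;   int main(void) {
+;     assert(offsetof(struct packed_part, a) == 0);
+;     assert(offsetof(struct packed_part, c) == 3);
+;     assert(sizeof(struct packed_part) == 5);
+;     return 0;
+;   }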
+
+; CHECK: .visible .func (.param .align 8 .b8 func_retval0[16])
+; CHECK-LABEL: test_s_i8i16p(
+; CHECK: .param .align 8 .b8 test_s_i8i16p_param_0[16]
+; CHECK-DAG: ld.param.u16 [[P0:%rs[0-9]+]], [test_s_i8i16p_param_0];
+; CHECK-DAG: ld.param.u8 [[P2_0:%rs[0-9]+]], [test_s_i8i16p_param_0+3];
+; CHECK-DAG: ld.param.u8 [[P2_1:%rs[0-9]+]], [test_s_i8i16p_param_0+4];
+; CHECK-DAG: shl.b16 [[P2_1_shl:%rs[0-9]+]], [[P2_1]], 8;
+; CHECK-DAG: or.b16 [[P2_1_or:%rs[0-9]+]], [[P2_1_shl]], [[P2_0]];
+; CHECK: { // callseq
+; CHECK: .param .align 8 .b8 param0[16];
+; CHECK-DAG: st.param.b16 [param0+0], [[P0]];
+; CHECK-DAG: st.param.b8 [param0+3], [[P2_1_or]];
+; CHECK-DAG: st.param.b8 [param0+4], [[P2_1]];
+; CHECK: .param .align 8 .b8 retval0[16];
+; CHECK-NEXT: call.uni (retval0),
+; CHECK-NEXT: test_s_i8i16p,
+; CHECK-NEXT: (
+; CHECK-NEXT: param0
+; CHECK-NEXT: );
+; CHECK-DAG: ld.param.b16 [[R0:%rs[0-9]+]], [retval0+0];
+; CHECK-DAG: ld.param.b8 [[R2_0:%rs[0-9]+]], [retval0+3];
+; CHECK-DAG: ld.param.b8 [[R2_1:%rs[0-9]+]], [retval0+4];
+; CHECK: } // callseq
+; CHECK-DAG: st.param.b16 [func_retval0+0], [[R0]];
+; CHECK-DAG: shl.b16 [[R2_1_shl:%rs[0-9]+]], [[R2_1]], 8;
+; CHECK-DAG: and.b16 [[R2_0_and:%rs[0-9]+]], [[R2_0]], 255;
+; CHECK-DAG: or.b16 [[R2:%rs[0-9]+]], [[R2_0_and]], [[R2_1_shl]];
+; CHECK-DAG: st.param.b8 [func_retval0+3], [[R2]];
+; CHECK-DAG: and.b16 [[R2_1_and:%rs[0-9]+]], [[R2_1]], 255;
+; CHECK-DAG: st.param.b8 [func_retval0+4], [[R2_1_and]];
+; CHECK: ret;
+
+define %s_i8i16p @test_s_i8i16p(%s_i8i16p %a) {
+ %r = tail call %s_i8i16p @test_s_i8i16p(%s_i8i16p %a)
+ ret %s_i8i16p %r
+}
+
+; CHECK: .visible .func (.param .align 8 .b8 func_retval0[24])
+; CHECK-LABEL: test_s_i8i32p(
+; CHECK: .param .align 8 .b8 test_s_i8i32p_param_0[24]
+; CHECK-DAG: ld.param.u32 [[P0:%r[0-9]+]], [test_s_i8i32p_param_0];
+; CHECK-DAG: ld.param.u8 [[P2_0:%r[0-9]+]], [test_s_i8i32p_param_0+5];
+; CHECK-DAG: ld.param.u8 [[P2_1:%r[0-9]+]], [test_s_i8i32p_param_0+6];
+; CHECK-DAG: ld.param.u8 [[P2_2:%r[0-9]+]], [test_s_i8i32p_param_0+7];
+; CHECK-DAG: ld.param.u8 [[P2_3:%r[0-9]+]], [test_s_i8i32p_param_0+8];
+; CHECK-DAG: shl.b32 [[P2_1_shl:%r[0-9]+]], [[P2_1]], 8;
+; CHECK-DAG: shl.b32 [[P2_2_shl:%r[0-9]+]], [[P2_2]], 16;
+; CHECK-DAG: shl.b32 [[P2_3_shl:%r[0-9]+]], [[P2_3]], 24;
+; CHECK-DAG: or.b32 [[P2_or:%r[0-9]+]], [[P2_1_shl]], [[P2_0]];
+; CHECK-DAG: or.b32 [[P2_or_1:%r[0-9]+]], [[P2_3_shl]], [[P2_2_shl]];
+; CHECK-DAG: or.b32 [[P2:%r[0-9]+]], [[P2_or_1]], [[P2_or]];
+; CHECK-DAG: shr.u32 [[P2_1_shr:%r[0-9]+]], [[P2]], 8;
+; CHECK-DAG: shr.u32 [[P2_2_shr:%r[0-9]+]], [[P2_or_1]], 16;
+; CHECK: { // callseq
+; CHECK-DAG: .param .align 8 .b8 param0[24];
+; CHECK-DAG: st.param.b32 [param0+0], [[P0]];
+; CHECK-DAG: st.param.b8 [param0+5], [[P2]];
+; CHECK-DAG: st.param.b8 [param0+6], [[P2_1_shr]];
+; CHECK-DAG: st.param.b8 [param0+7], [[P2_2_shr]];
+; CHECK-DAG: st.param.b8 [param0+8], [[P2_3]];
+; CHECK: .param .align 8 .b8 retval0[24];
+; CHECK-NEXT: call.uni (retval0),
+; CHECK-NEXT: test_s_i8i32p,
+; CHECK-NEXT: (
+; CHECK-NEXT: param0
+; CHECK-NEXT: );
+; CHECK-DAG: ld.param.b32 [[R0:%r[0-9]+]], [retval0+0];
+; CHECK-DAG: ld.param.b8 [[R2_0:%rs[0-9]+]], [retval0+5];
+; CHECK-DAG: ld.param.b8 [[R2_1:%rs[0-9]+]], [retval0+6];
+; CHECK-DAG: ld.param.b8 [[R2_2:%rs[0-9]+]], [retval0+7];
+; CHECK-DAG: ld.param.b8 [[R2_3:%rs[0-9]+]], [retval0+8];
+; CHECK: } // callseq
+; CHECK-DAG: st.param.b32 [func_retval0+0], [[R0]];
+; CHECK-DAG: st.param.b8 [func_retval0+5],
+; CHECK-DAG: st.param.b8 [func_retval0+6],
+; CHECK-DAG: st.param.b8 [func_retval0+7],
+; CHECK-DAG: st.param.b8 [func_retval0+8],
+; CHECK: ret;
+
+define %s_i8i32p @test_s_i8i32p(%s_i8i32p %a) {
+ %r = tail call %s_i8i32p @test_s_i8i32p(%s_i8i32p %a)
+ ret %s_i8i32p %r
+}
+
+; CHECK: .visible .func (.param .align 8 .b8 func_retval0[32])
+; CHECK-LABEL: test_s_i8i64p(
+; CHECK: .param .align 8 .b8 test_s_i8i64p_param_0[32]
+; CHECK-DAG: ld.param.u64 [[P0:%rd[0-9]+]], [test_s_i8i64p_param_0];
+; CHECK-DAG: ld.param.u8 [[P2_0:%rd[0-9]+]], [test_s_i8i64p_param_0+9];
+; CHECK-DAG: ld.param.u8 [[P2_1:%rd[0-9]+]], [test_s_i8i64p_param_0+10];
+; CHECK-DAG: ld.param.u8 [[P2_2:%rd[0-9]+]], [test_s_i8i64p_param_0+11];
+; CHECK-DAG: ld.param.u8 [[P2_3:%rd[0-9]+]], [test_s_i8i64p_param_0+12];
+; CHECK-DAG: ld.param.u8 [[P2_4:%rd[0-9]+]], [test_s_i8i64p_param_0+13];
+; CHECK-DAG: ld.param.u8 [[P2_5:%rd[0-9]+]], [test_s_i8i64p_param_0+14];
+; CHECK-DAG: ld.param.u8 [[P2_6:%rd[0-9]+]], [test_s_i8i64p_param_0+15];
+; CHECK-DAG: ld.param.u8 [[P2_7:%rd[0-9]+]], [test_s_i8i64p_param_0+16];
+; CHECK-DAG: shl.b64 [[P2_1_shl:%rd[0-9]+]], [[P2_1]], 8;
+; CHECK-DAG: shl.b64 [[P2_2_shl:%rd[0-9]+]], [[P2_2]], 16;
+; CHECK-DAG: shl.b64 [[P2_3_shl:%rd[0-9]+]], [[P2_3]], 24;
+; CHECK-DAG: or.b64 [[P2_or_0:%rd[0-9]+]], [[P2_1_shl]], [[P2_0]];
+; CHECK-DAG: or.b64 [[P2_or_1:%rd[0-9]+]], [[P2_3_shl]], [[P2_2_shl]];
+; CHECK-DAG: or.b64 [[P2_or_2:%rd[0-9]+]], [[P2_or_1]], [[P2_or_0]];
+; CHECK-DAG: shl.b64 [[P2_5_shl:%rd[0-9]+]], [[P2_5]], 8;
+; CHECK-DAG: shl.b64 [[P2_6_shl:%rd[0-9]+]], [[P2_6]], 16;
+; CHECK-DAG: shl.b64 [[P2_7_shl:%rd[0-9]+]], [[P2_7]], 24;
+; CHECK-DAG: or.b64 [[P2_or_3:%rd[0-9]+]], [[P2_5_shl]], [[P2_4]];
+; CHECK-DAG: or.b64 [[P2_or_4:%rd[0-9]+]], [[P2_7_shl]], [[P2_6_shl]];
+; CHECK-DAG: or.b64 [[P2_or_5:%rd[0-9]+]], [[P2_or_4]], [[P2_or_3]];
+; CHECK-DAG: shl.b64 [[P2_or_shl:%rd[0-9]+]], [[P2_or_5]], 32;
+; CHECK-DAG: or.b64 [[P2:%rd[0-9]+]], [[P2_or_shl]], [[P2_or_2]];
+; CHECK-DAG: shr.u64 [[P2_shr_1:%rd[0-9]+]], [[P2]], 8;
+; CHECK-DAG: shr.u64 [[P2_shr_2:%rd[0-9]+]], [[P2]], 16;
+; CHECK-DAG: shr.u64 [[P2_shr_3:%rd[0-9]+]], [[P2]], 24;
+; CHECK-DAG: bfe.u64 [[P2_bfe_4:%rd[0-9]+]], [[P2_or_5]], 8, 24;
+; CHECK-DAG: bfe.u64 [[P2_bfe_5:%rd[0-9]+]], [[P2_or_5]], 16, 16;
+; CHECK-DAG: bfe.u64 [[P2_bfe_6:%rd[0-9]+]], [[P2_or_5]], 24, 8;
+; CHECK: { // callseq
+; CHECK: .param .align 8 .b8 param0[32];
+; CHECK-DAG: st.param.b64 [param0+0], [[P0]];
+; CHECK-DAG: st.param.b8 [param0+9], [[P2]];
+; CHECK-DAG: st.param.b8 [param0+10], [[P2_shr_1]];
+; CHECK-DAG: st.param.b8 [param0+11], [[P2_shr_2]];
+; CHECK-DAG: st.param.b8 [param0+12], [[P2_shr_3]];
+; CHECK-DAG: st.param.b8 [param0+13], [[P2_or_5]];
+; CHECK-DAG: st.param.b8 [param0+14], [[P2_bfe_4]];
+; CHECK-DAG: st.param.b8 [param0+15], [[P2_bfe_5]];
+; CHECK-DAG: st.param.b8 [param0+16], [[P2_bfe_6]];
+; CHECK: .param .align 8 .b8 retval0[32];
+; CHECK-NEXT: call.uni (retval0),
+; CHECK-NEXT: test_s_i8i64p,
+; CHECK-NEXT: (
+; CHECK-NEXT: param0
+; CHECK-NEXT: );
+; CHECK-DAG: ld.param.b64 [[R0:%rd[0-9]+]], [retval0+0];
+; CHECK-DAG: ld.param.b8 [[R2_0:%rs[0-9]+]], [retval0+9];
+; CHECK-DAG: ld.param.b8 [[R2_1:%rs[0-9]+]], [retval0+10];
+; CHECK-DAG: ld.param.b8 [[R2_2:%rs[0-9]+]], [retval0+11];
+; CHECK-DAG: ld.param.b8 [[R2_3:%rs[0-9]+]], [retval0+12];
+; CHECK-DAG: ld.param.b8 [[R2_4:%rs[0-9]+]], [retval0+13];
+; CHECK-DAG: ld.param.b8 [[R2_5:%rs[0-9]+]], [retval0+14];
+; CHECK-DAG: ld.param.b8 [[R2_6:%rs[0-9]+]], [retval0+15];
+; CHECK-DAG: ld.param.b8 [[R2_7:%rs[0-9]+]], [retval0+16];
+; CHECK: } // callseq
+; CHECK-DAG: st.param.b64 [func_retval0+0], [[R0]];
+; CHECK-DAG: st.param.b8 [func_retval0+9],
+; CHECK-DAG: st.param.b8 [func_retval0+10],
+; CHECK-DAG: st.param.b8 [func_retval0+11],
+; CHECK-DAG: st.param.b8 [func_retval0+12],
+; CHECK-DAG: st.param.b8 [func_retval0+13],
+; CHECK-DAG: st.param.b8 [func_retval0+14],
+; CHECK-DAG: st.param.b8 [func_retval0+15],
+; CHECK-DAG: st.param.b8 [func_retval0+16],
+; CHECK: ret;
+
+define %s_i8i64p @test_s_i8i64p(%s_i8i64p %a) {
+ %r = tail call %s_i8i64p @test_s_i8i64p(%s_i8i64p %a)
+ ret %s_i8i64p %r
+}
+
+; CHECK: .visible .func (.param .align 8 .b8 func_retval0[16])
+; CHECK-LABEL: test_s_i8f16p(
+; CHECK: .param .align 8 .b8 test_s_i8f16p_param_0[16]
+; CHECK-DAG: ld.param.b16 [[P0:%rs[0-9]+]], [test_s_i8f16p_param_0];
+; CHECK-DAG: ld.param.u8 [[P2_0:%rs[0-9]+]], [test_s_i8f16p_param_0+3];
+; CHECK-DAG: ld.param.u8 [[P2_1:%rs[0-9]+]], [test_s_i8f16p_param_0+4];
+; CHECK-DAG: shl.b16 [[P2_1_shl:%rs[0-9]+]], [[P2_1]], 8;
+; CHECK-DAG: or.b16 [[P2_1_or:%rs[0-9]+]], [[P2_1_shl]], [[P2_0]];
+; CHECK: { // callseq
+; CHECK: .param .align 8 .b8 param0[16];
+; CHECK-DAG: st.param.b16 [param0+0], [[P0]];
+; CHECK-DAG: st.param.b8 [param0+3], [[P2_1_or]];
+; CHECK-DAG: st.param.b8 [param0+4], [[P2_1]];
+; CHECK: .param .align 8 .b8 retval0[16];
+; CHECK-NEXT: call.uni (retval0),
+; CHECK-NEXT: test_s_i8f16p,
+; CHECK-NEXT: (
+; CHECK-NEXT: param0
+; CHECK-NEXT: );
+; CHECK-DAG: ld.param.b16 [[R0:%rs[0-9]+]], [retval0+0];
+; CHECK-DAG: ld.param.b8 [[R2I_0:%rs[0-9]+]], [retval0+3];
+; CHECK-DAG: ld.param.b8 [[R2I_1:%rs[0-9]+]], [retval0+4];
+; CHECK: } // callseq
+; CHECK-DAG: st.param.b16 [func_retval0+0], [[R0]];
+; CHECK-DAG: shl.b16 [[R2I_1_shl:%rs[0-9]+]], [[R2I_1]], 8;
+; CHECK-DAG: and.b16 [[R2I_0_and:%rs[0-9]+]], [[R2I_0]], 255;
+; CHECK-DAG: or.b16 [[R2I:%rs[0-9]+]], [[R2I_0_and]], [[R2I_1_shl]];
+; CHECK-DAG: st.param.b8 [func_retval0+3], [[R2I]];
+; CHECK-DAG: and.b16 [[R2I_1_and:%rs[0-9]+]], [[R2I_1]], 255;
+; CHECK-DAG: st.param.b8 [func_retval0+4], [[R2I_1_and]];
+; CHECK: ret;
+
+define %s_i8f16p @test_s_i8f16p(%s_i8f16p %a) {
+ %r = tail call %s_i8f16p @test_s_i8f16p(%s_i8f16p %a)
+ ret %s_i8f16p %r
+}
+
+; CHECK: .visible .func (.param .align 8 .b8 func_retval0[24])
+; CHECK-LABEL: test_s_i8f16x2p(
+; CHECK: .param .align 8 .b8 test_s_i8f16x2p_param_0[24]
+; CHECK-DAG: ld.param.b32 [[P0:%r[0-9]+]], [test_s_i8f16x2p_param_0];
+; CHECK-DAG: ld.param.u8 [[P2_0:%r[0-9]+]], [test_s_i8f16x2p_param_0+5];
+; CHECK-DAG: ld.param.u8 [[P2_1:%r[0-9]+]], [test_s_i8f16x2p_param_0+6];
+; CHECK-DAG: ld.param.u8 [[P2_2:%r[0-9]+]], [test_s_i8f16x2p_param_0+7];
+; CHECK-DAG: ld.param.u8 [[P2_3:%r[0-9]+]], [test_s_i8f16x2p_param_0+8];
+; CHECK-DAG: shl.b32 [[P2_1_shl:%r[0-9]+]], [[P2_1]], 8;
+; CHECK-DAG: shl.b32 [[P2_2_shl:%r[0-9]+]], [[P2_2]], 16;
+; CHECK-DAG: shl.b32 [[P2_3_shl:%r[0-9]+]], [[P2_3]], 24;
+; CHECK-DAG: or.b32 [[P2_or:%r[0-9]+]], [[P2_1_shl]], [[P2_0]];
+; CHECK-DAG: or.b32 [[P2_or_1:%r[0-9]+]], [[P2_3_shl]], [[P2_2_shl]];
+; CHECK-DAG: or.b32 [[P2:%r[0-9]+]], [[P2_or_1]], [[P2_or]];
+; CHECK-DAG: shr.u32 [[P2_1_shr:%r[0-9]+]], [[P2]], 8;
+; CHECK-DAG: shr.u32 [[P2_2_shr:%r[0-9]+]], [[P2_or_1]], 16;
+; CHECK: { // callseq
+; CHECK-DAG: .param .align 8 .b8 param0[24];
+; CHECK-DAG: st.param.b32 [param0+0], [[P0]];
+; CHECK-DAG: st.param.b8 [param0+5], [[P2]];
+; CHECK-DAG: st.param.b8 [param0+6], [[P2_1_shr]];
+; CHECK-DAG: st.param.b8 [param0+7], [[P2_2_shr]];
+; CHECK-DAG: st.param.b8 [param0+8], [[P2_3]];
+; CHECK: .param .align 8 .b8 retval0[24];
+; CHECK-NEXT: call.uni (retval0),
+; CHECK-NEXT: test_s_i8f16x2p,
+; CHECK-NEXT: (
+; CHECK-NEXT: param0
+; CHECK-NEXT: );
+; CHECK-DAG: ld.param.b32 [[R0:%r[0-9]+]], [retval0+0];
+; CHECK-DAG: ld.param.b8 [[R2_0:%rs[0-9]+]], [retval0+5];
+; CHECK-DAG: ld.param.b8 [[R2_1:%rs[0-9]+]], [retval0+6];
+; CHECK-DAG: ld.param.b8 [[R2_2:%rs[0-9]+]], [retval0+7];
+; CHECK-DAG: ld.param.b8 [[R2_3:%rs[0-9]+]], [retval0+8];
+; CHECK: } // callseq
+; CHECK-DAG: st.param.b32 [func_retval0+0], [[R0]];
+; CHECK-DAG: st.param.b8 [func_retval0+5],
+; CHECK-DAG: st.param.b8 [func_retval0+6],
+; CHECK-DAG: st.param.b8 [func_retval0+7],
+; CHECK-DAG: st.param.b8 [func_retval0+8],
+; CHECK: ret;
+
+define %s_i8f16x2p @test_s_i8f16x2p(%s_i8f16x2p %a) {
+ %r = tail call %s_i8f16x2p @test_s_i8f16x2p(%s_i8f16x2p %a)
+ ret %s_i8f16x2p %r
+}
+
+; CHECK: .visible .func (.param .align 8 .b8 func_retval0[24])
+; CHECK-LABEL: test_s_i8f32p(
+; CHECK: .param .align 8 .b8 test_s_i8f32p_param_0[24]
+; CHECK-DAG: ld.param.f32 [[P0:%f[0-9]+]], [test_s_i8f32p_param_0];
+; CHECK-DAG: ld.param.u8 [[P2_0:%r[0-9]+]], [test_s_i8f32p_param_0+5];
+; CHECK-DAG: ld.param.u8 [[P2_1:%r[0-9]+]], [test_s_i8f32p_param_0+6];
+; CHECK-DAG: ld.param.u8 [[P2_2:%r[0-9]+]], [test_s_i8f32p_param_0+7];
+; CHECK-DAG: ld.param.u8 [[P2_3:%r[0-9]+]], [test_s_i8f32p_param_0+8];
+; CHECK-DAG: shl.b32 [[P2_1_shl:%r[0-9]+]], [[P2_1]], 8;
+; CHECK-DAG: shl.b32 [[P2_2_shl:%r[0-9]+]], [[P2_2]], 16;
+; CHECK-DAG: shl.b32 [[P2_3_shl:%r[0-9]+]], [[P2_3]], 24;
+; CHECK-DAG: or.b32 [[P2_or:%r[0-9]+]], [[P2_1_shl]], [[P2_0]];
+; CHECK-DAG: or.b32 [[P2_or_1:%r[0-9]+]], [[P2_3_shl]], [[P2_2_shl]];
+; CHECK-DAG: or.b32 [[P2:%r[0-9]+]], [[P2_or_1]], [[P2_or]];
+; CHECK-DAG: shr.u32 [[P2_1_shr:%r[0-9]+]], [[P2]], 8;
+; CHECK-DAG: shr.u32 [[P2_2_shr:%r[0-9]+]], [[P2_or_1]], 16;
+; CHECK: { // callseq
+; CHECK-DAG: .param .align 8 .b8 param0[24];
+; CHECK-DAG: st.param.f32 [param0+0], [[P0]];
+; CHECK-DAG: st.param.b8 [param0+5], [[P2]];
+; CHECK-DAG: st.param.b8 [param0+6], [[P2_1_shr]];
+; CHECK-DAG: st.param.b8 [param0+7], [[P2_2_shr]];
+; CHECK-DAG: st.param.b8 [param0+8], [[P2_3]];
+; CHECK: .param .align 8 .b8 retval0[24];
+; CHECK-NEXT: call.uni (retval0),
+; CHECK-NEXT: test_s_i8f32p,
+; CHECK-NEXT: (
+; CHECK-NEXT: param0
+; CHECK-NEXT: );
+; CHECK-DAG: ld.param.f32 [[R0:%f[0-9]+]], [retval0+0];
+; CHECK-DAG: ld.param.b8 [[R2_0:%rs[0-9]+]], [retval0+5];
+; CHECK-DAG: ld.param.b8 [[R2_1:%rs[0-9]+]], [retval0+6];
+; CHECK-DAG: ld.param.b8 [[R2_2:%rs[0-9]+]], [retval0+7];
+; CHECK-DAG: ld.param.b8 [[R2_3:%rs[0-9]+]], [retval0+8];
+; CHECK: } // callseq
+; CHECK-DAG: st.param.f32 [func_retval0+0], [[R0]];
+; CHECK-DAG: st.param.b8 [func_retval0+5],
+; CHECK-DAG: st.param.b8 [func_retval0+6],
+; CHECK-DAG: st.param.b8 [func_retval0+7],
+; CHECK-DAG: st.param.b8 [func_retval0+8],
+; CHECK: ret;
+
+define %s_i8f32p @test_s_i8f32p(%s_i8f32p %a) {
+ %r = tail call %s_i8f32p @test_s_i8f32p(%s_i8f32p %a)
+ ret %s_i8f32p %r
+}
+
+; CHECK: .visible .func (.param .align 8 .b8 func_retval0[32])
+; CHECK-LABEL: test_s_i8f64p(
+; CHECK: .param .align 8 .b8 test_s_i8f64p_param_0[32]
+; CHECK-DAG: ld.param.f64 [[P0:%fd[0-9]+]], [test_s_i8f64p_param_0];
+; CHECK-DAG: ld.param.u8 [[P2_0:%rd[0-9]+]], [test_s_i8f64p_param_0+9];
+; CHECK-DAG: ld.param.u8 [[P2_1:%rd[0-9]+]], [test_s_i8f64p_param_0+10];
+; CHECK-DAG: ld.param.u8 [[P2_2:%rd[0-9]+]], [test_s_i8f64p_param_0+11];
+; CHECK-DAG: ld.param.u8 [[P2_3:%rd[0-9]+]], [test_s_i8f64p_param_0+12];
+; CHECK-DAG: ld.param.u8 [[P2_4:%rd[0-9]+]], [test_s_i8f64p_param_0+13];
+; CHECK-DAG: ld.param.u8 [[P2_5:%rd[0-9]+]], [test_s_i8f64p_param_0+14];
+; CHECK-DAG: ld.param.u8 [[P2_6:%rd[0-9]+]], [test_s_i8f64p_param_0+15];
+; CHECK-DAG: ld.param.u8 [[P2_7:%rd[0-9]+]], [test_s_i8f64p_param_0+16];
+; CHECK-DAG: shl.b64 [[P2_1_shl:%rd[0-9]+]], [[P2_1]], 8;
+; CHECK-DAG: shl.b64 [[P2_2_shl:%rd[0-9]+]], [[P2_2]], 16;
+; CHECK-DAG: shl.b64 [[P2_3_shl:%rd[0-9]+]], [[P2_3]], 24;
+; CHECK-DAG: or.b64 [[P2_or_0:%rd[0-9]+]], [[P2_1_shl]], [[P2_0]];
+; CHECK-DAG: or.b64 [[P2_or_1:%rd[0-9]+]], [[P2_3_shl]], [[P2_2_shl]];
+; CHECK-DAG: or.b64 [[P2_or_2:%rd[0-9]+]], [[P2_or_1]], [[P2_or_0]];
+; CHECK-DAG: shl.b64 [[P2_5_shl:%rd[0-9]+]], [[P2_5]], 8;
+; CHECK-DAG: shl.b64 [[P2_6_shl:%rd[0-9]+]], [[P2_6]], 16;
+; CHECK-DAG: shl.b64 [[P2_7_shl:%rd[0-9]+]], [[P2_7]], 24;
+; CHECK-DAG: or.b64 [[P2_or_3:%rd[0-9]+]], [[P2_5_shl]], [[P2_4]];
+; CHECK-DAG: or.b64 [[P2_or_4:%rd[0-9]+]], [[P2_7_shl]], [[P2_6_shl]];
+; CHECK-DAG: or.b64 [[P2_or_5:%rd[0-9]+]], [[P2_or_4]], [[P2_or_3]];
+; CHECK-DAG: shl.b64 [[P2_or_shl:%rd[0-9]+]], [[P2_or_5]], 32;
+; CHECK-DAG: or.b64 [[P2:%rd[0-9]+]], [[P2_or_shl]], [[P2_or_2]];
+; CHECK-DAG: shr.u64 [[P2_shr_1:%rd[0-9]+]], [[P2]], 8;
+; CHECK-DAG: shr.u64 [[P2_shr_2:%rd[0-9]+]], [[P2]], 16;
+; CHECK-DAG: shr.u64 [[P2_shr_3:%rd[0-9]+]], [[P2]], 24;
+; CHECK-DAG: bfe.u64 [[P2_bfe_4:%rd[0-9]+]], [[P2_or_5]], 8, 24;
+; CHECK-DAG: bfe.u64 [[P2_bfe_5:%rd[0-9]+]], [[P2_or_5]], 16, 16;
+; CHECK-DAG: bfe.u64 [[P2_bfe_6:%rd[0-9]+]], [[P2_or_5]], 24, 8;
+; CHECK: { // callseq
+; CHECK: .param .align 8 .b8 param0[32];
+; CHECK-DAG: st.param.f64 [param0+0], [[P0]];
+; CHECK-DAG: st.param.b8 [param0+9], [[P2]];
+; CHECK-DAG: st.param.b8 [param0+10], [[P2_shr_1]];
+; CHECK-DAG: st.param.b8 [param0+11], [[P2_shr_2]];
+; CHECK-DAG: st.param.b8 [param0+12], [[P2_shr_3]];
+; CHECK-DAG: st.param.b8 [param0+13], [[P2_or_5]];
+; CHECK-DAG: st.param.b8 [param0+14], [[P2_bfe_4]];
+; CHECK-DAG: st.param.b8 [param0+15], [[P2_bfe_5]];
+; CHECK-DAG: st.param.b8 [param0+16], [[P2_bfe_6]];
+; CHECK: .param .align 8 .b8 retval0[32];
+; CHECK-NEXT: call.uni (retval0),
+; CHECK-NEXT: test_s_i8f64p,
+; CHECK-NEXT: (
+; CHECK-NEXT: param0
+; CHECK-NEXT: );
+; CHECK-DAG: ld.param.f64 [[R0:%fd[0-9]+]], [retval0+0];
+; CHECK-DAG: ld.param.b8 [[R2_0:%rs[0-9]+]], [retval0+9];
+; CHECK-DAG: ld.param.b8 [[R2_1:%rs[0-9]+]], [retval0+10];
+; CHECK-DAG: ld.param.b8 [[R2_2:%rs[0-9]+]], [retval0+11];
+; CHECK-DAG: ld.param.b8 [[R2_3:%rs[0-9]+]], [retval0+12];
+; CHECK-DAG: ld.param.b8 [[R2_4:%rs[0-9]+]], [retval0+13];
+; CHECK-DAG: ld.param.b8 [[R2_5:%rs[0-9]+]], [retval0+14];
+; CHECK-DAG: ld.param.b8 [[R2_6:%rs[0-9]+]], [retval0+15];
+; CHECK-DAG: ld.param.b8 [[R2_7:%rs[0-9]+]], [retval0+16];
+; CHECK: } // callseq
+; CHECK-DAG: st.param.f64 [func_retval0+0], [[R0]];
+; CHECK-DAG: st.param.b8 [func_retval0+9],
+; CHECK-DAG: st.param.b8 [func_retval0+10],
+; CHECK-DAG: st.param.b8 [func_retval0+11],
+; CHECK-DAG: st.param.b8 [func_retval0+12],
+; CHECK-DAG: st.param.b8 [func_retval0+13],
+; CHECK-DAG: st.param.b8 [func_retval0+14],
+; CHECK-DAG: st.param.b8 [func_retval0+15],
+; CHECK-DAG: st.param.b8 [func_retval0+16],
+; CHECK: ret;
+
+define %s_i8f64p @test_s_i8f64p(%s_i8f64p %a) {
+ %r = tail call %s_i8f64p @test_s_i8f64p(%s_i8f64p %a)
+ ret %s_i8f64p %r
+}
diff --git a/llvm/test/CodeGen/RISCV/fold-addi-loadstore.ll b/llvm/test/CodeGen/RISCV/fold-addi-loadstore.ll
index 91e73992bdfa..3c2e84689c97 100644
--- a/llvm/test/CodeGen/RISCV/fold-addi-loadstore.ll
+++ b/llvm/test/CodeGen/RISCV/fold-addi-loadstore.ll
@@ -1,12 +1,12 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=riscv32 -verify-machineinstrs < %s \
-; RUN: -riscv-enable-sink-fold | FileCheck -check-prefix=RV32I %s
+; RUN: | FileCheck -check-prefix=RV32I %s
; RUN: llc -mtriple=riscv32 -verify-machineinstrs -code-model=medium < %s \
-; RUN: -riscv-enable-sink-fold | FileCheck -check-prefix=RV32I-MEDIUM %s
+; RUN: | FileCheck -check-prefix=RV32I-MEDIUM %s
; RUN: llc -mtriple=riscv64 -verify-machineinstrs < %s \
-; RUN: -riscv-enable-sink-fold | FileCheck -check-prefix=RV64I %s
+; RUN: | FileCheck -check-prefix=RV64I %s
; RUN: llc -mtriple=riscv64 -verify-machineinstrs -code-model=medium < %s \
-; RUN: -riscv-enable-sink-fold | FileCheck -check-prefix=RV64I-MEDIUM %s
+; RUN: | FileCheck -check-prefix=RV64I-MEDIUM %s
; We can often fold an ADDI into the offset of load/store instructions:
; (load (addi base, off1), off2) -> (load base, off1+off2)
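A minimal sketch of the fold described above (illustrative only, not part of
this patch; the function name and offsets are made up). If the address
arithmetic for the access below reaches instruction selection as two separate
constant offsets, it matches the pattern (load (addi base, 40), 8) and folds
to (load base, 48), i.e. a single load with an immediate of 48 and no
standalone addi, provided the intermediate pointer has no other uses.

    /* Assuming a 4-byte int: q = p + 10 is a byte offset of 40, and q[2]
       adds another 8, so the folded load reads 48 bytes past p. */
    int load_at_folded_offset(const int *p) {
      const int *q = p + 10;
      return q[2];
    }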
diff --git a/llvm/test/CodeGen/RISCV/make-compressible.mir b/llvm/test/CodeGen/RISCV/make-compressible.mir
index 2105a13bc8c7..03da38a6863e 100644
--- a/llvm/test/CodeGen/RISCV/make-compressible.mir
+++ b/llvm/test/CodeGen/RISCV/make-compressible.mir
@@ -1,8 +1,14 @@
# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
# RUN: llc -o - %s -mtriple=riscv32 -mattr=+c,+f,+d -simplify-mir \
-# RUN: -run-pass=riscv-make-compressible | FileCheck --check-prefix=RV32 %s
+# RUN: -run-pass=riscv-make-compressible | FileCheck --check-prefixes=RV32,RV32C %s
# RUN: llc -o - %s -mtriple=riscv64 -mattr=+c,+f,+d -simplify-mir \
-# RUN: -run-pass=riscv-make-compressible | FileCheck --check-prefix=RV64 %s
+# RUN: -run-pass=riscv-make-compressible | FileCheck --check-prefixes=RV64,RV64C %s
+# RUN: llc -o - %s -mtriple=riscv32 -mattr=+d,+zcf -simplify-mir \
+# RUN: -run-pass=riscv-make-compressible | FileCheck --check-prefixes=RV32,RV32ZCF %s
+# RUN: llc -o - %s -mtriple=riscv32 -mattr=+d,+zca -simplify-mir \
+# RUN: -run-pass=riscv-make-compressible | FileCheck --check-prefixes=RV32,RV32ZCA %s
+# RUN: llc -o - %s -mtriple=riscv64 -mattr=+d,+zca -simplify-mir \
+# RUN: -run-pass=riscv-make-compressible | FileCheck --check-prefixes=RV64,RV64ZCA %s
--- |
define void @store_common_value(ptr %a, ptr %b, ptr %c) #0 {
@@ -288,7 +294,7 @@
ret { double, double } %3
}
- attributes #0 = { minsize "target-features"="+c,+f,+d" }
+ attributes #0 = { minsize }
...
---
@@ -306,6 +312,7 @@ body: |
; RV32-NEXT: SW $x13, killed renamable $x11, 0 :: (store (s32) into %ir.b)
; RV32-NEXT: SW $x13, killed renamable $x12, 0 :: (store (s32) into %ir.c)
; RV32-NEXT: PseudoRET
+ ;
; RV64-LABEL: name: store_common_value
; RV64: liveins: $x10, $x11, $x12
; RV64-NEXT: {{ $}}
@@ -327,14 +334,15 @@ body: |
bb.0.entry:
liveins: $x10, $x11, $x12, $f16_f
- ; RV32-LABEL: name: store_common_value_float
- ; RV32: liveins: $x10, $x11, $x12, $f16_f
- ; RV32-NEXT: {{ $}}
- ; RV32-NEXT: $f15_f = FSGNJ_S $f16_f, $f16_f
- ; RV32-NEXT: FSW $f15_f, killed renamable $x10, 0 :: (store (s32) into %ir.a)
- ; RV32-NEXT: FSW $f15_f, killed renamable $x11, 0 :: (store (s32) into %ir.b)
- ; RV32-NEXT: FSW killed $f15_f, killed renamable $x12, 0 :: (store (s32) into %ir.c)
- ; RV32-NEXT: PseudoRET
+ ; RV32C-LABEL: name: store_common_value_float
+ ; RV32C: liveins: $x10, $x11, $x12, $f16_f
+ ; RV32C-NEXT: {{ $}}
+ ; RV32C-NEXT: $f15_f = FSGNJ_S $f16_f, $f16_f
+ ; RV32C-NEXT: FSW $f15_f, killed renamable $x10, 0 :: (store (s32) into %ir.a)
+ ; RV32C-NEXT: FSW $f15_f, killed renamable $x11, 0 :: (store (s32) into %ir.b)
+ ; RV32C-NEXT: FSW killed $f15_f, killed renamable $x12, 0 :: (store (s32) into %ir.c)
+ ; RV32C-NEXT: PseudoRET
+ ;
; RV64-LABEL: name: store_common_value_float
; RV64: liveins: $x10, $x11, $x12, $f16_f
; RV64-NEXT: {{ $}}
@@ -342,6 +350,23 @@ body: |
; RV64-NEXT: FSW renamable $f16_f, killed renamable $x11, 0 :: (store (s32) into %ir.b)
; RV64-NEXT: FSW killed renamable $f16_f, killed renamable $x12, 0 :: (store (s32) into %ir.c)
; RV64-NEXT: PseudoRET
+ ;
+ ; RV32ZCF-LABEL: name: store_common_value_float
+ ; RV32ZCF: liveins: $x10, $x11, $x12, $f16_f
+ ; RV32ZCF-NEXT: {{ $}}
+ ; RV32ZCF-NEXT: $f15_f = FSGNJ_S $f16_f, $f16_f
+ ; RV32ZCF-NEXT: FSW $f15_f, killed renamable $x10, 0 :: (store (s32) into %ir.a)
+ ; RV32ZCF-NEXT: FSW $f15_f, killed renamable $x11, 0 :: (store (s32) into %ir.b)
+ ; RV32ZCF-NEXT: FSW killed $f15_f, killed renamable $x12, 0 :: (store (s32) into %ir.c)
+ ; RV32ZCF-NEXT: PseudoRET
+ ;
+ ; RV32ZCA-LABEL: name: store_common_value_float
+ ; RV32ZCA: liveins: $x10, $x11, $x12, $f16_f
+ ; RV32ZCA-NEXT: {{ $}}
+ ; RV32ZCA-NEXT: FSW renamable $f16_f, killed renamable $x10, 0 :: (store (s32) into %ir.a)
+ ; RV32ZCA-NEXT: FSW renamable $f16_f, killed renamable $x11, 0 :: (store (s32) into %ir.b)
+ ; RV32ZCA-NEXT: FSW killed renamable $f16_f, killed renamable $x12, 0 :: (store (s32) into %ir.c)
+ ; RV32ZCA-NEXT: PseudoRET
FSW renamable $f16_f, killed renamable $x10, 0 :: (store (s32) into %ir.a)
FSW renamable $f16_f, killed renamable $x11, 0 :: (store (s32) into %ir.b)
FSW killed renamable $f16_f, killed renamable $x12, 0 :: (store (s32) into %ir.c)
@@ -355,22 +380,47 @@ body: |
bb.0.entry:
liveins: $x10, $x11, $x12, $f16_d
- ; RV32-LABEL: name: store_common_value_double
- ; RV32: liveins: $x10, $x11, $x12, $f16_d
- ; RV32-NEXT: {{ $}}
- ; RV32-NEXT: $f15_d = FSGNJ_D $f16_d, $f16_d
- ; RV32-NEXT: FSD $f15_d, killed renamable $x10, 0 :: (store (s64) into %ir.a)
- ; RV32-NEXT: FSD $f15_d, killed renamable $x11, 0 :: (store (s64) into %ir.b)
- ; RV32-NEXT: FSD killed $f15_d, killed renamable $x12, 0 :: (store (s64) into %ir.c)
- ; RV32-NEXT: PseudoRET
- ; RV64-LABEL: name: store_common_value_double
- ; RV64: liveins: $x10, $x11, $x12, $f16_d
- ; RV64-NEXT: {{ $}}
- ; RV64-NEXT: $f15_d = FSGNJ_D $f16_d, $f16_d
- ; RV64-NEXT: FSD $f15_d, killed renamable $x10, 0 :: (store (s64) into %ir.a)
- ; RV64-NEXT: FSD $f15_d, killed renamable $x11, 0 :: (store (s64) into %ir.b)
- ; RV64-NEXT: FSD killed $f15_d, killed renamable $x12, 0 :: (store (s64) into %ir.c)
- ; RV64-NEXT: PseudoRET
+ ; RV32C-LABEL: name: store_common_value_double
+ ; RV32C: liveins: $x10, $x11, $x12, $f16_d
+ ; RV32C-NEXT: {{ $}}
+ ; RV32C-NEXT: $f15_d = FSGNJ_D $f16_d, $f16_d
+ ; RV32C-NEXT: FSD $f15_d, killed renamable $x10, 0 :: (store (s64) into %ir.a)
+ ; RV32C-NEXT: FSD $f15_d, killed renamable $x11, 0 :: (store (s64) into %ir.b)
+ ; RV32C-NEXT: FSD killed $f15_d, killed renamable $x12, 0 :: (store (s64) into %ir.c)
+ ; RV32C-NEXT: PseudoRET
+ ;
+ ; RV64C-LABEL: name: store_common_value_double
+ ; RV64C: liveins: $x10, $x11, $x12, $f16_d
+ ; RV64C-NEXT: {{ $}}
+ ; RV64C-NEXT: $f15_d = FSGNJ_D $f16_d, $f16_d
+ ; RV64C-NEXT: FSD $f15_d, killed renamable $x10, 0 :: (store (s64) into %ir.a)
+ ; RV64C-NEXT: FSD $f15_d, killed renamable $x11, 0 :: (store (s64) into %ir.b)
+ ; RV64C-NEXT: FSD killed $f15_d, killed renamable $x12, 0 :: (store (s64) into %ir.c)
+ ; RV64C-NEXT: PseudoRET
+ ;
+ ; RV32ZCF-LABEL: name: store_common_value_double
+ ; RV32ZCF: liveins: $x10, $x11, $x12, $f16_d
+ ; RV32ZCF-NEXT: {{ $}}
+ ; RV32ZCF-NEXT: FSD renamable $f16_d, killed renamable $x10, 0 :: (store (s64) into %ir.a)
+ ; RV32ZCF-NEXT: FSD renamable $f16_d, killed renamable $x11, 0 :: (store (s64) into %ir.b)
+ ; RV32ZCF-NEXT: FSD killed renamable $f16_d, killed renamable $x12, 0 :: (store (s64) into %ir.c)
+ ; RV32ZCF-NEXT: PseudoRET
+ ;
+ ; RV32ZCA-LABEL: name: store_common_value_double
+ ; RV32ZCA: liveins: $x10, $x11, $x12, $f16_d
+ ; RV32ZCA-NEXT: {{ $}}
+ ; RV32ZCA-NEXT: FSD renamable $f16_d, killed renamable $x10, 0 :: (store (s64) into %ir.a)
+ ; RV32ZCA-NEXT: FSD renamable $f16_d, killed renamable $x11, 0 :: (store (s64) into %ir.b)
+ ; RV32ZCA-NEXT: FSD killed renamable $f16_d, killed renamable $x12, 0 :: (store (s64) into %ir.c)
+ ; RV32ZCA-NEXT: PseudoRET
+ ;
+ ; RV64ZCA-LABEL: name: store_common_value_double
+ ; RV64ZCA: liveins: $x10, $x11, $x12, $f16_d
+ ; RV64ZCA-NEXT: {{ $}}
+ ; RV64ZCA-NEXT: FSD renamable $f16_d, killed renamable $x10, 0 :: (store (s64) into %ir.a)
+ ; RV64ZCA-NEXT: FSD renamable $f16_d, killed renamable $x11, 0 :: (store (s64) into %ir.b)
+ ; RV64ZCA-NEXT: FSD killed renamable $f16_d, killed renamable $x12, 0 :: (store (s64) into %ir.c)
+ ; RV64ZCA-NEXT: PseudoRET
FSD renamable $f16_d, killed renamable $x10, 0 :: (store (s64) into %ir.a)
FSD renamable $f16_d, killed renamable $x11, 0 :: (store (s64) into %ir.b)
FSD killed renamable $f16_d, killed renamable $x12, 0 :: (store (s64) into %ir.c)
@@ -395,6 +445,7 @@ body: |
; RV32-NEXT: renamable $x10 = ADDI $x0, 5
; RV32-NEXT: SW killed renamable $x10, killed $x11, 0 :: (volatile store (s32) into %ir.p)
; RV32-NEXT: PseudoRET
+ ;
; RV64-LABEL: name: store_common_ptr
; RV64: liveins: $x16
; RV64-NEXT: {{ $}}
@@ -432,6 +483,7 @@ body: |
; RV32-NEXT: SW killed renamable $x10, $x11, 0 :: (volatile store (s32) into %ir.p)
; RV32-NEXT: SW killed $x11, $x11, 0 :: (volatile store (s32) into %ir.q)
; RV32-NEXT: PseudoRET
+ ;
; RV64-LABEL: name: store_common_ptr_self
; RV64: liveins: $x16
; RV64-NEXT: {{ $}}
@@ -457,14 +509,15 @@ body: |
bb.0.entry:
liveins: $x16, $f10_f, $f11_f, $f12_f
- ; RV32-LABEL: name: store_common_ptr_float
- ; RV32: liveins: $x16, $f10_f, $f11_f, $f12_f
- ; RV32-NEXT: {{ $}}
- ; RV32-NEXT: $x10 = ADDI $x16, 0
- ; RV32-NEXT: FSW killed renamable $f10_f, $x10, 0 :: (volatile store (s32) into %ir.p)
- ; RV32-NEXT: FSW killed renamable $f11_f, $x10, 0 :: (volatile store (s32) into %ir.p)
- ; RV32-NEXT: FSW killed renamable $f12_f, killed $x10, 0 :: (volatile store (s32) into %ir.p)
- ; RV32-NEXT: PseudoRET
+ ; RV32C-LABEL: name: store_common_ptr_float
+ ; RV32C: liveins: $x16, $f10_f, $f11_f, $f12_f
+ ; RV32C-NEXT: {{ $}}
+ ; RV32C-NEXT: $x10 = ADDI $x16, 0
+ ; RV32C-NEXT: FSW killed renamable $f10_f, $x10, 0 :: (volatile store (s32) into %ir.p)
+ ; RV32C-NEXT: FSW killed renamable $f11_f, $x10, 0 :: (volatile store (s32) into %ir.p)
+ ; RV32C-NEXT: FSW killed renamable $f12_f, killed $x10, 0 :: (volatile store (s32) into %ir.p)
+ ; RV32C-NEXT: PseudoRET
+ ;
; RV64-LABEL: name: store_common_ptr_float
; RV64: liveins: $x16, $f10_f, $f11_f, $f12_f
; RV64-NEXT: {{ $}}
@@ -472,6 +525,23 @@ body: |
; RV64-NEXT: FSW killed renamable $f11_f, renamable $x16, 0 :: (volatile store (s32) into %ir.p)
; RV64-NEXT: FSW killed renamable $f12_f, killed renamable $x16, 0 :: (volatile store (s32) into %ir.p)
; RV64-NEXT: PseudoRET
+ ;
+ ; RV32ZCF-LABEL: name: store_common_ptr_float
+ ; RV32ZCF: liveins: $x16, $f10_f, $f11_f, $f12_f
+ ; RV32ZCF-NEXT: {{ $}}
+ ; RV32ZCF-NEXT: $x10 = ADDI $x16, 0
+ ; RV32ZCF-NEXT: FSW killed renamable $f10_f, $x10, 0 :: (volatile store (s32) into %ir.p)
+ ; RV32ZCF-NEXT: FSW killed renamable $f11_f, $x10, 0 :: (volatile store (s32) into %ir.p)
+ ; RV32ZCF-NEXT: FSW killed renamable $f12_f, killed $x10, 0 :: (volatile store (s32) into %ir.p)
+ ; RV32ZCF-NEXT: PseudoRET
+ ;
+ ; RV32ZCA-LABEL: name: store_common_ptr_float
+ ; RV32ZCA: liveins: $x16, $f10_f, $f11_f, $f12_f
+ ; RV32ZCA-NEXT: {{ $}}
+ ; RV32ZCA-NEXT: FSW killed renamable $f10_f, renamable $x16, 0 :: (volatile store (s32) into %ir.p)
+ ; RV32ZCA-NEXT: FSW killed renamable $f11_f, renamable $x16, 0 :: (volatile store (s32) into %ir.p)
+ ; RV32ZCA-NEXT: FSW killed renamable $f12_f, killed renamable $x16, 0 :: (volatile store (s32) into %ir.p)
+ ; RV32ZCA-NEXT: PseudoRET
FSW killed renamable $f10_f, renamable $x16, 0 :: (volatile store (s32) into %ir.p)
FSW killed renamable $f11_f, renamable $x16, 0 :: (volatile store (s32) into %ir.p)
FSW killed renamable $f12_f, killed renamable $x16, 0 :: (volatile store (s32) into %ir.p)
@@ -485,22 +555,47 @@ body: |
bb.0.entry:
liveins: $x16, $f10_d, $f11_d, $f12_d
- ; RV32-LABEL: name: store_common_ptr_double
- ; RV32: liveins: $x16, $f10_d, $f11_d, $f12_d
- ; RV32-NEXT: {{ $}}
- ; RV32-NEXT: $x10 = ADDI $x16, 0
- ; RV32-NEXT: FSD killed renamable $f10_d, $x10, 0 :: (volatile store (s64) into %ir.p)
- ; RV32-NEXT: FSD killed renamable $f11_d, $x10, 0 :: (volatile store (s64) into %ir.p)
- ; RV32-NEXT: FSD killed renamable $f12_d, killed $x10, 0 :: (volatile store (s64) into %ir.p)
- ; RV32-NEXT: PseudoRET
- ; RV64-LABEL: name: store_common_ptr_double
- ; RV64: liveins: $x16, $f10_d, $f11_d, $f12_d
- ; RV64-NEXT: {{ $}}
- ; RV64-NEXT: $x10 = ADDI $x16, 0
- ; RV64-NEXT: FSD killed renamable $f10_d, $x10, 0 :: (volatile store (s64) into %ir.p)
- ; RV64-NEXT: FSD killed renamable $f11_d, $x10, 0 :: (volatile store (s64) into %ir.p)
- ; RV64-NEXT: FSD killed renamable $f12_d, killed $x10, 0 :: (volatile store (s64) into %ir.p)
- ; RV64-NEXT: PseudoRET
+ ; RV32C-LABEL: name: store_common_ptr_double
+ ; RV32C: liveins: $x16, $f10_d, $f11_d, $f12_d
+ ; RV32C-NEXT: {{ $}}
+ ; RV32C-NEXT: $x10 = ADDI $x16, 0
+ ; RV32C-NEXT: FSD killed renamable $f10_d, $x10, 0 :: (volatile store (s64) into %ir.p)
+ ; RV32C-NEXT: FSD killed renamable $f11_d, $x10, 0 :: (volatile store (s64) into %ir.p)
+ ; RV32C-NEXT: FSD killed renamable $f12_d, killed $x10, 0 :: (volatile store (s64) into %ir.p)
+ ; RV32C-NEXT: PseudoRET
+ ;
+ ; RV64C-LABEL: name: store_common_ptr_double
+ ; RV64C: liveins: $x16, $f10_d, $f11_d, $f12_d
+ ; RV64C-NEXT: {{ $}}
+ ; RV64C-NEXT: $x10 = ADDI $x16, 0
+ ; RV64C-NEXT: FSD killed renamable $f10_d, $x10, 0 :: (volatile store (s64) into %ir.p)
+ ; RV64C-NEXT: FSD killed renamable $f11_d, $x10, 0 :: (volatile store (s64) into %ir.p)
+ ; RV64C-NEXT: FSD killed renamable $f12_d, killed $x10, 0 :: (volatile store (s64) into %ir.p)
+ ; RV64C-NEXT: PseudoRET
+ ;
+ ; RV32ZCF-LABEL: name: store_common_ptr_double
+ ; RV32ZCF: liveins: $x16, $f10_d, $f11_d, $f12_d
+ ; RV32ZCF-NEXT: {{ $}}
+ ; RV32ZCF-NEXT: FSD killed renamable $f10_d, renamable $x16, 0 :: (volatile store (s64) into %ir.p)
+ ; RV32ZCF-NEXT: FSD killed renamable $f11_d, renamable $x16, 0 :: (volatile store (s64) into %ir.p)
+ ; RV32ZCF-NEXT: FSD killed renamable $f12_d, killed renamable $x16, 0 :: (volatile store (s64) into %ir.p)
+ ; RV32ZCF-NEXT: PseudoRET
+ ;
+ ; RV32ZCA-LABEL: name: store_common_ptr_double
+ ; RV32ZCA: liveins: $x16, $f10_d, $f11_d, $f12_d
+ ; RV32ZCA-NEXT: {{ $}}
+ ; RV32ZCA-NEXT: FSD killed renamable $f10_d, renamable $x16, 0 :: (volatile store (s64) into %ir.p)
+ ; RV32ZCA-NEXT: FSD killed renamable $f11_d, renamable $x16, 0 :: (volatile store (s64) into %ir.p)
+ ; RV32ZCA-NEXT: FSD killed renamable $f12_d, killed renamable $x16, 0 :: (volatile store (s64) into %ir.p)
+ ; RV32ZCA-NEXT: PseudoRET
+ ;
+ ; RV64ZCA-LABEL: name: store_common_ptr_double
+ ; RV64ZCA: liveins: $x16, $f10_d, $f11_d, $f12_d
+ ; RV64ZCA-NEXT: {{ $}}
+ ; RV64ZCA-NEXT: FSD killed renamable $f10_d, renamable $x16, 0 :: (volatile store (s64) into %ir.p)
+ ; RV64ZCA-NEXT: FSD killed renamable $f11_d, renamable $x16, 0 :: (volatile store (s64) into %ir.p)
+ ; RV64ZCA-NEXT: FSD killed renamable $f12_d, killed renamable $x16, 0 :: (volatile store (s64) into %ir.p)
+ ; RV64ZCA-NEXT: PseudoRET
FSD killed renamable $f10_d, renamable $x16, 0 :: (volatile store (s64) into %ir.p)
FSD killed renamable $f11_d, renamable $x16, 0 :: (volatile store (s64) into %ir.p)
FSD killed renamable $f12_d, killed renamable $x16, 0 :: (volatile store (s64) into %ir.p)
@@ -522,6 +617,7 @@ body: |
; RV32-NEXT: dead renamable $x10 = LW $x11, 0 :: (volatile load (s32) from %ir.p)
; RV32-NEXT: dead renamable $x10 = LW killed $x11, 0 :: (volatile load (s32) from %ir.p)
; RV32-NEXT: PseudoRET
+ ;
; RV64-LABEL: name: load_common_ptr
; RV64: liveins: $x16
; RV64-NEXT: {{ $}}
@@ -543,14 +639,15 @@ body: |
bb.0.entry:
liveins: $x16
- ; RV32-LABEL: name: load_common_ptr_float
- ; RV32: liveins: $x16
- ; RV32-NEXT: {{ $}}
- ; RV32-NEXT: $x10 = ADDI $x16, 0
- ; RV32-NEXT: renamable $f10_f = FLW $x10, 0 :: (load (s32) from %ir.g)
- ; RV32-NEXT: renamable $f11_f = FLW $x10, 4 :: (load (s32) from %ir.arrayidx1)
- ; RV32-NEXT: renamable $f12_f = FLW killed $x10, 8 :: (load (s32) from %ir.arrayidx2)
- ; RV32-NEXT: PseudoTAIL target-flags(riscv-call) @load_common_ptr_float_1, implicit $x2, implicit $f10_f, implicit $f11_f, implicit $f12_f
+ ; RV32C-LABEL: name: load_common_ptr_float
+ ; RV32C: liveins: $x16
+ ; RV32C-NEXT: {{ $}}
+ ; RV32C-NEXT: $x10 = ADDI $x16, 0
+ ; RV32C-NEXT: renamable $f10_f = FLW $x10, 0 :: (load (s32) from %ir.g)
+ ; RV32C-NEXT: renamable $f11_f = FLW $x10, 4 :: (load (s32) from %ir.arrayidx1)
+ ; RV32C-NEXT: renamable $f12_f = FLW killed $x10, 8 :: (load (s32) from %ir.arrayidx2)
+ ; RV32C-NEXT: PseudoTAIL target-flags(riscv-call) @load_common_ptr_float_1, implicit $x2, implicit $f10_f, implicit $f11_f, implicit $f12_f
+ ;
; RV64-LABEL: name: load_common_ptr_float
; RV64: liveins: $x16
; RV64-NEXT: {{ $}}
@@ -558,6 +655,23 @@ body: |
; RV64-NEXT: renamable $f11_f = FLW renamable $x16, 4 :: (load (s32) from %ir.arrayidx1)
; RV64-NEXT: renamable $f12_f = FLW killed renamable $x16, 8 :: (load (s32) from %ir.arrayidx2)
; RV64-NEXT: PseudoTAIL target-flags(riscv-call) @load_common_ptr_float_1, implicit $x2, implicit $f10_f, implicit $f11_f, implicit $f12_f
+ ;
+ ; RV32ZCF-LABEL: name: load_common_ptr_float
+ ; RV32ZCF: liveins: $x16
+ ; RV32ZCF-NEXT: {{ $}}
+ ; RV32ZCF-NEXT: $x10 = ADDI $x16, 0
+ ; RV32ZCF-NEXT: renamable $f10_f = FLW $x10, 0 :: (load (s32) from %ir.g)
+ ; RV32ZCF-NEXT: renamable $f11_f = FLW $x10, 4 :: (load (s32) from %ir.arrayidx1)
+ ; RV32ZCF-NEXT: renamable $f12_f = FLW killed $x10, 8 :: (load (s32) from %ir.arrayidx2)
+ ; RV32ZCF-NEXT: PseudoTAIL target-flags(riscv-call) @load_common_ptr_float_1, implicit $x2, implicit $f10_f, implicit $f11_f, implicit $f12_f
+ ;
+ ; RV32ZCA-LABEL: name: load_common_ptr_float
+ ; RV32ZCA: liveins: $x16
+ ; RV32ZCA-NEXT: {{ $}}
+ ; RV32ZCA-NEXT: renamable $f10_f = FLW renamable $x16, 0 :: (load (s32) from %ir.g)
+ ; RV32ZCA-NEXT: renamable $f11_f = FLW renamable $x16, 4 :: (load (s32) from %ir.arrayidx1)
+ ; RV32ZCA-NEXT: renamable $f12_f = FLW killed renamable $x16, 8 :: (load (s32) from %ir.arrayidx2)
+ ; RV32ZCA-NEXT: PseudoTAIL target-flags(riscv-call) @load_common_ptr_float_1, implicit $x2, implicit $f10_f, implicit $f11_f, implicit $f12_f
renamable $f10_f = FLW renamable $x16, 0 :: (load (s32) from %ir.g)
renamable $f11_f = FLW renamable $x16, 4 :: (load (s32) from %ir.arrayidx1)
renamable $f12_f = FLW killed renamable $x16, 8 :: (load (s32) from %ir.arrayidx2)
@@ -571,22 +685,47 @@ body: |
bb.0.entry:
liveins: $x16
- ; RV32-LABEL: name: load_common_ptr_double
- ; RV32: liveins: $x16
- ; RV32-NEXT: {{ $}}
- ; RV32-NEXT: $x10 = ADDI $x16, 0
- ; RV32-NEXT: renamable $f10_d = FLD $x10, 0 :: (load (s64) from %ir.g)
- ; RV32-NEXT: renamable $f11_d = FLD $x10, 8 :: (load (s64) from %ir.arrayidx1)
- ; RV32-NEXT: renamable $f12_d = FLD killed $x10, 16 :: (load (s64) from %ir.arrayidx2)
- ; RV32-NEXT: PseudoTAIL target-flags(riscv-call) @load_common_ptr_double_1, implicit $x2, implicit $f10_d, implicit $f11_d, implicit $f12_d
- ; RV64-LABEL: name: load_common_ptr_double
- ; RV64: liveins: $x16
- ; RV64-NEXT: {{ $}}
- ; RV64-NEXT: $x10 = ADDI $x16, 0
- ; RV64-NEXT: renamable $f10_d = FLD $x10, 0 :: (load (s64) from %ir.g)
- ; RV64-NEXT: renamable $f11_d = FLD $x10, 8 :: (load (s64) from %ir.arrayidx1)
- ; RV64-NEXT: renamable $f12_d = FLD killed $x10, 16 :: (load (s64) from %ir.arrayidx2)
- ; RV64-NEXT: PseudoTAIL target-flags(riscv-call) @load_common_ptr_double_1, implicit $x2, implicit $f10_d, implicit $f11_d, implicit $f12_d
+ ; RV32C-LABEL: name: load_common_ptr_double
+ ; RV32C: liveins: $x16
+ ; RV32C-NEXT: {{ $}}
+ ; RV32C-NEXT: $x10 = ADDI $x16, 0
+ ; RV32C-NEXT: renamable $f10_d = FLD $x10, 0 :: (load (s64) from %ir.g)
+ ; RV32C-NEXT: renamable $f11_d = FLD $x10, 8 :: (load (s64) from %ir.arrayidx1)
+ ; RV32C-NEXT: renamable $f12_d = FLD killed $x10, 16 :: (load (s64) from %ir.arrayidx2)
+ ; RV32C-NEXT: PseudoTAIL target-flags(riscv-call) @load_common_ptr_double_1, implicit $x2, implicit $f10_d, implicit $f11_d, implicit $f12_d
+ ;
+ ; RV64C-LABEL: name: load_common_ptr_double
+ ; RV64C: liveins: $x16
+ ; RV64C-NEXT: {{ $}}
+ ; RV64C-NEXT: $x10 = ADDI $x16, 0
+ ; RV64C-NEXT: renamable $f10_d = FLD $x10, 0 :: (load (s64) from %ir.g)
+ ; RV64C-NEXT: renamable $f11_d = FLD $x10, 8 :: (load (s64) from %ir.arrayidx1)
+ ; RV64C-NEXT: renamable $f12_d = FLD killed $x10, 16 :: (load (s64) from %ir.arrayidx2)
+ ; RV64C-NEXT: PseudoTAIL target-flags(riscv-call) @load_common_ptr_double_1, implicit $x2, implicit $f10_d, implicit $f11_d, implicit $f12_d
+ ;
+ ; RV32ZCF-LABEL: name: load_common_ptr_double
+ ; RV32ZCF: liveins: $x16
+ ; RV32ZCF-NEXT: {{ $}}
+ ; RV32ZCF-NEXT: renamable $f10_d = FLD renamable $x16, 0 :: (load (s64) from %ir.g)
+ ; RV32ZCF-NEXT: renamable $f11_d = FLD renamable $x16, 8 :: (load (s64) from %ir.arrayidx1)
+ ; RV32ZCF-NEXT: renamable $f12_d = FLD killed renamable $x16, 16 :: (load (s64) from %ir.arrayidx2)
+ ; RV32ZCF-NEXT: PseudoTAIL target-flags(riscv-call) @load_common_ptr_double_1, implicit $x2, implicit $f10_d, implicit $f11_d, implicit $f12_d
+ ;
+ ; RV32ZCA-LABEL: name: load_common_ptr_double
+ ; RV32ZCA: liveins: $x16
+ ; RV32ZCA-NEXT: {{ $}}
+ ; RV32ZCA-NEXT: renamable $f10_d = FLD renamable $x16, 0 :: (load (s64) from %ir.g)
+ ; RV32ZCA-NEXT: renamable $f11_d = FLD renamable $x16, 8 :: (load (s64) from %ir.arrayidx1)
+ ; RV32ZCA-NEXT: renamable $f12_d = FLD killed renamable $x16, 16 :: (load (s64) from %ir.arrayidx2)
+ ; RV32ZCA-NEXT: PseudoTAIL target-flags(riscv-call) @load_common_ptr_double_1, implicit $x2, implicit $f10_d, implicit $f11_d, implicit $f12_d
+ ;
+ ; RV64ZCA-LABEL: name: load_common_ptr_double
+ ; RV64ZCA: liveins: $x16
+ ; RV64ZCA-NEXT: {{ $}}
+ ; RV64ZCA-NEXT: renamable $f10_d = FLD renamable $x16, 0 :: (load (s64) from %ir.g)
+ ; RV64ZCA-NEXT: renamable $f11_d = FLD renamable $x16, 8 :: (load (s64) from %ir.arrayidx1)
+ ; RV64ZCA-NEXT: renamable $f12_d = FLD killed renamable $x16, 16 :: (load (s64) from %ir.arrayidx2)
+ ; RV64ZCA-NEXT: PseudoTAIL target-flags(riscv-call) @load_common_ptr_double_1, implicit $x2, implicit $f10_d, implicit $f11_d, implicit $f12_d
renamable $f10_d = FLD renamable $x16, 0 :: (load (s64) from %ir.g)
renamable $f11_d = FLD renamable $x16, 8 :: (load (s64) from %ir.arrayidx1)
renamable $f12_d = FLD killed renamable $x16, 16 :: (load (s64) from %ir.arrayidx2)
@@ -613,6 +752,7 @@ body: |
; RV32-NEXT: renamable $x11 = ADDI $x0, 7
; RV32-NEXT: SW killed renamable $x11, killed $x12, 28 :: (volatile store (s32) into %ir.3)
; RV32-NEXT: PseudoRET
+ ;
; RV64-LABEL: name: store_large_offset
; RV64: liveins: $x10
; RV64-NEXT: {{ $}}
@@ -644,15 +784,16 @@ body: |
bb.0.entry:
liveins: $x10, $f10_f, $f11_f, $f12_f, $f13_f
- ; RV32-LABEL: name: store_large_offset_float
- ; RV32: liveins: $x10, $f10_f, $f11_f, $f12_f, $f13_f
- ; RV32-NEXT: {{ $}}
- ; RV32-NEXT: $x11 = ADDI $x10, 384
- ; RV32-NEXT: FSW killed renamable $f10_f, $x11, 16 :: (volatile store (s32) into %ir.0)
- ; RV32-NEXT: FSW killed renamable $f11_f, $x11, 20 :: (volatile store (s32) into %ir.1)
- ; RV32-NEXT: FSW killed renamable $f12_f, $x11, 24 :: (volatile store (s32) into %ir.2)
- ; RV32-NEXT: FSW killed renamable $f13_f, killed $x11, 28 :: (volatile store (s32) into %ir.3)
- ; RV32-NEXT: PseudoRET
+ ; RV32C-LABEL: name: store_large_offset_float
+ ; RV32C: liveins: $x10, $f10_f, $f11_f, $f12_f, $f13_f
+ ; RV32C-NEXT: {{ $}}
+ ; RV32C-NEXT: $x11 = ADDI $x10, 384
+ ; RV32C-NEXT: FSW killed renamable $f10_f, $x11, 16 :: (volatile store (s32) into %ir.0)
+ ; RV32C-NEXT: FSW killed renamable $f11_f, $x11, 20 :: (volatile store (s32) into %ir.1)
+ ; RV32C-NEXT: FSW killed renamable $f12_f, $x11, 24 :: (volatile store (s32) into %ir.2)
+ ; RV32C-NEXT: FSW killed renamable $f13_f, killed $x11, 28 :: (volatile store (s32) into %ir.3)
+ ; RV32C-NEXT: PseudoRET
+ ;
; RV64-LABEL: name: store_large_offset_float
; RV64: liveins: $x10, $f10_f, $f11_f, $f12_f, $f13_f
; RV64-NEXT: {{ $}}
@@ -661,6 +802,25 @@ body: |
; RV64-NEXT: FSW killed renamable $f12_f, renamable $x10, 408 :: (volatile store (s32) into %ir.2)
; RV64-NEXT: FSW killed renamable $f13_f, killed renamable $x10, 412 :: (volatile store (s32) into %ir.3)
; RV64-NEXT: PseudoRET
+ ;
+ ; RV32ZCF-LABEL: name: store_large_offset_float
+ ; RV32ZCF: liveins: $x10, $f10_f, $f11_f, $f12_f, $f13_f
+ ; RV32ZCF-NEXT: {{ $}}
+ ; RV32ZCF-NEXT: $x11 = ADDI $x10, 384
+ ; RV32ZCF-NEXT: FSW killed renamable $f10_f, $x11, 16 :: (volatile store (s32) into %ir.0)
+ ; RV32ZCF-NEXT: FSW killed renamable $f11_f, $x11, 20 :: (volatile store (s32) into %ir.1)
+ ; RV32ZCF-NEXT: FSW killed renamable $f12_f, $x11, 24 :: (volatile store (s32) into %ir.2)
+ ; RV32ZCF-NEXT: FSW killed renamable $f13_f, killed $x11, 28 :: (volatile store (s32) into %ir.3)
+ ; RV32ZCF-NEXT: PseudoRET
+ ;
+ ; RV32ZCA-LABEL: name: store_large_offset_float
+ ; RV32ZCA: liveins: $x10, $f10_f, $f11_f, $f12_f, $f13_f
+ ; RV32ZCA-NEXT: {{ $}}
+ ; RV32ZCA-NEXT: FSW killed renamable $f10_f, renamable $x10, 400 :: (volatile store (s32) into %ir.0)
+ ; RV32ZCA-NEXT: FSW killed renamable $f11_f, renamable $x10, 404 :: (volatile store (s32) into %ir.1)
+ ; RV32ZCA-NEXT: FSW killed renamable $f12_f, renamable $x10, 408 :: (volatile store (s32) into %ir.2)
+ ; RV32ZCA-NEXT: FSW killed renamable $f13_f, killed renamable $x10, 412 :: (volatile store (s32) into %ir.3)
+ ; RV32ZCA-NEXT: PseudoRET
FSW killed renamable $f10_f, renamable $x10, 400 :: (volatile store (s32) into %ir.0)
FSW killed renamable $f11_f, renamable $x10, 404 :: (volatile store (s32) into %ir.1)
FSW killed renamable $f12_f, renamable $x10, 408 :: (volatile store (s32) into %ir.2)
@@ -675,24 +835,52 @@ body: |
bb.0.entry:
liveins: $x10, $f10_d, $f11_d, $f12_d, $f13_d
- ; RV32-LABEL: name: store_large_offset_double
- ; RV32: liveins: $x10, $f10_d, $f11_d, $f12_d, $f13_d
- ; RV32-NEXT: {{ $}}
- ; RV32-NEXT: $x11 = ADDI $x10, 768
- ; RV32-NEXT: FSD killed renamable $f10_d, $x11, 32 :: (volatile store (s64) into %ir.0)
- ; RV32-NEXT: FSD killed renamable $f11_d, $x11, 40 :: (volatile store (s64) into %ir.1)
- ; RV32-NEXT: FSD killed renamable $f12_d, $x11, 48 :: (volatile store (s64) into %ir.2)
- ; RV32-NEXT: FSD killed renamable $f13_d, killed $x11, 56 :: (volatile store (s64) into %ir.3)
- ; RV32-NEXT: PseudoRET
- ; RV64-LABEL: name: store_large_offset_double
- ; RV64: liveins: $x10, $f10_d, $f11_d, $f12_d, $f13_d
- ; RV64-NEXT: {{ $}}
- ; RV64-NEXT: $x11 = ADDI $x10, 768
- ; RV64-NEXT: FSD killed renamable $f10_d, $x11, 32 :: (volatile store (s64) into %ir.0)
- ; RV64-NEXT: FSD killed renamable $f11_d, $x11, 40 :: (volatile store (s64) into %ir.1)
- ; RV64-NEXT: FSD killed renamable $f12_d, $x11, 48 :: (volatile store (s64) into %ir.2)
- ; RV64-NEXT: FSD killed renamable $f13_d, killed $x11, 56 :: (volatile store (s64) into %ir.3)
- ; RV64-NEXT: PseudoRET
+ ; RV32C-LABEL: name: store_large_offset_double
+ ; RV32C: liveins: $x10, $f10_d, $f11_d, $f12_d, $f13_d
+ ; RV32C-NEXT: {{ $}}
+ ; RV32C-NEXT: $x11 = ADDI $x10, 768
+ ; RV32C-NEXT: FSD killed renamable $f10_d, $x11, 32 :: (volatile store (s64) into %ir.0)
+ ; RV32C-NEXT: FSD killed renamable $f11_d, $x11, 40 :: (volatile store (s64) into %ir.1)
+ ; RV32C-NEXT: FSD killed renamable $f12_d, $x11, 48 :: (volatile store (s64) into %ir.2)
+ ; RV32C-NEXT: FSD killed renamable $f13_d, killed $x11, 56 :: (volatile store (s64) into %ir.3)
+ ; RV32C-NEXT: PseudoRET
+ ;
+ ; RV64C-LABEL: name: store_large_offset_double
+ ; RV64C: liveins: $x10, $f10_d, $f11_d, $f12_d, $f13_d
+ ; RV64C-NEXT: {{ $}}
+ ; RV64C-NEXT: $x11 = ADDI $x10, 768
+ ; RV64C-NEXT: FSD killed renamable $f10_d, $x11, 32 :: (volatile store (s64) into %ir.0)
+ ; RV64C-NEXT: FSD killed renamable $f11_d, $x11, 40 :: (volatile store (s64) into %ir.1)
+ ; RV64C-NEXT: FSD killed renamable $f12_d, $x11, 48 :: (volatile store (s64) into %ir.2)
+ ; RV64C-NEXT: FSD killed renamable $f13_d, killed $x11, 56 :: (volatile store (s64) into %ir.3)
+ ; RV64C-NEXT: PseudoRET
+ ;
+ ; RV32ZCF-LABEL: name: store_large_offset_double
+ ; RV32ZCF: liveins: $x10, $f10_d, $f11_d, $f12_d, $f13_d
+ ; RV32ZCF-NEXT: {{ $}}
+ ; RV32ZCF-NEXT: FSD killed renamable $f10_d, renamable $x10, 800 :: (volatile store (s64) into %ir.0)
+ ; RV32ZCF-NEXT: FSD killed renamable $f11_d, renamable $x10, 808 :: (volatile store (s64) into %ir.1)
+ ; RV32ZCF-NEXT: FSD killed renamable $f12_d, renamable $x10, 816 :: (volatile store (s64) into %ir.2)
+ ; RV32ZCF-NEXT: FSD killed renamable $f13_d, killed renamable $x10, 824 :: (volatile store (s64) into %ir.3)
+ ; RV32ZCF-NEXT: PseudoRET
+ ;
+ ; RV32ZCA-LABEL: name: store_large_offset_double
+ ; RV32ZCA: liveins: $x10, $f10_d, $f11_d, $f12_d, $f13_d
+ ; RV32ZCA-NEXT: {{ $}}
+ ; RV32ZCA-NEXT: FSD killed renamable $f10_d, renamable $x10, 800 :: (volatile store (s64) into %ir.0)
+ ; RV32ZCA-NEXT: FSD killed renamable $f11_d, renamable $x10, 808 :: (volatile store (s64) into %ir.1)
+ ; RV32ZCA-NEXT: FSD killed renamable $f12_d, renamable $x10, 816 :: (volatile store (s64) into %ir.2)
+ ; RV32ZCA-NEXT: FSD killed renamable $f13_d, killed renamable $x10, 824 :: (volatile store (s64) into %ir.3)
+ ; RV32ZCA-NEXT: PseudoRET
+ ;
+ ; RV64ZCA-LABEL: name: store_large_offset_double
+ ; RV64ZCA: liveins: $x10, $f10_d, $f11_d, $f12_d, $f13_d
+ ; RV64ZCA-NEXT: {{ $}}
+ ; RV64ZCA-NEXT: FSD killed renamable $f10_d, renamable $x10, 800 :: (volatile store (s64) into %ir.0)
+ ; RV64ZCA-NEXT: FSD killed renamable $f11_d, renamable $x10, 808 :: (volatile store (s64) into %ir.1)
+ ; RV64ZCA-NEXT: FSD killed renamable $f12_d, renamable $x10, 816 :: (volatile store (s64) into %ir.2)
+ ; RV64ZCA-NEXT: FSD killed renamable $f13_d, killed renamable $x10, 824 :: (volatile store (s64) into %ir.3)
+ ; RV64ZCA-NEXT: PseudoRET
FSD killed renamable $f10_d, renamable $x10, 800 :: (volatile store (s64) into %ir.0)
FSD killed renamable $f11_d, renamable $x10, 808 :: (volatile store (s64) into %ir.1)
FSD killed renamable $f12_d, renamable $x10, 816 :: (volatile store (s64) into %ir.2)
@@ -716,6 +904,7 @@ body: |
; RV32-NEXT: dead renamable $x11 = LW $x12, 24 :: (volatile load (s32) from %ir.2)
; RV32-NEXT: dead renamable $x10 = LW killed $x12, 28 :: (volatile load (s32) from %ir.3)
; RV32-NEXT: PseudoRET
+ ;
; RV64-LABEL: name: load_large_offset
; RV64: liveins: $x10
; RV64-NEXT: {{ $}}
@@ -739,14 +928,15 @@ body: |
bb.0.entry:
liveins: $x10
- ; RV32-LABEL: name: load_large_offset_float
- ; RV32: liveins: $x10
- ; RV32-NEXT: {{ $}}
- ; RV32-NEXT: $x11 = ADDI $x10, 384
- ; RV32-NEXT: renamable $f10_f = FLW $x11, 16 :: (load (s32) from %ir.arrayidx)
- ; RV32-NEXT: renamable $f11_f = FLW $x11, 20 :: (load (s32) from %ir.arrayidx1)
- ; RV32-NEXT: renamable $f12_f = FLW killed $x11, 24 :: (load (s32) from %ir.arrayidx2)
- ; RV32-NEXT: PseudoTAIL target-flags(riscv-call) @load_large_offset_float_1, implicit $x2, implicit $f10_f, implicit $f11_f, implicit $f12_f
+ ; RV32C-LABEL: name: load_large_offset_float
+ ; RV32C: liveins: $x10
+ ; RV32C-NEXT: {{ $}}
+ ; RV32C-NEXT: $x11 = ADDI $x10, 384
+ ; RV32C-NEXT: renamable $f10_f = FLW $x11, 16 :: (load (s32) from %ir.arrayidx)
+ ; RV32C-NEXT: renamable $f11_f = FLW $x11, 20 :: (load (s32) from %ir.arrayidx1)
+ ; RV32C-NEXT: renamable $f12_f = FLW killed $x11, 24 :: (load (s32) from %ir.arrayidx2)
+ ; RV32C-NEXT: PseudoTAIL target-flags(riscv-call) @load_large_offset_float_1, implicit $x2, implicit $f10_f, implicit $f11_f, implicit $f12_f
+ ;
; RV64-LABEL: name: load_large_offset_float
; RV64: liveins: $x10
; RV64-NEXT: {{ $}}
@@ -754,6 +944,23 @@ body: |
; RV64-NEXT: renamable $f11_f = FLW renamable $x10, 404 :: (load (s32) from %ir.arrayidx1)
; RV64-NEXT: renamable $f12_f = FLW killed renamable $x10, 408 :: (load (s32) from %ir.arrayidx2)
; RV64-NEXT: PseudoTAIL target-flags(riscv-call) @load_large_offset_float_1, implicit $x2, implicit $f10_f, implicit $f11_f, implicit $f12_f
+ ;
+ ; RV32ZCF-LABEL: name: load_large_offset_float
+ ; RV32ZCF: liveins: $x10
+ ; RV32ZCF-NEXT: {{ $}}
+ ; RV32ZCF-NEXT: $x11 = ADDI $x10, 384
+ ; RV32ZCF-NEXT: renamable $f10_f = FLW $x11, 16 :: (load (s32) from %ir.arrayidx)
+ ; RV32ZCF-NEXT: renamable $f11_f = FLW $x11, 20 :: (load (s32) from %ir.arrayidx1)
+ ; RV32ZCF-NEXT: renamable $f12_f = FLW killed $x11, 24 :: (load (s32) from %ir.arrayidx2)
+ ; RV32ZCF-NEXT: PseudoTAIL target-flags(riscv-call) @load_large_offset_float_1, implicit $x2, implicit $f10_f, implicit $f11_f, implicit $f12_f
+ ;
+ ; RV32ZCA-LABEL: name: load_large_offset_float
+ ; RV32ZCA: liveins: $x10
+ ; RV32ZCA-NEXT: {{ $}}
+ ; RV32ZCA-NEXT: renamable $f10_f = FLW renamable $x10, 400 :: (load (s32) from %ir.arrayidx)
+ ; RV32ZCA-NEXT: renamable $f11_f = FLW renamable $x10, 404 :: (load (s32) from %ir.arrayidx1)
+ ; RV32ZCA-NEXT: renamable $f12_f = FLW killed renamable $x10, 408 :: (load (s32) from %ir.arrayidx2)
+ ; RV32ZCA-NEXT: PseudoTAIL target-flags(riscv-call) @load_large_offset_float_1, implicit $x2, implicit $f10_f, implicit $f11_f, implicit $f12_f
renamable $f10_f = FLW renamable $x10, 400 :: (load (s32) from %ir.arrayidx)
renamable $f11_f = FLW renamable $x10, 404 :: (load (s32) from %ir.arrayidx1)
renamable $f12_f = FLW killed renamable $x10, 408 :: (load (s32) from %ir.arrayidx2)
@@ -767,22 +974,47 @@ body: |
bb.0.entry:
liveins: $x10
- ; RV32-LABEL: name: load_large_offset_double
- ; RV32: liveins: $x10
- ; RV32-NEXT: {{ $}}
- ; RV32-NEXT: $x11 = ADDI $x10, 768
- ; RV32-NEXT: renamable $f10_d = FLD $x11, 32 :: (load (s64) from %ir.arrayidx)
- ; RV32-NEXT: renamable $f11_d = FLD $x11, 40 :: (load (s64) from %ir.arrayidx1)
- ; RV32-NEXT: renamable $f12_d = FLD killed $x11, 48 :: (load (s64) from %ir.arrayidx2)
- ; RV32-NEXT: PseudoTAIL target-flags(riscv-call) @load_large_offset_double_1, implicit $x2, implicit $f10_d, implicit $f11_d, implicit $f12_d
- ; RV64-LABEL: name: load_large_offset_double
- ; RV64: liveins: $x10
- ; RV64-NEXT: {{ $}}
- ; RV64-NEXT: $x11 = ADDI $x10, 768
- ; RV64-NEXT: renamable $f10_d = FLD $x11, 32 :: (load (s64) from %ir.arrayidx)
- ; RV64-NEXT: renamable $f11_d = FLD $x11, 40 :: (load (s64) from %ir.arrayidx1)
- ; RV64-NEXT: renamable $f12_d = FLD killed $x11, 48 :: (load (s64) from %ir.arrayidx2)
- ; RV64-NEXT: PseudoTAIL target-flags(riscv-call) @load_large_offset_double_1, implicit $x2, implicit $f10_d, implicit $f11_d, implicit $f12_d
+ ; RV32C-LABEL: name: load_large_offset_double
+ ; RV32C: liveins: $x10
+ ; RV32C-NEXT: {{ $}}
+ ; RV32C-NEXT: $x11 = ADDI $x10, 768
+ ; RV32C-NEXT: renamable $f10_d = FLD $x11, 32 :: (load (s64) from %ir.arrayidx)
+ ; RV32C-NEXT: renamable $f11_d = FLD $x11, 40 :: (load (s64) from %ir.arrayidx1)
+ ; RV32C-NEXT: renamable $f12_d = FLD killed $x11, 48 :: (load (s64) from %ir.arrayidx2)
+ ; RV32C-NEXT: PseudoTAIL target-flags(riscv-call) @load_large_offset_double_1, implicit $x2, implicit $f10_d, implicit $f11_d, implicit $f12_d
+ ;
+ ; RV64C-LABEL: name: load_large_offset_double
+ ; RV64C: liveins: $x10
+ ; RV64C-NEXT: {{ $}}
+ ; RV64C-NEXT: $x11 = ADDI $x10, 768
+ ; RV64C-NEXT: renamable $f10_d = FLD $x11, 32 :: (load (s64) from %ir.arrayidx)
+ ; RV64C-NEXT: renamable $f11_d = FLD $x11, 40 :: (load (s64) from %ir.arrayidx1)
+ ; RV64C-NEXT: renamable $f12_d = FLD killed $x11, 48 :: (load (s64) from %ir.arrayidx2)
+ ; RV64C-NEXT: PseudoTAIL target-flags(riscv-call) @load_large_offset_double_1, implicit $x2, implicit $f10_d, implicit $f11_d, implicit $f12_d
+ ;
+ ; RV32ZCF-LABEL: name: load_large_offset_double
+ ; RV32ZCF: liveins: $x10
+ ; RV32ZCF-NEXT: {{ $}}
+ ; RV32ZCF-NEXT: renamable $f10_d = FLD renamable $x10, 800 :: (load (s64) from %ir.arrayidx)
+ ; RV32ZCF-NEXT: renamable $f11_d = FLD renamable $x10, 808 :: (load (s64) from %ir.arrayidx1)
+ ; RV32ZCF-NEXT: renamable $f12_d = FLD killed renamable $x10, 816 :: (load (s64) from %ir.arrayidx2)
+ ; RV32ZCF-NEXT: PseudoTAIL target-flags(riscv-call) @load_large_offset_double_1, implicit $x2, implicit $f10_d, implicit $f11_d, implicit $f12_d
+ ;
+ ; RV32ZCA-LABEL: name: load_large_offset_double
+ ; RV32ZCA: liveins: $x10
+ ; RV32ZCA-NEXT: {{ $}}
+ ; RV32ZCA-NEXT: renamable $f10_d = FLD renamable $x10, 800 :: (load (s64) from %ir.arrayidx)
+ ; RV32ZCA-NEXT: renamable $f11_d = FLD renamable $x10, 808 :: (load (s64) from %ir.arrayidx1)
+ ; RV32ZCA-NEXT: renamable $f12_d = FLD killed renamable $x10, 816 :: (load (s64) from %ir.arrayidx2)
+ ; RV32ZCA-NEXT: PseudoTAIL target-flags(riscv-call) @load_large_offset_double_1, implicit $x2, implicit $f10_d, implicit $f11_d, implicit $f12_d
+ ;
+ ; RV64ZCA-LABEL: name: load_large_offset_double
+ ; RV64ZCA: liveins: $x10
+ ; RV64ZCA-NEXT: {{ $}}
+ ; RV64ZCA-NEXT: renamable $f10_d = FLD renamable $x10, 800 :: (load (s64) from %ir.arrayidx)
+ ; RV64ZCA-NEXT: renamable $f11_d = FLD renamable $x10, 808 :: (load (s64) from %ir.arrayidx1)
+ ; RV64ZCA-NEXT: renamable $f12_d = FLD killed renamable $x10, 816 :: (load (s64) from %ir.arrayidx2)
+ ; RV64ZCA-NEXT: PseudoTAIL target-flags(riscv-call) @load_large_offset_double_1, implicit $x2, implicit $f10_d, implicit $f11_d, implicit $f12_d
renamable $f10_d = FLD renamable $x10, 800 :: (load (s64) from %ir.arrayidx)
renamable $f11_d = FLD renamable $x10, 808 :: (load (s64) from %ir.arrayidx1)
renamable $f12_d = FLD killed renamable $x10, 816 :: (load (s64) from %ir.arrayidx2)
@@ -801,6 +1033,7 @@ body: |
; RV32-NEXT: {{ $}}
; RV32-NEXT: SW $x0, killed renamable $x10, 0 :: (store (s32) into %ir.a)
; RV32-NEXT: PseudoRET
+ ;
; RV64-LABEL: name: store_common_value_no_opt
; RV64: liveins: $x10
; RV64-NEXT: {{ $}}
@@ -822,6 +1055,7 @@ body: |
; RV32-NEXT: {{ $}}
; RV32-NEXT: FSW killed renamable $f16_f, killed renamable $x10, 0 :: (store (s32) into %ir.a)
; RV32-NEXT: PseudoRET
+ ;
; RV64-LABEL: name: store_common_value_float_no_opt
; RV64: liveins: $x10, $f16_f
; RV64-NEXT: {{ $}}
@@ -843,6 +1077,7 @@ body: |
; RV32-NEXT: {{ $}}
; RV32-NEXT: FSD killed renamable $f16_d, killed renamable $x10, 0 :: (store (s64) into %ir.a)
; RV32-NEXT: PseudoRET
+ ;
; RV64-LABEL: name: store_common_value_double_no_opt
; RV64: liveins: $x10, $f16_d
; RV64-NEXT: {{ $}}
@@ -865,6 +1100,7 @@ body: |
; RV32-NEXT: renamable $x10 = ADDI $x0, 1
; RV32-NEXT: SW killed renamable $x10, killed renamable $x16, 0 :: (volatile store (s32) into %ir.p)
; RV32-NEXT: PseudoRET
+ ;
; RV64-LABEL: name: store_common_ptr_no_opt
; RV64: liveins: $x16
; RV64-NEXT: {{ $}}
@@ -888,6 +1124,7 @@ body: |
; RV32-NEXT: {{ $}}
; RV32-NEXT: FSW killed renamable $f10_f, killed renamable $x16, 0 :: (volatile store (s32) into %ir.p)
; RV32-NEXT: PseudoRET
+ ;
; RV64-LABEL: name: store_common_ptr_float_no_opt
; RV64: liveins: $x16, $f10_f
; RV64-NEXT: {{ $}}
@@ -909,6 +1146,7 @@ body: |
; RV32-NEXT: {{ $}}
; RV32-NEXT: FSD killed renamable $f10_d, killed renamable $x16, 0 :: (volatile store (s64) into %ir.p)
; RV32-NEXT: PseudoRET
+ ;
; RV64-LABEL: name: store_common_ptr_double_no_opt
; RV64: liveins: $x16, $f10_d
; RV64-NEXT: {{ $}}
@@ -930,6 +1168,7 @@ body: |
; RV32-NEXT: {{ $}}
; RV32-NEXT: dead renamable $x10 = LW killed renamable $x16, 0 :: (volatile load (s32) from %ir.p)
; RV32-NEXT: PseudoRET
+ ;
; RV64-LABEL: name: load_common_ptr_no_opt
; RV64: liveins: $x16
; RV64-NEXT: {{ $}}
@@ -951,6 +1190,7 @@ body: |
; RV32-NEXT: {{ $}}
; RV32-NEXT: renamable $f10_f = FLW killed renamable $x16, 0 :: (load (s32) from %ir.g)
; RV32-NEXT: PseudoRET implicit $f10_f
+ ;
; RV64-LABEL: name: load_common_ptr_float_no_opt
; RV64: liveins: $x16
; RV64-NEXT: {{ $}}
@@ -972,6 +1212,7 @@ body: |
; RV32-NEXT: {{ $}}
; RV32-NEXT: renamable $f10_d = FLD killed renamable $x16, 0 :: (load (s64) from %ir.g)
; RV32-NEXT: PseudoRET implicit $f10_d
+ ;
; RV64-LABEL: name: load_common_ptr_double_no_opt
; RV64: liveins: $x16
; RV64-NEXT: {{ $}}
@@ -996,6 +1237,7 @@ body: |
; RV32-NEXT: renamable $x11 = ADDI $x0, 3
; RV32-NEXT: SW killed renamable $x11, killed renamable $x10, 404 :: (volatile store (s32) into %ir.1)
; RV32-NEXT: PseudoRET
+ ;
; RV64-LABEL: name: store_large_offset_no_opt
; RV64: liveins: $x10
; RV64-NEXT: {{ $}}
@@ -1024,6 +1266,7 @@ body: |
; RV32-NEXT: FSW killed renamable $f10_f, renamable $x10, 400 :: (volatile store (s32) into %ir.0)
; RV32-NEXT: FSW killed renamable $f11_f, killed renamable $x10, 404 :: (volatile store (s32) into %ir.1)
; RV32-NEXT: PseudoRET
+ ;
; RV64-LABEL: name: store_large_offset_float_no_opt
; RV64: liveins: $x10, $f10_f, $f11_f
; RV64-NEXT: {{ $}}
@@ -1048,6 +1291,7 @@ body: |
; RV32-NEXT: FSD killed renamable $f10_d, renamable $x10, 800 :: (volatile store (s64) into %ir.0)
; RV32-NEXT: FSD killed renamable $f11_d, killed renamable $x10, 808 :: (volatile store (s64) into %ir.1)
; RV32-NEXT: PseudoRET
+ ;
; RV64-LABEL: name: store_large_offset_double_no_opt
; RV64: liveins: $x10, $f10_d, $f11_d
; RV64-NEXT: {{ $}}
@@ -1072,6 +1316,7 @@ body: |
; RV32-NEXT: dead renamable $x11 = LW renamable $x10, 400 :: (volatile load (s32) from %ir.0)
; RV32-NEXT: dead renamable $x10 = LW killed renamable $x10, 404 :: (volatile load (s32) from %ir.1)
; RV32-NEXT: PseudoRET
+ ;
; RV64-LABEL: name: load_large_offset_no_opt
; RV64: liveins: $x10
; RV64-NEXT: {{ $}}
@@ -1096,6 +1341,7 @@ body: |
; RV32-NEXT: renamable $f10_f = FLW renamable $x10, 400 :: (load (s32) from %ir.arrayidx)
; RV32-NEXT: renamable $f11_f = FLW killed renamable $x10, 404 :: (load (s32) from %ir.arrayidx1)
; RV32-NEXT: PseudoRET implicit $f10_f, implicit $f11_f
+ ;
; RV64-LABEL: name: load_large_offset_float_no_opt
; RV64: liveins: $x10
; RV64-NEXT: {{ $}}
@@ -1120,6 +1366,7 @@ body: |
; RV32-NEXT: renamable $f10_d = FLD renamable $x10, 800 :: (load (s64) from %ir.arrayidx)
; RV32-NEXT: renamable $f11_d = FLD killed renamable $x10, 808 :: (load (s64) from %ir.arrayidx1)
; RV32-NEXT: PseudoRET implicit $f10_d, implicit $f11_d
+ ;
; RV64-LABEL: name: load_large_offset_double_no_opt
; RV64: liveins: $x10
; RV64-NEXT: {{ $}}
diff --git a/llvm/test/CodeGen/RISCV/pr51206.ll b/llvm/test/CodeGen/RISCV/pr51206.ll
index f54031af0de5..8aa145f6ac5e 100644
--- a/llvm/test/CodeGen/RISCV/pr51206.ll
+++ b/llvm/test/CodeGen/RISCV/pr51206.ll
@@ -27,16 +27,12 @@ define signext i32 @wobble() nounwind {
; CHECK-NEXT: lui a2, %hi(global.3)
; CHECK-NEXT: li a3, 5
; CHECK-NEXT: sw a1, %lo(global.3)(a2)
-; CHECK-NEXT: bltu a0, a3, .LBB0_2
-; CHECK-NEXT: # %bb.1: # %bb10
-; CHECK-NEXT: addi sp, sp, -16
-; CHECK-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
-; CHECK-NEXT: call quux
-; CHECK-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
-; CHECK-NEXT: addi sp, sp, 16
-; CHECK-NEXT: .LBB0_2: # %bb12
+; CHECK-NEXT: bgeu a0, a3, .LBB0_2
+; CHECK-NEXT: # %bb.1: # %bb12
; CHECK-NEXT: li a0, 0
; CHECK-NEXT: ret
+; CHECK-NEXT: .LBB0_2: # %bb10
+; CHECK-NEXT: tail quux
bb:
%tmp = load i8, ptr @global, align 1
%tmp1 = zext i8 %tmp to i32
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-masked-gather.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-masked-gather.ll
index 1724b48dd6be..a09ab3ee0252 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-masked-gather.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-masked-gather.ll
@@ -1,12 +1,12 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=riscv32 -mattr=+m,+d,+zfh,+zvfh,+v -target-abi=ilp32d \
-; RUN: -riscv-enable-sink-fold -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,RV32,RV32V
+; RUN: -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,RV32,RV32V
; RUN: llc -mtriple=riscv64 -mattr=+m,+d,+zfh,+zvfh,+v -target-abi=lp64d \
-; RUN: -riscv-enable-sink-fold -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,RV64,RV64V
+; RUN: -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,RV64,RV64V
; RUN: llc -mtriple=riscv32 -mattr=+m,+d,+zfh,+zvfh,+zve32f,+zvl128b -target-abi=ilp32d \
-; RUN: -riscv-enable-sink-fold -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,RV32,RV32ZVE32F
+; RUN: -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,RV32,RV32ZVE32F
; RUN: llc -mtriple=riscv64 -mattr=+m,+d,+zfh,+zvfh,+zve32f,+zvl128b -target-abi=lp64d \
-; RUN: -riscv-enable-sink-fold -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,RV64,RV64ZVE32F
+; RUN: -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,RV64,RV64ZVE32F
declare <1 x i8> @llvm.masked.gather.v1i8.v1p0(<1 x ptr>, i32, <1 x i1>, <1 x i8>)
@@ -15086,31 +15086,27 @@ define <32 x i64> @mgather_strided_split(ptr %base) {
ret <32 x i64> %x
}
-; FIXME: This is a miscompile triggered by the mgather ->
-; riscv.masked.strided.load combine. In order for it to trigger we need either a
-; strided gather that RISCVGatherScatterLowering doesn't pick up, or a new
-; strided gather generated by the widening sew combine.
define <4 x i32> @masked_gather_widen_sew_negative_stride(ptr %base) {
; RV32V-LABEL: masked_gather_widen_sew_negative_stride:
; RV32V: # %bb.0:
-; RV32V-NEXT: addi a0, a0, -128
-; RV32V-NEXT: li a1, -128
+; RV32V-NEXT: addi a0, a0, 136
+; RV32V-NEXT: li a1, -136
; RV32V-NEXT: vsetivli zero, 2, e64, m1, ta, ma
; RV32V-NEXT: vlse64.v v8, (a0), a1
; RV32V-NEXT: ret
;
; RV64V-LABEL: masked_gather_widen_sew_negative_stride:
; RV64V: # %bb.0:
-; RV64V-NEXT: addi a0, a0, -128
-; RV64V-NEXT: li a1, -128
+; RV64V-NEXT: addi a0, a0, 136
+; RV64V-NEXT: li a1, -136
; RV64V-NEXT: vsetivli zero, 2, e64, m1, ta, ma
; RV64V-NEXT: vlse64.v v8, (a0), a1
; RV64V-NEXT: ret
;
; RV32ZVE32F-LABEL: masked_gather_widen_sew_negative_stride:
; RV32ZVE32F: # %bb.0:
-; RV32ZVE32F-NEXT: lui a1, 16392
-; RV32ZVE32F-NEXT: addi a1, a1, 1152
+; RV32ZVE32F-NEXT: lui a1, 16393
+; RV32ZVE32F-NEXT: addi a1, a1, -888
; RV32ZVE32F-NEXT: vsetivli zero, 4, e32, m1, ta, ma
; RV32ZVE32F-NEXT: vmv.s.x v9, a1
; RV32ZVE32F-NEXT: vluxei8.v v8, (a0), v9
@@ -15118,8 +15114,8 @@ define <4 x i32> @masked_gather_widen_sew_negative_stride(ptr %base) {
;
; RV64ZVE32F-LABEL: masked_gather_widen_sew_negative_stride:
; RV64ZVE32F: # %bb.0:
-; RV64ZVE32F-NEXT: addi a1, a0, 128
-; RV64ZVE32F-NEXT: lw a2, 132(a0)
+; RV64ZVE32F-NEXT: addi a1, a0, 136
+; RV64ZVE32F-NEXT: lw a2, 140(a0)
; RV64ZVE32F-NEXT: lw a3, 0(a0)
; RV64ZVE32F-NEXT: lw a0, 4(a0)
; RV64ZVE32F-NEXT: vsetivli zero, 4, e32, m1, ta, ma
@@ -15128,7 +15124,7 @@ define <4 x i32> @masked_gather_widen_sew_negative_stride(ptr %base) {
; RV64ZVE32F-NEXT: vslide1down.vx v8, v8, a3
; RV64ZVE32F-NEXT: vslide1down.vx v8, v8, a0
; RV64ZVE32F-NEXT: ret
- %ptrs = getelementptr i32, ptr %base, <4 x i64> <i64 32, i64 33, i64 0, i64 1>
+ %ptrs = getelementptr i32, ptr %base, <4 x i64> <i64 34, i64 35, i64 0, i64 1>
%x = call <4 x i32> @llvm.masked.gather.v4i32.v32p0(<4 x ptr> %ptrs, i32 8, <4 x i1> shufflevector(<4 x i1> insertelement(<4 x i1> poison, i1 true, i32 0), <4 x i1> poison, <4 x i32> zeroinitializer), <4 x i32> poison)
ret <4 x i32> %x
}
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vsadd-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vsadd-vp.ll
new file mode 100644
index 000000000000..6c5dd0403dff
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vsadd-vp.ll
@@ -0,0 +1,1701 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=riscv32 -mattr=+v -verify-machineinstrs < %s \
+; RUN: | FileCheck %s --check-prefixes=CHECK,RV32
+; RUN: llc -mtriple=riscv64 -mattr=+v -verify-machineinstrs < %s \
+; RUN: | FileCheck %s --check-prefixes=CHECK,RV64
+
+declare <8 x i7> @llvm.vp.sadd.sat.v8i7(<8 x i7>, <8 x i7>, <8 x i1>, i32)
+
+define <8 x i7> @vsadd_vv_v8i7(<8 x i7> %va, <8 x i7> %b, <8 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vsadd_vv_v8i7:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, ma
+; CHECK-NEXT: vadd.vv v9, v9, v9
+; CHECK-NEXT: vsra.vi v9, v9, 1
+; CHECK-NEXT: vadd.vv v8, v8, v8
+; CHECK-NEXT: vsra.vi v8, v8, 1
+; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma
+; CHECK-NEXT: vadd.vv v8, v8, v9, v0.t
+; CHECK-NEXT: li a0, 63
+; CHECK-NEXT: vmin.vx v8, v8, a0, v0.t
+; CHECK-NEXT: li a0, 192
+; CHECK-NEXT: vmax.vx v8, v8, a0, v0.t
+; CHECK-NEXT: ret
+ %v = call <8 x i7> @llvm.vp.sadd.sat.v8i7(<8 x i7> %va, <8 x i7> %b, <8 x i1> %m, i32 %evl)
+ ret <8 x i7> %v
+}
+
+declare <2 x i8> @llvm.vp.sadd.sat.v2i8(<2 x i8>, <2 x i8>, <2 x i1>, i32)
+
+define <2 x i8> @vsadd_vv_v2i8(<2 x i8> %va, <2 x i8> %b, <2 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vsadd_vv_v2i8:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma
+; CHECK-NEXT: vsadd.vv v8, v8, v9, v0.t
+; CHECK-NEXT: ret
+ %v = call <2 x i8> @llvm.vp.sadd.sat.v2i8(<2 x i8> %va, <2 x i8> %b, <2 x i1> %m, i32 %evl)
+ ret <2 x i8> %v
+}
+
+define <2 x i8> @vsadd_vv_v2i8_unmasked(<2 x i8> %va, <2 x i8> %b, i32 zeroext %evl) {
+; CHECK-LABEL: vsadd_vv_v2i8_unmasked:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma
+; CHECK-NEXT: vsadd.vv v8, v8, v9
+; CHECK-NEXT: ret
+ %head = insertelement <2 x i1> poison, i1 true, i32 0
+ %m = shufflevector <2 x i1> %head, <2 x i1> poison, <2 x i32> zeroinitializer
+ %v = call <2 x i8> @llvm.vp.sadd.sat.v2i8(<2 x i8> %va, <2 x i8> %b, <2 x i1> %m, i32 %evl)
+ ret <2 x i8> %v
+}
+
+define <2 x i8> @vsadd_vx_v2i8(<2 x i8> %va, i8 %b, <2 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vsadd_vx_v2i8:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
+; CHECK-NEXT: vsadd.vx v8, v8, a0, v0.t
+; CHECK-NEXT: ret
+ %elt.head = insertelement <2 x i8> poison, i8 %b, i32 0
+ %vb = shufflevector <2 x i8> %elt.head, <2 x i8> poison, <2 x i32> zeroinitializer
+ %v = call <2 x i8> @llvm.vp.sadd.sat.v2i8(<2 x i8> %va, <2 x i8> %vb, <2 x i1> %m, i32 %evl)
+ ret <2 x i8> %v
+}
+
+define <2 x i8> @vsadd_vx_v2i8_unmasked(<2 x i8> %va, i8 %b, i32 zeroext %evl) {
+; CHECK-LABEL: vsadd_vx_v2i8_unmasked:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
+; CHECK-NEXT: vsadd.vx v8, v8, a0
+; CHECK-NEXT: ret
+ %elt.head = insertelement <2 x i8> poison, i8 %b, i32 0
+ %vb = shufflevector <2 x i8> %elt.head, <2 x i8> poison, <2 x i32> zeroinitializer
+ %head = insertelement <2 x i1> poison, i1 true, i32 0
+ %m = shufflevector <2 x i1> %head, <2 x i1> poison, <2 x i32> zeroinitializer
+ %v = call <2 x i8> @llvm.vp.sadd.sat.v2i8(<2 x i8> %va, <2 x i8> %vb, <2 x i1> %m, i32 %evl)
+ ret <2 x i8> %v
+}
+
+define <2 x i8> @vsadd_vi_v2i8(<2 x i8> %va, <2 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vsadd_vi_v2i8:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma
+; CHECK-NEXT: vsadd.vi v8, v8, -1, v0.t
+; CHECK-NEXT: ret
+ %elt.head = insertelement <2 x i8> poison, i8 -1, i32 0
+ %vb = shufflevector <2 x i8> %elt.head, <2 x i8> poison, <2 x i32> zeroinitializer
+ %v = call <2 x i8> @llvm.vp.sadd.sat.v2i8(<2 x i8> %va, <2 x i8> %vb, <2 x i1> %m, i32 %evl)
+ ret <2 x i8> %v
+}
+
+define <2 x i8> @vsadd_vi_v2i8_unmasked(<2 x i8> %va, i32 zeroext %evl) {
+; CHECK-LABEL: vsadd_vi_v2i8_unmasked:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma
+; CHECK-NEXT: vsadd.vi v8, v8, -1
+; CHECK-NEXT: ret
+ %elt.head = insertelement <2 x i8> poison, i8 -1, i32 0
+ %vb = shufflevector <2 x i8> %elt.head, <2 x i8> poison, <2 x i32> zeroinitializer
+ %head = insertelement <2 x i1> poison, i1 true, i32 0
+ %m = shufflevector <2 x i1> %head, <2 x i1> poison, <2 x i32> zeroinitializer
+ %v = call <2 x i8> @llvm.vp.sadd.sat.v2i8(<2 x i8> %va, <2 x i8> %vb, <2 x i1> %m, i32 %evl)
+ ret <2 x i8> %v
+}
+
+declare <4 x i8> @llvm.vp.sadd.sat.v4i8(<4 x i8>, <4 x i8>, <4 x i1>, i32)
+
+define <4 x i8> @vsadd_vv_v4i8(<4 x i8> %va, <4 x i8> %b, <4 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vsadd_vv_v4i8:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma
+; CHECK-NEXT: vsadd.vv v8, v8, v9, v0.t
+; CHECK-NEXT: ret
+ %v = call <4 x i8> @llvm.vp.sadd.sat.v4i8(<4 x i8> %va, <4 x i8> %b, <4 x i1> %m, i32 %evl)
+ ret <4 x i8> %v
+}
+
+define <4 x i8> @vsadd_vv_v4i8_unmasked(<4 x i8> %va, <4 x i8> %b, i32 zeroext %evl) {
+; CHECK-LABEL: vsadd_vv_v4i8_unmasked:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma
+; CHECK-NEXT: vsadd.vv v8, v8, v9
+; CHECK-NEXT: ret
+ %head = insertelement <4 x i1> poison, i1 true, i32 0
+ %m = shufflevector <4 x i1> %head, <4 x i1> poison, <4 x i32> zeroinitializer
+ %v = call <4 x i8> @llvm.vp.sadd.sat.v4i8(<4 x i8> %va, <4 x i8> %b, <4 x i1> %m, i32 %evl)
+ ret <4 x i8> %v
+}
+
+define <4 x i8> @vsadd_vx_v4i8(<4 x i8> %va, i8 %b, <4 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vsadd_vx_v4i8:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma
+; CHECK-NEXT: vsadd.vx v8, v8, a0, v0.t
+; CHECK-NEXT: ret
+ %elt.head = insertelement <4 x i8> poison, i8 %b, i32 0
+ %vb = shufflevector <4 x i8> %elt.head, <4 x i8> poison, <4 x i32> zeroinitializer
+ %v = call <4 x i8> @llvm.vp.sadd.sat.v4i8(<4 x i8> %va, <4 x i8> %vb, <4 x i1> %m, i32 %evl)
+ ret <4 x i8> %v
+}
+
+define <4 x i8> @vsadd_vx_v4i8_commute(<4 x i8> %va, i8 %b, <4 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vsadd_vx_v4i8_commute:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma
+; CHECK-NEXT: vsadd.vx v8, v8, a0, v0.t
+; CHECK-NEXT: ret
+ %elt.head = insertelement <4 x i8> poison, i8 %b, i32 0
+ %vb = shufflevector <4 x i8> %elt.head, <4 x i8> poison, <4 x i32> zeroinitializer
+ %v = call <4 x i8> @llvm.vp.sadd.sat.v4i8(<4 x i8> %vb, <4 x i8> %va, <4 x i1> %m, i32 %evl)
+ ret <4 x i8> %v
+}
+
+define <4 x i8> @vsadd_vx_v4i8_unmasked(<4 x i8> %va, i8 %b, i32 zeroext %evl) {
+; CHECK-LABEL: vsadd_vx_v4i8_unmasked:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma
+; CHECK-NEXT: vsadd.vx v8, v8, a0
+; CHECK-NEXT: ret
+ %elt.head = insertelement <4 x i8> poison, i8 %b, i32 0
+ %vb = shufflevector <4 x i8> %elt.head, <4 x i8> poison, <4 x i32> zeroinitializer
+ %head = insertelement <4 x i1> poison, i1 true, i32 0
+ %m = shufflevector <4 x i1> %head, <4 x i1> poison, <4 x i32> zeroinitializer
+ %v = call <4 x i8> @llvm.vp.sadd.sat.v4i8(<4 x i8> %va, <4 x i8> %vb, <4 x i1> %m, i32 %evl)
+ ret <4 x i8> %v
+}
+
+define <4 x i8> @vsadd_vi_v4i8(<4 x i8> %va, <4 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vsadd_vi_v4i8:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma
+; CHECK-NEXT: vsadd.vi v8, v8, -1, v0.t
+; CHECK-NEXT: ret
+ %elt.head = insertelement <4 x i8> poison, i8 -1, i32 0
+ %vb = shufflevector <4 x i8> %elt.head, <4 x i8> poison, <4 x i32> zeroinitializer
+ %v = call <4 x i8> @llvm.vp.sadd.sat.v4i8(<4 x i8> %va, <4 x i8> %vb, <4 x i1> %m, i32 %evl)
+ ret <4 x i8> %v
+}
+
+define <4 x i8> @vsadd_vi_v4i8_unmasked(<4 x i8> %va, i32 zeroext %evl) {
+; CHECK-LABEL: vsadd_vi_v4i8_unmasked:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma
+; CHECK-NEXT: vsadd.vi v8, v8, -1
+; CHECK-NEXT: ret
+ %elt.head = insertelement <4 x i8> poison, i8 -1, i32 0
+ %vb = shufflevector <4 x i8> %elt.head, <4 x i8> poison, <4 x i32> zeroinitializer
+ %head = insertelement <4 x i1> poison, i1 true, i32 0
+ %m = shufflevector <4 x i1> %head, <4 x i1> poison, <4 x i32> zeroinitializer
+ %v = call <4 x i8> @llvm.vp.sadd.sat.v4i8(<4 x i8> %va, <4 x i8> %vb, <4 x i1> %m, i32 %evl)
+ ret <4 x i8> %v
+}
+
+declare <5 x i8> @llvm.vp.sadd.sat.v5i8(<5 x i8>, <5 x i8>, <5 x i1>, i32)
+
+define <5 x i8> @vsadd_vv_v5i8(<5 x i8> %va, <5 x i8> %b, <5 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vsadd_vv_v5i8:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma
+; CHECK-NEXT: vsadd.vv v8, v8, v9, v0.t
+; CHECK-NEXT: ret
+ %v = call <5 x i8> @llvm.vp.sadd.sat.v5i8(<5 x i8> %va, <5 x i8> %b, <5 x i1> %m, i32 %evl)
+ ret <5 x i8> %v
+}
+
+define <5 x i8> @vsadd_vv_v5i8_unmasked(<5 x i8> %va, <5 x i8> %b, i32 zeroext %evl) {
+; CHECK-LABEL: vsadd_vv_v5i8_unmasked:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma
+; CHECK-NEXT: vsadd.vv v8, v8, v9
+; CHECK-NEXT: ret
+ %head = insertelement <5 x i1> poison, i1 true, i32 0
+ %m = shufflevector <5 x i1> %head, <5 x i1> poison, <5 x i32> zeroinitializer
+ %v = call <5 x i8> @llvm.vp.sadd.sat.v5i8(<5 x i8> %va, <5 x i8> %b, <5 x i1> %m, i32 %evl)
+ ret <5 x i8> %v
+}
+
+define <5 x i8> @vsadd_vx_v5i8(<5 x i8> %va, i8 %b, <5 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vsadd_vx_v5i8:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma
+; CHECK-NEXT: vsadd.vx v8, v8, a0, v0.t
+; CHECK-NEXT: ret
+ %elt.head = insertelement <5 x i8> poison, i8 %b, i32 0
+ %vb = shufflevector <5 x i8> %elt.head, <5 x i8> poison, <5 x i32> zeroinitializer
+ %v = call <5 x i8> @llvm.vp.sadd.sat.v5i8(<5 x i8> %va, <5 x i8> %vb, <5 x i1> %m, i32 %evl)
+ ret <5 x i8> %v
+}
+
+define <5 x i8> @vsadd_vx_v5i8_unmasked(<5 x i8> %va, i8 %b, i32 zeroext %evl) {
+; CHECK-LABEL: vsadd_vx_v5i8_unmasked:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma
+; CHECK-NEXT: vsadd.vx v8, v8, a0
+; CHECK-NEXT: ret
+ %elt.head = insertelement <5 x i8> poison, i8 %b, i32 0
+ %vb = shufflevector <5 x i8> %elt.head, <5 x i8> poison, <5 x i32> zeroinitializer
+ %head = insertelement <5 x i1> poison, i1 true, i32 0
+ %m = shufflevector <5 x i1> %head, <5 x i1> poison, <5 x i32> zeroinitializer
+ %v = call <5 x i8> @llvm.vp.sadd.sat.v5i8(<5 x i8> %va, <5 x i8> %vb, <5 x i1> %m, i32 %evl)
+ ret <5 x i8> %v
+}
+
+define <5 x i8> @vsadd_vi_v5i8(<5 x i8> %va, <5 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vsadd_vi_v5i8:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma
+; CHECK-NEXT: vsadd.vi v8, v8, -1, v0.t
+; CHECK-NEXT: ret
+ %elt.head = insertelement <5 x i8> poison, i8 -1, i32 0
+ %vb = shufflevector <5 x i8> %elt.head, <5 x i8> poison, <5 x i32> zeroinitializer
+ %v = call <5 x i8> @llvm.vp.sadd.sat.v5i8(<5 x i8> %va, <5 x i8> %vb, <5 x i1> %m, i32 %evl)
+ ret <5 x i8> %v
+}
+
+define <5 x i8> @vsadd_vi_v5i8_unmasked(<5 x i8> %va, i32 zeroext %evl) {
+; CHECK-LABEL: vsadd_vi_v5i8_unmasked:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma
+; CHECK-NEXT: vsadd.vi v8, v8, -1
+; CHECK-NEXT: ret
+ %elt.head = insertelement <5 x i8> poison, i8 -1, i32 0
+ %vb = shufflevector <5 x i8> %elt.head, <5 x i8> poison, <5 x i32> zeroinitializer
+ %head = insertelement <5 x i1> poison, i1 true, i32 0
+ %m = shufflevector <5 x i1> %head, <5 x i1> poison, <5 x i32> zeroinitializer
+ %v = call <5 x i8> @llvm.vp.sadd.sat.v5i8(<5 x i8> %va, <5 x i8> %vb, <5 x i1> %m, i32 %evl)
+ ret <5 x i8> %v
+}
+
+declare <8 x i8> @llvm.vp.sadd.sat.v8i8(<8 x i8>, <8 x i8>, <8 x i1>, i32)
+
+define <8 x i8> @vsadd_vv_v8i8(<8 x i8> %va, <8 x i8> %b, <8 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vsadd_vv_v8i8:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma
+; CHECK-NEXT: vsadd.vv v8, v8, v9, v0.t
+; CHECK-NEXT: ret
+ %v = call <8 x i8> @llvm.vp.sadd.sat.v8i8(<8 x i8> %va, <8 x i8> %b, <8 x i1> %m, i32 %evl)
+ ret <8 x i8> %v
+}
+
+define <8 x i8> @vsadd_vv_v8i8_unmasked(<8 x i8> %va, <8 x i8> %b, i32 zeroext %evl) {
+; CHECK-LABEL: vsadd_vv_v8i8_unmasked:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma
+; CHECK-NEXT: vsadd.vv v8, v8, v9
+; CHECK-NEXT: ret
+ %head = insertelement <8 x i1> poison, i1 true, i32 0
+ %m = shufflevector <8 x i1> %head, <8 x i1> poison, <8 x i32> zeroinitializer
+ %v = call <8 x i8> @llvm.vp.sadd.sat.v8i8(<8 x i8> %va, <8 x i8> %b, <8 x i1> %m, i32 %evl)
+ ret <8 x i8> %v
+}
+
+define <8 x i8> @vsadd_vx_v8i8(<8 x i8> %va, i8 %b, <8 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vsadd_vx_v8i8:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma
+; CHECK-NEXT: vsadd.vx v8, v8, a0, v0.t
+; CHECK-NEXT: ret
+ %elt.head = insertelement <8 x i8> poison, i8 %b, i32 0
+ %vb = shufflevector <8 x i8> %elt.head, <8 x i8> poison, <8 x i32> zeroinitializer
+ %v = call <8 x i8> @llvm.vp.sadd.sat.v8i8(<8 x i8> %va, <8 x i8> %vb, <8 x i1> %m, i32 %evl)
+ ret <8 x i8> %v
+}
+
+define <8 x i8> @vsadd_vx_v8i8_unmasked(<8 x i8> %va, i8 %b, i32 zeroext %evl) {
+; CHECK-LABEL: vsadd_vx_v8i8_unmasked:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma
+; CHECK-NEXT: vsadd.vx v8, v8, a0
+; CHECK-NEXT: ret
+ %elt.head = insertelement <8 x i8> poison, i8 %b, i32 0
+ %vb = shufflevector <8 x i8> %elt.head, <8 x i8> poison, <8 x i32> zeroinitializer
+ %head = insertelement <8 x i1> poison, i1 true, i32 0
+ %m = shufflevector <8 x i1> %head, <8 x i1> poison, <8 x i32> zeroinitializer
+ %v = call <8 x i8> @llvm.vp.sadd.sat.v8i8(<8 x i8> %va, <8 x i8> %vb, <8 x i1> %m, i32 %evl)
+ ret <8 x i8> %v
+}
+
+define <8 x i8> @vsadd_vi_v8i8(<8 x i8> %va, <8 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vsadd_vi_v8i8:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma
+; CHECK-NEXT: vsadd.vi v8, v8, -1, v0.t
+; CHECK-NEXT: ret
+ %elt.head = insertelement <8 x i8> poison, i8 -1, i32 0
+ %vb = shufflevector <8 x i8> %elt.head, <8 x i8> poison, <8 x i32> zeroinitializer
+ %v = call <8 x i8> @llvm.vp.sadd.sat.v8i8(<8 x i8> %va, <8 x i8> %vb, <8 x i1> %m, i32 %evl)
+ ret <8 x i8> %v
+}
+
+define <8 x i8> @vsadd_vi_v8i8_unmasked(<8 x i8> %va, i32 zeroext %evl) {
+; CHECK-LABEL: vsadd_vi_v8i8_unmasked:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma
+; CHECK-NEXT: vsadd.vi v8, v8, -1
+; CHECK-NEXT: ret
+ %elt.head = insertelement <8 x i8> poison, i8 -1, i32 0
+ %vb = shufflevector <8 x i8> %elt.head, <8 x i8> poison, <8 x i32> zeroinitializer
+ %head = insertelement <8 x i1> poison, i1 true, i32 0
+ %m = shufflevector <8 x i1> %head, <8 x i1> poison, <8 x i32> zeroinitializer
+ %v = call <8 x i8> @llvm.vp.sadd.sat.v8i8(<8 x i8> %va, <8 x i8> %vb, <8 x i1> %m, i32 %evl)
+ ret <8 x i8> %v
+}
+
+declare <16 x i8> @llvm.vp.sadd.sat.v16i8(<16 x i8>, <16 x i8>, <16 x i1>, i32)
+
+define <16 x i8> @vsadd_vv_v16i8(<16 x i8> %va, <16 x i8> %b, <16 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vsadd_vv_v16i8:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma
+; CHECK-NEXT: vsadd.vv v8, v8, v9, v0.t
+; CHECK-NEXT: ret
+ %v = call <16 x i8> @llvm.vp.sadd.sat.v16i8(<16 x i8> %va, <16 x i8> %b, <16 x i1> %m, i32 %evl)
+ ret <16 x i8> %v
+}
+
+define <16 x i8> @vsadd_vv_v16i8_unmasked(<16 x i8> %va, <16 x i8> %b, i32 zeroext %evl) {
+; CHECK-LABEL: vsadd_vv_v16i8_unmasked:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma
+; CHECK-NEXT: vsadd.vv v8, v8, v9
+; CHECK-NEXT: ret
+ %head = insertelement <16 x i1> poison, i1 true, i32 0
+ %m = shufflevector <16 x i1> %head, <16 x i1> poison, <16 x i32> zeroinitializer
+ %v = call <16 x i8> @llvm.vp.sadd.sat.v16i8(<16 x i8> %va, <16 x i8> %b, <16 x i1> %m, i32 %evl)
+ ret <16 x i8> %v
+}
+
+define <16 x i8> @vsadd_vx_v16i8(<16 x i8> %va, i8 %b, <16 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vsadd_vx_v16i8:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma
+; CHECK-NEXT: vsadd.vx v8, v8, a0, v0.t
+; CHECK-NEXT: ret
+ %elt.head = insertelement <16 x i8> poison, i8 %b, i32 0
+ %vb = shufflevector <16 x i8> %elt.head, <16 x i8> poison, <16 x i32> zeroinitializer
+ %v = call <16 x i8> @llvm.vp.sadd.sat.v16i8(<16 x i8> %va, <16 x i8> %vb, <16 x i1> %m, i32 %evl)
+ ret <16 x i8> %v
+}
+
+define <16 x i8> @vsadd_vx_v16i8_unmasked(<16 x i8> %va, i8 %b, i32 zeroext %evl) {
+; CHECK-LABEL: vsadd_vx_v16i8_unmasked:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma
+; CHECK-NEXT: vsadd.vx v8, v8, a0
+; CHECK-NEXT: ret
+ %elt.head = insertelement <16 x i8> poison, i8 %b, i32 0
+ %vb = shufflevector <16 x i8> %elt.head, <16 x i8> poison, <16 x i32> zeroinitializer
+ %head = insertelement <16 x i1> poison, i1 true, i32 0
+ %m = shufflevector <16 x i1> %head, <16 x i1> poison, <16 x i32> zeroinitializer
+ %v = call <16 x i8> @llvm.vp.sadd.sat.v16i8(<16 x i8> %va, <16 x i8> %vb, <16 x i1> %m, i32 %evl)
+ ret <16 x i8> %v
+}
+
+define <16 x i8> @vsadd_vi_v16i8(<16 x i8> %va, <16 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vsadd_vi_v16i8:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma
+; CHECK-NEXT: vsadd.vi v8, v8, -1, v0.t
+; CHECK-NEXT: ret
+ %elt.head = insertelement <16 x i8> poison, i8 -1, i32 0
+ %vb = shufflevector <16 x i8> %elt.head, <16 x i8> poison, <16 x i32> zeroinitializer
+ %v = call <16 x i8> @llvm.vp.sadd.sat.v16i8(<16 x i8> %va, <16 x i8> %vb, <16 x i1> %m, i32 %evl)
+ ret <16 x i8> %v
+}
+
+define <16 x i8> @vsadd_vi_v16i8_unmasked(<16 x i8> %va, i32 zeroext %evl) {
+; CHECK-LABEL: vsadd_vi_v16i8_unmasked:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma
+; CHECK-NEXT: vsadd.vi v8, v8, -1
+; CHECK-NEXT: ret
+ %elt.head = insertelement <16 x i8> poison, i8 -1, i32 0
+ %vb = shufflevector <16 x i8> %elt.head, <16 x i8> poison, <16 x i32> zeroinitializer
+ %head = insertelement <16 x i1> poison, i1 true, i32 0
+ %m = shufflevector <16 x i1> %head, <16 x i1> poison, <16 x i32> zeroinitializer
+ %v = call <16 x i8> @llvm.vp.sadd.sat.v16i8(<16 x i8> %va, <16 x i8> %vb, <16 x i1> %m, i32 %evl)
+ ret <16 x i8> %v
+}
+
+declare <256 x i8> @llvm.vp.sadd.sat.v258i8(<256 x i8>, <256 x i8>, <256 x i1>, i32)
+
+define <256 x i8> @vsadd_vi_v258i8(<256 x i8> %va, <256 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vsadd_vi_v258i8:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vmv1r.v v24, v0
+; CHECK-NEXT: li a2, 128
+; CHECK-NEXT: vsetvli zero, a2, e8, m8, ta, ma
+; CHECK-NEXT: vlm.v v0, (a0)
+; CHECK-NEXT: addi a0, a1, -128
+; CHECK-NEXT: sltu a3, a1, a0
+; CHECK-NEXT: addi a3, a3, -1
+; CHECK-NEXT: and a0, a3, a0
+; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, ma
+; CHECK-NEXT: vsadd.vi v16, v16, -1, v0.t
+; CHECK-NEXT: bltu a1, a2, .LBB32_2
+; CHECK-NEXT: # %bb.1:
+; CHECK-NEXT: li a1, 128
+; CHECK-NEXT: .LBB32_2:
+; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, ma
+; CHECK-NEXT: vmv1r.v v0, v24
+; CHECK-NEXT: vsadd.vi v8, v8, -1, v0.t
+; CHECK-NEXT: ret
+ %elt.head = insertelement <256 x i8> poison, i8 -1, i32 0
+ %vb = shufflevector <256 x i8> %elt.head, <256 x i8> poison, <256 x i32> zeroinitializer
+ %v = call <256 x i8> @llvm.vp.sadd.sat.v258i8(<256 x i8> %va, <256 x i8> %vb, <256 x i1> %m, i32 %evl)
+ ret <256 x i8> %v
+}
+
+define <256 x i8> @vsadd_vi_v258i8_unmasked(<256 x i8> %va, i32 zeroext %evl) {
+; CHECK-LABEL: vsadd_vi_v258i8_unmasked:
+; CHECK: # %bb.0:
+; CHECK-NEXT: li a2, 128
+; CHECK-NEXT: mv a1, a0
+; CHECK-NEXT: bltu a0, a2, .LBB33_2
+; CHECK-NEXT: # %bb.1:
+; CHECK-NEXT: li a1, 128
+; CHECK-NEXT: .LBB33_2:
+; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, ma
+; CHECK-NEXT: vsadd.vi v8, v8, -1
+; CHECK-NEXT: addi a1, a0, -128
+; CHECK-NEXT: sltu a0, a0, a1
+; CHECK-NEXT: addi a0, a0, -1
+; CHECK-NEXT: and a0, a0, a1
+; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, ma
+; CHECK-NEXT: vsadd.vi v16, v16, -1
+; CHECK-NEXT: ret
+ %elt.head = insertelement <256 x i8> poison, i8 -1, i32 0
+ %vb = shufflevector <256 x i8> %elt.head, <256 x i8> poison, <256 x i32> zeroinitializer
+ %head = insertelement <256 x i1> poison, i1 true, i32 0
+ %m = shufflevector <256 x i1> %head, <256 x i1> poison, <256 x i32> zeroinitializer
+ %v = call <256 x i8> @llvm.vp.sadd.sat.v258i8(<256 x i8> %va, <256 x i8> %vb, <256 x i1> %m, i32 %evl)
+ ret <256 x i8> %v
+}
+
+; Test splitting when the %evl is a known constant.
+
+define <256 x i8> @vsadd_vi_v258i8_evl129(<256 x i8> %va, <256 x i1> %m) {
+; CHECK-LABEL: vsadd_vi_v258i8_evl129:
+; CHECK: # %bb.0:
+; CHECK-NEXT: li a1, 128
+; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, ma
+; CHECK-NEXT: vlm.v v24, (a0)
+; CHECK-NEXT: vsadd.vi v8, v8, -1, v0.t
+; CHECK-NEXT: vsetivli zero, 1, e8, m8, ta, ma
+; CHECK-NEXT: vmv1r.v v0, v24
+; CHECK-NEXT: vsadd.vi v16, v16, -1, v0.t
+; CHECK-NEXT: ret
+ %elt.head = insertelement <256 x i8> poison, i8 -1, i32 0
+ %vb = shufflevector <256 x i8> %elt.head, <256 x i8> poison, <256 x i32> zeroinitializer
+ %v = call <256 x i8> @llvm.vp.sadd.sat.v258i8(<256 x i8> %va, <256 x i8> %vb, <256 x i1> %m, i32 129)
+ ret <256 x i8> %v
+}
+
+; FIXME: The upper half is doing nothing.
+
+define <256 x i8> @vsadd_vi_v258i8_evl128(<256 x i8> %va, <256 x i1> %m) {
+; CHECK-LABEL: vsadd_vi_v258i8_evl128:
+; CHECK: # %bb.0:
+; CHECK-NEXT: li a1, 128
+; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, ma
+; CHECK-NEXT: vlm.v v24, (a0)
+; CHECK-NEXT: vsadd.vi v8, v8, -1, v0.t
+; CHECK-NEXT: vsetivli zero, 0, e8, m8, ta, ma
+; CHECK-NEXT: vmv1r.v v0, v24
+; CHECK-NEXT: vsadd.vi v16, v16, -1, v0.t
+; CHECK-NEXT: ret
+ %elt.head = insertelement <256 x i8> poison, i8 -1, i32 0
+ %vb = shufflevector <256 x i8> %elt.head, <256 x i8> poison, <256 x i32> zeroinitializer
+ %v = call <256 x i8> @llvm.vp.sadd.sat.v258i8(<256 x i8> %va, <256 x i8> %vb, <256 x i1> %m, i32 128)
+ ret <256 x i8> %v
+}
+
+declare <2 x i16> @llvm.vp.sadd.sat.v2i16(<2 x i16>, <2 x i16>, <2 x i1>, i32)
+
+define <2 x i16> @vsadd_vv_v2i16(<2 x i16> %va, <2 x i16> %b, <2 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vsadd_vv_v2i16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
+; CHECK-NEXT: vsadd.vv v8, v8, v9, v0.t
+; CHECK-NEXT: ret
+ %v = call <2 x i16> @llvm.vp.sadd.sat.v2i16(<2 x i16> %va, <2 x i16> %b, <2 x i1> %m, i32 %evl)
+ ret <2 x i16> %v
+}
+
+define <2 x i16> @vsadd_vv_v2i16_unmasked(<2 x i16> %va, <2 x i16> %b, i32 zeroext %evl) {
+; CHECK-LABEL: vsadd_vv_v2i16_unmasked:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
+; CHECK-NEXT: vsadd.vv v8, v8, v9
+; CHECK-NEXT: ret
+ %head = insertelement <2 x i1> poison, i1 true, i32 0
+ %m = shufflevector <2 x i1> %head, <2 x i1> poison, <2 x i32> zeroinitializer
+ %v = call <2 x i16> @llvm.vp.sadd.sat.v2i16(<2 x i16> %va, <2 x i16> %b, <2 x i1> %m, i32 %evl)
+ ret <2 x i16> %v
+}
+
+define <2 x i16> @vsadd_vx_v2i16(<2 x i16> %va, i16 %b, <2 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vsadd_vx_v2i16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
+; CHECK-NEXT: vsadd.vx v8, v8, a0, v0.t
+; CHECK-NEXT: ret
+ %elt.head = insertelement <2 x i16> poison, i16 %b, i32 0
+ %vb = shufflevector <2 x i16> %elt.head, <2 x i16> poison, <2 x i32> zeroinitializer
+ %v = call <2 x i16> @llvm.vp.sadd.sat.v2i16(<2 x i16> %va, <2 x i16> %vb, <2 x i1> %m, i32 %evl)
+ ret <2 x i16> %v
+}
+
+define <2 x i16> @vsadd_vx_v2i16_unmasked(<2 x i16> %va, i16 %b, i32 zeroext %evl) {
+; CHECK-LABEL: vsadd_vx_v2i16_unmasked:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
+; CHECK-NEXT: vsadd.vx v8, v8, a0
+; CHECK-NEXT: ret
+ %elt.head = insertelement <2 x i16> poison, i16 %b, i32 0
+ %vb = shufflevector <2 x i16> %elt.head, <2 x i16> poison, <2 x i32> zeroinitializer
+ %head = insertelement <2 x i1> poison, i1 true, i32 0
+ %m = shufflevector <2 x i1> %head, <2 x i1> poison, <2 x i32> zeroinitializer
+ %v = call <2 x i16> @llvm.vp.sadd.sat.v2i16(<2 x i16> %va, <2 x i16> %vb, <2 x i1> %m, i32 %evl)
+ ret <2 x i16> %v
+}
+
+define <2 x i16> @vsadd_vi_v2i16(<2 x i16> %va, <2 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vsadd_vi_v2i16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
+; CHECK-NEXT: vsadd.vi v8, v8, -1, v0.t
+; CHECK-NEXT: ret
+ %elt.head = insertelement <2 x i16> poison, i16 -1, i32 0
+ %vb = shufflevector <2 x i16> %elt.head, <2 x i16> poison, <2 x i32> zeroinitializer
+ %v = call <2 x i16> @llvm.vp.sadd.sat.v2i16(<2 x i16> %va, <2 x i16> %vb, <2 x i1> %m, i32 %evl)
+ ret <2 x i16> %v
+}
+
+define <2 x i16> @vsadd_vi_v2i16_unmasked(<2 x i16> %va, i32 zeroext %evl) {
+; CHECK-LABEL: vsadd_vi_v2i16_unmasked:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
+; CHECK-NEXT: vsadd.vi v8, v8, -1
+; CHECK-NEXT: ret
+ %elt.head = insertelement <2 x i16> poison, i16 -1, i32 0
+ %vb = shufflevector <2 x i16> %elt.head, <2 x i16> poison, <2 x i32> zeroinitializer
+ %head = insertelement <2 x i1> poison, i1 true, i32 0
+ %m = shufflevector <2 x i1> %head, <2 x i1> poison, <2 x i32> zeroinitializer
+ %v = call <2 x i16> @llvm.vp.sadd.sat.v2i16(<2 x i16> %va, <2 x i16> %vb, <2 x i1> %m, i32 %evl)
+ ret <2 x i16> %v
+}
+
+declare <4 x i16> @llvm.vp.sadd.sat.v4i16(<4 x i16>, <4 x i16>, <4 x i1>, i32)
+
+define <4 x i16> @vsadd_vv_v4i16(<4 x i16> %va, <4 x i16> %b, <4 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vsadd_vv_v4i16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma
+; CHECK-NEXT: vsadd.vv v8, v8, v9, v0.t
+; CHECK-NEXT: ret
+ %v = call <4 x i16> @llvm.vp.sadd.sat.v4i16(<4 x i16> %va, <4 x i16> %b, <4 x i1> %m, i32 %evl)
+ ret <4 x i16> %v
+}
+
+define <4 x i16> @vsadd_vv_v4i16_unmasked(<4 x i16> %va, <4 x i16> %b, i32 zeroext %evl) {
+; CHECK-LABEL: vsadd_vv_v4i16_unmasked:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma
+; CHECK-NEXT: vsadd.vv v8, v8, v9
+; CHECK-NEXT: ret
+ %head = insertelement <4 x i1> poison, i1 true, i32 0
+ %m = shufflevector <4 x i1> %head, <4 x i1> poison, <4 x i32> zeroinitializer
+ %v = call <4 x i16> @llvm.vp.sadd.sat.v4i16(<4 x i16> %va, <4 x i16> %b, <4 x i1> %m, i32 %evl)
+ ret <4 x i16> %v
+}
+
+define <4 x i16> @vsadd_vx_v4i16(<4 x i16> %va, i16 %b, <4 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vsadd_vx_v4i16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
+; CHECK-NEXT: vsadd.vx v8, v8, a0, v0.t
+; CHECK-NEXT: ret
+ %elt.head = insertelement <4 x i16> poison, i16 %b, i32 0
+ %vb = shufflevector <4 x i16> %elt.head, <4 x i16> poison, <4 x i32> zeroinitializer
+ %v = call <4 x i16> @llvm.vp.sadd.sat.v4i16(<4 x i16> %va, <4 x i16> %vb, <4 x i1> %m, i32 %evl)
+ ret <4 x i16> %v
+}
+
+define <4 x i16> @vsadd_vx_v4i16_unmasked(<4 x i16> %va, i16 %b, i32 zeroext %evl) {
+; CHECK-LABEL: vsadd_vx_v4i16_unmasked:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
+; CHECK-NEXT: vsadd.vx v8, v8, a0
+; CHECK-NEXT: ret
+ %elt.head = insertelement <4 x i16> poison, i16 %b, i32 0
+ %vb = shufflevector <4 x i16> %elt.head, <4 x i16> poison, <4 x i32> zeroinitializer
+ %head = insertelement <4 x i1> poison, i1 true, i32 0
+ %m = shufflevector <4 x i1> %head, <4 x i1> poison, <4 x i32> zeroinitializer
+ %v = call <4 x i16> @llvm.vp.sadd.sat.v4i16(<4 x i16> %va, <4 x i16> %vb, <4 x i1> %m, i32 %evl)
+ ret <4 x i16> %v
+}
+
+define <4 x i16> @vsadd_vi_v4i16(<4 x i16> %va, <4 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vsadd_vi_v4i16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma
+; CHECK-NEXT: vsadd.vi v8, v8, -1, v0.t
+; CHECK-NEXT: ret
+ %elt.head = insertelement <4 x i16> poison, i16 -1, i32 0
+ %vb = shufflevector <4 x i16> %elt.head, <4 x i16> poison, <4 x i32> zeroinitializer
+ %v = call <4 x i16> @llvm.vp.sadd.sat.v4i16(<4 x i16> %va, <4 x i16> %vb, <4 x i1> %m, i32 %evl)
+ ret <4 x i16> %v
+}
+
+define <4 x i16> @vsadd_vi_v4i16_unmasked(<4 x i16> %va, i32 zeroext %evl) {
+; CHECK-LABEL: vsadd_vi_v4i16_unmasked:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma
+; CHECK-NEXT: vsadd.vi v8, v8, -1
+; CHECK-NEXT: ret
+ %elt.head = insertelement <4 x i16> poison, i16 -1, i32 0
+ %vb = shufflevector <4 x i16> %elt.head, <4 x i16> poison, <4 x i32> zeroinitializer
+ %head = insertelement <4 x i1> poison, i1 true, i32 0
+ %m = shufflevector <4 x i1> %head, <4 x i1> poison, <4 x i32> zeroinitializer
+ %v = call <4 x i16> @llvm.vp.sadd.sat.v4i16(<4 x i16> %va, <4 x i16> %vb, <4 x i1> %m, i32 %evl)
+ ret <4 x i16> %v
+}
+
+declare <8 x i16> @llvm.vp.sadd.sat.v8i16(<8 x i16>, <8 x i16>, <8 x i1>, i32)
+
+define <8 x i16> @vsadd_vv_v8i16(<8 x i16> %va, <8 x i16> %b, <8 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vsadd_vv_v8i16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma
+; CHECK-NEXT: vsadd.vv v8, v8, v9, v0.t
+; CHECK-NEXT: ret
+ %v = call <8 x i16> @llvm.vp.sadd.sat.v8i16(<8 x i16> %va, <8 x i16> %b, <8 x i1> %m, i32 %evl)
+ ret <8 x i16> %v
+}
+
+define <8 x i16> @vsadd_vv_v8i16_unmasked(<8 x i16> %va, <8 x i16> %b, i32 zeroext %evl) {
+; CHECK-LABEL: vsadd_vv_v8i16_unmasked:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma
+; CHECK-NEXT: vsadd.vv v8, v8, v9
+; CHECK-NEXT: ret
+ %head = insertelement <8 x i1> poison, i1 true, i32 0
+ %m = shufflevector <8 x i1> %head, <8 x i1> poison, <8 x i32> zeroinitializer
+ %v = call <8 x i16> @llvm.vp.sadd.sat.v8i16(<8 x i16> %va, <8 x i16> %b, <8 x i1> %m, i32 %evl)
+ ret <8 x i16> %v
+}
+
+define <8 x i16> @vsadd_vx_v8i16(<8 x i16> %va, i16 %b, <8 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vsadd_vx_v8i16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
+; CHECK-NEXT: vsadd.vx v8, v8, a0, v0.t
+; CHECK-NEXT: ret
+ %elt.head = insertelement <8 x i16> poison, i16 %b, i32 0
+ %vb = shufflevector <8 x i16> %elt.head, <8 x i16> poison, <8 x i32> zeroinitializer
+ %v = call <8 x i16> @llvm.vp.sadd.sat.v8i16(<8 x i16> %va, <8 x i16> %vb, <8 x i1> %m, i32 %evl)
+ ret <8 x i16> %v
+}
+
+define <8 x i16> @vsadd_vx_v8i16_unmasked(<8 x i16> %va, i16 %b, i32 zeroext %evl) {
+; CHECK-LABEL: vsadd_vx_v8i16_unmasked:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
+; CHECK-NEXT: vsadd.vx v8, v8, a0
+; CHECK-NEXT: ret
+ %elt.head = insertelement <8 x i16> poison, i16 %b, i32 0
+ %vb = shufflevector <8 x i16> %elt.head, <8 x i16> poison, <8 x i32> zeroinitializer
+ %head = insertelement <8 x i1> poison, i1 true, i32 0
+ %m = shufflevector <8 x i1> %head, <8 x i1> poison, <8 x i32> zeroinitializer
+ %v = call <8 x i16> @llvm.vp.sadd.sat.v8i16(<8 x i16> %va, <8 x i16> %vb, <8 x i1> %m, i32 %evl)
+ ret <8 x i16> %v
+}
+
+define <8 x i16> @vsadd_vi_v8i16(<8 x i16> %va, <8 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vsadd_vi_v8i16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma
+; CHECK-NEXT: vsadd.vi v8, v8, -1, v0.t
+; CHECK-NEXT: ret
+ %elt.head = insertelement <8 x i16> poison, i16 -1, i32 0
+ %vb = shufflevector <8 x i16> %elt.head, <8 x i16> poison, <8 x i32> zeroinitializer
+ %v = call <8 x i16> @llvm.vp.sadd.sat.v8i16(<8 x i16> %va, <8 x i16> %vb, <8 x i1> %m, i32 %evl)
+ ret <8 x i16> %v
+}
+
+define <8 x i16> @vsadd_vi_v8i16_unmasked(<8 x i16> %va, i32 zeroext %evl) {
+; CHECK-LABEL: vsadd_vi_v8i16_unmasked:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma
+; CHECK-NEXT: vsadd.vi v8, v8, -1
+; CHECK-NEXT: ret
+ %elt.head = insertelement <8 x i16> poison, i16 -1, i32 0
+ %vb = shufflevector <8 x i16> %elt.head, <8 x i16> poison, <8 x i32> zeroinitializer
+ %head = insertelement <8 x i1> poison, i1 true, i32 0
+ %m = shufflevector <8 x i1> %head, <8 x i1> poison, <8 x i32> zeroinitializer
+ %v = call <8 x i16> @llvm.vp.sadd.sat.v8i16(<8 x i16> %va, <8 x i16> %vb, <8 x i1> %m, i32 %evl)
+ ret <8 x i16> %v
+}
+
+declare <16 x i16> @llvm.vp.sadd.sat.v16i16(<16 x i16>, <16 x i16>, <16 x i1>, i32)
+
+define <16 x i16> @vsadd_vv_v16i16(<16 x i16> %va, <16 x i16> %b, <16 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vsadd_vv_v16i16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma
+; CHECK-NEXT: vsadd.vv v8, v8, v10, v0.t
+; CHECK-NEXT: ret
+ %v = call <16 x i16> @llvm.vp.sadd.sat.v16i16(<16 x i16> %va, <16 x i16> %b, <16 x i1> %m, i32 %evl)
+ ret <16 x i16> %v
+}
+
+define <16 x i16> @vsadd_vv_v16i16_unmasked(<16 x i16> %va, <16 x i16> %b, i32 zeroext %evl) {
+; CHECK-LABEL: vsadd_vv_v16i16_unmasked:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma
+; CHECK-NEXT: vsadd.vv v8, v8, v10
+; CHECK-NEXT: ret
+ %head = insertelement <16 x i1> poison, i1 true, i32 0
+ %m = shufflevector <16 x i1> %head, <16 x i1> poison, <16 x i32> zeroinitializer
+ %v = call <16 x i16> @llvm.vp.sadd.sat.v16i16(<16 x i16> %va, <16 x i16> %b, <16 x i1> %m, i32 %evl)
+ ret <16 x i16> %v
+}
+
+define <16 x i16> @vsadd_vx_v16i16(<16 x i16> %va, i16 %b, <16 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vsadd_vx_v16i16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma
+; CHECK-NEXT: vsadd.vx v8, v8, a0, v0.t
+; CHECK-NEXT: ret
+ %elt.head = insertelement <16 x i16> poison, i16 %b, i32 0
+ %vb = shufflevector <16 x i16> %elt.head, <16 x i16> poison, <16 x i32> zeroinitializer
+ %v = call <16 x i16> @llvm.vp.sadd.sat.v16i16(<16 x i16> %va, <16 x i16> %vb, <16 x i1> %m, i32 %evl)
+ ret <16 x i16> %v
+}
+
+define <16 x i16> @vsadd_vx_v16i16_unmasked(<16 x i16> %va, i16 %b, i32 zeroext %evl) {
+; CHECK-LABEL: vsadd_vx_v16i16_unmasked:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma
+; CHECK-NEXT: vsadd.vx v8, v8, a0
+; CHECK-NEXT: ret
+ %elt.head = insertelement <16 x i16> poison, i16 %b, i32 0
+ %vb = shufflevector <16 x i16> %elt.head, <16 x i16> poison, <16 x i32> zeroinitializer
+ %head = insertelement <16 x i1> poison, i1 true, i32 0
+ %m = shufflevector <16 x i1> %head, <16 x i1> poison, <16 x i32> zeroinitializer
+ %v = call <16 x i16> @llvm.vp.sadd.sat.v16i16(<16 x i16> %va, <16 x i16> %vb, <16 x i1> %m, i32 %evl)
+ ret <16 x i16> %v
+}
+
+define <16 x i16> @vsadd_vi_v16i16(<16 x i16> %va, <16 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vsadd_vi_v16i16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma
+; CHECK-NEXT: vsadd.vi v8, v8, -1, v0.t
+; CHECK-NEXT: ret
+ %elt.head = insertelement <16 x i16> poison, i16 -1, i32 0
+ %vb = shufflevector <16 x i16> %elt.head, <16 x i16> poison, <16 x i32> zeroinitializer
+ %v = call <16 x i16> @llvm.vp.sadd.sat.v16i16(<16 x i16> %va, <16 x i16> %vb, <16 x i1> %m, i32 %evl)
+ ret <16 x i16> %v
+}
+
+define <16 x i16> @vsadd_vi_v16i16_unmasked(<16 x i16> %va, i32 zeroext %evl) {
+; CHECK-LABEL: vsadd_vi_v16i16_unmasked:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma
+; CHECK-NEXT: vsadd.vi v8, v8, -1
+; CHECK-NEXT: ret
+ %elt.head = insertelement <16 x i16> poison, i16 -1, i32 0
+ %vb = shufflevector <16 x i16> %elt.head, <16 x i16> poison, <16 x i32> zeroinitializer
+ %head = insertelement <16 x i1> poison, i1 true, i32 0
+ %m = shufflevector <16 x i1> %head, <16 x i1> poison, <16 x i32> zeroinitializer
+ %v = call <16 x i16> @llvm.vp.sadd.sat.v16i16(<16 x i16> %va, <16 x i16> %vb, <16 x i1> %m, i32 %evl)
+ ret <16 x i16> %v
+}
+
+declare <2 x i32> @llvm.vp.sadd.sat.v2i32(<2 x i32>, <2 x i32>, <2 x i1>, i32)
+
+define <2 x i32> @vsadd_vv_v2i32(<2 x i32> %va, <2 x i32> %b, <2 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vsadd_vv_v2i32:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma
+; CHECK-NEXT: vsadd.vv v8, v8, v9, v0.t
+; CHECK-NEXT: ret
+ %v = call <2 x i32> @llvm.vp.sadd.sat.v2i32(<2 x i32> %va, <2 x i32> %b, <2 x i1> %m, i32 %evl)
+ ret <2 x i32> %v
+}
+
+define <2 x i32> @vsadd_vv_v2i32_unmasked(<2 x i32> %va, <2 x i32> %b, i32 zeroext %evl) {
+; CHECK-LABEL: vsadd_vv_v2i32_unmasked:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma
+; CHECK-NEXT: vsadd.vv v8, v8, v9
+; CHECK-NEXT: ret
+ %head = insertelement <2 x i1> poison, i1 true, i32 0
+ %m = shufflevector <2 x i1> %head, <2 x i1> poison, <2 x i32> zeroinitializer
+ %v = call <2 x i32> @llvm.vp.sadd.sat.v2i32(<2 x i32> %va, <2 x i32> %b, <2 x i1> %m, i32 %evl)
+ ret <2 x i32> %v
+}
+
+define <2 x i32> @vsadd_vx_v2i32(<2 x i32> %va, i32 %b, <2 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vsadd_vx_v2i32:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
+; CHECK-NEXT: vsadd.vx v8, v8, a0, v0.t
+; CHECK-NEXT: ret
+ %elt.head = insertelement <2 x i32> poison, i32 %b, i32 0
+ %vb = shufflevector <2 x i32> %elt.head, <2 x i32> poison, <2 x i32> zeroinitializer
+ %v = call <2 x i32> @llvm.vp.sadd.sat.v2i32(<2 x i32> %va, <2 x i32> %vb, <2 x i1> %m, i32 %evl)
+ ret <2 x i32> %v
+}
+
+define <2 x i32> @vsadd_vx_v2i32_unmasked(<2 x i32> %va, i32 %b, i32 zeroext %evl) {
+; CHECK-LABEL: vsadd_vx_v2i32_unmasked:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
+; CHECK-NEXT: vsadd.vx v8, v8, a0
+; CHECK-NEXT: ret
+ %elt.head = insertelement <2 x i32> poison, i32 %b, i32 0
+ %vb = shufflevector <2 x i32> %elt.head, <2 x i32> poison, <2 x i32> zeroinitializer
+ %head = insertelement <2 x i1> poison, i1 true, i32 0
+ %m = shufflevector <2 x i1> %head, <2 x i1> poison, <2 x i32> zeroinitializer
+ %v = call <2 x i32> @llvm.vp.sadd.sat.v2i32(<2 x i32> %va, <2 x i32> %vb, <2 x i1> %m, i32 %evl)
+ ret <2 x i32> %v
+}
+
+define <2 x i32> @vsadd_vi_v2i32(<2 x i32> %va, <2 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vsadd_vi_v2i32:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma
+; CHECK-NEXT: vsadd.vi v8, v8, -1, v0.t
+; CHECK-NEXT: ret
+ %elt.head = insertelement <2 x i32> poison, i32 -1, i32 0
+ %vb = shufflevector <2 x i32> %elt.head, <2 x i32> poison, <2 x i32> zeroinitializer
+ %v = call <2 x i32> @llvm.vp.sadd.sat.v2i32(<2 x i32> %va, <2 x i32> %vb, <2 x i1> %m, i32 %evl)
+ ret <2 x i32> %v
+}
+
+define <2 x i32> @vsadd_vi_v2i32_unmasked(<2 x i32> %va, i32 zeroext %evl) {
+; CHECK-LABEL: vsadd_vi_v2i32_unmasked:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma
+; CHECK-NEXT: vsadd.vi v8, v8, -1
+; CHECK-NEXT: ret
+ %elt.head = insertelement <2 x i32> poison, i32 -1, i32 0
+ %vb = shufflevector <2 x i32> %elt.head, <2 x i32> poison, <2 x i32> zeroinitializer
+ %head = insertelement <2 x i1> poison, i1 true, i32 0
+ %m = shufflevector <2 x i1> %head, <2 x i1> poison, <2 x i32> zeroinitializer
+ %v = call <2 x i32> @llvm.vp.sadd.sat.v2i32(<2 x i32> %va, <2 x i32> %vb, <2 x i1> %m, i32 %evl)
+ ret <2 x i32> %v
+}
+
+declare <4 x i32> @llvm.vp.sadd.sat.v4i32(<4 x i32>, <4 x i32>, <4 x i1>, i32)
+
+define <4 x i32> @vsadd_vv_v4i32(<4 x i32> %va, <4 x i32> %b, <4 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vsadd_vv_v4i32:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma
+; CHECK-NEXT: vsadd.vv v8, v8, v9, v0.t
+; CHECK-NEXT: ret
+ %v = call <4 x i32> @llvm.vp.sadd.sat.v4i32(<4 x i32> %va, <4 x i32> %b, <4 x i1> %m, i32 %evl)
+ ret <4 x i32> %v
+}
+
+define <4 x i32> @vsadd_vv_v4i32_unmasked(<4 x i32> %va, <4 x i32> %b, i32 zeroext %evl) {
+; CHECK-LABEL: vsadd_vv_v4i32_unmasked:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma
+; CHECK-NEXT: vsadd.vv v8, v8, v9
+; CHECK-NEXT: ret
+ %head = insertelement <4 x i1> poison, i1 true, i32 0
+ %m = shufflevector <4 x i1> %head, <4 x i1> poison, <4 x i32> zeroinitializer
+ %v = call <4 x i32> @llvm.vp.sadd.sat.v4i32(<4 x i32> %va, <4 x i32> %b, <4 x i1> %m, i32 %evl)
+ ret <4 x i32> %v
+}
+
+define <4 x i32> @vsadd_vx_v4i32(<4 x i32> %va, i32 %b, <4 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vsadd_vx_v4i32:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
+; CHECK-NEXT: vsadd.vx v8, v8, a0, v0.t
+; CHECK-NEXT: ret
+ %elt.head = insertelement <4 x i32> poison, i32 %b, i32 0
+ %vb = shufflevector <4 x i32> %elt.head, <4 x i32> poison, <4 x i32> zeroinitializer
+ %v = call <4 x i32> @llvm.vp.sadd.sat.v4i32(<4 x i32> %va, <4 x i32> %vb, <4 x i1> %m, i32 %evl)
+ ret <4 x i32> %v
+}
+
+define <4 x i32> @vsadd_vx_v4i32_unmasked(<4 x i32> %va, i32 %b, i32 zeroext %evl) {
+; CHECK-LABEL: vsadd_vx_v4i32_unmasked:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
+; CHECK-NEXT: vsadd.vx v8, v8, a0
+; CHECK-NEXT: ret
+ %elt.head = insertelement <4 x i32> poison, i32 %b, i32 0
+ %vb = shufflevector <4 x i32> %elt.head, <4 x i32> poison, <4 x i32> zeroinitializer
+ %head = insertelement <4 x i1> poison, i1 true, i32 0
+ %m = shufflevector <4 x i1> %head, <4 x i1> poison, <4 x i32> zeroinitializer
+ %v = call <4 x i32> @llvm.vp.sadd.sat.v4i32(<4 x i32> %va, <4 x i32> %vb, <4 x i1> %m, i32 %evl)
+ ret <4 x i32> %v
+}
+
+define <4 x i32> @vsadd_vi_v4i32(<4 x i32> %va, <4 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vsadd_vi_v4i32:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma
+; CHECK-NEXT: vsadd.vi v8, v8, -1, v0.t
+; CHECK-NEXT: ret
+ %elt.head = insertelement <4 x i32> poison, i32 -1, i32 0
+ %vb = shufflevector <4 x i32> %elt.head, <4 x i32> poison, <4 x i32> zeroinitializer
+ %v = call <4 x i32> @llvm.vp.sadd.sat.v4i32(<4 x i32> %va, <4 x i32> %vb, <4 x i1> %m, i32 %evl)
+ ret <4 x i32> %v
+}
+
+define <4 x i32> @vsadd_vi_v4i32_unmasked(<4 x i32> %va, i32 zeroext %evl) {
+; CHECK-LABEL: vsadd_vi_v4i32_unmasked:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma
+; CHECK-NEXT: vsadd.vi v8, v8, -1
+; CHECK-NEXT: ret
+ %elt.head = insertelement <4 x i32> poison, i32 -1, i32 0
+ %vb = shufflevector <4 x i32> %elt.head, <4 x i32> poison, <4 x i32> zeroinitializer
+ %head = insertelement <4 x i1> poison, i1 true, i32 0
+ %m = shufflevector <4 x i1> %head, <4 x i1> poison, <4 x i32> zeroinitializer
+ %v = call <4 x i32> @llvm.vp.sadd.sat.v4i32(<4 x i32> %va, <4 x i32> %vb, <4 x i1> %m, i32 %evl)
+ ret <4 x i32> %v
+}
+
+declare <8 x i32> @llvm.vp.sadd.sat.v8i32(<8 x i32>, <8 x i32>, <8 x i1>, i32)
+
+define <8 x i32> @vsadd_vv_v8i32(<8 x i32> %va, <8 x i32> %b, <8 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vsadd_vv_v8i32:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma
+; CHECK-NEXT: vsadd.vv v8, v8, v10, v0.t
+; CHECK-NEXT: ret
+ %v = call <8 x i32> @llvm.vp.sadd.sat.v8i32(<8 x i32> %va, <8 x i32> %b, <8 x i1> %m, i32 %evl)
+ ret <8 x i32> %v
+}
+
+define <8 x i32> @vsadd_vv_v8i32_unmasked(<8 x i32> %va, <8 x i32> %b, i32 zeroext %evl) {
+; CHECK-LABEL: vsadd_vv_v8i32_unmasked:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma
+; CHECK-NEXT: vsadd.vv v8, v8, v10
+; CHECK-NEXT: ret
+ %head = insertelement <8 x i1> poison, i1 true, i32 0
+ %m = shufflevector <8 x i1> %head, <8 x i1> poison, <8 x i32> zeroinitializer
+ %v = call <8 x i32> @llvm.vp.sadd.sat.v8i32(<8 x i32> %va, <8 x i32> %b, <8 x i1> %m, i32 %evl)
+ ret <8 x i32> %v
+}
+
+define <8 x i32> @vsadd_vx_v8i32(<8 x i32> %va, i32 %b, <8 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vsadd_vx_v8i32:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma
+; CHECK-NEXT: vsadd.vx v8, v8, a0, v0.t
+; CHECK-NEXT: ret
+ %elt.head = insertelement <8 x i32> poison, i32 %b, i32 0
+ %vb = shufflevector <8 x i32> %elt.head, <8 x i32> poison, <8 x i32> zeroinitializer
+ %v = call <8 x i32> @llvm.vp.sadd.sat.v8i32(<8 x i32> %va, <8 x i32> %vb, <8 x i1> %m, i32 %evl)
+ ret <8 x i32> %v
+}
+
+define <8 x i32> @vsadd_vx_v8i32_unmasked(<8 x i32> %va, i32 %b, i32 zeroext %evl) {
+; CHECK-LABEL: vsadd_vx_v8i32_unmasked:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma
+; CHECK-NEXT: vsadd.vx v8, v8, a0
+; CHECK-NEXT: ret
+ %elt.head = insertelement <8 x i32> poison, i32 %b, i32 0
+ %vb = shufflevector <8 x i32> %elt.head, <8 x i32> poison, <8 x i32> zeroinitializer
+ %head = insertelement <8 x i1> poison, i1 true, i32 0
+ %m = shufflevector <8 x i1> %head, <8 x i1> poison, <8 x i32> zeroinitializer
+ %v = call <8 x i32> @llvm.vp.sadd.sat.v8i32(<8 x i32> %va, <8 x i32> %vb, <8 x i1> %m, i32 %evl)
+ ret <8 x i32> %v
+}
+
+define <8 x i32> @vsadd_vi_v8i32(<8 x i32> %va, <8 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vsadd_vi_v8i32:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma
+; CHECK-NEXT: vsadd.vi v8, v8, -1, v0.t
+; CHECK-NEXT: ret
+ %elt.head = insertelement <8 x i32> poison, i32 -1, i32 0
+ %vb = shufflevector <8 x i32> %elt.head, <8 x i32> poison, <8 x i32> zeroinitializer
+ %v = call <8 x i32> @llvm.vp.sadd.sat.v8i32(<8 x i32> %va, <8 x i32> %vb, <8 x i1> %m, i32 %evl)
+ ret <8 x i32> %v
+}
+
+define <8 x i32> @vsadd_vi_v8i32_unmasked(<8 x i32> %va, i32 zeroext %evl) {
+; CHECK-LABEL: vsadd_vi_v8i32_unmasked:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma
+; CHECK-NEXT: vsadd.vi v8, v8, -1
+; CHECK-NEXT: ret
+ %elt.head = insertelement <8 x i32> poison, i32 -1, i32 0
+ %vb = shufflevector <8 x i32> %elt.head, <8 x i32> poison, <8 x i32> zeroinitializer
+ %head = insertelement <8 x i1> poison, i1 true, i32 0
+ %m = shufflevector <8 x i1> %head, <8 x i1> poison, <8 x i32> zeroinitializer
+ %v = call <8 x i32> @llvm.vp.sadd.sat.v8i32(<8 x i32> %va, <8 x i32> %vb, <8 x i1> %m, i32 %evl)
+ ret <8 x i32> %v
+}
+
+declare <16 x i32> @llvm.vp.sadd.sat.v16i32(<16 x i32>, <16 x i32>, <16 x i1>, i32)
+
+define <16 x i32> @vsadd_vv_v16i32(<16 x i32> %va, <16 x i32> %b, <16 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vsadd_vv_v16i32:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma
+; CHECK-NEXT: vsadd.vv v8, v8, v12, v0.t
+; CHECK-NEXT: ret
+ %v = call <16 x i32> @llvm.vp.sadd.sat.v16i32(<16 x i32> %va, <16 x i32> %b, <16 x i1> %m, i32 %evl)
+ ret <16 x i32> %v
+}
+
+define <16 x i32> @vsadd_vv_v16i32_unmasked(<16 x i32> %va, <16 x i32> %b, i32 zeroext %evl) {
+; CHECK-LABEL: vsadd_vv_v16i32_unmasked:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma
+; CHECK-NEXT: vsadd.vv v8, v8, v12
+; CHECK-NEXT: ret
+ %head = insertelement <16 x i1> poison, i1 true, i32 0
+ %m = shufflevector <16 x i1> %head, <16 x i1> poison, <16 x i32> zeroinitializer
+ %v = call <16 x i32> @llvm.vp.sadd.sat.v16i32(<16 x i32> %va, <16 x i32> %b, <16 x i1> %m, i32 %evl)
+ ret <16 x i32> %v
+}
+
+define <16 x i32> @vsadd_vx_v16i32(<16 x i32> %va, i32 %b, <16 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vsadd_vx_v16i32:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma
+; CHECK-NEXT: vsadd.vx v8, v8, a0, v0.t
+; CHECK-NEXT: ret
+ %elt.head = insertelement <16 x i32> poison, i32 %b, i32 0
+ %vb = shufflevector <16 x i32> %elt.head, <16 x i32> poison, <16 x i32> zeroinitializer
+ %v = call <16 x i32> @llvm.vp.sadd.sat.v16i32(<16 x i32> %va, <16 x i32> %vb, <16 x i1> %m, i32 %evl)
+ ret <16 x i32> %v
+}
+
+define <16 x i32> @vsadd_vx_v16i32_unmasked(<16 x i32> %va, i32 %b, i32 zeroext %evl) {
+; CHECK-LABEL: vsadd_vx_v16i32_unmasked:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma
+; CHECK-NEXT: vsadd.vx v8, v8, a0
+; CHECK-NEXT: ret
+ %elt.head = insertelement <16 x i32> poison, i32 %b, i32 0
+ %vb = shufflevector <16 x i32> %elt.head, <16 x i32> poison, <16 x i32> zeroinitializer
+ %head = insertelement <16 x i1> poison, i1 true, i32 0
+ %m = shufflevector <16 x i1> %head, <16 x i1> poison, <16 x i32> zeroinitializer
+ %v = call <16 x i32> @llvm.vp.sadd.sat.v16i32(<16 x i32> %va, <16 x i32> %vb, <16 x i1> %m, i32 %evl)
+ ret <16 x i32> %v
+}
+
+define <16 x i32> @vsadd_vi_v16i32(<16 x i32> %va, <16 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vsadd_vi_v16i32:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma
+; CHECK-NEXT: vsadd.vi v8, v8, -1, v0.t
+; CHECK-NEXT: ret
+ %elt.head = insertelement <16 x i32> poison, i32 -1, i32 0
+ %vb = shufflevector <16 x i32> %elt.head, <16 x i32> poison, <16 x i32> zeroinitializer
+ %v = call <16 x i32> @llvm.vp.sadd.sat.v16i32(<16 x i32> %va, <16 x i32> %vb, <16 x i1> %m, i32 %evl)
+ ret <16 x i32> %v
+}
+
+define <16 x i32> @vsadd_vi_v16i32_unmasked(<16 x i32> %va, i32 zeroext %evl) {
+; CHECK-LABEL: vsadd_vi_v16i32_unmasked:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma
+; CHECK-NEXT: vsadd.vi v8, v8, -1
+; CHECK-NEXT: ret
+ %elt.head = insertelement <16 x i32> poison, i32 -1, i32 0
+ %vb = shufflevector <16 x i32> %elt.head, <16 x i32> poison, <16 x i32> zeroinitializer
+ %head = insertelement <16 x i1> poison, i1 true, i32 0
+ %m = shufflevector <16 x i1> %head, <16 x i1> poison, <16 x i32> zeroinitializer
+ %v = call <16 x i32> @llvm.vp.sadd.sat.v16i32(<16 x i32> %va, <16 x i32> %vb, <16 x i1> %m, i32 %evl)
+ ret <16 x i32> %v
+}
+
+declare <2 x i64> @llvm.vp.sadd.sat.v2i64(<2 x i64>, <2 x i64>, <2 x i1>, i32)
+
+define <2 x i64> @vsadd_vv_v2i64(<2 x i64> %va, <2 x i64> %b, <2 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vsadd_vv_v2i64:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma
+; CHECK-NEXT: vsadd.vv v8, v8, v9, v0.t
+; CHECK-NEXT: ret
+ %v = call <2 x i64> @llvm.vp.sadd.sat.v2i64(<2 x i64> %va, <2 x i64> %b, <2 x i1> %m, i32 %evl)
+ ret <2 x i64> %v
+}
+
+define <2 x i64> @vsadd_vv_v2i64_unmasked(<2 x i64> %va, <2 x i64> %b, i32 zeroext %evl) {
+; CHECK-LABEL: vsadd_vv_v2i64_unmasked:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma
+; CHECK-NEXT: vsadd.vv v8, v8, v9
+; CHECK-NEXT: ret
+ %head = insertelement <2 x i1> poison, i1 true, i32 0
+ %m = shufflevector <2 x i1> %head, <2 x i1> poison, <2 x i32> zeroinitializer
+ %v = call <2 x i64> @llvm.vp.sadd.sat.v2i64(<2 x i64> %va, <2 x i64> %b, <2 x i1> %m, i32 %evl)
+ ret <2 x i64> %v
+}
+
+define <2 x i64> @vsadd_vx_v2i64(<2 x i64> %va, i64 %b, <2 x i1> %m, i32 zeroext %evl) {
+; RV32-LABEL: vsadd_vx_v2i64:
+; RV32: # %bb.0:
+; RV32-NEXT: addi sp, sp, -16
+; RV32-NEXT: .cfi_def_cfa_offset 16
+; RV32-NEXT: sw a1, 12(sp)
+; RV32-NEXT: sw a0, 8(sp)
+; RV32-NEXT: addi a0, sp, 8
+; RV32-NEXT: vsetivli zero, 2, e64, m1, ta, ma
+; RV32-NEXT: vlse64.v v9, (a0), zero
+; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, ma
+; RV32-NEXT: vsadd.vv v8, v8, v9, v0.t
+; RV32-NEXT: addi sp, sp, 16
+; RV32-NEXT: ret
+;
+; RV64-LABEL: vsadd_vx_v2i64:
+; RV64: # %bb.0:
+; RV64-NEXT: vsetvli zero, a1, e64, m1, ta, ma
+; RV64-NEXT: vsadd.vx v8, v8, a0, v0.t
+; RV64-NEXT: ret
+ %elt.head = insertelement <2 x i64> poison, i64 %b, i32 0
+ %vb = shufflevector <2 x i64> %elt.head, <2 x i64> poison, <2 x i32> zeroinitializer
+ %v = call <2 x i64> @llvm.vp.sadd.sat.v2i64(<2 x i64> %va, <2 x i64> %vb, <2 x i1> %m, i32 %evl)
+ ret <2 x i64> %v
+}
+
+define <2 x i64> @vsadd_vx_v2i64_unmasked(<2 x i64> %va, i64 %b, i32 zeroext %evl) {
+; RV32-LABEL: vsadd_vx_v2i64_unmasked:
+; RV32: # %bb.0:
+; RV32-NEXT: addi sp, sp, -16
+; RV32-NEXT: .cfi_def_cfa_offset 16
+; RV32-NEXT: sw a1, 12(sp)
+; RV32-NEXT: sw a0, 8(sp)
+; RV32-NEXT: addi a0, sp, 8
+; RV32-NEXT: vsetivli zero, 2, e64, m1, ta, ma
+; RV32-NEXT: vlse64.v v9, (a0), zero
+; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, ma
+; RV32-NEXT: vsadd.vv v8, v8, v9
+; RV32-NEXT: addi sp, sp, 16
+; RV32-NEXT: ret
+;
+; RV64-LABEL: vsadd_vx_v2i64_unmasked:
+; RV64: # %bb.0:
+; RV64-NEXT: vsetvli zero, a1, e64, m1, ta, ma
+; RV64-NEXT: vsadd.vx v8, v8, a0
+; RV64-NEXT: ret
+ %elt.head = insertelement <2 x i64> poison, i64 %b, i32 0
+ %vb = shufflevector <2 x i64> %elt.head, <2 x i64> poison, <2 x i32> zeroinitializer
+ %head = insertelement <2 x i1> poison, i1 true, i32 0
+ %m = shufflevector <2 x i1> %head, <2 x i1> poison, <2 x i32> zeroinitializer
+ %v = call <2 x i64> @llvm.vp.sadd.sat.v2i64(<2 x i64> %va, <2 x i64> %vb, <2 x i1> %m, i32 %evl)
+ ret <2 x i64> %v
+}
+
+define <2 x i64> @vsadd_vi_v2i64(<2 x i64> %va, <2 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vsadd_vi_v2i64:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma
+; CHECK-NEXT: vsadd.vi v8, v8, -1, v0.t
+; CHECK-NEXT: ret
+ %elt.head = insertelement <2 x i64> poison, i64 -1, i32 0
+ %vb = shufflevector <2 x i64> %elt.head, <2 x i64> poison, <2 x i32> zeroinitializer
+ %v = call <2 x i64> @llvm.vp.sadd.sat.v2i64(<2 x i64> %va, <2 x i64> %vb, <2 x i1> %m, i32 %evl)
+ ret <2 x i64> %v
+}
+
+define <2 x i64> @vsadd_vi_v2i64_unmasked(<2 x i64> %va, i32 zeroext %evl) {
+; CHECK-LABEL: vsadd_vi_v2i64_unmasked:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma
+; CHECK-NEXT: vsadd.vi v8, v8, -1
+; CHECK-NEXT: ret
+ %elt.head = insertelement <2 x i64> poison, i64 -1, i32 0
+ %vb = shufflevector <2 x i64> %elt.head, <2 x i64> poison, <2 x i32> zeroinitializer
+ %head = insertelement <2 x i1> poison, i1 true, i32 0
+ %m = shufflevector <2 x i1> %head, <2 x i1> poison, <2 x i32> zeroinitializer
+ %v = call <2 x i64> @llvm.vp.sadd.sat.v2i64(<2 x i64> %va, <2 x i64> %vb, <2 x i1> %m, i32 %evl)
+ ret <2 x i64> %v
+}
+
+declare <4 x i64> @llvm.vp.sadd.sat.v4i64(<4 x i64>, <4 x i64>, <4 x i1>, i32)
+
+define <4 x i64> @vsadd_vv_v4i64(<4 x i64> %va, <4 x i64> %b, <4 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vsadd_vv_v4i64:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma
+; CHECK-NEXT: vsadd.vv v8, v8, v10, v0.t
+; CHECK-NEXT: ret
+ %v = call <4 x i64> @llvm.vp.sadd.sat.v4i64(<4 x i64> %va, <4 x i64> %b, <4 x i1> %m, i32 %evl)
+ ret <4 x i64> %v
+}
+
+define <4 x i64> @vsadd_vv_v4i64_unmasked(<4 x i64> %va, <4 x i64> %b, i32 zeroext %evl) {
+; CHECK-LABEL: vsadd_vv_v4i64_unmasked:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma
+; CHECK-NEXT: vsadd.vv v8, v8, v10
+; CHECK-NEXT: ret
+ %head = insertelement <4 x i1> poison, i1 true, i32 0
+ %m = shufflevector <4 x i1> %head, <4 x i1> poison, <4 x i32> zeroinitializer
+ %v = call <4 x i64> @llvm.vp.sadd.sat.v4i64(<4 x i64> %va, <4 x i64> %b, <4 x i1> %m, i32 %evl)
+ ret <4 x i64> %v
+}
+
+define <4 x i64> @vsadd_vx_v4i64(<4 x i64> %va, i64 %b, <4 x i1> %m, i32 zeroext %evl) {
+; RV32-LABEL: vsadd_vx_v4i64:
+; RV32: # %bb.0:
+; RV32-NEXT: addi sp, sp, -16
+; RV32-NEXT: .cfi_def_cfa_offset 16
+; RV32-NEXT: sw a1, 12(sp)
+; RV32-NEXT: sw a0, 8(sp)
+; RV32-NEXT: addi a0, sp, 8
+; RV32-NEXT: vsetivli zero, 4, e64, m2, ta, ma
+; RV32-NEXT: vlse64.v v10, (a0), zero
+; RV32-NEXT: vsetvli zero, a2, e64, m2, ta, ma
+; RV32-NEXT: vsadd.vv v8, v8, v10, v0.t
+; RV32-NEXT: addi sp, sp, 16
+; RV32-NEXT: ret
+;
+; RV64-LABEL: vsadd_vx_v4i64:
+; RV64: # %bb.0:
+; RV64-NEXT: vsetvli zero, a1, e64, m2, ta, ma
+; RV64-NEXT: vsadd.vx v8, v8, a0, v0.t
+; RV64-NEXT: ret
+ %elt.head = insertelement <4 x i64> poison, i64 %b, i32 0
+ %vb = shufflevector <4 x i64> %elt.head, <4 x i64> poison, <4 x i32> zeroinitializer
+ %v = call <4 x i64> @llvm.vp.sadd.sat.v4i64(<4 x i64> %va, <4 x i64> %vb, <4 x i1> %m, i32 %evl)
+ ret <4 x i64> %v
+}
+
+define <4 x i64> @vsadd_vx_v4i64_unmasked(<4 x i64> %va, i64 %b, i32 zeroext %evl) {
+; RV32-LABEL: vsadd_vx_v4i64_unmasked:
+; RV32: # %bb.0:
+; RV32-NEXT: addi sp, sp, -16
+; RV32-NEXT: .cfi_def_cfa_offset 16
+; RV32-NEXT: sw a1, 12(sp)
+; RV32-NEXT: sw a0, 8(sp)
+; RV32-NEXT: addi a0, sp, 8
+; RV32-NEXT: vsetivli zero, 4, e64, m2, ta, ma
+; RV32-NEXT: vlse64.v v10, (a0), zero
+; RV32-NEXT: vsetvli zero, a2, e64, m2, ta, ma
+; RV32-NEXT: vsadd.vv v8, v8, v10
+; RV32-NEXT: addi sp, sp, 16
+; RV32-NEXT: ret
+;
+; RV64-LABEL: vsadd_vx_v4i64_unmasked:
+; RV64: # %bb.0:
+; RV64-NEXT: vsetvli zero, a1, e64, m2, ta, ma
+; RV64-NEXT: vsadd.vx v8, v8, a0
+; RV64-NEXT: ret
+ %elt.head = insertelement <4 x i64> poison, i64 %b, i32 0
+ %vb = shufflevector <4 x i64> %elt.head, <4 x i64> poison, <4 x i32> zeroinitializer
+ %head = insertelement <4 x i1> poison, i1 true, i32 0
+ %m = shufflevector <4 x i1> %head, <4 x i1> poison, <4 x i32> zeroinitializer
+ %v = call <4 x i64> @llvm.vp.sadd.sat.v4i64(<4 x i64> %va, <4 x i64> %vb, <4 x i1> %m, i32 %evl)
+ ret <4 x i64> %v
+}
+
+define <4 x i64> @vsadd_vi_v4i64(<4 x i64> %va, <4 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vsadd_vi_v4i64:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma
+; CHECK-NEXT: vsadd.vi v8, v8, -1, v0.t
+; CHECK-NEXT: ret
+ %elt.head = insertelement <4 x i64> poison, i64 -1, i32 0
+ %vb = shufflevector <4 x i64> %elt.head, <4 x i64> poison, <4 x i32> zeroinitializer
+ %v = call <4 x i64> @llvm.vp.sadd.sat.v4i64(<4 x i64> %va, <4 x i64> %vb, <4 x i1> %m, i32 %evl)
+ ret <4 x i64> %v
+}
+
+define <4 x i64> @vsadd_vi_v4i64_unmasked(<4 x i64> %va, i32 zeroext %evl) {
+; CHECK-LABEL: vsadd_vi_v4i64_unmasked:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma
+; CHECK-NEXT: vsadd.vi v8, v8, -1
+; CHECK-NEXT: ret
+ %elt.head = insertelement <4 x i64> poison, i64 -1, i32 0
+ %vb = shufflevector <4 x i64> %elt.head, <4 x i64> poison, <4 x i32> zeroinitializer
+ %head = insertelement <4 x i1> poison, i1 true, i32 0
+ %m = shufflevector <4 x i1> %head, <4 x i1> poison, <4 x i32> zeroinitializer
+ %v = call <4 x i64> @llvm.vp.sadd.sat.v4i64(<4 x i64> %va, <4 x i64> %vb, <4 x i1> %m, i32 %evl)
+ ret <4 x i64> %v
+}
+
+declare <8 x i64> @llvm.vp.sadd.sat.v8i64(<8 x i64>, <8 x i64>, <8 x i1>, i32)
+
+define <8 x i64> @vsadd_vv_v8i64(<8 x i64> %va, <8 x i64> %b, <8 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vsadd_vv_v8i64:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma
+; CHECK-NEXT: vsadd.vv v8, v8, v12, v0.t
+; CHECK-NEXT: ret
+ %v = call <8 x i64> @llvm.vp.sadd.sat.v8i64(<8 x i64> %va, <8 x i64> %b, <8 x i1> %m, i32 %evl)
+ ret <8 x i64> %v
+}
+
+define <8 x i64> @vsadd_vv_v8i64_unmasked(<8 x i64> %va, <8 x i64> %b, i32 zeroext %evl) {
+; CHECK-LABEL: vsadd_vv_v8i64_unmasked:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma
+; CHECK-NEXT: vsadd.vv v8, v8, v12
+; CHECK-NEXT: ret
+ %head = insertelement <8 x i1> poison, i1 true, i32 0
+ %m = shufflevector <8 x i1> %head, <8 x i1> poison, <8 x i32> zeroinitializer
+ %v = call <8 x i64> @llvm.vp.sadd.sat.v8i64(<8 x i64> %va, <8 x i64> %b, <8 x i1> %m, i32 %evl)
+ ret <8 x i64> %v
+}
+
+define <8 x i64> @vsadd_vx_v8i64(<8 x i64> %va, i64 %b, <8 x i1> %m, i32 zeroext %evl) {
+; RV32-LABEL: vsadd_vx_v8i64:
+; RV32: # %bb.0:
+; RV32-NEXT: addi sp, sp, -16
+; RV32-NEXT: .cfi_def_cfa_offset 16
+; RV32-NEXT: sw a1, 12(sp)
+; RV32-NEXT: sw a0, 8(sp)
+; RV32-NEXT: addi a0, sp, 8
+; RV32-NEXT: vsetivli zero, 8, e64, m4, ta, ma
+; RV32-NEXT: vlse64.v v12, (a0), zero
+; RV32-NEXT: vsetvli zero, a2, e64, m4, ta, ma
+; RV32-NEXT: vsadd.vv v8, v8, v12, v0.t
+; RV32-NEXT: addi sp, sp, 16
+; RV32-NEXT: ret
+;
+; RV64-LABEL: vsadd_vx_v8i64:
+; RV64: # %bb.0:
+; RV64-NEXT: vsetvli zero, a1, e64, m4, ta, ma
+; RV64-NEXT: vsadd.vx v8, v8, a0, v0.t
+; RV64-NEXT: ret
+ %elt.head = insertelement <8 x i64> poison, i64 %b, i32 0
+ %vb = shufflevector <8 x i64> %elt.head, <8 x i64> poison, <8 x i32> zeroinitializer
+ %v = call <8 x i64> @llvm.vp.sadd.sat.v8i64(<8 x i64> %va, <8 x i64> %vb, <8 x i1> %m, i32 %evl)
+ ret <8 x i64> %v
+}
+
+define <8 x i64> @vsadd_vx_v8i64_unmasked(<8 x i64> %va, i64 %b, i32 zeroext %evl) {
+; RV32-LABEL: vsadd_vx_v8i64_unmasked:
+; RV32: # %bb.0:
+; RV32-NEXT: addi sp, sp, -16
+; RV32-NEXT: .cfi_def_cfa_offset 16
+; RV32-NEXT: sw a1, 12(sp)
+; RV32-NEXT: sw a0, 8(sp)
+; RV32-NEXT: addi a0, sp, 8
+; RV32-NEXT: vsetivli zero, 8, e64, m4, ta, ma
+; RV32-NEXT: vlse64.v v12, (a0), zero
+; RV32-NEXT: vsetvli zero, a2, e64, m4, ta, ma
+; RV32-NEXT: vsadd.vv v8, v8, v12
+; RV32-NEXT: addi sp, sp, 16
+; RV32-NEXT: ret
+;
+; RV64-LABEL: vsadd_vx_v8i64_unmasked:
+; RV64: # %bb.0:
+; RV64-NEXT: vsetvli zero, a1, e64, m4, ta, ma
+; RV64-NEXT: vsadd.vx v8, v8, a0
+; RV64-NEXT: ret
+ %elt.head = insertelement <8 x i64> poison, i64 %b, i32 0
+ %vb = shufflevector <8 x i64> %elt.head, <8 x i64> poison, <8 x i32> zeroinitializer
+ %head = insertelement <8 x i1> poison, i1 true, i32 0
+ %m = shufflevector <8 x i1> %head, <8 x i1> poison, <8 x i32> zeroinitializer
+ %v = call <8 x i64> @llvm.vp.sadd.sat.v8i64(<8 x i64> %va, <8 x i64> %vb, <8 x i1> %m, i32 %evl)
+ ret <8 x i64> %v
+}
+
+define <8 x i64> @vsadd_vi_v8i64(<8 x i64> %va, <8 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vsadd_vi_v8i64:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma
+; CHECK-NEXT: vsadd.vi v8, v8, -1, v0.t
+; CHECK-NEXT: ret
+ %elt.head = insertelement <8 x i64> poison, i64 -1, i32 0
+ %vb = shufflevector <8 x i64> %elt.head, <8 x i64> poison, <8 x i32> zeroinitializer
+ %v = call <8 x i64> @llvm.vp.sadd.sat.v8i64(<8 x i64> %va, <8 x i64> %vb, <8 x i1> %m, i32 %evl)
+ ret <8 x i64> %v
+}
+
+define <8 x i64> @vsadd_vi_v8i64_unmasked(<8 x i64> %va, i32 zeroext %evl) {
+; CHECK-LABEL: vsadd_vi_v8i64_unmasked:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma
+; CHECK-NEXT: vsadd.vi v8, v8, -1
+; CHECK-NEXT: ret
+ %elt.head = insertelement <8 x i64> poison, i64 -1, i32 0
+ %vb = shufflevector <8 x i64> %elt.head, <8 x i64> poison, <8 x i32> zeroinitializer
+ %head = insertelement <8 x i1> poison, i1 true, i32 0
+ %m = shufflevector <8 x i1> %head, <8 x i1> poison, <8 x i32> zeroinitializer
+ %v = call <8 x i64> @llvm.vp.sadd.sat.v8i64(<8 x i64> %va, <8 x i64> %vb, <8 x i1> %m, i32 %evl)
+ ret <8 x i64> %v
+}
+
+declare <16 x i64> @llvm.vp.sadd.sat.v16i64(<16 x i64>, <16 x i64>, <16 x i1>, i32)
+
+define <16 x i64> @vsadd_vv_v16i64(<16 x i64> %va, <16 x i64> %b, <16 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vsadd_vv_v16i64:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
+; CHECK-NEXT: vsadd.vv v8, v8, v16, v0.t
+; CHECK-NEXT: ret
+ %v = call <16 x i64> @llvm.vp.sadd.sat.v16i64(<16 x i64> %va, <16 x i64> %b, <16 x i1> %m, i32 %evl)
+ ret <16 x i64> %v
+}
+
+define <16 x i64> @vsadd_vv_v16i64_unmasked(<16 x i64> %va, <16 x i64> %b, i32 zeroext %evl) {
+; CHECK-LABEL: vsadd_vv_v16i64_unmasked:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
+; CHECK-NEXT: vsadd.vv v8, v8, v16
+; CHECK-NEXT: ret
+ %head = insertelement <16 x i1> poison, i1 true, i32 0
+ %m = shufflevector <16 x i1> %head, <16 x i1> poison, <16 x i32> zeroinitializer
+ %v = call <16 x i64> @llvm.vp.sadd.sat.v16i64(<16 x i64> %va, <16 x i64> %b, <16 x i1> %m, i32 %evl)
+ ret <16 x i64> %v
+}
+
+define <16 x i64> @vsadd_vx_v16i64(<16 x i64> %va, i64 %b, <16 x i1> %m, i32 zeroext %evl) {
+; RV32-LABEL: vsadd_vx_v16i64:
+; RV32: # %bb.0:
+; RV32-NEXT: addi sp, sp, -16
+; RV32-NEXT: .cfi_def_cfa_offset 16
+; RV32-NEXT: sw a1, 12(sp)
+; RV32-NEXT: sw a0, 8(sp)
+; RV32-NEXT: addi a0, sp, 8
+; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, ma
+; RV32-NEXT: vlse64.v v16, (a0), zero
+; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, ma
+; RV32-NEXT: vsadd.vv v8, v8, v16, v0.t
+; RV32-NEXT: addi sp, sp, 16
+; RV32-NEXT: ret
+;
+; RV64-LABEL: vsadd_vx_v16i64:
+; RV64: # %bb.0:
+; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, ma
+; RV64-NEXT: vsadd.vx v8, v8, a0, v0.t
+; RV64-NEXT: ret
+ %elt.head = insertelement <16 x i64> poison, i64 %b, i32 0
+ %vb = shufflevector <16 x i64> %elt.head, <16 x i64> poison, <16 x i32> zeroinitializer
+ %v = call <16 x i64> @llvm.vp.sadd.sat.v16i64(<16 x i64> %va, <16 x i64> %vb, <16 x i1> %m, i32 %evl)
+ ret <16 x i64> %v
+}
+
+define <16 x i64> @vsadd_vx_v16i64_unmasked(<16 x i64> %va, i64 %b, i32 zeroext %evl) {
+; RV32-LABEL: vsadd_vx_v16i64_unmasked:
+; RV32: # %bb.0:
+; RV32-NEXT: addi sp, sp, -16
+; RV32-NEXT: .cfi_def_cfa_offset 16
+; RV32-NEXT: sw a1, 12(sp)
+; RV32-NEXT: sw a0, 8(sp)
+; RV32-NEXT: addi a0, sp, 8
+; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, ma
+; RV32-NEXT: vlse64.v v16, (a0), zero
+; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, ma
+; RV32-NEXT: vsadd.vv v8, v8, v16
+; RV32-NEXT: addi sp, sp, 16
+; RV32-NEXT: ret
+;
+; RV64-LABEL: vsadd_vx_v16i64_unmasked:
+; RV64: # %bb.0:
+; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, ma
+; RV64-NEXT: vsadd.vx v8, v8, a0
+; RV64-NEXT: ret
+ %elt.head = insertelement <16 x i64> poison, i64 %b, i32 0
+ %vb = shufflevector <16 x i64> %elt.head, <16 x i64> poison, <16 x i32> zeroinitializer
+ %head = insertelement <16 x i1> poison, i1 true, i32 0
+ %m = shufflevector <16 x i1> %head, <16 x i1> poison, <16 x i32> zeroinitializer
+ %v = call <16 x i64> @llvm.vp.sadd.sat.v16i64(<16 x i64> %va, <16 x i64> %vb, <16 x i1> %m, i32 %evl)
+ ret <16 x i64> %v
+}
+
+define <16 x i64> @vsadd_vi_v16i64(<16 x i64> %va, <16 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vsadd_vi_v16i64:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
+; CHECK-NEXT: vsadd.vi v8, v8, -1, v0.t
+; CHECK-NEXT: ret
+ %elt.head = insertelement <16 x i64> poison, i64 -1, i32 0
+ %vb = shufflevector <16 x i64> %elt.head, <16 x i64> poison, <16 x i32> zeroinitializer
+ %v = call <16 x i64> @llvm.vp.sadd.sat.v16i64(<16 x i64> %va, <16 x i64> %vb, <16 x i1> %m, i32 %evl)
+ ret <16 x i64> %v
+}
+
+define <16 x i64> @vsadd_vi_v16i64_unmasked(<16 x i64> %va, i32 zeroext %evl) {
+; CHECK-LABEL: vsadd_vi_v16i64_unmasked:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
+; CHECK-NEXT: vsadd.vi v8, v8, -1
+; CHECK-NEXT: ret
+ %elt.head = insertelement <16 x i64> poison, i64 -1, i32 0
+ %vb = shufflevector <16 x i64> %elt.head, <16 x i64> poison, <16 x i32> zeroinitializer
+ %head = insertelement <16 x i1> poison, i1 true, i32 0
+ %m = shufflevector <16 x i1> %head, <16 x i1> poison, <16 x i32> zeroinitializer
+ %v = call <16 x i64> @llvm.vp.sadd.sat.v16i64(<16 x i64> %va, <16 x i64> %vb, <16 x i1> %m, i32 %evl)
+ ret <16 x i64> %v
+}
+
+; Test that split-legalization works as expected.
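+; <32 x i64> is wider than the largest fixed-length type that fits a single
+; LMUL=8 register group here, so the operation is split into two <16 x i64>
+; halves: the low half runs with min(evl, 16) and the high half with
+; max(evl - 16, 0), computed branchlessly by the sltu/addi/and sequence below.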
+
+declare <32 x i64> @llvm.vp.sadd.sat.v32i64(<32 x i64>, <32 x i64>, <32 x i1>, i32)
+
+define <32 x i64> @vsadd_vx_v32i64(<32 x i64> %va, <32 x i1> %m, i32 zeroext %evl) {
+; RV32-LABEL: vsadd_vx_v32i64:
+; RV32: # %bb.0:
+; RV32-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
+; RV32-NEXT: vslidedown.vi v1, v0, 2
+; RV32-NEXT: li a1, 32
+; RV32-NEXT: vsetvli zero, a1, e32, m8, ta, ma
+; RV32-NEXT: li a2, 16
+; RV32-NEXT: vmv.v.i v24, -1
+; RV32-NEXT: mv a1, a0
+; RV32-NEXT: bltu a0, a2, .LBB108_2
+; RV32-NEXT: # %bb.1:
+; RV32-NEXT: li a1, 16
+; RV32-NEXT: .LBB108_2:
+; RV32-NEXT: vsetvli zero, a1, e64, m8, ta, ma
+; RV32-NEXT: vsadd.vv v8, v8, v24, v0.t
+; RV32-NEXT: addi a1, a0, -16
+; RV32-NEXT: sltu a0, a0, a1
+; RV32-NEXT: addi a0, a0, -1
+; RV32-NEXT: and a0, a0, a1
+; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma
+; RV32-NEXT: vmv1r.v v0, v1
+; RV32-NEXT: vsadd.vv v16, v16, v24, v0.t
+; RV32-NEXT: ret
+;
+; RV64-LABEL: vsadd_vx_v32i64:
+; RV64: # %bb.0:
+; RV64-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
+; RV64-NEXT: li a2, 16
+; RV64-NEXT: vslidedown.vi v24, v0, 2
+; RV64-NEXT: mv a1, a0
+; RV64-NEXT: bltu a0, a2, .LBB108_2
+; RV64-NEXT: # %bb.1:
+; RV64-NEXT: li a1, 16
+; RV64-NEXT: .LBB108_2:
+; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, ma
+; RV64-NEXT: vsadd.vi v8, v8, -1, v0.t
+; RV64-NEXT: addi a1, a0, -16
+; RV64-NEXT: sltu a0, a0, a1
+; RV64-NEXT: addi a0, a0, -1
+; RV64-NEXT: and a0, a0, a1
+; RV64-NEXT: vsetvli zero, a0, e64, m8, ta, ma
+; RV64-NEXT: vmv1r.v v0, v24
+; RV64-NEXT: vsadd.vi v16, v16, -1, v0.t
+; RV64-NEXT: ret
+ %elt.head = insertelement <32 x i64> poison, i64 -1, i32 0
+ %vb = shufflevector <32 x i64> %elt.head, <32 x i64> poison, <32 x i32> zeroinitializer
+ %v = call <32 x i64> @llvm.vp.sadd.sat.v32i64(<32 x i64> %va, <32 x i64> %vb, <32 x i1> %m, i32 %evl)
+ ret <32 x i64> %v
+}
+
+define <32 x i64> @vsadd_vi_v32i64_unmasked(<32 x i64> %va, i32 zeroext %evl) {
+; RV32-LABEL: vsadd_vi_v32i64_unmasked:
+; RV32: # %bb.0:
+; RV32-NEXT: li a1, 32
+; RV32-NEXT: vsetvli zero, a1, e32, m8, ta, ma
+; RV32-NEXT: li a2, 16
+; RV32-NEXT: vmv.v.i v24, -1
+; RV32-NEXT: mv a1, a0
+; RV32-NEXT: bltu a0, a2, .LBB109_2
+; RV32-NEXT: # %bb.1:
+; RV32-NEXT: li a1, 16
+; RV32-NEXT: .LBB109_2:
+; RV32-NEXT: vsetvli zero, a1, e64, m8, ta, ma
+; RV32-NEXT: vsadd.vv v8, v8, v24
+; RV32-NEXT: addi a1, a0, -16
+; RV32-NEXT: sltu a0, a0, a1
+; RV32-NEXT: addi a0, a0, -1
+; RV32-NEXT: and a0, a0, a1
+; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma
+; RV32-NEXT: vsadd.vv v16, v16, v24
+; RV32-NEXT: ret
+;
+; RV64-LABEL: vsadd_vi_v32i64_unmasked:
+; RV64: # %bb.0:
+; RV64-NEXT: li a2, 16
+; RV64-NEXT: mv a1, a0
+; RV64-NEXT: bltu a0, a2, .LBB109_2
+; RV64-NEXT: # %bb.1:
+; RV64-NEXT: li a1, 16
+; RV64-NEXT: .LBB109_2:
+; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, ma
+; RV64-NEXT: vsadd.vi v8, v8, -1
+; RV64-NEXT: addi a1, a0, -16
+; RV64-NEXT: sltu a0, a0, a1
+; RV64-NEXT: addi a0, a0, -1
+; RV64-NEXT: and a0, a0, a1
+; RV64-NEXT: vsetvli zero, a0, e64, m8, ta, ma
+; RV64-NEXT: vsadd.vi v16, v16, -1
+; RV64-NEXT: ret
+ %elt.head = insertelement <32 x i64> poison, i64 -1, i32 0
+ %vb = shufflevector <32 x i64> %elt.head, <32 x i64> poison, <32 x i32> zeroinitializer
+ %head = insertelement <32 x i1> poison, i1 true, i32 0
+ %m = shufflevector <32 x i1> %head, <32 x i1> poison, <32 x i32> zeroinitializer
+ %v = call <32 x i64> @llvm.vp.sadd.sat.v32i64(<32 x i64> %va, <32 x i64> %vb, <32 x i1> %m, i32 %evl)
+ ret <32 x i64> %v
+}
+
+; FIXME: We don't match vsadd.vi on RV32.
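+; RV32 instead materializes the all-ones splat with vmv.v.i at e32 and falls
+; back to vsadd.vv, as seen in the RV32 output below.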
+
+define <32 x i64> @vsadd_vx_v32i64_evl12(<32 x i64> %va, <32 x i1> %m) {
+; RV32-LABEL: vsadd_vx_v32i64_evl12:
+; RV32: # %bb.0:
+; RV32-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
+; RV32-NEXT: vslidedown.vi v1, v0, 2
+; RV32-NEXT: li a0, 32
+; RV32-NEXT: vsetvli zero, a0, e32, m8, ta, ma
+; RV32-NEXT: vmv.v.i v24, -1
+; RV32-NEXT: vsetivli zero, 12, e64, m8, ta, ma
+; RV32-NEXT: vsadd.vv v8, v8, v24, v0.t
+; RV32-NEXT: vsetivli zero, 0, e64, m8, ta, ma
+; RV32-NEXT: vmv1r.v v0, v1
+; RV32-NEXT: vsadd.vv v16, v16, v24, v0.t
+; RV32-NEXT: ret
+;
+; RV64-LABEL: vsadd_vx_v32i64_evl12:
+; RV64: # %bb.0:
+; RV64-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
+; RV64-NEXT: vslidedown.vi v24, v0, 2
+; RV64-NEXT: vsetivli zero, 12, e64, m8, ta, ma
+; RV64-NEXT: vsadd.vi v8, v8, -1, v0.t
+; RV64-NEXT: vsetivli zero, 0, e64, m8, ta, ma
+; RV64-NEXT: vmv1r.v v0, v24
+; RV64-NEXT: vsadd.vi v16, v16, -1, v0.t
+; RV64-NEXT: ret
+ %elt.head = insertelement <32 x i64> poison, i64 -1, i32 0
+ %vb = shufflevector <32 x i64> %elt.head, <32 x i64> poison, <32 x i32> zeroinitializer
+ %v = call <32 x i64> @llvm.vp.sadd.sat.v32i64(<32 x i64> %va, <32 x i64> %vb, <32 x i1> %m, i32 12)
+ ret <32 x i64> %v
+}
+
+define <32 x i64> @vsadd_vx_v32i64_evl27(<32 x i64> %va, <32 x i1> %m) {
+; RV32-LABEL: vsadd_vx_v32i64_evl27:
+; RV32: # %bb.0:
+; RV32-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
+; RV32-NEXT: vslidedown.vi v1, v0, 2
+; RV32-NEXT: li a0, 32
+; RV32-NEXT: vsetvli zero, a0, e32, m8, ta, ma
+; RV32-NEXT: vmv.v.i v24, -1
+; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, ma
+; RV32-NEXT: vsadd.vv v8, v8, v24, v0.t
+; RV32-NEXT: vsetivli zero, 11, e64, m8, ta, ma
+; RV32-NEXT: vmv1r.v v0, v1
+; RV32-NEXT: vsadd.vv v16, v16, v24, v0.t
+; RV32-NEXT: ret
+;
+; RV64-LABEL: vsadd_vx_v32i64_evl27:
+; RV64: # %bb.0:
+; RV64-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
+; RV64-NEXT: vslidedown.vi v24, v0, 2
+; RV64-NEXT: vsetivli zero, 16, e64, m8, ta, ma
+; RV64-NEXT: vsadd.vi v8, v8, -1, v0.t
+; RV64-NEXT: vsetivli zero, 11, e64, m8, ta, ma
+; RV64-NEXT: vmv1r.v v0, v24
+; RV64-NEXT: vsadd.vi v16, v16, -1, v0.t
+; RV64-NEXT: ret
+ %elt.head = insertelement <32 x i64> poison, i64 -1, i32 0
+ %vb = shufflevector <32 x i64> %elt.head, <32 x i64> poison, <32 x i32> zeroinitializer
+ %v = call <32 x i64> @llvm.vp.sadd.sat.v32i64(<32 x i64> %va, <32 x i64> %vb, <32 x i1> %m, i32 27)
+ ret <32 x i64> %v
+}
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vsaddu-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vsaddu-vp.ll
new file mode 100644
index 000000000000..6227f8abe599
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vsaddu-vp.ll
@@ -0,0 +1,1697 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=riscv32 -mattr=+v -verify-machineinstrs < %s \
+; RUN: | FileCheck %s --check-prefixes=CHECK,RV32
+; RUN: llc -mtriple=riscv64 -mattr=+v -verify-machineinstrs < %s \
+; RUN: | FileCheck %s --check-prefixes=CHECK,RV64
+
+declare <8 x i7> @llvm.vp.uadd.sat.v8i7(<8 x i7>, <8 x i7>, <8 x i1>, i32)
+
+define <8 x i7> @vsaddu_vv_v8i7(<8 x i7> %va, <8 x i7> %b, <8 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vsaddu_vv_v8i7:
+; CHECK: # %bb.0:
+; CHECK-NEXT: li a1, 127
+; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, ma
+; CHECK-NEXT: vand.vx v9, v9, a1
+; CHECK-NEXT: vand.vx v8, v8, a1
+; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma
+; CHECK-NEXT: vadd.vv v8, v8, v9, v0.t
+; CHECK-NEXT: vminu.vx v8, v8, a1, v0.t
+; CHECK-NEXT: ret
+ %v = call <8 x i7> @llvm.vp.uadd.sat.v8i7(<8 x i7> %va, <8 x i7> %b, <8 x i1> %m, i32 %evl)
+ ret <8 x i7> %v
+}
+
+declare <2 x i8> @llvm.vp.uadd.sat.v2i8(<2 x i8>, <2 x i8>, <2 x i1>, i32)
+
+define <2 x i8> @vsaddu_vv_v2i8(<2 x i8> %va, <2 x i8> %b, <2 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vsaddu_vv_v2i8:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma
+; CHECK-NEXT: vsaddu.vv v8, v8, v9, v0.t
+; CHECK-NEXT: ret
+ %v = call <2 x i8> @llvm.vp.uadd.sat.v2i8(<2 x i8> %va, <2 x i8> %b, <2 x i1> %m, i32 %evl)
+ ret <2 x i8> %v
+}
+
+define <2 x i8> @vsaddu_vv_v2i8_unmasked(<2 x i8> %va, <2 x i8> %b, i32 zeroext %evl) {
+; CHECK-LABEL: vsaddu_vv_v2i8_unmasked:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma
+; CHECK-NEXT: vsaddu.vv v8, v8, v9
+; CHECK-NEXT: ret
+ %head = insertelement <2 x i1> poison, i1 true, i32 0
+ %m = shufflevector <2 x i1> %head, <2 x i1> poison, <2 x i32> zeroinitializer
+ %v = call <2 x i8> @llvm.vp.uadd.sat.v2i8(<2 x i8> %va, <2 x i8> %b, <2 x i1> %m, i32 %evl)
+ ret <2 x i8> %v
+}
+
+define <2 x i8> @vsaddu_vx_v2i8(<2 x i8> %va, i8 %b, <2 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vsaddu_vx_v2i8:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
+; CHECK-NEXT: vsaddu.vx v8, v8, a0, v0.t
+; CHECK-NEXT: ret
+ %elt.head = insertelement <2 x i8> poison, i8 %b, i32 0
+ %vb = shufflevector <2 x i8> %elt.head, <2 x i8> poison, <2 x i32> zeroinitializer
+ %v = call <2 x i8> @llvm.vp.uadd.sat.v2i8(<2 x i8> %va, <2 x i8> %vb, <2 x i1> %m, i32 %evl)
+ ret <2 x i8> %v
+}
+
+define <2 x i8> @vsaddu_vx_v2i8_unmasked(<2 x i8> %va, i8 %b, i32 zeroext %evl) {
+; CHECK-LABEL: vsaddu_vx_v2i8_unmasked:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
+; CHECK-NEXT: vsaddu.vx v8, v8, a0
+; CHECK-NEXT: ret
+ %elt.head = insertelement <2 x i8> poison, i8 %b, i32 0
+ %vb = shufflevector <2 x i8> %elt.head, <2 x i8> poison, <2 x i32> zeroinitializer
+ %head = insertelement <2 x i1> poison, i1 true, i32 0
+ %m = shufflevector <2 x i1> %head, <2 x i1> poison, <2 x i32> zeroinitializer
+ %v = call <2 x i8> @llvm.vp.uadd.sat.v2i8(<2 x i8> %va, <2 x i8> %vb, <2 x i1> %m, i32 %evl)
+ ret <2 x i8> %v
+}
+
+define <2 x i8> @vsaddu_vi_v2i8(<2 x i8> %va, <2 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vsaddu_vi_v2i8:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma
+; CHECK-NEXT: vsaddu.vi v8, v8, -1, v0.t
+; CHECK-NEXT: ret
+ %elt.head = insertelement <2 x i8> poison, i8 -1, i32 0
+ %vb = shufflevector <2 x i8> %elt.head, <2 x i8> poison, <2 x i32> zeroinitializer
+ %v = call <2 x i8> @llvm.vp.uadd.sat.v2i8(<2 x i8> %va, <2 x i8> %vb, <2 x i1> %m, i32 %evl)
+ ret <2 x i8> %v
+}
+
+define <2 x i8> @vsaddu_vi_v2i8_unmasked(<2 x i8> %va, i32 zeroext %evl) {
+; CHECK-LABEL: vsaddu_vi_v2i8_unmasked:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma
+; CHECK-NEXT: vsaddu.vi v8, v8, -1
+; CHECK-NEXT: ret
+ %elt.head = insertelement <2 x i8> poison, i8 -1, i32 0
+ %vb = shufflevector <2 x i8> %elt.head, <2 x i8> poison, <2 x i32> zeroinitializer
+ %head = insertelement <2 x i1> poison, i1 true, i32 0
+ %m = shufflevector <2 x i1> %head, <2 x i1> poison, <2 x i32> zeroinitializer
+ %v = call <2 x i8> @llvm.vp.uadd.sat.v2i8(<2 x i8> %va, <2 x i8> %vb, <2 x i1> %m, i32 %evl)
+ ret <2 x i8> %v
+}
+
+declare <4 x i8> @llvm.vp.uadd.sat.v4i8(<4 x i8>, <4 x i8>, <4 x i1>, i32)
+
+define <4 x i8> @vsaddu_vv_v4i8(<4 x i8> %va, <4 x i8> %b, <4 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vsaddu_vv_v4i8:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma
+; CHECK-NEXT: vsaddu.vv v8, v8, v9, v0.t
+; CHECK-NEXT: ret
+ %v = call <4 x i8> @llvm.vp.uadd.sat.v4i8(<4 x i8> %va, <4 x i8> %b, <4 x i1> %m, i32 %evl)
+ ret <4 x i8> %v
+}
+
+define <4 x i8> @vsaddu_vv_v4i8_unmasked(<4 x i8> %va, <4 x i8> %b, i32 zeroext %evl) {
+; CHECK-LABEL: vsaddu_vv_v4i8_unmasked:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma
+; CHECK-NEXT: vsaddu.vv v8, v8, v9
+; CHECK-NEXT: ret
+ %head = insertelement <4 x i1> poison, i1 true, i32 0
+ %m = shufflevector <4 x i1> %head, <4 x i1> poison, <4 x i32> zeroinitializer
+ %v = call <4 x i8> @llvm.vp.uadd.sat.v4i8(<4 x i8> %va, <4 x i8> %b, <4 x i1> %m, i32 %evl)
+ ret <4 x i8> %v
+}
+
+define <4 x i8> @vsaddu_vx_v4i8(<4 x i8> %va, i8 %b, <4 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vsaddu_vx_v4i8:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma
+; CHECK-NEXT: vsaddu.vx v8, v8, a0, v0.t
+; CHECK-NEXT: ret
+ %elt.head = insertelement <4 x i8> poison, i8 %b, i32 0
+ %vb = shufflevector <4 x i8> %elt.head, <4 x i8> poison, <4 x i32> zeroinitializer
+ %v = call <4 x i8> @llvm.vp.uadd.sat.v4i8(<4 x i8> %va, <4 x i8> %vb, <4 x i1> %m, i32 %evl)
+ ret <4 x i8> %v
+}
+
+define <4 x i8> @vsaddu_vx_v4i8_commute(<4 x i8> %va, i8 %b, <4 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vsaddu_vx_v4i8_commute:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma
+; CHECK-NEXT: vsaddu.vx v8, v8, a0, v0.t
+; CHECK-NEXT: ret
+ %elt.head = insertelement <4 x i8> poison, i8 %b, i32 0
+ %vb = shufflevector <4 x i8> %elt.head, <4 x i8> poison, <4 x i32> zeroinitializer
+ %v = call <4 x i8> @llvm.vp.uadd.sat.v4i8(<4 x i8> %vb, <4 x i8> %va, <4 x i1> %m, i32 %evl)
+ ret <4 x i8> %v
+}
+
+define <4 x i8> @vsaddu_vx_v4i8_unmasked(<4 x i8> %va, i8 %b, i32 zeroext %evl) {
+; CHECK-LABEL: vsaddu_vx_v4i8_unmasked:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma
+; CHECK-NEXT: vsaddu.vx v8, v8, a0
+; CHECK-NEXT: ret
+ %elt.head = insertelement <4 x i8> poison, i8 %b, i32 0
+ %vb = shufflevector <4 x i8> %elt.head, <4 x i8> poison, <4 x i32> zeroinitializer
+ %head = insertelement <4 x i1> poison, i1 true, i32 0
+ %m = shufflevector <4 x i1> %head, <4 x i1> poison, <4 x i32> zeroinitializer
+ %v = call <4 x i8> @llvm.vp.uadd.sat.v4i8(<4 x i8> %va, <4 x i8> %vb, <4 x i1> %m, i32 %evl)
+ ret <4 x i8> %v
+}
+
+define <4 x i8> @vsaddu_vi_v4i8(<4 x i8> %va, <4 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vsaddu_vi_v4i8:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma
+; CHECK-NEXT: vsaddu.vi v8, v8, -1, v0.t
+; CHECK-NEXT: ret
+ %elt.head = insertelement <4 x i8> poison, i8 -1, i32 0
+ %vb = shufflevector <4 x i8> %elt.head, <4 x i8> poison, <4 x i32> zeroinitializer
+ %v = call <4 x i8> @llvm.vp.uadd.sat.v4i8(<4 x i8> %va, <4 x i8> %vb, <4 x i1> %m, i32 %evl)
+ ret <4 x i8> %v
+}
+
+define <4 x i8> @vsaddu_vi_v4i8_unmasked(<4 x i8> %va, i32 zeroext %evl) {
+; CHECK-LABEL: vsaddu_vi_v4i8_unmasked:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma
+; CHECK-NEXT: vsaddu.vi v8, v8, -1
+; CHECK-NEXT: ret
+ %elt.head = insertelement <4 x i8> poison, i8 -1, i32 0
+ %vb = shufflevector <4 x i8> %elt.head, <4 x i8> poison, <4 x i32> zeroinitializer
+ %head = insertelement <4 x i1> poison, i1 true, i32 0
+ %m = shufflevector <4 x i1> %head, <4 x i1> poison, <4 x i32> zeroinitializer
+ %v = call <4 x i8> @llvm.vp.uadd.sat.v4i8(<4 x i8> %va, <4 x i8> %vb, <4 x i1> %m, i32 %evl)
+ ret <4 x i8> %v
+}
+
+declare <5 x i8> @llvm.vp.uadd.sat.v5i8(<5 x i8>, <5 x i8>, <5 x i1>, i32)
+
+define <5 x i8> @vsaddu_vv_v5i8(<5 x i8> %va, <5 x i8> %b, <5 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vsaddu_vv_v5i8:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma
+; CHECK-NEXT: vsaddu.vv v8, v8, v9, v0.t
+; CHECK-NEXT: ret
+ %v = call <5 x i8> @llvm.vp.uadd.sat.v5i8(<5 x i8> %va, <5 x i8> %b, <5 x i1> %m, i32 %evl)
+ ret <5 x i8> %v
+}
+
+define <5 x i8> @vsaddu_vv_v5i8_unmasked(<5 x i8> %va, <5 x i8> %b, i32 zeroext %evl) {
+; CHECK-LABEL: vsaddu_vv_v5i8_unmasked:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma
+; CHECK-NEXT: vsaddu.vv v8, v8, v9
+; CHECK-NEXT: ret
+ %head = insertelement <5 x i1> poison, i1 true, i32 0
+ %m = shufflevector <5 x i1> %head, <5 x i1> poison, <5 x i32> zeroinitializer
+ %v = call <5 x i8> @llvm.vp.uadd.sat.v5i8(<5 x i8> %va, <5 x i8> %b, <5 x i1> %m, i32 %evl)
+ ret <5 x i8> %v
+}
+
+define <5 x i8> @vsaddu_vx_v5i8(<5 x i8> %va, i8 %b, <5 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vsaddu_vx_v5i8:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma
+; CHECK-NEXT: vsaddu.vx v8, v8, a0, v0.t
+; CHECK-NEXT: ret
+ %elt.head = insertelement <5 x i8> poison, i8 %b, i32 0
+ %vb = shufflevector <5 x i8> %elt.head, <5 x i8> poison, <5 x i32> zeroinitializer
+ %v = call <5 x i8> @llvm.vp.uadd.sat.v5i8(<5 x i8> %va, <5 x i8> %vb, <5 x i1> %m, i32 %evl)
+ ret <5 x i8> %v
+}
+
+define <5 x i8> @vsaddu_vx_v5i8_unmasked(<5 x i8> %va, i8 %b, i32 zeroext %evl) {
+; CHECK-LABEL: vsaddu_vx_v5i8_unmasked:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma
+; CHECK-NEXT: vsaddu.vx v8, v8, a0
+; CHECK-NEXT: ret
+ %elt.head = insertelement <5 x i8> poison, i8 %b, i32 0
+ %vb = shufflevector <5 x i8> %elt.head, <5 x i8> poison, <5 x i32> zeroinitializer
+ %head = insertelement <5 x i1> poison, i1 true, i32 0
+ %m = shufflevector <5 x i1> %head, <5 x i1> poison, <5 x i32> zeroinitializer
+ %v = call <5 x i8> @llvm.vp.uadd.sat.v5i8(<5 x i8> %va, <5 x i8> %vb, <5 x i1> %m, i32 %evl)
+ ret <5 x i8> %v
+}
+
+define <5 x i8> @vsaddu_vi_v5i8(<5 x i8> %va, <5 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vsaddu_vi_v5i8:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma
+; CHECK-NEXT: vsaddu.vi v8, v8, -1, v0.t
+; CHECK-NEXT: ret
+ %elt.head = insertelement <5 x i8> poison, i8 -1, i32 0
+ %vb = shufflevector <5 x i8> %elt.head, <5 x i8> poison, <5 x i32> zeroinitializer
+ %v = call <5 x i8> @llvm.vp.uadd.sat.v5i8(<5 x i8> %va, <5 x i8> %vb, <5 x i1> %m, i32 %evl)
+ ret <5 x i8> %v
+}
+
+define <5 x i8> @vsaddu_vi_v5i8_unmasked(<5 x i8> %va, i32 zeroext %evl) {
+; CHECK-LABEL: vsaddu_vi_v5i8_unmasked:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma
+; CHECK-NEXT: vsaddu.vi v8, v8, -1
+; CHECK-NEXT: ret
+ %elt.head = insertelement <5 x i8> poison, i8 -1, i32 0
+ %vb = shufflevector <5 x i8> %elt.head, <5 x i8> poison, <5 x i32> zeroinitializer
+ %head = insertelement <5 x i1> poison, i1 true, i32 0
+ %m = shufflevector <5 x i1> %head, <5 x i1> poison, <5 x i32> zeroinitializer
+ %v = call <5 x i8> @llvm.vp.uadd.sat.v5i8(<5 x i8> %va, <5 x i8> %vb, <5 x i1> %m, i32 %evl)
+ ret <5 x i8> %v
+}
+
+declare <8 x i8> @llvm.vp.uadd.sat.v8i8(<8 x i8>, <8 x i8>, <8 x i1>, i32)
+
+define <8 x i8> @vsaddu_vv_v8i8(<8 x i8> %va, <8 x i8> %b, <8 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vsaddu_vv_v8i8:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma
+; CHECK-NEXT: vsaddu.vv v8, v8, v9, v0.t
+; CHECK-NEXT: ret
+ %v = call <8 x i8> @llvm.vp.uadd.sat.v8i8(<8 x i8> %va, <8 x i8> %b, <8 x i1> %m, i32 %evl)
+ ret <8 x i8> %v
+}
+
+define <8 x i8> @vsaddu_vv_v8i8_unmasked(<8 x i8> %va, <8 x i8> %b, i32 zeroext %evl) {
+; CHECK-LABEL: vsaddu_vv_v8i8_unmasked:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma
+; CHECK-NEXT: vsaddu.vv v8, v8, v9
+; CHECK-NEXT: ret
+ %head = insertelement <8 x i1> poison, i1 true, i32 0
+ %m = shufflevector <8 x i1> %head, <8 x i1> poison, <8 x i32> zeroinitializer
+ %v = call <8 x i8> @llvm.vp.uadd.sat.v8i8(<8 x i8> %va, <8 x i8> %b, <8 x i1> %m, i32 %evl)
+ ret <8 x i8> %v
+}
+
+define <8 x i8> @vsaddu_vx_v8i8(<8 x i8> %va, i8 %b, <8 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vsaddu_vx_v8i8:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma
+; CHECK-NEXT: vsaddu.vx v8, v8, a0, v0.t
+; CHECK-NEXT: ret
+ %elt.head = insertelement <8 x i8> poison, i8 %b, i32 0
+ %vb = shufflevector <8 x i8> %elt.head, <8 x i8> poison, <8 x i32> zeroinitializer
+ %v = call <8 x i8> @llvm.vp.uadd.sat.v8i8(<8 x i8> %va, <8 x i8> %vb, <8 x i1> %m, i32 %evl)
+ ret <8 x i8> %v
+}
+
+define <8 x i8> @vsaddu_vx_v8i8_unmasked(<8 x i8> %va, i8 %b, i32 zeroext %evl) {
+; CHECK-LABEL: vsaddu_vx_v8i8_unmasked:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma
+; CHECK-NEXT: vsaddu.vx v8, v8, a0
+; CHECK-NEXT: ret
+ %elt.head = insertelement <8 x i8> poison, i8 %b, i32 0
+ %vb = shufflevector <8 x i8> %elt.head, <8 x i8> poison, <8 x i32> zeroinitializer
+ %head = insertelement <8 x i1> poison, i1 true, i32 0
+ %m = shufflevector <8 x i1> %head, <8 x i1> poison, <8 x i32> zeroinitializer
+ %v = call <8 x i8> @llvm.vp.uadd.sat.v8i8(<8 x i8> %va, <8 x i8> %vb, <8 x i1> %m, i32 %evl)
+ ret <8 x i8> %v
+}
+
+define <8 x i8> @vsaddu_vi_v8i8(<8 x i8> %va, <8 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vsaddu_vi_v8i8:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma
+; CHECK-NEXT: vsaddu.vi v8, v8, -1, v0.t
+; CHECK-NEXT: ret
+ %elt.head = insertelement <8 x i8> poison, i8 -1, i32 0
+ %vb = shufflevector <8 x i8> %elt.head, <8 x i8> poison, <8 x i32> zeroinitializer
+ %v = call <8 x i8> @llvm.vp.uadd.sat.v8i8(<8 x i8> %va, <8 x i8> %vb, <8 x i1> %m, i32 %evl)
+ ret <8 x i8> %v
+}
+
+define <8 x i8> @vsaddu_vi_v8i8_unmasked(<8 x i8> %va, i32 zeroext %evl) {
+; CHECK-LABEL: vsaddu_vi_v8i8_unmasked:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma
+; CHECK-NEXT: vsaddu.vi v8, v8, -1
+; CHECK-NEXT: ret
+ %elt.head = insertelement <8 x i8> poison, i8 -1, i32 0
+ %vb = shufflevector <8 x i8> %elt.head, <8 x i8> poison, <8 x i32> zeroinitializer
+ %head = insertelement <8 x i1> poison, i1 true, i32 0
+ %m = shufflevector <8 x i1> %head, <8 x i1> poison, <8 x i32> zeroinitializer
+ %v = call <8 x i8> @llvm.vp.uadd.sat.v8i8(<8 x i8> %va, <8 x i8> %vb, <8 x i1> %m, i32 %evl)
+ ret <8 x i8> %v
+}
+
+declare <16 x i8> @llvm.vp.uadd.sat.v16i8(<16 x i8>, <16 x i8>, <16 x i1>, i32)
+
+define <16 x i8> @vsaddu_vv_v16i8(<16 x i8> %va, <16 x i8> %b, <16 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vsaddu_vv_v16i8:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma
+; CHECK-NEXT: vsaddu.vv v8, v8, v9, v0.t
+; CHECK-NEXT: ret
+ %v = call <16 x i8> @llvm.vp.uadd.sat.v16i8(<16 x i8> %va, <16 x i8> %b, <16 x i1> %m, i32 %evl)
+ ret <16 x i8> %v
+}
+
+define <16 x i8> @vsaddu_vv_v16i8_unmasked(<16 x i8> %va, <16 x i8> %b, i32 zeroext %evl) {
+; CHECK-LABEL: vsaddu_vv_v16i8_unmasked:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma
+; CHECK-NEXT: vsaddu.vv v8, v8, v9
+; CHECK-NEXT: ret
+ %head = insertelement <16 x i1> poison, i1 true, i32 0
+ %m = shufflevector <16 x i1> %head, <16 x i1> poison, <16 x i32> zeroinitializer
+ %v = call <16 x i8> @llvm.vp.uadd.sat.v16i8(<16 x i8> %va, <16 x i8> %b, <16 x i1> %m, i32 %evl)
+ ret <16 x i8> %v
+}
+
+define <16 x i8> @vsaddu_vx_v16i8(<16 x i8> %va, i8 %b, <16 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vsaddu_vx_v16i8:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma
+; CHECK-NEXT: vsaddu.vx v8, v8, a0, v0.t
+; CHECK-NEXT: ret
+ %elt.head = insertelement <16 x i8> poison, i8 %b, i32 0
+ %vb = shufflevector <16 x i8> %elt.head, <16 x i8> poison, <16 x i32> zeroinitializer
+ %v = call <16 x i8> @llvm.vp.uadd.sat.v16i8(<16 x i8> %va, <16 x i8> %vb, <16 x i1> %m, i32 %evl)
+ ret <16 x i8> %v
+}
+
+define <16 x i8> @vsaddu_vx_v16i8_unmasked(<16 x i8> %va, i8 %b, i32 zeroext %evl) {
+; CHECK-LABEL: vsaddu_vx_v16i8_unmasked:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma
+; CHECK-NEXT: vsaddu.vx v8, v8, a0
+; CHECK-NEXT: ret
+ %elt.head = insertelement <16 x i8> poison, i8 %b, i32 0
+ %vb = shufflevector <16 x i8> %elt.head, <16 x i8> poison, <16 x i32> zeroinitializer
+ %head = insertelement <16 x i1> poison, i1 true, i32 0
+ %m = shufflevector <16 x i1> %head, <16 x i1> poison, <16 x i32> zeroinitializer
+ %v = call <16 x i8> @llvm.vp.uadd.sat.v16i8(<16 x i8> %va, <16 x i8> %vb, <16 x i1> %m, i32 %evl)
+ ret <16 x i8> %v
+}
+
+define <16 x i8> @vsaddu_vi_v16i8(<16 x i8> %va, <16 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vsaddu_vi_v16i8:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma
+; CHECK-NEXT: vsaddu.vi v8, v8, -1, v0.t
+; CHECK-NEXT: ret
+ %elt.head = insertelement <16 x i8> poison, i8 -1, i32 0
+ %vb = shufflevector <16 x i8> %elt.head, <16 x i8> poison, <16 x i32> zeroinitializer
+ %v = call <16 x i8> @llvm.vp.uadd.sat.v16i8(<16 x i8> %va, <16 x i8> %vb, <16 x i1> %m, i32 %evl)
+ ret <16 x i8> %v
+}
+
+define <16 x i8> @vsaddu_vi_v16i8_unmasked(<16 x i8> %va, i32 zeroext %evl) {
+; CHECK-LABEL: vsaddu_vi_v16i8_unmasked:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma
+; CHECK-NEXT: vsaddu.vi v8, v8, -1
+; CHECK-NEXT: ret
+ %elt.head = insertelement <16 x i8> poison, i8 -1, i32 0
+ %vb = shufflevector <16 x i8> %elt.head, <16 x i8> poison, <16 x i32> zeroinitializer
+ %head = insertelement <16 x i1> poison, i1 true, i32 0
+ %m = shufflevector <16 x i1> %head, <16 x i1> poison, <16 x i32> zeroinitializer
+ %v = call <16 x i8> @llvm.vp.uadd.sat.v16i8(<16 x i8> %va, <16 x i8> %vb, <16 x i1> %m, i32 %evl)
+ ret <16 x i8> %v
+}
+
+declare <256 x i8> @llvm.vp.uadd.sat.v258i8(<256 x i8>, <256 x i8>, <256 x i1>, i32)
+
+define <256 x i8> @vsaddu_vi_v258i8(<256 x i8> %va, <256 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vsaddu_vi_v258i8:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vmv1r.v v24, v0
+; CHECK-NEXT: li a2, 128
+; CHECK-NEXT: vsetvli zero, a2, e8, m8, ta, ma
+; CHECK-NEXT: vlm.v v0, (a0)
+; CHECK-NEXT: addi a0, a1, -128
+; CHECK-NEXT: sltu a3, a1, a0
+; CHECK-NEXT: addi a3, a3, -1
+; CHECK-NEXT: and a0, a3, a0
+; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, ma
+; CHECK-NEXT: vsaddu.vi v16, v16, -1, v0.t
+; CHECK-NEXT: bltu a1, a2, .LBB32_2
+; CHECK-NEXT: # %bb.1:
+; CHECK-NEXT: li a1, 128
+; CHECK-NEXT: .LBB32_2:
+; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, ma
+; CHECK-NEXT: vmv1r.v v0, v24
+; CHECK-NEXT: vsaddu.vi v8, v8, -1, v0.t
+; CHECK-NEXT: ret
+ %elt.head = insertelement <256 x i8> poison, i8 -1, i32 0
+ %vb = shufflevector <256 x i8> %elt.head, <256 x i8> poison, <256 x i32> zeroinitializer
+ %v = call <256 x i8> @llvm.vp.uadd.sat.v258i8(<256 x i8> %va, <256 x i8> %vb, <256 x i1> %m, i32 %evl)
+ ret <256 x i8> %v
+}
+
+define <256 x i8> @vsaddu_vi_v258i8_unmasked(<256 x i8> %va, i32 zeroext %evl) {
+; CHECK-LABEL: vsaddu_vi_v258i8_unmasked:
+; CHECK: # %bb.0:
+; CHECK-NEXT: li a2, 128
+; CHECK-NEXT: mv a1, a0
+; CHECK-NEXT: bltu a0, a2, .LBB33_2
+; CHECK-NEXT: # %bb.1:
+; CHECK-NEXT: li a1, 128
+; CHECK-NEXT: .LBB33_2:
+; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, ma
+; CHECK-NEXT: vsaddu.vi v8, v8, -1
+; CHECK-NEXT: addi a1, a0, -128
+; CHECK-NEXT: sltu a0, a0, a1
+; CHECK-NEXT: addi a0, a0, -1
+; CHECK-NEXT: and a0, a0, a1
+; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, ma
+; CHECK-NEXT: vsaddu.vi v16, v16, -1
+; CHECK-NEXT: ret
+ %elt.head = insertelement <256 x i8> poison, i8 -1, i32 0
+ %vb = shufflevector <256 x i8> %elt.head, <256 x i8> poison, <256 x i32> zeroinitializer
+ %head = insertelement <256 x i1> poison, i1 true, i32 0
+ %m = shufflevector <256 x i1> %head, <256 x i1> poison, <256 x i32> zeroinitializer
+ %v = call <256 x i8> @llvm.vp.uadd.sat.v258i8(<256 x i8> %va, <256 x i8> %vb, <256 x i1> %m, i32 %evl)
+ ret <256 x i8> %v
+}
+
+; Test splitting when the %evl is a known constant.
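+; With a constant EVL the clamp folds away: evl=129 becomes 128 lanes for the
+; low half and a single lane (vsetivli zero, 1) for the high half, while
+; evl=128 leaves the high half with an AVL of zero.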
+
+define <256 x i8> @vsaddu_vi_v258i8_evl129(<256 x i8> %va, <256 x i1> %m) {
+; CHECK-LABEL: vsaddu_vi_v258i8_evl129:
+; CHECK: # %bb.0:
+; CHECK-NEXT: li a1, 128
+; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, ma
+; CHECK-NEXT: vlm.v v24, (a0)
+; CHECK-NEXT: vsaddu.vi v8, v8, -1, v0.t
+; CHECK-NEXT: vsetivli zero, 1, e8, m8, ta, ma
+; CHECK-NEXT: vmv1r.v v0, v24
+; CHECK-NEXT: vsaddu.vi v16, v16, -1, v0.t
+; CHECK-NEXT: ret
+ %elt.head = insertelement <256 x i8> poison, i8 -1, i32 0
+ %vb = shufflevector <256 x i8> %elt.head, <256 x i8> poison, <256 x i32> zeroinitializer
+ %v = call <256 x i8> @llvm.vp.uadd.sat.v258i8(<256 x i8> %va, <256 x i8> %vb, <256 x i1> %m, i32 129)
+ ret <256 x i8> %v
+}
+
+; FIXME: The upper half is doing nothing.
+
+define <256 x i8> @vsaddu_vi_v258i8_evl128(<256 x i8> %va, <256 x i1> %m) {
+; CHECK-LABEL: vsaddu_vi_v258i8_evl128:
+; CHECK: # %bb.0:
+; CHECK-NEXT: li a1, 128
+; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, ma
+; CHECK-NEXT: vlm.v v24, (a0)
+; CHECK-NEXT: vsaddu.vi v8, v8, -1, v0.t
+; CHECK-NEXT: vsetivli zero, 0, e8, m8, ta, ma
+; CHECK-NEXT: vmv1r.v v0, v24
+; CHECK-NEXT: vsaddu.vi v16, v16, -1, v0.t
+; CHECK-NEXT: ret
+ %elt.head = insertelement <256 x i8> poison, i8 -1, i32 0
+ %vb = shufflevector <256 x i8> %elt.head, <256 x i8> poison, <256 x i32> zeroinitializer
+ %v = call <256 x i8> @llvm.vp.uadd.sat.v258i8(<256 x i8> %va, <256 x i8> %vb, <256 x i1> %m, i32 128)
+ ret <256 x i8> %v
+}
+
+declare <2 x i16> @llvm.vp.uadd.sat.v2i16(<2 x i16>, <2 x i16>, <2 x i1>, i32)
+
+define <2 x i16> @vsaddu_vv_v2i16(<2 x i16> %va, <2 x i16> %b, <2 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vsaddu_vv_v2i16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
+; CHECK-NEXT: vsaddu.vv v8, v8, v9, v0.t
+; CHECK-NEXT: ret
+ %v = call <2 x i16> @llvm.vp.uadd.sat.v2i16(<2 x i16> %va, <2 x i16> %b, <2 x i1> %m, i32 %evl)
+ ret <2 x i16> %v
+}
+
+define <2 x i16> @vsaddu_vv_v2i16_unmasked(<2 x i16> %va, <2 x i16> %b, i32 zeroext %evl) {
+; CHECK-LABEL: vsaddu_vv_v2i16_unmasked:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
+; CHECK-NEXT: vsaddu.vv v8, v8, v9
+; CHECK-NEXT: ret
+ %head = insertelement <2 x i1> poison, i1 true, i32 0
+ %m = shufflevector <2 x i1> %head, <2 x i1> poison, <2 x i32> zeroinitializer
+ %v = call <2 x i16> @llvm.vp.uadd.sat.v2i16(<2 x i16> %va, <2 x i16> %b, <2 x i1> %m, i32 %evl)
+ ret <2 x i16> %v
+}
+
+define <2 x i16> @vsaddu_vx_v2i16(<2 x i16> %va, i16 %b, <2 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vsaddu_vx_v2i16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
+; CHECK-NEXT: vsaddu.vx v8, v8, a0, v0.t
+; CHECK-NEXT: ret
+ %elt.head = insertelement <2 x i16> poison, i16 %b, i32 0
+ %vb = shufflevector <2 x i16> %elt.head, <2 x i16> poison, <2 x i32> zeroinitializer
+ %v = call <2 x i16> @llvm.vp.uadd.sat.v2i16(<2 x i16> %va, <2 x i16> %vb, <2 x i1> %m, i32 %evl)
+ ret <2 x i16> %v
+}
+
+define <2 x i16> @vsaddu_vx_v2i16_unmasked(<2 x i16> %va, i16 %b, i32 zeroext %evl) {
+; CHECK-LABEL: vsaddu_vx_v2i16_unmasked:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
+; CHECK-NEXT: vsaddu.vx v8, v8, a0
+; CHECK-NEXT: ret
+ %elt.head = insertelement <2 x i16> poison, i16 %b, i32 0
+ %vb = shufflevector <2 x i16> %elt.head, <2 x i16> poison, <2 x i32> zeroinitializer
+ %head = insertelement <2 x i1> poison, i1 true, i32 0
+ %m = shufflevector <2 x i1> %head, <2 x i1> poison, <2 x i32> zeroinitializer
+ %v = call <2 x i16> @llvm.vp.uadd.sat.v2i16(<2 x i16> %va, <2 x i16> %vb, <2 x i1> %m, i32 %evl)
+ ret <2 x i16> %v
+}
+
+define <2 x i16> @vsaddu_vi_v2i16(<2 x i16> %va, <2 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vsaddu_vi_v2i16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
+; CHECK-NEXT: vsaddu.vi v8, v8, -1, v0.t
+; CHECK-NEXT: ret
+ %elt.head = insertelement <2 x i16> poison, i16 -1, i32 0
+ %vb = shufflevector <2 x i16> %elt.head, <2 x i16> poison, <2 x i32> zeroinitializer
+ %v = call <2 x i16> @llvm.vp.uadd.sat.v2i16(<2 x i16> %va, <2 x i16> %vb, <2 x i1> %m, i32 %evl)
+ ret <2 x i16> %v
+}
+
+define <2 x i16> @vsaddu_vi_v2i16_unmasked(<2 x i16> %va, i32 zeroext %evl) {
+; CHECK-LABEL: vsaddu_vi_v2i16_unmasked:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
+; CHECK-NEXT: vsaddu.vi v8, v8, -1
+; CHECK-NEXT: ret
+ %elt.head = insertelement <2 x i16> poison, i16 -1, i32 0
+ %vb = shufflevector <2 x i16> %elt.head, <2 x i16> poison, <2 x i32> zeroinitializer
+ %head = insertelement <2 x i1> poison, i1 true, i32 0
+ %m = shufflevector <2 x i1> %head, <2 x i1> poison, <2 x i32> zeroinitializer
+ %v = call <2 x i16> @llvm.vp.uadd.sat.v2i16(<2 x i16> %va, <2 x i16> %vb, <2 x i1> %m, i32 %evl)
+ ret <2 x i16> %v
+}
+
+declare <4 x i16> @llvm.vp.uadd.sat.v4i16(<4 x i16>, <4 x i16>, <4 x i1>, i32)
+
+define <4 x i16> @vsaddu_vv_v4i16(<4 x i16> %va, <4 x i16> %b, <4 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vsaddu_vv_v4i16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma
+; CHECK-NEXT: vsaddu.vv v8, v8, v9, v0.t
+; CHECK-NEXT: ret
+ %v = call <4 x i16> @llvm.vp.uadd.sat.v4i16(<4 x i16> %va, <4 x i16> %b, <4 x i1> %m, i32 %evl)
+ ret <4 x i16> %v
+}
+
+define <4 x i16> @vsaddu_vv_v4i16_unmasked(<4 x i16> %va, <4 x i16> %b, i32 zeroext %evl) {
+; CHECK-LABEL: vsaddu_vv_v4i16_unmasked:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma
+; CHECK-NEXT: vsaddu.vv v8, v8, v9
+; CHECK-NEXT: ret
+ %head = insertelement <4 x i1> poison, i1 true, i32 0
+ %m = shufflevector <4 x i1> %head, <4 x i1> poison, <4 x i32> zeroinitializer
+ %v = call <4 x i16> @llvm.vp.uadd.sat.v4i16(<4 x i16> %va, <4 x i16> %b, <4 x i1> %m, i32 %evl)
+ ret <4 x i16> %v
+}
+
+define <4 x i16> @vsaddu_vx_v4i16(<4 x i16> %va, i16 %b, <4 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vsaddu_vx_v4i16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
+; CHECK-NEXT: vsaddu.vx v8, v8, a0, v0.t
+; CHECK-NEXT: ret
+ %elt.head = insertelement <4 x i16> poison, i16 %b, i32 0
+ %vb = shufflevector <4 x i16> %elt.head, <4 x i16> poison, <4 x i32> zeroinitializer
+ %v = call <4 x i16> @llvm.vp.uadd.sat.v4i16(<4 x i16> %va, <4 x i16> %vb, <4 x i1> %m, i32 %evl)
+ ret <4 x i16> %v
+}
+
+define <4 x i16> @vsaddu_vx_v4i16_unmasked(<4 x i16> %va, i16 %b, i32 zeroext %evl) {
+; CHECK-LABEL: vsaddu_vx_v4i16_unmasked:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
+; CHECK-NEXT: vsaddu.vx v8, v8, a0
+; CHECK-NEXT: ret
+ %elt.head = insertelement <4 x i16> poison, i16 %b, i32 0
+ %vb = shufflevector <4 x i16> %elt.head, <4 x i16> poison, <4 x i32> zeroinitializer
+ %head = insertelement <4 x i1> poison, i1 true, i32 0
+ %m = shufflevector <4 x i1> %head, <4 x i1> poison, <4 x i32> zeroinitializer
+ %v = call <4 x i16> @llvm.vp.uadd.sat.v4i16(<4 x i16> %va, <4 x i16> %vb, <4 x i1> %m, i32 %evl)
+ ret <4 x i16> %v
+}
+
+define <4 x i16> @vsaddu_vi_v4i16(<4 x i16> %va, <4 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vsaddu_vi_v4i16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma
+; CHECK-NEXT: vsaddu.vi v8, v8, -1, v0.t
+; CHECK-NEXT: ret
+ %elt.head = insertelement <4 x i16> poison, i16 -1, i32 0
+ %vb = shufflevector <4 x i16> %elt.head, <4 x i16> poison, <4 x i32> zeroinitializer
+ %v = call <4 x i16> @llvm.vp.uadd.sat.v4i16(<4 x i16> %va, <4 x i16> %vb, <4 x i1> %m, i32 %evl)
+ ret <4 x i16> %v
+}
+
+define <4 x i16> @vsaddu_vi_v4i16_unmasked(<4 x i16> %va, i32 zeroext %evl) {
+; CHECK-LABEL: vsaddu_vi_v4i16_unmasked:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma
+; CHECK-NEXT: vsaddu.vi v8, v8, -1
+; CHECK-NEXT: ret
+ %elt.head = insertelement <4 x i16> poison, i16 -1, i32 0
+ %vb = shufflevector <4 x i16> %elt.head, <4 x i16> poison, <4 x i32> zeroinitializer
+ %head = insertelement <4 x i1> poison, i1 true, i32 0
+ %m = shufflevector <4 x i1> %head, <4 x i1> poison, <4 x i32> zeroinitializer
+ %v = call <4 x i16> @llvm.vp.uadd.sat.v4i16(<4 x i16> %va, <4 x i16> %vb, <4 x i1> %m, i32 %evl)
+ ret <4 x i16> %v
+}
+
+declare <8 x i16> @llvm.vp.uadd.sat.v8i16(<8 x i16>, <8 x i16>, <8 x i1>, i32)
+
+define <8 x i16> @vsaddu_vv_v8i16(<8 x i16> %va, <8 x i16> %b, <8 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vsaddu_vv_v8i16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma
+; CHECK-NEXT: vsaddu.vv v8, v8, v9, v0.t
+; CHECK-NEXT: ret
+ %v = call <8 x i16> @llvm.vp.uadd.sat.v8i16(<8 x i16> %va, <8 x i16> %b, <8 x i1> %m, i32 %evl)
+ ret <8 x i16> %v
+}
+
+define <8 x i16> @vsaddu_vv_v8i16_unmasked(<8 x i16> %va, <8 x i16> %b, i32 zeroext %evl) {
+; CHECK-LABEL: vsaddu_vv_v8i16_unmasked:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma
+; CHECK-NEXT: vsaddu.vv v8, v8, v9
+; CHECK-NEXT: ret
+ %head = insertelement <8 x i1> poison, i1 true, i32 0
+ %m = shufflevector <8 x i1> %head, <8 x i1> poison, <8 x i32> zeroinitializer
+ %v = call <8 x i16> @llvm.vp.uadd.sat.v8i16(<8 x i16> %va, <8 x i16> %b, <8 x i1> %m, i32 %evl)
+ ret <8 x i16> %v
+}
+
+define <8 x i16> @vsaddu_vx_v8i16(<8 x i16> %va, i16 %b, <8 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vsaddu_vx_v8i16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
+; CHECK-NEXT: vsaddu.vx v8, v8, a0, v0.t
+; CHECK-NEXT: ret
+ %elt.head = insertelement <8 x i16> poison, i16 %b, i32 0
+ %vb = shufflevector <8 x i16> %elt.head, <8 x i16> poison, <8 x i32> zeroinitializer
+ %v = call <8 x i16> @llvm.vp.uadd.sat.v8i16(<8 x i16> %va, <8 x i16> %vb, <8 x i1> %m, i32 %evl)
+ ret <8 x i16> %v
+}
+
+define <8 x i16> @vsaddu_vx_v8i16_unmasked(<8 x i16> %va, i16 %b, i32 zeroext %evl) {
+; CHECK-LABEL: vsaddu_vx_v8i16_unmasked:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
+; CHECK-NEXT: vsaddu.vx v8, v8, a0
+; CHECK-NEXT: ret
+ %elt.head = insertelement <8 x i16> poison, i16 %b, i32 0
+ %vb = shufflevector <8 x i16> %elt.head, <8 x i16> poison, <8 x i32> zeroinitializer
+ %head = insertelement <8 x i1> poison, i1 true, i32 0
+ %m = shufflevector <8 x i1> %head, <8 x i1> poison, <8 x i32> zeroinitializer
+ %v = call <8 x i16> @llvm.vp.uadd.sat.v8i16(<8 x i16> %va, <8 x i16> %vb, <8 x i1> %m, i32 %evl)
+ ret <8 x i16> %v
+}
+
+define <8 x i16> @vsaddu_vi_v8i16(<8 x i16> %va, <8 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vsaddu_vi_v8i16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma
+; CHECK-NEXT: vsaddu.vi v8, v8, -1, v0.t
+; CHECK-NEXT: ret
+ %elt.head = insertelement <8 x i16> poison, i16 -1, i32 0
+ %vb = shufflevector <8 x i16> %elt.head, <8 x i16> poison, <8 x i32> zeroinitializer
+ %v = call <8 x i16> @llvm.vp.uadd.sat.v8i16(<8 x i16> %va, <8 x i16> %vb, <8 x i1> %m, i32 %evl)
+ ret <8 x i16> %v
+}
+
+define <8 x i16> @vsaddu_vi_v8i16_unmasked(<8 x i16> %va, i32 zeroext %evl) {
+; CHECK-LABEL: vsaddu_vi_v8i16_unmasked:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma
+; CHECK-NEXT: vsaddu.vi v8, v8, -1
+; CHECK-NEXT: ret
+ %elt.head = insertelement <8 x i16> poison, i16 -1, i32 0
+ %vb = shufflevector <8 x i16> %elt.head, <8 x i16> poison, <8 x i32> zeroinitializer
+ %head = insertelement <8 x i1> poison, i1 true, i32 0
+ %m = shufflevector <8 x i1> %head, <8 x i1> poison, <8 x i32> zeroinitializer
+ %v = call <8 x i16> @llvm.vp.uadd.sat.v8i16(<8 x i16> %va, <8 x i16> %vb, <8 x i1> %m, i32 %evl)
+ ret <8 x i16> %v
+}
+
+declare <16 x i16> @llvm.vp.uadd.sat.v16i16(<16 x i16>, <16 x i16>, <16 x i1>, i32)
+
+define <16 x i16> @vsaddu_vv_v16i16(<16 x i16> %va, <16 x i16> %b, <16 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vsaddu_vv_v16i16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma
+; CHECK-NEXT: vsaddu.vv v8, v8, v10, v0.t
+; CHECK-NEXT: ret
+ %v = call <16 x i16> @llvm.vp.uadd.sat.v16i16(<16 x i16> %va, <16 x i16> %b, <16 x i1> %m, i32 %evl)
+ ret <16 x i16> %v
+}
+
+define <16 x i16> @vsaddu_vv_v16i16_unmasked(<16 x i16> %va, <16 x i16> %b, i32 zeroext %evl) {
+; CHECK-LABEL: vsaddu_vv_v16i16_unmasked:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma
+; CHECK-NEXT: vsaddu.vv v8, v8, v10
+; CHECK-NEXT: ret
+ %head = insertelement <16 x i1> poison, i1 true, i32 0
+ %m = shufflevector <16 x i1> %head, <16 x i1> poison, <16 x i32> zeroinitializer
+ %v = call <16 x i16> @llvm.vp.uadd.sat.v16i16(<16 x i16> %va, <16 x i16> %b, <16 x i1> %m, i32 %evl)
+ ret <16 x i16> %v
+}
+
+define <16 x i16> @vsaddu_vx_v16i16(<16 x i16> %va, i16 %b, <16 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vsaddu_vx_v16i16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma
+; CHECK-NEXT: vsaddu.vx v8, v8, a0, v0.t
+; CHECK-NEXT: ret
+ %elt.head = insertelement <16 x i16> poison, i16 %b, i32 0
+ %vb = shufflevector <16 x i16> %elt.head, <16 x i16> poison, <16 x i32> zeroinitializer
+ %v = call <16 x i16> @llvm.vp.uadd.sat.v16i16(<16 x i16> %va, <16 x i16> %vb, <16 x i1> %m, i32 %evl)
+ ret <16 x i16> %v
+}
+
+define <16 x i16> @vsaddu_vx_v16i16_unmasked(<16 x i16> %va, i16 %b, i32 zeroext %evl) {
+; CHECK-LABEL: vsaddu_vx_v16i16_unmasked:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma
+; CHECK-NEXT: vsaddu.vx v8, v8, a0
+; CHECK-NEXT: ret
+ %elt.head = insertelement <16 x i16> poison, i16 %b, i32 0
+ %vb = shufflevector <16 x i16> %elt.head, <16 x i16> poison, <16 x i32> zeroinitializer
+ %head = insertelement <16 x i1> poison, i1 true, i32 0
+ %m = shufflevector <16 x i1> %head, <16 x i1> poison, <16 x i32> zeroinitializer
+ %v = call <16 x i16> @llvm.vp.uadd.sat.v16i16(<16 x i16> %va, <16 x i16> %vb, <16 x i1> %m, i32 %evl)
+ ret <16 x i16> %v
+}
+
+define <16 x i16> @vsaddu_vi_v16i16(<16 x i16> %va, <16 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vsaddu_vi_v16i16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma
+; CHECK-NEXT: vsaddu.vi v8, v8, -1, v0.t
+; CHECK-NEXT: ret
+ %elt.head = insertelement <16 x i16> poison, i16 -1, i32 0
+ %vb = shufflevector <16 x i16> %elt.head, <16 x i16> poison, <16 x i32> zeroinitializer
+ %v = call <16 x i16> @llvm.vp.uadd.sat.v16i16(<16 x i16> %va, <16 x i16> %vb, <16 x i1> %m, i32 %evl)
+ ret <16 x i16> %v
+}
+
+define <16 x i16> @vsaddu_vi_v16i16_unmasked(<16 x i16> %va, i32 zeroext %evl) {
+; CHECK-LABEL: vsaddu_vi_v16i16_unmasked:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma
+; CHECK-NEXT: vsaddu.vi v8, v8, -1
+; CHECK-NEXT: ret
+ %elt.head = insertelement <16 x i16> poison, i16 -1, i32 0
+ %vb = shufflevector <16 x i16> %elt.head, <16 x i16> poison, <16 x i32> zeroinitializer
+ %head = insertelement <16 x i1> poison, i1 true, i32 0
+ %m = shufflevector <16 x i1> %head, <16 x i1> poison, <16 x i32> zeroinitializer
+ %v = call <16 x i16> @llvm.vp.uadd.sat.v16i16(<16 x i16> %va, <16 x i16> %vb, <16 x i1> %m, i32 %evl)
+ ret <16 x i16> %v
+}
+
+declare <2 x i32> @llvm.vp.uadd.sat.v2i32(<2 x i32>, <2 x i32>, <2 x i1>, i32)
+
+define <2 x i32> @vsaddu_vv_v2i32(<2 x i32> %va, <2 x i32> %b, <2 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vsaddu_vv_v2i32:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma
+; CHECK-NEXT: vsaddu.vv v8, v8, v9, v0.t
+; CHECK-NEXT: ret
+ %v = call <2 x i32> @llvm.vp.uadd.sat.v2i32(<2 x i32> %va, <2 x i32> %b, <2 x i1> %m, i32 %evl)
+ ret <2 x i32> %v
+}
+
+define <2 x i32> @vsaddu_vv_v2i32_unmasked(<2 x i32> %va, <2 x i32> %b, i32 zeroext %evl) {
+; CHECK-LABEL: vsaddu_vv_v2i32_unmasked:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma
+; CHECK-NEXT: vsaddu.vv v8, v8, v9
+; CHECK-NEXT: ret
+ %head = insertelement <2 x i1> poison, i1 true, i32 0
+ %m = shufflevector <2 x i1> %head, <2 x i1> poison, <2 x i32> zeroinitializer
+ %v = call <2 x i32> @llvm.vp.uadd.sat.v2i32(<2 x i32> %va, <2 x i32> %b, <2 x i1> %m, i32 %evl)
+ ret <2 x i32> %v
+}
+
+define <2 x i32> @vsaddu_vx_v2i32(<2 x i32> %va, i32 %b, <2 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vsaddu_vx_v2i32:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
+; CHECK-NEXT: vsaddu.vx v8, v8, a0, v0.t
+; CHECK-NEXT: ret
+ %elt.head = insertelement <2 x i32> poison, i32 %b, i32 0
+ %vb = shufflevector <2 x i32> %elt.head, <2 x i32> poison, <2 x i32> zeroinitializer
+ %v = call <2 x i32> @llvm.vp.uadd.sat.v2i32(<2 x i32> %va, <2 x i32> %vb, <2 x i1> %m, i32 %evl)
+ ret <2 x i32> %v
+}
+
+define <2 x i32> @vsaddu_vx_v2i32_unmasked(<2 x i32> %va, i32 %b, i32 zeroext %evl) {
+; CHECK-LABEL: vsaddu_vx_v2i32_unmasked:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
+; CHECK-NEXT: vsaddu.vx v8, v8, a0
+; CHECK-NEXT: ret
+ %elt.head = insertelement <2 x i32> poison, i32 %b, i32 0
+ %vb = shufflevector <2 x i32> %elt.head, <2 x i32> poison, <2 x i32> zeroinitializer
+ %head = insertelement <2 x i1> poison, i1 true, i32 0
+ %m = shufflevector <2 x i1> %head, <2 x i1> poison, <2 x i32> zeroinitializer
+ %v = call <2 x i32> @llvm.vp.uadd.sat.v2i32(<2 x i32> %va, <2 x i32> %vb, <2 x i1> %m, i32 %evl)
+ ret <2 x i32> %v
+}
+
+define <2 x i32> @vsaddu_vi_v2i32(<2 x i32> %va, <2 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vsaddu_vi_v2i32:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma
+; CHECK-NEXT: vsaddu.vi v8, v8, -1, v0.t
+; CHECK-NEXT: ret
+ %elt.head = insertelement <2 x i32> poison, i32 -1, i32 0
+ %vb = shufflevector <2 x i32> %elt.head, <2 x i32> poison, <2 x i32> zeroinitializer
+ %v = call <2 x i32> @llvm.vp.uadd.sat.v2i32(<2 x i32> %va, <2 x i32> %vb, <2 x i1> %m, i32 %evl)
+ ret <2 x i32> %v
+}
+
+define <2 x i32> @vsaddu_vi_v2i32_unmasked(<2 x i32> %va, i32 zeroext %evl) {
+; CHECK-LABEL: vsaddu_vi_v2i32_unmasked:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma
+; CHECK-NEXT: vsaddu.vi v8, v8, -1
+; CHECK-NEXT: ret
+ %elt.head = insertelement <2 x i32> poison, i32 -1, i32 0
+ %vb = shufflevector <2 x i32> %elt.head, <2 x i32> poison, <2 x i32> zeroinitializer
+ %head = insertelement <2 x i1> poison, i1 true, i32 0
+ %m = shufflevector <2 x i1> %head, <2 x i1> poison, <2 x i32> zeroinitializer
+ %v = call <2 x i32> @llvm.vp.uadd.sat.v2i32(<2 x i32> %va, <2 x i32> %vb, <2 x i1> %m, i32 %evl)
+ ret <2 x i32> %v
+}
+
+declare <4 x i32> @llvm.vp.uadd.sat.v4i32(<4 x i32>, <4 x i32>, <4 x i1>, i32)
+
+define <4 x i32> @vsaddu_vv_v4i32(<4 x i32> %va, <4 x i32> %b, <4 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vsaddu_vv_v4i32:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma
+; CHECK-NEXT: vsaddu.vv v8, v8, v9, v0.t
+; CHECK-NEXT: ret
+ %v = call <4 x i32> @llvm.vp.uadd.sat.v4i32(<4 x i32> %va, <4 x i32> %b, <4 x i1> %m, i32 %evl)
+ ret <4 x i32> %v
+}
+
+define <4 x i32> @vsaddu_vv_v4i32_unmasked(<4 x i32> %va, <4 x i32> %b, i32 zeroext %evl) {
+; CHECK-LABEL: vsaddu_vv_v4i32_unmasked:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma
+; CHECK-NEXT: vsaddu.vv v8, v8, v9
+; CHECK-NEXT: ret
+ %head = insertelement <4 x i1> poison, i1 true, i32 0
+ %m = shufflevector <4 x i1> %head, <4 x i1> poison, <4 x i32> zeroinitializer
+ %v = call <4 x i32> @llvm.vp.uadd.sat.v4i32(<4 x i32> %va, <4 x i32> %b, <4 x i1> %m, i32 %evl)
+ ret <4 x i32> %v
+}
+
+define <4 x i32> @vsaddu_vx_v4i32(<4 x i32> %va, i32 %b, <4 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vsaddu_vx_v4i32:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
+; CHECK-NEXT: vsaddu.vx v8, v8, a0, v0.t
+; CHECK-NEXT: ret
+ %elt.head = insertelement <4 x i32> poison, i32 %b, i32 0
+ %vb = shufflevector <4 x i32> %elt.head, <4 x i32> poison, <4 x i32> zeroinitializer
+ %v = call <4 x i32> @llvm.vp.uadd.sat.v4i32(<4 x i32> %va, <4 x i32> %vb, <4 x i1> %m, i32 %evl)
+ ret <4 x i32> %v
+}
+
+define <4 x i32> @vsaddu_vx_v4i32_unmasked(<4 x i32> %va, i32 %b, i32 zeroext %evl) {
+; CHECK-LABEL: vsaddu_vx_v4i32_unmasked:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
+; CHECK-NEXT: vsaddu.vx v8, v8, a0
+; CHECK-NEXT: ret
+ %elt.head = insertelement <4 x i32> poison, i32 %b, i32 0
+ %vb = shufflevector <4 x i32> %elt.head, <4 x i32> poison, <4 x i32> zeroinitializer
+ %head = insertelement <4 x i1> poison, i1 true, i32 0
+ %m = shufflevector <4 x i1> %head, <4 x i1> poison, <4 x i32> zeroinitializer
+ %v = call <4 x i32> @llvm.vp.uadd.sat.v4i32(<4 x i32> %va, <4 x i32> %vb, <4 x i1> %m, i32 %evl)
+ ret <4 x i32> %v
+}
+
+define <4 x i32> @vsaddu_vi_v4i32(<4 x i32> %va, <4 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vsaddu_vi_v4i32:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma
+; CHECK-NEXT: vsaddu.vi v8, v8, -1, v0.t
+; CHECK-NEXT: ret
+ %elt.head = insertelement <4 x i32> poison, i32 -1, i32 0
+ %vb = shufflevector <4 x i32> %elt.head, <4 x i32> poison, <4 x i32> zeroinitializer
+ %v = call <4 x i32> @llvm.vp.uadd.sat.v4i32(<4 x i32> %va, <4 x i32> %vb, <4 x i1> %m, i32 %evl)
+ ret <4 x i32> %v
+}
+
+define <4 x i32> @vsaddu_vi_v4i32_unmasked(<4 x i32> %va, i32 zeroext %evl) {
+; CHECK-LABEL: vsaddu_vi_v4i32_unmasked:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma
+; CHECK-NEXT: vsaddu.vi v8, v8, -1
+; CHECK-NEXT: ret
+ %elt.head = insertelement <4 x i32> poison, i32 -1, i32 0
+ %vb = shufflevector <4 x i32> %elt.head, <4 x i32> poison, <4 x i32> zeroinitializer
+ %head = insertelement <4 x i1> poison, i1 true, i32 0
+ %m = shufflevector <4 x i1> %head, <4 x i1> poison, <4 x i32> zeroinitializer
+ %v = call <4 x i32> @llvm.vp.uadd.sat.v4i32(<4 x i32> %va, <4 x i32> %vb, <4 x i1> %m, i32 %evl)
+ ret <4 x i32> %v
+}
+
+declare <8 x i32> @llvm.vp.uadd.sat.v8i32(<8 x i32>, <8 x i32>, <8 x i1>, i32)
+
+define <8 x i32> @vsaddu_vv_v8i32(<8 x i32> %va, <8 x i32> %b, <8 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vsaddu_vv_v8i32:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma
+; CHECK-NEXT: vsaddu.vv v8, v8, v10, v0.t
+; CHECK-NEXT: ret
+ %v = call <8 x i32> @llvm.vp.uadd.sat.v8i32(<8 x i32> %va, <8 x i32> %b, <8 x i1> %m, i32 %evl)
+ ret <8 x i32> %v
+}
+
+define <8 x i32> @vsaddu_vv_v8i32_unmasked(<8 x i32> %va, <8 x i32> %b, i32 zeroext %evl) {
+; CHECK-LABEL: vsaddu_vv_v8i32_unmasked:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma
+; CHECK-NEXT: vsaddu.vv v8, v8, v10
+; CHECK-NEXT: ret
+ %head = insertelement <8 x i1> poison, i1 true, i32 0
+ %m = shufflevector <8 x i1> %head, <8 x i1> poison, <8 x i32> zeroinitializer
+ %v = call <8 x i32> @llvm.vp.uadd.sat.v8i32(<8 x i32> %va, <8 x i32> %b, <8 x i1> %m, i32 %evl)
+ ret <8 x i32> %v
+}
+
+define <8 x i32> @vsaddu_vx_v8i32(<8 x i32> %va, i32 %b, <8 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vsaddu_vx_v8i32:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma
+; CHECK-NEXT: vsaddu.vx v8, v8, a0, v0.t
+; CHECK-NEXT: ret
+ %elt.head = insertelement <8 x i32> poison, i32 %b, i32 0
+ %vb = shufflevector <8 x i32> %elt.head, <8 x i32> poison, <8 x i32> zeroinitializer
+ %v = call <8 x i32> @llvm.vp.uadd.sat.v8i32(<8 x i32> %va, <8 x i32> %vb, <8 x i1> %m, i32 %evl)
+ ret <8 x i32> %v
+}
+
+define <8 x i32> @vsaddu_vx_v8i32_unmasked(<8 x i32> %va, i32 %b, i32 zeroext %evl) {
+; CHECK-LABEL: vsaddu_vx_v8i32_unmasked:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma
+; CHECK-NEXT: vsaddu.vx v8, v8, a0
+; CHECK-NEXT: ret
+ %elt.head = insertelement <8 x i32> poison, i32 %b, i32 0
+ %vb = shufflevector <8 x i32> %elt.head, <8 x i32> poison, <8 x i32> zeroinitializer
+ %head = insertelement <8 x i1> poison, i1 true, i32 0
+ %m = shufflevector <8 x i1> %head, <8 x i1> poison, <8 x i32> zeroinitializer
+ %v = call <8 x i32> @llvm.vp.uadd.sat.v8i32(<8 x i32> %va, <8 x i32> %vb, <8 x i1> %m, i32 %evl)
+ ret <8 x i32> %v
+}
+
+define <8 x i32> @vsaddu_vi_v8i32(<8 x i32> %va, <8 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vsaddu_vi_v8i32:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma
+; CHECK-NEXT: vsaddu.vi v8, v8, -1, v0.t
+; CHECK-NEXT: ret
+ %elt.head = insertelement <8 x i32> poison, i32 -1, i32 0
+ %vb = shufflevector <8 x i32> %elt.head, <8 x i32> poison, <8 x i32> zeroinitializer
+ %v = call <8 x i32> @llvm.vp.uadd.sat.v8i32(<8 x i32> %va, <8 x i32> %vb, <8 x i1> %m, i32 %evl)
+ ret <8 x i32> %v
+}
+
+define <8 x i32> @vsaddu_vi_v8i32_unmasked(<8 x i32> %va, i32 zeroext %evl) {
+; CHECK-LABEL: vsaddu_vi_v8i32_unmasked:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma
+; CHECK-NEXT: vsaddu.vi v8, v8, -1
+; CHECK-NEXT: ret
+ %elt.head = insertelement <8 x i32> poison, i32 -1, i32 0
+ %vb = shufflevector <8 x i32> %elt.head, <8 x i32> poison, <8 x i32> zeroinitializer
+ %head = insertelement <8 x i1> poison, i1 true, i32 0
+ %m = shufflevector <8 x i1> %head, <8 x i1> poison, <8 x i32> zeroinitializer
+ %v = call <8 x i32> @llvm.vp.uadd.sat.v8i32(<8 x i32> %va, <8 x i32> %vb, <8 x i1> %m, i32 %evl)
+ ret <8 x i32> %v
+}
+
+declare <16 x i32> @llvm.vp.uadd.sat.v16i32(<16 x i32>, <16 x i32>, <16 x i1>, i32)
+
+define <16 x i32> @vsaddu_vv_v16i32(<16 x i32> %va, <16 x i32> %b, <16 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vsaddu_vv_v16i32:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma
+; CHECK-NEXT: vsaddu.vv v8, v8, v12, v0.t
+; CHECK-NEXT: ret
+ %v = call <16 x i32> @llvm.vp.uadd.sat.v16i32(<16 x i32> %va, <16 x i32> %b, <16 x i1> %m, i32 %evl)
+ ret <16 x i32> %v
+}
+
+define <16 x i32> @vsaddu_vv_v16i32_unmasked(<16 x i32> %va, <16 x i32> %b, i32 zeroext %evl) {
+; CHECK-LABEL: vsaddu_vv_v16i32_unmasked:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma
+; CHECK-NEXT: vsaddu.vv v8, v8, v12
+; CHECK-NEXT: ret
+ %head = insertelement <16 x i1> poison, i1 true, i32 0
+ %m = shufflevector <16 x i1> %head, <16 x i1> poison, <16 x i32> zeroinitializer
+ %v = call <16 x i32> @llvm.vp.uadd.sat.v16i32(<16 x i32> %va, <16 x i32> %b, <16 x i1> %m, i32 %evl)
+ ret <16 x i32> %v
+}
+
+define <16 x i32> @vsaddu_vx_v16i32(<16 x i32> %va, i32 %b, <16 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vsaddu_vx_v16i32:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma
+; CHECK-NEXT: vsaddu.vx v8, v8, a0, v0.t
+; CHECK-NEXT: ret
+ %elt.head = insertelement <16 x i32> poison, i32 %b, i32 0
+ %vb = shufflevector <16 x i32> %elt.head, <16 x i32> poison, <16 x i32> zeroinitializer
+ %v = call <16 x i32> @llvm.vp.uadd.sat.v16i32(<16 x i32> %va, <16 x i32> %vb, <16 x i1> %m, i32 %evl)
+ ret <16 x i32> %v
+}
+
+define <16 x i32> @vsaddu_vx_v16i32_unmasked(<16 x i32> %va, i32 %b, i32 zeroext %evl) {
+; CHECK-LABEL: vsaddu_vx_v16i32_unmasked:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma
+; CHECK-NEXT: vsaddu.vx v8, v8, a0
+; CHECK-NEXT: ret
+ %elt.head = insertelement <16 x i32> poison, i32 %b, i32 0
+ %vb = shufflevector <16 x i32> %elt.head, <16 x i32> poison, <16 x i32> zeroinitializer
+ %head = insertelement <16 x i1> poison, i1 true, i32 0
+ %m = shufflevector <16 x i1> %head, <16 x i1> poison, <16 x i32> zeroinitializer
+ %v = call <16 x i32> @llvm.vp.uadd.sat.v16i32(<16 x i32> %va, <16 x i32> %vb, <16 x i1> %m, i32 %evl)
+ ret <16 x i32> %v
+}
+
+define <16 x i32> @vsaddu_vi_v16i32(<16 x i32> %va, <16 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vsaddu_vi_v16i32:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma
+; CHECK-NEXT: vsaddu.vi v8, v8, -1, v0.t
+; CHECK-NEXT: ret
+ %elt.head = insertelement <16 x i32> poison, i32 -1, i32 0
+ %vb = shufflevector <16 x i32> %elt.head, <16 x i32> poison, <16 x i32> zeroinitializer
+ %v = call <16 x i32> @llvm.vp.uadd.sat.v16i32(<16 x i32> %va, <16 x i32> %vb, <16 x i1> %m, i32 %evl)
+ ret <16 x i32> %v
+}
+
+define <16 x i32> @vsaddu_vi_v16i32_unmasked(<16 x i32> %va, i32 zeroext %evl) {
+; CHECK-LABEL: vsaddu_vi_v16i32_unmasked:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma
+; CHECK-NEXT: vsaddu.vi v8, v8, -1
+; CHECK-NEXT: ret
+ %elt.head = insertelement <16 x i32> poison, i32 -1, i32 0
+ %vb = shufflevector <16 x i32> %elt.head, <16 x i32> poison, <16 x i32> zeroinitializer
+ %head = insertelement <16 x i1> poison, i1 true, i32 0
+ %m = shufflevector <16 x i1> %head, <16 x i1> poison, <16 x i32> zeroinitializer
+ %v = call <16 x i32> @llvm.vp.uadd.sat.v16i32(<16 x i32> %va, <16 x i32> %vb, <16 x i1> %m, i32 %evl)
+ ret <16 x i32> %v
+}
+
+declare <2 x i64> @llvm.vp.uadd.sat.v2i64(<2 x i64>, <2 x i64>, <2 x i1>, i32)
+
+define <2 x i64> @vsaddu_vv_v2i64(<2 x i64> %va, <2 x i64> %b, <2 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vsaddu_vv_v2i64:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma
+; CHECK-NEXT: vsaddu.vv v8, v8, v9, v0.t
+; CHECK-NEXT: ret
+ %v = call <2 x i64> @llvm.vp.uadd.sat.v2i64(<2 x i64> %va, <2 x i64> %b, <2 x i1> %m, i32 %evl)
+ ret <2 x i64> %v
+}
+
+define <2 x i64> @vsaddu_vv_v2i64_unmasked(<2 x i64> %va, <2 x i64> %b, i32 zeroext %evl) {
+; CHECK-LABEL: vsaddu_vv_v2i64_unmasked:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma
+; CHECK-NEXT: vsaddu.vv v8, v8, v9
+; CHECK-NEXT: ret
+ %head = insertelement <2 x i1> poison, i1 true, i32 0
+ %m = shufflevector <2 x i1> %head, <2 x i1> poison, <2 x i32> zeroinitializer
+ %v = call <2 x i64> @llvm.vp.uadd.sat.v2i64(<2 x i64> %va, <2 x i64> %b, <2 x i1> %m, i32 %evl)
+ ret <2 x i64> %v
+}
+
+define <2 x i64> @vsaddu_vx_v2i64(<2 x i64> %va, i64 %b, <2 x i1> %m, i32 zeroext %evl) {
+; RV32-LABEL: vsaddu_vx_v2i64:
+; RV32: # %bb.0:
+; RV32-NEXT: addi sp, sp, -16
+; RV32-NEXT: .cfi_def_cfa_offset 16
+; RV32-NEXT: sw a1, 12(sp)
+; RV32-NEXT: sw a0, 8(sp)
+; RV32-NEXT: addi a0, sp, 8
+; RV32-NEXT: vsetivli zero, 2, e64, m1, ta, ma
+; RV32-NEXT: vlse64.v v9, (a0), zero
+; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, ma
+; RV32-NEXT: vsaddu.vv v8, v8, v9, v0.t
+; RV32-NEXT: addi sp, sp, 16
+; RV32-NEXT: ret
+;
+; RV64-LABEL: vsaddu_vx_v2i64:
+; RV64: # %bb.0:
+; RV64-NEXT: vsetvli zero, a1, e64, m1, ta, ma
+; RV64-NEXT: vsaddu.vx v8, v8, a0, v0.t
+; RV64-NEXT: ret
+ %elt.head = insertelement <2 x i64> poison, i64 %b, i32 0
+ %vb = shufflevector <2 x i64> %elt.head, <2 x i64> poison, <2 x i32> zeroinitializer
+ %v = call <2 x i64> @llvm.vp.uadd.sat.v2i64(<2 x i64> %va, <2 x i64> %vb, <2 x i1> %m, i32 %evl)
+ ret <2 x i64> %v
+}
+
+define <2 x i64> @vsaddu_vx_v2i64_unmasked(<2 x i64> %va, i64 %b, i32 zeroext %evl) {
+; RV32-LABEL: vsaddu_vx_v2i64_unmasked:
+; RV32: # %bb.0:
+; RV32-NEXT: addi sp, sp, -16
+; RV32-NEXT: .cfi_def_cfa_offset 16
+; RV32-NEXT: sw a1, 12(sp)
+; RV32-NEXT: sw a0, 8(sp)
+; RV32-NEXT: addi a0, sp, 8
+; RV32-NEXT: vsetivli zero, 2, e64, m1, ta, ma
+; RV32-NEXT: vlse64.v v9, (a0), zero
+; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, ma
+; RV32-NEXT: vsaddu.vv v8, v8, v9
+; RV32-NEXT: addi sp, sp, 16
+; RV32-NEXT: ret
+;
+; RV64-LABEL: vsaddu_vx_v2i64_unmasked:
+; RV64: # %bb.0:
+; RV64-NEXT: vsetvli zero, a1, e64, m1, ta, ma
+; RV64-NEXT: vsaddu.vx v8, v8, a0
+; RV64-NEXT: ret
+ %elt.head = insertelement <2 x i64> poison, i64 %b, i32 0
+ %vb = shufflevector <2 x i64> %elt.head, <2 x i64> poison, <2 x i32> zeroinitializer
+ %head = insertelement <2 x i1> poison, i1 true, i32 0
+ %m = shufflevector <2 x i1> %head, <2 x i1> poison, <2 x i32> zeroinitializer
+ %v = call <2 x i64> @llvm.vp.uadd.sat.v2i64(<2 x i64> %va, <2 x i64> %vb, <2 x i1> %m, i32 %evl)
+ ret <2 x i64> %v
+}
+
+define <2 x i64> @vsaddu_vi_v2i64(<2 x i64> %va, <2 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vsaddu_vi_v2i64:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma
+; CHECK-NEXT: vsaddu.vi v8, v8, -1, v0.t
+; CHECK-NEXT: ret
+ %elt.head = insertelement <2 x i64> poison, i64 -1, i32 0
+ %vb = shufflevector <2 x i64> %elt.head, <2 x i64> poison, <2 x i32> zeroinitializer
+ %v = call <2 x i64> @llvm.vp.uadd.sat.v2i64(<2 x i64> %va, <2 x i64> %vb, <2 x i1> %m, i32 %evl)
+ ret <2 x i64> %v
+}
+
+define <2 x i64> @vsaddu_vi_v2i64_unmasked(<2 x i64> %va, i32 zeroext %evl) {
+; CHECK-LABEL: vsaddu_vi_v2i64_unmasked:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma
+; CHECK-NEXT: vsaddu.vi v8, v8, -1
+; CHECK-NEXT: ret
+ %elt.head = insertelement <2 x i64> poison, i64 -1, i32 0
+ %vb = shufflevector <2 x i64> %elt.head, <2 x i64> poison, <2 x i32> zeroinitializer
+ %head = insertelement <2 x i1> poison, i1 true, i32 0
+ %m = shufflevector <2 x i1> %head, <2 x i1> poison, <2 x i32> zeroinitializer
+ %v = call <2 x i64> @llvm.vp.uadd.sat.v2i64(<2 x i64> %va, <2 x i64> %vb, <2 x i1> %m, i32 %evl)
+ ret <2 x i64> %v
+}
+
+declare <4 x i64> @llvm.vp.uadd.sat.v4i64(<4 x i64>, <4 x i64>, <4 x i1>, i32)
+
+define <4 x i64> @vsaddu_vv_v4i64(<4 x i64> %va, <4 x i64> %b, <4 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vsaddu_vv_v4i64:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma
+; CHECK-NEXT: vsaddu.vv v8, v8, v10, v0.t
+; CHECK-NEXT: ret
+ %v = call <4 x i64> @llvm.vp.uadd.sat.v4i64(<4 x i64> %va, <4 x i64> %b, <4 x i1> %m, i32 %evl)
+ ret <4 x i64> %v
+}
+
+define <4 x i64> @vsaddu_vv_v4i64_unmasked(<4 x i64> %va, <4 x i64> %b, i32 zeroext %evl) {
+; CHECK-LABEL: vsaddu_vv_v4i64_unmasked:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma
+; CHECK-NEXT: vsaddu.vv v8, v8, v10
+; CHECK-NEXT: ret
+ %head = insertelement <4 x i1> poison, i1 true, i32 0
+ %m = shufflevector <4 x i1> %head, <4 x i1> poison, <4 x i32> zeroinitializer
+ %v = call <4 x i64> @llvm.vp.uadd.sat.v4i64(<4 x i64> %va, <4 x i64> %b, <4 x i1> %m, i32 %evl)
+ ret <4 x i64> %v
+}
+
+define <4 x i64> @vsaddu_vx_v4i64(<4 x i64> %va, i64 %b, <4 x i1> %m, i32 zeroext %evl) {
+; RV32-LABEL: vsaddu_vx_v4i64:
+; RV32: # %bb.0:
+; RV32-NEXT: addi sp, sp, -16
+; RV32-NEXT: .cfi_def_cfa_offset 16
+; RV32-NEXT: sw a1, 12(sp)
+; RV32-NEXT: sw a0, 8(sp)
+; RV32-NEXT: addi a0, sp, 8
+; RV32-NEXT: vsetivli zero, 4, e64, m2, ta, ma
+; RV32-NEXT: vlse64.v v10, (a0), zero
+; RV32-NEXT: vsetvli zero, a2, e64, m2, ta, ma
+; RV32-NEXT: vsaddu.vv v8, v8, v10, v0.t
+; RV32-NEXT: addi sp, sp, 16
+; RV32-NEXT: ret
+;
+; RV64-LABEL: vsaddu_vx_v4i64:
+; RV64: # %bb.0:
+; RV64-NEXT: vsetvli zero, a1, e64, m2, ta, ma
+; RV64-NEXT: vsaddu.vx v8, v8, a0, v0.t
+; RV64-NEXT: ret
+ %elt.head = insertelement <4 x i64> poison, i64 %b, i32 0
+ %vb = shufflevector <4 x i64> %elt.head, <4 x i64> poison, <4 x i32> zeroinitializer
+ %v = call <4 x i64> @llvm.vp.uadd.sat.v4i64(<4 x i64> %va, <4 x i64> %vb, <4 x i1> %m, i32 %evl)
+ ret <4 x i64> %v
+}
+
+define <4 x i64> @vsaddu_vx_v4i64_unmasked(<4 x i64> %va, i64 %b, i32 zeroext %evl) {
+; RV32-LABEL: vsaddu_vx_v4i64_unmasked:
+; RV32: # %bb.0:
+; RV32-NEXT: addi sp, sp, -16
+; RV32-NEXT: .cfi_def_cfa_offset 16
+; RV32-NEXT: sw a1, 12(sp)
+; RV32-NEXT: sw a0, 8(sp)
+; RV32-NEXT: addi a0, sp, 8
+; RV32-NEXT: vsetivli zero, 4, e64, m2, ta, ma
+; RV32-NEXT: vlse64.v v10, (a0), zero
+; RV32-NEXT: vsetvli zero, a2, e64, m2, ta, ma
+; RV32-NEXT: vsaddu.vv v8, v8, v10
+; RV32-NEXT: addi sp, sp, 16
+; RV32-NEXT: ret
+;
+; RV64-LABEL: vsaddu_vx_v4i64_unmasked:
+; RV64: # %bb.0:
+; RV64-NEXT: vsetvli zero, a1, e64, m2, ta, ma
+; RV64-NEXT: vsaddu.vx v8, v8, a0
+; RV64-NEXT: ret
+ %elt.head = insertelement <4 x i64> poison, i64 %b, i32 0
+ %vb = shufflevector <4 x i64> %elt.head, <4 x i64> poison, <4 x i32> zeroinitializer
+ %head = insertelement <4 x i1> poison, i1 true, i32 0
+ %m = shufflevector <4 x i1> %head, <4 x i1> poison, <4 x i32> zeroinitializer
+ %v = call <4 x i64> @llvm.vp.uadd.sat.v4i64(<4 x i64> %va, <4 x i64> %vb, <4 x i1> %m, i32 %evl)
+ ret <4 x i64> %v
+}
+
+define <4 x i64> @vsaddu_vi_v4i64(<4 x i64> %va, <4 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vsaddu_vi_v4i64:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma
+; CHECK-NEXT: vsaddu.vi v8, v8, -1, v0.t
+; CHECK-NEXT: ret
+ %elt.head = insertelement <4 x i64> poison, i64 -1, i32 0
+ %vb = shufflevector <4 x i64> %elt.head, <4 x i64> poison, <4 x i32> zeroinitializer
+ %v = call <4 x i64> @llvm.vp.uadd.sat.v4i64(<4 x i64> %va, <4 x i64> %vb, <4 x i1> %m, i32 %evl)
+ ret <4 x i64> %v
+}
+
+define <4 x i64> @vsaddu_vi_v4i64_unmasked(<4 x i64> %va, i32 zeroext %evl) {
+; CHECK-LABEL: vsaddu_vi_v4i64_unmasked:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma
+; CHECK-NEXT: vsaddu.vi v8, v8, -1
+; CHECK-NEXT: ret
+ %elt.head = insertelement <4 x i64> poison, i64 -1, i32 0
+ %vb = shufflevector <4 x i64> %elt.head, <4 x i64> poison, <4 x i32> zeroinitializer
+ %head = insertelement <4 x i1> poison, i1 true, i32 0
+ %m = shufflevector <4 x i1> %head, <4 x i1> poison, <4 x i32> zeroinitializer
+ %v = call <4 x i64> @llvm.vp.uadd.sat.v4i64(<4 x i64> %va, <4 x i64> %vb, <4 x i1> %m, i32 %evl)
+ ret <4 x i64> %v
+}
+
+declare <8 x i64> @llvm.vp.uadd.sat.v8i64(<8 x i64>, <8 x i64>, <8 x i1>, i32)
+
+define <8 x i64> @vsaddu_vv_v8i64(<8 x i64> %va, <8 x i64> %b, <8 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vsaddu_vv_v8i64:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma
+; CHECK-NEXT: vsaddu.vv v8, v8, v12, v0.t
+; CHECK-NEXT: ret
+ %v = call <8 x i64> @llvm.vp.uadd.sat.v8i64(<8 x i64> %va, <8 x i64> %b, <8 x i1> %m, i32 %evl)
+ ret <8 x i64> %v
+}
+
+define <8 x i64> @vsaddu_vv_v8i64_unmasked(<8 x i64> %va, <8 x i64> %b, i32 zeroext %evl) {
+; CHECK-LABEL: vsaddu_vv_v8i64_unmasked:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma
+; CHECK-NEXT: vsaddu.vv v8, v8, v12
+; CHECK-NEXT: ret
+ %head = insertelement <8 x i1> poison, i1 true, i32 0
+ %m = shufflevector <8 x i1> %head, <8 x i1> poison, <8 x i32> zeroinitializer
+ %v = call <8 x i64> @llvm.vp.uadd.sat.v8i64(<8 x i64> %va, <8 x i64> %b, <8 x i1> %m, i32 %evl)
+ ret <8 x i64> %v
+}
+
+define <8 x i64> @vsaddu_vx_v8i64(<8 x i64> %va, i64 %b, <8 x i1> %m, i32 zeroext %evl) {
+; RV32-LABEL: vsaddu_vx_v8i64:
+; RV32: # %bb.0:
+; RV32-NEXT: addi sp, sp, -16
+; RV32-NEXT: .cfi_def_cfa_offset 16
+; RV32-NEXT: sw a1, 12(sp)
+; RV32-NEXT: sw a0, 8(sp)
+; RV32-NEXT: addi a0, sp, 8
+; RV32-NEXT: vsetivli zero, 8, e64, m4, ta, ma
+; RV32-NEXT: vlse64.v v12, (a0), zero
+; RV32-NEXT: vsetvli zero, a2, e64, m4, ta, ma
+; RV32-NEXT: vsaddu.vv v8, v8, v12, v0.t
+; RV32-NEXT: addi sp, sp, 16
+; RV32-NEXT: ret
+;
+; RV64-LABEL: vsaddu_vx_v8i64:
+; RV64: # %bb.0:
+; RV64-NEXT: vsetvli zero, a1, e64, m4, ta, ma
+; RV64-NEXT: vsaddu.vx v8, v8, a0, v0.t
+; RV64-NEXT: ret
+ %elt.head = insertelement <8 x i64> poison, i64 %b, i32 0
+ %vb = shufflevector <8 x i64> %elt.head, <8 x i64> poison, <8 x i32> zeroinitializer
+ %v = call <8 x i64> @llvm.vp.uadd.sat.v8i64(<8 x i64> %va, <8 x i64> %vb, <8 x i1> %m, i32 %evl)
+ ret <8 x i64> %v
+}
+
+define <8 x i64> @vsaddu_vx_v8i64_unmasked(<8 x i64> %va, i64 %b, i32 zeroext %evl) {
+; RV32-LABEL: vsaddu_vx_v8i64_unmasked:
+; RV32: # %bb.0:
+; RV32-NEXT: addi sp, sp, -16
+; RV32-NEXT: .cfi_def_cfa_offset 16
+; RV32-NEXT: sw a1, 12(sp)
+; RV32-NEXT: sw a0, 8(sp)
+; RV32-NEXT: addi a0, sp, 8
+; RV32-NEXT: vsetivli zero, 8, e64, m4, ta, ma
+; RV32-NEXT: vlse64.v v12, (a0), zero
+; RV32-NEXT: vsetvli zero, a2, e64, m4, ta, ma
+; RV32-NEXT: vsaddu.vv v8, v8, v12
+; RV32-NEXT: addi sp, sp, 16
+; RV32-NEXT: ret
+;
+; RV64-LABEL: vsaddu_vx_v8i64_unmasked:
+; RV64: # %bb.0:
+; RV64-NEXT: vsetvli zero, a1, e64, m4, ta, ma
+; RV64-NEXT: vsaddu.vx v8, v8, a0
+; RV64-NEXT: ret
+ %elt.head = insertelement <8 x i64> poison, i64 %b, i32 0
+ %vb = shufflevector <8 x i64> %elt.head, <8 x i64> poison, <8 x i32> zeroinitializer
+ %head = insertelement <8 x i1> poison, i1 true, i32 0
+ %m = shufflevector <8 x i1> %head, <8 x i1> poison, <8 x i32> zeroinitializer
+ %v = call <8 x i64> @llvm.vp.uadd.sat.v8i64(<8 x i64> %va, <8 x i64> %vb, <8 x i1> %m, i32 %evl)
+ ret <8 x i64> %v
+}
+
+define <8 x i64> @vsaddu_vi_v8i64(<8 x i64> %va, <8 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vsaddu_vi_v8i64:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma
+; CHECK-NEXT: vsaddu.vi v8, v8, -1, v0.t
+; CHECK-NEXT: ret
+ %elt.head = insertelement <8 x i64> poison, i64 -1, i32 0
+ %vb = shufflevector <8 x i64> %elt.head, <8 x i64> poison, <8 x i32> zeroinitializer
+ %v = call <8 x i64> @llvm.vp.uadd.sat.v8i64(<8 x i64> %va, <8 x i64> %vb, <8 x i1> %m, i32 %evl)
+ ret <8 x i64> %v
+}
+
+define <8 x i64> @vsaddu_vi_v8i64_unmasked(<8 x i64> %va, i32 zeroext %evl) {
+; CHECK-LABEL: vsaddu_vi_v8i64_unmasked:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma
+; CHECK-NEXT: vsaddu.vi v8, v8, -1
+; CHECK-NEXT: ret
+ %elt.head = insertelement <8 x i64> poison, i64 -1, i32 0
+ %vb = shufflevector <8 x i64> %elt.head, <8 x i64> poison, <8 x i32> zeroinitializer
+ %head = insertelement <8 x i1> poison, i1 true, i32 0
+ %m = shufflevector <8 x i1> %head, <8 x i1> poison, <8 x i32> zeroinitializer
+ %v = call <8 x i64> @llvm.vp.uadd.sat.v8i64(<8 x i64> %va, <8 x i64> %vb, <8 x i1> %m, i32 %evl)
+ ret <8 x i64> %v
+}
+
+declare <16 x i64> @llvm.vp.uadd.sat.v16i64(<16 x i64>, <16 x i64>, <16 x i1>, i32)
+
+define <16 x i64> @vsaddu_vv_v16i64(<16 x i64> %va, <16 x i64> %b, <16 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vsaddu_vv_v16i64:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
+; CHECK-NEXT: vsaddu.vv v8, v8, v16, v0.t
+; CHECK-NEXT: ret
+ %v = call <16 x i64> @llvm.vp.uadd.sat.v16i64(<16 x i64> %va, <16 x i64> %b, <16 x i1> %m, i32 %evl)
+ ret <16 x i64> %v
+}
+
+define <16 x i64> @vsaddu_vv_v16i64_unmasked(<16 x i64> %va, <16 x i64> %b, i32 zeroext %evl) {
+; CHECK-LABEL: vsaddu_vv_v16i64_unmasked:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
+; CHECK-NEXT: vsaddu.vv v8, v8, v16
+; CHECK-NEXT: ret
+ %head = insertelement <16 x i1> poison, i1 true, i32 0
+ %m = shufflevector <16 x i1> %head, <16 x i1> poison, <16 x i32> zeroinitializer
+ %v = call <16 x i64> @llvm.vp.uadd.sat.v16i64(<16 x i64> %va, <16 x i64> %b, <16 x i1> %m, i32 %evl)
+ ret <16 x i64> %v
+}
+
+define <16 x i64> @vsaddu_vx_v16i64(<16 x i64> %va, i64 %b, <16 x i1> %m, i32 zeroext %evl) {
+; RV32-LABEL: vsaddu_vx_v16i64:
+; RV32: # %bb.0:
+; RV32-NEXT: addi sp, sp, -16
+; RV32-NEXT: .cfi_def_cfa_offset 16
+; RV32-NEXT: sw a1, 12(sp)
+; RV32-NEXT: sw a0, 8(sp)
+; RV32-NEXT: addi a0, sp, 8
+; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, ma
+; RV32-NEXT: vlse64.v v16, (a0), zero
+; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, ma
+; RV32-NEXT: vsaddu.vv v8, v8, v16, v0.t
+; RV32-NEXT: addi sp, sp, 16
+; RV32-NEXT: ret
+;
+; RV64-LABEL: vsaddu_vx_v16i64:
+; RV64: # %bb.0:
+; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, ma
+; RV64-NEXT: vsaddu.vx v8, v8, a0, v0.t
+; RV64-NEXT: ret
+ %elt.head = insertelement <16 x i64> poison, i64 %b, i32 0
+ %vb = shufflevector <16 x i64> %elt.head, <16 x i64> poison, <16 x i32> zeroinitializer
+ %v = call <16 x i64> @llvm.vp.uadd.sat.v16i64(<16 x i64> %va, <16 x i64> %vb, <16 x i1> %m, i32 %evl)
+ ret <16 x i64> %v
+}
+
+define <16 x i64> @vsaddu_vx_v16i64_unmasked(<16 x i64> %va, i64 %b, i32 zeroext %evl) {
+; RV32-LABEL: vsaddu_vx_v16i64_unmasked:
+; RV32: # %bb.0:
+; RV32-NEXT: addi sp, sp, -16
+; RV32-NEXT: .cfi_def_cfa_offset 16
+; RV32-NEXT: sw a1, 12(sp)
+; RV32-NEXT: sw a0, 8(sp)
+; RV32-NEXT: addi a0, sp, 8
+; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, ma
+; RV32-NEXT: vlse64.v v16, (a0), zero
+; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, ma
+; RV32-NEXT: vsaddu.vv v8, v8, v16
+; RV32-NEXT: addi sp, sp, 16
+; RV32-NEXT: ret
+;
+; RV64-LABEL: vsaddu_vx_v16i64_unmasked:
+; RV64: # %bb.0:
+; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, ma
+; RV64-NEXT: vsaddu.vx v8, v8, a0
+; RV64-NEXT: ret
+ %elt.head = insertelement <16 x i64> poison, i64 %b, i32 0
+ %vb = shufflevector <16 x i64> %elt.head, <16 x i64> poison, <16 x i32> zeroinitializer
+ %head = insertelement <16 x i1> poison, i1 true, i32 0
+ %m = shufflevector <16 x i1> %head, <16 x i1> poison, <16 x i32> zeroinitializer
+ %v = call <16 x i64> @llvm.vp.uadd.sat.v16i64(<16 x i64> %va, <16 x i64> %vb, <16 x i1> %m, i32 %evl)
+ ret <16 x i64> %v
+}
+
+define <16 x i64> @vsaddu_vi_v16i64(<16 x i64> %va, <16 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vsaddu_vi_v16i64:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
+; CHECK-NEXT: vsaddu.vi v8, v8, -1, v0.t
+; CHECK-NEXT: ret
+ %elt.head = insertelement <16 x i64> poison, i64 -1, i32 0
+ %vb = shufflevector <16 x i64> %elt.head, <16 x i64> poison, <16 x i32> zeroinitializer
+ %v = call <16 x i64> @llvm.vp.uadd.sat.v16i64(<16 x i64> %va, <16 x i64> %vb, <16 x i1> %m, i32 %evl)
+ ret <16 x i64> %v
+}
+
+define <16 x i64> @vsaddu_vi_v16i64_unmasked(<16 x i64> %va, i32 zeroext %evl) {
+; CHECK-LABEL: vsaddu_vi_v16i64_unmasked:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
+; CHECK-NEXT: vsaddu.vi v8, v8, -1
+; CHECK-NEXT: ret
+ %elt.head = insertelement <16 x i64> poison, i64 -1, i32 0
+ %vb = shufflevector <16 x i64> %elt.head, <16 x i64> poison, <16 x i32> zeroinitializer
+ %head = insertelement <16 x i1> poison, i1 true, i32 0
+ %m = shufflevector <16 x i1> %head, <16 x i1> poison, <16 x i32> zeroinitializer
+ %v = call <16 x i64> @llvm.vp.uadd.sat.v16i64(<16 x i64> %va, <16 x i64> %vb, <16 x i1> %m, i32 %evl)
+ ret <16 x i64> %v
+}
+
+; Test that split-legalization works as expected.
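+; <32 x i64> does not fit in a single LMUL=8 register group at e64, so the
+; operation is expected to be split into two <16 x i64> halves: the EVL is
+; clamped to 16 for the low half and the remainder (computed with the
+; sltu/addi/and sequence in the checks below) is used for the high half.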
+
+declare <32 x i64> @llvm.vp.uadd.sat.v32i64(<32 x i64>, <32 x i64>, <32 x i1>, i32)
+
+define <32 x i64> @vsaddu_vx_v32i64(<32 x i64> %va, <32 x i1> %m, i32 zeroext %evl) {
+; RV32-LABEL: vsaddu_vx_v32i64:
+; RV32: # %bb.0:
+; RV32-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
+; RV32-NEXT: vslidedown.vi v1, v0, 2
+; RV32-NEXT: li a1, 32
+; RV32-NEXT: vsetvli zero, a1, e32, m8, ta, ma
+; RV32-NEXT: li a2, 16
+; RV32-NEXT: vmv.v.i v24, -1
+; RV32-NEXT: mv a1, a0
+; RV32-NEXT: bltu a0, a2, .LBB108_2
+; RV32-NEXT: # %bb.1:
+; RV32-NEXT: li a1, 16
+; RV32-NEXT: .LBB108_2:
+; RV32-NEXT: vsetvli zero, a1, e64, m8, ta, ma
+; RV32-NEXT: vsaddu.vv v8, v8, v24, v0.t
+; RV32-NEXT: addi a1, a0, -16
+; RV32-NEXT: sltu a0, a0, a1
+; RV32-NEXT: addi a0, a0, -1
+; RV32-NEXT: and a0, a0, a1
+; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma
+; RV32-NEXT: vmv1r.v v0, v1
+; RV32-NEXT: vsaddu.vv v16, v16, v24, v0.t
+; RV32-NEXT: ret
+;
+; RV64-LABEL: vsaddu_vx_v32i64:
+; RV64: # %bb.0:
+; RV64-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
+; RV64-NEXT: li a2, 16
+; RV64-NEXT: vslidedown.vi v24, v0, 2
+; RV64-NEXT: mv a1, a0
+; RV64-NEXT: bltu a0, a2, .LBB108_2
+; RV64-NEXT: # %bb.1:
+; RV64-NEXT: li a1, 16
+; RV64-NEXT: .LBB108_2:
+; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, ma
+; RV64-NEXT: vsaddu.vi v8, v8, -1, v0.t
+; RV64-NEXT: addi a1, a0, -16
+; RV64-NEXT: sltu a0, a0, a1
+; RV64-NEXT: addi a0, a0, -1
+; RV64-NEXT: and a0, a0, a1
+; RV64-NEXT: vsetvli zero, a0, e64, m8, ta, ma
+; RV64-NEXT: vmv1r.v v0, v24
+; RV64-NEXT: vsaddu.vi v16, v16, -1, v0.t
+; RV64-NEXT: ret
+ %elt.head = insertelement <32 x i64> poison, i64 -1, i32 0
+ %vb = shufflevector <32 x i64> %elt.head, <32 x i64> poison, <32 x i32> zeroinitializer
+ %v = call <32 x i64> @llvm.vp.uadd.sat.v32i64(<32 x i64> %va, <32 x i64> %vb, <32 x i1> %m, i32 %evl)
+ ret <32 x i64> %v
+}
+
+define <32 x i64> @vsaddu_vi_v32i64_unmasked(<32 x i64> %va, i32 zeroext %evl) {
+; RV32-LABEL: vsaddu_vi_v32i64_unmasked:
+; RV32: # %bb.0:
+; RV32-NEXT: li a1, 32
+; RV32-NEXT: vsetvli zero, a1, e32, m8, ta, ma
+; RV32-NEXT: li a2, 16
+; RV32-NEXT: vmv.v.i v24, -1
+; RV32-NEXT: mv a1, a0
+; RV32-NEXT: bltu a0, a2, .LBB109_2
+; RV32-NEXT: # %bb.1:
+; RV32-NEXT: li a1, 16
+; RV32-NEXT: .LBB109_2:
+; RV32-NEXT: vsetvli zero, a1, e64, m8, ta, ma
+; RV32-NEXT: vsaddu.vv v8, v8, v24
+; RV32-NEXT: addi a1, a0, -16
+; RV32-NEXT: sltu a0, a0, a1
+; RV32-NEXT: addi a0, a0, -1
+; RV32-NEXT: and a0, a0, a1
+; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma
+; RV32-NEXT: vsaddu.vv v16, v16, v24
+; RV32-NEXT: ret
+;
+; RV64-LABEL: vsaddu_vi_v32i64_unmasked:
+; RV64: # %bb.0:
+; RV64-NEXT: li a2, 16
+; RV64-NEXT: mv a1, a0
+; RV64-NEXT: bltu a0, a2, .LBB109_2
+; RV64-NEXT: # %bb.1:
+; RV64-NEXT: li a1, 16
+; RV64-NEXT: .LBB109_2:
+; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, ma
+; RV64-NEXT: vsaddu.vi v8, v8, -1
+; RV64-NEXT: addi a1, a0, -16
+; RV64-NEXT: sltu a0, a0, a1
+; RV64-NEXT: addi a0, a0, -1
+; RV64-NEXT: and a0, a0, a1
+; RV64-NEXT: vsetvli zero, a0, e64, m8, ta, ma
+; RV64-NEXT: vsaddu.vi v16, v16, -1
+; RV64-NEXT: ret
+ %elt.head = insertelement <32 x i64> poison, i64 -1, i32 0
+ %vb = shufflevector <32 x i64> %elt.head, <32 x i64> poison, <32 x i32> zeroinitializer
+ %head = insertelement <32 x i1> poison, i1 true, i32 0
+ %m = shufflevector <32 x i1> %head, <32 x i1> poison, <32 x i32> zeroinitializer
+ %v = call <32 x i64> @llvm.vp.uadd.sat.v32i64(<32 x i64> %va, <32 x i64> %vb, <32 x i1> %m, i32 %evl)
+ ret <32 x i64> %v
+}
+
+; FIXME: We don't match vsaddu.vi on RV32.
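+; On RV32 the i64 scalar does not fit in a GPR, so the splat of -1 is
+; currently materialized with vmv.v.i at e32 and the .vv form is selected
+; instead of folding the immediate into vsaddu.vi.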
+
+define <32 x i64> @vsaddu_vx_v32i64_evl12(<32 x i64> %va, <32 x i1> %m) {
+; RV32-LABEL: vsaddu_vx_v32i64_evl12:
+; RV32: # %bb.0:
+; RV32-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
+; RV32-NEXT: vslidedown.vi v1, v0, 2
+; RV32-NEXT: li a0, 32
+; RV32-NEXT: vsetvli zero, a0, e32, m8, ta, ma
+; RV32-NEXT: vmv.v.i v24, -1
+; RV32-NEXT: vsetivli zero, 12, e64, m8, ta, ma
+; RV32-NEXT: vsaddu.vv v8, v8, v24, v0.t
+; RV32-NEXT: vsetivli zero, 0, e64, m8, ta, ma
+; RV32-NEXT: vmv1r.v v0, v1
+; RV32-NEXT: vsaddu.vv v16, v16, v24, v0.t
+; RV32-NEXT: ret
+;
+; RV64-LABEL: vsaddu_vx_v32i64_evl12:
+; RV64: # %bb.0:
+; RV64-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
+; RV64-NEXT: vslidedown.vi v24, v0, 2
+; RV64-NEXT: vsetivli zero, 12, e64, m8, ta, ma
+; RV64-NEXT: vsaddu.vi v8, v8, -1, v0.t
+; RV64-NEXT: vsetivli zero, 0, e64, m8, ta, ma
+; RV64-NEXT: vmv1r.v v0, v24
+; RV64-NEXT: vsaddu.vi v16, v16, -1, v0.t
+; RV64-NEXT: ret
+ %elt.head = insertelement <32 x i64> poison, i64 -1, i32 0
+ %vb = shufflevector <32 x i64> %elt.head, <32 x i64> poison, <32 x i32> zeroinitializer
+ %v = call <32 x i64> @llvm.vp.uadd.sat.v32i64(<32 x i64> %va, <32 x i64> %vb, <32 x i1> %m, i32 12)
+ ret <32 x i64> %v
+}
+
+define <32 x i64> @vsaddu_vx_v32i64_evl27(<32 x i64> %va, <32 x i1> %m) {
+; RV32-LABEL: vsaddu_vx_v32i64_evl27:
+; RV32: # %bb.0:
+; RV32-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
+; RV32-NEXT: vslidedown.vi v1, v0, 2
+; RV32-NEXT: li a0, 32
+; RV32-NEXT: vsetvli zero, a0, e32, m8, ta, ma
+; RV32-NEXT: vmv.v.i v24, -1
+; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, ma
+; RV32-NEXT: vsaddu.vv v8, v8, v24, v0.t
+; RV32-NEXT: vsetivli zero, 11, e64, m8, ta, ma
+; RV32-NEXT: vmv1r.v v0, v1
+; RV32-NEXT: vsaddu.vv v16, v16, v24, v0.t
+; RV32-NEXT: ret
+;
+; RV64-LABEL: vsaddu_vx_v32i64_evl27:
+; RV64: # %bb.0:
+; RV64-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
+; RV64-NEXT: vslidedown.vi v24, v0, 2
+; RV64-NEXT: vsetivli zero, 16, e64, m8, ta, ma
+; RV64-NEXT: vsaddu.vi v8, v8, -1, v0.t
+; RV64-NEXT: vsetivli zero, 11, e64, m8, ta, ma
+; RV64-NEXT: vmv1r.v v0, v24
+; RV64-NEXT: vsaddu.vi v16, v16, -1, v0.t
+; RV64-NEXT: ret
+ %elt.head = insertelement <32 x i64> poison, i64 -1, i32 0
+ %vb = shufflevector <32 x i64> %elt.head, <32 x i64> poison, <32 x i32> zeroinitializer
+ %v = call <32 x i64> @llvm.vp.uadd.sat.v32i64(<32 x i64> %va, <32 x i64> %vb, <32 x i1> %m, i32 27)
+ ret <32 x i64> %v
+}
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vssub-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vssub-vp.ll
new file mode 100644
index 000000000000..6360cf49d8d4
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vssub-vp.ll
@@ -0,0 +1,1745 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=riscv32 -mattr=+v -verify-machineinstrs < %s \
+; RUN: | FileCheck %s --check-prefixes=CHECK,RV32
+; RUN: llc -mtriple=riscv64 -mattr=+v -verify-machineinstrs < %s \
+; RUN: | FileCheck %s --check-prefixes=CHECK,RV64
+
+declare <8 x i7> @llvm.vp.ssub.sat.v8i7(<8 x i7>, <8 x i7>, <8 x i1>, i32)
+
+define <8 x i7> @vssub_vv_v8i7(<8 x i7> %va, <8 x i7> %b, <8 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vssub_vv_v8i7:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, ma
+; CHECK-NEXT: vadd.vv v9, v9, v9
+; CHECK-NEXT: vsra.vi v9, v9, 1
+; CHECK-NEXT: vadd.vv v8, v8, v8
+; CHECK-NEXT: vsra.vi v8, v8, 1
+; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma
+; CHECK-NEXT: vsub.vv v8, v8, v9, v0.t
+; CHECK-NEXT: li a0, 63
+; CHECK-NEXT: vmin.vx v8, v8, a0, v0.t
+; CHECK-NEXT: li a0, 192
+; CHECK-NEXT: vmax.vx v8, v8, a0, v0.t
+; CHECK-NEXT: ret
+ %v = call <8 x i7> @llvm.vp.ssub.sat.v8i7(<8 x i7> %va, <8 x i7> %b, <8 x i1> %m, i32 %evl)
+ ret <8 x i7> %v
+}
+
+declare <2 x i8> @llvm.vp.ssub.sat.v2i8(<2 x i8>, <2 x i8>, <2 x i1>, i32)
+
+define <2 x i8> @vssub_vv_v2i8(<2 x i8> %va, <2 x i8> %b, <2 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vssub_vv_v2i8:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma
+; CHECK-NEXT: vssub.vv v8, v8, v9, v0.t
+; CHECK-NEXT: ret
+ %v = call <2 x i8> @llvm.vp.ssub.sat.v2i8(<2 x i8> %va, <2 x i8> %b, <2 x i1> %m, i32 %evl)
+ ret <2 x i8> %v
+}
+
+define <2 x i8> @vssub_vv_v2i8_unmasked(<2 x i8> %va, <2 x i8> %b, i32 zeroext %evl) {
+; CHECK-LABEL: vssub_vv_v2i8_unmasked:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma
+; CHECK-NEXT: vssub.vv v8, v8, v9
+; CHECK-NEXT: ret
+ %head = insertelement <2 x i1> poison, i1 true, i32 0
+ %m = shufflevector <2 x i1> %head, <2 x i1> poison, <2 x i32> zeroinitializer
+ %v = call <2 x i8> @llvm.vp.ssub.sat.v2i8(<2 x i8> %va, <2 x i8> %b, <2 x i1> %m, i32 %evl)
+ ret <2 x i8> %v
+}
+
+define <2 x i8> @vssub_vx_v2i8(<2 x i8> %va, i8 %b, <2 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vssub_vx_v2i8:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
+; CHECK-NEXT: vssub.vx v8, v8, a0, v0.t
+; CHECK-NEXT: ret
+ %elt.head = insertelement <2 x i8> poison, i8 %b, i32 0
+ %vb = shufflevector <2 x i8> %elt.head, <2 x i8> poison, <2 x i32> zeroinitializer
+ %v = call <2 x i8> @llvm.vp.ssub.sat.v2i8(<2 x i8> %va, <2 x i8> %vb, <2 x i1> %m, i32 %evl)
+ ret <2 x i8> %v
+}
+
+define <2 x i8> @vssub_vx_v2i8_unmasked(<2 x i8> %va, i8 %b, i32 zeroext %evl) {
+; CHECK-LABEL: vssub_vx_v2i8_unmasked:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
+; CHECK-NEXT: vssub.vx v8, v8, a0
+; CHECK-NEXT: ret
+ %elt.head = insertelement <2 x i8> poison, i8 %b, i32 0
+ %vb = shufflevector <2 x i8> %elt.head, <2 x i8> poison, <2 x i32> zeroinitializer
+ %head = insertelement <2 x i1> poison, i1 true, i32 0
+ %m = shufflevector <2 x i1> %head, <2 x i1> poison, <2 x i32> zeroinitializer
+ %v = call <2 x i8> @llvm.vp.ssub.sat.v2i8(<2 x i8> %va, <2 x i8> %vb, <2 x i1> %m, i32 %evl)
+ ret <2 x i8> %v
+}
+
+define <2 x i8> @vssub_vi_v2i8(<2 x i8> %va, <2 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vssub_vi_v2i8:
+; CHECK: # %bb.0:
+; CHECK-NEXT: li a1, -1
+; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma
+; CHECK-NEXT: vssub.vx v8, v8, a1, v0.t
+; CHECK-NEXT: ret
+ %elt.head = insertelement <2 x i8> poison, i8 -1, i32 0
+ %vb = shufflevector <2 x i8> %elt.head, <2 x i8> poison, <2 x i32> zeroinitializer
+ %v = call <2 x i8> @llvm.vp.ssub.sat.v2i8(<2 x i8> %va, <2 x i8> %vb, <2 x i1> %m, i32 %evl)
+ ret <2 x i8> %v
+}
+
+define <2 x i8> @vssub_vi_v2i8_unmasked(<2 x i8> %va, i32 zeroext %evl) {
+; CHECK-LABEL: vssub_vi_v2i8_unmasked:
+; CHECK: # %bb.0:
+; CHECK-NEXT: li a1, -1
+; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma
+; CHECK-NEXT: vssub.vx v8, v8, a1
+; CHECK-NEXT: ret
+ %elt.head = insertelement <2 x i8> poison, i8 -1, i32 0
+ %vb = shufflevector <2 x i8> %elt.head, <2 x i8> poison, <2 x i32> zeroinitializer
+ %head = insertelement <2 x i1> poison, i1 true, i32 0
+ %m = shufflevector <2 x i1> %head, <2 x i1> poison, <2 x i32> zeroinitializer
+ %v = call <2 x i8> @llvm.vp.ssub.sat.v2i8(<2 x i8> %va, <2 x i8> %vb, <2 x i1> %m, i32 %evl)
+ ret <2 x i8> %v
+}
+
+declare <4 x i8> @llvm.vp.ssub.sat.v4i8(<4 x i8>, <4 x i8>, <4 x i1>, i32)
+
+define <4 x i8> @vssub_vv_v4i8(<4 x i8> %va, <4 x i8> %b, <4 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vssub_vv_v4i8:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma
+; CHECK-NEXT: vssub.vv v8, v8, v9, v0.t
+; CHECK-NEXT: ret
+ %v = call <4 x i8> @llvm.vp.ssub.sat.v4i8(<4 x i8> %va, <4 x i8> %b, <4 x i1> %m, i32 %evl)
+ ret <4 x i8> %v
+}
+
+define <4 x i8> @vssub_vv_v4i8_unmasked(<4 x i8> %va, <4 x i8> %b, i32 zeroext %evl) {
+; CHECK-LABEL: vssub_vv_v4i8_unmasked:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma
+; CHECK-NEXT: vssub.vv v8, v8, v9
+; CHECK-NEXT: ret
+ %head = insertelement <4 x i1> poison, i1 true, i32 0
+ %m = shufflevector <4 x i1> %head, <4 x i1> poison, <4 x i32> zeroinitializer
+ %v = call <4 x i8> @llvm.vp.ssub.sat.v4i8(<4 x i8> %va, <4 x i8> %b, <4 x i1> %m, i32 %evl)
+ ret <4 x i8> %v
+}
+
+define <4 x i8> @vssub_vx_v4i8(<4 x i8> %va, i8 %b, <4 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vssub_vx_v4i8:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma
+; CHECK-NEXT: vssub.vx v8, v8, a0, v0.t
+; CHECK-NEXT: ret
+ %elt.head = insertelement <4 x i8> poison, i8 %b, i32 0
+ %vb = shufflevector <4 x i8> %elt.head, <4 x i8> poison, <4 x i32> zeroinitializer
+ %v = call <4 x i8> @llvm.vp.ssub.sat.v4i8(<4 x i8> %va, <4 x i8> %vb, <4 x i1> %m, i32 %evl)
+ ret <4 x i8> %v
+}
+
+define <4 x i8> @vssub_vx_v4i8_commute(<4 x i8> %va, i8 %b, <4 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vssub_vx_v4i8_commute:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 4, e8, mf4, ta, ma
+; CHECK-NEXT: vmv.v.x v9, a0
+; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma
+; CHECK-NEXT: vssub.vv v8, v9, v8, v0.t
+; CHECK-NEXT: ret
+ %elt.head = insertelement <4 x i8> poison, i8 %b, i32 0
+ %vb = shufflevector <4 x i8> %elt.head, <4 x i8> poison, <4 x i32> zeroinitializer
+ %v = call <4 x i8> @llvm.vp.ssub.sat.v4i8(<4 x i8> %vb, <4 x i8> %va, <4 x i1> %m, i32 %evl)
+ ret <4 x i8> %v
+}
+
+define <4 x i8> @vssub_vx_v4i8_unmasked(<4 x i8> %va, i8 %b, i32 zeroext %evl) {
+; CHECK-LABEL: vssub_vx_v4i8_unmasked:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma
+; CHECK-NEXT: vssub.vx v8, v8, a0
+; CHECK-NEXT: ret
+ %elt.head = insertelement <4 x i8> poison, i8 %b, i32 0
+ %vb = shufflevector <4 x i8> %elt.head, <4 x i8> poison, <4 x i32> zeroinitializer
+ %head = insertelement <4 x i1> poison, i1 true, i32 0
+ %m = shufflevector <4 x i1> %head, <4 x i1> poison, <4 x i32> zeroinitializer
+ %v = call <4 x i8> @llvm.vp.ssub.sat.v4i8(<4 x i8> %va, <4 x i8> %vb, <4 x i1> %m, i32 %evl)
+ ret <4 x i8> %v
+}
+
+define <4 x i8> @vssub_vi_v4i8(<4 x i8> %va, <4 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vssub_vi_v4i8:
+; CHECK: # %bb.0:
+; CHECK-NEXT: li a1, -1
+; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma
+; CHECK-NEXT: vssub.vx v8, v8, a1, v0.t
+; CHECK-NEXT: ret
+ %elt.head = insertelement <4 x i8> poison, i8 -1, i32 0
+ %vb = shufflevector <4 x i8> %elt.head, <4 x i8> poison, <4 x i32> zeroinitializer
+ %v = call <4 x i8> @llvm.vp.ssub.sat.v4i8(<4 x i8> %va, <4 x i8> %vb, <4 x i1> %m, i32 %evl)
+ ret <4 x i8> %v
+}
+
+define <4 x i8> @vssub_vi_v4i8_unmasked(<4 x i8> %va, i32 zeroext %evl) {
+; CHECK-LABEL: vssub_vi_v4i8_unmasked:
+; CHECK: # %bb.0:
+; CHECK-NEXT: li a1, -1
+; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma
+; CHECK-NEXT: vssub.vx v8, v8, a1
+; CHECK-NEXT: ret
+ %elt.head = insertelement <4 x i8> poison, i8 -1, i32 0
+ %vb = shufflevector <4 x i8> %elt.head, <4 x i8> poison, <4 x i32> zeroinitializer
+ %head = insertelement <4 x i1> poison, i1 true, i32 0
+ %m = shufflevector <4 x i1> %head, <4 x i1> poison, <4 x i32> zeroinitializer
+ %v = call <4 x i8> @llvm.vp.ssub.sat.v4i8(<4 x i8> %va, <4 x i8> %vb, <4 x i1> %m, i32 %evl)
+ ret <4 x i8> %v
+}
+
+declare <5 x i8> @llvm.vp.ssub.sat.v5i8(<5 x i8>, <5 x i8>, <5 x i1>, i32)
+
+define <5 x i8> @vssub_vv_v5i8(<5 x i8> %va, <5 x i8> %b, <5 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vssub_vv_v5i8:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma
+; CHECK-NEXT: vssub.vv v8, v8, v9, v0.t
+; CHECK-NEXT: ret
+ %v = call <5 x i8> @llvm.vp.ssub.sat.v5i8(<5 x i8> %va, <5 x i8> %b, <5 x i1> %m, i32 %evl)
+ ret <5 x i8> %v
+}
+
+define <5 x i8> @vssub_vv_v5i8_unmasked(<5 x i8> %va, <5 x i8> %b, i32 zeroext %evl) {
+; CHECK-LABEL: vssub_vv_v5i8_unmasked:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma
+; CHECK-NEXT: vssub.vv v8, v8, v9
+; CHECK-NEXT: ret
+ %head = insertelement <5 x i1> poison, i1 true, i32 0
+ %m = shufflevector <5 x i1> %head, <5 x i1> poison, <5 x i32> zeroinitializer
+ %v = call <5 x i8> @llvm.vp.ssub.sat.v5i8(<5 x i8> %va, <5 x i8> %b, <5 x i1> %m, i32 %evl)
+ ret <5 x i8> %v
+}
+
+define <5 x i8> @vssub_vx_v5i8(<5 x i8> %va, i8 %b, <5 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vssub_vx_v5i8:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma
+; CHECK-NEXT: vssub.vx v8, v8, a0, v0.t
+; CHECK-NEXT: ret
+ %elt.head = insertelement <5 x i8> poison, i8 %b, i32 0
+ %vb = shufflevector <5 x i8> %elt.head, <5 x i8> poison, <5 x i32> zeroinitializer
+ %v = call <5 x i8> @llvm.vp.ssub.sat.v5i8(<5 x i8> %va, <5 x i8> %vb, <5 x i1> %m, i32 %evl)
+ ret <5 x i8> %v
+}
+
+define <5 x i8> @vssub_vx_v5i8_unmasked(<5 x i8> %va, i8 %b, i32 zeroext %evl) {
+; CHECK-LABEL: vssub_vx_v5i8_unmasked:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma
+; CHECK-NEXT: vssub.vx v8, v8, a0
+; CHECK-NEXT: ret
+ %elt.head = insertelement <5 x i8> poison, i8 %b, i32 0
+ %vb = shufflevector <5 x i8> %elt.head, <5 x i8> poison, <5 x i32> zeroinitializer
+ %head = insertelement <5 x i1> poison, i1 true, i32 0
+ %m = shufflevector <5 x i1> %head, <5 x i1> poison, <5 x i32> zeroinitializer
+ %v = call <5 x i8> @llvm.vp.ssub.sat.v5i8(<5 x i8> %va, <5 x i8> %vb, <5 x i1> %m, i32 %evl)
+ ret <5 x i8> %v
+}
+
+define <5 x i8> @vssub_vi_v5i8(<5 x i8> %va, <5 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vssub_vi_v5i8:
+; CHECK: # %bb.0:
+; CHECK-NEXT: li a1, -1
+; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma
+; CHECK-NEXT: vssub.vx v8, v8, a1, v0.t
+; CHECK-NEXT: ret
+ %elt.head = insertelement <5 x i8> poison, i8 -1, i32 0
+ %vb = shufflevector <5 x i8> %elt.head, <5 x i8> poison, <5 x i32> zeroinitializer
+ %v = call <5 x i8> @llvm.vp.ssub.sat.v5i8(<5 x i8> %va, <5 x i8> %vb, <5 x i1> %m, i32 %evl)
+ ret <5 x i8> %v
+}
+
+define <5 x i8> @vssub_vi_v5i8_unmasked(<5 x i8> %va, i32 zeroext %evl) {
+; CHECK-LABEL: vssub_vi_v5i8_unmasked:
+; CHECK: # %bb.0:
+; CHECK-NEXT: li a1, -1
+; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma
+; CHECK-NEXT: vssub.vx v8, v8, a1
+; CHECK-NEXT: ret
+ %elt.head = insertelement <5 x i8> poison, i8 -1, i32 0
+ %vb = shufflevector <5 x i8> %elt.head, <5 x i8> poison, <5 x i32> zeroinitializer
+ %head = insertelement <5 x i1> poison, i1 true, i32 0
+ %m = shufflevector <5 x i1> %head, <5 x i1> poison, <5 x i32> zeroinitializer
+ %v = call <5 x i8> @llvm.vp.ssub.sat.v5i8(<5 x i8> %va, <5 x i8> %vb, <5 x i1> %m, i32 %evl)
+ ret <5 x i8> %v
+}
+
+declare <8 x i8> @llvm.vp.ssub.sat.v8i8(<8 x i8>, <8 x i8>, <8 x i1>, i32)
+
+define <8 x i8> @vssub_vv_v8i8(<8 x i8> %va, <8 x i8> %b, <8 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vssub_vv_v8i8:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma
+; CHECK-NEXT: vssub.vv v8, v8, v9, v0.t
+; CHECK-NEXT: ret
+ %v = call <8 x i8> @llvm.vp.ssub.sat.v8i8(<8 x i8> %va, <8 x i8> %b, <8 x i1> %m, i32 %evl)
+ ret <8 x i8> %v
+}
+
+define <8 x i8> @vssub_vv_v8i8_unmasked(<8 x i8> %va, <8 x i8> %b, i32 zeroext %evl) {
+; CHECK-LABEL: vssub_vv_v8i8_unmasked:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma
+; CHECK-NEXT: vssub.vv v8, v8, v9
+; CHECK-NEXT: ret
+ %head = insertelement <8 x i1> poison, i1 true, i32 0
+ %m = shufflevector <8 x i1> %head, <8 x i1> poison, <8 x i32> zeroinitializer
+ %v = call <8 x i8> @llvm.vp.ssub.sat.v8i8(<8 x i8> %va, <8 x i8> %b, <8 x i1> %m, i32 %evl)
+ ret <8 x i8> %v
+}
+
+define <8 x i8> @vssub_vx_v8i8(<8 x i8> %va, i8 %b, <8 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vssub_vx_v8i8:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma
+; CHECK-NEXT: vssub.vx v8, v8, a0, v0.t
+; CHECK-NEXT: ret
+ %elt.head = insertelement <8 x i8> poison, i8 %b, i32 0
+ %vb = shufflevector <8 x i8> %elt.head, <8 x i8> poison, <8 x i32> zeroinitializer
+ %v = call <8 x i8> @llvm.vp.ssub.sat.v8i8(<8 x i8> %va, <8 x i8> %vb, <8 x i1> %m, i32 %evl)
+ ret <8 x i8> %v
+}
+
+define <8 x i8> @vssub_vx_v8i8_unmasked(<8 x i8> %va, i8 %b, i32 zeroext %evl) {
+; CHECK-LABEL: vssub_vx_v8i8_unmasked:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma
+; CHECK-NEXT: vssub.vx v8, v8, a0
+; CHECK-NEXT: ret
+ %elt.head = insertelement <8 x i8> poison, i8 %b, i32 0
+ %vb = shufflevector <8 x i8> %elt.head, <8 x i8> poison, <8 x i32> zeroinitializer
+ %head = insertelement <8 x i1> poison, i1 true, i32 0
+ %m = shufflevector <8 x i1> %head, <8 x i1> poison, <8 x i32> zeroinitializer
+ %v = call <8 x i8> @llvm.vp.ssub.sat.v8i8(<8 x i8> %va, <8 x i8> %vb, <8 x i1> %m, i32 %evl)
+ ret <8 x i8> %v
+}
+
+define <8 x i8> @vssub_vi_v8i8(<8 x i8> %va, <8 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vssub_vi_v8i8:
+; CHECK: # %bb.0:
+; CHECK-NEXT: li a1, -1
+; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma
+; CHECK-NEXT: vssub.vx v8, v8, a1, v0.t
+; CHECK-NEXT: ret
+ %elt.head = insertelement <8 x i8> poison, i8 -1, i32 0
+ %vb = shufflevector <8 x i8> %elt.head, <8 x i8> poison, <8 x i32> zeroinitializer
+ %v = call <8 x i8> @llvm.vp.ssub.sat.v8i8(<8 x i8> %va, <8 x i8> %vb, <8 x i1> %m, i32 %evl)
+ ret <8 x i8> %v
+}
+
+define <8 x i8> @vssub_vi_v8i8_unmasked(<8 x i8> %va, i32 zeroext %evl) {
+; CHECK-LABEL: vssub_vi_v8i8_unmasked:
+; CHECK: # %bb.0:
+; CHECK-NEXT: li a1, -1
+; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma
+; CHECK-NEXT: vssub.vx v8, v8, a1
+; CHECK-NEXT: ret
+ %elt.head = insertelement <8 x i8> poison, i8 -1, i32 0
+ %vb = shufflevector <8 x i8> %elt.head, <8 x i8> poison, <8 x i32> zeroinitializer
+ %head = insertelement <8 x i1> poison, i1 true, i32 0
+ %m = shufflevector <8 x i1> %head, <8 x i1> poison, <8 x i32> zeroinitializer
+ %v = call <8 x i8> @llvm.vp.ssub.sat.v8i8(<8 x i8> %va, <8 x i8> %vb, <8 x i1> %m, i32 %evl)
+ ret <8 x i8> %v
+}
+
+declare <16 x i8> @llvm.vp.ssub.sat.v16i8(<16 x i8>, <16 x i8>, <16 x i1>, i32)
+
+define <16 x i8> @vssub_vv_v16i8(<16 x i8> %va, <16 x i8> %b, <16 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vssub_vv_v16i8:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma
+; CHECK-NEXT: vssub.vv v8, v8, v9, v0.t
+; CHECK-NEXT: ret
+ %v = call <16 x i8> @llvm.vp.ssub.sat.v16i8(<16 x i8> %va, <16 x i8> %b, <16 x i1> %m, i32 %evl)
+ ret <16 x i8> %v
+}
+
+define <16 x i8> @vssub_vv_v16i8_unmasked(<16 x i8> %va, <16 x i8> %b, i32 zeroext %evl) {
+; CHECK-LABEL: vssub_vv_v16i8_unmasked:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma
+; CHECK-NEXT: vssub.vv v8, v8, v9
+; CHECK-NEXT: ret
+ %head = insertelement <16 x i1> poison, i1 true, i32 0
+ %m = shufflevector <16 x i1> %head, <16 x i1> poison, <16 x i32> zeroinitializer
+ %v = call <16 x i8> @llvm.vp.ssub.sat.v16i8(<16 x i8> %va, <16 x i8> %b, <16 x i1> %m, i32 %evl)
+ ret <16 x i8> %v
+}
+
+define <16 x i8> @vssub_vx_v16i8(<16 x i8> %va, i8 %b, <16 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vssub_vx_v16i8:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma
+; CHECK-NEXT: vssub.vx v8, v8, a0, v0.t
+; CHECK-NEXT: ret
+ %elt.head = insertelement <16 x i8> poison, i8 %b, i32 0
+ %vb = shufflevector <16 x i8> %elt.head, <16 x i8> poison, <16 x i32> zeroinitializer
+ %v = call <16 x i8> @llvm.vp.ssub.sat.v16i8(<16 x i8> %va, <16 x i8> %vb, <16 x i1> %m, i32 %evl)
+ ret <16 x i8> %v
+}
+
+define <16 x i8> @vssub_vx_v16i8_unmasked(<16 x i8> %va, i8 %b, i32 zeroext %evl) {
+; CHECK-LABEL: vssub_vx_v16i8_unmasked:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma
+; CHECK-NEXT: vssub.vx v8, v8, a0
+; CHECK-NEXT: ret
+ %elt.head = insertelement <16 x i8> poison, i8 %b, i32 0
+ %vb = shufflevector <16 x i8> %elt.head, <16 x i8> poison, <16 x i32> zeroinitializer
+ %head = insertelement <16 x i1> poison, i1 true, i32 0
+ %m = shufflevector <16 x i1> %head, <16 x i1> poison, <16 x i32> zeroinitializer
+ %v = call <16 x i8> @llvm.vp.ssub.sat.v16i8(<16 x i8> %va, <16 x i8> %vb, <16 x i1> %m, i32 %evl)
+ ret <16 x i8> %v
+}
+
+define <16 x i8> @vssub_vi_v16i8(<16 x i8> %va, <16 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vssub_vi_v16i8:
+; CHECK: # %bb.0:
+; CHECK-NEXT: li a1, -1
+; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma
+; CHECK-NEXT: vssub.vx v8, v8, a1, v0.t
+; CHECK-NEXT: ret
+ %elt.head = insertelement <16 x i8> poison, i8 -1, i32 0
+ %vb = shufflevector <16 x i8> %elt.head, <16 x i8> poison, <16 x i32> zeroinitializer
+ %v = call <16 x i8> @llvm.vp.ssub.sat.v16i8(<16 x i8> %va, <16 x i8> %vb, <16 x i1> %m, i32 %evl)
+ ret <16 x i8> %v
+}
+
+define <16 x i8> @vssub_vi_v16i8_unmasked(<16 x i8> %va, i32 zeroext %evl) {
+; CHECK-LABEL: vssub_vi_v16i8_unmasked:
+; CHECK: # %bb.0:
+; CHECK-NEXT: li a1, -1
+; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma
+; CHECK-NEXT: vssub.vx v8, v8, a1
+; CHECK-NEXT: ret
+ %elt.head = insertelement <16 x i8> poison, i8 -1, i32 0
+ %vb = shufflevector <16 x i8> %elt.head, <16 x i8> poison, <16 x i32> zeroinitializer
+ %head = insertelement <16 x i1> poison, i1 true, i32 0
+ %m = shufflevector <16 x i1> %head, <16 x i1> poison, <16 x i32> zeroinitializer
+ %v = call <16 x i8> @llvm.vp.ssub.sat.v16i8(<16 x i8> %va, <16 x i8> %vb, <16 x i1> %m, i32 %evl)
+ ret <16 x i8> %v
+}
+
+declare <256 x i8> @llvm.vp.ssub.sat.v258i8(<256 x i8>, <256 x i8>, <256 x i1>, i32)
+
+define <256 x i8> @vssub_vi_v258i8(<256 x i8> %va, <256 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vssub_vi_v258i8:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vmv1r.v v24, v0
+; CHECK-NEXT: li a2, 128
+; CHECK-NEXT: vsetvli zero, a2, e8, m8, ta, ma
+; CHECK-NEXT: vlm.v v0, (a0)
+; CHECK-NEXT: addi a0, a1, -128
+; CHECK-NEXT: sltu a3, a1, a0
+; CHECK-NEXT: addi a3, a3, -1
+; CHECK-NEXT: and a3, a3, a0
+; CHECK-NEXT: li a0, -1
+; CHECK-NEXT: vsetvli zero, a3, e8, m8, ta, ma
+; CHECK-NEXT: vssub.vx v16, v16, a0, v0.t
+; CHECK-NEXT: bltu a1, a2, .LBB32_2
+; CHECK-NEXT: # %bb.1:
+; CHECK-NEXT: li a1, 128
+; CHECK-NEXT: .LBB32_2:
+; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, ma
+; CHECK-NEXT: vmv1r.v v0, v24
+; CHECK-NEXT: vssub.vx v8, v8, a0, v0.t
+; CHECK-NEXT: ret
+ %elt.head = insertelement <256 x i8> poison, i8 -1, i32 0
+ %vb = shufflevector <256 x i8> %elt.head, <256 x i8> poison, <256 x i32> zeroinitializer
+ %v = call <256 x i8> @llvm.vp.ssub.sat.v258i8(<256 x i8> %va, <256 x i8> %vb, <256 x i1> %m, i32 %evl)
+ ret <256 x i8> %v
+}
+
+define <256 x i8> @vssub_vi_v258i8_unmasked(<256 x i8> %va, i32 zeroext %evl) {
+; CHECK-LABEL: vssub_vi_v258i8_unmasked:
+; CHECK: # %bb.0:
+; CHECK-NEXT: li a2, 128
+; CHECK-NEXT: mv a1, a0
+; CHECK-NEXT: bltu a0, a2, .LBB33_2
+; CHECK-NEXT: # %bb.1:
+; CHECK-NEXT: li a1, 128
+; CHECK-NEXT: .LBB33_2:
+; CHECK-NEXT: li a2, -1
+; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, ma
+; CHECK-NEXT: vssub.vx v8, v8, a2
+; CHECK-NEXT: addi a1, a0, -128
+; CHECK-NEXT: sltu a0, a0, a1
+; CHECK-NEXT: addi a0, a0, -1
+; CHECK-NEXT: and a0, a0, a1
+; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, ma
+; CHECK-NEXT: vssub.vx v16, v16, a2
+; CHECK-NEXT: ret
+ %elt.head = insertelement <256 x i8> poison, i8 -1, i32 0
+ %vb = shufflevector <256 x i8> %elt.head, <256 x i8> poison, <256 x i32> zeroinitializer
+ %head = insertelement <256 x i1> poison, i1 true, i32 0
+ %m = shufflevector <256 x i1> %head, <256 x i1> poison, <256 x i32> zeroinitializer
+ %v = call <256 x i8> @llvm.vp.ssub.sat.v258i8(<256 x i8> %va, <256 x i8> %vb, <256 x i1> %m, i32 %evl)
+ ret <256 x i8> %v
+}
+
+; Test splitting when the %evl is a known constant.
+
+define <256 x i8> @vssub_vi_v258i8_evl129(<256 x i8> %va, <256 x i1> %m) {
+; CHECK-LABEL: vssub_vi_v258i8_evl129:
+; CHECK: # %bb.0:
+; CHECK-NEXT: li a1, 128
+; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, ma
+; CHECK-NEXT: vlm.v v24, (a0)
+; CHECK-NEXT: li a0, -1
+; CHECK-NEXT: vssub.vx v8, v8, a0, v0.t
+; CHECK-NEXT: vsetivli zero, 1, e8, m8, ta, ma
+; CHECK-NEXT: vmv1r.v v0, v24
+; CHECK-NEXT: vssub.vx v16, v16, a0, v0.t
+; CHECK-NEXT: ret
+ %elt.head = insertelement <256 x i8> poison, i8 -1, i32 0
+ %vb = shufflevector <256 x i8> %elt.head, <256 x i8> poison, <256 x i32> zeroinitializer
+ %v = call <256 x i8> @llvm.vp.ssub.sat.v258i8(<256 x i8> %va, <256 x i8> %vb, <256 x i1> %m, i32 129)
+ ret <256 x i8> %v
+}
+
+; FIXME: The upper half is doing nothing.
+
+define <256 x i8> @vssub_vi_v258i8_evl128(<256 x i8> %va, <256 x i1> %m) {
+; CHECK-LABEL: vssub_vi_v258i8_evl128:
+; CHECK: # %bb.0:
+; CHECK-NEXT: li a1, 128
+; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, ma
+; CHECK-NEXT: vlm.v v24, (a0)
+; CHECK-NEXT: li a0, -1
+; CHECK-NEXT: vssub.vx v8, v8, a0, v0.t
+; CHECK-NEXT: vsetivli zero, 0, e8, m8, ta, ma
+; CHECK-NEXT: vmv1r.v v0, v24
+; CHECK-NEXT: vssub.vx v16, v16, a0, v0.t
+; CHECK-NEXT: ret
+ %elt.head = insertelement <256 x i8> poison, i8 -1, i32 0
+ %vb = shufflevector <256 x i8> %elt.head, <256 x i8> poison, <256 x i32> zeroinitializer
+ %v = call <256 x i8> @llvm.vp.ssub.sat.v258i8(<256 x i8> %va, <256 x i8> %vb, <256 x i1> %m, i32 128)
+ ret <256 x i8> %v
+}
+
+declare <2 x i16> @llvm.vp.ssub.sat.v2i16(<2 x i16>, <2 x i16>, <2 x i1>, i32)
+
+define <2 x i16> @vssub_vv_v2i16(<2 x i16> %va, <2 x i16> %b, <2 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vssub_vv_v2i16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
+; CHECK-NEXT: vssub.vv v8, v8, v9, v0.t
+; CHECK-NEXT: ret
+ %v = call <2 x i16> @llvm.vp.ssub.sat.v2i16(<2 x i16> %va, <2 x i16> %b, <2 x i1> %m, i32 %evl)
+ ret <2 x i16> %v
+}
+
+define <2 x i16> @vssub_vv_v2i16_unmasked(<2 x i16> %va, <2 x i16> %b, i32 zeroext %evl) {
+; CHECK-LABEL: vssub_vv_v2i16_unmasked:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
+; CHECK-NEXT: vssub.vv v8, v8, v9
+; CHECK-NEXT: ret
+ %head = insertelement <2 x i1> poison, i1 true, i32 0
+ %m = shufflevector <2 x i1> %head, <2 x i1> poison, <2 x i32> zeroinitializer
+ %v = call <2 x i16> @llvm.vp.ssub.sat.v2i16(<2 x i16> %va, <2 x i16> %b, <2 x i1> %m, i32 %evl)
+ ret <2 x i16> %v
+}
+
+define <2 x i16> @vssub_vx_v2i16(<2 x i16> %va, i16 %b, <2 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vssub_vx_v2i16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
+; CHECK-NEXT: vssub.vx v8, v8, a0, v0.t
+; CHECK-NEXT: ret
+ %elt.head = insertelement <2 x i16> poison, i16 %b, i32 0
+ %vb = shufflevector <2 x i16> %elt.head, <2 x i16> poison, <2 x i32> zeroinitializer
+ %v = call <2 x i16> @llvm.vp.ssub.sat.v2i16(<2 x i16> %va, <2 x i16> %vb, <2 x i1> %m, i32 %evl)
+ ret <2 x i16> %v
+}
+
+define <2 x i16> @vssub_vx_v2i16_unmasked(<2 x i16> %va, i16 %b, i32 zeroext %evl) {
+; CHECK-LABEL: vssub_vx_v2i16_unmasked:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
+; CHECK-NEXT: vssub.vx v8, v8, a0
+; CHECK-NEXT: ret
+ %elt.head = insertelement <2 x i16> poison, i16 %b, i32 0
+ %vb = shufflevector <2 x i16> %elt.head, <2 x i16> poison, <2 x i32> zeroinitializer
+ %head = insertelement <2 x i1> poison, i1 true, i32 0
+ %m = shufflevector <2 x i1> %head, <2 x i1> poison, <2 x i32> zeroinitializer
+ %v = call <2 x i16> @llvm.vp.ssub.sat.v2i16(<2 x i16> %va, <2 x i16> %vb, <2 x i1> %m, i32 %evl)
+ ret <2 x i16> %v
+}
+
+define <2 x i16> @vssub_vi_v2i16(<2 x i16> %va, <2 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vssub_vi_v2i16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: li a1, -1
+; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
+; CHECK-NEXT: vssub.vx v8, v8, a1, v0.t
+; CHECK-NEXT: ret
+ %elt.head = insertelement <2 x i16> poison, i16 -1, i32 0
+ %vb = shufflevector <2 x i16> %elt.head, <2 x i16> poison, <2 x i32> zeroinitializer
+ %v = call <2 x i16> @llvm.vp.ssub.sat.v2i16(<2 x i16> %va, <2 x i16> %vb, <2 x i1> %m, i32 %evl)
+ ret <2 x i16> %v
+}
+
+define <2 x i16> @vssub_vi_v2i16_unmasked(<2 x i16> %va, i32 zeroext %evl) {
+; CHECK-LABEL: vssub_vi_v2i16_unmasked:
+; CHECK: # %bb.0:
+; CHECK-NEXT: li a1, -1
+; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
+; CHECK-NEXT: vssub.vx v8, v8, a1
+; CHECK-NEXT: ret
+ %elt.head = insertelement <2 x i16> poison, i16 -1, i32 0
+ %vb = shufflevector <2 x i16> %elt.head, <2 x i16> poison, <2 x i32> zeroinitializer
+ %head = insertelement <2 x i1> poison, i1 true, i32 0
+ %m = shufflevector <2 x i1> %head, <2 x i1> poison, <2 x i32> zeroinitializer
+ %v = call <2 x i16> @llvm.vp.ssub.sat.v2i16(<2 x i16> %va, <2 x i16> %vb, <2 x i1> %m, i32 %evl)
+ ret <2 x i16> %v
+}
+
+declare <4 x i16> @llvm.vp.ssub.sat.v4i16(<4 x i16>, <4 x i16>, <4 x i1>, i32)
+
+define <4 x i16> @vssub_vv_v4i16(<4 x i16> %va, <4 x i16> %b, <4 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vssub_vv_v4i16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma
+; CHECK-NEXT: vssub.vv v8, v8, v9, v0.t
+; CHECK-NEXT: ret
+ %v = call <4 x i16> @llvm.vp.ssub.sat.v4i16(<4 x i16> %va, <4 x i16> %b, <4 x i1> %m, i32 %evl)
+ ret <4 x i16> %v
+}
+
+define <4 x i16> @vssub_vv_v4i16_unmasked(<4 x i16> %va, <4 x i16> %b, i32 zeroext %evl) {
+; CHECK-LABEL: vssub_vv_v4i16_unmasked:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma
+; CHECK-NEXT: vssub.vv v8, v8, v9
+; CHECK-NEXT: ret
+ %head = insertelement <4 x i1> poison, i1 true, i32 0
+ %m = shufflevector <4 x i1> %head, <4 x i1> poison, <4 x i32> zeroinitializer
+ %v = call <4 x i16> @llvm.vp.ssub.sat.v4i16(<4 x i16> %va, <4 x i16> %b, <4 x i1> %m, i32 %evl)
+ ret <4 x i16> %v
+}
+
+define <4 x i16> @vssub_vx_v4i16(<4 x i16> %va, i16 %b, <4 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vssub_vx_v4i16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
+; CHECK-NEXT: vssub.vx v8, v8, a0, v0.t
+; CHECK-NEXT: ret
+ %elt.head = insertelement <4 x i16> poison, i16 %b, i32 0
+ %vb = shufflevector <4 x i16> %elt.head, <4 x i16> poison, <4 x i32> zeroinitializer
+ %v = call <4 x i16> @llvm.vp.ssub.sat.v4i16(<4 x i16> %va, <4 x i16> %vb, <4 x i1> %m, i32 %evl)
+ ret <4 x i16> %v
+}
+
+define <4 x i16> @vssub_vx_v4i16_unmasked(<4 x i16> %va, i16 %b, i32 zeroext %evl) {
+; CHECK-LABEL: vssub_vx_v4i16_unmasked:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
+; CHECK-NEXT: vssub.vx v8, v8, a0
+; CHECK-NEXT: ret
+ %elt.head = insertelement <4 x i16> poison, i16 %b, i32 0
+ %vb = shufflevector <4 x i16> %elt.head, <4 x i16> poison, <4 x i32> zeroinitializer
+ %head = insertelement <4 x i1> poison, i1 true, i32 0
+ %m = shufflevector <4 x i1> %head, <4 x i1> poison, <4 x i32> zeroinitializer
+ %v = call <4 x i16> @llvm.vp.ssub.sat.v4i16(<4 x i16> %va, <4 x i16> %vb, <4 x i1> %m, i32 %evl)
+ ret <4 x i16> %v
+}
+
+define <4 x i16> @vssub_vi_v4i16(<4 x i16> %va, <4 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vssub_vi_v4i16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: li a1, -1
+; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma
+; CHECK-NEXT: vssub.vx v8, v8, a1, v0.t
+; CHECK-NEXT: ret
+ %elt.head = insertelement <4 x i16> poison, i16 -1, i32 0
+ %vb = shufflevector <4 x i16> %elt.head, <4 x i16> poison, <4 x i32> zeroinitializer
+ %v = call <4 x i16> @llvm.vp.ssub.sat.v4i16(<4 x i16> %va, <4 x i16> %vb, <4 x i1> %m, i32 %evl)
+ ret <4 x i16> %v
+}
+
+define <4 x i16> @vssub_vi_v4i16_unmasked(<4 x i16> %va, i32 zeroext %evl) {
+; CHECK-LABEL: vssub_vi_v4i16_unmasked:
+; CHECK: # %bb.0:
+; CHECK-NEXT: li a1, -1
+; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma
+; CHECK-NEXT: vssub.vx v8, v8, a1
+; CHECK-NEXT: ret
+ %elt.head = insertelement <4 x i16> poison, i16 -1, i32 0
+ %vb = shufflevector <4 x i16> %elt.head, <4 x i16> poison, <4 x i32> zeroinitializer
+ %head = insertelement <4 x i1> poison, i1 true, i32 0
+ %m = shufflevector <4 x i1> %head, <4 x i1> poison, <4 x i32> zeroinitializer
+ %v = call <4 x i16> @llvm.vp.ssub.sat.v4i16(<4 x i16> %va, <4 x i16> %vb, <4 x i1> %m, i32 %evl)
+ ret <4 x i16> %v
+}
+
+declare <8 x i16> @llvm.vp.ssub.sat.v8i16(<8 x i16>, <8 x i16>, <8 x i1>, i32)
+
+define <8 x i16> @vssub_vv_v8i16(<8 x i16> %va, <8 x i16> %b, <8 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vssub_vv_v8i16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma
+; CHECK-NEXT: vssub.vv v8, v8, v9, v0.t
+; CHECK-NEXT: ret
+ %v = call <8 x i16> @llvm.vp.ssub.sat.v8i16(<8 x i16> %va, <8 x i16> %b, <8 x i1> %m, i32 %evl)
+ ret <8 x i16> %v
+}
+
+define <8 x i16> @vssub_vv_v8i16_unmasked(<8 x i16> %va, <8 x i16> %b, i32 zeroext %evl) {
+; CHECK-LABEL: vssub_vv_v8i16_unmasked:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma
+; CHECK-NEXT: vssub.vv v8, v8, v9
+; CHECK-NEXT: ret
+ %head = insertelement <8 x i1> poison, i1 true, i32 0
+ %m = shufflevector <8 x i1> %head, <8 x i1> poison, <8 x i32> zeroinitializer
+ %v = call <8 x i16> @llvm.vp.ssub.sat.v8i16(<8 x i16> %va, <8 x i16> %b, <8 x i1> %m, i32 %evl)
+ ret <8 x i16> %v
+}
+
+define <8 x i16> @vssub_vx_v8i16(<8 x i16> %va, i16 %b, <8 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vssub_vx_v8i16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
+; CHECK-NEXT: vssub.vx v8, v8, a0, v0.t
+; CHECK-NEXT: ret
+ %elt.head = insertelement <8 x i16> poison, i16 %b, i32 0
+ %vb = shufflevector <8 x i16> %elt.head, <8 x i16> poison, <8 x i32> zeroinitializer
+ %v = call <8 x i16> @llvm.vp.ssub.sat.v8i16(<8 x i16> %va, <8 x i16> %vb, <8 x i1> %m, i32 %evl)
+ ret <8 x i16> %v
+}
+
+define <8 x i16> @vssub_vx_v8i16_unmasked(<8 x i16> %va, i16 %b, i32 zeroext %evl) {
+; CHECK-LABEL: vssub_vx_v8i16_unmasked:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
+; CHECK-NEXT: vssub.vx v8, v8, a0
+; CHECK-NEXT: ret
+ %elt.head = insertelement <8 x i16> poison, i16 %b, i32 0
+ %vb = shufflevector <8 x i16> %elt.head, <8 x i16> poison, <8 x i32> zeroinitializer
+ %head = insertelement <8 x i1> poison, i1 true, i32 0
+ %m = shufflevector <8 x i1> %head, <8 x i1> poison, <8 x i32> zeroinitializer
+ %v = call <8 x i16> @llvm.vp.ssub.sat.v8i16(<8 x i16> %va, <8 x i16> %vb, <8 x i1> %m, i32 %evl)
+ ret <8 x i16> %v
+}
+
+define <8 x i16> @vssub_vi_v8i16(<8 x i16> %va, <8 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vssub_vi_v8i16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: li a1, -1
+; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma
+; CHECK-NEXT: vssub.vx v8, v8, a1, v0.t
+; CHECK-NEXT: ret
+ %elt.head = insertelement <8 x i16> poison, i16 -1, i32 0
+ %vb = shufflevector <8 x i16> %elt.head, <8 x i16> poison, <8 x i32> zeroinitializer
+ %v = call <8 x i16> @llvm.vp.ssub.sat.v8i16(<8 x i16> %va, <8 x i16> %vb, <8 x i1> %m, i32 %evl)
+ ret <8 x i16> %v
+}
+
+define <8 x i16> @vssub_vi_v8i16_unmasked(<8 x i16> %va, i32 zeroext %evl) {
+; CHECK-LABEL: vssub_vi_v8i16_unmasked:
+; CHECK: # %bb.0:
+; CHECK-NEXT: li a1, -1
+; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma
+; CHECK-NEXT: vssub.vx v8, v8, a1
+; CHECK-NEXT: ret
+ %elt.head = insertelement <8 x i16> poison, i16 -1, i32 0
+ %vb = shufflevector <8 x i16> %elt.head, <8 x i16> poison, <8 x i32> zeroinitializer
+ %head = insertelement <8 x i1> poison, i1 true, i32 0
+ %m = shufflevector <8 x i1> %head, <8 x i1> poison, <8 x i32> zeroinitializer
+ %v = call <8 x i16> @llvm.vp.ssub.sat.v8i16(<8 x i16> %va, <8 x i16> %vb, <8 x i1> %m, i32 %evl)
+ ret <8 x i16> %v
+}
+
+declare <16 x i16> @llvm.vp.ssub.sat.v16i16(<16 x i16>, <16 x i16>, <16 x i1>, i32)
+
+define <16 x i16> @vssub_vv_v16i16(<16 x i16> %va, <16 x i16> %b, <16 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vssub_vv_v16i16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma
+; CHECK-NEXT: vssub.vv v8, v8, v10, v0.t
+; CHECK-NEXT: ret
+ %v = call <16 x i16> @llvm.vp.ssub.sat.v16i16(<16 x i16> %va, <16 x i16> %b, <16 x i1> %m, i32 %evl)
+ ret <16 x i16> %v
+}
+
+define <16 x i16> @vssub_vv_v16i16_unmasked(<16 x i16> %va, <16 x i16> %b, i32 zeroext %evl) {
+; CHECK-LABEL: vssub_vv_v16i16_unmasked:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma
+; CHECK-NEXT: vssub.vv v8, v8, v10
+; CHECK-NEXT: ret
+ %head = insertelement <16 x i1> poison, i1 true, i32 0
+ %m = shufflevector <16 x i1> %head, <16 x i1> poison, <16 x i32> zeroinitializer
+ %v = call <16 x i16> @llvm.vp.ssub.sat.v16i16(<16 x i16> %va, <16 x i16> %b, <16 x i1> %m, i32 %evl)
+ ret <16 x i16> %v
+}
+
+define <16 x i16> @vssub_vx_v16i16(<16 x i16> %va, i16 %b, <16 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vssub_vx_v16i16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma
+; CHECK-NEXT: vssub.vx v8, v8, a0, v0.t
+; CHECK-NEXT: ret
+ %elt.head = insertelement <16 x i16> poison, i16 %b, i32 0
+ %vb = shufflevector <16 x i16> %elt.head, <16 x i16> poison, <16 x i32> zeroinitializer
+ %v = call <16 x i16> @llvm.vp.ssub.sat.v16i16(<16 x i16> %va, <16 x i16> %vb, <16 x i1> %m, i32 %evl)
+ ret <16 x i16> %v
+}
+
+define <16 x i16> @vssub_vx_v16i16_unmasked(<16 x i16> %va, i16 %b, i32 zeroext %evl) {
+; CHECK-LABEL: vssub_vx_v16i16_unmasked:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma
+; CHECK-NEXT: vssub.vx v8, v8, a0
+; CHECK-NEXT: ret
+ %elt.head = insertelement <16 x i16> poison, i16 %b, i32 0
+ %vb = shufflevector <16 x i16> %elt.head, <16 x i16> poison, <16 x i32> zeroinitializer
+ %head = insertelement <16 x i1> poison, i1 true, i32 0
+ %m = shufflevector <16 x i1> %head, <16 x i1> poison, <16 x i32> zeroinitializer
+ %v = call <16 x i16> @llvm.vp.ssub.sat.v16i16(<16 x i16> %va, <16 x i16> %vb, <16 x i1> %m, i32 %evl)
+ ret <16 x i16> %v
+}
+
+define <16 x i16> @vssub_vi_v16i16(<16 x i16> %va, <16 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vssub_vi_v16i16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: li a1, -1
+; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma
+; CHECK-NEXT: vssub.vx v8, v8, a1, v0.t
+; CHECK-NEXT: ret
+ %elt.head = insertelement <16 x i16> poison, i16 -1, i32 0
+ %vb = shufflevector <16 x i16> %elt.head, <16 x i16> poison, <16 x i32> zeroinitializer
+ %v = call <16 x i16> @llvm.vp.ssub.sat.v16i16(<16 x i16> %va, <16 x i16> %vb, <16 x i1> %m, i32 %evl)
+ ret <16 x i16> %v
+}
+
+define <16 x i16> @vssub_vi_v16i16_unmasked(<16 x i16> %va, i32 zeroext %evl) {
+; CHECK-LABEL: vssub_vi_v16i16_unmasked:
+; CHECK: # %bb.0:
+; CHECK-NEXT: li a1, -1
+; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma
+; CHECK-NEXT: vssub.vx v8, v8, a1
+; CHECK-NEXT: ret
+ %elt.head = insertelement <16 x i16> poison, i16 -1, i32 0
+ %vb = shufflevector <16 x i16> %elt.head, <16 x i16> poison, <16 x i32> zeroinitializer
+ %head = insertelement <16 x i1> poison, i1 true, i32 0
+ %m = shufflevector <16 x i1> %head, <16 x i1> poison, <16 x i32> zeroinitializer
+ %v = call <16 x i16> @llvm.vp.ssub.sat.v16i16(<16 x i16> %va, <16 x i16> %vb, <16 x i1> %m, i32 %evl)
+ ret <16 x i16> %v
+}
+
+declare <2 x i32> @llvm.vp.ssub.sat.v2i32(<2 x i32>, <2 x i32>, <2 x i1>, i32)
+
+define <2 x i32> @vssub_vv_v2i32(<2 x i32> %va, <2 x i32> %b, <2 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vssub_vv_v2i32:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma
+; CHECK-NEXT: vssub.vv v8, v8, v9, v0.t
+; CHECK-NEXT: ret
+ %v = call <2 x i32> @llvm.vp.ssub.sat.v2i32(<2 x i32> %va, <2 x i32> %b, <2 x i1> %m, i32 %evl)
+ ret <2 x i32> %v
+}
+
+define <2 x i32> @vssub_vv_v2i32_unmasked(<2 x i32> %va, <2 x i32> %b, i32 zeroext %evl) {
+; CHECK-LABEL: vssub_vv_v2i32_unmasked:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma
+; CHECK-NEXT: vssub.vv v8, v8, v9
+; CHECK-NEXT: ret
+ %head = insertelement <2 x i1> poison, i1 true, i32 0
+ %m = shufflevector <2 x i1> %head, <2 x i1> poison, <2 x i32> zeroinitializer
+ %v = call <2 x i32> @llvm.vp.ssub.sat.v2i32(<2 x i32> %va, <2 x i32> %b, <2 x i1> %m, i32 %evl)
+ ret <2 x i32> %v
+}
+
+define <2 x i32> @vssub_vx_v2i32(<2 x i32> %va, i32 %b, <2 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vssub_vx_v2i32:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
+; CHECK-NEXT: vssub.vx v8, v8, a0, v0.t
+; CHECK-NEXT: ret
+ %elt.head = insertelement <2 x i32> poison, i32 %b, i32 0
+ %vb = shufflevector <2 x i32> %elt.head, <2 x i32> poison, <2 x i32> zeroinitializer
+ %v = call <2 x i32> @llvm.vp.ssub.sat.v2i32(<2 x i32> %va, <2 x i32> %vb, <2 x i1> %m, i32 %evl)
+ ret <2 x i32> %v
+}
+
+define <2 x i32> @vssub_vx_v2i32_unmasked(<2 x i32> %va, i32 %b, i32 zeroext %evl) {
+; CHECK-LABEL: vssub_vx_v2i32_unmasked:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
+; CHECK-NEXT: vssub.vx v8, v8, a0
+; CHECK-NEXT: ret
+ %elt.head = insertelement <2 x i32> poison, i32 %b, i32 0
+ %vb = shufflevector <2 x i32> %elt.head, <2 x i32> poison, <2 x i32> zeroinitializer
+ %head = insertelement <2 x i1> poison, i1 true, i32 0
+ %m = shufflevector <2 x i1> %head, <2 x i1> poison, <2 x i32> zeroinitializer
+ %v = call <2 x i32> @llvm.vp.ssub.sat.v2i32(<2 x i32> %va, <2 x i32> %vb, <2 x i1> %m, i32 %evl)
+ ret <2 x i32> %v
+}
+
+define <2 x i32> @vssub_vi_v2i32(<2 x i32> %va, <2 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vssub_vi_v2i32:
+; CHECK: # %bb.0:
+; CHECK-NEXT: li a1, -1
+; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma
+; CHECK-NEXT: vssub.vx v8, v8, a1, v0.t
+; CHECK-NEXT: ret
+ %elt.head = insertelement <2 x i32> poison, i32 -1, i32 0
+ %vb = shufflevector <2 x i32> %elt.head, <2 x i32> poison, <2 x i32> zeroinitializer
+ %v = call <2 x i32> @llvm.vp.ssub.sat.v2i32(<2 x i32> %va, <2 x i32> %vb, <2 x i1> %m, i32 %evl)
+ ret <2 x i32> %v
+}
+
+define <2 x i32> @vssub_vi_v2i32_unmasked(<2 x i32> %va, i32 zeroext %evl) {
+; CHECK-LABEL: vssub_vi_v2i32_unmasked:
+; CHECK: # %bb.0:
+; CHECK-NEXT: li a1, -1
+; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma
+; CHECK-NEXT: vssub.vx v8, v8, a1
+; CHECK-NEXT: ret
+ %elt.head = insertelement <2 x i32> poison, i32 -1, i32 0
+ %vb = shufflevector <2 x i32> %elt.head, <2 x i32> poison, <2 x i32> zeroinitializer
+ %head = insertelement <2 x i1> poison, i1 true, i32 0
+ %m = shufflevector <2 x i1> %head, <2 x i1> poison, <2 x i32> zeroinitializer
+ %v = call <2 x i32> @llvm.vp.ssub.sat.v2i32(<2 x i32> %va, <2 x i32> %vb, <2 x i1> %m, i32 %evl)
+ ret <2 x i32> %v
+}
+
+declare <4 x i32> @llvm.vp.ssub.sat.v4i32(<4 x i32>, <4 x i32>, <4 x i1>, i32)
+
+define <4 x i32> @vssub_vv_v4i32(<4 x i32> %va, <4 x i32> %b, <4 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vssub_vv_v4i32:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma
+; CHECK-NEXT: vssub.vv v8, v8, v9, v0.t
+; CHECK-NEXT: ret
+ %v = call <4 x i32> @llvm.vp.ssub.sat.v4i32(<4 x i32> %va, <4 x i32> %b, <4 x i1> %m, i32 %evl)
+ ret <4 x i32> %v
+}
+
+define <4 x i32> @vssub_vv_v4i32_unmasked(<4 x i32> %va, <4 x i32> %b, i32 zeroext %evl) {
+; CHECK-LABEL: vssub_vv_v4i32_unmasked:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma
+; CHECK-NEXT: vssub.vv v8, v8, v9
+; CHECK-NEXT: ret
+ %head = insertelement <4 x i1> poison, i1 true, i32 0
+ %m = shufflevector <4 x i1> %head, <4 x i1> poison, <4 x i32> zeroinitializer
+ %v = call <4 x i32> @llvm.vp.ssub.sat.v4i32(<4 x i32> %va, <4 x i32> %b, <4 x i1> %m, i32 %evl)
+ ret <4 x i32> %v
+}
+
+define <4 x i32> @vssub_vx_v4i32(<4 x i32> %va, i32 %b, <4 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vssub_vx_v4i32:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
+; CHECK-NEXT: vssub.vx v8, v8, a0, v0.t
+; CHECK-NEXT: ret
+ %elt.head = insertelement <4 x i32> poison, i32 %b, i32 0
+ %vb = shufflevector <4 x i32> %elt.head, <4 x i32> poison, <4 x i32> zeroinitializer
+ %v = call <4 x i32> @llvm.vp.ssub.sat.v4i32(<4 x i32> %va, <4 x i32> %vb, <4 x i1> %m, i32 %evl)
+ ret <4 x i32> %v
+}
+
+define <4 x i32> @vssub_vx_v4i32_unmasked(<4 x i32> %va, i32 %b, i32 zeroext %evl) {
+; CHECK-LABEL: vssub_vx_v4i32_unmasked:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
+; CHECK-NEXT: vssub.vx v8, v8, a0
+; CHECK-NEXT: ret
+ %elt.head = insertelement <4 x i32> poison, i32 %b, i32 0
+ %vb = shufflevector <4 x i32> %elt.head, <4 x i32> poison, <4 x i32> zeroinitializer
+ %head = insertelement <4 x i1> poison, i1 true, i32 0
+ %m = shufflevector <4 x i1> %head, <4 x i1> poison, <4 x i32> zeroinitializer
+ %v = call <4 x i32> @llvm.vp.ssub.sat.v4i32(<4 x i32> %va, <4 x i32> %vb, <4 x i1> %m, i32 %evl)
+ ret <4 x i32> %v
+}
+
+define <4 x i32> @vssub_vi_v4i32(<4 x i32> %va, <4 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vssub_vi_v4i32:
+; CHECK: # %bb.0:
+; CHECK-NEXT: li a1, -1
+; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma
+; CHECK-NEXT: vssub.vx v8, v8, a1, v0.t
+; CHECK-NEXT: ret
+ %elt.head = insertelement <4 x i32> poison, i32 -1, i32 0
+ %vb = shufflevector <4 x i32> %elt.head, <4 x i32> poison, <4 x i32> zeroinitializer
+ %v = call <4 x i32> @llvm.vp.ssub.sat.v4i32(<4 x i32> %va, <4 x i32> %vb, <4 x i1> %m, i32 %evl)
+ ret <4 x i32> %v
+}
+
+define <4 x i32> @vssub_vi_v4i32_unmasked(<4 x i32> %va, i32 zeroext %evl) {
+; CHECK-LABEL: vssub_vi_v4i32_unmasked:
+; CHECK: # %bb.0:
+; CHECK-NEXT: li a1, -1
+; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma
+; CHECK-NEXT: vssub.vx v8, v8, a1
+; CHECK-NEXT: ret
+ %elt.head = insertelement <4 x i32> poison, i32 -1, i32 0
+ %vb = shufflevector <4 x i32> %elt.head, <4 x i32> poison, <4 x i32> zeroinitializer
+ %head = insertelement <4 x i1> poison, i1 true, i32 0
+ %m = shufflevector <4 x i1> %head, <4 x i1> poison, <4 x i32> zeroinitializer
+ %v = call <4 x i32> @llvm.vp.ssub.sat.v4i32(<4 x i32> %va, <4 x i32> %vb, <4 x i1> %m, i32 %evl)
+ ret <4 x i32> %v
+}
+
+declare <8 x i32> @llvm.vp.ssub.sat.v8i32(<8 x i32>, <8 x i32>, <8 x i1>, i32)
+
+define <8 x i32> @vssub_vv_v8i32(<8 x i32> %va, <8 x i32> %b, <8 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vssub_vv_v8i32:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma
+; CHECK-NEXT: vssub.vv v8, v8, v10, v0.t
+; CHECK-NEXT: ret
+ %v = call <8 x i32> @llvm.vp.ssub.sat.v8i32(<8 x i32> %va, <8 x i32> %b, <8 x i1> %m, i32 %evl)
+ ret <8 x i32> %v
+}
+
+define <8 x i32> @vssub_vv_v8i32_unmasked(<8 x i32> %va, <8 x i32> %b, i32 zeroext %evl) {
+; CHECK-LABEL: vssub_vv_v8i32_unmasked:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma
+; CHECK-NEXT: vssub.vv v8, v8, v10
+; CHECK-NEXT: ret
+ %head = insertelement <8 x i1> poison, i1 true, i32 0
+ %m = shufflevector <8 x i1> %head, <8 x i1> poison, <8 x i32> zeroinitializer
+ %v = call <8 x i32> @llvm.vp.ssub.sat.v8i32(<8 x i32> %va, <8 x i32> %b, <8 x i1> %m, i32 %evl)
+ ret <8 x i32> %v
+}
+
+define <8 x i32> @vssub_vx_v8i32(<8 x i32> %va, i32 %b, <8 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vssub_vx_v8i32:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma
+; CHECK-NEXT: vssub.vx v8, v8, a0, v0.t
+; CHECK-NEXT: ret
+ %elt.head = insertelement <8 x i32> poison, i32 %b, i32 0
+ %vb = shufflevector <8 x i32> %elt.head, <8 x i32> poison, <8 x i32> zeroinitializer
+ %v = call <8 x i32> @llvm.vp.ssub.sat.v8i32(<8 x i32> %va, <8 x i32> %vb, <8 x i1> %m, i32 %evl)
+ ret <8 x i32> %v
+}
+
+define <8 x i32> @vssub_vx_v8i32_unmasked(<8 x i32> %va, i32 %b, i32 zeroext %evl) {
+; CHECK-LABEL: vssub_vx_v8i32_unmasked:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma
+; CHECK-NEXT: vssub.vx v8, v8, a0
+; CHECK-NEXT: ret
+ %elt.head = insertelement <8 x i32> poison, i32 %b, i32 0
+ %vb = shufflevector <8 x i32> %elt.head, <8 x i32> poison, <8 x i32> zeroinitializer
+ %head = insertelement <8 x i1> poison, i1 true, i32 0
+ %m = shufflevector <8 x i1> %head, <8 x i1> poison, <8 x i32> zeroinitializer
+ %v = call <8 x i32> @llvm.vp.ssub.sat.v8i32(<8 x i32> %va, <8 x i32> %vb, <8 x i1> %m, i32 %evl)
+ ret <8 x i32> %v
+}
+
+define <8 x i32> @vssub_vi_v8i32(<8 x i32> %va, <8 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vssub_vi_v8i32:
+; CHECK: # %bb.0:
+; CHECK-NEXT: li a1, -1
+; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma
+; CHECK-NEXT: vssub.vx v8, v8, a1, v0.t
+; CHECK-NEXT: ret
+ %elt.head = insertelement <8 x i32> poison, i32 -1, i32 0
+ %vb = shufflevector <8 x i32> %elt.head, <8 x i32> poison, <8 x i32> zeroinitializer
+ %v = call <8 x i32> @llvm.vp.ssub.sat.v8i32(<8 x i32> %va, <8 x i32> %vb, <8 x i1> %m, i32 %evl)
+ ret <8 x i32> %v
+}
+
+define <8 x i32> @vssub_vi_v8i32_unmasked(<8 x i32> %va, i32 zeroext %evl) {
+; CHECK-LABEL: vssub_vi_v8i32_unmasked:
+; CHECK: # %bb.0:
+; CHECK-NEXT: li a1, -1
+; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma
+; CHECK-NEXT: vssub.vx v8, v8, a1
+; CHECK-NEXT: ret
+ %elt.head = insertelement <8 x i32> poison, i32 -1, i32 0
+ %vb = shufflevector <8 x i32> %elt.head, <8 x i32> poison, <8 x i32> zeroinitializer
+ %head = insertelement <8 x i1> poison, i1 true, i32 0
+ %m = shufflevector <8 x i1> %head, <8 x i1> poison, <8 x i32> zeroinitializer
+ %v = call <8 x i32> @llvm.vp.ssub.sat.v8i32(<8 x i32> %va, <8 x i32> %vb, <8 x i1> %m, i32 %evl)
+ ret <8 x i32> %v
+}
+
+declare <16 x i32> @llvm.vp.ssub.sat.v16i32(<16 x i32>, <16 x i32>, <16 x i1>, i32)
+
+define <16 x i32> @vssub_vv_v16i32(<16 x i32> %va, <16 x i32> %b, <16 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vssub_vv_v16i32:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma
+; CHECK-NEXT: vssub.vv v8, v8, v12, v0.t
+; CHECK-NEXT: ret
+ %v = call <16 x i32> @llvm.vp.ssub.sat.v16i32(<16 x i32> %va, <16 x i32> %b, <16 x i1> %m, i32 %evl)
+ ret <16 x i32> %v
+}
+
+define <16 x i32> @vssub_vv_v16i32_unmasked(<16 x i32> %va, <16 x i32> %b, i32 zeroext %evl) {
+; CHECK-LABEL: vssub_vv_v16i32_unmasked:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma
+; CHECK-NEXT: vssub.vv v8, v8, v12
+; CHECK-NEXT: ret
+ %head = insertelement <16 x i1> poison, i1 true, i32 0
+ %m = shufflevector <16 x i1> %head, <16 x i1> poison, <16 x i32> zeroinitializer
+ %v = call <16 x i32> @llvm.vp.ssub.sat.v16i32(<16 x i32> %va, <16 x i32> %b, <16 x i1> %m, i32 %evl)
+ ret <16 x i32> %v
+}
+
+define <16 x i32> @vssub_vx_v16i32(<16 x i32> %va, i32 %b, <16 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vssub_vx_v16i32:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma
+; CHECK-NEXT: vssub.vx v8, v8, a0, v0.t
+; CHECK-NEXT: ret
+ %elt.head = insertelement <16 x i32> poison, i32 %b, i32 0
+ %vb = shufflevector <16 x i32> %elt.head, <16 x i32> poison, <16 x i32> zeroinitializer
+ %v = call <16 x i32> @llvm.vp.ssub.sat.v16i32(<16 x i32> %va, <16 x i32> %vb, <16 x i1> %m, i32 %evl)
+ ret <16 x i32> %v
+}
+
+define <16 x i32> @vssub_vx_v16i32_unmasked(<16 x i32> %va, i32 %b, i32 zeroext %evl) {
+; CHECK-LABEL: vssub_vx_v16i32_unmasked:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma
+; CHECK-NEXT: vssub.vx v8, v8, a0
+; CHECK-NEXT: ret
+ %elt.head = insertelement <16 x i32> poison, i32 %b, i32 0
+ %vb = shufflevector <16 x i32> %elt.head, <16 x i32> poison, <16 x i32> zeroinitializer
+ %head = insertelement <16 x i1> poison, i1 true, i32 0
+ %m = shufflevector <16 x i1> %head, <16 x i1> poison, <16 x i32> zeroinitializer
+ %v = call <16 x i32> @llvm.vp.ssub.sat.v16i32(<16 x i32> %va, <16 x i32> %vb, <16 x i1> %m, i32 %evl)
+ ret <16 x i32> %v
+}
+
+define <16 x i32> @vssub_vi_v16i32(<16 x i32> %va, <16 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vssub_vi_v16i32:
+; CHECK: # %bb.0:
+; CHECK-NEXT: li a1, -1
+; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma
+; CHECK-NEXT: vssub.vx v8, v8, a1, v0.t
+; CHECK-NEXT: ret
+ %elt.head = insertelement <16 x i32> poison, i32 -1, i32 0
+ %vb = shufflevector <16 x i32> %elt.head, <16 x i32> poison, <16 x i32> zeroinitializer
+ %v = call <16 x i32> @llvm.vp.ssub.sat.v16i32(<16 x i32> %va, <16 x i32> %vb, <16 x i1> %m, i32 %evl)
+ ret <16 x i32> %v
+}
+
+define <16 x i32> @vssub_vi_v16i32_unmasked(<16 x i32> %va, i32 zeroext %evl) {
+; CHECK-LABEL: vssub_vi_v16i32_unmasked:
+; CHECK: # %bb.0:
+; CHECK-NEXT: li a1, -1
+; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma
+; CHECK-NEXT: vssub.vx v8, v8, a1
+; CHECK-NEXT: ret
+ %elt.head = insertelement <16 x i32> poison, i32 -1, i32 0
+ %vb = shufflevector <16 x i32> %elt.head, <16 x i32> poison, <16 x i32> zeroinitializer
+ %head = insertelement <16 x i1> poison, i1 true, i32 0
+ %m = shufflevector <16 x i1> %head, <16 x i1> poison, <16 x i32> zeroinitializer
+ %v = call <16 x i32> @llvm.vp.ssub.sat.v16i32(<16 x i32> %va, <16 x i32> %vb, <16 x i1> %m, i32 %evl)
+ ret <16 x i32> %v
+}
+
+declare <2 x i64> @llvm.vp.ssub.sat.v2i64(<2 x i64>, <2 x i64>, <2 x i1>, i32)
+
+define <2 x i64> @vssub_vv_v2i64(<2 x i64> %va, <2 x i64> %b, <2 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vssub_vv_v2i64:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma
+; CHECK-NEXT: vssub.vv v8, v8, v9, v0.t
+; CHECK-NEXT: ret
+ %v = call <2 x i64> @llvm.vp.ssub.sat.v2i64(<2 x i64> %va, <2 x i64> %b, <2 x i1> %m, i32 %evl)
+ ret <2 x i64> %v
+}
+
+define <2 x i64> @vssub_vv_v2i64_unmasked(<2 x i64> %va, <2 x i64> %b, i32 zeroext %evl) {
+; CHECK-LABEL: vssub_vv_v2i64_unmasked:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma
+; CHECK-NEXT: vssub.vv v8, v8, v9
+; CHECK-NEXT: ret
+ %head = insertelement <2 x i1> poison, i1 true, i32 0
+ %m = shufflevector <2 x i1> %head, <2 x i1> poison, <2 x i32> zeroinitializer
+ %v = call <2 x i64> @llvm.vp.ssub.sat.v2i64(<2 x i64> %va, <2 x i64> %b, <2 x i1> %m, i32 %evl)
+ ret <2 x i64> %v
+}
+
+define <2 x i64> @vssub_vx_v2i64(<2 x i64> %va, i64 %b, <2 x i1> %m, i32 zeroext %evl) {
+; RV32-LABEL: vssub_vx_v2i64:
+; RV32: # %bb.0:
+; RV32-NEXT: addi sp, sp, -16
+; RV32-NEXT: .cfi_def_cfa_offset 16
+; RV32-NEXT: sw a1, 12(sp)
+; RV32-NEXT: sw a0, 8(sp)
+; RV32-NEXT: addi a0, sp, 8
+; RV32-NEXT: vsetivli zero, 2, e64, m1, ta, ma
+; RV32-NEXT: vlse64.v v9, (a0), zero
+; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, ma
+; RV32-NEXT: vssub.vv v8, v8, v9, v0.t
+; RV32-NEXT: addi sp, sp, 16
+; RV32-NEXT: ret
+;
+; RV64-LABEL: vssub_vx_v2i64:
+; RV64: # %bb.0:
+; RV64-NEXT: vsetvli zero, a1, e64, m1, ta, ma
+; RV64-NEXT: vssub.vx v8, v8, a0, v0.t
+; RV64-NEXT: ret
+ %elt.head = insertelement <2 x i64> poison, i64 %b, i32 0
+ %vb = shufflevector <2 x i64> %elt.head, <2 x i64> poison, <2 x i32> zeroinitializer
+ %v = call <2 x i64> @llvm.vp.ssub.sat.v2i64(<2 x i64> %va, <2 x i64> %vb, <2 x i1> %m, i32 %evl)
+ ret <2 x i64> %v
+}
+
+define <2 x i64> @vssub_vx_v2i64_unmasked(<2 x i64> %va, i64 %b, i32 zeroext %evl) {
+; RV32-LABEL: vssub_vx_v2i64_unmasked:
+; RV32: # %bb.0:
+; RV32-NEXT: addi sp, sp, -16
+; RV32-NEXT: .cfi_def_cfa_offset 16
+; RV32-NEXT: sw a1, 12(sp)
+; RV32-NEXT: sw a0, 8(sp)
+; RV32-NEXT: addi a0, sp, 8
+; RV32-NEXT: vsetivli zero, 2, e64, m1, ta, ma
+; RV32-NEXT: vlse64.v v9, (a0), zero
+; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, ma
+; RV32-NEXT: vssub.vv v8, v8, v9
+; RV32-NEXT: addi sp, sp, 16
+; RV32-NEXT: ret
+;
+; RV64-LABEL: vssub_vx_v2i64_unmasked:
+; RV64: # %bb.0:
+; RV64-NEXT: vsetvli zero, a1, e64, m1, ta, ma
+; RV64-NEXT: vssub.vx v8, v8, a0
+; RV64-NEXT: ret
+ %elt.head = insertelement <2 x i64> poison, i64 %b, i32 0
+ %vb = shufflevector <2 x i64> %elt.head, <2 x i64> poison, <2 x i32> zeroinitializer
+ %head = insertelement <2 x i1> poison, i1 true, i32 0
+ %m = shufflevector <2 x i1> %head, <2 x i1> poison, <2 x i32> zeroinitializer
+ %v = call <2 x i64> @llvm.vp.ssub.sat.v2i64(<2 x i64> %va, <2 x i64> %vb, <2 x i1> %m, i32 %evl)
+ ret <2 x i64> %v
+}
+
+define <2 x i64> @vssub_vi_v2i64(<2 x i64> %va, <2 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vssub_vi_v2i64:
+; CHECK: # %bb.0:
+; CHECK-NEXT: li a1, -1
+; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma
+; CHECK-NEXT: vssub.vx v8, v8, a1, v0.t
+; CHECK-NEXT: ret
+ %elt.head = insertelement <2 x i64> poison, i64 -1, i32 0
+ %vb = shufflevector <2 x i64> %elt.head, <2 x i64> poison, <2 x i32> zeroinitializer
+ %v = call <2 x i64> @llvm.vp.ssub.sat.v2i64(<2 x i64> %va, <2 x i64> %vb, <2 x i1> %m, i32 %evl)
+ ret <2 x i64> %v
+}
+
+define <2 x i64> @vssub_vi_v2i64_unmasked(<2 x i64> %va, i32 zeroext %evl) {
+; CHECK-LABEL: vssub_vi_v2i64_unmasked:
+; CHECK: # %bb.0:
+; CHECK-NEXT: li a1, -1
+; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma
+; CHECK-NEXT: vssub.vx v8, v8, a1
+; CHECK-NEXT: ret
+ %elt.head = insertelement <2 x i64> poison, i64 -1, i32 0
+ %vb = shufflevector <2 x i64> %elt.head, <2 x i64> poison, <2 x i32> zeroinitializer
+ %head = insertelement <2 x i1> poison, i1 true, i32 0
+ %m = shufflevector <2 x i1> %head, <2 x i1> poison, <2 x i32> zeroinitializer
+ %v = call <2 x i64> @llvm.vp.ssub.sat.v2i64(<2 x i64> %va, <2 x i64> %vb, <2 x i1> %m, i32 %evl)
+ ret <2 x i64> %v
+}
+
+declare <4 x i64> @llvm.vp.ssub.sat.v4i64(<4 x i64>, <4 x i64>, <4 x i1>, i32)
+
+define <4 x i64> @vssub_vv_v4i64(<4 x i64> %va, <4 x i64> %b, <4 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vssub_vv_v4i64:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma
+; CHECK-NEXT: vssub.vv v8, v8, v10, v0.t
+; CHECK-NEXT: ret
+ %v = call <4 x i64> @llvm.vp.ssub.sat.v4i64(<4 x i64> %va, <4 x i64> %b, <4 x i1> %m, i32 %evl)
+ ret <4 x i64> %v
+}
+
+define <4 x i64> @vssub_vv_v4i64_unmasked(<4 x i64> %va, <4 x i64> %b, i32 zeroext %evl) {
+; CHECK-LABEL: vssub_vv_v4i64_unmasked:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma
+; CHECK-NEXT: vssub.vv v8, v8, v10
+; CHECK-NEXT: ret
+ %head = insertelement <4 x i1> poison, i1 true, i32 0
+ %m = shufflevector <4 x i1> %head, <4 x i1> poison, <4 x i32> zeroinitializer
+ %v = call <4 x i64> @llvm.vp.ssub.sat.v4i64(<4 x i64> %va, <4 x i64> %b, <4 x i1> %m, i32 %evl)
+ ret <4 x i64> %v
+}
+
+define <4 x i64> @vssub_vx_v4i64(<4 x i64> %va, i64 %b, <4 x i1> %m, i32 zeroext %evl) {
+; RV32-LABEL: vssub_vx_v4i64:
+; RV32: # %bb.0:
+; RV32-NEXT: addi sp, sp, -16
+; RV32-NEXT: .cfi_def_cfa_offset 16
+; RV32-NEXT: sw a1, 12(sp)
+; RV32-NEXT: sw a0, 8(sp)
+; RV32-NEXT: addi a0, sp, 8
+; RV32-NEXT: vsetivli zero, 4, e64, m2, ta, ma
+; RV32-NEXT: vlse64.v v10, (a0), zero
+; RV32-NEXT: vsetvli zero, a2, e64, m2, ta, ma
+; RV32-NEXT: vssub.vv v8, v8, v10, v0.t
+; RV32-NEXT: addi sp, sp, 16
+; RV32-NEXT: ret
+;
+; RV64-LABEL: vssub_vx_v4i64:
+; RV64: # %bb.0:
+; RV64-NEXT: vsetvli zero, a1, e64, m2, ta, ma
+; RV64-NEXT: vssub.vx v8, v8, a0, v0.t
+; RV64-NEXT: ret
+ %elt.head = insertelement <4 x i64> poison, i64 %b, i32 0
+ %vb = shufflevector <4 x i64> %elt.head, <4 x i64> poison, <4 x i32> zeroinitializer
+ %v = call <4 x i64> @llvm.vp.ssub.sat.v4i64(<4 x i64> %va, <4 x i64> %vb, <4 x i1> %m, i32 %evl)
+ ret <4 x i64> %v
+}
+
+define <4 x i64> @vssub_vx_v4i64_unmasked(<4 x i64> %va, i64 %b, i32 zeroext %evl) {
+; RV32-LABEL: vssub_vx_v4i64_unmasked:
+; RV32: # %bb.0:
+; RV32-NEXT: addi sp, sp, -16
+; RV32-NEXT: .cfi_def_cfa_offset 16
+; RV32-NEXT: sw a1, 12(sp)
+; RV32-NEXT: sw a0, 8(sp)
+; RV32-NEXT: addi a0, sp, 8
+; RV32-NEXT: vsetivli zero, 4, e64, m2, ta, ma
+; RV32-NEXT: vlse64.v v10, (a0), zero
+; RV32-NEXT: vsetvli zero, a2, e64, m2, ta, ma
+; RV32-NEXT: vssub.vv v8, v8, v10
+; RV32-NEXT: addi sp, sp, 16
+; RV32-NEXT: ret
+;
+; RV64-LABEL: vssub_vx_v4i64_unmasked:
+; RV64: # %bb.0:
+; RV64-NEXT: vsetvli zero, a1, e64, m2, ta, ma
+; RV64-NEXT: vssub.vx v8, v8, a0
+; RV64-NEXT: ret
+ %elt.head = insertelement <4 x i64> poison, i64 %b, i32 0
+ %vb = shufflevector <4 x i64> %elt.head, <4 x i64> poison, <4 x i32> zeroinitializer
+ %head = insertelement <4 x i1> poison, i1 true, i32 0
+ %m = shufflevector <4 x i1> %head, <4 x i1> poison, <4 x i32> zeroinitializer
+ %v = call <4 x i64> @llvm.vp.ssub.sat.v4i64(<4 x i64> %va, <4 x i64> %vb, <4 x i1> %m, i32 %evl)
+ ret <4 x i64> %v
+}
+
+define <4 x i64> @vssub_vi_v4i64(<4 x i64> %va, <4 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vssub_vi_v4i64:
+; CHECK: # %bb.0:
+; CHECK-NEXT: li a1, -1
+; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma
+; CHECK-NEXT: vssub.vx v8, v8, a1, v0.t
+; CHECK-NEXT: ret
+ %elt.head = insertelement <4 x i64> poison, i64 -1, i32 0
+ %vb = shufflevector <4 x i64> %elt.head, <4 x i64> poison, <4 x i32> zeroinitializer
+ %v = call <4 x i64> @llvm.vp.ssub.sat.v4i64(<4 x i64> %va, <4 x i64> %vb, <4 x i1> %m, i32 %evl)
+ ret <4 x i64> %v
+}
+
+define <4 x i64> @vssub_vi_v4i64_unmasked(<4 x i64> %va, i32 zeroext %evl) {
+; CHECK-LABEL: vssub_vi_v4i64_unmasked:
+; CHECK: # %bb.0:
+; CHECK-NEXT: li a1, -1
+; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma
+; CHECK-NEXT: vssub.vx v8, v8, a1
+; CHECK-NEXT: ret
+ %elt.head = insertelement <4 x i64> poison, i64 -1, i32 0
+ %vb = shufflevector <4 x i64> %elt.head, <4 x i64> poison, <4 x i32> zeroinitializer
+ %head = insertelement <4 x i1> poison, i1 true, i32 0
+ %m = shufflevector <4 x i1> %head, <4 x i1> poison, <4 x i32> zeroinitializer
+ %v = call <4 x i64> @llvm.vp.ssub.sat.v4i64(<4 x i64> %va, <4 x i64> %vb, <4 x i1> %m, i32 %evl)
+ ret <4 x i64> %v
+}
+
+declare <8 x i64> @llvm.vp.ssub.sat.v8i64(<8 x i64>, <8 x i64>, <8 x i1>, i32)
+
+define <8 x i64> @vssub_vv_v8i64(<8 x i64> %va, <8 x i64> %b, <8 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vssub_vv_v8i64:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma
+; CHECK-NEXT: vssub.vv v8, v8, v12, v0.t
+; CHECK-NEXT: ret
+ %v = call <8 x i64> @llvm.vp.ssub.sat.v8i64(<8 x i64> %va, <8 x i64> %b, <8 x i1> %m, i32 %evl)
+ ret <8 x i64> %v
+}
+
+define <8 x i64> @vssub_vv_v8i64_unmasked(<8 x i64> %va, <8 x i64> %b, i32 zeroext %evl) {
+; CHECK-LABEL: vssub_vv_v8i64_unmasked:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma
+; CHECK-NEXT: vssub.vv v8, v8, v12
+; CHECK-NEXT: ret
+ %head = insertelement <8 x i1> poison, i1 true, i32 0
+ %m = shufflevector <8 x i1> %head, <8 x i1> poison, <8 x i32> zeroinitializer
+ %v = call <8 x i64> @llvm.vp.ssub.sat.v8i64(<8 x i64> %va, <8 x i64> %b, <8 x i1> %m, i32 %evl)
+ ret <8 x i64> %v
+}
+
+define <8 x i64> @vssub_vx_v8i64(<8 x i64> %va, i64 %b, <8 x i1> %m, i32 zeroext %evl) {
+; RV32-LABEL: vssub_vx_v8i64:
+; RV32: # %bb.0:
+; RV32-NEXT: addi sp, sp, -16
+; RV32-NEXT: .cfi_def_cfa_offset 16
+; RV32-NEXT: sw a1, 12(sp)
+; RV32-NEXT: sw a0, 8(sp)
+; RV32-NEXT: addi a0, sp, 8
+; RV32-NEXT: vsetivli zero, 8, e64, m4, ta, ma
+; RV32-NEXT: vlse64.v v12, (a0), zero
+; RV32-NEXT: vsetvli zero, a2, e64, m4, ta, ma
+; RV32-NEXT: vssub.vv v8, v8, v12, v0.t
+; RV32-NEXT: addi sp, sp, 16
+; RV32-NEXT: ret
+;
+; RV64-LABEL: vssub_vx_v8i64:
+; RV64: # %bb.0:
+; RV64-NEXT: vsetvli zero, a1, e64, m4, ta, ma
+; RV64-NEXT: vssub.vx v8, v8, a0, v0.t
+; RV64-NEXT: ret
+ %elt.head = insertelement <8 x i64> poison, i64 %b, i32 0
+ %vb = shufflevector <8 x i64> %elt.head, <8 x i64> poison, <8 x i32> zeroinitializer
+ %v = call <8 x i64> @llvm.vp.ssub.sat.v8i64(<8 x i64> %va, <8 x i64> %vb, <8 x i1> %m, i32 %evl)
+ ret <8 x i64> %v
+}
+
+define <8 x i64> @vssub_vx_v8i64_unmasked(<8 x i64> %va, i64 %b, i32 zeroext %evl) {
+; RV32-LABEL: vssub_vx_v8i64_unmasked:
+; RV32: # %bb.0:
+; RV32-NEXT: addi sp, sp, -16
+; RV32-NEXT: .cfi_def_cfa_offset 16
+; RV32-NEXT: sw a1, 12(sp)
+; RV32-NEXT: sw a0, 8(sp)
+; RV32-NEXT: addi a0, sp, 8
+; RV32-NEXT: vsetivli zero, 8, e64, m4, ta, ma
+; RV32-NEXT: vlse64.v v12, (a0), zero
+; RV32-NEXT: vsetvli zero, a2, e64, m4, ta, ma
+; RV32-NEXT: vssub.vv v8, v8, v12
+; RV32-NEXT: addi sp, sp, 16
+; RV32-NEXT: ret
+;
+; RV64-LABEL: vssub_vx_v8i64_unmasked:
+; RV64: # %bb.0:
+; RV64-NEXT: vsetvli zero, a1, e64, m4, ta, ma
+; RV64-NEXT: vssub.vx v8, v8, a0
+; RV64-NEXT: ret
+ %elt.head = insertelement <8 x i64> poison, i64 %b, i32 0
+ %vb = shufflevector <8 x i64> %elt.head, <8 x i64> poison, <8 x i32> zeroinitializer
+ %head = insertelement <8 x i1> poison, i1 true, i32 0
+ %m = shufflevector <8 x i1> %head, <8 x i1> poison, <8 x i32> zeroinitializer
+ %v = call <8 x i64> @llvm.vp.ssub.sat.v8i64(<8 x i64> %va, <8 x i64> %vb, <8 x i1> %m, i32 %evl)
+ ret <8 x i64> %v
+}
+
+define <8 x i64> @vssub_vi_v8i64(<8 x i64> %va, <8 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vssub_vi_v8i64:
+; CHECK: # %bb.0:
+; CHECK-NEXT: li a1, -1
+; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma
+; CHECK-NEXT: vssub.vx v8, v8, a1, v0.t
+; CHECK-NEXT: ret
+ %elt.head = insertelement <8 x i64> poison, i64 -1, i32 0
+ %vb = shufflevector <8 x i64> %elt.head, <8 x i64> poison, <8 x i32> zeroinitializer
+ %v = call <8 x i64> @llvm.vp.ssub.sat.v8i64(<8 x i64> %va, <8 x i64> %vb, <8 x i1> %m, i32 %evl)
+ ret <8 x i64> %v
+}
+
+define <8 x i64> @vssub_vi_v8i64_unmasked(<8 x i64> %va, i32 zeroext %evl) {
+; CHECK-LABEL: vssub_vi_v8i64_unmasked:
+; CHECK: # %bb.0:
+; CHECK-NEXT: li a1, -1
+; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma
+; CHECK-NEXT: vssub.vx v8, v8, a1
+; CHECK-NEXT: ret
+ %elt.head = insertelement <8 x i64> poison, i64 -1, i32 0
+ %vb = shufflevector <8 x i64> %elt.head, <8 x i64> poison, <8 x i32> zeroinitializer
+ %head = insertelement <8 x i1> poison, i1 true, i32 0
+ %m = shufflevector <8 x i1> %head, <8 x i1> poison, <8 x i32> zeroinitializer
+ %v = call <8 x i64> @llvm.vp.ssub.sat.v8i64(<8 x i64> %va, <8 x i64> %vb, <8 x i1> %m, i32 %evl)
+ ret <8 x i64> %v
+}
+
+declare <16 x i64> @llvm.vp.ssub.sat.v16i64(<16 x i64>, <16 x i64>, <16 x i1>, i32)
+
+define <16 x i64> @vssub_vv_v16i64(<16 x i64> %va, <16 x i64> %b, <16 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vssub_vv_v16i64:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
+; CHECK-NEXT: vssub.vv v8, v8, v16, v0.t
+; CHECK-NEXT: ret
+ %v = call <16 x i64> @llvm.vp.ssub.sat.v16i64(<16 x i64> %va, <16 x i64> %b, <16 x i1> %m, i32 %evl)
+ ret <16 x i64> %v
+}
+
+define <16 x i64> @vssub_vv_v16i64_unmasked(<16 x i64> %va, <16 x i64> %b, i32 zeroext %evl) {
+; CHECK-LABEL: vssub_vv_v16i64_unmasked:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
+; CHECK-NEXT: vssub.vv v8, v8, v16
+; CHECK-NEXT: ret
+ %head = insertelement <16 x i1> poison, i1 true, i32 0
+ %m = shufflevector <16 x i1> %head, <16 x i1> poison, <16 x i32> zeroinitializer
+ %v = call <16 x i64> @llvm.vp.ssub.sat.v16i64(<16 x i64> %va, <16 x i64> %b, <16 x i1> %m, i32 %evl)
+ ret <16 x i64> %v
+}
+
+define <16 x i64> @vssub_vx_v16i64(<16 x i64> %va, i64 %b, <16 x i1> %m, i32 zeroext %evl) {
+; RV32-LABEL: vssub_vx_v16i64:
+; RV32: # %bb.0:
+; RV32-NEXT: addi sp, sp, -16
+; RV32-NEXT: .cfi_def_cfa_offset 16
+; RV32-NEXT: sw a1, 12(sp)
+; RV32-NEXT: sw a0, 8(sp)
+; RV32-NEXT: addi a0, sp, 8
+; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, ma
+; RV32-NEXT: vlse64.v v16, (a0), zero
+; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, ma
+; RV32-NEXT: vssub.vv v8, v8, v16, v0.t
+; RV32-NEXT: addi sp, sp, 16
+; RV32-NEXT: ret
+;
+; RV64-LABEL: vssub_vx_v16i64:
+; RV64: # %bb.0:
+; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, ma
+; RV64-NEXT: vssub.vx v8, v8, a0, v0.t
+; RV64-NEXT: ret
+ %elt.head = insertelement <16 x i64> poison, i64 %b, i32 0
+ %vb = shufflevector <16 x i64> %elt.head, <16 x i64> poison, <16 x i32> zeroinitializer
+ %v = call <16 x i64> @llvm.vp.ssub.sat.v16i64(<16 x i64> %va, <16 x i64> %vb, <16 x i1> %m, i32 %evl)
+ ret <16 x i64> %v
+}
+
+define <16 x i64> @vssub_vx_v16i64_unmasked(<16 x i64> %va, i64 %b, i32 zeroext %evl) {
+; RV32-LABEL: vssub_vx_v16i64_unmasked:
+; RV32: # %bb.0:
+; RV32-NEXT: addi sp, sp, -16
+; RV32-NEXT: .cfi_def_cfa_offset 16
+; RV32-NEXT: sw a1, 12(sp)
+; RV32-NEXT: sw a0, 8(sp)
+; RV32-NEXT: addi a0, sp, 8
+; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, ma
+; RV32-NEXT: vlse64.v v16, (a0), zero
+; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, ma
+; RV32-NEXT: vssub.vv v8, v8, v16
+; RV32-NEXT: addi sp, sp, 16
+; RV32-NEXT: ret
+;
+; RV64-LABEL: vssub_vx_v16i64_unmasked:
+; RV64: # %bb.0:
+; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, ma
+; RV64-NEXT: vssub.vx v8, v8, a0
+; RV64-NEXT: ret
+ %elt.head = insertelement <16 x i64> poison, i64 %b, i32 0
+ %vb = shufflevector <16 x i64> %elt.head, <16 x i64> poison, <16 x i32> zeroinitializer
+ %head = insertelement <16 x i1> poison, i1 true, i32 0
+ %m = shufflevector <16 x i1> %head, <16 x i1> poison, <16 x i32> zeroinitializer
+ %v = call <16 x i64> @llvm.vp.ssub.sat.v16i64(<16 x i64> %va, <16 x i64> %vb, <16 x i1> %m, i32 %evl)
+ ret <16 x i64> %v
+}
+
+define <16 x i64> @vssub_vi_v16i64(<16 x i64> %va, <16 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vssub_vi_v16i64:
+; CHECK: # %bb.0:
+; CHECK-NEXT: li a1, -1
+; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
+; CHECK-NEXT: vssub.vx v8, v8, a1, v0.t
+; CHECK-NEXT: ret
+ %elt.head = insertelement <16 x i64> poison, i64 -1, i32 0
+ %vb = shufflevector <16 x i64> %elt.head, <16 x i64> poison, <16 x i32> zeroinitializer
+ %v = call <16 x i64> @llvm.vp.ssub.sat.v16i64(<16 x i64> %va, <16 x i64> %vb, <16 x i1> %m, i32 %evl)
+ ret <16 x i64> %v
+}
+
+define <16 x i64> @vssub_vi_v16i64_unmasked(<16 x i64> %va, i32 zeroext %evl) {
+; CHECK-LABEL: vssub_vi_v16i64_unmasked:
+; CHECK: # %bb.0:
+; CHECK-NEXT: li a1, -1
+; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
+; CHECK-NEXT: vssub.vx v8, v8, a1
+; CHECK-NEXT: ret
+ %elt.head = insertelement <16 x i64> poison, i64 -1, i32 0
+ %vb = shufflevector <16 x i64> %elt.head, <16 x i64> poison, <16 x i32> zeroinitializer
+ %head = insertelement <16 x i1> poison, i1 true, i32 0
+ %m = shufflevector <16 x i1> %head, <16 x i1> poison, <16 x i32> zeroinitializer
+ %v = call <16 x i64> @llvm.vp.ssub.sat.v16i64(<16 x i64> %va, <16 x i64> %vb, <16 x i1> %m, i32 %evl)
+ ret <16 x i64> %v
+}
+
+; Test that split-legalization works as expected.
+
+declare <32 x i64> @llvm.vp.ssub.sat.v32i64(<32 x i64>, <32 x i64>, <32 x i1>, i32)
+
+define <32 x i64> @vssub_vx_v32i64(<32 x i64> %va, <32 x i1> %m, i32 zeroext %evl) {
+; RV32-LABEL: vssub_vx_v32i64:
+; RV32: # %bb.0:
+; RV32-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
+; RV32-NEXT: vslidedown.vi v1, v0, 2
+; RV32-NEXT: li a1, 32
+; RV32-NEXT: vsetvli zero, a1, e32, m8, ta, ma
+; RV32-NEXT: li a2, 16
+; RV32-NEXT: vmv.v.i v24, -1
+; RV32-NEXT: mv a1, a0
+; RV32-NEXT: bltu a0, a2, .LBB108_2
+; RV32-NEXT: # %bb.1:
+; RV32-NEXT: li a1, 16
+; RV32-NEXT: .LBB108_2:
+; RV32-NEXT: vsetvli zero, a1, e64, m8, ta, ma
+; RV32-NEXT: vssub.vv v8, v8, v24, v0.t
+; RV32-NEXT: addi a1, a0, -16
+; RV32-NEXT: sltu a0, a0, a1
+; RV32-NEXT: addi a0, a0, -1
+; RV32-NEXT: and a0, a0, a1
+; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma
+; RV32-NEXT: vmv1r.v v0, v1
+; RV32-NEXT: vssub.vv v16, v16, v24, v0.t
+; RV32-NEXT: ret
+;
+; RV64-LABEL: vssub_vx_v32i64:
+; RV64: # %bb.0:
+; RV64-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
+; RV64-NEXT: li a2, 16
+; RV64-NEXT: vslidedown.vi v24, v0, 2
+; RV64-NEXT: mv a1, a0
+; RV64-NEXT: bltu a0, a2, .LBB108_2
+; RV64-NEXT: # %bb.1:
+; RV64-NEXT: li a1, 16
+; RV64-NEXT: .LBB108_2:
+; RV64-NEXT: li a2, -1
+; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, ma
+; RV64-NEXT: vssub.vx v8, v8, a2, v0.t
+; RV64-NEXT: addi a1, a0, -16
+; RV64-NEXT: sltu a0, a0, a1
+; RV64-NEXT: addi a0, a0, -1
+; RV64-NEXT: and a0, a0, a1
+; RV64-NEXT: vsetvli zero, a0, e64, m8, ta, ma
+; RV64-NEXT: vmv1r.v v0, v24
+; RV64-NEXT: vssub.vx v16, v16, a2, v0.t
+; RV64-NEXT: ret
+ %elt.head = insertelement <32 x i64> poison, i64 -1, i32 0
+ %vb = shufflevector <32 x i64> %elt.head, <32 x i64> poison, <32 x i32> zeroinitializer
+ %v = call <32 x i64> @llvm.vp.ssub.sat.v32i64(<32 x i64> %va, <32 x i64> %vb, <32 x i1> %m, i32 %evl)
+ ret <32 x i64> %v
+}
+
+define <32 x i64> @vssub_vi_v32i64_unmasked(<32 x i64> %va, i32 zeroext %evl) {
+; RV32-LABEL: vssub_vi_v32i64_unmasked:
+; RV32: # %bb.0:
+; RV32-NEXT: li a1, 32
+; RV32-NEXT: vsetvli zero, a1, e32, m8, ta, ma
+; RV32-NEXT: li a2, 16
+; RV32-NEXT: vmv.v.i v24, -1
+; RV32-NEXT: mv a1, a0
+; RV32-NEXT: bltu a0, a2, .LBB109_2
+; RV32-NEXT: # %bb.1:
+; RV32-NEXT: li a1, 16
+; RV32-NEXT: .LBB109_2:
+; RV32-NEXT: vsetvli zero, a1, e64, m8, ta, ma
+; RV32-NEXT: vssub.vv v8, v8, v24
+; RV32-NEXT: addi a1, a0, -16
+; RV32-NEXT: sltu a0, a0, a1
+; RV32-NEXT: addi a0, a0, -1
+; RV32-NEXT: and a0, a0, a1
+; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma
+; RV32-NEXT: vssub.vv v16, v16, v24
+; RV32-NEXT: ret
+;
+; RV64-LABEL: vssub_vi_v32i64_unmasked:
+; RV64: # %bb.0:
+; RV64-NEXT: li a2, 16
+; RV64-NEXT: mv a1, a0
+; RV64-NEXT: bltu a0, a2, .LBB109_2
+; RV64-NEXT: # %bb.1:
+; RV64-NEXT: li a1, 16
+; RV64-NEXT: .LBB109_2:
+; RV64-NEXT: li a2, -1
+; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, ma
+; RV64-NEXT: vssub.vx v8, v8, a2
+; RV64-NEXT: addi a1, a0, -16
+; RV64-NEXT: sltu a0, a0, a1
+; RV64-NEXT: addi a0, a0, -1
+; RV64-NEXT: and a0, a0, a1
+; RV64-NEXT: vsetvli zero, a0, e64, m8, ta, ma
+; RV64-NEXT: vssub.vx v16, v16, a2
+; RV64-NEXT: ret
+ %elt.head = insertelement <32 x i64> poison, i64 -1, i32 0
+ %vb = shufflevector <32 x i64> %elt.head, <32 x i64> poison, <32 x i32> zeroinitializer
+ %head = insertelement <32 x i1> poison, i1 true, i32 0
+ %m = shufflevector <32 x i1> %head, <32 x i1> poison, <32 x i32> zeroinitializer
+ %v = call <32 x i64> @llvm.vp.ssub.sat.v32i64(<32 x i64> %va, <32 x i64> %vb, <32 x i1> %m, i32 %evl)
+ ret <32 x i64> %v
+}
+
+; FIXME: We don't match vssub.vi on RV32.
+
+define <32 x i64> @vssub_vx_v32i64_evl12(<32 x i64> %va, <32 x i1> %m) {
+; RV32-LABEL: vssub_vx_v32i64_evl12:
+; RV32: # %bb.0:
+; RV32-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
+; RV32-NEXT: vslidedown.vi v1, v0, 2
+; RV32-NEXT: li a0, 32
+; RV32-NEXT: vsetvli zero, a0, e32, m8, ta, ma
+; RV32-NEXT: vmv.v.i v24, -1
+; RV32-NEXT: vsetivli zero, 12, e64, m8, ta, ma
+; RV32-NEXT: vssub.vv v8, v8, v24, v0.t
+; RV32-NEXT: vsetivli zero, 0, e64, m8, ta, ma
+; RV32-NEXT: vmv1r.v v0, v1
+; RV32-NEXT: vssub.vv v16, v16, v24, v0.t
+; RV32-NEXT: ret
+;
+; RV64-LABEL: vssub_vx_v32i64_evl12:
+; RV64: # %bb.0:
+; RV64-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
+; RV64-NEXT: vslidedown.vi v24, v0, 2
+; RV64-NEXT: li a0, -1
+; RV64-NEXT: vsetivli zero, 12, e64, m8, ta, ma
+; RV64-NEXT: vssub.vx v8, v8, a0, v0.t
+; RV64-NEXT: vsetivli zero, 0, e64, m8, ta, ma
+; RV64-NEXT: vmv1r.v v0, v24
+; RV64-NEXT: vssub.vx v16, v16, a0, v0.t
+; RV64-NEXT: ret
+ %elt.head = insertelement <32 x i64> poison, i64 -1, i32 0
+ %vb = shufflevector <32 x i64> %elt.head, <32 x i64> poison, <32 x i32> zeroinitializer
+ %v = call <32 x i64> @llvm.vp.ssub.sat.v32i64(<32 x i64> %va, <32 x i64> %vb, <32 x i1> %m, i32 12)
+ ret <32 x i64> %v
+}
+
+define <32 x i64> @vssub_vx_v32i64_evl27(<32 x i64> %va, <32 x i1> %m) {
+; RV32-LABEL: vssub_vx_v32i64_evl27:
+; RV32: # %bb.0:
+; RV32-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
+; RV32-NEXT: vslidedown.vi v1, v0, 2
+; RV32-NEXT: li a0, 32
+; RV32-NEXT: vsetvli zero, a0, e32, m8, ta, ma
+; RV32-NEXT: vmv.v.i v24, -1
+; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, ma
+; RV32-NEXT: vssub.vv v8, v8, v24, v0.t
+; RV32-NEXT: vsetivli zero, 11, e64, m8, ta, ma
+; RV32-NEXT: vmv1r.v v0, v1
+; RV32-NEXT: vssub.vv v16, v16, v24, v0.t
+; RV32-NEXT: ret
+;
+; RV64-LABEL: vssub_vx_v32i64_evl27:
+; RV64: # %bb.0:
+; RV64-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
+; RV64-NEXT: vslidedown.vi v24, v0, 2
+; RV64-NEXT: li a0, -1
+; RV64-NEXT: vsetivli zero, 16, e64, m8, ta, ma
+; RV64-NEXT: vssub.vx v8, v8, a0, v0.t
+; RV64-NEXT: vsetivli zero, 11, e64, m8, ta, ma
+; RV64-NEXT: vmv1r.v v0, v24
+; RV64-NEXT: vssub.vx v16, v16, a0, v0.t
+; RV64-NEXT: ret
+ %elt.head = insertelement <32 x i64> poison, i64 -1, i32 0
+ %vb = shufflevector <32 x i64> %elt.head, <32 x i64> poison, <32 x i32> zeroinitializer
+ %v = call <32 x i64> @llvm.vp.ssub.sat.v32i64(<32 x i64> %va, <32 x i64> %vb, <32 x i1> %m, i32 27)
+ ret <32 x i64> %v
+}
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vssubu-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vssubu-vp.ll
new file mode 100644
index 000000000000..6ea975887123
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vssubu-vp.ll
@@ -0,0 +1,1740 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=riscv32 -mattr=+v -verify-machineinstrs < %s \
+; RUN: | FileCheck %s --check-prefixes=CHECK,RV32
+; RUN: llc -mtriple=riscv64 -mattr=+v -verify-machineinstrs < %s \
+; RUN: | FileCheck %s --check-prefixes=CHECK,RV64
+
+declare <8 x i7> @llvm.vp.usub.sat.v8i7(<8 x i7>, <8 x i7>, <8 x i1>, i32)
+
+define <8 x i7> @vssubu_vv_v8i7(<8 x i7> %va, <8 x i7> %b, <8 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vssubu_vv_v8i7:
+; CHECK: # %bb.0:
+; CHECK-NEXT: li a1, 127
+; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, ma
+; CHECK-NEXT: vand.vx v9, v9, a1
+; CHECK-NEXT: vand.vx v8, v8, a1
+; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma
+; CHECK-NEXT: vssubu.vv v8, v8, v9, v0.t
+; CHECK-NEXT: ret
+ %v = call <8 x i7> @llvm.vp.usub.sat.v8i7(<8 x i7> %va, <8 x i7> %b, <8 x i1> %m, i32 %evl)
+ ret <8 x i7> %v
+}
+
+declare <2 x i8> @llvm.vp.usub.sat.v2i8(<2 x i8>, <2 x i8>, <2 x i1>, i32)
+
+define <2 x i8> @vssubu_vv_v2i8(<2 x i8> %va, <2 x i8> %b, <2 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vssubu_vv_v2i8:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma
+; CHECK-NEXT: vssubu.vv v8, v8, v9, v0.t
+; CHECK-NEXT: ret
+ %v = call <2 x i8> @llvm.vp.usub.sat.v2i8(<2 x i8> %va, <2 x i8> %b, <2 x i1> %m, i32 %evl)
+ ret <2 x i8> %v
+}
+
+define <2 x i8> @vssubu_vv_v2i8_unmasked(<2 x i8> %va, <2 x i8> %b, i32 zeroext %evl) {
+; CHECK-LABEL: vssubu_vv_v2i8_unmasked:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma
+; CHECK-NEXT: vssubu.vv v8, v8, v9
+; CHECK-NEXT: ret
+ %head = insertelement <2 x i1> poison, i1 true, i32 0
+ %m = shufflevector <2 x i1> %head, <2 x i1> poison, <2 x i32> zeroinitializer
+ %v = call <2 x i8> @llvm.vp.usub.sat.v2i8(<2 x i8> %va, <2 x i8> %b, <2 x i1> %m, i32 %evl)
+ ret <2 x i8> %v
+}
+
+define <2 x i8> @vssubu_vx_v2i8(<2 x i8> %va, i8 %b, <2 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vssubu_vx_v2i8:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
+; CHECK-NEXT: vssubu.vx v8, v8, a0, v0.t
+; CHECK-NEXT: ret
+ %elt.head = insertelement <2 x i8> poison, i8 %b, i32 0
+ %vb = shufflevector <2 x i8> %elt.head, <2 x i8> poison, <2 x i32> zeroinitializer
+ %v = call <2 x i8> @llvm.vp.usub.sat.v2i8(<2 x i8> %va, <2 x i8> %vb, <2 x i1> %m, i32 %evl)
+ ret <2 x i8> %v
+}
+
+define <2 x i8> @vssubu_vx_v2i8_unmasked(<2 x i8> %va, i8 %b, i32 zeroext %evl) {
+; CHECK-LABEL: vssubu_vx_v2i8_unmasked:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
+; CHECK-NEXT: vssubu.vx v8, v8, a0
+; CHECK-NEXT: ret
+ %elt.head = insertelement <2 x i8> poison, i8 %b, i32 0
+ %vb = shufflevector <2 x i8> %elt.head, <2 x i8> poison, <2 x i32> zeroinitializer
+ %head = insertelement <2 x i1> poison, i1 true, i32 0
+ %m = shufflevector <2 x i1> %head, <2 x i1> poison, <2 x i32> zeroinitializer
+ %v = call <2 x i8> @llvm.vp.usub.sat.v2i8(<2 x i8> %va, <2 x i8> %vb, <2 x i1> %m, i32 %evl)
+ ret <2 x i8> %v
+}
+
+define <2 x i8> @vssubu_vi_v2i8(<2 x i8> %va, <2 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vssubu_vi_v2i8:
+; CHECK: # %bb.0:
+; CHECK-NEXT: li a1, -1
+; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma
+; CHECK-NEXT: vssubu.vx v8, v8, a1, v0.t
+; CHECK-NEXT: ret
+ %elt.head = insertelement <2 x i8> poison, i8 -1, i32 0
+ %vb = shufflevector <2 x i8> %elt.head, <2 x i8> poison, <2 x i32> zeroinitializer
+ %v = call <2 x i8> @llvm.vp.usub.sat.v2i8(<2 x i8> %va, <2 x i8> %vb, <2 x i1> %m, i32 %evl)
+ ret <2 x i8> %v
+}
+
+define <2 x i8> @vssubu_vi_v2i8_unmasked(<2 x i8> %va, i32 zeroext %evl) {
+; CHECK-LABEL: vssubu_vi_v2i8_unmasked:
+; CHECK: # %bb.0:
+; CHECK-NEXT: li a1, -1
+; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma
+; CHECK-NEXT: vssubu.vx v8, v8, a1
+; CHECK-NEXT: ret
+ %elt.head = insertelement <2 x i8> poison, i8 -1, i32 0
+ %vb = shufflevector <2 x i8> %elt.head, <2 x i8> poison, <2 x i32> zeroinitializer
+ %head = insertelement <2 x i1> poison, i1 true, i32 0
+ %m = shufflevector <2 x i1> %head, <2 x i1> poison, <2 x i32> zeroinitializer
+ %v = call <2 x i8> @llvm.vp.usub.sat.v2i8(<2 x i8> %va, <2 x i8> %vb, <2 x i1> %m, i32 %evl)
+ ret <2 x i8> %v
+}
+
+declare <4 x i8> @llvm.vp.usub.sat.v4i8(<4 x i8>, <4 x i8>, <4 x i1>, i32)
+
+define <4 x i8> @vssubu_vv_v4i8(<4 x i8> %va, <4 x i8> %b, <4 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vssubu_vv_v4i8:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma
+; CHECK-NEXT: vssubu.vv v8, v8, v9, v0.t
+; CHECK-NEXT: ret
+ %v = call <4 x i8> @llvm.vp.usub.sat.v4i8(<4 x i8> %va, <4 x i8> %b, <4 x i1> %m, i32 %evl)
+ ret <4 x i8> %v
+}
+
+define <4 x i8> @vssubu_vv_v4i8_unmasked(<4 x i8> %va, <4 x i8> %b, i32 zeroext %evl) {
+; CHECK-LABEL: vssubu_vv_v4i8_unmasked:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma
+; CHECK-NEXT: vssubu.vv v8, v8, v9
+; CHECK-NEXT: ret
+ %head = insertelement <4 x i1> poison, i1 true, i32 0
+ %m = shufflevector <4 x i1> %head, <4 x i1> poison, <4 x i32> zeroinitializer
+ %v = call <4 x i8> @llvm.vp.usub.sat.v4i8(<4 x i8> %va, <4 x i8> %b, <4 x i1> %m, i32 %evl)
+ ret <4 x i8> %v
+}
+
+define <4 x i8> @vssubu_vx_v4i8(<4 x i8> %va, i8 %b, <4 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vssubu_vx_v4i8:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma
+; CHECK-NEXT: vssubu.vx v8, v8, a0, v0.t
+; CHECK-NEXT: ret
+ %elt.head = insertelement <4 x i8> poison, i8 %b, i32 0
+ %vb = shufflevector <4 x i8> %elt.head, <4 x i8> poison, <4 x i32> zeroinitializer
+ %v = call <4 x i8> @llvm.vp.usub.sat.v4i8(<4 x i8> %va, <4 x i8> %vb, <4 x i1> %m, i32 %evl)
+ ret <4 x i8> %v
+}
+
+define <4 x i8> @vssubu_vx_v4i8_commute(<4 x i8> %va, i8 %b, <4 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vssubu_vx_v4i8_commute:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 4, e8, mf4, ta, ma
+; CHECK-NEXT: vmv.v.x v9, a0
+; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma
+; CHECK-NEXT: vssubu.vv v8, v9, v8, v0.t
+; CHECK-NEXT: ret
+ %elt.head = insertelement <4 x i8> poison, i8 %b, i32 0
+ %vb = shufflevector <4 x i8> %elt.head, <4 x i8> poison, <4 x i32> zeroinitializer
+ %v = call <4 x i8> @llvm.vp.usub.sat.v4i8(<4 x i8> %vb, <4 x i8> %va, <4 x i1> %m, i32 %evl)
+ ret <4 x i8> %v
+}
+
+define <4 x i8> @vssubu_vx_v4i8_unmasked(<4 x i8> %va, i8 %b, i32 zeroext %evl) {
+; CHECK-LABEL: vssubu_vx_v4i8_unmasked:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma
+; CHECK-NEXT: vssubu.vx v8, v8, a0
+; CHECK-NEXT: ret
+ %elt.head = insertelement <4 x i8> poison, i8 %b, i32 0
+ %vb = shufflevector <4 x i8> %elt.head, <4 x i8> poison, <4 x i32> zeroinitializer
+ %head = insertelement <4 x i1> poison, i1 true, i32 0
+ %m = shufflevector <4 x i1> %head, <4 x i1> poison, <4 x i32> zeroinitializer
+ %v = call <4 x i8> @llvm.vp.usub.sat.v4i8(<4 x i8> %va, <4 x i8> %vb, <4 x i1> %m, i32 %evl)
+ ret <4 x i8> %v
+}
+
+define <4 x i8> @vssubu_vi_v4i8(<4 x i8> %va, <4 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vssubu_vi_v4i8:
+; CHECK: # %bb.0:
+; CHECK-NEXT: li a1, -1
+; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma
+; CHECK-NEXT: vssubu.vx v8, v8, a1, v0.t
+; CHECK-NEXT: ret
+ %elt.head = insertelement <4 x i8> poison, i8 -1, i32 0
+ %vb = shufflevector <4 x i8> %elt.head, <4 x i8> poison, <4 x i32> zeroinitializer
+ %v = call <4 x i8> @llvm.vp.usub.sat.v4i8(<4 x i8> %va, <4 x i8> %vb, <4 x i1> %m, i32 %evl)
+ ret <4 x i8> %v
+}
+
+define <4 x i8> @vssubu_vi_v4i8_unmasked(<4 x i8> %va, i32 zeroext %evl) {
+; CHECK-LABEL: vssubu_vi_v4i8_unmasked:
+; CHECK: # %bb.0:
+; CHECK-NEXT: li a1, -1
+; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma
+; CHECK-NEXT: vssubu.vx v8, v8, a1
+; CHECK-NEXT: ret
+ %elt.head = insertelement <4 x i8> poison, i8 -1, i32 0
+ %vb = shufflevector <4 x i8> %elt.head, <4 x i8> poison, <4 x i32> zeroinitializer
+ %head = insertelement <4 x i1> poison, i1 true, i32 0
+ %m = shufflevector <4 x i1> %head, <4 x i1> poison, <4 x i32> zeroinitializer
+ %v = call <4 x i8> @llvm.vp.usub.sat.v4i8(<4 x i8> %va, <4 x i8> %vb, <4 x i1> %m, i32 %evl)
+ ret <4 x i8> %v
+}
+
+declare <5 x i8> @llvm.vp.usub.sat.v5i8(<5 x i8>, <5 x i8>, <5 x i1>, i32)
+
+define <5 x i8> @vssubu_vv_v5i8(<5 x i8> %va, <5 x i8> %b, <5 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vssubu_vv_v5i8:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma
+; CHECK-NEXT: vssubu.vv v8, v8, v9, v0.t
+; CHECK-NEXT: ret
+ %v = call <5 x i8> @llvm.vp.usub.sat.v5i8(<5 x i8> %va, <5 x i8> %b, <5 x i1> %m, i32 %evl)
+ ret <5 x i8> %v
+}
+
+define <5 x i8> @vssubu_vv_v5i8_unmasked(<5 x i8> %va, <5 x i8> %b, i32 zeroext %evl) {
+; CHECK-LABEL: vssubu_vv_v5i8_unmasked:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma
+; CHECK-NEXT: vssubu.vv v8, v8, v9
+; CHECK-NEXT: ret
+ %head = insertelement <5 x i1> poison, i1 true, i32 0
+ %m = shufflevector <5 x i1> %head, <5 x i1> poison, <5 x i32> zeroinitializer
+ %v = call <5 x i8> @llvm.vp.usub.sat.v5i8(<5 x i8> %va, <5 x i8> %b, <5 x i1> %m, i32 %evl)
+ ret <5 x i8> %v
+}
+
+define <5 x i8> @vssubu_vx_v5i8(<5 x i8> %va, i8 %b, <5 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vssubu_vx_v5i8:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma
+; CHECK-NEXT: vssubu.vx v8, v8, a0, v0.t
+; CHECK-NEXT: ret
+ %elt.head = insertelement <5 x i8> poison, i8 %b, i32 0
+ %vb = shufflevector <5 x i8> %elt.head, <5 x i8> poison, <5 x i32> zeroinitializer
+ %v = call <5 x i8> @llvm.vp.usub.sat.v5i8(<5 x i8> %va, <5 x i8> %vb, <5 x i1> %m, i32 %evl)
+ ret <5 x i8> %v
+}
+
+define <5 x i8> @vssubu_vx_v5i8_unmasked(<5 x i8> %va, i8 %b, i32 zeroext %evl) {
+; CHECK-LABEL: vssubu_vx_v5i8_unmasked:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma
+; CHECK-NEXT: vssubu.vx v8, v8, a0
+; CHECK-NEXT: ret
+ %elt.head = insertelement <5 x i8> poison, i8 %b, i32 0
+ %vb = shufflevector <5 x i8> %elt.head, <5 x i8> poison, <5 x i32> zeroinitializer
+ %head = insertelement <5 x i1> poison, i1 true, i32 0
+ %m = shufflevector <5 x i1> %head, <5 x i1> poison, <5 x i32> zeroinitializer
+ %v = call <5 x i8> @llvm.vp.usub.sat.v5i8(<5 x i8> %va, <5 x i8> %vb, <5 x i1> %m, i32 %evl)
+ ret <5 x i8> %v
+}
+
+define <5 x i8> @vssubu_vi_v5i8(<5 x i8> %va, <5 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vssubu_vi_v5i8:
+; CHECK: # %bb.0:
+; CHECK-NEXT: li a1, -1
+; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma
+; CHECK-NEXT: vssubu.vx v8, v8, a1, v0.t
+; CHECK-NEXT: ret
+ %elt.head = insertelement <5 x i8> poison, i8 -1, i32 0
+ %vb = shufflevector <5 x i8> %elt.head, <5 x i8> poison, <5 x i32> zeroinitializer
+ %v = call <5 x i8> @llvm.vp.usub.sat.v5i8(<5 x i8> %va, <5 x i8> %vb, <5 x i1> %m, i32 %evl)
+ ret <5 x i8> %v
+}
+
+define <5 x i8> @vssubu_vi_v5i8_unmasked(<5 x i8> %va, i32 zeroext %evl) {
+; CHECK-LABEL: vssubu_vi_v5i8_unmasked:
+; CHECK: # %bb.0:
+; CHECK-NEXT: li a1, -1
+; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma
+; CHECK-NEXT: vssubu.vx v8, v8, a1
+; CHECK-NEXT: ret
+ %elt.head = insertelement <5 x i8> poison, i8 -1, i32 0
+ %vb = shufflevector <5 x i8> %elt.head, <5 x i8> poison, <5 x i32> zeroinitializer
+ %head = insertelement <5 x i1> poison, i1 true, i32 0
+ %m = shufflevector <5 x i1> %head, <5 x i1> poison, <5 x i32> zeroinitializer
+ %v = call <5 x i8> @llvm.vp.usub.sat.v5i8(<5 x i8> %va, <5 x i8> %vb, <5 x i1> %m, i32 %evl)
+ ret <5 x i8> %v
+}
+
+declare <8 x i8> @llvm.vp.usub.sat.v8i8(<8 x i8>, <8 x i8>, <8 x i1>, i32)
+
+define <8 x i8> @vssubu_vv_v8i8(<8 x i8> %va, <8 x i8> %b, <8 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vssubu_vv_v8i8:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma
+; CHECK-NEXT: vssubu.vv v8, v8, v9, v0.t
+; CHECK-NEXT: ret
+ %v = call <8 x i8> @llvm.vp.usub.sat.v8i8(<8 x i8> %va, <8 x i8> %b, <8 x i1> %m, i32 %evl)
+ ret <8 x i8> %v
+}
+
+define <8 x i8> @vssubu_vv_v8i8_unmasked(<8 x i8> %va, <8 x i8> %b, i32 zeroext %evl) {
+; CHECK-LABEL: vssubu_vv_v8i8_unmasked:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma
+; CHECK-NEXT: vssubu.vv v8, v8, v9
+; CHECK-NEXT: ret
+ %head = insertelement <8 x i1> poison, i1 true, i32 0
+ %m = shufflevector <8 x i1> %head, <8 x i1> poison, <8 x i32> zeroinitializer
+ %v = call <8 x i8> @llvm.vp.usub.sat.v8i8(<8 x i8> %va, <8 x i8> %b, <8 x i1> %m, i32 %evl)
+ ret <8 x i8> %v
+}
+
+define <8 x i8> @vssubu_vx_v8i8(<8 x i8> %va, i8 %b, <8 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vssubu_vx_v8i8:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma
+; CHECK-NEXT: vssubu.vx v8, v8, a0, v0.t
+; CHECK-NEXT: ret
+ %elt.head = insertelement <8 x i8> poison, i8 %b, i32 0
+ %vb = shufflevector <8 x i8> %elt.head, <8 x i8> poison, <8 x i32> zeroinitializer
+ %v = call <8 x i8> @llvm.vp.usub.sat.v8i8(<8 x i8> %va, <8 x i8> %vb, <8 x i1> %m, i32 %evl)
+ ret <8 x i8> %v
+}
+
+define <8 x i8> @vssubu_vx_v8i8_unmasked(<8 x i8> %va, i8 %b, i32 zeroext %evl) {
+; CHECK-LABEL: vssubu_vx_v8i8_unmasked:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma
+; CHECK-NEXT: vssubu.vx v8, v8, a0
+; CHECK-NEXT: ret
+ %elt.head = insertelement <8 x i8> poison, i8 %b, i32 0
+ %vb = shufflevector <8 x i8> %elt.head, <8 x i8> poison, <8 x i32> zeroinitializer
+ %head = insertelement <8 x i1> poison, i1 true, i32 0
+ %m = shufflevector <8 x i1> %head, <8 x i1> poison, <8 x i32> zeroinitializer
+ %v = call <8 x i8> @llvm.vp.usub.sat.v8i8(<8 x i8> %va, <8 x i8> %vb, <8 x i1> %m, i32 %evl)
+ ret <8 x i8> %v
+}
+
+define <8 x i8> @vssubu_vi_v8i8(<8 x i8> %va, <8 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vssubu_vi_v8i8:
+; CHECK: # %bb.0:
+; CHECK-NEXT: li a1, -1
+; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma
+; CHECK-NEXT: vssubu.vx v8, v8, a1, v0.t
+; CHECK-NEXT: ret
+ %elt.head = insertelement <8 x i8> poison, i8 -1, i32 0
+ %vb = shufflevector <8 x i8> %elt.head, <8 x i8> poison, <8 x i32> zeroinitializer
+ %v = call <8 x i8> @llvm.vp.usub.sat.v8i8(<8 x i8> %va, <8 x i8> %vb, <8 x i1> %m, i32 %evl)
+ ret <8 x i8> %v
+}
+
+define <8 x i8> @vssubu_vi_v8i8_unmasked(<8 x i8> %va, i32 zeroext %evl) {
+; CHECK-LABEL: vssubu_vi_v8i8_unmasked:
+; CHECK: # %bb.0:
+; CHECK-NEXT: li a1, -1
+; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma
+; CHECK-NEXT: vssubu.vx v8, v8, a1
+; CHECK-NEXT: ret
+ %elt.head = insertelement <8 x i8> poison, i8 -1, i32 0
+ %vb = shufflevector <8 x i8> %elt.head, <8 x i8> poison, <8 x i32> zeroinitializer
+ %head = insertelement <8 x i1> poison, i1 true, i32 0
+ %m = shufflevector <8 x i1> %head, <8 x i1> poison, <8 x i32> zeroinitializer
+ %v = call <8 x i8> @llvm.vp.usub.sat.v8i8(<8 x i8> %va, <8 x i8> %vb, <8 x i1> %m, i32 %evl)
+ ret <8 x i8> %v
+}
+
+declare <16 x i8> @llvm.vp.usub.sat.v16i8(<16 x i8>, <16 x i8>, <16 x i1>, i32)
+
+define <16 x i8> @vssubu_vv_v16i8(<16 x i8> %va, <16 x i8> %b, <16 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vssubu_vv_v16i8:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma
+; CHECK-NEXT: vssubu.vv v8, v8, v9, v0.t
+; CHECK-NEXT: ret
+ %v = call <16 x i8> @llvm.vp.usub.sat.v16i8(<16 x i8> %va, <16 x i8> %b, <16 x i1> %m, i32 %evl)
+ ret <16 x i8> %v
+}
+
+define <16 x i8> @vssubu_vv_v16i8_unmasked(<16 x i8> %va, <16 x i8> %b, i32 zeroext %evl) {
+; CHECK-LABEL: vssubu_vv_v16i8_unmasked:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma
+; CHECK-NEXT: vssubu.vv v8, v8, v9
+; CHECK-NEXT: ret
+ %head = insertelement <16 x i1> poison, i1 true, i32 0
+ %m = shufflevector <16 x i1> %head, <16 x i1> poison, <16 x i32> zeroinitializer
+ %v = call <16 x i8> @llvm.vp.usub.sat.v16i8(<16 x i8> %va, <16 x i8> %b, <16 x i1> %m, i32 %evl)
+ ret <16 x i8> %v
+}
+
+define <16 x i8> @vssubu_vx_v16i8(<16 x i8> %va, i8 %b, <16 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vssubu_vx_v16i8:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma
+; CHECK-NEXT: vssubu.vx v8, v8, a0, v0.t
+; CHECK-NEXT: ret
+ %elt.head = insertelement <16 x i8> poison, i8 %b, i32 0
+ %vb = shufflevector <16 x i8> %elt.head, <16 x i8> poison, <16 x i32> zeroinitializer
+ %v = call <16 x i8> @llvm.vp.usub.sat.v16i8(<16 x i8> %va, <16 x i8> %vb, <16 x i1> %m, i32 %evl)
+ ret <16 x i8> %v
+}
+
+define <16 x i8> @vssubu_vx_v16i8_unmasked(<16 x i8> %va, i8 %b, i32 zeroext %evl) {
+; CHECK-LABEL: vssubu_vx_v16i8_unmasked:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma
+; CHECK-NEXT: vssubu.vx v8, v8, a0
+; CHECK-NEXT: ret
+ %elt.head = insertelement <16 x i8> poison, i8 %b, i32 0
+ %vb = shufflevector <16 x i8> %elt.head, <16 x i8> poison, <16 x i32> zeroinitializer
+ %head = insertelement <16 x i1> poison, i1 true, i32 0
+ %m = shufflevector <16 x i1> %head, <16 x i1> poison, <16 x i32> zeroinitializer
+ %v = call <16 x i8> @llvm.vp.usub.sat.v16i8(<16 x i8> %va, <16 x i8> %vb, <16 x i1> %m, i32 %evl)
+ ret <16 x i8> %v
+}
+
+define <16 x i8> @vssubu_vi_v16i8(<16 x i8> %va, <16 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vssubu_vi_v16i8:
+; CHECK: # %bb.0:
+; CHECK-NEXT: li a1, -1
+; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma
+; CHECK-NEXT: vssubu.vx v8, v8, a1, v0.t
+; CHECK-NEXT: ret
+ %elt.head = insertelement <16 x i8> poison, i8 -1, i32 0
+ %vb = shufflevector <16 x i8> %elt.head, <16 x i8> poison, <16 x i32> zeroinitializer
+ %v = call <16 x i8> @llvm.vp.usub.sat.v16i8(<16 x i8> %va, <16 x i8> %vb, <16 x i1> %m, i32 %evl)
+ ret <16 x i8> %v
+}
+
+define <16 x i8> @vssubu_vi_v16i8_unmasked(<16 x i8> %va, i32 zeroext %evl) {
+; CHECK-LABEL: vssubu_vi_v16i8_unmasked:
+; CHECK: # %bb.0:
+; CHECK-NEXT: li a1, -1
+; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma
+; CHECK-NEXT: vssubu.vx v8, v8, a1
+; CHECK-NEXT: ret
+ %elt.head = insertelement <16 x i8> poison, i8 -1, i32 0
+ %vb = shufflevector <16 x i8> %elt.head, <16 x i8> poison, <16 x i32> zeroinitializer
+ %head = insertelement <16 x i1> poison, i1 true, i32 0
+ %m = shufflevector <16 x i1> %head, <16 x i1> poison, <16 x i32> zeroinitializer
+ %v = call <16 x i8> @llvm.vp.usub.sat.v16i8(<16 x i8> %va, <16 x i8> %vb, <16 x i1> %m, i32 %evl)
+ ret <16 x i8> %v
+}
+
+declare <256 x i8> @llvm.vp.usub.sat.v258i8(<256 x i8>, <256 x i8>, <256 x i1>, i32)
+
+define <256 x i8> @vssubu_vi_v258i8(<256 x i8> %va, <256 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vssubu_vi_v258i8:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vmv1r.v v24, v0
+; CHECK-NEXT: li a2, 128
+; CHECK-NEXT: vsetvli zero, a2, e8, m8, ta, ma
+; CHECK-NEXT: vlm.v v0, (a0)
+; CHECK-NEXT: addi a0, a1, -128
+; CHECK-NEXT: sltu a3, a1, a0
+; CHECK-NEXT: addi a3, a3, -1
+; CHECK-NEXT: and a3, a3, a0
+; CHECK-NEXT: li a0, -1
+; CHECK-NEXT: vsetvli zero, a3, e8, m8, ta, ma
+; CHECK-NEXT: vssubu.vx v16, v16, a0, v0.t
+; CHECK-NEXT: bltu a1, a2, .LBB32_2
+; CHECK-NEXT: # %bb.1:
+; CHECK-NEXT: li a1, 128
+; CHECK-NEXT: .LBB32_2:
+; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, ma
+; CHECK-NEXT: vmv1r.v v0, v24
+; CHECK-NEXT: vssubu.vx v8, v8, a0, v0.t
+; CHECK-NEXT: ret
+ %elt.head = insertelement <256 x i8> poison, i8 -1, i32 0
+ %vb = shufflevector <256 x i8> %elt.head, <256 x i8> poison, <256 x i32> zeroinitializer
+ %v = call <256 x i8> @llvm.vp.usub.sat.v258i8(<256 x i8> %va, <256 x i8> %vb, <256 x i1> %m, i32 %evl)
+ ret <256 x i8> %v
+}
+
+define <256 x i8> @vssubu_vi_v258i8_unmasked(<256 x i8> %va, i32 zeroext %evl) {
+; CHECK-LABEL: vssubu_vi_v258i8_unmasked:
+; CHECK: # %bb.0:
+; CHECK-NEXT: li a2, 128
+; CHECK-NEXT: mv a1, a0
+; CHECK-NEXT: bltu a0, a2, .LBB33_2
+; CHECK-NEXT: # %bb.1:
+; CHECK-NEXT: li a1, 128
+; CHECK-NEXT: .LBB33_2:
+; CHECK-NEXT: li a2, -1
+; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, ma
+; CHECK-NEXT: vssubu.vx v8, v8, a2
+; CHECK-NEXT: addi a1, a0, -128
+; CHECK-NEXT: sltu a0, a0, a1
+; CHECK-NEXT: addi a0, a0, -1
+; CHECK-NEXT: and a0, a0, a1
+; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, ma
+; CHECK-NEXT: vssubu.vx v16, v16, a2
+; CHECK-NEXT: ret
+ %elt.head = insertelement <256 x i8> poison, i8 -1, i32 0
+ %vb = shufflevector <256 x i8> %elt.head, <256 x i8> poison, <256 x i32> zeroinitializer
+ %head = insertelement <256 x i1> poison, i1 true, i32 0
+ %m = shufflevector <256 x i1> %head, <256 x i1> poison, <256 x i32> zeroinitializer
+ %v = call <256 x i8> @llvm.vp.usub.sat.v258i8(<256 x i8> %va, <256 x i8> %vb, <256 x i1> %m, i32 %evl)
+ ret <256 x i8> %v
+}
+
+; Test splitting when the %evl is a known constant.
+
+define <256 x i8> @vssubu_vi_v258i8_evl129(<256 x i8> %va, <256 x i1> %m) {
+; CHECK-LABEL: vssubu_vi_v258i8_evl129:
+; CHECK: # %bb.0:
+; CHECK-NEXT: li a1, 128
+; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, ma
+; CHECK-NEXT: vlm.v v24, (a0)
+; CHECK-NEXT: li a0, -1
+; CHECK-NEXT: vssubu.vx v8, v8, a0, v0.t
+; CHECK-NEXT: vsetivli zero, 1, e8, m8, ta, ma
+; CHECK-NEXT: vmv1r.v v0, v24
+; CHECK-NEXT: vssubu.vx v16, v16, a0, v0.t
+; CHECK-NEXT: ret
+ %elt.head = insertelement <256 x i8> poison, i8 -1, i32 0
+ %vb = shufflevector <256 x i8> %elt.head, <256 x i8> poison, <256 x i32> zeroinitializer
+ %v = call <256 x i8> @llvm.vp.usub.sat.v258i8(<256 x i8> %va, <256 x i8> %vb, <256 x i1> %m, i32 129)
+ ret <256 x i8> %v
+}
+
+; FIXME: The upper half is doing nothing.
+
+define <256 x i8> @vssubu_vi_v258i8_evl128(<256 x i8> %va, <256 x i1> %m) {
+; CHECK-LABEL: vssubu_vi_v258i8_evl128:
+; CHECK: # %bb.0:
+; CHECK-NEXT: li a1, 128
+; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, ma
+; CHECK-NEXT: vlm.v v24, (a0)
+; CHECK-NEXT: li a0, -1
+; CHECK-NEXT: vssubu.vx v8, v8, a0, v0.t
+; CHECK-NEXT: vsetivli zero, 0, e8, m8, ta, ma
+; CHECK-NEXT: vmv1r.v v0, v24
+; CHECK-NEXT: vssubu.vx v16, v16, a0, v0.t
+; CHECK-NEXT: ret
+ %elt.head = insertelement <256 x i8> poison, i8 -1, i32 0
+ %vb = shufflevector <256 x i8> %elt.head, <256 x i8> poison, <256 x i32> zeroinitializer
+ %v = call <256 x i8> @llvm.vp.usub.sat.v258i8(<256 x i8> %va, <256 x i8> %vb, <256 x i1> %m, i32 128)
+ ret <256 x i8> %v
+}
+
+declare <2 x i16> @llvm.vp.usub.sat.v2i16(<2 x i16>, <2 x i16>, <2 x i1>, i32)
+
+define <2 x i16> @vssubu_vv_v2i16(<2 x i16> %va, <2 x i16> %b, <2 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vssubu_vv_v2i16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
+; CHECK-NEXT: vssubu.vv v8, v8, v9, v0.t
+; CHECK-NEXT: ret
+ %v = call <2 x i16> @llvm.vp.usub.sat.v2i16(<2 x i16> %va, <2 x i16> %b, <2 x i1> %m, i32 %evl)
+ ret <2 x i16> %v
+}
+
+define <2 x i16> @vssubu_vv_v2i16_unmasked(<2 x i16> %va, <2 x i16> %b, i32 zeroext %evl) {
+; CHECK-LABEL: vssubu_vv_v2i16_unmasked:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
+; CHECK-NEXT: vssubu.vv v8, v8, v9
+; CHECK-NEXT: ret
+ %head = insertelement <2 x i1> poison, i1 true, i32 0
+ %m = shufflevector <2 x i1> %head, <2 x i1> poison, <2 x i32> zeroinitializer
+ %v = call <2 x i16> @llvm.vp.usub.sat.v2i16(<2 x i16> %va, <2 x i16> %b, <2 x i1> %m, i32 %evl)
+ ret <2 x i16> %v
+}
+
+define <2 x i16> @vssubu_vx_v2i16(<2 x i16> %va, i16 %b, <2 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vssubu_vx_v2i16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
+; CHECK-NEXT: vssubu.vx v8, v8, a0, v0.t
+; CHECK-NEXT: ret
+ %elt.head = insertelement <2 x i16> poison, i16 %b, i32 0
+ %vb = shufflevector <2 x i16> %elt.head, <2 x i16> poison, <2 x i32> zeroinitializer
+ %v = call <2 x i16> @llvm.vp.usub.sat.v2i16(<2 x i16> %va, <2 x i16> %vb, <2 x i1> %m, i32 %evl)
+ ret <2 x i16> %v
+}
+
+define <2 x i16> @vssubu_vx_v2i16_unmasked(<2 x i16> %va, i16 %b, i32 zeroext %evl) {
+; CHECK-LABEL: vssubu_vx_v2i16_unmasked:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
+; CHECK-NEXT: vssubu.vx v8, v8, a0
+; CHECK-NEXT: ret
+ %elt.head = insertelement <2 x i16> poison, i16 %b, i32 0
+ %vb = shufflevector <2 x i16> %elt.head, <2 x i16> poison, <2 x i32> zeroinitializer
+ %head = insertelement <2 x i1> poison, i1 true, i32 0
+ %m = shufflevector <2 x i1> %head, <2 x i1> poison, <2 x i32> zeroinitializer
+ %v = call <2 x i16> @llvm.vp.usub.sat.v2i16(<2 x i16> %va, <2 x i16> %vb, <2 x i1> %m, i32 %evl)
+ ret <2 x i16> %v
+}
+
+define <2 x i16> @vssubu_vi_v2i16(<2 x i16> %va, <2 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vssubu_vi_v2i16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: li a1, -1
+; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
+; CHECK-NEXT: vssubu.vx v8, v8, a1, v0.t
+; CHECK-NEXT: ret
+ %elt.head = insertelement <2 x i16> poison, i16 -1, i32 0
+ %vb = shufflevector <2 x i16> %elt.head, <2 x i16> poison, <2 x i32> zeroinitializer
+ %v = call <2 x i16> @llvm.vp.usub.sat.v2i16(<2 x i16> %va, <2 x i16> %vb, <2 x i1> %m, i32 %evl)
+ ret <2 x i16> %v
+}
+
+define <2 x i16> @vssubu_vi_v2i16_unmasked(<2 x i16> %va, i32 zeroext %evl) {
+; CHECK-LABEL: vssubu_vi_v2i16_unmasked:
+; CHECK: # %bb.0:
+; CHECK-NEXT: li a1, -1
+; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
+; CHECK-NEXT: vssubu.vx v8, v8, a1
+; CHECK-NEXT: ret
+ %elt.head = insertelement <2 x i16> poison, i16 -1, i32 0
+ %vb = shufflevector <2 x i16> %elt.head, <2 x i16> poison, <2 x i32> zeroinitializer
+ %head = insertelement <2 x i1> poison, i1 true, i32 0
+ %m = shufflevector <2 x i1> %head, <2 x i1> poison, <2 x i32> zeroinitializer
+ %v = call <2 x i16> @llvm.vp.usub.sat.v2i16(<2 x i16> %va, <2 x i16> %vb, <2 x i1> %m, i32 %evl)
+ ret <2 x i16> %v
+}
+
+declare <4 x i16> @llvm.vp.usub.sat.v4i16(<4 x i16>, <4 x i16>, <4 x i1>, i32)
+
+define <4 x i16> @vssubu_vv_v4i16(<4 x i16> %va, <4 x i16> %b, <4 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vssubu_vv_v4i16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma
+; CHECK-NEXT: vssubu.vv v8, v8, v9, v0.t
+; CHECK-NEXT: ret
+ %v = call <4 x i16> @llvm.vp.usub.sat.v4i16(<4 x i16> %va, <4 x i16> %b, <4 x i1> %m, i32 %evl)
+ ret <4 x i16> %v
+}
+
+define <4 x i16> @vssubu_vv_v4i16_unmasked(<4 x i16> %va, <4 x i16> %b, i32 zeroext %evl) {
+; CHECK-LABEL: vssubu_vv_v4i16_unmasked:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma
+; CHECK-NEXT: vssubu.vv v8, v8, v9
+; CHECK-NEXT: ret
+ %head = insertelement <4 x i1> poison, i1 true, i32 0
+ %m = shufflevector <4 x i1> %head, <4 x i1> poison, <4 x i32> zeroinitializer
+ %v = call <4 x i16> @llvm.vp.usub.sat.v4i16(<4 x i16> %va, <4 x i16> %b, <4 x i1> %m, i32 %evl)
+ ret <4 x i16> %v
+}
+
+define <4 x i16> @vssubu_vx_v4i16(<4 x i16> %va, i16 %b, <4 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vssubu_vx_v4i16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
+; CHECK-NEXT: vssubu.vx v8, v8, a0, v0.t
+; CHECK-NEXT: ret
+ %elt.head = insertelement <4 x i16> poison, i16 %b, i32 0
+ %vb = shufflevector <4 x i16> %elt.head, <4 x i16> poison, <4 x i32> zeroinitializer
+ %v = call <4 x i16> @llvm.vp.usub.sat.v4i16(<4 x i16> %va, <4 x i16> %vb, <4 x i1> %m, i32 %evl)
+ ret <4 x i16> %v
+}
+
+define <4 x i16> @vssubu_vx_v4i16_unmasked(<4 x i16> %va, i16 %b, i32 zeroext %evl) {
+; CHECK-LABEL: vssubu_vx_v4i16_unmasked:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
+; CHECK-NEXT: vssubu.vx v8, v8, a0
+; CHECK-NEXT: ret
+ %elt.head = insertelement <4 x i16> poison, i16 %b, i32 0
+ %vb = shufflevector <4 x i16> %elt.head, <4 x i16> poison, <4 x i32> zeroinitializer
+ %head = insertelement <4 x i1> poison, i1 true, i32 0
+ %m = shufflevector <4 x i1> %head, <4 x i1> poison, <4 x i32> zeroinitializer
+ %v = call <4 x i16> @llvm.vp.usub.sat.v4i16(<4 x i16> %va, <4 x i16> %vb, <4 x i1> %m, i32 %evl)
+ ret <4 x i16> %v
+}
+
+define <4 x i16> @vssubu_vi_v4i16(<4 x i16> %va, <4 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vssubu_vi_v4i16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: li a1, -1
+; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma
+; CHECK-NEXT: vssubu.vx v8, v8, a1, v0.t
+; CHECK-NEXT: ret
+ %elt.head = insertelement <4 x i16> poison, i16 -1, i32 0
+ %vb = shufflevector <4 x i16> %elt.head, <4 x i16> poison, <4 x i32> zeroinitializer
+ %v = call <4 x i16> @llvm.vp.usub.sat.v4i16(<4 x i16> %va, <4 x i16> %vb, <4 x i1> %m, i32 %evl)
+ ret <4 x i16> %v
+}
+
+define <4 x i16> @vssubu_vi_v4i16_unmasked(<4 x i16> %va, i32 zeroext %evl) {
+; CHECK-LABEL: vssubu_vi_v4i16_unmasked:
+; CHECK: # %bb.0:
+; CHECK-NEXT: li a1, -1
+; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma
+; CHECK-NEXT: vssubu.vx v8, v8, a1
+; CHECK-NEXT: ret
+ %elt.head = insertelement <4 x i16> poison, i16 -1, i32 0
+ %vb = shufflevector <4 x i16> %elt.head, <4 x i16> poison, <4 x i32> zeroinitializer
+ %head = insertelement <4 x i1> poison, i1 true, i32 0
+ %m = shufflevector <4 x i1> %head, <4 x i1> poison, <4 x i32> zeroinitializer
+ %v = call <4 x i16> @llvm.vp.usub.sat.v4i16(<4 x i16> %va, <4 x i16> %vb, <4 x i1> %m, i32 %evl)
+ ret <4 x i16> %v
+}
+
+declare <8 x i16> @llvm.vp.usub.sat.v8i16(<8 x i16>, <8 x i16>, <8 x i1>, i32)
+
+define <8 x i16> @vssubu_vv_v8i16(<8 x i16> %va, <8 x i16> %b, <8 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vssubu_vv_v8i16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma
+; CHECK-NEXT: vssubu.vv v8, v8, v9, v0.t
+; CHECK-NEXT: ret
+ %v = call <8 x i16> @llvm.vp.usub.sat.v8i16(<8 x i16> %va, <8 x i16> %b, <8 x i1> %m, i32 %evl)
+ ret <8 x i16> %v
+}
+
+define <8 x i16> @vssubu_vv_v8i16_unmasked(<8 x i16> %va, <8 x i16> %b, i32 zeroext %evl) {
+; CHECK-LABEL: vssubu_vv_v8i16_unmasked:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma
+; CHECK-NEXT: vssubu.vv v8, v8, v9
+; CHECK-NEXT: ret
+ %head = insertelement <8 x i1> poison, i1 true, i32 0
+ %m = shufflevector <8 x i1> %head, <8 x i1> poison, <8 x i32> zeroinitializer
+ %v = call <8 x i16> @llvm.vp.usub.sat.v8i16(<8 x i16> %va, <8 x i16> %b, <8 x i1> %m, i32 %evl)
+ ret <8 x i16> %v
+}
+
+define <8 x i16> @vssubu_vx_v8i16(<8 x i16> %va, i16 %b, <8 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vssubu_vx_v8i16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
+; CHECK-NEXT: vssubu.vx v8, v8, a0, v0.t
+; CHECK-NEXT: ret
+ %elt.head = insertelement <8 x i16> poison, i16 %b, i32 0
+ %vb = shufflevector <8 x i16> %elt.head, <8 x i16> poison, <8 x i32> zeroinitializer
+ %v = call <8 x i16> @llvm.vp.usub.sat.v8i16(<8 x i16> %va, <8 x i16> %vb, <8 x i1> %m, i32 %evl)
+ ret <8 x i16> %v
+}
+
+define <8 x i16> @vssubu_vx_v8i16_unmasked(<8 x i16> %va, i16 %b, i32 zeroext %evl) {
+; CHECK-LABEL: vssubu_vx_v8i16_unmasked:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
+; CHECK-NEXT: vssubu.vx v8, v8, a0
+; CHECK-NEXT: ret
+ %elt.head = insertelement <8 x i16> poison, i16 %b, i32 0
+ %vb = shufflevector <8 x i16> %elt.head, <8 x i16> poison, <8 x i32> zeroinitializer
+ %head = insertelement <8 x i1> poison, i1 true, i32 0
+ %m = shufflevector <8 x i1> %head, <8 x i1> poison, <8 x i32> zeroinitializer
+ %v = call <8 x i16> @llvm.vp.usub.sat.v8i16(<8 x i16> %va, <8 x i16> %vb, <8 x i1> %m, i32 %evl)
+ ret <8 x i16> %v
+}
+
+define <8 x i16> @vssubu_vi_v8i16(<8 x i16> %va, <8 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vssubu_vi_v8i16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: li a1, -1
+; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma
+; CHECK-NEXT: vssubu.vx v8, v8, a1, v0.t
+; CHECK-NEXT: ret
+ %elt.head = insertelement <8 x i16> poison, i16 -1, i32 0
+ %vb = shufflevector <8 x i16> %elt.head, <8 x i16> poison, <8 x i32> zeroinitializer
+ %v = call <8 x i16> @llvm.vp.usub.sat.v8i16(<8 x i16> %va, <8 x i16> %vb, <8 x i1> %m, i32 %evl)
+ ret <8 x i16> %v
+}
+
+define <8 x i16> @vssubu_vi_v8i16_unmasked(<8 x i16> %va, i32 zeroext %evl) {
+; CHECK-LABEL: vssubu_vi_v8i16_unmasked:
+; CHECK: # %bb.0:
+; CHECK-NEXT: li a1, -1
+; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma
+; CHECK-NEXT: vssubu.vx v8, v8, a1
+; CHECK-NEXT: ret
+ %elt.head = insertelement <8 x i16> poison, i16 -1, i32 0
+ %vb = shufflevector <8 x i16> %elt.head, <8 x i16> poison, <8 x i32> zeroinitializer
+ %head = insertelement <8 x i1> poison, i1 true, i32 0
+ %m = shufflevector <8 x i1> %head, <8 x i1> poison, <8 x i32> zeroinitializer
+ %v = call <8 x i16> @llvm.vp.usub.sat.v8i16(<8 x i16> %va, <8 x i16> %vb, <8 x i1> %m, i32 %evl)
+ ret <8 x i16> %v
+}
+
+declare <16 x i16> @llvm.vp.usub.sat.v16i16(<16 x i16>, <16 x i16>, <16 x i1>, i32)
+
+define <16 x i16> @vssubu_vv_v16i16(<16 x i16> %va, <16 x i16> %b, <16 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vssubu_vv_v16i16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma
+; CHECK-NEXT: vssubu.vv v8, v8, v10, v0.t
+; CHECK-NEXT: ret
+ %v = call <16 x i16> @llvm.vp.usub.sat.v16i16(<16 x i16> %va, <16 x i16> %b, <16 x i1> %m, i32 %evl)
+ ret <16 x i16> %v
+}
+
+define <16 x i16> @vssubu_vv_v16i16_unmasked(<16 x i16> %va, <16 x i16> %b, i32 zeroext %evl) {
+; CHECK-LABEL: vssubu_vv_v16i16_unmasked:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma
+; CHECK-NEXT: vssubu.vv v8, v8, v10
+; CHECK-NEXT: ret
+ %head = insertelement <16 x i1> poison, i1 true, i32 0
+ %m = shufflevector <16 x i1> %head, <16 x i1> poison, <16 x i32> zeroinitializer
+ %v = call <16 x i16> @llvm.vp.usub.sat.v16i16(<16 x i16> %va, <16 x i16> %b, <16 x i1> %m, i32 %evl)
+ ret <16 x i16> %v
+}
+
+define <16 x i16> @vssubu_vx_v16i16(<16 x i16> %va, i16 %b, <16 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vssubu_vx_v16i16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma
+; CHECK-NEXT: vssubu.vx v8, v8, a0, v0.t
+; CHECK-NEXT: ret
+ %elt.head = insertelement <16 x i16> poison, i16 %b, i32 0
+ %vb = shufflevector <16 x i16> %elt.head, <16 x i16> poison, <16 x i32> zeroinitializer
+ %v = call <16 x i16> @llvm.vp.usub.sat.v16i16(<16 x i16> %va, <16 x i16> %vb, <16 x i1> %m, i32 %evl)
+ ret <16 x i16> %v
+}
+
+define <16 x i16> @vssubu_vx_v16i16_unmasked(<16 x i16> %va, i16 %b, i32 zeroext %evl) {
+; CHECK-LABEL: vssubu_vx_v16i16_unmasked:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma
+; CHECK-NEXT: vssubu.vx v8, v8, a0
+; CHECK-NEXT: ret
+ %elt.head = insertelement <16 x i16> poison, i16 %b, i32 0
+ %vb = shufflevector <16 x i16> %elt.head, <16 x i16> poison, <16 x i32> zeroinitializer
+ %head = insertelement <16 x i1> poison, i1 true, i32 0
+ %m = shufflevector <16 x i1> %head, <16 x i1> poison, <16 x i32> zeroinitializer
+ %v = call <16 x i16> @llvm.vp.usub.sat.v16i16(<16 x i16> %va, <16 x i16> %vb, <16 x i1> %m, i32 %evl)
+ ret <16 x i16> %v
+}
+
+define <16 x i16> @vssubu_vi_v16i16(<16 x i16> %va, <16 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vssubu_vi_v16i16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: li a1, -1
+; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma
+; CHECK-NEXT: vssubu.vx v8, v8, a1, v0.t
+; CHECK-NEXT: ret
+ %elt.head = insertelement <16 x i16> poison, i16 -1, i32 0
+ %vb = shufflevector <16 x i16> %elt.head, <16 x i16> poison, <16 x i32> zeroinitializer
+ %v = call <16 x i16> @llvm.vp.usub.sat.v16i16(<16 x i16> %va, <16 x i16> %vb, <16 x i1> %m, i32 %evl)
+ ret <16 x i16> %v
+}
+
+define <16 x i16> @vssubu_vi_v16i16_unmasked(<16 x i16> %va, i32 zeroext %evl) {
+; CHECK-LABEL: vssubu_vi_v16i16_unmasked:
+; CHECK: # %bb.0:
+; CHECK-NEXT: li a1, -1
+; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma
+; CHECK-NEXT: vssubu.vx v8, v8, a1
+; CHECK-NEXT: ret
+ %elt.head = insertelement <16 x i16> poison, i16 -1, i32 0
+ %vb = shufflevector <16 x i16> %elt.head, <16 x i16> poison, <16 x i32> zeroinitializer
+ %head = insertelement <16 x i1> poison, i1 true, i32 0
+ %m = shufflevector <16 x i1> %head, <16 x i1> poison, <16 x i32> zeroinitializer
+ %v = call <16 x i16> @llvm.vp.usub.sat.v16i16(<16 x i16> %va, <16 x i16> %vb, <16 x i1> %m, i32 %evl)
+ ret <16 x i16> %v
+}
+
+declare <2 x i32> @llvm.vp.usub.sat.v2i32(<2 x i32>, <2 x i32>, <2 x i1>, i32)
+
+define <2 x i32> @vssubu_vv_v2i32(<2 x i32> %va, <2 x i32> %b, <2 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vssubu_vv_v2i32:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma
+; CHECK-NEXT: vssubu.vv v8, v8, v9, v0.t
+; CHECK-NEXT: ret
+ %v = call <2 x i32> @llvm.vp.usub.sat.v2i32(<2 x i32> %va, <2 x i32> %b, <2 x i1> %m, i32 %evl)
+ ret <2 x i32> %v
+}
+
+define <2 x i32> @vssubu_vv_v2i32_unmasked(<2 x i32> %va, <2 x i32> %b, i32 zeroext %evl) {
+; CHECK-LABEL: vssubu_vv_v2i32_unmasked:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma
+; CHECK-NEXT: vssubu.vv v8, v8, v9
+; CHECK-NEXT: ret
+ %head = insertelement <2 x i1> poison, i1 true, i32 0
+ %m = shufflevector <2 x i1> %head, <2 x i1> poison, <2 x i32> zeroinitializer
+ %v = call <2 x i32> @llvm.vp.usub.sat.v2i32(<2 x i32> %va, <2 x i32> %b, <2 x i1> %m, i32 %evl)
+ ret <2 x i32> %v
+}
+
+define <2 x i32> @vssubu_vx_v2i32(<2 x i32> %va, i32 %b, <2 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vssubu_vx_v2i32:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
+; CHECK-NEXT: vssubu.vx v8, v8, a0, v0.t
+; CHECK-NEXT: ret
+ %elt.head = insertelement <2 x i32> poison, i32 %b, i32 0
+ %vb = shufflevector <2 x i32> %elt.head, <2 x i32> poison, <2 x i32> zeroinitializer
+ %v = call <2 x i32> @llvm.vp.usub.sat.v2i32(<2 x i32> %va, <2 x i32> %vb, <2 x i1> %m, i32 %evl)
+ ret <2 x i32> %v
+}
+
+define <2 x i32> @vssubu_vx_v2i32_unmasked(<2 x i32> %va, i32 %b, i32 zeroext %evl) {
+; CHECK-LABEL: vssubu_vx_v2i32_unmasked:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
+; CHECK-NEXT: vssubu.vx v8, v8, a0
+; CHECK-NEXT: ret
+ %elt.head = insertelement <2 x i32> poison, i32 %b, i32 0
+ %vb = shufflevector <2 x i32> %elt.head, <2 x i32> poison, <2 x i32> zeroinitializer
+ %head = insertelement <2 x i1> poison, i1 true, i32 0
+ %m = shufflevector <2 x i1> %head, <2 x i1> poison, <2 x i32> zeroinitializer
+ %v = call <2 x i32> @llvm.vp.usub.sat.v2i32(<2 x i32> %va, <2 x i32> %vb, <2 x i1> %m, i32 %evl)
+ ret <2 x i32> %v
+}
+
+define <2 x i32> @vssubu_vi_v2i32(<2 x i32> %va, <2 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vssubu_vi_v2i32:
+; CHECK: # %bb.0:
+; CHECK-NEXT: li a1, -1
+; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma
+; CHECK-NEXT: vssubu.vx v8, v8, a1, v0.t
+; CHECK-NEXT: ret
+ %elt.head = insertelement <2 x i32> poison, i32 -1, i32 0
+ %vb = shufflevector <2 x i32> %elt.head, <2 x i32> poison, <2 x i32> zeroinitializer
+ %v = call <2 x i32> @llvm.vp.usub.sat.v2i32(<2 x i32> %va, <2 x i32> %vb, <2 x i1> %m, i32 %evl)
+ ret <2 x i32> %v
+}
+
+define <2 x i32> @vssubu_vi_v2i32_unmasked(<2 x i32> %va, i32 zeroext %evl) {
+; CHECK-LABEL: vssubu_vi_v2i32_unmasked:
+; CHECK: # %bb.0:
+; CHECK-NEXT: li a1, -1
+; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma
+; CHECK-NEXT: vssubu.vx v8, v8, a1
+; CHECK-NEXT: ret
+ %elt.head = insertelement <2 x i32> poison, i32 -1, i32 0
+ %vb = shufflevector <2 x i32> %elt.head, <2 x i32> poison, <2 x i32> zeroinitializer
+ %head = insertelement <2 x i1> poison, i1 true, i32 0
+ %m = shufflevector <2 x i1> %head, <2 x i1> poison, <2 x i32> zeroinitializer
+ %v = call <2 x i32> @llvm.vp.usub.sat.v2i32(<2 x i32> %va, <2 x i32> %vb, <2 x i1> %m, i32 %evl)
+ ret <2 x i32> %v
+}
+
+declare <4 x i32> @llvm.vp.usub.sat.v4i32(<4 x i32>, <4 x i32>, <4 x i1>, i32)
+
+define <4 x i32> @vssubu_vv_v4i32(<4 x i32> %va, <4 x i32> %b, <4 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vssubu_vv_v4i32:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma
+; CHECK-NEXT: vssubu.vv v8, v8, v9, v0.t
+; CHECK-NEXT: ret
+ %v = call <4 x i32> @llvm.vp.usub.sat.v4i32(<4 x i32> %va, <4 x i32> %b, <4 x i1> %m, i32 %evl)
+ ret <4 x i32> %v
+}
+
+define <4 x i32> @vssubu_vv_v4i32_unmasked(<4 x i32> %va, <4 x i32> %b, i32 zeroext %evl) {
+; CHECK-LABEL: vssubu_vv_v4i32_unmasked:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma
+; CHECK-NEXT: vssubu.vv v8, v8, v9
+; CHECK-NEXT: ret
+ %head = insertelement <4 x i1> poison, i1 true, i32 0
+ %m = shufflevector <4 x i1> %head, <4 x i1> poison, <4 x i32> zeroinitializer
+ %v = call <4 x i32> @llvm.vp.usub.sat.v4i32(<4 x i32> %va, <4 x i32> %b, <4 x i1> %m, i32 %evl)
+ ret <4 x i32> %v
+}
+
+define <4 x i32> @vssubu_vx_v4i32(<4 x i32> %va, i32 %b, <4 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vssubu_vx_v4i32:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
+; CHECK-NEXT: vssubu.vx v8, v8, a0, v0.t
+; CHECK-NEXT: ret
+ %elt.head = insertelement <4 x i32> poison, i32 %b, i32 0
+ %vb = shufflevector <4 x i32> %elt.head, <4 x i32> poison, <4 x i32> zeroinitializer
+ %v = call <4 x i32> @llvm.vp.usub.sat.v4i32(<4 x i32> %va, <4 x i32> %vb, <4 x i1> %m, i32 %evl)
+ ret <4 x i32> %v
+}
+
+define <4 x i32> @vssubu_vx_v4i32_unmasked(<4 x i32> %va, i32 %b, i32 zeroext %evl) {
+; CHECK-LABEL: vssubu_vx_v4i32_unmasked:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
+; CHECK-NEXT: vssubu.vx v8, v8, a0
+; CHECK-NEXT: ret
+ %elt.head = insertelement <4 x i32> poison, i32 %b, i32 0
+ %vb = shufflevector <4 x i32> %elt.head, <4 x i32> poison, <4 x i32> zeroinitializer
+ %head = insertelement <4 x i1> poison, i1 true, i32 0
+ %m = shufflevector <4 x i1> %head, <4 x i1> poison, <4 x i32> zeroinitializer
+ %v = call <4 x i32> @llvm.vp.usub.sat.v4i32(<4 x i32> %va, <4 x i32> %vb, <4 x i1> %m, i32 %evl)
+ ret <4 x i32> %v
+}
+
+define <4 x i32> @vssubu_vi_v4i32(<4 x i32> %va, <4 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vssubu_vi_v4i32:
+; CHECK: # %bb.0:
+; CHECK-NEXT: li a1, -1
+; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma
+; CHECK-NEXT: vssubu.vx v8, v8, a1, v0.t
+; CHECK-NEXT: ret
+ %elt.head = insertelement <4 x i32> poison, i32 -1, i32 0
+ %vb = shufflevector <4 x i32> %elt.head, <4 x i32> poison, <4 x i32> zeroinitializer
+ %v = call <4 x i32> @llvm.vp.usub.sat.v4i32(<4 x i32> %va, <4 x i32> %vb, <4 x i1> %m, i32 %evl)
+ ret <4 x i32> %v
+}
+
+define <4 x i32> @vssubu_vi_v4i32_unmasked(<4 x i32> %va, i32 zeroext %evl) {
+; CHECK-LABEL: vssubu_vi_v4i32_unmasked:
+; CHECK: # %bb.0:
+; CHECK-NEXT: li a1, -1
+; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma
+; CHECK-NEXT: vssubu.vx v8, v8, a1
+; CHECK-NEXT: ret
+ %elt.head = insertelement <4 x i32> poison, i32 -1, i32 0
+ %vb = shufflevector <4 x i32> %elt.head, <4 x i32> poison, <4 x i32> zeroinitializer
+ %head = insertelement <4 x i1> poison, i1 true, i32 0
+ %m = shufflevector <4 x i1> %head, <4 x i1> poison, <4 x i32> zeroinitializer
+ %v = call <4 x i32> @llvm.vp.usub.sat.v4i32(<4 x i32> %va, <4 x i32> %vb, <4 x i1> %m, i32 %evl)
+ ret <4 x i32> %v
+}
+
+declare <8 x i32> @llvm.vp.usub.sat.v8i32(<8 x i32>, <8 x i32>, <8 x i1>, i32)
+
+define <8 x i32> @vssubu_vv_v8i32(<8 x i32> %va, <8 x i32> %b, <8 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vssubu_vv_v8i32:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma
+; CHECK-NEXT: vssubu.vv v8, v8, v10, v0.t
+; CHECK-NEXT: ret
+ %v = call <8 x i32> @llvm.vp.usub.sat.v8i32(<8 x i32> %va, <8 x i32> %b, <8 x i1> %m, i32 %evl)
+ ret <8 x i32> %v
+}
+
+define <8 x i32> @vssubu_vv_v8i32_unmasked(<8 x i32> %va, <8 x i32> %b, i32 zeroext %evl) {
+; CHECK-LABEL: vssubu_vv_v8i32_unmasked:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma
+; CHECK-NEXT: vssubu.vv v8, v8, v10
+; CHECK-NEXT: ret
+ %head = insertelement <8 x i1> poison, i1 true, i32 0
+ %m = shufflevector <8 x i1> %head, <8 x i1> poison, <8 x i32> zeroinitializer
+ %v = call <8 x i32> @llvm.vp.usub.sat.v8i32(<8 x i32> %va, <8 x i32> %b, <8 x i1> %m, i32 %evl)
+ ret <8 x i32> %v
+}
+
+define <8 x i32> @vssubu_vx_v8i32(<8 x i32> %va, i32 %b, <8 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vssubu_vx_v8i32:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma
+; CHECK-NEXT: vssubu.vx v8, v8, a0, v0.t
+; CHECK-NEXT: ret
+ %elt.head = insertelement <8 x i32> poison, i32 %b, i32 0
+ %vb = shufflevector <8 x i32> %elt.head, <8 x i32> poison, <8 x i32> zeroinitializer
+ %v = call <8 x i32> @llvm.vp.usub.sat.v8i32(<8 x i32> %va, <8 x i32> %vb, <8 x i1> %m, i32 %evl)
+ ret <8 x i32> %v
+}
+
+define <8 x i32> @vssubu_vx_v8i32_unmasked(<8 x i32> %va, i32 %b, i32 zeroext %evl) {
+; CHECK-LABEL: vssubu_vx_v8i32_unmasked:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma
+; CHECK-NEXT: vssubu.vx v8, v8, a0
+; CHECK-NEXT: ret
+ %elt.head = insertelement <8 x i32> poison, i32 %b, i32 0
+ %vb = shufflevector <8 x i32> %elt.head, <8 x i32> poison, <8 x i32> zeroinitializer
+ %head = insertelement <8 x i1> poison, i1 true, i32 0
+ %m = shufflevector <8 x i1> %head, <8 x i1> poison, <8 x i32> zeroinitializer
+ %v = call <8 x i32> @llvm.vp.usub.sat.v8i32(<8 x i32> %va, <8 x i32> %vb, <8 x i1> %m, i32 %evl)
+ ret <8 x i32> %v
+}
+
+define <8 x i32> @vssubu_vi_v8i32(<8 x i32> %va, <8 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vssubu_vi_v8i32:
+; CHECK: # %bb.0:
+; CHECK-NEXT: li a1, -1
+; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma
+; CHECK-NEXT: vssubu.vx v8, v8, a1, v0.t
+; CHECK-NEXT: ret
+ %elt.head = insertelement <8 x i32> poison, i32 -1, i32 0
+ %vb = shufflevector <8 x i32> %elt.head, <8 x i32> poison, <8 x i32> zeroinitializer
+ %v = call <8 x i32> @llvm.vp.usub.sat.v8i32(<8 x i32> %va, <8 x i32> %vb, <8 x i1> %m, i32 %evl)
+ ret <8 x i32> %v
+}
+
+define <8 x i32> @vssubu_vi_v8i32_unmasked(<8 x i32> %va, i32 zeroext %evl) {
+; CHECK-LABEL: vssubu_vi_v8i32_unmasked:
+; CHECK: # %bb.0:
+; CHECK-NEXT: li a1, -1
+; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma
+; CHECK-NEXT: vssubu.vx v8, v8, a1
+; CHECK-NEXT: ret
+ %elt.head = insertelement <8 x i32> poison, i32 -1, i32 0
+ %vb = shufflevector <8 x i32> %elt.head, <8 x i32> poison, <8 x i32> zeroinitializer
+ %head = insertelement <8 x i1> poison, i1 true, i32 0
+ %m = shufflevector <8 x i1> %head, <8 x i1> poison, <8 x i32> zeroinitializer
+ %v = call <8 x i32> @llvm.vp.usub.sat.v8i32(<8 x i32> %va, <8 x i32> %vb, <8 x i1> %m, i32 %evl)
+ ret <8 x i32> %v
+}
+
+declare <16 x i32> @llvm.vp.usub.sat.v16i32(<16 x i32>, <16 x i32>, <16 x i1>, i32)
+
+define <16 x i32> @vssubu_vv_v16i32(<16 x i32> %va, <16 x i32> %b, <16 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vssubu_vv_v16i32:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma
+; CHECK-NEXT: vssubu.vv v8, v8, v12, v0.t
+; CHECK-NEXT: ret
+ %v = call <16 x i32> @llvm.vp.usub.sat.v16i32(<16 x i32> %va, <16 x i32> %b, <16 x i1> %m, i32 %evl)
+ ret <16 x i32> %v
+}
+
+define <16 x i32> @vssubu_vv_v16i32_unmasked(<16 x i32> %va, <16 x i32> %b, i32 zeroext %evl) {
+; CHECK-LABEL: vssubu_vv_v16i32_unmasked:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma
+; CHECK-NEXT: vssubu.vv v8, v8, v12
+; CHECK-NEXT: ret
+ %head = insertelement <16 x i1> poison, i1 true, i32 0
+ %m = shufflevector <16 x i1> %head, <16 x i1> poison, <16 x i32> zeroinitializer
+ %v = call <16 x i32> @llvm.vp.usub.sat.v16i32(<16 x i32> %va, <16 x i32> %b, <16 x i1> %m, i32 %evl)
+ ret <16 x i32> %v
+}
+
+define <16 x i32> @vssubu_vx_v16i32(<16 x i32> %va, i32 %b, <16 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vssubu_vx_v16i32:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma
+; CHECK-NEXT: vssubu.vx v8, v8, a0, v0.t
+; CHECK-NEXT: ret
+ %elt.head = insertelement <16 x i32> poison, i32 %b, i32 0
+ %vb = shufflevector <16 x i32> %elt.head, <16 x i32> poison, <16 x i32> zeroinitializer
+ %v = call <16 x i32> @llvm.vp.usub.sat.v16i32(<16 x i32> %va, <16 x i32> %vb, <16 x i1> %m, i32 %evl)
+ ret <16 x i32> %v
+}
+
+define <16 x i32> @vssubu_vx_v16i32_unmasked(<16 x i32> %va, i32 %b, i32 zeroext %evl) {
+; CHECK-LABEL: vssubu_vx_v16i32_unmasked:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma
+; CHECK-NEXT: vssubu.vx v8, v8, a0
+; CHECK-NEXT: ret
+ %elt.head = insertelement <16 x i32> poison, i32 %b, i32 0
+ %vb = shufflevector <16 x i32> %elt.head, <16 x i32> poison, <16 x i32> zeroinitializer
+ %head = insertelement <16 x i1> poison, i1 true, i32 0
+ %m = shufflevector <16 x i1> %head, <16 x i1> poison, <16 x i32> zeroinitializer
+ %v = call <16 x i32> @llvm.vp.usub.sat.v16i32(<16 x i32> %va, <16 x i32> %vb, <16 x i1> %m, i32 %evl)
+ ret <16 x i32> %v
+}
+
+define <16 x i32> @vssubu_vi_v16i32(<16 x i32> %va, <16 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vssubu_vi_v16i32:
+; CHECK: # %bb.0:
+; CHECK-NEXT: li a1, -1
+; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma
+; CHECK-NEXT: vssubu.vx v8, v8, a1, v0.t
+; CHECK-NEXT: ret
+ %elt.head = insertelement <16 x i32> poison, i32 -1, i32 0
+ %vb = shufflevector <16 x i32> %elt.head, <16 x i32> poison, <16 x i32> zeroinitializer
+ %v = call <16 x i32> @llvm.vp.usub.sat.v16i32(<16 x i32> %va, <16 x i32> %vb, <16 x i1> %m, i32 %evl)
+ ret <16 x i32> %v
+}
+
+define <16 x i32> @vssubu_vi_v16i32_unmasked(<16 x i32> %va, i32 zeroext %evl) {
+; CHECK-LABEL: vssubu_vi_v16i32_unmasked:
+; CHECK: # %bb.0:
+; CHECK-NEXT: li a1, -1
+; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma
+; CHECK-NEXT: vssubu.vx v8, v8, a1
+; CHECK-NEXT: ret
+ %elt.head = insertelement <16 x i32> poison, i32 -1, i32 0
+ %vb = shufflevector <16 x i32> %elt.head, <16 x i32> poison, <16 x i32> zeroinitializer
+ %head = insertelement <16 x i1> poison, i1 true, i32 0
+ %m = shufflevector <16 x i1> %head, <16 x i1> poison, <16 x i32> zeroinitializer
+ %v = call <16 x i32> @llvm.vp.usub.sat.v16i32(<16 x i32> %va, <16 x i32> %vb, <16 x i1> %m, i32 %evl)
+ ret <16 x i32> %v
+}
+
+declare <2 x i64> @llvm.vp.usub.sat.v2i64(<2 x i64>, <2 x i64>, <2 x i1>, i32)
+
+define <2 x i64> @vssubu_vv_v2i64(<2 x i64> %va, <2 x i64> %b, <2 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vssubu_vv_v2i64:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma
+; CHECK-NEXT: vssubu.vv v8, v8, v9, v0.t
+; CHECK-NEXT: ret
+ %v = call <2 x i64> @llvm.vp.usub.sat.v2i64(<2 x i64> %va, <2 x i64> %b, <2 x i1> %m, i32 %evl)
+ ret <2 x i64> %v
+}
+
+define <2 x i64> @vssubu_vv_v2i64_unmasked(<2 x i64> %va, <2 x i64> %b, i32 zeroext %evl) {
+; CHECK-LABEL: vssubu_vv_v2i64_unmasked:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma
+; CHECK-NEXT: vssubu.vv v8, v8, v9
+; CHECK-NEXT: ret
+ %head = insertelement <2 x i1> poison, i1 true, i32 0
+ %m = shufflevector <2 x i1> %head, <2 x i1> poison, <2 x i32> zeroinitializer
+ %v = call <2 x i64> @llvm.vp.usub.sat.v2i64(<2 x i64> %va, <2 x i64> %b, <2 x i1> %m, i32 %evl)
+ ret <2 x i64> %v
+}
+
+define <2 x i64> @vssubu_vx_v2i64(<2 x i64> %va, i64 %b, <2 x i1> %m, i32 zeroext %evl) {
+; RV32-LABEL: vssubu_vx_v2i64:
+; RV32: # %bb.0:
+; RV32-NEXT: addi sp, sp, -16
+; RV32-NEXT: .cfi_def_cfa_offset 16
+; RV32-NEXT: sw a1, 12(sp)
+; RV32-NEXT: sw a0, 8(sp)
+; RV32-NEXT: addi a0, sp, 8
+; RV32-NEXT: vsetivli zero, 2, e64, m1, ta, ma
+; RV32-NEXT: vlse64.v v9, (a0), zero
+; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, ma
+; RV32-NEXT: vssubu.vv v8, v8, v9, v0.t
+; RV32-NEXT: addi sp, sp, 16
+; RV32-NEXT: ret
+;
+; RV64-LABEL: vssubu_vx_v2i64:
+; RV64: # %bb.0:
+; RV64-NEXT: vsetvli zero, a1, e64, m1, ta, ma
+; RV64-NEXT: vssubu.vx v8, v8, a0, v0.t
+; RV64-NEXT: ret
+ %elt.head = insertelement <2 x i64> poison, i64 %b, i32 0
+ %vb = shufflevector <2 x i64> %elt.head, <2 x i64> poison, <2 x i32> zeroinitializer
+ %v = call <2 x i64> @llvm.vp.usub.sat.v2i64(<2 x i64> %va, <2 x i64> %vb, <2 x i1> %m, i32 %evl)
+ ret <2 x i64> %v
+}
+
+define <2 x i64> @vssubu_vx_v2i64_unmasked(<2 x i64> %va, i64 %b, i32 zeroext %evl) {
+; RV32-LABEL: vssubu_vx_v2i64_unmasked:
+; RV32: # %bb.0:
+; RV32-NEXT: addi sp, sp, -16
+; RV32-NEXT: .cfi_def_cfa_offset 16
+; RV32-NEXT: sw a1, 12(sp)
+; RV32-NEXT: sw a0, 8(sp)
+; RV32-NEXT: addi a0, sp, 8
+; RV32-NEXT: vsetivli zero, 2, e64, m1, ta, ma
+; RV32-NEXT: vlse64.v v9, (a0), zero
+; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, ma
+; RV32-NEXT: vssubu.vv v8, v8, v9
+; RV32-NEXT: addi sp, sp, 16
+; RV32-NEXT: ret
+;
+; RV64-LABEL: vssubu_vx_v2i64_unmasked:
+; RV64: # %bb.0:
+; RV64-NEXT: vsetvli zero, a1, e64, m1, ta, ma
+; RV64-NEXT: vssubu.vx v8, v8, a0
+; RV64-NEXT: ret
+ %elt.head = insertelement <2 x i64> poison, i64 %b, i32 0
+ %vb = shufflevector <2 x i64> %elt.head, <2 x i64> poison, <2 x i32> zeroinitializer
+ %head = insertelement <2 x i1> poison, i1 true, i32 0
+ %m = shufflevector <2 x i1> %head, <2 x i1> poison, <2 x i32> zeroinitializer
+ %v = call <2 x i64> @llvm.vp.usub.sat.v2i64(<2 x i64> %va, <2 x i64> %vb, <2 x i1> %m, i32 %evl)
+ ret <2 x i64> %v
+}
+
+define <2 x i64> @vssubu_vi_v2i64(<2 x i64> %va, <2 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vssubu_vi_v2i64:
+; CHECK: # %bb.0:
+; CHECK-NEXT: li a1, -1
+; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma
+; CHECK-NEXT: vssubu.vx v8, v8, a1, v0.t
+; CHECK-NEXT: ret
+ %elt.head = insertelement <2 x i64> poison, i64 -1, i32 0
+ %vb = shufflevector <2 x i64> %elt.head, <2 x i64> poison, <2 x i32> zeroinitializer
+ %v = call <2 x i64> @llvm.vp.usub.sat.v2i64(<2 x i64> %va, <2 x i64> %vb, <2 x i1> %m, i32 %evl)
+ ret <2 x i64> %v
+}
+
+define <2 x i64> @vssubu_vi_v2i64_unmasked(<2 x i64> %va, i32 zeroext %evl) {
+; CHECK-LABEL: vssubu_vi_v2i64_unmasked:
+; CHECK: # %bb.0:
+; CHECK-NEXT: li a1, -1
+; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma
+; CHECK-NEXT: vssubu.vx v8, v8, a1
+; CHECK-NEXT: ret
+ %elt.head = insertelement <2 x i64> poison, i64 -1, i32 0
+ %vb = shufflevector <2 x i64> %elt.head, <2 x i64> poison, <2 x i32> zeroinitializer
+ %head = insertelement <2 x i1> poison, i1 true, i32 0
+ %m = shufflevector <2 x i1> %head, <2 x i1> poison, <2 x i32> zeroinitializer
+ %v = call <2 x i64> @llvm.vp.usub.sat.v2i64(<2 x i64> %va, <2 x i64> %vb, <2 x i1> %m, i32 %evl)
+ ret <2 x i64> %v
+}
+
+declare <4 x i64> @llvm.vp.usub.sat.v4i64(<4 x i64>, <4 x i64>, <4 x i1>, i32)
+
+define <4 x i64> @vssubu_vv_v4i64(<4 x i64> %va, <4 x i64> %b, <4 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vssubu_vv_v4i64:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma
+; CHECK-NEXT: vssubu.vv v8, v8, v10, v0.t
+; CHECK-NEXT: ret
+ %v = call <4 x i64> @llvm.vp.usub.sat.v4i64(<4 x i64> %va, <4 x i64> %b, <4 x i1> %m, i32 %evl)
+ ret <4 x i64> %v
+}
+
+define <4 x i64> @vssubu_vv_v4i64_unmasked(<4 x i64> %va, <4 x i64> %b, i32 zeroext %evl) {
+; CHECK-LABEL: vssubu_vv_v4i64_unmasked:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma
+; CHECK-NEXT: vssubu.vv v8, v8, v10
+; CHECK-NEXT: ret
+ %head = insertelement <4 x i1> poison, i1 true, i32 0
+ %m = shufflevector <4 x i1> %head, <4 x i1> poison, <4 x i32> zeroinitializer
+ %v = call <4 x i64> @llvm.vp.usub.sat.v4i64(<4 x i64> %va, <4 x i64> %b, <4 x i1> %m, i32 %evl)
+ ret <4 x i64> %v
+}
+
+define <4 x i64> @vssubu_vx_v4i64(<4 x i64> %va, i64 %b, <4 x i1> %m, i32 zeroext %evl) {
+; RV32-LABEL: vssubu_vx_v4i64:
+; RV32: # %bb.0:
+; RV32-NEXT: addi sp, sp, -16
+; RV32-NEXT: .cfi_def_cfa_offset 16
+; RV32-NEXT: sw a1, 12(sp)
+; RV32-NEXT: sw a0, 8(sp)
+; RV32-NEXT: addi a0, sp, 8
+; RV32-NEXT: vsetivli zero, 4, e64, m2, ta, ma
+; RV32-NEXT: vlse64.v v10, (a0), zero
+; RV32-NEXT: vsetvli zero, a2, e64, m2, ta, ma
+; RV32-NEXT: vssubu.vv v8, v8, v10, v0.t
+; RV32-NEXT: addi sp, sp, 16
+; RV32-NEXT: ret
+;
+; RV64-LABEL: vssubu_vx_v4i64:
+; RV64: # %bb.0:
+; RV64-NEXT: vsetvli zero, a1, e64, m2, ta, ma
+; RV64-NEXT: vssubu.vx v8, v8, a0, v0.t
+; RV64-NEXT: ret
+ %elt.head = insertelement <4 x i64> poison, i64 %b, i32 0
+ %vb = shufflevector <4 x i64> %elt.head, <4 x i64> poison, <4 x i32> zeroinitializer
+ %v = call <4 x i64> @llvm.vp.usub.sat.v4i64(<4 x i64> %va, <4 x i64> %vb, <4 x i1> %m, i32 %evl)
+ ret <4 x i64> %v
+}
+
+define <4 x i64> @vssubu_vx_v4i64_unmasked(<4 x i64> %va, i64 %b, i32 zeroext %evl) {
+; RV32-LABEL: vssubu_vx_v4i64_unmasked:
+; RV32: # %bb.0:
+; RV32-NEXT: addi sp, sp, -16
+; RV32-NEXT: .cfi_def_cfa_offset 16
+; RV32-NEXT: sw a1, 12(sp)
+; RV32-NEXT: sw a0, 8(sp)
+; RV32-NEXT: addi a0, sp, 8
+; RV32-NEXT: vsetivli zero, 4, e64, m2, ta, ma
+; RV32-NEXT: vlse64.v v10, (a0), zero
+; RV32-NEXT: vsetvli zero, a2, e64, m2, ta, ma
+; RV32-NEXT: vssubu.vv v8, v8, v10
+; RV32-NEXT: addi sp, sp, 16
+; RV32-NEXT: ret
+;
+; RV64-LABEL: vssubu_vx_v4i64_unmasked:
+; RV64: # %bb.0:
+; RV64-NEXT: vsetvli zero, a1, e64, m2, ta, ma
+; RV64-NEXT: vssubu.vx v8, v8, a0
+; RV64-NEXT: ret
+ %elt.head = insertelement <4 x i64> poison, i64 %b, i32 0
+ %vb = shufflevector <4 x i64> %elt.head, <4 x i64> poison, <4 x i32> zeroinitializer
+ %head = insertelement <4 x i1> poison, i1 true, i32 0
+ %m = shufflevector <4 x i1> %head, <4 x i1> poison, <4 x i32> zeroinitializer
+ %v = call <4 x i64> @llvm.vp.usub.sat.v4i64(<4 x i64> %va, <4 x i64> %vb, <4 x i1> %m, i32 %evl)
+ ret <4 x i64> %v
+}
+
+define <4 x i64> @vssubu_vi_v4i64(<4 x i64> %va, <4 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vssubu_vi_v4i64:
+; CHECK: # %bb.0:
+; CHECK-NEXT: li a1, -1
+; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma
+; CHECK-NEXT: vssubu.vx v8, v8, a1, v0.t
+; CHECK-NEXT: ret
+ %elt.head = insertelement <4 x i64> poison, i64 -1, i32 0
+ %vb = shufflevector <4 x i64> %elt.head, <4 x i64> poison, <4 x i32> zeroinitializer
+ %v = call <4 x i64> @llvm.vp.usub.sat.v4i64(<4 x i64> %va, <4 x i64> %vb, <4 x i1> %m, i32 %evl)
+ ret <4 x i64> %v
+}
+
+define <4 x i64> @vssubu_vi_v4i64_unmasked(<4 x i64> %va, i32 zeroext %evl) {
+; CHECK-LABEL: vssubu_vi_v4i64_unmasked:
+; CHECK: # %bb.0:
+; CHECK-NEXT: li a1, -1
+; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma
+; CHECK-NEXT: vssubu.vx v8, v8, a1
+; CHECK-NEXT: ret
+ %elt.head = insertelement <4 x i64> poison, i64 -1, i32 0
+ %vb = shufflevector <4 x i64> %elt.head, <4 x i64> poison, <4 x i32> zeroinitializer
+ %head = insertelement <4 x i1> poison, i1 true, i32 0
+ %m = shufflevector <4 x i1> %head, <4 x i1> poison, <4 x i32> zeroinitializer
+ %v = call <4 x i64> @llvm.vp.usub.sat.v4i64(<4 x i64> %va, <4 x i64> %vb, <4 x i1> %m, i32 %evl)
+ ret <4 x i64> %v
+}
+
+declare <8 x i64> @llvm.vp.usub.sat.v8i64(<8 x i64>, <8 x i64>, <8 x i1>, i32)
+
+define <8 x i64> @vssubu_vv_v8i64(<8 x i64> %va, <8 x i64> %b, <8 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vssubu_vv_v8i64:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma
+; CHECK-NEXT: vssubu.vv v8, v8, v12, v0.t
+; CHECK-NEXT: ret
+ %v = call <8 x i64> @llvm.vp.usub.sat.v8i64(<8 x i64> %va, <8 x i64> %b, <8 x i1> %m, i32 %evl)
+ ret <8 x i64> %v
+}
+
+define <8 x i64> @vssubu_vv_v8i64_unmasked(<8 x i64> %va, <8 x i64> %b, i32 zeroext %evl) {
+; CHECK-LABEL: vssubu_vv_v8i64_unmasked:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma
+; CHECK-NEXT: vssubu.vv v8, v8, v12
+; CHECK-NEXT: ret
+ %head = insertelement <8 x i1> poison, i1 true, i32 0
+ %m = shufflevector <8 x i1> %head, <8 x i1> poison, <8 x i32> zeroinitializer
+ %v = call <8 x i64> @llvm.vp.usub.sat.v8i64(<8 x i64> %va, <8 x i64> %b, <8 x i1> %m, i32 %evl)
+ ret <8 x i64> %v
+}
+
+define <8 x i64> @vssubu_vx_v8i64(<8 x i64> %va, i64 %b, <8 x i1> %m, i32 zeroext %evl) {
+; RV32-LABEL: vssubu_vx_v8i64:
+; RV32: # %bb.0:
+; RV32-NEXT: addi sp, sp, -16
+; RV32-NEXT: .cfi_def_cfa_offset 16
+; RV32-NEXT: sw a1, 12(sp)
+; RV32-NEXT: sw a0, 8(sp)
+; RV32-NEXT: addi a0, sp, 8
+; RV32-NEXT: vsetivli zero, 8, e64, m4, ta, ma
+; RV32-NEXT: vlse64.v v12, (a0), zero
+; RV32-NEXT: vsetvli zero, a2, e64, m4, ta, ma
+; RV32-NEXT: vssubu.vv v8, v8, v12, v0.t
+; RV32-NEXT: addi sp, sp, 16
+; RV32-NEXT: ret
+;
+; RV64-LABEL: vssubu_vx_v8i64:
+; RV64: # %bb.0:
+; RV64-NEXT: vsetvli zero, a1, e64, m4, ta, ma
+; RV64-NEXT: vssubu.vx v8, v8, a0, v0.t
+; RV64-NEXT: ret
+ %elt.head = insertelement <8 x i64> poison, i64 %b, i32 0
+ %vb = shufflevector <8 x i64> %elt.head, <8 x i64> poison, <8 x i32> zeroinitializer
+ %v = call <8 x i64> @llvm.vp.usub.sat.v8i64(<8 x i64> %va, <8 x i64> %vb, <8 x i1> %m, i32 %evl)
+ ret <8 x i64> %v
+}
+
+define <8 x i64> @vssubu_vx_v8i64_unmasked(<8 x i64> %va, i64 %b, i32 zeroext %evl) {
+; RV32-LABEL: vssubu_vx_v8i64_unmasked:
+; RV32: # %bb.0:
+; RV32-NEXT: addi sp, sp, -16
+; RV32-NEXT: .cfi_def_cfa_offset 16
+; RV32-NEXT: sw a1, 12(sp)
+; RV32-NEXT: sw a0, 8(sp)
+; RV32-NEXT: addi a0, sp, 8
+; RV32-NEXT: vsetivli zero, 8, e64, m4, ta, ma
+; RV32-NEXT: vlse64.v v12, (a0), zero
+; RV32-NEXT: vsetvli zero, a2, e64, m4, ta, ma
+; RV32-NEXT: vssubu.vv v8, v8, v12
+; RV32-NEXT: addi sp, sp, 16
+; RV32-NEXT: ret
+;
+; RV64-LABEL: vssubu_vx_v8i64_unmasked:
+; RV64: # %bb.0:
+; RV64-NEXT: vsetvli zero, a1, e64, m4, ta, ma
+; RV64-NEXT: vssubu.vx v8, v8, a0
+; RV64-NEXT: ret
+ %elt.head = insertelement <8 x i64> poison, i64 %b, i32 0
+ %vb = shufflevector <8 x i64> %elt.head, <8 x i64> poison, <8 x i32> zeroinitializer
+ %head = insertelement <8 x i1> poison, i1 true, i32 0
+ %m = shufflevector <8 x i1> %head, <8 x i1> poison, <8 x i32> zeroinitializer
+ %v = call <8 x i64> @llvm.vp.usub.sat.v8i64(<8 x i64> %va, <8 x i64> %vb, <8 x i1> %m, i32 %evl)
+ ret <8 x i64> %v
+}
+
+define <8 x i64> @vssubu_vi_v8i64(<8 x i64> %va, <8 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vssubu_vi_v8i64:
+; CHECK: # %bb.0:
+; CHECK-NEXT: li a1, -1
+; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma
+; CHECK-NEXT: vssubu.vx v8, v8, a1, v0.t
+; CHECK-NEXT: ret
+ %elt.head = insertelement <8 x i64> poison, i64 -1, i32 0
+ %vb = shufflevector <8 x i64> %elt.head, <8 x i64> poison, <8 x i32> zeroinitializer
+ %v = call <8 x i64> @llvm.vp.usub.sat.v8i64(<8 x i64> %va, <8 x i64> %vb, <8 x i1> %m, i32 %evl)
+ ret <8 x i64> %v
+}
+
+define <8 x i64> @vssubu_vi_v8i64_unmasked(<8 x i64> %va, i32 zeroext %evl) {
+; CHECK-LABEL: vssubu_vi_v8i64_unmasked:
+; CHECK: # %bb.0:
+; CHECK-NEXT: li a1, -1
+; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma
+; CHECK-NEXT: vssubu.vx v8, v8, a1
+; CHECK-NEXT: ret
+ %elt.head = insertelement <8 x i64> poison, i64 -1, i32 0
+ %vb = shufflevector <8 x i64> %elt.head, <8 x i64> poison, <8 x i32> zeroinitializer
+ %head = insertelement <8 x i1> poison, i1 true, i32 0
+ %m = shufflevector <8 x i1> %head, <8 x i1> poison, <8 x i32> zeroinitializer
+ %v = call <8 x i64> @llvm.vp.usub.sat.v8i64(<8 x i64> %va, <8 x i64> %vb, <8 x i1> %m, i32 %evl)
+ ret <8 x i64> %v
+}
+
+declare <16 x i64> @llvm.vp.usub.sat.v16i64(<16 x i64>, <16 x i64>, <16 x i1>, i32)
+
+define <16 x i64> @vssubu_vv_v16i64(<16 x i64> %va, <16 x i64> %b, <16 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vssubu_vv_v16i64:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
+; CHECK-NEXT: vssubu.vv v8, v8, v16, v0.t
+; CHECK-NEXT: ret
+ %v = call <16 x i64> @llvm.vp.usub.sat.v16i64(<16 x i64> %va, <16 x i64> %b, <16 x i1> %m, i32 %evl)
+ ret <16 x i64> %v
+}
+
+define <16 x i64> @vssubu_vv_v16i64_unmasked(<16 x i64> %va, <16 x i64> %b, i32 zeroext %evl) {
+; CHECK-LABEL: vssubu_vv_v16i64_unmasked:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
+; CHECK-NEXT: vssubu.vv v8, v8, v16
+; CHECK-NEXT: ret
+ %head = insertelement <16 x i1> poison, i1 true, i32 0
+ %m = shufflevector <16 x i1> %head, <16 x i1> poison, <16 x i32> zeroinitializer
+ %v = call <16 x i64> @llvm.vp.usub.sat.v16i64(<16 x i64> %va, <16 x i64> %b, <16 x i1> %m, i32 %evl)
+ ret <16 x i64> %v
+}
+
+define <16 x i64> @vssubu_vx_v16i64(<16 x i64> %va, i64 %b, <16 x i1> %m, i32 zeroext %evl) {
+; RV32-LABEL: vssubu_vx_v16i64:
+; RV32: # %bb.0:
+; RV32-NEXT: addi sp, sp, -16
+; RV32-NEXT: .cfi_def_cfa_offset 16
+; RV32-NEXT: sw a1, 12(sp)
+; RV32-NEXT: sw a0, 8(sp)
+; RV32-NEXT: addi a0, sp, 8
+; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, ma
+; RV32-NEXT: vlse64.v v16, (a0), zero
+; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, ma
+; RV32-NEXT: vssubu.vv v8, v8, v16, v0.t
+; RV32-NEXT: addi sp, sp, 16
+; RV32-NEXT: ret
+;
+; RV64-LABEL: vssubu_vx_v16i64:
+; RV64: # %bb.0:
+; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, ma
+; RV64-NEXT: vssubu.vx v8, v8, a0, v0.t
+; RV64-NEXT: ret
+ %elt.head = insertelement <16 x i64> poison, i64 %b, i32 0
+ %vb = shufflevector <16 x i64> %elt.head, <16 x i64> poison, <16 x i32> zeroinitializer
+ %v = call <16 x i64> @llvm.vp.usub.sat.v16i64(<16 x i64> %va, <16 x i64> %vb, <16 x i1> %m, i32 %evl)
+ ret <16 x i64> %v
+}
+
+define <16 x i64> @vssubu_vx_v16i64_unmasked(<16 x i64> %va, i64 %b, i32 zeroext %evl) {
+; RV32-LABEL: vssubu_vx_v16i64_unmasked:
+; RV32: # %bb.0:
+; RV32-NEXT: addi sp, sp, -16
+; RV32-NEXT: .cfi_def_cfa_offset 16
+; RV32-NEXT: sw a1, 12(sp)
+; RV32-NEXT: sw a0, 8(sp)
+; RV32-NEXT: addi a0, sp, 8
+; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, ma
+; RV32-NEXT: vlse64.v v16, (a0), zero
+; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, ma
+; RV32-NEXT: vssubu.vv v8, v8, v16
+; RV32-NEXT: addi sp, sp, 16
+; RV32-NEXT: ret
+;
+; RV64-LABEL: vssubu_vx_v16i64_unmasked:
+; RV64: # %bb.0:
+; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, ma
+; RV64-NEXT: vssubu.vx v8, v8, a0
+; RV64-NEXT: ret
+ %elt.head = insertelement <16 x i64> poison, i64 %b, i32 0
+ %vb = shufflevector <16 x i64> %elt.head, <16 x i64> poison, <16 x i32> zeroinitializer
+ %head = insertelement <16 x i1> poison, i1 true, i32 0
+ %m = shufflevector <16 x i1> %head, <16 x i1> poison, <16 x i32> zeroinitializer
+ %v = call <16 x i64> @llvm.vp.usub.sat.v16i64(<16 x i64> %va, <16 x i64> %vb, <16 x i1> %m, i32 %evl)
+ ret <16 x i64> %v
+}
+
+define <16 x i64> @vssubu_vi_v16i64(<16 x i64> %va, <16 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vssubu_vi_v16i64:
+; CHECK: # %bb.0:
+; CHECK-NEXT: li a1, -1
+; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
+; CHECK-NEXT: vssubu.vx v8, v8, a1, v0.t
+; CHECK-NEXT: ret
+ %elt.head = insertelement <16 x i64> poison, i64 -1, i32 0
+ %vb = shufflevector <16 x i64> %elt.head, <16 x i64> poison, <16 x i32> zeroinitializer
+ %v = call <16 x i64> @llvm.vp.usub.sat.v16i64(<16 x i64> %va, <16 x i64> %vb, <16 x i1> %m, i32 %evl)
+ ret <16 x i64> %v
+}
+
+define <16 x i64> @vssubu_vi_v16i64_unmasked(<16 x i64> %va, i32 zeroext %evl) {
+; CHECK-LABEL: vssubu_vi_v16i64_unmasked:
+; CHECK: # %bb.0:
+; CHECK-NEXT: li a1, -1
+; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
+; CHECK-NEXT: vssubu.vx v8, v8, a1
+; CHECK-NEXT: ret
+ %elt.head = insertelement <16 x i64> poison, i64 -1, i32 0
+ %vb = shufflevector <16 x i64> %elt.head, <16 x i64> poison, <16 x i32> zeroinitializer
+ %head = insertelement <16 x i1> poison, i1 true, i32 0
+ %m = shufflevector <16 x i1> %head, <16 x i1> poison, <16 x i32> zeroinitializer
+ %v = call <16 x i64> @llvm.vp.usub.sat.v16i64(<16 x i64> %va, <16 x i64> %vb, <16 x i1> %m, i32 %evl)
+ ret <16 x i64> %v
+}
+
+; Test that split-legalization works as expected.
+
+declare <32 x i64> @llvm.vp.usub.sat.v32i64(<32 x i64>, <32 x i64>, <32 x i1>, i32)
+
+define <32 x i64> @vssubu_vx_v32i64(<32 x i64> %va, <32 x i1> %m, i32 zeroext %evl) {
+; RV32-LABEL: vssubu_vx_v32i64:
+; RV32: # %bb.0:
+; RV32-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
+; RV32-NEXT: vslidedown.vi v1, v0, 2
+; RV32-NEXT: li a1, 32
+; RV32-NEXT: vsetvli zero, a1, e32, m8, ta, ma
+; RV32-NEXT: li a2, 16
+; RV32-NEXT: vmv.v.i v24, -1
+; RV32-NEXT: mv a1, a0
+; RV32-NEXT: bltu a0, a2, .LBB108_2
+; RV32-NEXT: # %bb.1:
+; RV32-NEXT: li a1, 16
+; RV32-NEXT: .LBB108_2:
+; RV32-NEXT: vsetvli zero, a1, e64, m8, ta, ma
+; RV32-NEXT: vssubu.vv v8, v8, v24, v0.t
+; RV32-NEXT: addi a1, a0, -16
+; RV32-NEXT: sltu a0, a0, a1
+; RV32-NEXT: addi a0, a0, -1
+; RV32-NEXT: and a0, a0, a1
+; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma
+; RV32-NEXT: vmv1r.v v0, v1
+; RV32-NEXT: vssubu.vv v16, v16, v24, v0.t
+; RV32-NEXT: ret
+;
+; RV64-LABEL: vssubu_vx_v32i64:
+; RV64: # %bb.0:
+; RV64-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
+; RV64-NEXT: li a2, 16
+; RV64-NEXT: vslidedown.vi v24, v0, 2
+; RV64-NEXT: mv a1, a0
+; RV64-NEXT: bltu a0, a2, .LBB108_2
+; RV64-NEXT: # %bb.1:
+; RV64-NEXT: li a1, 16
+; RV64-NEXT: .LBB108_2:
+; RV64-NEXT: li a2, -1
+; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, ma
+; RV64-NEXT: vssubu.vx v8, v8, a2, v0.t
+; RV64-NEXT: addi a1, a0, -16
+; RV64-NEXT: sltu a0, a0, a1
+; RV64-NEXT: addi a0, a0, -1
+; RV64-NEXT: and a0, a0, a1
+; RV64-NEXT: vsetvli zero, a0, e64, m8, ta, ma
+; RV64-NEXT: vmv1r.v v0, v24
+; RV64-NEXT: vssubu.vx v16, v16, a2, v0.t
+; RV64-NEXT: ret
+ %elt.head = insertelement <32 x i64> poison, i64 -1, i32 0
+ %vb = shufflevector <32 x i64> %elt.head, <32 x i64> poison, <32 x i32> zeroinitializer
+ %v = call <32 x i64> @llvm.vp.usub.sat.v32i64(<32 x i64> %va, <32 x i64> %vb, <32 x i1> %m, i32 %evl)
+ ret <32 x i64> %v
+}
+
+define <32 x i64> @vssubu_vi_v32i64_unmasked(<32 x i64> %va, i32 zeroext %evl) {
+; RV32-LABEL: vssubu_vi_v32i64_unmasked:
+; RV32: # %bb.0:
+; RV32-NEXT: li a1, 32
+; RV32-NEXT: vsetvli zero, a1, e32, m8, ta, ma
+; RV32-NEXT: li a2, 16
+; RV32-NEXT: vmv.v.i v24, -1
+; RV32-NEXT: mv a1, a0
+; RV32-NEXT: bltu a0, a2, .LBB109_2
+; RV32-NEXT: # %bb.1:
+; RV32-NEXT: li a1, 16
+; RV32-NEXT: .LBB109_2:
+; RV32-NEXT: vsetvli zero, a1, e64, m8, ta, ma
+; RV32-NEXT: vssubu.vv v8, v8, v24
+; RV32-NEXT: addi a1, a0, -16
+; RV32-NEXT: sltu a0, a0, a1
+; RV32-NEXT: addi a0, a0, -1
+; RV32-NEXT: and a0, a0, a1
+; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma
+; RV32-NEXT: vssubu.vv v16, v16, v24
+; RV32-NEXT: ret
+;
+; RV64-LABEL: vssubu_vi_v32i64_unmasked:
+; RV64: # %bb.0:
+; RV64-NEXT: li a2, 16
+; RV64-NEXT: mv a1, a0
+; RV64-NEXT: bltu a0, a2, .LBB109_2
+; RV64-NEXT: # %bb.1:
+; RV64-NEXT: li a1, 16
+; RV64-NEXT: .LBB109_2:
+; RV64-NEXT: li a2, -1
+; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, ma
+; RV64-NEXT: vssubu.vx v8, v8, a2
+; RV64-NEXT: addi a1, a0, -16
+; RV64-NEXT: sltu a0, a0, a1
+; RV64-NEXT: addi a0, a0, -1
+; RV64-NEXT: and a0, a0, a1
+; RV64-NEXT: vsetvli zero, a0, e64, m8, ta, ma
+; RV64-NEXT: vssubu.vx v16, v16, a2
+; RV64-NEXT: ret
+ %elt.head = insertelement <32 x i64> poison, i64 -1, i32 0
+ %vb = shufflevector <32 x i64> %elt.head, <32 x i64> poison, <32 x i32> zeroinitializer
+ %head = insertelement <32 x i1> poison, i1 true, i32 0
+ %m = shufflevector <32 x i1> %head, <32 x i1> poison, <32 x i32> zeroinitializer
+ %v = call <32 x i64> @llvm.vp.usub.sat.v32i64(<32 x i64> %va, <32 x i64> %vb, <32 x i1> %m, i32 %evl)
+ ret <32 x i64> %v
+}
+
+; FIXME: We don't match vssubu.vi on RV32.
+
+define <32 x i64> @vssubu_vx_v32i64_evl12(<32 x i64> %va, <32 x i1> %m) {
+; RV32-LABEL: vssubu_vx_v32i64_evl12:
+; RV32: # %bb.0:
+; RV32-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
+; RV32-NEXT: vslidedown.vi v1, v0, 2
+; RV32-NEXT: li a0, 32
+; RV32-NEXT: vsetvli zero, a0, e32, m8, ta, ma
+; RV32-NEXT: vmv.v.i v24, -1
+; RV32-NEXT: vsetivli zero, 12, e64, m8, ta, ma
+; RV32-NEXT: vssubu.vv v8, v8, v24, v0.t
+; RV32-NEXT: vsetivli zero, 0, e64, m8, ta, ma
+; RV32-NEXT: vmv1r.v v0, v1
+; RV32-NEXT: vssubu.vv v16, v16, v24, v0.t
+; RV32-NEXT: ret
+;
+; RV64-LABEL: vssubu_vx_v32i64_evl12:
+; RV64: # %bb.0:
+; RV64-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
+; RV64-NEXT: vslidedown.vi v24, v0, 2
+; RV64-NEXT: li a0, -1
+; RV64-NEXT: vsetivli zero, 12, e64, m8, ta, ma
+; RV64-NEXT: vssubu.vx v8, v8, a0, v0.t
+; RV64-NEXT: vsetivli zero, 0, e64, m8, ta, ma
+; RV64-NEXT: vmv1r.v v0, v24
+; RV64-NEXT: vssubu.vx v16, v16, a0, v0.t
+; RV64-NEXT: ret
+ %elt.head = insertelement <32 x i64> poison, i64 -1, i32 0
+ %vb = shufflevector <32 x i64> %elt.head, <32 x i64> poison, <32 x i32> zeroinitializer
+ %v = call <32 x i64> @llvm.vp.usub.sat.v32i64(<32 x i64> %va, <32 x i64> %vb, <32 x i1> %m, i32 12)
+ ret <32 x i64> %v
+}
+
+define <32 x i64> @vssubu_vx_v32i64_evl27(<32 x i64> %va, <32 x i1> %m) {
+; RV32-LABEL: vssubu_vx_v32i64_evl27:
+; RV32: # %bb.0:
+; RV32-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
+; RV32-NEXT: vslidedown.vi v1, v0, 2
+; RV32-NEXT: li a0, 32
+; RV32-NEXT: vsetvli zero, a0, e32, m8, ta, ma
+; RV32-NEXT: vmv.v.i v24, -1
+; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, ma
+; RV32-NEXT: vssubu.vv v8, v8, v24, v0.t
+; RV32-NEXT: vsetivli zero, 11, e64, m8, ta, ma
+; RV32-NEXT: vmv1r.v v0, v1
+; RV32-NEXT: vssubu.vv v16, v16, v24, v0.t
+; RV32-NEXT: ret
+;
+; RV64-LABEL: vssubu_vx_v32i64_evl27:
+; RV64: # %bb.0:
+; RV64-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
+; RV64-NEXT: vslidedown.vi v24, v0, 2
+; RV64-NEXT: li a0, -1
+; RV64-NEXT: vsetivli zero, 16, e64, m8, ta, ma
+; RV64-NEXT: vssubu.vx v8, v8, a0, v0.t
+; RV64-NEXT: vsetivli zero, 11, e64, m8, ta, ma
+; RV64-NEXT: vmv1r.v v0, v24
+; RV64-NEXT: vssubu.vx v16, v16, a0, v0.t
+; RV64-NEXT: ret
+ %elt.head = insertelement <32 x i64> poison, i64 -1, i32 0
+ %vb = shufflevector <32 x i64> %elt.head, <32 x i64> poison, <32 x i32> zeroinitializer
+ %v = call <32 x i64> @llvm.vp.usub.sat.v32i64(<32 x i64> %va, <32 x i64> %vb, <32 x i1> %m, i32 27)
+ ret <32 x i64> %v
+}
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vwsubu.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vwsubu.ll
index 574c2652ccfa..a084b5383b40 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vwsubu.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vwsubu.ll
@@ -385,12 +385,12 @@ define <32 x i64> @vwsubu_v32i64(ptr %x, ptr %y) nounwind {
define <2 x i32> @vwsubu_v2i32_v2i8(ptr %x, ptr %y) {
; CHECK-LABEL: vwsubu_v2i32_v2i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 2, e16, mf4, ta, ma
+; CHECK-NEXT: vsetivli zero, 2, e8, mf8, ta, ma
; CHECK-NEXT: vle8.v v8, (a0)
; CHECK-NEXT: vle8.v v9, (a1)
-; CHECK-NEXT: vzext.vf2 v10, v8
-; CHECK-NEXT: vzext.vf2 v11, v9
-; CHECK-NEXT: vwsubu.vv v8, v10, v11
+; CHECK-NEXT: vwsubu.vv v10, v8, v9
+; CHECK-NEXT: vsetvli zero, zero, e32, mf2, ta, ma
+; CHECK-NEXT: vsext.vf2 v8, v10
; CHECK-NEXT: ret
%a = load <2 x i8>, ptr %x
%b = load <2 x i8>, ptr %y
@@ -899,12 +899,12 @@ define <2 x i64> @vwsubu_vx_v2i64_i64(ptr %x, ptr %y) nounwind {
define <2 x i32> @vwsubu_v2i32_of_v2i8(ptr %x, ptr %y) {
; CHECK-LABEL: vwsubu_v2i32_of_v2i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 2, e16, mf4, ta, ma
+; CHECK-NEXT: vsetivli zero, 2, e8, mf8, ta, ma
; CHECK-NEXT: vle8.v v8, (a0)
; CHECK-NEXT: vle8.v v9, (a1)
-; CHECK-NEXT: vzext.vf2 v10, v8
-; CHECK-NEXT: vzext.vf2 v11, v9
-; CHECK-NEXT: vwsubu.vv v8, v10, v11
+; CHECK-NEXT: vwsubu.vv v10, v8, v9
+; CHECK-NEXT: vsetvli zero, zero, e32, mf2, ta, ma
+; CHECK-NEXT: vsext.vf2 v8, v10
; CHECK-NEXT: ret
%a = load <2 x i8>, ptr %x
%b = load <2 x i8>, ptr %y
@@ -917,12 +917,12 @@ define <2 x i32> @vwsubu_v2i32_of_v2i8(ptr %x, ptr %y) {
define <2 x i64> @vwsubu_v2i64_of_v2i8(ptr %x, ptr %y) {
; CHECK-LABEL: vwsubu_v2i64_of_v2i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 2, e32, mf2, ta, ma
+; CHECK-NEXT: vsetivli zero, 2, e8, mf8, ta, ma
; CHECK-NEXT: vle8.v v8, (a0)
; CHECK-NEXT: vle8.v v9, (a1)
-; CHECK-NEXT: vzext.vf4 v10, v8
-; CHECK-NEXT: vzext.vf4 v11, v9
-; CHECK-NEXT: vwsubu.vv v8, v10, v11
+; CHECK-NEXT: vwsubu.vv v10, v8, v9
+; CHECK-NEXT: vsetvli zero, zero, e64, m1, ta, ma
+; CHECK-NEXT: vsext.vf4 v8, v10
; CHECK-NEXT: ret
%a = load <2 x i8>, ptr %x
%b = load <2 x i8>, ptr %y
@@ -935,12 +935,12 @@ define <2 x i64> @vwsubu_v2i64_of_v2i8(ptr %x, ptr %y) {
define <2 x i64> @vwsubu_v2i64_of_v2i16(ptr %x, ptr %y) {
; CHECK-LABEL: vwsubu_v2i64_of_v2i16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 2, e32, mf2, ta, ma
+; CHECK-NEXT: vsetivli zero, 2, e16, mf4, ta, ma
; CHECK-NEXT: vle16.v v8, (a0)
; CHECK-NEXT: vle16.v v9, (a1)
-; CHECK-NEXT: vzext.vf2 v10, v8
-; CHECK-NEXT: vzext.vf2 v11, v9
-; CHECK-NEXT: vwsubu.vv v8, v10, v11
+; CHECK-NEXT: vwsubu.vv v10, v8, v9
+; CHECK-NEXT: vsetvli zero, zero, e64, m1, ta, ma
+; CHECK-NEXT: vsext.vf2 v8, v10
; CHECK-NEXT: ret
%a = load <2 x i16>, ptr %x
%b = load <2 x i16>, ptr %y
diff --git a/llvm/test/CodeGen/RISCV/rvv/vsadd-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vsadd-vp.ll
new file mode 100644
index 000000000000..caaeae55ed78
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rvv/vsadd-vp.ll
@@ -0,0 +1,2015 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=riscv32 -mattr=+v -verify-machineinstrs < %s \
+; RUN: | FileCheck %s --check-prefixes=CHECK,RV32
+; RUN: llc -mtriple=riscv64 -mattr=+v -verify-machineinstrs < %s \
+; RUN: | FileCheck %s --check-prefixes=CHECK,RV64
+
+declare <vscale x 8 x i7> @llvm.vp.sadd.sat.nxv8i7(<vscale x 8 x i7>, <vscale x 8 x i7>, <vscale x 8 x i1>, i32)
+
+define <vscale x 8 x i7> @vsadd_vx_nxv8i7(<vscale x 8 x i7> %a, i7 signext %b, <vscale x 8 x i1> %mask, i32 zeroext %evl) {
+; CHECK-LABEL: vsadd_vx_nxv8i7:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a2, zero, e8, m1, ta, ma
+; CHECK-NEXT: vadd.vv v8, v8, v8
+; CHECK-NEXT: vsra.vi v8, v8, 1
+; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma
+; CHECK-NEXT: vadd.vx v8, v8, a0, v0.t
+; CHECK-NEXT: li a0, 63
+; CHECK-NEXT: vmin.vx v8, v8, a0, v0.t
+; CHECK-NEXT: li a0, 192
+; CHECK-NEXT: vmax.vx v8, v8, a0, v0.t
+; CHECK-NEXT: ret
+ %elt.head = insertelement <vscale x 8 x i7> poison, i7 %b, i32 0
+ %vb = shufflevector <vscale x 8 x i7> %elt.head, <vscale x 8 x i7> poison, <vscale x 8 x i32> zeroinitializer
+ %v = call <vscale x 8 x i7> @llvm.vp.sadd.sat.nxv8i7(<vscale x 8 x i7> %a, <vscale x 8 x i7> %vb, <vscale x 8 x i1> %mask, i32 %evl)
+ ret <vscale x 8 x i7> %v
+}
+
+declare <vscale x 1 x i8> @llvm.vp.sadd.sat.nxv1i8(<vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i1>, i32)
+
+define <vscale x 1 x i8> @vsadd_vv_nxv1i8(<vscale x 1 x i8> %va, <vscale x 1 x i8> %b, <vscale x 1 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vsadd_vv_nxv1i8:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma
+; CHECK-NEXT: vsadd.vv v8, v8, v9, v0.t
+; CHECK-NEXT: ret
+ %v = call <vscale x 1 x i8> @llvm.vp.sadd.sat.nxv1i8(<vscale x 1 x i8> %va, <vscale x 1 x i8> %b, <vscale x 1 x i1> %m, i32 %evl)
+ ret <vscale x 1 x i8> %v
+}
+
+define <vscale x 1 x i8> @vsadd_vv_nxv1i8_unmasked(<vscale x 1 x i8> %va, <vscale x 1 x i8> %b, i32 zeroext %evl) {
+; CHECK-LABEL: vsadd_vv_nxv1i8_unmasked:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma
+; CHECK-NEXT: vsadd.vv v8, v8, v9
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 1 x i1> poison, i1 true, i32 0
+ %m = shufflevector <vscale x 1 x i1> %head, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
+ %v = call <vscale x 1 x i8> @llvm.vp.sadd.sat.nxv1i8(<vscale x 1 x i8> %va, <vscale x 1 x i8> %b, <vscale x 1 x i1> %m, i32 %evl)
+ ret <vscale x 1 x i8> %v
+}
+
+define <vscale x 1 x i8> @vsadd_vx_nxv1i8(<vscale x 1 x i8> %va, i8 %b, <vscale x 1 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vsadd_vx_nxv1i8:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
+; CHECK-NEXT: vsadd.vx v8, v8, a0, v0.t
+; CHECK-NEXT: ret
+ %elt.head = insertelement <vscale x 1 x i8> poison, i8 %b, i32 0
+ %vb = shufflevector <vscale x 1 x i8> %elt.head, <vscale x 1 x i8> poison, <vscale x 1 x i32> zeroinitializer
+ %v = call <vscale x 1 x i8> @llvm.vp.sadd.sat.nxv1i8(<vscale x 1 x i8> %va, <vscale x 1 x i8> %vb, <vscale x 1 x i1> %m, i32 %evl)
+ ret <vscale x 1 x i8> %v
+}
+
+define <vscale x 1 x i8> @vsadd_vx_nxv1i8_commute(<vscale x 1 x i8> %va, i8 %b, <vscale x 1 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vsadd_vx_nxv1i8_commute:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
+; CHECK-NEXT: vsadd.vx v8, v8, a0, v0.t
+; CHECK-NEXT: ret
+ %elt.head = insertelement <vscale x 1 x i8> poison, i8 %b, i32 0
+ %vb = shufflevector <vscale x 1 x i8> %elt.head, <vscale x 1 x i8> poison, <vscale x 1 x i32> zeroinitializer
+ %v = call <vscale x 1 x i8> @llvm.vp.sadd.sat.nxv1i8(<vscale x 1 x i8> %vb, <vscale x 1 x i8> %va, <vscale x 1 x i1> %m, i32 %evl)
+ ret <vscale x 1 x i8> %v
+}
+
+define <vscale x 1 x i8> @vsadd_vx_nxv1i8_unmasked(<vscale x 1 x i8> %va, i8 %b, i32 zeroext %evl) {
+; CHECK-LABEL: vsadd_vx_nxv1i8_unmasked:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
+; CHECK-NEXT: vsadd.vx v8, v8, a0
+; CHECK-NEXT: ret
+ %elt.head = insertelement <vscale x 1 x i8> poison, i8 %b, i32 0
+ %vb = shufflevector <vscale x 1 x i8> %elt.head, <vscale x 1 x i8> poison, <vscale x 1 x i32> zeroinitializer
+ %head = insertelement <vscale x 1 x i1> poison, i1 true, i32 0
+ %m = shufflevector <vscale x 1 x i1> %head, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
+ %v = call <vscale x 1 x i8> @llvm.vp.sadd.sat.nxv1i8(<vscale x 1 x i8> %va, <vscale x 1 x i8> %vb, <vscale x 1 x i1> %m, i32 %evl)
+ ret <vscale x 1 x i8> %v
+}
+
+define <vscale x 1 x i8> @vsadd_vi_nxv1i8(<vscale x 1 x i8> %va, <vscale x 1 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vsadd_vi_nxv1i8:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma
+; CHECK-NEXT: vsadd.vi v8, v8, -1, v0.t
+; CHECK-NEXT: ret
+ %elt.head = insertelement <vscale x 1 x i8> poison, i8 -1, i32 0
+ %vb = shufflevector <vscale x 1 x i8> %elt.head, <vscale x 1 x i8> poison, <vscale x 1 x i32> zeroinitializer
+ %v = call <vscale x 1 x i8> @llvm.vp.sadd.sat.nxv1i8(<vscale x 1 x i8> %va, <vscale x 1 x i8> %vb, <vscale x 1 x i1> %m, i32 %evl)
+ ret <vscale x 1 x i8> %v
+}
+
+define <vscale x 1 x i8> @vsadd_vi_nxv1i8_unmasked(<vscale x 1 x i8> %va, i32 zeroext %evl) {
+; CHECK-LABEL: vsadd_vi_nxv1i8_unmasked:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma
+; CHECK-NEXT: vsadd.vi v8, v8, -1
+; CHECK-NEXT: ret
+ %elt.head = insertelement <vscale x 1 x i8> poison, i8 -1, i32 0
+ %vb = shufflevector <vscale x 1 x i8> %elt.head, <vscale x 1 x i8> poison, <vscale x 1 x i32> zeroinitializer
+ %head = insertelement <vscale x 1 x i1> poison, i1 true, i32 0
+ %m = shufflevector <vscale x 1 x i1> %head, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
+ %v = call <vscale x 1 x i8> @llvm.vp.sadd.sat.nxv1i8(<vscale x 1 x i8> %va, <vscale x 1 x i8> %vb, <vscale x 1 x i1> %m, i32 %evl)
+ ret <vscale x 1 x i8> %v
+}
+
+declare <vscale x 2 x i8> @llvm.vp.sadd.sat.nxv2i8(<vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i1>, i32)
+
+define <vscale x 2 x i8> @vsadd_vv_nxv2i8(<vscale x 2 x i8> %va, <vscale x 2 x i8> %b, <vscale x 2 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vsadd_vv_nxv2i8:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma
+; CHECK-NEXT: vsadd.vv v8, v8, v9, v0.t
+; CHECK-NEXT: ret
+ %v = call <vscale x 2 x i8> @llvm.vp.sadd.sat.nxv2i8(<vscale x 2 x i8> %va, <vscale x 2 x i8> %b, <vscale x 2 x i1> %m, i32 %evl)
+ ret <vscale x 2 x i8> %v
+}
+
+define <vscale x 2 x i8> @vsadd_vv_nxv2i8_unmasked(<vscale x 2 x i8> %va, <vscale x 2 x i8> %b, i32 zeroext %evl) {
+; CHECK-LABEL: vsadd_vv_nxv2i8_unmasked:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma
+; CHECK-NEXT: vsadd.vv v8, v8, v9
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 2 x i1> poison, i1 true, i32 0
+ %m = shufflevector <vscale x 2 x i1> %head, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
+ %v = call <vscale x 2 x i8> @llvm.vp.sadd.sat.nxv2i8(<vscale x 2 x i8> %va, <vscale x 2 x i8> %b, <vscale x 2 x i1> %m, i32 %evl)
+ ret <vscale x 2 x i8> %v
+}
+
+define <vscale x 2 x i8> @vsadd_vx_nxv2i8(<vscale x 2 x i8> %va, i8 %b, <vscale x 2 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vsadd_vx_nxv2i8:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma
+; CHECK-NEXT: vsadd.vx v8, v8, a0, v0.t
+; CHECK-NEXT: ret
+ %elt.head = insertelement <vscale x 2 x i8> poison, i8 %b, i32 0
+ %vb = shufflevector <vscale x 2 x i8> %elt.head, <vscale x 2 x i8> poison, <vscale x 2 x i32> zeroinitializer
+ %v = call <vscale x 2 x i8> @llvm.vp.sadd.sat.nxv2i8(<vscale x 2 x i8> %va, <vscale x 2 x i8> %vb, <vscale x 2 x i1> %m, i32 %evl)
+ ret <vscale x 2 x i8> %v
+}
+
+define <vscale x 2 x i8> @vsadd_vx_nxv2i8_unmasked(<vscale x 2 x i8> %va, i8 %b, i32 zeroext %evl) {
+; CHECK-LABEL: vsadd_vx_nxv2i8_unmasked:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma
+; CHECK-NEXT: vsadd.vx v8, v8, a0
+; CHECK-NEXT: ret
+ %elt.head = insertelement <vscale x 2 x i8> poison, i8 %b, i32 0
+ %vb = shufflevector <vscale x 2 x i8> %elt.head, <vscale x 2 x i8> poison, <vscale x 2 x i32> zeroinitializer
+ %head = insertelement <vscale x 2 x i1> poison, i1 true, i32 0
+ %m = shufflevector <vscale x 2 x i1> %head, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
+ %v = call <vscale x 2 x i8> @llvm.vp.sadd.sat.nxv2i8(<vscale x 2 x i8> %va, <vscale x 2 x i8> %vb, <vscale x 2 x i1> %m, i32 %evl)
+ ret <vscale x 2 x i8> %v
+}
+
+define <vscale x 2 x i8> @vsadd_vi_nxv2i8(<vscale x 2 x i8> %va, <vscale x 2 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vsadd_vi_nxv2i8:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma
+; CHECK-NEXT: vsadd.vi v8, v8, -1, v0.t
+; CHECK-NEXT: ret
+ %elt.head = insertelement <vscale x 2 x i8> poison, i8 -1, i32 0
+ %vb = shufflevector <vscale x 2 x i8> %elt.head, <vscale x 2 x i8> poison, <vscale x 2 x i32> zeroinitializer
+ %v = call <vscale x 2 x i8> @llvm.vp.sadd.sat.nxv2i8(<vscale x 2 x i8> %va, <vscale x 2 x i8> %vb, <vscale x 2 x i1> %m, i32 %evl)
+ ret <vscale x 2 x i8> %v
+}
+
+define <vscale x 2 x i8> @vsadd_vi_nxv2i8_unmasked(<vscale x 2 x i8> %va, i32 zeroext %evl) {
+; CHECK-LABEL: vsadd_vi_nxv2i8_unmasked:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma
+; CHECK-NEXT: vsadd.vi v8, v8, -1
+; CHECK-NEXT: ret
+ %elt.head = insertelement <vscale x 2 x i8> poison, i8 -1, i32 0
+ %vb = shufflevector <vscale x 2 x i8> %elt.head, <vscale x 2 x i8> poison, <vscale x 2 x i32> zeroinitializer
+ %head = insertelement <vscale x 2 x i1> poison, i1 true, i32 0
+ %m = shufflevector <vscale x 2 x i1> %head, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
+ %v = call <vscale x 2 x i8> @llvm.vp.sadd.sat.nxv2i8(<vscale x 2 x i8> %va, <vscale x 2 x i8> %vb, <vscale x 2 x i1> %m, i32 %evl)
+ ret <vscale x 2 x i8> %v
+}
+
+declare <vscale x 3 x i8> @llvm.vp.sadd.sat.nxv3i8(<vscale x 3 x i8>, <vscale x 3 x i8>, <vscale x 3 x i1>, i32)
+
+define <vscale x 3 x i8> @vsadd_vv_nxv3i8(<vscale x 3 x i8> %va, <vscale x 3 x i8> %b, <vscale x 3 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vsadd_vv_nxv3i8:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma
+; CHECK-NEXT: vsadd.vv v8, v8, v9, v0.t
+; CHECK-NEXT: ret
+ %v = call <vscale x 3 x i8> @llvm.vp.sadd.sat.nxv3i8(<vscale x 3 x i8> %va, <vscale x 3 x i8> %b, <vscale x 3 x i1> %m, i32 %evl)
+ ret <vscale x 3 x i8> %v
+}
+
+define <vscale x 3 x i8> @vsadd_vv_nxv3i8_unmasked(<vscale x 3 x i8> %va, <vscale x 3 x i8> %b, i32 zeroext %evl) {
+; CHECK-LABEL: vsadd_vv_nxv3i8_unmasked:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma
+; CHECK-NEXT: vsadd.vv v8, v8, v9
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 3 x i1> poison, i1 true, i32 0
+ %m = shufflevector <vscale x 3 x i1> %head, <vscale x 3 x i1> poison, <vscale x 3 x i32> zeroinitializer
+ %v = call <vscale x 3 x i8> @llvm.vp.sadd.sat.nxv3i8(<vscale x 3 x i8> %va, <vscale x 3 x i8> %b, <vscale x 3 x i1> %m, i32 %evl)
+ ret <vscale x 3 x i8> %v
+}
+
+define <vscale x 3 x i8> @vsadd_vx_nxv3i8(<vscale x 3 x i8> %va, i8 %b, <vscale x 3 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vsadd_vx_nxv3i8:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma
+; CHECK-NEXT: vsadd.vx v8, v8, a0, v0.t
+; CHECK-NEXT: ret
+ %elt.head = insertelement <vscale x 3 x i8> poison, i8 %b, i32 0
+ %vb = shufflevector <vscale x 3 x i8> %elt.head, <vscale x 3 x i8> poison, <vscale x 3 x i32> zeroinitializer
+ %v = call <vscale x 3 x i8> @llvm.vp.sadd.sat.nxv3i8(<vscale x 3 x i8> %va, <vscale x 3 x i8> %vb, <vscale x 3 x i1> %m, i32 %evl)
+ ret <vscale x 3 x i8> %v
+}
+
+define <vscale x 3 x i8> @vsadd_vx_nxv3i8_unmasked(<vscale x 3 x i8> %va, i8 %b, i32 zeroext %evl) {
+; CHECK-LABEL: vsadd_vx_nxv3i8_unmasked:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma
+; CHECK-NEXT: vsadd.vx v8, v8, a0
+; CHECK-NEXT: ret
+ %elt.head = insertelement <vscale x 3 x i8> poison, i8 %b, i32 0
+ %vb = shufflevector <vscale x 3 x i8> %elt.head, <vscale x 3 x i8> poison, <vscale x 3 x i32> zeroinitializer
+ %head = insertelement <vscale x 3 x i1> poison, i1 true, i32 0
+ %m = shufflevector <vscale x 3 x i1> %head, <vscale x 3 x i1> poison, <vscale x 3 x i32> zeroinitializer
+ %v = call <vscale x 3 x i8> @llvm.vp.sadd.sat.nxv3i8(<vscale x 3 x i8> %va, <vscale x 3 x i8> %vb, <vscale x 3 x i1> %m, i32 %evl)
+ ret <vscale x 3 x i8> %v
+}
+
+define <vscale x 3 x i8> @vsadd_vi_nxv3i8(<vscale x 3 x i8> %va, <vscale x 3 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vsadd_vi_nxv3i8:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma
+; CHECK-NEXT: vsadd.vi v8, v8, -1, v0.t
+; CHECK-NEXT: ret
+ %elt.head = insertelement <vscale x 3 x i8> poison, i8 -1, i32 0
+ %vb = shufflevector <vscale x 3 x i8> %elt.head, <vscale x 3 x i8> poison, <vscale x 3 x i32> zeroinitializer
+ %v = call <vscale x 3 x i8> @llvm.vp.sadd.sat.nxv3i8(<vscale x 3 x i8> %va, <vscale x 3 x i8> %vb, <vscale x 3 x i1> %m, i32 %evl)
+ ret <vscale x 3 x i8> %v
+}
+
+define <vscale x 3 x i8> @vsadd_vi_nxv3i8_unmasked(<vscale x 3 x i8> %va, i32 zeroext %evl) {
+; CHECK-LABEL: vsadd_vi_nxv3i8_unmasked:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma
+; CHECK-NEXT: vsadd.vi v8, v8, -1
+; CHECK-NEXT: ret
+ %elt.head = insertelement <vscale x 3 x i8> poison, i8 -1, i32 0
+ %vb = shufflevector <vscale x 3 x i8> %elt.head, <vscale x 3 x i8> poison, <vscale x 3 x i32> zeroinitializer
+ %head = insertelement <vscale x 3 x i1> poison, i1 true, i32 0
+ %m = shufflevector <vscale x 3 x i1> %head, <vscale x 3 x i1> poison, <vscale x 3 x i32> zeroinitializer
+ %v = call <vscale x 3 x i8> @llvm.vp.sadd.sat.nxv3i8(<vscale x 3 x i8> %va, <vscale x 3 x i8> %vb, <vscale x 3 x i1> %m, i32 %evl)
+ ret <vscale x 3 x i8> %v
+}
+
+declare <vscale x 4 x i8> @llvm.vp.sadd.sat.nxv4i8(<vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i1>, i32)
+
+define <vscale x 4 x i8> @vsadd_vv_nxv4i8(<vscale x 4 x i8> %va, <vscale x 4 x i8> %b, <vscale x 4 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vsadd_vv_nxv4i8:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma
+; CHECK-NEXT: vsadd.vv v8, v8, v9, v0.t
+; CHECK-NEXT: ret
+ %v = call <vscale x 4 x i8> @llvm.vp.sadd.sat.nxv4i8(<vscale x 4 x i8> %va, <vscale x 4 x i8> %b, <vscale x 4 x i1> %m, i32 %evl)
+ ret <vscale x 4 x i8> %v
+}
+
+define <vscale x 4 x i8> @vsadd_vv_nxv4i8_unmasked(<vscale x 4 x i8> %va, <vscale x 4 x i8> %b, i32 zeroext %evl) {
+; CHECK-LABEL: vsadd_vv_nxv4i8_unmasked:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma
+; CHECK-NEXT: vsadd.vv v8, v8, v9
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 4 x i1> poison, i1 true, i32 0
+ %m = shufflevector <vscale x 4 x i1> %head, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
+ %v = call <vscale x 4 x i8> @llvm.vp.sadd.sat.nxv4i8(<vscale x 4 x i8> %va, <vscale x 4 x i8> %b, <vscale x 4 x i1> %m, i32 %evl)
+ ret <vscale x 4 x i8> %v
+}
+
+define <vscale x 4 x i8> @vsadd_vx_nxv4i8(<vscale x 4 x i8> %va, i8 %b, <vscale x 4 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vsadd_vx_nxv4i8:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma
+; CHECK-NEXT: vsadd.vx v8, v8, a0, v0.t
+; CHECK-NEXT: ret
+ %elt.head = insertelement <vscale x 4 x i8> poison, i8 %b, i32 0
+ %vb = shufflevector <vscale x 4 x i8> %elt.head, <vscale x 4 x i8> poison, <vscale x 4 x i32> zeroinitializer
+ %v = call <vscale x 4 x i8> @llvm.vp.sadd.sat.nxv4i8(<vscale x 4 x i8> %va, <vscale x 4 x i8> %vb, <vscale x 4 x i1> %m, i32 %evl)
+ ret <vscale x 4 x i8> %v
+}
+
+define <vscale x 4 x i8> @vsadd_vx_nxv4i8_unmasked(<vscale x 4 x i8> %va, i8 %b, i32 zeroext %evl) {
+; CHECK-LABEL: vsadd_vx_nxv4i8_unmasked:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma
+; CHECK-NEXT: vsadd.vx v8, v8, a0
+; CHECK-NEXT: ret
+ %elt.head = insertelement <vscale x 4 x i8> poison, i8 %b, i32 0
+ %vb = shufflevector <vscale x 4 x i8> %elt.head, <vscale x 4 x i8> poison, <vscale x 4 x i32> zeroinitializer
+ %head = insertelement <vscale x 4 x i1> poison, i1 true, i32 0
+ %m = shufflevector <vscale x 4 x i1> %head, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
+ %v = call <vscale x 4 x i8> @llvm.vp.sadd.sat.nxv4i8(<vscale x 4 x i8> %va, <vscale x 4 x i8> %vb, <vscale x 4 x i1> %m, i32 %evl)
+ ret <vscale x 4 x i8> %v
+}
+
+define <vscale x 4 x i8> @vsadd_vi_nxv4i8(<vscale x 4 x i8> %va, <vscale x 4 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vsadd_vi_nxv4i8:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma
+; CHECK-NEXT: vsadd.vi v8, v8, -1, v0.t
+; CHECK-NEXT: ret
+ %elt.head = insertelement <vscale x 4 x i8> poison, i8 -1, i32 0
+ %vb = shufflevector <vscale x 4 x i8> %elt.head, <vscale x 4 x i8> poison, <vscale x 4 x i32> zeroinitializer
+ %v = call <vscale x 4 x i8> @llvm.vp.sadd.sat.nxv4i8(<vscale x 4 x i8> %va, <vscale x 4 x i8> %vb, <vscale x 4 x i1> %m, i32 %evl)
+ ret <vscale x 4 x i8> %v
+}
+
+define <vscale x 4 x i8> @vsadd_vi_nxv4i8_unmasked(<vscale x 4 x i8> %va, i32 zeroext %evl) {
+; CHECK-LABEL: vsadd_vi_nxv4i8_unmasked:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma
+; CHECK-NEXT: vsadd.vi v8, v8, -1
+; CHECK-NEXT: ret
+ %elt.head = insertelement <vscale x 4 x i8> poison, i8 -1, i32 0
+ %vb = shufflevector <vscale x 4 x i8> %elt.head, <vscale x 4 x i8> poison, <vscale x 4 x i32> zeroinitializer
+ %head = insertelement <vscale x 4 x i1> poison, i1 true, i32 0
+ %m = shufflevector <vscale x 4 x i1> %head, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
+ %v = call <vscale x 4 x i8> @llvm.vp.sadd.sat.nxv4i8(<vscale x 4 x i8> %va, <vscale x 4 x i8> %vb, <vscale x 4 x i1> %m, i32 %evl)
+ ret <vscale x 4 x i8> %v
+}
+
+declare <vscale x 8 x i8> @llvm.vp.sadd.sat.nxv8i8(<vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i1>, i32)
+
+define <vscale x 8 x i8> @vsadd_vv_nxv8i8(<vscale x 8 x i8> %va, <vscale x 8 x i8> %b, <vscale x 8 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vsadd_vv_nxv8i8:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma
+; CHECK-NEXT: vsadd.vv v8, v8, v9, v0.t
+; CHECK-NEXT: ret
+ %v = call <vscale x 8 x i8> @llvm.vp.sadd.sat.nxv8i8(<vscale x 8 x i8> %va, <vscale x 8 x i8> %b, <vscale x 8 x i1> %m, i32 %evl)
+ ret <vscale x 8 x i8> %v
+}
+
+define <vscale x 8 x i8> @vsadd_vv_nxv8i8_unmasked(<vscale x 8 x i8> %va, <vscale x 8 x i8> %b, i32 zeroext %evl) {
+; CHECK-LABEL: vsadd_vv_nxv8i8_unmasked:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma
+; CHECK-NEXT: vsadd.vv v8, v8, v9
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i1> poison, i1 true, i32 0
+ %m = shufflevector <vscale x 8 x i1> %head, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
+ %v = call <vscale x 8 x i8> @llvm.vp.sadd.sat.nxv8i8(<vscale x 8 x i8> %va, <vscale x 8 x i8> %b, <vscale x 8 x i1> %m, i32 %evl)
+ ret <vscale x 8 x i8> %v
+}
+
+define <vscale x 8 x i8> @vsadd_vx_nxv8i8(<vscale x 8 x i8> %va, i8 %b, <vscale x 8 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vsadd_vx_nxv8i8:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma
+; CHECK-NEXT: vsadd.vx v8, v8, a0, v0.t
+; CHECK-NEXT: ret
+ %elt.head = insertelement <vscale x 8 x i8> poison, i8 %b, i32 0
+ %vb = shufflevector <vscale x 8 x i8> %elt.head, <vscale x 8 x i8> poison, <vscale x 8 x i32> zeroinitializer
+ %v = call <vscale x 8 x i8> @llvm.vp.sadd.sat.nxv8i8(<vscale x 8 x i8> %va, <vscale x 8 x i8> %vb, <vscale x 8 x i1> %m, i32 %evl)
+ ret <vscale x 8 x i8> %v
+}
+
+define <vscale x 8 x i8> @vsadd_vx_nxv8i8_unmasked(<vscale x 8 x i8> %va, i8 %b, i32 zeroext %evl) {
+; CHECK-LABEL: vsadd_vx_nxv8i8_unmasked:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma
+; CHECK-NEXT: vsadd.vx v8, v8, a0
+; CHECK-NEXT: ret
+ %elt.head = insertelement <vscale x 8 x i8> poison, i8 %b, i32 0
+ %vb = shufflevector <vscale x 8 x i8> %elt.head, <vscale x 8 x i8> poison, <vscale x 8 x i32> zeroinitializer
+ %head = insertelement <vscale x 8 x i1> poison, i1 true, i32 0
+ %m = shufflevector <vscale x 8 x i1> %head, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
+ %v = call <vscale x 8 x i8> @llvm.vp.sadd.sat.nxv8i8(<vscale x 8 x i8> %va, <vscale x 8 x i8> %vb, <vscale x 8 x i1> %m, i32 %evl)
+ ret <vscale x 8 x i8> %v
+}
+
+define <vscale x 8 x i8> @vsadd_vi_nxv8i8(<vscale x 8 x i8> %va, <vscale x 8 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vsadd_vi_nxv8i8:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma
+; CHECK-NEXT: vsadd.vi v8, v8, -1, v0.t
+; CHECK-NEXT: ret
+ %elt.head = insertelement <vscale x 8 x i8> poison, i8 -1, i32 0
+ %vb = shufflevector <vscale x 8 x i8> %elt.head, <vscale x 8 x i8> poison, <vscale x 8 x i32> zeroinitializer
+ %v = call <vscale x 8 x i8> @llvm.vp.sadd.sat.nxv8i8(<vscale x 8 x i8> %va, <vscale x 8 x i8> %vb, <vscale x 8 x i1> %m, i32 %evl)
+ ret <vscale x 8 x i8> %v
+}
+
+define <vscale x 8 x i8> @vsadd_vi_nxv8i8_unmasked(<vscale x 8 x i8> %va, i32 zeroext %evl) {
+; CHECK-LABEL: vsadd_vi_nxv8i8_unmasked:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma
+; CHECK-NEXT: vsadd.vi v8, v8, -1
+; CHECK-NEXT: ret
+ %elt.head = insertelement <vscale x 8 x i8> poison, i8 -1, i32 0
+ %vb = shufflevector <vscale x 8 x i8> %elt.head, <vscale x 8 x i8> poison, <vscale x 8 x i32> zeroinitializer
+ %head = insertelement <vscale x 8 x i1> poison, i1 true, i32 0
+ %m = shufflevector <vscale x 8 x i1> %head, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
+ %v = call <vscale x 8 x i8> @llvm.vp.sadd.sat.nxv8i8(<vscale x 8 x i8> %va, <vscale x 8 x i8> %vb, <vscale x 8 x i1> %m, i32 %evl)
+ ret <vscale x 8 x i8> %v
+}
+
+declare <vscale x 16 x i8> @llvm.vp.sadd.sat.nxv16i8(<vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i1>, i32)
+
+define <vscale x 16 x i8> @vsadd_vv_nxv16i8(<vscale x 16 x i8> %va, <vscale x 16 x i8> %b, <vscale x 16 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vsadd_vv_nxv16i8:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma
+; CHECK-NEXT: vsadd.vv v8, v8, v10, v0.t
+; CHECK-NEXT: ret
+ %v = call <vscale x 16 x i8> @llvm.vp.sadd.sat.nxv16i8(<vscale x 16 x i8> %va, <vscale x 16 x i8> %b, <vscale x 16 x i1> %m, i32 %evl)
+ ret <vscale x 16 x i8> %v
+}
+
+define <vscale x 16 x i8> @vsadd_vv_nxv16i8_unmasked(<vscale x 16 x i8> %va, <vscale x 16 x i8> %b, i32 zeroext %evl) {
+; CHECK-LABEL: vsadd_vv_nxv16i8_unmasked:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma
+; CHECK-NEXT: vsadd.vv v8, v8, v10
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 16 x i1> poison, i1 true, i32 0
+ %m = shufflevector <vscale x 16 x i1> %head, <vscale x 16 x i1> poison, <vscale x 16 x i32> zeroinitializer
+ %v = call <vscale x 16 x i8> @llvm.vp.sadd.sat.nxv16i8(<vscale x 16 x i8> %va, <vscale x 16 x i8> %b, <vscale x 16 x i1> %m, i32 %evl)
+ ret <vscale x 16 x i8> %v
+}
+
+define <vscale x 16 x i8> @vsadd_vx_nxv16i8(<vscale x 16 x i8> %va, i8 %b, <vscale x 16 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vsadd_vx_nxv16i8:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma
+; CHECK-NEXT: vsadd.vx v8, v8, a0, v0.t
+; CHECK-NEXT: ret
+ %elt.head = insertelement <vscale x 16 x i8> poison, i8 %b, i32 0
+ %vb = shufflevector <vscale x 16 x i8> %elt.head, <vscale x 16 x i8> poison, <vscale x 16 x i32> zeroinitializer
+ %v = call <vscale x 16 x i8> @llvm.vp.sadd.sat.nxv16i8(<vscale x 16 x i8> %va, <vscale x 16 x i8> %vb, <vscale x 16 x i1> %m, i32 %evl)
+ ret <vscale x 16 x i8> %v
+}
+
+define <vscale x 16 x i8> @vsadd_vx_nxv16i8_unmasked(<vscale x 16 x i8> %va, i8 %b, i32 zeroext %evl) {
+; CHECK-LABEL: vsadd_vx_nxv16i8_unmasked:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma
+; CHECK-NEXT: vsadd.vx v8, v8, a0
+; CHECK-NEXT: ret
+ %elt.head = insertelement <vscale x 16 x i8> poison, i8 %b, i32 0
+ %vb = shufflevector <vscale x 16 x i8> %elt.head, <vscale x 16 x i8> poison, <vscale x 16 x i32> zeroinitializer
+ %head = insertelement <vscale x 16 x i1> poison, i1 true, i32 0
+ %m = shufflevector <vscale x 16 x i1> %head, <vscale x 16 x i1> poison, <vscale x 16 x i32> zeroinitializer
+ %v = call <vscale x 16 x i8> @llvm.vp.sadd.sat.nxv16i8(<vscale x 16 x i8> %va, <vscale x 16 x i8> %vb, <vscale x 16 x i1> %m, i32 %evl)
+ ret <vscale x 16 x i8> %v
+}
+
+define <vscale x 16 x i8> @vsadd_vi_nxv16i8(<vscale x 16 x i8> %va, <vscale x 16 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vsadd_vi_nxv16i8:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma
+; CHECK-NEXT: vsadd.vi v8, v8, -1, v0.t
+; CHECK-NEXT: ret
+ %elt.head = insertelement <vscale x 16 x i8> poison, i8 -1, i32 0
+ %vb = shufflevector <vscale x 16 x i8> %elt.head, <vscale x 16 x i8> poison, <vscale x 16 x i32> zeroinitializer
+ %v = call <vscale x 16 x i8> @llvm.vp.sadd.sat.nxv16i8(<vscale x 16 x i8> %va, <vscale x 16 x i8> %vb, <vscale x 16 x i1> %m, i32 %evl)
+ ret <vscale x 16 x i8> %v
+}
+
+define <vscale x 16 x i8> @vsadd_vi_nxv16i8_unmasked(<vscale x 16 x i8> %va, i32 zeroext %evl) {
+; CHECK-LABEL: vsadd_vi_nxv16i8_unmasked:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma
+; CHECK-NEXT: vsadd.vi v8, v8, -1
+; CHECK-NEXT: ret
+ %elt.head = insertelement <vscale x 16 x i8> poison, i8 -1, i32 0
+ %vb = shufflevector <vscale x 16 x i8> %elt.head, <vscale x 16 x i8> poison, <vscale x 16 x i32> zeroinitializer
+ %head = insertelement <vscale x 16 x i1> poison, i1 true, i32 0
+ %m = shufflevector <vscale x 16 x i1> %head, <vscale x 16 x i1> poison, <vscale x 16 x i32> zeroinitializer
+ %v = call <vscale x 16 x i8> @llvm.vp.sadd.sat.nxv16i8(<vscale x 16 x i8> %va, <vscale x 16 x i8> %vb, <vscale x 16 x i1> %m, i32 %evl)
+ ret <vscale x 16 x i8> %v
+}
+
+declare <vscale x 32 x i8> @llvm.vp.sadd.sat.nxv32i8(<vscale x 32 x i8>, <vscale x 32 x i8>, <vscale x 32 x i1>, i32)
+
+define <vscale x 32 x i8> @vsadd_vv_nxv32i8(<vscale x 32 x i8> %va, <vscale x 32 x i8> %b, <vscale x 32 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vsadd_vv_nxv32i8:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma
+; CHECK-NEXT: vsadd.vv v8, v8, v12, v0.t
+; CHECK-NEXT: ret
+ %v = call <vscale x 32 x i8> @llvm.vp.sadd.sat.nxv32i8(<vscale x 32 x i8> %va, <vscale x 32 x i8> %b, <vscale x 32 x i1> %m, i32 %evl)
+ ret <vscale x 32 x i8> %v
+}
+
+define <vscale x 32 x i8> @vsadd_vv_nxv32i8_unmasked(<vscale x 32 x i8> %va, <vscale x 32 x i8> %b, i32 zeroext %evl) {
+; CHECK-LABEL: vsadd_vv_nxv32i8_unmasked:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma
+; CHECK-NEXT: vsadd.vv v8, v8, v12
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 32 x i1> poison, i1 true, i32 0
+ %m = shufflevector <vscale x 32 x i1> %head, <vscale x 32 x i1> poison, <vscale x 32 x i32> zeroinitializer
+ %v = call <vscale x 32 x i8> @llvm.vp.sadd.sat.nxv32i8(<vscale x 32 x i8> %va, <vscale x 32 x i8> %b, <vscale x 32 x i1> %m, i32 %evl)
+ ret <vscale x 32 x i8> %v
+}
+
+define <vscale x 32 x i8> @vsadd_vx_nxv32i8(<vscale x 32 x i8> %va, i8 %b, <vscale x 32 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vsadd_vx_nxv32i8:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, ma
+; CHECK-NEXT: vsadd.vx v8, v8, a0, v0.t
+; CHECK-NEXT: ret
+ %elt.head = insertelement <vscale x 32 x i8> poison, i8 %b, i32 0
+ %vb = shufflevector <vscale x 32 x i8> %elt.head, <vscale x 32 x i8> poison, <vscale x 32 x i32> zeroinitializer
+ %v = call <vscale x 32 x i8> @llvm.vp.sadd.sat.nxv32i8(<vscale x 32 x i8> %va, <vscale x 32 x i8> %vb, <vscale x 32 x i1> %m, i32 %evl)
+ ret <vscale x 32 x i8> %v
+}
+
+define <vscale x 32 x i8> @vsadd_vx_nxv32i8_unmasked(<vscale x 32 x i8> %va, i8 %b, i32 zeroext %evl) {
+; CHECK-LABEL: vsadd_vx_nxv32i8_unmasked:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, ma
+; CHECK-NEXT: vsadd.vx v8, v8, a0
+; CHECK-NEXT: ret
+ %elt.head = insertelement <vscale x 32 x i8> poison, i8 %b, i32 0
+ %vb = shufflevector <vscale x 32 x i8> %elt.head, <vscale x 32 x i8> poison, <vscale x 32 x i32> zeroinitializer
+ %head = insertelement <vscale x 32 x i1> poison, i1 true, i32 0
+ %m = shufflevector <vscale x 32 x i1> %head, <vscale x 32 x i1> poison, <vscale x 32 x i32> zeroinitializer
+ %v = call <vscale x 32 x i8> @llvm.vp.sadd.sat.nxv32i8(<vscale x 32 x i8> %va, <vscale x 32 x i8> %vb, <vscale x 32 x i1> %m, i32 %evl)
+ ret <vscale x 32 x i8> %v
+}
+
+define <vscale x 32 x i8> @vsadd_vi_nxv32i8(<vscale x 32 x i8> %va, <vscale x 32 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vsadd_vi_nxv32i8:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma
+; CHECK-NEXT: vsadd.vi v8, v8, -1, v0.t
+; CHECK-NEXT: ret
+ %elt.head = insertelement <vscale x 32 x i8> poison, i8 -1, i32 0
+ %vb = shufflevector <vscale x 32 x i8> %elt.head, <vscale x 32 x i8> poison, <vscale x 32 x i32> zeroinitializer
+ %v = call <vscale x 32 x i8> @llvm.vp.sadd.sat.nxv32i8(<vscale x 32 x i8> %va, <vscale x 32 x i8> %vb, <vscale x 32 x i1> %m, i32 %evl)
+ ret <vscale x 32 x i8> %v
+}
+
+define <vscale x 32 x i8> @vsadd_vi_nxv32i8_unmasked(<vscale x 32 x i8> %va, i32 zeroext %evl) {
+; CHECK-LABEL: vsadd_vi_nxv32i8_unmasked:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma
+; CHECK-NEXT: vsadd.vi v8, v8, -1
+; CHECK-NEXT: ret
+ %elt.head = insertelement <vscale x 32 x i8> poison, i8 -1, i32 0
+ %vb = shufflevector <vscale x 32 x i8> %elt.head, <vscale x 32 x i8> poison, <vscale x 32 x i32> zeroinitializer
+ %head = insertelement <vscale x 32 x i1> poison, i1 true, i32 0
+ %m = shufflevector <vscale x 32 x i1> %head, <vscale x 32 x i1> poison, <vscale x 32 x i32> zeroinitializer
+ %v = call <vscale x 32 x i8> @llvm.vp.sadd.sat.nxv32i8(<vscale x 32 x i8> %va, <vscale x 32 x i8> %vb, <vscale x 32 x i1> %m, i32 %evl)
+ ret <vscale x 32 x i8> %v
+}
+
+declare <vscale x 64 x i8> @llvm.vp.sadd.sat.nxv64i8(<vscale x 64 x i8>, <vscale x 64 x i8>, <vscale x 64 x i1>, i32)
+
+define <vscale x 64 x i8> @vsadd_vv_nxv64i8(<vscale x 64 x i8> %va, <vscale x 64 x i8> %b, <vscale x 64 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vsadd_vv_nxv64i8:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, ma
+; CHECK-NEXT: vsadd.vv v8, v8, v16, v0.t
+; CHECK-NEXT: ret
+ %v = call <vscale x 64 x i8> @llvm.vp.sadd.sat.nxv64i8(<vscale x 64 x i8> %va, <vscale x 64 x i8> %b, <vscale x 64 x i1> %m, i32 %evl)
+ ret <vscale x 64 x i8> %v
+}
+
+define <vscale x 64 x i8> @vsadd_vv_nxv64i8_unmasked(<vscale x 64 x i8> %va, <vscale x 64 x i8> %b, i32 zeroext %evl) {
+; CHECK-LABEL: vsadd_vv_nxv64i8_unmasked:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, ma
+; CHECK-NEXT: vsadd.vv v8, v8, v16
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 64 x i1> poison, i1 true, i32 0
+ %m = shufflevector <vscale x 64 x i1> %head, <vscale x 64 x i1> poison, <vscale x 64 x i32> zeroinitializer
+ %v = call <vscale x 64 x i8> @llvm.vp.sadd.sat.nxv64i8(<vscale x 64 x i8> %va, <vscale x 64 x i8> %b, <vscale x 64 x i1> %m, i32 %evl)
+ ret <vscale x 64 x i8> %v
+}
+
+define <vscale x 64 x i8> @vsadd_vx_nxv64i8(<vscale x 64 x i8> %va, i8 %b, <vscale x 64 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vsadd_vx_nxv64i8:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, ma
+; CHECK-NEXT: vsadd.vx v8, v8, a0, v0.t
+; CHECK-NEXT: ret
+ %elt.head = insertelement <vscale x 64 x i8> poison, i8 %b, i32 0
+ %vb = shufflevector <vscale x 64 x i8> %elt.head, <vscale x 64 x i8> poison, <vscale x 64 x i32> zeroinitializer
+ %v = call <vscale x 64 x i8> @llvm.vp.sadd.sat.nxv64i8(<vscale x 64 x i8> %va, <vscale x 64 x i8> %vb, <vscale x 64 x i1> %m, i32 %evl)
+ ret <vscale x 64 x i8> %v
+}
+
+define <vscale x 64 x i8> @vsadd_vx_nxv64i8_unmasked(<vscale x 64 x i8> %va, i8 %b, i32 zeroext %evl) {
+; CHECK-LABEL: vsadd_vx_nxv64i8_unmasked:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, ma
+; CHECK-NEXT: vsadd.vx v8, v8, a0
+; CHECK-NEXT: ret
+ %elt.head = insertelement <vscale x 64 x i8> poison, i8 %b, i32 0
+ %vb = shufflevector <vscale x 64 x i8> %elt.head, <vscale x 64 x i8> poison, <vscale x 64 x i32> zeroinitializer
+ %head = insertelement <vscale x 64 x i1> poison, i1 true, i32 0
+ %m = shufflevector <vscale x 64 x i1> %head, <vscale x 64 x i1> poison, <vscale x 64 x i32> zeroinitializer
+ %v = call <vscale x 64 x i8> @llvm.vp.sadd.sat.nxv64i8(<vscale x 64 x i8> %va, <vscale x 64 x i8> %vb, <vscale x 64 x i1> %m, i32 %evl)
+ ret <vscale x 64 x i8> %v
+}
+
+define <vscale x 64 x i8> @vsadd_vi_nxv64i8(<vscale x 64 x i8> %va, <vscale x 64 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vsadd_vi_nxv64i8:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, ma
+; CHECK-NEXT: vsadd.vi v8, v8, -1, v0.t
+; CHECK-NEXT: ret
+ %elt.head = insertelement <vscale x 64 x i8> poison, i8 -1, i32 0
+ %vb = shufflevector <vscale x 64 x i8> %elt.head, <vscale x 64 x i8> poison, <vscale x 64 x i32> zeroinitializer
+ %v = call <vscale x 64 x i8> @llvm.vp.sadd.sat.nxv64i8(<vscale x 64 x i8> %va, <vscale x 64 x i8> %vb, <vscale x 64 x i1> %m, i32 %evl)
+ ret <vscale x 64 x i8> %v
+}
+
+define <vscale x 64 x i8> @vsadd_vi_nxv64i8_unmasked(<vscale x 64 x i8> %va, i32 zeroext %evl) {
+; CHECK-LABEL: vsadd_vi_nxv64i8_unmasked:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, ma
+; CHECK-NEXT: vsadd.vi v8, v8, -1
+; CHECK-NEXT: ret
+ %elt.head = insertelement <vscale x 64 x i8> poison, i8 -1, i32 0
+ %vb = shufflevector <vscale x 64 x i8> %elt.head, <vscale x 64 x i8> poison, <vscale x 64 x i32> zeroinitializer
+ %head = insertelement <vscale x 64 x i1> poison, i1 true, i32 0
+ %m = shufflevector <vscale x 64 x i1> %head, <vscale x 64 x i1> poison, <vscale x 64 x i32> zeroinitializer
+ %v = call <vscale x 64 x i8> @llvm.vp.sadd.sat.nxv64i8(<vscale x 64 x i8> %va, <vscale x 64 x i8> %vb, <vscale x 64 x i1> %m, i32 %evl)
+ ret <vscale x 64 x i8> %v
+}
+
+; Test that split-legalization works when the mask itself needs splitting.
+
+declare <vscale x 128 x i8> @llvm.vp.sadd.sat.nxv128i8(<vscale x 128 x i8>, <vscale x 128 x i8>, <vscale x 128 x i1>, i32)
+
+define <vscale x 128 x i8> @vsadd_vi_nxv128i8(<vscale x 128 x i8> %va, <vscale x 128 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vsadd_vi_nxv128i8:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vmv1r.v v24, v0
+; CHECK-NEXT: vsetvli a2, zero, e8, m8, ta, ma
+; CHECK-NEXT: vlm.v v0, (a0)
+; CHECK-NEXT: csrr a0, vlenb
+; CHECK-NEXT: slli a0, a0, 3
+; CHECK-NEXT: sub a2, a1, a0
+; CHECK-NEXT: sltu a3, a1, a2
+; CHECK-NEXT: addi a3, a3, -1
+; CHECK-NEXT: and a2, a3, a2
+; CHECK-NEXT: vsetvli zero, a2, e8, m8, ta, ma
+; CHECK-NEXT: vsadd.vi v16, v16, -1, v0.t
+; CHECK-NEXT: bltu a1, a0, .LBB50_2
+; CHECK-NEXT: # %bb.1:
+; CHECK-NEXT: mv a1, a0
+; CHECK-NEXT: .LBB50_2:
+; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, ma
+; CHECK-NEXT: vmv1r.v v0, v24
+; CHECK-NEXT: vsadd.vi v8, v8, -1, v0.t
+; CHECK-NEXT: ret
+ %elt.head = insertelement <vscale x 128 x i8> poison, i8 -1, i32 0
+ %vb = shufflevector <vscale x 128 x i8> %elt.head, <vscale x 128 x i8> poison, <vscale x 128 x i32> zeroinitializer
+ %v = call <vscale x 128 x i8> @llvm.vp.sadd.sat.nxv128i8(<vscale x 128 x i8> %va, <vscale x 128 x i8> %vb, <vscale x 128 x i1> %m, i32 %evl)
+ ret <vscale x 128 x i8> %v
+}
+
+define <vscale x 128 x i8> @vsadd_vi_nxv128i8_unmasked(<vscale x 128 x i8> %va, i32 zeroext %evl) {
+; CHECK-LABEL: vsadd_vi_nxv128i8_unmasked:
+; CHECK: # %bb.0:
+; CHECK-NEXT: csrr a1, vlenb
+; CHECK-NEXT: slli a1, a1, 3
+; CHECK-NEXT: sub a2, a0, a1
+; CHECK-NEXT: sltu a3, a0, a2
+; CHECK-NEXT: addi a3, a3, -1
+; CHECK-NEXT: and a2, a3, a2
+; CHECK-NEXT: vsetvli zero, a2, e8, m8, ta, ma
+; CHECK-NEXT: vsadd.vi v16, v16, -1
+; CHECK-NEXT: bltu a0, a1, .LBB51_2
+; CHECK-NEXT: # %bb.1:
+; CHECK-NEXT: mv a0, a1
+; CHECK-NEXT: .LBB51_2:
+; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, ma
+; CHECK-NEXT: vsadd.vi v8, v8, -1
+; CHECK-NEXT: ret
+ %elt.head = insertelement <vscale x 128 x i8> poison, i8 -1, i32 0
+ %vb = shufflevector <vscale x 128 x i8> %elt.head, <vscale x 128 x i8> poison, <vscale x 128 x i32> zeroinitializer
+ %head = insertelement <vscale x 128 x i1> poison, i1 true, i32 0
+ %m = shufflevector <vscale x 128 x i1> %head, <vscale x 128 x i1> poison, <vscale x 128 x i32> zeroinitializer
+ %v = call <vscale x 128 x i8> @llvm.vp.sadd.sat.nxv128i8(<vscale x 128 x i8> %va, <vscale x 128 x i8> %vb, <vscale x 128 x i1> %m, i32 %evl)
+ ret <vscale x 128 x i8> %v
+}
+
+declare <vscale x 1 x i16> @llvm.vp.sadd.sat.nxv1i16(<vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i1>, i32)
+
+define <vscale x 1 x i16> @vsadd_vv_nxv1i16(<vscale x 1 x i16> %va, <vscale x 1 x i16> %b, <vscale x 1 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vsadd_vv_nxv1i16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
+; CHECK-NEXT: vsadd.vv v8, v8, v9, v0.t
+; CHECK-NEXT: ret
+ %v = call <vscale x 1 x i16> @llvm.vp.sadd.sat.nxv1i16(<vscale x 1 x i16> %va, <vscale x 1 x i16> %b, <vscale x 1 x i1> %m, i32 %evl)
+ ret <vscale x 1 x i16> %v
+}
+
+define <vscale x 1 x i16> @vsadd_vv_nxv1i16_unmasked(<vscale x 1 x i16> %va, <vscale x 1 x i16> %b, i32 zeroext %evl) {
+; CHECK-LABEL: vsadd_vv_nxv1i16_unmasked:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
+; CHECK-NEXT: vsadd.vv v8, v8, v9
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 1 x i1> poison, i1 true, i32 0
+ %m = shufflevector <vscale x 1 x i1> %head, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
+ %v = call <vscale x 1 x i16> @llvm.vp.sadd.sat.nxv1i16(<vscale x 1 x i16> %va, <vscale x 1 x i16> %b, <vscale x 1 x i1> %m, i32 %evl)
+ ret <vscale x 1 x i16> %v
+}
+
+define <vscale x 1 x i16> @vsadd_vx_nxv1i16(<vscale x 1 x i16> %va, i16 %b, <vscale x 1 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vsadd_vx_nxv1i16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
+; CHECK-NEXT: vsadd.vx v8, v8, a0, v0.t
+; CHECK-NEXT: ret
+ %elt.head = insertelement <vscale x 1 x i16> poison, i16 %b, i32 0
+ %vb = shufflevector <vscale x 1 x i16> %elt.head, <vscale x 1 x i16> poison, <vscale x 1 x i32> zeroinitializer
+ %v = call <vscale x 1 x i16> @llvm.vp.sadd.sat.nxv1i16(<vscale x 1 x i16> %va, <vscale x 1 x i16> %vb, <vscale x 1 x i1> %m, i32 %evl)
+ ret <vscale x 1 x i16> %v
+}
+
+define <vscale x 1 x i16> @vsadd_vx_nxv1i16_unmasked(<vscale x 1 x i16> %va, i16 %b, i32 zeroext %evl) {
+; CHECK-LABEL: vsadd_vx_nxv1i16_unmasked:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
+; CHECK-NEXT: vsadd.vx v8, v8, a0
+; CHECK-NEXT: ret
+ %elt.head = insertelement <vscale x 1 x i16> poison, i16 %b, i32 0
+ %vb = shufflevector <vscale x 1 x i16> %elt.head, <vscale x 1 x i16> poison, <vscale x 1 x i32> zeroinitializer
+ %head = insertelement <vscale x 1 x i1> poison, i1 true, i32 0
+ %m = shufflevector <vscale x 1 x i1> %head, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
+ %v = call <vscale x 1 x i16> @llvm.vp.sadd.sat.nxv1i16(<vscale x 1 x i16> %va, <vscale x 1 x i16> %vb, <vscale x 1 x i1> %m, i32 %evl)
+ ret <vscale x 1 x i16> %v
+}
+
+define <vscale x 1 x i16> @vsadd_vi_nxv1i16(<vscale x 1 x i16> %va, <vscale x 1 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vsadd_vi_nxv1i16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
+; CHECK-NEXT: vsadd.vi v8, v8, -1, v0.t
+; CHECK-NEXT: ret
+ %elt.head = insertelement <vscale x 1 x i16> poison, i16 -1, i32 0
+ %vb = shufflevector <vscale x 1 x i16> %elt.head, <vscale x 1 x i16> poison, <vscale x 1 x i32> zeroinitializer
+ %v = call <vscale x 1 x i16> @llvm.vp.sadd.sat.nxv1i16(<vscale x 1 x i16> %va, <vscale x 1 x i16> %vb, <vscale x 1 x i1> %m, i32 %evl)
+ ret <vscale x 1 x i16> %v
+}
+
+define <vscale x 1 x i16> @vsadd_vi_nxv1i16_unmasked(<vscale x 1 x i16> %va, i32 zeroext %evl) {
+; CHECK-LABEL: vsadd_vi_nxv1i16_unmasked:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
+; CHECK-NEXT: vsadd.vi v8, v8, -1
+; CHECK-NEXT: ret
+ %elt.head = insertelement <vscale x 1 x i16> poison, i16 -1, i32 0
+ %vb = shufflevector <vscale x 1 x i16> %elt.head, <vscale x 1 x i16> poison, <vscale x 1 x i32> zeroinitializer
+ %head = insertelement <vscale x 1 x i1> poison, i1 true, i32 0
+ %m = shufflevector <vscale x 1 x i1> %head, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
+ %v = call <vscale x 1 x i16> @llvm.vp.sadd.sat.nxv1i16(<vscale x 1 x i16> %va, <vscale x 1 x i16> %vb, <vscale x 1 x i1> %m, i32 %evl)
+ ret <vscale x 1 x i16> %v
+}
+
+declare <vscale x 2 x i16> @llvm.vp.sadd.sat.nxv2i16(<vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i1>, i32)
+
+define <vscale x 2 x i16> @vsadd_vv_nxv2i16(<vscale x 2 x i16> %va, <vscale x 2 x i16> %b, <vscale x 2 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vsadd_vv_nxv2i16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma
+; CHECK-NEXT: vsadd.vv v8, v8, v9, v0.t
+; CHECK-NEXT: ret
+ %v = call <vscale x 2 x i16> @llvm.vp.sadd.sat.nxv2i16(<vscale x 2 x i16> %va, <vscale x 2 x i16> %b, <vscale x 2 x i1> %m, i32 %evl)
+ ret <vscale x 2 x i16> %v
+}
+
+define <vscale x 2 x i16> @vsadd_vv_nxv2i16_unmasked(<vscale x 2 x i16> %va, <vscale x 2 x i16> %b, i32 zeroext %evl) {
+; CHECK-LABEL: vsadd_vv_nxv2i16_unmasked:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma
+; CHECK-NEXT: vsadd.vv v8, v8, v9
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 2 x i1> poison, i1 true, i32 0
+ %m = shufflevector <vscale x 2 x i1> %head, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
+ %v = call <vscale x 2 x i16> @llvm.vp.sadd.sat.nxv2i16(<vscale x 2 x i16> %va, <vscale x 2 x i16> %b, <vscale x 2 x i1> %m, i32 %evl)
+ ret <vscale x 2 x i16> %v
+}
+
+define <vscale x 2 x i16> @vsadd_vx_nxv2i16(<vscale x 2 x i16> %va, i16 %b, <vscale x 2 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vsadd_vx_nxv2i16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
+; CHECK-NEXT: vsadd.vx v8, v8, a0, v0.t
+; CHECK-NEXT: ret
+ %elt.head = insertelement <vscale x 2 x i16> poison, i16 %b, i32 0
+ %vb = shufflevector <vscale x 2 x i16> %elt.head, <vscale x 2 x i16> poison, <vscale x 2 x i32> zeroinitializer
+ %v = call <vscale x 2 x i16> @llvm.vp.sadd.sat.nxv2i16(<vscale x 2 x i16> %va, <vscale x 2 x i16> %vb, <vscale x 2 x i1> %m, i32 %evl)
+ ret <vscale x 2 x i16> %v
+}
+
+define <vscale x 2 x i16> @vsadd_vx_nxv2i16_unmasked(<vscale x 2 x i16> %va, i16 %b, i32 zeroext %evl) {
+; CHECK-LABEL: vsadd_vx_nxv2i16_unmasked:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
+; CHECK-NEXT: vsadd.vx v8, v8, a0
+; CHECK-NEXT: ret
+ %elt.head = insertelement <vscale x 2 x i16> poison, i16 %b, i32 0
+ %vb = shufflevector <vscale x 2 x i16> %elt.head, <vscale x 2 x i16> poison, <vscale x 2 x i32> zeroinitializer
+ %head = insertelement <vscale x 2 x i1> poison, i1 true, i32 0
+ %m = shufflevector <vscale x 2 x i1> %head, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
+ %v = call <vscale x 2 x i16> @llvm.vp.sadd.sat.nxv2i16(<vscale x 2 x i16> %va, <vscale x 2 x i16> %vb, <vscale x 2 x i1> %m, i32 %evl)
+ ret <vscale x 2 x i16> %v
+}
+
+define <vscale x 2 x i16> @vsadd_vi_nxv2i16(<vscale x 2 x i16> %va, <vscale x 2 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vsadd_vi_nxv2i16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma
+; CHECK-NEXT: vsadd.vi v8, v8, -1, v0.t
+; CHECK-NEXT: ret
+ %elt.head = insertelement <vscale x 2 x i16> poison, i16 -1, i32 0
+ %vb = shufflevector <vscale x 2 x i16> %elt.head, <vscale x 2 x i16> poison, <vscale x 2 x i32> zeroinitializer
+ %v = call <vscale x 2 x i16> @llvm.vp.sadd.sat.nxv2i16(<vscale x 2 x i16> %va, <vscale x 2 x i16> %vb, <vscale x 2 x i1> %m, i32 %evl)
+ ret <vscale x 2 x i16> %v
+}
+
+define <vscale x 2 x i16> @vsadd_vi_nxv2i16_unmasked(<vscale x 2 x i16> %va, i32 zeroext %evl) {
+; CHECK-LABEL: vsadd_vi_nxv2i16_unmasked:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma
+; CHECK-NEXT: vsadd.vi v8, v8, -1
+; CHECK-NEXT: ret
+ %elt.head = insertelement <vscale x 2 x i16> poison, i16 -1, i32 0
+ %vb = shufflevector <vscale x 2 x i16> %elt.head, <vscale x 2 x i16> poison, <vscale x 2 x i32> zeroinitializer
+ %head = insertelement <vscale x 2 x i1> poison, i1 true, i32 0
+ %m = shufflevector <vscale x 2 x i1> %head, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
+ %v = call <vscale x 2 x i16> @llvm.vp.sadd.sat.nxv2i16(<vscale x 2 x i16> %va, <vscale x 2 x i16> %vb, <vscale x 2 x i1> %m, i32 %evl)
+ ret <vscale x 2 x i16> %v
+}
+
+declare <vscale x 4 x i16> @llvm.vp.sadd.sat.nxv4i16(<vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i1>, i32)
+
+define <vscale x 4 x i16> @vsadd_vv_nxv4i16(<vscale x 4 x i16> %va, <vscale x 4 x i16> %b, <vscale x 4 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vsadd_vv_nxv4i16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma
+; CHECK-NEXT: vsadd.vv v8, v8, v9, v0.t
+; CHECK-NEXT: ret
+ %v = call <vscale x 4 x i16> @llvm.vp.sadd.sat.nxv4i16(<vscale x 4 x i16> %va, <vscale x 4 x i16> %b, <vscale x 4 x i1> %m, i32 %evl)
+ ret <vscale x 4 x i16> %v
+}
+
+define <vscale x 4 x i16> @vsadd_vv_nxv4i16_unmasked(<vscale x 4 x i16> %va, <vscale x 4 x i16> %b, i32 zeroext %evl) {
+; CHECK-LABEL: vsadd_vv_nxv4i16_unmasked:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma
+; CHECK-NEXT: vsadd.vv v8, v8, v9
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 4 x i1> poison, i1 true, i32 0
+ %m = shufflevector <vscale x 4 x i1> %head, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
+ %v = call <vscale x 4 x i16> @llvm.vp.sadd.sat.nxv4i16(<vscale x 4 x i16> %va, <vscale x 4 x i16> %b, <vscale x 4 x i1> %m, i32 %evl)
+ ret <vscale x 4 x i16> %v
+}
+
+define <vscale x 4 x i16> @vsadd_vx_nxv4i16(<vscale x 4 x i16> %va, i16 %b, <vscale x 4 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vsadd_vx_nxv4i16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
+; CHECK-NEXT: vsadd.vx v8, v8, a0, v0.t
+; CHECK-NEXT: ret
+ %elt.head = insertelement <vscale x 4 x i16> poison, i16 %b, i32 0
+ %vb = shufflevector <vscale x 4 x i16> %elt.head, <vscale x 4 x i16> poison, <vscale x 4 x i32> zeroinitializer
+ %v = call <vscale x 4 x i16> @llvm.vp.sadd.sat.nxv4i16(<vscale x 4 x i16> %va, <vscale x 4 x i16> %vb, <vscale x 4 x i1> %m, i32 %evl)
+ ret <vscale x 4 x i16> %v
+}
+
+define <vscale x 4 x i16> @vsadd_vx_nxv4i16_unmasked(<vscale x 4 x i16> %va, i16 %b, i32 zeroext %evl) {
+; CHECK-LABEL: vsadd_vx_nxv4i16_unmasked:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
+; CHECK-NEXT: vsadd.vx v8, v8, a0
+; CHECK-NEXT: ret
+ %elt.head = insertelement <vscale x 4 x i16> poison, i16 %b, i32 0
+ %vb = shufflevector <vscale x 4 x i16> %elt.head, <vscale x 4 x i16> poison, <vscale x 4 x i32> zeroinitializer
+ %head = insertelement <vscale x 4 x i1> poison, i1 true, i32 0
+ %m = shufflevector <vscale x 4 x i1> %head, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
+ %v = call <vscale x 4 x i16> @llvm.vp.sadd.sat.nxv4i16(<vscale x 4 x i16> %va, <vscale x 4 x i16> %vb, <vscale x 4 x i1> %m, i32 %evl)
+ ret <vscale x 4 x i16> %v
+}
+
+define <vscale x 4 x i16> @vsadd_vi_nxv4i16(<vscale x 4 x i16> %va, <vscale x 4 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vsadd_vi_nxv4i16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma
+; CHECK-NEXT: vsadd.vi v8, v8, -1, v0.t
+; CHECK-NEXT: ret
+ %elt.head = insertelement <vscale x 4 x i16> poison, i16 -1, i32 0
+ %vb = shufflevector <vscale x 4 x i16> %elt.head, <vscale x 4 x i16> poison, <vscale x 4 x i32> zeroinitializer
+ %v = call <vscale x 4 x i16> @llvm.vp.sadd.sat.nxv4i16(<vscale x 4 x i16> %va, <vscale x 4 x i16> %vb, <vscale x 4 x i1> %m, i32 %evl)
+ ret <vscale x 4 x i16> %v
+}
+
+define <vscale x 4 x i16> @vsadd_vi_nxv4i16_unmasked(<vscale x 4 x i16> %va, i32 zeroext %evl) {
+; CHECK-LABEL: vsadd_vi_nxv4i16_unmasked:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma
+; CHECK-NEXT: vsadd.vi v8, v8, -1
+; CHECK-NEXT: ret
+ %elt.head = insertelement <vscale x 4 x i16> poison, i16 -1, i32 0
+ %vb = shufflevector <vscale x 4 x i16> %elt.head, <vscale x 4 x i16> poison, <vscale x 4 x i32> zeroinitializer
+ %head = insertelement <vscale x 4 x i1> poison, i1 true, i32 0
+ %m = shufflevector <vscale x 4 x i1> %head, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
+ %v = call <vscale x 4 x i16> @llvm.vp.sadd.sat.nxv4i16(<vscale x 4 x i16> %va, <vscale x 4 x i16> %vb, <vscale x 4 x i1> %m, i32 %evl)
+ ret <vscale x 4 x i16> %v
+}
+
+declare <vscale x 8 x i16> @llvm.vp.sadd.sat.nxv8i16(<vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i1>, i32)
+
+define <vscale x 8 x i16> @vsadd_vv_nxv8i16(<vscale x 8 x i16> %va, <vscale x 8 x i16> %b, <vscale x 8 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vsadd_vv_nxv8i16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma
+; CHECK-NEXT: vsadd.vv v8, v8, v10, v0.t
+; CHECK-NEXT: ret
+ %v = call <vscale x 8 x i16> @llvm.vp.sadd.sat.nxv8i16(<vscale x 8 x i16> %va, <vscale x 8 x i16> %b, <vscale x 8 x i1> %m, i32 %evl)
+ ret <vscale x 8 x i16> %v
+}
+
+define <vscale x 8 x i16> @vsadd_vv_nxv8i16_unmasked(<vscale x 8 x i16> %va, <vscale x 8 x i16> %b, i32 zeroext %evl) {
+; CHECK-LABEL: vsadd_vv_nxv8i16_unmasked:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma
+; CHECK-NEXT: vsadd.vv v8, v8, v10
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i1> poison, i1 true, i32 0
+ %m = shufflevector <vscale x 8 x i1> %head, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
+ %v = call <vscale x 8 x i16> @llvm.vp.sadd.sat.nxv8i16(<vscale x 8 x i16> %va, <vscale x 8 x i16> %b, <vscale x 8 x i1> %m, i32 %evl)
+ ret <vscale x 8 x i16> %v
+}
+
+define <vscale x 8 x i16> @vsadd_vx_nxv8i16(<vscale x 8 x i16> %va, i16 %b, <vscale x 8 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vsadd_vx_nxv8i16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma
+; CHECK-NEXT: vsadd.vx v8, v8, a0, v0.t
+; CHECK-NEXT: ret
+ %elt.head = insertelement <vscale x 8 x i16> poison, i16 %b, i32 0
+ %vb = shufflevector <vscale x 8 x i16> %elt.head, <vscale x 8 x i16> poison, <vscale x 8 x i32> zeroinitializer
+ %v = call <vscale x 8 x i16> @llvm.vp.sadd.sat.nxv8i16(<vscale x 8 x i16> %va, <vscale x 8 x i16> %vb, <vscale x 8 x i1> %m, i32 %evl)
+ ret <vscale x 8 x i16> %v
+}
+
+define <vscale x 8 x i16> @vsadd_vx_nxv8i16_unmasked(<vscale x 8 x i16> %va, i16 %b, i32 zeroext %evl) {
+; CHECK-LABEL: vsadd_vx_nxv8i16_unmasked:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma
+; CHECK-NEXT: vsadd.vx v8, v8, a0
+; CHECK-NEXT: ret
+ %elt.head = insertelement <vscale x 8 x i16> poison, i16 %b, i32 0
+ %vb = shufflevector <vscale x 8 x i16> %elt.head, <vscale x 8 x i16> poison, <vscale x 8 x i32> zeroinitializer
+ %head = insertelement <vscale x 8 x i1> poison, i1 true, i32 0
+ %m = shufflevector <vscale x 8 x i1> %head, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
+ %v = call <vscale x 8 x i16> @llvm.vp.sadd.sat.nxv8i16(<vscale x 8 x i16> %va, <vscale x 8 x i16> %vb, <vscale x 8 x i1> %m, i32 %evl)
+ ret <vscale x 8 x i16> %v
+}
+
+define <vscale x 8 x i16> @vsadd_vi_nxv8i16(<vscale x 8 x i16> %va, <vscale x 8 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vsadd_vi_nxv8i16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma
+; CHECK-NEXT: vsadd.vi v8, v8, -1, v0.t
+; CHECK-NEXT: ret
+ %elt.head = insertelement <vscale x 8 x i16> poison, i16 -1, i32 0
+ %vb = shufflevector <vscale x 8 x i16> %elt.head, <vscale x 8 x i16> poison, <vscale x 8 x i32> zeroinitializer
+ %v = call <vscale x 8 x i16> @llvm.vp.sadd.sat.nxv8i16(<vscale x 8 x i16> %va, <vscale x 8 x i16> %vb, <vscale x 8 x i1> %m, i32 %evl)
+ ret <vscale x 8 x i16> %v
+}
+
+define <vscale x 8 x i16> @vsadd_vi_nxv8i16_unmasked(<vscale x 8 x i16> %va, i32 zeroext %evl) {
+; CHECK-LABEL: vsadd_vi_nxv8i16_unmasked:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma
+; CHECK-NEXT: vsadd.vi v8, v8, -1
+; CHECK-NEXT: ret
+ %elt.head = insertelement <vscale x 8 x i16> poison, i16 -1, i32 0
+ %vb = shufflevector <vscale x 8 x i16> %elt.head, <vscale x 8 x i16> poison, <vscale x 8 x i32> zeroinitializer
+ %head = insertelement <vscale x 8 x i1> poison, i1 true, i32 0
+ %m = shufflevector <vscale x 8 x i1> %head, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
+ %v = call <vscale x 8 x i16> @llvm.vp.sadd.sat.nxv8i16(<vscale x 8 x i16> %va, <vscale x 8 x i16> %vb, <vscale x 8 x i1> %m, i32 %evl)
+ ret <vscale x 8 x i16> %v
+}
+
+declare <vscale x 16 x i16> @llvm.vp.sadd.sat.nxv16i16(<vscale x 16 x i16>, <vscale x 16 x i16>, <vscale x 16 x i1>, i32)
+
+define <vscale x 16 x i16> @vsadd_vv_nxv16i16(<vscale x 16 x i16> %va, <vscale x 16 x i16> %b, <vscale x 16 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vsadd_vv_nxv16i16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma
+; CHECK-NEXT: vsadd.vv v8, v8, v12, v0.t
+; CHECK-NEXT: ret
+ %v = call <vscale x 16 x i16> @llvm.vp.sadd.sat.nxv16i16(<vscale x 16 x i16> %va, <vscale x 16 x i16> %b, <vscale x 16 x i1> %m, i32 %evl)
+ ret <vscale x 16 x i16> %v
+}
+
+define <vscale x 16 x i16> @vsadd_vv_nxv16i16_unmasked(<vscale x 16 x i16> %va, <vscale x 16 x i16> %b, i32 zeroext %evl) {
+; CHECK-LABEL: vsadd_vv_nxv16i16_unmasked:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma
+; CHECK-NEXT: vsadd.vv v8, v8, v12
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 16 x i1> poison, i1 true, i32 0
+ %m = shufflevector <vscale x 16 x i1> %head, <vscale x 16 x i1> poison, <vscale x 16 x i32> zeroinitializer
+ %v = call <vscale x 16 x i16> @llvm.vp.sadd.sat.nxv16i16(<vscale x 16 x i16> %va, <vscale x 16 x i16> %b, <vscale x 16 x i1> %m, i32 %evl)
+ ret <vscale x 16 x i16> %v
+}
+
+define <vscale x 16 x i16> @vsadd_vx_nxv16i16(<vscale x 16 x i16> %va, i16 %b, <vscale x 16 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vsadd_vx_nxv16i16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma
+; CHECK-NEXT: vsadd.vx v8, v8, a0, v0.t
+; CHECK-NEXT: ret
+ %elt.head = insertelement <vscale x 16 x i16> poison, i16 %b, i32 0
+ %vb = shufflevector <vscale x 16 x i16> %elt.head, <vscale x 16 x i16> poison, <vscale x 16 x i32> zeroinitializer
+ %v = call <vscale x 16 x i16> @llvm.vp.sadd.sat.nxv16i16(<vscale x 16 x i16> %va, <vscale x 16 x i16> %vb, <vscale x 16 x i1> %m, i32 %evl)
+ ret <vscale x 16 x i16> %v
+}
+
+define <vscale x 16 x i16> @vsadd_vx_nxv16i16_unmasked(<vscale x 16 x i16> %va, i16 %b, i32 zeroext %evl) {
+; CHECK-LABEL: vsadd_vx_nxv16i16_unmasked:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma
+; CHECK-NEXT: vsadd.vx v8, v8, a0
+; CHECK-NEXT: ret
+ %elt.head = insertelement <vscale x 16 x i16> poison, i16 %b, i32 0
+ %vb = shufflevector <vscale x 16 x i16> %elt.head, <vscale x 16 x i16> poison, <vscale x 16 x i32> zeroinitializer
+ %head = insertelement <vscale x 16 x i1> poison, i1 true, i32 0
+ %m = shufflevector <vscale x 16 x i1> %head, <vscale x 16 x i1> poison, <vscale x 16 x i32> zeroinitializer
+ %v = call <vscale x 16 x i16> @llvm.vp.sadd.sat.nxv16i16(<vscale x 16 x i16> %va, <vscale x 16 x i16> %vb, <vscale x 16 x i1> %m, i32 %evl)
+ ret <vscale x 16 x i16> %v
+}
+
+define <vscale x 16 x i16> @vsadd_vi_nxv16i16(<vscale x 16 x i16> %va, <vscale x 16 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vsadd_vi_nxv16i16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma
+; CHECK-NEXT: vsadd.vi v8, v8, -1, v0.t
+; CHECK-NEXT: ret
+ %elt.head = insertelement <vscale x 16 x i16> poison, i16 -1, i32 0
+ %vb = shufflevector <vscale x 16 x i16> %elt.head, <vscale x 16 x i16> poison, <vscale x 16 x i32> zeroinitializer
+ %v = call <vscale x 16 x i16> @llvm.vp.sadd.sat.nxv16i16(<vscale x 16 x i16> %va, <vscale x 16 x i16> %vb, <vscale x 16 x i1> %m, i32 %evl)
+ ret <vscale x 16 x i16> %v
+}
+
+define <vscale x 16 x i16> @vsadd_vi_nxv16i16_unmasked(<vscale x 16 x i16> %va, i32 zeroext %evl) {
+; CHECK-LABEL: vsadd_vi_nxv16i16_unmasked:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma
+; CHECK-NEXT: vsadd.vi v8, v8, -1
+; CHECK-NEXT: ret
+ %elt.head = insertelement <vscale x 16 x i16> poison, i16 -1, i32 0
+ %vb = shufflevector <vscale x 16 x i16> %elt.head, <vscale x 16 x i16> poison, <vscale x 16 x i32> zeroinitializer
+ %head = insertelement <vscale x 16 x i1> poison, i1 true, i32 0
+ %m = shufflevector <vscale x 16 x i1> %head, <vscale x 16 x i1> poison, <vscale x 16 x i32> zeroinitializer
+ %v = call <vscale x 16 x i16> @llvm.vp.sadd.sat.nxv16i16(<vscale x 16 x i16> %va, <vscale x 16 x i16> %vb, <vscale x 16 x i1> %m, i32 %evl)
+ ret <vscale x 16 x i16> %v
+}
+
+declare <vscale x 32 x i16> @llvm.vp.sadd.sat.nxv32i16(<vscale x 32 x i16>, <vscale x 32 x i16>, <vscale x 32 x i1>, i32)
+
+define <vscale x 32 x i16> @vsadd_vv_nxv32i16(<vscale x 32 x i16> %va, <vscale x 32 x i16> %b, <vscale x 32 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vsadd_vv_nxv32i16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, ma
+; CHECK-NEXT: vsadd.vv v8, v8, v16, v0.t
+; CHECK-NEXT: ret
+ %v = call <vscale x 32 x i16> @llvm.vp.sadd.sat.nxv32i16(<vscale x 32 x i16> %va, <vscale x 32 x i16> %b, <vscale x 32 x i1> %m, i32 %evl)
+ ret <vscale x 32 x i16> %v
+}
+
+define <vscale x 32 x i16> @vsadd_vv_nxv32i16_unmasked(<vscale x 32 x i16> %va, <vscale x 32 x i16> %b, i32 zeroext %evl) {
+; CHECK-LABEL: vsadd_vv_nxv32i16_unmasked:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, ma
+; CHECK-NEXT: vsadd.vv v8, v8, v16
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 32 x i1> poison, i1 true, i32 0
+ %m = shufflevector <vscale x 32 x i1> %head, <vscale x 32 x i1> poison, <vscale x 32 x i32> zeroinitializer
+ %v = call <vscale x 32 x i16> @llvm.vp.sadd.sat.nxv32i16(<vscale x 32 x i16> %va, <vscale x 32 x i16> %b, <vscale x 32 x i1> %m, i32 %evl)
+ ret <vscale x 32 x i16> %v
+}
+
+define <vscale x 32 x i16> @vsadd_vx_nxv32i16(<vscale x 32 x i16> %va, i16 %b, <vscale x 32 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vsadd_vx_nxv32i16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, ma
+; CHECK-NEXT: vsadd.vx v8, v8, a0, v0.t
+; CHECK-NEXT: ret
+ %elt.head = insertelement <vscale x 32 x i16> poison, i16 %b, i32 0
+ %vb = shufflevector <vscale x 32 x i16> %elt.head, <vscale x 32 x i16> poison, <vscale x 32 x i32> zeroinitializer
+ %v = call <vscale x 32 x i16> @llvm.vp.sadd.sat.nxv32i16(<vscale x 32 x i16> %va, <vscale x 32 x i16> %vb, <vscale x 32 x i1> %m, i32 %evl)
+ ret <vscale x 32 x i16> %v
+}
+
+define <vscale x 32 x i16> @vsadd_vx_nxv32i16_unmasked(<vscale x 32 x i16> %va, i16 %b, i32 zeroext %evl) {
+; CHECK-LABEL: vsadd_vx_nxv32i16_unmasked:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, ma
+; CHECK-NEXT: vsadd.vx v8, v8, a0
+; CHECK-NEXT: ret
+ %elt.head = insertelement <vscale x 32 x i16> poison, i16 %b, i32 0
+ %vb = shufflevector <vscale x 32 x i16> %elt.head, <vscale x 32 x i16> poison, <vscale x 32 x i32> zeroinitializer
+ %head = insertelement <vscale x 32 x i1> poison, i1 true, i32 0
+ %m = shufflevector <vscale x 32 x i1> %head, <vscale x 32 x i1> poison, <vscale x 32 x i32> zeroinitializer
+ %v = call <vscale x 32 x i16> @llvm.vp.sadd.sat.nxv32i16(<vscale x 32 x i16> %va, <vscale x 32 x i16> %vb, <vscale x 32 x i1> %m, i32 %evl)
+ ret <vscale x 32 x i16> %v
+}
+
+define <vscale x 32 x i16> @vsadd_vi_nxv32i16(<vscale x 32 x i16> %va, <vscale x 32 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vsadd_vi_nxv32i16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, ma
+; CHECK-NEXT: vsadd.vi v8, v8, -1, v0.t
+; CHECK-NEXT: ret
+ %elt.head = insertelement <vscale x 32 x i16> poison, i16 -1, i32 0
+ %vb = shufflevector <vscale x 32 x i16> %elt.head, <vscale x 32 x i16> poison, <vscale x 32 x i32> zeroinitializer
+ %v = call <vscale x 32 x i16> @llvm.vp.sadd.sat.nxv32i16(<vscale x 32 x i16> %va, <vscale x 32 x i16> %vb, <vscale x 32 x i1> %m, i32 %evl)
+ ret <vscale x 32 x i16> %v
+}
+
+define <vscale x 32 x i16> @vsadd_vi_nxv32i16_unmasked(<vscale x 32 x i16> %va, i32 zeroext %evl) {
+; CHECK-LABEL: vsadd_vi_nxv32i16_unmasked:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, ma
+; CHECK-NEXT: vsadd.vi v8, v8, -1
+; CHECK-NEXT: ret
+ %elt.head = insertelement <vscale x 32 x i16> poison, i16 -1, i32 0
+ %vb = shufflevector <vscale x 32 x i16> %elt.head, <vscale x 32 x i16> poison, <vscale x 32 x i32> zeroinitializer
+ %head = insertelement <vscale x 32 x i1> poison, i1 true, i32 0
+ %m = shufflevector <vscale x 32 x i1> %head, <vscale x 32 x i1> poison, <vscale x 32 x i32> zeroinitializer
+ %v = call <vscale x 32 x i16> @llvm.vp.sadd.sat.nxv32i16(<vscale x 32 x i16> %va, <vscale x 32 x i16> %vb, <vscale x 32 x i1> %m, i32 %evl)
+ ret <vscale x 32 x i16> %v
+}
+
+declare <vscale x 1 x i32> @llvm.vp.sadd.sat.nxv1i32(<vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i1>, i32)
+
+define <vscale x 1 x i32> @vsadd_vv_nxv1i32(<vscale x 1 x i32> %va, <vscale x 1 x i32> %b, <vscale x 1 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vsadd_vv_nxv1i32:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma
+; CHECK-NEXT: vsadd.vv v8, v8, v9, v0.t
+; CHECK-NEXT: ret
+ %v = call <vscale x 1 x i32> @llvm.vp.sadd.sat.nxv1i32(<vscale x 1 x i32> %va, <vscale x 1 x i32> %b, <vscale x 1 x i1> %m, i32 %evl)
+ ret <vscale x 1 x i32> %v
+}
+
+define <vscale x 1 x i32> @vsadd_vv_nxv1i32_unmasked(<vscale x 1 x i32> %va, <vscale x 1 x i32> %b, i32 zeroext %evl) {
+; CHECK-LABEL: vsadd_vv_nxv1i32_unmasked:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma
+; CHECK-NEXT: vsadd.vv v8, v8, v9
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 1 x i1> poison, i1 true, i32 0
+ %m = shufflevector <vscale x 1 x i1> %head, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
+ %v = call <vscale x 1 x i32> @llvm.vp.sadd.sat.nxv1i32(<vscale x 1 x i32> %va, <vscale x 1 x i32> %b, <vscale x 1 x i1> %m, i32 %evl)
+ ret <vscale x 1 x i32> %v
+}
+
+define <vscale x 1 x i32> @vsadd_vx_nxv1i32(<vscale x 1 x i32> %va, i32 %b, <vscale x 1 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vsadd_vx_nxv1i32:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
+; CHECK-NEXT: vsadd.vx v8, v8, a0, v0.t
+; CHECK-NEXT: ret
+ %elt.head = insertelement <vscale x 1 x i32> poison, i32 %b, i32 0
+ %vb = shufflevector <vscale x 1 x i32> %elt.head, <vscale x 1 x i32> poison, <vscale x 1 x i32> zeroinitializer
+ %v = call <vscale x 1 x i32> @llvm.vp.sadd.sat.nxv1i32(<vscale x 1 x i32> %va, <vscale x 1 x i32> %vb, <vscale x 1 x i1> %m, i32 %evl)
+ ret <vscale x 1 x i32> %v
+}
+
+define <vscale x 1 x i32> @vsadd_vx_nxv1i32_unmasked(<vscale x 1 x i32> %va, i32 %b, i32 zeroext %evl) {
+; CHECK-LABEL: vsadd_vx_nxv1i32_unmasked:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
+; CHECK-NEXT: vsadd.vx v8, v8, a0
+; CHECK-NEXT: ret
+ %elt.head = insertelement <vscale x 1 x i32> poison, i32 %b, i32 0
+ %vb = shufflevector <vscale x 1 x i32> %elt.head, <vscale x 1 x i32> poison, <vscale x 1 x i32> zeroinitializer
+ %head = insertelement <vscale x 1 x i1> poison, i1 true, i32 0
+ %m = shufflevector <vscale x 1 x i1> %head, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
+ %v = call <vscale x 1 x i32> @llvm.vp.sadd.sat.nxv1i32(<vscale x 1 x i32> %va, <vscale x 1 x i32> %vb, <vscale x 1 x i1> %m, i32 %evl)
+ ret <vscale x 1 x i32> %v
+}
+
+define <vscale x 1 x i32> @vsadd_vi_nxv1i32(<vscale x 1 x i32> %va, <vscale x 1 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vsadd_vi_nxv1i32:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma
+; CHECK-NEXT: vsadd.vi v8, v8, -1, v0.t
+; CHECK-NEXT: ret
+ %elt.head = insertelement <vscale x 1 x i32> poison, i32 -1, i32 0
+ %vb = shufflevector <vscale x 1 x i32> %elt.head, <vscale x 1 x i32> poison, <vscale x 1 x i32> zeroinitializer
+ %v = call <vscale x 1 x i32> @llvm.vp.sadd.sat.nxv1i32(<vscale x 1 x i32> %va, <vscale x 1 x i32> %vb, <vscale x 1 x i1> %m, i32 %evl)
+ ret <vscale x 1 x i32> %v
+}
+
+define <vscale x 1 x i32> @vsadd_vi_nxv1i32_unmasked(<vscale x 1 x i32> %va, i32 zeroext %evl) {
+; CHECK-LABEL: vsadd_vi_nxv1i32_unmasked:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma
+; CHECK-NEXT: vsadd.vi v8, v8, -1
+; CHECK-NEXT: ret
+ %elt.head = insertelement <vscale x 1 x i32> poison, i32 -1, i32 0
+ %vb = shufflevector <vscale x 1 x i32> %elt.head, <vscale x 1 x i32> poison, <vscale x 1 x i32> zeroinitializer
+ %head = insertelement <vscale x 1 x i1> poison, i1 true, i32 0
+ %m = shufflevector <vscale x 1 x i1> %head, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
+ %v = call <vscale x 1 x i32> @llvm.vp.sadd.sat.nxv1i32(<vscale x 1 x i32> %va, <vscale x 1 x i32> %vb, <vscale x 1 x i1> %m, i32 %evl)
+ ret <vscale x 1 x i32> %v
+}
+
+declare <vscale x 2 x i32> @llvm.vp.sadd.sat.nxv2i32(<vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i1>, i32)
+
+define <vscale x 2 x i32> @vsadd_vv_nxv2i32(<vscale x 2 x i32> %va, <vscale x 2 x i32> %b, <vscale x 2 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vsadd_vv_nxv2i32:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma
+; CHECK-NEXT: vsadd.vv v8, v8, v9, v0.t
+; CHECK-NEXT: ret
+ %v = call <vscale x 2 x i32> @llvm.vp.sadd.sat.nxv2i32(<vscale x 2 x i32> %va, <vscale x 2 x i32> %b, <vscale x 2 x i1> %m, i32 %evl)
+ ret <vscale x 2 x i32> %v
+}
+
+define <vscale x 2 x i32> @vsadd_vv_nxv2i32_unmasked(<vscale x 2 x i32> %va, <vscale x 2 x i32> %b, i32 zeroext %evl) {
+; CHECK-LABEL: vsadd_vv_nxv2i32_unmasked:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma
+; CHECK-NEXT: vsadd.vv v8, v8, v9
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 2 x i1> poison, i1 true, i32 0
+ %m = shufflevector <vscale x 2 x i1> %head, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
+ %v = call <vscale x 2 x i32> @llvm.vp.sadd.sat.nxv2i32(<vscale x 2 x i32> %va, <vscale x 2 x i32> %b, <vscale x 2 x i1> %m, i32 %evl)
+ ret <vscale x 2 x i32> %v
+}
+
+define <vscale x 2 x i32> @vsadd_vx_nxv2i32(<vscale x 2 x i32> %va, i32 %b, <vscale x 2 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vsadd_vx_nxv2i32:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
+; CHECK-NEXT: vsadd.vx v8, v8, a0, v0.t
+; CHECK-NEXT: ret
+ %elt.head = insertelement <vscale x 2 x i32> poison, i32 %b, i32 0
+ %vb = shufflevector <vscale x 2 x i32> %elt.head, <vscale x 2 x i32> poison, <vscale x 2 x i32> zeroinitializer
+ %v = call <vscale x 2 x i32> @llvm.vp.sadd.sat.nxv2i32(<vscale x 2 x i32> %va, <vscale x 2 x i32> %vb, <vscale x 2 x i1> %m, i32 %evl)
+ ret <vscale x 2 x i32> %v
+}
+
+define <vscale x 2 x i32> @vsadd_vx_nxv2i32_unmasked(<vscale x 2 x i32> %va, i32 %b, i32 zeroext %evl) {
+; CHECK-LABEL: vsadd_vx_nxv2i32_unmasked:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
+; CHECK-NEXT: vsadd.vx v8, v8, a0
+; CHECK-NEXT: ret
+ %elt.head = insertelement <vscale x 2 x i32> poison, i32 %b, i32 0
+ %vb = shufflevector <vscale x 2 x i32> %elt.head, <vscale x 2 x i32> poison, <vscale x 2 x i32> zeroinitializer
+ %head = insertelement <vscale x 2 x i1> poison, i1 true, i32 0
+ %m = shufflevector <vscale x 2 x i1> %head, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
+ %v = call <vscale x 2 x i32> @llvm.vp.sadd.sat.nxv2i32(<vscale x 2 x i32> %va, <vscale x 2 x i32> %vb, <vscale x 2 x i1> %m, i32 %evl)
+ ret <vscale x 2 x i32> %v
+}
+
+define <vscale x 2 x i32> @vsadd_vi_nxv2i32(<vscale x 2 x i32> %va, <vscale x 2 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vsadd_vi_nxv2i32:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma
+; CHECK-NEXT: vsadd.vi v8, v8, -1, v0.t
+; CHECK-NEXT: ret
+ %elt.head = insertelement <vscale x 2 x i32> poison, i32 -1, i32 0
+ %vb = shufflevector <vscale x 2 x i32> %elt.head, <vscale x 2 x i32> poison, <vscale x 2 x i32> zeroinitializer
+ %v = call <vscale x 2 x i32> @llvm.vp.sadd.sat.nxv2i32(<vscale x 2 x i32> %va, <vscale x 2 x i32> %vb, <vscale x 2 x i1> %m, i32 %evl)
+ ret <vscale x 2 x i32> %v
+}
+
+define <vscale x 2 x i32> @vsadd_vi_nxv2i32_unmasked(<vscale x 2 x i32> %va, i32 zeroext %evl) {
+; CHECK-LABEL: vsadd_vi_nxv2i32_unmasked:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma
+; CHECK-NEXT: vsadd.vi v8, v8, -1
+; CHECK-NEXT: ret
+ %elt.head = insertelement <vscale x 2 x i32> poison, i32 -1, i32 0
+ %vb = shufflevector <vscale x 2 x i32> %elt.head, <vscale x 2 x i32> poison, <vscale x 2 x i32> zeroinitializer
+ %head = insertelement <vscale x 2 x i1> poison, i1 true, i32 0
+ %m = shufflevector <vscale x 2 x i1> %head, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
+ %v = call <vscale x 2 x i32> @llvm.vp.sadd.sat.nxv2i32(<vscale x 2 x i32> %va, <vscale x 2 x i32> %vb, <vscale x 2 x i1> %m, i32 %evl)
+ ret <vscale x 2 x i32> %v
+}
+
+declare <vscale x 4 x i32> @llvm.vp.sadd.sat.nxv4i32(<vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i1>, i32)
+
+define <vscale x 4 x i32> @vsadd_vv_nxv4i32(<vscale x 4 x i32> %va, <vscale x 4 x i32> %b, <vscale x 4 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vsadd_vv_nxv4i32:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma
+; CHECK-NEXT: vsadd.vv v8, v8, v10, v0.t
+; CHECK-NEXT: ret
+ %v = call <vscale x 4 x i32> @llvm.vp.sadd.sat.nxv4i32(<vscale x 4 x i32> %va, <vscale x 4 x i32> %b, <vscale x 4 x i1> %m, i32 %evl)
+ ret <vscale x 4 x i32> %v
+}
+
+define <vscale x 4 x i32> @vsadd_vv_nxv4i32_unmasked(<vscale x 4 x i32> %va, <vscale x 4 x i32> %b, i32 zeroext %evl) {
+; CHECK-LABEL: vsadd_vv_nxv4i32_unmasked:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma
+; CHECK-NEXT: vsadd.vv v8, v8, v10
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 4 x i1> poison, i1 true, i32 0
+ %m = shufflevector <vscale x 4 x i1> %head, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
+ %v = call <vscale x 4 x i32> @llvm.vp.sadd.sat.nxv4i32(<vscale x 4 x i32> %va, <vscale x 4 x i32> %b, <vscale x 4 x i1> %m, i32 %evl)
+ ret <vscale x 4 x i32> %v
+}
+
+define <vscale x 4 x i32> @vsadd_vx_nxv4i32(<vscale x 4 x i32> %va, i32 %b, <vscale x 4 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vsadd_vx_nxv4i32:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma
+; CHECK-NEXT: vsadd.vx v8, v8, a0, v0.t
+; CHECK-NEXT: ret
+ %elt.head = insertelement <vscale x 4 x i32> poison, i32 %b, i32 0
+ %vb = shufflevector <vscale x 4 x i32> %elt.head, <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer
+ %v = call <vscale x 4 x i32> @llvm.vp.sadd.sat.nxv4i32(<vscale x 4 x i32> %va, <vscale x 4 x i32> %vb, <vscale x 4 x i1> %m, i32 %evl)
+ ret <vscale x 4 x i32> %v
+}
+
+define <vscale x 4 x i32> @vsadd_vx_nxv4i32_unmasked(<vscale x 4 x i32> %va, i32 %b, i32 zeroext %evl) {
+; CHECK-LABEL: vsadd_vx_nxv4i32_unmasked:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma
+; CHECK-NEXT: vsadd.vx v8, v8, a0
+; CHECK-NEXT: ret
+ %elt.head = insertelement <vscale x 4 x i32> poison, i32 %b, i32 0
+ %vb = shufflevector <vscale x 4 x i32> %elt.head, <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer
+ %head = insertelement <vscale x 4 x i1> poison, i1 true, i32 0
+ %m = shufflevector <vscale x 4 x i1> %head, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
+ %v = call <vscale x 4 x i32> @llvm.vp.sadd.sat.nxv4i32(<vscale x 4 x i32> %va, <vscale x 4 x i32> %vb, <vscale x 4 x i1> %m, i32 %evl)
+ ret <vscale x 4 x i32> %v
+}
+
+define <vscale x 4 x i32> @vsadd_vi_nxv4i32(<vscale x 4 x i32> %va, <vscale x 4 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vsadd_vi_nxv4i32:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma
+; CHECK-NEXT: vsadd.vi v8, v8, -1, v0.t
+; CHECK-NEXT: ret
+ %elt.head = insertelement <vscale x 4 x i32> poison, i32 -1, i32 0
+ %vb = shufflevector <vscale x 4 x i32> %elt.head, <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer
+ %v = call <vscale x 4 x i32> @llvm.vp.sadd.sat.nxv4i32(<vscale x 4 x i32> %va, <vscale x 4 x i32> %vb, <vscale x 4 x i1> %m, i32 %evl)
+ ret <vscale x 4 x i32> %v
+}
+
+define <vscale x 4 x i32> @vsadd_vi_nxv4i32_unmasked(<vscale x 4 x i32> %va, i32 zeroext %evl) {
+; CHECK-LABEL: vsadd_vi_nxv4i32_unmasked:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma
+; CHECK-NEXT: vsadd.vi v8, v8, -1
+; CHECK-NEXT: ret
+ %elt.head = insertelement <vscale x 4 x i32> poison, i32 -1, i32 0
+ %vb = shufflevector <vscale x 4 x i32> %elt.head, <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer
+ %head = insertelement <vscale x 4 x i1> poison, i1 true, i32 0
+ %m = shufflevector <vscale x 4 x i1> %head, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
+ %v = call <vscale x 4 x i32> @llvm.vp.sadd.sat.nxv4i32(<vscale x 4 x i32> %va, <vscale x 4 x i32> %vb, <vscale x 4 x i1> %m, i32 %evl)
+ ret <vscale x 4 x i32> %v
+}
+
+declare <vscale x 8 x i32> @llvm.vp.sadd.sat.nxv8i32(<vscale x 8 x i32>, <vscale x 8 x i32>, <vscale x 8 x i1>, i32)
+
+define <vscale x 8 x i32> @vsadd_vv_nxv8i32(<vscale x 8 x i32> %va, <vscale x 8 x i32> %b, <vscale x 8 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vsadd_vv_nxv8i32:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma
+; CHECK-NEXT: vsadd.vv v8, v8, v12, v0.t
+; CHECK-NEXT: ret
+ %v = call <vscale x 8 x i32> @llvm.vp.sadd.sat.nxv8i32(<vscale x 8 x i32> %va, <vscale x 8 x i32> %b, <vscale x 8 x i1> %m, i32 %evl)
+ ret <vscale x 8 x i32> %v
+}
+
+define <vscale x 8 x i32> @vsadd_vv_nxv8i32_unmasked(<vscale x 8 x i32> %va, <vscale x 8 x i32> %b, i32 zeroext %evl) {
+; CHECK-LABEL: vsadd_vv_nxv8i32_unmasked:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma
+; CHECK-NEXT: vsadd.vv v8, v8, v12
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i1> poison, i1 true, i32 0
+ %m = shufflevector <vscale x 8 x i1> %head, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
+ %v = call <vscale x 8 x i32> @llvm.vp.sadd.sat.nxv8i32(<vscale x 8 x i32> %va, <vscale x 8 x i32> %b, <vscale x 8 x i1> %m, i32 %evl)
+ ret <vscale x 8 x i32> %v
+}
+
+define <vscale x 8 x i32> @vsadd_vx_nxv8i32(<vscale x 8 x i32> %va, i32 %b, <vscale x 8 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vsadd_vx_nxv8i32:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma
+; CHECK-NEXT: vsadd.vx v8, v8, a0, v0.t
+; CHECK-NEXT: ret
+ %elt.head = insertelement <vscale x 8 x i32> poison, i32 %b, i32 0
+ %vb = shufflevector <vscale x 8 x i32> %elt.head, <vscale x 8 x i32> poison, <vscale x 8 x i32> zeroinitializer
+ %v = call <vscale x 8 x i32> @llvm.vp.sadd.sat.nxv8i32(<vscale x 8 x i32> %va, <vscale x 8 x i32> %vb, <vscale x 8 x i1> %m, i32 %evl)
+ ret <vscale x 8 x i32> %v
+}
+
+define <vscale x 8 x i32> @vsadd_vx_nxv8i32_unmasked(<vscale x 8 x i32> %va, i32 %b, i32 zeroext %evl) {
+; CHECK-LABEL: vsadd_vx_nxv8i32_unmasked:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma
+; CHECK-NEXT: vsadd.vx v8, v8, a0
+; CHECK-NEXT: ret
+ %elt.head = insertelement <vscale x 8 x i32> poison, i32 %b, i32 0
+ %vb = shufflevector <vscale x 8 x i32> %elt.head, <vscale x 8 x i32> poison, <vscale x 8 x i32> zeroinitializer
+ %head = insertelement <vscale x 8 x i1> poison, i1 true, i32 0
+ %m = shufflevector <vscale x 8 x i1> %head, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
+ %v = call <vscale x 8 x i32> @llvm.vp.sadd.sat.nxv8i32(<vscale x 8 x i32> %va, <vscale x 8 x i32> %vb, <vscale x 8 x i1> %m, i32 %evl)
+ ret <vscale x 8 x i32> %v
+}
+
+define <vscale x 8 x i32> @vsadd_vi_nxv8i32(<vscale x 8 x i32> %va, <vscale x 8 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vsadd_vi_nxv8i32:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma
+; CHECK-NEXT: vsadd.vi v8, v8, -1, v0.t
+; CHECK-NEXT: ret
+ %elt.head = insertelement <vscale x 8 x i32> poison, i32 -1, i32 0
+ %vb = shufflevector <vscale x 8 x i32> %elt.head, <vscale x 8 x i32> poison, <vscale x 8 x i32> zeroinitializer
+ %v = call <vscale x 8 x i32> @llvm.vp.sadd.sat.nxv8i32(<vscale x 8 x i32> %va, <vscale x 8 x i32> %vb, <vscale x 8 x i1> %m, i32 %evl)
+ ret <vscale x 8 x i32> %v
+}
+
+define <vscale x 8 x i32> @vsadd_vi_nxv8i32_unmasked(<vscale x 8 x i32> %va, i32 zeroext %evl) {
+; CHECK-LABEL: vsadd_vi_nxv8i32_unmasked:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma
+; CHECK-NEXT: vsadd.vi v8, v8, -1
+; CHECK-NEXT: ret
+ %elt.head = insertelement <vscale x 8 x i32> poison, i32 -1, i32 0
+ %vb = shufflevector <vscale x 8 x i32> %elt.head, <vscale x 8 x i32> poison, <vscale x 8 x i32> zeroinitializer
+ %head = insertelement <vscale x 8 x i1> poison, i1 true, i32 0
+ %m = shufflevector <vscale x 8 x i1> %head, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
+ %v = call <vscale x 8 x i32> @llvm.vp.sadd.sat.nxv8i32(<vscale x 8 x i32> %va, <vscale x 8 x i32> %vb, <vscale x 8 x i1> %m, i32 %evl)
+ ret <vscale x 8 x i32> %v
+}
+
+declare <vscale x 16 x i32> @llvm.vp.sadd.sat.nxv16i32(<vscale x 16 x i32>, <vscale x 16 x i32>, <vscale x 16 x i1>, i32)
+
+define <vscale x 16 x i32> @vsadd_vv_nxv16i32(<vscale x 16 x i32> %va, <vscale x 16 x i32> %b, <vscale x 16 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vsadd_vv_nxv16i32:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma
+; CHECK-NEXT: vsadd.vv v8, v8, v16, v0.t
+; CHECK-NEXT: ret
+ %v = call <vscale x 16 x i32> @llvm.vp.sadd.sat.nxv16i32(<vscale x 16 x i32> %va, <vscale x 16 x i32> %b, <vscale x 16 x i1> %m, i32 %evl)
+ ret <vscale x 16 x i32> %v
+}
+
+define <vscale x 16 x i32> @vsadd_vv_nxv16i32_unmasked(<vscale x 16 x i32> %va, <vscale x 16 x i32> %b, i32 zeroext %evl) {
+; CHECK-LABEL: vsadd_vv_nxv16i32_unmasked:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma
+; CHECK-NEXT: vsadd.vv v8, v8, v16
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 16 x i1> poison, i1 true, i32 0
+ %m = shufflevector <vscale x 16 x i1> %head, <vscale x 16 x i1> poison, <vscale x 16 x i32> zeroinitializer
+ %v = call <vscale x 16 x i32> @llvm.vp.sadd.sat.nxv16i32(<vscale x 16 x i32> %va, <vscale x 16 x i32> %b, <vscale x 16 x i1> %m, i32 %evl)
+ ret <vscale x 16 x i32> %v
+}
+
+define <vscale x 16 x i32> @vsadd_vx_nxv16i32(<vscale x 16 x i32> %va, i32 %b, <vscale x 16 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vsadd_vx_nxv16i32:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, ma
+; CHECK-NEXT: vsadd.vx v8, v8, a0, v0.t
+; CHECK-NEXT: ret
+ %elt.head = insertelement <vscale x 16 x i32> poison, i32 %b, i32 0
+ %vb = shufflevector <vscale x 16 x i32> %elt.head, <vscale x 16 x i32> poison, <vscale x 16 x i32> zeroinitializer
+ %v = call <vscale x 16 x i32> @llvm.vp.sadd.sat.nxv16i32(<vscale x 16 x i32> %va, <vscale x 16 x i32> %vb, <vscale x 16 x i1> %m, i32 %evl)
+ ret <vscale x 16 x i32> %v
+}
+
+define <vscale x 16 x i32> @vsadd_vx_nxv16i32_unmasked(<vscale x 16 x i32> %va, i32 %b, i32 zeroext %evl) {
+; CHECK-LABEL: vsadd_vx_nxv16i32_unmasked:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, ma
+; CHECK-NEXT: vsadd.vx v8, v8, a0
+; CHECK-NEXT: ret
+ %elt.head = insertelement <vscale x 16 x i32> poison, i32 %b, i32 0
+ %vb = shufflevector <vscale x 16 x i32> %elt.head, <vscale x 16 x i32> poison, <vscale x 16 x i32> zeroinitializer
+ %head = insertelement <vscale x 16 x i1> poison, i1 true, i32 0
+ %m = shufflevector <vscale x 16 x i1> %head, <vscale x 16 x i1> poison, <vscale x 16 x i32> zeroinitializer
+ %v = call <vscale x 16 x i32> @llvm.vp.sadd.sat.nxv16i32(<vscale x 16 x i32> %va, <vscale x 16 x i32> %vb, <vscale x 16 x i1> %m, i32 %evl)
+ ret <vscale x 16 x i32> %v
+}
+
+define <vscale x 16 x i32> @vsadd_vi_nxv16i32(<vscale x 16 x i32> %va, <vscale x 16 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vsadd_vi_nxv16i32:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma
+; CHECK-NEXT: vsadd.vi v8, v8, -1, v0.t
+; CHECK-NEXT: ret
+ %elt.head = insertelement <vscale x 16 x i32> poison, i32 -1, i32 0
+ %vb = shufflevector <vscale x 16 x i32> %elt.head, <vscale x 16 x i32> poison, <vscale x 16 x i32> zeroinitializer
+ %v = call <vscale x 16 x i32> @llvm.vp.sadd.sat.nxv16i32(<vscale x 16 x i32> %va, <vscale x 16 x i32> %vb, <vscale x 16 x i1> %m, i32 %evl)
+ ret <vscale x 16 x i32> %v
+}
+
+define <vscale x 16 x i32> @vsadd_vi_nxv16i32_unmasked(<vscale x 16 x i32> %va, i32 zeroext %evl) {
+; CHECK-LABEL: vsadd_vi_nxv16i32_unmasked:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma
+; CHECK-NEXT: vsadd.vi v8, v8, -1
+; CHECK-NEXT: ret
+ %elt.head = insertelement <vscale x 16 x i32> poison, i32 -1, i32 0
+ %vb = shufflevector <vscale x 16 x i32> %elt.head, <vscale x 16 x i32> poison, <vscale x 16 x i32> zeroinitializer
+ %head = insertelement <vscale x 16 x i1> poison, i1 true, i32 0
+ %m = shufflevector <vscale x 16 x i1> %head, <vscale x 16 x i1> poison, <vscale x 16 x i32> zeroinitializer
+ %v = call <vscale x 16 x i32> @llvm.vp.sadd.sat.nxv16i32(<vscale x 16 x i32> %va, <vscale x 16 x i32> %vb, <vscale x 16 x i1> %m, i32 %evl)
+ ret <vscale x 16 x i32> %v
+}
+
+; Test that split-legalization works when the mask needs manual splitting.
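+; nxv32i32 has no legal single register-group type, so the operation is split
+; into two halves handled at LMUL=8; the checks below expect the upper half of
+; the v0 mask to be extracted with vslidedown and the EVL to be divided between
+; the two halves before the second vsadd.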
+
+declare <vscale x 32 x i32> @llvm.vp.sadd.sat.nxv32i32(<vscale x 32 x i32>, <vscale x 32 x i32>, <vscale x 32 x i1>, i32)
+
+define <vscale x 32 x i32> @vsadd_vi_nxv32i32(<vscale x 32 x i32> %va, <vscale x 32 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vsadd_vi_nxv32i32:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vmv1r.v v24, v0
+; CHECK-NEXT: csrr a1, vlenb
+; CHECK-NEXT: srli a2, a1, 2
+; CHECK-NEXT: vsetvli a3, zero, e8, mf2, ta, ma
+; CHECK-NEXT: vslidedown.vx v0, v0, a2
+; CHECK-NEXT: slli a1, a1, 1
+; CHECK-NEXT: sub a2, a0, a1
+; CHECK-NEXT: sltu a3, a0, a2
+; CHECK-NEXT: addi a3, a3, -1
+; CHECK-NEXT: and a2, a3, a2
+; CHECK-NEXT: vsetvli zero, a2, e32, m8, ta, ma
+; CHECK-NEXT: vsadd.vi v16, v16, -1, v0.t
+; CHECK-NEXT: bltu a0, a1, .LBB118_2
+; CHECK-NEXT: # %bb.1:
+; CHECK-NEXT: mv a0, a1
+; CHECK-NEXT: .LBB118_2:
+; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma
+; CHECK-NEXT: vmv1r.v v0, v24
+; CHECK-NEXT: vsadd.vi v8, v8, -1, v0.t
+; CHECK-NEXT: ret
+ %elt.head = insertelement <vscale x 32 x i32> poison, i32 -1, i32 0
+ %vb = shufflevector <vscale x 32 x i32> %elt.head, <vscale x 32 x i32> poison, <vscale x 32 x i32> zeroinitializer
+ %v = call <vscale x 32 x i32> @llvm.vp.sadd.sat.nxv32i32(<vscale x 32 x i32> %va, <vscale x 32 x i32> %vb, <vscale x 32 x i1> %m, i32 %evl)
+ ret <vscale x 32 x i32> %v
+}
+
+define <vscale x 32 x i32> @vsadd_vi_nxv32i32_unmasked(<vscale x 32 x i32> %va, i32 zeroext %evl) {
+; CHECK-LABEL: vsadd_vi_nxv32i32_unmasked:
+; CHECK: # %bb.0:
+; CHECK-NEXT: csrr a1, vlenb
+; CHECK-NEXT: slli a1, a1, 1
+; CHECK-NEXT: sub a2, a0, a1
+; CHECK-NEXT: sltu a3, a0, a2
+; CHECK-NEXT: addi a3, a3, -1
+; CHECK-NEXT: and a2, a3, a2
+; CHECK-NEXT: vsetvli zero, a2, e32, m8, ta, ma
+; CHECK-NEXT: vsadd.vi v16, v16, -1
+; CHECK-NEXT: bltu a0, a1, .LBB119_2
+; CHECK-NEXT: # %bb.1:
+; CHECK-NEXT: mv a0, a1
+; CHECK-NEXT: .LBB119_2:
+; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma
+; CHECK-NEXT: vsadd.vi v8, v8, -1
+; CHECK-NEXT: ret
+ %elt.head = insertelement <vscale x 32 x i32> poison, i32 -1, i32 0
+ %vb = shufflevector <vscale x 32 x i32> %elt.head, <vscale x 32 x i32> poison, <vscale x 32 x i32> zeroinitializer
+ %head = insertelement <vscale x 32 x i1> poison, i1 true, i32 0
+ %m = shufflevector <vscale x 32 x i1> %head, <vscale x 32 x i1> poison, <vscale x 32 x i32> zeroinitializer
+ %v = call <vscale x 32 x i32> @llvm.vp.sadd.sat.nxv32i32(<vscale x 32 x i32> %va, <vscale x 32 x i32> %vb, <vscale x 32 x i1> %m, i32 %evl)
+ ret <vscale x 32 x i32> %v
+}
+
+declare <vscale x 1 x i64> @llvm.vp.sadd.sat.nxv1i64(<vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i1>, i32)
+
+define <vscale x 1 x i64> @vsadd_vv_nxv1i64(<vscale x 1 x i64> %va, <vscale x 1 x i64> %b, <vscale x 1 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vsadd_vv_nxv1i64:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma
+; CHECK-NEXT: vsadd.vv v8, v8, v9, v0.t
+; CHECK-NEXT: ret
+ %v = call <vscale x 1 x i64> @llvm.vp.sadd.sat.nxv1i64(<vscale x 1 x i64> %va, <vscale x 1 x i64> %b, <vscale x 1 x i1> %m, i32 %evl)
+ ret <vscale x 1 x i64> %v
+}
+
+define <vscale x 1 x i64> @vsadd_vv_nxv1i64_unmasked(<vscale x 1 x i64> %va, <vscale x 1 x i64> %b, i32 zeroext %evl) {
+; CHECK-LABEL: vsadd_vv_nxv1i64_unmasked:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma
+; CHECK-NEXT: vsadd.vv v8, v8, v9
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 1 x i1> poison, i1 true, i32 0
+ %m = shufflevector <vscale x 1 x i1> %head, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
+ %v = call <vscale x 1 x i64> @llvm.vp.sadd.sat.nxv1i64(<vscale x 1 x i64> %va, <vscale x 1 x i64> %b, <vscale x 1 x i1> %m, i32 %evl)
+ ret <vscale x 1 x i64> %v
+}
+
+define <vscale x 1 x i64> @vsadd_vx_nxv1i64(<vscale x 1 x i64> %va, i64 %b, <vscale x 1 x i1> %m, i32 zeroext %evl) {
+; RV32-LABEL: vsadd_vx_nxv1i64:
+; RV32: # %bb.0:
+; RV32-NEXT: addi sp, sp, -16
+; RV32-NEXT: .cfi_def_cfa_offset 16
+; RV32-NEXT: sw a1, 12(sp)
+; RV32-NEXT: sw a0, 8(sp)
+; RV32-NEXT: addi a0, sp, 8
+; RV32-NEXT: vsetvli a1, zero, e64, m1, ta, ma
+; RV32-NEXT: vlse64.v v9, (a0), zero
+; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, ma
+; RV32-NEXT: vsadd.vv v8, v8, v9, v0.t
+; RV32-NEXT: addi sp, sp, 16
+; RV32-NEXT: ret
+;
+; RV64-LABEL: vsadd_vx_nxv1i64:
+; RV64: # %bb.0:
+; RV64-NEXT: vsetvli zero, a1, e64, m1, ta, ma
+; RV64-NEXT: vsadd.vx v8, v8, a0, v0.t
+; RV64-NEXT: ret
+ %elt.head = insertelement <vscale x 1 x i64> poison, i64 %b, i32 0
+ %vb = shufflevector <vscale x 1 x i64> %elt.head, <vscale x 1 x i64> poison, <vscale x 1 x i32> zeroinitializer
+ %v = call <vscale x 1 x i64> @llvm.vp.sadd.sat.nxv1i64(<vscale x 1 x i64> %va, <vscale x 1 x i64> %vb, <vscale x 1 x i1> %m, i32 %evl)
+ ret <vscale x 1 x i64> %v
+}
+
+define <vscale x 1 x i64> @vsadd_vx_nxv1i64_unmasked(<vscale x 1 x i64> %va, i64 %b, i32 zeroext %evl) {
+; RV32-LABEL: vsadd_vx_nxv1i64_unmasked:
+; RV32: # %bb.0:
+; RV32-NEXT: addi sp, sp, -16
+; RV32-NEXT: .cfi_def_cfa_offset 16
+; RV32-NEXT: sw a1, 12(sp)
+; RV32-NEXT: sw a0, 8(sp)
+; RV32-NEXT: addi a0, sp, 8
+; RV32-NEXT: vsetvli a1, zero, e64, m1, ta, ma
+; RV32-NEXT: vlse64.v v9, (a0), zero
+; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, ma
+; RV32-NEXT: vsadd.vv v8, v8, v9
+; RV32-NEXT: addi sp, sp, 16
+; RV32-NEXT: ret
+;
+; RV64-LABEL: vsadd_vx_nxv1i64_unmasked:
+; RV64: # %bb.0:
+; RV64-NEXT: vsetvli zero, a1, e64, m1, ta, ma
+; RV64-NEXT: vsadd.vx v8, v8, a0
+; RV64-NEXT: ret
+ %elt.head = insertelement <vscale x 1 x i64> poison, i64 %b, i32 0
+ %vb = shufflevector <vscale x 1 x i64> %elt.head, <vscale x 1 x i64> poison, <vscale x 1 x i32> zeroinitializer
+ %head = insertelement <vscale x 1 x i1> poison, i1 true, i32 0
+ %m = shufflevector <vscale x 1 x i1> %head, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
+ %v = call <vscale x 1 x i64> @llvm.vp.sadd.sat.nxv1i64(<vscale x 1 x i64> %va, <vscale x 1 x i64> %vb, <vscale x 1 x i1> %m, i32 %evl)
+ ret <vscale x 1 x i64> %v
+}
+
+define <vscale x 1 x i64> @vsadd_vi_nxv1i64(<vscale x 1 x i64> %va, <vscale x 1 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vsadd_vi_nxv1i64:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma
+; CHECK-NEXT: vsadd.vi v8, v8, -1, v0.t
+; CHECK-NEXT: ret
+ %elt.head = insertelement <vscale x 1 x i64> poison, i64 -1, i32 0
+ %vb = shufflevector <vscale x 1 x i64> %elt.head, <vscale x 1 x i64> poison, <vscale x 1 x i32> zeroinitializer
+ %v = call <vscale x 1 x i64> @llvm.vp.sadd.sat.nxv1i64(<vscale x 1 x i64> %va, <vscale x 1 x i64> %vb, <vscale x 1 x i1> %m, i32 %evl)
+ ret <vscale x 1 x i64> %v
+}
+
+define <vscale x 1 x i64> @vsadd_vi_nxv1i64_unmasked(<vscale x 1 x i64> %va, i32 zeroext %evl) {
+; CHECK-LABEL: vsadd_vi_nxv1i64_unmasked:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma
+; CHECK-NEXT: vsadd.vi v8, v8, -1
+; CHECK-NEXT: ret
+ %elt.head = insertelement <vscale x 1 x i64> poison, i64 -1, i32 0
+ %vb = shufflevector <vscale x 1 x i64> %elt.head, <vscale x 1 x i64> poison, <vscale x 1 x i32> zeroinitializer
+ %head = insertelement <vscale x 1 x i1> poison, i1 true, i32 0
+ %m = shufflevector <vscale x 1 x i1> %head, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
+ %v = call <vscale x 1 x i64> @llvm.vp.sadd.sat.nxv1i64(<vscale x 1 x i64> %va, <vscale x 1 x i64> %vb, <vscale x 1 x i1> %m, i32 %evl)
+ ret <vscale x 1 x i64> %v
+}
+
+declare <vscale x 2 x i64> @llvm.vp.sadd.sat.nxv2i64(<vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i1>, i32)
+
+define <vscale x 2 x i64> @vsadd_vv_nxv2i64(<vscale x 2 x i64> %va, <vscale x 2 x i64> %b, <vscale x 2 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vsadd_vv_nxv2i64:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma
+; CHECK-NEXT: vsadd.vv v8, v8, v10, v0.t
+; CHECK-NEXT: ret
+ %v = call <vscale x 2 x i64> @llvm.vp.sadd.sat.nxv2i64(<vscale x 2 x i64> %va, <vscale x 2 x i64> %b, <vscale x 2 x i1> %m, i32 %evl)
+ ret <vscale x 2 x i64> %v
+}
+
+define <vscale x 2 x i64> @vsadd_vv_nxv2i64_unmasked(<vscale x 2 x i64> %va, <vscale x 2 x i64> %b, i32 zeroext %evl) {
+; CHECK-LABEL: vsadd_vv_nxv2i64_unmasked:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma
+; CHECK-NEXT: vsadd.vv v8, v8, v10
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 2 x i1> poison, i1 true, i32 0
+ %m = shufflevector <vscale x 2 x i1> %head, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
+ %v = call <vscale x 2 x i64> @llvm.vp.sadd.sat.nxv2i64(<vscale x 2 x i64> %va, <vscale x 2 x i64> %b, <vscale x 2 x i1> %m, i32 %evl)
+ ret <vscale x 2 x i64> %v
+}
+
+define <vscale x 2 x i64> @vsadd_vx_nxv2i64(<vscale x 2 x i64> %va, i64 %b, <vscale x 2 x i1> %m, i32 zeroext %evl) {
+; RV32-LABEL: vsadd_vx_nxv2i64:
+; RV32: # %bb.0:
+; RV32-NEXT: addi sp, sp, -16
+; RV32-NEXT: .cfi_def_cfa_offset 16
+; RV32-NEXT: sw a1, 12(sp)
+; RV32-NEXT: sw a0, 8(sp)
+; RV32-NEXT: addi a0, sp, 8
+; RV32-NEXT: vsetvli a1, zero, e64, m2, ta, ma
+; RV32-NEXT: vlse64.v v10, (a0), zero
+; RV32-NEXT: vsetvli zero, a2, e64, m2, ta, ma
+; RV32-NEXT: vsadd.vv v8, v8, v10, v0.t
+; RV32-NEXT: addi sp, sp, 16
+; RV32-NEXT: ret
+;
+; RV64-LABEL: vsadd_vx_nxv2i64:
+; RV64: # %bb.0:
+; RV64-NEXT: vsetvli zero, a1, e64, m2, ta, ma
+; RV64-NEXT: vsadd.vx v8, v8, a0, v0.t
+; RV64-NEXT: ret
+ %elt.head = insertelement <vscale x 2 x i64> poison, i64 %b, i32 0
+ %vb = shufflevector <vscale x 2 x i64> %elt.head, <vscale x 2 x i64> poison, <vscale x 2 x i32> zeroinitializer
+ %v = call <vscale x 2 x i64> @llvm.vp.sadd.sat.nxv2i64(<vscale x 2 x i64> %va, <vscale x 2 x i64> %vb, <vscale x 2 x i1> %m, i32 %evl)
+ ret <vscale x 2 x i64> %v
+}
+
+define <vscale x 2 x i64> @vsadd_vx_nxv2i64_unmasked(<vscale x 2 x i64> %va, i64 %b, i32 zeroext %evl) {
+; RV32-LABEL: vsadd_vx_nxv2i64_unmasked:
+; RV32: # %bb.0:
+; RV32-NEXT: addi sp, sp, -16
+; RV32-NEXT: .cfi_def_cfa_offset 16
+; RV32-NEXT: sw a1, 12(sp)
+; RV32-NEXT: sw a0, 8(sp)
+; RV32-NEXT: addi a0, sp, 8
+; RV32-NEXT: vsetvli a1, zero, e64, m2, ta, ma
+; RV32-NEXT: vlse64.v v10, (a0), zero
+; RV32-NEXT: vsetvli zero, a2, e64, m2, ta, ma
+; RV32-NEXT: vsadd.vv v8, v8, v10
+; RV32-NEXT: addi sp, sp, 16
+; RV32-NEXT: ret
+;
+; RV64-LABEL: vsadd_vx_nxv2i64_unmasked:
+; RV64: # %bb.0:
+; RV64-NEXT: vsetvli zero, a1, e64, m2, ta, ma
+; RV64-NEXT: vsadd.vx v8, v8, a0
+; RV64-NEXT: ret
+ %elt.head = insertelement <vscale x 2 x i64> poison, i64 %b, i32 0
+ %vb = shufflevector <vscale x 2 x i64> %elt.head, <vscale x 2 x i64> poison, <vscale x 2 x i32> zeroinitializer
+ %head = insertelement <vscale x 2 x i1> poison, i1 true, i32 0
+ %m = shufflevector <vscale x 2 x i1> %head, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
+ %v = call <vscale x 2 x i64> @llvm.vp.sadd.sat.nxv2i64(<vscale x 2 x i64> %va, <vscale x 2 x i64> %vb, <vscale x 2 x i1> %m, i32 %evl)
+ ret <vscale x 2 x i64> %v
+}
+
+define <vscale x 2 x i64> @vsadd_vi_nxv2i64(<vscale x 2 x i64> %va, <vscale x 2 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vsadd_vi_nxv2i64:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma
+; CHECK-NEXT: vsadd.vi v8, v8, -1, v0.t
+; CHECK-NEXT: ret
+ %elt.head = insertelement <vscale x 2 x i64> poison, i64 -1, i32 0
+ %vb = shufflevector <vscale x 2 x i64> %elt.head, <vscale x 2 x i64> poison, <vscale x 2 x i32> zeroinitializer
+ %v = call <vscale x 2 x i64> @llvm.vp.sadd.sat.nxv2i64(<vscale x 2 x i64> %va, <vscale x 2 x i64> %vb, <vscale x 2 x i1> %m, i32 %evl)
+ ret <vscale x 2 x i64> %v
+}
+
+define <vscale x 2 x i64> @vsadd_vi_nxv2i64_unmasked(<vscale x 2 x i64> %va, i32 zeroext %evl) {
+; CHECK-LABEL: vsadd_vi_nxv2i64_unmasked:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma
+; CHECK-NEXT: vsadd.vi v8, v8, -1
+; CHECK-NEXT: ret
+ %elt.head = insertelement <vscale x 2 x i64> poison, i64 -1, i32 0
+ %vb = shufflevector <vscale x 2 x i64> %elt.head, <vscale x 2 x i64> poison, <vscale x 2 x i32> zeroinitializer
+ %head = insertelement <vscale x 2 x i1> poison, i1 true, i32 0
+ %m = shufflevector <vscale x 2 x i1> %head, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
+ %v = call <vscale x 2 x i64> @llvm.vp.sadd.sat.nxv2i64(<vscale x 2 x i64> %va, <vscale x 2 x i64> %vb, <vscale x 2 x i1> %m, i32 %evl)
+ ret <vscale x 2 x i64> %v
+}
+
+declare <vscale x 4 x i64> @llvm.vp.sadd.sat.nxv4i64(<vscale x 4 x i64>, <vscale x 4 x i64>, <vscale x 4 x i1>, i32)
+
+define <vscale x 4 x i64> @vsadd_vv_nxv4i64(<vscale x 4 x i64> %va, <vscale x 4 x i64> %b, <vscale x 4 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vsadd_vv_nxv4i64:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma
+; CHECK-NEXT: vsadd.vv v8, v8, v12, v0.t
+; CHECK-NEXT: ret
+ %v = call <vscale x 4 x i64> @llvm.vp.sadd.sat.nxv4i64(<vscale x 4 x i64> %va, <vscale x 4 x i64> %b, <vscale x 4 x i1> %m, i32 %evl)
+ ret <vscale x 4 x i64> %v
+}
+
+define <vscale x 4 x i64> @vsadd_vv_nxv4i64_unmasked(<vscale x 4 x i64> %va, <vscale x 4 x i64> %b, i32 zeroext %evl) {
+; CHECK-LABEL: vsadd_vv_nxv4i64_unmasked:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma
+; CHECK-NEXT: vsadd.vv v8, v8, v12
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 4 x i1> poison, i1 true, i32 0
+ %m = shufflevector <vscale x 4 x i1> %head, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
+ %v = call <vscale x 4 x i64> @llvm.vp.sadd.sat.nxv4i64(<vscale x 4 x i64> %va, <vscale x 4 x i64> %b, <vscale x 4 x i1> %m, i32 %evl)
+ ret <vscale x 4 x i64> %v
+}
+
+define <vscale x 4 x i64> @vsadd_vx_nxv4i64(<vscale x 4 x i64> %va, i64 %b, <vscale x 4 x i1> %m, i32 zeroext %evl) {
+; RV32-LABEL: vsadd_vx_nxv4i64:
+; RV32: # %bb.0:
+; RV32-NEXT: addi sp, sp, -16
+; RV32-NEXT: .cfi_def_cfa_offset 16
+; RV32-NEXT: sw a1, 12(sp)
+; RV32-NEXT: sw a0, 8(sp)
+; RV32-NEXT: addi a0, sp, 8
+; RV32-NEXT: vsetvli a1, zero, e64, m4, ta, ma
+; RV32-NEXT: vlse64.v v12, (a0), zero
+; RV32-NEXT: vsetvli zero, a2, e64, m4, ta, ma
+; RV32-NEXT: vsadd.vv v8, v8, v12, v0.t
+; RV32-NEXT: addi sp, sp, 16
+; RV32-NEXT: ret
+;
+; RV64-LABEL: vsadd_vx_nxv4i64:
+; RV64: # %bb.0:
+; RV64-NEXT: vsetvli zero, a1, e64, m4, ta, ma
+; RV64-NEXT: vsadd.vx v8, v8, a0, v0.t
+; RV64-NEXT: ret
+ %elt.head = insertelement <vscale x 4 x i64> poison, i64 %b, i32 0
+ %vb = shufflevector <vscale x 4 x i64> %elt.head, <vscale x 4 x i64> poison, <vscale x 4 x i32> zeroinitializer
+ %v = call <vscale x 4 x i64> @llvm.vp.sadd.sat.nxv4i64(<vscale x 4 x i64> %va, <vscale x 4 x i64> %vb, <vscale x 4 x i1> %m, i32 %evl)
+ ret <vscale x 4 x i64> %v
+}
+
+define <vscale x 4 x i64> @vsadd_vx_nxv4i64_unmasked(<vscale x 4 x i64> %va, i64 %b, i32 zeroext %evl) {
+; RV32-LABEL: vsadd_vx_nxv4i64_unmasked:
+; RV32: # %bb.0:
+; RV32-NEXT: addi sp, sp, -16
+; RV32-NEXT: .cfi_def_cfa_offset 16
+; RV32-NEXT: sw a1, 12(sp)
+; RV32-NEXT: sw a0, 8(sp)
+; RV32-NEXT: addi a0, sp, 8
+; RV32-NEXT: vsetvli a1, zero, e64, m4, ta, ma
+; RV32-NEXT: vlse64.v v12, (a0), zero
+; RV32-NEXT: vsetvli zero, a2, e64, m4, ta, ma
+; RV32-NEXT: vsadd.vv v8, v8, v12
+; RV32-NEXT: addi sp, sp, 16
+; RV32-NEXT: ret
+;
+; RV64-LABEL: vsadd_vx_nxv4i64_unmasked:
+; RV64: # %bb.0:
+; RV64-NEXT: vsetvli zero, a1, e64, m4, ta, ma
+; RV64-NEXT: vsadd.vx v8, v8, a0
+; RV64-NEXT: ret
+ %elt.head = insertelement <vscale x 4 x i64> poison, i64 %b, i32 0
+ %vb = shufflevector <vscale x 4 x i64> %elt.head, <vscale x 4 x i64> poison, <vscale x 4 x i32> zeroinitializer
+ %head = insertelement <vscale x 4 x i1> poison, i1 true, i32 0
+ %m = shufflevector <vscale x 4 x i1> %head, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
+ %v = call <vscale x 4 x i64> @llvm.vp.sadd.sat.nxv4i64(<vscale x 4 x i64> %va, <vscale x 4 x i64> %vb, <vscale x 4 x i1> %m, i32 %evl)
+ ret <vscale x 4 x i64> %v
+}
+
+define <vscale x 4 x i64> @vsadd_vi_nxv4i64(<vscale x 4 x i64> %va, <vscale x 4 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vsadd_vi_nxv4i64:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma
+; CHECK-NEXT: vsadd.vi v8, v8, -1, v0.t
+; CHECK-NEXT: ret
+ %elt.head = insertelement <vscale x 4 x i64> poison, i64 -1, i32 0
+ %vb = shufflevector <vscale x 4 x i64> %elt.head, <vscale x 4 x i64> poison, <vscale x 4 x i32> zeroinitializer
+ %v = call <vscale x 4 x i64> @llvm.vp.sadd.sat.nxv4i64(<vscale x 4 x i64> %va, <vscale x 4 x i64> %vb, <vscale x 4 x i1> %m, i32 %evl)
+ ret <vscale x 4 x i64> %v
+}
+
+define <vscale x 4 x i64> @vsadd_vi_nxv4i64_unmasked(<vscale x 4 x i64> %va, i32 zeroext %evl) {
+; CHECK-LABEL: vsadd_vi_nxv4i64_unmasked:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma
+; CHECK-NEXT: vsadd.vi v8, v8, -1
+; CHECK-NEXT: ret
+ %elt.head = insertelement <vscale x 4 x i64> poison, i64 -1, i32 0
+ %vb = shufflevector <vscale x 4 x i64> %elt.head, <vscale x 4 x i64> poison, <vscale x 4 x i32> zeroinitializer
+ %head = insertelement <vscale x 4 x i1> poison, i1 true, i32 0
+ %m = shufflevector <vscale x 4 x i1> %head, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
+ %v = call <vscale x 4 x i64> @llvm.vp.sadd.sat.nxv4i64(<vscale x 4 x i64> %va, <vscale x 4 x i64> %vb, <vscale x 4 x i1> %m, i32 %evl)
+ ret <vscale x 4 x i64> %v
+}
+
+declare <vscale x 8 x i64> @llvm.vp.sadd.sat.nxv8i64(<vscale x 8 x i64>, <vscale x 8 x i64>, <vscale x 8 x i1>, i32)
+
+define <vscale x 8 x i64> @vsadd_vv_nxv8i64(<vscale x 8 x i64> %va, <vscale x 8 x i64> %b, <vscale x 8 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vsadd_vv_nxv8i64:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
+; CHECK-NEXT: vsadd.vv v8, v8, v16, v0.t
+; CHECK-NEXT: ret
+ %v = call <vscale x 8 x i64> @llvm.vp.sadd.sat.nxv8i64(<vscale x 8 x i64> %va, <vscale x 8 x i64> %b, <vscale x 8 x i1> %m, i32 %evl)
+ ret <vscale x 8 x i64> %v
+}
+
+define <vscale x 8 x i64> @vsadd_vv_nxv8i64_unmasked(<vscale x 8 x i64> %va, <vscale x 8 x i64> %b, i32 zeroext %evl) {
+; CHECK-LABEL: vsadd_vv_nxv8i64_unmasked:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
+; CHECK-NEXT: vsadd.vv v8, v8, v16
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i1> poison, i1 true, i32 0
+ %m = shufflevector <vscale x 8 x i1> %head, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
+ %v = call <vscale x 8 x i64> @llvm.vp.sadd.sat.nxv8i64(<vscale x 8 x i64> %va, <vscale x 8 x i64> %b, <vscale x 8 x i1> %m, i32 %evl)
+ ret <vscale x 8 x i64> %v
+}
+
+define <vscale x 8 x i64> @vsadd_vx_nxv8i64(<vscale x 8 x i64> %va, i64 %b, <vscale x 8 x i1> %m, i32 zeroext %evl) {
+; RV32-LABEL: vsadd_vx_nxv8i64:
+; RV32: # %bb.0:
+; RV32-NEXT: addi sp, sp, -16
+; RV32-NEXT: .cfi_def_cfa_offset 16
+; RV32-NEXT: sw a1, 12(sp)
+; RV32-NEXT: sw a0, 8(sp)
+; RV32-NEXT: addi a0, sp, 8
+; RV32-NEXT: vsetvli a1, zero, e64, m8, ta, ma
+; RV32-NEXT: vlse64.v v16, (a0), zero
+; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, ma
+; RV32-NEXT: vsadd.vv v8, v8, v16, v0.t
+; RV32-NEXT: addi sp, sp, 16
+; RV32-NEXT: ret
+;
+; RV64-LABEL: vsadd_vx_nxv8i64:
+; RV64: # %bb.0:
+; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, ma
+; RV64-NEXT: vsadd.vx v8, v8, a0, v0.t
+; RV64-NEXT: ret
+ %elt.head = insertelement <vscale x 8 x i64> poison, i64 %b, i32 0
+ %vb = shufflevector <vscale x 8 x i64> %elt.head, <vscale x 8 x i64> poison, <vscale x 8 x i32> zeroinitializer
+ %v = call <vscale x 8 x i64> @llvm.vp.sadd.sat.nxv8i64(<vscale x 8 x i64> %va, <vscale x 8 x i64> %vb, <vscale x 8 x i1> %m, i32 %evl)
+ ret <vscale x 8 x i64> %v
+}
+
+define <vscale x 8 x i64> @vsadd_vx_nxv8i64_unmasked(<vscale x 8 x i64> %va, i64 %b, i32 zeroext %evl) {
+; RV32-LABEL: vsadd_vx_nxv8i64_unmasked:
+; RV32: # %bb.0:
+; RV32-NEXT: addi sp, sp, -16
+; RV32-NEXT: .cfi_def_cfa_offset 16
+; RV32-NEXT: sw a1, 12(sp)
+; RV32-NEXT: sw a0, 8(sp)
+; RV32-NEXT: addi a0, sp, 8
+; RV32-NEXT: vsetvli a1, zero, e64, m8, ta, ma
+; RV32-NEXT: vlse64.v v16, (a0), zero
+; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, ma
+; RV32-NEXT: vsadd.vv v8, v8, v16
+; RV32-NEXT: addi sp, sp, 16
+; RV32-NEXT: ret
+;
+; RV64-LABEL: vsadd_vx_nxv8i64_unmasked:
+; RV64: # %bb.0:
+; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, ma
+; RV64-NEXT: vsadd.vx v8, v8, a0
+; RV64-NEXT: ret
+ %elt.head = insertelement <vscale x 8 x i64> poison, i64 %b, i32 0
+ %vb = shufflevector <vscale x 8 x i64> %elt.head, <vscale x 8 x i64> poison, <vscale x 8 x i32> zeroinitializer
+ %head = insertelement <vscale x 8 x i1> poison, i1 true, i32 0
+ %m = shufflevector <vscale x 8 x i1> %head, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
+ %v = call <vscale x 8 x i64> @llvm.vp.sadd.sat.nxv8i64(<vscale x 8 x i64> %va, <vscale x 8 x i64> %vb, <vscale x 8 x i1> %m, i32 %evl)
+ ret <vscale x 8 x i64> %v
+}
+
+define <vscale x 8 x i64> @vsadd_vi_nxv8i64(<vscale x 8 x i64> %va, <vscale x 8 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vsadd_vi_nxv8i64:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
+; CHECK-NEXT: vsadd.vi v8, v8, -1, v0.t
+; CHECK-NEXT: ret
+ %elt.head = insertelement <vscale x 8 x i64> poison, i64 -1, i32 0
+ %vb = shufflevector <vscale x 8 x i64> %elt.head, <vscale x 8 x i64> poison, <vscale x 8 x i32> zeroinitializer
+ %v = call <vscale x 8 x i64> @llvm.vp.sadd.sat.nxv8i64(<vscale x 8 x i64> %va, <vscale x 8 x i64> %vb, <vscale x 8 x i1> %m, i32 %evl)
+ ret <vscale x 8 x i64> %v
+}
+
+define <vscale x 8 x i64> @vsadd_vi_nxv8i64_unmasked(<vscale x 8 x i64> %va, i32 zeroext %evl) {
+; CHECK-LABEL: vsadd_vi_nxv8i64_unmasked:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
+; CHECK-NEXT: vsadd.vi v8, v8, -1
+; CHECK-NEXT: ret
+ %elt.head = insertelement <vscale x 8 x i64> poison, i64 -1, i32 0
+ %vb = shufflevector <vscale x 8 x i64> %elt.head, <vscale x 8 x i64> poison, <vscale x 8 x i32> zeroinitializer
+ %head = insertelement <vscale x 8 x i1> poison, i1 true, i32 0
+ %m = shufflevector <vscale x 8 x i1> %head, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
+ %v = call <vscale x 8 x i64> @llvm.vp.sadd.sat.nxv8i64(<vscale x 8 x i64> %va, <vscale x 8 x i64> %vb, <vscale x 8 x i1> %m, i32 %evl)
+ ret <vscale x 8 x i64> %v
+}
diff --git a/llvm/test/CodeGen/RISCV/rvv/vsaddu-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vsaddu-vp.ll
new file mode 100644
index 000000000000..c0779e508c0a
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rvv/vsaddu-vp.ll
@@ -0,0 +1,2014 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=riscv32 -mattr=+v -verify-machineinstrs < %s \
+; RUN: | FileCheck %s --check-prefixes=CHECK,RV32
+; RUN: llc -mtriple=riscv64 -mattr=+v -verify-machineinstrs < %s \
+; RUN: | FileCheck %s --check-prefixes=CHECK,RV64
+
+declare <vscale x 8 x i7> @llvm.vp.uadd.sat.nxv8i7(<vscale x 8 x i7>, <vscale x 8 x i7>, <vscale x 8 x i1>, i32)
+
+define <vscale x 8 x i7> @vsaddu_vx_nxv8i7(<vscale x 8 x i7> %a, i7 signext %b, <vscale x 8 x i1> %mask, i32 zeroext %evl) {
+; CHECK-LABEL: vsaddu_vx_nxv8i7:
+; CHECK: # %bb.0:
+; CHECK-NEXT: li a2, 127
+; CHECK-NEXT: vsetvli a3, zero, e8, m1, ta, ma
+; CHECK-NEXT: vand.vx v8, v8, a2
+; CHECK-NEXT: vmv.v.x v9, a0
+; CHECK-NEXT: vand.vx v9, v9, a2
+; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma
+; CHECK-NEXT: vadd.vv v8, v8, v9, v0.t
+; CHECK-NEXT: vminu.vx v8, v8, a2, v0.t
+; CHECK-NEXT: ret
+ %elt.head = insertelement <vscale x 8 x i7> poison, i7 %b, i32 0
+ %vb = shufflevector <vscale x 8 x i7> %elt.head, <vscale x 8 x i7> poison, <vscale x 8 x i32> zeroinitializer
+ %v = call <vscale x 8 x i7> @llvm.vp.uadd.sat.nxv8i7(<vscale x 8 x i7> %a, <vscale x 8 x i7> %vb, <vscale x 8 x i1> %mask, i32 %evl)
+ ret <vscale x 8 x i7> %v
+}
+
+declare <vscale x 1 x i8> @llvm.vp.uadd.sat.nxv1i8(<vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i1>, i32)
+
+define <vscale x 1 x i8> @vsaddu_vv_nxv1i8(<vscale x 1 x i8> %va, <vscale x 1 x i8> %b, <vscale x 1 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vsaddu_vv_nxv1i8:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma
+; CHECK-NEXT: vsaddu.vv v8, v8, v9, v0.t
+; CHECK-NEXT: ret
+ %v = call <vscale x 1 x i8> @llvm.vp.uadd.sat.nxv1i8(<vscale x 1 x i8> %va, <vscale x 1 x i8> %b, <vscale x 1 x i1> %m, i32 %evl)
+ ret <vscale x 1 x i8> %v
+}
+
+define <vscale x 1 x i8> @vsaddu_vv_nxv1i8_unmasked(<vscale x 1 x i8> %va, <vscale x 1 x i8> %b, i32 zeroext %evl) {
+; CHECK-LABEL: vsaddu_vv_nxv1i8_unmasked:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma
+; CHECK-NEXT: vsaddu.vv v8, v8, v9
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 1 x i1> poison, i1 true, i32 0
+ %m = shufflevector <vscale x 1 x i1> %head, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
+ %v = call <vscale x 1 x i8> @llvm.vp.uadd.sat.nxv1i8(<vscale x 1 x i8> %va, <vscale x 1 x i8> %b, <vscale x 1 x i1> %m, i32 %evl)
+ ret <vscale x 1 x i8> %v
+}
+
+define <vscale x 1 x i8> @vsaddu_vx_nxv1i8(<vscale x 1 x i8> %va, i8 %b, <vscale x 1 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vsaddu_vx_nxv1i8:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
+; CHECK-NEXT: vsaddu.vx v8, v8, a0, v0.t
+; CHECK-NEXT: ret
+ %elt.head = insertelement <vscale x 1 x i8> poison, i8 %b, i32 0
+ %vb = shufflevector <vscale x 1 x i8> %elt.head, <vscale x 1 x i8> poison, <vscale x 1 x i32> zeroinitializer
+ %v = call <vscale x 1 x i8> @llvm.vp.uadd.sat.nxv1i8(<vscale x 1 x i8> %va, <vscale x 1 x i8> %vb, <vscale x 1 x i1> %m, i32 %evl)
+ ret <vscale x 1 x i8> %v
+}
+
+define <vscale x 1 x i8> @vsaddu_vx_nxv1i8_commute(<vscale x 1 x i8> %va, i8 %b, <vscale x 1 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vsaddu_vx_nxv1i8_commute:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
+; CHECK-NEXT: vsaddu.vx v8, v8, a0, v0.t
+; CHECK-NEXT: ret
+ %elt.head = insertelement <vscale x 1 x i8> poison, i8 %b, i32 0
+ %vb = shufflevector <vscale x 1 x i8> %elt.head, <vscale x 1 x i8> poison, <vscale x 1 x i32> zeroinitializer
+ %v = call <vscale x 1 x i8> @llvm.vp.uadd.sat.nxv1i8(<vscale x 1 x i8> %vb, <vscale x 1 x i8> %va, <vscale x 1 x i1> %m, i32 %evl)
+ ret <vscale x 1 x i8> %v
+}
+
+define <vscale x 1 x i8> @vsaddu_vx_nxv1i8_unmasked(<vscale x 1 x i8> %va, i8 %b, i32 zeroext %evl) {
+; CHECK-LABEL: vsaddu_vx_nxv1i8_unmasked:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
+; CHECK-NEXT: vsaddu.vx v8, v8, a0
+; CHECK-NEXT: ret
+ %elt.head = insertelement <vscale x 1 x i8> poison, i8 %b, i32 0
+ %vb = shufflevector <vscale x 1 x i8> %elt.head, <vscale x 1 x i8> poison, <vscale x 1 x i32> zeroinitializer
+ %head = insertelement <vscale x 1 x i1> poison, i1 true, i32 0
+ %m = shufflevector <vscale x 1 x i1> %head, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
+ %v = call <vscale x 1 x i8> @llvm.vp.uadd.sat.nxv1i8(<vscale x 1 x i8> %va, <vscale x 1 x i8> %vb, <vscale x 1 x i1> %m, i32 %evl)
+ ret <vscale x 1 x i8> %v
+}
+
+define <vscale x 1 x i8> @vsaddu_vi_nxv1i8(<vscale x 1 x i8> %va, <vscale x 1 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vsaddu_vi_nxv1i8:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma
+; CHECK-NEXT: vsaddu.vi v8, v8, -1, v0.t
+; CHECK-NEXT: ret
+ %elt.head = insertelement <vscale x 1 x i8> poison, i8 -1, i32 0
+ %vb = shufflevector <vscale x 1 x i8> %elt.head, <vscale x 1 x i8> poison, <vscale x 1 x i32> zeroinitializer
+ %v = call <vscale x 1 x i8> @llvm.vp.uadd.sat.nxv1i8(<vscale x 1 x i8> %va, <vscale x 1 x i8> %vb, <vscale x 1 x i1> %m, i32 %evl)
+ ret <vscale x 1 x i8> %v
+}
+
+define <vscale x 1 x i8> @vsaddu_vi_nxv1i8_unmasked(<vscale x 1 x i8> %va, i32 zeroext %evl) {
+; CHECK-LABEL: vsaddu_vi_nxv1i8_unmasked:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma
+; CHECK-NEXT: vsaddu.vi v8, v8, -1
+; CHECK-NEXT: ret
+ %elt.head = insertelement <vscale x 1 x i8> poison, i8 -1, i32 0
+ %vb = shufflevector <vscale x 1 x i8> %elt.head, <vscale x 1 x i8> poison, <vscale x 1 x i32> zeroinitializer
+ %head = insertelement <vscale x 1 x i1> poison, i1 true, i32 0
+ %m = shufflevector <vscale x 1 x i1> %head, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
+ %v = call <vscale x 1 x i8> @llvm.vp.uadd.sat.nxv1i8(<vscale x 1 x i8> %va, <vscale x 1 x i8> %vb, <vscale x 1 x i1> %m, i32 %evl)
+ ret <vscale x 1 x i8> %v
+}
+
+declare <vscale x 2 x i8> @llvm.vp.uadd.sat.nxv2i8(<vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i1>, i32)
+
+define <vscale x 2 x i8> @vsaddu_vv_nxv2i8(<vscale x 2 x i8> %va, <vscale x 2 x i8> %b, <vscale x 2 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vsaddu_vv_nxv2i8:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma
+; CHECK-NEXT: vsaddu.vv v8, v8, v9, v0.t
+; CHECK-NEXT: ret
+ %v = call <vscale x 2 x i8> @llvm.vp.uadd.sat.nxv2i8(<vscale x 2 x i8> %va, <vscale x 2 x i8> %b, <vscale x 2 x i1> %m, i32 %evl)
+ ret <vscale x 2 x i8> %v
+}
+
+define <vscale x 2 x i8> @vsaddu_vv_nxv2i8_unmasked(<vscale x 2 x i8> %va, <vscale x 2 x i8> %b, i32 zeroext %evl) {
+; CHECK-LABEL: vsaddu_vv_nxv2i8_unmasked:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma
+; CHECK-NEXT: vsaddu.vv v8, v8, v9
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 2 x i1> poison, i1 true, i32 0
+ %m = shufflevector <vscale x 2 x i1> %head, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
+ %v = call <vscale x 2 x i8> @llvm.vp.uadd.sat.nxv2i8(<vscale x 2 x i8> %va, <vscale x 2 x i8> %b, <vscale x 2 x i1> %m, i32 %evl)
+ ret <vscale x 2 x i8> %v
+}
+
+define <vscale x 2 x i8> @vsaddu_vx_nxv2i8(<vscale x 2 x i8> %va, i8 %b, <vscale x 2 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vsaddu_vx_nxv2i8:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma
+; CHECK-NEXT: vsaddu.vx v8, v8, a0, v0.t
+; CHECK-NEXT: ret
+ %elt.head = insertelement <vscale x 2 x i8> poison, i8 %b, i32 0
+ %vb = shufflevector <vscale x 2 x i8> %elt.head, <vscale x 2 x i8> poison, <vscale x 2 x i32> zeroinitializer
+ %v = call <vscale x 2 x i8> @llvm.vp.uadd.sat.nxv2i8(<vscale x 2 x i8> %va, <vscale x 2 x i8> %vb, <vscale x 2 x i1> %m, i32 %evl)
+ ret <vscale x 2 x i8> %v
+}
+
+define <vscale x 2 x i8> @vsaddu_vx_nxv2i8_unmasked(<vscale x 2 x i8> %va, i8 %b, i32 zeroext %evl) {
+; CHECK-LABEL: vsaddu_vx_nxv2i8_unmasked:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma
+; CHECK-NEXT: vsaddu.vx v8, v8, a0
+; CHECK-NEXT: ret
+ %elt.head = insertelement <vscale x 2 x i8> poison, i8 %b, i32 0
+ %vb = shufflevector <vscale x 2 x i8> %elt.head, <vscale x 2 x i8> poison, <vscale x 2 x i32> zeroinitializer
+ %head = insertelement <vscale x 2 x i1> poison, i1 true, i32 0
+ %m = shufflevector <vscale x 2 x i1> %head, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
+ %v = call <vscale x 2 x i8> @llvm.vp.uadd.sat.nxv2i8(<vscale x 2 x i8> %va, <vscale x 2 x i8> %vb, <vscale x 2 x i1> %m, i32 %evl)
+ ret <vscale x 2 x i8> %v
+}
+
+define <vscale x 2 x i8> @vsaddu_vi_nxv2i8(<vscale x 2 x i8> %va, <vscale x 2 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vsaddu_vi_nxv2i8:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma
+; CHECK-NEXT: vsaddu.vi v8, v8, -1, v0.t
+; CHECK-NEXT: ret
+ %elt.head = insertelement <vscale x 2 x i8> poison, i8 -1, i32 0
+ %vb = shufflevector <vscale x 2 x i8> %elt.head, <vscale x 2 x i8> poison, <vscale x 2 x i32> zeroinitializer
+ %v = call <vscale x 2 x i8> @llvm.vp.uadd.sat.nxv2i8(<vscale x 2 x i8> %va, <vscale x 2 x i8> %vb, <vscale x 2 x i1> %m, i32 %evl)
+ ret <vscale x 2 x i8> %v
+}
+
+define <vscale x 2 x i8> @vsaddu_vi_nxv2i8_unmasked(<vscale x 2 x i8> %va, i32 zeroext %evl) {
+; CHECK-LABEL: vsaddu_vi_nxv2i8_unmasked:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma
+; CHECK-NEXT: vsaddu.vi v8, v8, -1
+; CHECK-NEXT: ret
+ %elt.head = insertelement <vscale x 2 x i8> poison, i8 -1, i32 0
+ %vb = shufflevector <vscale x 2 x i8> %elt.head, <vscale x 2 x i8> poison, <vscale x 2 x i32> zeroinitializer
+ %head = insertelement <vscale x 2 x i1> poison, i1 true, i32 0
+ %m = shufflevector <vscale x 2 x i1> %head, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
+ %v = call <vscale x 2 x i8> @llvm.vp.uadd.sat.nxv2i8(<vscale x 2 x i8> %va, <vscale x 2 x i8> %vb, <vscale x 2 x i1> %m, i32 %evl)
+ ret <vscale x 2 x i8> %v
+}
+
+declare <vscale x 3 x i8> @llvm.vp.uadd.sat.nxv3i8(<vscale x 3 x i8>, <vscale x 3 x i8>, <vscale x 3 x i1>, i32)
+
+define <vscale x 3 x i8> @vsaddu_vv_nxv3i8(<vscale x 3 x i8> %va, <vscale x 3 x i8> %b, <vscale x 3 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vsaddu_vv_nxv3i8:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma
+; CHECK-NEXT: vsaddu.vv v8, v8, v9, v0.t
+; CHECK-NEXT: ret
+ %v = call <vscale x 3 x i8> @llvm.vp.uadd.sat.nxv3i8(<vscale x 3 x i8> %va, <vscale x 3 x i8> %b, <vscale x 3 x i1> %m, i32 %evl)
+ ret <vscale x 3 x i8> %v
+}
+
+define <vscale x 3 x i8> @vsaddu_vv_nxv3i8_unmasked(<vscale x 3 x i8> %va, <vscale x 3 x i8> %b, i32 zeroext %evl) {
+; CHECK-LABEL: vsaddu_vv_nxv3i8_unmasked:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma
+; CHECK-NEXT: vsaddu.vv v8, v8, v9
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 3 x i1> poison, i1 true, i32 0
+ %m = shufflevector <vscale x 3 x i1> %head, <vscale x 3 x i1> poison, <vscale x 3 x i32> zeroinitializer
+ %v = call <vscale x 3 x i8> @llvm.vp.uadd.sat.nxv3i8(<vscale x 3 x i8> %va, <vscale x 3 x i8> %b, <vscale x 3 x i1> %m, i32 %evl)
+ ret <vscale x 3 x i8> %v
+}
+
+define <vscale x 3 x i8> @vsaddu_vx_nxv3i8(<vscale x 3 x i8> %va, i8 %b, <vscale x 3 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vsaddu_vx_nxv3i8:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma
+; CHECK-NEXT: vsaddu.vx v8, v8, a0, v0.t
+; CHECK-NEXT: ret
+ %elt.head = insertelement <vscale x 3 x i8> poison, i8 %b, i32 0
+ %vb = shufflevector <vscale x 3 x i8> %elt.head, <vscale x 3 x i8> poison, <vscale x 3 x i32> zeroinitializer
+ %v = call <vscale x 3 x i8> @llvm.vp.uadd.sat.nxv3i8(<vscale x 3 x i8> %va, <vscale x 3 x i8> %vb, <vscale x 3 x i1> %m, i32 %evl)
+ ret <vscale x 3 x i8> %v
+}
+
+define <vscale x 3 x i8> @vsaddu_vx_nxv3i8_unmasked(<vscale x 3 x i8> %va, i8 %b, i32 zeroext %evl) {
+; CHECK-LABEL: vsaddu_vx_nxv3i8_unmasked:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma
+; CHECK-NEXT: vsaddu.vx v8, v8, a0
+; CHECK-NEXT: ret
+ %elt.head = insertelement <vscale x 3 x i8> poison, i8 %b, i32 0
+ %vb = shufflevector <vscale x 3 x i8> %elt.head, <vscale x 3 x i8> poison, <vscale x 3 x i32> zeroinitializer
+ %head = insertelement <vscale x 3 x i1> poison, i1 true, i32 0
+ %m = shufflevector <vscale x 3 x i1> %head, <vscale x 3 x i1> poison, <vscale x 3 x i32> zeroinitializer
+ %v = call <vscale x 3 x i8> @llvm.vp.uadd.sat.nxv3i8(<vscale x 3 x i8> %va, <vscale x 3 x i8> %vb, <vscale x 3 x i1> %m, i32 %evl)
+ ret <vscale x 3 x i8> %v
+}
+
+define <vscale x 3 x i8> @vsaddu_vi_nxv3i8(<vscale x 3 x i8> %va, <vscale x 3 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vsaddu_vi_nxv3i8:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma
+; CHECK-NEXT: vsaddu.vi v8, v8, -1, v0.t
+; CHECK-NEXT: ret
+ %elt.head = insertelement <vscale x 3 x i8> poison, i8 -1, i32 0
+ %vb = shufflevector <vscale x 3 x i8> %elt.head, <vscale x 3 x i8> poison, <vscale x 3 x i32> zeroinitializer
+ %v = call <vscale x 3 x i8> @llvm.vp.uadd.sat.nxv3i8(<vscale x 3 x i8> %va, <vscale x 3 x i8> %vb, <vscale x 3 x i1> %m, i32 %evl)
+ ret <vscale x 3 x i8> %v
+}
+
+define <vscale x 3 x i8> @vsaddu_vi_nxv3i8_unmasked(<vscale x 3 x i8> %va, i32 zeroext %evl) {
+; CHECK-LABEL: vsaddu_vi_nxv3i8_unmasked:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma
+; CHECK-NEXT: vsaddu.vi v8, v8, -1
+; CHECK-NEXT: ret
+ %elt.head = insertelement <vscale x 3 x i8> poison, i8 -1, i32 0
+ %vb = shufflevector <vscale x 3 x i8> %elt.head, <vscale x 3 x i8> poison, <vscale x 3 x i32> zeroinitializer
+ %head = insertelement <vscale x 3 x i1> poison, i1 true, i32 0
+ %m = shufflevector <vscale x 3 x i1> %head, <vscale x 3 x i1> poison, <vscale x 3 x i32> zeroinitializer
+ %v = call <vscale x 3 x i8> @llvm.vp.uadd.sat.nxv3i8(<vscale x 3 x i8> %va, <vscale x 3 x i8> %vb, <vscale x 3 x i1> %m, i32 %evl)
+ ret <vscale x 3 x i8> %v
+}
+
+declare <vscale x 4 x i8> @llvm.vp.uadd.sat.nxv4i8(<vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i1>, i32)
+
+define <vscale x 4 x i8> @vsaddu_vv_nxv4i8(<vscale x 4 x i8> %va, <vscale x 4 x i8> %b, <vscale x 4 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vsaddu_vv_nxv4i8:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma
+; CHECK-NEXT: vsaddu.vv v8, v8, v9, v0.t
+; CHECK-NEXT: ret
+ %v = call <vscale x 4 x i8> @llvm.vp.uadd.sat.nxv4i8(<vscale x 4 x i8> %va, <vscale x 4 x i8> %b, <vscale x 4 x i1> %m, i32 %evl)
+ ret <vscale x 4 x i8> %v
+}
+
+define <vscale x 4 x i8> @vsaddu_vv_nxv4i8_unmasked(<vscale x 4 x i8> %va, <vscale x 4 x i8> %b, i32 zeroext %evl) {
+; CHECK-LABEL: vsaddu_vv_nxv4i8_unmasked:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma
+; CHECK-NEXT: vsaddu.vv v8, v8, v9
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 4 x i1> poison, i1 true, i32 0
+ %m = shufflevector <vscale x 4 x i1> %head, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
+ %v = call <vscale x 4 x i8> @llvm.vp.uadd.sat.nxv4i8(<vscale x 4 x i8> %va, <vscale x 4 x i8> %b, <vscale x 4 x i1> %m, i32 %evl)
+ ret <vscale x 4 x i8> %v
+}
+
+define <vscale x 4 x i8> @vsaddu_vx_nxv4i8(<vscale x 4 x i8> %va, i8 %b, <vscale x 4 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vsaddu_vx_nxv4i8:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma
+; CHECK-NEXT: vsaddu.vx v8, v8, a0, v0.t
+; CHECK-NEXT: ret
+ %elt.head = insertelement <vscale x 4 x i8> poison, i8 %b, i32 0
+ %vb = shufflevector <vscale x 4 x i8> %elt.head, <vscale x 4 x i8> poison, <vscale x 4 x i32> zeroinitializer
+ %v = call <vscale x 4 x i8> @llvm.vp.uadd.sat.nxv4i8(<vscale x 4 x i8> %va, <vscale x 4 x i8> %vb, <vscale x 4 x i1> %m, i32 %evl)
+ ret <vscale x 4 x i8> %v
+}
+
+define <vscale x 4 x i8> @vsaddu_vx_nxv4i8_unmasked(<vscale x 4 x i8> %va, i8 %b, i32 zeroext %evl) {
+; CHECK-LABEL: vsaddu_vx_nxv4i8_unmasked:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma
+; CHECK-NEXT: vsaddu.vx v8, v8, a0
+; CHECK-NEXT: ret
+ %elt.head = insertelement <vscale x 4 x i8> poison, i8 %b, i32 0
+ %vb = shufflevector <vscale x 4 x i8> %elt.head, <vscale x 4 x i8> poison, <vscale x 4 x i32> zeroinitializer
+ %head = insertelement <vscale x 4 x i1> poison, i1 true, i32 0
+ %m = shufflevector <vscale x 4 x i1> %head, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
+ %v = call <vscale x 4 x i8> @llvm.vp.uadd.sat.nxv4i8(<vscale x 4 x i8> %va, <vscale x 4 x i8> %vb, <vscale x 4 x i1> %m, i32 %evl)
+ ret <vscale x 4 x i8> %v
+}
+
+define <vscale x 4 x i8> @vsaddu_vi_nxv4i8(<vscale x 4 x i8> %va, <vscale x 4 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vsaddu_vi_nxv4i8:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma
+; CHECK-NEXT: vsaddu.vi v8, v8, -1, v0.t
+; CHECK-NEXT: ret
+ %elt.head = insertelement <vscale x 4 x i8> poison, i8 -1, i32 0
+ %vb = shufflevector <vscale x 4 x i8> %elt.head, <vscale x 4 x i8> poison, <vscale x 4 x i32> zeroinitializer
+ %v = call <vscale x 4 x i8> @llvm.vp.uadd.sat.nxv4i8(<vscale x 4 x i8> %va, <vscale x 4 x i8> %vb, <vscale x 4 x i1> %m, i32 %evl)
+ ret <vscale x 4 x i8> %v
+}
+
+define <vscale x 4 x i8> @vsaddu_vi_nxv4i8_unmasked(<vscale x 4 x i8> %va, i32 zeroext %evl) {
+; CHECK-LABEL: vsaddu_vi_nxv4i8_unmasked:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma
+; CHECK-NEXT: vsaddu.vi v8, v8, -1
+; CHECK-NEXT: ret
+ %elt.head = insertelement <vscale x 4 x i8> poison, i8 -1, i32 0
+ %vb = shufflevector <vscale x 4 x i8> %elt.head, <vscale x 4 x i8> poison, <vscale x 4 x i32> zeroinitializer
+ %head = insertelement <vscale x 4 x i1> poison, i1 true, i32 0
+ %m = shufflevector <vscale x 4 x i1> %head, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
+ %v = call <vscale x 4 x i8> @llvm.vp.uadd.sat.nxv4i8(<vscale x 4 x i8> %va, <vscale x 4 x i8> %vb, <vscale x 4 x i1> %m, i32 %evl)
+ ret <vscale x 4 x i8> %v
+}
+
+declare <vscale x 8 x i8> @llvm.vp.uadd.sat.nxv8i8(<vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i1>, i32)
+
+define <vscale x 8 x i8> @vsaddu_vv_nxv8i8(<vscale x 8 x i8> %va, <vscale x 8 x i8> %b, <vscale x 8 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vsaddu_vv_nxv8i8:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma
+; CHECK-NEXT: vsaddu.vv v8, v8, v9, v0.t
+; CHECK-NEXT: ret
+ %v = call <vscale x 8 x i8> @llvm.vp.uadd.sat.nxv8i8(<vscale x 8 x i8> %va, <vscale x 8 x i8> %b, <vscale x 8 x i1> %m, i32 %evl)
+ ret <vscale x 8 x i8> %v
+}
+
+define <vscale x 8 x i8> @vsaddu_vv_nxv8i8_unmasked(<vscale x 8 x i8> %va, <vscale x 8 x i8> %b, i32 zeroext %evl) {
+; CHECK-LABEL: vsaddu_vv_nxv8i8_unmasked:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma
+; CHECK-NEXT: vsaddu.vv v8, v8, v9
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i1> poison, i1 true, i32 0
+ %m = shufflevector <vscale x 8 x i1> %head, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
+ %v = call <vscale x 8 x i8> @llvm.vp.uadd.sat.nxv8i8(<vscale x 8 x i8> %va, <vscale x 8 x i8> %b, <vscale x 8 x i1> %m, i32 %evl)
+ ret <vscale x 8 x i8> %v
+}
+
+define <vscale x 8 x i8> @vsaddu_vx_nxv8i8(<vscale x 8 x i8> %va, i8 %b, <vscale x 8 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vsaddu_vx_nxv8i8:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma
+; CHECK-NEXT: vsaddu.vx v8, v8, a0, v0.t
+; CHECK-NEXT: ret
+ %elt.head = insertelement <vscale x 8 x i8> poison, i8 %b, i32 0
+ %vb = shufflevector <vscale x 8 x i8> %elt.head, <vscale x 8 x i8> poison, <vscale x 8 x i32> zeroinitializer
+ %v = call <vscale x 8 x i8> @llvm.vp.uadd.sat.nxv8i8(<vscale x 8 x i8> %va, <vscale x 8 x i8> %vb, <vscale x 8 x i1> %m, i32 %evl)
+ ret <vscale x 8 x i8> %v
+}
+
+define <vscale x 8 x i8> @vsaddu_vx_nxv8i8_unmasked(<vscale x 8 x i8> %va, i8 %b, i32 zeroext %evl) {
+; CHECK-LABEL: vsaddu_vx_nxv8i8_unmasked:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma
+; CHECK-NEXT: vsaddu.vx v8, v8, a0
+; CHECK-NEXT: ret
+ %elt.head = insertelement <vscale x 8 x i8> poison, i8 %b, i32 0
+ %vb = shufflevector <vscale x 8 x i8> %elt.head, <vscale x 8 x i8> poison, <vscale x 8 x i32> zeroinitializer
+ %head = insertelement <vscale x 8 x i1> poison, i1 true, i32 0
+ %m = shufflevector <vscale x 8 x i1> %head, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
+ %v = call <vscale x 8 x i8> @llvm.vp.uadd.sat.nxv8i8(<vscale x 8 x i8> %va, <vscale x 8 x i8> %vb, <vscale x 8 x i1> %m, i32 %evl)
+ ret <vscale x 8 x i8> %v
+}
+
+define <vscale x 8 x i8> @vsaddu_vi_nxv8i8(<vscale x 8 x i8> %va, <vscale x 8 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vsaddu_vi_nxv8i8:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma
+; CHECK-NEXT: vsaddu.vi v8, v8, -1, v0.t
+; CHECK-NEXT: ret
+ %elt.head = insertelement <vscale x 8 x i8> poison, i8 -1, i32 0
+ %vb = shufflevector <vscale x 8 x i8> %elt.head, <vscale x 8 x i8> poison, <vscale x 8 x i32> zeroinitializer
+ %v = call <vscale x 8 x i8> @llvm.vp.uadd.sat.nxv8i8(<vscale x 8 x i8> %va, <vscale x 8 x i8> %vb, <vscale x 8 x i1> %m, i32 %evl)
+ ret <vscale x 8 x i8> %v
+}
+
+define <vscale x 8 x i8> @vsaddu_vi_nxv8i8_unmasked(<vscale x 8 x i8> %va, i32 zeroext %evl) {
+; CHECK-LABEL: vsaddu_vi_nxv8i8_unmasked:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma
+; CHECK-NEXT: vsaddu.vi v8, v8, -1
+; CHECK-NEXT: ret
+ %elt.head = insertelement <vscale x 8 x i8> poison, i8 -1, i32 0
+ %vb = shufflevector <vscale x 8 x i8> %elt.head, <vscale x 8 x i8> poison, <vscale x 8 x i32> zeroinitializer
+ %head = insertelement <vscale x 8 x i1> poison, i1 true, i32 0
+ %m = shufflevector <vscale x 8 x i1> %head, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
+ %v = call <vscale x 8 x i8> @llvm.vp.uadd.sat.nxv8i8(<vscale x 8 x i8> %va, <vscale x 8 x i8> %vb, <vscale x 8 x i1> %m, i32 %evl)
+ ret <vscale x 8 x i8> %v
+}
+
+declare <vscale x 16 x i8> @llvm.vp.uadd.sat.nxv16i8(<vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i1>, i32)
+
+define <vscale x 16 x i8> @vsaddu_vv_nxv16i8(<vscale x 16 x i8> %va, <vscale x 16 x i8> %b, <vscale x 16 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vsaddu_vv_nxv16i8:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma
+; CHECK-NEXT: vsaddu.vv v8, v8, v10, v0.t
+; CHECK-NEXT: ret
+ %v = call <vscale x 16 x i8> @llvm.vp.uadd.sat.nxv16i8(<vscale x 16 x i8> %va, <vscale x 16 x i8> %b, <vscale x 16 x i1> %m, i32 %evl)
+ ret <vscale x 16 x i8> %v
+}
+
+define <vscale x 16 x i8> @vsaddu_vv_nxv16i8_unmasked(<vscale x 16 x i8> %va, <vscale x 16 x i8> %b, i32 zeroext %evl) {
+; CHECK-LABEL: vsaddu_vv_nxv16i8_unmasked:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma
+; CHECK-NEXT: vsaddu.vv v8, v8, v10
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 16 x i1> poison, i1 true, i32 0
+ %m = shufflevector <vscale x 16 x i1> %head, <vscale x 16 x i1> poison, <vscale x 16 x i32> zeroinitializer
+ %v = call <vscale x 16 x i8> @llvm.vp.uadd.sat.nxv16i8(<vscale x 16 x i8> %va, <vscale x 16 x i8> %b, <vscale x 16 x i1> %m, i32 %evl)
+ ret <vscale x 16 x i8> %v
+}
+
+define <vscale x 16 x i8> @vsaddu_vx_nxv16i8(<vscale x 16 x i8> %va, i8 %b, <vscale x 16 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vsaddu_vx_nxv16i8:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma
+; CHECK-NEXT: vsaddu.vx v8, v8, a0, v0.t
+; CHECK-NEXT: ret
+ %elt.head = insertelement <vscale x 16 x i8> poison, i8 %b, i32 0
+ %vb = shufflevector <vscale x 16 x i8> %elt.head, <vscale x 16 x i8> poison, <vscale x 16 x i32> zeroinitializer
+ %v = call <vscale x 16 x i8> @llvm.vp.uadd.sat.nxv16i8(<vscale x 16 x i8> %va, <vscale x 16 x i8> %vb, <vscale x 16 x i1> %m, i32 %evl)
+ ret <vscale x 16 x i8> %v
+}
+
+define <vscale x 16 x i8> @vsaddu_vx_nxv16i8_unmasked(<vscale x 16 x i8> %va, i8 %b, i32 zeroext %evl) {
+; CHECK-LABEL: vsaddu_vx_nxv16i8_unmasked:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma
+; CHECK-NEXT: vsaddu.vx v8, v8, a0
+; CHECK-NEXT: ret
+ %elt.head = insertelement <vscale x 16 x i8> poison, i8 %b, i32 0
+ %vb = shufflevector <vscale x 16 x i8> %elt.head, <vscale x 16 x i8> poison, <vscale x 16 x i32> zeroinitializer
+ %head = insertelement <vscale x 16 x i1> poison, i1 true, i32 0
+ %m = shufflevector <vscale x 16 x i1> %head, <vscale x 16 x i1> poison, <vscale x 16 x i32> zeroinitializer
+ %v = call <vscale x 16 x i8> @llvm.vp.uadd.sat.nxv16i8(<vscale x 16 x i8> %va, <vscale x 16 x i8> %vb, <vscale x 16 x i1> %m, i32 %evl)
+ ret <vscale x 16 x i8> %v
+}
+
+define <vscale x 16 x i8> @vsaddu_vi_nxv16i8(<vscale x 16 x i8> %va, <vscale x 16 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vsaddu_vi_nxv16i8:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma
+; CHECK-NEXT: vsaddu.vi v8, v8, -1, v0.t
+; CHECK-NEXT: ret
+ %elt.head = insertelement <vscale x 16 x i8> poison, i8 -1, i32 0
+ %vb = shufflevector <vscale x 16 x i8> %elt.head, <vscale x 16 x i8> poison, <vscale x 16 x i32> zeroinitializer
+ %v = call <vscale x 16 x i8> @llvm.vp.uadd.sat.nxv16i8(<vscale x 16 x i8> %va, <vscale x 16 x i8> %vb, <vscale x 16 x i1> %m, i32 %evl)
+ ret <vscale x 16 x i8> %v
+}
+
+define <vscale x 16 x i8> @vsaddu_vi_nxv16i8_unmasked(<vscale x 16 x i8> %va, i32 zeroext %evl) {
+; CHECK-LABEL: vsaddu_vi_nxv16i8_unmasked:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma
+; CHECK-NEXT: vsaddu.vi v8, v8, -1
+; CHECK-NEXT: ret
+ %elt.head = insertelement <vscale x 16 x i8> poison, i8 -1, i32 0
+ %vb = shufflevector <vscale x 16 x i8> %elt.head, <vscale x 16 x i8> poison, <vscale x 16 x i32> zeroinitializer
+ %head = insertelement <vscale x 16 x i1> poison, i1 true, i32 0
+ %m = shufflevector <vscale x 16 x i1> %head, <vscale x 16 x i1> poison, <vscale x 16 x i32> zeroinitializer
+ %v = call <vscale x 16 x i8> @llvm.vp.uadd.sat.nxv16i8(<vscale x 16 x i8> %va, <vscale x 16 x i8> %vb, <vscale x 16 x i1> %m, i32 %evl)
+ ret <vscale x 16 x i8> %v
+}
+
+declare <vscale x 32 x i8> @llvm.vp.uadd.sat.nxv32i8(<vscale x 32 x i8>, <vscale x 32 x i8>, <vscale x 32 x i1>, i32)
+
+define <vscale x 32 x i8> @vsaddu_vv_nxv32i8(<vscale x 32 x i8> %va, <vscale x 32 x i8> %b, <vscale x 32 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vsaddu_vv_nxv32i8:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma
+; CHECK-NEXT: vsaddu.vv v8, v8, v12, v0.t
+; CHECK-NEXT: ret
+ %v = call <vscale x 32 x i8> @llvm.vp.uadd.sat.nxv32i8(<vscale x 32 x i8> %va, <vscale x 32 x i8> %b, <vscale x 32 x i1> %m, i32 %evl)
+ ret <vscale x 32 x i8> %v
+}
+
+define <vscale x 32 x i8> @vsaddu_vv_nxv32i8_unmasked(<vscale x 32 x i8> %va, <vscale x 32 x i8> %b, i32 zeroext %evl) {
+; CHECK-LABEL: vsaddu_vv_nxv32i8_unmasked:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma
+; CHECK-NEXT: vsaddu.vv v8, v8, v12
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 32 x i1> poison, i1 true, i32 0
+ %m = shufflevector <vscale x 32 x i1> %head, <vscale x 32 x i1> poison, <vscale x 32 x i32> zeroinitializer
+ %v = call <vscale x 32 x i8> @llvm.vp.uadd.sat.nxv32i8(<vscale x 32 x i8> %va, <vscale x 32 x i8> %b, <vscale x 32 x i1> %m, i32 %evl)
+ ret <vscale x 32 x i8> %v
+}
+
+define <vscale x 32 x i8> @vsaddu_vx_nxv32i8(<vscale x 32 x i8> %va, i8 %b, <vscale x 32 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vsaddu_vx_nxv32i8:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, ma
+; CHECK-NEXT: vsaddu.vx v8, v8, a0, v0.t
+; CHECK-NEXT: ret
+ %elt.head = insertelement <vscale x 32 x i8> poison, i8 %b, i32 0
+ %vb = shufflevector <vscale x 32 x i8> %elt.head, <vscale x 32 x i8> poison, <vscale x 32 x i32> zeroinitializer
+ %v = call <vscale x 32 x i8> @llvm.vp.uadd.sat.nxv32i8(<vscale x 32 x i8> %va, <vscale x 32 x i8> %vb, <vscale x 32 x i1> %m, i32 %evl)
+ ret <vscale x 32 x i8> %v
+}
+
+define <vscale x 32 x i8> @vsaddu_vx_nxv32i8_unmasked(<vscale x 32 x i8> %va, i8 %b, i32 zeroext %evl) {
+; CHECK-LABEL: vsaddu_vx_nxv32i8_unmasked:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, ma
+; CHECK-NEXT: vsaddu.vx v8, v8, a0
+; CHECK-NEXT: ret
+ %elt.head = insertelement <vscale x 32 x i8> poison, i8 %b, i32 0
+ %vb = shufflevector <vscale x 32 x i8> %elt.head, <vscale x 32 x i8> poison, <vscale x 32 x i32> zeroinitializer
+ %head = insertelement <vscale x 32 x i1> poison, i1 true, i32 0
+ %m = shufflevector <vscale x 32 x i1> %head, <vscale x 32 x i1> poison, <vscale x 32 x i32> zeroinitializer
+ %v = call <vscale x 32 x i8> @llvm.vp.uadd.sat.nxv32i8(<vscale x 32 x i8> %va, <vscale x 32 x i8> %vb, <vscale x 32 x i1> %m, i32 %evl)
+ ret <vscale x 32 x i8> %v
+}
+
+define <vscale x 32 x i8> @vsaddu_vi_nxv32i8(<vscale x 32 x i8> %va, <vscale x 32 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vsaddu_vi_nxv32i8:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma
+; CHECK-NEXT: vsaddu.vi v8, v8, -1, v0.t
+; CHECK-NEXT: ret
+ %elt.head = insertelement <vscale x 32 x i8> poison, i8 -1, i32 0
+ %vb = shufflevector <vscale x 32 x i8> %elt.head, <vscale x 32 x i8> poison, <vscale x 32 x i32> zeroinitializer
+ %v = call <vscale x 32 x i8> @llvm.vp.uadd.sat.nxv32i8(<vscale x 32 x i8> %va, <vscale x 32 x i8> %vb, <vscale x 32 x i1> %m, i32 %evl)
+ ret <vscale x 32 x i8> %v
+}
+
+define <vscale x 32 x i8> @vsaddu_vi_nxv32i8_unmasked(<vscale x 32 x i8> %va, i32 zeroext %evl) {
+; CHECK-LABEL: vsaddu_vi_nxv32i8_unmasked:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma
+; CHECK-NEXT: vsaddu.vi v8, v8, -1
+; CHECK-NEXT: ret
+ %elt.head = insertelement <vscale x 32 x i8> poison, i8 -1, i32 0
+ %vb = shufflevector <vscale x 32 x i8> %elt.head, <vscale x 32 x i8> poison, <vscale x 32 x i32> zeroinitializer
+ %head = insertelement <vscale x 32 x i1> poison, i1 true, i32 0
+ %m = shufflevector <vscale x 32 x i1> %head, <vscale x 32 x i1> poison, <vscale x 32 x i32> zeroinitializer
+ %v = call <vscale x 32 x i8> @llvm.vp.uadd.sat.nxv32i8(<vscale x 32 x i8> %va, <vscale x 32 x i8> %vb, <vscale x 32 x i1> %m, i32 %evl)
+ ret <vscale x 32 x i8> %v
+}
+
+declare <vscale x 64 x i8> @llvm.vp.uadd.sat.nxv64i8(<vscale x 64 x i8>, <vscale x 64 x i8>, <vscale x 64 x i1>, i32)
+
+define <vscale x 64 x i8> @vsaddu_vv_nxv64i8(<vscale x 64 x i8> %va, <vscale x 64 x i8> %b, <vscale x 64 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vsaddu_vv_nxv64i8:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, ma
+; CHECK-NEXT: vsaddu.vv v8, v8, v16, v0.t
+; CHECK-NEXT: ret
+ %v = call <vscale x 64 x i8> @llvm.vp.uadd.sat.nxv64i8(<vscale x 64 x i8> %va, <vscale x 64 x i8> %b, <vscale x 64 x i1> %m, i32 %evl)
+ ret <vscale x 64 x i8> %v
+}
+
+define <vscale x 64 x i8> @vsaddu_vv_nxv64i8_unmasked(<vscale x 64 x i8> %va, <vscale x 64 x i8> %b, i32 zeroext %evl) {
+; CHECK-LABEL: vsaddu_vv_nxv64i8_unmasked:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, ma
+; CHECK-NEXT: vsaddu.vv v8, v8, v16
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 64 x i1> poison, i1 true, i32 0
+ %m = shufflevector <vscale x 64 x i1> %head, <vscale x 64 x i1> poison, <vscale x 64 x i32> zeroinitializer
+ %v = call <vscale x 64 x i8> @llvm.vp.uadd.sat.nxv64i8(<vscale x 64 x i8> %va, <vscale x 64 x i8> %b, <vscale x 64 x i1> %m, i32 %evl)
+ ret <vscale x 64 x i8> %v
+}
+
+define <vscale x 64 x i8> @vsaddu_vx_nxv64i8(<vscale x 64 x i8> %va, i8 %b, <vscale x 64 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vsaddu_vx_nxv64i8:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, ma
+; CHECK-NEXT: vsaddu.vx v8, v8, a0, v0.t
+; CHECK-NEXT: ret
+ %elt.head = insertelement <vscale x 64 x i8> poison, i8 %b, i32 0
+ %vb = shufflevector <vscale x 64 x i8> %elt.head, <vscale x 64 x i8> poison, <vscale x 64 x i32> zeroinitializer
+ %v = call <vscale x 64 x i8> @llvm.vp.uadd.sat.nxv64i8(<vscale x 64 x i8> %va, <vscale x 64 x i8> %vb, <vscale x 64 x i1> %m, i32 %evl)
+ ret <vscale x 64 x i8> %v
+}
+
+define <vscale x 64 x i8> @vsaddu_vx_nxv64i8_unmasked(<vscale x 64 x i8> %va, i8 %b, i32 zeroext %evl) {
+; CHECK-LABEL: vsaddu_vx_nxv64i8_unmasked:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, ma
+; CHECK-NEXT: vsaddu.vx v8, v8, a0
+; CHECK-NEXT: ret
+ %elt.head = insertelement <vscale x 64 x i8> poison, i8 %b, i32 0
+ %vb = shufflevector <vscale x 64 x i8> %elt.head, <vscale x 64 x i8> poison, <vscale x 64 x i32> zeroinitializer
+ %head = insertelement <vscale x 64 x i1> poison, i1 true, i32 0
+ %m = shufflevector <vscale x 64 x i1> %head, <vscale x 64 x i1> poison, <vscale x 64 x i32> zeroinitializer
+ %v = call <vscale x 64 x i8> @llvm.vp.uadd.sat.nxv64i8(<vscale x 64 x i8> %va, <vscale x 64 x i8> %vb, <vscale x 64 x i1> %m, i32 %evl)
+ ret <vscale x 64 x i8> %v
+}
+
+define <vscale x 64 x i8> @vsaddu_vi_nxv64i8(<vscale x 64 x i8> %va, <vscale x 64 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vsaddu_vi_nxv64i8:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, ma
+; CHECK-NEXT: vsaddu.vi v8, v8, -1, v0.t
+; CHECK-NEXT: ret
+ %elt.head = insertelement <vscale x 64 x i8> poison, i8 -1, i32 0
+ %vb = shufflevector <vscale x 64 x i8> %elt.head, <vscale x 64 x i8> poison, <vscale x 64 x i32> zeroinitializer
+ %v = call <vscale x 64 x i8> @llvm.vp.uadd.sat.nxv64i8(<vscale x 64 x i8> %va, <vscale x 64 x i8> %vb, <vscale x 64 x i1> %m, i32 %evl)
+ ret <vscale x 64 x i8> %v
+}
+
+define <vscale x 64 x i8> @vsaddu_vi_nxv64i8_unmasked(<vscale x 64 x i8> %va, i32 zeroext %evl) {
+; CHECK-LABEL: vsaddu_vi_nxv64i8_unmasked:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, ma
+; CHECK-NEXT: vsaddu.vi v8, v8, -1
+; CHECK-NEXT: ret
+ %elt.head = insertelement <vscale x 64 x i8> poison, i8 -1, i32 0
+ %vb = shufflevector <vscale x 64 x i8> %elt.head, <vscale x 64 x i8> poison, <vscale x 64 x i32> zeroinitializer
+ %head = insertelement <vscale x 64 x i1> poison, i1 true, i32 0
+ %m = shufflevector <vscale x 64 x i1> %head, <vscale x 64 x i1> poison, <vscale x 64 x i32> zeroinitializer
+ %v = call <vscale x 64 x i8> @llvm.vp.uadd.sat.nxv64i8(<vscale x 64 x i8> %va, <vscale x 64 x i8> %vb, <vscale x 64 x i1> %m, i32 %evl)
+ ret <vscale x 64 x i8> %v
+}
+
+; Test that split-legalization works when the mask itself needs splitting.
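+; (Sketch of the expected lowering, inferred from the CHECK lines below: <vscale x 128 x i8>
+; is twice the widest LMUL=8 i8 type, <vscale x 64 x i8>, so the operands, the EVL, and the
+; mask are each split in two. The v16 half is processed with the clamped remainder EVL
+; (evl - vlenb*8, floored at 0) under a mask half reloaded via vlm.v, then the v8 half with
+; min(evl, vlenb*8) under the mask half that arrived in v0.)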
+
+declare <vscale x 128 x i8> @llvm.vp.uadd.sat.nxv128i8(<vscale x 128 x i8>, <vscale x 128 x i8>, <vscale x 128 x i1>, i32)
+
+define <vscale x 128 x i8> @vsaddu_vi_nxv128i8(<vscale x 128 x i8> %va, <vscale x 128 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vsaddu_vi_nxv128i8:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vmv1r.v v24, v0
+; CHECK-NEXT: vsetvli a2, zero, e8, m8, ta, ma
+; CHECK-NEXT: vlm.v v0, (a0)
+; CHECK-NEXT: csrr a0, vlenb
+; CHECK-NEXT: slli a0, a0, 3
+; CHECK-NEXT: sub a2, a1, a0
+; CHECK-NEXT: sltu a3, a1, a2
+; CHECK-NEXT: addi a3, a3, -1
+; CHECK-NEXT: and a2, a3, a2
+; CHECK-NEXT: vsetvli zero, a2, e8, m8, ta, ma
+; CHECK-NEXT: vsaddu.vi v16, v16, -1, v0.t
+; CHECK-NEXT: bltu a1, a0, .LBB50_2
+; CHECK-NEXT: # %bb.1:
+; CHECK-NEXT: mv a1, a0
+; CHECK-NEXT: .LBB50_2:
+; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, ma
+; CHECK-NEXT: vmv1r.v v0, v24
+; CHECK-NEXT: vsaddu.vi v8, v8, -1, v0.t
+; CHECK-NEXT: ret
+ %elt.head = insertelement <vscale x 128 x i8> poison, i8 -1, i32 0
+ %vb = shufflevector <vscale x 128 x i8> %elt.head, <vscale x 128 x i8> poison, <vscale x 128 x i32> zeroinitializer
+ %v = call <vscale x 128 x i8> @llvm.vp.uadd.sat.nxv128i8(<vscale x 128 x i8> %va, <vscale x 128 x i8> %vb, <vscale x 128 x i1> %m, i32 %evl)
+ ret <vscale x 128 x i8> %v
+}
+
+define <vscale x 128 x i8> @vsaddu_vi_nxv128i8_unmasked(<vscale x 128 x i8> %va, i32 zeroext %evl) {
+; CHECK-LABEL: vsaddu_vi_nxv128i8_unmasked:
+; CHECK: # %bb.0:
+; CHECK-NEXT: csrr a1, vlenb
+; CHECK-NEXT: slli a1, a1, 3
+; CHECK-NEXT: sub a2, a0, a1
+; CHECK-NEXT: sltu a3, a0, a2
+; CHECK-NEXT: addi a3, a3, -1
+; CHECK-NEXT: and a2, a3, a2
+; CHECK-NEXT: vsetvli zero, a2, e8, m8, ta, ma
+; CHECK-NEXT: vsaddu.vi v16, v16, -1
+; CHECK-NEXT: bltu a0, a1, .LBB51_2
+; CHECK-NEXT: # %bb.1:
+; CHECK-NEXT: mv a0, a1
+; CHECK-NEXT: .LBB51_2:
+; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, ma
+; CHECK-NEXT: vsaddu.vi v8, v8, -1
+; CHECK-NEXT: ret
+ %elt.head = insertelement <vscale x 128 x i8> poison, i8 -1, i32 0
+ %vb = shufflevector <vscale x 128 x i8> %elt.head, <vscale x 128 x i8> poison, <vscale x 128 x i32> zeroinitializer
+ %head = insertelement <vscale x 128 x i1> poison, i1 true, i32 0
+ %m = shufflevector <vscale x 128 x i1> %head, <vscale x 128 x i1> poison, <vscale x 128 x i32> zeroinitializer
+ %v = call <vscale x 128 x i8> @llvm.vp.uadd.sat.nxv128i8(<vscale x 128 x i8> %va, <vscale x 128 x i8> %vb, <vscale x 128 x i1> %m, i32 %evl)
+ ret <vscale x 128 x i8> %v
+}
+
+declare <vscale x 1 x i16> @llvm.vp.uadd.sat.nxv1i16(<vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i1>, i32)
+
+define <vscale x 1 x i16> @vsaddu_vv_nxv1i16(<vscale x 1 x i16> %va, <vscale x 1 x i16> %b, <vscale x 1 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vsaddu_vv_nxv1i16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
+; CHECK-NEXT: vsaddu.vv v8, v8, v9, v0.t
+; CHECK-NEXT: ret
+ %v = call <vscale x 1 x i16> @llvm.vp.uadd.sat.nxv1i16(<vscale x 1 x i16> %va, <vscale x 1 x i16> %b, <vscale x 1 x i1> %m, i32 %evl)
+ ret <vscale x 1 x i16> %v
+}
+
+define <vscale x 1 x i16> @vsaddu_vv_nxv1i16_unmasked(<vscale x 1 x i16> %va, <vscale x 1 x i16> %b, i32 zeroext %evl) {
+; CHECK-LABEL: vsaddu_vv_nxv1i16_unmasked:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
+; CHECK-NEXT: vsaddu.vv v8, v8, v9
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 1 x i1> poison, i1 true, i32 0
+ %m = shufflevector <vscale x 1 x i1> %head, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
+ %v = call <vscale x 1 x i16> @llvm.vp.uadd.sat.nxv1i16(<vscale x 1 x i16> %va, <vscale x 1 x i16> %b, <vscale x 1 x i1> %m, i32 %evl)
+ ret <vscale x 1 x i16> %v
+}
+
+define <vscale x 1 x i16> @vsaddu_vx_nxv1i16(<vscale x 1 x i16> %va, i16 %b, <vscale x 1 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vsaddu_vx_nxv1i16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
+; CHECK-NEXT: vsaddu.vx v8, v8, a0, v0.t
+; CHECK-NEXT: ret
+ %elt.head = insertelement <vscale x 1 x i16> poison, i16 %b, i32 0
+ %vb = shufflevector <vscale x 1 x i16> %elt.head, <vscale x 1 x i16> poison, <vscale x 1 x i32> zeroinitializer
+ %v = call <vscale x 1 x i16> @llvm.vp.uadd.sat.nxv1i16(<vscale x 1 x i16> %va, <vscale x 1 x i16> %vb, <vscale x 1 x i1> %m, i32 %evl)
+ ret <vscale x 1 x i16> %v
+}
+
+define <vscale x 1 x i16> @vsaddu_vx_nxv1i16_unmasked(<vscale x 1 x i16> %va, i16 %b, i32 zeroext %evl) {
+; CHECK-LABEL: vsaddu_vx_nxv1i16_unmasked:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
+; CHECK-NEXT: vsaddu.vx v8, v8, a0
+; CHECK-NEXT: ret
+ %elt.head = insertelement <vscale x 1 x i16> poison, i16 %b, i32 0
+ %vb = shufflevector <vscale x 1 x i16> %elt.head, <vscale x 1 x i16> poison, <vscale x 1 x i32> zeroinitializer
+ %head = insertelement <vscale x 1 x i1> poison, i1 true, i32 0
+ %m = shufflevector <vscale x 1 x i1> %head, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
+ %v = call <vscale x 1 x i16> @llvm.vp.uadd.sat.nxv1i16(<vscale x 1 x i16> %va, <vscale x 1 x i16> %vb, <vscale x 1 x i1> %m, i32 %evl)
+ ret <vscale x 1 x i16> %v
+}
+
+define <vscale x 1 x i16> @vsaddu_vi_nxv1i16(<vscale x 1 x i16> %va, <vscale x 1 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vsaddu_vi_nxv1i16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
+; CHECK-NEXT: vsaddu.vi v8, v8, -1, v0.t
+; CHECK-NEXT: ret
+ %elt.head = insertelement <vscale x 1 x i16> poison, i16 -1, i32 0
+ %vb = shufflevector <vscale x 1 x i16> %elt.head, <vscale x 1 x i16> poison, <vscale x 1 x i32> zeroinitializer
+ %v = call <vscale x 1 x i16> @llvm.vp.uadd.sat.nxv1i16(<vscale x 1 x i16> %va, <vscale x 1 x i16> %vb, <vscale x 1 x i1> %m, i32 %evl)
+ ret <vscale x 1 x i16> %v
+}
+
+define <vscale x 1 x i16> @vsaddu_vi_nxv1i16_unmasked(<vscale x 1 x i16> %va, i32 zeroext %evl) {
+; CHECK-LABEL: vsaddu_vi_nxv1i16_unmasked:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
+; CHECK-NEXT: vsaddu.vi v8, v8, -1
+; CHECK-NEXT: ret
+ %elt.head = insertelement <vscale x 1 x i16> poison, i16 -1, i32 0
+ %vb = shufflevector <vscale x 1 x i16> %elt.head, <vscale x 1 x i16> poison, <vscale x 1 x i32> zeroinitializer
+ %head = insertelement <vscale x 1 x i1> poison, i1 true, i32 0
+ %m = shufflevector <vscale x 1 x i1> %head, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
+ %v = call <vscale x 1 x i16> @llvm.vp.uadd.sat.nxv1i16(<vscale x 1 x i16> %va, <vscale x 1 x i16> %vb, <vscale x 1 x i1> %m, i32 %evl)
+ ret <vscale x 1 x i16> %v
+}
+
+declare <vscale x 2 x i16> @llvm.vp.uadd.sat.nxv2i16(<vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i1>, i32)
+
+define <vscale x 2 x i16> @vsaddu_vv_nxv2i16(<vscale x 2 x i16> %va, <vscale x 2 x i16> %b, <vscale x 2 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vsaddu_vv_nxv2i16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma
+; CHECK-NEXT: vsaddu.vv v8, v8, v9, v0.t
+; CHECK-NEXT: ret
+ %v = call <vscale x 2 x i16> @llvm.vp.uadd.sat.nxv2i16(<vscale x 2 x i16> %va, <vscale x 2 x i16> %b, <vscale x 2 x i1> %m, i32 %evl)
+ ret <vscale x 2 x i16> %v
+}
+
+define <vscale x 2 x i16> @vsaddu_vv_nxv2i16_unmasked(<vscale x 2 x i16> %va, <vscale x 2 x i16> %b, i32 zeroext %evl) {
+; CHECK-LABEL: vsaddu_vv_nxv2i16_unmasked:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma
+; CHECK-NEXT: vsaddu.vv v8, v8, v9
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 2 x i1> poison, i1 true, i32 0
+ %m = shufflevector <vscale x 2 x i1> %head, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
+ %v = call <vscale x 2 x i16> @llvm.vp.uadd.sat.nxv2i16(<vscale x 2 x i16> %va, <vscale x 2 x i16> %b, <vscale x 2 x i1> %m, i32 %evl)
+ ret <vscale x 2 x i16> %v
+}
+
+define <vscale x 2 x i16> @vsaddu_vx_nxv2i16(<vscale x 2 x i16> %va, i16 %b, <vscale x 2 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vsaddu_vx_nxv2i16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
+; CHECK-NEXT: vsaddu.vx v8, v8, a0, v0.t
+; CHECK-NEXT: ret
+ %elt.head = insertelement <vscale x 2 x i16> poison, i16 %b, i32 0
+ %vb = shufflevector <vscale x 2 x i16> %elt.head, <vscale x 2 x i16> poison, <vscale x 2 x i32> zeroinitializer
+ %v = call <vscale x 2 x i16> @llvm.vp.uadd.sat.nxv2i16(<vscale x 2 x i16> %va, <vscale x 2 x i16> %vb, <vscale x 2 x i1> %m, i32 %evl)
+ ret <vscale x 2 x i16> %v
+}
+
+define <vscale x 2 x i16> @vsaddu_vx_nxv2i16_unmasked(<vscale x 2 x i16> %va, i16 %b, i32 zeroext %evl) {
+; CHECK-LABEL: vsaddu_vx_nxv2i16_unmasked:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
+; CHECK-NEXT: vsaddu.vx v8, v8, a0
+; CHECK-NEXT: ret
+ %elt.head = insertelement <vscale x 2 x i16> poison, i16 %b, i32 0
+ %vb = shufflevector <vscale x 2 x i16> %elt.head, <vscale x 2 x i16> poison, <vscale x 2 x i32> zeroinitializer
+ %head = insertelement <vscale x 2 x i1> poison, i1 true, i32 0
+ %m = shufflevector <vscale x 2 x i1> %head, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
+ %v = call <vscale x 2 x i16> @llvm.vp.uadd.sat.nxv2i16(<vscale x 2 x i16> %va, <vscale x 2 x i16> %vb, <vscale x 2 x i1> %m, i32 %evl)
+ ret <vscale x 2 x i16> %v
+}
+
+define <vscale x 2 x i16> @vsaddu_vi_nxv2i16(<vscale x 2 x i16> %va, <vscale x 2 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vsaddu_vi_nxv2i16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma
+; CHECK-NEXT: vsaddu.vi v8, v8, -1, v0.t
+; CHECK-NEXT: ret
+ %elt.head = insertelement <vscale x 2 x i16> poison, i16 -1, i32 0
+ %vb = shufflevector <vscale x 2 x i16> %elt.head, <vscale x 2 x i16> poison, <vscale x 2 x i32> zeroinitializer
+ %v = call <vscale x 2 x i16> @llvm.vp.uadd.sat.nxv2i16(<vscale x 2 x i16> %va, <vscale x 2 x i16> %vb, <vscale x 2 x i1> %m, i32 %evl)
+ ret <vscale x 2 x i16> %v
+}
+
+define <vscale x 2 x i16> @vsaddu_vi_nxv2i16_unmasked(<vscale x 2 x i16> %va, i32 zeroext %evl) {
+; CHECK-LABEL: vsaddu_vi_nxv2i16_unmasked:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma
+; CHECK-NEXT: vsaddu.vi v8, v8, -1
+; CHECK-NEXT: ret
+ %elt.head = insertelement <vscale x 2 x i16> poison, i16 -1, i32 0
+ %vb = shufflevector <vscale x 2 x i16> %elt.head, <vscale x 2 x i16> poison, <vscale x 2 x i32> zeroinitializer
+ %head = insertelement <vscale x 2 x i1> poison, i1 true, i32 0
+ %m = shufflevector <vscale x 2 x i1> %head, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
+ %v = call <vscale x 2 x i16> @llvm.vp.uadd.sat.nxv2i16(<vscale x 2 x i16> %va, <vscale x 2 x i16> %vb, <vscale x 2 x i1> %m, i32 %evl)
+ ret <vscale x 2 x i16> %v
+}
+
+declare <vscale x 4 x i16> @llvm.vp.uadd.sat.nxv4i16(<vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i1>, i32)
+
+define <vscale x 4 x i16> @vsaddu_vv_nxv4i16(<vscale x 4 x i16> %va, <vscale x 4 x i16> %b, <vscale x 4 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vsaddu_vv_nxv4i16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma
+; CHECK-NEXT: vsaddu.vv v8, v8, v9, v0.t
+; CHECK-NEXT: ret
+ %v = call <vscale x 4 x i16> @llvm.vp.uadd.sat.nxv4i16(<vscale x 4 x i16> %va, <vscale x 4 x i16> %b, <vscale x 4 x i1> %m, i32 %evl)
+ ret <vscale x 4 x i16> %v
+}
+
+define <vscale x 4 x i16> @vsaddu_vv_nxv4i16_unmasked(<vscale x 4 x i16> %va, <vscale x 4 x i16> %b, i32 zeroext %evl) {
+; CHECK-LABEL: vsaddu_vv_nxv4i16_unmasked:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma
+; CHECK-NEXT: vsaddu.vv v8, v8, v9
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 4 x i1> poison, i1 true, i32 0
+ %m = shufflevector <vscale x 4 x i1> %head, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
+ %v = call <vscale x 4 x i16> @llvm.vp.uadd.sat.nxv4i16(<vscale x 4 x i16> %va, <vscale x 4 x i16> %b, <vscale x 4 x i1> %m, i32 %evl)
+ ret <vscale x 4 x i16> %v
+}
+
+define <vscale x 4 x i16> @vsaddu_vx_nxv4i16(<vscale x 4 x i16> %va, i16 %b, <vscale x 4 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vsaddu_vx_nxv4i16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
+; CHECK-NEXT: vsaddu.vx v8, v8, a0, v0.t
+; CHECK-NEXT: ret
+ %elt.head = insertelement <vscale x 4 x i16> poison, i16 %b, i32 0
+ %vb = shufflevector <vscale x 4 x i16> %elt.head, <vscale x 4 x i16> poison, <vscale x 4 x i32> zeroinitializer
+ %v = call <vscale x 4 x i16> @llvm.vp.uadd.sat.nxv4i16(<vscale x 4 x i16> %va, <vscale x 4 x i16> %vb, <vscale x 4 x i1> %m, i32 %evl)
+ ret <vscale x 4 x i16> %v
+}
+
+define <vscale x 4 x i16> @vsaddu_vx_nxv4i16_unmasked(<vscale x 4 x i16> %va, i16 %b, i32 zeroext %evl) {
+; CHECK-LABEL: vsaddu_vx_nxv4i16_unmasked:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
+; CHECK-NEXT: vsaddu.vx v8, v8, a0
+; CHECK-NEXT: ret
+ %elt.head = insertelement <vscale x 4 x i16> poison, i16 %b, i32 0
+ %vb = shufflevector <vscale x 4 x i16> %elt.head, <vscale x 4 x i16> poison, <vscale x 4 x i32> zeroinitializer
+ %head = insertelement <vscale x 4 x i1> poison, i1 true, i32 0
+ %m = shufflevector <vscale x 4 x i1> %head, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
+ %v = call <vscale x 4 x i16> @llvm.vp.uadd.sat.nxv4i16(<vscale x 4 x i16> %va, <vscale x 4 x i16> %vb, <vscale x 4 x i1> %m, i32 %evl)
+ ret <vscale x 4 x i16> %v
+}
+
+define <vscale x 4 x i16> @vsaddu_vi_nxv4i16(<vscale x 4 x i16> %va, <vscale x 4 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vsaddu_vi_nxv4i16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma
+; CHECK-NEXT: vsaddu.vi v8, v8, -1, v0.t
+; CHECK-NEXT: ret
+ %elt.head = insertelement <vscale x 4 x i16> poison, i16 -1, i32 0
+ %vb = shufflevector <vscale x 4 x i16> %elt.head, <vscale x 4 x i16> poison, <vscale x 4 x i32> zeroinitializer
+ %v = call <vscale x 4 x i16> @llvm.vp.uadd.sat.nxv4i16(<vscale x 4 x i16> %va, <vscale x 4 x i16> %vb, <vscale x 4 x i1> %m, i32 %evl)
+ ret <vscale x 4 x i16> %v
+}
+
+define <vscale x 4 x i16> @vsaddu_vi_nxv4i16_unmasked(<vscale x 4 x i16> %va, i32 zeroext %evl) {
+; CHECK-LABEL: vsaddu_vi_nxv4i16_unmasked:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma
+; CHECK-NEXT: vsaddu.vi v8, v8, -1
+; CHECK-NEXT: ret
+ %elt.head = insertelement <vscale x 4 x i16> poison, i16 -1, i32 0
+ %vb = shufflevector <vscale x 4 x i16> %elt.head, <vscale x 4 x i16> poison, <vscale x 4 x i32> zeroinitializer
+ %head = insertelement <vscale x 4 x i1> poison, i1 true, i32 0
+ %m = shufflevector <vscale x 4 x i1> %head, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
+ %v = call <vscale x 4 x i16> @llvm.vp.uadd.sat.nxv4i16(<vscale x 4 x i16> %va, <vscale x 4 x i16> %vb, <vscale x 4 x i1> %m, i32 %evl)
+ ret <vscale x 4 x i16> %v
+}
+
+declare <vscale x 8 x i16> @llvm.vp.uadd.sat.nxv8i16(<vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i1>, i32)
+
+define <vscale x 8 x i16> @vsaddu_vv_nxv8i16(<vscale x 8 x i16> %va, <vscale x 8 x i16> %b, <vscale x 8 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vsaddu_vv_nxv8i16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma
+; CHECK-NEXT: vsaddu.vv v8, v8, v10, v0.t
+; CHECK-NEXT: ret
+ %v = call <vscale x 8 x i16> @llvm.vp.uadd.sat.nxv8i16(<vscale x 8 x i16> %va, <vscale x 8 x i16> %b, <vscale x 8 x i1> %m, i32 %evl)
+ ret <vscale x 8 x i16> %v
+}
+
+define <vscale x 8 x i16> @vsaddu_vv_nxv8i16_unmasked(<vscale x 8 x i16> %va, <vscale x 8 x i16> %b, i32 zeroext %evl) {
+; CHECK-LABEL: vsaddu_vv_nxv8i16_unmasked:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma
+; CHECK-NEXT: vsaddu.vv v8, v8, v10
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i1> poison, i1 true, i32 0
+ %m = shufflevector <vscale x 8 x i1> %head, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
+ %v = call <vscale x 8 x i16> @llvm.vp.uadd.sat.nxv8i16(<vscale x 8 x i16> %va, <vscale x 8 x i16> %b, <vscale x 8 x i1> %m, i32 %evl)
+ ret <vscale x 8 x i16> %v
+}
+
+define <vscale x 8 x i16> @vsaddu_vx_nxv8i16(<vscale x 8 x i16> %va, i16 %b, <vscale x 8 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vsaddu_vx_nxv8i16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma
+; CHECK-NEXT: vsaddu.vx v8, v8, a0, v0.t
+; CHECK-NEXT: ret
+ %elt.head = insertelement <vscale x 8 x i16> poison, i16 %b, i32 0
+ %vb = shufflevector <vscale x 8 x i16> %elt.head, <vscale x 8 x i16> poison, <vscale x 8 x i32> zeroinitializer
+ %v = call <vscale x 8 x i16> @llvm.vp.uadd.sat.nxv8i16(<vscale x 8 x i16> %va, <vscale x 8 x i16> %vb, <vscale x 8 x i1> %m, i32 %evl)
+ ret <vscale x 8 x i16> %v
+}
+
+define <vscale x 8 x i16> @vsaddu_vx_nxv8i16_unmasked(<vscale x 8 x i16> %va, i16 %b, i32 zeroext %evl) {
+; CHECK-LABEL: vsaddu_vx_nxv8i16_unmasked:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma
+; CHECK-NEXT: vsaddu.vx v8, v8, a0
+; CHECK-NEXT: ret
+ %elt.head = insertelement <vscale x 8 x i16> poison, i16 %b, i32 0
+ %vb = shufflevector <vscale x 8 x i16> %elt.head, <vscale x 8 x i16> poison, <vscale x 8 x i32> zeroinitializer
+ %head = insertelement <vscale x 8 x i1> poison, i1 true, i32 0
+ %m = shufflevector <vscale x 8 x i1> %head, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
+ %v = call <vscale x 8 x i16> @llvm.vp.uadd.sat.nxv8i16(<vscale x 8 x i16> %va, <vscale x 8 x i16> %vb, <vscale x 8 x i1> %m, i32 %evl)
+ ret <vscale x 8 x i16> %v
+}
+
+define <vscale x 8 x i16> @vsaddu_vi_nxv8i16(<vscale x 8 x i16> %va, <vscale x 8 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vsaddu_vi_nxv8i16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma
+; CHECK-NEXT: vsaddu.vi v8, v8, -1, v0.t
+; CHECK-NEXT: ret
+ %elt.head = insertelement <vscale x 8 x i16> poison, i16 -1, i32 0
+ %vb = shufflevector <vscale x 8 x i16> %elt.head, <vscale x 8 x i16> poison, <vscale x 8 x i32> zeroinitializer
+ %v = call <vscale x 8 x i16> @llvm.vp.uadd.sat.nxv8i16(<vscale x 8 x i16> %va, <vscale x 8 x i16> %vb, <vscale x 8 x i1> %m, i32 %evl)
+ ret <vscale x 8 x i16> %v
+}
+
+define <vscale x 8 x i16> @vsaddu_vi_nxv8i16_unmasked(<vscale x 8 x i16> %va, i32 zeroext %evl) {
+; CHECK-LABEL: vsaddu_vi_nxv8i16_unmasked:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma
+; CHECK-NEXT: vsaddu.vi v8, v8, -1
+; CHECK-NEXT: ret
+ %elt.head = insertelement <vscale x 8 x i16> poison, i16 -1, i32 0
+ %vb = shufflevector <vscale x 8 x i16> %elt.head, <vscale x 8 x i16> poison, <vscale x 8 x i32> zeroinitializer
+ %head = insertelement <vscale x 8 x i1> poison, i1 true, i32 0
+ %m = shufflevector <vscale x 8 x i1> %head, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
+ %v = call <vscale x 8 x i16> @llvm.vp.uadd.sat.nxv8i16(<vscale x 8 x i16> %va, <vscale x 8 x i16> %vb, <vscale x 8 x i1> %m, i32 %evl)
+ ret <vscale x 8 x i16> %v
+}
+
+declare <vscale x 16 x i16> @llvm.vp.uadd.sat.nxv16i16(<vscale x 16 x i16>, <vscale x 16 x i16>, <vscale x 16 x i1>, i32)
+
+define <vscale x 16 x i16> @vsaddu_vv_nxv16i16(<vscale x 16 x i16> %va, <vscale x 16 x i16> %b, <vscale x 16 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vsaddu_vv_nxv16i16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma
+; CHECK-NEXT: vsaddu.vv v8, v8, v12, v0.t
+; CHECK-NEXT: ret
+ %v = call <vscale x 16 x i16> @llvm.vp.uadd.sat.nxv16i16(<vscale x 16 x i16> %va, <vscale x 16 x i16> %b, <vscale x 16 x i1> %m, i32 %evl)
+ ret <vscale x 16 x i16> %v
+}
+
+define <vscale x 16 x i16> @vsaddu_vv_nxv16i16_unmasked(<vscale x 16 x i16> %va, <vscale x 16 x i16> %b, i32 zeroext %evl) {
+; CHECK-LABEL: vsaddu_vv_nxv16i16_unmasked:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma
+; CHECK-NEXT: vsaddu.vv v8, v8, v12
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 16 x i1> poison, i1 true, i32 0
+ %m = shufflevector <vscale x 16 x i1> %head, <vscale x 16 x i1> poison, <vscale x 16 x i32> zeroinitializer
+ %v = call <vscale x 16 x i16> @llvm.vp.uadd.sat.nxv16i16(<vscale x 16 x i16> %va, <vscale x 16 x i16> %b, <vscale x 16 x i1> %m, i32 %evl)
+ ret <vscale x 16 x i16> %v
+}
+
+define <vscale x 16 x i16> @vsaddu_vx_nxv16i16(<vscale x 16 x i16> %va, i16 %b, <vscale x 16 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vsaddu_vx_nxv16i16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma
+; CHECK-NEXT: vsaddu.vx v8, v8, a0, v0.t
+; CHECK-NEXT: ret
+ %elt.head = insertelement <vscale x 16 x i16> poison, i16 %b, i32 0
+ %vb = shufflevector <vscale x 16 x i16> %elt.head, <vscale x 16 x i16> poison, <vscale x 16 x i32> zeroinitializer
+ %v = call <vscale x 16 x i16> @llvm.vp.uadd.sat.nxv16i16(<vscale x 16 x i16> %va, <vscale x 16 x i16> %vb, <vscale x 16 x i1> %m, i32 %evl)
+ ret <vscale x 16 x i16> %v
+}
+
+define <vscale x 16 x i16> @vsaddu_vx_nxv16i16_unmasked(<vscale x 16 x i16> %va, i16 %b, i32 zeroext %evl) {
+; CHECK-LABEL: vsaddu_vx_nxv16i16_unmasked:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma
+; CHECK-NEXT: vsaddu.vx v8, v8, a0
+; CHECK-NEXT: ret
+ %elt.head = insertelement <vscale x 16 x i16> poison, i16 %b, i32 0
+ %vb = shufflevector <vscale x 16 x i16> %elt.head, <vscale x 16 x i16> poison, <vscale x 16 x i32> zeroinitializer
+ %head = insertelement <vscale x 16 x i1> poison, i1 true, i32 0
+ %m = shufflevector <vscale x 16 x i1> %head, <vscale x 16 x i1> poison, <vscale x 16 x i32> zeroinitializer
+ %v = call <vscale x 16 x i16> @llvm.vp.uadd.sat.nxv16i16(<vscale x 16 x i16> %va, <vscale x 16 x i16> %vb, <vscale x 16 x i1> %m, i32 %evl)
+ ret <vscale x 16 x i16> %v
+}
+
+define <vscale x 16 x i16> @vsaddu_vi_nxv16i16(<vscale x 16 x i16> %va, <vscale x 16 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vsaddu_vi_nxv16i16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma
+; CHECK-NEXT: vsaddu.vi v8, v8, -1, v0.t
+; CHECK-NEXT: ret
+ %elt.head = insertelement <vscale x 16 x i16> poison, i16 -1, i32 0
+ %vb = shufflevector <vscale x 16 x i16> %elt.head, <vscale x 16 x i16> poison, <vscale x 16 x i32> zeroinitializer
+ %v = call <vscale x 16 x i16> @llvm.vp.uadd.sat.nxv16i16(<vscale x 16 x i16> %va, <vscale x 16 x i16> %vb, <vscale x 16 x i1> %m, i32 %evl)
+ ret <vscale x 16 x i16> %v
+}
+
+define <vscale x 16 x i16> @vsaddu_vi_nxv16i16_unmasked(<vscale x 16 x i16> %va, i32 zeroext %evl) {
+; CHECK-LABEL: vsaddu_vi_nxv16i16_unmasked:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma
+; CHECK-NEXT: vsaddu.vi v8, v8, -1
+; CHECK-NEXT: ret
+ %elt.head = insertelement <vscale x 16 x i16> poison, i16 -1, i32 0
+ %vb = shufflevector <vscale x 16 x i16> %elt.head, <vscale x 16 x i16> poison, <vscale x 16 x i32> zeroinitializer
+ %head = insertelement <vscale x 16 x i1> poison, i1 true, i32 0
+ %m = shufflevector <vscale x 16 x i1> %head, <vscale x 16 x i1> poison, <vscale x 16 x i32> zeroinitializer
+ %v = call <vscale x 16 x i16> @llvm.vp.uadd.sat.nxv16i16(<vscale x 16 x i16> %va, <vscale x 16 x i16> %vb, <vscale x 16 x i1> %m, i32 %evl)
+ ret <vscale x 16 x i16> %v
+}
+
+declare <vscale x 32 x i16> @llvm.vp.uadd.sat.nxv32i16(<vscale x 32 x i16>, <vscale x 32 x i16>, <vscale x 32 x i1>, i32)
+
+define <vscale x 32 x i16> @vsaddu_vv_nxv32i16(<vscale x 32 x i16> %va, <vscale x 32 x i16> %b, <vscale x 32 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vsaddu_vv_nxv32i16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, ma
+; CHECK-NEXT: vsaddu.vv v8, v8, v16, v0.t
+; CHECK-NEXT: ret
+ %v = call <vscale x 32 x i16> @llvm.vp.uadd.sat.nxv32i16(<vscale x 32 x i16> %va, <vscale x 32 x i16> %b, <vscale x 32 x i1> %m, i32 %evl)
+ ret <vscale x 32 x i16> %v
+}
+
+define <vscale x 32 x i16> @vsaddu_vv_nxv32i16_unmasked(<vscale x 32 x i16> %va, <vscale x 32 x i16> %b, i32 zeroext %evl) {
+; CHECK-LABEL: vsaddu_vv_nxv32i16_unmasked:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, ma
+; CHECK-NEXT: vsaddu.vv v8, v8, v16
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 32 x i1> poison, i1 true, i32 0
+ %m = shufflevector <vscale x 32 x i1> %head, <vscale x 32 x i1> poison, <vscale x 32 x i32> zeroinitializer
+ %v = call <vscale x 32 x i16> @llvm.vp.uadd.sat.nxv32i16(<vscale x 32 x i16> %va, <vscale x 32 x i16> %b, <vscale x 32 x i1> %m, i32 %evl)
+ ret <vscale x 32 x i16> %v
+}
+
+define <vscale x 32 x i16> @vsaddu_vx_nxv32i16(<vscale x 32 x i16> %va, i16 %b, <vscale x 32 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vsaddu_vx_nxv32i16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, ma
+; CHECK-NEXT: vsaddu.vx v8, v8, a0, v0.t
+; CHECK-NEXT: ret
+ %elt.head = insertelement <vscale x 32 x i16> poison, i16 %b, i32 0
+ %vb = shufflevector <vscale x 32 x i16> %elt.head, <vscale x 32 x i16> poison, <vscale x 32 x i32> zeroinitializer
+ %v = call <vscale x 32 x i16> @llvm.vp.uadd.sat.nxv32i16(<vscale x 32 x i16> %va, <vscale x 32 x i16> %vb, <vscale x 32 x i1> %m, i32 %evl)
+ ret <vscale x 32 x i16> %v
+}
+
+define <vscale x 32 x i16> @vsaddu_vx_nxv32i16_unmasked(<vscale x 32 x i16> %va, i16 %b, i32 zeroext %evl) {
+; CHECK-LABEL: vsaddu_vx_nxv32i16_unmasked:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, ma
+; CHECK-NEXT: vsaddu.vx v8, v8, a0
+; CHECK-NEXT: ret
+ %elt.head = insertelement <vscale x 32 x i16> poison, i16 %b, i32 0
+ %vb = shufflevector <vscale x 32 x i16> %elt.head, <vscale x 32 x i16> poison, <vscale x 32 x i32> zeroinitializer
+ %head = insertelement <vscale x 32 x i1> poison, i1 true, i32 0
+ %m = shufflevector <vscale x 32 x i1> %head, <vscale x 32 x i1> poison, <vscale x 32 x i32> zeroinitializer
+ %v = call <vscale x 32 x i16> @llvm.vp.uadd.sat.nxv32i16(<vscale x 32 x i16> %va, <vscale x 32 x i16> %vb, <vscale x 32 x i1> %m, i32 %evl)
+ ret <vscale x 32 x i16> %v
+}
+
+define <vscale x 32 x i16> @vsaddu_vi_nxv32i16(<vscale x 32 x i16> %va, <vscale x 32 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vsaddu_vi_nxv32i16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, ma
+; CHECK-NEXT: vsaddu.vi v8, v8, -1, v0.t
+; CHECK-NEXT: ret
+ %elt.head = insertelement <vscale x 32 x i16> poison, i16 -1, i32 0
+ %vb = shufflevector <vscale x 32 x i16> %elt.head, <vscale x 32 x i16> poison, <vscale x 32 x i32> zeroinitializer
+ %v = call <vscale x 32 x i16> @llvm.vp.uadd.sat.nxv32i16(<vscale x 32 x i16> %va, <vscale x 32 x i16> %vb, <vscale x 32 x i1> %m, i32 %evl)
+ ret <vscale x 32 x i16> %v
+}
+
+define <vscale x 32 x i16> @vsaddu_vi_nxv32i16_unmasked(<vscale x 32 x i16> %va, i32 zeroext %evl) {
+; CHECK-LABEL: vsaddu_vi_nxv32i16_unmasked:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, ma
+; CHECK-NEXT: vsaddu.vi v8, v8, -1
+; CHECK-NEXT: ret
+ %elt.head = insertelement <vscale x 32 x i16> poison, i16 -1, i32 0
+ %vb = shufflevector <vscale x 32 x i16> %elt.head, <vscale x 32 x i16> poison, <vscale x 32 x i32> zeroinitializer
+ %head = insertelement <vscale x 32 x i1> poison, i1 true, i32 0
+ %m = shufflevector <vscale x 32 x i1> %head, <vscale x 32 x i1> poison, <vscale x 32 x i32> zeroinitializer
+ %v = call <vscale x 32 x i16> @llvm.vp.uadd.sat.nxv32i16(<vscale x 32 x i16> %va, <vscale x 32 x i16> %vb, <vscale x 32 x i1> %m, i32 %evl)
+ ret <vscale x 32 x i16> %v
+}
+
+declare <vscale x 1 x i32> @llvm.vp.uadd.sat.nxv1i32(<vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i1>, i32)
+
+define <vscale x 1 x i32> @vsaddu_vv_nxv1i32(<vscale x 1 x i32> %va, <vscale x 1 x i32> %b, <vscale x 1 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vsaddu_vv_nxv1i32:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma
+; CHECK-NEXT: vsaddu.vv v8, v8, v9, v0.t
+; CHECK-NEXT: ret
+ %v = call <vscale x 1 x i32> @llvm.vp.uadd.sat.nxv1i32(<vscale x 1 x i32> %va, <vscale x 1 x i32> %b, <vscale x 1 x i1> %m, i32 %evl)
+ ret <vscale x 1 x i32> %v
+}
+
+define <vscale x 1 x i32> @vsaddu_vv_nxv1i32_unmasked(<vscale x 1 x i32> %va, <vscale x 1 x i32> %b, i32 zeroext %evl) {
+; CHECK-LABEL: vsaddu_vv_nxv1i32_unmasked:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma
+; CHECK-NEXT: vsaddu.vv v8, v8, v9
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 1 x i1> poison, i1 true, i32 0
+ %m = shufflevector <vscale x 1 x i1> %head, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
+ %v = call <vscale x 1 x i32> @llvm.vp.uadd.sat.nxv1i32(<vscale x 1 x i32> %va, <vscale x 1 x i32> %b, <vscale x 1 x i1> %m, i32 %evl)
+ ret <vscale x 1 x i32> %v
+}
+
+define <vscale x 1 x i32> @vsaddu_vx_nxv1i32(<vscale x 1 x i32> %va, i32 %b, <vscale x 1 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vsaddu_vx_nxv1i32:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
+; CHECK-NEXT: vsaddu.vx v8, v8, a0, v0.t
+; CHECK-NEXT: ret
+ %elt.head = insertelement <vscale x 1 x i32> poison, i32 %b, i32 0
+ %vb = shufflevector <vscale x 1 x i32> %elt.head, <vscale x 1 x i32> poison, <vscale x 1 x i32> zeroinitializer
+ %v = call <vscale x 1 x i32> @llvm.vp.uadd.sat.nxv1i32(<vscale x 1 x i32> %va, <vscale x 1 x i32> %vb, <vscale x 1 x i1> %m, i32 %evl)
+ ret <vscale x 1 x i32> %v
+}
+
+define <vscale x 1 x i32> @vsaddu_vx_nxv1i32_unmasked(<vscale x 1 x i32> %va, i32 %b, i32 zeroext %evl) {
+; CHECK-LABEL: vsaddu_vx_nxv1i32_unmasked:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
+; CHECK-NEXT: vsaddu.vx v8, v8, a0
+; CHECK-NEXT: ret
+ %elt.head = insertelement <vscale x 1 x i32> poison, i32 %b, i32 0
+ %vb = shufflevector <vscale x 1 x i32> %elt.head, <vscale x 1 x i32> poison, <vscale x 1 x i32> zeroinitializer
+ %head = insertelement <vscale x 1 x i1> poison, i1 true, i32 0
+ %m = shufflevector <vscale x 1 x i1> %head, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
+ %v = call <vscale x 1 x i32> @llvm.vp.uadd.sat.nxv1i32(<vscale x 1 x i32> %va, <vscale x 1 x i32> %vb, <vscale x 1 x i1> %m, i32 %evl)
+ ret <vscale x 1 x i32> %v
+}
+
+define <vscale x 1 x i32> @vsaddu_vi_nxv1i32(<vscale x 1 x i32> %va, <vscale x 1 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vsaddu_vi_nxv1i32:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma
+; CHECK-NEXT: vsaddu.vi v8, v8, -1, v0.t
+; CHECK-NEXT: ret
+ %elt.head = insertelement <vscale x 1 x i32> poison, i32 -1, i32 0
+ %vb = shufflevector <vscale x 1 x i32> %elt.head, <vscale x 1 x i32> poison, <vscale x 1 x i32> zeroinitializer
+ %v = call <vscale x 1 x i32> @llvm.vp.uadd.sat.nxv1i32(<vscale x 1 x i32> %va, <vscale x 1 x i32> %vb, <vscale x 1 x i1> %m, i32 %evl)
+ ret <vscale x 1 x i32> %v
+}
+
+define <vscale x 1 x i32> @vsaddu_vi_nxv1i32_unmasked(<vscale x 1 x i32> %va, i32 zeroext %evl) {
+; CHECK-LABEL: vsaddu_vi_nxv1i32_unmasked:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma
+; CHECK-NEXT: vsaddu.vi v8, v8, -1
+; CHECK-NEXT: ret
+ %elt.head = insertelement <vscale x 1 x i32> poison, i32 -1, i32 0
+ %vb = shufflevector <vscale x 1 x i32> %elt.head, <vscale x 1 x i32> poison, <vscale x 1 x i32> zeroinitializer
+ %head = insertelement <vscale x 1 x i1> poison, i1 true, i32 0
+ %m = shufflevector <vscale x 1 x i1> %head, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
+ %v = call <vscale x 1 x i32> @llvm.vp.uadd.sat.nxv1i32(<vscale x 1 x i32> %va, <vscale x 1 x i32> %vb, <vscale x 1 x i1> %m, i32 %evl)
+ ret <vscale x 1 x i32> %v
+}
+
+declare <vscale x 2 x i32> @llvm.vp.uadd.sat.nxv2i32(<vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i1>, i32)
+
+define <vscale x 2 x i32> @vsaddu_vv_nxv2i32(<vscale x 2 x i32> %va, <vscale x 2 x i32> %b, <vscale x 2 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vsaddu_vv_nxv2i32:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma
+; CHECK-NEXT: vsaddu.vv v8, v8, v9, v0.t
+; CHECK-NEXT: ret
+ %v = call <vscale x 2 x i32> @llvm.vp.uadd.sat.nxv2i32(<vscale x 2 x i32> %va, <vscale x 2 x i32> %b, <vscale x 2 x i1> %m, i32 %evl)
+ ret <vscale x 2 x i32> %v
+}
+
+define <vscale x 2 x i32> @vsaddu_vv_nxv2i32_unmasked(<vscale x 2 x i32> %va, <vscale x 2 x i32> %b, i32 zeroext %evl) {
+; CHECK-LABEL: vsaddu_vv_nxv2i32_unmasked:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma
+; CHECK-NEXT: vsaddu.vv v8, v8, v9
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 2 x i1> poison, i1 true, i32 0
+ %m = shufflevector <vscale x 2 x i1> %head, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
+ %v = call <vscale x 2 x i32> @llvm.vp.uadd.sat.nxv2i32(<vscale x 2 x i32> %va, <vscale x 2 x i32> %b, <vscale x 2 x i1> %m, i32 %evl)
+ ret <vscale x 2 x i32> %v
+}
+
+define <vscale x 2 x i32> @vsaddu_vx_nxv2i32(<vscale x 2 x i32> %va, i32 %b, <vscale x 2 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vsaddu_vx_nxv2i32:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
+; CHECK-NEXT: vsaddu.vx v8, v8, a0, v0.t
+; CHECK-NEXT: ret
+ %elt.head = insertelement <vscale x 2 x i32> poison, i32 %b, i32 0
+ %vb = shufflevector <vscale x 2 x i32> %elt.head, <vscale x 2 x i32> poison, <vscale x 2 x i32> zeroinitializer
+ %v = call <vscale x 2 x i32> @llvm.vp.uadd.sat.nxv2i32(<vscale x 2 x i32> %va, <vscale x 2 x i32> %vb, <vscale x 2 x i1> %m, i32 %evl)
+ ret <vscale x 2 x i32> %v
+}
+
+define <vscale x 2 x i32> @vsaddu_vx_nxv2i32_unmasked(<vscale x 2 x i32> %va, i32 %b, i32 zeroext %evl) {
+; CHECK-LABEL: vsaddu_vx_nxv2i32_unmasked:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
+; CHECK-NEXT: vsaddu.vx v8, v8, a0
+; CHECK-NEXT: ret
+ %elt.head = insertelement <vscale x 2 x i32> poison, i32 %b, i32 0
+ %vb = shufflevector <vscale x 2 x i32> %elt.head, <vscale x 2 x i32> poison, <vscale x 2 x i32> zeroinitializer
+ %head = insertelement <vscale x 2 x i1> poison, i1 true, i32 0
+ %m = shufflevector <vscale x 2 x i1> %head, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
+ %v = call <vscale x 2 x i32> @llvm.vp.uadd.sat.nxv2i32(<vscale x 2 x i32> %va, <vscale x 2 x i32> %vb, <vscale x 2 x i1> %m, i32 %evl)
+ ret <vscale x 2 x i32> %v
+}
+
+define <vscale x 2 x i32> @vsaddu_vi_nxv2i32(<vscale x 2 x i32> %va, <vscale x 2 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vsaddu_vi_nxv2i32:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma
+; CHECK-NEXT: vsaddu.vi v8, v8, -1, v0.t
+; CHECK-NEXT: ret
+ %elt.head = insertelement <vscale x 2 x i32> poison, i32 -1, i32 0
+ %vb = shufflevector <vscale x 2 x i32> %elt.head, <vscale x 2 x i32> poison, <vscale x 2 x i32> zeroinitializer
+ %v = call <vscale x 2 x i32> @llvm.vp.uadd.sat.nxv2i32(<vscale x 2 x i32> %va, <vscale x 2 x i32> %vb, <vscale x 2 x i1> %m, i32 %evl)
+ ret <vscale x 2 x i32> %v
+}
+
+define <vscale x 2 x i32> @vsaddu_vi_nxv2i32_unmasked(<vscale x 2 x i32> %va, i32 zeroext %evl) {
+; CHECK-LABEL: vsaddu_vi_nxv2i32_unmasked:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma
+; CHECK-NEXT: vsaddu.vi v8, v8, -1
+; CHECK-NEXT: ret
+ %elt.head = insertelement <vscale x 2 x i32> poison, i32 -1, i32 0
+ %vb = shufflevector <vscale x 2 x i32> %elt.head, <vscale x 2 x i32> poison, <vscale x 2 x i32> zeroinitializer
+ %head = insertelement <vscale x 2 x i1> poison, i1 true, i32 0
+ %m = shufflevector <vscale x 2 x i1> %head, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
+ %v = call <vscale x 2 x i32> @llvm.vp.uadd.sat.nxv2i32(<vscale x 2 x i32> %va, <vscale x 2 x i32> %vb, <vscale x 2 x i1> %m, i32 %evl)
+ ret <vscale x 2 x i32> %v
+}
+
+declare <vscale x 4 x i32> @llvm.vp.uadd.sat.nxv4i32(<vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i1>, i32)
+
+define <vscale x 4 x i32> @vsaddu_vv_nxv4i32(<vscale x 4 x i32> %va, <vscale x 4 x i32> %b, <vscale x 4 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vsaddu_vv_nxv4i32:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma
+; CHECK-NEXT: vsaddu.vv v8, v8, v10, v0.t
+; CHECK-NEXT: ret
+ %v = call <vscale x 4 x i32> @llvm.vp.uadd.sat.nxv4i32(<vscale x 4 x i32> %va, <vscale x 4 x i32> %b, <vscale x 4 x i1> %m, i32 %evl)
+ ret <vscale x 4 x i32> %v
+}
+
+define <vscale x 4 x i32> @vsaddu_vv_nxv4i32_unmasked(<vscale x 4 x i32> %va, <vscale x 4 x i32> %b, i32 zeroext %evl) {
+; CHECK-LABEL: vsaddu_vv_nxv4i32_unmasked:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma
+; CHECK-NEXT: vsaddu.vv v8, v8, v10
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 4 x i1> poison, i1 true, i32 0
+ %m = shufflevector <vscale x 4 x i1> %head, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
+ %v = call <vscale x 4 x i32> @llvm.vp.uadd.sat.nxv4i32(<vscale x 4 x i32> %va, <vscale x 4 x i32> %b, <vscale x 4 x i1> %m, i32 %evl)
+ ret <vscale x 4 x i32> %v
+}
+
+define <vscale x 4 x i32> @vsaddu_vx_nxv4i32(<vscale x 4 x i32> %va, i32 %b, <vscale x 4 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vsaddu_vx_nxv4i32:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma
+; CHECK-NEXT: vsaddu.vx v8, v8, a0, v0.t
+; CHECK-NEXT: ret
+ %elt.head = insertelement <vscale x 4 x i32> poison, i32 %b, i32 0
+ %vb = shufflevector <vscale x 4 x i32> %elt.head, <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer
+ %v = call <vscale x 4 x i32> @llvm.vp.uadd.sat.nxv4i32(<vscale x 4 x i32> %va, <vscale x 4 x i32> %vb, <vscale x 4 x i1> %m, i32 %evl)
+ ret <vscale x 4 x i32> %v
+}
+
+define <vscale x 4 x i32> @vsaddu_vx_nxv4i32_unmasked(<vscale x 4 x i32> %va, i32 %b, i32 zeroext %evl) {
+; CHECK-LABEL: vsaddu_vx_nxv4i32_unmasked:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma
+; CHECK-NEXT: vsaddu.vx v8, v8, a0
+; CHECK-NEXT: ret
+ %elt.head = insertelement <vscale x 4 x i32> poison, i32 %b, i32 0
+ %vb = shufflevector <vscale x 4 x i32> %elt.head, <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer
+ %head = insertelement <vscale x 4 x i1> poison, i1 true, i32 0
+ %m = shufflevector <vscale x 4 x i1> %head, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
+ %v = call <vscale x 4 x i32> @llvm.vp.uadd.sat.nxv4i32(<vscale x 4 x i32> %va, <vscale x 4 x i32> %vb, <vscale x 4 x i1> %m, i32 %evl)
+ ret <vscale x 4 x i32> %v
+}
+
+define <vscale x 4 x i32> @vsaddu_vi_nxv4i32(<vscale x 4 x i32> %va, <vscale x 4 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vsaddu_vi_nxv4i32:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma
+; CHECK-NEXT: vsaddu.vi v8, v8, -1, v0.t
+; CHECK-NEXT: ret
+ %elt.head = insertelement <vscale x 4 x i32> poison, i32 -1, i32 0
+ %vb = shufflevector <vscale x 4 x i32> %elt.head, <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer
+ %v = call <vscale x 4 x i32> @llvm.vp.uadd.sat.nxv4i32(<vscale x 4 x i32> %va, <vscale x 4 x i32> %vb, <vscale x 4 x i1> %m, i32 %evl)
+ ret <vscale x 4 x i32> %v
+}
+
+define <vscale x 4 x i32> @vsaddu_vi_nxv4i32_unmasked(<vscale x 4 x i32> %va, i32 zeroext %evl) {
+; CHECK-LABEL: vsaddu_vi_nxv4i32_unmasked:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma
+; CHECK-NEXT: vsaddu.vi v8, v8, -1
+; CHECK-NEXT: ret
+ %elt.head = insertelement <vscale x 4 x i32> poison, i32 -1, i32 0
+ %vb = shufflevector <vscale x 4 x i32> %elt.head, <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer
+ %head = insertelement <vscale x 4 x i1> poison, i1 true, i32 0
+ %m = shufflevector <vscale x 4 x i1> %head, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
+ %v = call <vscale x 4 x i32> @llvm.vp.uadd.sat.nxv4i32(<vscale x 4 x i32> %va, <vscale x 4 x i32> %vb, <vscale x 4 x i1> %m, i32 %evl)
+ ret <vscale x 4 x i32> %v
+}
+
+declare <vscale x 8 x i32> @llvm.vp.uadd.sat.nxv8i32(<vscale x 8 x i32>, <vscale x 8 x i32>, <vscale x 8 x i1>, i32)
+
+define <vscale x 8 x i32> @vsaddu_vv_nxv8i32(<vscale x 8 x i32> %va, <vscale x 8 x i32> %b, <vscale x 8 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vsaddu_vv_nxv8i32:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma
+; CHECK-NEXT: vsaddu.vv v8, v8, v12, v0.t
+; CHECK-NEXT: ret
+ %v = call <vscale x 8 x i32> @llvm.vp.uadd.sat.nxv8i32(<vscale x 8 x i32> %va, <vscale x 8 x i32> %b, <vscale x 8 x i1> %m, i32 %evl)
+ ret <vscale x 8 x i32> %v
+}
+
+define <vscale x 8 x i32> @vsaddu_vv_nxv8i32_unmasked(<vscale x 8 x i32> %va, <vscale x 8 x i32> %b, i32 zeroext %evl) {
+; CHECK-LABEL: vsaddu_vv_nxv8i32_unmasked:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma
+; CHECK-NEXT: vsaddu.vv v8, v8, v12
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i1> poison, i1 true, i32 0
+ %m = shufflevector <vscale x 8 x i1> %head, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
+ %v = call <vscale x 8 x i32> @llvm.vp.uadd.sat.nxv8i32(<vscale x 8 x i32> %va, <vscale x 8 x i32> %b, <vscale x 8 x i1> %m, i32 %evl)
+ ret <vscale x 8 x i32> %v
+}
+
+define <vscale x 8 x i32> @vsaddu_vx_nxv8i32(<vscale x 8 x i32> %va, i32 %b, <vscale x 8 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vsaddu_vx_nxv8i32:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma
+; CHECK-NEXT: vsaddu.vx v8, v8, a0, v0.t
+; CHECK-NEXT: ret
+ %elt.head = insertelement <vscale x 8 x i32> poison, i32 %b, i32 0
+ %vb = shufflevector <vscale x 8 x i32> %elt.head, <vscale x 8 x i32> poison, <vscale x 8 x i32> zeroinitializer
+ %v = call <vscale x 8 x i32> @llvm.vp.uadd.sat.nxv8i32(<vscale x 8 x i32> %va, <vscale x 8 x i32> %vb, <vscale x 8 x i1> %m, i32 %evl)
+ ret <vscale x 8 x i32> %v
+}
+
+define <vscale x 8 x i32> @vsaddu_vx_nxv8i32_unmasked(<vscale x 8 x i32> %va, i32 %b, i32 zeroext %evl) {
+; CHECK-LABEL: vsaddu_vx_nxv8i32_unmasked:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma
+; CHECK-NEXT: vsaddu.vx v8, v8, a0
+; CHECK-NEXT: ret
+ %elt.head = insertelement <vscale x 8 x i32> poison, i32 %b, i32 0
+ %vb = shufflevector <vscale x 8 x i32> %elt.head, <vscale x 8 x i32> poison, <vscale x 8 x i32> zeroinitializer
+ %head = insertelement <vscale x 8 x i1> poison, i1 true, i32 0
+ %m = shufflevector <vscale x 8 x i1> %head, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
+ %v = call <vscale x 8 x i32> @llvm.vp.uadd.sat.nxv8i32(<vscale x 8 x i32> %va, <vscale x 8 x i32> %vb, <vscale x 8 x i1> %m, i32 %evl)
+ ret <vscale x 8 x i32> %v
+}
+
+define <vscale x 8 x i32> @vsaddu_vi_nxv8i32(<vscale x 8 x i32> %va, <vscale x 8 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vsaddu_vi_nxv8i32:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma
+; CHECK-NEXT: vsaddu.vi v8, v8, -1, v0.t
+; CHECK-NEXT: ret
+ %elt.head = insertelement <vscale x 8 x i32> poison, i32 -1, i32 0
+ %vb = shufflevector <vscale x 8 x i32> %elt.head, <vscale x 8 x i32> poison, <vscale x 8 x i32> zeroinitializer
+ %v = call <vscale x 8 x i32> @llvm.vp.uadd.sat.nxv8i32(<vscale x 8 x i32> %va, <vscale x 8 x i32> %vb, <vscale x 8 x i1> %m, i32 %evl)
+ ret <vscale x 8 x i32> %v
+}
+
+define <vscale x 8 x i32> @vsaddu_vi_nxv8i32_unmasked(<vscale x 8 x i32> %va, i32 zeroext %evl) {
+; CHECK-LABEL: vsaddu_vi_nxv8i32_unmasked:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma
+; CHECK-NEXT: vsaddu.vi v8, v8, -1
+; CHECK-NEXT: ret
+ %elt.head = insertelement <vscale x 8 x i32> poison, i32 -1, i32 0
+ %vb = shufflevector <vscale x 8 x i32> %elt.head, <vscale x 8 x i32> poison, <vscale x 8 x i32> zeroinitializer
+ %head = insertelement <vscale x 8 x i1> poison, i1 true, i32 0
+ %m = shufflevector <vscale x 8 x i1> %head, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
+ %v = call <vscale x 8 x i32> @llvm.vp.uadd.sat.nxv8i32(<vscale x 8 x i32> %va, <vscale x 8 x i32> %vb, <vscale x 8 x i1> %m, i32 %evl)
+ ret <vscale x 8 x i32> %v
+}
+
+declare <vscale x 16 x i32> @llvm.vp.uadd.sat.nxv16i32(<vscale x 16 x i32>, <vscale x 16 x i32>, <vscale x 16 x i1>, i32)
+
+define <vscale x 16 x i32> @vsaddu_vv_nxv16i32(<vscale x 16 x i32> %va, <vscale x 16 x i32> %b, <vscale x 16 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vsaddu_vv_nxv16i32:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma
+; CHECK-NEXT: vsaddu.vv v8, v8, v16, v0.t
+; CHECK-NEXT: ret
+ %v = call <vscale x 16 x i32> @llvm.vp.uadd.sat.nxv16i32(<vscale x 16 x i32> %va, <vscale x 16 x i32> %b, <vscale x 16 x i1> %m, i32 %evl)
+ ret <vscale x 16 x i32> %v
+}
+
+define <vscale x 16 x i32> @vsaddu_vv_nxv16i32_unmasked(<vscale x 16 x i32> %va, <vscale x 16 x i32> %b, i32 zeroext %evl) {
+; CHECK-LABEL: vsaddu_vv_nxv16i32_unmasked:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma
+; CHECK-NEXT: vsaddu.vv v8, v8, v16
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 16 x i1> poison, i1 true, i32 0
+ %m = shufflevector <vscale x 16 x i1> %head, <vscale x 16 x i1> poison, <vscale x 16 x i32> zeroinitializer
+ %v = call <vscale x 16 x i32> @llvm.vp.uadd.sat.nxv16i32(<vscale x 16 x i32> %va, <vscale x 16 x i32> %b, <vscale x 16 x i1> %m, i32 %evl)
+ ret <vscale x 16 x i32> %v
+}
+
+define <vscale x 16 x i32> @vsaddu_vx_nxv16i32(<vscale x 16 x i32> %va, i32 %b, <vscale x 16 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vsaddu_vx_nxv16i32:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, ma
+; CHECK-NEXT: vsaddu.vx v8, v8, a0, v0.t
+; CHECK-NEXT: ret
+ %elt.head = insertelement <vscale x 16 x i32> poison, i32 %b, i32 0
+ %vb = shufflevector <vscale x 16 x i32> %elt.head, <vscale x 16 x i32> poison, <vscale x 16 x i32> zeroinitializer
+ %v = call <vscale x 16 x i32> @llvm.vp.uadd.sat.nxv16i32(<vscale x 16 x i32> %va, <vscale x 16 x i32> %vb, <vscale x 16 x i1> %m, i32 %evl)
+ ret <vscale x 16 x i32> %v
+}
+
+define <vscale x 16 x i32> @vsaddu_vx_nxv16i32_unmasked(<vscale x 16 x i32> %va, i32 %b, i32 zeroext %evl) {
+; CHECK-LABEL: vsaddu_vx_nxv16i32_unmasked:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, ma
+; CHECK-NEXT: vsaddu.vx v8, v8, a0
+; CHECK-NEXT: ret
+ %elt.head = insertelement <vscale x 16 x i32> poison, i32 %b, i32 0
+ %vb = shufflevector <vscale x 16 x i32> %elt.head, <vscale x 16 x i32> poison, <vscale x 16 x i32> zeroinitializer
+ %head = insertelement <vscale x 16 x i1> poison, i1 true, i32 0
+ %m = shufflevector <vscale x 16 x i1> %head, <vscale x 16 x i1> poison, <vscale x 16 x i32> zeroinitializer
+ %v = call <vscale x 16 x i32> @llvm.vp.uadd.sat.nxv16i32(<vscale x 16 x i32> %va, <vscale x 16 x i32> %vb, <vscale x 16 x i1> %m, i32 %evl)
+ ret <vscale x 16 x i32> %v
+}
+
+define <vscale x 16 x i32> @vsaddu_vi_nxv16i32(<vscale x 16 x i32> %va, <vscale x 16 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vsaddu_vi_nxv16i32:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma
+; CHECK-NEXT: vsaddu.vi v8, v8, -1, v0.t
+; CHECK-NEXT: ret
+ %elt.head = insertelement <vscale x 16 x i32> poison, i32 -1, i32 0
+ %vb = shufflevector <vscale x 16 x i32> %elt.head, <vscale x 16 x i32> poison, <vscale x 16 x i32> zeroinitializer
+ %v = call <vscale x 16 x i32> @llvm.vp.uadd.sat.nxv16i32(<vscale x 16 x i32> %va, <vscale x 16 x i32> %vb, <vscale x 16 x i1> %m, i32 %evl)
+ ret <vscale x 16 x i32> %v
+}
+
+define <vscale x 16 x i32> @vsaddu_vi_nxv16i32_unmasked(<vscale x 16 x i32> %va, i32 zeroext %evl) {
+; CHECK-LABEL: vsaddu_vi_nxv16i32_unmasked:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma
+; CHECK-NEXT: vsaddu.vi v8, v8, -1
+; CHECK-NEXT: ret
+ %elt.head = insertelement <vscale x 16 x i32> poison, i32 -1, i32 0
+ %vb = shufflevector <vscale x 16 x i32> %elt.head, <vscale x 16 x i32> poison, <vscale x 16 x i32> zeroinitializer
+ %head = insertelement <vscale x 16 x i1> poison, i1 true, i32 0
+ %m = shufflevector <vscale x 16 x i1> %head, <vscale x 16 x i1> poison, <vscale x 16 x i32> zeroinitializer
+ %v = call <vscale x 16 x i32> @llvm.vp.uadd.sat.nxv16i32(<vscale x 16 x i32> %va, <vscale x 16 x i32> %vb, <vscale x 16 x i1> %m, i32 %evl)
+ ret <vscale x 16 x i32> %v
+}
+
+; Test that split-legalization works when the mask needs manual splitting.
+
+declare <vscale x 32 x i32> @llvm.vp.uadd.sat.nxv32i32(<vscale x 32 x i32>, <vscale x 32 x i32>, <vscale x 32 x i1>, i32)
+
+define <vscale x 32 x i32> @vsaddu_vi_nxv32i32(<vscale x 32 x i32> %va, <vscale x 32 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vsaddu_vi_nxv32i32:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vmv1r.v v24, v0
+; CHECK-NEXT: csrr a1, vlenb
+; CHECK-NEXT: srli a2, a1, 2
+; CHECK-NEXT: vsetvli a3, zero, e8, mf2, ta, ma
+; CHECK-NEXT: vslidedown.vx v0, v0, a2
+; CHECK-NEXT: slli a1, a1, 1
+; CHECK-NEXT: sub a2, a0, a1
+; CHECK-NEXT: sltu a3, a0, a2
+; CHECK-NEXT: addi a3, a3, -1
+; CHECK-NEXT: and a2, a3, a2
+; CHECK-NEXT: vsetvli zero, a2, e32, m8, ta, ma
+; CHECK-NEXT: vsaddu.vi v16, v16, -1, v0.t
+; CHECK-NEXT: bltu a0, a1, .LBB118_2
+; CHECK-NEXT: # %bb.1:
+; CHECK-NEXT: mv a0, a1
+; CHECK-NEXT: .LBB118_2:
+; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma
+; CHECK-NEXT: vmv1r.v v0, v24
+; CHECK-NEXT: vsaddu.vi v8, v8, -1, v0.t
+; CHECK-NEXT: ret
+ %elt.head = insertelement <vscale x 32 x i32> poison, i32 -1, i32 0
+ %vb = shufflevector <vscale x 32 x i32> %elt.head, <vscale x 32 x i32> poison, <vscale x 32 x i32> zeroinitializer
+ %v = call <vscale x 32 x i32> @llvm.vp.uadd.sat.nxv32i32(<vscale x 32 x i32> %va, <vscale x 32 x i32> %vb, <vscale x 32 x i1> %m, i32 %evl)
+ ret <vscale x 32 x i32> %v
+}
+
+define <vscale x 32 x i32> @vsaddu_vi_nxv32i32_unmasked(<vscale x 32 x i32> %va, i32 zeroext %evl) {
+; CHECK-LABEL: vsaddu_vi_nxv32i32_unmasked:
+; CHECK: # %bb.0:
+; CHECK-NEXT: csrr a1, vlenb
+; CHECK-NEXT: slli a1, a1, 1
+; CHECK-NEXT: sub a2, a0, a1
+; CHECK-NEXT: sltu a3, a0, a2
+; CHECK-NEXT: addi a3, a3, -1
+; CHECK-NEXT: and a2, a3, a2
+; CHECK-NEXT: vsetvli zero, a2, e32, m8, ta, ma
+; CHECK-NEXT: vsaddu.vi v16, v16, -1
+; CHECK-NEXT: bltu a0, a1, .LBB119_2
+; CHECK-NEXT: # %bb.1:
+; CHECK-NEXT: mv a0, a1
+; CHECK-NEXT: .LBB119_2:
+; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma
+; CHECK-NEXT: vsaddu.vi v8, v8, -1
+; CHECK-NEXT: ret
+ %elt.head = insertelement <vscale x 32 x i32> poison, i32 -1, i32 0
+ %vb = shufflevector <vscale x 32 x i32> %elt.head, <vscale x 32 x i32> poison, <vscale x 32 x i32> zeroinitializer
+ %head = insertelement <vscale x 32 x i1> poison, i1 true, i32 0
+ %m = shufflevector <vscale x 32 x i1> %head, <vscale x 32 x i1> poison, <vscale x 32 x i32> zeroinitializer
+ %v = call <vscale x 32 x i32> @llvm.vp.uadd.sat.nxv32i32(<vscale x 32 x i32> %va, <vscale x 32 x i32> %vb, <vscale x 32 x i1> %m, i32 %evl)
+ ret <vscale x 32 x i32> %v
+}
+
+declare <vscale x 1 x i64> @llvm.vp.uadd.sat.nxv1i64(<vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i1>, i32)
+
+define <vscale x 1 x i64> @vsaddu_vv_nxv1i64(<vscale x 1 x i64> %va, <vscale x 1 x i64> %b, <vscale x 1 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vsaddu_vv_nxv1i64:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma
+; CHECK-NEXT: vsaddu.vv v8, v8, v9, v0.t
+; CHECK-NEXT: ret
+ %v = call <vscale x 1 x i64> @llvm.vp.uadd.sat.nxv1i64(<vscale x 1 x i64> %va, <vscale x 1 x i64> %b, <vscale x 1 x i1> %m, i32 %evl)
+ ret <vscale x 1 x i64> %v
+}
+
+define <vscale x 1 x i64> @vsaddu_vv_nxv1i64_unmasked(<vscale x 1 x i64> %va, <vscale x 1 x i64> %b, i32 zeroext %evl) {
+; CHECK-LABEL: vsaddu_vv_nxv1i64_unmasked:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma
+; CHECK-NEXT: vsaddu.vv v8, v8, v9
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 1 x i1> poison, i1 true, i32 0
+ %m = shufflevector <vscale x 1 x i1> %head, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
+ %v = call <vscale x 1 x i64> @llvm.vp.uadd.sat.nxv1i64(<vscale x 1 x i64> %va, <vscale x 1 x i64> %b, <vscale x 1 x i1> %m, i32 %evl)
+ ret <vscale x 1 x i64> %v
+}
+
+define <vscale x 1 x i64> @vsaddu_vx_nxv1i64(<vscale x 1 x i64> %va, i64 %b, <vscale x 1 x i1> %m, i32 zeroext %evl) {
+; RV32-LABEL: vsaddu_vx_nxv1i64:
+; RV32: # %bb.0:
+; RV32-NEXT: addi sp, sp, -16
+; RV32-NEXT: .cfi_def_cfa_offset 16
+; RV32-NEXT: sw a1, 12(sp)
+; RV32-NEXT: sw a0, 8(sp)
+; RV32-NEXT: addi a0, sp, 8
+; RV32-NEXT: vsetvli a1, zero, e64, m1, ta, ma
+; RV32-NEXT: vlse64.v v9, (a0), zero
+; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, ma
+; RV32-NEXT: vsaddu.vv v8, v8, v9, v0.t
+; RV32-NEXT: addi sp, sp, 16
+; RV32-NEXT: ret
+;
+; RV64-LABEL: vsaddu_vx_nxv1i64:
+; RV64: # %bb.0:
+; RV64-NEXT: vsetvli zero, a1, e64, m1, ta, ma
+; RV64-NEXT: vsaddu.vx v8, v8, a0, v0.t
+; RV64-NEXT: ret
+ %elt.head = insertelement <vscale x 1 x i64> poison, i64 %b, i32 0
+ %vb = shufflevector <vscale x 1 x i64> %elt.head, <vscale x 1 x i64> poison, <vscale x 1 x i32> zeroinitializer
+ %v = call <vscale x 1 x i64> @llvm.vp.uadd.sat.nxv1i64(<vscale x 1 x i64> %va, <vscale x 1 x i64> %vb, <vscale x 1 x i1> %m, i32 %evl)
+ ret <vscale x 1 x i64> %v
+}
+
+define <vscale x 1 x i64> @vsaddu_vx_nxv1i64_unmasked(<vscale x 1 x i64> %va, i64 %b, i32 zeroext %evl) {
+; RV32-LABEL: vsaddu_vx_nxv1i64_unmasked:
+; RV32: # %bb.0:
+; RV32-NEXT: addi sp, sp, -16
+; RV32-NEXT: .cfi_def_cfa_offset 16
+; RV32-NEXT: sw a1, 12(sp)
+; RV32-NEXT: sw a0, 8(sp)
+; RV32-NEXT: addi a0, sp, 8
+; RV32-NEXT: vsetvli a1, zero, e64, m1, ta, ma
+; RV32-NEXT: vlse64.v v9, (a0), zero
+; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, ma
+; RV32-NEXT: vsaddu.vv v8, v8, v9
+; RV32-NEXT: addi sp, sp, 16
+; RV32-NEXT: ret
+;
+; RV64-LABEL: vsaddu_vx_nxv1i64_unmasked:
+; RV64: # %bb.0:
+; RV64-NEXT: vsetvli zero, a1, e64, m1, ta, ma
+; RV64-NEXT: vsaddu.vx v8, v8, a0
+; RV64-NEXT: ret
+ %elt.head = insertelement <vscale x 1 x i64> poison, i64 %b, i32 0
+ %vb = shufflevector <vscale x 1 x i64> %elt.head, <vscale x 1 x i64> poison, <vscale x 1 x i32> zeroinitializer
+ %head = insertelement <vscale x 1 x i1> poison, i1 true, i32 0
+ %m = shufflevector <vscale x 1 x i1> %head, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
+ %v = call <vscale x 1 x i64> @llvm.vp.uadd.sat.nxv1i64(<vscale x 1 x i64> %va, <vscale x 1 x i64> %vb, <vscale x 1 x i1> %m, i32 %evl)
+ ret <vscale x 1 x i64> %v
+}
+
+define <vscale x 1 x i64> @vsaddu_vi_nxv1i64(<vscale x 1 x i64> %va, <vscale x 1 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vsaddu_vi_nxv1i64:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma
+; CHECK-NEXT: vsaddu.vi v8, v8, -1, v0.t
+; CHECK-NEXT: ret
+ %elt.head = insertelement <vscale x 1 x i64> poison, i64 -1, i32 0
+ %vb = shufflevector <vscale x 1 x i64> %elt.head, <vscale x 1 x i64> poison, <vscale x 1 x i32> zeroinitializer
+ %v = call <vscale x 1 x i64> @llvm.vp.uadd.sat.nxv1i64(<vscale x 1 x i64> %va, <vscale x 1 x i64> %vb, <vscale x 1 x i1> %m, i32 %evl)
+ ret <vscale x 1 x i64> %v
+}
+
+define <vscale x 1 x i64> @vsaddu_vi_nxv1i64_unmasked(<vscale x 1 x i64> %va, i32 zeroext %evl) {
+; CHECK-LABEL: vsaddu_vi_nxv1i64_unmasked:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma
+; CHECK-NEXT: vsaddu.vi v8, v8, -1
+; CHECK-NEXT: ret
+ %elt.head = insertelement <vscale x 1 x i64> poison, i64 -1, i32 0
+ %vb = shufflevector <vscale x 1 x i64> %elt.head, <vscale x 1 x i64> poison, <vscale x 1 x i32> zeroinitializer
+ %head = insertelement <vscale x 1 x i1> poison, i1 true, i32 0
+ %m = shufflevector <vscale x 1 x i1> %head, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
+ %v = call <vscale x 1 x i64> @llvm.vp.uadd.sat.nxv1i64(<vscale x 1 x i64> %va, <vscale x 1 x i64> %vb, <vscale x 1 x i1> %m, i32 %evl)
+ ret <vscale x 1 x i64> %v
+}
+
+declare <vscale x 2 x i64> @llvm.vp.uadd.sat.nxv2i64(<vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i1>, i32)
+
+define <vscale x 2 x i64> @vsaddu_vv_nxv2i64(<vscale x 2 x i64> %va, <vscale x 2 x i64> %b, <vscale x 2 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vsaddu_vv_nxv2i64:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma
+; CHECK-NEXT: vsaddu.vv v8, v8, v10, v0.t
+; CHECK-NEXT: ret
+ %v = call <vscale x 2 x i64> @llvm.vp.uadd.sat.nxv2i64(<vscale x 2 x i64> %va, <vscale x 2 x i64> %b, <vscale x 2 x i1> %m, i32 %evl)
+ ret <vscale x 2 x i64> %v
+}
+
+define <vscale x 2 x i64> @vsaddu_vv_nxv2i64_unmasked(<vscale x 2 x i64> %va, <vscale x 2 x i64> %b, i32 zeroext %evl) {
+; CHECK-LABEL: vsaddu_vv_nxv2i64_unmasked:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma
+; CHECK-NEXT: vsaddu.vv v8, v8, v10
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 2 x i1> poison, i1 true, i32 0
+ %m = shufflevector <vscale x 2 x i1> %head, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
+ %v = call <vscale x 2 x i64> @llvm.vp.uadd.sat.nxv2i64(<vscale x 2 x i64> %va, <vscale x 2 x i64> %b, <vscale x 2 x i1> %m, i32 %evl)
+ ret <vscale x 2 x i64> %v
+}
+
+define <vscale x 2 x i64> @vsaddu_vx_nxv2i64(<vscale x 2 x i64> %va, i64 %b, <vscale x 2 x i1> %m, i32 zeroext %evl) {
+; RV32-LABEL: vsaddu_vx_nxv2i64:
+; RV32: # %bb.0:
+; RV32-NEXT: addi sp, sp, -16
+; RV32-NEXT: .cfi_def_cfa_offset 16
+; RV32-NEXT: sw a1, 12(sp)
+; RV32-NEXT: sw a0, 8(sp)
+; RV32-NEXT: addi a0, sp, 8
+; RV32-NEXT: vsetvli a1, zero, e64, m2, ta, ma
+; RV32-NEXT: vlse64.v v10, (a0), zero
+; RV32-NEXT: vsetvli zero, a2, e64, m2, ta, ma
+; RV32-NEXT: vsaddu.vv v8, v8, v10, v0.t
+; RV32-NEXT: addi sp, sp, 16
+; RV32-NEXT: ret
+;
+; RV64-LABEL: vsaddu_vx_nxv2i64:
+; RV64: # %bb.0:
+; RV64-NEXT: vsetvli zero, a1, e64, m2, ta, ma
+; RV64-NEXT: vsaddu.vx v8, v8, a0, v0.t
+; RV64-NEXT: ret
+ %elt.head = insertelement <vscale x 2 x i64> poison, i64 %b, i32 0
+ %vb = shufflevector <vscale x 2 x i64> %elt.head, <vscale x 2 x i64> poison, <vscale x 2 x i32> zeroinitializer
+ %v = call <vscale x 2 x i64> @llvm.vp.uadd.sat.nxv2i64(<vscale x 2 x i64> %va, <vscale x 2 x i64> %vb, <vscale x 2 x i1> %m, i32 %evl)
+ ret <vscale x 2 x i64> %v
+}
+
+define <vscale x 2 x i64> @vsaddu_vx_nxv2i64_unmasked(<vscale x 2 x i64> %va, i64 %b, i32 zeroext %evl) {
+; RV32-LABEL: vsaddu_vx_nxv2i64_unmasked:
+; RV32: # %bb.0:
+; RV32-NEXT: addi sp, sp, -16
+; RV32-NEXT: .cfi_def_cfa_offset 16
+; RV32-NEXT: sw a1, 12(sp)
+; RV32-NEXT: sw a0, 8(sp)
+; RV32-NEXT: addi a0, sp, 8
+; RV32-NEXT: vsetvli a1, zero, e64, m2, ta, ma
+; RV32-NEXT: vlse64.v v10, (a0), zero
+; RV32-NEXT: vsetvli zero, a2, e64, m2, ta, ma
+; RV32-NEXT: vsaddu.vv v8, v8, v10
+; RV32-NEXT: addi sp, sp, 16
+; RV32-NEXT: ret
+;
+; RV64-LABEL: vsaddu_vx_nxv2i64_unmasked:
+; RV64: # %bb.0:
+; RV64-NEXT: vsetvli zero, a1, e64, m2, ta, ma
+; RV64-NEXT: vsaddu.vx v8, v8, a0
+; RV64-NEXT: ret
+ %elt.head = insertelement <vscale x 2 x i64> poison, i64 %b, i32 0
+ %vb = shufflevector <vscale x 2 x i64> %elt.head, <vscale x 2 x i64> poison, <vscale x 2 x i32> zeroinitializer
+ %head = insertelement <vscale x 2 x i1> poison, i1 true, i32 0
+ %m = shufflevector <vscale x 2 x i1> %head, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
+ %v = call <vscale x 2 x i64> @llvm.vp.uadd.sat.nxv2i64(<vscale x 2 x i64> %va, <vscale x 2 x i64> %vb, <vscale x 2 x i1> %m, i32 %evl)
+ ret <vscale x 2 x i64> %v
+}
+
+define <vscale x 2 x i64> @vsaddu_vi_nxv2i64(<vscale x 2 x i64> %va, <vscale x 2 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vsaddu_vi_nxv2i64:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma
+; CHECK-NEXT: vsaddu.vi v8, v8, -1, v0.t
+; CHECK-NEXT: ret
+ %elt.head = insertelement <vscale x 2 x i64> poison, i64 -1, i32 0
+ %vb = shufflevector <vscale x 2 x i64> %elt.head, <vscale x 2 x i64> poison, <vscale x 2 x i32> zeroinitializer
+ %v = call <vscale x 2 x i64> @llvm.vp.uadd.sat.nxv2i64(<vscale x 2 x i64> %va, <vscale x 2 x i64> %vb, <vscale x 2 x i1> %m, i32 %evl)
+ ret <vscale x 2 x i64> %v
+}
+
+define <vscale x 2 x i64> @vsaddu_vi_nxv2i64_unmasked(<vscale x 2 x i64> %va, i32 zeroext %evl) {
+; CHECK-LABEL: vsaddu_vi_nxv2i64_unmasked:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma
+; CHECK-NEXT: vsaddu.vi v8, v8, -1
+; CHECK-NEXT: ret
+ %elt.head = insertelement <vscale x 2 x i64> poison, i64 -1, i32 0
+ %vb = shufflevector <vscale x 2 x i64> %elt.head, <vscale x 2 x i64> poison, <vscale x 2 x i32> zeroinitializer
+ %head = insertelement <vscale x 2 x i1> poison, i1 true, i32 0
+ %m = shufflevector <vscale x 2 x i1> %head, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
+ %v = call <vscale x 2 x i64> @llvm.vp.uadd.sat.nxv2i64(<vscale x 2 x i64> %va, <vscale x 2 x i64> %vb, <vscale x 2 x i1> %m, i32 %evl)
+ ret <vscale x 2 x i64> %v
+}
+
+declare <vscale x 4 x i64> @llvm.vp.uadd.sat.nxv4i64(<vscale x 4 x i64>, <vscale x 4 x i64>, <vscale x 4 x i1>, i32)
+
+define <vscale x 4 x i64> @vsaddu_vv_nxv4i64(<vscale x 4 x i64> %va, <vscale x 4 x i64> %b, <vscale x 4 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vsaddu_vv_nxv4i64:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma
+; CHECK-NEXT: vsaddu.vv v8, v8, v12, v0.t
+; CHECK-NEXT: ret
+ %v = call <vscale x 4 x i64> @llvm.vp.uadd.sat.nxv4i64(<vscale x 4 x i64> %va, <vscale x 4 x i64> %b, <vscale x 4 x i1> %m, i32 %evl)
+ ret <vscale x 4 x i64> %v
+}
+
+define <vscale x 4 x i64> @vsaddu_vv_nxv4i64_unmasked(<vscale x 4 x i64> %va, <vscale x 4 x i64> %b, i32 zeroext %evl) {
+; CHECK-LABEL: vsaddu_vv_nxv4i64_unmasked:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma
+; CHECK-NEXT: vsaddu.vv v8, v8, v12
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 4 x i1> poison, i1 true, i32 0
+ %m = shufflevector <vscale x 4 x i1> %head, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
+ %v = call <vscale x 4 x i64> @llvm.vp.uadd.sat.nxv4i64(<vscale x 4 x i64> %va, <vscale x 4 x i64> %b, <vscale x 4 x i1> %m, i32 %evl)
+ ret <vscale x 4 x i64> %v
+}
+
+define <vscale x 4 x i64> @vsaddu_vx_nxv4i64(<vscale x 4 x i64> %va, i64 %b, <vscale x 4 x i1> %m, i32 zeroext %evl) {
+; RV32-LABEL: vsaddu_vx_nxv4i64:
+; RV32: # %bb.0:
+; RV32-NEXT: addi sp, sp, -16
+; RV32-NEXT: .cfi_def_cfa_offset 16
+; RV32-NEXT: sw a1, 12(sp)
+; RV32-NEXT: sw a0, 8(sp)
+; RV32-NEXT: addi a0, sp, 8
+; RV32-NEXT: vsetvli a1, zero, e64, m4, ta, ma
+; RV32-NEXT: vlse64.v v12, (a0), zero
+; RV32-NEXT: vsetvli zero, a2, e64, m4, ta, ma
+; RV32-NEXT: vsaddu.vv v8, v8, v12, v0.t
+; RV32-NEXT: addi sp, sp, 16
+; RV32-NEXT: ret
+;
+; RV64-LABEL: vsaddu_vx_nxv4i64:
+; RV64: # %bb.0:
+; RV64-NEXT: vsetvli zero, a1, e64, m4, ta, ma
+; RV64-NEXT: vsaddu.vx v8, v8, a0, v0.t
+; RV64-NEXT: ret
+ %elt.head = insertelement <vscale x 4 x i64> poison, i64 %b, i32 0
+ %vb = shufflevector <vscale x 4 x i64> %elt.head, <vscale x 4 x i64> poison, <vscale x 4 x i32> zeroinitializer
+ %v = call <vscale x 4 x i64> @llvm.vp.uadd.sat.nxv4i64(<vscale x 4 x i64> %va, <vscale x 4 x i64> %vb, <vscale x 4 x i1> %m, i32 %evl)
+ ret <vscale x 4 x i64> %v
+}
+
+define <vscale x 4 x i64> @vsaddu_vx_nxv4i64_unmasked(<vscale x 4 x i64> %va, i64 %b, i32 zeroext %evl) {
+; RV32-LABEL: vsaddu_vx_nxv4i64_unmasked:
+; RV32: # %bb.0:
+; RV32-NEXT: addi sp, sp, -16
+; RV32-NEXT: .cfi_def_cfa_offset 16
+; RV32-NEXT: sw a1, 12(sp)
+; RV32-NEXT: sw a0, 8(sp)
+; RV32-NEXT: addi a0, sp, 8
+; RV32-NEXT: vsetvli a1, zero, e64, m4, ta, ma
+; RV32-NEXT: vlse64.v v12, (a0), zero
+; RV32-NEXT: vsetvli zero, a2, e64, m4, ta, ma
+; RV32-NEXT: vsaddu.vv v8, v8, v12
+; RV32-NEXT: addi sp, sp, 16
+; RV32-NEXT: ret
+;
+; RV64-LABEL: vsaddu_vx_nxv4i64_unmasked:
+; RV64: # %bb.0:
+; RV64-NEXT: vsetvli zero, a1, e64, m4, ta, ma
+; RV64-NEXT: vsaddu.vx v8, v8, a0
+; RV64-NEXT: ret
+ %elt.head = insertelement <vscale x 4 x i64> poison, i64 %b, i32 0
+ %vb = shufflevector <vscale x 4 x i64> %elt.head, <vscale x 4 x i64> poison, <vscale x 4 x i32> zeroinitializer
+ %head = insertelement <vscale x 4 x i1> poison, i1 true, i32 0
+ %m = shufflevector <vscale x 4 x i1> %head, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
+ %v = call <vscale x 4 x i64> @llvm.vp.uadd.sat.nxv4i64(<vscale x 4 x i64> %va, <vscale x 4 x i64> %vb, <vscale x 4 x i1> %m, i32 %evl)
+ ret <vscale x 4 x i64> %v
+}
+
+define <vscale x 4 x i64> @vsaddu_vi_nxv4i64(<vscale x 4 x i64> %va, <vscale x 4 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vsaddu_vi_nxv4i64:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma
+; CHECK-NEXT: vsaddu.vi v8, v8, -1, v0.t
+; CHECK-NEXT: ret
+ %elt.head = insertelement <vscale x 4 x i64> poison, i64 -1, i32 0
+ %vb = shufflevector <vscale x 4 x i64> %elt.head, <vscale x 4 x i64> poison, <vscale x 4 x i32> zeroinitializer
+ %v = call <vscale x 4 x i64> @llvm.vp.uadd.sat.nxv4i64(<vscale x 4 x i64> %va, <vscale x 4 x i64> %vb, <vscale x 4 x i1> %m, i32 %evl)
+ ret <vscale x 4 x i64> %v
+}
+
+define <vscale x 4 x i64> @vsaddu_vi_nxv4i64_unmasked(<vscale x 4 x i64> %va, i32 zeroext %evl) {
+; CHECK-LABEL: vsaddu_vi_nxv4i64_unmasked:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma
+; CHECK-NEXT: vsaddu.vi v8, v8, -1
+; CHECK-NEXT: ret
+ %elt.head = insertelement <vscale x 4 x i64> poison, i64 -1, i32 0
+ %vb = shufflevector <vscale x 4 x i64> %elt.head, <vscale x 4 x i64> poison, <vscale x 4 x i32> zeroinitializer
+ %head = insertelement <vscale x 4 x i1> poison, i1 true, i32 0
+ %m = shufflevector <vscale x 4 x i1> %head, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
+ %v = call <vscale x 4 x i64> @llvm.vp.uadd.sat.nxv4i64(<vscale x 4 x i64> %va, <vscale x 4 x i64> %vb, <vscale x 4 x i1> %m, i32 %evl)
+ ret <vscale x 4 x i64> %v
+}
+
+declare <vscale x 8 x i64> @llvm.vp.uadd.sat.nxv8i64(<vscale x 8 x i64>, <vscale x 8 x i64>, <vscale x 8 x i1>, i32)
+
+define <vscale x 8 x i64> @vsaddu_vv_nxv8i64(<vscale x 8 x i64> %va, <vscale x 8 x i64> %b, <vscale x 8 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vsaddu_vv_nxv8i64:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
+; CHECK-NEXT: vsaddu.vv v8, v8, v16, v0.t
+; CHECK-NEXT: ret
+ %v = call <vscale x 8 x i64> @llvm.vp.uadd.sat.nxv8i64(<vscale x 8 x i64> %va, <vscale x 8 x i64> %b, <vscale x 8 x i1> %m, i32 %evl)
+ ret <vscale x 8 x i64> %v
+}
+
+define <vscale x 8 x i64> @vsaddu_vv_nxv8i64_unmasked(<vscale x 8 x i64> %va, <vscale x 8 x i64> %b, i32 zeroext %evl) {
+; CHECK-LABEL: vsaddu_vv_nxv8i64_unmasked:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
+; CHECK-NEXT: vsaddu.vv v8, v8, v16
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i1> poison, i1 true, i32 0
+ %m = shufflevector <vscale x 8 x i1> %head, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
+ %v = call <vscale x 8 x i64> @llvm.vp.uadd.sat.nxv8i64(<vscale x 8 x i64> %va, <vscale x 8 x i64> %b, <vscale x 8 x i1> %m, i32 %evl)
+ ret <vscale x 8 x i64> %v
+}
+
+define <vscale x 8 x i64> @vsaddu_vx_nxv8i64(<vscale x 8 x i64> %va, i64 %b, <vscale x 8 x i1> %m, i32 zeroext %evl) {
+; RV32-LABEL: vsaddu_vx_nxv8i64:
+; RV32: # %bb.0:
+; RV32-NEXT: addi sp, sp, -16
+; RV32-NEXT: .cfi_def_cfa_offset 16
+; RV32-NEXT: sw a1, 12(sp)
+; RV32-NEXT: sw a0, 8(sp)
+; RV32-NEXT: addi a0, sp, 8
+; RV32-NEXT: vsetvli a1, zero, e64, m8, ta, ma
+; RV32-NEXT: vlse64.v v16, (a0), zero
+; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, ma
+; RV32-NEXT: vsaddu.vv v8, v8, v16, v0.t
+; RV32-NEXT: addi sp, sp, 16
+; RV32-NEXT: ret
+;
+; RV64-LABEL: vsaddu_vx_nxv8i64:
+; RV64: # %bb.0:
+; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, ma
+; RV64-NEXT: vsaddu.vx v8, v8, a0, v0.t
+; RV64-NEXT: ret
+ %elt.head = insertelement <vscale x 8 x i64> poison, i64 %b, i32 0
+ %vb = shufflevector <vscale x 8 x i64> %elt.head, <vscale x 8 x i64> poison, <vscale x 8 x i32> zeroinitializer
+ %v = call <vscale x 8 x i64> @llvm.vp.uadd.sat.nxv8i64(<vscale x 8 x i64> %va, <vscale x 8 x i64> %vb, <vscale x 8 x i1> %m, i32 %evl)
+ ret <vscale x 8 x i64> %v
+}
+
+define <vscale x 8 x i64> @vsaddu_vx_nxv8i64_unmasked(<vscale x 8 x i64> %va, i64 %b, i32 zeroext %evl) {
+; RV32-LABEL: vsaddu_vx_nxv8i64_unmasked:
+; RV32: # %bb.0:
+; RV32-NEXT: addi sp, sp, -16
+; RV32-NEXT: .cfi_def_cfa_offset 16
+; RV32-NEXT: sw a1, 12(sp)
+; RV32-NEXT: sw a0, 8(sp)
+; RV32-NEXT: addi a0, sp, 8
+; RV32-NEXT: vsetvli a1, zero, e64, m8, ta, ma
+; RV32-NEXT: vlse64.v v16, (a0), zero
+; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, ma
+; RV32-NEXT: vsaddu.vv v8, v8, v16
+; RV32-NEXT: addi sp, sp, 16
+; RV32-NEXT: ret
+;
+; RV64-LABEL: vsaddu_vx_nxv8i64_unmasked:
+; RV64: # %bb.0:
+; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, ma
+; RV64-NEXT: vsaddu.vx v8, v8, a0
+; RV64-NEXT: ret
+ %elt.head = insertelement <vscale x 8 x i64> poison, i64 %b, i32 0
+ %vb = shufflevector <vscale x 8 x i64> %elt.head, <vscale x 8 x i64> poison, <vscale x 8 x i32> zeroinitializer
+ %head = insertelement <vscale x 8 x i1> poison, i1 true, i32 0
+ %m = shufflevector <vscale x 8 x i1> %head, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
+ %v = call <vscale x 8 x i64> @llvm.vp.uadd.sat.nxv8i64(<vscale x 8 x i64> %va, <vscale x 8 x i64> %vb, <vscale x 8 x i1> %m, i32 %evl)
+ ret <vscale x 8 x i64> %v
+}
+
+define <vscale x 8 x i64> @vsaddu_vi_nxv8i64(<vscale x 8 x i64> %va, <vscale x 8 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vsaddu_vi_nxv8i64:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
+; CHECK-NEXT: vsaddu.vi v8, v8, -1, v0.t
+; CHECK-NEXT: ret
+ %elt.head = insertelement <vscale x 8 x i64> poison, i64 -1, i32 0
+ %vb = shufflevector <vscale x 8 x i64> %elt.head, <vscale x 8 x i64> poison, <vscale x 8 x i32> zeroinitializer
+ %v = call <vscale x 8 x i64> @llvm.vp.uadd.sat.nxv8i64(<vscale x 8 x i64> %va, <vscale x 8 x i64> %vb, <vscale x 8 x i1> %m, i32 %evl)
+ ret <vscale x 8 x i64> %v
+}
+
+define <vscale x 8 x i64> @vsaddu_vi_nxv8i64_unmasked(<vscale x 8 x i64> %va, i32 zeroext %evl) {
+; CHECK-LABEL: vsaddu_vi_nxv8i64_unmasked:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
+; CHECK-NEXT: vsaddu.vi v8, v8, -1
+; CHECK-NEXT: ret
+ %elt.head = insertelement <vscale x 8 x i64> poison, i64 -1, i32 0
+ %vb = shufflevector <vscale x 8 x i64> %elt.head, <vscale x 8 x i64> poison, <vscale x 8 x i32> zeroinitializer
+ %head = insertelement <vscale x 8 x i1> poison, i1 true, i32 0
+ %m = shufflevector <vscale x 8 x i1> %head, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
+ %v = call <vscale x 8 x i64> @llvm.vp.uadd.sat.nxv8i64(<vscale x 8 x i64> %va, <vscale x 8 x i64> %vb, <vscale x 8 x i1> %m, i32 %evl)
+ ret <vscale x 8 x i64> %v
+}
diff --git a/llvm/test/CodeGen/RISCV/rvv/vssub-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vssub-vp.ll
new file mode 100644
index 000000000000..2d51a2ee44f6
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rvv/vssub-vp.ll
@@ -0,0 +1,2067 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=riscv32 -mattr=+v -verify-machineinstrs < %s \
+; RUN: | FileCheck %s --check-prefixes=CHECK,RV32
+; RUN: llc -mtriple=riscv64 -mattr=+v -verify-machineinstrs < %s \
+; RUN: | FileCheck %s --check-prefixes=CHECK,RV64
+
+declare <vscale x 8 x i7> @llvm.vp.ssub.sat.nxv8i7(<vscale x 8 x i7>, <vscale x 8 x i7>, <vscale x 8 x i1>, i32)
+
+define <vscale x 8 x i7> @vssub_vx_nxv8i7(<vscale x 8 x i7> %a, i7 signext %b, <vscale x 8 x i1> %mask, i32 zeroext %evl) {
+; CHECK-LABEL: vssub_vx_nxv8i7:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a2, zero, e8, m1, ta, ma
+; CHECK-NEXT: vadd.vv v8, v8, v8
+; CHECK-NEXT: vsra.vi v8, v8, 1
+; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma
+; CHECK-NEXT: vsub.vx v8, v8, a0, v0.t
+; CHECK-NEXT: li a0, 63
+; CHECK-NEXT: vmin.vx v8, v8, a0, v0.t
+; CHECK-NEXT: li a0, 192
+; CHECK-NEXT: vmax.vx v8, v8, a0, v0.t
+; CHECK-NEXT: ret
+ %elt.head = insertelement <vscale x 8 x i7> poison, i7 %b, i32 0
+ %vb = shufflevector <vscale x 8 x i7> %elt.head, <vscale x 8 x i7> poison, <vscale x 8 x i32> zeroinitializer
+ %v = call <vscale x 8 x i7> @llvm.vp.ssub.sat.nxv8i7(<vscale x 8 x i7> %a, <vscale x 8 x i7> %vb, <vscale x 8 x i1> %mask, i32 %evl)
+ ret <vscale x 8 x i7> %v
+}
+
+declare <vscale x 1 x i8> @llvm.vp.ssub.sat.nxv1i8(<vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i1>, i32)
+
+define <vscale x 1 x i8> @vssub_vv_nxv1i8(<vscale x 1 x i8> %va, <vscale x 1 x i8> %b, <vscale x 1 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vssub_vv_nxv1i8:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma
+; CHECK-NEXT: vssub.vv v8, v8, v9, v0.t
+; CHECK-NEXT: ret
+ %v = call <vscale x 1 x i8> @llvm.vp.ssub.sat.nxv1i8(<vscale x 1 x i8> %va, <vscale x 1 x i8> %b, <vscale x 1 x i1> %m, i32 %evl)
+ ret <vscale x 1 x i8> %v
+}
+
+define <vscale x 1 x i8> @vssub_vv_nxv1i8_unmasked(<vscale x 1 x i8> %va, <vscale x 1 x i8> %b, i32 zeroext %evl) {
+; CHECK-LABEL: vssub_vv_nxv1i8_unmasked:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma
+; CHECK-NEXT: vssub.vv v8, v8, v9
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 1 x i1> poison, i1 true, i32 0
+ %m = shufflevector <vscale x 1 x i1> %head, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
+ %v = call <vscale x 1 x i8> @llvm.vp.ssub.sat.nxv1i8(<vscale x 1 x i8> %va, <vscale x 1 x i8> %b, <vscale x 1 x i1> %m, i32 %evl)
+ ret <vscale x 1 x i8> %v
+}
+
+define <vscale x 1 x i8> @vssub_vx_nxv1i8(<vscale x 1 x i8> %va, i8 %b, <vscale x 1 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vssub_vx_nxv1i8:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
+; CHECK-NEXT: vssub.vx v8, v8, a0, v0.t
+; CHECK-NEXT: ret
+ %elt.head = insertelement <vscale x 1 x i8> poison, i8 %b, i32 0
+ %vb = shufflevector <vscale x 1 x i8> %elt.head, <vscale x 1 x i8> poison, <vscale x 1 x i32> zeroinitializer
+ %v = call <vscale x 1 x i8> @llvm.vp.ssub.sat.nxv1i8(<vscale x 1 x i8> %va, <vscale x 1 x i8> %vb, <vscale x 1 x i1> %m, i32 %evl)
+ ret <vscale x 1 x i8> %v
+}
+
+define <vscale x 1 x i8> @vssub_vx_nxv1i8_commute(<vscale x 1 x i8> %va, i8 %b, <vscale x 1 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vssub_vx_nxv1i8_commute:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a2, zero, e8, mf8, ta, ma
+; CHECK-NEXT: vmv.v.x v9, a0
+; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
+; CHECK-NEXT: vssub.vv v8, v9, v8, v0.t
+; CHECK-NEXT: ret
+ %elt.head = insertelement <vscale x 1 x i8> poison, i8 %b, i32 0
+ %vb = shufflevector <vscale x 1 x i8> %elt.head, <vscale x 1 x i8> poison, <vscale x 1 x i32> zeroinitializer
+ %v = call <vscale x 1 x i8> @llvm.vp.ssub.sat.nxv1i8(<vscale x 1 x i8> %vb, <vscale x 1 x i8> %va, <vscale x 1 x i1> %m, i32 %evl)
+ ret <vscale x 1 x i8> %v
+}
+
+define <vscale x 1 x i8> @vssub_vx_nxv1i8_unmasked(<vscale x 1 x i8> %va, i8 %b, i32 zeroext %evl) {
+; CHECK-LABEL: vssub_vx_nxv1i8_unmasked:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
+; CHECK-NEXT: vssub.vx v8, v8, a0
+; CHECK-NEXT: ret
+ %elt.head = insertelement <vscale x 1 x i8> poison, i8 %b, i32 0
+ %vb = shufflevector <vscale x 1 x i8> %elt.head, <vscale x 1 x i8> poison, <vscale x 1 x i32> zeroinitializer
+ %head = insertelement <vscale x 1 x i1> poison, i1 true, i32 0
+ %m = shufflevector <vscale x 1 x i1> %head, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
+ %v = call <vscale x 1 x i8> @llvm.vp.ssub.sat.nxv1i8(<vscale x 1 x i8> %va, <vscale x 1 x i8> %vb, <vscale x 1 x i1> %m, i32 %evl)
+ ret <vscale x 1 x i8> %v
+}
+
+define <vscale x 1 x i8> @vssub_vi_nxv1i8(<vscale x 1 x i8> %va, <vscale x 1 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vssub_vi_nxv1i8:
+; CHECK: # %bb.0:
+; CHECK-NEXT: li a1, -1
+; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma
+; CHECK-NEXT: vssub.vx v8, v8, a1, v0.t
+; CHECK-NEXT: ret
+ %elt.head = insertelement <vscale x 1 x i8> poison, i8 -1, i32 0
+ %vb = shufflevector <vscale x 1 x i8> %elt.head, <vscale x 1 x i8> poison, <vscale x 1 x i32> zeroinitializer
+ %v = call <vscale x 1 x i8> @llvm.vp.ssub.sat.nxv1i8(<vscale x 1 x i8> %va, <vscale x 1 x i8> %vb, <vscale x 1 x i1> %m, i32 %evl)
+ ret <vscale x 1 x i8> %v
+}
+
+define <vscale x 1 x i8> @vssub_vi_nxv1i8_unmasked(<vscale x 1 x i8> %va, i32 zeroext %evl) {
+; CHECK-LABEL: vssub_vi_nxv1i8_unmasked:
+; CHECK: # %bb.0:
+; CHECK-NEXT: li a1, -1
+; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma
+; CHECK-NEXT: vssub.vx v8, v8, a1
+; CHECK-NEXT: ret
+ %elt.head = insertelement <vscale x 1 x i8> poison, i8 -1, i32 0
+ %vb = shufflevector <vscale x 1 x i8> %elt.head, <vscale x 1 x i8> poison, <vscale x 1 x i32> zeroinitializer
+ %head = insertelement <vscale x 1 x i1> poison, i1 true, i32 0
+ %m = shufflevector <vscale x 1 x i1> %head, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
+ %v = call <vscale x 1 x i8> @llvm.vp.ssub.sat.nxv1i8(<vscale x 1 x i8> %va, <vscale x 1 x i8> %vb, <vscale x 1 x i1> %m, i32 %evl)
+ ret <vscale x 1 x i8> %v
+}
+
+declare <vscale x 2 x i8> @llvm.vp.ssub.sat.nxv2i8(<vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i1>, i32)
+
+define <vscale x 2 x i8> @vssub_vv_nxv2i8(<vscale x 2 x i8> %va, <vscale x 2 x i8> %b, <vscale x 2 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vssub_vv_nxv2i8:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma
+; CHECK-NEXT: vssub.vv v8, v8, v9, v0.t
+; CHECK-NEXT: ret
+ %v = call <vscale x 2 x i8> @llvm.vp.ssub.sat.nxv2i8(<vscale x 2 x i8> %va, <vscale x 2 x i8> %b, <vscale x 2 x i1> %m, i32 %evl)
+ ret <vscale x 2 x i8> %v
+}
+
+define <vscale x 2 x i8> @vssub_vv_nxv2i8_unmasked(<vscale x 2 x i8> %va, <vscale x 2 x i8> %b, i32 zeroext %evl) {
+; CHECK-LABEL: vssub_vv_nxv2i8_unmasked:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma
+; CHECK-NEXT: vssub.vv v8, v8, v9
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 2 x i1> poison, i1 true, i32 0
+ %m = shufflevector <vscale x 2 x i1> %head, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
+ %v = call <vscale x 2 x i8> @llvm.vp.ssub.sat.nxv2i8(<vscale x 2 x i8> %va, <vscale x 2 x i8> %b, <vscale x 2 x i1> %m, i32 %evl)
+ ret <vscale x 2 x i8> %v
+}
+
+define <vscale x 2 x i8> @vssub_vx_nxv2i8(<vscale x 2 x i8> %va, i8 %b, <vscale x 2 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vssub_vx_nxv2i8:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma
+; CHECK-NEXT: vssub.vx v8, v8, a0, v0.t
+; CHECK-NEXT: ret
+ %elt.head = insertelement <vscale x 2 x i8> poison, i8 %b, i32 0
+ %vb = shufflevector <vscale x 2 x i8> %elt.head, <vscale x 2 x i8> poison, <vscale x 2 x i32> zeroinitializer
+ %v = call <vscale x 2 x i8> @llvm.vp.ssub.sat.nxv2i8(<vscale x 2 x i8> %va, <vscale x 2 x i8> %vb, <vscale x 2 x i1> %m, i32 %evl)
+ ret <vscale x 2 x i8> %v
+}
+
+define <vscale x 2 x i8> @vssub_vx_nxv2i8_unmasked(<vscale x 2 x i8> %va, i8 %b, i32 zeroext %evl) {
+; CHECK-LABEL: vssub_vx_nxv2i8_unmasked:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma
+; CHECK-NEXT: vssub.vx v8, v8, a0
+; CHECK-NEXT: ret
+ %elt.head = insertelement <vscale x 2 x i8> poison, i8 %b, i32 0
+ %vb = shufflevector <vscale x 2 x i8> %elt.head, <vscale x 2 x i8> poison, <vscale x 2 x i32> zeroinitializer
+ %head = insertelement <vscale x 2 x i1> poison, i1 true, i32 0
+ %m = shufflevector <vscale x 2 x i1> %head, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
+ %v = call <vscale x 2 x i8> @llvm.vp.ssub.sat.nxv2i8(<vscale x 2 x i8> %va, <vscale x 2 x i8> %vb, <vscale x 2 x i1> %m, i32 %evl)
+ ret <vscale x 2 x i8> %v
+}
+
+define <vscale x 2 x i8> @vssub_vi_nxv2i8(<vscale x 2 x i8> %va, <vscale x 2 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vssub_vi_nxv2i8:
+; CHECK: # %bb.0:
+; CHECK-NEXT: li a1, -1
+; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma
+; CHECK-NEXT: vssub.vx v8, v8, a1, v0.t
+; CHECK-NEXT: ret
+ %elt.head = insertelement <vscale x 2 x i8> poison, i8 -1, i32 0
+ %vb = shufflevector <vscale x 2 x i8> %elt.head, <vscale x 2 x i8> poison, <vscale x 2 x i32> zeroinitializer
+ %v = call <vscale x 2 x i8> @llvm.vp.ssub.sat.nxv2i8(<vscale x 2 x i8> %va, <vscale x 2 x i8> %vb, <vscale x 2 x i1> %m, i32 %evl)
+ ret <vscale x 2 x i8> %v
+}
+
+define <vscale x 2 x i8> @vssub_vi_nxv2i8_unmasked(<vscale x 2 x i8> %va, i32 zeroext %evl) {
+; CHECK-LABEL: vssub_vi_nxv2i8_unmasked:
+; CHECK: # %bb.0:
+; CHECK-NEXT: li a1, -1
+; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma
+; CHECK-NEXT: vssub.vx v8, v8, a1
+; CHECK-NEXT: ret
+ %elt.head = insertelement <vscale x 2 x i8> poison, i8 -1, i32 0
+ %vb = shufflevector <vscale x 2 x i8> %elt.head, <vscale x 2 x i8> poison, <vscale x 2 x i32> zeroinitializer
+ %head = insertelement <vscale x 2 x i1> poison, i1 true, i32 0
+ %m = shufflevector <vscale x 2 x i1> %head, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
+ %v = call <vscale x 2 x i8> @llvm.vp.ssub.sat.nxv2i8(<vscale x 2 x i8> %va, <vscale x 2 x i8> %vb, <vscale x 2 x i1> %m, i32 %evl)
+ ret <vscale x 2 x i8> %v
+}
+
+declare <vscale x 3 x i8> @llvm.vp.ssub.sat.nxv3i8(<vscale x 3 x i8>, <vscale x 3 x i8>, <vscale x 3 x i1>, i32)
+
+define <vscale x 3 x i8> @vssub_vv_nxv3i8(<vscale x 3 x i8> %va, <vscale x 3 x i8> %b, <vscale x 3 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vssub_vv_nxv3i8:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma
+; CHECK-NEXT: vssub.vv v8, v8, v9, v0.t
+; CHECK-NEXT: ret
+ %v = call <vscale x 3 x i8> @llvm.vp.ssub.sat.nxv3i8(<vscale x 3 x i8> %va, <vscale x 3 x i8> %b, <vscale x 3 x i1> %m, i32 %evl)
+ ret <vscale x 3 x i8> %v
+}
+
+define <vscale x 3 x i8> @vssub_vv_nxv3i8_unmasked(<vscale x 3 x i8> %va, <vscale x 3 x i8> %b, i32 zeroext %evl) {
+; CHECK-LABEL: vssub_vv_nxv3i8_unmasked:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma
+; CHECK-NEXT: vssub.vv v8, v8, v9
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 3 x i1> poison, i1 true, i32 0
+ %m = shufflevector <vscale x 3 x i1> %head, <vscale x 3 x i1> poison, <vscale x 3 x i32> zeroinitializer
+ %v = call <vscale x 3 x i8> @llvm.vp.ssub.sat.nxv3i8(<vscale x 3 x i8> %va, <vscale x 3 x i8> %b, <vscale x 3 x i1> %m, i32 %evl)
+ ret <vscale x 3 x i8> %v
+}
+
+define <vscale x 3 x i8> @vssub_vx_nxv3i8(<vscale x 3 x i8> %va, i8 %b, <vscale x 3 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vssub_vx_nxv3i8:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma
+; CHECK-NEXT: vssub.vx v8, v8, a0, v0.t
+; CHECK-NEXT: ret
+ %elt.head = insertelement <vscale x 3 x i8> poison, i8 %b, i32 0
+ %vb = shufflevector <vscale x 3 x i8> %elt.head, <vscale x 3 x i8> poison, <vscale x 3 x i32> zeroinitializer
+ %v = call <vscale x 3 x i8> @llvm.vp.ssub.sat.nxv3i8(<vscale x 3 x i8> %va, <vscale x 3 x i8> %vb, <vscale x 3 x i1> %m, i32 %evl)
+ ret <vscale x 3 x i8> %v
+}
+
+define <vscale x 3 x i8> @vssub_vx_nxv3i8_unmasked(<vscale x 3 x i8> %va, i8 %b, i32 zeroext %evl) {
+; CHECK-LABEL: vssub_vx_nxv3i8_unmasked:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma
+; CHECK-NEXT: vssub.vx v8, v8, a0
+; CHECK-NEXT: ret
+ %elt.head = insertelement <vscale x 3 x i8> poison, i8 %b, i32 0
+ %vb = shufflevector <vscale x 3 x i8> %elt.head, <vscale x 3 x i8> poison, <vscale x 3 x i32> zeroinitializer
+ %head = insertelement <vscale x 3 x i1> poison, i1 true, i32 0
+ %m = shufflevector <vscale x 3 x i1> %head, <vscale x 3 x i1> poison, <vscale x 3 x i32> zeroinitializer
+ %v = call <vscale x 3 x i8> @llvm.vp.ssub.sat.nxv3i8(<vscale x 3 x i8> %va, <vscale x 3 x i8> %vb, <vscale x 3 x i1> %m, i32 %evl)
+ ret <vscale x 3 x i8> %v
+}
+
+define <vscale x 3 x i8> @vssub_vi_nxv3i8(<vscale x 3 x i8> %va, <vscale x 3 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vssub_vi_nxv3i8:
+; CHECK: # %bb.0:
+; CHECK-NEXT: li a1, -1
+; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma
+; CHECK-NEXT: vssub.vx v8, v8, a1, v0.t
+; CHECK-NEXT: ret
+ %elt.head = insertelement <vscale x 3 x i8> poison, i8 -1, i32 0
+ %vb = shufflevector <vscale x 3 x i8> %elt.head, <vscale x 3 x i8> poison, <vscale x 3 x i32> zeroinitializer
+ %v = call <vscale x 3 x i8> @llvm.vp.ssub.sat.nxv3i8(<vscale x 3 x i8> %va, <vscale x 3 x i8> %vb, <vscale x 3 x i1> %m, i32 %evl)
+ ret <vscale x 3 x i8> %v
+}
+
+define <vscale x 3 x i8> @vssub_vi_nxv3i8_unmasked(<vscale x 3 x i8> %va, i32 zeroext %evl) {
+; CHECK-LABEL: vssub_vi_nxv3i8_unmasked:
+; CHECK: # %bb.0:
+; CHECK-NEXT: li a1, -1
+; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma
+; CHECK-NEXT: vssub.vx v8, v8, a1
+; CHECK-NEXT: ret
+ %elt.head = insertelement <vscale x 3 x i8> poison, i8 -1, i32 0
+ %vb = shufflevector <vscale x 3 x i8> %elt.head, <vscale x 3 x i8> poison, <vscale x 3 x i32> zeroinitializer
+ %head = insertelement <vscale x 3 x i1> poison, i1 true, i32 0
+ %m = shufflevector <vscale x 3 x i1> %head, <vscale x 3 x i1> poison, <vscale x 3 x i32> zeroinitializer
+ %v = call <vscale x 3 x i8> @llvm.vp.ssub.sat.nxv3i8(<vscale x 3 x i8> %va, <vscale x 3 x i8> %vb, <vscale x 3 x i1> %m, i32 %evl)
+ ret <vscale x 3 x i8> %v
+}
+
+declare <vscale x 4 x i8> @llvm.vp.ssub.sat.nxv4i8(<vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i1>, i32)
+
+define <vscale x 4 x i8> @vssub_vv_nxv4i8(<vscale x 4 x i8> %va, <vscale x 4 x i8> %b, <vscale x 4 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vssub_vv_nxv4i8:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma
+; CHECK-NEXT: vssub.vv v8, v8, v9, v0.t
+; CHECK-NEXT: ret
+ %v = call <vscale x 4 x i8> @llvm.vp.ssub.sat.nxv4i8(<vscale x 4 x i8> %va, <vscale x 4 x i8> %b, <vscale x 4 x i1> %m, i32 %evl)
+ ret <vscale x 4 x i8> %v
+}
+
+define <vscale x 4 x i8> @vssub_vv_nxv4i8_unmasked(<vscale x 4 x i8> %va, <vscale x 4 x i8> %b, i32 zeroext %evl) {
+; CHECK-LABEL: vssub_vv_nxv4i8_unmasked:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma
+; CHECK-NEXT: vssub.vv v8, v8, v9
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 4 x i1> poison, i1 true, i32 0
+ %m = shufflevector <vscale x 4 x i1> %head, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
+ %v = call <vscale x 4 x i8> @llvm.vp.ssub.sat.nxv4i8(<vscale x 4 x i8> %va, <vscale x 4 x i8> %b, <vscale x 4 x i1> %m, i32 %evl)
+ ret <vscale x 4 x i8> %v
+}
+
+define <vscale x 4 x i8> @vssub_vx_nxv4i8(<vscale x 4 x i8> %va, i8 %b, <vscale x 4 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vssub_vx_nxv4i8:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma
+; CHECK-NEXT: vssub.vx v8, v8, a0, v0.t
+; CHECK-NEXT: ret
+ %elt.head = insertelement <vscale x 4 x i8> poison, i8 %b, i32 0
+ %vb = shufflevector <vscale x 4 x i8> %elt.head, <vscale x 4 x i8> poison, <vscale x 4 x i32> zeroinitializer
+ %v = call <vscale x 4 x i8> @llvm.vp.ssub.sat.nxv4i8(<vscale x 4 x i8> %va, <vscale x 4 x i8> %vb, <vscale x 4 x i1> %m, i32 %evl)
+ ret <vscale x 4 x i8> %v
+}
+
+define <vscale x 4 x i8> @vssub_vx_nxv4i8_unmasked(<vscale x 4 x i8> %va, i8 %b, i32 zeroext %evl) {
+; CHECK-LABEL: vssub_vx_nxv4i8_unmasked:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma
+; CHECK-NEXT: vssub.vx v8, v8, a0
+; CHECK-NEXT: ret
+ %elt.head = insertelement <vscale x 4 x i8> poison, i8 %b, i32 0
+ %vb = shufflevector <vscale x 4 x i8> %elt.head, <vscale x 4 x i8> poison, <vscale x 4 x i32> zeroinitializer
+ %head = insertelement <vscale x 4 x i1> poison, i1 true, i32 0
+ %m = shufflevector <vscale x 4 x i1> %head, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
+ %v = call <vscale x 4 x i8> @llvm.vp.ssub.sat.nxv4i8(<vscale x 4 x i8> %va, <vscale x 4 x i8> %vb, <vscale x 4 x i1> %m, i32 %evl)
+ ret <vscale x 4 x i8> %v
+}
+
+define <vscale x 4 x i8> @vssub_vi_nxv4i8(<vscale x 4 x i8> %va, <vscale x 4 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vssub_vi_nxv4i8:
+; CHECK: # %bb.0:
+; CHECK-NEXT: li a1, -1
+; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma
+; CHECK-NEXT: vssub.vx v8, v8, a1, v0.t
+; CHECK-NEXT: ret
+ %elt.head = insertelement <vscale x 4 x i8> poison, i8 -1, i32 0
+ %vb = shufflevector <vscale x 4 x i8> %elt.head, <vscale x 4 x i8> poison, <vscale x 4 x i32> zeroinitializer
+ %v = call <vscale x 4 x i8> @llvm.vp.ssub.sat.nxv4i8(<vscale x 4 x i8> %va, <vscale x 4 x i8> %vb, <vscale x 4 x i1> %m, i32 %evl)
+ ret <vscale x 4 x i8> %v
+}
+
+define <vscale x 4 x i8> @vssub_vi_nxv4i8_unmasked(<vscale x 4 x i8> %va, i32 zeroext %evl) {
+; CHECK-LABEL: vssub_vi_nxv4i8_unmasked:
+; CHECK: # %bb.0:
+; CHECK-NEXT: li a1, -1
+; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma
+; CHECK-NEXT: vssub.vx v8, v8, a1
+; CHECK-NEXT: ret
+ %elt.head = insertelement <vscale x 4 x i8> poison, i8 -1, i32 0
+ %vb = shufflevector <vscale x 4 x i8> %elt.head, <vscale x 4 x i8> poison, <vscale x 4 x i32> zeroinitializer
+ %head = insertelement <vscale x 4 x i1> poison, i1 true, i32 0
+ %m = shufflevector <vscale x 4 x i1> %head, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
+ %v = call <vscale x 4 x i8> @llvm.vp.ssub.sat.nxv4i8(<vscale x 4 x i8> %va, <vscale x 4 x i8> %vb, <vscale x 4 x i1> %m, i32 %evl)
+ ret <vscale x 4 x i8> %v
+}
+
+declare <vscale x 8 x i8> @llvm.vp.ssub.sat.nxv8i8(<vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i1>, i32)
+
+define <vscale x 8 x i8> @vssub_vv_nxv8i8(<vscale x 8 x i8> %va, <vscale x 8 x i8> %b, <vscale x 8 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vssub_vv_nxv8i8:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma
+; CHECK-NEXT: vssub.vv v8, v8, v9, v0.t
+; CHECK-NEXT: ret
+ %v = call <vscale x 8 x i8> @llvm.vp.ssub.sat.nxv8i8(<vscale x 8 x i8> %va, <vscale x 8 x i8> %b, <vscale x 8 x i1> %m, i32 %evl)
+ ret <vscale x 8 x i8> %v
+}
+
+define <vscale x 8 x i8> @vssub_vv_nxv8i8_unmasked(<vscale x 8 x i8> %va, <vscale x 8 x i8> %b, i32 zeroext %evl) {
+; CHECK-LABEL: vssub_vv_nxv8i8_unmasked:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma
+; CHECK-NEXT: vssub.vv v8, v8, v9
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i1> poison, i1 true, i32 0
+ %m = shufflevector <vscale x 8 x i1> %head, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
+ %v = call <vscale x 8 x i8> @llvm.vp.ssub.sat.nxv8i8(<vscale x 8 x i8> %va, <vscale x 8 x i8> %b, <vscale x 8 x i1> %m, i32 %evl)
+ ret <vscale x 8 x i8> %v
+}
+
+define <vscale x 8 x i8> @vssub_vx_nxv8i8(<vscale x 8 x i8> %va, i8 %b, <vscale x 8 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vssub_vx_nxv8i8:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma
+; CHECK-NEXT: vssub.vx v8, v8, a0, v0.t
+; CHECK-NEXT: ret
+ %elt.head = insertelement <vscale x 8 x i8> poison, i8 %b, i32 0
+ %vb = shufflevector <vscale x 8 x i8> %elt.head, <vscale x 8 x i8> poison, <vscale x 8 x i32> zeroinitializer
+ %v = call <vscale x 8 x i8> @llvm.vp.ssub.sat.nxv8i8(<vscale x 8 x i8> %va, <vscale x 8 x i8> %vb, <vscale x 8 x i1> %m, i32 %evl)
+ ret <vscale x 8 x i8> %v
+}
+
+define <vscale x 8 x i8> @vssub_vx_nxv8i8_unmasked(<vscale x 8 x i8> %va, i8 %b, i32 zeroext %evl) {
+; CHECK-LABEL: vssub_vx_nxv8i8_unmasked:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma
+; CHECK-NEXT: vssub.vx v8, v8, a0
+; CHECK-NEXT: ret
+ %elt.head = insertelement <vscale x 8 x i8> poison, i8 %b, i32 0
+ %vb = shufflevector <vscale x 8 x i8> %elt.head, <vscale x 8 x i8> poison, <vscale x 8 x i32> zeroinitializer
+ %head = insertelement <vscale x 8 x i1> poison, i1 true, i32 0
+ %m = shufflevector <vscale x 8 x i1> %head, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
+ %v = call <vscale x 8 x i8> @llvm.vp.ssub.sat.nxv8i8(<vscale x 8 x i8> %va, <vscale x 8 x i8> %vb, <vscale x 8 x i1> %m, i32 %evl)
+ ret <vscale x 8 x i8> %v
+}
+
+define <vscale x 8 x i8> @vssub_vi_nxv8i8(<vscale x 8 x i8> %va, <vscale x 8 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vssub_vi_nxv8i8:
+; CHECK: # %bb.0:
+; CHECK-NEXT: li a1, -1
+; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma
+; CHECK-NEXT: vssub.vx v8, v8, a1, v0.t
+; CHECK-NEXT: ret
+ %elt.head = insertelement <vscale x 8 x i8> poison, i8 -1, i32 0
+ %vb = shufflevector <vscale x 8 x i8> %elt.head, <vscale x 8 x i8> poison, <vscale x 8 x i32> zeroinitializer
+ %v = call <vscale x 8 x i8> @llvm.vp.ssub.sat.nxv8i8(<vscale x 8 x i8> %va, <vscale x 8 x i8> %vb, <vscale x 8 x i1> %m, i32 %evl)
+ ret <vscale x 8 x i8> %v
+}
+
+define <vscale x 8 x i8> @vssub_vi_nxv8i8_unmasked(<vscale x 8 x i8> %va, i32 zeroext %evl) {
+; CHECK-LABEL: vssub_vi_nxv8i8_unmasked:
+; CHECK: # %bb.0:
+; CHECK-NEXT: li a1, -1
+; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma
+; CHECK-NEXT: vssub.vx v8, v8, a1
+; CHECK-NEXT: ret
+ %elt.head = insertelement <vscale x 8 x i8> poison, i8 -1, i32 0
+ %vb = shufflevector <vscale x 8 x i8> %elt.head, <vscale x 8 x i8> poison, <vscale x 8 x i32> zeroinitializer
+ %head = insertelement <vscale x 8 x i1> poison, i1 true, i32 0
+ %m = shufflevector <vscale x 8 x i1> %head, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
+ %v = call <vscale x 8 x i8> @llvm.vp.ssub.sat.nxv8i8(<vscale x 8 x i8> %va, <vscale x 8 x i8> %vb, <vscale x 8 x i1> %m, i32 %evl)
+ ret <vscale x 8 x i8> %v
+}
+
+declare <vscale x 16 x i8> @llvm.vp.ssub.sat.nxv16i8(<vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i1>, i32)
+
+define <vscale x 16 x i8> @vssub_vv_nxv16i8(<vscale x 16 x i8> %va, <vscale x 16 x i8> %b, <vscale x 16 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vssub_vv_nxv16i8:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma
+; CHECK-NEXT: vssub.vv v8, v8, v10, v0.t
+; CHECK-NEXT: ret
+ %v = call <vscale x 16 x i8> @llvm.vp.ssub.sat.nxv16i8(<vscale x 16 x i8> %va, <vscale x 16 x i8> %b, <vscale x 16 x i1> %m, i32 %evl)
+ ret <vscale x 16 x i8> %v
+}
+
+define <vscale x 16 x i8> @vssub_vv_nxv16i8_unmasked(<vscale x 16 x i8> %va, <vscale x 16 x i8> %b, i32 zeroext %evl) {
+; CHECK-LABEL: vssub_vv_nxv16i8_unmasked:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma
+; CHECK-NEXT: vssub.vv v8, v8, v10
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 16 x i1> poison, i1 true, i32 0
+ %m = shufflevector <vscale x 16 x i1> %head, <vscale x 16 x i1> poison, <vscale x 16 x i32> zeroinitializer
+ %v = call <vscale x 16 x i8> @llvm.vp.ssub.sat.nxv16i8(<vscale x 16 x i8> %va, <vscale x 16 x i8> %b, <vscale x 16 x i1> %m, i32 %evl)
+ ret <vscale x 16 x i8> %v
+}
+
+define <vscale x 16 x i8> @vssub_vx_nxv16i8(<vscale x 16 x i8> %va, i8 %b, <vscale x 16 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vssub_vx_nxv16i8:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma
+; CHECK-NEXT: vssub.vx v8, v8, a0, v0.t
+; CHECK-NEXT: ret
+ %elt.head = insertelement <vscale x 16 x i8> poison, i8 %b, i32 0
+ %vb = shufflevector <vscale x 16 x i8> %elt.head, <vscale x 16 x i8> poison, <vscale x 16 x i32> zeroinitializer
+ %v = call <vscale x 16 x i8> @llvm.vp.ssub.sat.nxv16i8(<vscale x 16 x i8> %va, <vscale x 16 x i8> %vb, <vscale x 16 x i1> %m, i32 %evl)
+ ret <vscale x 16 x i8> %v
+}
+
+define <vscale x 16 x i8> @vssub_vx_nxv16i8_unmasked(<vscale x 16 x i8> %va, i8 %b, i32 zeroext %evl) {
+; CHECK-LABEL: vssub_vx_nxv16i8_unmasked:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma
+; CHECK-NEXT: vssub.vx v8, v8, a0
+; CHECK-NEXT: ret
+ %elt.head = insertelement <vscale x 16 x i8> poison, i8 %b, i32 0
+ %vb = shufflevector <vscale x 16 x i8> %elt.head, <vscale x 16 x i8> poison, <vscale x 16 x i32> zeroinitializer
+ %head = insertelement <vscale x 16 x i1> poison, i1 true, i32 0
+ %m = shufflevector <vscale x 16 x i1> %head, <vscale x 16 x i1> poison, <vscale x 16 x i32> zeroinitializer
+ %v = call <vscale x 16 x i8> @llvm.vp.ssub.sat.nxv16i8(<vscale x 16 x i8> %va, <vscale x 16 x i8> %vb, <vscale x 16 x i1> %m, i32 %evl)
+ ret <vscale x 16 x i8> %v
+}
+
+define <vscale x 16 x i8> @vssub_vi_nxv16i8(<vscale x 16 x i8> %va, <vscale x 16 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vssub_vi_nxv16i8:
+; CHECK: # %bb.0:
+; CHECK-NEXT: li a1, -1
+; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma
+; CHECK-NEXT: vssub.vx v8, v8, a1, v0.t
+; CHECK-NEXT: ret
+ %elt.head = insertelement <vscale x 16 x i8> poison, i8 -1, i32 0
+ %vb = shufflevector <vscale x 16 x i8> %elt.head, <vscale x 16 x i8> poison, <vscale x 16 x i32> zeroinitializer
+ %v = call <vscale x 16 x i8> @llvm.vp.ssub.sat.nxv16i8(<vscale x 16 x i8> %va, <vscale x 16 x i8> %vb, <vscale x 16 x i1> %m, i32 %evl)
+ ret <vscale x 16 x i8> %v
+}
+
+define <vscale x 16 x i8> @vssub_vi_nxv16i8_unmasked(<vscale x 16 x i8> %va, i32 zeroext %evl) {
+; CHECK-LABEL: vssub_vi_nxv16i8_unmasked:
+; CHECK: # %bb.0:
+; CHECK-NEXT: li a1, -1
+; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma
+; CHECK-NEXT: vssub.vx v8, v8, a1
+; CHECK-NEXT: ret
+ %elt.head = insertelement <vscale x 16 x i8> poison, i8 -1, i32 0
+ %vb = shufflevector <vscale x 16 x i8> %elt.head, <vscale x 16 x i8> poison, <vscale x 16 x i32> zeroinitializer
+ %head = insertelement <vscale x 16 x i1> poison, i1 true, i32 0
+ %m = shufflevector <vscale x 16 x i1> %head, <vscale x 16 x i1> poison, <vscale x 16 x i32> zeroinitializer
+ %v = call <vscale x 16 x i8> @llvm.vp.ssub.sat.nxv16i8(<vscale x 16 x i8> %va, <vscale x 16 x i8> %vb, <vscale x 16 x i1> %m, i32 %evl)
+ ret <vscale x 16 x i8> %v
+}
+
+declare <vscale x 32 x i8> @llvm.vp.ssub.sat.nxv32i8(<vscale x 32 x i8>, <vscale x 32 x i8>, <vscale x 32 x i1>, i32)
+
+define <vscale x 32 x i8> @vssub_vv_nxv32i8(<vscale x 32 x i8> %va, <vscale x 32 x i8> %b, <vscale x 32 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vssub_vv_nxv32i8:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma
+; CHECK-NEXT: vssub.vv v8, v8, v12, v0.t
+; CHECK-NEXT: ret
+ %v = call <vscale x 32 x i8> @llvm.vp.ssub.sat.nxv32i8(<vscale x 32 x i8> %va, <vscale x 32 x i8> %b, <vscale x 32 x i1> %m, i32 %evl)
+ ret <vscale x 32 x i8> %v
+}
+
+define <vscale x 32 x i8> @vssub_vv_nxv32i8_unmasked(<vscale x 32 x i8> %va, <vscale x 32 x i8> %b, i32 zeroext %evl) {
+; CHECK-LABEL: vssub_vv_nxv32i8_unmasked:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma
+; CHECK-NEXT: vssub.vv v8, v8, v12
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 32 x i1> poison, i1 true, i32 0
+ %m = shufflevector <vscale x 32 x i1> %head, <vscale x 32 x i1> poison, <vscale x 32 x i32> zeroinitializer
+ %v = call <vscale x 32 x i8> @llvm.vp.ssub.sat.nxv32i8(<vscale x 32 x i8> %va, <vscale x 32 x i8> %b, <vscale x 32 x i1> %m, i32 %evl)
+ ret <vscale x 32 x i8> %v
+}
+
+define <vscale x 32 x i8> @vssub_vx_nxv32i8(<vscale x 32 x i8> %va, i8 %b, <vscale x 32 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vssub_vx_nxv32i8:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, ma
+; CHECK-NEXT: vssub.vx v8, v8, a0, v0.t
+; CHECK-NEXT: ret
+ %elt.head = insertelement <vscale x 32 x i8> poison, i8 %b, i32 0
+ %vb = shufflevector <vscale x 32 x i8> %elt.head, <vscale x 32 x i8> poison, <vscale x 32 x i32> zeroinitializer
+ %v = call <vscale x 32 x i8> @llvm.vp.ssub.sat.nxv32i8(<vscale x 32 x i8> %va, <vscale x 32 x i8> %vb, <vscale x 32 x i1> %m, i32 %evl)
+ ret <vscale x 32 x i8> %v
+}
+
+define <vscale x 32 x i8> @vssub_vx_nxv32i8_unmasked(<vscale x 32 x i8> %va, i8 %b, i32 zeroext %evl) {
+; CHECK-LABEL: vssub_vx_nxv32i8_unmasked:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, ma
+; CHECK-NEXT: vssub.vx v8, v8, a0
+; CHECK-NEXT: ret
+ %elt.head = insertelement <vscale x 32 x i8> poison, i8 %b, i32 0
+ %vb = shufflevector <vscale x 32 x i8> %elt.head, <vscale x 32 x i8> poison, <vscale x 32 x i32> zeroinitializer
+ %head = insertelement <vscale x 32 x i1> poison, i1 true, i32 0
+ %m = shufflevector <vscale x 32 x i1> %head, <vscale x 32 x i1> poison, <vscale x 32 x i32> zeroinitializer
+ %v = call <vscale x 32 x i8> @llvm.vp.ssub.sat.nxv32i8(<vscale x 32 x i8> %va, <vscale x 32 x i8> %vb, <vscale x 32 x i1> %m, i32 %evl)
+ ret <vscale x 32 x i8> %v
+}
+
+define <vscale x 32 x i8> @vssub_vi_nxv32i8(<vscale x 32 x i8> %va, <vscale x 32 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vssub_vi_nxv32i8:
+; CHECK: # %bb.0:
+; CHECK-NEXT: li a1, -1
+; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma
+; CHECK-NEXT: vssub.vx v8, v8, a1, v0.t
+; CHECK-NEXT: ret
+ %elt.head = insertelement <vscale x 32 x i8> poison, i8 -1, i32 0
+ %vb = shufflevector <vscale x 32 x i8> %elt.head, <vscale x 32 x i8> poison, <vscale x 32 x i32> zeroinitializer
+ %v = call <vscale x 32 x i8> @llvm.vp.ssub.sat.nxv32i8(<vscale x 32 x i8> %va, <vscale x 32 x i8> %vb, <vscale x 32 x i1> %m, i32 %evl)
+ ret <vscale x 32 x i8> %v
+}
+
+define <vscale x 32 x i8> @vssub_vi_nxv32i8_unmasked(<vscale x 32 x i8> %va, i32 zeroext %evl) {
+; CHECK-LABEL: vssub_vi_nxv32i8_unmasked:
+; CHECK: # %bb.0:
+; CHECK-NEXT: li a1, -1
+; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma
+; CHECK-NEXT: vssub.vx v8, v8, a1
+; CHECK-NEXT: ret
+ %elt.head = insertelement <vscale x 32 x i8> poison, i8 -1, i32 0
+ %vb = shufflevector <vscale x 32 x i8> %elt.head, <vscale x 32 x i8> poison, <vscale x 32 x i32> zeroinitializer
+ %head = insertelement <vscale x 32 x i1> poison, i1 true, i32 0
+ %m = shufflevector <vscale x 32 x i1> %head, <vscale x 32 x i1> poison, <vscale x 32 x i32> zeroinitializer
+ %v = call <vscale x 32 x i8> @llvm.vp.ssub.sat.nxv32i8(<vscale x 32 x i8> %va, <vscale x 32 x i8> %vb, <vscale x 32 x i1> %m, i32 %evl)
+ ret <vscale x 32 x i8> %v
+}
+
+declare <vscale x 64 x i8> @llvm.vp.ssub.sat.nxv64i8(<vscale x 64 x i8>, <vscale x 64 x i8>, <vscale x 64 x i1>, i32)
+
+define <vscale x 64 x i8> @vssub_vv_nxv64i8(<vscale x 64 x i8> %va, <vscale x 64 x i8> %b, <vscale x 64 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vssub_vv_nxv64i8:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, ma
+; CHECK-NEXT: vssub.vv v8, v8, v16, v0.t
+; CHECK-NEXT: ret
+ %v = call <vscale x 64 x i8> @llvm.vp.ssub.sat.nxv64i8(<vscale x 64 x i8> %va, <vscale x 64 x i8> %b, <vscale x 64 x i1> %m, i32 %evl)
+ ret <vscale x 64 x i8> %v
+}
+
+define <vscale x 64 x i8> @vssub_vv_nxv64i8_unmasked(<vscale x 64 x i8> %va, <vscale x 64 x i8> %b, i32 zeroext %evl) {
+; CHECK-LABEL: vssub_vv_nxv64i8_unmasked:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, ma
+; CHECK-NEXT: vssub.vv v8, v8, v16
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 64 x i1> poison, i1 true, i32 0
+ %m = shufflevector <vscale x 64 x i1> %head, <vscale x 64 x i1> poison, <vscale x 64 x i32> zeroinitializer
+ %v = call <vscale x 64 x i8> @llvm.vp.ssub.sat.nxv64i8(<vscale x 64 x i8> %va, <vscale x 64 x i8> %b, <vscale x 64 x i1> %m, i32 %evl)
+ ret <vscale x 64 x i8> %v
+}
+
+define <vscale x 64 x i8> @vssub_vx_nxv64i8(<vscale x 64 x i8> %va, i8 %b, <vscale x 64 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vssub_vx_nxv64i8:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, ma
+; CHECK-NEXT: vssub.vx v8, v8, a0, v0.t
+; CHECK-NEXT: ret
+ %elt.head = insertelement <vscale x 64 x i8> poison, i8 %b, i32 0
+ %vb = shufflevector <vscale x 64 x i8> %elt.head, <vscale x 64 x i8> poison, <vscale x 64 x i32> zeroinitializer
+ %v = call <vscale x 64 x i8> @llvm.vp.ssub.sat.nxv64i8(<vscale x 64 x i8> %va, <vscale x 64 x i8> %vb, <vscale x 64 x i1> %m, i32 %evl)
+ ret <vscale x 64 x i8> %v
+}
+
+define <vscale x 64 x i8> @vssub_vx_nxv64i8_unmasked(<vscale x 64 x i8> %va, i8 %b, i32 zeroext %evl) {
+; CHECK-LABEL: vssub_vx_nxv64i8_unmasked:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, ma
+; CHECK-NEXT: vssub.vx v8, v8, a0
+; CHECK-NEXT: ret
+ %elt.head = insertelement <vscale x 64 x i8> poison, i8 %b, i32 0
+ %vb = shufflevector <vscale x 64 x i8> %elt.head, <vscale x 64 x i8> poison, <vscale x 64 x i32> zeroinitializer
+ %head = insertelement <vscale x 64 x i1> poison, i1 true, i32 0
+ %m = shufflevector <vscale x 64 x i1> %head, <vscale x 64 x i1> poison, <vscale x 64 x i32> zeroinitializer
+ %v = call <vscale x 64 x i8> @llvm.vp.ssub.sat.nxv64i8(<vscale x 64 x i8> %va, <vscale x 64 x i8> %vb, <vscale x 64 x i1> %m, i32 %evl)
+ ret <vscale x 64 x i8> %v
+}
+
+define <vscale x 64 x i8> @vssub_vi_nxv64i8(<vscale x 64 x i8> %va, <vscale x 64 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vssub_vi_nxv64i8:
+; CHECK: # %bb.0:
+; CHECK-NEXT: li a1, -1
+; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, ma
+; CHECK-NEXT: vssub.vx v8, v8, a1, v0.t
+; CHECK-NEXT: ret
+ %elt.head = insertelement <vscale x 64 x i8> poison, i8 -1, i32 0
+ %vb = shufflevector <vscale x 64 x i8> %elt.head, <vscale x 64 x i8> poison, <vscale x 64 x i32> zeroinitializer
+ %v = call <vscale x 64 x i8> @llvm.vp.ssub.sat.nxv64i8(<vscale x 64 x i8> %va, <vscale x 64 x i8> %vb, <vscale x 64 x i1> %m, i32 %evl)
+ ret <vscale x 64 x i8> %v
+}
+
+define <vscale x 64 x i8> @vssub_vi_nxv64i8_unmasked(<vscale x 64 x i8> %va, i32 zeroext %evl) {
+; CHECK-LABEL: vssub_vi_nxv64i8_unmasked:
+; CHECK: # %bb.0:
+; CHECK-NEXT: li a1, -1
+; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, ma
+; CHECK-NEXT: vssub.vx v8, v8, a1
+; CHECK-NEXT: ret
+ %elt.head = insertelement <vscale x 64 x i8> poison, i8 -1, i32 0
+ %vb = shufflevector <vscale x 64 x i8> %elt.head, <vscale x 64 x i8> poison, <vscale x 64 x i32> zeroinitializer
+ %head = insertelement <vscale x 64 x i1> poison, i1 true, i32 0
+ %m = shufflevector <vscale x 64 x i1> %head, <vscale x 64 x i1> poison, <vscale x 64 x i32> zeroinitializer
+ %v = call <vscale x 64 x i8> @llvm.vp.ssub.sat.nxv64i8(<vscale x 64 x i8> %va, <vscale x 64 x i8> %vb, <vscale x 64 x i1> %m, i32 %evl)
+ ret <vscale x 64 x i8> %v
+}
+
+; Test that split-legalization works when the mask itself needs splitting.
+
+declare <vscale x 128 x i8> @llvm.vp.ssub.sat.nxv128i8(<vscale x 128 x i8>, <vscale x 128 x i8>, <vscale x 128 x i1>, i32)
+
+define <vscale x 128 x i8> @vssub_vi_nxv128i8(<vscale x 128 x i8> %va, <vscale x 128 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vssub_vi_nxv128i8:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vmv1r.v v24, v0
+; CHECK-NEXT: vsetvli a2, zero, e8, m8, ta, ma
+; CHECK-NEXT: vlm.v v0, (a0)
+; CHECK-NEXT: csrr a2, vlenb
+; CHECK-NEXT: slli a2, a2, 3
+; CHECK-NEXT: sub a0, a1, a2
+; CHECK-NEXT: sltu a3, a1, a0
+; CHECK-NEXT: addi a3, a3, -1
+; CHECK-NEXT: and a3, a3, a0
+; CHECK-NEXT: li a0, -1
+; CHECK-NEXT: vsetvli zero, a3, e8, m8, ta, ma
+; CHECK-NEXT: vssub.vx v16, v16, a0, v0.t
+; CHECK-NEXT: bltu a1, a2, .LBB50_2
+; CHECK-NEXT: # %bb.1:
+; CHECK-NEXT: mv a1, a2
+; CHECK-NEXT: .LBB50_2:
+; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, ma
+; CHECK-NEXT: vmv1r.v v0, v24
+; CHECK-NEXT: vssub.vx v8, v8, a0, v0.t
+; CHECK-NEXT: ret
+ %elt.head = insertelement <vscale x 128 x i8> poison, i8 -1, i32 0
+ %vb = shufflevector <vscale x 128 x i8> %elt.head, <vscale x 128 x i8> poison, <vscale x 128 x i32> zeroinitializer
+ %v = call <vscale x 128 x i8> @llvm.vp.ssub.sat.nxv128i8(<vscale x 128 x i8> %va, <vscale x 128 x i8> %vb, <vscale x 128 x i1> %m, i32 %evl)
+ ret <vscale x 128 x i8> %v
+}
+
+define <vscale x 128 x i8> @vssub_vi_nxv128i8_unmasked(<vscale x 128 x i8> %va, i32 zeroext %evl) {
+; CHECK-LABEL: vssub_vi_nxv128i8_unmasked:
+; CHECK: # %bb.0:
+; CHECK-NEXT: csrr a1, vlenb
+; CHECK-NEXT: slli a1, a1, 3
+; CHECK-NEXT: sub a2, a0, a1
+; CHECK-NEXT: sltu a3, a0, a2
+; CHECK-NEXT: addi a3, a3, -1
+; CHECK-NEXT: and a3, a3, a2
+; CHECK-NEXT: li a2, -1
+; CHECK-NEXT: vsetvli zero, a3, e8, m8, ta, ma
+; CHECK-NEXT: vssub.vx v16, v16, a2
+; CHECK-NEXT: bltu a0, a1, .LBB51_2
+; CHECK-NEXT: # %bb.1:
+; CHECK-NEXT: mv a0, a1
+; CHECK-NEXT: .LBB51_2:
+; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, ma
+; CHECK-NEXT: vssub.vx v8, v8, a2
+; CHECK-NEXT: ret
+ %elt.head = insertelement <vscale x 128 x i8> poison, i8 -1, i32 0
+ %vb = shufflevector <vscale x 128 x i8> %elt.head, <vscale x 128 x i8> poison, <vscale x 128 x i32> zeroinitializer
+ %head = insertelement <vscale x 128 x i1> poison, i1 true, i32 0
+ %m = shufflevector <vscale x 128 x i1> %head, <vscale x 128 x i1> poison, <vscale x 128 x i32> zeroinitializer
+ %v = call <vscale x 128 x i8> @llvm.vp.ssub.sat.nxv128i8(<vscale x 128 x i8> %va, <vscale x 128 x i8> %vb, <vscale x 128 x i1> %m, i32 %evl)
+ ret <vscale x 128 x i8> %v
+}
+
+declare <vscale x 1 x i16> @llvm.vp.ssub.sat.nxv1i16(<vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i1>, i32)
+
+define <vscale x 1 x i16> @vssub_vv_nxv1i16(<vscale x 1 x i16> %va, <vscale x 1 x i16> %b, <vscale x 1 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vssub_vv_nxv1i16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
+; CHECK-NEXT: vssub.vv v8, v8, v9, v0.t
+; CHECK-NEXT: ret
+ %v = call <vscale x 1 x i16> @llvm.vp.ssub.sat.nxv1i16(<vscale x 1 x i16> %va, <vscale x 1 x i16> %b, <vscale x 1 x i1> %m, i32 %evl)
+ ret <vscale x 1 x i16> %v
+}
+
+define <vscale x 1 x i16> @vssub_vv_nxv1i16_unmasked(<vscale x 1 x i16> %va, <vscale x 1 x i16> %b, i32 zeroext %evl) {
+; CHECK-LABEL: vssub_vv_nxv1i16_unmasked:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
+; CHECK-NEXT: vssub.vv v8, v8, v9
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 1 x i1> poison, i1 true, i32 0
+ %m = shufflevector <vscale x 1 x i1> %head, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
+ %v = call <vscale x 1 x i16> @llvm.vp.ssub.sat.nxv1i16(<vscale x 1 x i16> %va, <vscale x 1 x i16> %b, <vscale x 1 x i1> %m, i32 %evl)
+ ret <vscale x 1 x i16> %v
+}
+
+define <vscale x 1 x i16> @vssub_vx_nxv1i16(<vscale x 1 x i16> %va, i16 %b, <vscale x 1 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vssub_vx_nxv1i16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
+; CHECK-NEXT: vssub.vx v8, v8, a0, v0.t
+; CHECK-NEXT: ret
+ %elt.head = insertelement <vscale x 1 x i16> poison, i16 %b, i32 0
+ %vb = shufflevector <vscale x 1 x i16> %elt.head, <vscale x 1 x i16> poison, <vscale x 1 x i32> zeroinitializer
+ %v = call <vscale x 1 x i16> @llvm.vp.ssub.sat.nxv1i16(<vscale x 1 x i16> %va, <vscale x 1 x i16> %vb, <vscale x 1 x i1> %m, i32 %evl)
+ ret <vscale x 1 x i16> %v
+}
+
+define <vscale x 1 x i16> @vssub_vx_nxv1i16_unmasked(<vscale x 1 x i16> %va, i16 %b, i32 zeroext %evl) {
+; CHECK-LABEL: vssub_vx_nxv1i16_unmasked:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
+; CHECK-NEXT: vssub.vx v8, v8, a0
+; CHECK-NEXT: ret
+ %elt.head = insertelement <vscale x 1 x i16> poison, i16 %b, i32 0
+ %vb = shufflevector <vscale x 1 x i16> %elt.head, <vscale x 1 x i16> poison, <vscale x 1 x i32> zeroinitializer
+ %head = insertelement <vscale x 1 x i1> poison, i1 true, i32 0
+ %m = shufflevector <vscale x 1 x i1> %head, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
+ %v = call <vscale x 1 x i16> @llvm.vp.ssub.sat.nxv1i16(<vscale x 1 x i16> %va, <vscale x 1 x i16> %vb, <vscale x 1 x i1> %m, i32 %evl)
+ ret <vscale x 1 x i16> %v
+}
+
+define <vscale x 1 x i16> @vssub_vi_nxv1i16(<vscale x 1 x i16> %va, <vscale x 1 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vssub_vi_nxv1i16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: li a1, -1
+; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
+; CHECK-NEXT: vssub.vx v8, v8, a1, v0.t
+; CHECK-NEXT: ret
+ %elt.head = insertelement <vscale x 1 x i16> poison, i16 -1, i32 0
+ %vb = shufflevector <vscale x 1 x i16> %elt.head, <vscale x 1 x i16> poison, <vscale x 1 x i32> zeroinitializer
+ %v = call <vscale x 1 x i16> @llvm.vp.ssub.sat.nxv1i16(<vscale x 1 x i16> %va, <vscale x 1 x i16> %vb, <vscale x 1 x i1> %m, i32 %evl)
+ ret <vscale x 1 x i16> %v
+}
+
+define <vscale x 1 x i16> @vssub_vi_nxv1i16_unmasked(<vscale x 1 x i16> %va, i32 zeroext %evl) {
+; CHECK-LABEL: vssub_vi_nxv1i16_unmasked:
+; CHECK: # %bb.0:
+; CHECK-NEXT: li a1, -1
+; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
+; CHECK-NEXT: vssub.vx v8, v8, a1
+; CHECK-NEXT: ret
+ %elt.head = insertelement <vscale x 1 x i16> poison, i16 -1, i32 0
+ %vb = shufflevector <vscale x 1 x i16> %elt.head, <vscale x 1 x i16> poison, <vscale x 1 x i32> zeroinitializer
+ %head = insertelement <vscale x 1 x i1> poison, i1 true, i32 0
+ %m = shufflevector <vscale x 1 x i1> %head, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
+ %v = call <vscale x 1 x i16> @llvm.vp.ssub.sat.nxv1i16(<vscale x 1 x i16> %va, <vscale x 1 x i16> %vb, <vscale x 1 x i1> %m, i32 %evl)
+ ret <vscale x 1 x i16> %v
+}
+
+declare <vscale x 2 x i16> @llvm.vp.ssub.sat.nxv2i16(<vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i1>, i32)
+
+define <vscale x 2 x i16> @vssub_vv_nxv2i16(<vscale x 2 x i16> %va, <vscale x 2 x i16> %b, <vscale x 2 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vssub_vv_nxv2i16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma
+; CHECK-NEXT: vssub.vv v8, v8, v9, v0.t
+; CHECK-NEXT: ret
+ %v = call <vscale x 2 x i16> @llvm.vp.ssub.sat.nxv2i16(<vscale x 2 x i16> %va, <vscale x 2 x i16> %b, <vscale x 2 x i1> %m, i32 %evl)
+ ret <vscale x 2 x i16> %v
+}
+
+define <vscale x 2 x i16> @vssub_vv_nxv2i16_unmasked(<vscale x 2 x i16> %va, <vscale x 2 x i16> %b, i32 zeroext %evl) {
+; CHECK-LABEL: vssub_vv_nxv2i16_unmasked:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma
+; CHECK-NEXT: vssub.vv v8, v8, v9
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 2 x i1> poison, i1 true, i32 0
+ %m = shufflevector <vscale x 2 x i1> %head, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
+ %v = call <vscale x 2 x i16> @llvm.vp.ssub.sat.nxv2i16(<vscale x 2 x i16> %va, <vscale x 2 x i16> %b, <vscale x 2 x i1> %m, i32 %evl)
+ ret <vscale x 2 x i16> %v
+}
+
+define <vscale x 2 x i16> @vssub_vx_nxv2i16(<vscale x 2 x i16> %va, i16 %b, <vscale x 2 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vssub_vx_nxv2i16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
+; CHECK-NEXT: vssub.vx v8, v8, a0, v0.t
+; CHECK-NEXT: ret
+ %elt.head = insertelement <vscale x 2 x i16> poison, i16 %b, i32 0
+ %vb = shufflevector <vscale x 2 x i16> %elt.head, <vscale x 2 x i16> poison, <vscale x 2 x i32> zeroinitializer
+ %v = call <vscale x 2 x i16> @llvm.vp.ssub.sat.nxv2i16(<vscale x 2 x i16> %va, <vscale x 2 x i16> %vb, <vscale x 2 x i1> %m, i32 %evl)
+ ret <vscale x 2 x i16> %v
+}
+
+define <vscale x 2 x i16> @vssub_vx_nxv2i16_unmasked(<vscale x 2 x i16> %va, i16 %b, i32 zeroext %evl) {
+; CHECK-LABEL: vssub_vx_nxv2i16_unmasked:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
+; CHECK-NEXT: vssub.vx v8, v8, a0
+; CHECK-NEXT: ret
+ %elt.head = insertelement <vscale x 2 x i16> poison, i16 %b, i32 0
+ %vb = shufflevector <vscale x 2 x i16> %elt.head, <vscale x 2 x i16> poison, <vscale x 2 x i32> zeroinitializer
+ %head = insertelement <vscale x 2 x i1> poison, i1 true, i32 0
+ %m = shufflevector <vscale x 2 x i1> %head, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
+ %v = call <vscale x 2 x i16> @llvm.vp.ssub.sat.nxv2i16(<vscale x 2 x i16> %va, <vscale x 2 x i16> %vb, <vscale x 2 x i1> %m, i32 %evl)
+ ret <vscale x 2 x i16> %v
+}
+
+define <vscale x 2 x i16> @vssub_vi_nxv2i16(<vscale x 2 x i16> %va, <vscale x 2 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vssub_vi_nxv2i16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: li a1, -1
+; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma
+; CHECK-NEXT: vssub.vx v8, v8, a1, v0.t
+; CHECK-NEXT: ret
+ %elt.head = insertelement <vscale x 2 x i16> poison, i16 -1, i32 0
+ %vb = shufflevector <vscale x 2 x i16> %elt.head, <vscale x 2 x i16> poison, <vscale x 2 x i32> zeroinitializer
+ %v = call <vscale x 2 x i16> @llvm.vp.ssub.sat.nxv2i16(<vscale x 2 x i16> %va, <vscale x 2 x i16> %vb, <vscale x 2 x i1> %m, i32 %evl)
+ ret <vscale x 2 x i16> %v
+}
+
+define <vscale x 2 x i16> @vssub_vi_nxv2i16_unmasked(<vscale x 2 x i16> %va, i32 zeroext %evl) {
+; CHECK-LABEL: vssub_vi_nxv2i16_unmasked:
+; CHECK: # %bb.0:
+; CHECK-NEXT: li a1, -1
+; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma
+; CHECK-NEXT: vssub.vx v8, v8, a1
+; CHECK-NEXT: ret
+ %elt.head = insertelement <vscale x 2 x i16> poison, i16 -1, i32 0
+ %vb = shufflevector <vscale x 2 x i16> %elt.head, <vscale x 2 x i16> poison, <vscale x 2 x i32> zeroinitializer
+ %head = insertelement <vscale x 2 x i1> poison, i1 true, i32 0
+ %m = shufflevector <vscale x 2 x i1> %head, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
+ %v = call <vscale x 2 x i16> @llvm.vp.ssub.sat.nxv2i16(<vscale x 2 x i16> %va, <vscale x 2 x i16> %vb, <vscale x 2 x i1> %m, i32 %evl)
+ ret <vscale x 2 x i16> %v
+}
+
+declare <vscale x 4 x i16> @llvm.vp.ssub.sat.nxv4i16(<vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i1>, i32)
+
+define <vscale x 4 x i16> @vssub_vv_nxv4i16(<vscale x 4 x i16> %va, <vscale x 4 x i16> %b, <vscale x 4 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vssub_vv_nxv4i16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma
+; CHECK-NEXT: vssub.vv v8, v8, v9, v0.t
+; CHECK-NEXT: ret
+ %v = call <vscale x 4 x i16> @llvm.vp.ssub.sat.nxv4i16(<vscale x 4 x i16> %va, <vscale x 4 x i16> %b, <vscale x 4 x i1> %m, i32 %evl)
+ ret <vscale x 4 x i16> %v
+}
+
+define <vscale x 4 x i16> @vssub_vv_nxv4i16_unmasked(<vscale x 4 x i16> %va, <vscale x 4 x i16> %b, i32 zeroext %evl) {
+; CHECK-LABEL: vssub_vv_nxv4i16_unmasked:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma
+; CHECK-NEXT: vssub.vv v8, v8, v9
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 4 x i1> poison, i1 true, i32 0
+ %m = shufflevector <vscale x 4 x i1> %head, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
+ %v = call <vscale x 4 x i16> @llvm.vp.ssub.sat.nxv4i16(<vscale x 4 x i16> %va, <vscale x 4 x i16> %b, <vscale x 4 x i1> %m, i32 %evl)
+ ret <vscale x 4 x i16> %v
+}
+
+define <vscale x 4 x i16> @vssub_vx_nxv4i16(<vscale x 4 x i16> %va, i16 %b, <vscale x 4 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vssub_vx_nxv4i16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
+; CHECK-NEXT: vssub.vx v8, v8, a0, v0.t
+; CHECK-NEXT: ret
+ %elt.head = insertelement <vscale x 4 x i16> poison, i16 %b, i32 0
+ %vb = shufflevector <vscale x 4 x i16> %elt.head, <vscale x 4 x i16> poison, <vscale x 4 x i32> zeroinitializer
+ %v = call <vscale x 4 x i16> @llvm.vp.ssub.sat.nxv4i16(<vscale x 4 x i16> %va, <vscale x 4 x i16> %vb, <vscale x 4 x i1> %m, i32 %evl)
+ ret <vscale x 4 x i16> %v
+}
+
+define <vscale x 4 x i16> @vssub_vx_nxv4i16_unmasked(<vscale x 4 x i16> %va, i16 %b, i32 zeroext %evl) {
+; CHECK-LABEL: vssub_vx_nxv4i16_unmasked:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
+; CHECK-NEXT: vssub.vx v8, v8, a0
+; CHECK-NEXT: ret
+ %elt.head = insertelement <vscale x 4 x i16> poison, i16 %b, i32 0
+ %vb = shufflevector <vscale x 4 x i16> %elt.head, <vscale x 4 x i16> poison, <vscale x 4 x i32> zeroinitializer
+ %head = insertelement <vscale x 4 x i1> poison, i1 true, i32 0
+ %m = shufflevector <vscale x 4 x i1> %head, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
+ %v = call <vscale x 4 x i16> @llvm.vp.ssub.sat.nxv4i16(<vscale x 4 x i16> %va, <vscale x 4 x i16> %vb, <vscale x 4 x i1> %m, i32 %evl)
+ ret <vscale x 4 x i16> %v
+}
+
+define <vscale x 4 x i16> @vssub_vi_nxv4i16(<vscale x 4 x i16> %va, <vscale x 4 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vssub_vi_nxv4i16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: li a1, -1
+; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma
+; CHECK-NEXT: vssub.vx v8, v8, a1, v0.t
+; CHECK-NEXT: ret
+ %elt.head = insertelement <vscale x 4 x i16> poison, i16 -1, i32 0
+ %vb = shufflevector <vscale x 4 x i16> %elt.head, <vscale x 4 x i16> poison, <vscale x 4 x i32> zeroinitializer
+ %v = call <vscale x 4 x i16> @llvm.vp.ssub.sat.nxv4i16(<vscale x 4 x i16> %va, <vscale x 4 x i16> %vb, <vscale x 4 x i1> %m, i32 %evl)
+ ret <vscale x 4 x i16> %v
+}
+
+define <vscale x 4 x i16> @vssub_vi_nxv4i16_unmasked(<vscale x 4 x i16> %va, i32 zeroext %evl) {
+; CHECK-LABEL: vssub_vi_nxv4i16_unmasked:
+; CHECK: # %bb.0:
+; CHECK-NEXT: li a1, -1
+; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma
+; CHECK-NEXT: vssub.vx v8, v8, a1
+; CHECK-NEXT: ret
+ %elt.head = insertelement <vscale x 4 x i16> poison, i16 -1, i32 0
+ %vb = shufflevector <vscale x 4 x i16> %elt.head, <vscale x 4 x i16> poison, <vscale x 4 x i32> zeroinitializer
+ %head = insertelement <vscale x 4 x i1> poison, i1 true, i32 0
+ %m = shufflevector <vscale x 4 x i1> %head, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
+ %v = call <vscale x 4 x i16> @llvm.vp.ssub.sat.nxv4i16(<vscale x 4 x i16> %va, <vscale x 4 x i16> %vb, <vscale x 4 x i1> %m, i32 %evl)
+ ret <vscale x 4 x i16> %v
+}
+
+declare <vscale x 8 x i16> @llvm.vp.ssub.sat.nxv8i16(<vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i1>, i32)
+
+define <vscale x 8 x i16> @vssub_vv_nxv8i16(<vscale x 8 x i16> %va, <vscale x 8 x i16> %b, <vscale x 8 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vssub_vv_nxv8i16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma
+; CHECK-NEXT: vssub.vv v8, v8, v10, v0.t
+; CHECK-NEXT: ret
+ %v = call <vscale x 8 x i16> @llvm.vp.ssub.sat.nxv8i16(<vscale x 8 x i16> %va, <vscale x 8 x i16> %b, <vscale x 8 x i1> %m, i32 %evl)
+ ret <vscale x 8 x i16> %v
+}
+
+define <vscale x 8 x i16> @vssub_vv_nxv8i16_unmasked(<vscale x 8 x i16> %va, <vscale x 8 x i16> %b, i32 zeroext %evl) {
+; CHECK-LABEL: vssub_vv_nxv8i16_unmasked:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma
+; CHECK-NEXT: vssub.vv v8, v8, v10
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i1> poison, i1 true, i32 0
+ %m = shufflevector <vscale x 8 x i1> %head, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
+ %v = call <vscale x 8 x i16> @llvm.vp.ssub.sat.nxv8i16(<vscale x 8 x i16> %va, <vscale x 8 x i16> %b, <vscale x 8 x i1> %m, i32 %evl)
+ ret <vscale x 8 x i16> %v
+}
+
+define <vscale x 8 x i16> @vssub_vx_nxv8i16(<vscale x 8 x i16> %va, i16 %b, <vscale x 8 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vssub_vx_nxv8i16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma
+; CHECK-NEXT: vssub.vx v8, v8, a0, v0.t
+; CHECK-NEXT: ret
+ %elt.head = insertelement <vscale x 8 x i16> poison, i16 %b, i32 0
+ %vb = shufflevector <vscale x 8 x i16> %elt.head, <vscale x 8 x i16> poison, <vscale x 8 x i32> zeroinitializer
+ %v = call <vscale x 8 x i16> @llvm.vp.ssub.sat.nxv8i16(<vscale x 8 x i16> %va, <vscale x 8 x i16> %vb, <vscale x 8 x i1> %m, i32 %evl)
+ ret <vscale x 8 x i16> %v
+}
+
+define <vscale x 8 x i16> @vssub_vx_nxv8i16_unmasked(<vscale x 8 x i16> %va, i16 %b, i32 zeroext %evl) {
+; CHECK-LABEL: vssub_vx_nxv8i16_unmasked:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma
+; CHECK-NEXT: vssub.vx v8, v8, a0
+; CHECK-NEXT: ret
+ %elt.head = insertelement <vscale x 8 x i16> poison, i16 %b, i32 0
+ %vb = shufflevector <vscale x 8 x i16> %elt.head, <vscale x 8 x i16> poison, <vscale x 8 x i32> zeroinitializer
+ %head = insertelement <vscale x 8 x i1> poison, i1 true, i32 0
+ %m = shufflevector <vscale x 8 x i1> %head, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
+ %v = call <vscale x 8 x i16> @llvm.vp.ssub.sat.nxv8i16(<vscale x 8 x i16> %va, <vscale x 8 x i16> %vb, <vscale x 8 x i1> %m, i32 %evl)
+ ret <vscale x 8 x i16> %v
+}
+
+define <vscale x 8 x i16> @vssub_vi_nxv8i16(<vscale x 8 x i16> %va, <vscale x 8 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vssub_vi_nxv8i16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: li a1, -1
+; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma
+; CHECK-NEXT: vssub.vx v8, v8, a1, v0.t
+; CHECK-NEXT: ret
+ %elt.head = insertelement <vscale x 8 x i16> poison, i16 -1, i32 0
+ %vb = shufflevector <vscale x 8 x i16> %elt.head, <vscale x 8 x i16> poison, <vscale x 8 x i32> zeroinitializer
+ %v = call <vscale x 8 x i16> @llvm.vp.ssub.sat.nxv8i16(<vscale x 8 x i16> %va, <vscale x 8 x i16> %vb, <vscale x 8 x i1> %m, i32 %evl)
+ ret <vscale x 8 x i16> %v
+}
+
+define <vscale x 8 x i16> @vssub_vi_nxv8i16_unmasked(<vscale x 8 x i16> %va, i32 zeroext %evl) {
+; CHECK-LABEL: vssub_vi_nxv8i16_unmasked:
+; CHECK: # %bb.0:
+; CHECK-NEXT: li a1, -1
+; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma
+; CHECK-NEXT: vssub.vx v8, v8, a1
+; CHECK-NEXT: ret
+ %elt.head = insertelement <vscale x 8 x i16> poison, i16 -1, i32 0
+ %vb = shufflevector <vscale x 8 x i16> %elt.head, <vscale x 8 x i16> poison, <vscale x 8 x i32> zeroinitializer
+ %head = insertelement <vscale x 8 x i1> poison, i1 true, i32 0
+ %m = shufflevector <vscale x 8 x i1> %head, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
+ %v = call <vscale x 8 x i16> @llvm.vp.ssub.sat.nxv8i16(<vscale x 8 x i16> %va, <vscale x 8 x i16> %vb, <vscale x 8 x i1> %m, i32 %evl)
+ ret <vscale x 8 x i16> %v
+}
+
+declare <vscale x 16 x i16> @llvm.vp.ssub.sat.nxv16i16(<vscale x 16 x i16>, <vscale x 16 x i16>, <vscale x 16 x i1>, i32)
+
+define <vscale x 16 x i16> @vssub_vv_nxv16i16(<vscale x 16 x i16> %va, <vscale x 16 x i16> %b, <vscale x 16 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vssub_vv_nxv16i16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma
+; CHECK-NEXT: vssub.vv v8, v8, v12, v0.t
+; CHECK-NEXT: ret
+ %v = call <vscale x 16 x i16> @llvm.vp.ssub.sat.nxv16i16(<vscale x 16 x i16> %va, <vscale x 16 x i16> %b, <vscale x 16 x i1> %m, i32 %evl)
+ ret <vscale x 16 x i16> %v
+}
+
+define <vscale x 16 x i16> @vssub_vv_nxv16i16_unmasked(<vscale x 16 x i16> %va, <vscale x 16 x i16> %b, i32 zeroext %evl) {
+; CHECK-LABEL: vssub_vv_nxv16i16_unmasked:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma
+; CHECK-NEXT: vssub.vv v8, v8, v12
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 16 x i1> poison, i1 true, i32 0
+ %m = shufflevector <vscale x 16 x i1> %head, <vscale x 16 x i1> poison, <vscale x 16 x i32> zeroinitializer
+ %v = call <vscale x 16 x i16> @llvm.vp.ssub.sat.nxv16i16(<vscale x 16 x i16> %va, <vscale x 16 x i16> %b, <vscale x 16 x i1> %m, i32 %evl)
+ ret <vscale x 16 x i16> %v
+}
+
+define <vscale x 16 x i16> @vssub_vx_nxv16i16(<vscale x 16 x i16> %va, i16 %b, <vscale x 16 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vssub_vx_nxv16i16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma
+; CHECK-NEXT: vssub.vx v8, v8, a0, v0.t
+; CHECK-NEXT: ret
+ %elt.head = insertelement <vscale x 16 x i16> poison, i16 %b, i32 0
+ %vb = shufflevector <vscale x 16 x i16> %elt.head, <vscale x 16 x i16> poison, <vscale x 16 x i32> zeroinitializer
+ %v = call <vscale x 16 x i16> @llvm.vp.ssub.sat.nxv16i16(<vscale x 16 x i16> %va, <vscale x 16 x i16> %vb, <vscale x 16 x i1> %m, i32 %evl)
+ ret <vscale x 16 x i16> %v
+}
+
+define <vscale x 16 x i16> @vssub_vx_nxv16i16_unmasked(<vscale x 16 x i16> %va, i16 %b, i32 zeroext %evl) {
+; CHECK-LABEL: vssub_vx_nxv16i16_unmasked:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma
+; CHECK-NEXT: vssub.vx v8, v8, a0
+; CHECK-NEXT: ret
+ %elt.head = insertelement <vscale x 16 x i16> poison, i16 %b, i32 0
+ %vb = shufflevector <vscale x 16 x i16> %elt.head, <vscale x 16 x i16> poison, <vscale x 16 x i32> zeroinitializer
+ %head = insertelement <vscale x 16 x i1> poison, i1 true, i32 0
+ %m = shufflevector <vscale x 16 x i1> %head, <vscale x 16 x i1> poison, <vscale x 16 x i32> zeroinitializer
+ %v = call <vscale x 16 x i16> @llvm.vp.ssub.sat.nxv16i16(<vscale x 16 x i16> %va, <vscale x 16 x i16> %vb, <vscale x 16 x i1> %m, i32 %evl)
+ ret <vscale x 16 x i16> %v
+}
+
+define <vscale x 16 x i16> @vssub_vi_nxv16i16(<vscale x 16 x i16> %va, <vscale x 16 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vssub_vi_nxv16i16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: li a1, -1
+; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma
+; CHECK-NEXT: vssub.vx v8, v8, a1, v0.t
+; CHECK-NEXT: ret
+ %elt.head = insertelement <vscale x 16 x i16> poison, i16 -1, i32 0
+ %vb = shufflevector <vscale x 16 x i16> %elt.head, <vscale x 16 x i16> poison, <vscale x 16 x i32> zeroinitializer
+ %v = call <vscale x 16 x i16> @llvm.vp.ssub.sat.nxv16i16(<vscale x 16 x i16> %va, <vscale x 16 x i16> %vb, <vscale x 16 x i1> %m, i32 %evl)
+ ret <vscale x 16 x i16> %v
+}
+
+define <vscale x 16 x i16> @vssub_vi_nxv16i16_unmasked(<vscale x 16 x i16> %va, i32 zeroext %evl) {
+; CHECK-LABEL: vssub_vi_nxv16i16_unmasked:
+; CHECK: # %bb.0:
+; CHECK-NEXT: li a1, -1
+; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma
+; CHECK-NEXT: vssub.vx v8, v8, a1
+; CHECK-NEXT: ret
+ %elt.head = insertelement <vscale x 16 x i16> poison, i16 -1, i32 0
+ %vb = shufflevector <vscale x 16 x i16> %elt.head, <vscale x 16 x i16> poison, <vscale x 16 x i32> zeroinitializer
+ %head = insertelement <vscale x 16 x i1> poison, i1 true, i32 0
+ %m = shufflevector <vscale x 16 x i1> %head, <vscale x 16 x i1> poison, <vscale x 16 x i32> zeroinitializer
+ %v = call <vscale x 16 x i16> @llvm.vp.ssub.sat.nxv16i16(<vscale x 16 x i16> %va, <vscale x 16 x i16> %vb, <vscale x 16 x i1> %m, i32 %evl)
+ ret <vscale x 16 x i16> %v
+}
+
+declare <vscale x 32 x i16> @llvm.vp.ssub.sat.nxv32i16(<vscale x 32 x i16>, <vscale x 32 x i16>, <vscale x 32 x i1>, i32)
+
+define <vscale x 32 x i16> @vssub_vv_nxv32i16(<vscale x 32 x i16> %va, <vscale x 32 x i16> %b, <vscale x 32 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vssub_vv_nxv32i16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, ma
+; CHECK-NEXT: vssub.vv v8, v8, v16, v0.t
+; CHECK-NEXT: ret
+ %v = call <vscale x 32 x i16> @llvm.vp.ssub.sat.nxv32i16(<vscale x 32 x i16> %va, <vscale x 32 x i16> %b, <vscale x 32 x i1> %m, i32 %evl)
+ ret <vscale x 32 x i16> %v
+}
+
+define <vscale x 32 x i16> @vssub_vv_nxv32i16_unmasked(<vscale x 32 x i16> %va, <vscale x 32 x i16> %b, i32 zeroext %evl) {
+; CHECK-LABEL: vssub_vv_nxv32i16_unmasked:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, ma
+; CHECK-NEXT: vssub.vv v8, v8, v16
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 32 x i1> poison, i1 true, i32 0
+ %m = shufflevector <vscale x 32 x i1> %head, <vscale x 32 x i1> poison, <vscale x 32 x i32> zeroinitializer
+ %v = call <vscale x 32 x i16> @llvm.vp.ssub.sat.nxv32i16(<vscale x 32 x i16> %va, <vscale x 32 x i16> %b, <vscale x 32 x i1> %m, i32 %evl)
+ ret <vscale x 32 x i16> %v
+}
+
+define <vscale x 32 x i16> @vssub_vx_nxv32i16(<vscale x 32 x i16> %va, i16 %b, <vscale x 32 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vssub_vx_nxv32i16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, ma
+; CHECK-NEXT: vssub.vx v8, v8, a0, v0.t
+; CHECK-NEXT: ret
+ %elt.head = insertelement <vscale x 32 x i16> poison, i16 %b, i32 0
+ %vb = shufflevector <vscale x 32 x i16> %elt.head, <vscale x 32 x i16> poison, <vscale x 32 x i32> zeroinitializer
+ %v = call <vscale x 32 x i16> @llvm.vp.ssub.sat.nxv32i16(<vscale x 32 x i16> %va, <vscale x 32 x i16> %vb, <vscale x 32 x i1> %m, i32 %evl)
+ ret <vscale x 32 x i16> %v
+}
+
+define <vscale x 32 x i16> @vssub_vx_nxv32i16_unmasked(<vscale x 32 x i16> %va, i16 %b, i32 zeroext %evl) {
+; CHECK-LABEL: vssub_vx_nxv32i16_unmasked:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, ma
+; CHECK-NEXT: vssub.vx v8, v8, a0
+; CHECK-NEXT: ret
+ %elt.head = insertelement <vscale x 32 x i16> poison, i16 %b, i32 0
+ %vb = shufflevector <vscale x 32 x i16> %elt.head, <vscale x 32 x i16> poison, <vscale x 32 x i32> zeroinitializer
+ %head = insertelement <vscale x 32 x i1> poison, i1 true, i32 0
+ %m = shufflevector <vscale x 32 x i1> %head, <vscale x 32 x i1> poison, <vscale x 32 x i32> zeroinitializer
+ %v = call <vscale x 32 x i16> @llvm.vp.ssub.sat.nxv32i16(<vscale x 32 x i16> %va, <vscale x 32 x i16> %vb, <vscale x 32 x i1> %m, i32 %evl)
+ ret <vscale x 32 x i16> %v
+}
+
+define <vscale x 32 x i16> @vssub_vi_nxv32i16(<vscale x 32 x i16> %va, <vscale x 32 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vssub_vi_nxv32i16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: li a1, -1
+; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, ma
+; CHECK-NEXT: vssub.vx v8, v8, a1, v0.t
+; CHECK-NEXT: ret
+ %elt.head = insertelement <vscale x 32 x i16> poison, i16 -1, i32 0
+ %vb = shufflevector <vscale x 32 x i16> %elt.head, <vscale x 32 x i16> poison, <vscale x 32 x i32> zeroinitializer
+ %v = call <vscale x 32 x i16> @llvm.vp.ssub.sat.nxv32i16(<vscale x 32 x i16> %va, <vscale x 32 x i16> %vb, <vscale x 32 x i1> %m, i32 %evl)
+ ret <vscale x 32 x i16> %v
+}
+
+define <vscale x 32 x i16> @vssub_vi_nxv32i16_unmasked(<vscale x 32 x i16> %va, i32 zeroext %evl) {
+; CHECK-LABEL: vssub_vi_nxv32i16_unmasked:
+; CHECK: # %bb.0:
+; CHECK-NEXT: li a1, -1
+; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, ma
+; CHECK-NEXT: vssub.vx v8, v8, a1
+; CHECK-NEXT: ret
+ %elt.head = insertelement <vscale x 32 x i16> poison, i16 -1, i32 0
+ %vb = shufflevector <vscale x 32 x i16> %elt.head, <vscale x 32 x i16> poison, <vscale x 32 x i32> zeroinitializer
+ %head = insertelement <vscale x 32 x i1> poison, i1 true, i32 0
+ %m = shufflevector <vscale x 32 x i1> %head, <vscale x 32 x i1> poison, <vscale x 32 x i32> zeroinitializer
+ %v = call <vscale x 32 x i16> @llvm.vp.ssub.sat.nxv32i16(<vscale x 32 x i16> %va, <vscale x 32 x i16> %vb, <vscale x 32 x i1> %m, i32 %evl)
+ ret <vscale x 32 x i16> %v
+}
+
+declare <vscale x 1 x i32> @llvm.vp.ssub.sat.nxv1i32(<vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i1>, i32)
+
+define <vscale x 1 x i32> @vssub_vv_nxv1i32(<vscale x 1 x i32> %va, <vscale x 1 x i32> %b, <vscale x 1 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vssub_vv_nxv1i32:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma
+; CHECK-NEXT: vssub.vv v8, v8, v9, v0.t
+; CHECK-NEXT: ret
+ %v = call <vscale x 1 x i32> @llvm.vp.ssub.sat.nxv1i32(<vscale x 1 x i32> %va, <vscale x 1 x i32> %b, <vscale x 1 x i1> %m, i32 %evl)
+ ret <vscale x 1 x i32> %v
+}
+
+define <vscale x 1 x i32> @vssub_vv_nxv1i32_unmasked(<vscale x 1 x i32> %va, <vscale x 1 x i32> %b, i32 zeroext %evl) {
+; CHECK-LABEL: vssub_vv_nxv1i32_unmasked:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma
+; CHECK-NEXT: vssub.vv v8, v8, v9
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 1 x i1> poison, i1 true, i32 0
+ %m = shufflevector <vscale x 1 x i1> %head, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
+ %v = call <vscale x 1 x i32> @llvm.vp.ssub.sat.nxv1i32(<vscale x 1 x i32> %va, <vscale x 1 x i32> %b, <vscale x 1 x i1> %m, i32 %evl)
+ ret <vscale x 1 x i32> %v
+}
+
+define <vscale x 1 x i32> @vssub_vx_nxv1i32(<vscale x 1 x i32> %va, i32 %b, <vscale x 1 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vssub_vx_nxv1i32:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
+; CHECK-NEXT: vssub.vx v8, v8, a0, v0.t
+; CHECK-NEXT: ret
+ %elt.head = insertelement <vscale x 1 x i32> poison, i32 %b, i32 0
+ %vb = shufflevector <vscale x 1 x i32> %elt.head, <vscale x 1 x i32> poison, <vscale x 1 x i32> zeroinitializer
+ %v = call <vscale x 1 x i32> @llvm.vp.ssub.sat.nxv1i32(<vscale x 1 x i32> %va, <vscale x 1 x i32> %vb, <vscale x 1 x i1> %m, i32 %evl)
+ ret <vscale x 1 x i32> %v
+}
+
+define <vscale x 1 x i32> @vssub_vx_nxv1i32_unmasked(<vscale x 1 x i32> %va, i32 %b, i32 zeroext %evl) {
+; CHECK-LABEL: vssub_vx_nxv1i32_unmasked:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
+; CHECK-NEXT: vssub.vx v8, v8, a0
+; CHECK-NEXT: ret
+ %elt.head = insertelement <vscale x 1 x i32> poison, i32 %b, i32 0
+ %vb = shufflevector <vscale x 1 x i32> %elt.head, <vscale x 1 x i32> poison, <vscale x 1 x i32> zeroinitializer
+ %head = insertelement <vscale x 1 x i1> poison, i1 true, i32 0
+ %m = shufflevector <vscale x 1 x i1> %head, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
+ %v = call <vscale x 1 x i32> @llvm.vp.ssub.sat.nxv1i32(<vscale x 1 x i32> %va, <vscale x 1 x i32> %vb, <vscale x 1 x i1> %m, i32 %evl)
+ ret <vscale x 1 x i32> %v
+}
+
+define <vscale x 1 x i32> @vssub_vi_nxv1i32(<vscale x 1 x i32> %va, <vscale x 1 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vssub_vi_nxv1i32:
+; CHECK: # %bb.0:
+; CHECK-NEXT: li a1, -1
+; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma
+; CHECK-NEXT: vssub.vx v8, v8, a1, v0.t
+; CHECK-NEXT: ret
+ %elt.head = insertelement <vscale x 1 x i32> poison, i32 -1, i32 0
+ %vb = shufflevector <vscale x 1 x i32> %elt.head, <vscale x 1 x i32> poison, <vscale x 1 x i32> zeroinitializer
+ %v = call <vscale x 1 x i32> @llvm.vp.ssub.sat.nxv1i32(<vscale x 1 x i32> %va, <vscale x 1 x i32> %vb, <vscale x 1 x i1> %m, i32 %evl)
+ ret <vscale x 1 x i32> %v
+}
+
+define <vscale x 1 x i32> @vssub_vi_nxv1i32_unmasked(<vscale x 1 x i32> %va, i32 zeroext %evl) {
+; CHECK-LABEL: vssub_vi_nxv1i32_unmasked:
+; CHECK: # %bb.0:
+; CHECK-NEXT: li a1, -1
+; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma
+; CHECK-NEXT: vssub.vx v8, v8, a1
+; CHECK-NEXT: ret
+ %elt.head = insertelement <vscale x 1 x i32> poison, i32 -1, i32 0
+ %vb = shufflevector <vscale x 1 x i32> %elt.head, <vscale x 1 x i32> poison, <vscale x 1 x i32> zeroinitializer
+ %head = insertelement <vscale x 1 x i1> poison, i1 true, i32 0
+ %m = shufflevector <vscale x 1 x i1> %head, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
+ %v = call <vscale x 1 x i32> @llvm.vp.ssub.sat.nxv1i32(<vscale x 1 x i32> %va, <vscale x 1 x i32> %vb, <vscale x 1 x i1> %m, i32 %evl)
+ ret <vscale x 1 x i32> %v
+}
+
+declare <vscale x 2 x i32> @llvm.vp.ssub.sat.nxv2i32(<vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i1>, i32)
+
+define <vscale x 2 x i32> @vssub_vv_nxv2i32(<vscale x 2 x i32> %va, <vscale x 2 x i32> %b, <vscale x 2 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vssub_vv_nxv2i32:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma
+; CHECK-NEXT: vssub.vv v8, v8, v9, v0.t
+; CHECK-NEXT: ret
+ %v = call <vscale x 2 x i32> @llvm.vp.ssub.sat.nxv2i32(<vscale x 2 x i32> %va, <vscale x 2 x i32> %b, <vscale x 2 x i1> %m, i32 %evl)
+ ret <vscale x 2 x i32> %v
+}
+
+define <vscale x 2 x i32> @vssub_vv_nxv2i32_unmasked(<vscale x 2 x i32> %va, <vscale x 2 x i32> %b, i32 zeroext %evl) {
+; CHECK-LABEL: vssub_vv_nxv2i32_unmasked:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma
+; CHECK-NEXT: vssub.vv v8, v8, v9
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 2 x i1> poison, i1 true, i32 0
+ %m = shufflevector <vscale x 2 x i1> %head, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
+ %v = call <vscale x 2 x i32> @llvm.vp.ssub.sat.nxv2i32(<vscale x 2 x i32> %va, <vscale x 2 x i32> %b, <vscale x 2 x i1> %m, i32 %evl)
+ ret <vscale x 2 x i32> %v
+}
+
+define <vscale x 2 x i32> @vssub_vx_nxv2i32(<vscale x 2 x i32> %va, i32 %b, <vscale x 2 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vssub_vx_nxv2i32:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
+; CHECK-NEXT: vssub.vx v8, v8, a0, v0.t
+; CHECK-NEXT: ret
+ %elt.head = insertelement <vscale x 2 x i32> poison, i32 %b, i32 0
+ %vb = shufflevector <vscale x 2 x i32> %elt.head, <vscale x 2 x i32> poison, <vscale x 2 x i32> zeroinitializer
+ %v = call <vscale x 2 x i32> @llvm.vp.ssub.sat.nxv2i32(<vscale x 2 x i32> %va, <vscale x 2 x i32> %vb, <vscale x 2 x i1> %m, i32 %evl)
+ ret <vscale x 2 x i32> %v
+}
+
+define <vscale x 2 x i32> @vssub_vx_nxv2i32_unmasked(<vscale x 2 x i32> %va, i32 %b, i32 zeroext %evl) {
+; CHECK-LABEL: vssub_vx_nxv2i32_unmasked:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
+; CHECK-NEXT: vssub.vx v8, v8, a0
+; CHECK-NEXT: ret
+ %elt.head = insertelement <vscale x 2 x i32> poison, i32 %b, i32 0
+ %vb = shufflevector <vscale x 2 x i32> %elt.head, <vscale x 2 x i32> poison, <vscale x 2 x i32> zeroinitializer
+ %head = insertelement <vscale x 2 x i1> poison, i1 true, i32 0
+ %m = shufflevector <vscale x 2 x i1> %head, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
+ %v = call <vscale x 2 x i32> @llvm.vp.ssub.sat.nxv2i32(<vscale x 2 x i32> %va, <vscale x 2 x i32> %vb, <vscale x 2 x i1> %m, i32 %evl)
+ ret <vscale x 2 x i32> %v
+}
+
+define <vscale x 2 x i32> @vssub_vi_nxv2i32(<vscale x 2 x i32> %va, <vscale x 2 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vssub_vi_nxv2i32:
+; CHECK: # %bb.0:
+; CHECK-NEXT: li a1, -1
+; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma
+; CHECK-NEXT: vssub.vx v8, v8, a1, v0.t
+; CHECK-NEXT: ret
+ %elt.head = insertelement <vscale x 2 x i32> poison, i32 -1, i32 0
+ %vb = shufflevector <vscale x 2 x i32> %elt.head, <vscale x 2 x i32> poison, <vscale x 2 x i32> zeroinitializer
+ %v = call <vscale x 2 x i32> @llvm.vp.ssub.sat.nxv2i32(<vscale x 2 x i32> %va, <vscale x 2 x i32> %vb, <vscale x 2 x i1> %m, i32 %evl)
+ ret <vscale x 2 x i32> %v
+}
+
+define <vscale x 2 x i32> @vssub_vi_nxv2i32_unmasked(<vscale x 2 x i32> %va, i32 zeroext %evl) {
+; CHECK-LABEL: vssub_vi_nxv2i32_unmasked:
+; CHECK: # %bb.0:
+; CHECK-NEXT: li a1, -1
+; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma
+; CHECK-NEXT: vssub.vx v8, v8, a1
+; CHECK-NEXT: ret
+ %elt.head = insertelement <vscale x 2 x i32> poison, i32 -1, i32 0
+ %vb = shufflevector <vscale x 2 x i32> %elt.head, <vscale x 2 x i32> poison, <vscale x 2 x i32> zeroinitializer
+ %head = insertelement <vscale x 2 x i1> poison, i1 true, i32 0
+ %m = shufflevector <vscale x 2 x i1> %head, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
+ %v = call <vscale x 2 x i32> @llvm.vp.ssub.sat.nxv2i32(<vscale x 2 x i32> %va, <vscale x 2 x i32> %vb, <vscale x 2 x i1> %m, i32 %evl)
+ ret <vscale x 2 x i32> %v
+}
+
+declare <vscale x 4 x i32> @llvm.vp.ssub.sat.nxv4i32(<vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i1>, i32)
+
+define <vscale x 4 x i32> @vssub_vv_nxv4i32(<vscale x 4 x i32> %va, <vscale x 4 x i32> %b, <vscale x 4 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vssub_vv_nxv4i32:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma
+; CHECK-NEXT: vssub.vv v8, v8, v10, v0.t
+; CHECK-NEXT: ret
+ %v = call <vscale x 4 x i32> @llvm.vp.ssub.sat.nxv4i32(<vscale x 4 x i32> %va, <vscale x 4 x i32> %b, <vscale x 4 x i1> %m, i32 %evl)
+ ret <vscale x 4 x i32> %v
+}
+
+define <vscale x 4 x i32> @vssub_vv_nxv4i32_unmasked(<vscale x 4 x i32> %va, <vscale x 4 x i32> %b, i32 zeroext %evl) {
+; CHECK-LABEL: vssub_vv_nxv4i32_unmasked:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma
+; CHECK-NEXT: vssub.vv v8, v8, v10
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 4 x i1> poison, i1 true, i32 0
+ %m = shufflevector <vscale x 4 x i1> %head, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
+ %v = call <vscale x 4 x i32> @llvm.vp.ssub.sat.nxv4i32(<vscale x 4 x i32> %va, <vscale x 4 x i32> %b, <vscale x 4 x i1> %m, i32 %evl)
+ ret <vscale x 4 x i32> %v
+}
+
+define <vscale x 4 x i32> @vssub_vx_nxv4i32(<vscale x 4 x i32> %va, i32 %b, <vscale x 4 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vssub_vx_nxv4i32:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma
+; CHECK-NEXT: vssub.vx v8, v8, a0, v0.t
+; CHECK-NEXT: ret
+ %elt.head = insertelement <vscale x 4 x i32> poison, i32 %b, i32 0
+ %vb = shufflevector <vscale x 4 x i32> %elt.head, <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer
+ %v = call <vscale x 4 x i32> @llvm.vp.ssub.sat.nxv4i32(<vscale x 4 x i32> %va, <vscale x 4 x i32> %vb, <vscale x 4 x i1> %m, i32 %evl)
+ ret <vscale x 4 x i32> %v
+}
+
+define <vscale x 4 x i32> @vssub_vx_nxv4i32_unmasked(<vscale x 4 x i32> %va, i32 %b, i32 zeroext %evl) {
+; CHECK-LABEL: vssub_vx_nxv4i32_unmasked:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma
+; CHECK-NEXT: vssub.vx v8, v8, a0
+; CHECK-NEXT: ret
+ %elt.head = insertelement <vscale x 4 x i32> poison, i32 %b, i32 0
+ %vb = shufflevector <vscale x 4 x i32> %elt.head, <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer
+ %head = insertelement <vscale x 4 x i1> poison, i1 true, i32 0
+ %m = shufflevector <vscale x 4 x i1> %head, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
+ %v = call <vscale x 4 x i32> @llvm.vp.ssub.sat.nxv4i32(<vscale x 4 x i32> %va, <vscale x 4 x i32> %vb, <vscale x 4 x i1> %m, i32 %evl)
+ ret <vscale x 4 x i32> %v
+}
+
+define <vscale x 4 x i32> @vssub_vi_nxv4i32(<vscale x 4 x i32> %va, <vscale x 4 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vssub_vi_nxv4i32:
+; CHECK: # %bb.0:
+; CHECK-NEXT: li a1, -1
+; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma
+; CHECK-NEXT: vssub.vx v8, v8, a1, v0.t
+; CHECK-NEXT: ret
+ %elt.head = insertelement <vscale x 4 x i32> poison, i32 -1, i32 0
+ %vb = shufflevector <vscale x 4 x i32> %elt.head, <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer
+ %v = call <vscale x 4 x i32> @llvm.vp.ssub.sat.nxv4i32(<vscale x 4 x i32> %va, <vscale x 4 x i32> %vb, <vscale x 4 x i1> %m, i32 %evl)
+ ret <vscale x 4 x i32> %v
+}
+
+define <vscale x 4 x i32> @vssub_vi_nxv4i32_unmasked(<vscale x 4 x i32> %va, i32 zeroext %evl) {
+; CHECK-LABEL: vssub_vi_nxv4i32_unmasked:
+; CHECK: # %bb.0:
+; CHECK-NEXT: li a1, -1
+; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma
+; CHECK-NEXT: vssub.vx v8, v8, a1
+; CHECK-NEXT: ret
+ %elt.head = insertelement <vscale x 4 x i32> poison, i32 -1, i32 0
+ %vb = shufflevector <vscale x 4 x i32> %elt.head, <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer
+ %head = insertelement <vscale x 4 x i1> poison, i1 true, i32 0
+ %m = shufflevector <vscale x 4 x i1> %head, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
+ %v = call <vscale x 4 x i32> @llvm.vp.ssub.sat.nxv4i32(<vscale x 4 x i32> %va, <vscale x 4 x i32> %vb, <vscale x 4 x i1> %m, i32 %evl)
+ ret <vscale x 4 x i32> %v
+}
+
+declare <vscale x 8 x i32> @llvm.vp.ssub.sat.nxv8i32(<vscale x 8 x i32>, <vscale x 8 x i32>, <vscale x 8 x i1>, i32)
+
+define <vscale x 8 x i32> @vssub_vv_nxv8i32(<vscale x 8 x i32> %va, <vscale x 8 x i32> %b, <vscale x 8 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vssub_vv_nxv8i32:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma
+; CHECK-NEXT: vssub.vv v8, v8, v12, v0.t
+; CHECK-NEXT: ret
+ %v = call <vscale x 8 x i32> @llvm.vp.ssub.sat.nxv8i32(<vscale x 8 x i32> %va, <vscale x 8 x i32> %b, <vscale x 8 x i1> %m, i32 %evl)
+ ret <vscale x 8 x i32> %v
+}
+
+define <vscale x 8 x i32> @vssub_vv_nxv8i32_unmasked(<vscale x 8 x i32> %va, <vscale x 8 x i32> %b, i32 zeroext %evl) {
+; CHECK-LABEL: vssub_vv_nxv8i32_unmasked:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma
+; CHECK-NEXT: vssub.vv v8, v8, v12
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i1> poison, i1 true, i32 0
+ %m = shufflevector <vscale x 8 x i1> %head, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
+ %v = call <vscale x 8 x i32> @llvm.vp.ssub.sat.nxv8i32(<vscale x 8 x i32> %va, <vscale x 8 x i32> %b, <vscale x 8 x i1> %m, i32 %evl)
+ ret <vscale x 8 x i32> %v
+}
+
+define <vscale x 8 x i32> @vssub_vx_nxv8i32(<vscale x 8 x i32> %va, i32 %b, <vscale x 8 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vssub_vx_nxv8i32:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma
+; CHECK-NEXT: vssub.vx v8, v8, a0, v0.t
+; CHECK-NEXT: ret
+ %elt.head = insertelement <vscale x 8 x i32> poison, i32 %b, i32 0
+ %vb = shufflevector <vscale x 8 x i32> %elt.head, <vscale x 8 x i32> poison, <vscale x 8 x i32> zeroinitializer
+ %v = call <vscale x 8 x i32> @llvm.vp.ssub.sat.nxv8i32(<vscale x 8 x i32> %va, <vscale x 8 x i32> %vb, <vscale x 8 x i1> %m, i32 %evl)
+ ret <vscale x 8 x i32> %v
+}
+
+define <vscale x 8 x i32> @vssub_vx_nxv8i32_unmasked(<vscale x 8 x i32> %va, i32 %b, i32 zeroext %evl) {
+; CHECK-LABEL: vssub_vx_nxv8i32_unmasked:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma
+; CHECK-NEXT: vssub.vx v8, v8, a0
+; CHECK-NEXT: ret
+ %elt.head = insertelement <vscale x 8 x i32> poison, i32 %b, i32 0
+ %vb = shufflevector <vscale x 8 x i32> %elt.head, <vscale x 8 x i32> poison, <vscale x 8 x i32> zeroinitializer
+ %head = insertelement <vscale x 8 x i1> poison, i1 true, i32 0
+ %m = shufflevector <vscale x 8 x i1> %head, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
+ %v = call <vscale x 8 x i32> @llvm.vp.ssub.sat.nxv8i32(<vscale x 8 x i32> %va, <vscale x 8 x i32> %vb, <vscale x 8 x i1> %m, i32 %evl)
+ ret <vscale x 8 x i32> %v
+}
+
+define <vscale x 8 x i32> @vssub_vi_nxv8i32(<vscale x 8 x i32> %va, <vscale x 8 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vssub_vi_nxv8i32:
+; CHECK: # %bb.0:
+; CHECK-NEXT: li a1, -1
+; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma
+; CHECK-NEXT: vssub.vx v8, v8, a1, v0.t
+; CHECK-NEXT: ret
+ %elt.head = insertelement <vscale x 8 x i32> poison, i32 -1, i32 0
+ %vb = shufflevector <vscale x 8 x i32> %elt.head, <vscale x 8 x i32> poison, <vscale x 8 x i32> zeroinitializer
+ %v = call <vscale x 8 x i32> @llvm.vp.ssub.sat.nxv8i32(<vscale x 8 x i32> %va, <vscale x 8 x i32> %vb, <vscale x 8 x i1> %m, i32 %evl)
+ ret <vscale x 8 x i32> %v
+}
+
+define <vscale x 8 x i32> @vssub_vi_nxv8i32_unmasked(<vscale x 8 x i32> %va, i32 zeroext %evl) {
+; CHECK-LABEL: vssub_vi_nxv8i32_unmasked:
+; CHECK: # %bb.0:
+; CHECK-NEXT: li a1, -1
+; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma
+; CHECK-NEXT: vssub.vx v8, v8, a1
+; CHECK-NEXT: ret
+ %elt.head = insertelement <vscale x 8 x i32> poison, i32 -1, i32 0
+ %vb = shufflevector <vscale x 8 x i32> %elt.head, <vscale x 8 x i32> poison, <vscale x 8 x i32> zeroinitializer
+ %head = insertelement <vscale x 8 x i1> poison, i1 true, i32 0
+ %m = shufflevector <vscale x 8 x i1> %head, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
+ %v = call <vscale x 8 x i32> @llvm.vp.ssub.sat.nxv8i32(<vscale x 8 x i32> %va, <vscale x 8 x i32> %vb, <vscale x 8 x i1> %m, i32 %evl)
+ ret <vscale x 8 x i32> %v
+}
+
+declare <vscale x 16 x i32> @llvm.vp.ssub.sat.nxv16i32(<vscale x 16 x i32>, <vscale x 16 x i32>, <vscale x 16 x i1>, i32)
+
+define <vscale x 16 x i32> @vssub_vv_nxv16i32(<vscale x 16 x i32> %va, <vscale x 16 x i32> %b, <vscale x 16 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vssub_vv_nxv16i32:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma
+; CHECK-NEXT: vssub.vv v8, v8, v16, v0.t
+; CHECK-NEXT: ret
+ %v = call <vscale x 16 x i32> @llvm.vp.ssub.sat.nxv16i32(<vscale x 16 x i32> %va, <vscale x 16 x i32> %b, <vscale x 16 x i1> %m, i32 %evl)
+ ret <vscale x 16 x i32> %v
+}
+
+define <vscale x 16 x i32> @vssub_vv_nxv16i32_unmasked(<vscale x 16 x i32> %va, <vscale x 16 x i32> %b, i32 zeroext %evl) {
+; CHECK-LABEL: vssub_vv_nxv16i32_unmasked:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma
+; CHECK-NEXT: vssub.vv v8, v8, v16
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 16 x i1> poison, i1 true, i32 0
+ %m = shufflevector <vscale x 16 x i1> %head, <vscale x 16 x i1> poison, <vscale x 16 x i32> zeroinitializer
+ %v = call <vscale x 16 x i32> @llvm.vp.ssub.sat.nxv16i32(<vscale x 16 x i32> %va, <vscale x 16 x i32> %b, <vscale x 16 x i1> %m, i32 %evl)
+ ret <vscale x 16 x i32> %v
+}
+
+define <vscale x 16 x i32> @vssub_vx_nxv16i32(<vscale x 16 x i32> %va, i32 %b, <vscale x 16 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vssub_vx_nxv16i32:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, ma
+; CHECK-NEXT: vssub.vx v8, v8, a0, v0.t
+; CHECK-NEXT: ret
+ %elt.head = insertelement <vscale x 16 x i32> poison, i32 %b, i32 0
+ %vb = shufflevector <vscale x 16 x i32> %elt.head, <vscale x 16 x i32> poison, <vscale x 16 x i32> zeroinitializer
+ %v = call <vscale x 16 x i32> @llvm.vp.ssub.sat.nxv16i32(<vscale x 16 x i32> %va, <vscale x 16 x i32> %vb, <vscale x 16 x i1> %m, i32 %evl)
+ ret <vscale x 16 x i32> %v
+}
+
+define <vscale x 16 x i32> @vssub_vx_nxv16i32_unmasked(<vscale x 16 x i32> %va, i32 %b, i32 zeroext %evl) {
+; CHECK-LABEL: vssub_vx_nxv16i32_unmasked:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, ma
+; CHECK-NEXT: vssub.vx v8, v8, a0
+; CHECK-NEXT: ret
+ %elt.head = insertelement <vscale x 16 x i32> poison, i32 %b, i32 0
+ %vb = shufflevector <vscale x 16 x i32> %elt.head, <vscale x 16 x i32> poison, <vscale x 16 x i32> zeroinitializer
+ %head = insertelement <vscale x 16 x i1> poison, i1 true, i32 0
+ %m = shufflevector <vscale x 16 x i1> %head, <vscale x 16 x i1> poison, <vscale x 16 x i32> zeroinitializer
+ %v = call <vscale x 16 x i32> @llvm.vp.ssub.sat.nxv16i32(<vscale x 16 x i32> %va, <vscale x 16 x i32> %vb, <vscale x 16 x i1> %m, i32 %evl)
+ ret <vscale x 16 x i32> %v
+}
+
+define <vscale x 16 x i32> @vssub_vi_nxv16i32(<vscale x 16 x i32> %va, <vscale x 16 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vssub_vi_nxv16i32:
+; CHECK: # %bb.0:
+; CHECK-NEXT: li a1, -1
+; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma
+; CHECK-NEXT: vssub.vx v8, v8, a1, v0.t
+; CHECK-NEXT: ret
+ %elt.head = insertelement <vscale x 16 x i32> poison, i32 -1, i32 0
+ %vb = shufflevector <vscale x 16 x i32> %elt.head, <vscale x 16 x i32> poison, <vscale x 16 x i32> zeroinitializer
+ %v = call <vscale x 16 x i32> @llvm.vp.ssub.sat.nxv16i32(<vscale x 16 x i32> %va, <vscale x 16 x i32> %vb, <vscale x 16 x i1> %m, i32 %evl)
+ ret <vscale x 16 x i32> %v
+}
+
+define <vscale x 16 x i32> @vssub_vi_nxv16i32_unmasked(<vscale x 16 x i32> %va, i32 zeroext %evl) {
+; CHECK-LABEL: vssub_vi_nxv16i32_unmasked:
+; CHECK: # %bb.0:
+; CHECK-NEXT: li a1, -1
+; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma
+; CHECK-NEXT: vssub.vx v8, v8, a1
+; CHECK-NEXT: ret
+ %elt.head = insertelement <vscale x 16 x i32> poison, i32 -1, i32 0
+ %vb = shufflevector <vscale x 16 x i32> %elt.head, <vscale x 16 x i32> poison, <vscale x 16 x i32> zeroinitializer
+ %head = insertelement <vscale x 16 x i1> poison, i1 true, i32 0
+ %m = shufflevector <vscale x 16 x i1> %head, <vscale x 16 x i1> poison, <vscale x 16 x i32> zeroinitializer
+ %v = call <vscale x 16 x i32> @llvm.vp.ssub.sat.nxv16i32(<vscale x 16 x i32> %va, <vscale x 16 x i32> %vb, <vscale x 16 x i1> %m, i32 %evl)
+ ret <vscale x 16 x i32> %v
+}
+
+; Test that split-legalization works when the mask needs manual splitting.
+
+declare <vscale x 32 x i32> @llvm.vp.ssub.sat.nxv32i32(<vscale x 32 x i32>, <vscale x 32 x i32>, <vscale x 32 x i1>, i32)
+
+define <vscale x 32 x i32> @vssub_vi_nxv32i32(<vscale x 32 x i32> %va, <vscale x 32 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vssub_vi_nxv32i32:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vmv1r.v v24, v0
+; CHECK-NEXT: csrr a2, vlenb
+; CHECK-NEXT: srli a1, a2, 2
+; CHECK-NEXT: vsetvli a3, zero, e8, mf2, ta, ma
+; CHECK-NEXT: vslidedown.vx v0, v0, a1
+; CHECK-NEXT: slli a2, a2, 1
+; CHECK-NEXT: sub a1, a0, a2
+; CHECK-NEXT: sltu a3, a0, a1
+; CHECK-NEXT: addi a3, a3, -1
+; CHECK-NEXT: and a3, a3, a1
+; CHECK-NEXT: li a1, -1
+; CHECK-NEXT: vsetvli zero, a3, e32, m8, ta, ma
+; CHECK-NEXT: vssub.vx v16, v16, a1, v0.t
+; CHECK-NEXT: bltu a0, a2, .LBB118_2
+; CHECK-NEXT: # %bb.1:
+; CHECK-NEXT: mv a0, a2
+; CHECK-NEXT: .LBB118_2:
+; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma
+; CHECK-NEXT: vmv1r.v v0, v24
+; CHECK-NEXT: vssub.vx v8, v8, a1, v0.t
+; CHECK-NEXT: ret
+ %elt.head = insertelement <vscale x 32 x i32> poison, i32 -1, i32 0
+ %vb = shufflevector <vscale x 32 x i32> %elt.head, <vscale x 32 x i32> poison, <vscale x 32 x i32> zeroinitializer
+ %v = call <vscale x 32 x i32> @llvm.vp.ssub.sat.nxv32i32(<vscale x 32 x i32> %va, <vscale x 32 x i32> %vb, <vscale x 32 x i1> %m, i32 %evl)
+ ret <vscale x 32 x i32> %v
+}
+
+define <vscale x 32 x i32> @vssub_vi_nxv32i32_unmasked(<vscale x 32 x i32> %va, i32 zeroext %evl) {
+; CHECK-LABEL: vssub_vi_nxv32i32_unmasked:
+; CHECK: # %bb.0:
+; CHECK-NEXT: csrr a1, vlenb
+; CHECK-NEXT: slli a1, a1, 1
+; CHECK-NEXT: sub a2, a0, a1
+; CHECK-NEXT: sltu a3, a0, a2
+; CHECK-NEXT: addi a3, a3, -1
+; CHECK-NEXT: and a3, a3, a2
+; CHECK-NEXT: li a2, -1
+; CHECK-NEXT: vsetvli zero, a3, e32, m8, ta, ma
+; CHECK-NEXT: vssub.vx v16, v16, a2
+; CHECK-NEXT: bltu a0, a1, .LBB119_2
+; CHECK-NEXT: # %bb.1:
+; CHECK-NEXT: mv a0, a1
+; CHECK-NEXT: .LBB119_2:
+; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma
+; CHECK-NEXT: vssub.vx v8, v8, a2
+; CHECK-NEXT: ret
+ %elt.head = insertelement <vscale x 32 x i32> poison, i32 -1, i32 0
+ %vb = shufflevector <vscale x 32 x i32> %elt.head, <vscale x 32 x i32> poison, <vscale x 32 x i32> zeroinitializer
+ %head = insertelement <vscale x 32 x i1> poison, i1 true, i32 0
+ %m = shufflevector <vscale x 32 x i1> %head, <vscale x 32 x i1> poison, <vscale x 32 x i32> zeroinitializer
+ %v = call <vscale x 32 x i32> @llvm.vp.ssub.sat.nxv32i32(<vscale x 32 x i32> %va, <vscale x 32 x i32> %vb, <vscale x 32 x i1> %m, i32 %evl)
+ ret <vscale x 32 x i32> %v
+}
+
+declare <vscale x 1 x i64> @llvm.vp.ssub.sat.nxv1i64(<vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i1>, i32)
+
+define <vscale x 1 x i64> @vssub_vv_nxv1i64(<vscale x 1 x i64> %va, <vscale x 1 x i64> %b, <vscale x 1 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vssub_vv_nxv1i64:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma
+; CHECK-NEXT: vssub.vv v8, v8, v9, v0.t
+; CHECK-NEXT: ret
+ %v = call <vscale x 1 x i64> @llvm.vp.ssub.sat.nxv1i64(<vscale x 1 x i64> %va, <vscale x 1 x i64> %b, <vscale x 1 x i1> %m, i32 %evl)
+ ret <vscale x 1 x i64> %v
+}
+
+define <vscale x 1 x i64> @vssub_vv_nxv1i64_unmasked(<vscale x 1 x i64> %va, <vscale x 1 x i64> %b, i32 zeroext %evl) {
+; CHECK-LABEL: vssub_vv_nxv1i64_unmasked:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma
+; CHECK-NEXT: vssub.vv v8, v8, v9
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 1 x i1> poison, i1 true, i32 0
+ %m = shufflevector <vscale x 1 x i1> %head, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
+ %v = call <vscale x 1 x i64> @llvm.vp.ssub.sat.nxv1i64(<vscale x 1 x i64> %va, <vscale x 1 x i64> %b, <vscale x 1 x i1> %m, i32 %evl)
+ ret <vscale x 1 x i64> %v
+}
+
+define <vscale x 1 x i64> @vssub_vx_nxv1i64(<vscale x 1 x i64> %va, i64 %b, <vscale x 1 x i1> %m, i32 zeroext %evl) {
+; RV32-LABEL: vssub_vx_nxv1i64:
+; RV32: # %bb.0:
+; RV32-NEXT: addi sp, sp, -16
+; RV32-NEXT: .cfi_def_cfa_offset 16
+; RV32-NEXT: sw a1, 12(sp)
+; RV32-NEXT: sw a0, 8(sp)
+; RV32-NEXT: addi a0, sp, 8
+; RV32-NEXT: vsetvli a1, zero, e64, m1, ta, ma
+; RV32-NEXT: vlse64.v v9, (a0), zero
+; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, ma
+; RV32-NEXT: vssub.vv v8, v8, v9, v0.t
+; RV32-NEXT: addi sp, sp, 16
+; RV32-NEXT: ret
+;
+; RV64-LABEL: vssub_vx_nxv1i64:
+; RV64: # %bb.0:
+; RV64-NEXT: vsetvli zero, a1, e64, m1, ta, ma
+; RV64-NEXT: vssub.vx v8, v8, a0, v0.t
+; RV64-NEXT: ret
+ %elt.head = insertelement <vscale x 1 x i64> poison, i64 %b, i32 0
+ %vb = shufflevector <vscale x 1 x i64> %elt.head, <vscale x 1 x i64> poison, <vscale x 1 x i32> zeroinitializer
+ %v = call <vscale x 1 x i64> @llvm.vp.ssub.sat.nxv1i64(<vscale x 1 x i64> %va, <vscale x 1 x i64> %vb, <vscale x 1 x i1> %m, i32 %evl)
+ ret <vscale x 1 x i64> %v
+}
+
+define <vscale x 1 x i64> @vssub_vx_nxv1i64_unmasked(<vscale x 1 x i64> %va, i64 %b, i32 zeroext %evl) {
+; RV32-LABEL: vssub_vx_nxv1i64_unmasked:
+; RV32: # %bb.0:
+; RV32-NEXT: addi sp, sp, -16
+; RV32-NEXT: .cfi_def_cfa_offset 16
+; RV32-NEXT: sw a1, 12(sp)
+; RV32-NEXT: sw a0, 8(sp)
+; RV32-NEXT: addi a0, sp, 8
+; RV32-NEXT: vsetvli a1, zero, e64, m1, ta, ma
+; RV32-NEXT: vlse64.v v9, (a0), zero
+; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, ma
+; RV32-NEXT: vssub.vv v8, v8, v9
+; RV32-NEXT: addi sp, sp, 16
+; RV32-NEXT: ret
+;
+; RV64-LABEL: vssub_vx_nxv1i64_unmasked:
+; RV64: # %bb.0:
+; RV64-NEXT: vsetvli zero, a1, e64, m1, ta, ma
+; RV64-NEXT: vssub.vx v8, v8, a0
+; RV64-NEXT: ret
+ %elt.head = insertelement <vscale x 1 x i64> poison, i64 %b, i32 0
+ %vb = shufflevector <vscale x 1 x i64> %elt.head, <vscale x 1 x i64> poison, <vscale x 1 x i32> zeroinitializer
+ %head = insertelement <vscale x 1 x i1> poison, i1 true, i32 0
+ %m = shufflevector <vscale x 1 x i1> %head, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
+ %v = call <vscale x 1 x i64> @llvm.vp.ssub.sat.nxv1i64(<vscale x 1 x i64> %va, <vscale x 1 x i64> %vb, <vscale x 1 x i1> %m, i32 %evl)
+ ret <vscale x 1 x i64> %v
+}
+
+define <vscale x 1 x i64> @vssub_vi_nxv1i64(<vscale x 1 x i64> %va, <vscale x 1 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vssub_vi_nxv1i64:
+; CHECK: # %bb.0:
+; CHECK-NEXT: li a1, -1
+; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma
+; CHECK-NEXT: vssub.vx v8, v8, a1, v0.t
+; CHECK-NEXT: ret
+ %elt.head = insertelement <vscale x 1 x i64> poison, i64 -1, i32 0
+ %vb = shufflevector <vscale x 1 x i64> %elt.head, <vscale x 1 x i64> poison, <vscale x 1 x i32> zeroinitializer
+ %v = call <vscale x 1 x i64> @llvm.vp.ssub.sat.nxv1i64(<vscale x 1 x i64> %va, <vscale x 1 x i64> %vb, <vscale x 1 x i1> %m, i32 %evl)
+ ret <vscale x 1 x i64> %v
+}
+
+define <vscale x 1 x i64> @vssub_vi_nxv1i64_unmasked(<vscale x 1 x i64> %va, i32 zeroext %evl) {
+; CHECK-LABEL: vssub_vi_nxv1i64_unmasked:
+; CHECK: # %bb.0:
+; CHECK-NEXT: li a1, -1
+; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma
+; CHECK-NEXT: vssub.vx v8, v8, a1
+; CHECK-NEXT: ret
+ %elt.head = insertelement <vscale x 1 x i64> poison, i64 -1, i32 0
+ %vb = shufflevector <vscale x 1 x i64> %elt.head, <vscale x 1 x i64> poison, <vscale x 1 x i32> zeroinitializer
+ %head = insertelement <vscale x 1 x i1> poison, i1 true, i32 0
+ %m = shufflevector <vscale x 1 x i1> %head, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
+ %v = call <vscale x 1 x i64> @llvm.vp.ssub.sat.nxv1i64(<vscale x 1 x i64> %va, <vscale x 1 x i64> %vb, <vscale x 1 x i1> %m, i32 %evl)
+ ret <vscale x 1 x i64> %v
+}
+
+declare <vscale x 2 x i64> @llvm.vp.ssub.sat.nxv2i64(<vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i1>, i32)
+
+define <vscale x 2 x i64> @vssub_vv_nxv2i64(<vscale x 2 x i64> %va, <vscale x 2 x i64> %b, <vscale x 2 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vssub_vv_nxv2i64:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma
+; CHECK-NEXT: vssub.vv v8, v8, v10, v0.t
+; CHECK-NEXT: ret
+ %v = call <vscale x 2 x i64> @llvm.vp.ssub.sat.nxv2i64(<vscale x 2 x i64> %va, <vscale x 2 x i64> %b, <vscale x 2 x i1> %m, i32 %evl)
+ ret <vscale x 2 x i64> %v
+}
+
+define <vscale x 2 x i64> @vssub_vv_nxv2i64_unmasked(<vscale x 2 x i64> %va, <vscale x 2 x i64> %b, i32 zeroext %evl) {
+; CHECK-LABEL: vssub_vv_nxv2i64_unmasked:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma
+; CHECK-NEXT: vssub.vv v8, v8, v10
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 2 x i1> poison, i1 true, i32 0
+ %m = shufflevector <vscale x 2 x i1> %head, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
+ %v = call <vscale x 2 x i64> @llvm.vp.ssub.sat.nxv2i64(<vscale x 2 x i64> %va, <vscale x 2 x i64> %b, <vscale x 2 x i1> %m, i32 %evl)
+ ret <vscale x 2 x i64> %v
+}
+
+define <vscale x 2 x i64> @vssub_vx_nxv2i64(<vscale x 2 x i64> %va, i64 %b, <vscale x 2 x i1> %m, i32 zeroext %evl) {
+; RV32-LABEL: vssub_vx_nxv2i64:
+; RV32: # %bb.0:
+; RV32-NEXT: addi sp, sp, -16
+; RV32-NEXT: .cfi_def_cfa_offset 16
+; RV32-NEXT: sw a1, 12(sp)
+; RV32-NEXT: sw a0, 8(sp)
+; RV32-NEXT: addi a0, sp, 8
+; RV32-NEXT: vsetvli a1, zero, e64, m2, ta, ma
+; RV32-NEXT: vlse64.v v10, (a0), zero
+; RV32-NEXT: vsetvli zero, a2, e64, m2, ta, ma
+; RV32-NEXT: vssub.vv v8, v8, v10, v0.t
+; RV32-NEXT: addi sp, sp, 16
+; RV32-NEXT: ret
+;
+; RV64-LABEL: vssub_vx_nxv2i64:
+; RV64: # %bb.0:
+; RV64-NEXT: vsetvli zero, a1, e64, m2, ta, ma
+; RV64-NEXT: vssub.vx v8, v8, a0, v0.t
+; RV64-NEXT: ret
+ %elt.head = insertelement <vscale x 2 x i64> poison, i64 %b, i32 0
+ %vb = shufflevector <vscale x 2 x i64> %elt.head, <vscale x 2 x i64> poison, <vscale x 2 x i32> zeroinitializer
+ %v = call <vscale x 2 x i64> @llvm.vp.ssub.sat.nxv2i64(<vscale x 2 x i64> %va, <vscale x 2 x i64> %vb, <vscale x 2 x i1> %m, i32 %evl)
+ ret <vscale x 2 x i64> %v
+}
+
+define <vscale x 2 x i64> @vssub_vx_nxv2i64_unmasked(<vscale x 2 x i64> %va, i64 %b, i32 zeroext %evl) {
+; RV32-LABEL: vssub_vx_nxv2i64_unmasked:
+; RV32: # %bb.0:
+; RV32-NEXT: addi sp, sp, -16
+; RV32-NEXT: .cfi_def_cfa_offset 16
+; RV32-NEXT: sw a1, 12(sp)
+; RV32-NEXT: sw a0, 8(sp)
+; RV32-NEXT: addi a0, sp, 8
+; RV32-NEXT: vsetvli a1, zero, e64, m2, ta, ma
+; RV32-NEXT: vlse64.v v10, (a0), zero
+; RV32-NEXT: vsetvli zero, a2, e64, m2, ta, ma
+; RV32-NEXT: vssub.vv v8, v8, v10
+; RV32-NEXT: addi sp, sp, 16
+; RV32-NEXT: ret
+;
+; RV64-LABEL: vssub_vx_nxv2i64_unmasked:
+; RV64: # %bb.0:
+; RV64-NEXT: vsetvli zero, a1, e64, m2, ta, ma
+; RV64-NEXT: vssub.vx v8, v8, a0
+; RV64-NEXT: ret
+ %elt.head = insertelement <vscale x 2 x i64> poison, i64 %b, i32 0
+ %vb = shufflevector <vscale x 2 x i64> %elt.head, <vscale x 2 x i64> poison, <vscale x 2 x i32> zeroinitializer
+ %head = insertelement <vscale x 2 x i1> poison, i1 true, i32 0
+ %m = shufflevector <vscale x 2 x i1> %head, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
+ %v = call <vscale x 2 x i64> @llvm.vp.ssub.sat.nxv2i64(<vscale x 2 x i64> %va, <vscale x 2 x i64> %vb, <vscale x 2 x i1> %m, i32 %evl)
+ ret <vscale x 2 x i64> %v
+}
+
+define <vscale x 2 x i64> @vssub_vi_nxv2i64(<vscale x 2 x i64> %va, <vscale x 2 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vssub_vi_nxv2i64:
+; CHECK: # %bb.0:
+; CHECK-NEXT: li a1, -1
+; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma
+; CHECK-NEXT: vssub.vx v8, v8, a1, v0.t
+; CHECK-NEXT: ret
+ %elt.head = insertelement <vscale x 2 x i64> poison, i64 -1, i32 0
+ %vb = shufflevector <vscale x 2 x i64> %elt.head, <vscale x 2 x i64> poison, <vscale x 2 x i32> zeroinitializer
+ %v = call <vscale x 2 x i64> @llvm.vp.ssub.sat.nxv2i64(<vscale x 2 x i64> %va, <vscale x 2 x i64> %vb, <vscale x 2 x i1> %m, i32 %evl)
+ ret <vscale x 2 x i64> %v
+}
+
+define <vscale x 2 x i64> @vssub_vi_nxv2i64_unmasked(<vscale x 2 x i64> %va, i32 zeroext %evl) {
+; CHECK-LABEL: vssub_vi_nxv2i64_unmasked:
+; CHECK: # %bb.0:
+; CHECK-NEXT: li a1, -1
+; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma
+; CHECK-NEXT: vssub.vx v8, v8, a1
+; CHECK-NEXT: ret
+ %elt.head = insertelement <vscale x 2 x i64> poison, i64 -1, i32 0
+ %vb = shufflevector <vscale x 2 x i64> %elt.head, <vscale x 2 x i64> poison, <vscale x 2 x i32> zeroinitializer
+ %head = insertelement <vscale x 2 x i1> poison, i1 true, i32 0
+ %m = shufflevector <vscale x 2 x i1> %head, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
+ %v = call <vscale x 2 x i64> @llvm.vp.ssub.sat.nxv2i64(<vscale x 2 x i64> %va, <vscale x 2 x i64> %vb, <vscale x 2 x i1> %m, i32 %evl)
+ ret <vscale x 2 x i64> %v
+}
+
+declare <vscale x 4 x i64> @llvm.vp.ssub.sat.nxv4i64(<vscale x 4 x i64>, <vscale x 4 x i64>, <vscale x 4 x i1>, i32)
+
+define <vscale x 4 x i64> @vssub_vv_nxv4i64(<vscale x 4 x i64> %va, <vscale x 4 x i64> %b, <vscale x 4 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vssub_vv_nxv4i64:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma
+; CHECK-NEXT: vssub.vv v8, v8, v12, v0.t
+; CHECK-NEXT: ret
+ %v = call <vscale x 4 x i64> @llvm.vp.ssub.sat.nxv4i64(<vscale x 4 x i64> %va, <vscale x 4 x i64> %b, <vscale x 4 x i1> %m, i32 %evl)
+ ret <vscale x 4 x i64> %v
+}
+
+define <vscale x 4 x i64> @vssub_vv_nxv4i64_unmasked(<vscale x 4 x i64> %va, <vscale x 4 x i64> %b, i32 zeroext %evl) {
+; CHECK-LABEL: vssub_vv_nxv4i64_unmasked:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma
+; CHECK-NEXT: vssub.vv v8, v8, v12
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 4 x i1> poison, i1 true, i32 0
+ %m = shufflevector <vscale x 4 x i1> %head, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
+ %v = call <vscale x 4 x i64> @llvm.vp.ssub.sat.nxv4i64(<vscale x 4 x i64> %va, <vscale x 4 x i64> %b, <vscale x 4 x i1> %m, i32 %evl)
+ ret <vscale x 4 x i64> %v
+}
+
+define <vscale x 4 x i64> @vssub_vx_nxv4i64(<vscale x 4 x i64> %va, i64 %b, <vscale x 4 x i1> %m, i32 zeroext %evl) {
+; RV32-LABEL: vssub_vx_nxv4i64:
+; RV32: # %bb.0:
+; RV32-NEXT: addi sp, sp, -16
+; RV32-NEXT: .cfi_def_cfa_offset 16
+; RV32-NEXT: sw a1, 12(sp)
+; RV32-NEXT: sw a0, 8(sp)
+; RV32-NEXT: addi a0, sp, 8
+; RV32-NEXT: vsetvli a1, zero, e64, m4, ta, ma
+; RV32-NEXT: vlse64.v v12, (a0), zero
+; RV32-NEXT: vsetvli zero, a2, e64, m4, ta, ma
+; RV32-NEXT: vssub.vv v8, v8, v12, v0.t
+; RV32-NEXT: addi sp, sp, 16
+; RV32-NEXT: ret
+;
+; RV64-LABEL: vssub_vx_nxv4i64:
+; RV64: # %bb.0:
+; RV64-NEXT: vsetvli zero, a1, e64, m4, ta, ma
+; RV64-NEXT: vssub.vx v8, v8, a0, v0.t
+; RV64-NEXT: ret
+ %elt.head = insertelement <vscale x 4 x i64> poison, i64 %b, i32 0
+ %vb = shufflevector <vscale x 4 x i64> %elt.head, <vscale x 4 x i64> poison, <vscale x 4 x i32> zeroinitializer
+ %v = call <vscale x 4 x i64> @llvm.vp.ssub.sat.nxv4i64(<vscale x 4 x i64> %va, <vscale x 4 x i64> %vb, <vscale x 4 x i1> %m, i32 %evl)
+ ret <vscale x 4 x i64> %v
+}
+
+define <vscale x 4 x i64> @vssub_vx_nxv4i64_unmasked(<vscale x 4 x i64> %va, i64 %b, i32 zeroext %evl) {
+; RV32-LABEL: vssub_vx_nxv4i64_unmasked:
+; RV32: # %bb.0:
+; RV32-NEXT: addi sp, sp, -16
+; RV32-NEXT: .cfi_def_cfa_offset 16
+; RV32-NEXT: sw a1, 12(sp)
+; RV32-NEXT: sw a0, 8(sp)
+; RV32-NEXT: addi a0, sp, 8
+; RV32-NEXT: vsetvli a1, zero, e64, m4, ta, ma
+; RV32-NEXT: vlse64.v v12, (a0), zero
+; RV32-NEXT: vsetvli zero, a2, e64, m4, ta, ma
+; RV32-NEXT: vssub.vv v8, v8, v12
+; RV32-NEXT: addi sp, sp, 16
+; RV32-NEXT: ret
+;
+; RV64-LABEL: vssub_vx_nxv4i64_unmasked:
+; RV64: # %bb.0:
+; RV64-NEXT: vsetvli zero, a1, e64, m4, ta, ma
+; RV64-NEXT: vssub.vx v8, v8, a0
+; RV64-NEXT: ret
+ %elt.head = insertelement <vscale x 4 x i64> poison, i64 %b, i32 0
+ %vb = shufflevector <vscale x 4 x i64> %elt.head, <vscale x 4 x i64> poison, <vscale x 4 x i32> zeroinitializer
+ %head = insertelement <vscale x 4 x i1> poison, i1 true, i32 0
+ %m = shufflevector <vscale x 4 x i1> %head, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
+ %v = call <vscale x 4 x i64> @llvm.vp.ssub.sat.nxv4i64(<vscale x 4 x i64> %va, <vscale x 4 x i64> %vb, <vscale x 4 x i1> %m, i32 %evl)
+ ret <vscale x 4 x i64> %v
+}
+
+define <vscale x 4 x i64> @vssub_vi_nxv4i64(<vscale x 4 x i64> %va, <vscale x 4 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vssub_vi_nxv4i64:
+; CHECK: # %bb.0:
+; CHECK-NEXT: li a1, -1
+; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma
+; CHECK-NEXT: vssub.vx v8, v8, a1, v0.t
+; CHECK-NEXT: ret
+ %elt.head = insertelement <vscale x 4 x i64> poison, i64 -1, i32 0
+ %vb = shufflevector <vscale x 4 x i64> %elt.head, <vscale x 4 x i64> poison, <vscale x 4 x i32> zeroinitializer
+ %v = call <vscale x 4 x i64> @llvm.vp.ssub.sat.nxv4i64(<vscale x 4 x i64> %va, <vscale x 4 x i64> %vb, <vscale x 4 x i1> %m, i32 %evl)
+ ret <vscale x 4 x i64> %v
+}
+
+define <vscale x 4 x i64> @vssub_vi_nxv4i64_unmasked(<vscale x 4 x i64> %va, i32 zeroext %evl) {
+; CHECK-LABEL: vssub_vi_nxv4i64_unmasked:
+; CHECK: # %bb.0:
+; CHECK-NEXT: li a1, -1
+; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma
+; CHECK-NEXT: vssub.vx v8, v8, a1
+; CHECK-NEXT: ret
+ %elt.head = insertelement <vscale x 4 x i64> poison, i64 -1, i32 0
+ %vb = shufflevector <vscale x 4 x i64> %elt.head, <vscale x 4 x i64> poison, <vscale x 4 x i32> zeroinitializer
+ %head = insertelement <vscale x 4 x i1> poison, i1 true, i32 0
+ %m = shufflevector <vscale x 4 x i1> %head, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
+ %v = call <vscale x 4 x i64> @llvm.vp.ssub.sat.nxv4i64(<vscale x 4 x i64> %va, <vscale x 4 x i64> %vb, <vscale x 4 x i1> %m, i32 %evl)
+ ret <vscale x 4 x i64> %v
+}
+
+declare <vscale x 8 x i64> @llvm.vp.ssub.sat.nxv8i64(<vscale x 8 x i64>, <vscale x 8 x i64>, <vscale x 8 x i1>, i32)
+
+define <vscale x 8 x i64> @vssub_vv_nxv8i64(<vscale x 8 x i64> %va, <vscale x 8 x i64> %b, <vscale x 8 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vssub_vv_nxv8i64:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
+; CHECK-NEXT: vssub.vv v8, v8, v16, v0.t
+; CHECK-NEXT: ret
+ %v = call <vscale x 8 x i64> @llvm.vp.ssub.sat.nxv8i64(<vscale x 8 x i64> %va, <vscale x 8 x i64> %b, <vscale x 8 x i1> %m, i32 %evl)
+ ret <vscale x 8 x i64> %v
+}
+
+define <vscale x 8 x i64> @vssub_vv_nxv8i64_unmasked(<vscale x 8 x i64> %va, <vscale x 8 x i64> %b, i32 zeroext %evl) {
+; CHECK-LABEL: vssub_vv_nxv8i64_unmasked:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
+; CHECK-NEXT: vssub.vv v8, v8, v16
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i1> poison, i1 true, i32 0
+ %m = shufflevector <vscale x 8 x i1> %head, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
+ %v = call <vscale x 8 x i64> @llvm.vp.ssub.sat.nxv8i64(<vscale x 8 x i64> %va, <vscale x 8 x i64> %b, <vscale x 8 x i1> %m, i32 %evl)
+ ret <vscale x 8 x i64> %v
+}
+
+define <vscale x 8 x i64> @vssub_vx_nxv8i64(<vscale x 8 x i64> %va, i64 %b, <vscale x 8 x i1> %m, i32 zeroext %evl) {
+; RV32-LABEL: vssub_vx_nxv8i64:
+; RV32: # %bb.0:
+; RV32-NEXT: addi sp, sp, -16
+; RV32-NEXT: .cfi_def_cfa_offset 16
+; RV32-NEXT: sw a1, 12(sp)
+; RV32-NEXT: sw a0, 8(sp)
+; RV32-NEXT: addi a0, sp, 8
+; RV32-NEXT: vsetvli a1, zero, e64, m8, ta, ma
+; RV32-NEXT: vlse64.v v16, (a0), zero
+; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, ma
+; RV32-NEXT: vssub.vv v8, v8, v16, v0.t
+; RV32-NEXT: addi sp, sp, 16
+; RV32-NEXT: ret
+;
+; RV64-LABEL: vssub_vx_nxv8i64:
+; RV64: # %bb.0:
+; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, ma
+; RV64-NEXT: vssub.vx v8, v8, a0, v0.t
+; RV64-NEXT: ret
+ %elt.head = insertelement <vscale x 8 x i64> poison, i64 %b, i32 0
+ %vb = shufflevector <vscale x 8 x i64> %elt.head, <vscale x 8 x i64> poison, <vscale x 8 x i32> zeroinitializer
+ %v = call <vscale x 8 x i64> @llvm.vp.ssub.sat.nxv8i64(<vscale x 8 x i64> %va, <vscale x 8 x i64> %vb, <vscale x 8 x i1> %m, i32 %evl)
+ ret <vscale x 8 x i64> %v
+}
+
+define <vscale x 8 x i64> @vssub_vx_nxv8i64_unmasked(<vscale x 8 x i64> %va, i64 %b, i32 zeroext %evl) {
+; RV32-LABEL: vssub_vx_nxv8i64_unmasked:
+; RV32: # %bb.0:
+; RV32-NEXT: addi sp, sp, -16
+; RV32-NEXT: .cfi_def_cfa_offset 16
+; RV32-NEXT: sw a1, 12(sp)
+; RV32-NEXT: sw a0, 8(sp)
+; RV32-NEXT: addi a0, sp, 8
+; RV32-NEXT: vsetvli a1, zero, e64, m8, ta, ma
+; RV32-NEXT: vlse64.v v16, (a0), zero
+; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, ma
+; RV32-NEXT: vssub.vv v8, v8, v16
+; RV32-NEXT: addi sp, sp, 16
+; RV32-NEXT: ret
+;
+; RV64-LABEL: vssub_vx_nxv8i64_unmasked:
+; RV64: # %bb.0:
+; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, ma
+; RV64-NEXT: vssub.vx v8, v8, a0
+; RV64-NEXT: ret
+ %elt.head = insertelement <vscale x 8 x i64> poison, i64 %b, i32 0
+ %vb = shufflevector <vscale x 8 x i64> %elt.head, <vscale x 8 x i64> poison, <vscale x 8 x i32> zeroinitializer
+ %head = insertelement <vscale x 8 x i1> poison, i1 true, i32 0
+ %m = shufflevector <vscale x 8 x i1> %head, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
+ %v = call <vscale x 8 x i64> @llvm.vp.ssub.sat.nxv8i64(<vscale x 8 x i64> %va, <vscale x 8 x i64> %vb, <vscale x 8 x i1> %m, i32 %evl)
+ ret <vscale x 8 x i64> %v
+}
+
+define <vscale x 8 x i64> @vssub_vi_nxv8i64(<vscale x 8 x i64> %va, <vscale x 8 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vssub_vi_nxv8i64:
+; CHECK: # %bb.0:
+; CHECK-NEXT: li a1, -1
+; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
+; CHECK-NEXT: vssub.vx v8, v8, a1, v0.t
+; CHECK-NEXT: ret
+ %elt.head = insertelement <vscale x 8 x i64> poison, i64 -1, i32 0
+ %vb = shufflevector <vscale x 8 x i64> %elt.head, <vscale x 8 x i64> poison, <vscale x 8 x i32> zeroinitializer
+ %v = call <vscale x 8 x i64> @llvm.vp.ssub.sat.nxv8i64(<vscale x 8 x i64> %va, <vscale x 8 x i64> %vb, <vscale x 8 x i1> %m, i32 %evl)
+ ret <vscale x 8 x i64> %v
+}
+
+define <vscale x 8 x i64> @vssub_vi_nxv8i64_unmasked(<vscale x 8 x i64> %va, i32 zeroext %evl) {
+; CHECK-LABEL: vssub_vi_nxv8i64_unmasked:
+; CHECK: # %bb.0:
+; CHECK-NEXT: li a1, -1
+; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
+; CHECK-NEXT: vssub.vx v8, v8, a1
+; CHECK-NEXT: ret
+ %elt.head = insertelement <vscale x 8 x i64> poison, i64 -1, i32 0
+ %vb = shufflevector <vscale x 8 x i64> %elt.head, <vscale x 8 x i64> poison, <vscale x 8 x i32> zeroinitializer
+ %head = insertelement <vscale x 8 x i1> poison, i1 true, i32 0
+ %m = shufflevector <vscale x 8 x i1> %head, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
+ %v = call <vscale x 8 x i64> @llvm.vp.ssub.sat.nxv8i64(<vscale x 8 x i64> %va, <vscale x 8 x i64> %vb, <vscale x 8 x i1> %m, i32 %evl)
+ ret <vscale x 8 x i64> %v
+}
diff --git a/llvm/test/CodeGen/RISCV/rvv/vssubu-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vssubu-vp.ll
new file mode 100644
index 000000000000..e5589ce1a9bc
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rvv/vssubu-vp.ll
@@ -0,0 +1,2065 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=riscv32 -mattr=+v -verify-machineinstrs < %s \
+; RUN: | FileCheck %s --check-prefixes=CHECK,RV32
+; RUN: llc -mtriple=riscv64 -mattr=+v -verify-machineinstrs < %s \
+; RUN: | FileCheck %s --check-prefixes=CHECK,RV64
+
+declare <vscale x 8 x i7> @llvm.vp.usub.sat.nxv8i7(<vscale x 8 x i7>, <vscale x 8 x i7>, <vscale x 8 x i1>, i32)
+
+define <vscale x 8 x i7> @vssubu_vx_nxv8i7(<vscale x 8 x i7> %a, i7 signext %b, <vscale x 8 x i1> %mask, i32 zeroext %evl) {
+; CHECK-LABEL: vssubu_vx_nxv8i7:
+; CHECK: # %bb.0:
+; CHECK-NEXT: li a2, 127
+; CHECK-NEXT: vsetvli a3, zero, e8, m1, ta, ma
+; CHECK-NEXT: vand.vx v8, v8, a2
+; CHECK-NEXT: vmv.v.x v9, a0
+; CHECK-NEXT: vand.vx v9, v9, a2
+; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma
+; CHECK-NEXT: vssubu.vv v8, v8, v9, v0.t
+; CHECK-NEXT: ret
+ %elt.head = insertelement <vscale x 8 x i7> poison, i7 %b, i32 0
+ %vb = shufflevector <vscale x 8 x i7> %elt.head, <vscale x 8 x i7> poison, <vscale x 8 x i32> zeroinitializer
+ %v = call <vscale x 8 x i7> @llvm.vp.usub.sat.nxv8i7(<vscale x 8 x i7> %a, <vscale x 8 x i7> %vb, <vscale x 8 x i1> %mask, i32 %evl)
+ ret <vscale x 8 x i7> %v
+}
+
+declare <vscale x 1 x i8> @llvm.vp.usub.sat.nxv1i8(<vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i1>, i32)
+
+define <vscale x 1 x i8> @vssubu_vv_nxv1i8(<vscale x 1 x i8> %va, <vscale x 1 x i8> %b, <vscale x 1 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vssubu_vv_nxv1i8:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma
+; CHECK-NEXT: vssubu.vv v8, v8, v9, v0.t
+; CHECK-NEXT: ret
+ %v = call <vscale x 1 x i8> @llvm.vp.usub.sat.nxv1i8(<vscale x 1 x i8> %va, <vscale x 1 x i8> %b, <vscale x 1 x i1> %m, i32 %evl)
+ ret <vscale x 1 x i8> %v
+}
+
+define <vscale x 1 x i8> @vssubu_vv_nxv1i8_unmasked(<vscale x 1 x i8> %va, <vscale x 1 x i8> %b, i32 zeroext %evl) {
+; CHECK-LABEL: vssubu_vv_nxv1i8_unmasked:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma
+; CHECK-NEXT: vssubu.vv v8, v8, v9
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 1 x i1> poison, i1 true, i32 0
+ %m = shufflevector <vscale x 1 x i1> %head, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
+ %v = call <vscale x 1 x i8> @llvm.vp.usub.sat.nxv1i8(<vscale x 1 x i8> %va, <vscale x 1 x i8> %b, <vscale x 1 x i1> %m, i32 %evl)
+ ret <vscale x 1 x i8> %v
+}
+
+define <vscale x 1 x i8> @vssubu_vx_nxv1i8(<vscale x 1 x i8> %va, i8 %b, <vscale x 1 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vssubu_vx_nxv1i8:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
+; CHECK-NEXT: vssubu.vx v8, v8, a0, v0.t
+; CHECK-NEXT: ret
+ %elt.head = insertelement <vscale x 1 x i8> poison, i8 %b, i32 0
+ %vb = shufflevector <vscale x 1 x i8> %elt.head, <vscale x 1 x i8> poison, <vscale x 1 x i32> zeroinitializer
+ %v = call <vscale x 1 x i8> @llvm.vp.usub.sat.nxv1i8(<vscale x 1 x i8> %va, <vscale x 1 x i8> %vb, <vscale x 1 x i1> %m, i32 %evl)
+ ret <vscale x 1 x i8> %v
+}
+
+define <vscale x 1 x i8> @vssubu_vx_nxv1i8_commute(<vscale x 1 x i8> %va, i8 %b, <vscale x 1 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vssubu_vx_nxv1i8_commute:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a2, zero, e8, mf8, ta, ma
+; CHECK-NEXT: vmv.v.x v9, a0
+; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
+; CHECK-NEXT: vssubu.vv v8, v9, v8, v0.t
+; CHECK-NEXT: ret
+ %elt.head = insertelement <vscale x 1 x i8> poison, i8 %b, i32 0
+ %vb = shufflevector <vscale x 1 x i8> %elt.head, <vscale x 1 x i8> poison, <vscale x 1 x i32> zeroinitializer
+ %v = call <vscale x 1 x i8> @llvm.vp.usub.sat.nxv1i8(<vscale x 1 x i8> %vb, <vscale x 1 x i8> %va, <vscale x 1 x i1> %m, i32 %evl)
+ ret <vscale x 1 x i8> %v
+}
+
+define <vscale x 1 x i8> @vssubu_vx_nxv1i8_unmasked(<vscale x 1 x i8> %va, i8 %b, i32 zeroext %evl) {
+; CHECK-LABEL: vssubu_vx_nxv1i8_unmasked:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
+; CHECK-NEXT: vssubu.vx v8, v8, a0
+; CHECK-NEXT: ret
+ %elt.head = insertelement <vscale x 1 x i8> poison, i8 %b, i32 0
+ %vb = shufflevector <vscale x 1 x i8> %elt.head, <vscale x 1 x i8> poison, <vscale x 1 x i32> zeroinitializer
+ %head = insertelement <vscale x 1 x i1> poison, i1 true, i32 0
+ %m = shufflevector <vscale x 1 x i1> %head, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
+ %v = call <vscale x 1 x i8> @llvm.vp.usub.sat.nxv1i8(<vscale x 1 x i8> %va, <vscale x 1 x i8> %vb, <vscale x 1 x i1> %m, i32 %evl)
+ ret <vscale x 1 x i8> %v
+}
+
+define <vscale x 1 x i8> @vssubu_vi_nxv1i8(<vscale x 1 x i8> %va, <vscale x 1 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vssubu_vi_nxv1i8:
+; CHECK: # %bb.0:
+; CHECK-NEXT: li a1, -1
+; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma
+; CHECK-NEXT: vssubu.vx v8, v8, a1, v0.t
+; CHECK-NEXT: ret
+ %elt.head = insertelement <vscale x 1 x i8> poison, i8 -1, i32 0
+ %vb = shufflevector <vscale x 1 x i8> %elt.head, <vscale x 1 x i8> poison, <vscale x 1 x i32> zeroinitializer
+ %v = call <vscale x 1 x i8> @llvm.vp.usub.sat.nxv1i8(<vscale x 1 x i8> %va, <vscale x 1 x i8> %vb, <vscale x 1 x i1> %m, i32 %evl)
+ ret <vscale x 1 x i8> %v
+}
+
+define <vscale x 1 x i8> @vssubu_vi_nxv1i8_unmasked(<vscale x 1 x i8> %va, i32 zeroext %evl) {
+; CHECK-LABEL: vssubu_vi_nxv1i8_unmasked:
+; CHECK: # %bb.0:
+; CHECK-NEXT: li a1, -1
+; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma
+; CHECK-NEXT: vssubu.vx v8, v8, a1
+; CHECK-NEXT: ret
+ %elt.head = insertelement <vscale x 1 x i8> poison, i8 -1, i32 0
+ %vb = shufflevector <vscale x 1 x i8> %elt.head, <vscale x 1 x i8> poison, <vscale x 1 x i32> zeroinitializer
+ %head = insertelement <vscale x 1 x i1> poison, i1 true, i32 0
+ %m = shufflevector <vscale x 1 x i1> %head, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
+ %v = call <vscale x 1 x i8> @llvm.vp.usub.sat.nxv1i8(<vscale x 1 x i8> %va, <vscale x 1 x i8> %vb, <vscale x 1 x i1> %m, i32 %evl)
+ ret <vscale x 1 x i8> %v
+}
+
+declare <vscale x 2 x i8> @llvm.vp.usub.sat.nxv2i8(<vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i1>, i32)
+
+define <vscale x 2 x i8> @vssubu_vv_nxv2i8(<vscale x 2 x i8> %va, <vscale x 2 x i8> %b, <vscale x 2 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vssubu_vv_nxv2i8:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma
+; CHECK-NEXT: vssubu.vv v8, v8, v9, v0.t
+; CHECK-NEXT: ret
+ %v = call <vscale x 2 x i8> @llvm.vp.usub.sat.nxv2i8(<vscale x 2 x i8> %va, <vscale x 2 x i8> %b, <vscale x 2 x i1> %m, i32 %evl)
+ ret <vscale x 2 x i8> %v
+}
+
+define <vscale x 2 x i8> @vssubu_vv_nxv2i8_unmasked(<vscale x 2 x i8> %va, <vscale x 2 x i8> %b, i32 zeroext %evl) {
+; CHECK-LABEL: vssubu_vv_nxv2i8_unmasked:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma
+; CHECK-NEXT: vssubu.vv v8, v8, v9
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 2 x i1> poison, i1 true, i32 0
+ %m = shufflevector <vscale x 2 x i1> %head, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
+ %v = call <vscale x 2 x i8> @llvm.vp.usub.sat.nxv2i8(<vscale x 2 x i8> %va, <vscale x 2 x i8> %b, <vscale x 2 x i1> %m, i32 %evl)
+ ret <vscale x 2 x i8> %v
+}
+
+define <vscale x 2 x i8> @vssubu_vx_nxv2i8(<vscale x 2 x i8> %va, i8 %b, <vscale x 2 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vssubu_vx_nxv2i8:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma
+; CHECK-NEXT: vssubu.vx v8, v8, a0, v0.t
+; CHECK-NEXT: ret
+ %elt.head = insertelement <vscale x 2 x i8> poison, i8 %b, i32 0
+ %vb = shufflevector <vscale x 2 x i8> %elt.head, <vscale x 2 x i8> poison, <vscale x 2 x i32> zeroinitializer
+ %v = call <vscale x 2 x i8> @llvm.vp.usub.sat.nxv2i8(<vscale x 2 x i8> %va, <vscale x 2 x i8> %vb, <vscale x 2 x i1> %m, i32 %evl)
+ ret <vscale x 2 x i8> %v
+}
+
+define <vscale x 2 x i8> @vssubu_vx_nxv2i8_unmasked(<vscale x 2 x i8> %va, i8 %b, i32 zeroext %evl) {
+; CHECK-LABEL: vssubu_vx_nxv2i8_unmasked:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma
+; CHECK-NEXT: vssubu.vx v8, v8, a0
+; CHECK-NEXT: ret
+ %elt.head = insertelement <vscale x 2 x i8> poison, i8 %b, i32 0
+ %vb = shufflevector <vscale x 2 x i8> %elt.head, <vscale x 2 x i8> poison, <vscale x 2 x i32> zeroinitializer
+ %head = insertelement <vscale x 2 x i1> poison, i1 true, i32 0
+ %m = shufflevector <vscale x 2 x i1> %head, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
+ %v = call <vscale x 2 x i8> @llvm.vp.usub.sat.nxv2i8(<vscale x 2 x i8> %va, <vscale x 2 x i8> %vb, <vscale x 2 x i1> %m, i32 %evl)
+ ret <vscale x 2 x i8> %v
+}
+
+define <vscale x 2 x i8> @vssubu_vi_nxv2i8(<vscale x 2 x i8> %va, <vscale x 2 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vssubu_vi_nxv2i8:
+; CHECK: # %bb.0:
+; CHECK-NEXT: li a1, -1
+; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma
+; CHECK-NEXT: vssubu.vx v8, v8, a1, v0.t
+; CHECK-NEXT: ret
+ %elt.head = insertelement <vscale x 2 x i8> poison, i8 -1, i32 0
+ %vb = shufflevector <vscale x 2 x i8> %elt.head, <vscale x 2 x i8> poison, <vscale x 2 x i32> zeroinitializer
+ %v = call <vscale x 2 x i8> @llvm.vp.usub.sat.nxv2i8(<vscale x 2 x i8> %va, <vscale x 2 x i8> %vb, <vscale x 2 x i1> %m, i32 %evl)
+ ret <vscale x 2 x i8> %v
+}
+
+define <vscale x 2 x i8> @vssubu_vi_nxv2i8_unmasked(<vscale x 2 x i8> %va, i32 zeroext %evl) {
+; CHECK-LABEL: vssubu_vi_nxv2i8_unmasked:
+; CHECK: # %bb.0:
+; CHECK-NEXT: li a1, -1
+; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma
+; CHECK-NEXT: vssubu.vx v8, v8, a1
+; CHECK-NEXT: ret
+ %elt.head = insertelement <vscale x 2 x i8> poison, i8 -1, i32 0
+ %vb = shufflevector <vscale x 2 x i8> %elt.head, <vscale x 2 x i8> poison, <vscale x 2 x i32> zeroinitializer
+ %head = insertelement <vscale x 2 x i1> poison, i1 true, i32 0
+ %m = shufflevector <vscale x 2 x i1> %head, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
+ %v = call <vscale x 2 x i8> @llvm.vp.usub.sat.nxv2i8(<vscale x 2 x i8> %va, <vscale x 2 x i8> %vb, <vscale x 2 x i1> %m, i32 %evl)
+ ret <vscale x 2 x i8> %v
+}
+
+declare <vscale x 3 x i8> @llvm.vp.usub.sat.nxv3i8(<vscale x 3 x i8>, <vscale x 3 x i8>, <vscale x 3 x i1>, i32)
+
+define <vscale x 3 x i8> @vssubu_vv_nxv3i8(<vscale x 3 x i8> %va, <vscale x 3 x i8> %b, <vscale x 3 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vssubu_vv_nxv3i8:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma
+; CHECK-NEXT: vssubu.vv v8, v8, v9, v0.t
+; CHECK-NEXT: ret
+ %v = call <vscale x 3 x i8> @llvm.vp.usub.sat.nxv3i8(<vscale x 3 x i8> %va, <vscale x 3 x i8> %b, <vscale x 3 x i1> %m, i32 %evl)
+ ret <vscale x 3 x i8> %v
+}
+
+define <vscale x 3 x i8> @vssubu_vv_nxv3i8_unmasked(<vscale x 3 x i8> %va, <vscale x 3 x i8> %b, i32 zeroext %evl) {
+; CHECK-LABEL: vssubu_vv_nxv3i8_unmasked:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma
+; CHECK-NEXT: vssubu.vv v8, v8, v9
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 3 x i1> poison, i1 true, i32 0
+ %m = shufflevector <vscale x 3 x i1> %head, <vscale x 3 x i1> poison, <vscale x 3 x i32> zeroinitializer
+ %v = call <vscale x 3 x i8> @llvm.vp.usub.sat.nxv3i8(<vscale x 3 x i8> %va, <vscale x 3 x i8> %b, <vscale x 3 x i1> %m, i32 %evl)
+ ret <vscale x 3 x i8> %v
+}
+
+define <vscale x 3 x i8> @vssubu_vx_nxv3i8(<vscale x 3 x i8> %va, i8 %b, <vscale x 3 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vssubu_vx_nxv3i8:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma
+; CHECK-NEXT: vssubu.vx v8, v8, a0, v0.t
+; CHECK-NEXT: ret
+ %elt.head = insertelement <vscale x 3 x i8> poison, i8 %b, i32 0
+ %vb = shufflevector <vscale x 3 x i8> %elt.head, <vscale x 3 x i8> poison, <vscale x 3 x i32> zeroinitializer
+ %v = call <vscale x 3 x i8> @llvm.vp.usub.sat.nxv3i8(<vscale x 3 x i8> %va, <vscale x 3 x i8> %vb, <vscale x 3 x i1> %m, i32 %evl)
+ ret <vscale x 3 x i8> %v
+}
+
+define <vscale x 3 x i8> @vssubu_vx_nxv3i8_unmasked(<vscale x 3 x i8> %va, i8 %b, i32 zeroext %evl) {
+; CHECK-LABEL: vssubu_vx_nxv3i8_unmasked:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma
+; CHECK-NEXT: vssubu.vx v8, v8, a0
+; CHECK-NEXT: ret
+ %elt.head = insertelement <vscale x 3 x i8> poison, i8 %b, i32 0
+ %vb = shufflevector <vscale x 3 x i8> %elt.head, <vscale x 3 x i8> poison, <vscale x 3 x i32> zeroinitializer
+ %head = insertelement <vscale x 3 x i1> poison, i1 true, i32 0
+ %m = shufflevector <vscale x 3 x i1> %head, <vscale x 3 x i1> poison, <vscale x 3 x i32> zeroinitializer
+ %v = call <vscale x 3 x i8> @llvm.vp.usub.sat.nxv3i8(<vscale x 3 x i8> %va, <vscale x 3 x i8> %vb, <vscale x 3 x i1> %m, i32 %evl)
+ ret <vscale x 3 x i8> %v
+}
+
+define <vscale x 3 x i8> @vssubu_vi_nxv3i8(<vscale x 3 x i8> %va, <vscale x 3 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vssubu_vi_nxv3i8:
+; CHECK: # %bb.0:
+; CHECK-NEXT: li a1, -1
+; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma
+; CHECK-NEXT: vssubu.vx v8, v8, a1, v0.t
+; CHECK-NEXT: ret
+ %elt.head = insertelement <vscale x 3 x i8> poison, i8 -1, i32 0
+ %vb = shufflevector <vscale x 3 x i8> %elt.head, <vscale x 3 x i8> poison, <vscale x 3 x i32> zeroinitializer
+ %v = call <vscale x 3 x i8> @llvm.vp.usub.sat.nxv3i8(<vscale x 3 x i8> %va, <vscale x 3 x i8> %vb, <vscale x 3 x i1> %m, i32 %evl)
+ ret <vscale x 3 x i8> %v
+}
+
+define <vscale x 3 x i8> @vssubu_vi_nxv3i8_unmasked(<vscale x 3 x i8> %va, i32 zeroext %evl) {
+; CHECK-LABEL: vssubu_vi_nxv3i8_unmasked:
+; CHECK: # %bb.0:
+; CHECK-NEXT: li a1, -1
+; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma
+; CHECK-NEXT: vssubu.vx v8, v8, a1
+; CHECK-NEXT: ret
+ %elt.head = insertelement <vscale x 3 x i8> poison, i8 -1, i32 0
+ %vb = shufflevector <vscale x 3 x i8> %elt.head, <vscale x 3 x i8> poison, <vscale x 3 x i32> zeroinitializer
+ %head = insertelement <vscale x 3 x i1> poison, i1 true, i32 0
+ %m = shufflevector <vscale x 3 x i1> %head, <vscale x 3 x i1> poison, <vscale x 3 x i32> zeroinitializer
+ %v = call <vscale x 3 x i8> @llvm.vp.usub.sat.nxv3i8(<vscale x 3 x i8> %va, <vscale x 3 x i8> %vb, <vscale x 3 x i1> %m, i32 %evl)
+ ret <vscale x 3 x i8> %v
+}
+
+declare <vscale x 4 x i8> @llvm.vp.usub.sat.nxv4i8(<vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i1>, i32)
+
+define <vscale x 4 x i8> @vssubu_vv_nxv4i8(<vscale x 4 x i8> %va, <vscale x 4 x i8> %b, <vscale x 4 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vssubu_vv_nxv4i8:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma
+; CHECK-NEXT: vssubu.vv v8, v8, v9, v0.t
+; CHECK-NEXT: ret
+ %v = call <vscale x 4 x i8> @llvm.vp.usub.sat.nxv4i8(<vscale x 4 x i8> %va, <vscale x 4 x i8> %b, <vscale x 4 x i1> %m, i32 %evl)
+ ret <vscale x 4 x i8> %v
+}
+
+define <vscale x 4 x i8> @vssubu_vv_nxv4i8_unmasked(<vscale x 4 x i8> %va, <vscale x 4 x i8> %b, i32 zeroext %evl) {
+; CHECK-LABEL: vssubu_vv_nxv4i8_unmasked:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma
+; CHECK-NEXT: vssubu.vv v8, v8, v9
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 4 x i1> poison, i1 true, i32 0
+ %m = shufflevector <vscale x 4 x i1> %head, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
+ %v = call <vscale x 4 x i8> @llvm.vp.usub.sat.nxv4i8(<vscale x 4 x i8> %va, <vscale x 4 x i8> %b, <vscale x 4 x i1> %m, i32 %evl)
+ ret <vscale x 4 x i8> %v
+}
+
+define <vscale x 4 x i8> @vssubu_vx_nxv4i8(<vscale x 4 x i8> %va, i8 %b, <vscale x 4 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vssubu_vx_nxv4i8:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma
+; CHECK-NEXT: vssubu.vx v8, v8, a0, v0.t
+; CHECK-NEXT: ret
+ %elt.head = insertelement <vscale x 4 x i8> poison, i8 %b, i32 0
+ %vb = shufflevector <vscale x 4 x i8> %elt.head, <vscale x 4 x i8> poison, <vscale x 4 x i32> zeroinitializer
+ %v = call <vscale x 4 x i8> @llvm.vp.usub.sat.nxv4i8(<vscale x 4 x i8> %va, <vscale x 4 x i8> %vb, <vscale x 4 x i1> %m, i32 %evl)
+ ret <vscale x 4 x i8> %v
+}
+
+define <vscale x 4 x i8> @vssubu_vx_nxv4i8_unmasked(<vscale x 4 x i8> %va, i8 %b, i32 zeroext %evl) {
+; CHECK-LABEL: vssubu_vx_nxv4i8_unmasked:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma
+; CHECK-NEXT: vssubu.vx v8, v8, a0
+; CHECK-NEXT: ret
+ %elt.head = insertelement <vscale x 4 x i8> poison, i8 %b, i32 0
+ %vb = shufflevector <vscale x 4 x i8> %elt.head, <vscale x 4 x i8> poison, <vscale x 4 x i32> zeroinitializer
+ %head = insertelement <vscale x 4 x i1> poison, i1 true, i32 0
+ %m = shufflevector <vscale x 4 x i1> %head, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
+ %v = call <vscale x 4 x i8> @llvm.vp.usub.sat.nxv4i8(<vscale x 4 x i8> %va, <vscale x 4 x i8> %vb, <vscale x 4 x i1> %m, i32 %evl)
+ ret <vscale x 4 x i8> %v
+}
+
+define <vscale x 4 x i8> @vssubu_vi_nxv4i8(<vscale x 4 x i8> %va, <vscale x 4 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vssubu_vi_nxv4i8:
+; CHECK: # %bb.0:
+; CHECK-NEXT: li a1, -1
+; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma
+; CHECK-NEXT: vssubu.vx v8, v8, a1, v0.t
+; CHECK-NEXT: ret
+ %elt.head = insertelement <vscale x 4 x i8> poison, i8 -1, i32 0
+ %vb = shufflevector <vscale x 4 x i8> %elt.head, <vscale x 4 x i8> poison, <vscale x 4 x i32> zeroinitializer
+ %v = call <vscale x 4 x i8> @llvm.vp.usub.sat.nxv4i8(<vscale x 4 x i8> %va, <vscale x 4 x i8> %vb, <vscale x 4 x i1> %m, i32 %evl)
+ ret <vscale x 4 x i8> %v
+}
+
+define <vscale x 4 x i8> @vssubu_vi_nxv4i8_unmasked(<vscale x 4 x i8> %va, i32 zeroext %evl) {
+; CHECK-LABEL: vssubu_vi_nxv4i8_unmasked:
+; CHECK: # %bb.0:
+; CHECK-NEXT: li a1, -1
+; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma
+; CHECK-NEXT: vssubu.vx v8, v8, a1
+; CHECK-NEXT: ret
+ %elt.head = insertelement <vscale x 4 x i8> poison, i8 -1, i32 0
+ %vb = shufflevector <vscale x 4 x i8> %elt.head, <vscale x 4 x i8> poison, <vscale x 4 x i32> zeroinitializer
+ %head = insertelement <vscale x 4 x i1> poison, i1 true, i32 0
+ %m = shufflevector <vscale x 4 x i1> %head, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
+ %v = call <vscale x 4 x i8> @llvm.vp.usub.sat.nxv4i8(<vscale x 4 x i8> %va, <vscale x 4 x i8> %vb, <vscale x 4 x i1> %m, i32 %evl)
+ ret <vscale x 4 x i8> %v
+}
+
+declare <vscale x 8 x i8> @llvm.vp.usub.sat.nxv8i8(<vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i1>, i32)
+
+define <vscale x 8 x i8> @vssubu_vv_nxv8i8(<vscale x 8 x i8> %va, <vscale x 8 x i8> %b, <vscale x 8 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vssubu_vv_nxv8i8:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma
+; CHECK-NEXT: vssubu.vv v8, v8, v9, v0.t
+; CHECK-NEXT: ret
+ %v = call <vscale x 8 x i8> @llvm.vp.usub.sat.nxv8i8(<vscale x 8 x i8> %va, <vscale x 8 x i8> %b, <vscale x 8 x i1> %m, i32 %evl)
+ ret <vscale x 8 x i8> %v
+}
+
+define <vscale x 8 x i8> @vssubu_vv_nxv8i8_unmasked(<vscale x 8 x i8> %va, <vscale x 8 x i8> %b, i32 zeroext %evl) {
+; CHECK-LABEL: vssubu_vv_nxv8i8_unmasked:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma
+; CHECK-NEXT: vssubu.vv v8, v8, v9
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i1> poison, i1 true, i32 0
+ %m = shufflevector <vscale x 8 x i1> %head, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
+ %v = call <vscale x 8 x i8> @llvm.vp.usub.sat.nxv8i8(<vscale x 8 x i8> %va, <vscale x 8 x i8> %b, <vscale x 8 x i1> %m, i32 %evl)
+ ret <vscale x 8 x i8> %v
+}
+
+define <vscale x 8 x i8> @vssubu_vx_nxv8i8(<vscale x 8 x i8> %va, i8 %b, <vscale x 8 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vssubu_vx_nxv8i8:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma
+; CHECK-NEXT: vssubu.vx v8, v8, a0, v0.t
+; CHECK-NEXT: ret
+ %elt.head = insertelement <vscale x 8 x i8> poison, i8 %b, i32 0
+ %vb = shufflevector <vscale x 8 x i8> %elt.head, <vscale x 8 x i8> poison, <vscale x 8 x i32> zeroinitializer
+ %v = call <vscale x 8 x i8> @llvm.vp.usub.sat.nxv8i8(<vscale x 8 x i8> %va, <vscale x 8 x i8> %vb, <vscale x 8 x i1> %m, i32 %evl)
+ ret <vscale x 8 x i8> %v
+}
+
+define <vscale x 8 x i8> @vssubu_vx_nxv8i8_unmasked(<vscale x 8 x i8> %va, i8 %b, i32 zeroext %evl) {
+; CHECK-LABEL: vssubu_vx_nxv8i8_unmasked:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma
+; CHECK-NEXT: vssubu.vx v8, v8, a0
+; CHECK-NEXT: ret
+ %elt.head = insertelement <vscale x 8 x i8> poison, i8 %b, i32 0
+ %vb = shufflevector <vscale x 8 x i8> %elt.head, <vscale x 8 x i8> poison, <vscale x 8 x i32> zeroinitializer
+ %head = insertelement <vscale x 8 x i1> poison, i1 true, i32 0
+ %m = shufflevector <vscale x 8 x i1> %head, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
+ %v = call <vscale x 8 x i8> @llvm.vp.usub.sat.nxv8i8(<vscale x 8 x i8> %va, <vscale x 8 x i8> %vb, <vscale x 8 x i1> %m, i32 %evl)
+ ret <vscale x 8 x i8> %v
+}
+
+define <vscale x 8 x i8> @vssubu_vi_nxv8i8(<vscale x 8 x i8> %va, <vscale x 8 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vssubu_vi_nxv8i8:
+; CHECK: # %bb.0:
+; CHECK-NEXT: li a1, -1
+; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma
+; CHECK-NEXT: vssubu.vx v8, v8, a1, v0.t
+; CHECK-NEXT: ret
+ %elt.head = insertelement <vscale x 8 x i8> poison, i8 -1, i32 0
+ %vb = shufflevector <vscale x 8 x i8> %elt.head, <vscale x 8 x i8> poison, <vscale x 8 x i32> zeroinitializer
+ %v = call <vscale x 8 x i8> @llvm.vp.usub.sat.nxv8i8(<vscale x 8 x i8> %va, <vscale x 8 x i8> %vb, <vscale x 8 x i1> %m, i32 %evl)
+ ret <vscale x 8 x i8> %v
+}
+
+define <vscale x 8 x i8> @vssubu_vi_nxv8i8_unmasked(<vscale x 8 x i8> %va, i32 zeroext %evl) {
+; CHECK-LABEL: vssubu_vi_nxv8i8_unmasked:
+; CHECK: # %bb.0:
+; CHECK-NEXT: li a1, -1
+; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma
+; CHECK-NEXT: vssubu.vx v8, v8, a1
+; CHECK-NEXT: ret
+ %elt.head = insertelement <vscale x 8 x i8> poison, i8 -1, i32 0
+ %vb = shufflevector <vscale x 8 x i8> %elt.head, <vscale x 8 x i8> poison, <vscale x 8 x i32> zeroinitializer
+ %head = insertelement <vscale x 8 x i1> poison, i1 true, i32 0
+ %m = shufflevector <vscale x 8 x i1> %head, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
+ %v = call <vscale x 8 x i8> @llvm.vp.usub.sat.nxv8i8(<vscale x 8 x i8> %va, <vscale x 8 x i8> %vb, <vscale x 8 x i1> %m, i32 %evl)
+ ret <vscale x 8 x i8> %v
+}
+
+declare <vscale x 16 x i8> @llvm.vp.usub.sat.nxv16i8(<vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i1>, i32)
+
+define <vscale x 16 x i8> @vssubu_vv_nxv16i8(<vscale x 16 x i8> %va, <vscale x 16 x i8> %b, <vscale x 16 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vssubu_vv_nxv16i8:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma
+; CHECK-NEXT: vssubu.vv v8, v8, v10, v0.t
+; CHECK-NEXT: ret
+ %v = call <vscale x 16 x i8> @llvm.vp.usub.sat.nxv16i8(<vscale x 16 x i8> %va, <vscale x 16 x i8> %b, <vscale x 16 x i1> %m, i32 %evl)
+ ret <vscale x 16 x i8> %v
+}
+
+define <vscale x 16 x i8> @vssubu_vv_nxv16i8_unmasked(<vscale x 16 x i8> %va, <vscale x 16 x i8> %b, i32 zeroext %evl) {
+; CHECK-LABEL: vssubu_vv_nxv16i8_unmasked:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma
+; CHECK-NEXT: vssubu.vv v8, v8, v10
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 16 x i1> poison, i1 true, i32 0
+ %m = shufflevector <vscale x 16 x i1> %head, <vscale x 16 x i1> poison, <vscale x 16 x i32> zeroinitializer
+ %v = call <vscale x 16 x i8> @llvm.vp.usub.sat.nxv16i8(<vscale x 16 x i8> %va, <vscale x 16 x i8> %b, <vscale x 16 x i1> %m, i32 %evl)
+ ret <vscale x 16 x i8> %v
+}
+
+define <vscale x 16 x i8> @vssubu_vx_nxv16i8(<vscale x 16 x i8> %va, i8 %b, <vscale x 16 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vssubu_vx_nxv16i8:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma
+; CHECK-NEXT: vssubu.vx v8, v8, a0, v0.t
+; CHECK-NEXT: ret
+ %elt.head = insertelement <vscale x 16 x i8> poison, i8 %b, i32 0
+ %vb = shufflevector <vscale x 16 x i8> %elt.head, <vscale x 16 x i8> poison, <vscale x 16 x i32> zeroinitializer
+ %v = call <vscale x 16 x i8> @llvm.vp.usub.sat.nxv16i8(<vscale x 16 x i8> %va, <vscale x 16 x i8> %vb, <vscale x 16 x i1> %m, i32 %evl)
+ ret <vscale x 16 x i8> %v
+}
+
+define <vscale x 16 x i8> @vssubu_vx_nxv16i8_unmasked(<vscale x 16 x i8> %va, i8 %b, i32 zeroext %evl) {
+; CHECK-LABEL: vssubu_vx_nxv16i8_unmasked:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma
+; CHECK-NEXT: vssubu.vx v8, v8, a0
+; CHECK-NEXT: ret
+ %elt.head = insertelement <vscale x 16 x i8> poison, i8 %b, i32 0
+ %vb = shufflevector <vscale x 16 x i8> %elt.head, <vscale x 16 x i8> poison, <vscale x 16 x i32> zeroinitializer
+ %head = insertelement <vscale x 16 x i1> poison, i1 true, i32 0
+ %m = shufflevector <vscale x 16 x i1> %head, <vscale x 16 x i1> poison, <vscale x 16 x i32> zeroinitializer
+ %v = call <vscale x 16 x i8> @llvm.vp.usub.sat.nxv16i8(<vscale x 16 x i8> %va, <vscale x 16 x i8> %vb, <vscale x 16 x i1> %m, i32 %evl)
+ ret <vscale x 16 x i8> %v
+}
+
+define <vscale x 16 x i8> @vssubu_vi_nxv16i8(<vscale x 16 x i8> %va, <vscale x 16 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vssubu_vi_nxv16i8:
+; CHECK: # %bb.0:
+; CHECK-NEXT: li a1, -1
+; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma
+; CHECK-NEXT: vssubu.vx v8, v8, a1, v0.t
+; CHECK-NEXT: ret
+ %elt.head = insertelement <vscale x 16 x i8> poison, i8 -1, i32 0
+ %vb = shufflevector <vscale x 16 x i8> %elt.head, <vscale x 16 x i8> poison, <vscale x 16 x i32> zeroinitializer
+ %v = call <vscale x 16 x i8> @llvm.vp.usub.sat.nxv16i8(<vscale x 16 x i8> %va, <vscale x 16 x i8> %vb, <vscale x 16 x i1> %m, i32 %evl)
+ ret <vscale x 16 x i8> %v
+}
+
+define <vscale x 16 x i8> @vssubu_vi_nxv16i8_unmasked(<vscale x 16 x i8> %va, i32 zeroext %evl) {
+; CHECK-LABEL: vssubu_vi_nxv16i8_unmasked:
+; CHECK: # %bb.0:
+; CHECK-NEXT: li a1, -1
+; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma
+; CHECK-NEXT: vssubu.vx v8, v8, a1
+; CHECK-NEXT: ret
+ %elt.head = insertelement <vscale x 16 x i8> poison, i8 -1, i32 0
+ %vb = shufflevector <vscale x 16 x i8> %elt.head, <vscale x 16 x i8> poison, <vscale x 16 x i32> zeroinitializer
+ %head = insertelement <vscale x 16 x i1> poison, i1 true, i32 0
+ %m = shufflevector <vscale x 16 x i1> %head, <vscale x 16 x i1> poison, <vscale x 16 x i32> zeroinitializer
+ %v = call <vscale x 16 x i8> @llvm.vp.usub.sat.nxv16i8(<vscale x 16 x i8> %va, <vscale x 16 x i8> %vb, <vscale x 16 x i1> %m, i32 %evl)
+ ret <vscale x 16 x i8> %v
+}
+
+declare <vscale x 32 x i8> @llvm.vp.usub.sat.nxv32i8(<vscale x 32 x i8>, <vscale x 32 x i8>, <vscale x 32 x i1>, i32)
+
+define <vscale x 32 x i8> @vssubu_vv_nxv32i8(<vscale x 32 x i8> %va, <vscale x 32 x i8> %b, <vscale x 32 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vssubu_vv_nxv32i8:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma
+; CHECK-NEXT: vssubu.vv v8, v8, v12, v0.t
+; CHECK-NEXT: ret
+ %v = call <vscale x 32 x i8> @llvm.vp.usub.sat.nxv32i8(<vscale x 32 x i8> %va, <vscale x 32 x i8> %b, <vscale x 32 x i1> %m, i32 %evl)
+ ret <vscale x 32 x i8> %v
+}
+
+define <vscale x 32 x i8> @vssubu_vv_nxv32i8_unmasked(<vscale x 32 x i8> %va, <vscale x 32 x i8> %b, i32 zeroext %evl) {
+; CHECK-LABEL: vssubu_vv_nxv32i8_unmasked:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma
+; CHECK-NEXT: vssubu.vv v8, v8, v12
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 32 x i1> poison, i1 true, i32 0
+ %m = shufflevector <vscale x 32 x i1> %head, <vscale x 32 x i1> poison, <vscale x 32 x i32> zeroinitializer
+ %v = call <vscale x 32 x i8> @llvm.vp.usub.sat.nxv32i8(<vscale x 32 x i8> %va, <vscale x 32 x i8> %b, <vscale x 32 x i1> %m, i32 %evl)
+ ret <vscale x 32 x i8> %v
+}
+
+define <vscale x 32 x i8> @vssubu_vx_nxv32i8(<vscale x 32 x i8> %va, i8 %b, <vscale x 32 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vssubu_vx_nxv32i8:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, ma
+; CHECK-NEXT: vssubu.vx v8, v8, a0, v0.t
+; CHECK-NEXT: ret
+ %elt.head = insertelement <vscale x 32 x i8> poison, i8 %b, i32 0
+ %vb = shufflevector <vscale x 32 x i8> %elt.head, <vscale x 32 x i8> poison, <vscale x 32 x i32> zeroinitializer
+ %v = call <vscale x 32 x i8> @llvm.vp.usub.sat.nxv32i8(<vscale x 32 x i8> %va, <vscale x 32 x i8> %vb, <vscale x 32 x i1> %m, i32 %evl)
+ ret <vscale x 32 x i8> %v
+}
+
+define <vscale x 32 x i8> @vssubu_vx_nxv32i8_unmasked(<vscale x 32 x i8> %va, i8 %b, i32 zeroext %evl) {
+; CHECK-LABEL: vssubu_vx_nxv32i8_unmasked:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, ma
+; CHECK-NEXT: vssubu.vx v8, v8, a0
+; CHECK-NEXT: ret
+ %elt.head = insertelement <vscale x 32 x i8> poison, i8 %b, i32 0
+ %vb = shufflevector <vscale x 32 x i8> %elt.head, <vscale x 32 x i8> poison, <vscale x 32 x i32> zeroinitializer
+ %head = insertelement <vscale x 32 x i1> poison, i1 true, i32 0
+ %m = shufflevector <vscale x 32 x i1> %head, <vscale x 32 x i1> poison, <vscale x 32 x i32> zeroinitializer
+ %v = call <vscale x 32 x i8> @llvm.vp.usub.sat.nxv32i8(<vscale x 32 x i8> %va, <vscale x 32 x i8> %vb, <vscale x 32 x i1> %m, i32 %evl)
+ ret <vscale x 32 x i8> %v
+}
+
+define <vscale x 32 x i8> @vssubu_vi_nxv32i8(<vscale x 32 x i8> %va, <vscale x 32 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vssubu_vi_nxv32i8:
+; CHECK: # %bb.0:
+; CHECK-NEXT: li a1, -1
+; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma
+; CHECK-NEXT: vssubu.vx v8, v8, a1, v0.t
+; CHECK-NEXT: ret
+ %elt.head = insertelement <vscale x 32 x i8> poison, i8 -1, i32 0
+ %vb = shufflevector <vscale x 32 x i8> %elt.head, <vscale x 32 x i8> poison, <vscale x 32 x i32> zeroinitializer
+ %v = call <vscale x 32 x i8> @llvm.vp.usub.sat.nxv32i8(<vscale x 32 x i8> %va, <vscale x 32 x i8> %vb, <vscale x 32 x i1> %m, i32 %evl)
+ ret <vscale x 32 x i8> %v
+}
+
+define <vscale x 32 x i8> @vssubu_vi_nxv32i8_unmasked(<vscale x 32 x i8> %va, i32 zeroext %evl) {
+; CHECK-LABEL: vssubu_vi_nxv32i8_unmasked:
+; CHECK: # %bb.0:
+; CHECK-NEXT: li a1, -1
+; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma
+; CHECK-NEXT: vssubu.vx v8, v8, a1
+; CHECK-NEXT: ret
+ %elt.head = insertelement <vscale x 32 x i8> poison, i8 -1, i32 0
+ %vb = shufflevector <vscale x 32 x i8> %elt.head, <vscale x 32 x i8> poison, <vscale x 32 x i32> zeroinitializer
+ %head = insertelement <vscale x 32 x i1> poison, i1 true, i32 0
+ %m = shufflevector <vscale x 32 x i1> %head, <vscale x 32 x i1> poison, <vscale x 32 x i32> zeroinitializer
+ %v = call <vscale x 32 x i8> @llvm.vp.usub.sat.nxv32i8(<vscale x 32 x i8> %va, <vscale x 32 x i8> %vb, <vscale x 32 x i1> %m, i32 %evl)
+ ret <vscale x 32 x i8> %v
+}
+
+declare <vscale x 64 x i8> @llvm.vp.usub.sat.nxv64i8(<vscale x 64 x i8>, <vscale x 64 x i8>, <vscale x 64 x i1>, i32)
+
+define <vscale x 64 x i8> @vssubu_vv_nxv64i8(<vscale x 64 x i8> %va, <vscale x 64 x i8> %b, <vscale x 64 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vssubu_vv_nxv64i8:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, ma
+; CHECK-NEXT: vssubu.vv v8, v8, v16, v0.t
+; CHECK-NEXT: ret
+ %v = call <vscale x 64 x i8> @llvm.vp.usub.sat.nxv64i8(<vscale x 64 x i8> %va, <vscale x 64 x i8> %b, <vscale x 64 x i1> %m, i32 %evl)
+ ret <vscale x 64 x i8> %v
+}
+
+define <vscale x 64 x i8> @vssubu_vv_nxv64i8_unmasked(<vscale x 64 x i8> %va, <vscale x 64 x i8> %b, i32 zeroext %evl) {
+; CHECK-LABEL: vssubu_vv_nxv64i8_unmasked:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, ma
+; CHECK-NEXT: vssubu.vv v8, v8, v16
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 64 x i1> poison, i1 true, i32 0
+ %m = shufflevector <vscale x 64 x i1> %head, <vscale x 64 x i1> poison, <vscale x 64 x i32> zeroinitializer
+ %v = call <vscale x 64 x i8> @llvm.vp.usub.sat.nxv64i8(<vscale x 64 x i8> %va, <vscale x 64 x i8> %b, <vscale x 64 x i1> %m, i32 %evl)
+ ret <vscale x 64 x i8> %v
+}
+
+define <vscale x 64 x i8> @vssubu_vx_nxv64i8(<vscale x 64 x i8> %va, i8 %b, <vscale x 64 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vssubu_vx_nxv64i8:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, ma
+; CHECK-NEXT: vssubu.vx v8, v8, a0, v0.t
+; CHECK-NEXT: ret
+ %elt.head = insertelement <vscale x 64 x i8> poison, i8 %b, i32 0
+ %vb = shufflevector <vscale x 64 x i8> %elt.head, <vscale x 64 x i8> poison, <vscale x 64 x i32> zeroinitializer
+ %v = call <vscale x 64 x i8> @llvm.vp.usub.sat.nxv64i8(<vscale x 64 x i8> %va, <vscale x 64 x i8> %vb, <vscale x 64 x i1> %m, i32 %evl)
+ ret <vscale x 64 x i8> %v
+}
+
+define <vscale x 64 x i8> @vssubu_vx_nxv64i8_unmasked(<vscale x 64 x i8> %va, i8 %b, i32 zeroext %evl) {
+; CHECK-LABEL: vssubu_vx_nxv64i8_unmasked:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, ma
+; CHECK-NEXT: vssubu.vx v8, v8, a0
+; CHECK-NEXT: ret
+ %elt.head = insertelement <vscale x 64 x i8> poison, i8 %b, i32 0
+ %vb = shufflevector <vscale x 64 x i8> %elt.head, <vscale x 64 x i8> poison, <vscale x 64 x i32> zeroinitializer
+ %head = insertelement <vscale x 64 x i1> poison, i1 true, i32 0
+ %m = shufflevector <vscale x 64 x i1> %head, <vscale x 64 x i1> poison, <vscale x 64 x i32> zeroinitializer
+ %v = call <vscale x 64 x i8> @llvm.vp.usub.sat.nxv64i8(<vscale x 64 x i8> %va, <vscale x 64 x i8> %vb, <vscale x 64 x i1> %m, i32 %evl)
+ ret <vscale x 64 x i8> %v
+}
+
+define <vscale x 64 x i8> @vssubu_vi_nxv64i8(<vscale x 64 x i8> %va, <vscale x 64 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vssubu_vi_nxv64i8:
+; CHECK: # %bb.0:
+; CHECK-NEXT: li a1, -1
+; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, ma
+; CHECK-NEXT: vssubu.vx v8, v8, a1, v0.t
+; CHECK-NEXT: ret
+ %elt.head = insertelement <vscale x 64 x i8> poison, i8 -1, i32 0
+ %vb = shufflevector <vscale x 64 x i8> %elt.head, <vscale x 64 x i8> poison, <vscale x 64 x i32> zeroinitializer
+ %v = call <vscale x 64 x i8> @llvm.vp.usub.sat.nxv64i8(<vscale x 64 x i8> %va, <vscale x 64 x i8> %vb, <vscale x 64 x i1> %m, i32 %evl)
+ ret <vscale x 64 x i8> %v
+}
+
+define <vscale x 64 x i8> @vssubu_vi_nxv64i8_unmasked(<vscale x 64 x i8> %va, i32 zeroext %evl) {
+; CHECK-LABEL: vssubu_vi_nxv64i8_unmasked:
+; CHECK: # %bb.0:
+; CHECK-NEXT: li a1, -1
+; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, ma
+; CHECK-NEXT: vssubu.vx v8, v8, a1
+; CHECK-NEXT: ret
+ %elt.head = insertelement <vscale x 64 x i8> poison, i8 -1, i32 0
+ %vb = shufflevector <vscale x 64 x i8> %elt.head, <vscale x 64 x i8> poison, <vscale x 64 x i32> zeroinitializer
+ %head = insertelement <vscale x 64 x i1> poison, i1 true, i32 0
+ %m = shufflevector <vscale x 64 x i1> %head, <vscale x 64 x i1> poison, <vscale x 64 x i32> zeroinitializer
+ %v = call <vscale x 64 x i8> @llvm.vp.usub.sat.nxv64i8(<vscale x 64 x i8> %va, <vscale x 64 x i8> %vb, <vscale x 64 x i1> %m, i32 %evl)
+ ret <vscale x 64 x i8> %v
+}
+
+; Test that split-legalization works when the mask itself needs splitting.
+
+declare <vscale x 128 x i8> @llvm.vp.usub.sat.nxv128i8(<vscale x 128 x i8>, <vscale x 128 x i8>, <vscale x 128 x i1>, i32)
+
+define <vscale x 128 x i8> @vssubu_vi_nxv128i8(<vscale x 128 x i8> %va, <vscale x 128 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vssubu_vi_nxv128i8:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vmv1r.v v24, v0
+; CHECK-NEXT: vsetvli a2, zero, e8, m8, ta, ma
+; CHECK-NEXT: vlm.v v0, (a0)
+; CHECK-NEXT: csrr a2, vlenb
+; CHECK-NEXT: slli a2, a2, 3
+; CHECK-NEXT: sub a0, a1, a2
+; CHECK-NEXT: sltu a3, a1, a0
+; CHECK-NEXT: addi a3, a3, -1
+; CHECK-NEXT: and a3, a3, a0
+; CHECK-NEXT: li a0, -1
+; CHECK-NEXT: vsetvli zero, a3, e8, m8, ta, ma
+; CHECK-NEXT: vssubu.vx v16, v16, a0, v0.t
+; CHECK-NEXT: bltu a1, a2, .LBB50_2
+; CHECK-NEXT: # %bb.1:
+; CHECK-NEXT: mv a1, a2
+; CHECK-NEXT: .LBB50_2:
+; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, ma
+; CHECK-NEXT: vmv1r.v v0, v24
+; CHECK-NEXT: vssubu.vx v8, v8, a0, v0.t
+; CHECK-NEXT: ret
+ %elt.head = insertelement <vscale x 128 x i8> poison, i8 -1, i32 0
+ %vb = shufflevector <vscale x 128 x i8> %elt.head, <vscale x 128 x i8> poison, <vscale x 128 x i32> zeroinitializer
+ %v = call <vscale x 128 x i8> @llvm.vp.usub.sat.nxv128i8(<vscale x 128 x i8> %va, <vscale x 128 x i8> %vb, <vscale x 128 x i1> %m, i32 %evl)
+ ret <vscale x 128 x i8> %v
+}
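+
+; A note on what the checks above exercise (roughly, as generated by llc): the
+; <vscale x 128 x i1> mask does not fit in a single mask register, so v0 keeps
+; the low half's mask while the high half's mask is loaded with vlm.v. The EVL
+; for the high half is formed by a saturating subtraction (the sltu/addi/and
+; sequence clamps evl - 8*vlenb at zero), and each <vscale x 64 x i8> half is
+; then handled by its own vsetvli/vssubu.vx pair.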
+
+define <vscale x 128 x i8> @vssubu_vi_nxv128i8_unmasked(<vscale x 128 x i8> %va, i32 zeroext %evl) {
+; CHECK-LABEL: vssubu_vi_nxv128i8_unmasked:
+; CHECK: # %bb.0:
+; CHECK-NEXT: csrr a1, vlenb
+; CHECK-NEXT: slli a1, a1, 3
+; CHECK-NEXT: sub a2, a0, a1
+; CHECK-NEXT: sltu a3, a0, a2
+; CHECK-NEXT: addi a3, a3, -1
+; CHECK-NEXT: and a3, a3, a2
+; CHECK-NEXT: li a2, -1
+; CHECK-NEXT: vsetvli zero, a3, e8, m8, ta, ma
+; CHECK-NEXT: vssubu.vx v16, v16, a2
+; CHECK-NEXT: bltu a0, a1, .LBB51_2
+; CHECK-NEXT: # %bb.1:
+; CHECK-NEXT: mv a0, a1
+; CHECK-NEXT: .LBB51_2:
+; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, ma
+; CHECK-NEXT: vssubu.vx v8, v8, a2
+; CHECK-NEXT: ret
+ %elt.head = insertelement <vscale x 128 x i8> poison, i8 -1, i32 0
+ %vb = shufflevector <vscale x 128 x i8> %elt.head, <vscale x 128 x i8> poison, <vscale x 128 x i32> zeroinitializer
+ %head = insertelement <vscale x 128 x i1> poison, i1 true, i32 0
+ %m = shufflevector <vscale x 128 x i1> %head, <vscale x 128 x i1> poison, <vscale x 128 x i32> zeroinitializer
+ %v = call <vscale x 128 x i8> @llvm.vp.usub.sat.nxv128i8(<vscale x 128 x i8> %va, <vscale x 128 x i8> %vb, <vscale x 128 x i1> %m, i32 %evl)
+ ret <vscale x 128 x i8> %v
+}
+
+declare <vscale x 1 x i16> @llvm.vp.usub.sat.nxv1i16(<vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i1>, i32)
+
+define <vscale x 1 x i16> @vssubu_vv_nxv1i16(<vscale x 1 x i16> %va, <vscale x 1 x i16> %b, <vscale x 1 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vssubu_vv_nxv1i16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
+; CHECK-NEXT: vssubu.vv v8, v8, v9, v0.t
+; CHECK-NEXT: ret
+ %v = call <vscale x 1 x i16> @llvm.vp.usub.sat.nxv1i16(<vscale x 1 x i16> %va, <vscale x 1 x i16> %b, <vscale x 1 x i1> %m, i32 %evl)
+ ret <vscale x 1 x i16> %v
+}
+
+define <vscale x 1 x i16> @vssubu_vv_nxv1i16_unmasked(<vscale x 1 x i16> %va, <vscale x 1 x i16> %b, i32 zeroext %evl) {
+; CHECK-LABEL: vssubu_vv_nxv1i16_unmasked:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
+; CHECK-NEXT: vssubu.vv v8, v8, v9
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 1 x i1> poison, i1 true, i32 0
+ %m = shufflevector <vscale x 1 x i1> %head, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
+ %v = call <vscale x 1 x i16> @llvm.vp.usub.sat.nxv1i16(<vscale x 1 x i16> %va, <vscale x 1 x i16> %b, <vscale x 1 x i1> %m, i32 %evl)
+ ret <vscale x 1 x i16> %v
+}
+
+define <vscale x 1 x i16> @vssubu_vx_nxv1i16(<vscale x 1 x i16> %va, i16 %b, <vscale x 1 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vssubu_vx_nxv1i16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
+; CHECK-NEXT: vssubu.vx v8, v8, a0, v0.t
+; CHECK-NEXT: ret
+ %elt.head = insertelement <vscale x 1 x i16> poison, i16 %b, i32 0
+ %vb = shufflevector <vscale x 1 x i16> %elt.head, <vscale x 1 x i16> poison, <vscale x 1 x i32> zeroinitializer
+ %v = call <vscale x 1 x i16> @llvm.vp.usub.sat.nxv1i16(<vscale x 1 x i16> %va, <vscale x 1 x i16> %vb, <vscale x 1 x i1> %m, i32 %evl)
+ ret <vscale x 1 x i16> %v
+}
+
+define <vscale x 1 x i16> @vssubu_vx_nxv1i16_unmasked(<vscale x 1 x i16> %va, i16 %b, i32 zeroext %evl) {
+; CHECK-LABEL: vssubu_vx_nxv1i16_unmasked:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
+; CHECK-NEXT: vssubu.vx v8, v8, a0
+; CHECK-NEXT: ret
+ %elt.head = insertelement <vscale x 1 x i16> poison, i16 %b, i32 0
+ %vb = shufflevector <vscale x 1 x i16> %elt.head, <vscale x 1 x i16> poison, <vscale x 1 x i32> zeroinitializer
+ %head = insertelement <vscale x 1 x i1> poison, i1 true, i32 0
+ %m = shufflevector <vscale x 1 x i1> %head, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
+ %v = call <vscale x 1 x i16> @llvm.vp.usub.sat.nxv1i16(<vscale x 1 x i16> %va, <vscale x 1 x i16> %vb, <vscale x 1 x i1> %m, i32 %evl)
+ ret <vscale x 1 x i16> %v
+}
+
+define <vscale x 1 x i16> @vssubu_vi_nxv1i16(<vscale x 1 x i16> %va, <vscale x 1 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vssubu_vi_nxv1i16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: li a1, -1
+; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
+; CHECK-NEXT: vssubu.vx v8, v8, a1, v0.t
+; CHECK-NEXT: ret
+ %elt.head = insertelement <vscale x 1 x i16> poison, i16 -1, i32 0
+ %vb = shufflevector <vscale x 1 x i16> %elt.head, <vscale x 1 x i16> poison, <vscale x 1 x i32> zeroinitializer
+ %v = call <vscale x 1 x i16> @llvm.vp.usub.sat.nxv1i16(<vscale x 1 x i16> %va, <vscale x 1 x i16> %vb, <vscale x 1 x i1> %m, i32 %evl)
+ ret <vscale x 1 x i16> %v
+}
+
+define <vscale x 1 x i16> @vssubu_vi_nxv1i16_unmasked(<vscale x 1 x i16> %va, i32 zeroext %evl) {
+; CHECK-LABEL: vssubu_vi_nxv1i16_unmasked:
+; CHECK: # %bb.0:
+; CHECK-NEXT: li a1, -1
+; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
+; CHECK-NEXT: vssubu.vx v8, v8, a1
+; CHECK-NEXT: ret
+ %elt.head = insertelement <vscale x 1 x i16> poison, i16 -1, i32 0
+ %vb = shufflevector <vscale x 1 x i16> %elt.head, <vscale x 1 x i16> poison, <vscale x 1 x i32> zeroinitializer
+ %head = insertelement <vscale x 1 x i1> poison, i1 true, i32 0
+ %m = shufflevector <vscale x 1 x i1> %head, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
+ %v = call <vscale x 1 x i16> @llvm.vp.usub.sat.nxv1i16(<vscale x 1 x i16> %va, <vscale x 1 x i16> %vb, <vscale x 1 x i1> %m, i32 %evl)
+ ret <vscale x 1 x i16> %v
+}
+
+declare <vscale x 2 x i16> @llvm.vp.usub.sat.nxv2i16(<vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i1>, i32)
+
+define <vscale x 2 x i16> @vssubu_vv_nxv2i16(<vscale x 2 x i16> %va, <vscale x 2 x i16> %b, <vscale x 2 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vssubu_vv_nxv2i16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma
+; CHECK-NEXT: vssubu.vv v8, v8, v9, v0.t
+; CHECK-NEXT: ret
+ %v = call <vscale x 2 x i16> @llvm.vp.usub.sat.nxv2i16(<vscale x 2 x i16> %va, <vscale x 2 x i16> %b, <vscale x 2 x i1> %m, i32 %evl)
+ ret <vscale x 2 x i16> %v
+}
+
+define <vscale x 2 x i16> @vssubu_vv_nxv2i16_unmasked(<vscale x 2 x i16> %va, <vscale x 2 x i16> %b, i32 zeroext %evl) {
+; CHECK-LABEL: vssubu_vv_nxv2i16_unmasked:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma
+; CHECK-NEXT: vssubu.vv v8, v8, v9
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 2 x i1> poison, i1 true, i32 0
+ %m = shufflevector <vscale x 2 x i1> %head, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
+ %v = call <vscale x 2 x i16> @llvm.vp.usub.sat.nxv2i16(<vscale x 2 x i16> %va, <vscale x 2 x i16> %b, <vscale x 2 x i1> %m, i32 %evl)
+ ret <vscale x 2 x i16> %v
+}
+
+define <vscale x 2 x i16> @vssubu_vx_nxv2i16(<vscale x 2 x i16> %va, i16 %b, <vscale x 2 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vssubu_vx_nxv2i16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
+; CHECK-NEXT: vssubu.vx v8, v8, a0, v0.t
+; CHECK-NEXT: ret
+ %elt.head = insertelement <vscale x 2 x i16> poison, i16 %b, i32 0
+ %vb = shufflevector <vscale x 2 x i16> %elt.head, <vscale x 2 x i16> poison, <vscale x 2 x i32> zeroinitializer
+ %v = call <vscale x 2 x i16> @llvm.vp.usub.sat.nxv2i16(<vscale x 2 x i16> %va, <vscale x 2 x i16> %vb, <vscale x 2 x i1> %m, i32 %evl)
+ ret <vscale x 2 x i16> %v
+}
+
+define <vscale x 2 x i16> @vssubu_vx_nxv2i16_unmasked(<vscale x 2 x i16> %va, i16 %b, i32 zeroext %evl) {
+; CHECK-LABEL: vssubu_vx_nxv2i16_unmasked:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
+; CHECK-NEXT: vssubu.vx v8, v8, a0
+; CHECK-NEXT: ret
+ %elt.head = insertelement <vscale x 2 x i16> poison, i16 %b, i32 0
+ %vb = shufflevector <vscale x 2 x i16> %elt.head, <vscale x 2 x i16> poison, <vscale x 2 x i32> zeroinitializer
+ %head = insertelement <vscale x 2 x i1> poison, i1 true, i32 0
+ %m = shufflevector <vscale x 2 x i1> %head, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
+ %v = call <vscale x 2 x i16> @llvm.vp.usub.sat.nxv2i16(<vscale x 2 x i16> %va, <vscale x 2 x i16> %vb, <vscale x 2 x i1> %m, i32 %evl)
+ ret <vscale x 2 x i16> %v
+}
+
+define <vscale x 2 x i16> @vssubu_vi_nxv2i16(<vscale x 2 x i16> %va, <vscale x 2 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vssubu_vi_nxv2i16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: li a1, -1
+; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma
+; CHECK-NEXT: vssubu.vx v8, v8, a1, v0.t
+; CHECK-NEXT: ret
+ %elt.head = insertelement <vscale x 2 x i16> poison, i16 -1, i32 0
+ %vb = shufflevector <vscale x 2 x i16> %elt.head, <vscale x 2 x i16> poison, <vscale x 2 x i32> zeroinitializer
+ %v = call <vscale x 2 x i16> @llvm.vp.usub.sat.nxv2i16(<vscale x 2 x i16> %va, <vscale x 2 x i16> %vb, <vscale x 2 x i1> %m, i32 %evl)
+ ret <vscale x 2 x i16> %v
+}
+
+define <vscale x 2 x i16> @vssubu_vi_nxv2i16_unmasked(<vscale x 2 x i16> %va, i32 zeroext %evl) {
+; CHECK-LABEL: vssubu_vi_nxv2i16_unmasked:
+; CHECK: # %bb.0:
+; CHECK-NEXT: li a1, -1
+; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma
+; CHECK-NEXT: vssubu.vx v8, v8, a1
+; CHECK-NEXT: ret
+ %elt.head = insertelement <vscale x 2 x i16> poison, i16 -1, i32 0
+ %vb = shufflevector <vscale x 2 x i16> %elt.head, <vscale x 2 x i16> poison, <vscale x 2 x i32> zeroinitializer
+ %head = insertelement <vscale x 2 x i1> poison, i1 true, i32 0
+ %m = shufflevector <vscale x 2 x i1> %head, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
+ %v = call <vscale x 2 x i16> @llvm.vp.usub.sat.nxv2i16(<vscale x 2 x i16> %va, <vscale x 2 x i16> %vb, <vscale x 2 x i1> %m, i32 %evl)
+ ret <vscale x 2 x i16> %v
+}
+
+declare <vscale x 4 x i16> @llvm.vp.usub.sat.nxv4i16(<vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i1>, i32)
+
+define <vscale x 4 x i16> @vssubu_vv_nxv4i16(<vscale x 4 x i16> %va, <vscale x 4 x i16> %b, <vscale x 4 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vssubu_vv_nxv4i16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma
+; CHECK-NEXT: vssubu.vv v8, v8, v9, v0.t
+; CHECK-NEXT: ret
+ %v = call <vscale x 4 x i16> @llvm.vp.usub.sat.nxv4i16(<vscale x 4 x i16> %va, <vscale x 4 x i16> %b, <vscale x 4 x i1> %m, i32 %evl)
+ ret <vscale x 4 x i16> %v
+}
+
+define <vscale x 4 x i16> @vssubu_vv_nxv4i16_unmasked(<vscale x 4 x i16> %va, <vscale x 4 x i16> %b, i32 zeroext %evl) {
+; CHECK-LABEL: vssubu_vv_nxv4i16_unmasked:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma
+; CHECK-NEXT: vssubu.vv v8, v8, v9
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 4 x i1> poison, i1 true, i32 0
+ %m = shufflevector <vscale x 4 x i1> %head, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
+ %v = call <vscale x 4 x i16> @llvm.vp.usub.sat.nxv4i16(<vscale x 4 x i16> %va, <vscale x 4 x i16> %b, <vscale x 4 x i1> %m, i32 %evl)
+ ret <vscale x 4 x i16> %v
+}
+
+define <vscale x 4 x i16> @vssubu_vx_nxv4i16(<vscale x 4 x i16> %va, i16 %b, <vscale x 4 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vssubu_vx_nxv4i16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
+; CHECK-NEXT: vssubu.vx v8, v8, a0, v0.t
+; CHECK-NEXT: ret
+ %elt.head = insertelement <vscale x 4 x i16> poison, i16 %b, i32 0
+ %vb = shufflevector <vscale x 4 x i16> %elt.head, <vscale x 4 x i16> poison, <vscale x 4 x i32> zeroinitializer
+ %v = call <vscale x 4 x i16> @llvm.vp.usub.sat.nxv4i16(<vscale x 4 x i16> %va, <vscale x 4 x i16> %vb, <vscale x 4 x i1> %m, i32 %evl)
+ ret <vscale x 4 x i16> %v
+}
+
+define <vscale x 4 x i16> @vssubu_vx_nxv4i16_unmasked(<vscale x 4 x i16> %va, i16 %b, i32 zeroext %evl) {
+; CHECK-LABEL: vssubu_vx_nxv4i16_unmasked:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
+; CHECK-NEXT: vssubu.vx v8, v8, a0
+; CHECK-NEXT: ret
+ %elt.head = insertelement <vscale x 4 x i16> poison, i16 %b, i32 0
+ %vb = shufflevector <vscale x 4 x i16> %elt.head, <vscale x 4 x i16> poison, <vscale x 4 x i32> zeroinitializer
+ %head = insertelement <vscale x 4 x i1> poison, i1 true, i32 0
+ %m = shufflevector <vscale x 4 x i1> %head, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
+ %v = call <vscale x 4 x i16> @llvm.vp.usub.sat.nxv4i16(<vscale x 4 x i16> %va, <vscale x 4 x i16> %vb, <vscale x 4 x i1> %m, i32 %evl)
+ ret <vscale x 4 x i16> %v
+}
+
+define <vscale x 4 x i16> @vssubu_vi_nxv4i16(<vscale x 4 x i16> %va, <vscale x 4 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vssubu_vi_nxv4i16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: li a1, -1
+; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma
+; CHECK-NEXT: vssubu.vx v8, v8, a1, v0.t
+; CHECK-NEXT: ret
+ %elt.head = insertelement <vscale x 4 x i16> poison, i16 -1, i32 0
+ %vb = shufflevector <vscale x 4 x i16> %elt.head, <vscale x 4 x i16> poison, <vscale x 4 x i32> zeroinitializer
+ %v = call <vscale x 4 x i16> @llvm.vp.usub.sat.nxv4i16(<vscale x 4 x i16> %va, <vscale x 4 x i16> %vb, <vscale x 4 x i1> %m, i32 %evl)
+ ret <vscale x 4 x i16> %v
+}
+
+define <vscale x 4 x i16> @vssubu_vi_nxv4i16_unmasked(<vscale x 4 x i16> %va, i32 zeroext %evl) {
+; CHECK-LABEL: vssubu_vi_nxv4i16_unmasked:
+; CHECK: # %bb.0:
+; CHECK-NEXT: li a1, -1
+; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma
+; CHECK-NEXT: vssubu.vx v8, v8, a1
+; CHECK-NEXT: ret
+ %elt.head = insertelement <vscale x 4 x i16> poison, i16 -1, i32 0
+ %vb = shufflevector <vscale x 4 x i16> %elt.head, <vscale x 4 x i16> poison, <vscale x 4 x i32> zeroinitializer
+ %head = insertelement <vscale x 4 x i1> poison, i1 true, i32 0
+ %m = shufflevector <vscale x 4 x i1> %head, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
+ %v = call <vscale x 4 x i16> @llvm.vp.usub.sat.nxv4i16(<vscale x 4 x i16> %va, <vscale x 4 x i16> %vb, <vscale x 4 x i1> %m, i32 %evl)
+ ret <vscale x 4 x i16> %v
+}
+
+declare <vscale x 8 x i16> @llvm.vp.usub.sat.nxv8i16(<vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i1>, i32)
+
+define <vscale x 8 x i16> @vssubu_vv_nxv8i16(<vscale x 8 x i16> %va, <vscale x 8 x i16> %b, <vscale x 8 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vssubu_vv_nxv8i16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma
+; CHECK-NEXT: vssubu.vv v8, v8, v10, v0.t
+; CHECK-NEXT: ret
+ %v = call <vscale x 8 x i16> @llvm.vp.usub.sat.nxv8i16(<vscale x 8 x i16> %va, <vscale x 8 x i16> %b, <vscale x 8 x i1> %m, i32 %evl)
+ ret <vscale x 8 x i16> %v
+}
+
+define <vscale x 8 x i16> @vssubu_vv_nxv8i16_unmasked(<vscale x 8 x i16> %va, <vscale x 8 x i16> %b, i32 zeroext %evl) {
+; CHECK-LABEL: vssubu_vv_nxv8i16_unmasked:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma
+; CHECK-NEXT: vssubu.vv v8, v8, v10
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i1> poison, i1 true, i32 0
+ %m = shufflevector <vscale x 8 x i1> %head, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
+ %v = call <vscale x 8 x i16> @llvm.vp.usub.sat.nxv8i16(<vscale x 8 x i16> %va, <vscale x 8 x i16> %b, <vscale x 8 x i1> %m, i32 %evl)
+ ret <vscale x 8 x i16> %v
+}
+
+define <vscale x 8 x i16> @vssubu_vx_nxv8i16(<vscale x 8 x i16> %va, i16 %b, <vscale x 8 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vssubu_vx_nxv8i16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma
+; CHECK-NEXT: vssubu.vx v8, v8, a0, v0.t
+; CHECK-NEXT: ret
+ %elt.head = insertelement <vscale x 8 x i16> poison, i16 %b, i32 0
+ %vb = shufflevector <vscale x 8 x i16> %elt.head, <vscale x 8 x i16> poison, <vscale x 8 x i32> zeroinitializer
+ %v = call <vscale x 8 x i16> @llvm.vp.usub.sat.nxv8i16(<vscale x 8 x i16> %va, <vscale x 8 x i16> %vb, <vscale x 8 x i1> %m, i32 %evl)
+ ret <vscale x 8 x i16> %v
+}
+
+define <vscale x 8 x i16> @vssubu_vx_nxv8i16_unmasked(<vscale x 8 x i16> %va, i16 %b, i32 zeroext %evl) {
+; CHECK-LABEL: vssubu_vx_nxv8i16_unmasked:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma
+; CHECK-NEXT: vssubu.vx v8, v8, a0
+; CHECK-NEXT: ret
+ %elt.head = insertelement <vscale x 8 x i16> poison, i16 %b, i32 0
+ %vb = shufflevector <vscale x 8 x i16> %elt.head, <vscale x 8 x i16> poison, <vscale x 8 x i32> zeroinitializer
+ %head = insertelement <vscale x 8 x i1> poison, i1 true, i32 0
+ %m = shufflevector <vscale x 8 x i1> %head, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
+ %v = call <vscale x 8 x i16> @llvm.vp.usub.sat.nxv8i16(<vscale x 8 x i16> %va, <vscale x 8 x i16> %vb, <vscale x 8 x i1> %m, i32 %evl)
+ ret <vscale x 8 x i16> %v
+}
+
+define <vscale x 8 x i16> @vssubu_vi_nxv8i16(<vscale x 8 x i16> %va, <vscale x 8 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vssubu_vi_nxv8i16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: li a1, -1
+; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma
+; CHECK-NEXT: vssubu.vx v8, v8, a1, v0.t
+; CHECK-NEXT: ret
+ %elt.head = insertelement <vscale x 8 x i16> poison, i16 -1, i32 0
+ %vb = shufflevector <vscale x 8 x i16> %elt.head, <vscale x 8 x i16> poison, <vscale x 8 x i32> zeroinitializer
+ %v = call <vscale x 8 x i16> @llvm.vp.usub.sat.nxv8i16(<vscale x 8 x i16> %va, <vscale x 8 x i16> %vb, <vscale x 8 x i1> %m, i32 %evl)
+ ret <vscale x 8 x i16> %v
+}
+
+define <vscale x 8 x i16> @vssubu_vi_nxv8i16_unmasked(<vscale x 8 x i16> %va, i32 zeroext %evl) {
+; CHECK-LABEL: vssubu_vi_nxv8i16_unmasked:
+; CHECK: # %bb.0:
+; CHECK-NEXT: li a1, -1
+; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma
+; CHECK-NEXT: vssubu.vx v8, v8, a1
+; CHECK-NEXT: ret
+ %elt.head = insertelement <vscale x 8 x i16> poison, i16 -1, i32 0
+ %vb = shufflevector <vscale x 8 x i16> %elt.head, <vscale x 8 x i16> poison, <vscale x 8 x i32> zeroinitializer
+ %head = insertelement <vscale x 8 x i1> poison, i1 true, i32 0
+ %m = shufflevector <vscale x 8 x i1> %head, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
+ %v = call <vscale x 8 x i16> @llvm.vp.usub.sat.nxv8i16(<vscale x 8 x i16> %va, <vscale x 8 x i16> %vb, <vscale x 8 x i1> %m, i32 %evl)
+ ret <vscale x 8 x i16> %v
+}
+
+declare <vscale x 16 x i16> @llvm.vp.usub.sat.nxv16i16(<vscale x 16 x i16>, <vscale x 16 x i16>, <vscale x 16 x i1>, i32)
+
+define <vscale x 16 x i16> @vssubu_vv_nxv16i16(<vscale x 16 x i16> %va, <vscale x 16 x i16> %b, <vscale x 16 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vssubu_vv_nxv16i16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma
+; CHECK-NEXT: vssubu.vv v8, v8, v12, v0.t
+; CHECK-NEXT: ret
+ %v = call <vscale x 16 x i16> @llvm.vp.usub.sat.nxv16i16(<vscale x 16 x i16> %va, <vscale x 16 x i16> %b, <vscale x 16 x i1> %m, i32 %evl)
+ ret <vscale x 16 x i16> %v
+}
+
+define <vscale x 16 x i16> @vssubu_vv_nxv16i16_unmasked(<vscale x 16 x i16> %va, <vscale x 16 x i16> %b, i32 zeroext %evl) {
+; CHECK-LABEL: vssubu_vv_nxv16i16_unmasked:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma
+; CHECK-NEXT: vssubu.vv v8, v8, v12
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 16 x i1> poison, i1 true, i32 0
+ %m = shufflevector <vscale x 16 x i1> %head, <vscale x 16 x i1> poison, <vscale x 16 x i32> zeroinitializer
+ %v = call <vscale x 16 x i16> @llvm.vp.usub.sat.nxv16i16(<vscale x 16 x i16> %va, <vscale x 16 x i16> %b, <vscale x 16 x i1> %m, i32 %evl)
+ ret <vscale x 16 x i16> %v
+}
+
+define <vscale x 16 x i16> @vssubu_vx_nxv16i16(<vscale x 16 x i16> %va, i16 %b, <vscale x 16 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vssubu_vx_nxv16i16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma
+; CHECK-NEXT: vssubu.vx v8, v8, a0, v0.t
+; CHECK-NEXT: ret
+ %elt.head = insertelement <vscale x 16 x i16> poison, i16 %b, i32 0
+ %vb = shufflevector <vscale x 16 x i16> %elt.head, <vscale x 16 x i16> poison, <vscale x 16 x i32> zeroinitializer
+ %v = call <vscale x 16 x i16> @llvm.vp.usub.sat.nxv16i16(<vscale x 16 x i16> %va, <vscale x 16 x i16> %vb, <vscale x 16 x i1> %m, i32 %evl)
+ ret <vscale x 16 x i16> %v
+}
+
+define <vscale x 16 x i16> @vssubu_vx_nxv16i16_unmasked(<vscale x 16 x i16> %va, i16 %b, i32 zeroext %evl) {
+; CHECK-LABEL: vssubu_vx_nxv16i16_unmasked:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma
+; CHECK-NEXT: vssubu.vx v8, v8, a0
+; CHECK-NEXT: ret
+ %elt.head = insertelement <vscale x 16 x i16> poison, i16 %b, i32 0
+ %vb = shufflevector <vscale x 16 x i16> %elt.head, <vscale x 16 x i16> poison, <vscale x 16 x i32> zeroinitializer
+ %head = insertelement <vscale x 16 x i1> poison, i1 true, i32 0
+ %m = shufflevector <vscale x 16 x i1> %head, <vscale x 16 x i1> poison, <vscale x 16 x i32> zeroinitializer
+ %v = call <vscale x 16 x i16> @llvm.vp.usub.sat.nxv16i16(<vscale x 16 x i16> %va, <vscale x 16 x i16> %vb, <vscale x 16 x i1> %m, i32 %evl)
+ ret <vscale x 16 x i16> %v
+}
+
+define <vscale x 16 x i16> @vssubu_vi_nxv16i16(<vscale x 16 x i16> %va, <vscale x 16 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vssubu_vi_nxv16i16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: li a1, -1
+; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma
+; CHECK-NEXT: vssubu.vx v8, v8, a1, v0.t
+; CHECK-NEXT: ret
+ %elt.head = insertelement <vscale x 16 x i16> poison, i16 -1, i32 0
+ %vb = shufflevector <vscale x 16 x i16> %elt.head, <vscale x 16 x i16> poison, <vscale x 16 x i32> zeroinitializer
+ %v = call <vscale x 16 x i16> @llvm.vp.usub.sat.nxv16i16(<vscale x 16 x i16> %va, <vscale x 16 x i16> %vb, <vscale x 16 x i1> %m, i32 %evl)
+ ret <vscale x 16 x i16> %v
+}
+
+define <vscale x 16 x i16> @vssubu_vi_nxv16i16_unmasked(<vscale x 16 x i16> %va, i32 zeroext %evl) {
+; CHECK-LABEL: vssubu_vi_nxv16i16_unmasked:
+; CHECK: # %bb.0:
+; CHECK-NEXT: li a1, -1
+; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma
+; CHECK-NEXT: vssubu.vx v8, v8, a1
+; CHECK-NEXT: ret
+ %elt.head = insertelement <vscale x 16 x i16> poison, i16 -1, i32 0
+ %vb = shufflevector <vscale x 16 x i16> %elt.head, <vscale x 16 x i16> poison, <vscale x 16 x i32> zeroinitializer
+ %head = insertelement <vscale x 16 x i1> poison, i1 true, i32 0
+ %m = shufflevector <vscale x 16 x i1> %head, <vscale x 16 x i1> poison, <vscale x 16 x i32> zeroinitializer
+ %v = call <vscale x 16 x i16> @llvm.vp.usub.sat.nxv16i16(<vscale x 16 x i16> %va, <vscale x 16 x i16> %vb, <vscale x 16 x i1> %m, i32 %evl)
+ ret <vscale x 16 x i16> %v
+}
+
+declare <vscale x 32 x i16> @llvm.vp.usub.sat.nxv32i16(<vscale x 32 x i16>, <vscale x 32 x i16>, <vscale x 32 x i1>, i32)
+
+define <vscale x 32 x i16> @vssubu_vv_nxv32i16(<vscale x 32 x i16> %va, <vscale x 32 x i16> %b, <vscale x 32 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vssubu_vv_nxv32i16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, ma
+; CHECK-NEXT: vssubu.vv v8, v8, v16, v0.t
+; CHECK-NEXT: ret
+ %v = call <vscale x 32 x i16> @llvm.vp.usub.sat.nxv32i16(<vscale x 32 x i16> %va, <vscale x 32 x i16> %b, <vscale x 32 x i1> %m, i32 %evl)
+ ret <vscale x 32 x i16> %v
+}
+
+define <vscale x 32 x i16> @vssubu_vv_nxv32i16_unmasked(<vscale x 32 x i16> %va, <vscale x 32 x i16> %b, i32 zeroext %evl) {
+; CHECK-LABEL: vssubu_vv_nxv32i16_unmasked:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, ma
+; CHECK-NEXT: vssubu.vv v8, v8, v16
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 32 x i1> poison, i1 true, i32 0
+ %m = shufflevector <vscale x 32 x i1> %head, <vscale x 32 x i1> poison, <vscale x 32 x i32> zeroinitializer
+ %v = call <vscale x 32 x i16> @llvm.vp.usub.sat.nxv32i16(<vscale x 32 x i16> %va, <vscale x 32 x i16> %b, <vscale x 32 x i1> %m, i32 %evl)
+ ret <vscale x 32 x i16> %v
+}
+
+define <vscale x 32 x i16> @vssubu_vx_nxv32i16(<vscale x 32 x i16> %va, i16 %b, <vscale x 32 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vssubu_vx_nxv32i16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, ma
+; CHECK-NEXT: vssubu.vx v8, v8, a0, v0.t
+; CHECK-NEXT: ret
+ %elt.head = insertelement <vscale x 32 x i16> poison, i16 %b, i32 0
+ %vb = shufflevector <vscale x 32 x i16> %elt.head, <vscale x 32 x i16> poison, <vscale x 32 x i32> zeroinitializer
+ %v = call <vscale x 32 x i16> @llvm.vp.usub.sat.nxv32i16(<vscale x 32 x i16> %va, <vscale x 32 x i16> %vb, <vscale x 32 x i1> %m, i32 %evl)
+ ret <vscale x 32 x i16> %v
+}
+
+define <vscale x 32 x i16> @vssubu_vx_nxv32i16_unmasked(<vscale x 32 x i16> %va, i16 %b, i32 zeroext %evl) {
+; CHECK-LABEL: vssubu_vx_nxv32i16_unmasked:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, ma
+; CHECK-NEXT: vssubu.vx v8, v8, a0
+; CHECK-NEXT: ret
+ %elt.head = insertelement <vscale x 32 x i16> poison, i16 %b, i32 0
+ %vb = shufflevector <vscale x 32 x i16> %elt.head, <vscale x 32 x i16> poison, <vscale x 32 x i32> zeroinitializer
+ %head = insertelement <vscale x 32 x i1> poison, i1 true, i32 0
+ %m = shufflevector <vscale x 32 x i1> %head, <vscale x 32 x i1> poison, <vscale x 32 x i32> zeroinitializer
+ %v = call <vscale x 32 x i16> @llvm.vp.usub.sat.nxv32i16(<vscale x 32 x i16> %va, <vscale x 32 x i16> %vb, <vscale x 32 x i1> %m, i32 %evl)
+ ret <vscale x 32 x i16> %v
+}
+
+define <vscale x 32 x i16> @vssubu_vi_nxv32i16(<vscale x 32 x i16> %va, <vscale x 32 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vssubu_vi_nxv32i16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: li a1, -1
+; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, ma
+; CHECK-NEXT: vssubu.vx v8, v8, a1, v0.t
+; CHECK-NEXT: ret
+ %elt.head = insertelement <vscale x 32 x i16> poison, i16 -1, i32 0
+ %vb = shufflevector <vscale x 32 x i16> %elt.head, <vscale x 32 x i16> poison, <vscale x 32 x i32> zeroinitializer
+ %v = call <vscale x 32 x i16> @llvm.vp.usub.sat.nxv32i16(<vscale x 32 x i16> %va, <vscale x 32 x i16> %vb, <vscale x 32 x i1> %m, i32 %evl)
+ ret <vscale x 32 x i16> %v
+}
+
+define <vscale x 32 x i16> @vssubu_vi_nxv32i16_unmasked(<vscale x 32 x i16> %va, i32 zeroext %evl) {
+; CHECK-LABEL: vssubu_vi_nxv32i16_unmasked:
+; CHECK: # %bb.0:
+; CHECK-NEXT: li a1, -1
+; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, ma
+; CHECK-NEXT: vssubu.vx v8, v8, a1
+; CHECK-NEXT: ret
+ %elt.head = insertelement <vscale x 32 x i16> poison, i16 -1, i32 0
+ %vb = shufflevector <vscale x 32 x i16> %elt.head, <vscale x 32 x i16> poison, <vscale x 32 x i32> zeroinitializer
+ %head = insertelement <vscale x 32 x i1> poison, i1 true, i32 0
+ %m = shufflevector <vscale x 32 x i1> %head, <vscale x 32 x i1> poison, <vscale x 32 x i32> zeroinitializer
+ %v = call <vscale x 32 x i16> @llvm.vp.usub.sat.nxv32i16(<vscale x 32 x i16> %va, <vscale x 32 x i16> %vb, <vscale x 32 x i1> %m, i32 %evl)
+ ret <vscale x 32 x i16> %v
+}
+
+declare <vscale x 1 x i32> @llvm.vp.usub.sat.nxv1i32(<vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i1>, i32)
+
+define <vscale x 1 x i32> @vssubu_vv_nxv1i32(<vscale x 1 x i32> %va, <vscale x 1 x i32> %b, <vscale x 1 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vssubu_vv_nxv1i32:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma
+; CHECK-NEXT: vssubu.vv v8, v8, v9, v0.t
+; CHECK-NEXT: ret
+ %v = call <vscale x 1 x i32> @llvm.vp.usub.sat.nxv1i32(<vscale x 1 x i32> %va, <vscale x 1 x i32> %b, <vscale x 1 x i1> %m, i32 %evl)
+ ret <vscale x 1 x i32> %v
+}
+
+define <vscale x 1 x i32> @vssubu_vv_nxv1i32_unmasked(<vscale x 1 x i32> %va, <vscale x 1 x i32> %b, i32 zeroext %evl) {
+; CHECK-LABEL: vssubu_vv_nxv1i32_unmasked:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma
+; CHECK-NEXT: vssubu.vv v8, v8, v9
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 1 x i1> poison, i1 true, i32 0
+ %m = shufflevector <vscale x 1 x i1> %head, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
+ %v = call <vscale x 1 x i32> @llvm.vp.usub.sat.nxv1i32(<vscale x 1 x i32> %va, <vscale x 1 x i32> %b, <vscale x 1 x i1> %m, i32 %evl)
+ ret <vscale x 1 x i32> %v
+}
+
+define <vscale x 1 x i32> @vssubu_vx_nxv1i32(<vscale x 1 x i32> %va, i32 %b, <vscale x 1 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vssubu_vx_nxv1i32:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
+; CHECK-NEXT: vssubu.vx v8, v8, a0, v0.t
+; CHECK-NEXT: ret
+ %elt.head = insertelement <vscale x 1 x i32> poison, i32 %b, i32 0
+ %vb = shufflevector <vscale x 1 x i32> %elt.head, <vscale x 1 x i32> poison, <vscale x 1 x i32> zeroinitializer
+ %v = call <vscale x 1 x i32> @llvm.vp.usub.sat.nxv1i32(<vscale x 1 x i32> %va, <vscale x 1 x i32> %vb, <vscale x 1 x i1> %m, i32 %evl)
+ ret <vscale x 1 x i32> %v
+}
+
+define <vscale x 1 x i32> @vssubu_vx_nxv1i32_unmasked(<vscale x 1 x i32> %va, i32 %b, i32 zeroext %evl) {
+; CHECK-LABEL: vssubu_vx_nxv1i32_unmasked:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
+; CHECK-NEXT: vssubu.vx v8, v8, a0
+; CHECK-NEXT: ret
+ %elt.head = insertelement <vscale x 1 x i32> poison, i32 %b, i32 0
+ %vb = shufflevector <vscale x 1 x i32> %elt.head, <vscale x 1 x i32> poison, <vscale x 1 x i32> zeroinitializer
+ %head = insertelement <vscale x 1 x i1> poison, i1 true, i32 0
+ %m = shufflevector <vscale x 1 x i1> %head, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
+ %v = call <vscale x 1 x i32> @llvm.vp.usub.sat.nxv1i32(<vscale x 1 x i32> %va, <vscale x 1 x i32> %vb, <vscale x 1 x i1> %m, i32 %evl)
+ ret <vscale x 1 x i32> %v
+}
+
+define <vscale x 1 x i32> @vssubu_vi_nxv1i32(<vscale x 1 x i32> %va, <vscale x 1 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vssubu_vi_nxv1i32:
+; CHECK: # %bb.0:
+; CHECK-NEXT: li a1, -1
+; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma
+; CHECK-NEXT: vssubu.vx v8, v8, a1, v0.t
+; CHECK-NEXT: ret
+ %elt.head = insertelement <vscale x 1 x i32> poison, i32 -1, i32 0
+ %vb = shufflevector <vscale x 1 x i32> %elt.head, <vscale x 1 x i32> poison, <vscale x 1 x i32> zeroinitializer
+ %v = call <vscale x 1 x i32> @llvm.vp.usub.sat.nxv1i32(<vscale x 1 x i32> %va, <vscale x 1 x i32> %vb, <vscale x 1 x i1> %m, i32 %evl)
+ ret <vscale x 1 x i32> %v
+}
+
+define <vscale x 1 x i32> @vssubu_vi_nxv1i32_unmasked(<vscale x 1 x i32> %va, i32 zeroext %evl) {
+; CHECK-LABEL: vssubu_vi_nxv1i32_unmasked:
+; CHECK: # %bb.0:
+; CHECK-NEXT: li a1, -1
+; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma
+; CHECK-NEXT: vssubu.vx v8, v8, a1
+; CHECK-NEXT: ret
+ %elt.head = insertelement <vscale x 1 x i32> poison, i32 -1, i32 0
+ %vb = shufflevector <vscale x 1 x i32> %elt.head, <vscale x 1 x i32> poison, <vscale x 1 x i32> zeroinitializer
+ %head = insertelement <vscale x 1 x i1> poison, i1 true, i32 0
+ %m = shufflevector <vscale x 1 x i1> %head, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
+ %v = call <vscale x 1 x i32> @llvm.vp.usub.sat.nxv1i32(<vscale x 1 x i32> %va, <vscale x 1 x i32> %vb, <vscale x 1 x i1> %m, i32 %evl)
+ ret <vscale x 1 x i32> %v
+}
+
+declare <vscale x 2 x i32> @llvm.vp.usub.sat.nxv2i32(<vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i1>, i32)
+
+define <vscale x 2 x i32> @vssubu_vv_nxv2i32(<vscale x 2 x i32> %va, <vscale x 2 x i32> %b, <vscale x 2 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vssubu_vv_nxv2i32:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma
+; CHECK-NEXT: vssubu.vv v8, v8, v9, v0.t
+; CHECK-NEXT: ret
+ %v = call <vscale x 2 x i32> @llvm.vp.usub.sat.nxv2i32(<vscale x 2 x i32> %va, <vscale x 2 x i32> %b, <vscale x 2 x i1> %m, i32 %evl)
+ ret <vscale x 2 x i32> %v
+}
+
+define <vscale x 2 x i32> @vssubu_vv_nxv2i32_unmasked(<vscale x 2 x i32> %va, <vscale x 2 x i32> %b, i32 zeroext %evl) {
+; CHECK-LABEL: vssubu_vv_nxv2i32_unmasked:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma
+; CHECK-NEXT: vssubu.vv v8, v8, v9
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 2 x i1> poison, i1 true, i32 0
+ %m = shufflevector <vscale x 2 x i1> %head, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
+ %v = call <vscale x 2 x i32> @llvm.vp.usub.sat.nxv2i32(<vscale x 2 x i32> %va, <vscale x 2 x i32> %b, <vscale x 2 x i1> %m, i32 %evl)
+ ret <vscale x 2 x i32> %v
+}
+
+define <vscale x 2 x i32> @vssubu_vx_nxv2i32(<vscale x 2 x i32> %va, i32 %b, <vscale x 2 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vssubu_vx_nxv2i32:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
+; CHECK-NEXT: vssubu.vx v8, v8, a0, v0.t
+; CHECK-NEXT: ret
+ %elt.head = insertelement <vscale x 2 x i32> poison, i32 %b, i32 0
+ %vb = shufflevector <vscale x 2 x i32> %elt.head, <vscale x 2 x i32> poison, <vscale x 2 x i32> zeroinitializer
+ %v = call <vscale x 2 x i32> @llvm.vp.usub.sat.nxv2i32(<vscale x 2 x i32> %va, <vscale x 2 x i32> %vb, <vscale x 2 x i1> %m, i32 %evl)
+ ret <vscale x 2 x i32> %v
+}
+
+define <vscale x 2 x i32> @vssubu_vx_nxv2i32_unmasked(<vscale x 2 x i32> %va, i32 %b, i32 zeroext %evl) {
+; CHECK-LABEL: vssubu_vx_nxv2i32_unmasked:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
+; CHECK-NEXT: vssubu.vx v8, v8, a0
+; CHECK-NEXT: ret
+ %elt.head = insertelement <vscale x 2 x i32> poison, i32 %b, i32 0
+ %vb = shufflevector <vscale x 2 x i32> %elt.head, <vscale x 2 x i32> poison, <vscale x 2 x i32> zeroinitializer
+ %head = insertelement <vscale x 2 x i1> poison, i1 true, i32 0
+ %m = shufflevector <vscale x 2 x i1> %head, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
+ %v = call <vscale x 2 x i32> @llvm.vp.usub.sat.nxv2i32(<vscale x 2 x i32> %va, <vscale x 2 x i32> %vb, <vscale x 2 x i1> %m, i32 %evl)
+ ret <vscale x 2 x i32> %v
+}
+
+define <vscale x 2 x i32> @vssubu_vi_nxv2i32(<vscale x 2 x i32> %va, <vscale x 2 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vssubu_vi_nxv2i32:
+; CHECK: # %bb.0:
+; CHECK-NEXT: li a1, -1
+; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma
+; CHECK-NEXT: vssubu.vx v8, v8, a1, v0.t
+; CHECK-NEXT: ret
+ %elt.head = insertelement <vscale x 2 x i32> poison, i32 -1, i32 0
+ %vb = shufflevector <vscale x 2 x i32> %elt.head, <vscale x 2 x i32> poison, <vscale x 2 x i32> zeroinitializer
+ %v = call <vscale x 2 x i32> @llvm.vp.usub.sat.nxv2i32(<vscale x 2 x i32> %va, <vscale x 2 x i32> %vb, <vscale x 2 x i1> %m, i32 %evl)
+ ret <vscale x 2 x i32> %v
+}
+
+define <vscale x 2 x i32> @vssubu_vi_nxv2i32_unmasked(<vscale x 2 x i32> %va, i32 zeroext %evl) {
+; CHECK-LABEL: vssubu_vi_nxv2i32_unmasked:
+; CHECK: # %bb.0:
+; CHECK-NEXT: li a1, -1
+; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma
+; CHECK-NEXT: vssubu.vx v8, v8, a1
+; CHECK-NEXT: ret
+ %elt.head = insertelement <vscale x 2 x i32> poison, i32 -1, i32 0
+ %vb = shufflevector <vscale x 2 x i32> %elt.head, <vscale x 2 x i32> poison, <vscale x 2 x i32> zeroinitializer
+ %head = insertelement <vscale x 2 x i1> poison, i1 true, i32 0
+ %m = shufflevector <vscale x 2 x i1> %head, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
+ %v = call <vscale x 2 x i32> @llvm.vp.usub.sat.nxv2i32(<vscale x 2 x i32> %va, <vscale x 2 x i32> %vb, <vscale x 2 x i1> %m, i32 %evl)
+ ret <vscale x 2 x i32> %v
+}
+
+declare <vscale x 4 x i32> @llvm.vp.usub.sat.nxv4i32(<vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i1>, i32)
+
+define <vscale x 4 x i32> @vssubu_vv_nxv4i32(<vscale x 4 x i32> %va, <vscale x 4 x i32> %b, <vscale x 4 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vssubu_vv_nxv4i32:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma
+; CHECK-NEXT: vssubu.vv v8, v8, v10, v0.t
+; CHECK-NEXT: ret
+ %v = call <vscale x 4 x i32> @llvm.vp.usub.sat.nxv4i32(<vscale x 4 x i32> %va, <vscale x 4 x i32> %b, <vscale x 4 x i1> %m, i32 %evl)
+ ret <vscale x 4 x i32> %v
+}
+
+define <vscale x 4 x i32> @vssubu_vv_nxv4i32_unmasked(<vscale x 4 x i32> %va, <vscale x 4 x i32> %b, i32 zeroext %evl) {
+; CHECK-LABEL: vssubu_vv_nxv4i32_unmasked:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma
+; CHECK-NEXT: vssubu.vv v8, v8, v10
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 4 x i1> poison, i1 true, i32 0
+ %m = shufflevector <vscale x 4 x i1> %head, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
+ %v = call <vscale x 4 x i32> @llvm.vp.usub.sat.nxv4i32(<vscale x 4 x i32> %va, <vscale x 4 x i32> %b, <vscale x 4 x i1> %m, i32 %evl)
+ ret <vscale x 4 x i32> %v
+}
+
+define <vscale x 4 x i32> @vssubu_vx_nxv4i32(<vscale x 4 x i32> %va, i32 %b, <vscale x 4 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vssubu_vx_nxv4i32:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma
+; CHECK-NEXT: vssubu.vx v8, v8, a0, v0.t
+; CHECK-NEXT: ret
+ %elt.head = insertelement <vscale x 4 x i32> poison, i32 %b, i32 0
+ %vb = shufflevector <vscale x 4 x i32> %elt.head, <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer
+ %v = call <vscale x 4 x i32> @llvm.vp.usub.sat.nxv4i32(<vscale x 4 x i32> %va, <vscale x 4 x i32> %vb, <vscale x 4 x i1> %m, i32 %evl)
+ ret <vscale x 4 x i32> %v
+}
+
+define <vscale x 4 x i32> @vssubu_vx_nxv4i32_unmasked(<vscale x 4 x i32> %va, i32 %b, i32 zeroext %evl) {
+; CHECK-LABEL: vssubu_vx_nxv4i32_unmasked:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma
+; CHECK-NEXT: vssubu.vx v8, v8, a0
+; CHECK-NEXT: ret
+ %elt.head = insertelement <vscale x 4 x i32> poison, i32 %b, i32 0
+ %vb = shufflevector <vscale x 4 x i32> %elt.head, <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer
+ %head = insertelement <vscale x 4 x i1> poison, i1 true, i32 0
+ %m = shufflevector <vscale x 4 x i1> %head, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
+ %v = call <vscale x 4 x i32> @llvm.vp.usub.sat.nxv4i32(<vscale x 4 x i32> %va, <vscale x 4 x i32> %vb, <vscale x 4 x i1> %m, i32 %evl)
+ ret <vscale x 4 x i32> %v
+}
+
+define <vscale x 4 x i32> @vssubu_vi_nxv4i32(<vscale x 4 x i32> %va, <vscale x 4 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vssubu_vi_nxv4i32:
+; CHECK: # %bb.0:
+; CHECK-NEXT: li a1, -1
+; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma
+; CHECK-NEXT: vssubu.vx v8, v8, a1, v0.t
+; CHECK-NEXT: ret
+ %elt.head = insertelement <vscale x 4 x i32> poison, i32 -1, i32 0
+ %vb = shufflevector <vscale x 4 x i32> %elt.head, <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer
+ %v = call <vscale x 4 x i32> @llvm.vp.usub.sat.nxv4i32(<vscale x 4 x i32> %va, <vscale x 4 x i32> %vb, <vscale x 4 x i1> %m, i32 %evl)
+ ret <vscale x 4 x i32> %v
+}
+
+define <vscale x 4 x i32> @vssubu_vi_nxv4i32_unmasked(<vscale x 4 x i32> %va, i32 zeroext %evl) {
+; CHECK-LABEL: vssubu_vi_nxv4i32_unmasked:
+; CHECK: # %bb.0:
+; CHECK-NEXT: li a1, -1
+; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma
+; CHECK-NEXT: vssubu.vx v8, v8, a1
+; CHECK-NEXT: ret
+ %elt.head = insertelement <vscale x 4 x i32> poison, i32 -1, i32 0
+ %vb = shufflevector <vscale x 4 x i32> %elt.head, <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer
+ %head = insertelement <vscale x 4 x i1> poison, i1 true, i32 0
+ %m = shufflevector <vscale x 4 x i1> %head, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
+ %v = call <vscale x 4 x i32> @llvm.vp.usub.sat.nxv4i32(<vscale x 4 x i32> %va, <vscale x 4 x i32> %vb, <vscale x 4 x i1> %m, i32 %evl)
+ ret <vscale x 4 x i32> %v
+}
+
+declare <vscale x 8 x i32> @llvm.vp.usub.sat.nxv8i32(<vscale x 8 x i32>, <vscale x 8 x i32>, <vscale x 8 x i1>, i32)
+
+define <vscale x 8 x i32> @vssubu_vv_nxv8i32(<vscale x 8 x i32> %va, <vscale x 8 x i32> %b, <vscale x 8 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vssubu_vv_nxv8i32:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma
+; CHECK-NEXT: vssubu.vv v8, v8, v12, v0.t
+; CHECK-NEXT: ret
+ %v = call <vscale x 8 x i32> @llvm.vp.usub.sat.nxv8i32(<vscale x 8 x i32> %va, <vscale x 8 x i32> %b, <vscale x 8 x i1> %m, i32 %evl)
+ ret <vscale x 8 x i32> %v
+}
+
+define <vscale x 8 x i32> @vssubu_vv_nxv8i32_unmasked(<vscale x 8 x i32> %va, <vscale x 8 x i32> %b, i32 zeroext %evl) {
+; CHECK-LABEL: vssubu_vv_nxv8i32_unmasked:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma
+; CHECK-NEXT: vssubu.vv v8, v8, v12
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i1> poison, i1 true, i32 0
+ %m = shufflevector <vscale x 8 x i1> %head, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
+ %v = call <vscale x 8 x i32> @llvm.vp.usub.sat.nxv8i32(<vscale x 8 x i32> %va, <vscale x 8 x i32> %b, <vscale x 8 x i1> %m, i32 %evl)
+ ret <vscale x 8 x i32> %v
+}
+
+define <vscale x 8 x i32> @vssubu_vx_nxv8i32(<vscale x 8 x i32> %va, i32 %b, <vscale x 8 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vssubu_vx_nxv8i32:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma
+; CHECK-NEXT: vssubu.vx v8, v8, a0, v0.t
+; CHECK-NEXT: ret
+ %elt.head = insertelement <vscale x 8 x i32> poison, i32 %b, i32 0
+ %vb = shufflevector <vscale x 8 x i32> %elt.head, <vscale x 8 x i32> poison, <vscale x 8 x i32> zeroinitializer
+ %v = call <vscale x 8 x i32> @llvm.vp.usub.sat.nxv8i32(<vscale x 8 x i32> %va, <vscale x 8 x i32> %vb, <vscale x 8 x i1> %m, i32 %evl)
+ ret <vscale x 8 x i32> %v
+}
+
+define <vscale x 8 x i32> @vssubu_vx_nxv8i32_unmasked(<vscale x 8 x i32> %va, i32 %b, i32 zeroext %evl) {
+; CHECK-LABEL: vssubu_vx_nxv8i32_unmasked:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma
+; CHECK-NEXT: vssubu.vx v8, v8, a0
+; CHECK-NEXT: ret
+ %elt.head = insertelement <vscale x 8 x i32> poison, i32 %b, i32 0
+ %vb = shufflevector <vscale x 8 x i32> %elt.head, <vscale x 8 x i32> poison, <vscale x 8 x i32> zeroinitializer
+ %head = insertelement <vscale x 8 x i1> poison, i1 true, i32 0
+ %m = shufflevector <vscale x 8 x i1> %head, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
+ %v = call <vscale x 8 x i32> @llvm.vp.usub.sat.nxv8i32(<vscale x 8 x i32> %va, <vscale x 8 x i32> %vb, <vscale x 8 x i1> %m, i32 %evl)
+ ret <vscale x 8 x i32> %v
+}
+
+define <vscale x 8 x i32> @vssubu_vi_nxv8i32(<vscale x 8 x i32> %va, <vscale x 8 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vssubu_vi_nxv8i32:
+; CHECK: # %bb.0:
+; CHECK-NEXT: li a1, -1
+; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma
+; CHECK-NEXT: vssubu.vx v8, v8, a1, v0.t
+; CHECK-NEXT: ret
+ %elt.head = insertelement <vscale x 8 x i32> poison, i32 -1, i32 0
+ %vb = shufflevector <vscale x 8 x i32> %elt.head, <vscale x 8 x i32> poison, <vscale x 8 x i32> zeroinitializer
+ %v = call <vscale x 8 x i32> @llvm.vp.usub.sat.nxv8i32(<vscale x 8 x i32> %va, <vscale x 8 x i32> %vb, <vscale x 8 x i1> %m, i32 %evl)
+ ret <vscale x 8 x i32> %v
+}
+
+define <vscale x 8 x i32> @vssubu_vi_nxv8i32_unmasked(<vscale x 8 x i32> %va, i32 zeroext %evl) {
+; CHECK-LABEL: vssubu_vi_nxv8i32_unmasked:
+; CHECK: # %bb.0:
+; CHECK-NEXT: li a1, -1
+; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma
+; CHECK-NEXT: vssubu.vx v8, v8, a1
+; CHECK-NEXT: ret
+ %elt.head = insertelement <vscale x 8 x i32> poison, i32 -1, i32 0
+ %vb = shufflevector <vscale x 8 x i32> %elt.head, <vscale x 8 x i32> poison, <vscale x 8 x i32> zeroinitializer
+ %head = insertelement <vscale x 8 x i1> poison, i1 true, i32 0
+ %m = shufflevector <vscale x 8 x i1> %head, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
+ %v = call <vscale x 8 x i32> @llvm.vp.usub.sat.nxv8i32(<vscale x 8 x i32> %va, <vscale x 8 x i32> %vb, <vscale x 8 x i1> %m, i32 %evl)
+ ret <vscale x 8 x i32> %v
+}
+
+declare <vscale x 16 x i32> @llvm.vp.usub.sat.nxv16i32(<vscale x 16 x i32>, <vscale x 16 x i32>, <vscale x 16 x i1>, i32)
+
+define <vscale x 16 x i32> @vssubu_vv_nxv16i32(<vscale x 16 x i32> %va, <vscale x 16 x i32> %b, <vscale x 16 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vssubu_vv_nxv16i32:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma
+; CHECK-NEXT: vssubu.vv v8, v8, v16, v0.t
+; CHECK-NEXT: ret
+ %v = call <vscale x 16 x i32> @llvm.vp.usub.sat.nxv16i32(<vscale x 16 x i32> %va, <vscale x 16 x i32> %b, <vscale x 16 x i1> %m, i32 %evl)
+ ret <vscale x 16 x i32> %v
+}
+
+define <vscale x 16 x i32> @vssubu_vv_nxv16i32_unmasked(<vscale x 16 x i32> %va, <vscale x 16 x i32> %b, i32 zeroext %evl) {
+; CHECK-LABEL: vssubu_vv_nxv16i32_unmasked:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma
+; CHECK-NEXT: vssubu.vv v8, v8, v16
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 16 x i1> poison, i1 true, i32 0
+ %m = shufflevector <vscale x 16 x i1> %head, <vscale x 16 x i1> poison, <vscale x 16 x i32> zeroinitializer
+ %v = call <vscale x 16 x i32> @llvm.vp.usub.sat.nxv16i32(<vscale x 16 x i32> %va, <vscale x 16 x i32> %b, <vscale x 16 x i1> %m, i32 %evl)
+ ret <vscale x 16 x i32> %v
+}
+
+define <vscale x 16 x i32> @vssubu_vx_nxv16i32(<vscale x 16 x i32> %va, i32 %b, <vscale x 16 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vssubu_vx_nxv16i32:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, ma
+; CHECK-NEXT: vssubu.vx v8, v8, a0, v0.t
+; CHECK-NEXT: ret
+ %elt.head = insertelement <vscale x 16 x i32> poison, i32 %b, i32 0
+ %vb = shufflevector <vscale x 16 x i32> %elt.head, <vscale x 16 x i32> poison, <vscale x 16 x i32> zeroinitializer
+ %v = call <vscale x 16 x i32> @llvm.vp.usub.sat.nxv16i32(<vscale x 16 x i32> %va, <vscale x 16 x i32> %vb, <vscale x 16 x i1> %m, i32 %evl)
+ ret <vscale x 16 x i32> %v
+}
+
+define <vscale x 16 x i32> @vssubu_vx_nxv16i32_unmasked(<vscale x 16 x i32> %va, i32 %b, i32 zeroext %evl) {
+; CHECK-LABEL: vssubu_vx_nxv16i32_unmasked:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, ma
+; CHECK-NEXT: vssubu.vx v8, v8, a0
+; CHECK-NEXT: ret
+ %elt.head = insertelement <vscale x 16 x i32> poison, i32 %b, i32 0
+ %vb = shufflevector <vscale x 16 x i32> %elt.head, <vscale x 16 x i32> poison, <vscale x 16 x i32> zeroinitializer
+ %head = insertelement <vscale x 16 x i1> poison, i1 true, i32 0
+ %m = shufflevector <vscale x 16 x i1> %head, <vscale x 16 x i1> poison, <vscale x 16 x i32> zeroinitializer
+ %v = call <vscale x 16 x i32> @llvm.vp.usub.sat.nxv16i32(<vscale x 16 x i32> %va, <vscale x 16 x i32> %vb, <vscale x 16 x i1> %m, i32 %evl)
+ ret <vscale x 16 x i32> %v
+}
+
+define <vscale x 16 x i32> @vssubu_vi_nxv16i32(<vscale x 16 x i32> %va, <vscale x 16 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vssubu_vi_nxv16i32:
+; CHECK: # %bb.0:
+; CHECK-NEXT: li a1, -1
+; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma
+; CHECK-NEXT: vssubu.vx v8, v8, a1, v0.t
+; CHECK-NEXT: ret
+ %elt.head = insertelement <vscale x 16 x i32> poison, i32 -1, i32 0
+ %vb = shufflevector <vscale x 16 x i32> %elt.head, <vscale x 16 x i32> poison, <vscale x 16 x i32> zeroinitializer
+ %v = call <vscale x 16 x i32> @llvm.vp.usub.sat.nxv16i32(<vscale x 16 x i32> %va, <vscale x 16 x i32> %vb, <vscale x 16 x i1> %m, i32 %evl)
+ ret <vscale x 16 x i32> %v
+}
+
+define <vscale x 16 x i32> @vssubu_vi_nxv16i32_unmasked(<vscale x 16 x i32> %va, i32 zeroext %evl) {
+; CHECK-LABEL: vssubu_vi_nxv16i32_unmasked:
+; CHECK: # %bb.0:
+; CHECK-NEXT: li a1, -1
+; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma
+; CHECK-NEXT: vssubu.vx v8, v8, a1
+; CHECK-NEXT: ret
+ %elt.head = insertelement <vscale x 16 x i32> poison, i32 -1, i32 0
+ %vb = shufflevector <vscale x 16 x i32> %elt.head, <vscale x 16 x i32> poison, <vscale x 16 x i32> zeroinitializer
+ %head = insertelement <vscale x 16 x i1> poison, i1 true, i32 0
+ %m = shufflevector <vscale x 16 x i1> %head, <vscale x 16 x i1> poison, <vscale x 16 x i32> zeroinitializer
+ %v = call <vscale x 16 x i32> @llvm.vp.usub.sat.nxv16i32(<vscale x 16 x i32> %va, <vscale x 16 x i32> %vb, <vscale x 16 x i1> %m, i32 %evl)
+ ret <vscale x 16 x i32> %v
+}
+
+; Test that split-legalization works when the mask needs manual splitting.
+
+declare <vscale x 32 x i32> @llvm.vp.usub.sat.nxv32i32(<vscale x 32 x i32>, <vscale x 32 x i32>, <vscale x 32 x i1>, i32)
+
+define <vscale x 32 x i32> @vssubu_vi_nxv32i32(<vscale x 32 x i32> %va, <vscale x 32 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vssubu_vi_nxv32i32:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vmv1r.v v24, v0
+; CHECK-NEXT: csrr a2, vlenb
+; CHECK-NEXT: srli a1, a2, 2
+; CHECK-NEXT: vsetvli a3, zero, e8, mf2, ta, ma
+; CHECK-NEXT: vslidedown.vx v0, v0, a1
+; CHECK-NEXT: slli a2, a2, 1
+; CHECK-NEXT: sub a1, a0, a2
+; CHECK-NEXT: sltu a3, a0, a1
+; CHECK-NEXT: addi a3, a3, -1
+; CHECK-NEXT: and a3, a3, a1
+; CHECK-NEXT: li a1, -1
+; CHECK-NEXT: vsetvli zero, a3, e32, m8, ta, ma
+; CHECK-NEXT: vssubu.vx v16, v16, a1, v0.t
+; CHECK-NEXT: bltu a0, a2, .LBB118_2
+; CHECK-NEXT: # %bb.1:
+; CHECK-NEXT: mv a0, a2
+; CHECK-NEXT: .LBB118_2:
+; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma
+; CHECK-NEXT: vmv1r.v v0, v24
+; CHECK-NEXT: vssubu.vx v8, v8, a1, v0.t
+; CHECK-NEXT: ret
+ %elt.head = insertelement <vscale x 32 x i32> poison, i32 -1, i32 0
+ %vb = shufflevector <vscale x 32 x i32> %elt.head, <vscale x 32 x i32> poison, <vscale x 32 x i32> zeroinitializer
+ %v = call <vscale x 32 x i32> @llvm.vp.usub.sat.nxv32i32(<vscale x 32 x i32> %va, <vscale x 32 x i32> %vb, <vscale x 32 x i1> %m, i32 %evl)
+ ret <vscale x 32 x i32> %v
+}
+
+define <vscale x 32 x i32> @vssubu_vi_nxv32i32_unmasked(<vscale x 32 x i32> %va, i32 zeroext %evl) {
+; CHECK-LABEL: vssubu_vi_nxv32i32_unmasked:
+; CHECK: # %bb.0:
+; CHECK-NEXT: csrr a1, vlenb
+; CHECK-NEXT: slli a1, a1, 1
+; CHECK-NEXT: sub a2, a0, a1
+; CHECK-NEXT: sltu a3, a0, a2
+; CHECK-NEXT: addi a3, a3, -1
+; CHECK-NEXT: and a3, a3, a2
+; CHECK-NEXT: li a2, -1
+; CHECK-NEXT: vsetvli zero, a3, e32, m8, ta, ma
+; CHECK-NEXT: vssubu.vx v16, v16, a2
+; CHECK-NEXT: bltu a0, a1, .LBB119_2
+; CHECK-NEXT: # %bb.1:
+; CHECK-NEXT: mv a0, a1
+; CHECK-NEXT: .LBB119_2:
+; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma
+; CHECK-NEXT: vssubu.vx v8, v8, a2
+; CHECK-NEXT: ret
+ %elt.head = insertelement <vscale x 32 x i32> poison, i32 -1, i32 0
+ %vb = shufflevector <vscale x 32 x i32> %elt.head, <vscale x 32 x i32> poison, <vscale x 32 x i32> zeroinitializer
+ %head = insertelement <vscale x 32 x i1> poison, i1 true, i32 0
+ %m = shufflevector <vscale x 32 x i1> %head, <vscale x 32 x i1> poison, <vscale x 32 x i32> zeroinitializer
+ %v = call <vscale x 32 x i32> @llvm.vp.usub.sat.nxv32i32(<vscale x 32 x i32> %va, <vscale x 32 x i32> %vb, <vscale x 32 x i1> %m, i32 %evl)
+ ret <vscale x 32 x i32> %v
+}
+
+declare <vscale x 1 x i64> @llvm.vp.usub.sat.nxv1i64(<vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i1>, i32)
+
+define <vscale x 1 x i64> @vssubu_vv_nxv1i64(<vscale x 1 x i64> %va, <vscale x 1 x i64> %b, <vscale x 1 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vssubu_vv_nxv1i64:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma
+; CHECK-NEXT: vssubu.vv v8, v8, v9, v0.t
+; CHECK-NEXT: ret
+ %v = call <vscale x 1 x i64> @llvm.vp.usub.sat.nxv1i64(<vscale x 1 x i64> %va, <vscale x 1 x i64> %b, <vscale x 1 x i1> %m, i32 %evl)
+ ret <vscale x 1 x i64> %v
+}
+
+define <vscale x 1 x i64> @vssubu_vv_nxv1i64_unmasked(<vscale x 1 x i64> %va, <vscale x 1 x i64> %b, i32 zeroext %evl) {
+; CHECK-LABEL: vssubu_vv_nxv1i64_unmasked:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma
+; CHECK-NEXT: vssubu.vv v8, v8, v9
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 1 x i1> poison, i1 true, i32 0
+ %m = shufflevector <vscale x 1 x i1> %head, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
+ %v = call <vscale x 1 x i64> @llvm.vp.usub.sat.nxv1i64(<vscale x 1 x i64> %va, <vscale x 1 x i64> %b, <vscale x 1 x i1> %m, i32 %evl)
+ ret <vscale x 1 x i64> %v
+}
+
+define <vscale x 1 x i64> @vssubu_vx_nxv1i64(<vscale x 1 x i64> %va, i64 %b, <vscale x 1 x i1> %m, i32 zeroext %evl) {
+; RV32-LABEL: vssubu_vx_nxv1i64:
+; RV32: # %bb.0:
+; RV32-NEXT: addi sp, sp, -16
+; RV32-NEXT: .cfi_def_cfa_offset 16
+; RV32-NEXT: sw a1, 12(sp)
+; RV32-NEXT: sw a0, 8(sp)
+; RV32-NEXT: addi a0, sp, 8
+; RV32-NEXT: vsetvli a1, zero, e64, m1, ta, ma
+; RV32-NEXT: vlse64.v v9, (a0), zero
+; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, ma
+; RV32-NEXT: vssubu.vv v8, v8, v9, v0.t
+; RV32-NEXT: addi sp, sp, 16
+; RV32-NEXT: ret
+;
+; RV64-LABEL: vssubu_vx_nxv1i64:
+; RV64: # %bb.0:
+; RV64-NEXT: vsetvli zero, a1, e64, m1, ta, ma
+; RV64-NEXT: vssubu.vx v8, v8, a0, v0.t
+; RV64-NEXT: ret
+ %elt.head = insertelement <vscale x 1 x i64> poison, i64 %b, i32 0
+ %vb = shufflevector <vscale x 1 x i64> %elt.head, <vscale x 1 x i64> poison, <vscale x 1 x i32> zeroinitializer
+ %v = call <vscale x 1 x i64> @llvm.vp.usub.sat.nxv1i64(<vscale x 1 x i64> %va, <vscale x 1 x i64> %vb, <vscale x 1 x i1> %m, i32 %evl)
+ ret <vscale x 1 x i64> %v
+}
+
+define <vscale x 1 x i64> @vssubu_vx_nxv1i64_unmasked(<vscale x 1 x i64> %va, i64 %b, i32 zeroext %evl) {
+; RV32-LABEL: vssubu_vx_nxv1i64_unmasked:
+; RV32: # %bb.0:
+; RV32-NEXT: addi sp, sp, -16
+; RV32-NEXT: .cfi_def_cfa_offset 16
+; RV32-NEXT: sw a1, 12(sp)
+; RV32-NEXT: sw a0, 8(sp)
+; RV32-NEXT: addi a0, sp, 8
+; RV32-NEXT: vsetvli a1, zero, e64, m1, ta, ma
+; RV32-NEXT: vlse64.v v9, (a0), zero
+; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, ma
+; RV32-NEXT: vssubu.vv v8, v8, v9
+; RV32-NEXT: addi sp, sp, 16
+; RV32-NEXT: ret
+;
+; RV64-LABEL: vssubu_vx_nxv1i64_unmasked:
+; RV64: # %bb.0:
+; RV64-NEXT: vsetvli zero, a1, e64, m1, ta, ma
+; RV64-NEXT: vssubu.vx v8, v8, a0
+; RV64-NEXT: ret
+ %elt.head = insertelement <vscale x 1 x i64> poison, i64 %b, i32 0
+ %vb = shufflevector <vscale x 1 x i64> %elt.head, <vscale x 1 x i64> poison, <vscale x 1 x i32> zeroinitializer
+ %head = insertelement <vscale x 1 x i1> poison, i1 true, i32 0
+ %m = shufflevector <vscale x 1 x i1> %head, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
+ %v = call <vscale x 1 x i64> @llvm.vp.usub.sat.nxv1i64(<vscale x 1 x i64> %va, <vscale x 1 x i64> %vb, <vscale x 1 x i1> %m, i32 %evl)
+ ret <vscale x 1 x i64> %v
+}
+
+define <vscale x 1 x i64> @vssubu_vi_nxv1i64(<vscale x 1 x i64> %va, <vscale x 1 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vssubu_vi_nxv1i64:
+; CHECK: # %bb.0:
+; CHECK-NEXT: li a1, -1
+; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma
+; CHECK-NEXT: vssubu.vx v8, v8, a1, v0.t
+; CHECK-NEXT: ret
+ %elt.head = insertelement <vscale x 1 x i64> poison, i64 -1, i32 0
+ %vb = shufflevector <vscale x 1 x i64> %elt.head, <vscale x 1 x i64> poison, <vscale x 1 x i32> zeroinitializer
+ %v = call <vscale x 1 x i64> @llvm.vp.usub.sat.nxv1i64(<vscale x 1 x i64> %va, <vscale x 1 x i64> %vb, <vscale x 1 x i1> %m, i32 %evl)
+ ret <vscale x 1 x i64> %v
+}
+
+define <vscale x 1 x i64> @vssubu_vi_nxv1i64_unmasked(<vscale x 1 x i64> %va, i32 zeroext %evl) {
+; CHECK-LABEL: vssubu_vi_nxv1i64_unmasked:
+; CHECK: # %bb.0:
+; CHECK-NEXT: li a1, -1
+; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma
+; CHECK-NEXT: vssubu.vx v8, v8, a1
+; CHECK-NEXT: ret
+ %elt.head = insertelement <vscale x 1 x i64> poison, i64 -1, i32 0
+ %vb = shufflevector <vscale x 1 x i64> %elt.head, <vscale x 1 x i64> poison, <vscale x 1 x i32> zeroinitializer
+ %head = insertelement <vscale x 1 x i1> poison, i1 true, i32 0
+ %m = shufflevector <vscale x 1 x i1> %head, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
+ %v = call <vscale x 1 x i64> @llvm.vp.usub.sat.nxv1i64(<vscale x 1 x i64> %va, <vscale x 1 x i64> %vb, <vscale x 1 x i1> %m, i32 %evl)
+ ret <vscale x 1 x i64> %v
+}
+
+declare <vscale x 2 x i64> @llvm.vp.usub.sat.nxv2i64(<vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i1>, i32)
+
+define <vscale x 2 x i64> @vssubu_vv_nxv2i64(<vscale x 2 x i64> %va, <vscale x 2 x i64> %b, <vscale x 2 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vssubu_vv_nxv2i64:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma
+; CHECK-NEXT: vssubu.vv v8, v8, v10, v0.t
+; CHECK-NEXT: ret
+ %v = call <vscale x 2 x i64> @llvm.vp.usub.sat.nxv2i64(<vscale x 2 x i64> %va, <vscale x 2 x i64> %b, <vscale x 2 x i1> %m, i32 %evl)
+ ret <vscale x 2 x i64> %v
+}
+
+define <vscale x 2 x i64> @vssubu_vv_nxv2i64_unmasked(<vscale x 2 x i64> %va, <vscale x 2 x i64> %b, i32 zeroext %evl) {
+; CHECK-LABEL: vssubu_vv_nxv2i64_unmasked:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma
+; CHECK-NEXT: vssubu.vv v8, v8, v10
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 2 x i1> poison, i1 true, i32 0
+ %m = shufflevector <vscale x 2 x i1> %head, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
+ %v = call <vscale x 2 x i64> @llvm.vp.usub.sat.nxv2i64(<vscale x 2 x i64> %va, <vscale x 2 x i64> %b, <vscale x 2 x i1> %m, i32 %evl)
+ ret <vscale x 2 x i64> %v
+}
+
+define <vscale x 2 x i64> @vssubu_vx_nxv2i64(<vscale x 2 x i64> %va, i64 %b, <vscale x 2 x i1> %m, i32 zeroext %evl) {
+; RV32-LABEL: vssubu_vx_nxv2i64:
+; RV32: # %bb.0:
+; RV32-NEXT: addi sp, sp, -16
+; RV32-NEXT: .cfi_def_cfa_offset 16
+; RV32-NEXT: sw a1, 12(sp)
+; RV32-NEXT: sw a0, 8(sp)
+; RV32-NEXT: addi a0, sp, 8
+; RV32-NEXT: vsetvli a1, zero, e64, m2, ta, ma
+; RV32-NEXT: vlse64.v v10, (a0), zero
+; RV32-NEXT: vsetvli zero, a2, e64, m2, ta, ma
+; RV32-NEXT: vssubu.vv v8, v8, v10, v0.t
+; RV32-NEXT: addi sp, sp, 16
+; RV32-NEXT: ret
+;
+; RV64-LABEL: vssubu_vx_nxv2i64:
+; RV64: # %bb.0:
+; RV64-NEXT: vsetvli zero, a1, e64, m2, ta, ma
+; RV64-NEXT: vssubu.vx v8, v8, a0, v0.t
+; RV64-NEXT: ret
+ %elt.head = insertelement <vscale x 2 x i64> poison, i64 %b, i32 0
+ %vb = shufflevector <vscale x 2 x i64> %elt.head, <vscale x 2 x i64> poison, <vscale x 2 x i32> zeroinitializer
+ %v = call <vscale x 2 x i64> @llvm.vp.usub.sat.nxv2i64(<vscale x 2 x i64> %va, <vscale x 2 x i64> %vb, <vscale x 2 x i1> %m, i32 %evl)
+ ret <vscale x 2 x i64> %v
+}
+
+define <vscale x 2 x i64> @vssubu_vx_nxv2i64_unmasked(<vscale x 2 x i64> %va, i64 %b, i32 zeroext %evl) {
+; RV32-LABEL: vssubu_vx_nxv2i64_unmasked:
+; RV32: # %bb.0:
+; RV32-NEXT: addi sp, sp, -16
+; RV32-NEXT: .cfi_def_cfa_offset 16
+; RV32-NEXT: sw a1, 12(sp)
+; RV32-NEXT: sw a0, 8(sp)
+; RV32-NEXT: addi a0, sp, 8
+; RV32-NEXT: vsetvli a1, zero, e64, m2, ta, ma
+; RV32-NEXT: vlse64.v v10, (a0), zero
+; RV32-NEXT: vsetvli zero, a2, e64, m2, ta, ma
+; RV32-NEXT: vssubu.vv v8, v8, v10
+; RV32-NEXT: addi sp, sp, 16
+; RV32-NEXT: ret
+;
+; RV64-LABEL: vssubu_vx_nxv2i64_unmasked:
+; RV64: # %bb.0:
+; RV64-NEXT: vsetvli zero, a1, e64, m2, ta, ma
+; RV64-NEXT: vssubu.vx v8, v8, a0
+; RV64-NEXT: ret
+ %elt.head = insertelement <vscale x 2 x i64> poison, i64 %b, i32 0
+ %vb = shufflevector <vscale x 2 x i64> %elt.head, <vscale x 2 x i64> poison, <vscale x 2 x i32> zeroinitializer
+ %head = insertelement <vscale x 2 x i1> poison, i1 true, i32 0
+ %m = shufflevector <vscale x 2 x i1> %head, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
+ %v = call <vscale x 2 x i64> @llvm.vp.usub.sat.nxv2i64(<vscale x 2 x i64> %va, <vscale x 2 x i64> %vb, <vscale x 2 x i1> %m, i32 %evl)
+ ret <vscale x 2 x i64> %v
+}
+
+define <vscale x 2 x i64> @vssubu_vi_nxv2i64(<vscale x 2 x i64> %va, <vscale x 2 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vssubu_vi_nxv2i64:
+; CHECK: # %bb.0:
+; CHECK-NEXT: li a1, -1
+; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma
+; CHECK-NEXT: vssubu.vx v8, v8, a1, v0.t
+; CHECK-NEXT: ret
+ %elt.head = insertelement <vscale x 2 x i64> poison, i64 -1, i32 0
+ %vb = shufflevector <vscale x 2 x i64> %elt.head, <vscale x 2 x i64> poison, <vscale x 2 x i32> zeroinitializer
+ %v = call <vscale x 2 x i64> @llvm.vp.usub.sat.nxv2i64(<vscale x 2 x i64> %va, <vscale x 2 x i64> %vb, <vscale x 2 x i1> %m, i32 %evl)
+ ret <vscale x 2 x i64> %v
+}
+
+define <vscale x 2 x i64> @vssubu_vi_nxv2i64_unmasked(<vscale x 2 x i64> %va, i32 zeroext %evl) {
+; CHECK-LABEL: vssubu_vi_nxv2i64_unmasked:
+; CHECK: # %bb.0:
+; CHECK-NEXT: li a1, -1
+; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma
+; CHECK-NEXT: vssubu.vx v8, v8, a1
+; CHECK-NEXT: ret
+ %elt.head = insertelement <vscale x 2 x i64> poison, i64 -1, i32 0
+ %vb = shufflevector <vscale x 2 x i64> %elt.head, <vscale x 2 x i64> poison, <vscale x 2 x i32> zeroinitializer
+ %head = insertelement <vscale x 2 x i1> poison, i1 true, i32 0
+ %m = shufflevector <vscale x 2 x i1> %head, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
+ %v = call <vscale x 2 x i64> @llvm.vp.usub.sat.nxv2i64(<vscale x 2 x i64> %va, <vscale x 2 x i64> %vb, <vscale x 2 x i1> %m, i32 %evl)
+ ret <vscale x 2 x i64> %v
+}
+
+declare <vscale x 4 x i64> @llvm.vp.usub.sat.nxv4i64(<vscale x 4 x i64>, <vscale x 4 x i64>, <vscale x 4 x i1>, i32)
+
+define <vscale x 4 x i64> @vssubu_vv_nxv4i64(<vscale x 4 x i64> %va, <vscale x 4 x i64> %b, <vscale x 4 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vssubu_vv_nxv4i64:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma
+; CHECK-NEXT: vssubu.vv v8, v8, v12, v0.t
+; CHECK-NEXT: ret
+ %v = call <vscale x 4 x i64> @llvm.vp.usub.sat.nxv4i64(<vscale x 4 x i64> %va, <vscale x 4 x i64> %b, <vscale x 4 x i1> %m, i32 %evl)
+ ret <vscale x 4 x i64> %v
+}
+
+define <vscale x 4 x i64> @vssubu_vv_nxv4i64_unmasked(<vscale x 4 x i64> %va, <vscale x 4 x i64> %b, i32 zeroext %evl) {
+; CHECK-LABEL: vssubu_vv_nxv4i64_unmasked:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma
+; CHECK-NEXT: vssubu.vv v8, v8, v12
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 4 x i1> poison, i1 true, i32 0
+ %m = shufflevector <vscale x 4 x i1> %head, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
+ %v = call <vscale x 4 x i64> @llvm.vp.usub.sat.nxv4i64(<vscale x 4 x i64> %va, <vscale x 4 x i64> %b, <vscale x 4 x i1> %m, i32 %evl)
+ ret <vscale x 4 x i64> %v
+}
+
+define <vscale x 4 x i64> @vssubu_vx_nxv4i64(<vscale x 4 x i64> %va, i64 %b, <vscale x 4 x i1> %m, i32 zeroext %evl) {
+; RV32-LABEL: vssubu_vx_nxv4i64:
+; RV32: # %bb.0:
+; RV32-NEXT: addi sp, sp, -16
+; RV32-NEXT: .cfi_def_cfa_offset 16
+; RV32-NEXT: sw a1, 12(sp)
+; RV32-NEXT: sw a0, 8(sp)
+; RV32-NEXT: addi a0, sp, 8
+; RV32-NEXT: vsetvli a1, zero, e64, m4, ta, ma
+; RV32-NEXT: vlse64.v v12, (a0), zero
+; RV32-NEXT: vsetvli zero, a2, e64, m4, ta, ma
+; RV32-NEXT: vssubu.vv v8, v8, v12, v0.t
+; RV32-NEXT: addi sp, sp, 16
+; RV32-NEXT: ret
+;
+; RV64-LABEL: vssubu_vx_nxv4i64:
+; RV64: # %bb.0:
+; RV64-NEXT: vsetvli zero, a1, e64, m4, ta, ma
+; RV64-NEXT: vssubu.vx v8, v8, a0, v0.t
+; RV64-NEXT: ret
+ %elt.head = insertelement <vscale x 4 x i64> poison, i64 %b, i32 0
+ %vb = shufflevector <vscale x 4 x i64> %elt.head, <vscale x 4 x i64> poison, <vscale x 4 x i32> zeroinitializer
+ %v = call <vscale x 4 x i64> @llvm.vp.usub.sat.nxv4i64(<vscale x 4 x i64> %va, <vscale x 4 x i64> %vb, <vscale x 4 x i1> %m, i32 %evl)
+ ret <vscale x 4 x i64> %v
+}
+
+define <vscale x 4 x i64> @vssubu_vx_nxv4i64_unmasked(<vscale x 4 x i64> %va, i64 %b, i32 zeroext %evl) {
+; RV32-LABEL: vssubu_vx_nxv4i64_unmasked:
+; RV32: # %bb.0:
+; RV32-NEXT: addi sp, sp, -16
+; RV32-NEXT: .cfi_def_cfa_offset 16
+; RV32-NEXT: sw a1, 12(sp)
+; RV32-NEXT: sw a0, 8(sp)
+; RV32-NEXT: addi a0, sp, 8
+; RV32-NEXT: vsetvli a1, zero, e64, m4, ta, ma
+; RV32-NEXT: vlse64.v v12, (a0), zero
+; RV32-NEXT: vsetvli zero, a2, e64, m4, ta, ma
+; RV32-NEXT: vssubu.vv v8, v8, v12
+; RV32-NEXT: addi sp, sp, 16
+; RV32-NEXT: ret
+;
+; RV64-LABEL: vssubu_vx_nxv4i64_unmasked:
+; RV64: # %bb.0:
+; RV64-NEXT: vsetvli zero, a1, e64, m4, ta, ma
+; RV64-NEXT: vssubu.vx v8, v8, a0
+; RV64-NEXT: ret
+ %elt.head = insertelement <vscale x 4 x i64> poison, i64 %b, i32 0
+ %vb = shufflevector <vscale x 4 x i64> %elt.head, <vscale x 4 x i64> poison, <vscale x 4 x i32> zeroinitializer
+ %head = insertelement <vscale x 4 x i1> poison, i1 true, i32 0
+ %m = shufflevector <vscale x 4 x i1> %head, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
+ %v = call <vscale x 4 x i64> @llvm.vp.usub.sat.nxv4i64(<vscale x 4 x i64> %va, <vscale x 4 x i64> %vb, <vscale x 4 x i1> %m, i32 %evl)
+ ret <vscale x 4 x i64> %v
+}
+
+define <vscale x 4 x i64> @vssubu_vi_nxv4i64(<vscale x 4 x i64> %va, <vscale x 4 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vssubu_vi_nxv4i64:
+; CHECK: # %bb.0:
+; CHECK-NEXT: li a1, -1
+; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma
+; CHECK-NEXT: vssubu.vx v8, v8, a1, v0.t
+; CHECK-NEXT: ret
+ %elt.head = insertelement <vscale x 4 x i64> poison, i64 -1, i32 0
+ %vb = shufflevector <vscale x 4 x i64> %elt.head, <vscale x 4 x i64> poison, <vscale x 4 x i32> zeroinitializer
+ %v = call <vscale x 4 x i64> @llvm.vp.usub.sat.nxv4i64(<vscale x 4 x i64> %va, <vscale x 4 x i64> %vb, <vscale x 4 x i1> %m, i32 %evl)
+ ret <vscale x 4 x i64> %v
+}
+
+define <vscale x 4 x i64> @vssubu_vi_nxv4i64_unmasked(<vscale x 4 x i64> %va, i32 zeroext %evl) {
+; CHECK-LABEL: vssubu_vi_nxv4i64_unmasked:
+; CHECK: # %bb.0:
+; CHECK-NEXT: li a1, -1
+; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma
+; CHECK-NEXT: vssubu.vx v8, v8, a1
+; CHECK-NEXT: ret
+ %elt.head = insertelement <vscale x 4 x i64> poison, i64 -1, i32 0
+ %vb = shufflevector <vscale x 4 x i64> %elt.head, <vscale x 4 x i64> poison, <vscale x 4 x i32> zeroinitializer
+ %head = insertelement <vscale x 4 x i1> poison, i1 true, i32 0
+ %m = shufflevector <vscale x 4 x i1> %head, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
+ %v = call <vscale x 4 x i64> @llvm.vp.usub.sat.nxv4i64(<vscale x 4 x i64> %va, <vscale x 4 x i64> %vb, <vscale x 4 x i1> %m, i32 %evl)
+ ret <vscale x 4 x i64> %v
+}
+
+declare <vscale x 8 x i64> @llvm.vp.usub.sat.nxv8i64(<vscale x 8 x i64>, <vscale x 8 x i64>, <vscale x 8 x i1>, i32)
+
+define <vscale x 8 x i64> @vssubu_vv_nxv8i64(<vscale x 8 x i64> %va, <vscale x 8 x i64> %b, <vscale x 8 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vssubu_vv_nxv8i64:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
+; CHECK-NEXT: vssubu.vv v8, v8, v16, v0.t
+; CHECK-NEXT: ret
+ %v = call <vscale x 8 x i64> @llvm.vp.usub.sat.nxv8i64(<vscale x 8 x i64> %va, <vscale x 8 x i64> %b, <vscale x 8 x i1> %m, i32 %evl)
+ ret <vscale x 8 x i64> %v
+}
+
+define <vscale x 8 x i64> @vssubu_vv_nxv8i64_unmasked(<vscale x 8 x i64> %va, <vscale x 8 x i64> %b, i32 zeroext %evl) {
+; CHECK-LABEL: vssubu_vv_nxv8i64_unmasked:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
+; CHECK-NEXT: vssubu.vv v8, v8, v16
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i1> poison, i1 true, i32 0
+ %m = shufflevector <vscale x 8 x i1> %head, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
+ %v = call <vscale x 8 x i64> @llvm.vp.usub.sat.nxv8i64(<vscale x 8 x i64> %va, <vscale x 8 x i64> %b, <vscale x 8 x i1> %m, i32 %evl)
+ ret <vscale x 8 x i64> %v
+}
+
+define <vscale x 8 x i64> @vssubu_vx_nxv8i64(<vscale x 8 x i64> %va, i64 %b, <vscale x 8 x i1> %m, i32 zeroext %evl) {
+; RV32-LABEL: vssubu_vx_nxv8i64:
+; RV32: # %bb.0:
+; RV32-NEXT: addi sp, sp, -16
+; RV32-NEXT: .cfi_def_cfa_offset 16
+; RV32-NEXT: sw a1, 12(sp)
+; RV32-NEXT: sw a0, 8(sp)
+; RV32-NEXT: addi a0, sp, 8
+; RV32-NEXT: vsetvli a1, zero, e64, m8, ta, ma
+; RV32-NEXT: vlse64.v v16, (a0), zero
+; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, ma
+; RV32-NEXT: vssubu.vv v8, v8, v16, v0.t
+; RV32-NEXT: addi sp, sp, 16
+; RV32-NEXT: ret
+;
+; RV64-LABEL: vssubu_vx_nxv8i64:
+; RV64: # %bb.0:
+; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, ma
+; RV64-NEXT: vssubu.vx v8, v8, a0, v0.t
+; RV64-NEXT: ret
+ %elt.head = insertelement <vscale x 8 x i64> poison, i64 %b, i32 0
+ %vb = shufflevector <vscale x 8 x i64> %elt.head, <vscale x 8 x i64> poison, <vscale x 8 x i32> zeroinitializer
+ %v = call <vscale x 8 x i64> @llvm.vp.usub.sat.nxv8i64(<vscale x 8 x i64> %va, <vscale x 8 x i64> %vb, <vscale x 8 x i1> %m, i32 %evl)
+ ret <vscale x 8 x i64> %v
+}
+
+define <vscale x 8 x i64> @vssubu_vx_nxv8i64_unmasked(<vscale x 8 x i64> %va, i64 %b, i32 zeroext %evl) {
+; RV32-LABEL: vssubu_vx_nxv8i64_unmasked:
+; RV32: # %bb.0:
+; RV32-NEXT: addi sp, sp, -16
+; RV32-NEXT: .cfi_def_cfa_offset 16
+; RV32-NEXT: sw a1, 12(sp)
+; RV32-NEXT: sw a0, 8(sp)
+; RV32-NEXT: addi a0, sp, 8
+; RV32-NEXT: vsetvli a1, zero, e64, m8, ta, ma
+; RV32-NEXT: vlse64.v v16, (a0), zero
+; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, ma
+; RV32-NEXT: vssubu.vv v8, v8, v16
+; RV32-NEXT: addi sp, sp, 16
+; RV32-NEXT: ret
+;
+; RV64-LABEL: vssubu_vx_nxv8i64_unmasked:
+; RV64: # %bb.0:
+; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, ma
+; RV64-NEXT: vssubu.vx v8, v8, a0
+; RV64-NEXT: ret
+ %elt.head = insertelement <vscale x 8 x i64> poison, i64 %b, i32 0
+ %vb = shufflevector <vscale x 8 x i64> %elt.head, <vscale x 8 x i64> poison, <vscale x 8 x i32> zeroinitializer
+ %head = insertelement <vscale x 8 x i1> poison, i1 true, i32 0
+ %m = shufflevector <vscale x 8 x i1> %head, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
+ %v = call <vscale x 8 x i64> @llvm.vp.usub.sat.nxv8i64(<vscale x 8 x i64> %va, <vscale x 8 x i64> %vb, <vscale x 8 x i1> %m, i32 %evl)
+ ret <vscale x 8 x i64> %v
+}
+
+define <vscale x 8 x i64> @vssubu_vi_nxv8i64(<vscale x 8 x i64> %va, <vscale x 8 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vssubu_vi_nxv8i64:
+; CHECK: # %bb.0:
+; CHECK-NEXT: li a1, -1
+; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
+; CHECK-NEXT: vssubu.vx v8, v8, a1, v0.t
+; CHECK-NEXT: ret
+ %elt.head = insertelement <vscale x 8 x i64> poison, i64 -1, i32 0
+ %vb = shufflevector <vscale x 8 x i64> %elt.head, <vscale x 8 x i64> poison, <vscale x 8 x i32> zeroinitializer
+ %v = call <vscale x 8 x i64> @llvm.vp.usub.sat.nxv8i64(<vscale x 8 x i64> %va, <vscale x 8 x i64> %vb, <vscale x 8 x i1> %m, i32 %evl)
+ ret <vscale x 8 x i64> %v
+}
+
+define <vscale x 8 x i64> @vssubu_vi_nxv8i64_unmasked(<vscale x 8 x i64> %va, i32 zeroext %evl) {
+; CHECK-LABEL: vssubu_vi_nxv8i64_unmasked:
+; CHECK: # %bb.0:
+; CHECK-NEXT: li a1, -1
+; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
+; CHECK-NEXT: vssubu.vx v8, v8, a1
+; CHECK-NEXT: ret
+ %elt.head = insertelement <vscale x 8 x i64> poison, i64 -1, i32 0
+ %vb = shufflevector <vscale x 8 x i64> %elt.head, <vscale x 8 x i64> poison, <vscale x 8 x i32> zeroinitializer
+ %head = insertelement <vscale x 8 x i1> poison, i1 true, i32 0
+ %m = shufflevector <vscale x 8 x i1> %head, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
+ %v = call <vscale x 8 x i64> @llvm.vp.usub.sat.nxv8i64(<vscale x 8 x i64> %va, <vscale x 8 x i64> %vb, <vscale x 8 x i1> %m, i32 %evl)
+ ret <vscale x 8 x i64> %v
+}
diff --git a/llvm/test/CodeGen/RISCV/select.ll b/llvm/test/CodeGen/RISCV/select.ll
index e01984b7c584..e07e52091e9e 100644
--- a/llvm/test/CodeGen/RISCV/select.ll
+++ b/llvm/test/CodeGen/RISCV/select.ll
@@ -1606,23 +1606,255 @@ define i32 @select_cst_unknown(i32 signext %a, i32 signext %b) {
; RV64IMXVTCONDOPS-LABEL: select_cst_unknown:
; RV64IMXVTCONDOPS: # %bb.0:
; RV64IMXVTCONDOPS-NEXT: slt a0, a0, a1
-; RV64IMXVTCONDOPS-NEXT: li a1, -7
-; RV64IMXVTCONDOPS-NEXT: vt.maskcn a1, a1, a0
-; RV64IMXVTCONDOPS-NEXT: li a2, 5
-; RV64IMXVTCONDOPS-NEXT: vt.maskc a0, a2, a0
-; RV64IMXVTCONDOPS-NEXT: or a0, a0, a1
+; RV64IMXVTCONDOPS-NEXT: li a1, -12
+; RV64IMXVTCONDOPS-NEXT: vt.maskcn a0, a1, a0
+; RV64IMXVTCONDOPS-NEXT: addi a0, a0, 5
; RV64IMXVTCONDOPS-NEXT: ret
;
; CHECKZICOND-LABEL: select_cst_unknown:
; CHECKZICOND: # %bb.0:
; CHECKZICOND-NEXT: slt a0, a0, a1
-; CHECKZICOND-NEXT: li a1, -7
-; CHECKZICOND-NEXT: czero.nez a1, a1, a0
-; CHECKZICOND-NEXT: li a2, 5
-; CHECKZICOND-NEXT: czero.eqz a0, a2, a0
-; CHECKZICOND-NEXT: or a0, a0, a1
+; CHECKZICOND-NEXT: li a1, -12
+; CHECKZICOND-NEXT: czero.nez a0, a1, a0
+; CHECKZICOND-NEXT: addi a0, a0, 5
; CHECKZICOND-NEXT: ret
%cond = icmp slt i32 %a, %b
%ret = select i1 %cond, i32 5, i32 -7
ret i32 %ret
}
+
+define i32 @select_cst1(i1 zeroext %cond) {
+; RV32IM-LABEL: select_cst1:
+; RV32IM: # %bb.0:
+; RV32IM-NEXT: mv a1, a0
+; RV32IM-NEXT: li a0, 10
+; RV32IM-NEXT: bnez a1, .LBB43_2
+; RV32IM-NEXT: # %bb.1:
+; RV32IM-NEXT: li a0, 20
+; RV32IM-NEXT: .LBB43_2:
+; RV32IM-NEXT: ret
+;
+; RV64IM-LABEL: select_cst1:
+; RV64IM: # %bb.0:
+; RV64IM-NEXT: mv a1, a0
+; RV64IM-NEXT: li a0, 10
+; RV64IM-NEXT: bnez a1, .LBB43_2
+; RV64IM-NEXT: # %bb.1:
+; RV64IM-NEXT: li a0, 20
+; RV64IM-NEXT: .LBB43_2:
+; RV64IM-NEXT: ret
+;
+; RV64IMXVTCONDOPS-LABEL: select_cst1:
+; RV64IMXVTCONDOPS: # %bb.0:
+; RV64IMXVTCONDOPS-NEXT: li a1, 10
+; RV64IMXVTCONDOPS-NEXT: vt.maskcn a0, a1, a0
+; RV64IMXVTCONDOPS-NEXT: addi a0, a0, 10
+; RV64IMXVTCONDOPS-NEXT: ret
+;
+; CHECKZICOND-LABEL: select_cst1:
+; CHECKZICOND: # %bb.0:
+; CHECKZICOND-NEXT: li a1, 10
+; CHECKZICOND-NEXT: czero.nez a0, a1, a0
+; CHECKZICOND-NEXT: addi a0, a0, 10
+; CHECKZICOND-NEXT: ret
+ %ret = select i1 %cond, i32 10, i32 20
+ ret i32 %ret
+}
+
+define i32 @select_cst2(i1 zeroext %cond) {
+; RV32IM-LABEL: select_cst2:
+; RV32IM: # %bb.0:
+; RV32IM-NEXT: mv a1, a0
+; RV32IM-NEXT: li a0, 10
+; RV32IM-NEXT: bnez a1, .LBB44_2
+; RV32IM-NEXT: # %bb.1:
+; RV32IM-NEXT: lui a0, 5
+; RV32IM-NEXT: addi a0, a0, -480
+; RV32IM-NEXT: .LBB44_2:
+; RV32IM-NEXT: ret
+;
+; RV64IM-LABEL: select_cst2:
+; RV64IM: # %bb.0:
+; RV64IM-NEXT: mv a1, a0
+; RV64IM-NEXT: li a0, 10
+; RV64IM-NEXT: bnez a1, .LBB44_2
+; RV64IM-NEXT: # %bb.1:
+; RV64IM-NEXT: lui a0, 5
+; RV64IM-NEXT: addiw a0, a0, -480
+; RV64IM-NEXT: .LBB44_2:
+; RV64IM-NEXT: ret
+;
+; RV64IMXVTCONDOPS-LABEL: select_cst2:
+; RV64IMXVTCONDOPS: # %bb.0:
+; RV64IMXVTCONDOPS-NEXT: lui a1, 5
+; RV64IMXVTCONDOPS-NEXT: addiw a1, a1, -490
+; RV64IMXVTCONDOPS-NEXT: vt.maskcn a0, a1, a0
+; RV64IMXVTCONDOPS-NEXT: addi a0, a0, 10
+; RV64IMXVTCONDOPS-NEXT: ret
+;
+; RV32IMZICOND-LABEL: select_cst2:
+; RV32IMZICOND: # %bb.0:
+; RV32IMZICOND-NEXT: lui a1, 5
+; RV32IMZICOND-NEXT: addi a1, a1, -490
+; RV32IMZICOND-NEXT: czero.nez a0, a1, a0
+; RV32IMZICOND-NEXT: addi a0, a0, 10
+; RV32IMZICOND-NEXT: ret
+;
+; RV64IMZICOND-LABEL: select_cst2:
+; RV64IMZICOND: # %bb.0:
+; RV64IMZICOND-NEXT: lui a1, 5
+; RV64IMZICOND-NEXT: addiw a1, a1, -490
+; RV64IMZICOND-NEXT: czero.nez a0, a1, a0
+; RV64IMZICOND-NEXT: addi a0, a0, 10
+; RV64IMZICOND-NEXT: ret
+ %ret = select i1 %cond, i32 10, i32 20000
+ ret i32 %ret
+}
+
+define i32 @select_cst3(i1 zeroext %cond) {
+; RV32IM-LABEL: select_cst3:
+; RV32IM: # %bb.0:
+; RV32IM-NEXT: bnez a0, .LBB45_2
+; RV32IM-NEXT: # %bb.1:
+; RV32IM-NEXT: lui a0, 5
+; RV32IM-NEXT: addi a0, a0, -480
+; RV32IM-NEXT: ret
+; RV32IM-NEXT: .LBB45_2:
+; RV32IM-NEXT: lui a0, 7
+; RV32IM-NEXT: addi a0, a0, 1328
+; RV32IM-NEXT: ret
+;
+; RV64IM-LABEL: select_cst3:
+; RV64IM: # %bb.0:
+; RV64IM-NEXT: bnez a0, .LBB45_2
+; RV64IM-NEXT: # %bb.1:
+; RV64IM-NEXT: lui a0, 5
+; RV64IM-NEXT: addiw a0, a0, -480
+; RV64IM-NEXT: ret
+; RV64IM-NEXT: .LBB45_2:
+; RV64IM-NEXT: lui a0, 7
+; RV64IM-NEXT: addiw a0, a0, 1328
+; RV64IM-NEXT: ret
+;
+; RV64IMXVTCONDOPS-LABEL: select_cst3:
+; RV64IMXVTCONDOPS: # %bb.0:
+; RV64IMXVTCONDOPS-NEXT: lui a1, 1048574
+; RV64IMXVTCONDOPS-NEXT: addiw a1, a1, -1808
+; RV64IMXVTCONDOPS-NEXT: vt.maskcn a0, a1, a0
+; RV64IMXVTCONDOPS-NEXT: lui a1, 7
+; RV64IMXVTCONDOPS-NEXT: addiw a1, a1, 1328
+; RV64IMXVTCONDOPS-NEXT: add a0, a0, a1
+; RV64IMXVTCONDOPS-NEXT: ret
+;
+; RV32IMZICOND-LABEL: select_cst3:
+; RV32IMZICOND: # %bb.0:
+; RV32IMZICOND-NEXT: lui a1, 1048574
+; RV32IMZICOND-NEXT: addi a1, a1, -1808
+; RV32IMZICOND-NEXT: czero.nez a0, a1, a0
+; RV32IMZICOND-NEXT: lui a1, 7
+; RV32IMZICOND-NEXT: addi a1, a1, 1328
+; RV32IMZICOND-NEXT: add a0, a0, a1
+; RV32IMZICOND-NEXT: ret
+;
+; RV64IMZICOND-LABEL: select_cst3:
+; RV64IMZICOND: # %bb.0:
+; RV64IMZICOND-NEXT: lui a1, 1048574
+; RV64IMZICOND-NEXT: addiw a1, a1, -1808
+; RV64IMZICOND-NEXT: czero.nez a0, a1, a0
+; RV64IMZICOND-NEXT: lui a1, 7
+; RV64IMZICOND-NEXT: addiw a1, a1, 1328
+; RV64IMZICOND-NEXT: add a0, a0, a1
+; RV64IMZICOND-NEXT: ret
+ %ret = select i1 %cond, i32 30000, i32 20000
+ ret i32 %ret
+}
+
+define i32 @select_cst4(i1 zeroext %cond) {
+; CHECK-LABEL: select_cst4:
+; CHECK: # %bb.0:
+; CHECK-NEXT: neg a0, a0
+; CHECK-NEXT: xori a0, a0, 2047
+; CHECK-NEXT: ret
+ %ret = select i1 %cond, i32 -2048, i32 2047
+ ret i32 %ret
+}
+
+define i32 @select_cst5(i1 zeroext %cond) {
+; RV32IM-LABEL: select_cst5:
+; RV32IM: # %bb.0:
+; RV32IM-NEXT: mv a1, a0
+; RV32IM-NEXT: li a0, 2047
+; RV32IM-NEXT: bnez a1, .LBB47_2
+; RV32IM-NEXT: # %bb.1:
+; RV32IM-NEXT: lui a0, 1
+; RV32IM-NEXT: addi a0, a0, -2047
+; RV32IM-NEXT: .LBB47_2:
+; RV32IM-NEXT: ret
+;
+; RV64IM-LABEL: select_cst5:
+; RV64IM: # %bb.0:
+; RV64IM-NEXT: mv a1, a0
+; RV64IM-NEXT: li a0, 2047
+; RV64IM-NEXT: bnez a1, .LBB47_2
+; RV64IM-NEXT: # %bb.1:
+; RV64IM-NEXT: lui a0, 1
+; RV64IM-NEXT: addiw a0, a0, -2047
+; RV64IM-NEXT: .LBB47_2:
+; RV64IM-NEXT: ret
+;
+; RV64IMXVTCONDOPS-LABEL: select_cst5:
+; RV64IMXVTCONDOPS: # %bb.0:
+; RV64IMXVTCONDOPS-NEXT: li a1, 2
+; RV64IMXVTCONDOPS-NEXT: vt.maskcn a0, a1, a0
+; RV64IMXVTCONDOPS-NEXT: addi a0, a0, 2047
+; RV64IMXVTCONDOPS-NEXT: ret
+;
+; CHECKZICOND-LABEL: select_cst5:
+; CHECKZICOND: # %bb.0:
+; CHECKZICOND-NEXT: li a1, 2
+; CHECKZICOND-NEXT: czero.nez a0, a1, a0
+; CHECKZICOND-NEXT: addi a0, a0, 2047
+; CHECKZICOND-NEXT: ret
+ %ret = select i1 %cond, i32 2047, i32 2049
+ ret i32 %ret
+}
+
+define i32 @select_cst6(i1 zeroext %cond) {
+; RV32IM-LABEL: select_cst6:
+; RV32IM: # %bb.0:
+; RV32IM-NEXT: bnez a0, .LBB48_2
+; RV32IM-NEXT: # %bb.1:
+; RV32IM-NEXT: li a0, 2047
+; RV32IM-NEXT: ret
+; RV32IM-NEXT: .LBB48_2:
+; RV32IM-NEXT: lui a0, 1
+; RV32IM-NEXT: addi a0, a0, -2047
+; RV32IM-NEXT: ret
+;
+; RV64IM-LABEL: select_cst6:
+; RV64IM: # %bb.0:
+; RV64IM-NEXT: bnez a0, .LBB48_2
+; RV64IM-NEXT: # %bb.1:
+; RV64IM-NEXT: li a0, 2047
+; RV64IM-NEXT: ret
+; RV64IM-NEXT: .LBB48_2:
+; RV64IM-NEXT: lui a0, 1
+; RV64IM-NEXT: addiw a0, a0, -2047
+; RV64IM-NEXT: ret
+;
+; RV64IMXVTCONDOPS-LABEL: select_cst6:
+; RV64IMXVTCONDOPS: # %bb.0:
+; RV64IMXVTCONDOPS-NEXT: li a1, 2
+; RV64IMXVTCONDOPS-NEXT: vt.maskc a0, a1, a0
+; RV64IMXVTCONDOPS-NEXT: addi a0, a0, 2047
+; RV64IMXVTCONDOPS-NEXT: ret
+;
+; CHECKZICOND-LABEL: select_cst6:
+; CHECKZICOND: # %bb.0:
+; CHECKZICOND-NEXT: li a1, 2
+; CHECKZICOND-NEXT: czero.eqz a0, a1, a0
+; CHECKZICOND-NEXT: addi a0, a0, 2047
+; CHECKZICOND-NEXT: ret
+ %ret = select i1 %cond, i32 2049, i32 2047
+ ret i32 %ret
+}
diff --git a/llvm/test/CodeGen/RISCV/sext-zext-trunc.ll b/llvm/test/CodeGen/RISCV/sext-zext-trunc.ll
index a2a953ca882b..87f2a6306bd6 100644
--- a/llvm/test/CodeGen/RISCV/sext-zext-trunc.ll
+++ b/llvm/test/CodeGen/RISCV/sext-zext-trunc.ll
@@ -871,3 +871,64 @@ define void @zext_nneg_dominating_icmp_i32_zeroext(i16 signext %0) {
5:
ret void
}
+
+; The load is used extended and non-extended in the successor basic block. The
+; signed compare will cause the non-extended value to exported out of the first
+; basic block using a sext to XLen. We need to CSE the zext nneg with the sext
+; so that we can form a sextload.
+define void @load_zext_nneg_sext_cse(ptr %p) nounwind {
+; RV32I-LABEL: load_zext_nneg_sext_cse:
+; RV32I: # %bb.0:
+; RV32I-NEXT: addi sp, sp, -16
+; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
+; RV32I-NEXT: sw s0, 8(sp) # 4-byte Folded Spill
+; RV32I-NEXT: lh s0, 0(a0)
+; RV32I-NEXT: bltz s0, .LBB50_2
+; RV32I-NEXT: # %bb.1: # %bb1
+; RV32I-NEXT: mv a0, s0
+; RV32I-NEXT: call bar_i16
+; RV32I-NEXT: mv a0, s0
+; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
+; RV32I-NEXT: lw s0, 8(sp) # 4-byte Folded Reload
+; RV32I-NEXT: addi sp, sp, 16
+; RV32I-NEXT: tail bar_i32
+; RV32I-NEXT: .LBB50_2: # %bb2
+; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
+; RV32I-NEXT: lw s0, 8(sp) # 4-byte Folded Reload
+; RV32I-NEXT: addi sp, sp, 16
+; RV32I-NEXT: ret
+;
+; RV64-LABEL: load_zext_nneg_sext_cse:
+; RV64: # %bb.0:
+; RV64-NEXT: addi sp, sp, -16
+; RV64-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
+; RV64-NEXT: sd s0, 0(sp) # 8-byte Folded Spill
+; RV64-NEXT: lh s0, 0(a0)
+; RV64-NEXT: bltz s0, .LBB50_2
+; RV64-NEXT: # %bb.1: # %bb1
+; RV64-NEXT: mv a0, s0
+; RV64-NEXT: call bar_i16
+; RV64-NEXT: mv a0, s0
+; RV64-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
+; RV64-NEXT: ld s0, 0(sp) # 8-byte Folded Reload
+; RV64-NEXT: addi sp, sp, 16
+; RV64-NEXT: tail bar_i32
+; RV64-NEXT: .LBB50_2: # %bb2
+; RV64-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
+; RV64-NEXT: ld s0, 0(sp) # 8-byte Folded Reload
+; RV64-NEXT: addi sp, sp, 16
+; RV64-NEXT: ret
+ %load = load i16, ptr %p
+ %zext = zext nneg i16 %load to i32
+ %cmp = icmp sgt i16 %load, -1
+ br i1 %cmp, label %bb1, label %bb2
+
+bb1:
+ tail call void @bar_i16(i16 signext %load)
+ tail call void @bar_i32(i32 signext %zext)
+ br label %bb2
+
+bb2:
+ ret void
+}
+declare void @bar_i16(i16)
diff --git a/llvm/test/CodeGen/RISCV/split-offsets.ll b/llvm/test/CodeGen/RISCV/split-offsets.ll
index fc35bc4d2a16..8d065daa2067 100644
--- a/llvm/test/CodeGen/RISCV/split-offsets.ll
+++ b/llvm/test/CodeGen/RISCV/split-offsets.ll
@@ -1,7 +1,7 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=riscv32 -verify-machineinstrs -riscv-enable-sink-fold < %s \
+; RUN: llc -mtriple=riscv32 -verify-machineinstrs < %s \
; RUN: | FileCheck %s -check-prefix=RV32I
-; RUN: llc -mtriple=riscv64 -verify-machineinstrs -riscv-enable-sink-fold < %s \
+; RUN: llc -mtriple=riscv64 -verify-machineinstrs < %s \
; RUN: | FileCheck %s -check-prefix=RV64I
; Check that memory accesses to array elements with large offsets have those
diff --git a/llvm/test/CodeGen/RISCV/srem-vector-lkk.ll b/llvm/test/CodeGen/RISCV/srem-vector-lkk.ll
index ec6e978c2c68..7fc4713ac2d6 100644
--- a/llvm/test/CodeGen/RISCV/srem-vector-lkk.ll
+++ b/llvm/test/CodeGen/RISCV/srem-vector-lkk.ll
@@ -1,11 +1,11 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=riscv32 -verify-machineinstrs -riscv-enable-sink-fold < %s \
+; RUN: llc -mtriple=riscv32 -verify-machineinstrs < %s \
; RUN: | FileCheck -check-prefix=RV32I %s
-; RUN: llc -mtriple=riscv32 -mattr=+m -verify-machineinstrs -riscv-enable-sink-fold < %s \
+; RUN: llc -mtriple=riscv32 -mattr=+m -verify-machineinstrs < %s \
; RUN: | FileCheck -check-prefix=RV32IM %s
-; RUN: llc -mtriple=riscv64 -verify-machineinstrs -riscv-enable-sink-fold < %s \
+; RUN: llc -mtriple=riscv64 -verify-machineinstrs < %s \
; RUN: | FileCheck -check-prefix=RV64I %s
-; RUN: llc -mtriple=riscv64 -mattr=+m -verify-machineinstrs -riscv-enable-sink-fold < %s \
+; RUN: llc -mtriple=riscv64 -mattr=+m -verify-machineinstrs < %s \
; RUN: | FileCheck -check-prefix=RV64IM %s
define <4 x i16> @fold_srem_vec_1(<4 x i16> %x) nounwind {
diff --git a/llvm/test/CodeGen/RISCV/urem-vector-lkk.ll b/llvm/test/CodeGen/RISCV/urem-vector-lkk.ll
index eea8e64f2ddd..540883fdc517 100644
--- a/llvm/test/CodeGen/RISCV/urem-vector-lkk.ll
+++ b/llvm/test/CodeGen/RISCV/urem-vector-lkk.ll
@@ -1,11 +1,11 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=riscv32 -verify-machineinstrs -riscv-enable-sink-fold < %s \
+; RUN: llc -mtriple=riscv32 -verify-machineinstrs < %s \
; RUN: | FileCheck -check-prefixes=CHECK,RV32I %s
-; RUN: llc -mtriple=riscv32 -mattr=+m -verify-machineinstrs -riscv-enable-sink-fold < %s \
+; RUN: llc -mtriple=riscv32 -mattr=+m -verify-machineinstrs < %s \
; RUN: | FileCheck -check-prefixes=CHECK,RV32IM %s
-; RUN: llc -mtriple=riscv64 -verify-machineinstrs -riscv-enable-sink-fold < %s \
+; RUN: llc -mtriple=riscv64 -verify-machineinstrs < %s \
; RUN: | FileCheck -check-prefixes=CHECK,RV64I %s
-; RUN: llc -mtriple=riscv64 -mattr=+m -verify-machineinstrs -riscv-enable-sink-fold < %s \
+; RUN: llc -mtriple=riscv64 -mattr=+m -verify-machineinstrs < %s \
; RUN: | FileCheck -check-prefixes=CHECK,RV64IM %s
diff --git a/llvm/test/CodeGen/SPIRV/extensions/SPV_INTEL_usm_storage_classes/intel-usm-addrspaces.ll b/llvm/test/CodeGen/SPIRV/extensions/SPV_INTEL_usm_storage_classes/intel-usm-addrspaces.ll
new file mode 100644
index 000000000000..30c16350bf2b
--- /dev/null
+++ b/llvm/test/CodeGen/SPIRV/extensions/SPV_INTEL_usm_storage_classes/intel-usm-addrspaces.ll
@@ -0,0 +1,84 @@
+; Modified from: https://github.com/KhronosGroup/SPIRV-LLVM-Translator/test/extensions/INTEL/SPV_INTEL_usm_storage_classes/intel_usm_addrspaces.ll
+
+; RUN: llc -O0 -mtriple=spirv32-unknown-unknown --spirv-extensions=SPV_INTEL_usm_storage_classes %s -o - | FileCheck %s --check-prefixes=CHECK-SPIRV,CHECK-SPIRV-EXT
+; TODO: %if spirv-tools %{ llc -O0 -mtriple=spirv64-unknown-unknown --spirv-extensions=SPV_INTEL_usm_storage_classes %s -o - -filetype=obj | spirv-val %}
+; RUN: llc -O0 -mtriple=spirv32-unknown-unknown %s -o - | FileCheck %s --check-prefixes=CHECK-SPIRV,CHECK-SPIRV-WITHOUT
+; TODO: %if spirv-tools %{ llc -O0 -mtriple=spirv64-unknown-unknown %s -o - -filetype=obj | spirv-val %}
+
+; CHECK-SPIRV-EXT: Capability USMStorageClassesINTEL
+; CHECK-SPIRV-WITHOUT-NOT: Capability USMStorageClassesINTEL
+; CHECK-SPIRV-EXT-DAG: %[[DevTy:[0-9]+]] = OpTypePointer DeviceOnlyINTEL %[[#]]
+; CHECK-SPIRV-EXT-DAG: %[[HostTy:[0-9]+]] = OpTypePointer HostOnlyINTEL %[[#]]
+; CHECK-SPIRV-DAG: %[[CrsWrkTy:[0-9]+]] = OpTypePointer CrossWorkgroup %[[#]]
+
+target datalayout = "e-i64:64-v16:16-v24:32-v32:32-v48:64-v96:128-v192:256-v256:256-v512:512-v1024:1024-n8:16:32:64"
+target triple = "spir64-unknown-unknown"
+
+define spir_kernel void @foo_kernel() {
+entry:
+ ret void
+}
+
+; CHECK-SPIRV: %[[Ptr1:[0-9]+]] = OpLoad %[[CrsWrkTy]] %[[#]]
+; CHECK-SPIRV-EXT: %[[CastedPtr1:[0-9]+]] = OpCrossWorkgroupCastToPtrINTEL %[[DevTy]] %[[Ptr1]]
+; CHECK-SPIRV-WITHOUT-NOT: OpCrossWorkgroupCastToPtrINTEL
+; CHECK-SPIRV-EXT: OpStore %[[#]] %[[CastedPtr1]]
+define spir_func void @test1(ptr addrspace(1) %arg_glob, ptr addrspace(5) %arg_dev) {
+entry:
+ %arg_glob.addr = alloca ptr addrspace(1), align 4
+ %arg_dev.addr = alloca ptr addrspace(5), align 4
+ store ptr addrspace(1) %arg_glob, ptr %arg_glob.addr, align 4
+ store ptr addrspace(5) %arg_dev, ptr %arg_dev.addr, align 4
+ %loaded_glob = load ptr addrspace(1), ptr %arg_glob.addr, align 4
+ %casted_ptr = addrspacecast ptr addrspace(1) %loaded_glob to ptr addrspace(5)
+ store ptr addrspace(5) %casted_ptr, ptr %arg_dev.addr, align 4
+ ret void
+}
+
+; CHECK-SPIRV: %[[Ptr2:[0-9]+]] = OpLoad %[[CrsWrkTy]] %[[#]]
+; CHECK-SPIRV-EXT: %[[CastedPtr2:[0-9]+]] = OpCrossWorkgroupCastToPtrINTEL %[[HostTy]] %[[Ptr2]]
+; CHECK-SPIRV-WITHOUT-NOT: OpCrossWorkgroupCastToPtrINTEL
+; CHECK-SPIRV-EXT: OpStore %[[#]] %[[CastedPtr2]]
+define spir_func void @test2(ptr addrspace(1) %arg_glob, ptr addrspace(6) %arg_host) {
+entry:
+ %arg_glob.addr = alloca ptr addrspace(1), align 4
+ %arg_host.addr = alloca ptr addrspace(6), align 4
+ store ptr addrspace(1) %arg_glob, ptr %arg_glob.addr, align 4
+ store ptr addrspace(6) %arg_host, ptr %arg_host.addr, align 4
+ %loaded_glob = load ptr addrspace(1), ptr %arg_glob.addr, align 4
+ %casted_ptr = addrspacecast ptr addrspace(1) %loaded_glob to ptr addrspace(6)
+ store ptr addrspace(6) %casted_ptr, ptr %arg_host.addr, align 4
+ ret void
+}
+
+; CHECK-SPIRV-EXT: %[[Ptr3:[0-9]+]] = OpLoad %[[DevTy]] %[[#]]
+; CHECK-SPIRV-EXT: %[[CastedPtr3:[0-9]+]] = OpPtrCastToCrossWorkgroupINTEL %[[CrsWrkTy]] %[[Ptr3]]
+; CHECK-SPIRV-WITHOUT-NOT: OpPtrCastToCrossWorkgroupINTEL
+; CHECK-SPIRV-EXT: OpStore %[[#]] %[[CastedPtr3]]
+define spir_func void @test3(ptr addrspace(1) %arg_glob, ptr addrspace(5) %arg_dev) {
+entry:
+ %arg_glob.addr = alloca ptr addrspace(1), align 4
+ %arg_dev.addr = alloca ptr addrspace(5), align 4
+ store ptr addrspace(1) %arg_glob, ptr %arg_glob.addr, align 4
+ store ptr addrspace(5) %arg_dev, ptr %arg_dev.addr, align 4
+ %loaded_dev = load ptr addrspace(5), ptr %arg_dev.addr, align 4
+ %casted_ptr = addrspacecast ptr addrspace(5) %loaded_dev to ptr addrspace(1)
+ store ptr addrspace(1) %casted_ptr, ptr %arg_glob.addr, align 4
+ ret void
+}
+
+; CHECK-SPIRV-EXT: %[[Ptr4:[0-9]+]] = OpLoad %[[HostTy]] %[[#]]
+; CHECK-SPIRV-EXT: %[[CastedPtr4:[0-9]+]] = OpPtrCastToCrossWorkgroupINTEL %[[CrsWrkTy]] %[[Ptr4]]
+; CHECK-SPIRV-WITHOUT-NOT: OpPtrCastToCrossWorkgroupINTEL
+; CHECK-SPIRV-EXT: OpStore %[[#]] %[[CastedPtr4]]
+define spir_func void @test4(ptr addrspace(1) %arg_glob, ptr addrspace(6) %arg_host) {
+entry:
+ %arg_glob.addr = alloca ptr addrspace(1), align 4
+ %arg_host.addr = alloca ptr addrspace(6), align 4
+ store ptr addrspace(1) %arg_glob, ptr %arg_glob.addr, align 4
+ store ptr addrspace(6) %arg_host, ptr %arg_host.addr, align 4
+ %loaded_host = load ptr addrspace(6), ptr %arg_host.addr, align 4
+ %casted_ptr = addrspacecast ptr addrspace(6) %loaded_host to ptr addrspace(1)
+ store ptr addrspace(1) %casted_ptr, ptr %arg_glob.addr, align 4
+ ret void
+}
diff --git a/llvm/test/CodeGen/SPIRV/extensions/SPV_KHR_subgroup_rotate/subgroup-rotate.ll b/llvm/test/CodeGen/SPIRV/extensions/SPV_KHR_subgroup_rotate/subgroup-rotate.ll
new file mode 100644
index 000000000000..b1d6a09c7fe3
--- /dev/null
+++ b/llvm/test/CodeGen/SPIRV/extensions/SPV_KHR_subgroup_rotate/subgroup-rotate.ll
@@ -0,0 +1,357 @@
+; RUN: not llc -O0 -mtriple=spirv32-unknown-unknown %s -o %t.spvt 2>&1 | FileCheck %s --check-prefix=CHECK-ERROR
+; RUN: llc -O0 -mtriple=spirv32-unknown-unknown --spirv-extensions=SPV_KHR_subgroup_rotate %s -o - | FileCheck %s
+; TODO: %if spirv-tools %{ llc -O0 -mtriple=spirv64-unknown-unknown --spirv-extensions=SPV_KHR_subgroup_rotate %s -o - -filetype=obj | spirv-val %}
+
+; CHECK-ERROR: LLVM ERROR: OpGroupNonUniformRotateKHR instruction requires the following SPIR-V extension: SPV_KHR_subgroup_rotate
+
+; CHECK: OpCapability GroupNonUniformRotateKHR
+; CHECK: OpExtension "SPV_KHR_subgroup_rotate"
+
+; CHECK-DAG: %[[TyInt8:.*]] = OpTypeInt 8 0
+; CHECK-DAG: %[[TyInt16:.*]] = OpTypeInt 16 0
+; CHECK-DAG: %[[TyInt32:.*]] = OpTypeInt 32 0
+; CHECK-DAG: %[[TyInt64:.*]] = OpTypeInt 64 0
+; CHECK-DAG: %[[TyFloat:.*]] = OpTypeFloat 32
+; CHECK-DAG: %[[TyHalf:.*]] = OpTypeFloat 16
+; CHECK-DAG: %[[TyDouble:.*]] = OpTypeFloat 64
+; CHECK-DAG: %[[ScopeSubgroup:.*]] = OpConstant %[[TyInt32]] 3
+; CHECK-DAG: %[[ConstInt2:.*]] = OpConstant %[[TyInt32]] 2
+; CHECK-DAG: %[[ConstInt4:.*]] = OpConstant %[[TyInt32]] 4
+
+target datalayout = "e-p:32:32-i64:64-v16:16-v24:32-v32:32-v48:64-v96:128-v192:256-v256:256-v512:512-v1024:1024"
+target triple = "spir"
+
+; Function Attrs: convergent noinline norecurse nounwind optnone
+define dso_local spir_kernel void @testRotateChar(ptr addrspace(1) noundef align 1 %dst) #0 !kernel_arg_addr_space !3 !kernel_arg_access_qual !4 !kernel_arg_type !5 !kernel_arg_base_type !5 !kernel_arg_type_qual !6 {
+entry:
+ %dst.addr = alloca ptr addrspace(1), align 4
+ %v = alloca i8, align 1
+ store ptr addrspace(1) %dst, ptr %dst.addr, align 4
+ store i8 0, ptr %v, align 1
+ %value = load i8, ptr %v, align 1
+; CHECK: OpGroupNonUniformRotateKHR %[[TyInt8]] %[[ScopeSubgroup]] %[[#]] %[[ConstInt2]]
+ %call = call spir_func signext i8 @_Z16sub_group_rotateci(i8 noundef signext %value, i32 noundef 2) #2
+ %data = load ptr addrspace(1), ptr %dst.addr, align 4
+ %arrayidx = getelementptr inbounds i8, ptr addrspace(1) %data, i32 0
+ store i8 %call, ptr addrspace(1) %arrayidx, align 1
+ %value_clustered = load i8, ptr %v, align 1
+; CHECK: OpGroupNonUniformRotateKHR %[[TyInt8]] %[[ScopeSubgroup]] %[[#]] %[[ConstInt2]] %[[ConstInt4]]
+ %call1 = call spir_func signext i8 @_Z26sub_group_clustered_rotatecij(i8 noundef signext %value_clustered, i32 noundef 2, i32 noundef 4) #2
+ %data2 = load ptr addrspace(1), ptr %dst.addr, align 4
+ %arrayidx2 = getelementptr inbounds i8, ptr addrspace(1) %data2, i32 1
+ store i8 %call1, ptr addrspace(1) %arrayidx2, align 1
+ ret void
+}
+
+; Function Attrs: convergent nounwind
+declare spir_func signext i8 @_Z16sub_group_rotateci(i8 noundef signext, i32 noundef) #1
+
+; Function Attrs: convergent nounwind
+declare spir_func signext i8 @_Z26sub_group_clustered_rotatecij(i8 noundef signext, i32 noundef, i32 noundef) #1
+
+; Function Attrs: convergent noinline norecurse nounwind optnone
+define dso_local spir_kernel void @testRotateUChar(ptr addrspace(1) noundef align 1 %dst) #0 !kernel_arg_addr_space !3 !kernel_arg_access_qual !4 !kernel_arg_type !7 !kernel_arg_base_type !7 !kernel_arg_type_qual !6 {
+entry:
+ %dst.addr = alloca ptr addrspace(1), align 4
+ %v = alloca i8, align 1
+ store ptr addrspace(1) %dst, ptr %dst.addr, align 4
+ store i8 0, ptr %v, align 1
+ %value = load i8, ptr %v, align 1
+; CHECK: OpGroupNonUniformRotateKHR %[[TyInt8]] %[[ScopeSubgroup]] %[[#]] %[[ConstInt2]]
+ %call = call spir_func zeroext i8 @_Z16sub_group_rotatehi(i8 noundef zeroext %value, i32 noundef 2) #2
+ %data = load ptr addrspace(1), ptr %dst.addr, align 4
+ %arrayidx = getelementptr inbounds i8, ptr addrspace(1) %data, i32 0
+ store i8 %call, ptr addrspace(1) %arrayidx, align 1
+ %value_clustered = load i8, ptr %v, align 1
+; CHECK: OpGroupNonUniformRotateKHR %[[TyInt8]] %[[ScopeSubgroup]] %[[#]] %[[ConstInt2]] %[[ConstInt4]]
+ %call1 = call spir_func zeroext i8 @_Z26sub_group_clustered_rotatehij(i8 noundef zeroext %value_clustered, i32 noundef 2, i32 noundef 4) #2
+ %data2 = load ptr addrspace(1), ptr %dst.addr, align 4
+ %arrayidx2 = getelementptr inbounds i8, ptr addrspace(1) %data2, i32 1
+ store i8 %call1, ptr addrspace(1) %arrayidx2, align 1
+ ret void
+}
+
+; Function Attrs: convergent nounwind
+declare spir_func zeroext i8 @_Z16sub_group_rotatehi(i8 noundef zeroext, i32 noundef) #1
+
+; Function Attrs: convergent nounwind
+declare spir_func zeroext i8 @_Z26sub_group_clustered_rotatehij(i8 noundef zeroext, i32 noundef, i32 noundef) #1
+
+; Function Attrs: convergent noinline norecurse nounwind optnone
+define dso_local spir_kernel void @testRotateShort(ptr addrspace(1) noundef align 2 %dst) #0 !kernel_arg_addr_space !3 !kernel_arg_access_qual !4 !kernel_arg_type !8 !kernel_arg_base_type !8 !kernel_arg_type_qual !6 {
+entry:
+ %dst.addr = alloca ptr addrspace(1), align 4
+ %v = alloca i16, align 2
+ store ptr addrspace(1) %dst, ptr %dst.addr, align 4
+ store i16 0, ptr %v, align 2
+ %value = load i16, ptr %v, align 2
+ ; CHECK: OpGroupNonUniformRotateKHR %[[TyInt16]] %[[ScopeSubgroup]] %[[#]] %[[ConstInt2]]
+ %call = call spir_func signext i16 @_Z16sub_group_rotatesi(i16 noundef signext %value, i32 noundef 2) #2
+ %data = load ptr addrspace(1), ptr %dst.addr, align 4
+ %arrayidx = getelementptr inbounds i16, ptr addrspace(1) %data, i32 0
+ store i16 %call, ptr addrspace(1) %arrayidx, align 2
+ %value_clustered = load i16, ptr %v, align 2
+ ; CHECK: OpGroupNonUniformRotateKHR %[[TyInt16]] %[[ScopeSubgroup]] %[[#]] %[[ConstInt2]] %[[ConstInt4]]
+ %call1 = call spir_func signext i16 @_Z26sub_group_clustered_rotatesij(i16 noundef signext %value_clustered, i32 noundef 2, i32 noundef 4) #2
+ %data2 = load ptr addrspace(1), ptr %dst.addr, align 4
+ %arrayidx2 = getelementptr inbounds i16, ptr addrspace(1) %data2, i32 1
+ store i16 %call1, ptr addrspace(1) %arrayidx2, align 2
+ ret void
+}
+
+; Function Attrs: convergent nounwind
+declare spir_func signext i16 @_Z16sub_group_rotatesi(i16 noundef signext, i32 noundef) #1
+
+; Function Attrs: convergent nounwind
+declare spir_func signext i16 @_Z26sub_group_clustered_rotatesij(i16 noundef signext, i32 noundef, i32 noundef) #1
+
+; Function Attrs: convergent noinline norecurse nounwind optnone
+define dso_local spir_kernel void @testRotateUShort(ptr addrspace(1) noundef align 2 %dst) #0 !kernel_arg_addr_space !3 !kernel_arg_access_qual !4 !kernel_arg_type !9 !kernel_arg_base_type !9 !kernel_arg_type_qual !6 {
+entry:
+ %dst.addr = alloca ptr addrspace(1), align 4
+ %v = alloca i16, align 2
+ store ptr addrspace(1) %dst, ptr %dst.addr, align 4
+ store i16 0, ptr %v, align 2
+ %value = load i16, ptr %v, align 2
+ ; CHECK: OpGroupNonUniformRotateKHR %[[TyInt16]] %[[ScopeSubgroup]] %[[#]] %[[ConstInt2]]
+ %call = call spir_func zeroext i16 @_Z16sub_group_rotateti(i16 noundef zeroext %value, i32 noundef 2) #2
+ %data = load ptr addrspace(1), ptr %dst.addr, align 4
+ %arrayidx = getelementptr inbounds i16, ptr addrspace(1) %data, i32 0
+ store i16 %call, ptr addrspace(1) %arrayidx, align 2
+ %value_clustered = load i16, ptr %v, align 2
+ ; CHECK: OpGroupNonUniformRotateKHR %[[TyInt16]] %[[ScopeSubgroup]] %[[#]] %[[ConstInt2]] %[[ConstInt4]]
+ %call1 = call spir_func zeroext i16 @_Z26sub_group_clustered_rotatetij(i16 noundef zeroext %value_clustered, i32 noundef 2, i32 noundef 4) #2
+ %data2 = load ptr addrspace(1), ptr %dst.addr, align 4
+ %arrayidx2 = getelementptr inbounds i16, ptr addrspace(1) %data2, i32 1
+ store i16 %call1, ptr addrspace(1) %arrayidx2, align 2
+ ret void
+}
+
+; Function Attrs: convergent nounwind
+declare spir_func zeroext i16 @_Z16sub_group_rotateti(i16 noundef zeroext, i32 noundef) #1
+
+; Function Attrs: convergent nounwind
+declare spir_func zeroext i16 @_Z26sub_group_clustered_rotatetij(i16 noundef zeroext, i32 noundef, i32 noundef) #1
+
+; Function Attrs: convergent noinline norecurse nounwind optnone
+define dso_local spir_kernel void @testRotateInt(ptr addrspace(1) noundef align 4 %dst) #0 !kernel_arg_addr_space !3 !kernel_arg_access_qual !4 !kernel_arg_type !10 !kernel_arg_base_type !10 !kernel_arg_type_qual !6 {
+entry:
+ %dst.addr = alloca ptr addrspace(1), align 4
+ %v = alloca i32, align 4
+ store ptr addrspace(1) %dst, ptr %dst.addr, align 4
+ store i32 0, ptr %v, align 4
+ %value = load i32, ptr %v, align 4
+ ; CHECK: OpGroupNonUniformRotateKHR %[[TyInt32]] %[[ScopeSubgroup]] %[[#]] %[[ConstInt2]]
+ %call = call spir_func i32 @_Z16sub_group_rotateii(i32 noundef %value, i32 noundef 2) #2
+ %data = load ptr addrspace(1), ptr %dst.addr, align 4
+ %arrayidx = getelementptr inbounds i32, ptr addrspace(1) %data, i32 0
+ store i32 %call, ptr addrspace(1) %arrayidx, align 4
+ %value_clustered = load i32, ptr %v, align 4
+ ; CHECK: OpGroupNonUniformRotateKHR %[[TyInt32]] %[[ScopeSubgroup]] %[[#]] %[[ConstInt2]] %[[ConstInt4]]
+ %call1 = call spir_func i32 @_Z26sub_group_clustered_rotateiij(i32 noundef %value_clustered, i32 noundef 2, i32 noundef 4) #2
+ %data2 = load ptr addrspace(1), ptr %dst.addr, align 4
+ %arrayidx2 = getelementptr inbounds i32, ptr addrspace(1) %data2, i32 1
+ store i32 %call1, ptr addrspace(1) %arrayidx2, align 4
+ ret void
+}
+
+; Function Attrs: convergent nounwind
+declare spir_func i32 @_Z16sub_group_rotateii(i32 noundef, i32 noundef) #1
+
+; Function Attrs: convergent nounwind
+declare spir_func i32 @_Z26sub_group_clustered_rotateiij(i32 noundef, i32 noundef, i32 noundef) #1
+
+; Function Attrs: convergent noinline norecurse nounwind optnone
+define dso_local spir_kernel void @testRotateUInt(ptr addrspace(1) noundef align 4 %dst) #0 !kernel_arg_addr_space !3 !kernel_arg_access_qual !4 !kernel_arg_type !11 !kernel_arg_base_type !11 !kernel_arg_type_qual !6 {
+entry:
+ %dst.addr = alloca ptr addrspace(1), align 4
+ %v = alloca i32, align 4
+ store ptr addrspace(1) %dst, ptr %dst.addr, align 4
+ store i32 0, ptr %v, align 4
+ %value = load i32, ptr %v, align 4
+ ; CHECK: OpGroupNonUniformRotateKHR %[[TyInt32]] %[[ScopeSubgroup]] %[[#]] %[[ConstInt2]]
+ %call = call spir_func i32 @_Z16sub_group_rotateji(i32 noundef %value, i32 noundef 2) #2
+ %data = load ptr addrspace(1), ptr %dst.addr, align 4
+ %arrayidx = getelementptr inbounds i32, ptr addrspace(1) %data, i32 0
+ store i32 %call, ptr addrspace(1) %arrayidx, align 4
+ %value_clustered = load i32, ptr %v, align 4
+ ; CHECK: OpGroupNonUniformRotateKHR %[[TyInt32]] %[[ScopeSubgroup]] %[[#]] %[[ConstInt2]] %[[ConstInt4]]
+ %call1 = call spir_func i32 @_Z26sub_group_clustered_rotatejij(i32 noundef %value_clustered, i32 noundef 2, i32 noundef 4) #2
+ %data2 = load ptr addrspace(1), ptr %dst.addr, align 4
+ %arrayidx2 = getelementptr inbounds i32, ptr addrspace(1) %data2, i32 1
+ store i32 %call1, ptr addrspace(1) %arrayidx2, align 4
+ ret void
+}
+
+; Function Attrs: convergent nounwind
+declare spir_func i32 @_Z16sub_group_rotateji(i32 noundef, i32 noundef) #1
+
+; Function Attrs: convergent nounwind
+declare spir_func i32 @_Z26sub_group_clustered_rotatejij(i32 noundef, i32 noundef, i32 noundef) #1
+
+; Function Attrs: convergent noinline norecurse nounwind optnone
+define dso_local spir_kernel void @testRotateLong(ptr addrspace(1) noundef align 8 %dst) #0 !kernel_arg_addr_space !3 !kernel_arg_access_qual !4 !kernel_arg_type !12 !kernel_arg_base_type !12 !kernel_arg_type_qual !6 {
+entry:
+ %dst.addr = alloca ptr addrspace(1), align 4
+ %v = alloca i64, align 8
+ store ptr addrspace(1) %dst, ptr %dst.addr, align 4
+ store i64 0, ptr %v, align 8
+ %value = load i64, ptr %v, align 8
+ ; CHECK: OpGroupNonUniformRotateKHR %[[TyInt64]] %[[ScopeSubgroup]] %[[#]] %[[ConstInt2]]
+ %call = call spir_func i64 @_Z16sub_group_rotateli(i64 noundef %value, i32 noundef 2) #2
+ %data = load ptr addrspace(1), ptr %dst.addr, align 4
+ %arrayidx = getelementptr inbounds i64, ptr addrspace(1) %data, i32 0
+ store i64 %call, ptr addrspace(1) %arrayidx, align 8
+ %value_clustered = load i64, ptr %v, align 8
+ ; CHECK: OpGroupNonUniformRotateKHR %[[TyInt64]] %[[ScopeSubgroup]] %[[#]] %[[ConstInt2]] %[[ConstInt4]]
+ %call1 = call spir_func i64 @_Z26sub_group_clustered_rotatelij(i64 noundef %value_clustered, i32 noundef 2, i32 noundef 4) #2
+ %data2 = load ptr addrspace(1), ptr %dst.addr, align 4
+ %arrayidx2 = getelementptr inbounds i64, ptr addrspace(1) %data2, i32 1
+ store i64 %call1, ptr addrspace(1) %arrayidx2, align 8
+ ret void
+}
+
+; Function Attrs: convergent nounwind
+declare spir_func i64 @_Z16sub_group_rotateli(i64 noundef, i32 noundef) #1
+
+; Function Attrs: convergent nounwind
+declare spir_func i64 @_Z26sub_group_clustered_rotatelij(i64 noundef, i32 noundef, i32 noundef) #1
+
+; Function Attrs: convergent noinline norecurse nounwind optnone
+define dso_local spir_kernel void @testRotateULong(ptr addrspace(1) noundef align 8 %dst) #0 !kernel_arg_addr_space !3 !kernel_arg_access_qual !4 !kernel_arg_type !13 !kernel_arg_base_type !13 !kernel_arg_type_qual !6 {
+entry:
+ %dst.addr = alloca ptr addrspace(1), align 4
+ %v = alloca i64, align 8
+ store ptr addrspace(1) %dst, ptr %dst.addr, align 4
+ store i64 0, ptr %v, align 8
+ %value = load i64, ptr %v, align 8
+ ; CHECK: OpGroupNonUniformRotateKHR %[[TyInt64]] %[[ScopeSubgroup]] %[[#]] %[[ConstInt2]]
+ %call = call spir_func i64 @_Z16sub_group_rotatemi(i64 noundef %value, i32 noundef 2) #2
+ %data = load ptr addrspace(1), ptr %dst.addr, align 4
+ %arrayidx = getelementptr inbounds i64, ptr addrspace(1) %data, i32 0
+ store i64 %call, ptr addrspace(1) %arrayidx, align 8
+ %value_clustered = load i64, ptr %v, align 8
+ ; CHECK: OpGroupNonUniformRotateKHR %[[TyInt64]] %[[ScopeSubgroup]] %[[#]] %[[ConstInt2]] %[[ConstInt4]]
+ %call1 = call spir_func i64 @_Z26sub_group_clustered_rotatemij(i64 noundef %value_clustered, i32 noundef 2, i32 noundef 4) #2
+ %data2 = load ptr addrspace(1), ptr %dst.addr, align 4
+ %arrayidx2 = getelementptr inbounds i64, ptr addrspace(1) %data2, i32 1
+ store i64 %call1, ptr addrspace(1) %arrayidx2, align 8
+ ret void
+}
+
+; Function Attrs: convergent nounwind
+declare spir_func i64 @_Z16sub_group_rotatemi(i64 noundef, i32 noundef) #1
+
+; Function Attrs: convergent nounwind
+declare spir_func i64 @_Z26sub_group_clustered_rotatemij(i64 noundef, i32 noundef, i32 noundef) #1
+
+; Function Attrs: convergent noinline norecurse nounwind optnone
+define dso_local spir_kernel void @testRotateFloat(ptr addrspace(1) noundef align 4 %dst) #0 !kernel_arg_addr_space !3 !kernel_arg_access_qual !4 !kernel_arg_type !14 !kernel_arg_base_type !14 !kernel_arg_type_qual !6 {
+entry:
+ %dst.addr = alloca ptr addrspace(1), align 4
+ %v = alloca float, align 4
+ store ptr addrspace(1) %dst, ptr %dst.addr, align 4
+ store float 0.000000e+00, ptr %v, align 4
+ %value = load float, ptr %v, align 4
+ ; CHECK: OpGroupNonUniformRotateKHR %[[TyFloat]] %[[ScopeSubgroup]] %[[#]] %[[ConstInt2]]
+ %call = call spir_func float @_Z16sub_group_rotatefi(float noundef %value, i32 noundef 2) #2
+ %data = load ptr addrspace(1), ptr %dst.addr, align 4
+ %arrayidx = getelementptr inbounds float, ptr addrspace(1) %data, i32 0
+ store float %call, ptr addrspace(1) %arrayidx, align 4
+ %value_clustered = load float, ptr %v, align 4
+ ; CHECK: OpGroupNonUniformRotateKHR %[[TyFloat]] %[[ScopeSubgroup]] %[[#]] %[[ConstInt2]] %[[ConstInt4]]
+ %call1 = call spir_func float @_Z26sub_group_clustered_rotatefij(float noundef %value_clustered, i32 noundef 2, i32 noundef 4) #2
+ %data2 = load ptr addrspace(1), ptr %dst.addr, align 4
+ %arrayidx2 = getelementptr inbounds float, ptr addrspace(1) %data2, i32 1
+ store float %call1, ptr addrspace(1) %arrayidx2, align 4
+ ret void
+}
+
+; Function Attrs: convergent nounwind
+declare spir_func float @_Z16sub_group_rotatefi(float noundef, i32 noundef) #1
+
+; Function Attrs: convergent nounwind
+declare spir_func float @_Z26sub_group_clustered_rotatefij(float noundef, i32 noundef, i32 noundef) #1
+
+; Function Attrs: convergent noinline norecurse nounwind optnone
+define dso_local spir_kernel void @testRotateHalf(ptr addrspace(1) noundef align 2 %dst) #0 !kernel_arg_addr_space !3 !kernel_arg_access_qual !4 !kernel_arg_type !15 !kernel_arg_base_type !15 !kernel_arg_type_qual !6 {
+entry:
+ %dst.addr = alloca ptr addrspace(1), align 4
+ %v = alloca half, align 2
+ store ptr addrspace(1) %dst, ptr %dst.addr, align 4
+ store half 0xH0000, ptr %v, align 2
+ %value = load half, ptr %v, align 2
+ ; CHECK: OpGroupNonUniformRotateKHR %[[TyHalf]] %[[ScopeSubgroup]] %[[#]] %[[ConstInt2]]
+ %call = call spir_func half @_Z16sub_group_rotateDhi(half noundef %value, i32 noundef 2) #2
+ %data = load ptr addrspace(1), ptr %dst.addr, align 4
+ %arrayidx = getelementptr inbounds half, ptr addrspace(1) %data, i32 0
+ store half %call, ptr addrspace(1) %arrayidx, align 2
+ %value_clustered = load half, ptr %v, align 2
+ ; CHECK: OpGroupNonUniformRotateKHR %[[TyHalf]] %[[ScopeSubgroup]] %[[#]] %[[ConstInt2]] %[[ConstInt4]]
+ %call1 = call spir_func half @_Z26sub_group_clustered_rotateDhij(half noundef %value_clustered, i32 noundef 2, i32 noundef 4) #2
+ %data2 = load ptr addrspace(1), ptr %dst.addr, align 4
+ %arrayidx2 = getelementptr inbounds half, ptr addrspace(1) %data2, i32 1
+ store half %call1, ptr addrspace(1) %arrayidx2, align 2
+ ret void
+}
+
+; Function Attrs: convergent nounwind
+declare spir_func half @_Z16sub_group_rotateDhi(half noundef, i32 noundef) #1
+
+; Function Attrs: convergent nounwind
+declare spir_func half @_Z26sub_group_clustered_rotateDhij(half noundef, i32 noundef, i32 noundef) #1
+
+; Function Attrs: convergent noinline norecurse nounwind optnone
+define dso_local spir_kernel void @testRotateDouble(ptr addrspace(1) noundef align 8 %dst) #0 !kernel_arg_addr_space !3 !kernel_arg_access_qual !4 !kernel_arg_type !16 !kernel_arg_base_type !16 !kernel_arg_type_qual !6 {
+entry:
+ %dst.addr = alloca ptr addrspace(1), align 4
+ %v = alloca double, align 8
+ store ptr addrspace(1) %dst, ptr %dst.addr, align 4
+ store double 0.000000e+00, ptr %v, align 8
+ %value = load double, ptr %v, align 8
+ ; CHECK: OpGroupNonUniformRotateKHR %[[TyDouble]] %[[ScopeSubgroup]] %[[#]] %[[ConstInt2]]
+ %call = call spir_func double @_Z16sub_group_rotatedi(double noundef %value, i32 noundef 2) #2
+ %data = load ptr addrspace(1), ptr %dst.addr, align 4
+ %arrayidx = getelementptr inbounds double, ptr addrspace(1) %data, i32 0
+ store double %call, ptr addrspace(1) %arrayidx, align 8
+ %value_clustered = load double, ptr %v, align 8
+ ; CHECK: OpGroupNonUniformRotateKHR %[[TyDouble]] %[[ScopeSubgroup]] %[[#]] %[[ConstInt2]] %[[ConstInt4]]
+ %call1 = call spir_func double @_Z26sub_group_clustered_rotatedij(double noundef %value_clustered, i32 noundef 2, i32 noundef 4) #2
+ %data2 = load ptr addrspace(1), ptr %dst.addr, align 4
+ %arrayidx2 = getelementptr inbounds double, ptr addrspace(1) %data2, i32 1
+ store double %call1, ptr addrspace(1) %arrayidx2, align 8
+ ret void
+}
+
+; Function Attrs: convergent nounwind
+declare spir_func double @_Z16sub_group_rotatedi(double noundef, i32 noundef) #1
+
+; Function Attrs: convergent nounwind
+declare spir_func double @_Z26sub_group_clustered_rotatedij(double noundef, i32 noundef, i32 noundef) #1
+
+attributes #0 = { convergent noinline norecurse nounwind optnone "no-trapping-math"="true" "stack-protector-buffer-size"="8" "uniform-work-group-size"="false" }
+attributes #1 = { convergent nounwind "no-trapping-math"="true" "stack-protector-buffer-size"="8" }
+attributes #2 = { convergent nounwind }
+
+!llvm.module.flags = !{!0}
+!opencl.ocl.version = !{!1}
+!opencl.spir.version = !{!1}
+!llvm.ident = !{!2}
+
+!0 = !{i32 1, !"wchar_size", i32 4}
+!1 = !{i32 2, i32 0}
+!2 = !{!"clang version 19.0.0"}
+!3 = !{i32 1}
+!4 = !{!"none"}
+!5 = !{!"char*"}
+!6 = !{!""}
+!7 = !{!"uchar*"}
+!8 = !{!"short*"}
+!9 = !{!"ushort*"}
+!10 = !{!"int*"}
+!11 = !{!"uint*"}
+!12 = !{!"long*"}
+!13 = !{!"ulong*"}
+!14 = !{!"float*"}
+!15 = !{!"half*"}
+!16 = !{!"double*"}
diff --git a/llvm/test/CodeGen/SPIRV/switch-no-jump-table.ll b/llvm/test/CodeGen/SPIRV/switch-no-jump-table.ll
new file mode 100644
index 000000000000..c9c0f17f0b91
--- /dev/null
+++ b/llvm/test/CodeGen/SPIRV/switch-no-jump-table.ll
@@ -0,0 +1,30 @@
+; Check that jump tables are not generated for switch instructions.
+
+; RUN: llc -O0 -mtriple=spirv32-unknown-unknown %s -o - | FileCheck %s
+; RUN: %if spirv-tools %{ llc -O0 -mtriple=spirv64-unknown-unknown %s -o - -filetype=obj | spirv-val %}
+
+; CHECK: OpSwitch %[[#]] %[[Label:]]
+; CHECK-COUNT-4: OpBranch %[[Label]]
+
+target datalayout = "e-i64:64-v16:16-v24:32-v32:32-v48:64-v96:128-v192:256-v256:256-v512:512-v1024:1024-n8:16:32:64"
+target triple = "spir64-unknown-unknown"
+
+define spir_func void @foo(i32 noundef %val) {
+entry:
+ switch i32 %val, label %sw.epilog [
+ i32 0, label %sw.bb
+ i32 1, label %sw.bb2
+ i32 2, label %sw.bb3
+ i32 3, label %sw.bb4
+ ]
+sw.bb:
+ br label %sw.epilog
+sw.bb2:
+ br label %sw.epilog
+sw.bb3:
+ br label %sw.epilog
+sw.bb4:
+ br label %sw.epilog
+sw.epilog:
+ ret void
+}
diff --git a/llvm/test/CodeGen/SystemZ/fp-strict-conv-17.ll b/llvm/test/CodeGen/SystemZ/fp-strict-conv-17.ll
index eb0ff4b82560..3ff63242a6d8 100644
--- a/llvm/test/CodeGen/SystemZ/fp-strict-conv-17.ll
+++ b/llvm/test/CodeGen/SystemZ/fp-strict-conv-17.ll
@@ -20,7 +20,7 @@ declare i128 @llvm.experimental.constrained.fptoui.i128.f64(double, metadata)
declare i128 @llvm.experimental.constrained.fptoui.i128.f32(float, metadata)
; Test signed i128->f128.
-define fp128 @f1(i128 %i) {
+define fp128 @f1(i128 %i) #0 {
; CHECK-LABEL: f1:
; CHECK: brasl %r14, __floattitf@PLT
; CHECK: br %r14
@@ -31,7 +31,7 @@ define fp128 @f1(i128 %i) {
}
; Test signed i128->f64.
-define double @f2(i128 %i) {
+define double @f2(i128 %i) #0 {
; CHECK-LABEL: f2:
; CHECK: brasl %r14, __floattidf@PLT
; CHECK: br %r14
@@ -42,7 +42,7 @@ define double @f2(i128 %i) {
}
; Test signed i128->f32.
-define float @f3(i128 %i) {
+define float @f3(i128 %i) #0 {
; CHECK-LABEL: f3:
; CHECK: brasl %r14, __floattisf@PLT
; CHECK: br %r14
@@ -53,7 +53,7 @@ define float @f3(i128 %i) {
}
; Test unsigned i128->f128.
-define fp128 @f4(i128 %i) {
+define fp128 @f4(i128 %i) #0 {
; CHECK-LABEL: f4:
; CHECK: brasl %r14, __floatuntitf@PLT
; CHECK: br %r14
@@ -64,7 +64,7 @@ define fp128 @f4(i128 %i) {
}
; Test unsigned i128->f64.
-define double @f5(i128 %i) {
+define double @f5(i128 %i) #0 {
; CHECK-LABEL: f5:
; CHECK: brasl %r14, __floatuntidf@PLT
; CHECK: br %r14
@@ -75,7 +75,7 @@ define double @f5(i128 %i) {
}
; Test unsigned i128->f32.
-define float @f6(i128 %i) {
+define float @f6(i128 %i) #0 {
; CHECK-LABEL: f6:
; CHECK: brasl %r14, __floatuntisf@PLT
; CHECK: br %r14
@@ -86,7 +86,7 @@ define float @f6(i128 %i) {
}
; Test signed f128->i128.
-define i128 @f7(fp128 %f) {
+define i128 @f7(fp128 %f) #0 {
; CHECK-LABEL: f7:
; CHECK: brasl %r14, __fixtfti@PLT
; CHECK: br %r14
@@ -96,7 +96,7 @@ define i128 @f7(fp128 %f) {
}
; Test signed f64->i128.
-define i128 @f8(double %f) {
+define i128 @f8(double %f) #0 {
; CHECK-LABEL: f8:
; CHECK: brasl %r14, __fixdfti@PLT
; CHECK: br %r14
@@ -106,7 +106,7 @@ define i128 @f8(double %f) {
}
; Test signed f9->i128.
-define i128 @f9(float %f) {
+define i128 @f9(float %f) #0 {
; CHECK-LABEL: f9:
; CHECK: brasl %r14, __fixsfti@PLT
; CHECK: br %r14
@@ -116,7 +116,7 @@ define i128 @f9(float %f) {
}
; Test unsigned f128->i128.
-define i128 @f10(fp128 %f) {
+define i128 @f10(fp128 %f) #0 {
; CHECK-LABEL: f10:
; CHECK: brasl %r14, __fixunstfti@PLT
; CHECK: br %r14
@@ -126,7 +126,7 @@ define i128 @f10(fp128 %f) {
}
; Test unsigned f64->i128.
-define i128 @f11(double %f) {
+define i128 @f11(double %f) #0 {
; CHECK-LABEL: f11:
; CHECK: brasl %r14, __fixunsdfti@PLT
; CHECK: br %r14
@@ -136,7 +136,7 @@ define i128 @f11(double %f) {
}
; Test unsigned f32->i128.
-define i128 @f12(float %f) {
+define i128 @f12(float %f) #0 {
; CHECK-LABEL: f12:
; CHECK: brasl %r14, __fixunssfti@PLT
; CHECK: br %r14
diff --git a/llvm/test/CodeGen/WebAssembly/lower-em-ehsjlj-multi-return.ll b/llvm/test/CodeGen/WebAssembly/lower-em-ehsjlj-multi-return.ll
index 4f33439db770..daf46c6eef02 100644
--- a/llvm/test/CodeGen/WebAssembly/lower-em-ehsjlj-multi-return.ll
+++ b/llvm/test/CodeGen/WebAssembly/lower-em-ehsjlj-multi-return.ll
@@ -1,5 +1,5 @@
-; RUN: not --crash llc < %s -enable-emscripten-cxx-exceptions -mattr=+multivalue 2>&1 | FileCheck %s --check-prefix=EH
-; RUN: not --crash llc < %s -enable-emscripten-sjlj -mattr=+multivalue 2>&1 | FileCheck %s --check-prefix=SJLJ
+; RUN: not --crash llc < %s -enable-emscripten-cxx-exceptions -mattr=+multivalue -wasm-emit-multivalue 2>&1 | FileCheck %s --check-prefix=EH
+; RUN: not --crash llc < %s -enable-emscripten-sjlj -mattr=+multivalue -wasm-emit-multivalue 2>&1 | FileCheck %s --check-prefix=SJLJ
; Currently multivalue returning functions are not supported in Emscripten EH /
; SjLj. Make sure they error out.
diff --git a/llvm/test/CodeGen/WebAssembly/multivalue-dont-move-def-past-use.mir b/llvm/test/CodeGen/WebAssembly/multivalue-dont-move-def-past-use.mir
index 4b4661b14466..4fadbd5f07e6 100644
--- a/llvm/test/CodeGen/WebAssembly/multivalue-dont-move-def-past-use.mir
+++ b/llvm/test/CodeGen/WebAssembly/multivalue-dont-move-def-past-use.mir
@@ -1,5 +1,5 @@
# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
-# RUN: llc -mtriple=wasm32-unknown-unknown -mattr=+multivalue -run-pass=wasm-reg-stackify -verify-machineinstrs %s -o - | FileCheck %s
+# RUN: llc -mtriple=wasm32-unknown-unknown -mattr=+multivalue -wasm-emit-multivalue -run-pass=wasm-reg-stackify -verify-machineinstrs %s -o - | FileCheck %s
--- |
target datalayout = "e-m:e-p:32:32-p10:8:8-p20:8:8-i64:64-n32:64-S128-ni:1:10:20"
diff --git a/llvm/test/CodeGen/WebAssembly/multivalue-stackify.ll b/llvm/test/CodeGen/WebAssembly/multivalue-stackify.ll
index 52a8c686824d..f4f93ac2f30c 100644
--- a/llvm/test/CodeGen/WebAssembly/multivalue-stackify.ll
+++ b/llvm/test/CodeGen/WebAssembly/multivalue-stackify.ll
@@ -1,7 +1,7 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; NOTE: Test functions have been generated by multivalue-stackify.py.
-; RUN: llc < %s -verify-machineinstrs -mattr=+multivalue | FileCheck %s
+; RUN: llc < %s -verify-machineinstrs -mattr=+multivalue -wasm-emit-multivalue | FileCheck %s
; Test that the multivalue stackification works
diff --git a/llvm/test/CodeGen/WebAssembly/multivalue.ll b/llvm/test/CodeGen/WebAssembly/multivalue.ll
index 675009c8f3e5..846691e5ff0c 100644
--- a/llvm/test/CodeGen/WebAssembly/multivalue.ll
+++ b/llvm/test/CodeGen/WebAssembly/multivalue.ll
@@ -1,7 +1,8 @@
-; RUN: llc < %s -asm-verbose=false -verify-machineinstrs -mcpu=mvp -mattr=+multivalue,+tail-call | FileCheck %s
-; RUN: llc < %s -asm-verbose=false -verify-machineinstrs -mcpu=mvp -mattr=+reference-types,+multivalue,+tail-call | FileCheck --check-prefix REF %s
-; RUN: llc < %s -asm-verbose=false -verify-machineinstrs -disable-wasm-fallthrough-return-opt -wasm-disable-explicit-locals -wasm-keep-registers -mcpu=mvp -mattr=+multivalue,+tail-call | FileCheck %s --check-prefix REGS
-; RUN: llc < %s --filetype=obj -mcpu=mvp -mattr=+multivalue,+tail-call | obj2yaml | FileCheck %s --check-prefix OBJ
+; RUN: llc < %s -asm-verbose=false -verify-machineinstrs -mcpu=mvp -mattr=+multivalue,+tail-call -wasm-emit-multivalue | FileCheck %s
+; RUN: llc < %s -asm-verbose=false -verify-machineinstrs -mcpu=mvp -mattr=+reference-types,+multivalue,+tail-call -wasm-emit-multivalue | FileCheck --check-prefix REF %s
+; RUN: llc < %s -asm-verbose=false -verify-machineinstrs -disable-wasm-fallthrough-return-opt -wasm-disable-explicit-locals -wasm-keep-registers -mcpu=mvp -mattr=+multivalue,+tail-call -wasm-emit-multivalue | FileCheck %s --check-prefix REGS
+; RUN: llc < %s --filetype=obj -mcpu=mvp -mattr=+multivalue,+tail-call -wasm-emit-multivalue | obj2yaml | FileCheck %s --check-prefix OBJ
+; RUN: llc < %s -asm-verbose=false -verify-machineinstrs -mcpu=mvp -mattr=+multivalue,+tail-call | FileCheck %s --check-prefix NO-MULTIVALUE
; Test that the multivalue calls, returns, function types, and block
; types work as expected.
@@ -19,6 +20,7 @@ declare void @use_i64(i64)
; CHECK-NEXT: i32.const 42{{$}}
; CHECK-NEXT: i64.const 42{{$}}
; CHECK-NEXT: end_function{{$}}
+; NO-MULTIVALUE-NOT: .functype pair_const () -> (i32, i64)
define %pair @pair_const() {
ret %pair { i32 42, i64 42 }
}
diff --git a/llvm/test/CodeGen/WebAssembly/multivalue_libcall.ll b/llvm/test/CodeGen/WebAssembly/multivalue_libcall.ll
index 47c5ae7b457d..7bf37b59353a 100644
--- a/llvm/test/CodeGen/WebAssembly/multivalue_libcall.ll
+++ b/llvm/test/CodeGen/WebAssembly/multivalue_libcall.ll
@@ -1,5 +1,5 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 2
-; RUN: llc < %s -verify-machineinstrs -mcpu=mvp -mattr=+multivalue | FileCheck %s --check-prefix=MULTIVALUE
+; RUN: llc < %s -verify-machineinstrs -mcpu=mvp -mattr=+multivalue -wasm-emit-multivalue | FileCheck %s --check-prefix=MULTIVALUE
; RUN: llc < %s -verify-machineinstrs -mcpu=mvp | FileCheck %s --check-prefix=NO_MULTIVALUE
; Test libcall signatures when multivalue is enabled and disabled
diff --git a/llvm/test/CodeGen/X86/GlobalISel/calllowering-tailcall.ll b/llvm/test/CodeGen/X86/GlobalISel/calllowering-tailcall.ll
new file mode 100644
index 000000000000..6a856c32eb26
--- /dev/null
+++ b/llvm/test/CodeGen/X86/GlobalISel/calllowering-tailcall.ll
@@ -0,0 +1,24 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 4
+; RUN: llc -mtriple=x86_64-linux-gnu -global-isel < %s | FileCheck %s --check-prefix=X64
+; RUN: llc -mtriple=i686-linux-gnu -global-isel < %s | FileCheck %s --check-prefix=X86
+
+declare ptr @foo()
+
+define ptr @aligned_tailcall() nounwind {
+; X64-LABEL: aligned_tailcall:
+; X64: # %bb.0: # %entry
+; X64-NEXT: pushq %rax
+; X64-NEXT: callq foo
+; X64-NEXT: popq %rcx
+; X64-NEXT: retq
+;
+; X86-LABEL: aligned_tailcall:
+; X86: # %bb.0: # %entry
+; X86-NEXT: subl $12, %esp
+; X86-NEXT: calll foo
+; X86-NEXT: addl $12, %esp
+; X86-NEXT: retl
+entry:
+ %call = tail call align 8 ptr @foo()
+ ret ptr %call
+}
diff --git a/llvm/test/CodeGen/X86/tailcall-cgp-dup.ll b/llvm/test/CodeGen/X86/tailcall-cgp-dup.ll
index 401ed9f7bc5a..8a9ee60f341c 100644
--- a/llvm/test/CodeGen/X86/tailcall-cgp-dup.ll
+++ b/llvm/test/CodeGen/X86/tailcall-cgp-dup.ll
@@ -339,7 +339,7 @@ return:
define ptr @strcpy_illegal_tailc(ptr %dest, i64 %sz, ptr readonly returned %src) nounwind {
; CHECK-LABEL: strcpy_illegal_tailc:
-; CHECK: ## %bb.0:
+; CHECK: ## %bb.0: ## %entry
; CHECK-NEXT: pushq %rbx
; CHECK-NEXT: movq %rdx, %rbx
; CHECK-NEXT: testq %rsi, %rsi
@@ -351,6 +351,7 @@ define ptr @strcpy_illegal_tailc(ptr %dest, i64 %sz, ptr readonly returned %src)
; CHECK-NEXT: movq %rbx, %rax
; CHECK-NEXT: popq %rbx
; CHECK-NEXT: retq
+entry:
%cmp = icmp eq i64 %sz, 0
br i1 %cmp, label %return, label %if.then
@@ -362,8 +363,63 @@ return:
ret ptr %src
}
+@i = global i32 0, align 4
+
+define i32 @undef_tailc() nounwind {
+; CHECK-LABEL: undef_tailc:
+; CHECK: ## %bb.0: ## %entry
+; CHECK-NEXT: cmpl $0, _i(%rip)
+; CHECK-NEXT: jne _qux ## TAILCALL
+; CHECK-NEXT: ## %bb.1: ## %return
+; CHECK-NEXT: retq
+entry:
+ %val = load i32, ptr @i, align 4
+ %cmp = icmp eq i32 %val, 0
+ br i1 %cmp, label %return, label %if.then
+
+if.then:
+ %rv_unused = tail call i32 @qux()
+ br label %return
+
+return:
+ ret i32 undef
+}
+
+define i32 @undef_and_known_tailc() nounwind {
+; CHECK-LABEL: undef_and_known_tailc:
+; CHECK: ## %bb.0: ## %entry
+; CHECK-NEXT: movl _i(%rip), %eax
+; CHECK-NEXT: cmpl $5, %eax
+; CHECK-NEXT: je _qux ## TAILCALL
+; CHECK-NEXT: ## %bb.1: ## %entry
+; CHECK-NEXT: cmpl $2, %eax
+; CHECK-NEXT: je _quux ## TAILCALL
+; CHECK-NEXT: ## %bb.2: ## %return
+; CHECK-NEXT: retq
+entry:
+ %val = load i32, ptr @i, align 4
+ switch i32 %val, label %return [
+ i32 2, label %case_2
+ i32 5, label %case_5
+ ]
+
+case_2:
+ %rv_unused = tail call i32 @quux()
+ br label %return
+
+case_5:
+ %rv = tail call i32 @qux()
+ br label %return
+
+return:
+ %phi = phi i32 [ undef, %case_2 ], [ %rv, %case_5 ], [ undef, %entry ]
+ ret i32 %phi
+}
+
declare void @llvm.memcpy.p0.p0.i64(ptr noalias nocapture writeonly, ptr noalias nocapture readonly, i64, i1)
declare void @llvm.memset.p0.i64(ptr nocapture writeonly, i8, i64, i1)
declare noalias ptr @malloc(i64)
declare ptr @strcpy(ptr noalias returned writeonly, ptr noalias nocapture readonly)
declare ptr @baz(ptr, ptr)
+declare i32 @qux()
+declare i32 @quux()
diff --git a/llvm/test/ExecutionEngine/JITLink/Generic/sectcreate.test b/llvm/test/ExecutionEngine/JITLink/Generic/sectcreate.test
index 33ad5515a635..ec71011d545e 100644
--- a/llvm/test/ExecutionEngine/JITLink/Generic/sectcreate.test
+++ b/llvm/test/ExecutionEngine/JITLink/Generic/sectcreate.test
@@ -5,4 +5,7 @@
#
# Use -sectcreate to create a section from a data file.
+# JITLink does not support ARM64 COFF files.
+# UNSUPPORTED: target=aarch64-pc-windows-{{.*}}
+
# jitlink-check: *{4}foo = 0x2a2a5a5a \ No newline at end of file
diff --git a/llvm/test/MC/AMDGPU/gfx11-promotions.s b/llvm/test/MC/AMDGPU/gfx11-promotions.s
index 0bd90266457e..67e7beaa262f 100644
--- a/llvm/test/MC/AMDGPU/gfx11-promotions.s
+++ b/llvm/test/MC/AMDGPU/gfx11-promotions.s
@@ -337,17 +337,17 @@ v_dot2_f32_f16_e64 v0, v1, v2, v3
//===----------------------------------------------------------------------===//
v_dot2_f32_f16 v0, v1, v2, v3 dpp8:[7,6,5,4,3,2,1,0]
-// GFX11: v_dot2_f32_f16_e64_dpp v0, v1, v2, v3 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x00,0x00,0x13,0xcc,0xe9,0x04,0x0e,0x04,0x01,0x77,0x39,0x05]
+// GFX11: v_dot2_f32_f16_e64_dpp v0, v1, v2, v3 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x00,0x40,0x13,0xcc,0xe9,0x04,0x0e,0x1c,0x01,0x77,0x39,0x05]
v_dot2_f32_f16_e64_dpp v0, v1, v2, v3 dpp8:[7,6,5,4,3,2,1,0]
-// GFX11: v_dot2_f32_f16_e64_dpp v0, v1, v2, v3 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x00,0x00,0x13,0xcc,0xe9,0x04,0x0e,0x04,0x01,0x77,0x39,0x05]
+// GFX11: v_dot2_f32_f16_e64_dpp v0, v1, v2, v3 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x00,0x40,0x13,0xcc,0xe9,0x04,0x0e,0x1c,0x01,0x77,0x39,0x05]
//===----------------------------------------------------------------------===//
// VOP3P.DPP16.
//===----------------------------------------------------------------------===//
v_dot2_f32_f16 v0, v1, v2, v3 quad_perm:[1,2,3,0]
-// GFX11: v_dot2_f32_f16_e64_dpp v0, v1, v2, v3 quad_perm:[1,2,3,0] row_mask:0xf bank_mask:0xf ; encoding: [0x00,0x00,0x13,0xcc,0xfa,0x04,0x0e,0x04,0x01,0x39,0x00,0xff]
+// GFX11: v_dot2_f32_f16_e64_dpp v0, v1, v2, v3 quad_perm:[1,2,3,0] row_mask:0xf bank_mask:0xf ; encoding: [0x00,0x40,0x13,0xcc,0xfa,0x04,0x0e,0x1c,0x01,0x39,0x00,0xff]
v_dot2_f32_f16_e64_dpp v0, v1, v2, v3 quad_perm:[1,2,3,0]
-// GFX11: v_dot2_f32_f16_e64_dpp v0, v1, v2, v3 quad_perm:[1,2,3,0] row_mask:0xf bank_mask:0xf ; encoding: [0x00,0x00,0x13,0xcc,0xfa,0x04,0x0e,0x04,0x01,0x39,0x00,0xff]
+// GFX11: v_dot2_f32_f16_e64_dpp v0, v1, v2, v3 quad_perm:[1,2,3,0] row_mask:0xf bank_mask:0xf ; encoding: [0x00,0x40,0x13,0xcc,0xfa,0x04,0x0e,0x1c,0x01,0x39,0x00,0xff]
diff --git a/llvm/test/MC/AMDGPU/gfx11_asm_vop3p_dpp16.s b/llvm/test/MC/AMDGPU/gfx11_asm_vop3p_dpp16.s
index 2cfb8abd4e97..3ff4ed27f1b2 100644
--- a/llvm/test/MC/AMDGPU/gfx11_asm_vop3p_dpp16.s
+++ b/llvm/test/MC/AMDGPU/gfx11_asm_vop3p_dpp16.s
@@ -2,10 +2,10 @@
// RUN: llvm-mc -triple=amdgcn -mcpu=gfx1100 -mattr=-wavefrontsize32,+wavefrontsize64 -show-encoding %s | FileCheck --check-prefixes=GFX11 %s
v_dot2_f32_f16 v0, v1, v2, v3 neg_lo:[0,0,0] neg_hi:[0,0,0] quad_perm:[2,2,3,1] bound_ctrl:0 fi:1
-// GFX11: v_dot2_f32_f16_e64_dpp v0, v1, v2, v3 quad_perm:[2,2,3,1] row_mask:0xf bank_mask:0xf fi:1 ; encoding: [0x00,0x00,0x13,0xcc,0xfa,0x04,0x0e,0x04,0x01,0x7a,0x04,0xff]
+// GFX11: v_dot2_f32_f16_e64_dpp v0, v1, v2, v3 quad_perm:[2,2,3,1] row_mask:0xf bank_mask:0xf fi:1 ; encoding: [0x00,0x40,0x13,0xcc,0xfa,0x04,0x0e,0x1c,0x01,0x7a,0x04,0xff]
v_dot2_f32_f16 v0, v1, v2, v3 neg_lo:[1,1,0] neg_hi:[1,0,1] quad_perm:[3,2,1,0] bank_mask:0xe
-// GFX11: v_dot2_f32_f16_e64_dpp v0, v1, v2, v3 neg_lo:[1,1,0] neg_hi:[1,0,1] quad_perm:[3,2,1,0] row_mask:0xf bank_mask:0xe ; encoding: [0x00,0x05,0x13,0xcc,0xfa,0x04,0x0e,0x64,0x01,0x1b,0x00,0xfe]
+// GFX11: v_dot2_f32_f16_e64_dpp v0, v1, v2, v3 neg_lo:[1,1,0] neg_hi:[1,0,1] quad_perm:[3,2,1,0] row_mask:0xf bank_mask:0xe ; encoding: [0x00,0x45,0x13,0xcc,0xfa,0x04,0x0e,0x7c,0x01,0x1b,0x00,0xfe]
v_fma_mix_f32 v0, v1, v2, v3 op_sel:[0,0,0] row_ror:7 bank_mask:0x1 bound_ctrl:0
// GFX11: v_fma_mix_f32_e64_dpp v0, v1, v2, v3 row_ror:7 row_mask:0xf bank_mask:0x1 ; encoding: [0x00,0x00,0x20,0xcc,0xfa,0x04,0x0e,0x04,0x01,0x27,0x01,0xf1]
diff --git a/llvm/test/MC/AMDGPU/gfx11_asm_vop3p_dpp8.s b/llvm/test/MC/AMDGPU/gfx11_asm_vop3p_dpp8.s
index 2656ba0cf180..3fb993dc8bec 100644
--- a/llvm/test/MC/AMDGPU/gfx11_asm_vop3p_dpp8.s
+++ b/llvm/test/MC/AMDGPU/gfx11_asm_vop3p_dpp8.s
@@ -15,4 +15,4 @@ v_fma_mixlo_f16 v0, abs(v1), -v2, abs(v3) op_sel:[1,0,0] op_sel_hi:[1,0,0] dpp8:
// GFX11: encoding: [0x00,0x0d,0x21,0xcc,0xe9,0x04,0x0e,0x4c,0x01,0x92,0x44,0x92]
v_dot2_f32_f16_e64_dpp v0, v1, v2, v3 neg_lo:[0,1,1] neg_hi:[1,0,1] dpp8:[7,6,5,4,3,2,1,0]
-// GFX11: encoding: [0x00,0x05,0x13,0xcc,0xe9,0x04,0x0e,0xc4,0x01,0x77,0x39,0x05]
+// GFX11: encoding: [0x00,0x45,0x13,0xcc,0xe9,0x04,0x0e,0xdc,0x01,0x77,0x39,0x05]
diff --git a/llvm/test/MC/AMDGPU/gfx12_asm_vop3p_dpp16.s b/llvm/test/MC/AMDGPU/gfx12_asm_vop3p_dpp16.s
index 75bd1696e10b..a6360684f1d0 100644
--- a/llvm/test/MC/AMDGPU/gfx12_asm_vop3p_dpp16.s
+++ b/llvm/test/MC/AMDGPU/gfx12_asm_vop3p_dpp16.s
@@ -2,10 +2,10 @@
// RUN: llvm-mc -arch=amdgcn -mcpu=gfx1200 -mattr=-wavefrontsize32,+wavefrontsize64 -show-encoding %s | FileCheck --check-prefixes=GFX12 %s
v_dot2_f32_f16 v0, v1, v2, v3 neg_lo:[0,0,0] neg_hi:[0,0,0] quad_perm:[2,2,3,1] bound_ctrl:0 fi:1
-// GFX12: v_dot2_f32_f16_e64_dpp v0, v1, v2, v3 quad_perm:[2,2,3,1] row_mask:0xf bank_mask:0xf fi:1 ; encoding: [0x00,0x00,0x13,0xcc,0xfa,0x04,0x0e,0x04,0x01,0x7a,0x04,0xff]
+// GFX12: v_dot2_f32_f16_e64_dpp v0, v1, v2, v3 quad_perm:[2,2,3,1] row_mask:0xf bank_mask:0xf fi:1 ; encoding: [0x00,0x40,0x13,0xcc,0xfa,0x04,0x0e,0x1c,0x01,0x7a,0x04,0xff]
v_dot2_f32_f16 v0, v1, v2, v3 neg_lo:[1,1,0] neg_hi:[1,0,1] quad_perm:[3,2,1,0] bank_mask:0xe
-// GFX12: v_dot2_f32_f16_e64_dpp v0, v1, v2, v3 neg_lo:[1,1,0] neg_hi:[1,0,1] quad_perm:[3,2,1,0] row_mask:0xf bank_mask:0xe ; encoding: [0x00,0x05,0x13,0xcc,0xfa,0x04,0x0e,0x64,0x01,0x1b,0x00,0xfe]
+// GFX12: v_dot2_f32_f16_e64_dpp v0, v1, v2, v3 neg_lo:[1,1,0] neg_hi:[1,0,1] quad_perm:[3,2,1,0] row_mask:0xf bank_mask:0xe ; encoding: [0x00,0x45,0x13,0xcc,0xfa,0x04,0x0e,0x7c,0x01,0x1b,0x00,0xfe]
v_fma_mix_f32 v0, v1, v2, v3 op_sel:[0,0,0] row_ror:7 bank_mask:0x1 bound_ctrl:0
// GFX12: v_fma_mix_f32_e64_dpp v0, v1, v2, v3 row_ror:7 row_mask:0xf bank_mask:0x1 ; encoding: [0x00,0x00,0x20,0xcc,0xfa,0x04,0x0e,0x04,0x01,0x27,0x01,0xf1]
diff --git a/llvm/test/MC/AMDGPU/gfx12_asm_vop3p_dpp8.s b/llvm/test/MC/AMDGPU/gfx12_asm_vop3p_dpp8.s
index 14cf169d4b42..299339339e8c 100644
--- a/llvm/test/MC/AMDGPU/gfx12_asm_vop3p_dpp8.s
+++ b/llvm/test/MC/AMDGPU/gfx12_asm_vop3p_dpp8.s
@@ -15,7 +15,7 @@ v_fma_mixlo_f16 v0, abs(v1), -v2, abs(v3) op_sel:[1,0,0] op_sel_hi:[1,0,0] dpp8:
// GFX12: encoding: [0x00,0x0d,0x21,0xcc,0xe9,0x04,0x0e,0x4c,0x01,0x92,0x44,0x92]
v_dot2_f32_f16_e64_dpp v0, v1, v2, v3 neg_lo:[0,1,1] neg_hi:[1,0,1] dpp8:[7,6,5,4,3,2,1,0]
-// GFX12: encoding: [0x00,0x05,0x13,0xcc,0xe9,0x04,0x0e,0xc4,0x01,0x77,0x39,0x05]
+// GFX12: encoding: [0x00,0x45,0x13,0xcc,0xe9,0x04,0x0e,0xdc,0x01,0x77,0x39,0x05]
v_dot4_f32_fp8_bf8 v0, v1, v2, v3 dpp8:[0,1,2,3,4,5,6,7]
// GFX12: v_dot4_f32_fp8_bf8_e64_dpp v0, v1, v2, v3 dpp8:[0,1,2,3,4,5,6,7] ; encoding: [0x00,0x40,0x24,0xcc,0xe9,0x04,0x0e,0x1c,0x01,0x88,0xc6,0xfa]
diff --git a/llvm/test/MC/Disassembler/AMDGPU/gfx11_dasm_vop3p_dpp16.txt b/llvm/test/MC/Disassembler/AMDGPU/gfx11_dasm_vop3p_dpp16.txt
index 6b230367c831..ceca6d9fc3fa 100644
--- a/llvm/test/MC/Disassembler/AMDGPU/gfx11_dasm_vop3p_dpp16.txt
+++ b/llvm/test/MC/Disassembler/AMDGPU/gfx11_dasm_vop3p_dpp16.txt
@@ -1,11 +1,11 @@
# RUN: llvm-mc -triple=amdgcn -mcpu=gfx1100 -disassemble -show-encoding < %s | FileCheck -check-prefix=GFX11 %s
# RUN: llvm-mc -triple=amdgcn -mcpu=gfx1100 -mattr=-WavefrontSize32,+WavefrontSize64 -disassemble -show-encoding < %s | FileCheck -check-prefix=GFX11 %s
-# GFX11: v_dot2_f32_f16_e64_dpp v0, v1, v2, v3 neg_lo:[1,1,0] neg_hi:[1,0,1] quad_perm:[3,2,1,0] row_mask:0xf bank_mask:0xe ; encoding: [0x00,0x05,0x13,0xcc,0xfa,0x04,0x0e,0x64,0x01,0x1b,0x00,0xfe]
-0x00,0x05,0x13,0xcc,0xfa,0x04,0x0e,0x64,0x01,0x1b,0x00,0xfe
+# GFX11: v_dot2_f32_f16_e64_dpp v0, v1, v2, v3 neg_lo:[1,1,0] neg_hi:[1,0,1] quad_perm:[3,2,1,0] row_mask:0xf bank_mask:0xe ; encoding: [0x00,0x45,0x13,0xcc,0xfa,0x04,0x0e,0x7c,0x01,0x1b,0x00,0xfe]
+0x00,0x45,0x13,0xcc,0xfa,0x04,0x0e,0x7c,0x01,0x1b,0x00,0xfe
-# GFX11: v_dot2_f32_f16_e64_dpp v0, v1, v2, v3 quad_perm:[2,2,3,1] row_mask:0xf bank_mask:0xf bound_ctrl:1 fi:1 ; encoding: [0x00,0x00,0x13,0xcc,0xfa,0x04,0x0e,0x04,0x01,0x7a,0x0c,0xff]
-0x00,0x00,0x13,0xcc,0xfa,0x04,0x0e,0x04,0x01,0x7a,0x0c,0xff
+# GFX11: v_dot2_f32_f16_e64_dpp v0, v1, v2, v3 quad_perm:[2,2,3,1] row_mask:0xf bank_mask:0xf bound_ctrl:1 fi:1 ; encoding: [0x00,0x40,0x13,0xcc,0xfa,0x04,0x0e,0x1c,0x01,0x7a,0x0c,0xff]
+0x00,0x40,0x13,0xcc,0xfa,0x04,0x0e,0x1c,0x01,0x7a,0x0c,0xff
# GFX11: v_fma_mix_f32_e64_dpp v0, v1, v2, v3 row_ror:7 row_mask:0xf bank_mask:0x1 bound_ctrl:1 ; encoding: [0x00,0x00,0x20,0xcc,0xfa,0x04,0x0e,0x04,0x01,0x27,0x09,0xf1]
0x00,0x00,0x20,0xcc,0xfa,0x04,0x0e,0x04,0x01,0x27,0x09,0xf1
diff --git a/llvm/test/MC/Disassembler/AMDGPU/gfx11_dasm_vop3p_dpp8.txt b/llvm/test/MC/Disassembler/AMDGPU/gfx11_dasm_vop3p_dpp8.txt
index 89c9b54d7cfe..57c96170eadc 100644
--- a/llvm/test/MC/Disassembler/AMDGPU/gfx11_dasm_vop3p_dpp8.txt
+++ b/llvm/test/MC/Disassembler/AMDGPU/gfx11_dasm_vop3p_dpp8.txt
@@ -1,8 +1,8 @@
# RUN: llvm-mc -triple=amdgcn -mcpu=gfx1100 -disassemble -show-encoding < %s | FileCheck -check-prefix=GFX11 %s
# RUN: llvm-mc -triple=amdgcn -mcpu=gfx1100 -mattr=-WavefrontSize32,+WavefrontSize64 -disassemble -show-encoding < %s | FileCheck -check-prefix=GFX11 %s
-# GFX11: v_dot2_f32_f16_e64_dpp v0, v1, v2, v3 neg_lo:[0,1,1] neg_hi:[1,0,1] dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x00,0x05,0x13,0xcc,0xe9,0x04,0x0e,0xc4,0x01,0x77,0x39,0x05]
-0x00,0x05,0x13,0xcc,0xe9,0x04,0x0e,0xc4,0x01,0x77,0x39,0x05
+# GFX11: v_dot2_f32_f16_e64_dpp v0, v1, v2, v3 neg_lo:[0,1,1] neg_hi:[1,0,1] dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x00,0x45,0x13,0xcc,0xe9,0x04,0x0e,0xdc,0x01,0x77,0x39,0x05]
+0x00,0x45,0x13,0xcc,0xe9,0x04,0x0e,0xdc,0x01,0x77,0x39,0x05
# GFX11: v_fma_mix_f32_e64_dpp v0, v1, v2, v3 clamp dpp8:[2,2,2,2,4,4,4,4] fi:1 ; encoding: [0x00,0x80,0x20,0xcc,0xea,0x04,0x0e,0x04,0x01,0x92,0x44,0x92]
0x00,0x80,0x20,0xcc,0xea,0x04,0x0e,0x04,0x01,0x92,0x44,0x92
diff --git a/llvm/test/MC/Disassembler/AMDGPU/gfx12_dasm_vop3p_dpp16.txt b/llvm/test/MC/Disassembler/AMDGPU/gfx12_dasm_vop3p_dpp16.txt
index 52fd0530681c..10f438465d65 100644
--- a/llvm/test/MC/Disassembler/AMDGPU/gfx12_dasm_vop3p_dpp16.txt
+++ b/llvm/test/MC/Disassembler/AMDGPU/gfx12_dasm_vop3p_dpp16.txt
@@ -1,11 +1,11 @@
# RUN: llvm-mc -arch=amdgcn -mcpu=gfx1200 -disassemble -show-encoding < %s | FileCheck -check-prefix=GFX12 %s
# RUN: llvm-mc -arch=amdgcn -mcpu=gfx1200 -mattr=-wavefrontsize32,+wavefrontsize64 -disassemble -show-encoding < %s | FileCheck -check-prefix=GFX12 %s
-# GFX12: v_dot2_f32_f16_e64_dpp v0, v1, v2, v3 neg_lo:[1,1,0] neg_hi:[1,0,1] quad_perm:[3,2,1,0] row_mask:0xf bank_mask:0xe ; encoding: [0x00,0x05,0x13,0xcc,0xfa,0x04,0x0e,0x64,0x01,0x1b,0x00,0xfe]
-0x00,0x05,0x13,0xcc,0xfa,0x04,0x0e,0x64,0x01,0x1b,0x00,0xfe
+# GFX12: v_dot2_f32_f16_e64_dpp v0, v1, v2, v3 neg_lo:[1,1,0] neg_hi:[1,0,1] quad_perm:[3,2,1,0] row_mask:0xf bank_mask:0xe ; encoding: [0x00,0x45,0x13,0xcc,0xfa,0x04,0x0e,0x7c,0x01,0x1b,0x00,0xfe]
+0x00,0x45,0x13,0xcc,0xfa,0x04,0x0e,0x7c,0x01,0x1b,0x00,0xfe
-# GFX12: v_dot2_f32_f16_e64_dpp v0, v1, v2, v3 quad_perm:[2,2,3,1] row_mask:0xf bank_mask:0xf bound_ctrl:1 fi:1 ; encoding: [0x00,0x00,0x13,0xcc,0xfa,0x04,0x0e,0x04,0x01,0x7a,0x0c,0xff]
-0x00,0x00,0x13,0xcc,0xfa,0x04,0x0e,0x04,0x01,0x7a,0x0c,0xff
+# GFX12: v_dot2_f32_f16_e64_dpp v0, v1, v2, v3 quad_perm:[2,2,3,1] row_mask:0xf bank_mask:0xf bound_ctrl:1 fi:1 ; encoding: [0x00,0x40,0x13,0xcc,0xfa,0x04,0x0e,0x1c,0x01,0x7a,0x0c,0xff]
+0x00,0x40,0x13,0xcc,0xfa,0x04,0x0e,0x1c,0x01,0x7a,0x0c,0xff
# GFX12: v_fma_mix_f32_e64_dpp v0, v1, v2, v3 row_ror:7 row_mask:0xf bank_mask:0x1 bound_ctrl:1 ; encoding: [0x00,0x00,0x20,0xcc,0xfa,0x04,0x0e,0x04,0x01,0x27,0x09,0xf1]
0x00,0x00,0x20,0xcc,0xfa,0x04,0x0e,0x04,0x01,0x27,0x09,0xf1
diff --git a/llvm/test/MC/Disassembler/AMDGPU/gfx12_dasm_vop3p_dpp8.txt b/llvm/test/MC/Disassembler/AMDGPU/gfx12_dasm_vop3p_dpp8.txt
index 688212e51c42..2fb9c23ed5ec 100644
--- a/llvm/test/MC/Disassembler/AMDGPU/gfx12_dasm_vop3p_dpp8.txt
+++ b/llvm/test/MC/Disassembler/AMDGPU/gfx12_dasm_vop3p_dpp8.txt
@@ -1,8 +1,8 @@
# RUN: llvm-mc -arch=amdgcn -mcpu=gfx1200 -disassemble -show-encoding < %s | FileCheck -check-prefix=GFX12 %s
# RUN: llvm-mc -arch=amdgcn -mcpu=gfx1200 -mattr=-wavefrontsize32,+wavefrontsize64 -disassemble -show-encoding < %s | FileCheck -check-prefix=GFX12 %s
-# GFX12: v_dot2_f32_f16_e64_dpp v0, v1, v2, v3 neg_lo:[0,1,1] neg_hi:[1,0,1] dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x00,0x05,0x13,0xcc,0xe9,0x04,0x0e,0xc4,0x01,0x77,0x39,0x05]
-0x00,0x05,0x13,0xcc,0xe9,0x04,0x0e,0xc4,0x01,0x77,0x39,0x05
+# GFX12: v_dot2_f32_f16_e64_dpp v0, v1, v2, v3 neg_lo:[0,1,1] neg_hi:[1,0,1] dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x00,0x45,0x13,0xcc,0xe9,0x04,0x0e,0xdc,0x01,0x77,0x39,0x05]
+0x00,0x45,0x13,0xcc,0xe9,0x04,0x0e,0xdc,0x01,0x77,0x39,0x05
# GFX12: v_fma_mix_f32_e64_dpp v0, v1, v2, v3 clamp dpp8:[2,2,2,2,4,4,4,4] fi:1 ; encoding: [0x00,0x80,0x20,0xcc,0xea,0x04,0x0e,0x04,0x01,0x92,0x44,0x92]
0x00,0x80,0x20,0xcc,0xea,0x04,0x0e,0x04,0x01,0x92,0x44,0x92
diff --git a/llvm/test/Other/new-pm-defaults.ll b/llvm/test/Other/new-pm-defaults.ll
index 51fb93daa4df..285077ff8e31 100644
--- a/llvm/test/Other/new-pm-defaults.ll
+++ b/llvm/test/Other/new-pm-defaults.ll
@@ -72,10 +72,6 @@
; RUN: | FileCheck %s --check-prefixes=CHECK-O,CHECK-DEFAULT,CHECK-O3,%llvmcheckext,CHECK-EP-OPTIMIZER-LAST,CHECK-O23SZ
; RUN: opt -disable-verify -verify-analysis-invalidation=0 -eagerly-invalidate-analyses=0 -debug-pass-manager \
-; RUN: -passes='default<O3>' -enable-jump-table-to-switch -S %s 2>&1 \
-; RUN: | FileCheck %s --check-prefixes=CHECK-O,CHECK-DEFAULT,CHECK-O3,CHECK-JUMP-TABLE-TO-SWITCH,CHECK-O23SZ,%llvmcheckext
-
-; RUN: opt -disable-verify -verify-analysis-invalidation=0 -eagerly-invalidate-analyses=0 -debug-pass-manager \
; RUN: -passes='default<O3>' -enable-matrix -S %s 2>&1 \
; RUN: | FileCheck %s --check-prefixes=CHECK-O,CHECK-DEFAULT,CHECK-O3,CHECK-O23SZ,%llvmcheckext,CHECK-MATRIX
@@ -155,7 +151,7 @@
; CHECK-O23SZ-NEXT: Running analysis: LazyValueAnalysis
; CHECK-O23SZ-NEXT: Running pass: CorrelatedValuePropagationPass
; CHECK-O23SZ-NEXT: Invalidating analysis: LazyValueAnalysis
-; CHECK-JUMP-TABLE-TO-SWITCH-NEXT: Running pass: JumpTableToSwitchPass
+; CHECK-O23SZ-NEXT: Running pass: JumpTableToSwitchPass
; CHECK-O-NEXT: Running pass: SimplifyCFGPass
; CHECK-O-NEXT: Running pass: InstCombinePass
; CHECK-O23SZ-NEXT: Running pass: AggressiveInstCombinePass
diff --git a/llvm/test/Other/new-pm-thinlto-postlink-defaults.ll b/llvm/test/Other/new-pm-thinlto-postlink-defaults.ll
index 064362eabbf8..29a4d7903742 100644
--- a/llvm/test/Other/new-pm-thinlto-postlink-defaults.ll
+++ b/llvm/test/Other/new-pm-thinlto-postlink-defaults.ll
@@ -90,6 +90,7 @@
; CHECK-O23SZ-NEXT: Running analysis: LazyValueAnalysis
; CHECK-O23SZ-NEXT: Running pass: CorrelatedValuePropagationPass
; CHECK-O23SZ-NEXT: Invalidating analysis: LazyValueAnalysis
+; CHECK-O23SZ-NEXT: Running pass: JumpTableToSwitchPass
; CHECK-O-NEXT: Running pass: SimplifyCFGPass
; CHECK-O-NEXT: Running pass: InstCombinePass
; CHECK-O23SZ-NEXT: Running pass: AggressiveInstCombinePass
diff --git a/llvm/test/Other/new-pm-thinlto-postlink-pgo-defaults.ll b/llvm/test/Other/new-pm-thinlto-postlink-pgo-defaults.ll
index 19a44867e434..bf06782c86f8 100644
--- a/llvm/test/Other/new-pm-thinlto-postlink-pgo-defaults.ll
+++ b/llvm/test/Other/new-pm-thinlto-postlink-pgo-defaults.ll
@@ -78,6 +78,7 @@
; CHECK-O23SZ-NEXT: Running analysis: LazyValueAnalysis
; CHECK-O23SZ-NEXT: Running pass: CorrelatedValuePropagationPass
; CHECK-O23SZ-NEXT: Invalidating analysis: LazyValueAnalysis
+; CHECK-O23SZ-NEXT: Running pass: JumpTableToSwitchPass
; CHECK-O-NEXT: Running pass: SimplifyCFGPass
; CHECK-O-NEXT: Running pass: InstCombinePass
; CHECK-O23SZ-NEXT: Running pass: AggressiveInstCombinePass
diff --git a/llvm/test/Other/new-pm-thinlto-postlink-samplepgo-defaults.ll b/llvm/test/Other/new-pm-thinlto-postlink-samplepgo-defaults.ll
index ac80a31d8fd4..0cc61121de01 100644
--- a/llvm/test/Other/new-pm-thinlto-postlink-samplepgo-defaults.ll
+++ b/llvm/test/Other/new-pm-thinlto-postlink-samplepgo-defaults.ll
@@ -86,6 +86,7 @@
; CHECK-O23SZ-NEXT: Running analysis: LazyValueAnalysis
; CHECK-O23SZ-NEXT: Running pass: CorrelatedValuePropagationPass
; CHECK-O23SZ-NEXT: Invalidating analysis: LazyValueAnalysis
+; CHECK-O23SZ-NEXT: Running pass: JumpTableToSwitchPass
; CHECK-O-NEXT: Running pass: SimplifyCFGPass
; CHECK-O-NEXT: Running pass: InstCombinePass
; CHECK-O23SZ-NEXT: Running pass: AggressiveInstCombinePass
diff --git a/llvm/test/Other/new-pm-thinlto-prelink-defaults.ll b/llvm/test/Other/new-pm-thinlto-prelink-defaults.ll
index 6486639e07b4..0e5839797afe 100644
--- a/llvm/test/Other/new-pm-thinlto-prelink-defaults.ll
+++ b/llvm/test/Other/new-pm-thinlto-prelink-defaults.ll
@@ -121,6 +121,7 @@
; CHECK-O23SZ-NEXT: Running analysis: LazyValueAnalysis
; CHECK-O23SZ-NEXT: Running pass: CorrelatedValuePropagationPass
; CHECK-O23SZ-NEXT: Invalidating analysis: LazyValueAnalysis
+; CHECK-O23SZ-NEXT: Running pass: JumpTableToSwitchPass
; CHECK-O-NEXT: Running pass: SimplifyCFGPass
; CHECK-O-NEXT: Running pass: InstCombinePass
; CHECK-O23SZ-NEXT: Running pass: AggressiveInstCombinePass
diff --git a/llvm/test/Other/new-pm-thinlto-prelink-pgo-defaults.ll b/llvm/test/Other/new-pm-thinlto-prelink-pgo-defaults.ll
index 09f9f0f48bad..68c2e5814630 100644
--- a/llvm/test/Other/new-pm-thinlto-prelink-pgo-defaults.ll
+++ b/llvm/test/Other/new-pm-thinlto-prelink-pgo-defaults.ll
@@ -118,6 +118,7 @@
; CHECK-O23SZ-NEXT: Running analysis: LazyValueAnalysis
; CHECK-O23SZ-NEXT: Running pass: CorrelatedValuePropagationPass
; CHECK-O23SZ-NEXT: Invalidating analysis: LazyValueAnalysis
+; CHECK-O23SZ-NEXT: Running pass: JumpTableToSwitchPass
; CHECK-O-NEXT: Running pass: SimplifyCFGPass
; CHECK-O-NEXT: Running pass: InstCombinePass
; CHECK-O-NEXT: Running analysis: BlockFrequencyAnalysis on foo
diff --git a/llvm/test/Other/new-pm-thinlto-prelink-samplepgo-defaults.ll b/llvm/test/Other/new-pm-thinlto-prelink-samplepgo-defaults.ll
index 47bdbfd2d357..8311a009711d 100644
--- a/llvm/test/Other/new-pm-thinlto-prelink-samplepgo-defaults.ll
+++ b/llvm/test/Other/new-pm-thinlto-prelink-samplepgo-defaults.ll
@@ -90,6 +90,7 @@
; CHECK-O23SZ-NEXT: Running analysis: LazyValueAnalysis
; CHECK-O23SZ-NEXT: Running pass: CorrelatedValuePropagationPass
; CHECK-O23SZ-NEXT: Invalidating analysis: LazyValueAnalysis
+; CHECK-O23SZ-NEXT: Running pass: JumpTableToSwitchPass
; CHECK-O-NEXT: Running pass: SimplifyCFGPass
; CHECK-O-NEXT: Running pass: InstCombinePass
; CHECK-O23SZ-NEXT: Running pass: AggressiveInstCombinePass
diff --git a/llvm/test/TableGen/HwModeEncodeDecode2.td b/llvm/test/TableGen/HwModeEncodeDecode2.td
new file mode 100644
index 000000000000..5159501d8148
--- /dev/null
+++ b/llvm/test/TableGen/HwModeEncodeDecode2.td
@@ -0,0 +1,119 @@
+// RUN: llvm-tblgen -gen-disassembler -I %p/../../include %s | \
+// RUN: FileCheck %s --check-prefix=DECODER
+// RUN: llvm-tblgen -gen-disassembler --suppress-per-hwmode-duplicates -I \
+// RUN: %p/../../include %s | FileCheck %s --check-prefix=DECODER-SUPPRESS
+
+// Test duplicate table suppression for per-HwMode decoders.
+
+include "llvm/Target/Target.td"
+
+def archInstrInfo : InstrInfo { }
+
+def arch : Target {
+ let InstructionSet = archInstrInfo;
+}
+
+def Myi32 : Operand<i32> {
+ let DecoderMethod = "DecodeMyi32";
+}
+
+def HasA : Predicate<"Subtarget->hasA()">;
+def HasB : Predicate<"Subtarget->hasB()">;
+
+def ModeA : HwMode<"+a", [HasA]>;
+def ModeB : HwMode<"+b", [HasB]>;
+
+
+def fooTypeEncA : InstructionEncoding {
+ let Size = 4;
+ field bits<32> SoftFail = 0;
+ bits<32> Inst;
+ bits<8> factor;
+ let Inst{7...0} = factor;
+ let Inst{3...2} = 0b11;
+ let Inst{1...0} = 0b00;
+}
+
+def fooTypeEncB : InstructionEncoding {
+ let Size = 4;
+ field bits<32> SoftFail = 0;
+ bits<32> Inst;
+ bits<8> factor;
+ let Inst{15...8} = factor;
+ let Inst{1...0} = 0b11;
+}
+
+let OutOperandList = (outs) in {
+ def foo : Instruction {
+ let InOperandList = (ins i32imm:$factor);
+ let EncodingInfos = EncodingByHwMode<
+ [ModeA, ModeB], [fooTypeEncA, fooTypeEncB]
+ >;
+ let AsmString = "foo $factor";
+ }
+
+ // Encoding not overridden, same namespace:
+ // In the default case, this instruction is duplicated into both ModeA and
+ // ModeB decoder tables.
+ // In the suppressed case, this instruction appears in a single decoder table.
+ def bar: Instruction {
+ let InOperandList = (ins i32imm:$factor);
+ let Size = 4;
+ bits<32> Inst;
+ bits<32> SoftFail;
+ bits<8> factor;
+ let Inst{31...24} = factor;
+ let Inst{1...0} = 0b10;
+ let AsmString = "bar $factor";
+ }
+
+ def baz : Instruction {
+ let InOperandList = (ins i32imm:$factor);
+ bits<32> Inst;
+ let EncodingInfos = EncodingByHwMode<
+ [ModeB], [fooTypeEncA]
+ >;
+ let AsmString = "foo $factor";
+ }
+
+ // Encoding not overridden, different namespace:
+ // In the default case, this instruction is duplicated into two Alt decoder
+ // tables (ModeA and ModeB).
+ // In the suppressed case, this instruction appears in a single decoder table.
+ def unrelated: Instruction {
+ let DecoderNamespace = "Alt";
+ let InOperandList = (ins i32imm:$factor);
+ let Size = 4;
+ bits<32> Inst;
+ bits<32> SoftFail;
+ bits<8> factor;
+ let Inst{31...24} = factor;
+ let Inst{1...0} = 0b10;
+ let AsmString = "unrelated $factor";
+ }
+}
+
+// DECODER-LABEL: DecoderTableAlt_ModeA32[] =
+// DECODER-DAG: Opcode: unrelated
+// DECODER-LABEL: DecoderTableAlt_ModeB32[] =
+// DECODER-DAG: Opcode: unrelated
+// DECODER-LABEL: DecoderTable_ModeA32[] =
+// DECODER-DAG: Opcode: fooTypeEncA:foo
+// DECODER-DAG: Opcode: bar
+// DECODER-LABEL: DecoderTable_ModeB32[] =
+// DECODER-DAG: Opcode: fooTypeEncB:foo
+// DECODER-DAG: Opcode: fooTypeEncA:baz
+// DECODER-DAG: Opcode: bar
+
+
+// DECODER-SUPPRESS-LABEL: DecoderTableAlt_AllModes32[] =
+// DECODER-SUPPRESS-DAG: Opcode: unrelated
+// DECODER-SUPPRESS-LABEL: DecoderTable_AllModes32[] =
+// DECODER-SUPPRESS-DAG: Opcode: bar
+// DECODER-SUPPRESS-LABEL: DecoderTable_ModeA32[] =
+// DECODER-SUPPRESS-DAG: Opcode: fooTypeEncA:foo
+// DECODER-SUPPRESS-NOT: Opcode: bar
+// DECODER-SUPPRESS-LABEL: DecoderTable_ModeB32[] =
+// DECODER-SUPPRESS-DAG: Opcode: fooTypeEncB:foo
+// DECODER-SUPPRESS-DAG: Opcode: fooTypeEncA:baz
+// DECODER-SUPPRESS-NOT: Opcode: bar
diff --git a/llvm/test/Transforms/ConstraintElimination/minmax.ll b/llvm/test/Transforms/ConstraintElimination/minmax.ll
index ab3e9f381245..029b6508a210 100644
--- a/llvm/test/Transforms/ConstraintElimination/minmax.ll
+++ b/llvm/test/Transforms/ConstraintElimination/minmax.ll
@@ -611,8 +611,7 @@ define i64 @pr82271(i32 %a, i32 %b){
; CHECK-NEXT: [[SA:%.*]] = sext i32 [[A]] to i64
; CHECK-NEXT: [[SB:%.*]] = sext i32 [[B]] to i64
; CHECK-NEXT: [[ADD:%.*]] = add nsw i64 [[SA]], 1
-; CHECK-NEXT: [[SMAX:%.*]] = call i64 @llvm.smax.i64(i64 [[SB]], i64 [[ADD]])
-; CHECK-NEXT: ret i64 [[SMAX]]
+; CHECK-NEXT: ret i64 [[SB]]
; CHECK: else:
; CHECK-NEXT: ret i64 0
;
@@ -641,8 +640,7 @@ define i64 @pr82271_sext_zext_nneg(i32 %a, i32 %b){
; CHECK-NEXT: [[SA:%.*]] = sext i32 [[A]] to i64
; CHECK-NEXT: [[SB:%.*]] = zext nneg i32 [[B]] to i64
; CHECK-NEXT: [[ADD:%.*]] = add nsw i64 [[SA]], 1
-; CHECK-NEXT: [[SMAX:%.*]] = call i64 @llvm.smax.i64(i64 [[SB]], i64 [[ADD]])
-; CHECK-NEXT: ret i64 [[SMAX]]
+; CHECK-NEXT: ret i64 [[SB]]
; CHECK: else:
; CHECK-NEXT: ret i64 0
;
@@ -671,8 +669,7 @@ define i64 @pr82271_zext_nneg(i32 %a, i32 %b){
; CHECK-NEXT: [[SA:%.*]] = zext nneg i32 [[A]] to i64
; CHECK-NEXT: [[SB:%.*]] = zext nneg i32 [[B]] to i64
; CHECK-NEXT: [[ADD:%.*]] = add nsw i64 [[SA]], 1
-; CHECK-NEXT: [[SMAX:%.*]] = call i64 @llvm.smax.i64(i64 [[SB]], i64 [[ADD]])
-; CHECK-NEXT: ret i64 [[SMAX]]
+; CHECK-NEXT: ret i64 [[SB]]
; CHECK: else:
; CHECK-NEXT: ret i64 0
;
diff --git a/llvm/test/Transforms/ConstraintElimination/sext.ll b/llvm/test/Transforms/ConstraintElimination/sext.ll
index ed8dd502b6ef..5a8a37d0d570 100644
--- a/llvm/test/Transforms/ConstraintElimination/sext.ll
+++ b/llvm/test/Transforms/ConstraintElimination/sext.ll
@@ -11,8 +11,7 @@ define i1 @cmp_sext(i32 %a, i32 %b){
; CHECK-NEXT: [[SA:%.*]] = sext i32 [[A]] to i64
; CHECK-NEXT: [[SB:%.*]] = sext i32 [[B]] to i64
; CHECK-NEXT: [[ADD:%.*]] = add nsw i64 [[SA]], 1
-; CHECK-NEXT: [[CMP2:%.*]] = icmp sge i64 [[SB]], [[ADD]]
-; CHECK-NEXT: ret i1 [[CMP2]]
+; CHECK-NEXT: ret i1 true
; CHECK: else:
; CHECK-NEXT: ret i1 false
;
@@ -31,33 +30,32 @@ else:
ret i1 false
}
-define i1 @cmp_sext_positive_increment(i32 %a, i32 %b, i64 %c){
-; CHECK-LABEL: define i1 @cmp_sext_positive_increment(
-; CHECK-SAME: i32 [[A:%.*]], i32 [[B:%.*]], i64 [[C:%.*]]) {
+define i1 @cmp_sext_add(i32 %a, i32 %b){
+; CHECK-LABEL: define i1 @cmp_sext_add(
+; CHECK-SAME: i32 [[A:%.*]], i32 [[B:%.*]]) {
; CHECK-NEXT: entry:
-; CHECK-NEXT: [[POS:%.*]] = icmp sgt i64 [[C]], 0
-; CHECK-NEXT: call void @llvm.assume(i1 [[POS]])
; CHECK-NEXT: [[CMP:%.*]] = icmp slt i32 [[A]], [[B]]
; CHECK-NEXT: br i1 [[CMP]], label [[THEN:%.*]], label [[ELSE:%.*]]
; CHECK: then:
-; CHECK-NEXT: [[SA:%.*]] = sext i32 [[A]] to i64
-; CHECK-NEXT: [[SB:%.*]] = sext i32 [[B]] to i64
-; CHECK-NEXT: [[ADD:%.*]] = add nsw i64 [[SA]], [[C]]
-; CHECK-NEXT: [[CMP2:%.*]] = icmp sge i64 [[SB]], [[ADD]]
-; CHECK-NEXT: ret i1 [[CMP2]]
+; CHECK-NEXT: [[A1:%.*]] = add nsw i32 [[A]], 1
+; CHECK-NEXT: [[B1:%.*]] = add nsw i32 [[B]], 1
+; CHECK-NEXT: [[SA:%.*]] = sext i32 [[A1]] to i64
+; CHECK-NEXT: [[SB:%.*]] = sext i32 [[B1]] to i64
+; CHECK-NEXT: [[ADD:%.*]] = add nsw i64 [[SA]], 1
+; CHECK-NEXT: ret i1 true
; CHECK: else:
; CHECK-NEXT: ret i1 false
;
entry:
- %pos = icmp sgt i64 %c, 0
- call void @llvm.assume(i1 %pos)
%cmp = icmp slt i32 %a, %b
br i1 %cmp, label %then, label %else
then:
- %sa = sext i32 %a to i64
- %sb = sext i32 %b to i64
- %add = add nsw i64 %sa, %c
+ %a1 = add nsw i32 %a, 1
+ %b1 = add nsw i32 %b, 1
+ %sa = sext i32 %a1 to i64
+ %sb = sext i32 %b1 to i64
+ %add = add nsw i64 %sa, 1
%cmp2 = icmp sge i64 %sb, %add
ret i1 %cmp2
@@ -65,30 +63,33 @@ else:
ret i1 false
}
-define i1 @cmp_sext_sgt(i32 %a, i32 %b){
-; CHECK-LABEL: define i1 @cmp_sext_sgt(
-; CHECK-SAME: i32 [[A:%.*]], i32 [[B:%.*]]) {
+define i1 @cmp_sext_dynamic_increment(i32 %a, i32 %b, i64 %c){
+; CHECK-LABEL: define i1 @cmp_sext_dynamic_increment(
+; CHECK-SAME: i32 [[A:%.*]], i32 [[B:%.*]], i64 [[C:%.*]]) {
; CHECK-NEXT: entry:
+; CHECK-NEXT: [[POS:%.*]] = icmp slt i64 [[C]], 2
+; CHECK-NEXT: call void @llvm.assume(i1 [[POS]])
; CHECK-NEXT: [[CMP:%.*]] = icmp slt i32 [[A]], [[B]]
; CHECK-NEXT: br i1 [[CMP]], label [[THEN:%.*]], label [[ELSE:%.*]]
; CHECK: then:
; CHECK-NEXT: [[SA:%.*]] = sext i32 [[A]] to i64
; CHECK-NEXT: [[SB:%.*]] = sext i32 [[B]] to i64
-; CHECK-NEXT: [[ADD:%.*]] = add nsw i64 [[SA]], 1
-; CHECK-NEXT: [[CMP2:%.*]] = icmp sgt i64 [[SB]], [[ADD]]
-; CHECK-NEXT: ret i1 [[CMP2]]
+; CHECK-NEXT: [[ADD:%.*]] = add nsw i64 [[SA]], [[C]]
+; CHECK-NEXT: ret i1 true
; CHECK: else:
; CHECK-NEXT: ret i1 false
;
entry:
+ %pos = icmp slt i64 %c, 2
+ call void @llvm.assume(i1 %pos)
%cmp = icmp slt i32 %a, %b
br i1 %cmp, label %then, label %else
then:
%sa = sext i32 %a to i64
%sb = sext i32 %b to i64
- %add = add nsw i64 %sa, 1
- %cmp2 = icmp sgt i64 %sb, %add
+ %add = add nsw i64 %sa, %c
+ %cmp2 = icmp sge i64 %sb, %add
ret i1 %cmp2
else:
@@ -105,8 +106,7 @@ define i1 @cmp_zext_nneg(i32 %a, i32 %b){
; CHECK-NEXT: [[SA:%.*]] = zext nneg i32 [[A]] to i64
; CHECK-NEXT: [[SB:%.*]] = zext nneg i32 [[B]] to i64
; CHECK-NEXT: [[ADD:%.*]] = add nsw i64 [[SA]], 1
-; CHECK-NEXT: [[CMP2:%.*]] = icmp sge i64 [[SB]], [[ADD]]
-; CHECK-NEXT: ret i1 [[CMP2]]
+; CHECK-NEXT: ret i1 true
; CHECK: else:
; CHECK-NEXT: ret i1 false
;
@@ -216,3 +216,33 @@ then:
else:
ret i1 false
}
+
+define i1 @cmp_sext_sgt(i32 %a, i32 %b){
+; CHECK-LABEL: define i1 @cmp_sext_sgt(
+; CHECK-SAME: i32 [[A:%.*]], i32 [[B:%.*]]) {
+; CHECK-NEXT: entry:
+; CHECK-NEXT: [[CMP:%.*]] = icmp slt i32 [[A]], [[B]]
+; CHECK-NEXT: br i1 [[CMP]], label [[THEN:%.*]], label [[ELSE:%.*]]
+; CHECK: then:
+; CHECK-NEXT: [[SA:%.*]] = sext i32 [[A]] to i64
+; CHECK-NEXT: [[SB:%.*]] = sext i32 [[B]] to i64
+; CHECK-NEXT: [[ADD:%.*]] = add nsw i64 [[SA]], 1
+; CHECK-NEXT: [[CMP2:%.*]] = icmp sgt i64 [[SB]], [[ADD]]
+; CHECK-NEXT: ret i1 [[CMP2]]
+; CHECK: else:
+; CHECK-NEXT: ret i1 false
+;
+entry:
+ %cmp = icmp slt i32 %a, %b
+ br i1 %cmp, label %then, label %else
+
+then:
+ %sa = sext i32 %a to i64
+ %sb = sext i32 %b to i64
+ %add = add nsw i64 %sa, 1
+ %cmp2 = icmp sgt i64 %sb, %add
+ ret i1 %cmp2
+
+else:
+ ret i1 false
+}
diff --git a/llvm/test/Transforms/CorrelatedValuePropagation/min-max.ll b/llvm/test/Transforms/CorrelatedValuePropagation/min-max.ll
index 705b6e96fe9e..c9ee233b5a46 100644
--- a/llvm/test/Transforms/CorrelatedValuePropagation/min-max.ll
+++ b/llvm/test/Transforms/CorrelatedValuePropagation/min-max.ll
@@ -71,7 +71,6 @@ define i8 @test6(i8 %x) {
; CHECK-LABEL: @test6(
; CHECK-NEXT: [[LIM:%.*]] = icmp uge i8 [[X:%.*]], 42
; CHECK-NEXT: call void @llvm.assume(i1 [[LIM]])
-; CHECK-NEXT: [[R:%.*]] = call i8 @llvm.umin.i8(i8 [[X]], i8 42)
; CHECK-NEXT: ret i8 42
;
%lim = icmp uge i8 %x, 42
@@ -119,7 +118,6 @@ define i8 @test10(i8 %x) {
; CHECK-LABEL: @test10(
; CHECK-NEXT: [[LIM:%.*]] = icmp ule i8 [[X:%.*]], 42
; CHECK-NEXT: call void @llvm.assume(i1 [[LIM]])
-; CHECK-NEXT: [[R:%.*]] = call i8 @llvm.umax.i8(i8 [[X]], i8 42)
; CHECK-NEXT: ret i8 42
;
%lim = icmp ule i8 %x, 42
@@ -167,7 +165,6 @@ define i8 @test14(i8 %x) {
; CHECK-LABEL: @test14(
; CHECK-NEXT: [[LIM:%.*]] = icmp sge i8 [[X:%.*]], 42
; CHECK-NEXT: call void @llvm.assume(i1 [[LIM]])
-; CHECK-NEXT: [[R:%.*]] = call i8 @llvm.smin.i8(i8 [[X]], i8 42)
; CHECK-NEXT: ret i8 42
;
%lim = icmp sge i8 %x, 42
@@ -179,8 +176,8 @@ define i8 @test15(i8 %x) {
; CHECK-LABEL: @test15(
; CHECK-NEXT: [[LIM:%.*]] = icmp sge i8 [[X:%.*]], 41
; CHECK-NEXT: call void @llvm.assume(i1 [[LIM]])
-; CHECK-NEXT: [[R:%.*]] = call i8 @llvm.smin.i8(i8 [[X]], i8 42)
-; CHECK-NEXT: ret i8 [[R]]
+; CHECK-NEXT: [[TMP1:%.*]] = call i8 @llvm.umin.i8(i8 [[X]], i8 42)
+; CHECK-NEXT: ret i8 [[TMP1]]
;
%lim = icmp sge i8 %x, 41
call void @llvm.assume(i1 %lim)
@@ -192,8 +189,8 @@ define i8 @test16(i8 %x) {
; CHECK-LABEL: @test16(
; CHECK-NEXT: [[LIM:%.*]] = icmp sge i8 [[X:%.*]], 41
; CHECK-NEXT: call void @llvm.assume(i1 [[LIM]])
-; CHECK-NEXT: [[R:%.*]] = call i8 @llvm.smax.i8(i8 [[X]], i8 42)
-; CHECK-NEXT: ret i8 [[R]]
+; CHECK-NEXT: [[TMP1:%.*]] = call i8 @llvm.umax.i8(i8 [[X]], i8 42)
+; CHECK-NEXT: ret i8 [[TMP1]]
;
%lim = icmp sge i8 %x, 41
call void @llvm.assume(i1 %lim)
@@ -215,7 +212,6 @@ define i8 @test18(i8 %x) {
; CHECK-LABEL: @test18(
; CHECK-NEXT: [[LIM:%.*]] = icmp sle i8 [[X:%.*]], 42
; CHECK-NEXT: call void @llvm.assume(i1 [[LIM]])
-; CHECK-NEXT: [[R:%.*]] = call i8 @llvm.smax.i8(i8 [[X]], i8 42)
; CHECK-NEXT: ret i8 42
;
%lim = icmp sle i8 %x, 42
@@ -235,3 +231,127 @@ define i8 @test19(i8 %x) {
%r = call i8 @llvm.smax(i8 %x, i8 42)
ret i8 %r
}
+
+declare void @body(i32)
+
+define void @test_bidirectional() {
+; CHECK-LABEL: @test_bidirectional(
+; CHECK-NEXT: entry:
+; CHECK-NEXT: br label [[FOR_BODY:%.*]]
+; CHECK: for.body:
+; CHECK-NEXT: [[INDVAR:%.*]] = phi i32 [ 0, [[ENTRY:%.*]] ], [ [[INC:%.*]], [[FOR_BODY]] ]
+; CHECK-NEXT: call void @body(i32 65535)
+; CHECK-NEXT: [[INC]] = add nsw i32 [[INDVAR]], 1
+; CHECK-NEXT: [[CMP:%.*]] = icmp slt i32 [[INDVAR]], 65535
+; CHECK-NEXT: br i1 [[CMP]], label [[FOR_BODY]], label [[EXIT:%.*]]
+; CHECK: exit:
+; CHECK-NEXT: ret void
+;
+entry:
+ br label %for.body
+
+for.body:
+ %indvar = phi i32 [ 0, %entry ], [ %inc, %for.body ]
+ %smax = call i32 @llvm.smax.i32(i32 %indvar, i32 65535)
+ call void @body(i32 %smax)
+ %inc = add nsw i32 %indvar, 1
+ %cmp = icmp slt i32 %indvar, 65535
+ br i1 %cmp, label %for.body, label %exit
+
+exit:
+ ret void
+}
+
+define i64 @test_at_use(i1 %cond, i64 %x) {
+; CHECK-LABEL: @test_at_use(
+; CHECK-NEXT: entry:
+; CHECK-NEXT: br i1 [[COND:%.*]], label [[BB1:%.*]], label [[IF_END:%.*]]
+; CHECK: bb1:
+; CHECK-NEXT: [[CMP:%.*]] = icmp slt i64 [[X:%.*]], 0
+; CHECK-NEXT: br i1 [[CMP]], label [[IF_THEN:%.*]], label [[IF_END]]
+; CHECK: if.then:
+; CHECK-NEXT: ret i64 0
+; CHECK: if.end:
+; CHECK-NEXT: [[PHI:%.*]] = phi i64 [ [[X]], [[BB1]] ], [ 0, [[ENTRY:%.*]] ]
+; CHECK-NEXT: ret i64 [[PHI]]
+;
+entry:
+ br i1 %cond, label %bb1, label %if.end
+
+bb1:
+ %val = call i64 @llvm.smax.i64(i64 %x, i64 -1)
+ %cmp = icmp slt i64 %x, 0
+ br i1 %cmp, label %if.then, label %if.end
+
+if.then:
+ ret i64 0
+
+if.end:
+ %phi = phi i64 [%val, %bb1], [0, %entry]
+ ret i64 %phi
+}
+
+define i8 @test_smax_to_umax_nneg(i8 %a, i8 %b) {
+; CHECK-LABEL: @test_smax_to_umax_nneg(
+; CHECK-NEXT: [[NNEG_A:%.*]] = and i8 [[A:%.*]], 127
+; CHECK-NEXT: [[NNEG_B:%.*]] = and i8 [[B:%.*]], 127
+; CHECK-NEXT: [[TMP1:%.*]] = call i8 @llvm.umax.i8(i8 [[NNEG_A]], i8 [[NNEG_B]])
+; CHECK-NEXT: ret i8 [[TMP1]]
+;
+ %nneg_a = and i8 %a, 127
+ %nneg_b = and i8 %b, 127
+ %ret = call i8 @llvm.smax.i8(i8 %nneg_a, i8 %nneg_b)
+ ret i8 %ret
+}
+
+define i8 @test_smax_to_umax_neg(i8 %a, i8 %b) {
+; CHECK-LABEL: @test_smax_to_umax_neg(
+; CHECK-NEXT: [[NEG_A:%.*]] = or i8 [[A:%.*]], -128
+; CHECK-NEXT: [[NEG_B:%.*]] = or i8 [[B:%.*]], -128
+; CHECK-NEXT: [[TMP1:%.*]] = call i8 @llvm.umax.i8(i8 [[NEG_A]], i8 [[NEG_B]])
+; CHECK-NEXT: ret i8 [[TMP1]]
+;
+ %neg_a = or i8 %a, 128
+ %neg_b = or i8 %b, 128
+ %ret = call i8 @llvm.smax.i8(i8 %neg_a, i8 %neg_b)
+ ret i8 %ret
+}
+
+define i8 @test_smin_to_umin_nneg(i8 %a, i8 %b) {
+; CHECK-LABEL: @test_smin_to_umin_nneg(
+; CHECK-NEXT: [[NNEG_A:%.*]] = and i8 [[A:%.*]], 127
+; CHECK-NEXT: [[NNEG_B:%.*]] = and i8 [[B:%.*]], 127
+; CHECK-NEXT: [[TMP1:%.*]] = call i8 @llvm.umin.i8(i8 [[NNEG_A]], i8 [[NNEG_B]])
+; CHECK-NEXT: ret i8 [[TMP1]]
+;
+ %nneg_a = and i8 %a, 127
+ %nneg_b = and i8 %b, 127
+ %ret = call i8 @llvm.smin.i8(i8 %nneg_a, i8 %nneg_b)
+ ret i8 %ret
+}
+
+define i8 @test_smin_to_umin_neg(i8 %a, i8 %b) {
+; CHECK-LABEL: @test_smin_to_umin_neg(
+; CHECK-NEXT: [[NEG_A:%.*]] = or i8 [[A:%.*]], -128
+; CHECK-NEXT: [[NEG_B:%.*]] = or i8 [[B:%.*]], -128
+; CHECK-NEXT: [[TMP1:%.*]] = call i8 @llvm.umin.i8(i8 [[NEG_A]], i8 [[NEG_B]])
+; CHECK-NEXT: ret i8 [[TMP1]]
+;
+ %neg_a = or i8 %a, 128
+ %neg_b = or i8 %b, 128
+ %ret = call i8 @llvm.smin.i8(i8 %neg_a, i8 %neg_b)
+ ret i8 %ret
+}
+
+define i8 @test_umax_nneg(i8 %a, i8 %b) {
+; CHECK-LABEL: @test_umax_nneg(
+; CHECK-NEXT: [[NNEG_A:%.*]] = and i8 [[A:%.*]], 127
+; CHECK-NEXT: [[NNEG_B:%.*]] = and i8 [[B:%.*]], 127
+; CHECK-NEXT: [[RET:%.*]] = call i8 @llvm.umax.i8(i8 [[NNEG_A]], i8 [[NNEG_B]])
+; CHECK-NEXT: ret i8 [[RET]]
+;
+ %nneg_a = and i8 %a, 127
+ %nneg_b = and i8 %b, 127
+ %ret = call i8 @llvm.umax.i8(i8 %nneg_a, i8 %nneg_b)
+ ret i8 %ret
+}
diff --git a/llvm/test/Transforms/HotColdSplit/assumption-cache-invalidation.ll b/llvm/test/Transforms/HotColdSplit/assumption-cache-invalidation.ll
index 2154fb5cb5bc..8bc71148352d 100644
--- a/llvm/test/Transforms/HotColdSplit/assumption-cache-invalidation.ll
+++ b/llvm/test/Transforms/HotColdSplit/assumption-cache-invalidation.ll
@@ -13,13 +13,13 @@ target triple = "aarch64"
; CHECK-NOT: @llvm.assume
; CHECK: }
; CHECK: declare {{.*}}@llvm.assume
-; CHECK: define {{.*}}@f.cold.1()
-; CHECK-LABEL: newFuncRoot:
-; CHECK: }
-; CHECK: define {{.*}}@f.cold.2(i64 %load1)
+; CHECK: define {{.*}}@f.cold.1(i64 %load1)
; CHECK-LABEL: newFuncRoot:
; CHECK: %cmp1 = icmp eq i64 %load1, 0
; CHECK-NOT: call void @llvm.assume
+; CHECK: define {{.*}}@f.cold.2()
+; CHECK-LABEL: newFuncRoot:
+; CHECK: }
define void @f() {
entry:
diff --git a/llvm/test/Transforms/HotColdSplit/eh-pads.ll b/llvm/test/Transforms/HotColdSplit/eh-pads.ll
index 415c7e4b2bde..ad7baf97f68d 100644
--- a/llvm/test/Transforms/HotColdSplit/eh-pads.ll
+++ b/llvm/test/Transforms/HotColdSplit/eh-pads.ll
@@ -84,13 +84,16 @@ cold4:
; CHECK: sink
; CHECK-LABEL: define {{.*}}@bar.cold.1(
+; CHECK: sideeffect(i32 0)
+
+; CHECK-LABEL: define {{.*}}@bar.cold.2(
; CHECK: sideeffect(i32 1)
; CHECK-LABEL: define {{.*}}@baz.cold.1(
-; CHECK: sideeffect(i32 1)
+; CHECK: sideeffect(i32 0)
; CHECK-LABEL: define {{.*}}@baz.cold.2(
-; CHECK: sideeffect(i32 0)
+; CHECK: sideeffect(i32 1)
declare void @sideeffect(i32)
diff --git a/llvm/test/Transforms/HotColdSplit/outline-disjoint-diamonds.ll b/llvm/test/Transforms/HotColdSplit/outline-disjoint-diamonds.ll
index 65f8aad42406..0c055981260b 100644
--- a/llvm/test/Transforms/HotColdSplit/outline-disjoint-diamonds.ll
+++ b/llvm/test/Transforms/HotColdSplit/outline-disjoint-diamonds.ll
@@ -1,10 +1,10 @@
; RUN: opt -S -passes=hotcoldsplit -hotcoldsplit-threshold=-1 < %s 2>&1 | FileCheck %s
; CHECK-LABEL: define {{.*}}@fun
-; CHECK: call {{.*}}@fun.cold.2(
-; CHECK-NEXT: ret void
; CHECK: call {{.*}}@fun.cold.1(
; CHECK-NEXT: ret void
+; CHECK: call {{.*}}@fun.cold.2(
+; CHECK-NEXT: ret void
define void @fun() {
entry:
br i1 undef, label %A.then, label %A.else
@@ -49,9 +49,10 @@ B.cleanup:
}
; CHECK-LABEL: define {{.*}}@fun.cold.1(
-; CHECK: %B.cleanup.dest.slot.0 = phi i32 [ 1, %B.then5 ], [ 0, %B.end ]
+; CHECK: %A.cleanup.dest.slot.0 = phi i32 [ 1, %A.then5 ], [ 0, %A.end ]
; CHECK-NEXT: unreachable
; CHECK-LABEL: define {{.*}}@fun.cold.2(
-; CHECK: %A.cleanup.dest.slot.0 = phi i32 [ 1, %A.then5 ], [ 0, %A.end ]
+; CHECK: %B.cleanup.dest.slot.0 = phi i32 [ 1, %B.then5 ], [ 0, %B.end ]
; CHECK-NEXT: unreachable
+
diff --git a/llvm/test/Transforms/HotColdSplit/outline-inner-region.ll b/llvm/test/Transforms/HotColdSplit/outline-inner-region.ll
new file mode 100644
index 000000000000..73398bf365ff
--- /dev/null
+++ b/llvm/test/Transforms/HotColdSplit/outline-inner-region.ll
@@ -0,0 +1,49 @@
+; RUN: opt -S -passes=hotcoldsplit -hotcoldsplit-max-params=1 < %s | FileCheck %s
+
+target datalayout = "E-m:a-p:32:32-i64:64-n32"
+target triple = "powerpc64-ibm-aix7.2.0.0"
+
+define void @foo(i32 %cond) {
+; CHECK-LABEL: define {{.*}}@foo(
+; CHECK: if.then:
+; CHECK: br i1 {{.*}}, label %if.then1, label %codeRepl
+; CHECK-LABEL: codeRepl:
+; CHECK-NEXT: call void @foo.cold.1
+;
+entry:
+ %cond.addr = alloca i32
+ store i32 %cond, ptr %cond.addr
+ %0 = load i32, ptr %cond.addr
+ %tobool = icmp ne i32 %0, 0
+ br i1 %tobool, label %if.then, label %if.end2
+
+if.then: ; preds = %entry
+ %1 = load i32, ptr %cond.addr
+ call void @sink(i32 %0)
+ %cmp = icmp sgt i32 %1, 10
+ br i1 %cmp, label %if.then1, label %if.else
+
+if.then1: ; preds = %if.then
+ call void @sideeffect(i32 2)
+ br label %if.end
+
+if.else: ; preds = %if.then
+ call void @sink(i32 0)
+ call void @sideeffect(i32 0)
+ br label %if.end
+
+if.end: ; preds = %if.else, %if.then1
+ br label %if.end2
+
+if.end2: ; preds = %entry
+ call void @sideeffect(i32 1)
+ ret void
+}
+
+; CHECK-LABEL: define {{.*}}@foo.cold.1
+; CHECK: call {{.*}}@sink
+; CHECK-NEXT: call {{.*}}@sideeffect
+
+declare void @sideeffect(i32)
+
+declare void @sink(i32) cold
diff --git a/llvm/test/Transforms/HotColdSplit/outline-outer-region.ll b/llvm/test/Transforms/HotColdSplit/outline-outer-region.ll
new file mode 100644
index 000000000000..4a3c96982a87
--- /dev/null
+++ b/llvm/test/Transforms/HotColdSplit/outline-outer-region.ll
@@ -0,0 +1,52 @@
+; RUN: opt -S -passes=hotcoldsplit -hotcoldsplit-threshold=2 < %s | FileCheck %s
+
+target datalayout = "E-m:a-p:32:32-i64:64-n32"
+target triple = "powerpc64-ibm-aix7.2.0.0"
+
+define void @foo(i32 %cond, i32 %s0, i32 %s1) {
+; CHECK-LABEL: define {{.*}}@foo(
+; CHECK: br i1 {{.*}}, label %codeRepl, label %if.end2
+; CHECK-LABEL: codeRepl:
+; CHECK-NEXT: call void @foo.cold.1
+; CHECK-LABEL: if.end2:
+; CHECK: call void @sideeffect
+;
+entry:
+ %cond.addr = alloca i32
+ store i32 %cond, ptr %cond.addr
+ %0 = load i32, ptr %cond.addr
+ %tobool = icmp ne i32 %0, 0
+ br i1 %tobool, label %if.then, label %if.end2
+
+if.then: ; preds = %entry
+ %1 = load i32, ptr %cond.addr
+ %cmp = icmp sgt i32 %1, 10
+ br i1 %cmp, label %if.then1, label %if.else
+
+if.then1: ; preds = %if.then
+ call void @sideeffect(i32 0)
+ br label %if.end
+
+if.else: ; preds = %if.then
+ call void @sink(i32 %s0)
+ call void @sideeffect(i32 1)
+ br label %if.end
+
+if.end: ; preds = %if.else, %if.then1
+ call void @sink(i32 %0)
+ ret void
+
+if.end2: ; preds = %entry
+ call void @sideeffect(i32 %s1)
+ ret void
+}
+
+; CHECK-LABEL: define {{.*}}@foo.cold.1
+; CHECK: call {{.*}}@sink
+; CHECK: call {{.*}}@sideeffect
+; CHECK: call {{.*}}@sideeffect
+; CHECK: call {{.*}}@sink
+
+declare void @sideeffect(i32)
+
+declare void @sink(i32) cold
diff --git a/llvm/test/Transforms/InferAddressSpaces/AMDGPU/multiple-uses-of-val.ll b/llvm/test/Transforms/InferAddressSpaces/AMDGPU/multiple-uses-of-val.ll
new file mode 100644
index 000000000000..717bd0989773
--- /dev/null
+++ b/llvm/test/Transforms/InferAddressSpaces/AMDGPU/multiple-uses-of-val.ll
@@ -0,0 +1,69 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
+; RUN: opt -mtriple=amdgcn-amd-amdhsa -mcpu=gfx90a -S -passes=infer-address-spaces --verify-each %s | FileCheck %s
+
+; An instruction can use a value multiple times. When we insert an addrspacecast to
+; flat, it's important that all identical uses get an identical replacement,
+; especially for PHIs.
+
+define amdgpu_kernel void @test_phi() {
+; CHECK-LABEL: @test_phi(
+; CHECK-NEXT: entry:
+; CHECK-NEXT: [[LOADED_PTR:%.*]] = load ptr, ptr addrspace(4) null, align 8
+; CHECK-NEXT: [[TMP0:%.*]] = addrspacecast ptr [[LOADED_PTR]] to ptr addrspace(1)
+; CHECK-NEXT: br label [[BB0:%.*]]
+; CHECK: bb0:
+; CHECK-NEXT: [[GEP:%.*]] = getelementptr i64, ptr addrspace(1) [[TMP0]], i64 3
+; CHECK-NEXT: [[TMP1:%.*]] = addrspacecast ptr addrspace(1) [[GEP]] to ptr
+; CHECK-NEXT: switch i32 0, label [[END:%.*]] [
+; CHECK-NEXT: i32 1, label [[END]]
+; CHECK-NEXT: i32 4, label [[END]]
+; CHECK-NEXT: i32 5, label [[BB1:%.*]]
+; CHECK-NEXT: ]
+; CHECK: bb1:
+; CHECK-NEXT: [[TMP2:%.*]] = load double, ptr addrspace(1) [[GEP]], align 16
+; CHECK-NEXT: br label [[END]]
+; CHECK: end:
+; CHECK-NEXT: [[RETVAL_SROA_0_0_I569_PH:%.*]] = phi ptr [ null, [[BB1]] ], [ [[TMP1]], [[BB0]] ], [ [[TMP1]], [[BB0]] ], [ [[TMP1]], [[BB0]] ]
+; CHECK-NEXT: ret void
+;
+entry:
+ %loaded.ptr = load ptr, ptr addrspace(4) null, align 8
+ br label %bb0
+
+bb0:
+ %gep = getelementptr i64, ptr %loaded.ptr, i64 3
+ switch i32 0, label %end [
+ i32 1, label %end
+ i32 4, label %end
+ i32 5, label %bb1
+ ]
+
+bb1:
+ %0 = load double, ptr %gep, align 16
+ br label %end
+
+end:
+ %retval.sroa.0.0.i569.ph = phi ptr [ null, %bb1 ], [ %gep, %bb0 ], [ %gep, %bb0 ], [ %gep, %bb0 ]
+ ret void
+}
+
+declare void @uses_ptrs(ptr, ptr, ptr)
+
+; We shouldn't treat PHIs specially; other users should get the same treatment.
+; All occurrences of %gep are replaced with an identical value.
+define amdgpu_kernel void @test_other() {
+; CHECK-LABEL: @test_other(
+; CHECK-NEXT: entry:
+; CHECK-NEXT: [[LOADED_PTR:%.*]] = load ptr, ptr addrspace(4) null, align 8
+; CHECK-NEXT: [[TMP0:%.*]] = addrspacecast ptr [[LOADED_PTR]] to ptr addrspace(1)
+; CHECK-NEXT: [[TMP1:%.*]] = addrspacecast ptr addrspace(1) [[TMP0]] to ptr
+; CHECK-NEXT: [[GEP:%.*]] = getelementptr i64, ptr [[TMP1]], i64 3
+; CHECK-NEXT: call void @uses_ptrs(ptr [[GEP]], ptr [[GEP]], ptr [[GEP]])
+; CHECK-NEXT: ret void
+;
+entry:
+ %loaded.ptr = load ptr, ptr addrspace(4) null, align 8
+ %gep = getelementptr i64, ptr %loaded.ptr, i64 3
+ call void @uses_ptrs(ptr %gep, ptr %gep, ptr %gep)
+ ret void
+}
diff --git a/llvm/test/Transforms/Inline/inline-sign-return-address.ll b/llvm/test/Transforms/Inline/inline-sign-return-address.ll
new file mode 100644
index 000000000000..c4d85fa671a4
--- /dev/null
+++ b/llvm/test/Transforms/Inline/inline-sign-return-address.ll
@@ -0,0 +1,104 @@
+; Check that the inliner doesn't inline functions with mismatched sign-return-address schemes.
+; RUN: opt < %s -passes=inline -S | FileCheck %s
+
+define internal void @foo_all() #0 {
+ ret void
+}
+
+define internal void @foo_nonleaf() #1 {
+ ret void
+}
+
+define internal void @foo_none() #2 {
+ ret void
+}
+
+define internal void @foo_lr() #3 {
+ ret void
+}
+
+define internal void @foo_bkey() #4 {
+ ret void
+}
+
+define dso_local void @bar_all() #0 {
+; CHECK-LABEL: bar_all
+; CHECK-NOT: call void @foo_all()
+; CHECK-NEXT: call void @foo_nonleaf()
+; CHECK-NEXT: call void @foo_none()
+; CHECK-NEXT: call void @foo_lr()
+; CHECK-NEXT: call void @foo_bkey()
+ call void @foo_all()
+ call void @foo_nonleaf()
+ call void @foo_none()
+ call void @foo_lr()
+ call void @foo_bkey()
+ ret void
+}
+
+define dso_local void @bar_nonleaf() #1 {
+; CHECK-LABEL: bar_nonleaf
+; CHECK-NEXT: call void @foo_all()
+; CHECK-NOT: call void @foo_nonleaf()
+; CHECK-NEXT: call void @foo_none()
+; CHECK-NEXT: call void @foo_lr()
+; CHECK-NEXT: call void @foo_bkey()
+ call void @foo_all()
+ call void @foo_nonleaf()
+ call void @foo_none()
+ call void @foo_lr()
+ call void @foo_bkey()
+ ret void
+}
+
+define dso_local void @bar_none() #2 {
+; CHECK-LABEL: bar_none
+; CHECK-NEXT: call void @foo_all()
+; CHECK-NEXT: call void @foo_nonleaf()
+; CHECK-NOT: call void @foo_none()
+; CHECK-NEXT: call void @foo_lr()
+; CHECK-NEXT: call void @foo_bkey()
+ call void @foo_all()
+ call void @foo_nonleaf()
+ call void @foo_none()
+ call void @foo_lr()
+ call void @foo_bkey()
+ ret void
+}
+
+define dso_local void @bar_lr() #3 {
+; CHECK-LABEL: bar_lr
+; CHECK-NEXT: call void @foo_all()
+; CHECK-NEXT: call void @foo_nonleaf()
+; CHECK-NEXT: call void @foo_none()
+; CHECK-NOT: call void @foo_lr()
+; CHECK-NEXT: call void @foo_bkey()
+ call void @foo_all()
+ call void @foo_nonleaf()
+ call void @foo_none()
+ call void @foo_lr()
+ call void @foo_bkey()
+ ret void
+}
+
+define dso_local void @bar_bkey() #4 {
+; CHECK-LABEL: bar_bkey
+; CHECK-NEXT: call void @foo_all()
+; CHECK-NEXT: call void @foo_nonleaf()
+; CHECK-NEXT: call void @foo_none()
+; CHECK-NEXT: call void @foo_lr()
+; CHECK-NOT: call void @foo_bkey()
+ call void @foo_all()
+ call void @foo_nonleaf()
+ call void @foo_none()
+ call void @foo_lr()
+ call void @foo_bkey()
+ ret void
+}
+
+
+attributes #0 = { "branch-protection-pauth-lr"="false" "sign-return-address"="all" }
+attributes #1 = { "branch-protection-pauth-lr"="false" "sign-return-address"="non-leaf" }
+attributes #2 = { "branch-protection-pauth-lr"="false" "sign-return-address"="none" }
+attributes #3 = { "branch-protection-pauth-lr"="true" "sign-return-address"="non-leaf" }
+attributes #4 = { "branch-protection-pauth-lr"="true" "sign-return-address"="non-leaf" "sign-return-address-key"="b_key" } \ No newline at end of file
diff --git a/llvm/test/Transforms/InstCombine/fpextend.ll b/llvm/test/Transforms/InstCombine/fpextend.ll
index a41f2a4ca300..19f512d717a9 100644
--- a/llvm/test/Transforms/InstCombine/fpextend.ll
+++ b/llvm/test/Transforms/InstCombine/fpextend.ll
@@ -437,3 +437,14 @@ define half @bf16_to_f32_to_f16(bfloat %a) nounwind {
%z = fptrunc float %y to half
ret half %z
}
+
+define bfloat @bf16_frem(bfloat %x) {
+; CHECK-LABEL: @bf16_frem(
+; CHECK-NEXT: [[FREM:%.*]] = frem bfloat [[X:%.*]], 0xR40C9
+; CHECK-NEXT: ret bfloat [[FREM]]
+;
+ %t1 = fpext bfloat %x to float
+ %t2 = frem float %t1, 6.281250e+00
+ %t3 = fptrunc float %t2 to bfloat
+ ret bfloat %t3
+}
diff --git a/llvm/test/Transforms/InstCombine/not.ll b/llvm/test/Transforms/InstCombine/not.ll
index 3b0e5b4412fb..f277d13eee93 100644
--- a/llvm/test/Transforms/InstCombine/not.ll
+++ b/llvm/test/Transforms/InstCombine/not.ll
@@ -769,3 +769,92 @@ entry:
%cmp = icmp sle i32 %select, %not.c
ret i1 %cmp
}
+
+define i32 @test_sext(i32 %a, i32 %b){
+; CHECK-LABEL: @test_sext(
+; CHECK-NEXT: [[TMP1:%.*]] = icmp ne i32 [[A:%.*]], 0
+; CHECK-NEXT: [[TMP2:%.*]] = sext i1 [[TMP1]] to i32
+; CHECK-NEXT: [[NOT:%.*]] = sub i32 [[TMP2]], [[B:%.*]]
+; CHECK-NEXT: ret i32 [[NOT]]
+;
+ %cmp = icmp eq i32 %a, 0
+ %sext = sext i1 %cmp to i32
+ %add = add i32 %b, %sext
+ %not = xor i32 %add, -1
+ ret i32 %not
+}
+
+define <2 x i32> @test_sext_vec(<2 x i32> %a, <2 x i32> %b){
+; CHECK-LABEL: @test_sext_vec(
+; CHECK-NEXT: [[TMP1:%.*]] = icmp ne <2 x i32> [[A:%.*]], zeroinitializer
+; CHECK-NEXT: [[TMP2:%.*]] = sext <2 x i1> [[TMP1]] to <2 x i32>
+; CHECK-NEXT: [[NOT:%.*]] = sub <2 x i32> [[TMP2]], [[B:%.*]]
+; CHECK-NEXT: ret <2 x i32> [[NOT]]
+;
+ %cmp = icmp eq <2 x i32> %a, zeroinitializer
+ %sext = sext <2 x i1> %cmp to <2 x i32>
+ %add = add <2 x i32> %b, %sext
+ %not = xor <2 x i32> %add, <i32 -1, i32 -1>
+ ret <2 x i32> %not
+}
+
+define i64 @test_zext_nneg(i32 %c1, i64 %c2, i64 %c3){
+; CHECK-LABEL: @test_zext_nneg(
+; CHECK-NEXT: [[DOTNEG:%.*]] = add i64 [[C2:%.*]], -4
+; CHECK-NEXT: [[TMP1:%.*]] = sext i32 [[C1:%.*]] to i64
+; CHECK-NEXT: [[TMP2:%.*]] = sub i64 [[TMP1]], [[C3:%.*]]
+; CHECK-NEXT: [[SUB:%.*]] = add i64 [[DOTNEG]], [[TMP2]]
+; CHECK-NEXT: ret i64 [[SUB]]
+;
+ %not = xor i32 %c1, -1
+ %conv = zext nneg i32 %not to i64
+ %add1 = add i64 %c2, -5
+ %add2 = add i64 %conv, %c3
+ %sub = sub i64 %add1, %add2
+ ret i64 %sub
+}
+
+define i8 @test_trunc(i8 %a){
+; CHECK-LABEL: @test_trunc(
+; CHECK-NEXT: [[TMP1:%.*]] = icmp ne i8 [[A:%.*]], 0
+; CHECK-NEXT: [[NOT:%.*]] = sext i1 [[TMP1]] to i8
+; CHECK-NEXT: ret i8 [[NOT]]
+;
+ %zext = zext i8 %a to i32
+ %sub = add nsw i32 %zext, -1
+ %shr = ashr i32 %sub, 31
+ %conv = trunc i32 %shr to i8
+ %not = xor i8 %conv, -1
+ ret i8 %not
+}
+
+define <2 x i8> @test_trunc_vec(<2 x i8> %a){
+; CHECK-LABEL: @test_trunc_vec(
+; CHECK-NEXT: [[TMP1:%.*]] = icmp ne <2 x i8> [[A:%.*]], zeroinitializer
+; CHECK-NEXT: [[NOT:%.*]] = sext <2 x i1> [[TMP1]] to <2 x i8>
+; CHECK-NEXT: ret <2 x i8> [[NOT]]
+;
+ %zext = zext <2 x i8> %a to <2 x i32>
+ %sub = add nsw <2 x i32> %zext, <i32 -1, i32 -1>
+ %shr = ashr <2 x i32> %sub, <i32 31, i32 31>
+ %conv = trunc <2 x i32> %shr to <2 x i8>
+ %not = xor <2 x i8> %conv, <i8 -1, i8 -1>
+ ret <2 x i8> %not
+}
+
+; Negative tests
+
+define i32 @test_zext(i32 %a, i32 %b){
+; CHECK-LABEL: @test_zext(
+; CHECK-NEXT: [[CMP:%.*]] = icmp eq i32 [[A:%.*]], 0
+; CHECK-NEXT: [[SEXT:%.*]] = zext i1 [[CMP]] to i32
+; CHECK-NEXT: [[ADD:%.*]] = add i32 [[SEXT]], [[B:%.*]]
+; CHECK-NEXT: [[NOT:%.*]] = xor i32 [[ADD]], -1
+; CHECK-NEXT: ret i32 [[NOT]]
+;
+ %cmp = icmp eq i32 %a, 0
+ %sext = zext i1 %cmp to i32
+ %add = add i32 %b, %sext
+ %not = xor i32 %add, -1
+ ret i32 %not
+}
diff --git a/llvm/test/Transforms/LoopStrengthReduce/RISCV/term-fold-crash.ll b/llvm/test/Transforms/LoopStrengthReduce/RISCV/term-fold-crash.ll
new file mode 100644
index 000000000000..8ca7f0010bbb
--- /dev/null
+++ b/llvm/test/Transforms/LoopStrengthReduce/RISCV/term-fold-crash.ll
@@ -0,0 +1,43 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 4
+; RUN: opt -S -passes=loop-reduce -mtriple=riscv64-unknown-linux-gnu < %s | FileCheck %s
+
+define void @test(ptr %p, i8 %arg, i32 %start) {
+; CHECK-LABEL: define void @test(
+; CHECK-SAME: ptr [[P:%.*]], i8 [[ARG:%.*]], i32 [[START:%.*]]) {
+; CHECK-NEXT: entry:
+; CHECK-NEXT: [[CONV:%.*]] = zext i8 [[ARG]] to i32
+; CHECK-NEXT: [[SHR:%.*]] = lshr i32 [[CONV]], 1
+; CHECK-NEXT: [[TMP0:%.*]] = add i32 [[START]], [[SHR]]
+; CHECK-NEXT: [[TMP1:%.*]] = add i32 [[TMP0]], 1
+; CHECK-NEXT: br label [[FOR_BODY:%.*]]
+; CHECK: for.body:
+; CHECK-NEXT: [[ADD810:%.*]] = phi i32 [ [[START]], [[ENTRY:%.*]] ], [ [[ADD:%.*]], [[FOR_BODY]] ]
+; CHECK-NEXT: [[IDXPROM2:%.*]] = zext i32 [[ADD810]] to i64
+; CHECK-NEXT: [[ARRAYIDX3:%.*]] = getelementptr i8, ptr [[P]], i64 [[IDXPROM2]]
+; CHECK-NEXT: [[V:%.*]] = load i8, ptr [[ARRAYIDX3]], align 1
+; CHECK-NEXT: [[ADD]] = add i32 [[ADD810]], 1
+; CHECK-NEXT: [[LSR_FOLD_TERM_COND_REPLACED_TERM_COND:%.*]] = icmp eq i32 [[ADD]], [[TMP1]]
+; CHECK-NEXT: br i1 [[LSR_FOLD_TERM_COND_REPLACED_TERM_COND]], label [[EXIT:%.*]], label [[FOR_BODY]]
+; CHECK: exit:
+; CHECK-NEXT: ret void
+;
+entry:
+ %conv = zext i8 %arg to i32
+ %shr = lshr i32 %conv, 1
+ %wide.trip.count = zext nneg i32 %shr to i64
+ br label %for.body
+
+for.body:
+ %indvars.iv = phi i64 [ 0, %entry ], [ %indvars.iv.next, %for.body ]
+ %add810 = phi i32 [ %start, %entry ], [ %add, %for.body ]
+ %idxprom2 = zext i32 %add810 to i64
+ %arrayidx3 = getelementptr i8, ptr %p, i64 %idxprom2
+ %v = load i8, ptr %arrayidx3, align 1
+ %add = add i32 %add810, 1
+ %indvars.iv.next = add i64 %indvars.iv, 1
+ %exitcond.not = icmp eq i64 %indvars.iv, %wide.trip.count
+ br i1 %exitcond.not, label %exit, label %for.body
+
+exit:
+ ret void
+}
diff --git a/llvm/test/Transforms/LoopVectorize/AArch64/clamped-trip-count.ll b/llvm/test/Transforms/LoopVectorize/AArch64/clamped-trip-count.ll
index 44ace377ac79..3e895edcd4f4 100644
--- a/llvm/test/Transforms/LoopVectorize/AArch64/clamped-trip-count.ll
+++ b/llvm/test/Transforms/LoopVectorize/AArch64/clamped-trip-count.ll
@@ -1,7 +1,7 @@
; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 4
; RUN: opt -S < %s -passes=loop-vectorize -mtriple aarch64-linux-gnu -mattr=+sve 2>&1 | FileCheck %s
-define void @clamped_tc_8(ptr nocapture %dst, i32 %n, i64 %val){
+define void @clamped_tc_8(ptr nocapture %dst, i32 %n, i64 %val) vscale_range(1,16) {
; CHECK-LABEL: define void @clamped_tc_8(
; CHECK-SAME: ptr nocapture [[DST:%.*]], i32 [[N:%.*]], i64 [[VAL:%.*]]) #[[ATTR0:[0-9]+]] {
; CHECK-NEXT: entry:
@@ -18,20 +18,15 @@ define void @clamped_tc_8(ptr nocapture %dst, i32 %n, i64 %val){
; CHECK-NEXT: [[IND_END:%.*]] = getelementptr i8, ptr [[DST]], i64 [[N_VEC]]
; CHECK-NEXT: [[TMP5:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-NEXT: [[TMP6:%.*]] = mul i64 [[TMP5]], 8
-; CHECK-NEXT: [[TMP7:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-NEXT: [[TMP8:%.*]] = mul i64 [[TMP7]], 8
-; CHECK-NEXT: [[TMP9:%.*]] = sub i64 8, [[TMP8]]
-; CHECK-NEXT: [[TMP10:%.*]] = icmp ugt i64 8, [[TMP8]]
-; CHECK-NEXT: [[TMP11:%.*]] = select i1 [[TMP10]], i64 [[TMP9]], i64 0
; CHECK-NEXT: [[ACTIVE_LANE_MASK_ENTRY:%.*]] = call <vscale x 8 x i1> @llvm.get.active.lane.mask.nxv8i1.i64(i64 0, i64 8)
-; CHECK-NEXT: [[TMP12:%.*]] = call <vscale x 8 x i64> @llvm.experimental.stepvector.nxv8i64()
-; CHECK-NEXT: [[TMP13:%.*]] = add <vscale x 8 x i64> [[TMP12]], zeroinitializer
-; CHECK-NEXT: [[TMP14:%.*]] = mul <vscale x 8 x i64> [[TMP13]], shufflevector (<vscale x 8 x i64> insertelement (<vscale x 8 x i64> poison, i64 1, i64 0), <vscale x 8 x i64> poison, <vscale x 8 x i32> zeroinitializer)
-; CHECK-NEXT: [[INDUCTION:%.*]] = add <vscale x 8 x i64> zeroinitializer, [[TMP14]]
-; CHECK-NEXT: [[TMP15:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-NEXT: [[TMP16:%.*]] = mul i64 [[TMP15]], 8
-; CHECK-NEXT: [[TMP17:%.*]] = mul i64 1, [[TMP16]]
-; CHECK-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <vscale x 8 x i64> poison, i64 [[TMP17]], i64 0
+; CHECK-NEXT: [[TMP7:%.*]] = call <vscale x 8 x i64> @llvm.experimental.stepvector.nxv8i64()
+; CHECK-NEXT: [[TMP8:%.*]] = add <vscale x 8 x i64> [[TMP7]], zeroinitializer
+; CHECK-NEXT: [[TMP9:%.*]] = mul <vscale x 8 x i64> [[TMP8]], shufflevector (<vscale x 8 x i64> insertelement (<vscale x 8 x i64> poison, i64 1, i64 0), <vscale x 8 x i64> poison, <vscale x 8 x i32> zeroinitializer)
+; CHECK-NEXT: [[INDUCTION:%.*]] = add <vscale x 8 x i64> zeroinitializer, [[TMP9]]
+; CHECK-NEXT: [[TMP10:%.*]] = call i64 @llvm.vscale.i64()
+; CHECK-NEXT: [[TMP11:%.*]] = mul i64 [[TMP10]], 8
+; CHECK-NEXT: [[TMP12:%.*]] = mul i64 1, [[TMP11]]
+; CHECK-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <vscale x 8 x i64> poison, i64 [[TMP12]], i64 0
; CHECK-NEXT: [[DOTSPLAT:%.*]] = shufflevector <vscale x 8 x i64> [[DOTSPLATINSERT]], <vscale x 8 x i64> poison, <vscale x 8 x i32> zeroinitializer
; CHECK-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <vscale x 8 x i64> poison, i64 [[VAL]], i64 0
; CHECK-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <vscale x 8 x i64> [[BROADCAST_SPLATINSERT]], <vscale x 8 x i64> poison, <vscale x 8 x i32> zeroinitializer
@@ -40,17 +35,17 @@ define void @clamped_tc_8(ptr nocapture %dst, i32 %n, i64 %val){
; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
; CHECK-NEXT: [[ACTIVE_LANE_MASK:%.*]] = phi <vscale x 8 x i1> [ [[ACTIVE_LANE_MASK_ENTRY]], [[VECTOR_PH]] ], [ [[ACTIVE_LANE_MASK_NEXT:%.*]], [[VECTOR_BODY]] ]
; CHECK-NEXT: [[VEC_IND:%.*]] = phi <vscale x 8 x i64> [ [[INDUCTION]], [[VECTOR_PH]] ], [ [[VEC_IND_NEXT:%.*]], [[VECTOR_BODY]] ]
-; CHECK-NEXT: [[TMP18:%.*]] = add i64 [[INDEX]], 0
-; CHECK-NEXT: [[NEXT_GEP:%.*]] = getelementptr i8, ptr [[DST]], i64 [[TMP18]]
-; CHECK-NEXT: [[TMP19:%.*]] = shl nuw nsw <vscale x 8 x i64> [[VEC_IND]], shufflevector (<vscale x 8 x i64> insertelement (<vscale x 8 x i64> poison, i64 3, i64 0), <vscale x 8 x i64> poison, <vscale x 8 x i32> zeroinitializer)
-; CHECK-NEXT: [[TMP20:%.*]] = lshr <vscale x 8 x i64> [[BROADCAST_SPLAT]], [[TMP19]]
-; CHECK-NEXT: [[TMP21:%.*]] = trunc <vscale x 8 x i64> [[TMP20]] to <vscale x 8 x i8>
-; CHECK-NEXT: [[TMP22:%.*]] = getelementptr i8, ptr [[NEXT_GEP]], i32 0
-; CHECK-NEXT: call void @llvm.masked.store.nxv8i8.p0(<vscale x 8 x i8> [[TMP21]], ptr [[TMP22]], i32 1, <vscale x 8 x i1> [[ACTIVE_LANE_MASK]])
+; CHECK-NEXT: [[TMP13:%.*]] = add i64 [[INDEX]], 0
+; CHECK-NEXT: [[NEXT_GEP:%.*]] = getelementptr i8, ptr [[DST]], i64 [[TMP13]]
+; CHECK-NEXT: [[TMP14:%.*]] = shl nuw nsw <vscale x 8 x i64> [[VEC_IND]], shufflevector (<vscale x 8 x i64> insertelement (<vscale x 8 x i64> poison, i64 3, i64 0), <vscale x 8 x i64> poison, <vscale x 8 x i32> zeroinitializer)
+; CHECK-NEXT: [[TMP15:%.*]] = lshr <vscale x 8 x i64> [[BROADCAST_SPLAT]], [[TMP14]]
+; CHECK-NEXT: [[TMP16:%.*]] = trunc <vscale x 8 x i64> [[TMP15]] to <vscale x 8 x i8>
+; CHECK-NEXT: [[TMP17:%.*]] = getelementptr i8, ptr [[NEXT_GEP]], i32 0
+; CHECK-NEXT: call void @llvm.masked.store.nxv8i8.p0(<vscale x 8 x i8> [[TMP16]], ptr [[TMP17]], i32 1, <vscale x 8 x i1> [[ACTIVE_LANE_MASK]])
; CHECK-NEXT: [[INDEX_NEXT]] = add i64 [[INDEX]], [[TMP6]]
-; CHECK-NEXT: [[ACTIVE_LANE_MASK_NEXT]] = call <vscale x 8 x i1> @llvm.get.active.lane.mask.nxv8i1.i64(i64 [[INDEX]], i64 [[TMP11]])
+; CHECK-NEXT: [[ACTIVE_LANE_MASK_NEXT]] = call <vscale x 8 x i1> @llvm.get.active.lane.mask.nxv8i1.i64(i64 [[INDEX_NEXT]], i64 8)
; CHECK-NEXT: [[VEC_IND_NEXT]] = add <vscale x 8 x i64> [[VEC_IND]], [[DOTSPLAT]]
-; CHECK-NEXT: [[TMP23:%.*]] = xor <vscale x 8 x i1> [[ACTIVE_LANE_MASK_NEXT]], shufflevector (<vscale x 8 x i1> insertelement (<vscale x 8 x i1> poison, i1 true, i64 0), <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer)
+; CHECK-NEXT: [[TMP18:%.*]] = xor <vscale x 8 x i1> [[ACTIVE_LANE_MASK_NEXT]], shufflevector (<vscale x 8 x i1> insertelement (<vscale x 8 x i1> poison, i1 true, i64 0), <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer)
; CHECK-NEXT: br i1 true, label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
; CHECK: middle.block:
; CHECK-NEXT: br i1 true, label [[FOR_COND_CLEANUP:%.*]], label [[SCALAR_PH]]
@@ -61,8 +56,8 @@ define void @clamped_tc_8(ptr nocapture %dst, i32 %n, i64 %val){
; CHECK: for.body:
; CHECK-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[INDVARS_IV_NEXT:%.*]], [[FOR_BODY]] ]
; CHECK-NEXT: [[P_OUT_TAIL_09:%.*]] = phi ptr [ [[BC_RESUME_VAL1]], [[SCALAR_PH]] ], [ [[INCDEC_PTR:%.*]], [[FOR_BODY]] ]
-; CHECK-NEXT: [[TMP24:%.*]] = shl nuw nsw i64 [[INDVARS_IV]], 3
-; CHECK-NEXT: [[SHR3:%.*]] = lshr i64 [[VAL]], [[TMP24]]
+; CHECK-NEXT: [[TMP19:%.*]] = shl nuw nsw i64 [[INDVARS_IV]], 3
+; CHECK-NEXT: [[SHR3:%.*]] = lshr i64 [[VAL]], [[TMP19]]
; CHECK-NEXT: [[CONV4:%.*]] = trunc i64 [[SHR3]] to i8
; CHECK-NEXT: store i8 [[CONV4]], ptr [[P_OUT_TAIL_09]], align 1
; CHECK-NEXT: [[INCDEC_PTR]] = getelementptr inbounds i8, ptr [[P_OUT_TAIL_09]], i64 1
@@ -91,7 +86,7 @@ for.cond.cleanup: ; preds = %for.body
ret void
}
-define void @clamped_tc_max_8(ptr nocapture %dst, i32 %n, i64 %val){
+define void @clamped_tc_max_8(ptr nocapture %dst, i32 %n, i64 %val) vscale_range(1,16) {
; CHECK-LABEL: define void @clamped_tc_max_8(
; CHECK-SAME: ptr nocapture [[DST:%.*]], i32 [[N:%.*]], i64 [[VAL:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: entry:
@@ -115,20 +110,15 @@ define void @clamped_tc_max_8(ptr nocapture %dst, i32 %n, i64 %val){
; CHECK-NEXT: [[IND_END:%.*]] = getelementptr i8, ptr [[DST]], i64 [[N_VEC]]
; CHECK-NEXT: [[TMP5:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-NEXT: [[TMP6:%.*]] = mul i64 [[TMP5]], 8
-; CHECK-NEXT: [[TMP7:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-NEXT: [[TMP8:%.*]] = mul i64 [[TMP7]], 8
-; CHECK-NEXT: [[TMP9:%.*]] = sub i64 [[WIDE_TRIP_COUNT]], [[TMP8]]
-; CHECK-NEXT: [[TMP10:%.*]] = icmp ugt i64 [[WIDE_TRIP_COUNT]], [[TMP8]]
-; CHECK-NEXT: [[TMP11:%.*]] = select i1 [[TMP10]], i64 [[TMP9]], i64 0
; CHECK-NEXT: [[ACTIVE_LANE_MASK_ENTRY:%.*]] = call <vscale x 8 x i1> @llvm.get.active.lane.mask.nxv8i1.i64(i64 0, i64 [[WIDE_TRIP_COUNT]])
-; CHECK-NEXT: [[TMP12:%.*]] = call <vscale x 8 x i64> @llvm.experimental.stepvector.nxv8i64()
-; CHECK-NEXT: [[TMP13:%.*]] = add <vscale x 8 x i64> [[TMP12]], zeroinitializer
-; CHECK-NEXT: [[TMP14:%.*]] = mul <vscale x 8 x i64> [[TMP13]], shufflevector (<vscale x 8 x i64> insertelement (<vscale x 8 x i64> poison, i64 1, i64 0), <vscale x 8 x i64> poison, <vscale x 8 x i32> zeroinitializer)
-; CHECK-NEXT: [[INDUCTION:%.*]] = add <vscale x 8 x i64> zeroinitializer, [[TMP14]]
-; CHECK-NEXT: [[TMP15:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-NEXT: [[TMP16:%.*]] = mul i64 [[TMP15]], 8
-; CHECK-NEXT: [[TMP17:%.*]] = mul i64 1, [[TMP16]]
-; CHECK-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <vscale x 8 x i64> poison, i64 [[TMP17]], i64 0
+; CHECK-NEXT: [[TMP7:%.*]] = call <vscale x 8 x i64> @llvm.experimental.stepvector.nxv8i64()
+; CHECK-NEXT: [[TMP8:%.*]] = add <vscale x 8 x i64> [[TMP7]], zeroinitializer
+; CHECK-NEXT: [[TMP9:%.*]] = mul <vscale x 8 x i64> [[TMP8]], shufflevector (<vscale x 8 x i64> insertelement (<vscale x 8 x i64> poison, i64 1, i64 0), <vscale x 8 x i64> poison, <vscale x 8 x i32> zeroinitializer)
+; CHECK-NEXT: [[INDUCTION:%.*]] = add <vscale x 8 x i64> zeroinitializer, [[TMP9]]
+; CHECK-NEXT: [[TMP10:%.*]] = call i64 @llvm.vscale.i64()
+; CHECK-NEXT: [[TMP11:%.*]] = mul i64 [[TMP10]], 8
+; CHECK-NEXT: [[TMP12:%.*]] = mul i64 1, [[TMP11]]
+; CHECK-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <vscale x 8 x i64> poison, i64 [[TMP12]], i64 0
; CHECK-NEXT: [[DOTSPLAT:%.*]] = shufflevector <vscale x 8 x i64> [[DOTSPLATINSERT]], <vscale x 8 x i64> poison, <vscale x 8 x i32> zeroinitializer
; CHECK-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <vscale x 8 x i64> poison, i64 [[VAL]], i64 0
; CHECK-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <vscale x 8 x i64> [[BROADCAST_SPLATINSERT]], <vscale x 8 x i64> poison, <vscale x 8 x i32> zeroinitializer
@@ -137,17 +127,17 @@ define void @clamped_tc_max_8(ptr nocapture %dst, i32 %n, i64 %val){
; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
; CHECK-NEXT: [[ACTIVE_LANE_MASK:%.*]] = phi <vscale x 8 x i1> [ [[ACTIVE_LANE_MASK_ENTRY]], [[VECTOR_PH]] ], [ [[ACTIVE_LANE_MASK_NEXT:%.*]], [[VECTOR_BODY]] ]
; CHECK-NEXT: [[VEC_IND:%.*]] = phi <vscale x 8 x i64> [ [[INDUCTION]], [[VECTOR_PH]] ], [ [[VEC_IND_NEXT:%.*]], [[VECTOR_BODY]] ]
-; CHECK-NEXT: [[TMP18:%.*]] = add i64 [[INDEX]], 0
-; CHECK-NEXT: [[NEXT_GEP:%.*]] = getelementptr i8, ptr [[DST]], i64 [[TMP18]]
-; CHECK-NEXT: [[TMP19:%.*]] = shl nuw nsw <vscale x 8 x i64> [[VEC_IND]], shufflevector (<vscale x 8 x i64> insertelement (<vscale x 8 x i64> poison, i64 3, i64 0), <vscale x 8 x i64> poison, <vscale x 8 x i32> zeroinitializer)
-; CHECK-NEXT: [[TMP20:%.*]] = lshr <vscale x 8 x i64> [[BROADCAST_SPLAT]], [[TMP19]]
-; CHECK-NEXT: [[TMP21:%.*]] = trunc <vscale x 8 x i64> [[TMP20]] to <vscale x 8 x i8>
-; CHECK-NEXT: [[TMP22:%.*]] = getelementptr i8, ptr [[NEXT_GEP]], i32 0
-; CHECK-NEXT: call void @llvm.masked.store.nxv8i8.p0(<vscale x 8 x i8> [[TMP21]], ptr [[TMP22]], i32 1, <vscale x 8 x i1> [[ACTIVE_LANE_MASK]])
+; CHECK-NEXT: [[TMP13:%.*]] = add i64 [[INDEX]], 0
+; CHECK-NEXT: [[NEXT_GEP:%.*]] = getelementptr i8, ptr [[DST]], i64 [[TMP13]]
+; CHECK-NEXT: [[TMP14:%.*]] = shl nuw nsw <vscale x 8 x i64> [[VEC_IND]], shufflevector (<vscale x 8 x i64> insertelement (<vscale x 8 x i64> poison, i64 3, i64 0), <vscale x 8 x i64> poison, <vscale x 8 x i32> zeroinitializer)
+; CHECK-NEXT: [[TMP15:%.*]] = lshr <vscale x 8 x i64> [[BROADCAST_SPLAT]], [[TMP14]]
+; CHECK-NEXT: [[TMP16:%.*]] = trunc <vscale x 8 x i64> [[TMP15]] to <vscale x 8 x i8>
+; CHECK-NEXT: [[TMP17:%.*]] = getelementptr i8, ptr [[NEXT_GEP]], i32 0
+; CHECK-NEXT: call void @llvm.masked.store.nxv8i8.p0(<vscale x 8 x i8> [[TMP16]], ptr [[TMP17]], i32 1, <vscale x 8 x i1> [[ACTIVE_LANE_MASK]])
; CHECK-NEXT: [[INDEX_NEXT]] = add i64 [[INDEX]], [[TMP6]]
-; CHECK-NEXT: [[ACTIVE_LANE_MASK_NEXT]] = call <vscale x 8 x i1> @llvm.get.active.lane.mask.nxv8i1.i64(i64 [[INDEX]], i64 [[TMP11]])
+; CHECK-NEXT: [[ACTIVE_LANE_MASK_NEXT]] = call <vscale x 8 x i1> @llvm.get.active.lane.mask.nxv8i1.i64(i64 [[INDEX_NEXT]], i64 [[WIDE_TRIP_COUNT]])
; CHECK-NEXT: [[VEC_IND_NEXT]] = add <vscale x 8 x i64> [[VEC_IND]], [[DOTSPLAT]]
-; CHECK-NEXT: [[TMP23:%.*]] = xor <vscale x 8 x i1> [[ACTIVE_LANE_MASK_NEXT]], shufflevector (<vscale x 8 x i1> insertelement (<vscale x 8 x i1> poison, i1 true, i64 0), <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer)
+; CHECK-NEXT: [[TMP18:%.*]] = xor <vscale x 8 x i1> [[ACTIVE_LANE_MASK_NEXT]], shufflevector (<vscale x 8 x i1> insertelement (<vscale x 8 x i1> poison, i1 true, i64 0), <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer)
; CHECK-NEXT: br i1 true, label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]]
; CHECK: middle.block:
; CHECK-NEXT: br i1 true, label [[FOR_COND_CLEANUP_LOOPEXIT:%.*]], label [[SCALAR_PH]]
@@ -158,8 +148,8 @@ define void @clamped_tc_max_8(ptr nocapture %dst, i32 %n, i64 %val){
; CHECK: for.body:
; CHECK-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[INDVARS_IV_NEXT:%.*]], [[FOR_BODY]] ]
; CHECK-NEXT: [[P_OUT_TAIL_09:%.*]] = phi ptr [ [[BC_RESUME_VAL1]], [[SCALAR_PH]] ], [ [[INCDEC_PTR:%.*]], [[FOR_BODY]] ]
-; CHECK-NEXT: [[TMP24:%.*]] = shl nuw nsw i64 [[INDVARS_IV]], 3
-; CHECK-NEXT: [[SHR3:%.*]] = lshr i64 [[VAL]], [[TMP24]]
+; CHECK-NEXT: [[TMP19:%.*]] = shl nuw nsw i64 [[INDVARS_IV]], 3
+; CHECK-NEXT: [[SHR3:%.*]] = lshr i64 [[VAL]], [[TMP19]]
; CHECK-NEXT: [[CONV4:%.*]] = trunc i64 [[SHR3]] to i8
; CHECK-NEXT: store i8 [[CONV4]], ptr [[P_OUT_TAIL_09]], align 1
; CHECK-NEXT: [[INCDEC_PTR]] = getelementptr inbounds i8, ptr [[P_OUT_TAIL_09]], i64 1
diff --git a/llvm/test/Transforms/LoopVectorize/RISCV/low-trip-count.ll b/llvm/test/Transforms/LoopVectorize/RISCV/low-trip-count.ll
index 0c5394cb95a6..7ccbc98d2656 100644
--- a/llvm/test/Transforms/LoopVectorize/RISCV/low-trip-count.ll
+++ b/llvm/test/Transforms/LoopVectorize/RISCV/low-trip-count.ll
@@ -3,6 +3,116 @@
target triple = "riscv64"
+define void @trip1_i8(ptr noalias nocapture noundef %dst, ptr noalias nocapture noundef readonly %src) #0 {
+; CHECK-LABEL: @trip1_i8(
+; CHECK-NEXT: entry:
+; CHECK-NEXT: br label [[FOR_BODY:%.*]]
+; CHECK: for.body:
+; CHECK-NEXT: [[I_08:%.*]] = phi i64 [ 0, [[ENTRY:%.*]] ], [ [[INC:%.*]], [[FOR_BODY]] ]
+; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i8, ptr [[SRC:%.*]], i64 [[I_08]]
+; CHECK-NEXT: [[TMP0:%.*]] = load i8, ptr [[ARRAYIDX]], align 1
+; CHECK-NEXT: [[MUL:%.*]] = shl i8 [[TMP0]], 1
+; CHECK-NEXT: [[ARRAYIDX1:%.*]] = getelementptr inbounds i8, ptr [[DST:%.*]], i64 [[I_08]]
+; CHECK-NEXT: [[TMP1:%.*]] = load i8, ptr [[ARRAYIDX1]], align 1
+; CHECK-NEXT: [[ADD:%.*]] = add i8 [[MUL]], [[TMP1]]
+; CHECK-NEXT: store i8 [[ADD]], ptr [[ARRAYIDX1]], align 1
+; CHECK-NEXT: [[INC]] = add nuw nsw i64 [[I_08]], 1
+; CHECK-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[INC]], 1
+; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_END:%.*]], label [[FOR_BODY]]
+; CHECK: for.end:
+; CHECK-NEXT: ret void
+;
+entry:
+ br label %for.body
+
+for.body: ; preds = %entry, %for.body
+ %i.08 = phi i64 [ 0, %entry ], [ %inc, %for.body ]
+ %arrayidx = getelementptr inbounds i8, ptr %src, i64 %i.08
+ %0 = load i8, ptr %arrayidx, align 1
+ %mul = shl i8 %0, 1
+ %arrayidx1 = getelementptr inbounds i8, ptr %dst, i64 %i.08
+ %1 = load i8, ptr %arrayidx1, align 1
+ %add = add i8 %mul, %1
+ store i8 %add, ptr %arrayidx1, align 1
+ %inc = add nuw nsw i64 %i.08, 1
+ %exitcond.not = icmp eq i64 %inc, 1
+ br i1 %exitcond.not, label %for.end, label %for.body
+
+for.end: ; preds = %for.body
+ ret void
+}
+
+define void @trip3_i8(ptr noalias nocapture noundef %dst, ptr noalias nocapture noundef readonly %src) #0 {
+; CHECK-LABEL: @trip3_i8(
+; CHECK-NEXT: entry:
+; CHECK-NEXT: br i1 false, label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
+; CHECK: vector.ph:
+; CHECK-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64()
+; CHECK-NEXT: [[TMP1:%.*]] = mul i64 [[TMP0]], 16
+; CHECK-NEXT: [[TMP2:%.*]] = call i64 @llvm.vscale.i64()
+; CHECK-NEXT: [[TMP3:%.*]] = mul i64 [[TMP2]], 16
+; CHECK-NEXT: [[TMP4:%.*]] = sub i64 [[TMP3]], 1
+; CHECK-NEXT: [[N_RND_UP:%.*]] = add i64 3, [[TMP4]]
+; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N_RND_UP]], [[TMP1]]
+; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 [[N_RND_UP]], [[N_MOD_VF]]
+; CHECK-NEXT: [[TMP5:%.*]] = call i64 @llvm.vscale.i64()
+; CHECK-NEXT: [[TMP6:%.*]] = mul i64 [[TMP5]], 16
+; CHECK-NEXT: br label [[VECTOR_BODY:%.*]]
+; CHECK: vector.body:
+; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
+; CHECK-NEXT: [[TMP7:%.*]] = add i64 [[INDEX]], 0
+; CHECK-NEXT: [[ACTIVE_LANE_MASK:%.*]] = call <vscale x 16 x i1> @llvm.get.active.lane.mask.nxv16i1.i64(i64 [[TMP7]], i64 3)
+; CHECK-NEXT: [[TMP8:%.*]] = getelementptr inbounds i8, ptr [[SRC:%.*]], i64 [[TMP7]]
+; CHECK-NEXT: [[TMP9:%.*]] = getelementptr inbounds i8, ptr [[TMP8]], i32 0
+; CHECK-NEXT: [[WIDE_MASKED_LOAD:%.*]] = call <vscale x 16 x i8> @llvm.masked.load.nxv16i8.p0(ptr [[TMP9]], i32 1, <vscale x 16 x i1> [[ACTIVE_LANE_MASK]], <vscale x 16 x i8> poison)
+; CHECK-NEXT: [[TMP10:%.*]] = shl <vscale x 16 x i8> [[WIDE_MASKED_LOAD]], shufflevector (<vscale x 16 x i8> insertelement (<vscale x 16 x i8> poison, i8 1, i64 0), <vscale x 16 x i8> poison, <vscale x 16 x i32> zeroinitializer)
+; CHECK-NEXT: [[TMP11:%.*]] = getelementptr inbounds i8, ptr [[DST:%.*]], i64 [[TMP7]]
+; CHECK-NEXT: [[TMP12:%.*]] = getelementptr inbounds i8, ptr [[TMP11]], i32 0
+; CHECK-NEXT: [[WIDE_MASKED_LOAD1:%.*]] = call <vscale x 16 x i8> @llvm.masked.load.nxv16i8.p0(ptr [[TMP12]], i32 1, <vscale x 16 x i1> [[ACTIVE_LANE_MASK]], <vscale x 16 x i8> poison)
+; CHECK-NEXT: [[TMP13:%.*]] = add <vscale x 16 x i8> [[TMP10]], [[WIDE_MASKED_LOAD1]]
+; CHECK-NEXT: call void @llvm.masked.store.nxv16i8.p0(<vscale x 16 x i8> [[TMP13]], ptr [[TMP12]], i32 1, <vscale x 16 x i1> [[ACTIVE_LANE_MASK]])
+; CHECK-NEXT: [[INDEX_NEXT]] = add i64 [[INDEX]], [[TMP6]]
+; CHECK-NEXT: br i1 true, label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
+; CHECK: middle.block:
+; CHECK-NEXT: br i1 true, label [[FOR_END:%.*]], label [[SCALAR_PH]]
+; CHECK: scalar.ph:
+; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], [[MIDDLE_BLOCK]] ], [ 0, [[ENTRY:%.*]] ]
+; CHECK-NEXT: br label [[FOR_BODY:%.*]]
+; CHECK: for.body:
+; CHECK-NEXT: [[I_08:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[INC:%.*]], [[FOR_BODY]] ]
+; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i8, ptr [[SRC]], i64 [[I_08]]
+; CHECK-NEXT: [[TMP14:%.*]] = load i8, ptr [[ARRAYIDX]], align 1
+; CHECK-NEXT: [[MUL:%.*]] = shl i8 [[TMP14]], 1
+; CHECK-NEXT: [[ARRAYIDX1:%.*]] = getelementptr inbounds i8, ptr [[DST]], i64 [[I_08]]
+; CHECK-NEXT: [[TMP15:%.*]] = load i8, ptr [[ARRAYIDX1]], align 1
+; CHECK-NEXT: [[ADD:%.*]] = add i8 [[MUL]], [[TMP15]]
+; CHECK-NEXT: store i8 [[ADD]], ptr [[ARRAYIDX1]], align 1
+; CHECK-NEXT: [[INC]] = add nuw nsw i64 [[I_08]], 1
+; CHECK-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[INC]], 3
+; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_END]], label [[FOR_BODY]], !llvm.loop [[LOOP3:![0-9]+]]
+; CHECK: for.end:
+; CHECK-NEXT: ret void
+;
+entry:
+ br label %for.body
+
+for.body: ; preds = %entry, %for.body
+ %i.08 = phi i64 [ 0, %entry ], [ %inc, %for.body ]
+ %arrayidx = getelementptr inbounds i8, ptr %src, i64 %i.08
+ %0 = load i8, ptr %arrayidx, align 1
+ %mul = shl i8 %0, 1
+ %arrayidx1 = getelementptr inbounds i8, ptr %dst, i64 %i.08
+ %1 = load i8, ptr %arrayidx1, align 1
+ %add = add i8 %mul, %1
+ store i8 %add, ptr %arrayidx1, align 1
+ %inc = add nuw nsw i64 %i.08, 1
+ %exitcond.not = icmp eq i64 %inc, 3
+ br i1 %exitcond.not, label %for.end, label %for.body
+
+for.end: ; preds = %for.body
+ ret void
+}
+
define void @trip5_i8(ptr noalias nocapture noundef %dst, ptr noalias nocapture noundef readonly %src) #0 {
; CHECK-LABEL: @trip5_i8(
; CHECK-NEXT: entry:
@@ -33,7 +143,7 @@ define void @trip5_i8(ptr noalias nocapture noundef %dst, ptr noalias nocapture
; CHECK-NEXT: [[TMP13:%.*]] = add <vscale x 16 x i8> [[TMP10]], [[WIDE_MASKED_LOAD1]]
; CHECK-NEXT: call void @llvm.masked.store.nxv16i8.p0(<vscale x 16 x i8> [[TMP13]], ptr [[TMP12]], i32 1, <vscale x 16 x i1> [[ACTIVE_LANE_MASK]])
; CHECK-NEXT: [[INDEX_NEXT]] = add i64 [[INDEX]], [[TMP6]]
-; CHECK-NEXT: br i1 true, label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
+; CHECK-NEXT: br i1 true, label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]]
; CHECK: middle.block:
; CHECK-NEXT: br i1 true, label [[FOR_END:%.*]], label [[SCALAR_PH]]
; CHECK: scalar.ph:
@@ -50,7 +160,7 @@ define void @trip5_i8(ptr noalias nocapture noundef %dst, ptr noalias nocapture
; CHECK-NEXT: store i8 [[ADD]], ptr [[ARRAYIDX1]], align 1
; CHECK-NEXT: [[INC]] = add nuw nsw i64 [[I_08]], 1
; CHECK-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[INC]], 5
-; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_END]], label [[FOR_BODY]], !llvm.loop [[LOOP3:![0-9]+]]
+; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_END]], label [[FOR_BODY]], !llvm.loop [[LOOP5:![0-9]+]]
; CHECK: for.end:
; CHECK-NEXT: ret void
;
@@ -74,4 +184,258 @@ for.end: ; preds = %for.body
ret void
}
-attributes #0 = { "target-features"="+v,+d" }
+define void @trip8_i8(ptr noalias nocapture noundef %dst, ptr noalias nocapture noundef readonly %src) #0 {
+; CHECK-LABEL: @trip8_i8(
+; CHECK-NEXT: entry:
+; CHECK-NEXT: br i1 false, label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
+; CHECK: vector.ph:
+; CHECK-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64()
+; CHECK-NEXT: [[TMP1:%.*]] = mul i64 [[TMP0]], 8
+; CHECK-NEXT: [[TMP2:%.*]] = call i64 @llvm.vscale.i64()
+; CHECK-NEXT: [[TMP3:%.*]] = mul i64 [[TMP2]], 8
+; CHECK-NEXT: [[TMP4:%.*]] = sub i64 [[TMP3]], 1
+; CHECK-NEXT: [[N_RND_UP:%.*]] = add i64 8, [[TMP4]]
+; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N_RND_UP]], [[TMP1]]
+; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 [[N_RND_UP]], [[N_MOD_VF]]
+; CHECK-NEXT: [[TMP5:%.*]] = call i64 @llvm.vscale.i64()
+; CHECK-NEXT: [[TMP6:%.*]] = mul i64 [[TMP5]], 8
+; CHECK-NEXT: br label [[VECTOR_BODY:%.*]]
+; CHECK: vector.body:
+; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
+; CHECK-NEXT: [[TMP7:%.*]] = add i64 [[INDEX]], 0
+; CHECK-NEXT: [[ACTIVE_LANE_MASK:%.*]] = call <vscale x 8 x i1> @llvm.get.active.lane.mask.nxv8i1.i64(i64 [[TMP7]], i64 8)
+; CHECK-NEXT: [[TMP8:%.*]] = getelementptr inbounds i8, ptr [[SRC:%.*]], i64 [[TMP7]]
+; CHECK-NEXT: [[TMP9:%.*]] = getelementptr inbounds i8, ptr [[TMP8]], i32 0
+; CHECK-NEXT: [[WIDE_MASKED_LOAD:%.*]] = call <vscale x 8 x i8> @llvm.masked.load.nxv8i8.p0(ptr [[TMP9]], i32 1, <vscale x 8 x i1> [[ACTIVE_LANE_MASK]], <vscale x 8 x i8> poison)
+; CHECK-NEXT: [[TMP10:%.*]] = shl <vscale x 8 x i8> [[WIDE_MASKED_LOAD]], shufflevector (<vscale x 8 x i8> insertelement (<vscale x 8 x i8> poison, i8 1, i64 0), <vscale x 8 x i8> poison, <vscale x 8 x i32> zeroinitializer)
+; CHECK-NEXT: [[TMP11:%.*]] = getelementptr inbounds i8, ptr [[DST:%.*]], i64 [[TMP7]]
+; CHECK-NEXT: [[TMP12:%.*]] = getelementptr inbounds i8, ptr [[TMP11]], i32 0
+; CHECK-NEXT: [[WIDE_MASKED_LOAD1:%.*]] = call <vscale x 8 x i8> @llvm.masked.load.nxv8i8.p0(ptr [[TMP12]], i32 1, <vscale x 8 x i1> [[ACTIVE_LANE_MASK]], <vscale x 8 x i8> poison)
+; CHECK-NEXT: [[TMP13:%.*]] = add <vscale x 8 x i8> [[TMP10]], [[WIDE_MASKED_LOAD1]]
+; CHECK-NEXT: call void @llvm.masked.store.nxv8i8.p0(<vscale x 8 x i8> [[TMP13]], ptr [[TMP12]], i32 1, <vscale x 8 x i1> [[ACTIVE_LANE_MASK]])
+; CHECK-NEXT: [[INDEX_NEXT]] = add i64 [[INDEX]], [[TMP6]]
+; CHECK-NEXT: br i1 true, label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP6:![0-9]+]]
+; CHECK: middle.block:
+; CHECK-NEXT: br i1 true, label [[FOR_END:%.*]], label [[SCALAR_PH]]
+; CHECK: scalar.ph:
+; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], [[MIDDLE_BLOCK]] ], [ 0, [[ENTRY:%.*]] ]
+; CHECK-NEXT: br label [[FOR_BODY:%.*]]
+; CHECK: for.body:
+; CHECK-NEXT: [[I_08:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[INC:%.*]], [[FOR_BODY]] ]
+; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i8, ptr [[SRC]], i64 [[I_08]]
+; CHECK-NEXT: [[TMP14:%.*]] = load i8, ptr [[ARRAYIDX]], align 1
+; CHECK-NEXT: [[MUL:%.*]] = shl i8 [[TMP14]], 1
+; CHECK-NEXT: [[ARRAYIDX1:%.*]] = getelementptr inbounds i8, ptr [[DST]], i64 [[I_08]]
+; CHECK-NEXT: [[TMP15:%.*]] = load i8, ptr [[ARRAYIDX1]], align 1
+; CHECK-NEXT: [[ADD:%.*]] = add i8 [[MUL]], [[TMP15]]
+; CHECK-NEXT: store i8 [[ADD]], ptr [[ARRAYIDX1]], align 1
+; CHECK-NEXT: [[INC]] = add nuw nsw i64 [[I_08]], 1
+; CHECK-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[INC]], 8
+; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_END]], label [[FOR_BODY]], !llvm.loop [[LOOP7:![0-9]+]]
+; CHECK: for.end:
+; CHECK-NEXT: ret void
+;
+entry:
+ br label %for.body
+
+for.body: ; preds = %entry, %for.body
+ %i.08 = phi i64 [ 0, %entry ], [ %inc, %for.body ]
+ %arrayidx = getelementptr inbounds i8, ptr %src, i64 %i.08
+ %0 = load i8, ptr %arrayidx, align 1
+ %mul = shl i8 %0, 1
+ %arrayidx1 = getelementptr inbounds i8, ptr %dst, i64 %i.08
+ %1 = load i8, ptr %arrayidx1, align 1
+ %add = add i8 %mul, %1
+ store i8 %add, ptr %arrayidx1, align 1
+ %inc = add nuw nsw i64 %i.08, 1
+ %exitcond.not = icmp eq i64 %inc, 8
+ br i1 %exitcond.not, label %for.end, label %for.body
+
+for.end: ; preds = %for.body
+ ret void
+}
+
+define void @trip16_i8(ptr noalias nocapture noundef %dst, ptr noalias nocapture noundef readonly %src) #0 {
+; CHECK-LABEL: @trip16_i8(
+; CHECK-NEXT: entry:
+; CHECK-NEXT: br i1 false, label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
+; CHECK: vector.ph:
+; CHECK-NEXT: br label [[VECTOR_BODY:%.*]]
+; CHECK: vector.body:
+; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
+; CHECK-NEXT: [[TMP0:%.*]] = add i64 [[INDEX]], 0
+; CHECK-NEXT: [[TMP1:%.*]] = getelementptr inbounds i8, ptr [[SRC:%.*]], i64 [[TMP0]]
+; CHECK-NEXT: [[TMP2:%.*]] = getelementptr inbounds i8, ptr [[TMP1]], i32 0
+; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <16 x i8>, ptr [[TMP2]], align 1
+; CHECK-NEXT: [[TMP3:%.*]] = shl <16 x i8> [[WIDE_LOAD]], <i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1>
+; CHECK-NEXT: [[TMP4:%.*]] = getelementptr inbounds i8, ptr [[DST:%.*]], i64 [[TMP0]]
+; CHECK-NEXT: [[TMP5:%.*]] = getelementptr inbounds i8, ptr [[TMP4]], i32 0
+; CHECK-NEXT: [[WIDE_LOAD1:%.*]] = load <16 x i8>, ptr [[TMP5]], align 1
+; CHECK-NEXT: [[TMP6:%.*]] = add <16 x i8> [[TMP3]], [[WIDE_LOAD1]]
+; CHECK-NEXT: store <16 x i8> [[TMP6]], ptr [[TMP5]], align 1
+; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 16
+; CHECK-NEXT: br i1 true, label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP8:![0-9]+]]
+; CHECK: middle.block:
+; CHECK-NEXT: br i1 true, label [[FOR_END:%.*]], label [[SCALAR_PH]]
+; CHECK: scalar.ph:
+; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 16, [[MIDDLE_BLOCK]] ], [ 0, [[ENTRY:%.*]] ]
+; CHECK-NEXT: br label [[FOR_BODY:%.*]]
+; CHECK: for.body:
+; CHECK-NEXT: [[I_08:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[INC:%.*]], [[FOR_BODY]] ]
+; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i8, ptr [[SRC]], i64 [[I_08]]
+; CHECK-NEXT: [[TMP7:%.*]] = load i8, ptr [[ARRAYIDX]], align 1
+; CHECK-NEXT: [[MUL:%.*]] = shl i8 [[TMP7]], 1
+; CHECK-NEXT: [[ARRAYIDX1:%.*]] = getelementptr inbounds i8, ptr [[DST]], i64 [[I_08]]
+; CHECK-NEXT: [[TMP8:%.*]] = load i8, ptr [[ARRAYIDX1]], align 1
+; CHECK-NEXT: [[ADD:%.*]] = add i8 [[MUL]], [[TMP8]]
+; CHECK-NEXT: store i8 [[ADD]], ptr [[ARRAYIDX1]], align 1
+; CHECK-NEXT: [[INC]] = add nuw nsw i64 [[I_08]], 1
+; CHECK-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[INC]], 16
+; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_END]], label [[FOR_BODY]], !llvm.loop [[LOOP9:![0-9]+]]
+; CHECK: for.end:
+; CHECK-NEXT: ret void
+;
+entry:
+ br label %for.body
+
+for.body: ; preds = %entry, %for.body
+ %i.08 = phi i64 [ 0, %entry ], [ %inc, %for.body ]
+ %arrayidx = getelementptr inbounds i8, ptr %src, i64 %i.08
+ %0 = load i8, ptr %arrayidx, align 1
+ %mul = shl i8 %0, 1
+ %arrayidx1 = getelementptr inbounds i8, ptr %dst, i64 %i.08
+ %1 = load i8, ptr %arrayidx1, align 1
+ %add = add i8 %mul, %1
+ store i8 %add, ptr %arrayidx1, align 1
+ %inc = add nuw nsw i64 %i.08, 1
+ %exitcond.not = icmp eq i64 %inc, 16
+ br i1 %exitcond.not, label %for.end, label %for.body
+
+for.end: ; preds = %for.body
+ ret void
+}
+
+
+define void @trip32_i8(ptr noalias nocapture noundef %dst, ptr noalias nocapture noundef readonly %src) #0 {
+; CHECK-LABEL: @trip32_i8(
+; CHECK-NEXT: entry:
+; CHECK-NEXT: br i1 false, label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
+; CHECK: vector.ph:
+; CHECK-NEXT: br label [[VECTOR_BODY:%.*]]
+; CHECK: vector.body:
+; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
+; CHECK-NEXT: [[TMP0:%.*]] = add i64 [[INDEX]], 0
+; CHECK-NEXT: [[TMP1:%.*]] = getelementptr inbounds i8, ptr [[SRC:%.*]], i64 [[TMP0]]
+; CHECK-NEXT: [[TMP2:%.*]] = getelementptr inbounds i8, ptr [[TMP1]], i32 0
+; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <32 x i8>, ptr [[TMP2]], align 1
+; CHECK-NEXT: [[TMP3:%.*]] = shl <32 x i8> [[WIDE_LOAD]], <i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1>
+; CHECK-NEXT: [[TMP4:%.*]] = getelementptr inbounds i8, ptr [[DST:%.*]], i64 [[TMP0]]
+; CHECK-NEXT: [[TMP5:%.*]] = getelementptr inbounds i8, ptr [[TMP4]], i32 0
+; CHECK-NEXT: [[WIDE_LOAD1:%.*]] = load <32 x i8>, ptr [[TMP5]], align 1
+; CHECK-NEXT: [[TMP6:%.*]] = add <32 x i8> [[TMP3]], [[WIDE_LOAD1]]
+; CHECK-NEXT: store <32 x i8> [[TMP6]], ptr [[TMP5]], align 1
+; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 32
+; CHECK-NEXT: br i1 true, label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP10:![0-9]+]]
+; CHECK: middle.block:
+; CHECK-NEXT: br i1 true, label [[FOR_END:%.*]], label [[SCALAR_PH]]
+; CHECK: scalar.ph:
+; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 32, [[MIDDLE_BLOCK]] ], [ 0, [[ENTRY:%.*]] ]
+; CHECK-NEXT: br label [[FOR_BODY:%.*]]
+; CHECK: for.body:
+; CHECK-NEXT: [[I_08:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[INC:%.*]], [[FOR_BODY]] ]
+; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i8, ptr [[SRC]], i64 [[I_08]]
+; CHECK-NEXT: [[TMP7:%.*]] = load i8, ptr [[ARRAYIDX]], align 1
+; CHECK-NEXT: [[MUL:%.*]] = shl i8 [[TMP7]], 1
+; CHECK-NEXT: [[ARRAYIDX1:%.*]] = getelementptr inbounds i8, ptr [[DST]], i64 [[I_08]]
+; CHECK-NEXT: [[TMP8:%.*]] = load i8, ptr [[ARRAYIDX1]], align 1
+; CHECK-NEXT: [[ADD:%.*]] = add i8 [[MUL]], [[TMP8]]
+; CHECK-NEXT: store i8 [[ADD]], ptr [[ARRAYIDX1]], align 1
+; CHECK-NEXT: [[INC]] = add nuw nsw i64 [[I_08]], 1
+; CHECK-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[INC]], 32
+; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_END]], label [[FOR_BODY]], !llvm.loop [[LOOP11:![0-9]+]]
+; CHECK: for.end:
+; CHECK-NEXT: ret void
+;
+entry:
+ br label %for.body
+
+for.body: ; preds = %entry, %for.body
+ %i.08 = phi i64 [ 0, %entry ], [ %inc, %for.body ]
+ %arrayidx = getelementptr inbounds i8, ptr %src, i64 %i.08
+ %0 = load i8, ptr %arrayidx, align 1
+ %mul = shl i8 %0, 1
+ %arrayidx1 = getelementptr inbounds i8, ptr %dst, i64 %i.08
+ %1 = load i8, ptr %arrayidx1, align 1
+ %add = add i8 %mul, %1
+ store i8 %add, ptr %arrayidx1, align 1
+ %inc = add nuw nsw i64 %i.08, 1
+ %exitcond.not = icmp eq i64 %inc, 32
+ br i1 %exitcond.not, label %for.end, label %for.body
+
+for.end: ; preds = %for.body
+ ret void
+}
+
+define void @trip24_i8(ptr noalias nocapture noundef %dst, ptr noalias nocapture noundef readonly %src) #0 {
+; CHECK-LABEL: @trip24_i8(
+; CHECK-NEXT: entry:
+; CHECK-NEXT: br i1 false, label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
+; CHECK: vector.ph:
+; CHECK-NEXT: br label [[VECTOR_BODY:%.*]]
+; CHECK: vector.body:
+; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
+; CHECK-NEXT: [[TMP0:%.*]] = add i64 [[INDEX]], 0
+; CHECK-NEXT: [[TMP1:%.*]] = getelementptr inbounds i8, ptr [[SRC:%.*]], i64 [[TMP0]]
+; CHECK-NEXT: [[TMP2:%.*]] = getelementptr inbounds i8, ptr [[TMP1]], i32 0
+; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <8 x i8>, ptr [[TMP2]], align 1
+; CHECK-NEXT: [[TMP3:%.*]] = shl <8 x i8> [[WIDE_LOAD]], <i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1>
+; CHECK-NEXT: [[TMP4:%.*]] = getelementptr inbounds i8, ptr [[DST:%.*]], i64 [[TMP0]]
+; CHECK-NEXT: [[TMP5:%.*]] = getelementptr inbounds i8, ptr [[TMP4]], i32 0
+; CHECK-NEXT: [[WIDE_LOAD1:%.*]] = load <8 x i8>, ptr [[TMP5]], align 1
+; CHECK-NEXT: [[TMP6:%.*]] = add <8 x i8> [[TMP3]], [[WIDE_LOAD1]]
+; CHECK-NEXT: store <8 x i8> [[TMP6]], ptr [[TMP5]], align 1
+; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 8
+; CHECK-NEXT: [[TMP7:%.*]] = icmp eq i64 [[INDEX_NEXT]], 24
+; CHECK-NEXT: br i1 [[TMP7]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP12:![0-9]+]]
+; CHECK: middle.block:
+; CHECK-NEXT: br i1 true, label [[FOR_END:%.*]], label [[SCALAR_PH]]
+; CHECK: scalar.ph:
+; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 24, [[MIDDLE_BLOCK]] ], [ 0, [[ENTRY:%.*]] ]
+; CHECK-NEXT: br label [[FOR_BODY:%.*]]
+; CHECK: for.body:
+; CHECK-NEXT: [[I_08:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[INC:%.*]], [[FOR_BODY]] ]
+; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i8, ptr [[SRC]], i64 [[I_08]]
+; CHECK-NEXT: [[TMP8:%.*]] = load i8, ptr [[ARRAYIDX]], align 1
+; CHECK-NEXT: [[MUL:%.*]] = shl i8 [[TMP8]], 1
+; CHECK-NEXT: [[ARRAYIDX1:%.*]] = getelementptr inbounds i8, ptr [[DST]], i64 [[I_08]]
+; CHECK-NEXT: [[TMP9:%.*]] = load i8, ptr [[ARRAYIDX1]], align 1
+; CHECK-NEXT: [[ADD:%.*]] = add i8 [[MUL]], [[TMP9]]
+; CHECK-NEXT: store i8 [[ADD]], ptr [[ARRAYIDX1]], align 1
+; CHECK-NEXT: [[INC]] = add nuw nsw i64 [[I_08]], 1
+; CHECK-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[INC]], 24
+; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_END]], label [[FOR_BODY]], !llvm.loop [[LOOP13:![0-9]+]]
+; CHECK: for.end:
+; CHECK-NEXT: ret void
+;
+entry:
+ br label %for.body
+
+for.body: ; preds = %entry, %for.body
+ %i.08 = phi i64 [ 0, %entry ], [ %inc, %for.body ]
+ %arrayidx = getelementptr inbounds i8, ptr %src, i64 %i.08
+ %0 = load i8, ptr %arrayidx, align 1
+ %mul = shl i8 %0, 1
+ %arrayidx1 = getelementptr inbounds i8, ptr %dst, i64 %i.08
+ %1 = load i8, ptr %arrayidx1, align 1
+ %add = add i8 %mul, %1
+ store i8 %add, ptr %arrayidx1, align 1
+ %inc = add nuw nsw i64 %i.08, 1
+ %exitcond.not = icmp eq i64 %inc, 24
+ br i1 %exitcond.not, label %for.end, label %for.body
+
+for.end: ; preds = %for.body
+ ret void
+}
+
+attributes #0 = { "target-features"="+v,+d" vscale_range(2, 1024) }
+
diff --git a/llvm/test/Transforms/LoopVectorize/X86/pr72969.ll b/llvm/test/Transforms/LoopVectorize/X86/pr72969.ll
new file mode 100644
index 000000000000..f98269598333
--- /dev/null
+++ b/llvm/test/Transforms/LoopVectorize/X86/pr72969.ll
@@ -0,0 +1,26 @@
+; REQUIRES: asserts
+; RUN: not --crash opt -mtriple=x86_64 -mattr=-avx,-avx2,-avx512f,+sse,-sse2,-sse3,-sse4.2 -passes=loop-vectorize -S < %s
+; RUN: not --crash opt -mtriple=x86_64 -mattr=-avx,-avx2,-avx512f,+sse,-sse2,-sse3,-sse4.2 -passes=loop-vectorize -force-vector-width=4 -S < %s
+
+@h = global i64 0
+
+define void @test(ptr %p) {
+entry:
+ br label %for.body
+
+for.body:
+ %idx.ext.merge = phi i64 [ 1, %entry ], [ %idx, %for.body ]
+ %inc.merge = phi i16 [ 1, %entry ], [ %inc, %for.body ]
+ %idx.merge = phi i64 [ 0, %entry ], [ %idx.ext.merge, %for.body ]
+ %add = shl i64 %idx.merge, 1
+ %arrayidx = getelementptr i64, ptr %p, i64 %add
+ store i64 0, ptr %arrayidx
+ %inc = add i16 %inc.merge, 1
+ %idx = zext i16 %inc to i64
+ %gep = getelementptr i64, ptr %p, i64 %idx
+ %cmp = icmp ugt ptr %gep, @h
+ br i1 %cmp, label %exit, label %for.body
+
+exit:
+ ret void
+}
diff --git a/llvm/test/Transforms/OpenMP/deduplication_soundness.ll b/llvm/test/Transforms/OpenMP/deduplication_soundness.ll
new file mode 100644
index 000000000000..9dd3219175fe
--- /dev/null
+++ b/llvm/test/Transforms/OpenMP/deduplication_soundness.ll
@@ -0,0 +1,59 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --function main --scrub-attributes --filter "@omp_get_thread_limit|@use" --version 4
+; RUN: opt -passes=openmp-opt-cgscc -S < %s | FileCheck %s
+
+declare void @use(i32 noundef)
+declare i32 @omp_get_thread_limit()
+declare void @__kmpc_set_thread_limit(ptr, i32, i32)
+declare i32 @__kmpc_global_thread_num(ptr)
+declare noalias ptr @__kmpc_omp_task_alloc(ptr, i32, i32, i64, i64, ptr)
+declare void @__kmpc_omp_task_complete_if0(ptr, i32, ptr)
+declare void @__kmpc_omp_task_begin_if0(ptr, i32, ptr)
+
+%struct.ident_t = type { i32, i32, i32, i32, ptr }
+
+@0 = private unnamed_addr constant [23 x i8] c";unknown;unknown;0;0;;\00", align 1
+@1 = private unnamed_addr constant %struct.ident_t { i32 0, i32 2, i32 0, i32 22, ptr @0 }, align 8
+
+define i32 @main() local_unnamed_addr {
+; CHECK-LABEL: define i32 @main() local_unnamed_addr {
+; CHECK: [[CALL_I_I_I:%.*]] = call i32 @omp_get_thread_limit()
+; CHECK: call void @use(i32 noundef [[CALL_I_I_I]])
+; CHECK: [[CALL_I_I_I2:%.*]] = call i32 @omp_get_thread_limit()
+; CHECK: call void @use(i32 noundef [[CALL_I_I_I2]])
+;
+entry:
+ %0 = call i32 @__kmpc_global_thread_num(ptr nonnull @1)
+ %1 = call ptr @__kmpc_omp_task_alloc(ptr nonnull @1, i32 %0, i32 1, i64 40, i64 0, ptr nonnull @.omp_task_entry.)
+ call void @__kmpc_omp_task_begin_if0(ptr nonnull @1, i32 %0, ptr %1)
+ call void @__kmpc_set_thread_limit(ptr nonnull @1, i32 %0, i32 4)
+ %call.i.i.i = call i32 @omp_get_thread_limit()
+ call void @use(i32 noundef %call.i.i.i)
+ call void @__kmpc_omp_task_complete_if0(ptr nonnull @1, i32 %0, ptr %1)
+ %2 = call ptr @__kmpc_omp_task_alloc(ptr nonnull @1, i32 %0, i32 1, i64 40, i64 0, ptr nonnull @.omp_task_entry..2)
+ call void @__kmpc_omp_task_begin_if0(ptr nonnull @1, i32 %0, ptr %2)
+ call void @__kmpc_set_thread_limit(ptr nonnull @1, i32 %0, i32 3)
+ %call.i.i.i2 = call i32 @omp_get_thread_limit()
+ call void @use(i32 noundef %call.i.i.i2)
+ call void @__kmpc_omp_task_complete_if0(ptr nonnull @1, i32 %0, ptr %2)
+ ret i32 0
+}
+
+define internal noundef i32 @.omp_task_entry.(i32 noundef %0, ptr noalias nocapture noundef readonly %1) {
+entry:
+ tail call void @__kmpc_set_thread_limit(ptr nonnull @1, i32 %0, i32 4)
+ %call.i.i = tail call i32 @omp_get_thread_limit()
+ tail call void @use(i32 noundef %call.i.i)
+ ret i32 0
+}
+
+define internal noundef i32 @.omp_task_entry..2(i32 noundef %0, ptr noalias nocapture noundef readonly %1) {
+entry:
+ tail call void @__kmpc_set_thread_limit(ptr nonnull @1, i32 %0, i32 3)
+ %call.i.i = tail call i32 @omp_get_thread_limit()
+ tail call void @use(i32 noundef %call.i.i)
+ ret i32 0
+}
+
+!llvm.module.flags = !{!0}
+
+!0 = !{i32 7, !"openmp", i32 51}
diff --git a/llvm/test/Transforms/SLPVectorizer/AArch64/extractelements-to-shuffle.ll b/llvm/test/Transforms/SLPVectorizer/AArch64/extractelements-to-shuffle.ll
index 8f76b2e54e6c..44542f32bf14 100644
--- a/llvm/test/Transforms/SLPVectorizer/AArch64/extractelements-to-shuffle.ll
+++ b/llvm/test/Transforms/SLPVectorizer/AArch64/extractelements-to-shuffle.ll
@@ -76,10 +76,10 @@ define void @dist_vec(ptr nocapture noundef readonly %pA, ptr nocapture noundef
; CHECK-NEXT: [[PB_ADDR_0_LCSSA:%.*]] = phi ptr [ [[PB]], [[ENTRY]] ], [ [[SCEVGEP311]], [[WHILE_END_LOOPEXIT]] ]
; CHECK-NEXT: [[PA_ADDR_0_LCSSA:%.*]] = phi ptr [ [[PA]], [[ENTRY]] ], [ [[SCEVGEP]], [[WHILE_END_LOOPEXIT]] ]
; CHECK-NEXT: [[TMP10:%.*]] = shufflevector <2 x i64> [[TMP4FT_0_LCSSA]], <2 x i64> [[TMP4TF_0_LCSSA]], <2 x i32> <i32 0, i32 2>
-; CHECK-NEXT: [[TMP11:%.*]] = shufflevector <2 x i64> [[TMP4TT_0_LCSSA]], <2 x i64> [[TMP4FF_0_LCSSA]], <2 x i32> <i32 0, i32 2>
+; CHECK-NEXT: [[TMP11:%.*]] = shufflevector <2 x i64> [[TMP4FF_0_LCSSA]], <2 x i64> [[TMP4TT_0_LCSSA]], <2 x i32> <i32 0, i32 2>
; CHECK-NEXT: [[TMP12:%.*]] = shufflevector <2 x i64> [[TMP10]], <2 x i64> [[TMP11]], <4 x i32> <i32 0, i32 1, i32 2, i32 3>
; CHECK-NEXT: [[TMP13:%.*]] = shufflevector <2 x i64> [[TMP4FT_0_LCSSA]], <2 x i64> [[TMP4TF_0_LCSSA]], <2 x i32> <i32 1, i32 3>
-; CHECK-NEXT: [[TMP14:%.*]] = shufflevector <2 x i64> [[TMP4TT_0_LCSSA]], <2 x i64> [[TMP4FF_0_LCSSA]], <2 x i32> <i32 1, i32 3>
+; CHECK-NEXT: [[TMP14:%.*]] = shufflevector <2 x i64> [[TMP4FF_0_LCSSA]], <2 x i64> [[TMP4TT_0_LCSSA]], <2 x i32> <i32 1, i32 3>
; CHECK-NEXT: [[TMP15:%.*]] = shufflevector <2 x i64> [[TMP13]], <2 x i64> [[TMP14]], <4 x i32> <i32 0, i32 1, i32 2, i32 3>
; CHECK-NEXT: [[TMP16:%.*]] = add <4 x i64> [[TMP12]], [[TMP15]]
; CHECK-NEXT: [[TMP17:%.*]] = trunc <4 x i64> [[TMP16]] to <4 x i32>
@@ -107,12 +107,12 @@ define void @dist_vec(ptr nocapture noundef readonly %pA, ptr nocapture noundef
; CHECK-NEXT: [[TMP23:%.*]] = shufflevector <2 x i32> [[TMP22]], <2 x i32> poison, <2 x i32> zeroinitializer
; CHECK-NEXT: [[TMP24:%.*]] = icmp eq <2 x i32> [[TMP23]], zeroinitializer
; CHECK-NEXT: [[TMP25:%.*]] = icmp ne <2 x i32> [[TMP23]], zeroinitializer
-; CHECK-NEXT: [[TMP26:%.*]] = shufflevector <2 x i1> [[TMP24]], <2 x i1> [[TMP25]], <4 x i32> <i32 0, i32 3, i32 3, i32 0>
+; CHECK-NEXT: [[TMP26:%.*]] = shufflevector <2 x i1> [[TMP24]], <2 x i1> [[TMP25]], <4 x i32> <i32 0, i32 3, i32 0, i32 3>
; CHECK-NEXT: [[TMP27:%.*]] = insertelement <2 x i32> poison, i32 [[AND95]], i32 0
; CHECK-NEXT: [[TMP28:%.*]] = shufflevector <2 x i32> [[TMP27]], <2 x i32> poison, <2 x i32> zeroinitializer
; CHECK-NEXT: [[TMP29:%.*]] = icmp ne <2 x i32> [[TMP28]], zeroinitializer
; CHECK-NEXT: [[TMP30:%.*]] = icmp eq <2 x i32> [[TMP28]], zeroinitializer
-; CHECK-NEXT: [[TMP31:%.*]] = shufflevector <2 x i1> [[TMP29]], <2 x i1> [[TMP30]], <4 x i32> <i32 0, i32 3, i32 0, i32 3>
+; CHECK-NEXT: [[TMP31:%.*]] = shufflevector <2 x i1> [[TMP29]], <2 x i1> [[TMP30]], <4 x i32> <i32 0, i32 3, i32 3, i32 0>
; CHECK-NEXT: [[TMP32:%.*]] = select <4 x i1> [[TMP26]], <4 x i1> [[TMP31]], <4 x i1> zeroinitializer
; CHECK-NEXT: [[TMP33:%.*]] = zext <4 x i1> [[TMP32]] to <4 x i32>
; CHECK-NEXT: [[TMP34]] = add <4 x i32> [[TMP21]], [[TMP33]]
@@ -152,12 +152,12 @@ define void @dist_vec(ptr nocapture noundef readonly %pA, ptr nocapture noundef
; CHECK-NEXT: [[TMP40:%.*]] = shufflevector <2 x i32> [[TMP39]], <2 x i32> poison, <2 x i32> zeroinitializer
; CHECK-NEXT: [[TMP41:%.*]] = icmp eq <2 x i32> [[TMP40]], zeroinitializer
; CHECK-NEXT: [[TMP42:%.*]] = icmp ne <2 x i32> [[TMP40]], zeroinitializer
-; CHECK-NEXT: [[TMP43:%.*]] = shufflevector <2 x i1> [[TMP41]], <2 x i1> [[TMP42]], <4 x i32> <i32 0, i32 3, i32 3, i32 0>
+; CHECK-NEXT: [[TMP43:%.*]] = shufflevector <2 x i1> [[TMP41]], <2 x i1> [[TMP42]], <4 x i32> <i32 0, i32 3, i32 0, i32 3>
; CHECK-NEXT: [[TMP44:%.*]] = insertelement <2 x i32> poison, i32 [[AND134]], i32 0
; CHECK-NEXT: [[TMP45:%.*]] = shufflevector <2 x i32> [[TMP44]], <2 x i32> poison, <2 x i32> zeroinitializer
; CHECK-NEXT: [[TMP46:%.*]] = icmp ne <2 x i32> [[TMP45]], zeroinitializer
; CHECK-NEXT: [[TMP47:%.*]] = icmp eq <2 x i32> [[TMP45]], zeroinitializer
-; CHECK-NEXT: [[TMP48:%.*]] = shufflevector <2 x i1> [[TMP46]], <2 x i1> [[TMP47]], <4 x i32> <i32 0, i32 3, i32 0, i32 3>
+; CHECK-NEXT: [[TMP48:%.*]] = shufflevector <2 x i1> [[TMP46]], <2 x i1> [[TMP47]], <4 x i32> <i32 0, i32 3, i32 3, i32 0>
; CHECK-NEXT: [[TMP49:%.*]] = select <4 x i1> [[TMP43]], <4 x i1> [[TMP48]], <4 x i1> zeroinitializer
; CHECK-NEXT: [[TMP50:%.*]] = zext <4 x i1> [[TMP49]] to <4 x i32>
; CHECK-NEXT: [[TMP51]] = add <4 x i32> [[TMP38]], [[TMP50]]
@@ -166,9 +166,9 @@ define void @dist_vec(ptr nocapture noundef readonly %pA, ptr nocapture noundef
; CHECK-NEXT: br i1 [[CMP130_NOT]], label [[WHILE_END166]], label [[WHILE_BODY132]]
; CHECK: while.end166:
; CHECK-NEXT: [[TMP52:%.*]] = phi <4 x i32> [ [[TMP35]], [[WHILE_END122]] ], [ [[TMP51]], [[WHILE_BODY132]] ]
-; CHECK-NEXT: [[TMP53:%.*]] = extractelement <4 x i32> [[TMP52]], i32 2
+; CHECK-NEXT: [[TMP53:%.*]] = extractelement <4 x i32> [[TMP52]], i32 3
; CHECK-NEXT: store i32 [[TMP53]], ptr [[CTT:%.*]], align 4
-; CHECK-NEXT: [[TMP54:%.*]] = extractelement <4 x i32> [[TMP52]], i32 3
+; CHECK-NEXT: [[TMP54:%.*]] = extractelement <4 x i32> [[TMP52]], i32 2
; CHECK-NEXT: store i32 [[TMP54]], ptr [[CFF:%.*]], align 4
; CHECK-NEXT: [[TMP55:%.*]] = extractelement <4 x i32> [[TMP52]], i32 1
; CHECK-NEXT: store i32 [[TMP55]], ptr [[CTF:%.*]], align 4
diff --git a/llvm/test/Transforms/SLPVectorizer/AArch64/reorder-fmuladd-crash.ll b/llvm/test/Transforms/SLPVectorizer/AArch64/reorder-fmuladd-crash.ll
index 0a6899641044..dc05967af152 100644
--- a/llvm/test/Transforms/SLPVectorizer/AArch64/reorder-fmuladd-crash.ll
+++ b/llvm/test/Transforms/SLPVectorizer/AArch64/reorder-fmuladd-crash.ll
@@ -6,7 +6,7 @@ define i32 @foo(i32 %v1, double %v2) {
; CHECK-NEXT: entry:
; CHECK-NEXT: [[TMP0:%.*]] = insertelement <2 x i32> <i32 poison, i32 undef>, i32 [[V1:%.*]], i32 0
; CHECK-NEXT: [[TMP1:%.*]] = sitofp <2 x i32> [[TMP0]] to <2 x double>
-; CHECK-NEXT: [[TMP2:%.*]] = shufflevector <2 x double> [[TMP1]], <2 x double> poison, <4 x i32> <i32 0, i32 0, i32 1, i32 1>
+; CHECK-NEXT: [[TMP2:%.*]] = shufflevector <2 x double> [[TMP1]], <2 x double> poison, <4 x i32> <i32 1, i32 0, i32 1, i32 0>
; CHECK-NEXT: br label [[FOR_COND15_PREHEADER:%.*]]
; CHECK: for.cond15.preheader:
; CHECK-NEXT: br label [[IF_END:%.*]]
@@ -26,14 +26,15 @@ define i32 @foo(i32 %v1, double %v2) {
; CHECK-NEXT: [[TMP5:%.*]] = shufflevector <2 x double> [[TMP4]], <2 x double> poison, <4 x i32> <i32 0, i32 0, i32 1, i32 1>
; CHECK-NEXT: [[TMP6:%.*]] = load <4 x double>, ptr [[ARRAYIDX43]], align 8
; CHECK-NEXT: [[TMP7:%.*]] = fmul <4 x double> [[TMP6]], [[TMP5]]
-; CHECK-NEXT: [[TMP8:%.*]] = call <4 x double> @llvm.fmuladd.v4f64(<4 x double> undef, <4 x double> [[TMP2]], <4 x double> [[TMP7]])
+; CHECK-NEXT: [[TMP8:%.*]] = shufflevector <4 x double> [[TMP7]], <4 x double> poison, <4 x i32> <i32 3, i32 1, i32 2, i32 0>
+; CHECK-NEXT: [[TMP9:%.*]] = call <4 x double> @llvm.fmuladd.v4f64(<4 x double> undef, <4 x double> [[TMP2]], <4 x double> [[TMP8]])
; CHECK-NEXT: br label [[SW_EPILOG:%.*]]
; CHECK: sw.bb195:
; CHECK-NEXT: br label [[SW_EPILOG]]
; CHECK: do.body:
; CHECK-NEXT: unreachable
; CHECK: sw.epilog:
-; CHECK-NEXT: [[TMP9:%.*]] = phi <4 x double> [ undef, [[SW_BB195]] ], [ [[TMP8]], [[SW_BB]] ]
+; CHECK-NEXT: [[TMP10:%.*]] = phi <4 x double> [ undef, [[SW_BB195]] ], [ [[TMP9]], [[SW_BB]] ]
; CHECK-NEXT: ret i32 undef
; CHECK: if.end.1:
; CHECK-NEXT: br label [[FOR_COND15_1:%.*]]
diff --git a/llvm/test/Transforms/SLPVectorizer/AArch64/tsc-s116.ll b/llvm/test/Transforms/SLPVectorizer/AArch64/tsc-s116.ll
index 28af0de17123..95aa40f664c0 100644
--- a/llvm/test/Transforms/SLPVectorizer/AArch64/tsc-s116.ll
+++ b/llvm/test/Transforms/SLPVectorizer/AArch64/tsc-s116.ll
@@ -20,17 +20,17 @@ define void @s116_modified(ptr %a) {
; CHECK-NEXT: [[GEP1:%.*]] = getelementptr inbounds float, ptr [[A:%.*]], i64 1
; CHECK-NEXT: [[GEP3:%.*]] = getelementptr inbounds float, ptr [[A]], i64 3
; CHECK-NEXT: [[LD0:%.*]] = load float, ptr [[A]], align 4
-; CHECK-NEXT: [[TMP2:%.*]] = load <2 x float>, ptr [[GEP1]], align 4
-; CHECK-NEXT: [[TMP4:%.*]] = load <2 x float>, ptr [[GEP3]], align 4
-; CHECK-NEXT: [[TMP5:%.*]] = insertelement <4 x float> poison, float [[LD0]], i32 0
-; CHECK-NEXT: [[TMP6:%.*]] = shufflevector <2 x float> [[TMP2]], <2 x float> poison, <4 x i32> <i32 0, i32 1, i32 1, i32 poison>
-; CHECK-NEXT: [[TMP7:%.*]] = shufflevector <4 x float> [[TMP5]], <4 x float> [[TMP6]], <4 x i32> <i32 0, i32 5, i32 poison, i32 poison>
-; CHECK-NEXT: [[TMP8:%.*]] = shufflevector <2 x float> [[TMP4]], <2 x float> poison, <4 x i32> <i32 0, i32 1, i32 poison, i32 poison>
-; CHECK-NEXT: [[TMP9:%.*]] = shufflevector <4 x float> [[TMP7]], <4 x float> [[TMP8]], <4 x i32> <i32 0, i32 1, i32 4, i32 5>
-; CHECK-NEXT: [[TMP10:%.*]] = shufflevector <4 x float> [[TMP6]], <4 x float> [[TMP8]], <4 x i32> <i32 0, i32 poison, i32 2, i32 4>
-; CHECK-NEXT: [[TMP11:%.*]] = shufflevector <4 x float> [[TMP10]], <4 x float> poison, <4 x i32> <i32 0, i32 0, i32 2, i32 3>
-; CHECK-NEXT: [[TMP12:%.*]] = fmul fast <4 x float> [[TMP9]], [[TMP11]]
-; CHECK-NEXT: store <4 x float> [[TMP12]], ptr [[A]], align 4
+; CHECK-NEXT: [[TMP1:%.*]] = load <2 x float>, ptr [[GEP1]], align 4
+; CHECK-NEXT: [[TMP2:%.*]] = load <2 x float>, ptr [[GEP3]], align 4
+; CHECK-NEXT: [[TMP3:%.*]] = insertelement <4 x float> poison, float [[LD0]], i32 0
+; CHECK-NEXT: [[TMP4:%.*]] = shufflevector <2 x float> [[TMP1]], <2 x float> poison, <4 x i32> <i32 0, i32 1, i32 1, i32 poison>
+; CHECK-NEXT: [[TMP5:%.*]] = shufflevector <4 x float> [[TMP3]], <4 x float> [[TMP4]], <4 x i32> <i32 0, i32 5, i32 poison, i32 poison>
+; CHECK-NEXT: [[TMP6:%.*]] = shufflevector <2 x float> [[TMP2]], <2 x float> poison, <4 x i32> <i32 0, i32 1, i32 poison, i32 poison>
+; CHECK-NEXT: [[TMP7:%.*]] = shufflevector <4 x float> [[TMP5]], <4 x float> [[TMP6]], <4 x i32> <i32 0, i32 1, i32 4, i32 5>
+; CHECK-NEXT: [[TMP8:%.*]] = shufflevector <4 x float> [[TMP4]], <4 x float> [[TMP6]], <4 x i32> <i32 0, i32 poison, i32 2, i32 4>
+; CHECK-NEXT: [[TMP9:%.*]] = shufflevector <4 x float> [[TMP8]], <4 x float> poison, <4 x i32> <i32 0, i32 0, i32 2, i32 3>
+; CHECK-NEXT: [[TMP10:%.*]] = fmul fast <4 x float> [[TMP7]], [[TMP9]]
+; CHECK-NEXT: store <4 x float> [[TMP10]], ptr [[A]], align 4
; CHECK-NEXT: ret void
;
%gep1 = getelementptr inbounds float, ptr %a, i64 1
diff --git a/llvm/test/Transforms/SLPVectorizer/AArch64/vec3-reorder-reshuffle.ll b/llvm/test/Transforms/SLPVectorizer/AArch64/vec3-reorder-reshuffle.ll
index 5707e143ad55..89ea15d3ab3b 100644
--- a/llvm/test/Transforms/SLPVectorizer/AArch64/vec3-reorder-reshuffle.ll
+++ b/llvm/test/Transforms/SLPVectorizer/AArch64/vec3-reorder-reshuffle.ll
@@ -143,16 +143,17 @@ define void @gather_2(ptr %mat1, float %0, float %1) {
; CHECK-SAME: ptr [[MAT1:%.*]], float [[TMP0:%.*]], float [[TMP1:%.*]]) {
; CHECK-NEXT: entry:
; CHECK-NEXT: [[TMP2:%.*]] = call float @llvm.fmuladd.f32(float [[TMP0]], float 0.000000e+00, float 0.000000e+00)
-; CHECK-NEXT: [[TMP3:%.*]] = insertelement <2 x float> poison, float [[TMP1]], i32 0
-; CHECK-NEXT: [[TMP4:%.*]] = insertelement <2 x float> [[TMP3]], float [[TMP0]], i32 1
-; CHECK-NEXT: [[TMP5:%.*]] = shufflevector <2 x float> [[TMP4]], <2 x float> poison, <2 x i32> <i32 1, i32 0>
-; CHECK-NEXT: [[TMP6:%.*]] = call <2 x float> @llvm.fmuladd.v2f32(<2 x float> [[TMP4]], <2 x float> [[TMP5]], <2 x float> zeroinitializer)
-; CHECK-NEXT: [[TMP7:%.*]] = fmul float [[TMP2]], 0.000000e+00
+; CHECK-NEXT: [[TMP3:%.*]] = call float @llvm.fmuladd.f32(float [[TMP1]], float [[TMP0]], float 0.000000e+00)
+; CHECK-NEXT: [[TMP4:%.*]] = call float @llvm.fmuladd.f32(float [[TMP0]], float [[TMP1]], float 0.000000e+00)
+; CHECK-NEXT: [[TMP5:%.*]] = fmul float [[TMP2]], 0.000000e+00
+; CHECK-NEXT: [[TMP6:%.*]] = fmul float [[TMP3]], 0.000000e+00
+; CHECK-NEXT: [[TMP7:%.*]] = fmul float [[TMP4]], 0.000000e+00
; CHECK-NEXT: [[ARRAYIDX163:%.*]] = getelementptr [4 x [4 x float]], ptr [[MAT1]], i64 0, i64 1
; CHECK-NEXT: [[ARRAYIDX2_I_I_I278:%.*]] = getelementptr [4 x [4 x float]], ptr [[MAT1]], i64 0, i64 1, i64 1
-; CHECK-NEXT: store float [[TMP7]], ptr [[ARRAYIDX163]], align 4
-; CHECK-NEXT: [[TMP8:%.*]] = fmul <2 x float> [[TMP6]], zeroinitializer
-; CHECK-NEXT: store <2 x float> [[TMP8]], ptr [[ARRAYIDX2_I_I_I278]], align 4
+; CHECK-NEXT: [[ARRAYIDX5_I_I_I280:%.*]] = getelementptr [4 x [4 x float]], ptr [[MAT1]], i64 0, i64 1, i64 2
+; CHECK-NEXT: store float [[TMP5]], ptr [[ARRAYIDX163]], align 4
+; CHECK-NEXT: store float [[TMP6]], ptr [[ARRAYIDX2_I_I_I278]], align 4
+; CHECK-NEXT: store float [[TMP7]], ptr [[ARRAYIDX5_I_I_I280]], align 4
; CHECK-NEXT: ret void
;
entry:
@@ -183,19 +184,18 @@ define i32 @reorder_indices_1(float %0) {
; CHECK-NEXT: [[TMP4:%.*]] = fneg float [[TMP3]]
; CHECK-NEXT: [[NEG11_I:%.*]] = fmul float [[TMP4]], [[TMP0]]
; CHECK-NEXT: [[TMP5:%.*]] = call float @llvm.fmuladd.f32(float [[TMP1]], float 0.000000e+00, float [[NEG11_I]])
-; CHECK-NEXT: [[TMP6:%.*]] = insertelement <2 x float> poison, float [[TMP1]], i32 0
-; CHECK-NEXT: [[TMP7:%.*]] = shufflevector <2 x float> [[TMP6]], <2 x float> [[TMP2]], <2 x i32> <i32 0, i32 3>
+; CHECK-NEXT: [[TMP6:%.*]] = shufflevector <2 x float> [[TMP2]], <2 x float> poison, <2 x i32> <i32 1, i32 poison>
+; CHECK-NEXT: [[TMP7:%.*]] = insertelement <2 x float> [[TMP6]], float [[TMP1]], i32 1
; CHECK-NEXT: [[TMP8:%.*]] = fneg <2 x float> [[TMP7]]
; CHECK-NEXT: [[TMP9:%.*]] = insertelement <2 x float> poison, float [[TMP0]], i32 0
; CHECK-NEXT: [[TMP10:%.*]] = shufflevector <2 x float> [[TMP9]], <2 x float> poison, <2 x i32> zeroinitializer
; CHECK-NEXT: [[TMP11:%.*]] = fmul <2 x float> [[TMP8]], [[TMP10]]
-; CHECK-NEXT: [[TMP12:%.*]] = shufflevector <2 x float> [[TMP11]], <2 x float> poison, <2 x i32> <i32 1, i32 0>
-; CHECK-NEXT: [[TMP13:%.*]] = call <2 x float> @llvm.fmuladd.v2f32(<2 x float> [[TMP2]], <2 x float> zeroinitializer, <2 x float> [[TMP12]])
-; CHECK-NEXT: [[TMP14:%.*]] = call <2 x float> @llvm.fmuladd.v2f32(<2 x float> [[TMP10]], <2 x float> [[TMP13]], <2 x float> zeroinitializer)
-; CHECK-NEXT: [[TMP15:%.*]] = call float @llvm.fmuladd.f32(float [[TMP0]], float [[TMP5]], float 0.000000e+00)
-; CHECK-NEXT: [[TMP16:%.*]] = fmul <2 x float> [[TMP14]], zeroinitializer
-; CHECK-NEXT: [[MUL6_I_I_I:%.*]] = fmul float [[TMP15]], 0.000000e+00
-; CHECK-NEXT: store <2 x float> [[TMP16]], ptr [[NOR1]], align 4
+; CHECK-NEXT: [[TMP12:%.*]] = call <2 x float> @llvm.fmuladd.v2f32(<2 x float> [[TMP2]], <2 x float> zeroinitializer, <2 x float> [[TMP11]])
+; CHECK-NEXT: [[TMP13:%.*]] = call <2 x float> @llvm.fmuladd.v2f32(<2 x float> [[TMP10]], <2 x float> [[TMP12]], <2 x float> zeroinitializer)
+; CHECK-NEXT: [[TMP14:%.*]] = call float @llvm.fmuladd.f32(float [[TMP0]], float [[TMP5]], float 0.000000e+00)
+; CHECK-NEXT: [[TMP15:%.*]] = fmul <2 x float> [[TMP13]], zeroinitializer
+; CHECK-NEXT: [[MUL6_I_I_I:%.*]] = fmul float [[TMP14]], 0.000000e+00
+; CHECK-NEXT: store <2 x float> [[TMP15]], ptr [[NOR1]], align 4
; CHECK-NEXT: store float [[MUL6_I_I_I]], ptr [[ARRAYIDX2_I265]], align 4
; CHECK-NEXT: ret i32 0
;
diff --git a/llvm/test/Transforms/SLPVectorizer/X86/pr35497.ll b/llvm/test/Transforms/SLPVectorizer/X86/pr35497.ll
index 9c7e8f66c6c6..cb24a9cefffa 100644
--- a/llvm/test/Transforms/SLPVectorizer/X86/pr35497.ll
+++ b/llvm/test/Transforms/SLPVectorizer/X86/pr35497.ll
@@ -68,10 +68,10 @@ define void @pr35497() local_unnamed_addr #0 {
; SSE-NEXT: [[TMP4:%.*]] = shufflevector <2 x i64> [[TMP3]], <2 x i64> poison, <2 x i32> <i32 1, i32 0>
; SSE-NEXT: [[TMP5:%.*]] = add nuw nsw <2 x i64> [[TMP4]], zeroinitializer
; SSE-NEXT: store <2 x i64> [[TMP5]], ptr undef, align 1
-; SSE-NEXT: [[TMP6:%.*]] = insertelement <2 x i64> [[TMP5]], i64 [[ADD]], i32 0
-; SSE-NEXT: [[TMP7:%.*]] = shl <2 x i64> [[TMP6]], <i64 2, i64 2>
-; SSE-NEXT: [[TMP8:%.*]] = and <2 x i64> [[TMP7]], <i64 20, i64 20>
-; SSE-NEXT: [[TMP9:%.*]] = shufflevector <2 x i64> [[TMP8]], <2 x i64> poison, <2 x i32> <i32 1, i32 0>
+; SSE-NEXT: [[TMP6:%.*]] = shufflevector <2 x i64> [[TMP5]], <2 x i64> poison, <2 x i32> <i32 1, i32 poison>
+; SSE-NEXT: [[TMP7:%.*]] = insertelement <2 x i64> [[TMP6]], i64 [[ADD]], i32 1
+; SSE-NEXT: [[TMP8:%.*]] = shl <2 x i64> [[TMP7]], <i64 2, i64 2>
+; SSE-NEXT: [[TMP9:%.*]] = and <2 x i64> [[TMP8]], <i64 20, i64 20>
; SSE-NEXT: [[TMP10:%.*]] = lshr <2 x i64> [[TMP5]], <i64 6, i64 6>
; SSE-NEXT: [[TMP11:%.*]] = add nuw nsw <2 x i64> [[TMP9]], [[TMP10]]
; SSE-NEXT: store <2 x i64> [[TMP11]], ptr [[ARRAYIDX2_2]], align 1
@@ -88,10 +88,10 @@ define void @pr35497() local_unnamed_addr #0 {
; AVX-NEXT: [[TMP3:%.*]] = and <2 x i64> [[TMP2]], <i64 20, i64 20>
; AVX-NEXT: [[TMP4:%.*]] = add nuw nsw <2 x i64> [[TMP3]], zeroinitializer
; AVX-NEXT: store <2 x i64> [[TMP4]], ptr undef, align 1
-; AVX-NEXT: [[TMP5:%.*]] = insertelement <2 x i64> [[TMP4]], i64 [[ADD]], i32 0
-; AVX-NEXT: [[TMP6:%.*]] = shl <2 x i64> [[TMP5]], <i64 2, i64 2>
-; AVX-NEXT: [[TMP7:%.*]] = and <2 x i64> [[TMP6]], <i64 20, i64 20>
-; AVX-NEXT: [[TMP8:%.*]] = shufflevector <2 x i64> [[TMP7]], <2 x i64> poison, <2 x i32> <i32 1, i32 0>
+; AVX-NEXT: [[TMP5:%.*]] = shufflevector <2 x i64> [[TMP4]], <2 x i64> poison, <2 x i32> <i32 1, i32 poison>
+; AVX-NEXT: [[TMP6:%.*]] = insertelement <2 x i64> [[TMP5]], i64 [[ADD]], i32 1
+; AVX-NEXT: [[TMP7:%.*]] = shl <2 x i64> [[TMP6]], <i64 2, i64 2>
+; AVX-NEXT: [[TMP8:%.*]] = and <2 x i64> [[TMP7]], <i64 20, i64 20>
; AVX-NEXT: [[TMP9:%.*]] = lshr <2 x i64> [[TMP4]], <i64 6, i64 6>
; AVX-NEXT: [[TMP10:%.*]] = add nuw nsw <2 x i64> [[TMP8]], [[TMP9]]
; AVX-NEXT: store <2 x i64> [[TMP10]], ptr [[ARRAYIDX2_2]], align 1
diff --git a/llvm/test/Transforms/SLPVectorizer/X86/reduction-transpose.ll b/llvm/test/Transforms/SLPVectorizer/X86/reduction-transpose.ll
index c051d909f752..ec90ca9bc674 100644
--- a/llvm/test/Transforms/SLPVectorizer/X86/reduction-transpose.ll
+++ b/llvm/test/Transforms/SLPVectorizer/X86/reduction-transpose.ll
@@ -18,9 +18,9 @@
define i32 @reduce_and4(i32 %acc, <4 x i32> %v1, <4 x i32> %v2, <4 x i32> %v3, <4 x i32> %v4) {
; SSE2-LABEL: @reduce_and4(
; SSE2-NEXT: entry:
-; SSE2-NEXT: [[TMP0:%.*]] = shufflevector <4 x i32> [[V4:%.*]], <4 x i32> [[V3:%.*]], <8 x i32> <i32 1, i32 0, i32 2, i32 3, i32 5, i32 4, i32 6, i32 7>
+; SSE2-NEXT: [[TMP0:%.*]] = shufflevector <4 x i32> [[V4:%.*]], <4 x i32> [[V3:%.*]], <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
; SSE2-NEXT: [[TMP1:%.*]] = call i32 @llvm.vector.reduce.and.v8i32(<8 x i32> [[TMP0]])
-; SSE2-NEXT: [[TMP2:%.*]] = shufflevector <4 x i32> [[V2:%.*]], <4 x i32> [[V1:%.*]], <8 x i32> <i32 1, i32 0, i32 2, i32 3, i32 5, i32 4, i32 6, i32 7>
+; SSE2-NEXT: [[TMP2:%.*]] = shufflevector <4 x i32> [[V2:%.*]], <4 x i32> [[V1:%.*]], <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
; SSE2-NEXT: [[TMP3:%.*]] = call i32 @llvm.vector.reduce.and.v8i32(<8 x i32> [[TMP2]])
; SSE2-NEXT: [[OP_RDX:%.*]] = and i32 [[TMP1]], [[TMP3]]
; SSE2-NEXT: [[OP_RDX1:%.*]] = and i32 [[OP_RDX]], [[ACC:%.*]]
@@ -28,9 +28,9 @@ define i32 @reduce_and4(i32 %acc, <4 x i32> %v1, <4 x i32> %v2, <4 x i32> %v3, <
;
; SSE42-LABEL: @reduce_and4(
; SSE42-NEXT: entry:
-; SSE42-NEXT: [[TMP0:%.*]] = shufflevector <4 x i32> [[V4:%.*]], <4 x i32> [[V3:%.*]], <8 x i32> <i32 1, i32 0, i32 2, i32 3, i32 5, i32 4, i32 6, i32 7>
+; SSE42-NEXT: [[TMP0:%.*]] = shufflevector <4 x i32> [[V4:%.*]], <4 x i32> [[V3:%.*]], <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
; SSE42-NEXT: [[TMP1:%.*]] = call i32 @llvm.vector.reduce.and.v8i32(<8 x i32> [[TMP0]])
-; SSE42-NEXT: [[TMP2:%.*]] = shufflevector <4 x i32> [[V2:%.*]], <4 x i32> [[V1:%.*]], <8 x i32> <i32 1, i32 0, i32 2, i32 3, i32 5, i32 4, i32 6, i32 7>
+; SSE42-NEXT: [[TMP2:%.*]] = shufflevector <4 x i32> [[V2:%.*]], <4 x i32> [[V1:%.*]], <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
; SSE42-NEXT: [[TMP3:%.*]] = call i32 @llvm.vector.reduce.and.v8i32(<8 x i32> [[TMP2]])
; SSE42-NEXT: [[OP_RDX:%.*]] = and i32 [[TMP1]], [[TMP3]]
; SSE42-NEXT: [[OP_RDX1:%.*]] = and i32 [[OP_RDX]], [[ACC:%.*]]
@@ -92,18 +92,18 @@ entry:
define i32 @reduce_and4_transpose(i32 %acc, <4 x i32> %v1, <4 x i32> %v2, <4 x i32> %v3, <4 x i32> %v4) {
; SSE2-LABEL: @reduce_and4_transpose(
-; SSE2-NEXT: [[TMP1:%.*]] = shufflevector <4 x i32> [[V4:%.*]], <4 x i32> [[V3:%.*]], <8 x i32> <i32 3, i32 2, i32 1, i32 0, i32 7, i32 6, i32 5, i32 4>
+; SSE2-NEXT: [[TMP1:%.*]] = shufflevector <4 x i32> [[V4:%.*]], <4 x i32> [[V3:%.*]], <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
; SSE2-NEXT: [[TMP2:%.*]] = call i32 @llvm.vector.reduce.and.v8i32(<8 x i32> [[TMP1]])
-; SSE2-NEXT: [[TMP3:%.*]] = shufflevector <4 x i32> [[V2:%.*]], <4 x i32> [[V1:%.*]], <8 x i32> <i32 3, i32 2, i32 1, i32 0, i32 7, i32 6, i32 5, i32 4>
+; SSE2-NEXT: [[TMP3:%.*]] = shufflevector <4 x i32> [[V2:%.*]], <4 x i32> [[V1:%.*]], <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
; SSE2-NEXT: [[TMP4:%.*]] = call i32 @llvm.vector.reduce.and.v8i32(<8 x i32> [[TMP3]])
; SSE2-NEXT: [[OP_RDX:%.*]] = and i32 [[TMP2]], [[TMP4]]
; SSE2-NEXT: [[OP_RDX1:%.*]] = and i32 [[OP_RDX]], [[ACC:%.*]]
; SSE2-NEXT: ret i32 [[OP_RDX1]]
;
; SSE42-LABEL: @reduce_and4_transpose(
-; SSE42-NEXT: [[TMP1:%.*]] = shufflevector <4 x i32> [[V4:%.*]], <4 x i32> [[V3:%.*]], <8 x i32> <i32 3, i32 2, i32 1, i32 0, i32 7, i32 6, i32 5, i32 4>
+; SSE42-NEXT: [[TMP1:%.*]] = shufflevector <4 x i32> [[V4:%.*]], <4 x i32> [[V3:%.*]], <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
; SSE42-NEXT: [[TMP2:%.*]] = call i32 @llvm.vector.reduce.and.v8i32(<8 x i32> [[TMP1]])
-; SSE42-NEXT: [[TMP3:%.*]] = shufflevector <4 x i32> [[V2:%.*]], <4 x i32> [[V1:%.*]], <8 x i32> <i32 3, i32 2, i32 1, i32 0, i32 7, i32 6, i32 5, i32 4>
+; SSE42-NEXT: [[TMP3:%.*]] = shufflevector <4 x i32> [[V2:%.*]], <4 x i32> [[V1:%.*]], <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
; SSE42-NEXT: [[TMP4:%.*]] = call i32 @llvm.vector.reduce.and.v8i32(<8 x i32> [[TMP3]])
; SSE42-NEXT: [[OP_RDX:%.*]] = and i32 [[TMP2]], [[TMP4]]
; SSE42-NEXT: [[OP_RDX1:%.*]] = and i32 [[OP_RDX]], [[ACC:%.*]]
diff --git a/llvm/test/Transforms/SLPVectorizer/X86/reorder-clustered-node.ll b/llvm/test/Transforms/SLPVectorizer/X86/reorder-clustered-node.ll
index b5533463c393..1a6ff2385905 100644
--- a/llvm/test/Transforms/SLPVectorizer/X86/reorder-clustered-node.ll
+++ b/llvm/test/Transforms/SLPVectorizer/X86/reorder-clustered-node.ll
@@ -17,13 +17,12 @@ define i1 @test(ptr %arg, ptr %i233, i64 %i241, ptr %i235, ptr %i237, ptr %i227)
; CHECK-NEXT: [[TMP5:%.*]] = shufflevector <8 x ptr> [[TMP3]], <8 x ptr> poison, <4 x i32> <i32 2, i32 0, i32 poison, i32 poison>
; CHECK-NEXT: [[TMP6:%.*]] = insertelement <4 x ptr> [[TMP5]], ptr [[I245]], i32 2
; CHECK-NEXT: [[TMP7:%.*]] = insertelement <4 x ptr> [[TMP6]], ptr [[I248]], i32 3
-; CHECK-NEXT: [[TMP8:%.*]] = shufflevector <4 x ptr> [[TMP7]], <4 x ptr> poison, <8 x i32> <i32 2, i32 0, i32 1, i32 3, i32 0, i32 1, i32 2, i32 3>
-; CHECK-NEXT: [[TMP9:%.*]] = shufflevector <8 x ptr> [[TMP1]], <8 x ptr> <ptr poison, ptr poison, ptr null, ptr null, ptr null, ptr null, ptr null, ptr null>, <8 x i32> <i32 0, i32 1, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+; CHECK-NEXT: [[TMP8:%.*]] = shufflevector <4 x ptr> [[TMP7]], <4 x ptr> poison, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 0, i32 1, i32 2, i32 3>
+; CHECK-NEXT: [[TMP9:%.*]] = shufflevector <8 x ptr> [[TMP1]], <8 x ptr> <ptr poison, ptr null, ptr poison, ptr null, ptr null, ptr null, ptr null, ptr null>, <8 x i32> <i32 1, i32 9, i32 0, i32 11, i32 12, i32 13, i32 14, i32 15>
; CHECK-NEXT: [[TMP10:%.*]] = icmp ult <8 x ptr> [[TMP8]], [[TMP9]]
-; CHECK-NEXT: [[TMP11:%.*]] = shufflevector <8 x i1> [[TMP10]], <8 x i1> poison, <8 x i32> <i32 1, i32 2, i32 0, i32 3, i32 4, i32 5, i32 6, i32 7>
-; CHECK-NEXT: [[TMP12:%.*]] = or <8 x i1> [[TMP4]], [[TMP11]]
-; CHECK-NEXT: [[TMP13:%.*]] = call i1 @llvm.vector.reduce.and.v8i1(<8 x i1> [[TMP12]])
-; CHECK-NEXT: [[OP_RDX:%.*]] = and i1 [[TMP13]], false
+; CHECK-NEXT: [[TMP11:%.*]] = or <8 x i1> [[TMP4]], [[TMP10]]
+; CHECK-NEXT: [[TMP12:%.*]] = call i1 @llvm.vector.reduce.and.v8i1(<8 x i1> [[TMP11]])
+; CHECK-NEXT: [[OP_RDX:%.*]] = and i1 [[TMP12]], false
; CHECK-NEXT: ret i1 [[OP_RDX]]
;
bb:
diff --git a/llvm/test/Transforms/SLPVectorizer/X86/reorder-reused-masked-gather.ll b/llvm/test/Transforms/SLPVectorizer/X86/reorder-reused-masked-gather.ll
index f65f61975a61..cd7ad210ca56 100644
--- a/llvm/test/Transforms/SLPVectorizer/X86/reorder-reused-masked-gather.ll
+++ b/llvm/test/Transforms/SLPVectorizer/X86/reorder-reused-masked-gather.ll
@@ -8,12 +8,11 @@ define void @test(ptr noalias %0, ptr %p) {
; CHECK-NEXT: [[TMP4:%.*]] = getelementptr float, <8 x ptr> [[TMP3]], <8 x i64> <i64 15, i64 4, i64 5, i64 0, i64 2, i64 6, i64 7, i64 8>
; CHECK-NEXT: [[TMP5:%.*]] = getelementptr inbounds float, ptr [[TMP0:%.*]], i64 2
; CHECK-NEXT: [[TMP6:%.*]] = call <8 x float> @llvm.masked.gather.v8f32.v8p0(<8 x ptr> [[TMP4]], i32 4, <8 x i1> <i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true>, <8 x float> poison)
-; CHECK-NEXT: [[TMP7:%.*]] = shufflevector <8 x float> [[TMP6]], <8 x float> poison, <16 x i32> <i32 4, i32 3, i32 0, i32 1, i32 2, i32 0, i32 1, i32 2, i32 0, i32 2, i32 5, i32 6, i32 7, i32 5, i32 6, i32 7>
+; CHECK-NEXT: [[TMP7:%.*]] = shufflevector <8 x float> [[TMP6]], <8 x float> poison, <16 x i32> <i32 0, i32 1, i32 2, i32 0, i32 1, i32 2, i32 0, i32 3, i32 2, i32 4, i32 5, i32 6, i32 7, i32 5, i32 6, i32 7>
; CHECK-NEXT: [[TMP8:%.*]] = shufflevector <8 x float> [[TMP6]], <8 x float> poison, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison>
-; CHECK-NEXT: [[TMP9:%.*]] = shufflevector <16 x float> [[TMP8]], <16 x float> <float poison, float poison, float 0.000000e+00, float 0.000000e+00, float 0.000000e+00, float 0.000000e+00, float 0.000000e+00, float 0.000000e+00, float 0.000000e+00, float 0.000000e+00, float 0.000000e+00, float 0.000000e+00, float 0.000000e+00, float 0.000000e+00, float 0.000000e+00, float 0.000000e+00>, <16 x i32> <i32 0, i32 1, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31>
+; CHECK-NEXT: [[TMP9:%.*]] = shufflevector <16 x float> [[TMP8]], <16 x float> <float 0.000000e+00, float 0.000000e+00, float 0.000000e+00, float 0.000000e+00, float 0.000000e+00, float 0.000000e+00, float 0.000000e+00, float poison, float 0.000000e+00, float poison, float 0.000000e+00, float 0.000000e+00, float 0.000000e+00, float 0.000000e+00, float 0.000000e+00, float 0.000000e+00>, <16 x i32> <i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 1, i32 24, i32 0, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31>
; CHECK-NEXT: [[TMP10:%.*]] = fadd reassoc nsz arcp contract afn <16 x float> [[TMP7]], [[TMP9]]
-; CHECK-NEXT: [[TMP11:%.*]] = shufflevector <16 x float> [[TMP10]], <16 x float> poison, <16 x i32> <i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 1, i32 9, i32 0, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
-; CHECK-NEXT: store <16 x float> [[TMP11]], ptr [[TMP5]], align 4
+; CHECK-NEXT: store <16 x float> [[TMP10]], ptr [[TMP5]], align 4
; CHECK-NEXT: ret void
;
%2 = getelementptr inbounds float, ptr %p, i64 2
diff --git a/llvm/test/Transforms/SLPVectorizer/X86/reorder-vf-to-resize.ll b/llvm/test/Transforms/SLPVectorizer/X86/reorder-vf-to-resize.ll
index af606fc3a738..d3c978412cdd 100644
--- a/llvm/test/Transforms/SLPVectorizer/X86/reorder-vf-to-resize.ll
+++ b/llvm/test/Transforms/SLPVectorizer/X86/reorder-vf-to-resize.ll
@@ -6,7 +6,7 @@ define void @main(ptr %0) {
; CHECK-NEXT: [[TMP2:%.*]] = load <2 x double>, ptr [[TMP0:%.*]], align 8
; CHECK-NEXT: [[TMP3:%.*]] = fsub <2 x double> zeroinitializer, [[TMP2]]
; CHECK-NEXT: [[TMP4:%.*]] = fadd <2 x double> zeroinitializer, [[TMP2]]
-; CHECK-NEXT: [[TMP5:%.*]] = shufflevector <2 x double> [[TMP3]], <2 x double> [[TMP4]], <4 x i32> <i32 1, i32 2, i32 1, i32 2>
+; CHECK-NEXT: [[TMP5:%.*]] = shufflevector <2 x double> [[TMP3]], <2 x double> [[TMP4]], <4 x i32> <i32 1, i32 2, i32 2, i32 1>
; CHECK-NEXT: [[TMP6:%.*]] = fmul <4 x double> [[TMP5]], zeroinitializer
; CHECK-NEXT: [[TMP7:%.*]] = call <4 x double> @llvm.fabs.v4f64(<4 x double> [[TMP6]])
; CHECK-NEXT: [[TMP8:%.*]] = fcmp oeq <4 x double> [[TMP7]], zeroinitializer
diff --git a/llvm/test/Transforms/SLPVectorizer/X86/scatter-vectorize-reorder.ll b/llvm/test/Transforms/SLPVectorizer/X86/scatter-vectorize-reorder.ll
index c79e9b94278c..fb2b653aefc8 100644
--- a/llvm/test/Transforms/SLPVectorizer/X86/scatter-vectorize-reorder.ll
+++ b/llvm/test/Transforms/SLPVectorizer/X86/scatter-vectorize-reorder.ll
@@ -12,10 +12,10 @@ define void @test() {
; CHECK-NEXT: [[TMP1:%.*]] = fsub <2 x float> zeroinitializer, [[TMP0]]
; CHECK-NEXT: [[TMP2:%.*]] = load float, ptr [[ARRAYIDX10_I_I86]], align 4
; CHECK-NEXT: [[TMP3:%.*]] = load float, ptr undef, align 4
-; CHECK-NEXT: [[TMP4:%.*]] = shufflevector <2 x float> [[TMP0]], <2 x float> <float 0.000000e+00, float poison>, <2 x i32> <i32 2, i32 1>
-; CHECK-NEXT: [[TMP5:%.*]] = insertelement <2 x float> poison, float [[TMP3]], i32 0
-; CHECK-NEXT: [[TMP6:%.*]] = insertelement <2 x float> [[TMP5]], float [[TMP2]], i32 1
-; CHECK-NEXT: [[TMP7:%.*]] = shufflevector <2 x float> [[TMP6]], <2 x float> <float poison, float 0.000000e+00>, <2 x i32> <i32 1, i32 3>
+; CHECK-NEXT: [[TMP4:%.*]] = shufflevector <2 x float> [[TMP0]], <2 x float> <float poison, float 0.000000e+00>, <2 x i32> <i32 1, i32 3>
+; CHECK-NEXT: [[TMP5:%.*]] = insertelement <2 x float> poison, float [[TMP2]], i32 0
+; CHECK-NEXT: [[TMP6:%.*]] = insertelement <2 x float> [[TMP5]], float [[TMP3]], i32 1
+; CHECK-NEXT: [[TMP7:%.*]] = shufflevector <2 x float> [[TMP6]], <2 x float> <float 0.000000e+00, float poison>, <2 x i32> <i32 2, i32 0>
; CHECK-NEXT: [[TMP8:%.*]] = call <2 x float> @llvm.fmuladd.v2f32(<2 x float> [[TMP4]], <2 x float> [[TMP6]], <2 x float> [[TMP7]])
; CHECK-NEXT: br i1 false, label [[BB2:%.*]], label [[BB3:%.*]]
; CHECK: bb2:
@@ -23,12 +23,11 @@ define void @test() {
; CHECK-NEXT: br label [[BB3]]
; CHECK: bb3:
; CHECK-NEXT: [[TMP10:%.*]] = phi <2 x float> [ [[TMP9]], [[BB2]] ], [ zeroinitializer, [[BB1]] ]
-; CHECK-NEXT: [[TMP11:%.*]] = shufflevector <2 x float> [[TMP10]], <2 x float> poison, <2 x i32> <i32 1, i32 0>
-; CHECK-NEXT: [[TMP12:%.*]] = fadd <2 x float> [[TMP1]], [[TMP11]]
-; CHECK-NEXT: [[TMP13:%.*]] = fadd <2 x float> [[TMP12]], zeroinitializer
+; CHECK-NEXT: [[TMP11:%.*]] = fadd <2 x float> [[TMP1]], [[TMP10]]
+; CHECK-NEXT: [[TMP12:%.*]] = fadd <2 x float> [[TMP11]], zeroinitializer
+; CHECK-NEXT: [[TMP13:%.*]] = fsub <2 x float> [[TMP12]], zeroinitializer
; CHECK-NEXT: [[TMP14:%.*]] = fsub <2 x float> [[TMP13]], zeroinitializer
-; CHECK-NEXT: [[TMP15:%.*]] = fsub <2 x float> [[TMP14]], zeroinitializer
-; CHECK-NEXT: store <2 x float> [[TMP15]], ptr [[ARRAYIDX21_I]], align 16
+; CHECK-NEXT: store <2 x float> [[TMP14]], ptr [[ARRAYIDX21_I]], align 16
; CHECK-NEXT: ret void
;
entry:
diff --git a/llvm/test/Transforms/SLPVectorizer/X86/shrink_after_reorder2.ll b/llvm/test/Transforms/SLPVectorizer/X86/shrink_after_reorder2.ll
index 8d1d257820f0..9e3ba05f88da 100644
--- a/llvm/test/Transforms/SLPVectorizer/X86/shrink_after_reorder2.ll
+++ b/llvm/test/Transforms/SLPVectorizer/X86/shrink_after_reorder2.ll
@@ -9,10 +9,10 @@ define void @foo(ptr %this, ptr %p, i32 %add7) {
; CHECK-NEXT: entry:
; CHECK-NEXT: [[TMP0:%.*]] = insertelement <2 x i32> <i32 poison, i32 undef>, i32 [[ADD7:%.*]], i32 0
; CHECK-NEXT: [[TMP1:%.*]] = sdiv <2 x i32> [[TMP0]], <i32 2, i32 2>
-; CHECK-NEXT: [[TMP2:%.*]] = shufflevector <2 x i32> [[TMP1]], <2 x i32> poison, <4 x i32> <i32 1, i32 1, i32 0, i32 0>
+; CHECK-NEXT: [[TMP2:%.*]] = shufflevector <2 x i32> [[TMP1]], <2 x i32> poison, <4 x i32> <i32 1, i32 0, i32 1, i32 0>
; CHECK-NEXT: switch i32 undef, label [[SW_EPILOG:%.*]] [
-; CHECK-NEXT: i32 0, label [[SW_BB:%.*]]
-; CHECK-NEXT: i32 2, label [[SW_BB]]
+; CHECK-NEXT: i32 0, label [[SW_BB:%.*]]
+; CHECK-NEXT: i32 2, label [[SW_BB]]
; CHECK-NEXT: ]
; CHECK: sw.bb:
; CHECK-NEXT: [[TMP3:%.*]] = xor <2 x i32> [[TMP1]], <i32 -1, i32 -1>
@@ -21,10 +21,11 @@ define void @foo(ptr %this, ptr %p, i32 %add7) {
; CHECK-NEXT: br label [[SW_EPILOG]]
; CHECK: sw.epilog:
; CHECK-NEXT: [[TMP6:%.*]] = phi <2 x i32> [ undef, [[ENTRY:%.*]] ], [ [[TMP5]], [[SW_BB]] ]
-; CHECK-NEXT: [[TMP7:%.*]] = shufflevector <2 x i32> [[TMP6]], <2 x i32> poison, <4 x i32> <i32 1, i32 1, i32 0, i32 0>
+; CHECK-NEXT: [[TMP7:%.*]] = shufflevector <2 x i32> [[TMP6]], <2 x i32> poison, <4 x i32> <i32 1, i32 0, i32 1, i32 0>
; CHECK-NEXT: [[TMP8:%.*]] = sub <4 x i32> undef, [[TMP2]]
; CHECK-NEXT: [[TMP9:%.*]] = add <4 x i32> [[TMP8]], [[TMP7]]
-; CHECK-NEXT: store <4 x i32> [[TMP9]], ptr [[P:%.*]], align 4
+; CHECK-NEXT: [[TMP10:%.*]] = shufflevector <4 x i32> [[TMP9]], <4 x i32> poison, <4 x i32> <i32 0, i32 2, i32 1, i32 3>
+; CHECK-NEXT: store <4 x i32> [[TMP10]], ptr [[P:%.*]], align 4
; CHECK-NEXT: ret void
;
entry:
diff --git a/llvm/test/Transforms/SLPVectorizer/X86/vec3-reorder-reshuffle.ll b/llvm/test/Transforms/SLPVectorizer/X86/vec3-reorder-reshuffle.ll
index 9584a663b2d4..46cca9b078ac 100644
--- a/llvm/test/Transforms/SLPVectorizer/X86/vec3-reorder-reshuffle.ll
+++ b/llvm/test/Transforms/SLPVectorizer/X86/vec3-reorder-reshuffle.ll
@@ -182,19 +182,18 @@ define i32 @reorder_indices_1(float %0) {
; CHECK-NEXT: [[TMP4:%.*]] = fneg float [[TMP3]]
; CHECK-NEXT: [[NEG11_I:%.*]] = fmul float [[TMP4]], [[TMP0]]
; CHECK-NEXT: [[TMP5:%.*]] = call float @llvm.fmuladd.f32(float [[TMP1]], float 0.000000e+00, float [[NEG11_I]])
-; CHECK-NEXT: [[TMP6:%.*]] = insertelement <2 x float> poison, float [[TMP1]], i32 0
-; CHECK-NEXT: [[TMP7:%.*]] = shufflevector <2 x float> [[TMP6]], <2 x float> [[TMP2]], <2 x i32> <i32 0, i32 3>
+; CHECK-NEXT: [[TMP6:%.*]] = shufflevector <2 x float> [[TMP2]], <2 x float> poison, <2 x i32> <i32 1, i32 poison>
+; CHECK-NEXT: [[TMP7:%.*]] = insertelement <2 x float> [[TMP6]], float [[TMP1]], i32 1
; CHECK-NEXT: [[TMP8:%.*]] = fneg <2 x float> [[TMP7]]
; CHECK-NEXT: [[TMP9:%.*]] = insertelement <2 x float> poison, float [[TMP0]], i32 0
; CHECK-NEXT: [[TMP10:%.*]] = shufflevector <2 x float> [[TMP9]], <2 x float> poison, <2 x i32> zeroinitializer
; CHECK-NEXT: [[TMP11:%.*]] = fmul <2 x float> [[TMP8]], [[TMP10]]
-; CHECK-NEXT: [[TMP12:%.*]] = shufflevector <2 x float> [[TMP11]], <2 x float> poison, <2 x i32> <i32 1, i32 0>
-; CHECK-NEXT: [[TMP13:%.*]] = call <2 x float> @llvm.fmuladd.v2f32(<2 x float> [[TMP2]], <2 x float> zeroinitializer, <2 x float> [[TMP12]])
-; CHECK-NEXT: [[TMP14:%.*]] = call <2 x float> @llvm.fmuladd.v2f32(<2 x float> [[TMP10]], <2 x float> [[TMP13]], <2 x float> zeroinitializer)
-; CHECK-NEXT: [[TMP15:%.*]] = call float @llvm.fmuladd.f32(float [[TMP0]], float [[TMP5]], float 0.000000e+00)
-; CHECK-NEXT: [[TMP16:%.*]] = fmul <2 x float> [[TMP14]], zeroinitializer
-; CHECK-NEXT: [[MUL6_I_I_I:%.*]] = fmul float [[TMP15]], 0.000000e+00
-; CHECK-NEXT: store <2 x float> [[TMP16]], ptr [[NOR1]], align 4
+; CHECK-NEXT: [[TMP12:%.*]] = call <2 x float> @llvm.fmuladd.v2f32(<2 x float> [[TMP2]], <2 x float> zeroinitializer, <2 x float> [[TMP11]])
+; CHECK-NEXT: [[TMP13:%.*]] = call <2 x float> @llvm.fmuladd.v2f32(<2 x float> [[TMP10]], <2 x float> [[TMP12]], <2 x float> zeroinitializer)
+; CHECK-NEXT: [[TMP14:%.*]] = call float @llvm.fmuladd.f32(float [[TMP0]], float [[TMP5]], float 0.000000e+00)
+; CHECK-NEXT: [[TMP15:%.*]] = fmul <2 x float> [[TMP13]], zeroinitializer
+; CHECK-NEXT: [[MUL6_I_I_I:%.*]] = fmul float [[TMP14]], 0.000000e+00
+; CHECK-NEXT: store <2 x float> [[TMP15]], ptr [[NOR1]], align 4
; CHECK-NEXT: store float [[MUL6_I_I_I]], ptr [[ARRAYIDX2_I265]], align 4
; CHECK-NEXT: ret i32 0
;
diff --git a/llvm/test/Transforms/SpeculativeExecution/PR46267.ll b/llvm/test/Transforms/SpeculativeExecution/PR46267.ll
index c27b492b4b87..d940ee6a7863 100644
--- a/llvm/test/Transforms/SpeculativeExecution/PR46267.ll
+++ b/llvm/test/Transforms/SpeculativeExecution/PR46267.ll
@@ -41,12 +41,16 @@ land.rhs: ; preds = %entry
; CHECK-NEXT: call void @llvm.dbg.declare(metadata ptr %y
; CHECK-NEXT: %a0 = load i32, ptr undef, align 1
; CHECK-NEXT: call void @llvm.dbg.value(metadata i32 %a0
+; CHECK-NEXT: call void @llvm.dbg.label
call void @llvm.dbg.label(metadata !11), !dbg !10
%y = alloca i32, align 4
call void @llvm.dbg.declare(metadata ptr %y, metadata !14, metadata !DIExpression()), !dbg !10
%a0 = load i32, ptr undef, align 1
call void @llvm.dbg.value(metadata i32 %a0, metadata !9, metadata !DIExpression()), !dbg !10
+  ;; RemoveDIs: Check that a label attached to a hoisted instruction
+  ;; gets left behind (matching intrinsic-style debug info behaviour).
+ call void @llvm.dbg.label(metadata !15), !dbg !10
%a2 = add i32 %i, 0
call void @llvm.dbg.value(metadata i32 %a2, metadata !13, metadata !DIExpression()), !dbg !10
@@ -82,3 +86,4 @@ attributes #1 = { nounwind readnone speculatable willreturn }
!12 = !DILocalVariable(name: "x", scope: !6, file: !1, line: 3, type: !4)
!13 = !DILocalVariable(name: "a2", scope: !6, file: !1, line: 3, type: !4)
!14 = !DILocalVariable(name: "y", scope: !6, file: !1, line: 3, type: !4)
+!15 = !DILabel(scope: !6, name: "label2", file: !1, line: 2)
diff --git a/llvm/test/Transforms/VectorCombine/RISCV/load-widening.ll b/llvm/test/Transforms/VectorCombine/RISCV/load-widening.ll
new file mode 100644
index 000000000000..0a43ad2f9a36
--- /dev/null
+++ b/llvm/test/Transforms/VectorCombine/RISCV/load-widening.ll
@@ -0,0 +1,19 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 4
+; RUN: opt < %s -passes=vector-combine -S -mtriple=riscv32 -mattr=+v | FileCheck %s
+; RUN: opt < %s -passes=vector-combine -S -mtriple=riscv64 -mattr=+v | FileCheck %s
+
+define void @fixed_load_scalable_src(ptr %p) {
+; CHECK-LABEL: define void @fixed_load_scalable_src(
+; CHECK-SAME: ptr [[P:%.*]]) #[[ATTR0:[0-9]+]] {
+; CHECK-NEXT: entry:
+; CHECK-NEXT: store <vscale x 4 x i16> zeroinitializer, ptr [[P]], align 8
+; CHECK-NEXT: [[TMP0:%.*]] = load <4 x i16>, ptr [[P]], align 8
+; CHECK-NEXT: [[TMP1:%.*]] = shufflevector <4 x i16> [[TMP0]], <4 x i16> zeroinitializer, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 poison, i32 poison, i32 poison, i32 poison>
+; CHECK-NEXT: ret void
+;
+entry:
+ store <vscale x 4 x i16> zeroinitializer, ptr %p
+ %0 = load <4 x i16>, ptr %p
+ %1 = shufflevector <4 x i16> %0, <4 x i16> zeroinitializer, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 poison, i32 poison, i32 poison, i32 poison>
+ ret void
+}
diff --git a/llvm/test/tools/llc/new-pm/pipeline.mir b/llvm/test/tools/llc/new-pm/pipeline.mir
index c7dda4b6d135..fcc7d4f8f02e 100644
--- a/llvm/test/tools/llc/new-pm/pipeline.mir
+++ b/llvm/test/tools/llc/new-pm/pipeline.mir
@@ -1,7 +1,6 @@
# RUN: llc -mtriple=x86_64-pc-linux-gnu -x mir -passes=no-op-machine-function --print-pipeline-passes -filetype=null < %s | FileCheck %s --match-full-lines
-# CHECK: IR pipeline: PrintMIRPreparePass
-# CHECK: MIR pipeline: no-op-machine-function,print,FreeMachineFunctionPass
+# CHECK: machine-function(no-op-machine-function),PrintMIRPreparePass,machine-function(print,FreeMachineFunctionPass)
---
name: f
diff --git a/llvm/test/tools/llc/new-pm/start-stop.ll b/llvm/test/tools/llc/new-pm/start-stop.ll
index c25e45d1f7ab..8c795a7a70f8 100644
--- a/llvm/test/tools/llc/new-pm/start-stop.ll
+++ b/llvm/test/tools/llc/new-pm/start-stop.ll
@@ -1,4 +1,5 @@
-; RUN: llc -mtriple=x86_64-pc-linux-gnu -enable-new-pm -print-pipeline-passes -start-before=mergeicmps -stop-after=gc-lowering -filetype=null %s | FileCheck --match-full-lines %s
-
-; CHECK: IR pipeline: function(mergeicmps,expand-memcmp,gc-lowering)
+; RUN: llc -mtriple=x86_64-pc-linux-gnu -enable-new-pm -print-pipeline-passes -start-before=mergeicmps -stop-after=gc-lowering -filetype=null %s | FileCheck --match-full-lines %s --check-prefix=NULL
+; RUN: llc -mtriple=x86_64-pc-linux-gnu -enable-new-pm -print-pipeline-passes -start-before=mergeicmps -stop-after=gc-lowering -o /dev/null %s | FileCheck --match-full-lines %s --check-prefix=OBJ
+; NULL: function(mergeicmps,expand-memcmp,gc-lowering)
+; OBJ: function(mergeicmps,expand-memcmp,gc-lowering),PrintMIRPreparePass,machine-function(print)
diff --git a/llvm/test/tools/llvm-mca/AArch64/HiSilicon/tsv110-forwarding.s b/llvm/test/tools/llvm-mca/AArch64/HiSilicon/tsv110-forwarding.s
new file mode 100644
index 000000000000..207822b61839
--- /dev/null
+++ b/llvm/test/tools/llvm-mca/AArch64/HiSilicon/tsv110-forwarding.s
@@ -0,0 +1,83 @@
+# NOTE: Assertions have been autogenerated by utils/update_mca_test_checks.py
+# RUN: llvm-mca -mtriple=aarch64 -mcpu=tsv110 --instruction-info=0 --resource-pressure=0 --timeline --iterations=1 < %s | FileCheck %s
+
+# LLVM-MCA-BEGIN madd nobypass
+mul x0, x1, x2
+add x0, x0, x1
+add x0, x0, x1
+add x0, x0, x1
+# LLVM-MCA-END
+
+# LLVM-MCA-BEGIN madd bypass
+mul x0, x1, x2
+madd x0, x1, x2, x0
+madd x0, x1, x2, x0
+madd x0, x0, x0, x0
+# LLVM-MCA-END
+
+# CHECK: [0] Code Region - madd nobypass
+
+# CHECK: Iterations: 1
+# CHECK-NEXT: Instructions: 4
+# CHECK-NEXT: Total Cycles: 10
+# CHECK-NEXT: Total uOps: 4
+
+# CHECK: Dispatch Width: 4
+# CHECK-NEXT: uOps Per Cycle: 0.40
+# CHECK-NEXT: IPC: 0.40
+# CHECK-NEXT: Block RThroughput: 1.0
+
+# CHECK: Timeline view:
+# CHECK-NEXT: Index 0123456789
+
+# CHECK: [0,0] DeeeeER . mul x0, x1, x2
+# CHECK-NEXT: [0,1] D====eER . add x0, x0, x1
+# CHECK-NEXT: [0,2] D=====eER. add x0, x0, x1
+# CHECK-NEXT: [0,3] D======eER add x0, x0, x1
+
+# CHECK: Average Wait times (based on the timeline view):
+# CHECK-NEXT: [0]: Executions
+# CHECK-NEXT: [1]: Average time spent waiting in a scheduler's queue
+# CHECK-NEXT: [2]: Average time spent waiting in a scheduler's queue while ready
+# CHECK-NEXT: [3]: Average time elapsed from WB until retire stage
+
+# CHECK: [0] [1] [2] [3]
+# CHECK-NEXT: 0. 1 1.0 1.0 0.0 mul x0, x1, x2
+# CHECK-NEXT: 1. 1 5.0 0.0 0.0 add x0, x0, x1
+# CHECK-NEXT: 2. 1 6.0 0.0 0.0 add x0, x0, x1
+# CHECK-NEXT: 3. 1 7.0 0.0 0.0 add x0, x0, x1
+# CHECK-NEXT: 1 4.8 0.3 0.0 <total>
+
+# CHECK: [1] Code Region - madd bypass
+
+# CHECK: Iterations: 1
+# CHECK-NEXT: Instructions: 4
+# CHECK-NEXT: Total Cycles: 13
+# CHECK-NEXT: Total uOps: 4
+
+# CHECK: Dispatch Width: 4
+# CHECK-NEXT: uOps Per Cycle: 0.31
+# CHECK-NEXT: IPC: 0.31
+# CHECK-NEXT: Block RThroughput: 4.0
+
+# CHECK: Timeline view:
+# CHECK-NEXT: 012
+# CHECK-NEXT: Index 0123456789
+
+# CHECK: [0,0] DeeeeER . . mul x0, x1, x2
+# CHECK-NEXT: [0,1] D=eeeeER . . madd x0, x1, x2, x0
+# CHECK-NEXT: [0,2] D==eeeeER . . madd x0, x1, x2, x0
+# CHECK-NEXT: [0,3] D======eeeeER madd x0, x0, x0, x0
+
+# CHECK: Average Wait times (based on the timeline view):
+# CHECK-NEXT: [0]: Executions
+# CHECK-NEXT: [1]: Average time spent waiting in a scheduler's queue
+# CHECK-NEXT: [2]: Average time spent waiting in a scheduler's queue while ready
+# CHECK-NEXT: [3]: Average time elapsed from WB until retire stage
+
+# CHECK: [0] [1] [2] [3]
+# CHECK-NEXT: 0. 1 1.0 1.0 0.0 mul x0, x1, x2
+# CHECK-NEXT: 1. 1 2.0 0.0 0.0 madd x0, x1, x2, x0
+# CHECK-NEXT: 2. 1 3.0 0.0 0.0 madd x0, x1, x2, x0
+# CHECK-NEXT: 3. 1 7.0 0.0 0.0 madd x0, x0, x0, x0
+# CHECK-NEXT: 1 3.3 0.3 0.0 <total>
diff --git a/llvm/test/tools/llvm-readobj/ELF/decompress-zlib-unsupported.test b/llvm/test/tools/llvm-readobj/ELF/decompress-zlib-unsupported.test
new file mode 100644
index 000000000000..f4c73de7ca6c
--- /dev/null
+++ b/llvm/test/tools/llvm-readobj/ELF/decompress-zlib-unsupported.test
@@ -0,0 +1,32 @@
+# UNSUPPORTED: zlib
+# RUN: yaml2obj %s -o %t
+# RUN: llvm-readobj -z -p .a -x .b %t 2>&1 | FileCheck %s -DFILE=%t
+
+# CHECK: String dump of section '.a':
+# CHECK-NEXT: warning: '[[FILE]]': LLVM was not built with LLVM_ENABLE_ZLIB or did not find zlib at build time
+# CHECK-NEXT: [ 0] .
+# CHECK-NEXT: [ 8] .
+# CHECK-NEXT: [ 10] .
+# CHECK-NEXT: [ 18] x.c.
+# CHECK-NEXT: [ 1e] .
+# CHECK-NEXT: [ 20] .
+# CHECK-NEXT: Hex dump of section '.b':
+# CHECK-NEXT: warning: '[[FILE]]': LLVM was not built with LLVM_ENABLE_ZLIB or did not find zlib at build time
+# CHECK-NEXT: 0x00000000 01000000 00000000 01000000 00000000 ................
+# CHECK-NEXT: 0x00000010 01000000 00000000 789c6304 00000200 ........x.c.....
+# CHECK-NEXT: 0x00000020 02 .
+
+--- !ELF
+FileHeader:
+ Class: ELFCLASS64
+ Data: ELFDATA2LSB
+ Type: ET_REL
+Sections:
+ - Name: .a
+ Type: SHT_PROGBITS
+ Flags: [SHF_COMPRESSED]
+ Content: 010000000000000001000000000000000100000000000000789c63040000020002
+ - Name: .b
+ Type: SHT_PROGBITS
+ Flags: [SHF_COMPRESSED]
+ Content: 010000000000000001000000000000000100000000000000789c63040000020002
diff --git a/llvm/test/tools/llvm-readobj/ELF/decompress-zlib.test b/llvm/test/tools/llvm-readobj/ELF/decompress-zlib.test
new file mode 100644
index 000000000000..ea7a8854eb1a
--- /dev/null
+++ b/llvm/test/tools/llvm-readobj/ELF/decompress-zlib.test
@@ -0,0 +1,76 @@
+# REQUIRES: zlib
+## Test --decompress/-z.
+
+# RUN: yaml2obj %s -o %t
+
+# RUN: llvm-readelf -z -x .strings -x .not_null_terminated %t | FileCheck %s --check-prefix=HEX
+# RUN: llvm-readobj --decompress -p .strings -p .not_null_terminated %t | FileCheck %s --check-prefix=STR
+
+# HEX: Hex dump of section '.strings':
+# HEX-NEXT: 0x00000000 68657265 00617265 00736f6d 65007374 here.are.some.st
+# HEX-NEXT: 0x00000010 72696e67 7300 rings.
+# HEX: Hex dump of section '.not_null_terminated':
+# HEX-NEXT: 0x00000000 6e6f006e 756c6c no.null
+
+# STR: String dump of section '.strings':
+# STR-NEXT: [ 0] here
+# STR-NEXT: [ 5] are
+# STR-NEXT: [ 9] some
+# STR-NEXT: [ e] strings
+# STR-EMPTY:
+# STR-NEXT: String dump of section '.not_null_terminated':
+# STR-NEXT: [ 0] no
+# STR-NEXT: [ 3] null{{$}}
+# STR-NOT: {{.}}
+
+# RUN: llvm-readobj -x .strings -p .not_null_terminated %t | FileCheck %s --check-prefix=COMPRESSED
+
+# COMPRESSED: String dump of section '.not_null_terminated':
+# COMPRESSED-NEXT: [ 0] no
+# COMPRESSED-NEXT: [ 3] null
+# COMPRESSED-NEXT: Hex dump of section '.strings':
+# COMPRESSED-NEXT: 0x00000000 01000000 00000000 16000000 00000000 ................
+# COMPRESSED-NEXT: 0x00000010 00000000 00000000 789ccb48 2d4a6548 ........x..H-JeH
+# COMPRESSED-NEXT: 0x00000020 04e2e2fc 5c205152 9499975e cc000058 ....\ QR...^...X
+# COMPRESSED-NEXT: 0x00000030 2e079b ...
+
+# RUN: llvm-readelf -z -p .invalid1 -x .invalid2 -x .invalid3 %t 2>&1 | FileCheck %s -DFILE=%t --check-prefix=INVALID
+
+# INVALID: String dump of section '.invalid1':
+# INVALID-NEXT: warning: '[[FILE]]': corrupted compressed section header
+# INVALID-NEXT: [ 0] .
+# INVALID-NEXT: Hex dump of section '.invalid2':
+# INVALID-NEXT: warning: '[[FILE]]': zlib error: Z_DATA_ERROR
+# INVALID-NEXT: 0x00000000 01000000 00000000 16000000 00000000 ................
+# INVALID-NEXT: 0x00000010 00000000 00000000 78 ........x
+# INVALID-EMPTY:
+# INVALID-NEXT: Hex dump of section '.invalid3':
+# INVALID-NEXT: warning: '[[FILE]]': unsupported compression type (3)
+# INVALID-NEXT: 0x00000000 03000000 00000000 04000000 00000000 ................
+# INVALID-NEXT: 0x00000010 00000000 00000000 789c6360 ........x.c`
+
+--- !ELF
+FileHeader:
+ Class: ELFCLASS64
+ Data: ELFDATA2LSB
+ Type: ET_REL
+Sections:
+ - Name: .strings
+ Type: SHT_PROGBITS
+ Flags: [SHF_COMPRESSED]
+ Content: 010000000000000016000000000000000000000000000000789ccb482d4a654804e2e2fc5c2051529499975ecc0000582e079b
+ - Name: .not_null_terminated
+ Type: SHT_PROGBITS
+ Content: 6e6f006e756c6c
+ - Name: .invalid1
+ Type: SHT_PROGBITS
+ Flags: [SHF_COMPRESSED]
+ Content: 01
+ - Name: .invalid2
+ Type: SHT_PROGBITS
+ Flags: [SHF_COMPRESSED]
+ Content: 01000000000000001600000000000000000000000000000078
+ - Name: .invalid3
+ Type: SHT_PROGBITS
+ Flags: [SHF_COMPRESSED]
+ Content: 030000000000000004000000000000000000000000000000789c6360
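For reference, each Content blob in these tests is a hand-assembled SHF_COMPRESSED section: a little-endian, 64-bit compression header followed by the raw zlib (or zstd) stream. A rough sketch of that 24-byte header, following the ELF gABI layout (field names as in Elf64_Chdr; the struct name here is illustrative):

#include <cstdint>

// Header encoded at the start of the Content fields above; the compressed
// stream (e.g. 789c... for zlib) follows immediately after it.
struct Chdr64Sketch {
  uint32_t ch_type;      // 1 = zlib, 2 = zstd; the 3 in .invalid3 triggers the
                         // "unsupported compression type" warning
  uint32_t ch_reserved;  // unused padding in the 64-bit format
  uint64_t ch_size;      // decompressed size, e.g. 0x16 for '.strings'
  uint64_t ch_addralign; // alignment of the decompressed data
};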
diff --git a/llvm/test/tools/llvm-readobj/ELF/decompress-zstd-unsupported.test b/llvm/test/tools/llvm-readobj/ELF/decompress-zstd-unsupported.test
new file mode 100644
index 000000000000..65da952687f5
--- /dev/null
+++ b/llvm/test/tools/llvm-readobj/ELF/decompress-zstd-unsupported.test
@@ -0,0 +1,31 @@
+# UNSUPPORTED: zstd
+# RUN: yaml2obj %s -o %t
+# RUN: llvm-readobj -z -p .a -x .b %t 2>&1 | FileCheck %s -DFILE=%t
+
+# CHECK: String dump of section '.a':
+# CHECK-NEXT: warning: '[[FILE]]': LLVM was not built with LLVM_ENABLE_ZSTD or did not find zstd at build time
+# CHECK-NEXT: [ 0] .
+# CHECK-NEXT: [ 8] .
+# CHECK-NEXT: [ 10] .
+# CHECK-NEXT: [ 18] (./. ..
+# CHECK-NEXT: [ 21] .
+# CHECK-NEXT: Hex dump of section '.b':
+# CHECK-NEXT: warning: '[[FILE]]': LLVM was not built with LLVM_ENABLE_ZSTD or did not find zstd at build time
+# CHECK-NEXT: 0x00000000 02000000 00000000 01000000 00000000 ................
+# CHECK-NEXT: 0x00000010 01000000 00000000 28b52ffd 20010900 ........(./. ...
+# CHECK-NEXT: 0x00000020 0001 ..
+
+--- !ELF
+FileHeader:
+ Class: ELFCLASS64
+ Data: ELFDATA2LSB
+ Type: ET_REL
+Sections:
+ - Name: .a
+ Type: SHT_PROGBITS
+ Flags: [SHF_COMPRESSED]
+ Content: 02000000000000000100000000000000010000000000000028b52ffd200109000001
+ - Name: .b
+ Type: SHT_PROGBITS
+ Flags: [SHF_COMPRESSED]
+ Content: 02000000000000000100000000000000010000000000000028b52ffd200109000001
diff --git a/llvm/test/tools/llvm-readobj/ELF/decompress-zstd.test b/llvm/test/tools/llvm-readobj/ELF/decompress-zstd.test
new file mode 100644
index 000000000000..519db879b18c
--- /dev/null
+++ b/llvm/test/tools/llvm-readobj/ELF/decompress-zstd.test
@@ -0,0 +1,28 @@
+# REQUIRES: zstd
+## Test --decompress/-z for zstd.
+
+# RUN: yaml2obj %s -o %t
+
+# RUN: llvm-readelf -z -x .strings %t | FileCheck %s --check-prefix=HEX
+# RUN: llvm-readobj --decompress -p .strings %t | FileCheck %s --check-prefix=STR
+
+# HEX: Hex dump of section '.strings':
+# HEX-NEXT: 0x00000000 68657265 00617265 00736f6d 65007374 here.are.some.st
+# HEX-NEXT: 0x00000010 72696e67 7300 rings.
+
+# STR: String dump of section '.strings':
+# STR-NEXT: [ 0] here
+# STR-NEXT: [ 5] are
+# STR-NEXT: [ 9] some
+# STR-NEXT: [ e] strings
+
+--- !ELF
+FileHeader:
+ Class: ELFCLASS64
+ Data: ELFDATA2LSB
+ Type: ET_REL
+Sections:
+ - Name: .strings
+ Type: SHT_PROGBITS
+ Flags: [SHF_COMPRESSED]
+ Content: 02000000000000001600000000000000000000000000000028b52ffd2016b10000686572650061726500736f6d6500737472696e677300
diff --git a/llvm/test/tools/llvm-symbolizer/wasm-basic.s b/llvm/test/tools/llvm-symbolizer/wasm-basic.s
index cc189abcfca8..1f425e525931 100644
--- a/llvm/test/tools/llvm-symbolizer/wasm-basic.s
+++ b/llvm/test/tools/llvm-symbolizer/wasm-basic.s
@@ -1,24 +1,59 @@
# REQUIRES: webassembly-registered-target
# RUN: llvm-mc -triple=wasm32-unknown-unknown -filetype=obj %s -o %t.o -g
+# RUN: llvm-symbolizer --basenames --output-style=GNU -e %t.o 1 2 3 4 5 6 7 8 9 10 11 12 13 | FileCheck %s
foo:
.functype foo () -> ()
nop
+ return
end_function
bar:
.functype bar (i32) -> (i32)
local.get 0
+ nop
return
end_function
-# RUN: llvm-symbolizer -e %t.o 3 4 7 8 | FileCheck %s
-## Byte 1 is the function length and 2 is the locals declaration.
-## Currently no line corresponds to them.
-## TODO: create a loc for .functype?
-## Test 2 functions to ensure wasm's function-sections system works.
-# CHECK: wasm-basic.s:6:0
-# CHECK: wasm-basic.s:7:0
-# CHECK: wasm-basic.s:11:0
-# CHECK: wasm-basic.s:11:0
+## Symbols start at (and include) the function length byte and should cover
+## all the way to the next symbol's start.
+## TODO: create a loc for .functype? It could go with the local declarations.
+
+## Byte 1 is the function length; it has no loc, but the symbol table considers
+## it the start of the function.
+# CHECK: foo
+# CHECK-NEXT: ??:0
+## Byte 2 is the local declaration, but for some reason DWARF is marking it as line 7.
+## TODO: figure out why.
+# CHECK-NEXT: foo
+# CHECK-NEXT: wasm-basic.s:7
+## Byte 3 is actually the nop, line 7
+# CHECK-NEXT: foo
+# CHECK-NEXT: wasm-basic.s:7
+## Byte 4 is the return, line 8
+# CHECK-NEXT: foo
+# CHECK-NEXT: wasm-basic.s:8
+## Byte 5 is the end_function, line 9
+# CHECK-NEXT: foo
+# CHECK-NEXT: wasm-basic.s:9
+## Byte 6 is bar's function length; the symbol table considers it part of bar
+# CHECK-NEXT: bar
+# CHECK-NEXT: ??:0
+## Byte 7 is bar's local declaration, but DWARF marks it as line 13, as above
+# CHECK-NEXT: bar
+# CHECK-NEXT: wasm-basic.s:13
+## Bytes 8 and 9 are actually the local.get on line 13
+# CHECK-NEXT: bar
+# CHECK-NEXT: wasm-basic.s:13
+# CHECK-NEXT: bar
+# CHECK-NEXT: wasm-basic.s:13
+## Byte 10 is the nop
+# CHECK-NEXT: bar
+# CHECK-NEXT: wasm-basic.s:14
+## Byte 11 is the return
+# CHECK-NEXT: bar
+# CHECK-NEXT: wasm-basic.s:15
+## Byte 12 is the end_function
+# CHECK-NEXT: bar
+# CHECK-NEXT: wasm-basic.s:16
diff --git a/llvm/tools/llc/NewPMDriver.cpp b/llvm/tools/llc/NewPMDriver.cpp
index c3288ef9d080..6ae1b8db5e11 100644
--- a/llvm/tools/llc/NewPMDriver.cpp
+++ b/llvm/tools/llc/NewPMDriver.cpp
@@ -89,30 +89,6 @@ bool LLCDiagnosticHandler::handleDiagnostics(const DiagnosticInfo &DI) {
static llvm::ExitOnError ExitOnErr;
-static void RunPasses(bool BOS, ToolOutputFile *Out, Module *M,
- LLVMContext &Context, SmallString<0> &Buffer,
- ModulePassManager *MPM, ModuleAnalysisManager *MAM,
- MachineFunctionPassManager &MFPM,
- MachineFunctionAnalysisManager &MFAM) {
- assert(M && "invalid input module!");
-
- // Before executing passes, print the final values of the LLVM options.
- cl::PrintOptionValues();
-
- if (MPM) {
- assert(MAM && "expect a ModuleAnalysisManager!");
- MPM->run(*M, *MAM);
- }
-
- ExitOnErr(MFPM.run(*M, MFAM));
-
- if (Context.getDiagHandlerPtr()->HasErrors)
- exit(1);
-
- if (BOS)
- Out->os() << Buffer;
-}
-
int llvm::compileModuleWithNewPM(
StringRef Arg0, std::unique_ptr<Module> M, std::unique_ptr<MIRParser> MIR,
std::unique_ptr<TargetMachine> Target, std::unique_ptr<ToolOutputFile> Out,
@@ -131,16 +107,6 @@ int llvm::compileModuleWithNewPM(
raw_pwrite_stream *OS = &Out->os();
- // Manually do the buffering rather than using buffer_ostream,
- // so we can memcmp the contents in CompileTwice mode in future.
- SmallString<0> Buffer;
- std::unique_ptr<raw_svector_ostream> BOS;
- if ((codegen::getFileType() != CodeGenFileType::AssemblyFile &&
- !Out->os().supportsSeeking())) {
- BOS = std::make_unique<raw_svector_ostream>(Buffer);
- OS = BOS.get();
- }
-
// Fetch options from TargetPassConfig
CGPassBuilderOption Opt = getCGPassBuilderOption();
Opt.DisableVerify = NoVerify;
@@ -158,20 +124,19 @@ int llvm::compileModuleWithNewPM(
FunctionAnalysisManager FAM;
CGSCCAnalysisManager CGAM;
ModuleAnalysisManager MAM;
+ MachineFunctionAnalysisManager MFAM;
PassBuilder PB(Target.get(), PipelineTuningOptions(), std::nullopt, &PIC);
PB.registerModuleAnalyses(MAM);
PB.registerCGSCCAnalyses(CGAM);
PB.registerFunctionAnalyses(FAM);
PB.registerLoopAnalyses(LAM);
- PB.crossRegisterProxies(LAM, FAM, CGAM, MAM);
+ PB.registerMachineFunctionAnalyses(MFAM);
+ PB.crossRegisterProxies(LAM, FAM, CGAM, MAM, &MFAM);
FAM.registerPass([&] { return TargetLibraryAnalysis(TLII); });
MAM.registerPass([&] { return MachineModuleAnalysis(MMI); });
- MachineFunctionAnalysisManager MFAM(FAM, MAM);
-
ModulePassManager MPM;
- MachineFunctionPassManager MFPM;
if (!PassPipeline.empty()) {
// Construct a custom pass pipeline that starts after instruction
@@ -182,49 +147,39 @@ int llvm::compileModuleWithNewPM(
return 1;
}
- ExitOnErr(PB.parsePassPipeline(MFPM, PassPipeline));
+ // FIXME: verify that there are no IR passes.
+ ExitOnErr(PB.parsePassPipeline(MPM, PassPipeline));
MPM.addPass(PrintMIRPreparePass(*OS));
+ MachineFunctionPassManager MFPM;
MFPM.addPass(PrintMIRPass(*OS));
MFPM.addPass(FreeMachineFunctionPass());
+ MPM.addPass(createModuleToMachineFunctionPassAdaptor(std::move(MFPM)));
- auto &MMI = MFAM.getResult<MachineModuleAnalysis>(*M).getMMI();
if (MIR->parseMachineFunctions(*M, MMI))
return 1;
} else {
- ExitOnErr(LLVMTM.buildCodeGenPipeline(MPM, MFPM, MFAM, *OS,
- DwoOut ? &DwoOut->os() : nullptr,
- FileType, Opt, &PIC));
-
- auto StartStopInfo = TargetPassConfig::getStartStopInfo(PIC);
- assert(StartStopInfo && "Expect StartStopInfo!");
-
- if (auto StopPassName = StartStopInfo->StopPass; !StopPassName.empty()) {
- MFPM.addPass(PrintMIRPass(*OS));
- MFPM.addPass(FreeMachineFunctionPass());
- }
+ ExitOnErr(LLVMTM.buildCodeGenPipeline(
+ MPM, *OS, DwoOut ? &DwoOut->os() : nullptr, FileType, Opt, &PIC));
}
if (PrintPipelinePasses) {
- std::string IRPipeline;
- raw_string_ostream IRSOS(IRPipeline);
- MPM.printPipeline(IRSOS, [&PIC](StringRef ClassName) {
- auto PassName = PIC.getPassNameForClassName(ClassName);
- return PassName.empty() ? ClassName : PassName;
- });
- outs() << "IR pipeline: " << IRPipeline << '\n';
-
- std::string MIRPipeline;
- raw_string_ostream MIRSOS(MIRPipeline);
- MFPM.printPipeline(MIRSOS, [&PIC](StringRef ClassName) {
+ std::string PipelineStr;
+ raw_string_ostream OS(PipelineStr);
+ MPM.printPipeline(OS, [&PIC](StringRef ClassName) {
auto PassName = PIC.getPassNameForClassName(ClassName);
return PassName.empty() ? ClassName : PassName;
});
- outs() << "MIR pipeline: " << MIRPipeline << '\n';
+ outs() << PipelineStr << '\n';
return 0;
}
- RunPasses(BOS.get(), Out.get(), M.get(), Context, Buffer, &MPM, &MAM, MFPM,
- MFAM);
+ // Before executing passes, print the final values of the LLVM options.
+ cl::PrintOptionValues();
+
+ MPM.run(*M, MAM);
+
+ if (Context.getDiagHandlerPtr()->HasErrors)
+ exit(1);
// Declare success.
Out->keep();
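A rough sketch, assuming the headers NewPMDriver.cpp already includes, of the wiring this change moves llc to: a single ModulePassManager with machine function passes nested through createModuleToMachineFunctionPassAdaptor, instead of a separately run top-level MachineFunctionPassManager. The analysis managers are assumed to be registered and cross-wired as in the hunk above (registerMachineFunctionAnalyses(MFAM) plus crossRegisterProxies(..., &MFAM)).

// Illustrative fragment only; M, MAM, and OS come from the surrounding driver.
static void runCodegenSketch(Module &M, ModuleAnalysisManager &MAM,
                             raw_pwrite_stream &OS) {
  MachineFunctionPassManager MFPM;
  MFPM.addPass(PrintMIRPass(OS));
  MFPM.addPass(FreeMachineFunctionPass());

  ModulePassManager MPM;
  MPM.addPass(PrintMIRPreparePass(OS));
  MPM.addPass(createModuleToMachineFunctionPassAdaptor(std::move(MFPM)));

  // Errors now surface through the context's diagnostic handler rather than
  // an Error returned from a MachineFunctionPassManager::run call.
  MPM.run(M, MAM);
}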
diff --git a/llvm/tools/llvm-gsymutil/Opts.td b/llvm/tools/llvm-gsymutil/Opts.td
index 740291479f93..3aabc8029ccb 100644
--- a/llvm/tools/llvm-gsymutil/Opts.td
+++ b/llvm/tools/llvm-gsymutil/Opts.td
@@ -35,3 +35,6 @@ defm address : Eq<"address", "Lookup an address in a GSYM file">;
def addresses_from_stdin :
FF<"addresses-from-stdin",
"Lookup addresses in a GSYM file that are read from stdin\nEach input line is expected to be of the following format: <addr> <gsym-path>">;
+defm json_summary_file :
+ Eq<"json-summary-file",
+ "Output a categorized summary of errors into the JSON file specified.">;
diff --git a/llvm/tools/llvm-gsymutil/llvm-gsymutil.cpp b/llvm/tools/llvm-gsymutil/llvm-gsymutil.cpp
index 2de9c76fd68c..00a24cdb33fe 100644
--- a/llvm/tools/llvm-gsymutil/llvm-gsymutil.cpp
+++ b/llvm/tools/llvm-gsymutil/llvm-gsymutil.cpp
@@ -18,6 +18,7 @@
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/Format.h"
+#include "llvm/Support/JSON.h"
#include "llvm/Support/LLVMDriver.h"
#include "llvm/Support/ManagedStatic.h"
#include "llvm/Support/MemoryBuffer.h"
@@ -87,6 +88,7 @@ static std::vector<std::string> InputFilenames;
static std::string ConvertFilename;
static std::vector<std::string> ArchFilters;
static std::string OutputFilename;
+static std::string JsonSummaryFile;
static bool Verify;
static unsigned NumThreads;
static uint64_t SegmentSize;
@@ -138,6 +140,9 @@ static void parseArgs(int argc, char **argv) {
if (const llvm::opt::Arg *A = Args.getLastArg(OPT_out_file_EQ))
OutputFilename = A->getValue();
+ if (const llvm::opt::Arg *A = Args.getLastArg(OPT_json_summary_file_EQ))
+ JsonSummaryFile = A->getValue();
+
Verify = Args.hasArg(OPT_verify);
if (const llvm::opt::Arg *A = Args.getLastArg(OPT_num_threads_EQ)) {
@@ -515,10 +520,34 @@ int llvm_gsymutil_main(int argc, char **argv, const llvm::ToolContext &) {
// Call error() if we have an error and it will exit with a status of 1
if (auto Err = convertFileToGSYM(Aggregation))
error("DWARF conversion failed: ", std::move(Err));
+
// Report the errors from aggregator:
Aggregation.EnumerateResults([&](StringRef category, unsigned count) {
OS << category << " occurred " << count << " time(s)\n";
});
+ if (!JsonSummaryFile.empty()) {
+ std::error_code EC;
+ raw_fd_ostream JsonStream(JsonSummaryFile, EC, sys::fs::OF_Text);
+ if (EC) {
+ OS << "error opening aggregate error json file '" << JsonSummaryFile
+ << "' for writing: " << EC.message() << '\n';
+ return 1;
+ }
+
+ llvm::json::Object Categories;
+ uint64_t ErrorCount = 0;
+ Aggregation.EnumerateResults([&](StringRef Category, unsigned Count) {
+ llvm::json::Object Val;
+ Val.try_emplace("count", Count);
+ Categories.try_emplace(Category, std::move(Val));
+ ErrorCount += Count;
+ });
+ llvm::json::Object RootNode;
+ RootNode.try_emplace("error-categories", std::move(Categories));
+ RootNode.try_emplace("error-count", ErrorCount);
+
+ JsonStream << llvm::json::Value(std::move(RootNode));
+ }
return 0;
}
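With the new --json-summary-file=<path> option, the summary written for a run like the one above would look roughly like this (category names and counts are illustrative): {"error-categories": {"<category>": {"count": 2}}, "error-count": 2}. Each aggregated category becomes one entry under "error-categories" and "error-count" carries the total, mirroring the try_emplace calls above.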
diff --git a/llvm/tools/llvm-link/llvm-link.cpp b/llvm/tools/llvm-link/llvm-link.cpp
index e6c219a8cd7e..9e7f2c3ebac4 100644
--- a/llvm/tools/llvm-link/llvm-link.cpp
+++ b/llvm/tools/llvm-link/llvm-link.cpp
@@ -393,8 +393,16 @@ static bool linkFiles(const char *argv0, LLVMContext &Context, Linker &L,
// Similar to some flags, internalization doesn't apply to the first file.
bool InternalizeLinkedSymbols = false;
for (const auto &File : Files) {
+ auto BufferOrErr = MemoryBuffer::getFileOrSTDIN(File);
+
+ // When we encounter a missing file, make sure we expose its name.
+ if (auto EC = BufferOrErr.getError())
+ if (EC == std::errc::no_such_file_or_directory)
+ ExitOnErr(createStringError(EC, "No such file or directory: '%s'",
+ File.c_str()));
+
std::unique_ptr<MemoryBuffer> Buffer =
- ExitOnErr(errorOrToExpected(MemoryBuffer::getFileOrSTDIN(File)));
+ ExitOnErr(errorOrToExpected(std::move(BufferOrErr)));
std::unique_ptr<Module> M =
identify_magic(Buffer->getBuffer()) == file_magic::archive
diff --git a/llvm/tools/llvm-readobj/ObjDumper.cpp b/llvm/tools/llvm-readobj/ObjDumper.cpp
index 59060ac217e3..0d3fea71aafd 100644
--- a/llvm/tools/llvm-readobj/ObjDumper.cpp
+++ b/llvm/tools/llvm-readobj/ObjDumper.cpp
@@ -14,6 +14,7 @@
#include "ObjDumper.h"
#include "llvm-readobj.h"
#include "llvm/Object/Archive.h"
+#include "llvm/Object/Decompressor.h"
#include "llvm/Object/ObjectFile.h"
#include "llvm/Support/Error.h"
#include "llvm/Support/FormatVariadic.h"
@@ -142,8 +143,23 @@ getSectionRefsByNameOrIndex(const object::ObjectFile &Obj,
return Ret;
}
+static void maybeDecompress(const object::ObjectFile &Obj,
+ StringRef SectionName, StringRef &SectionContent,
+ SmallString<0> &Out) {
+ Expected<object::Decompressor> Decompressor = object::Decompressor::create(
+ SectionName, SectionContent, Obj.isLittleEndian(), Obj.is64Bit());
+ if (!Decompressor)
+ reportWarning(Decompressor.takeError(), Obj.getFileName());
+ else if (auto Err = Decompressor->resizeAndDecompress(Out))
+ reportWarning(std::move(Err), Obj.getFileName());
+ else
+ SectionContent = Out;
+}
+
void ObjDumper::printSectionsAsString(const object::ObjectFile &Obj,
- ArrayRef<std::string> Sections) {
+ ArrayRef<std::string> Sections,
+ bool Decompress) {
+ SmallString<0> Out;
bool First = true;
for (object::SectionRef Section :
getSectionRefsByNameOrIndex(Obj, Sections)) {
@@ -156,12 +172,16 @@ void ObjDumper::printSectionsAsString(const object::ObjectFile &Obj,
StringRef SectionContent =
unwrapOrError(Obj.getFileName(), Section.getContents());
+ if (Decompress && Section.isCompressed())
+ maybeDecompress(Obj, SectionName, SectionContent, Out);
printAsStringList(SectionContent);
}
}
void ObjDumper::printSectionsAsHex(const object::ObjectFile &Obj,
- ArrayRef<std::string> Sections) {
+ ArrayRef<std::string> Sections,
+ bool Decompress) {
+ SmallString<0> Out;
bool First = true;
for (object::SectionRef Section :
getSectionRefsByNameOrIndex(Obj, Sections)) {
@@ -174,6 +194,8 @@ void ObjDumper::printSectionsAsHex(const object::ObjectFile &Obj,
StringRef SectionContent =
unwrapOrError(Obj.getFileName(), Section.getContents());
+ if (Decompress && Section.isCompressed())
+ maybeDecompress(Obj, SectionName, SectionContent, Out);
const uint8_t *SecContent = SectionContent.bytes_begin();
const uint8_t *SecEnd = SecContent + SectionContent.size();
diff --git a/llvm/tools/llvm-readobj/ObjDumper.h b/llvm/tools/llvm-readobj/ObjDumper.h
index 1d679453581b..3958dd3a3333 100644
--- a/llvm/tools/llvm-readobj/ObjDumper.h
+++ b/llvm/tools/llvm-readobj/ObjDumper.h
@@ -175,9 +175,9 @@ public:
void printAsStringList(StringRef StringContent, size_t StringDataOffset = 0);
void printSectionsAsString(const object::ObjectFile &Obj,
- ArrayRef<std::string> Sections);
+ ArrayRef<std::string> Sections, bool Decompress);
void printSectionsAsHex(const object::ObjectFile &Obj,
- ArrayRef<std::string> Sections);
+ ArrayRef<std::string> Sections, bool Decompress);
std::function<Error(const Twine &Msg)> WarningHandler;
void reportUniqueWarning(Error Err) const;
diff --git a/llvm/tools/llvm-readobj/Opts.td b/llvm/tools/llvm-readobj/Opts.td
index e2d93c6ec229..018facc278e8 100644
--- a/llvm/tools/llvm-readobj/Opts.td
+++ b/llvm/tools/llvm-readobj/Opts.td
@@ -20,6 +20,7 @@ def all : FF<"all", "Equivalent to setting: --file-header, --program-headers, --
def arch_specific : FF<"arch-specific", "Display architecture-specific information">;
def bb_addr_map : FF<"bb-addr-map", "Display the BB address map section">;
def cg_profile : FF<"cg-profile", "Display call graph profile section">;
+def decompress : FF<"decompress", "Dump decompressed section content when used with -x or -p">;
defm demangle : BB<"demangle", "Demangle symbol names", "Do not demangle symbol names (default)">;
def dependent_libraries : FF<"dependent-libraries", "Display the dependent libraries section">;
def dyn_relocations : FF<"dyn-relocations", "Display the dynamic relocation entries in the file">;
@@ -139,3 +140,4 @@ def : F<"u", "Alias for --unwind">, Alias<unwind>;
def : F<"X", "Alias for --extra-sym-info">, Alias<extra_sym_info>, Group<grp_elf>;
def : F<"V", "Alias for --version-info">, Alias<version_info>, Group<grp_elf>;
def : JoinedOrSeparate<["-"], "x">, Alias<hex_dump_EQ>, HelpText<"Alias for --hex-dump">, MetaVarName<"<name or index>">;
+def : F<"z", "Alias for --decompress">, Alias<decompress>;
diff --git a/llvm/tools/llvm-readobj/llvm-readobj.cpp b/llvm/tools/llvm-readobj/llvm-readobj.cpp
index f9d605d35244..979433d69011 100644
--- a/llvm/tools/llvm-readobj/llvm-readobj.cpp
+++ b/llvm/tools/llvm-readobj/llvm-readobj.cpp
@@ -97,6 +97,7 @@ static bool ArchSpecificInfo;
static bool BBAddrMap;
bool ExpandRelocs;
static bool CGProfile;
+static bool Decompress;
bool Demangle;
static bool DependentLibraries;
static bool DynRelocs;
@@ -212,6 +213,7 @@ static void parseOptions(const opt::InputArgList &Args) {
opts::ArchSpecificInfo = Args.hasArg(OPT_arch_specific);
opts::BBAddrMap = Args.hasArg(OPT_bb_addr_map);
opts::CGProfile = Args.hasArg(OPT_cg_profile);
+ opts::Decompress = Args.hasArg(OPT_decompress);
opts::Demangle = Args.hasFlag(OPT_demangle, OPT_no_demangle, false);
opts::DependentLibraries = Args.hasArg(OPT_dependent_libraries);
opts::DynRelocs = Args.hasArg(OPT_dyn_relocations);
@@ -439,9 +441,9 @@ static void dumpObject(ObjectFile &Obj, ScopedPrinter &Writer,
Dumper->printSymbols(opts::Symbols, opts::DynamicSymbols,
opts::ExtraSymInfo, SymComp);
if (!opts::StringDump.empty())
- Dumper->printSectionsAsString(Obj, opts::StringDump);
+ Dumper->printSectionsAsString(Obj, opts::StringDump, opts::Decompress);
if (!opts::HexDump.empty())
- Dumper->printSectionsAsHex(Obj, opts::HexDump);
+ Dumper->printSectionsAsHex(Obj, opts::HexDump, opts::Decompress);
if (opts::HashTable)
Dumper->printHashTable();
if (opts::GnuHashTable)
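Together with the Opts.td and ObjDumper changes above, this wires the new flag end to end: "llvm-readobj --decompress -p <section>" (or "llvm-readelf -z -x <section>") now decompresses SHF_COMPRESSED sections before dumping them, and falls back to the raw, still-compressed bytes with a warning when decompression is unavailable or fails, as the tests earlier in this patch check.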
diff --git a/llvm/unittests/Analysis/ValueTrackingTest.cpp b/llvm/unittests/Analysis/ValueTrackingTest.cpp
index 8104a32909ea..9e0abe7a16df 100644
--- a/llvm/unittests/Analysis/ValueTrackingTest.cpp
+++ b/llvm/unittests/Analysis/ValueTrackingTest.cpp
@@ -824,42 +824,7 @@ TEST_F(ValueTrackingTest, ComputeNumSignBits_Shuffle_Pointers) {
TEST(ValueTracking, propagatesPoison) {
std::string AsmHead =
"declare i32 @g(i32)\n"
- "declare {i32, i1} @llvm.sadd.with.overflow.i32(i32 %a, i32 %b)\n"
- "declare {i32, i1} @llvm.ssub.with.overflow.i32(i32 %a, i32 %b)\n"
- "declare {i32, i1} @llvm.smul.with.overflow.i32(i32 %a, i32 %b)\n"
- "declare {i32, i1} @llvm.uadd.with.overflow.i32(i32 %a, i32 %b)\n"
- "declare {i32, i1} @llvm.usub.with.overflow.i32(i32 %a, i32 %b)\n"
- "declare {i32, i1} @llvm.umul.with.overflow.i32(i32 %a, i32 %b)\n"
- "declare float @llvm.sqrt.f32(float)\n"
- "declare float @llvm.powi.f32.i32(float, i32)\n"
- "declare float @llvm.sin.f32(float)\n"
- "declare float @llvm.cos.f32(float)\n"
- "declare float @llvm.pow.f32(float, float)\n"
- "declare float @llvm.exp.f32(float)\n"
- "declare float @llvm.exp2.f32(float)\n"
- "declare float @llvm.log.f32(float)\n"
- "declare float @llvm.log10.f32(float)\n"
- "declare float @llvm.log2.f32(float)\n"
- "declare float @llvm.fma.f32(float, float, float)\n"
- "declare float @llvm.fabs.f32(float)\n"
- "declare float @llvm.minnum.f32(float, float)\n"
- "declare float @llvm.maxnum.f32(float, float)\n"
- "declare float @llvm.minimum.f32(float, float)\n"
- "declare float @llvm.maximum.f32(float, float)\n"
- "declare float @llvm.copysign.f32(float, float)\n"
- "declare float @llvm.floor.f32(float)\n"
- "declare float @llvm.ceil.f32(float)\n"
- "declare float @llvm.trunc.f32(float)\n"
- "declare float @llvm.rint.f32(float)\n"
- "declare float @llvm.nearbyint.f32(float)\n"
- "declare float @llvm.round.f32(float)\n"
- "declare float @llvm.roundeven.f32(float)\n"
- "declare i32 @llvm.lround.f32(float)\n"
- "declare i64 @llvm.llround.f32(float)\n"
- "declare i32 @llvm.lrint.f32(float)\n"
- "declare i64 @llvm.llrint.f32(float)\n"
- "declare float @llvm.fmuladd.f32(float, float, float)\n"
- "define void @f(i32 %x, i32 %y, float %fx, float %fy, "
+ "define void @f(i32 %x, i32 %y, i32 %shamt, float %fx, float %fy, "
"i1 %cond, ptr %p) {\n";
std::string AsmTail = " ret void\n}";
// (propagates poison?, IR instruction)
@@ -912,6 +877,28 @@ TEST(ValueTracking, propagatesPoison) {
{true, "call {i32, i1} @llvm.uadd.with.overflow.i32(i32 %x, i32 %y)", 0},
{true, "call {i32, i1} @llvm.usub.with.overflow.i32(i32 %x, i32 %y)", 0},
{true, "call {i32, i1} @llvm.umul.with.overflow.i32(i32 %x, i32 %y)", 0},
+ {true, "call i32 @llvm.sadd.sat.i32(i32 %x, i32 %y)", 0},
+ {true, "call i32 @llvm.ssub.sat.i32(i32 %x, i32 %y)", 0},
+ {true, "call i32 @llvm.sshl.sat.i32(i32 %x, i32 %y)", 0},
+ {true, "call i32 @llvm.uadd.sat.i32(i32 %x, i32 %y)", 0},
+ {true, "call i32 @llvm.usub.sat.i32(i32 %x, i32 %y)", 0},
+ {true, "call i32 @llvm.ushl.sat.i32(i32 %x, i32 %y)", 0},
+ {true, "call i32 @llvm.ctpop.i32(i32 %x)", 0},
+ {true, "call i32 @llvm.ctlz.i32(i32 %x, i1 true)", 0},
+ {true, "call i32 @llvm.cttz.i32(i32 %x, i1 true)", 0},
+ {true, "call i32 @llvm.abs.i32(i32 %x, i1 true)", 0},
+ {true, "call i32 @llvm.smax.i32(i32 %x, i32 %y)", 0},
+ {true, "call i32 @llvm.smin.i32(i32 %x, i32 %y)", 0},
+ {true, "call i32 @llvm.umax.i32(i32 %x, i32 %y)", 0},
+ {true, "call i32 @llvm.umin.i32(i32 %x, i32 %y)", 0},
+ {true, "call i32 @llvm.bitreverse.i32(i32 %x)", 0},
+ {true, "call i32 @llvm.bswap.i32(i32 %x)", 0},
+ {false, "call i32 @llvm.fshl.i32(i32 %x, i32 %y, i32 %shamt)", 0},
+ {false, "call i32 @llvm.fshl.i32(i32 %x, i32 %y, i32 %shamt)", 1},
+ {false, "call i32 @llvm.fshl.i32(i32 %x, i32 %y, i32 %shamt)", 2},
+ {false, "call i32 @llvm.fshr.i32(i32 %x, i32 %y, i32 %shamt)", 0},
+ {false, "call i32 @llvm.fshr.i32(i32 %x, i32 %y, i32 %shamt)", 1},
+ {false, "call i32 @llvm.fshr.i32(i32 %x, i32 %y, i32 %shamt)", 2},
{false, "call float @llvm.sqrt.f32(float %fx)", 0},
{false, "call float @llvm.powi.f32.i32(float %fx, i32 %x)", 0},
{false, "call float @llvm.sin.f32(float %fx)", 0},
diff --git a/llvm/unittests/CodeGen/CMakeLists.txt b/llvm/unittests/CodeGen/CMakeLists.txt
index 6140e0d6fb37..dbbacdd95ec9 100644
--- a/llvm/unittests/CodeGen/CMakeLists.txt
+++ b/llvm/unittests/CodeGen/CMakeLists.txt
@@ -40,6 +40,7 @@ add_llvm_unittest(CodeGenTests
ScalableVectorMVTsTest.cpp
SchedBoundary.cpp
SelectionDAGAddressAnalysisTest.cpp
+ SelectionDAGPatternMatchTest.cpp
TypeTraitsTest.cpp
TargetOptionsTest.cpp
TestAsmPrinter.cpp
diff --git a/llvm/unittests/CodeGen/PassManagerTest.cpp b/llvm/unittests/CodeGen/PassManagerTest.cpp
index 28003c2f4b3f..4283eb01a9c8 100644
--- a/llvm/unittests/CodeGen/PassManagerTest.cpp
+++ b/llvm/unittests/CodeGen/PassManagerTest.cpp
@@ -5,13 +5,18 @@
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
+// Test that the various MachineFunction pass managers, adaptors, analyses, and
+// analysis managers work.
+//===----------------------------------------------------------------------===//
+#include "llvm/IR/PassManager.h"
#include "llvm/Analysis/CGSCCPassManager.h"
#include "llvm/Analysis/LoopAnalysisManager.h"
#include "llvm/AsmParser/Parser.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineModuleInfo.h"
#include "llvm/CodeGen/MachinePassManager.h"
+#include "llvm/IR/Analysis.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Module.h"
#include "llvm/MC/TargetRegistry.h"
@@ -34,14 +39,9 @@ public:
int InstructionCount;
};
- /// Run the analysis pass over the function and return a result.
+ /// The number of instructions in the Function.
Result run(Function &F, FunctionAnalysisManager &AM) {
- int Count = 0;
- for (Function::iterator BBI = F.begin(), BBE = F.end(); BBI != BBE; ++BBI)
- for (BasicBlock::iterator II = BBI->begin(), IE = BBI->end(); II != IE;
- ++II)
- ++Count;
- return Result(Count);
+ return Result(F.getInstructionCount());
}
private:
@@ -59,13 +59,12 @@ public:
int InstructionCount;
};
- /// Run the analysis pass over the machine function and return a result.
- Result run(MachineFunction &MF, MachineFunctionAnalysisManager::Base &AM) {
- auto &MFAM = static_cast<MachineFunctionAnalysisManager &>(AM);
- // Query function analysis result.
+ Result run(MachineFunction &MF, MachineFunctionAnalysisManager &AM) {
+ FunctionAnalysisManager &FAM =
+ AM.getResult<FunctionAnalysisManagerMachineFunctionProxy>(MF)
+ .getManager();
TestFunctionAnalysis::Result &FAR =
- MFAM.getResult<TestFunctionAnalysis>(MF.getFunction());
- // + 5
+ FAM.getResult<TestFunctionAnalysis>(MF.getFunction());
return FAR.InstructionCount;
}
@@ -76,90 +75,54 @@ private:
AnalysisKey TestMachineFunctionAnalysis::Key;
-const std::string DoInitErrMsg = "doInitialization failed";
-const std::string DoFinalErrMsg = "doFinalization failed";
-
struct TestMachineFunctionPass : public PassInfoMixin<TestMachineFunctionPass> {
- TestMachineFunctionPass(int &Count, std::vector<int> &BeforeInitialization,
- std::vector<int> &BeforeFinalization,
- std::vector<int> &MachineFunctionPassCount)
- : Count(Count), BeforeInitialization(BeforeInitialization),
- BeforeFinalization(BeforeFinalization),
- MachineFunctionPassCount(MachineFunctionPassCount) {}
-
- Error doInitialization(Module &M, MachineFunctionAnalysisManager &MFAM) {
- // Force doInitialization fail by starting with big `Count`.
- if (Count > 10000)
- return make_error<StringError>(DoInitErrMsg, inconvertibleErrorCode());
-
- // + 1
- ++Count;
- BeforeInitialization.push_back(Count);
- return Error::success();
- }
- Error doFinalization(Module &M, MachineFunctionAnalysisManager &MFAM) {
- // Force doFinalization fail by starting with big `Count`.
- if (Count > 1000)
- return make_error<StringError>(DoFinalErrMsg, inconvertibleErrorCode());
-
- // + 1
- ++Count;
- BeforeFinalization.push_back(Count);
- return Error::success();
- }
+ TestMachineFunctionPass(int &Count, std::vector<int> &Counts)
+ : Count(Count), Counts(Counts) {}
PreservedAnalyses run(MachineFunction &MF,
MachineFunctionAnalysisManager &MFAM) {
- // Query function analysis result.
+ FunctionAnalysisManager &FAM =
+ MFAM.getResult<FunctionAnalysisManagerMachineFunctionProxy>(MF)
+ .getManager();
TestFunctionAnalysis::Result &FAR =
- MFAM.getResult<TestFunctionAnalysis>(MF.getFunction());
- // 3 + 1 + 1 = 5
+ FAM.getResult<TestFunctionAnalysis>(MF.getFunction());
Count += FAR.InstructionCount;
- // Query module analysis result.
- MachineModuleInfo &MMI =
- MFAM.getResult<MachineModuleAnalysis>(*MF.getFunction().getParent())
- .getMMI();
- // 1 + 1 + 1 = 3
- Count += (MMI.getModule() == MF.getFunction().getParent());
-
- // Query machine function analysis result.
TestMachineFunctionAnalysis::Result &MFAR =
MFAM.getResult<TestMachineFunctionAnalysis>(MF);
- // 3 + 1 + 1 = 5
Count += MFAR.InstructionCount;
- MachineFunctionPassCount.push_back(Count);
+ Counts.push_back(Count);
return PreservedAnalyses::none();
}
int &Count;
- std::vector<int> &BeforeInitialization;
- std::vector<int> &BeforeFinalization;
- std::vector<int> &MachineFunctionPassCount;
+ std::vector<int> &Counts;
};
struct TestMachineModulePass : public PassInfoMixin<TestMachineModulePass> {
- TestMachineModulePass(int &Count, std::vector<int> &MachineModulePassCount)
- : Count(Count), MachineModulePassCount(MachineModulePassCount) {}
-
- Error run(Module &M, MachineFunctionAnalysisManager &MFAM) {
- MachineModuleInfo &MMI = MFAM.getResult<MachineModuleAnalysis>(M).getMMI();
- // + 1
- Count += (MMI.getModule() == &M);
- MachineModulePassCount.push_back(Count);
- return Error::success();
- }
-
- PreservedAnalyses run(MachineFunction &MF,
- MachineFunctionAnalysisManager &AM) {
- llvm_unreachable(
- "This should never be reached because this is machine module pass");
+ TestMachineModulePass(int &Count, std::vector<int> &Counts)
+ : Count(Count), Counts(Counts) {}
+
+ PreservedAnalyses run(Module &M, ModuleAnalysisManager &MAM) {
+ MachineModuleInfo &MMI = MAM.getResult<MachineModuleAnalysis>(M).getMMI();
+ FunctionAnalysisManager &FAM =
+ MAM.getResult<FunctionAnalysisManagerModuleProxy>(M).getManager();
+ MachineFunctionAnalysisManager &MFAM =
+ MAM.getResult<MachineFunctionAnalysisManagerModuleProxy>(M)
+ .getManager();
+ for (Function &F : M) {
+ MachineFunction &MF = MMI.getOrCreateMachineFunction(F);
+ Count += FAM.getResult<TestFunctionAnalysis>(F).InstructionCount;
+ Count += MFAM.getResult<TestMachineFunctionAnalysis>(MF).InstructionCount;
+ }
+ Counts.push_back(Count);
+ return PreservedAnalyses::all();
}
int &Count;
- std::vector<int> &MachineModulePassCount;
+ std::vector<int> &Counts;
};
std::unique_ptr<Module> parseIR(LLVMContext &Context, const char *IR) {
@@ -211,102 +174,40 @@ TEST_F(PassManagerTest, Basic) {
M->setDataLayout(TM->createDataLayout());
MachineModuleInfo MMI(LLVMTM);
+
LoopAnalysisManager LAM;
FunctionAnalysisManager FAM;
CGSCCAnalysisManager CGAM;
ModuleAnalysisManager MAM;
+ MachineFunctionAnalysisManager MFAM;
PassBuilder PB(TM.get());
PB.registerModuleAnalyses(MAM);
+ PB.registerCGSCCAnalyses(CGAM);
PB.registerFunctionAnalyses(FAM);
- PB.crossRegisterProxies(LAM, FAM, CGAM, MAM);
+ PB.registerLoopAnalyses(LAM);
+ PB.registerMachineFunctionAnalyses(MFAM);
+ PB.crossRegisterProxies(LAM, FAM, CGAM, MAM, &MFAM);
FAM.registerPass([&] { return TestFunctionAnalysis(); });
- FAM.registerPass([&] { return PassInstrumentationAnalysis(); });
MAM.registerPass([&] { return MachineModuleAnalysis(MMI); });
- MAM.registerPass([&] { return PassInstrumentationAnalysis(); });
-
- MachineFunctionAnalysisManager MFAM;
- {
- // Test move assignment.
- MachineFunctionAnalysisManager NestedMFAM(FAM, MAM);
- NestedMFAM.registerPass([&] { return PassInstrumentationAnalysis(); });
- NestedMFAM.registerPass([&] { return TestMachineFunctionAnalysis(); });
- MFAM = std::move(NestedMFAM);
- }
+ MFAM.registerPass([&] { return TestMachineFunctionAnalysis(); });
int Count = 0;
- std::vector<int> BeforeInitialization[2];
- std::vector<int> BeforeFinalization[2];
- std::vector<int> TestMachineFunctionCount[2];
- std::vector<int> TestMachineModuleCount[2];
+ std::vector<int> Counts;
+ ModulePassManager MPM;
MachineFunctionPassManager MFPM;
- {
- // Test move assignment.
- MachineFunctionPassManager NestedMFPM;
- NestedMFPM.addPass(TestMachineModulePass(Count, TestMachineModuleCount[0]));
- NestedMFPM.addPass(TestMachineFunctionPass(Count, BeforeInitialization[0],
- BeforeFinalization[0],
- TestMachineFunctionCount[0]));
- NestedMFPM.addPass(TestMachineModulePass(Count, TestMachineModuleCount[1]));
- NestedMFPM.addPass(TestMachineFunctionPass(Count, BeforeInitialization[1],
- BeforeFinalization[1],
- TestMachineFunctionCount[1]));
- MFPM = std::move(NestedMFPM);
- }
+ MPM.addPass(TestMachineModulePass(Count, Counts));
+ MPM.addPass(createModuleToMachineFunctionPassAdaptor(
+ TestMachineFunctionPass(Count, Counts)));
+ MPM.addPass(TestMachineModulePass(Count, Counts));
+ MFPM.addPass(TestMachineFunctionPass(Count, Counts));
+ MPM.addPass(createModuleToMachineFunctionPassAdaptor(std::move(MFPM)));
+
+ MPM.run(*M, MAM);
- ASSERT_FALSE(errorToBool(MFPM.run(*M, MFAM)));
-
- // Check first machine module pass
- EXPECT_EQ(1u, TestMachineModuleCount[0].size());
- EXPECT_EQ(3, TestMachineModuleCount[0][0]);
-
- // Check first machine function pass
- EXPECT_EQ(1u, BeforeInitialization[0].size());
- EXPECT_EQ(1, BeforeInitialization[0][0]);
- EXPECT_EQ(3u, TestMachineFunctionCount[0].size());
- EXPECT_EQ(10, TestMachineFunctionCount[0][0]);
- EXPECT_EQ(13, TestMachineFunctionCount[0][1]);
- EXPECT_EQ(16, TestMachineFunctionCount[0][2]);
- EXPECT_EQ(1u, BeforeFinalization[0].size());
- EXPECT_EQ(31, BeforeFinalization[0][0]);
-
- // Check second machine module pass
- EXPECT_EQ(1u, TestMachineModuleCount[1].size());
- EXPECT_EQ(17, TestMachineModuleCount[1][0]);
-
- // Check second machine function pass
- EXPECT_EQ(1u, BeforeInitialization[1].size());
- EXPECT_EQ(2, BeforeInitialization[1][0]);
- EXPECT_EQ(3u, TestMachineFunctionCount[1].size());
- EXPECT_EQ(24, TestMachineFunctionCount[1][0]);
- EXPECT_EQ(27, TestMachineFunctionCount[1][1]);
- EXPECT_EQ(30, TestMachineFunctionCount[1][2]);
- EXPECT_EQ(1u, BeforeFinalization[1].size());
- EXPECT_EQ(32, BeforeFinalization[1][0]);
-
- EXPECT_EQ(32, Count);
-
- // doInitialization returns error
- Count = 10000;
- MFPM.addPass(TestMachineFunctionPass(Count, BeforeInitialization[1],
- BeforeFinalization[1],
- TestMachineFunctionCount[1]));
- std::string Message;
- llvm::handleAllErrors(MFPM.run(*M, MFAM), [&](llvm::StringError &Error) {
- Message = Error.getMessage();
- });
- EXPECT_EQ(Message, DoInitErrMsg);
-
- // doFinalization returns error
- Count = 1000;
- MFPM.addPass(TestMachineFunctionPass(Count, BeforeInitialization[1],
- BeforeFinalization[1],
- TestMachineFunctionCount[1]));
- llvm::handleAllErrors(MFPM.run(*M, MFAM), [&](llvm::StringError &Error) {
- Message = Error.getMessage();
- });
- EXPECT_EQ(Message, DoFinalErrMsg);
+ EXPECT_EQ((std::vector<int>{10, 16, 18, 20, 30, 36, 38, 40}), Counts);
+ EXPECT_EQ(40, Count);
}
} // namespace
diff --git a/llvm/unittests/CodeGen/SelectionDAGPatternMatchTest.cpp b/llvm/unittests/CodeGen/SelectionDAGPatternMatchTest.cpp
new file mode 100644
index 000000000000..17fc3ce8af26
--- /dev/null
+++ b/llvm/unittests/CodeGen/SelectionDAGPatternMatchTest.cpp
@@ -0,0 +1,292 @@
+//===---- llvm/unittest/CodeGen/SelectionDAGPatternMatchTest.cpp ---------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "llvm/Analysis/OptimizationRemarkEmitter.h"
+#include "llvm/AsmParser/Parser.h"
+#include "llvm/CodeGen/MachineModuleInfo.h"
+#include "llvm/CodeGen/SDPatternMatch.h"
+#include "llvm/CodeGen/TargetLowering.h"
+#include "llvm/MC/TargetRegistry.h"
+#include "llvm/Support/SourceMgr.h"
+#include "llvm/Support/TargetSelect.h"
+#include "llvm/Target/TargetMachine.h"
+#include "gtest/gtest.h"
+
+using namespace llvm;
+
+class SelectionDAGPatternMatchTest : public testing::Test {
+protected:
+ static void SetUpTestCase() {
+ InitializeAllTargets();
+ InitializeAllTargetMCs();
+ }
+
+ void SetUp() override {
+ StringRef Assembly = "@g = global i32 0\n"
+ "@g_alias = alias i32, i32* @g\n"
+ "define i32 @f() {\n"
+ " %1 = load i32, i32* @g\n"
+ " ret i32 %1\n"
+ "}";
+
+ Triple TargetTriple("riscv64--");
+ std::string Error;
+ const Target *T = TargetRegistry::lookupTarget("", TargetTriple, Error);
+ // FIXME: These tests do not depend on RISCV specifically, but we have to
+ // initialize a target. A skeleton Target for unittests would allow us to
+ // always run these tests.
+ if (!T)
+ GTEST_SKIP();
+
+ TargetOptions Options;
+ TM = std::unique_ptr<LLVMTargetMachine>(static_cast<LLVMTargetMachine *>(
+ T->createTargetMachine("riscv64", "", "+m,+f,+d,+v", Options,
+ std::nullopt, std::nullopt,
+ CodeGenOptLevel::Aggressive)));
+ if (!TM)
+ GTEST_SKIP();
+
+ SMDiagnostic SMError;
+ M = parseAssemblyString(Assembly, SMError, Context);
+ if (!M)
+ report_fatal_error(SMError.getMessage());
+ M->setDataLayout(TM->createDataLayout());
+
+ F = M->getFunction("f");
+ if (!F)
+ report_fatal_error("F?");
+ G = M->getGlobalVariable("g");
+ if (!G)
+ report_fatal_error("G?");
+ AliasedG = M->getNamedAlias("g_alias");
+ if (!AliasedG)
+ report_fatal_error("AliasedG?");
+
+ MachineModuleInfo MMI(TM.get());
+
+ MF = std::make_unique<MachineFunction>(*F, *TM, *TM->getSubtargetImpl(*F),
+ 0, MMI);
+
+ DAG = std::make_unique<SelectionDAG>(*TM, CodeGenOptLevel::None);
+ if (!DAG)
+ report_fatal_error("DAG?");
+ OptimizationRemarkEmitter ORE(F);
+ DAG->init(*MF, ORE, nullptr, nullptr, nullptr, nullptr, nullptr, nullptr);
+ }
+
+ TargetLoweringBase::LegalizeTypeAction getTypeAction(EVT VT) {
+ return DAG->getTargetLoweringInfo().getTypeAction(Context, VT);
+ }
+
+ EVT getTypeToTransformTo(EVT VT) {
+ return DAG->getTargetLoweringInfo().getTypeToTransformTo(Context, VT);
+ }
+
+ LLVMContext Context;
+ std::unique_ptr<LLVMTargetMachine> TM;
+ std::unique_ptr<Module> M;
+ Function *F;
+ GlobalVariable *G;
+ GlobalAlias *AliasedG;
+ std::unique_ptr<MachineFunction> MF;
+ std::unique_ptr<SelectionDAG> DAG;
+};
+
+TEST_F(SelectionDAGPatternMatchTest, matchValueType) {
+ SDLoc DL;
+ auto Int32VT = EVT::getIntegerVT(Context, 32);
+ auto Float32VT = EVT::getFloatingPointVT(32);
+ auto VInt32VT = EVT::getVectorVT(Context, Int32VT, 4);
+
+ SDValue Op0 = DAG->getCopyFromReg(DAG->getEntryNode(), DL, 1, Int32VT);
+ SDValue Op1 = DAG->getCopyFromReg(DAG->getEntryNode(), DL, 2, Float32VT);
+ SDValue Op2 = DAG->getCopyFromReg(DAG->getEntryNode(), DL, 2, VInt32VT);
+
+ using namespace SDPatternMatch;
+ EXPECT_TRUE(sd_match(Op0, m_SpecificVT(Int32VT)));
+ EVT BindVT;
+ EXPECT_TRUE(sd_match(Op1, m_VT(BindVT)));
+ EXPECT_EQ(BindVT, Float32VT);
+ EXPECT_TRUE(sd_match(Op0, m_IntegerVT()));
+ EXPECT_TRUE(sd_match(Op1, m_FloatingPointVT()));
+ EXPECT_TRUE(sd_match(Op2, m_VectorVT()));
+ EXPECT_FALSE(sd_match(Op2, m_ScalableVectorVT()));
+}
+
+TEST_F(SelectionDAGPatternMatchTest, matchBinaryOp) {
+ SDLoc DL;
+ auto Int32VT = EVT::getIntegerVT(Context, 32);
+ auto Float32VT = EVT::getFloatingPointVT(32);
+
+ SDValue Op0 = DAG->getCopyFromReg(DAG->getEntryNode(), DL, 1, Int32VT);
+ SDValue Op1 = DAG->getCopyFromReg(DAG->getEntryNode(), DL, 2, Int32VT);
+ SDValue Op2 = DAG->getCopyFromReg(DAG->getEntryNode(), DL, 3, Float32VT);
+
+ SDValue Add = DAG->getNode(ISD::ADD, DL, Int32VT, Op0, Op1);
+ SDValue Sub = DAG->getNode(ISD::SUB, DL, Int32VT, Add, Op0);
+ SDValue Mul = DAG->getNode(ISD::MUL, DL, Int32VT, Add, Sub);
+
+ SDValue SFAdd = DAG->getNode(ISD::STRICT_FADD, DL, {Float32VT, MVT::Other},
+ {DAG->getEntryNode(), Op2, Op2});
+
+ using namespace SDPatternMatch;
+ EXPECT_TRUE(sd_match(Sub, m_BinOp(ISD::SUB, m_Value(), m_Value())));
+ EXPECT_TRUE(sd_match(Sub, m_Sub(m_Value(), m_Value())));
+ EXPECT_TRUE(sd_match(Add, m_c_BinOp(ISD::ADD, m_Value(), m_Value())));
+ EXPECT_TRUE(sd_match(Add, m_Add(m_Value(), m_Value())));
+ EXPECT_TRUE(sd_match(
+ Mul, m_Mul(m_OneUse(m_Opc(ISD::SUB)), m_NUses<2>(m_Specific(Add)))));
+ EXPECT_TRUE(
+ sd_match(SFAdd, m_ChainedBinOp(ISD::STRICT_FADD, m_SpecificVT(Float32VT),
+ m_SpecificVT(Float32VT))));
+ SDValue BindVal;
+ EXPECT_TRUE(sd_match(SFAdd, m_ChainedBinOp(ISD::STRICT_FADD, m_Value(BindVal),
+ m_Deferred(BindVal))));
+ EXPECT_FALSE(sd_match(SFAdd, m_ChainedBinOp(ISD::STRICT_FADD, m_OtherVT(),
+ m_SpecificVT(Float32VT))));
+}
+
+TEST_F(SelectionDAGPatternMatchTest, matchUnaryOp) {
+ SDLoc DL;
+ auto Int32VT = EVT::getIntegerVT(Context, 32);
+ auto Int64VT = EVT::getIntegerVT(Context, 64);
+
+ SDValue Op0 = DAG->getCopyFromReg(DAG->getEntryNode(), DL, 1, Int32VT);
+ SDValue Op1 = DAG->getCopyFromReg(DAG->getEntryNode(), DL, 1, Int64VT);
+
+ SDValue ZExt = DAG->getNode(ISD::ZERO_EXTEND, DL, Int64VT, Op0);
+ SDValue SExt = DAG->getNode(ISD::SIGN_EXTEND, DL, Int64VT, Op0);
+ SDValue Trunc = DAG->getNode(ISD::TRUNCATE, DL, Int32VT, Op1);
+
+ using namespace SDPatternMatch;
+ EXPECT_TRUE(sd_match(ZExt, m_UnaryOp(ISD::ZERO_EXTEND, m_Value())));
+ EXPECT_TRUE(sd_match(SExt, m_SExt(m_Value())));
+ EXPECT_TRUE(sd_match(Trunc, m_Trunc(m_Specific(Op1))));
+}
+
+TEST_F(SelectionDAGPatternMatchTest, matchConstants) {
+ SDLoc DL;
+ auto Int32VT = EVT::getIntegerVT(Context, 32);
+ auto VInt32VT = EVT::getVectorVT(Context, Int32VT, 4);
+
+ SDValue Arg0 = DAG->getCopyFromReg(DAG->getEntryNode(), DL, 1, Int32VT);
+
+ SDValue Const3 = DAG->getConstant(3, DL, Int32VT);
+ SDValue Const87 = DAG->getConstant(87, DL, Int32VT);
+ SDValue Splat = DAG->getSplat(VInt32VT, DL, Arg0);
+ SDValue ConstSplat = DAG->getSplat(VInt32VT, DL, Const3);
+ SDValue Zero = DAG->getConstant(0, DL, Int32VT);
+ SDValue One = DAG->getConstant(1, DL, Int32VT);
+ SDValue AllOnes = DAG->getConstant(APInt::getAllOnes(32), DL, Int32VT);
+
+ using namespace SDPatternMatch;
+ EXPECT_TRUE(sd_match(Const87, m_ConstInt()));
+ EXPECT_FALSE(sd_match(Arg0, m_ConstInt()));
+ APInt ConstVal;
+ EXPECT_TRUE(sd_match(ConstSplat, m_ConstInt(ConstVal)));
+ EXPECT_EQ(ConstVal, 3);
+ EXPECT_FALSE(sd_match(Splat, m_ConstInt()));
+
+ EXPECT_TRUE(sd_match(Const87, m_SpecificInt(87)));
+ EXPECT_TRUE(sd_match(Const3, m_SpecificInt(ConstVal)));
+ EXPECT_TRUE(sd_match(AllOnes, m_AllOnes()));
+
+ EXPECT_TRUE(sd_match(Zero, DAG.get(), m_False()));
+ EXPECT_TRUE(sd_match(One, DAG.get(), m_True()));
+ EXPECT_FALSE(sd_match(AllOnes, DAG.get(), m_True()));
+}
+
+TEST_F(SelectionDAGPatternMatchTest, patternCombinators) {
+ SDLoc DL;
+ auto Int32VT = EVT::getIntegerVT(Context, 32);
+
+ SDValue Op0 = DAG->getCopyFromReg(DAG->getEntryNode(), DL, 1, Int32VT);
+ SDValue Op1 = DAG->getCopyFromReg(DAG->getEntryNode(), DL, 2, Int32VT);
+
+ SDValue Add = DAG->getNode(ISD::ADD, DL, Int32VT, Op0, Op1);
+ SDValue Sub = DAG->getNode(ISD::SUB, DL, Int32VT, Add, Op0);
+
+ using namespace SDPatternMatch;
+ EXPECT_TRUE(sd_match(
+ Sub, m_AnyOf(m_Opc(ISD::ADD), m_Opc(ISD::SUB), m_Opc(ISD::MUL))));
+ EXPECT_TRUE(sd_match(Add, m_AllOf(m_Opc(ISD::ADD), m_OneUse())));
+}
+
+TEST_F(SelectionDAGPatternMatchTest, matchNode) {
+ SDLoc DL;
+ auto Int32VT = EVT::getIntegerVT(Context, 32);
+
+ SDValue Op0 = DAG->getCopyFromReg(DAG->getEntryNode(), DL, 1, Int32VT);
+ SDValue Op1 = DAG->getCopyFromReg(DAG->getEntryNode(), DL, 2, Int32VT);
+
+ SDValue Add = DAG->getNode(ISD::ADD, DL, Int32VT, Op0, Op1);
+
+ using namespace SDPatternMatch;
+ EXPECT_TRUE(sd_match(Add, m_Node(ISD::ADD, m_Value(), m_Value())));
+ EXPECT_FALSE(sd_match(Add, m_Node(ISD::SUB, m_Value(), m_Value())));
+ EXPECT_FALSE(sd_match(Add, m_Node(ISD::ADD, m_Value())));
+ EXPECT_FALSE(
+ sd_match(Add, m_Node(ISD::ADD, m_Value(), m_Value(), m_Value())));
+ EXPECT_FALSE(sd_match(Add, m_Node(ISD::ADD, m_ConstInt(), m_Value())));
+}
+
+namespace {
+struct VPMatchContext : public SDPatternMatch::BasicMatchContext {
+ using SDPatternMatch::BasicMatchContext::BasicMatchContext;
+
+ bool match(SDValue OpVal, unsigned Opc) const {
+ if (!OpVal->isVPOpcode())
+ return OpVal->getOpcode() == Opc;
+
+ auto BaseOpc = ISD::getBaseOpcodeForVP(OpVal->getOpcode(), false);
+ return BaseOpc.has_value() && *BaseOpc == Opc;
+ }
+};
+} // anonymous namespace
+TEST_F(SelectionDAGPatternMatchTest, matchContext) {
+ SDLoc DL;
+ auto BoolVT = EVT::getIntegerVT(Context, 1);
+ auto Int32VT = EVT::getIntegerVT(Context, 32);
+ auto VInt32VT = EVT::getVectorVT(Context, Int32VT, 4);
+ auto MaskVT = EVT::getVectorVT(Context, BoolVT, 4);
+
+ SDValue Scalar0 = DAG->getCopyFromReg(DAG->getEntryNode(), DL, 1, Int32VT);
+ SDValue Vector0 = DAG->getCopyFromReg(DAG->getEntryNode(), DL, 2, VInt32VT);
+ SDValue Mask0 = DAG->getCopyFromReg(DAG->getEntryNode(), DL, 3, MaskVT);
+
+ SDValue VPAdd = DAG->getNode(ISD::VP_ADD, DL, VInt32VT,
+ {Vector0, Vector0, Mask0, Scalar0});
+ SDValue VPReduceAdd = DAG->getNode(ISD::VP_REDUCE_ADD, DL, Int32VT,
+ {Scalar0, VPAdd, Mask0, Scalar0});
+
+ using namespace SDPatternMatch;
+ VPMatchContext VPCtx(DAG.get());
+ EXPECT_TRUE(sd_context_match(VPAdd, VPCtx, m_Opc(ISD::ADD)));
+  // VP_REDUCE_ADD doesn't have a base opcode, so we use a normal
+ // sd_match before switching to VPMatchContext when checking VPAdd.
+ EXPECT_TRUE(sd_match(VPReduceAdd, m_Node(ISD::VP_REDUCE_ADD, m_Value(),
+ m_Context(VPCtx, m_Opc(ISD::ADD)),
+ m_Value(), m_Value())));
+}
+
+TEST_F(SelectionDAGPatternMatchTest, matchAdvancedProperties) {
+ SDLoc DL;
+ auto Int16VT = EVT::getIntegerVT(Context, 16);
+ auto Int64VT = EVT::getIntegerVT(Context, 64);
+
+ SDValue Op0 = DAG->getCopyFromReg(DAG->getEntryNode(), DL, 1, Int64VT);
+ SDValue Op1 = DAG->getCopyFromReg(DAG->getEntryNode(), DL, 2, Int16VT);
+
+ SDValue Add = DAG->getNode(ISD::ADD, DL, Int64VT, Op0, Op0);
+
+ using namespace SDPatternMatch;
+ EXPECT_TRUE(sd_match(Op0, DAG.get(), m_LegalType(m_Value())));
+ EXPECT_FALSE(sd_match(Op1, DAG.get(), m_LegalType(m_Value())));
+ EXPECT_TRUE(sd_match(Add, DAG.get(),
+ m_LegalOp(m_IntegerVT(m_Add(m_Value(), m_Value())))));
+}
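A hypothetical helper showing how a DAG combine might use the SDPatternMatch API exercised above; the surrounding combine is assumed, and only the matcher calls mirror the tested interface.

#include "llvm/CodeGen/SDPatternMatch.h"

// Returns true for (add (mul A, B), A): A is bound on first use and the same
// value is required again via m_Deferred, as in the matchBinaryOp test above.
static bool isAddOfMulWithSharedOperand(llvm::SDValue N) {
  using namespace llvm::SDPatternMatch;
  llvm::SDValue A, B;
  return sd_match(N, m_Add(m_Mul(m_Value(A), m_Value(B)), m_Deferred(A)));
}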
diff --git a/llvm/unittests/DebugInfo/DWARF/DWARFDebugLineTest.cpp b/llvm/unittests/DebugInfo/DWARF/DWARFDebugLineTest.cpp
index d42a626fa9c1..980b627625ee 100644
--- a/llvm/unittests/DebugInfo/DWARF/DWARFDebugLineTest.cpp
+++ b/llvm/unittests/DebugInfo/DWARF/DWARFDebugLineTest.cpp
@@ -823,7 +823,9 @@ TEST_F(DebugLineBasicFixture, ErrorForUnsupportedAddressSizeDefinedInHeader) {
nullptr, RecordRecoverable);
EXPECT_THAT_ERROR(
std::move(Recoverable),
- FailedWithMessage("address size 0x09 of DW_LNE_set_address opcode at "
+ FailedWithMessage("parsing line table prologue at offset 0x00000000: "
+ "invalid address size 9",
+ "address size 0x09 of DW_LNE_set_address opcode at "
"offset 0x00000038 is unsupported"));
ASSERT_THAT_EXPECTED(ExpectedLineTable, Succeeded());
ASSERT_EQ((*ExpectedLineTable)->Rows.size(), 3u);
diff --git a/llvm/unittests/IR/VPIntrinsicTest.cpp b/llvm/unittests/IR/VPIntrinsicTest.cpp
index 7a9d91cc76b3..e3462f0f33f1 100644
--- a/llvm/unittests/IR/VPIntrinsicTest.cpp
+++ b/llvm/unittests/IR/VPIntrinsicTest.cpp
@@ -163,6 +163,14 @@ protected:
<< "(<8 x i16>, i1 immarg, <8 x i1>, i32) ";
Str << " declare <8 x i16> @llvm.vp.cttz.v8i16"
<< "(<8 x i16>, i1 immarg, <8 x i1>, i32) ";
+ Str << " declare <8 x i16> @llvm.vp.sadd.sat.v8i16"
+ << "(<8 x i16>, <8 x i16>, <8 x i1>, i32) ";
+ Str << " declare <8 x i16> @llvm.vp.uadd.sat.v8i16"
+ << "(<8 x i16>, <8 x i16>, <8 x i1>, i32) ";
+ Str << " declare <8 x i16> @llvm.vp.ssub.sat.v8i16"
+ << "(<8 x i16>, <8 x i16>, <8 x i1>, i32) ";
+ Str << " declare <8 x i16> @llvm.vp.usub.sat.v8i16"
+ << "(<8 x i16>, <8 x i16>, <8 x i1>, i32) ";
Str << " declare <8 x i16> @llvm.vp.fshl.v8i16"
<< "(<8 x i16>, <8 x i16>, <8 x i16>, <8 x i1>, i32) ";
Str << " declare <8 x i16> @llvm.vp.fshr.v8i16"
diff --git a/llvm/unittests/MIR/CMakeLists.txt b/llvm/unittests/MIR/CMakeLists.txt
index f485dcbd971b..0ad52134a34d 100644
--- a/llvm/unittests/MIR/CMakeLists.txt
+++ b/llvm/unittests/MIR/CMakeLists.txt
@@ -1,5 +1,6 @@
set(LLVM_LINK_COMPONENTS
${LLVM_TARGETS_TO_BUILD}
+ Analysis
CodeGen
CodeGenTypes
Core
diff --git a/llvm/unittests/MIR/PassBuilderCallbacksTest.cpp b/llvm/unittests/MIR/PassBuilderCallbacksTest.cpp
index 8ecde223cb51..8e3738dc9192 100644
--- a/llvm/unittests/MIR/PassBuilderCallbacksTest.cpp
+++ b/llvm/unittests/MIR/PassBuilderCallbacksTest.cpp
@@ -6,6 +6,9 @@
//
//===----------------------------------------------------------------------===//
+#include "llvm/Analysis/CGSCCPassManager.h"
+#include "llvm/Analysis/LoopAnalysisManager.h"
+#include "llvm/CodeGen/FreeMachineFunction.h"
#include "llvm/MC/TargetRegistry.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Testing/Support/Error.h"
@@ -96,8 +99,6 @@ MATCHER_P(HasNameRegex, Name, "") {
}
struct MockPassInstrumentationCallbacks {
- PassInstrumentationCallbacks Callbacks;
-
MockPassInstrumentationCallbacks() {
ON_CALL(*this, runBeforePass(_, _)).WillByDefault(Return(true));
}
@@ -111,7 +112,7 @@ struct MockPassInstrumentationCallbacks {
MOCK_METHOD2(runBeforeAnalysis, void(StringRef PassID, llvm::Any));
MOCK_METHOD2(runAfterAnalysis, void(StringRef PassID, llvm::Any));
- void registerPassInstrumentation() {
+ void registerPassInstrumentation(PassInstrumentationCallbacks &Callbacks) {
Callbacks.registerShouldRunOptionalPassCallback(
[this](StringRef P, llvm::Any IR) {
return this->runBeforePass(P, IR);
@@ -147,7 +148,8 @@ struct MockPassInstrumentationCallbacks {
// to check these explicitly.
EXPECT_CALL(*this,
runBeforePass(Not(HasNameRegex("Mock")), HasName(IRName)))
- .Times(AnyNumber());
+ .Times(AnyNumber())
+ .WillRepeatedly(Return(false));
EXPECT_CALL(
*this, runBeforeSkippedPass(Not(HasNameRegex("Mock")), HasName(IRName)))
.Times(AnyNumber());
@@ -157,15 +159,9 @@ struct MockPassInstrumentationCallbacks {
EXPECT_CALL(*this,
runAfterPass(Not(HasNameRegex("Mock")), HasName(IRName), _))
.Times(AnyNumber());
- EXPECT_CALL(*this, runBeforeAnalysis(HasNameRegex("MachineModuleAnalysis"),
- HasName(IRName)))
- .Times(AnyNumber());
EXPECT_CALL(*this,
runBeforeAnalysis(Not(HasNameRegex("Mock")), HasName(IRName)))
.Times(AnyNumber());
- EXPECT_CALL(*this, runAfterAnalysis(HasNameRegex("MachineModuleAnalysis"),
- HasName(IRName)))
- .Times(AnyNumber());
EXPECT_CALL(*this,
runAfterAnalysis(Not(HasNameRegex("Mock")), HasName(IRName)))
.Times(AnyNumber());
@@ -202,7 +198,7 @@ public:
}
};
- Result run(MachineFunction &IR, MachineFunctionAnalysisManager::Base &AM) {
+ Result run(MachineFunction &IR, MachineFunctionAnalysisManager &AM) {
return Handle->run(IR, AM);
}
};
@@ -249,7 +245,7 @@ public:
public:
PreservedAnalyses run(MachineFunction &IR,
- MachineFunctionAnalysisManager::Base &AM) {
+ MachineFunctionAnalysisManager &AM) {
return Handle->run(IR, AM);
}
};
@@ -270,7 +266,7 @@ protected:
struct MockAnalysisHandle : public MockAnalysisHandleBase<MockAnalysisHandle> {
MOCK_METHOD2(run, Analysis::Result(MachineFunction &,
- MachineFunctionAnalysisManager::Base &));
+ MachineFunctionAnalysisManager &));
MOCK_METHOD3(invalidate, bool(MachineFunction &, const PreservedAnalyses &,
MachineFunctionAnalysisManager::Invalidator &));
@@ -284,7 +280,7 @@ AnalysisKey MockAnalysisHandleBase<DerivedT>::Analysis::Key;
class MockPassHandle : public MockPassHandleBase<MockPassHandle> {
public:
MOCK_METHOD2(run, PreservedAnalyses(MachineFunction &,
- MachineFunctionAnalysisManager::Base &));
+ MachineFunctionAnalysisManager &));
MockPassHandle() { setDefaults(); }
};
@@ -297,50 +293,51 @@ protected:
InitializeAllTargetMCs();
}
+ LLVMContext Context;
+
std::unique_ptr<LLVMTargetMachine> TM;
std::unique_ptr<MachineModuleInfo> MMI;
- LLVMContext Context;
std::unique_ptr<Module> M;
- std::unique_ptr<MIRParser> MIR;
-
- MockPassInstrumentationCallbacks CallbacksHandle;
- PassBuilder PB;
- ModulePassManager PM;
- MachineFunctionPassManager MFPM;
- FunctionAnalysisManager FAM;
- ModuleAnalysisManager AM;
+ PassInstrumentationCallbacks PIC;
+ std::unique_ptr<PassBuilder> PB;
+ ModulePassManager MPM;
MachineFunctionAnalysisManager MFAM;
+ LoopAnalysisManager LAM;
+ FunctionAnalysisManager FAM;
+ CGSCCAnalysisManager CGAM;
+ ModuleAnalysisManager MAM;
+ MockPassInstrumentationCallbacks CallbacksHandle;
MockPassHandle PassHandle;
MockAnalysisHandle AnalysisHandle;
- std::unique_ptr<Module> parseMIR(const TargetMachine &TM, StringRef MIRCode,
- MachineModuleInfo &MMI) {
+ static std::unique_ptr<Module> parseMIR(StringRef MIRCode,
+ LLVMContext &Context,
+ TargetMachine &TM,
+ MachineModuleInfo &MMI) {
SMDiagnostic Diagnostic;
std::unique_ptr<MemoryBuffer> MBuffer = MemoryBuffer::getMemBuffer(MIRCode);
- MIR = createMIRParser(std::move(MBuffer), Context);
- if (!MIR)
- return nullptr;
+ std::unique_ptr<MIRParser> MIR =
+ createMIRParser(std::move(MBuffer), Context);
+ assert(MIR);
std::unique_ptr<Module> Mod = MIR->parseIRModule();
- if (!Mod)
- return nullptr;
+ assert(Mod);
+ // Module identifier is used in tests below.
+ Mod->setModuleIdentifier("module");
Mod->setDataLayout(TM.createDataLayout());
- if (MIR->parseMachineFunctions(*Mod, MMI)) {
- M.reset();
- return nullptr;
- }
+ [[maybe_unused]] bool Ret = MIR->parseMachineFunctions(*Mod, MMI);
+ assert(!Ret);
+
return Mod;
}
static PreservedAnalyses
- getAnalysisResult(MachineFunction &U,
- MachineFunctionAnalysisManager::Base &AM) {
- auto &MFAM = static_cast<MachineFunctionAnalysisManager &>(AM);
+ getAnalysisResult(MachineFunction &U, MachineFunctionAnalysisManager &MFAM) {
MFAM.getResult<MockAnalysisHandle::Analysis>(U);
return PreservedAnalyses::all();
}
@@ -356,25 +353,18 @@ protected:
TripleName, "", "", TargetOptions(), std::nullopt)));
if (!TM)
GTEST_SKIP();
- MMI = std::make_unique<MachineModuleInfo>(TM.get());
- M = parseMIR(*TM, MIRString, *MMI);
- AM.registerPass([&] { return MachineModuleAnalysis(*MMI); });
- }
- MachineFunctionCallbacksTest()
- : CallbacksHandle(), PB(nullptr, PipelineTuningOptions(), std::nullopt,
- &CallbacksHandle.Callbacks),
- PM(), FAM(), AM(), MFAM(FAM, AM) {
-
- EXPECT_TRUE(&CallbacksHandle.Callbacks ==
- PB.getPassInstrumentationCallbacks());
+ MMI = std::make_unique<MachineModuleInfo>(TM.get());
+ M = parseMIR(MIRString, Context, *TM, *MMI);
+ PB = std::make_unique<PassBuilder>(TM.get(), PipelineTuningOptions(),
+ std::nullopt, &PIC);
/// Register a callback for analysis registration.
///
/// The callback is a function taking a reference to an AnalyisManager
/// object. When called, the callee gets to register its own analyses with
/// this PassBuilder instance.
- PB.registerAnalysisRegistrationCallback(
+ PB->registerAnalysisRegistrationCallback(
[this](MachineFunctionAnalysisManager &AM) {
// Register our mock analysis
AM.registerPass([this] { return AnalysisHandle.getAnalysis(); });
@@ -386,24 +376,29 @@ protected:
/// callbacks for each encountered pass name that it does not know. This
/// includes both simple pass names as well as names of sub-pipelines. In
/// the latter case, the InnerPipeline is not empty.
- PB.registerPipelineParsingCallback(
- [this](StringRef Name, MachineFunctionPassManager &PM) {
+ PB->registerPipelineParsingCallback(
+ [this](StringRef Name, MachineFunctionPassManager &PM,
+ ArrayRef<PassBuilder::PipelineElement> InnerPipeline) {
if (parseAnalysisUtilityPasses<MockAnalysisHandle::Analysis>(
"test-analysis", Name, PM))
return true;
/// Parse the name of our pass mock handle
if (Name == "test-transform") {
- MFPM.addPass(PassHandle.getPass());
+ PM.addPass(PassHandle.getPass());
return true;
}
return false;
});
/// Register builtin analyses and cross-register the analysis proxies
- PB.registerModuleAnalyses(AM);
- PB.registerFunctionAnalyses(FAM);
- PB.registerMachineFunctionAnalyses(MFAM);
+ PB->registerModuleAnalyses(MAM);
+ PB->registerCGSCCAnalyses(CGAM);
+ PB->registerFunctionAnalyses(FAM);
+ PB->registerLoopAnalyses(LAM);
+ PB->registerMachineFunctionAnalyses(MFAM);
+ PB->crossRegisterProxies(LAM, FAM, CGAM, MAM, &MFAM);
+ MAM.registerPass([&] { return MachineModuleAnalysis(*MMI); });
}
};
@@ -412,53 +407,58 @@ TEST_F(MachineFunctionCallbacksTest, Passes) {
EXPECT_CALL(PassHandle, run(HasName("test"), _)).WillOnce(&getAnalysisResult);
StringRef PipelineText = "test-transform";
- ASSERT_THAT_ERROR(PB.parsePassPipeline(MFPM, PipelineText), Succeeded())
+ ASSERT_THAT_ERROR(PB->parsePassPipeline(MPM, PipelineText), Succeeded())
<< "Pipeline was: " << PipelineText;
- ASSERT_THAT_ERROR(MFPM.run(*M, MFAM), Succeeded());
+ MPM.run(*M, MAM);
}
TEST_F(MachineFunctionCallbacksTest, InstrumentedPasses) {
- CallbacksHandle.registerPassInstrumentation();
+ CallbacksHandle.registerPassInstrumentation(PIC);
// Non-mock instrumentation not specifically mentioned below can be ignored.
- CallbacksHandle.ignoreNonMockPassInstrumentation("<string>");
CallbacksHandle.ignoreNonMockPassInstrumentation("test");
- CallbacksHandle.ignoreNonMockPassInstrumentation("");
+ CallbacksHandle.ignoreNonMockPassInstrumentation("module");
// PassInstrumentation calls should happen in-sequence, in the same order
// as passes/analyses are scheduled.
::testing::Sequence PISequence;
EXPECT_CALL(CallbacksHandle,
runBeforePass(HasNameRegex("MockPassHandle"), HasName("test")))
- .InSequence(PISequence);
+ .InSequence(PISequence)
+ .WillOnce(Return(true));
EXPECT_CALL(
CallbacksHandle,
runBeforeNonSkippedPass(HasNameRegex("MockPassHandle"), HasName("test")))
.InSequence(PISequence);
- EXPECT_CALL(CallbacksHandle,
- runBeforeAnalysis(HasNameRegex("MockAnalysisHandle"), _))
+ EXPECT_CALL(
+ CallbacksHandle,
+ runBeforeAnalysis(HasNameRegex("MockAnalysisHandle"), HasName("test")))
.InSequence(PISequence);
- EXPECT_CALL(CallbacksHandle,
- runAfterAnalysis(HasNameRegex("MockAnalysisHandle"), _))
+ EXPECT_CALL(
+ CallbacksHandle,
+ runAfterAnalysis(HasNameRegex("MockAnalysisHandle"), HasName("test")))
.InSequence(PISequence);
EXPECT_CALL(CallbacksHandle,
runAfterPass(HasNameRegex("MockPassHandle"), HasName("test"), _))
.InSequence(PISequence);
+ EXPECT_CALL(
+ CallbacksHandle,
+ runBeforeSkippedPass(HasNameRegex("MockPassHandle"), HasName("test")))
+ .Times(0);
EXPECT_CALL(AnalysisHandle, run(HasName("test"), _));
EXPECT_CALL(PassHandle, run(HasName("test"), _)).WillOnce(&getAnalysisResult);
StringRef PipelineText = "test-transform";
- ASSERT_THAT_ERROR(PB.parsePassPipeline(MFPM, PipelineText), Succeeded())
+ ASSERT_THAT_ERROR(PB->parsePassPipeline(MPM, PipelineText), Succeeded())
<< "Pipeline was: " << PipelineText;
- ASSERT_THAT_ERROR(MFPM.run(*M, MFAM), Succeeded());
+ MPM.run(*M, MAM);
}
TEST_F(MachineFunctionCallbacksTest, InstrumentedSkippedPasses) {
- CallbacksHandle.registerPassInstrumentation();
+ CallbacksHandle.registerPassInstrumentation(PIC);
// Non-mock instrumentation run here can safely be ignored.
- CallbacksHandle.ignoreNonMockPassInstrumentation("<string>");
CallbacksHandle.ignoreNonMockPassInstrumentation("test");
- CallbacksHandle.ignoreNonMockPassInstrumentation("");
+ CallbacksHandle.ignoreNonMockPassInstrumentation("module");
// Skip the pass by returning false.
EXPECT_CALL(CallbacksHandle,
@@ -495,9 +495,81 @@ TEST_F(MachineFunctionCallbacksTest, InstrumentedSkippedPasses) {
.Times(0);
StringRef PipelineText = "test-transform";
- ASSERT_THAT_ERROR(PB.parsePassPipeline(MFPM, PipelineText), Succeeded())
+ ASSERT_THAT_ERROR(PB->parsePassPipeline(MPM, PipelineText), Succeeded())
<< "Pipeline was: " << PipelineText;
- ASSERT_THAT_ERROR(MFPM.run(*M, MFAM), Succeeded());
+ MPM.run(*M, MAM);
+}
+
+// Check that the Module -> MachineFunction adaptor properly calls
+// runAfterPassInvalidated.
+TEST_F(MachineFunctionCallbacksTest, InstrumentedFreeMFPass) {
+ CallbacksHandle.registerPassInstrumentation(PIC);
+ // Non-mock instrumentation run here can safely be ignored.
+ CallbacksHandle.ignoreNonMockPassInstrumentation("test");
+ CallbacksHandle.ignoreNonMockPassInstrumentation("module");
+
+ ::testing::Sequence PISequence;
+ EXPECT_CALL(
+ CallbacksHandle,
+ runBeforePass(HasNameRegex("FreeMachineFunctionPass"), HasName("test")))
+ .InSequence(PISequence)
+ .WillOnce(Return(true));
+ EXPECT_CALL(CallbacksHandle,
+ runBeforeNonSkippedPass(HasNameRegex("FreeMachineFunctionPass"),
+ HasName("test")))
+ .InSequence(PISequence);
+ EXPECT_CALL(CallbacksHandle, runAfterPassInvalidated(
+ HasNameRegex("FreeMachineFunctionPass"), _))
+ .InSequence(PISequence);
+
+ // runAfterPass should not be called since the MachineFunction is no longer
+ // valid after FreeMachineFunctionPass.
+ EXPECT_CALL(CallbacksHandle,
+ runAfterPass(HasNameRegex("FreeMachineFunctionPass"), _, _))
+ .Times(0);
+
+ MPM.addPass(
+ createModuleToMachineFunctionPassAdaptor(FreeMachineFunctionPass()));
+ MPM.run(*M, MAM);
+}
+
+// Check that the Module -> MachineFunction adaptor and MachineFunction pass
+// manager properly call runAfterPassInvalidated.
+TEST_F(MachineFunctionCallbacksTest, InstrumentedFreeMFPass2) {
+ CallbacksHandle.registerPassInstrumentation(PIC);
+ // Non-mock instrumentation run here can safely be ignored.
+ CallbacksHandle.ignoreNonMockPassInstrumentation("test");
+ CallbacksHandle.ignoreNonMockPassInstrumentation("module");
+
+ ::testing::Sequence PISequence;
+ EXPECT_CALL(
+ CallbacksHandle,
+ runBeforePass(HasNameRegex("FreeMachineFunctionPass"), HasName("test")))
+ .InSequence(PISequence)
+ .WillOnce(Return(true));
+ EXPECT_CALL(CallbacksHandle,
+ runBeforeNonSkippedPass(HasNameRegex("FreeMachineFunctionPass"),
+ HasName("test")))
+ .InSequence(PISequence);
+ EXPECT_CALL(CallbacksHandle, runAfterPassInvalidated(
+ HasNameRegex("FreeMachineFunctionPass"), _))
+ .InSequence(PISequence);
+ EXPECT_CALL(CallbacksHandle,
+ runAfterPassInvalidated(HasNameRegex("PassManager"), _))
+ .InSequence(PISequence);
+
+ // runAfterPass should not be called since the MachineFunction is no longer
+ // valid after FreeMachineFunctionPass.
+ EXPECT_CALL(CallbacksHandle,
+ runAfterPass(HasNameRegex("FreeMachineFunctionPass"), _, _))
+ .Times(0);
+ EXPECT_CALL(CallbacksHandle, runAfterPass(HasNameRegex("PassManager"), _, _))
+ .Times(0);
+
+ MachineFunctionPassManager MFPM;
+ MFPM.addPass(FreeMachineFunctionPass());
+ MPM.addPass(createModuleToMachineFunctionPassAdaptor(std::move(MFPM)));
+ MPM.run(*M, MAM);
}
} // end anonymous namespace
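The tests above exercise the reworked entry point where machine function passes run underneath the module pass manager rather than through a standalone MachineFunctionPassManager::run. A minimal sketch of that wiring, using only names that appear in the hunks (a parsed Module M, a ModuleAnalysisManager MAM with MachineModuleAnalysis registered, and the adaptor are assumed to be set up as in the fixture):

    // Sketch, not part of the patch: drive a machine function pass from the
    // module pass manager via the Module -> MachineFunction adaptor.
    MachineFunctionPassManager MFPM;
    MFPM.addPass(FreeMachineFunctionPass());
    ModulePassManager MPM;
    MPM.addPass(createModuleToMachineFunctionPassAdaptor(std::move(MFPM)));
    MPM.run(*M, MAM); // M: parsed module, MAM: module analysis manager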
diff --git a/llvm/utils/TableGen/Attributes.cpp b/llvm/utils/TableGen/Attributes.cpp
index 474042a3e9a3..db3c4decccb4 100644
--- a/llvm/utils/TableGen/Attributes.cpp
+++ b/llvm/utils/TableGen/Attributes.cpp
@@ -87,7 +87,11 @@ void Attributes::emitFnAttrCompatCheck(raw_ostream &OS, bool IsStringAttr) {
for (auto *Rule : CompatRules) {
StringRef FuncName = Rule->getValueAsString("CompatFunc");
- OS << " Ret &= " << FuncName << "(Caller, Callee);\n";
+ OS << " Ret &= " << FuncName << "(Caller, Callee";
+ StringRef AttrName = Rule->getValueAsString("AttrName");
+ if (!AttrName.empty())
+ OS << ", \"" << AttrName << "\"";
+ OS << ");\n";
}
OS << "\n";
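The loop above now forwards an optional AttrName to the generated compatibility helper. A hedged illustration of the emitted shape; the helper and attribute names below are placeholders, not values taken from this patch:

    // Illustrative output of emitFnAttrCompatCheck after the change:
    Ret &= isCompatibleFn(Caller, Callee);                  // rule without AttrName
    Ret &= isCompatibleStrFn(Caller, Callee, "some-attr");  // rule with AttrName set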
diff --git a/llvm/utils/TableGen/DXILEmitter.cpp b/llvm/utils/TableGen/DXILEmitter.cpp
index 768e8052975b..d47df597d53a 100644
--- a/llvm/utils/TableGen/DXILEmitter.cpp
+++ b/llvm/utils/TableGen/DXILEmitter.cpp
@@ -74,44 +74,32 @@ struct DXILOperationDesc {
};
} // end anonymous namespace
-// Convert DXIL type name string to dxil::ParameterKind
-//
-// @param typeNameStr Type name string
-// @return ParameterKind as defined in llvm/Support/DXILABI.h
-static ParameterKind getDXILTypeNameToKind(StringRef typeNameStr) {
- return StringSwitch<ParameterKind>(typeNameStr)
- .Case("voidTy", ParameterKind::VOID)
- .Case("f16Ty", ParameterKind::HALF)
- .Case("f32Ty", ParameterKind::FLOAT)
- .Case("f64Ty", ParameterKind::DOUBLE)
- .Case("i1Ty", ParameterKind::I1)
- .Case("i8Ty", ParameterKind::I8)
- .Case("i16Ty", ParameterKind::I16)
- .Case("i32Ty", ParameterKind::I32)
- .Case("i64Ty", ParameterKind::I64)
- .Case("overloadTy", ParameterKind::OVERLOAD)
- .Case("handleTy", ParameterKind::DXIL_HANDLE)
- .Case("cbufferRetTy", ParameterKind::CBUFFER_RET)
- .Case("resourceRetTy", ParameterKind::RESOURCE_RET)
- .Default(ParameterKind::INVALID);
-}
-
-static ParameterKind parameterTypeNameToKind(StringRef Name) {
- return StringSwitch<ParameterKind>(Name)
- .Case("void", ParameterKind::VOID)
- .Case("half", ParameterKind::HALF)
- .Case("float", ParameterKind::FLOAT)
- .Case("double", ParameterKind::DOUBLE)
- .Case("i1", ParameterKind::I1)
- .Case("i8", ParameterKind::I8)
- .Case("i16", ParameterKind::I16)
- .Case("i32", ParameterKind::I32)
- .Case("i64", ParameterKind::I64)
- .Case("$o", ParameterKind::OVERLOAD)
- .Case("dx.types.Handle", ParameterKind::DXIL_HANDLE)
- .Case("dx.types.CBufRet", ParameterKind::CBUFFER_RET)
- .Case("dx.types.ResRet", ParameterKind::RESOURCE_RET)
- .Default(ParameterKind::INVALID);
+/*!
+ Convert DXIL type name string to dxil::ParameterKind
+
+ @param typeNameStr Type name string
+  @return ParameterKind as defined in llvm/Support/DXILABI.h
+*/
+static ParameterKind lookupParameterKind(StringRef typeNameStr) {
+ auto paramKind = StringSwitch<ParameterKind>(typeNameStr)
+ .Case("llvm_void_ty", ParameterKind::VOID)
+ .Case("llvm_half_ty", ParameterKind::HALF)
+ .Case("llvm_float_ty", ParameterKind::FLOAT)
+ .Case("llvm_double_ty", ParameterKind::DOUBLE)
+ .Case("llvm_i1_ty", ParameterKind::I1)
+ .Case("llvm_i8_ty", ParameterKind::I8)
+ .Case("llvm_i16_ty", ParameterKind::I16)
+ .Case("llvm_i32_ty", ParameterKind::I32)
+ .Case("llvm_i64_ty", ParameterKind::I64)
+ .Case("llvm_anyfloat_ty", ParameterKind::OVERLOAD)
+ .Case("llvm_anyint_ty", ParameterKind::OVERLOAD)
+ .Case("dxil_handle_ty", ParameterKind::DXIL_HANDLE)
+ .Case("dxil_cbuffer_ty", ParameterKind::CBUFFER_RET)
+ .Case("dxil_resource_ty", ParameterKind::RESOURCE_RET)
+ .Default(ParameterKind::INVALID);
+ assert(paramKind != ParameterKind::INVALID &&
+ "Unsupported DXIL Type specified");
+ return paramKind;
}
DXILOperationDesc::DXILOperationDesc(const Record *R) {
@@ -143,7 +131,7 @@ DXILOperationDesc::DXILOperationDesc(const Record *R) {
for (unsigned I = 0; I < OverloadTypeList->size(); ++I) {
Record *R = OverloadTypeList->getElementAsRecord(I);
- OverloadTypes.emplace_back(getDXILTypeNameToKind(R->getNameInitAsString()));
+ OverloadTypes.emplace_back(lookupParameterKind(R->getNameInitAsString()));
}
Attr = StringRef(R->getValue("Attribute")->getNameInitAsString());
}
@@ -151,7 +139,8 @@ DXILOperationDesc::DXILOperationDesc(const Record *R) {
DXILParameter::DXILParameter(const Record *R) {
Name = R->getValueAsString("Name");
Pos = R->getValueAsInt("Pos");
- Kind = parameterTypeNameToKind(R->getValueAsString("Type"));
+ Kind =
+ lookupParameterKind(R->getValue("ParamType")->getValue()->getAsString());
if (R->getValue("Doc"))
Doc = R->getValueAsString("Doc");
IsConst = R->getValueAsBit("IsConstant");
@@ -296,10 +285,12 @@ static void emitDXILIntrinsicMap(std::vector<DXILOperationDesc> &Ops,
OS << "\n";
}
-// Convert operation attribute string to Attribute enum
-//
-// @param Attr string reference
-// @return std::string Attribute enum string
+/*!
+ Convert operation attribute string to Attribute enum
+
+ @param Attr string reference
+ @return std::string Attribute enum string
+ */
static std::string emitDXILOperationAttr(StringRef Attr) {
return StringSwitch<std::string>(Attr)
.Case("ReadNone", "Attribute::ReadNone")
diff --git a/llvm/utils/TableGen/DecoderEmitter.cpp b/llvm/utils/TableGen/DecoderEmitter.cpp
index 22a71065134a..36f437f02cf5 100644
--- a/llvm/utils/TableGen/DecoderEmitter.cpp
+++ b/llvm/utils/TableGen/DecoderEmitter.cpp
@@ -28,6 +28,7 @@
#include "llvm/ADT/StringRef.h"
#include "llvm/MC/MCDecoderOps.h"
#include "llvm/Support/Casting.h"
+#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/FormattedStream.h"
@@ -50,6 +51,13 @@ using namespace llvm;
#define DEBUG_TYPE "decoder-emitter"
+extern cl::OptionCategory DisassemblerEmitterCat;
+
+cl::opt<bool> DecoderEmitterSuppressDuplicates(
+ "suppress-per-hwmode-duplicates",
+ cl::desc("Suppress duplication of instrs into per-HwMode decoder tables"),
+ cl::init(false), cl::cat(DisassemblerEmitterCat));
+
namespace {
STATISTIC(NumEncodings, "Number of encodings considered");
@@ -2496,10 +2504,15 @@ void DecoderEmitter::run(raw_ostream &o) {
}
}
// This instruction is encoded the same on all HwModes. Emit it for all
- // HwModes.
- for (StringRef HwModeName : HwModeNames)
+    // HwModes by default; with -suppress-per-hwmode-duplicates set, keep it in
+    // a single common table instead.
+ if (DecoderEmitterSuppressDuplicates) {
NumberedEncodings.emplace_back(NumberedInstruction->TheDef,
- NumberedInstruction, HwModeName);
+ NumberedInstruction, "AllModes");
+ } else {
+ for (StringRef HwModeName : HwModeNames)
+ NumberedEncodings.emplace_back(NumberedInstruction->TheDef,
+ NumberedInstruction, HwModeName);
+ }
}
for (const auto &NumberedAlias :
RK.getAllDerivedDefinitions("AdditionalEncoding"))
diff --git a/llvm/utils/TableGen/DisassemblerEmitter.cpp b/llvm/utils/TableGen/DisassemblerEmitter.cpp
index ae6a8ef22bc8..2d653af4d302 100644
--- a/llvm/utils/TableGen/DisassemblerEmitter.cpp
+++ b/llvm/utils/TableGen/DisassemblerEmitter.cpp
@@ -131,5 +131,7 @@ static void EmitDisassembler(RecordKeeper &Records, raw_ostream &OS) {
EmitDecoder(Records, OS, PredicateNamespace);
}
+cl::OptionCategory DisassemblerEmitterCat("Options for -gen-disassembler");
+
static TableGen::Emitter::Opt X("gen-disassembler", EmitDisassembler,
"Generate disassembler");
diff --git a/llvm/utils/gn/secondary/lldb/tools/lldb-dap/BUILD.gn b/llvm/utils/gn/secondary/lldb/tools/lldb-dap/BUILD.gn
index 98c2068f6da2..8cb60fd81840 100644
--- a/llvm/utils/gn/secondary/lldb/tools/lldb-dap/BUILD.gn
+++ b/llvm/utils/gn/secondary/lldb/tools/lldb-dap/BUILD.gn
@@ -51,6 +51,7 @@ executable("lldb-dap") {
"ProgressEvent.cpp",
"RunInTerminal.cpp",
"SourceBreakpoint.cpp",
+ "Watchpoint.cpp",
"lldb-dap.cpp",
]
}
diff --git a/llvm/utils/gn/secondary/llvm/lib/Target/Hexagon/BUILD.gn b/llvm/utils/gn/secondary/llvm/lib/Target/Hexagon/BUILD.gn
index c3cafe58a1ad..747ca8f9c91d 100644
--- a/llvm/utils/gn/secondary/llvm/lib/Target/Hexagon/BUILD.gn
+++ b/llvm/utils/gn/secondary/llvm/lib/Target/Hexagon/BUILD.gn
@@ -57,6 +57,7 @@ static_library("LLVMHexagonCodeGen") {
"HexagonFrameLowering.cpp",
"HexagonGenExtract.cpp",
"HexagonGenInsert.cpp",
+ "HexagonGenMemAbsolute.cpp",
"HexagonGenMux.cpp",
"HexagonGenPredicate.cpp",
"HexagonHardwareLoops.cpp",
@@ -74,6 +75,7 @@ static_library("LLVMHexagonCodeGen") {
"HexagonOptAddrMode.cpp",
"HexagonOptimizeSZextends.cpp",
"HexagonPeephole.cpp",
+ "HexagonPostIncOpt.cpp",
"HexagonRDFOpt.cpp",
"HexagonRegisterInfo.cpp",
"HexagonSelectionDAGInfo.cpp",
@@ -84,6 +86,7 @@ static_library("LLVMHexagonCodeGen") {
"HexagonTargetMachine.cpp",
"HexagonTargetObjectFile.cpp",
"HexagonTargetTransformInfo.cpp",
+ "HexagonTfrCleanup.cpp",
"HexagonVExtract.cpp",
"HexagonVLIWPacketizer.cpp",
"HexagonVectorCombine.cpp",
diff --git a/llvm/utils/gn/secondary/llvm/unittests/CodeGen/BUILD.gn b/llvm/utils/gn/secondary/llvm/unittests/CodeGen/BUILD.gn
index 8aff3e4d230e..df71d089ac0e 100644
--- a/llvm/utils/gn/secondary/llvm/unittests/CodeGen/BUILD.gn
+++ b/llvm/utils/gn/secondary/llvm/unittests/CodeGen/BUILD.gn
@@ -40,6 +40,7 @@ unittest("CodeGenTests") {
"ScalableVectorMVTsTest.cpp",
"SchedBoundary.cpp",
"SelectionDAGAddressAnalysisTest.cpp",
+ "SelectionDAGPatternMatchTest.cpp",
"TargetOptionsTest.cpp",
"TestAsmPrinter.cpp",
"TypeTraitsTest.cpp",
diff --git a/llvm/utils/release/build_llvm_release.bat b/llvm/utils/release/build_llvm_release.bat
index 67bb22de7606..dc63fdac1e4a 100755
--- a/llvm/utils/release/build_llvm_release.bat
+++ b/llvm/utils/release/build_llvm_release.bat
@@ -287,7 +287,16 @@ ninja check-sanitizer || ninja check-sanitizer || ninja check-sanitizer || exit
ninja check-clang-tools || ninja check-clang-tools || ninja check-clang-tools || exit /b 1
ninja check-clangd || ninja check-clangd || ninja check-clangd || exit /b 1
ninja package || exit /b 1
+
+:: Generate an additional tarball with LLVM_INSTALL_TOOLCHAIN_ONLY=OFF
+set filename=clang+llvm-%version%-x86_64-pc-windows-msvc
+cmake -GNinja %cmake_flags% %cmake_profile_flags% -DLLVM_INSTALL_TOOLCHAIN_ONLY=OFF ^
+ -DCMAKE_INSTALL_PREFIX=%build_dir%/%filename% ..\llvm-project\llvm || exit /b 1
+ninja install || exit /b 1
+:: check llvm-config is present and returns something
+%build_dir%/%filename%/bin/llvm-config.exe --bindir || exit /b 1
cd ..
+7z a -ttar -so %filename%.tar %filename% | 7z a -txz -si %filename%.tar.xz
exit /b 0
::==============================================================================
diff --git a/mlir/CMakeLists.txt b/mlir/CMakeLists.txt
index 2d9f78e03ba7..16c898bdeb6e 100644
--- a/mlir/CMakeLists.txt
+++ b/mlir/CMakeLists.txt
@@ -123,7 +123,6 @@ else()
endif()
add_definitions(-DMLIR_ROCM_CONVERSIONS_ENABLED=${MLIR_ENABLE_ROCM_CONVERSIONS})
-set(MLIR_ENABLE_DEPRECATED_GPU_SERIALIZATION 0 CACHE BOOL "Enable deprecated GPU serialization passes")
set(MLIR_ENABLE_CUDA_RUNNER 0 CACHE BOOL "Enable building the mlir CUDA runner")
set(MLIR_ENABLE_ROCM_RUNNER 0 CACHE BOOL "Enable building the mlir ROCm runner")
set(MLIR_ENABLE_SYCL_RUNNER 0 CACHE BOOL "Enable building the mlir Sycl runner")
diff --git a/mlir/include/mlir/Dialect/GPU/Transforms/Passes.h b/mlir/include/mlir/Dialect/GPU/Transforms/Passes.h
index 5885facd0754..8f7466a697d8 100644
--- a/mlir/include/mlir/Dialect/GPU/Transforms/Passes.h
+++ b/mlir/include/mlir/Dialect/GPU/Transforms/Passes.h
@@ -147,25 +147,11 @@ protected:
// Registration
//===----------------------------------------------------------------------===//
-/// Register pass to serialize GPU kernel functions to a CUBIN binary
-/// annotation.
-LLVM_DEPRECATED("use Target attributes instead", "")
-void registerGpuSerializeToCubinPass();
-
/// Register pass to serialize GPU kernel functions to a HSAco binary
/// annotation.
LLVM_DEPRECATED("use Target attributes instead", "")
void registerGpuSerializeToHsacoPass();
-/// Create an instance of the GPU kernel function to CUBIN binary serialization
-/// pass with optLevel (default level 2).
-LLVM_DEPRECATED("use Target attributes instead", "")
-std::unique_ptr<Pass> createGpuSerializeToCubinPass(StringRef triple,
- StringRef chip,
- StringRef features,
- int optLevel = 2,
- bool dumpPtx = false);
-
/// Create an instance of the GPU kernel function to HSAco binary serialization
/// pass.
LLVM_DEPRECATED("use Target attributes instead", "")
diff --git a/mlir/include/mlir/Dialect/LLVMIR/LLVMIntrinsicOps.td b/mlir/include/mlir/Dialect/LLVMIR/LLVMIntrinsicOps.td
index feb3578fe2d4..b88f1186a44b 100644
--- a/mlir/include/mlir/Dialect/LLVMIR/LLVMIntrinsicOps.td
+++ b/mlir/include/mlir/Dialect/LLVMIR/LLVMIntrinsicOps.td
@@ -513,7 +513,11 @@ def LLVM_DbgLabelOp : LLVM_IntrOp<"dbg.label", [], [], [], 0> {
});
}];
let mlirBuilder = [{
- $_op = $_builder.create<$_qualCppClassName>($_location, $_label_attr($label));
+ DILabelAttr labelAttr = $_label_attr($label);
+    // Drop the intrinsic if the label translation fails due to cyclic metadata.
+ if (!labelAttr)
+ return success();
+ $_op = $_builder.create<$_qualCppClassName>($_location, labelAttr);
}];
let assemblyFormat = "$label attr-dict";
}
diff --git a/mlir/include/mlir/Dialect/LLVMIR/LLVMOps.td b/mlir/include/mlir/Dialect/LLVMIR/LLVMOps.td
index d9b130bdf18c..3da5deeb4ec7 100644
--- a/mlir/include/mlir/Dialect/LLVMIR/LLVMOps.td
+++ b/mlir/include/mlir/Dialect/LLVMIR/LLVMOps.td
@@ -49,7 +49,7 @@ class LLVM_ArithmeticOpBase<Type type, string mnemonic,
}
class LLVM_IntArithmeticOp<string mnemonic, string instName,
list<Trait> traits = []> :
- LLVM_ArithmeticOpBase<AnyInteger, mnemonic, instName, traits> {
+ LLVM_ArithmeticOpBase<AnySignlessInteger, mnemonic, instName, traits> {
let arguments = commonArgs;
string mlirBuilder = [{
$res = $_builder.create<$_qualCppClassName>($_location, $lhs, $rhs);
@@ -57,7 +57,7 @@ class LLVM_IntArithmeticOp<string mnemonic, string instName,
}
class LLVM_IntArithmeticOpWithOverflowFlag<string mnemonic, string instName,
list<Trait> traits = []> :
- LLVM_ArithmeticOpBase<AnyInteger, mnemonic, instName,
+ LLVM_ArithmeticOpBase<AnySignlessInteger, mnemonic, instName,
!listconcat([DeclareOpInterfaceMethods<IntegerOverflowFlagsInterface>], traits)> {
dag iofArg = (
ins DefaultValuedAttr<LLVM_IntegerOverflowFlagsAttr, "{}">:$overflowFlags);
@@ -143,9 +143,9 @@ class LLVM_ArithmeticCmpOp<string mnemonic, list<Trait> traits = []> :
// Other integer operations.
def LLVM_ICmpOp : LLVM_ArithmeticCmpOp<"icmp", [Pure]> {
let arguments = (ins ICmpPredicate:$predicate,
- AnyTypeOf<[LLVM_ScalarOrVectorOf<AnyInteger>,
+ AnyTypeOf<[LLVM_ScalarOrVectorOf<AnySignlessInteger>,
LLVM_ScalarOrVectorOf<LLVM_AnyPointer>]>:$lhs,
- AnyTypeOf<[LLVM_ScalarOrVectorOf<AnyInteger>,
+ AnyTypeOf<[LLVM_ScalarOrVectorOf<AnySignlessInteger>,
LLVM_ScalarOrVectorOf<LLVM_AnyPointer>]>:$rhs);
let hasCustomAssemblyFormat = 1;
string llvmInstName = "ICmp";
@@ -204,7 +204,7 @@ def LLVM_AllocaOp : LLVM_Op<"alloca",
DeclareOpInterfaceMethods<DestructurableAllocationOpInterface>,
DeclareOpInterfaceMethods<GetResultPtrElementType>]>,
LLVM_MemOpPatterns {
- let arguments = (ins AnyInteger:$arraySize,
+ let arguments = (ins AnySignlessInteger:$arraySize,
OptionalAttr<I64Attr>:$alignment,
TypeAttr:$elem_type,
UnitAttr:$inalloca);
@@ -250,7 +250,7 @@ def LLVM_GEPOp : LLVM_Op<"getelementptr", [Pure,
DeclareOpInterfaceMethods<DestructurableAccessorOpInterface>,
DeclareOpInterfaceMethods<GetResultPtrElementType>]> {
let arguments = (ins LLVM_ScalarOrVectorOf<LLVM_AnyPointer>:$base,
- Variadic<LLVM_ScalarOrVectorOf<AnyInteger>>:$dynamicIndices,
+ Variadic<LLVM_ScalarOrVectorOf<AnySignlessInteger>>:$dynamicIndices,
DenseI32ArrayAttr:$rawConstantIndices,
TypeAttr:$elem_type,
UnitAttr:$inbounds);
@@ -499,37 +499,37 @@ def LLVM_AddrSpaceCastOp : LLVM_CastOp<"addrspacecast", "AddrSpaceCast",
let hasFolder = 1;
}
def LLVM_IntToPtrOp : LLVM_CastOp<"inttoptr", "IntToPtr",
- LLVM_ScalarOrVectorOf<AnyInteger>,
+ LLVM_ScalarOrVectorOf<AnySignlessInteger>,
LLVM_ScalarOrVectorOf<LLVM_AnyPointer>>;
def LLVM_PtrToIntOp : LLVM_CastOp<"ptrtoint", "PtrToInt",
LLVM_ScalarOrVectorOf<LLVM_AnyPointer>,
- LLVM_ScalarOrVectorOf<AnyInteger>>;
+ LLVM_ScalarOrVectorOf<AnySignlessInteger>>;
def LLVM_SExtOp : LLVM_CastOp<"sext", "SExt",
- LLVM_ScalarOrVectorOf<AnyInteger>,
- LLVM_ScalarOrVectorOf<AnyInteger>> {
+ LLVM_ScalarOrVectorOf<AnySignlessInteger>,
+ LLVM_ScalarOrVectorOf<AnySignlessInteger>> {
let hasVerifier = 1;
}
def LLVM_ZExtOp : LLVM_CastOp<"zext", "ZExt",
- LLVM_ScalarOrVectorOf<AnyInteger>,
- LLVM_ScalarOrVectorOf<AnyInteger>> {
+ LLVM_ScalarOrVectorOf<AnySignlessInteger>,
+ LLVM_ScalarOrVectorOf<AnySignlessInteger>> {
let hasFolder = 1;
let hasVerifier = 1;
}
def LLVM_TruncOp : LLVM_CastOp<"trunc", "Trunc",
- LLVM_ScalarOrVectorOf<AnyInteger>,
- LLVM_ScalarOrVectorOf<AnyInteger>>;
+ LLVM_ScalarOrVectorOf<AnySignlessInteger>,
+ LLVM_ScalarOrVectorOf<AnySignlessInteger>>;
def LLVM_SIToFPOp : LLVM_CastOp<"sitofp", "SIToFP",
- LLVM_ScalarOrVectorOf<AnyInteger>,
+ LLVM_ScalarOrVectorOf<AnySignlessInteger>,
LLVM_ScalarOrVectorOf<LLVM_AnyFloat>>;
def LLVM_UIToFPOp : LLVM_CastOp<"uitofp", "UIToFP",
- LLVM_ScalarOrVectorOf<AnyInteger>,
+ LLVM_ScalarOrVectorOf<AnySignlessInteger>,
LLVM_ScalarOrVectorOf<LLVM_AnyFloat>>;
def LLVM_FPToSIOp : LLVM_CastOp<"fptosi", "FPToSI",
LLVM_ScalarOrVectorOf<LLVM_AnyFloat>,
- LLVM_ScalarOrVectorOf<AnyInteger>>;
+ LLVM_ScalarOrVectorOf<AnySignlessInteger>>;
def LLVM_FPToUIOp : LLVM_CastOp<"fptoui", "FPToUI",
LLVM_ScalarOrVectorOf<LLVM_AnyFloat>,
- LLVM_ScalarOrVectorOf<AnyInteger>>;
+ LLVM_ScalarOrVectorOf<AnySignlessInteger>>;
def LLVM_FPExtOp : LLVM_CastOp<"fpext", "FPExt",
LLVM_ScalarOrVectorOf<LLVM_AnyFloat>,
LLVM_ScalarOrVectorOf<LLVM_AnyFloat>>;
@@ -671,7 +671,7 @@ def LLVM_ExtractElementOp : LLVM_Op<"extractelement", [Pure,
"LLVM::getVectorElementType($_self)">]> {
let summary = "Extract an element from an LLVM vector.";
- let arguments = (ins LLVM_AnyVector:$vector, AnyInteger:$position);
+ let arguments = (ins LLVM_AnyVector:$vector, AnySignlessInteger:$position);
let results = (outs LLVM_Type:$res);
let assemblyFormat = [{
@@ -733,7 +733,7 @@ def LLVM_InsertElementOp : LLVM_Op<"insertelement", [Pure,
let summary = "Insert an element into an LLVM vector.";
let arguments = (ins LLVM_AnyVector:$vector, LLVM_PrimitiveType:$value,
- AnyInteger:$position);
+ AnySignlessInteger:$position);
let results = (outs LLVM_AnyVector:$res);
let builders = [LLVM_OneResultOpBuilder];
@@ -971,7 +971,7 @@ def LLVM_SwitchOp : LLVM_TerminatorOp<"switch",
DeclareOpInterfaceMethods<BranchWeightOpInterface>,
Pure]> {
let arguments = (ins
- AnyInteger:$value,
+ AnySignlessInteger:$value,
Variadic<AnyType>:$defaultOperands,
VariadicOfVariadic<AnyType, "case_operand_segments">:$caseOperands,
OptionalAttr<AnyIntElementsAttr>:$case_values,
@@ -1647,7 +1647,7 @@ def LLVM_ConstantOp
// Atomic operations.
//
-def LLVM_AtomicRMWType : AnyTypeOf<[LLVM_AnyFloat, LLVM_AnyPointer, AnyInteger]>;
+def LLVM_AtomicRMWType : AnyTypeOf<[LLVM_AnyFloat, LLVM_AnyPointer, AnySignlessInteger]>;
def LLVM_AtomicRMWOp : LLVM_MemAccessOpBase<"atomicrmw", [
TypesMatchWith<"result #0 and operand #1 have the same type",
@@ -1696,7 +1696,7 @@ def LLVM_AtomicRMWOp : LLVM_MemAccessOpBase<"atomicrmw", [
let hasVerifier = 1;
}
-def LLVM_AtomicCmpXchgType : AnyTypeOf<[AnyInteger, LLVM_AnyPointer]>;
+def LLVM_AtomicCmpXchgType : AnyTypeOf<[AnySignlessInteger, LLVM_AnyPointer]>;
def LLVM_AtomicCmpXchgOp : LLVM_MemAccessOpBase<"cmpxchg", [
TypesMatchWith<"operand #1 and operand #2 have the same type",
diff --git a/mlir/include/mlir/Dialect/Math/Transforms/Passes.h b/mlir/include/mlir/Dialect/Math/Transforms/Passes.h
index 010dde5ea738..11b2c7a7afa2 100644
--- a/mlir/include/mlir/Dialect/Math/Transforms/Passes.h
+++ b/mlir/include/mlir/Dialect/Math/Transforms/Passes.h
@@ -45,6 +45,9 @@ struct MathPolynomialApproximationOptions {
bool enableAvx2 = false;
};
+void populatePolynomialApproximateTanhPattern(RewritePatternSet &patterns);
+void populatePolynomialApproximateErfPattern(RewritePatternSet &patterns);
+
void populateMathPolynomialApproximationPatterns(
RewritePatternSet &patterns,
const MathPolynomialApproximationOptions &options = {});
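The two new populate* declarations let a client opt into just the tanh or erf approximation rather than the whole polynomial-approximation set. A minimal sketch of one way a caller might use them; the helper function, the enclosing namespace assumption, and the choice of the greedy rewrite driver are assumptions, not part of this header change:

    // Sketch: apply only the tanh approximation patterns to an op.
    #include "mlir/Dialect/Math/Transforms/Passes.h"
    #include "mlir/IR/PatternMatch.h"
    #include "mlir/Transforms/GreedyPatternRewriteDriver.h"
    using namespace mlir;

    static LogicalResult approximateTanhOnly(Operation *op) {
      RewritePatternSet patterns(op->getContext());
      populatePolynomialApproximateTanhPattern(patterns);
      return applyPatternsAndFoldGreedily(op, std::move(patterns));
    }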
diff --git a/mlir/include/mlir/Dialect/SparseTensor/IR/Enums.h b/mlir/include/mlir/Dialect/SparseTensor/IR/Enums.h
index a00c9c31256c..1c81d80ea7ec 100644
--- a/mlir/include/mlir/Dialect/SparseTensor/IR/Enums.h
+++ b/mlir/include/mlir/Dialect/SparseTensor/IR/Enums.h
@@ -145,12 +145,9 @@ constexpr bool isComplexPrimaryType(PrimaryType valTy) {
/// The actions performed by @newSparseTensor.
enum class Action : uint32_t {
kEmpty = 0,
- kEmptyForward = 1,
- kFromCOO = 2,
- kFromReader = 4,
- kToCOO = 5,
- kPack = 7,
- kSortCOOInPlace = 8,
+ kFromReader = 1,
+ kPack = 2,
+ kSortCOOInPlace = 3,
};
 /// This enum defines all supported storage formats without the level properties.
diff --git a/mlir/include/mlir/Dialect/Tosa/IR/TosaOps.td b/mlir/include/mlir/Dialect/Tosa/IR/TosaOps.td
index 0ee9e713724e..0ecded75c5d8 100644
--- a/mlir/include/mlir/Dialect/Tosa/IR/TosaOps.td
+++ b/mlir/include/mlir/Dialect/Tosa/IR/TosaOps.td
@@ -990,6 +990,26 @@ def Tosa_ClzOp : Tosa_ElementwiseOp<"clz", [SameOperandsAndResultElementType]> {
}
//===----------------------------------------------------------------------===//
+// Operator: cos
+//===----------------------------------------------------------------------===//
+def Tosa_CosOp : Tosa_ElementwiseOp<"cos",
+ [SameOperandsAndResultElementType]> {
+ let summary = "Elementwise cos op";
+
+ let description = [{
+ Elementwise cosine operation for values given in radians.
+ }];
+
+ let arguments = (ins
+ Tosa_FloatTensor:$input
+ );
+
+ let results = (outs
+ Tosa_FloatTensor:$output
+ );
+}
+
+//===----------------------------------------------------------------------===//
// Operator: exp
//===----------------------------------------------------------------------===//
def Tosa_ExpOp : Tosa_ElementwiseOp<"exp", [SameOperandsAndResultElementType]> {
@@ -1149,6 +1169,26 @@ def Tosa_RsqrtOp : Tosa_ElementwiseOp<"rsqrt",
}
//===----------------------------------------------------------------------===//
+// Operator: sin
+//===----------------------------------------------------------------------===//
+def Tosa_SinOp : Tosa_ElementwiseOp<"sin",
+ [SameOperandsAndResultElementType]> {
+ let summary = "Elementwise sin op";
+
+ let description = [{
+ Elementwise sine operation for values given in radians.
+ }];
+
+ let arguments = (ins
+ Tosa_FloatTensor:$input
+ );
+
+ let results = (outs
+ Tosa_FloatTensor:$output
+ );
+}
+
+//===----------------------------------------------------------------------===//
// TOSA Spec Section 2.6
// Operator Class: Elementwise unary/binary/ternary operators.
// Operator Subclass: Elementwise ternary ops.
diff --git a/mlir/include/mlir/Dialect/Tosa/IR/TosaTypesBase.td b/mlir/include/mlir/Dialect/Tosa/IR/TosaTypesBase.td
index c55ddaafdda7..5a4d6ff464f1 100644
--- a/mlir/include/mlir/Dialect/Tosa/IR/TosaTypesBase.td
+++ b/mlir/include/mlir/Dialect/Tosa/IR/TosaTypesBase.td
@@ -113,6 +113,8 @@ def Tosa_Weight : AnyTypeOf<[Tosa_Int4, Tosa_Int8,
def Tosa_Int32Tensor : TensorOf<[Tosa_Int32]>;
def Tosa_Int32Or64Tensor : TensorOf<[Tosa_Int32Or64]>;
+def Tosa_FloatTensor : TensorOf<[Tosa_Float]>;
+
// Either ranked or unranked tensor of TOSA supported element types.
def Tosa_Tensor : TensorOf<[Tosa_AnyNumber]>;
def Tosa_Tensor_Plus_F64 : TensorOf<[Tosa_AnyNumber_Plus_F64]>;
diff --git a/mlir/include/mlir/Dialect/Utils/IndexingUtils.h b/mlir/include/mlir/Dialect/Utils/IndexingUtils.h
index 2453d841f633..9892253df2bf 100644
--- a/mlir/include/mlir/Dialect/Utils/IndexingUtils.h
+++ b/mlir/include/mlir/Dialect/Utils/IndexingUtils.h
@@ -257,6 +257,9 @@ SmallVector<int64_t> getI64SubArray(ArrayAttr arrayAttr, unsigned dropFront = 0,
std::pair<AffineExpr, SmallVector<OpFoldResult>>
computeLinearIndex(OpFoldResult sourceOffset, ArrayRef<OpFoldResult> strides,
ArrayRef<OpFoldResult> indices);
+std::pair<AffineExpr, SmallVector<OpFoldResult>>
+computeLinearIndex(OpFoldResult sourceOffset, ArrayRef<int64_t> strides,
+ ArrayRef<Value> indices);
//===----------------------------------------------------------------------===//
// Utilities for decomposing larger shapes
diff --git a/mlir/include/mlir/ExecutionEngine/SparseTensor/Storage.h b/mlir/include/mlir/ExecutionEngine/SparseTensor/Storage.h
index eff1aca42d3e..fe0e08b5c840 100644
--- a/mlir/include/mlir/ExecutionEngine/SparseTensor/Storage.h
+++ b/mlir/include/mlir/ExecutionEngine/SparseTensor/Storage.h
@@ -149,13 +149,6 @@ public:
MLIR_SPARSETENSOR_FOREVERY_V(DECL_GETVALUES)
#undef DECL_GETVALUES
- /// Element-wise forwarding insertions. The first argument is the
- /// dimension-coordinates for the value being inserted.
-#define DECL_FORWARDINGINSERT(VNAME, V) \
- virtual void forwardingInsert(const uint64_t *, V);
- MLIR_SPARSETENSOR_FOREVERY_V(DECL_FORWARDINGINSERT)
-#undef DECL_FORWARDINGINSERT
-
/// Element-wise insertion in lexicographic coordinate order. The first
/// argument is the level-coordinates for the value being inserted.
#define DECL_LEXINSERT(VNAME, V) virtual void lexInsert(const uint64_t *, V);
@@ -171,9 +164,6 @@ public:
MLIR_SPARSETENSOR_FOREVERY_V(DECL_EXPINSERT)
#undef DECL_EXPINSERT
- /// Finalizes forwarding insertions.
- virtual void endForwardingInsert() = 0;
-
/// Finalizes lexicographic insertions.
virtual void endLexInsert() = 0;
@@ -248,7 +238,7 @@ public:
static SparseTensorStorage<P, C, V> *
newEmpty(uint64_t dimRank, const uint64_t *dimSizes, uint64_t lvlRank,
const uint64_t *lvlSizes, const LevelType *lvlTypes,
- const uint64_t *dim2lvl, const uint64_t *lvl2dim, bool forwarding);
+ const uint64_t *dim2lvl, const uint64_t *lvl2dim);
/// Allocates a new sparse tensor and initializes it from the given COO.
static SparseTensorStorage<P, C, V> *
@@ -284,13 +274,6 @@ public:
*out = &values;
}
- /// Partially specialize forwarding insertions based on template types.
- void forwardingInsert(const uint64_t *dimCoords, V val) final {
- assert(dimCoords && coo);
- map.pushforward(dimCoords, lvlCursor.data());
- coo->add(lvlCursor, val);
- }
-
/// Partially specialize lexicographical insertions based on template types.
void lexInsert(const uint64_t *lvlCoords, V val) final {
assert(lvlCoords);
@@ -345,21 +328,6 @@ public:
}
}
- /// Finalizes forwarding insertions.
- void endForwardingInsert() final {
- // Ensure COO is sorted.
- assert(coo);
- coo->sort();
- // Now actually insert the `elements`.
- const auto &elements = coo->getElements();
- const uint64_t nse = elements.size();
- assert(values.size() == 0);
- values.reserve(nse);
- fromCOO(elements, 0, nse, 0);
- delete coo;
- coo = nullptr;
- }
-
/// Finalizes lexicographic insertions.
void endLexInsert() final {
if (!allDense) {
@@ -653,13 +621,10 @@ template <typename P, typename C, typename V>
SparseTensorStorage<P, C, V> *SparseTensorStorage<P, C, V>::newEmpty(
uint64_t dimRank, const uint64_t *dimSizes, uint64_t lvlRank,
const uint64_t *lvlSizes, const LevelType *lvlTypes,
- const uint64_t *dim2lvl, const uint64_t *lvl2dim, bool forwarding) {
- SparseTensorCOO<V> *lvlCOO = nullptr;
- if (forwarding)
- lvlCOO = new SparseTensorCOO<V>(lvlRank, lvlSizes);
+ const uint64_t *dim2lvl, const uint64_t *lvl2dim) {
return new SparseTensorStorage<P, C, V>(dimRank, dimSizes, lvlRank, lvlSizes,
- lvlTypes, dim2lvl, lvl2dim, lvlCOO,
- !forwarding);
+ lvlTypes, dim2lvl, lvl2dim, nullptr,
+ true);
}
template <typename P, typename C, typename V>
diff --git a/mlir/include/mlir/ExecutionEngine/SparseTensorRuntime.h b/mlir/include/mlir/ExecutionEngine/SparseTensorRuntime.h
index 8b0829aab0d8..d916186c835c 100644
--- a/mlir/include/mlir/ExecutionEngine/SparseTensorRuntime.h
+++ b/mlir/include/mlir/ExecutionEngine/SparseTensorRuntime.h
@@ -38,15 +38,12 @@ extern "C" {
/// This is the "swiss army knife" method for materializing sparse
/// tensors into the computation. The types of the `ptr` argument and
/// the result depend on the action, as explained in the following table,
-/// where "STS" means a sparse-tensor-storage object and "COO" means
-/// a coordinate-scheme object.
+/// where "STS" means a sparse-tensor-storage object.
///
/// Action: `ptr`: Returns:
+/// ---------------------------------------------------------------------------
/// kEmpty - STS, empty
-/// kEmptyForward - STS, empty, with forwarding COO
-/// kFromCOO COO STS, copied from the COO source
/// kFromReader reader STS, input from reader
-/// kToCOO STS COO, copied from the STS source
/// kPack buffers STS, from level buffers
/// kSortCOOInPlace STS STS, sorted in place
MLIR_CRUNNERUTILS_EXPORT void *_mlir_ciface_newSparseTensor( // NOLINT
@@ -80,14 +77,6 @@ MLIR_SPARSETENSOR_FOREVERY_O(DECL_SPARSEPOSITIONS)
MLIR_SPARSETENSOR_FOREVERY_O(DECL_SPARSECOORDINATES)
#undef DECL_SPARSECOORDINATES
-/// Tensor-storage method for a dim to lvl forwarding insertion.
-#define DECL_FORWARDINGINSERT(VNAME, V) \
- MLIR_CRUNNERUTILS_EXPORT void _mlir_ciface_forwardingInsert##VNAME( \
- void *tensor, StridedMemRefType<V, 0> *vref, \
- StridedMemRefType<index_type, 1> *dimCoordsRef); \
- MLIR_SPARSETENSOR_FOREVERY_V(DECL_FORWARDINGINSERT)
-#undef DECL_FORWARDINGINSERT
-
/// Tensor-storage method to insert elements in lexicographical
/// level-coordinate order.
#define DECL_LEXINSERT(VNAME, V) \
@@ -160,21 +149,12 @@ MLIR_CRUNNERUTILS_EXPORT index_type sparseLvlSize(void *tensor, index_type l);
/// Tensor-storage method to get the size of the given dimension.
MLIR_CRUNNERUTILS_EXPORT index_type sparseDimSize(void *tensor, index_type d);
-/// Tensor-storage method to finalize forwarding insertions.
-MLIR_CRUNNERUTILS_EXPORT void endForwardingInsert(void *tensor);
-
/// Tensor-storage method to finalize lexicographic insertions.
MLIR_CRUNNERUTILS_EXPORT void endLexInsert(void *tensor);
/// Releases the memory for the tensor-storage object.
MLIR_CRUNNERUTILS_EXPORT void delSparseTensor(void *tensor);
-/// Releases the memory for the coordinate-scheme object.
-#define DECL_DELCOO(VNAME, V) \
- MLIR_CRUNNERUTILS_EXPORT void delSparseTensorCOO##VNAME(void *coo);
-MLIR_SPARSETENSOR_FOREVERY_V(DECL_DELCOO)
-#undef DECL_DELCOO
-
/// Helper function to read a sparse tensor filename from the environment,
/// defined with the naming convention ${TENSOR0}, ${TENSOR1}, etc.
MLIR_CRUNNERUTILS_EXPORT char *getTensorFilename(index_type id);
diff --git a/mlir/include/mlir/Interfaces/FunctionInterfaces.td b/mlir/include/mlir/Interfaces/FunctionInterfaces.td
index 98e002565cf1..970a781c998b 100644
--- a/mlir/include/mlir/Interfaces/FunctionInterfaces.td
+++ b/mlir/include/mlir/Interfaces/FunctionInterfaces.td
@@ -147,12 +147,12 @@ def FunctionOpInterface : OpInterface<"FunctionOpInterface", [
}];
let extraSharedClassDeclaration = [{
/// Block list iterator types.
- using BlockListType = Region::BlockListType;
+ using BlockListType = ::mlir::Region::BlockListType;
using iterator = BlockListType::iterator;
using reverse_iterator = BlockListType::reverse_iterator;
/// Block argument iterator types.
- using BlockArgListType = Region::BlockArgListType;
+ using BlockArgListType = ::mlir::Region::BlockArgListType;
using args_iterator = BlockArgListType::iterator;
//===------------------------------------------------------------------===//
@@ -163,7 +163,7 @@ def FunctionOpInterface : OpInterface<"FunctionOpInterface", [
bool isExternal() { return empty(); }
/// Return the region containing the body of this function.
- Region &getFunctionBody() { return $_op->getRegion(0); }
+ ::mlir::Region &getFunctionBody() { return $_op->getRegion(0); }
/// Delete all blocks from this function.
void eraseBody() {
@@ -183,39 +183,39 @@ def FunctionOpInterface : OpInterface<"FunctionOpInterface", [
bool empty() { return getFunctionBody().empty(); }
/// Push a new block to the back of the body region.
- void push_back(Block *block) { getFunctionBody().push_back(block); }
+ void push_back(::mlir::Block *block) { getFunctionBody().push_back(block); }
/// Push a new block to the front of the body region.
- void push_front(Block *block) { getFunctionBody().push_front(block); }
+ void push_front(::mlir::Block *block) { getFunctionBody().push_front(block); }
/// Return the last block in the body region.
- Block &back() { return getFunctionBody().back(); }
+ ::mlir::Block &back() { return getFunctionBody().back(); }
/// Return the first block in the body region.
- Block &front() { return getFunctionBody().front(); }
+ ::mlir::Block &front() { return getFunctionBody().front(); }
/// Add an entry block to an empty function, and set up the block arguments
/// to match the signature of the function. The newly inserted entry block
/// is returned.
- Block *addEntryBlock() {
+ ::mlir::Block *addEntryBlock() {
assert(empty() && "function already has an entry block");
- Block *entry = new Block();
+ ::mlir::Block *entry = new ::mlir::Block();
push_back(entry);
// FIXME: Allow for passing in locations for these arguments instead of using
// the operations location.
- ArrayRef<Type> inputTypes = $_op.getArgumentTypes();
- SmallVector<Location> locations(inputTypes.size(),
- $_op.getOperation()->getLoc());
+ ::llvm::ArrayRef<::mlir::Type> inputTypes = $_op.getArgumentTypes();
+ ::llvm::SmallVector<::mlir::Location> locations(inputTypes.size(),
+ $_op.getOperation()->getLoc());
entry->addArguments(inputTypes, locations);
return entry;
}
/// Add a normal block to the end of the function's block list. The function
/// should at least already have an entry block.
- Block *addBlock() {
+ ::mlir::Block *addBlock() {
assert(!empty() && "function should at least have an entry block");
- push_back(new Block());
+ push_back(new ::mlir::Block());
return &back();
}
@@ -230,8 +230,8 @@ def FunctionOpInterface : OpInterface<"FunctionOpInterface", [
/// - the argument/result attributes may need an update: if the new type
/// has less parameters we drop the extra attributes, if there are more
/// parameters they won't have any attributes.
- void setType(Type newType) {
- function_interface_impl::setFunctionType($_op, newType);
+ void setType(::mlir::Type newType) {
+ ::mlir::function_interface_impl::setFunctionType($_op, newType);
}
//===------------------------------------------------------------------===//
@@ -245,7 +245,7 @@ def FunctionOpInterface : OpInterface<"FunctionOpInterface", [
unsigned getNumResults() { return $_op.getResultTypes().size(); }
/// Returns the entry block function argument at the given index.
- BlockArgument getArgument(unsigned idx) {
+ ::mlir::BlockArgument getArgument(unsigned idx) {
return getFunctionBody().getArgument(idx);
}
@@ -256,8 +256,8 @@ def FunctionOpInterface : OpInterface<"FunctionOpInterface", [
/// Insert a single argument of type `argType` with attributes `argAttrs` and
/// location `argLoc` at `argIndex`.
- void insertArgument(unsigned argIndex, Type argType, DictionaryAttr argAttrs,
- Location argLoc) {
+ void insertArgument(unsigned argIndex, ::mlir::Type argType, ::mlir::DictionaryAttr argAttrs,
+ ::mlir::Location argLoc) {
insertArguments({argIndex}, {argType}, {argAttrs}, {argLoc});
}
@@ -265,20 +265,20 @@ def FunctionOpInterface : OpInterface<"FunctionOpInterface", [
/// listed indices. `argIndices` must be sorted. Arguments are inserted in the
/// order they are listed, such that arguments with identical index will
/// appear in the same order that they were listed here.
- void insertArguments(ArrayRef<unsigned> argIndices, TypeRange argTypes,
- ArrayRef<DictionaryAttr> argAttrs,
- ArrayRef<Location> argLocs) {
+ void insertArguments(::llvm::ArrayRef<unsigned> argIndices, ::mlir::TypeRange argTypes,
+ ::llvm::ArrayRef<::mlir::DictionaryAttr> argAttrs,
+ ::llvm::ArrayRef<::mlir::Location> argLocs) {
unsigned originalNumArgs = $_op.getNumArguments();
- Type newType = $_op.getTypeWithArgsAndResults(
+ ::mlir::Type newType = $_op.getTypeWithArgsAndResults(
argIndices, argTypes, /*resultIndices=*/{}, /*resultTypes=*/{});
- function_interface_impl::insertFunctionArguments(
+ ::mlir::function_interface_impl::insertFunctionArguments(
$_op, argIndices, argTypes, argAttrs, argLocs,
originalNumArgs, newType);
}
/// Insert a single result of type `resultType` at `resultIndex`.
- void insertResult(unsigned resultIndex, Type resultType,
- DictionaryAttr resultAttrs) {
+ void insertResult(unsigned resultIndex, ::mlir::Type resultType,
+ ::mlir::DictionaryAttr resultAttrs) {
insertResults({resultIndex}, {resultType}, {resultAttrs});
}
@@ -286,41 +286,41 @@ def FunctionOpInterface : OpInterface<"FunctionOpInterface", [
/// `resultIndices` must be sorted. Results are inserted in the order they are
/// listed, such that results with identical index will appear in the same
/// order that they were listed here.
- void insertResults(ArrayRef<unsigned> resultIndices, TypeRange resultTypes,
- ArrayRef<DictionaryAttr> resultAttrs) {
+ void insertResults(::llvm::ArrayRef<unsigned> resultIndices, ::mlir::TypeRange resultTypes,
+ ::llvm::ArrayRef<::mlir::DictionaryAttr> resultAttrs) {
unsigned originalNumResults = $_op.getNumResults();
- Type newType = $_op.getTypeWithArgsAndResults(
+ ::mlir::Type newType = $_op.getTypeWithArgsAndResults(
/*argIndices=*/{}, /*argTypes=*/{}, resultIndices, resultTypes);
- function_interface_impl::insertFunctionResults(
+ ::mlir::function_interface_impl::insertFunctionResults(
$_op, resultIndices, resultTypes, resultAttrs,
originalNumResults, newType);
}
/// Erase a single argument at `argIndex`.
void eraseArgument(unsigned argIndex) {
- BitVector argsToErase($_op.getNumArguments());
+ ::llvm::BitVector argsToErase($_op.getNumArguments());
argsToErase.set(argIndex);
eraseArguments(argsToErase);
}
/// Erases the arguments listed in `argIndices`.
- void eraseArguments(const BitVector &argIndices) {
- Type newType = $_op.getTypeWithoutArgs(argIndices);
- function_interface_impl::eraseFunctionArguments(
+ void eraseArguments(const ::llvm::BitVector &argIndices) {
+ ::mlir::Type newType = $_op.getTypeWithoutArgs(argIndices);
+ ::mlir::function_interface_impl::eraseFunctionArguments(
$_op, argIndices, newType);
}
/// Erase a single result at `resultIndex`.
void eraseResult(unsigned resultIndex) {
- BitVector resultsToErase($_op.getNumResults());
+ ::llvm::BitVector resultsToErase($_op.getNumResults());
resultsToErase.set(resultIndex);
eraseResults(resultsToErase);
}
/// Erases the results listed in `resultIndices`.
- void eraseResults(const BitVector &resultIndices) {
- Type newType = $_op.getTypeWithoutResults(resultIndices);
- function_interface_impl::eraseFunctionResults(
+ void eraseResults(const ::llvm::BitVector &resultIndices) {
+ ::mlir::Type newType = $_op.getTypeWithoutResults(resultIndices);
+ ::mlir::function_interface_impl::eraseFunctionResults(
$_op, resultIndices, newType);
}
@@ -328,13 +328,13 @@ def FunctionOpInterface : OpInterface<"FunctionOpInterface", [
/// results inserted. This is used to update the function's signature in
/// the `insertArguments` and `insertResults` methods. The arrays must be
/// sorted by increasing index.
- Type getTypeWithArgsAndResults(
- ArrayRef<unsigned> argIndices, TypeRange argTypes,
- ArrayRef<unsigned> resultIndices, TypeRange resultTypes) {
- SmallVector<Type> argStorage, resultStorage;
- TypeRange newArgTypes = insertTypesInto(
+ ::mlir::Type getTypeWithArgsAndResults(
+ ::llvm::ArrayRef<unsigned> argIndices, ::mlir::TypeRange argTypes,
+ ::llvm::ArrayRef<unsigned> resultIndices, ::mlir::TypeRange resultTypes) {
+ ::llvm::SmallVector<::mlir::Type> argStorage, resultStorage;
+ ::mlir::TypeRange newArgTypes = insertTypesInto(
$_op.getArgumentTypes(), argIndices, argTypes, argStorage);
- TypeRange newResultTypes = insertTypesInto(
+ ::mlir::TypeRange newResultTypes = insertTypesInto(
$_op.getResultTypes(), resultIndices, resultTypes, resultStorage);
return $_op.cloneTypeWith(newArgTypes, newResultTypes);
}
@@ -342,24 +342,24 @@ def FunctionOpInterface : OpInterface<"FunctionOpInterface", [
/// Return the type of this function without the specified arguments and
/// results. This is used to update the function's signature in the
/// `eraseArguments` and `eraseResults` methods.
- Type getTypeWithoutArgsAndResults(
- const BitVector &argIndices, const BitVector &resultIndices) {
- SmallVector<Type> argStorage, resultStorage;
- TypeRange newArgTypes = filterTypesOut(
+ ::mlir::Type getTypeWithoutArgsAndResults(
+ const ::llvm::BitVector &argIndices, const ::llvm::BitVector &resultIndices) {
+ ::llvm::SmallVector<::mlir::Type> argStorage, resultStorage;
+ ::mlir::TypeRange newArgTypes = filterTypesOut(
$_op.getArgumentTypes(), argIndices, argStorage);
- TypeRange newResultTypes = filterTypesOut(
+ ::mlir::TypeRange newResultTypes = filterTypesOut(
$_op.getResultTypes(), resultIndices, resultStorage);
return $_op.cloneTypeWith(newArgTypes, newResultTypes);
}
- Type getTypeWithoutArgs(const BitVector &argIndices) {
- SmallVector<Type> argStorage;
- TypeRange newArgTypes = filterTypesOut(
+ ::mlir::Type getTypeWithoutArgs(const ::llvm::BitVector &argIndices) {
+ ::llvm::SmallVector<::mlir::Type> argStorage;
+ ::mlir::TypeRange newArgTypes = filterTypesOut(
$_op.getArgumentTypes(), argIndices, argStorage);
return $_op.cloneTypeWith(newArgTypes, $_op.getResultTypes());
}
- Type getTypeWithoutResults(const BitVector &resultIndices) {
- SmallVector<Type> resultStorage;
- TypeRange newResultTypes = filterTypesOut(
+ ::mlir::Type getTypeWithoutResults(const ::llvm::BitVector &resultIndices) {
+ ::llvm::SmallVector<::mlir::Type> resultStorage;
+ ::mlir::TypeRange newResultTypes = filterTypesOut(
$_op.getResultTypes(), resultIndices, resultStorage);
return $_op.cloneTypeWith($_op.getArgumentTypes(), newResultTypes);
}
@@ -369,88 +369,88 @@ def FunctionOpInterface : OpInterface<"FunctionOpInterface", [
//===------------------------------------------------------------------===//
/// Return all of the attributes for the argument at 'index'.
- ArrayRef<NamedAttribute> getArgAttrs(unsigned index) {
- return function_interface_impl::getArgAttrs($_op, index);
+ ::llvm::ArrayRef<::mlir::NamedAttribute> getArgAttrs(unsigned index) {
+ return ::mlir::function_interface_impl::getArgAttrs($_op, index);
}
/// Return an ArrayAttr containing all argument attribute dictionaries of
/// this function, or nullptr if no arguments have attributes.
- ArrayAttr getAllArgAttrs() { return $_op.getArgAttrsAttr(); }
+ ::mlir::ArrayAttr getAllArgAttrs() { return $_op.getArgAttrsAttr(); }
/// Return all argument attributes of this function.
- void getAllArgAttrs(SmallVectorImpl<DictionaryAttr> &result) {
- if (ArrayAttr argAttrs = getAllArgAttrs()) {
- auto argAttrRange = argAttrs.template getAsRange<DictionaryAttr>();
+ void getAllArgAttrs(::llvm::SmallVectorImpl<::mlir::DictionaryAttr> &result) {
+ if (::mlir::ArrayAttr argAttrs = getAllArgAttrs()) {
+ auto argAttrRange = argAttrs.template getAsRange<::mlir::DictionaryAttr>();
result.append(argAttrRange.begin(), argAttrRange.end());
} else {
result.append($_op.getNumArguments(),
- DictionaryAttr::get(this->getOperation()->getContext()));
+ ::mlir::DictionaryAttr::get(this->getOperation()->getContext()));
}
}
/// Return the specified attribute, if present, for the argument at 'index',
/// null otherwise.
- Attribute getArgAttr(unsigned index, StringAttr name) {
+ ::mlir::Attribute getArgAttr(unsigned index, ::mlir::StringAttr name) {
auto argDict = getArgAttrDict(index);
return argDict ? argDict.get(name) : nullptr;
}
- Attribute getArgAttr(unsigned index, StringRef name) {
+ ::mlir::Attribute getArgAttr(unsigned index, ::llvm::StringRef name) {
auto argDict = getArgAttrDict(index);
return argDict ? argDict.get(name) : nullptr;
}
template <typename AttrClass>
- AttrClass getArgAttrOfType(unsigned index, StringAttr name) {
+ AttrClass getArgAttrOfType(unsigned index, ::mlir::StringAttr name) {
return ::llvm::dyn_cast_or_null<AttrClass>(getArgAttr(index, name));
}
template <typename AttrClass>
- AttrClass getArgAttrOfType(unsigned index, StringRef name) {
+ AttrClass getArgAttrOfType(unsigned index, ::llvm::StringRef name) {
return ::llvm::dyn_cast_or_null<AttrClass>(getArgAttr(index, name));
}
/// Set the attributes held by the argument at 'index'.
- void setArgAttrs(unsigned index, ArrayRef<NamedAttribute> attributes) {
- function_interface_impl::setArgAttrs($_op, index, attributes);
+ void setArgAttrs(unsigned index, ::llvm::ArrayRef<::mlir::NamedAttribute> attributes) {
+ ::mlir::function_interface_impl::setArgAttrs($_op, index, attributes);
}
/// Set the attributes held by the argument at 'index'. `attributes` may be
/// null, in which case any existing argument attributes are removed.
- void setArgAttrs(unsigned index, DictionaryAttr attributes) {
- function_interface_impl::setArgAttrs($_op, index, attributes);
+ void setArgAttrs(unsigned index, ::mlir::DictionaryAttr attributes) {
+ ::mlir::function_interface_impl::setArgAttrs($_op, index, attributes);
}
- void setAllArgAttrs(ArrayRef<DictionaryAttr> attributes) {
+ void setAllArgAttrs(::llvm::ArrayRef<::mlir::DictionaryAttr> attributes) {
assert(attributes.size() == $_op.getNumArguments());
- function_interface_impl::setAllArgAttrDicts($_op, attributes);
+ ::mlir::function_interface_impl::setAllArgAttrDicts($_op, attributes);
}
- void setAllArgAttrs(ArrayRef<Attribute> attributes) {
+ void setAllArgAttrs(::llvm::ArrayRef<::mlir::Attribute> attributes) {
assert(attributes.size() == $_op.getNumArguments());
- function_interface_impl::setAllArgAttrDicts($_op, attributes);
+ ::mlir::function_interface_impl::setAllArgAttrDicts($_op, attributes);
}
- void setAllArgAttrs(ArrayAttr attributes) {
+ void setAllArgAttrs(::mlir::ArrayAttr attributes) {
assert(attributes.size() == $_op.getNumArguments());
$_op.setArgAttrsAttr(attributes);
}
    /// If an attribute exists with the specified name, change it to the new
/// value. Otherwise, add a new attribute with the specified name/value.
- void setArgAttr(unsigned index, StringAttr name, Attribute value) {
- function_interface_impl::setArgAttr($_op, index, name, value);
+ void setArgAttr(unsigned index, ::mlir::StringAttr name, ::mlir::Attribute value) {
+ ::mlir::function_interface_impl::setArgAttr($_op, index, name, value);
}
- void setArgAttr(unsigned index, StringRef name, Attribute value) {
+ void setArgAttr(unsigned index, ::llvm::StringRef name, ::mlir::Attribute value) {
setArgAttr(index,
- StringAttr::get(this->getOperation()->getContext(), name),
+ ::mlir::StringAttr::get(this->getOperation()->getContext(), name),
value);
}
/// Remove the attribute 'name' from the argument at 'index'. Return the
/// attribute that was erased, or nullptr if there was no attribute with
/// such name.
- Attribute removeArgAttr(unsigned index, StringAttr name) {
- return function_interface_impl::removeArgAttr($_op, index, name);
+ ::mlir::Attribute removeArgAttr(unsigned index, ::mlir::StringAttr name) {
+ return ::mlir::function_interface_impl::removeArgAttr($_op, index, name);
}
- Attribute removeArgAttr(unsigned index, StringRef name) {
+ ::mlir::Attribute removeArgAttr(unsigned index, ::llvm::StringRef name) {
return removeArgAttr(
- index, StringAttr::get(this->getOperation()->getContext(), name));
+ index, ::mlir::StringAttr::get(this->getOperation()->getContext(), name));
}
//===------------------------------------------------------------------===//
@@ -458,102 +458,102 @@ def FunctionOpInterface : OpInterface<"FunctionOpInterface", [
//===------------------------------------------------------------------===//
/// Return all of the attributes for the result at 'index'.
- ArrayRef<NamedAttribute> getResultAttrs(unsigned index) {
- return function_interface_impl::getResultAttrs($_op, index);
+ ::llvm::ArrayRef<::mlir::NamedAttribute> getResultAttrs(unsigned index) {
+ return ::mlir::function_interface_impl::getResultAttrs($_op, index);
}
/// Return an ArrayAttr containing all result attribute dictionaries of this
    /// function, or nullptr if no results have attributes.
- ArrayAttr getAllResultAttrs() { return $_op.getResAttrsAttr(); }
+ ::mlir::ArrayAttr getAllResultAttrs() { return $_op.getResAttrsAttr(); }
/// Return all result attributes of this function.
- void getAllResultAttrs(SmallVectorImpl<DictionaryAttr> &result) {
- if (ArrayAttr argAttrs = getAllResultAttrs()) {
- auto argAttrRange = argAttrs.template getAsRange<DictionaryAttr>();
+ void getAllResultAttrs(::llvm::SmallVectorImpl<::mlir::DictionaryAttr> &result) {
+ if (::mlir::ArrayAttr argAttrs = getAllResultAttrs()) {
+ auto argAttrRange = argAttrs.template getAsRange<::mlir::DictionaryAttr>();
result.append(argAttrRange.begin(), argAttrRange.end());
} else {
result.append($_op.getNumResults(),
- DictionaryAttr::get(this->getOperation()->getContext()));
+ ::mlir::DictionaryAttr::get(this->getOperation()->getContext()));
}
}
/// Return the specified attribute, if present, for the result at 'index',
/// null otherwise.
- Attribute getResultAttr(unsigned index, StringAttr name) {
+ ::mlir::Attribute getResultAttr(unsigned index, ::mlir::StringAttr name) {
auto argDict = getResultAttrDict(index);
return argDict ? argDict.get(name) : nullptr;
}
- Attribute getResultAttr(unsigned index, StringRef name) {
+ ::mlir::Attribute getResultAttr(unsigned index, ::llvm::StringRef name) {
auto argDict = getResultAttrDict(index);
return argDict ? argDict.get(name) : nullptr;
}
template <typename AttrClass>
- AttrClass getResultAttrOfType(unsigned index, StringAttr name) {
+ AttrClass getResultAttrOfType(unsigned index, ::mlir::StringAttr name) {
return ::llvm::dyn_cast_or_null<AttrClass>(getResultAttr(index, name));
}
template <typename AttrClass>
- AttrClass getResultAttrOfType(unsigned index, StringRef name) {
+ AttrClass getResultAttrOfType(unsigned index, ::llvm::StringRef name) {
return ::llvm::dyn_cast_or_null<AttrClass>(getResultAttr(index, name));
}
/// Set the attributes held by the result at 'index'.
- void setResultAttrs(unsigned index, ArrayRef<NamedAttribute> attributes) {
- function_interface_impl::setResultAttrs($_op, index, attributes);
+ void setResultAttrs(unsigned index, ::llvm::ArrayRef<::mlir::NamedAttribute> attributes) {
+ ::mlir::function_interface_impl::setResultAttrs($_op, index, attributes);
}
/// Set the attributes held by the result at 'index'. `attributes` may be
/// null, in which case any existing argument attributes are removed.
- void setResultAttrs(unsigned index, DictionaryAttr attributes) {
- function_interface_impl::setResultAttrs($_op, index, attributes);
+ void setResultAttrs(unsigned index, ::mlir::DictionaryAttr attributes) {
+ ::mlir::function_interface_impl::setResultAttrs($_op, index, attributes);
}
- void setAllResultAttrs(ArrayRef<DictionaryAttr> attributes) {
+ void setAllResultAttrs(::llvm::ArrayRef<::mlir::DictionaryAttr> attributes) {
assert(attributes.size() == $_op.getNumResults());
- function_interface_impl::setAllResultAttrDicts(
+ ::mlir::function_interface_impl::setAllResultAttrDicts(
$_op, attributes);
}
- void setAllResultAttrs(ArrayRef<Attribute> attributes) {
+ void setAllResultAttrs(::llvm::ArrayRef<::mlir::Attribute> attributes) {
assert(attributes.size() == $_op.getNumResults());
- function_interface_impl::setAllResultAttrDicts(
+ ::mlir::function_interface_impl::setAllResultAttrDicts(
$_op, attributes);
}
- void setAllResultAttrs(ArrayAttr attributes) {
+ void setAllResultAttrs(::mlir::ArrayAttr attributes) {
assert(attributes.size() == $_op.getNumResults());
$_op.setResAttrsAttr(attributes);
}
/// If an attribute exists with the specified name, change it to the new
/// value. Otherwise, add a new attribute with the specified name/value.
- void setResultAttr(unsigned index, StringAttr name, Attribute value) {
- function_interface_impl::setResultAttr($_op, index, name, value);
+ void setResultAttr(unsigned index, ::mlir::StringAttr name, ::mlir::Attribute value) {
+ ::mlir::function_interface_impl::setResultAttr($_op, index, name, value);
}
- void setResultAttr(unsigned index, StringRef name, Attribute value) {
+ void setResultAttr(unsigned index, ::llvm::StringRef name, ::mlir::Attribute value) {
setResultAttr(index,
- StringAttr::get(this->getOperation()->getContext(), name),
+ ::mlir::StringAttr::get(this->getOperation()->getContext(), name),
value);
}
/// Remove the attribute 'name' from the result at 'index'. Return the
/// attribute that was erased, or nullptr if there was no attribute with
/// such a name.
- Attribute removeResultAttr(unsigned index, StringAttr name) {
- return function_interface_impl::removeResultAttr($_op, index, name);
+ ::mlir::Attribute removeResultAttr(unsigned index, ::mlir::StringAttr name) {
+ return ::mlir::function_interface_impl::removeResultAttr($_op, index, name);
}
/// Returns the dictionary attribute corresponding to the argument at
/// 'index'. If there are no argument attributes at 'index', a null
/// attribute is returned.
- DictionaryAttr getArgAttrDict(unsigned index) {
+ ::mlir::DictionaryAttr getArgAttrDict(unsigned index) {
assert(index < $_op.getNumArguments() && "invalid argument number");
- return function_interface_impl::getArgAttrDict($_op, index);
+ return ::mlir::function_interface_impl::getArgAttrDict($_op, index);
}
/// Returns the dictionary attribute corresponding to the result at 'index'.
/// If there are no result attributes at 'index', a null attribute is
/// returned.
- DictionaryAttr getResultAttrDict(unsigned index) {
+ ::mlir::DictionaryAttr getResultAttrDict(unsigned index) {
assert(index < $_op.getNumResults() && "invalid result number");
- return function_interface_impl::getResultAttrDict($_op, index);
+ return ::mlir::function_interface_impl::getResultAttrDict($_op, index);
}
}];
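
The hunks above rewrite the generated result-attribute helpers of the function interface to spell out every type as ::mlir::... or ::llvm::.... The reason is that this TableGen body is pasted verbatim into generated headers that downstream dialects may include from inside their own namespaces, where unqualified names such as ArrayAttr or StringRef do not resolve. A minimal standalone C++ sketch of the lookup problem (the stand-in ArrayAttr type and the mydialect namespace are illustrative, not MLIR itself):

#include <cstdio>

namespace mlir { struct ArrayAttr { int size = 0; }; } // illustrative stand-in

namespace mydialect {
// Generated interface code can land inside a namespace like this one. A bare
// `ArrayAttr` would not name ::mlir::ArrayAttr here, so the generated snippet
// must use the fully qualified spelling to stay valid wherever it is included.
inline ::mlir::ArrayAttr makeEmptyAttrList() { return ::mlir::ArrayAttr{}; }
} // namespace mydialect

int main() { std::printf("%d\n", mydialect::makeEmptyAttrList().size); return 0; }
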
diff --git a/mlir/include/mlir/Transforms/DialectConversion.h b/mlir/include/mlir/Transforms/DialectConversion.h
index 0d7722aa07ee..7e8e67a9d178 100644
--- a/mlir/include/mlir/Transforms/DialectConversion.h
+++ b/mlir/include/mlir/Transforms/DialectConversion.h
@@ -24,9 +24,11 @@ namespace mlir {
// Forward declarations.
class Attribute;
class Block;
+struct ConversionConfig;
class ConversionPatternRewriter;
class MLIRContext;
class Operation;
+struct OperationConverter;
class Type;
class Value;
@@ -657,12 +659,13 @@ struct ConversionPatternRewriterImpl;
/// hooks.
class ConversionPatternRewriter final : public PatternRewriter {
public:
- explicit ConversionPatternRewriter(MLIRContext *ctx);
~ConversionPatternRewriter() override;
/// Apply a signature conversion to the entry block of the given region. This
/// replaces the entry block with a new block containing the updated
/// signature. The new entry block to the region is returned for convenience.
+ /// If no block argument types are changing, the original entry block will be
+ /// left in place and returned.
///
/// If provided, `converter` will be used for any materializations.
Block *
@@ -671,8 +674,11 @@ public:
const TypeConverter *converter = nullptr);
/// Convert the types of block arguments within the given region. This
- /// replaces each block with a new block containing the updated signature. The
- /// entry block may have a special conversion if `entryConversion` is
+ /// replaces each block with a new block containing the updated signature. If
+ /// an updated signature would match the current signature, the respective
+ /// block is left in place as is.
+ ///
+ /// The entry block may have a special conversion if `entryConversion` is
/// provided. On success, the new entry block to the region is returned for
/// convenience. Otherwise, failure is returned.
FailureOr<Block *> convertRegionTypes(
@@ -681,7 +687,8 @@ public:
/// Convert the types of block arguments within the given region except for
/// the entry region. This replaces each non-entry block with a new block
- /// containing the updated signature.
+ /// containing the updated signature. If an updated signature would match the
+ /// current signature, the respective block is left in place as is.
///
/// If special conversion behavior is needed for the non-entry blocks (for
/// example, we need to convert only a subset of a BB arguments), such
@@ -758,6 +765,15 @@ public:
detail::ConversionPatternRewriterImpl &getImpl();
private:
+ // Allow OperationConverter to construct new rewriters.
+ friend struct OperationConverter;
+
+ /// Conversion pattern rewriters must not be used outside of dialect
+ /// conversions. They apply some IR rewrites in a delayed fashion and could
+ /// bring the IR into an inconsistent state when used standalone.
+ explicit ConversionPatternRewriter(MLIRContext *ctx,
+ const ConversionConfig &config);
+
// Hide unsupported pattern rewriter API.
using OpBuilder::setListener;
@@ -1057,6 +1073,30 @@ public:
#endif // MLIR_ENABLE_PDL_IN_PATTERNMATCH
//===----------------------------------------------------------------------===//
+// ConversionConfig
+//===----------------------------------------------------------------------===//
+
+/// Dialect conversion configuration.
+struct ConversionConfig {
+ /// An optional callback used to notify about match failure diagnostics during
+ /// the conversion. Diagnostics reported to this callback may only be
+ /// available in debug mode.
+ function_ref<void(Diagnostic &)> notifyCallback = nullptr;
+
+ /// Partial conversion only. All operations that are found not to be
+ /// legalizable are placed in this set. (Note that if there is an op
+ /// explicitly marked as illegal, the conversion terminates and the set will
+ /// not necessarily be complete.)
+ DenseSet<Operation *> *unlegalizedOps = nullptr;
+
+ /// Analysis conversion only. All operations that are found to be legalizable
+ /// are placed in this set. Note that no actual rewrites are applied to the
+ /// IR during an analysis conversion and only pre-existing operations are
+ /// added to the set.
+ DenseSet<Operation *> *legalizableOps = nullptr;
+};
+
+//===----------------------------------------------------------------------===//
// Op Conversion Entry Points
//===----------------------------------------------------------------------===//
@@ -1069,19 +1109,16 @@ public:
/// Apply a partial conversion on the given operations and all nested
/// operations. This method converts as many operations to the target as
/// possible, ignoring operations that failed to legalize. This method only
-/// returns failure if there ops explicitly marked as illegal. If an
-/// `unconvertedOps` set is provided, all operations that are found not to be
-/// legalizable to the given `target` are placed within that set. (Note that if
-/// there is an op explicitly marked as illegal, the conversion terminates and
-/// the `unconvertedOps` set will not necessarily be complete.)
+/// returns failure if there are ops explicitly marked as illegal.
LogicalResult
-applyPartialConversion(ArrayRef<Operation *> ops, const ConversionTarget &target,
+applyPartialConversion(ArrayRef<Operation *> ops,
+ const ConversionTarget &target,
const FrozenRewritePatternSet &patterns,
- DenseSet<Operation *> *unconvertedOps = nullptr);
+ ConversionConfig config = ConversionConfig());
LogicalResult
applyPartialConversion(Operation *op, const ConversionTarget &target,
const FrozenRewritePatternSet &patterns,
- DenseSet<Operation *> *unconvertedOps = nullptr);
+ ConversionConfig config = ConversionConfig());
/// Apply a complete conversion on the given operations, and all nested
/// operations. This method returns failure if the conversion of any operation
@@ -1089,31 +1126,27 @@ applyPartialConversion(Operation *op, const ConversionTarget &target,
/// within 'ops'.
LogicalResult applyFullConversion(ArrayRef<Operation *> ops,
const ConversionTarget &target,
- const FrozenRewritePatternSet &patterns);
+ const FrozenRewritePatternSet &patterns,
+ ConversionConfig config = ConversionConfig());
LogicalResult applyFullConversion(Operation *op, const ConversionTarget &target,
- const FrozenRewritePatternSet &patterns);
+ const FrozenRewritePatternSet &patterns,
+ ConversionConfig config = ConversionConfig());
/// Apply an analysis conversion on the given operations, and all nested
/// operations. This method analyzes which operations would be successfully
/// converted to the target if a conversion was applied. All operations that
/// were found to be legalizable to the given 'target' are placed within the
-/// provided 'convertedOps' set; note that no actual rewrites are applied to the
-/// operations on success and only pre-existing operations are added to the set.
-/// This method only returns failure if there are unreachable blocks in any of
-/// the regions nested within 'ops'. There's an additional argument
-/// `notifyCallback` which is used for collecting match failure diagnostics
-/// generated during the conversion. Diagnostics are only reported to this
-/// callback may only be available in debug mode.
-LogicalResult applyAnalysisConversion(
- ArrayRef<Operation *> ops, ConversionTarget &target,
- const FrozenRewritePatternSet &patterns,
- DenseSet<Operation *> &convertedOps,
- function_ref<void(Diagnostic &)> notifyCallback = nullptr);
-LogicalResult applyAnalysisConversion(
- Operation *op, ConversionTarget &target,
- const FrozenRewritePatternSet &patterns,
- DenseSet<Operation *> &convertedOps,
- function_ref<void(Diagnostic &)> notifyCallback = nullptr);
+/// provided 'config.legalizableOps' set; note that no actual rewrites are
+/// applied to the operations on success. This method only returns failure if
+/// there are unreachable blocks in any of the regions nested within 'ops'.
+LogicalResult
+applyAnalysisConversion(ArrayRef<Operation *> ops, ConversionTarget &target,
+ const FrozenRewritePatternSet &patterns,
+ ConversionConfig config = ConversionConfig());
+LogicalResult
+applyAnalysisConversion(Operation *op, ConversionTarget &target,
+ const FrozenRewritePatternSet &patterns,
+ ConversionConfig config = ConversionConfig());
} // namespace mlir
#endif // MLIR_TRANSFORMS_DIALECTCONVERSION_H_
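
The header changes above fold the optional unconvertedOps set and the analysis-only notifyCallback into a single ConversionConfig that all three conversion entry points accept. A minimal sketch of how a partial-conversion call site could migrate, assuming the caller already has an op, a ConversionTarget, and a FrozenRewritePatternSet (the surrounding pass is not shown):

#include "mlir/Transforms/DialectConversion.h"
#include "llvm/ADT/DenseSet.h"

static mlir::LogicalResult
runPartial(mlir::Operation *op, mlir::ConversionTarget &target,
           const mlir::FrozenRewritePatternSet &patterns) {
  llvm::DenseSet<mlir::Operation *> unlegalized;
  mlir::ConversionConfig config;
  // Replaces the old trailing `DenseSet<Operation *> *unconvertedOps` argument.
  config.unlegalizedOps = &unlegalized;
  return mlir::applyPartialConversion(op, target, patterns, config);
}

Analysis conversions would instead populate config.legalizableOps and, if desired, config.notifyCallback, matching the fields documented in the struct above.
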
diff --git a/mlir/lib/Conversion/MathToLLVM/MathToLLVM.cpp b/mlir/lib/Conversion/MathToLLVM/MathToLLVM.cpp
index 1b729611a362..23e957288eb9 100644
--- a/mlir/lib/Conversion/MathToLLVM/MathToLLVM.cpp
+++ b/mlir/lib/Conversion/MathToLLVM/MathToLLVM.cpp
@@ -148,10 +148,10 @@ struct ExpM1OpLowering : public ConvertOpToLLVMPattern<math::ExpM1Op> {
return LLVM::detail::handleMultidimensionalVectors(
op.getOperation(), adaptor.getOperands(), *getTypeConverter(),
[&](Type llvm1DVectorTy, ValueRange operands) {
+ auto numElements = LLVM::getVectorNumElements(llvm1DVectorTy);
auto splatAttr = SplatElementsAttr::get(
- mlir::VectorType::get(
- {LLVM::getVectorNumElements(llvm1DVectorTy).getFixedValue()},
- floatType),
+ mlir::VectorType::get({numElements.getKnownMinValue()}, floatType,
+ {numElements.isScalable()}),
floatOne);
auto one =
rewriter.create<LLVM::ConstantOp>(loc, llvm1DVectorTy, splatAttr);
@@ -207,10 +207,10 @@ struct Log1pOpLowering : public ConvertOpToLLVMPattern<math::Log1pOp> {
return LLVM::detail::handleMultidimensionalVectors(
op.getOperation(), adaptor.getOperands(), *getTypeConverter(),
[&](Type llvm1DVectorTy, ValueRange operands) {
+ auto numElements = LLVM::getVectorNumElements(llvm1DVectorTy);
auto splatAttr = SplatElementsAttr::get(
- mlir::VectorType::get(
- {LLVM::getVectorNumElements(llvm1DVectorTy).getFixedValue()},
- floatType),
+ mlir::VectorType::get({numElements.getKnownMinValue()}, floatType,
+ {numElements.isScalable()}),
floatOne);
auto one =
rewriter.create<LLVM::ConstantOp>(loc, llvm1DVectorTy, splatAttr);
@@ -266,10 +266,10 @@ struct RsqrtOpLowering : public ConvertOpToLLVMPattern<math::RsqrtOp> {
return LLVM::detail::handleMultidimensionalVectors(
op.getOperation(), adaptor.getOperands(), *getTypeConverter(),
[&](Type llvm1DVectorTy, ValueRange operands) {
+ auto numElements = LLVM::getVectorNumElements(llvm1DVectorTy);
auto splatAttr = SplatElementsAttr::get(
- mlir::VectorType::get(
- {LLVM::getVectorNumElements(llvm1DVectorTy).getFixedValue()},
- floatType),
+ mlir::VectorType::get({numElements.getKnownMinValue()}, floatType,
+ {numElements.isScalable()}),
floatOne);
auto one =
rewriter.create<LLVM::ConstantOp>(loc, llvm1DVectorTy, splatAttr);
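
All three lowerings above now rebuild the splat type from the element count's known minimum value plus its scalable flag, instead of calling getFixedValue(), which cannot represent scalable 1-D vectors. A minimal sketch of just the type construction, assuming an llvm::ElementCount obtained elsewhere (the f32 element type is illustrative):

#include "mlir/IR/BuiltinTypes.h"
#include "mlir/IR/MLIRContext.h"
#include "llvm/Support/TypeSize.h"

static mlir::VectorType makeSplatType(mlir::MLIRContext &ctx,
                                      llvm::ElementCount numElements) {
  mlir::Type f32 = mlir::Float32Type::get(&ctx);
  // Carries the scalable flag through: a scalable count yields vector<[N]xf32>,
  // a fixed count yields vector<Nxf32>.
  return mlir::VectorType::get(
      {static_cast<int64_t>(numElements.getKnownMinValue())}, f32,
      {numElements.isScalable()});
}
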
diff --git a/mlir/lib/Conversion/TosaToLinalg/TosaToLinalg.cpp b/mlir/lib/Conversion/TosaToLinalg/TosaToLinalg.cpp
index 7eb32ebe3228..7c477f2e1412 100644
--- a/mlir/lib/Conversion/TosaToLinalg/TosaToLinalg.cpp
+++ b/mlir/lib/Conversion/TosaToLinalg/TosaToLinalg.cpp
@@ -384,23 +384,23 @@ createLinalgBodyCalculationForElementwiseOp(Operation *op, ValueRange args,
if (isa<tosa::ClampOp>(op) && isa<IntegerType>(elementTy)) {
auto intTy = cast<IntegerType>(elementTy);
- int32_t min = static_cast<int32_t>(
- cast<IntegerAttr>(op->getAttr("min_int")).getValue().getSExtValue());
- int32_t max = static_cast<int32_t>(
- cast<IntegerAttr>(op->getAttr("max_int")).getValue().getSExtValue());
+ int64_t min =
+ cast<IntegerAttr>(op->getAttr("min_int")).getValue().getSExtValue();
+ int64_t max =
+ cast<IntegerAttr>(op->getAttr("max_int")).getValue().getSExtValue();
if (intTy.isUnsignedInteger()) {
- min = std::max<int32_t>(min, 0);
- max = std::min<int32_t>(
+ min = std::max(min, (int64_t)0);
+ max = std::min(
max,
APInt::getMaxValue(intTy.getIntOrFloatBitWidth()).getSExtValue());
} else {
- min = std::max<int32_t>(
- min, APInt::getSignedMinValue(intTy.getIntOrFloatBitWidth())
- .getSExtValue());
- max = std::min<int32_t>(
- max, APInt::getSignedMaxValue(intTy.getIntOrFloatBitWidth())
- .getSExtValue());
+ min =
+ std::max(min, APInt::getSignedMinValue(intTy.getIntOrFloatBitWidth())
+ .getSExtValue());
+ max =
+ std::min(max, APInt::getSignedMaxValue(intTy.getIntOrFloatBitWidth())
+ .getSExtValue());
}
auto minVal = rewriter.create<arith::ConstantIntOp>(
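
The clamp rewrite above widens min and max to int64_t so that bounds derived from wider integer element types are no longer squeezed through int32_t. A standalone illustration of the truncation the old std::min<int32_t>/std::max<int32_t> calls could cause (the 48-bit width is an arbitrary example):

#include "llvm/ADT/APInt.h"
#include <cassert>
#include <cstdint>

int main() {
  unsigned bitWidth = 48; // an illustrative wide integer element type
  int64_t smax = llvm::APInt::getSignedMaxValue(bitWidth).getSExtValue();
  assert(smax == (int64_t(1) << 47) - 1);     // 140737488355327, above INT32_MAX
  assert(static_cast<int32_t>(smax) != smax); // the truncation avoided above
  return 0;
}
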
diff --git a/mlir/lib/Dialect/ArmSME/Transforms/VectorLegalization.cpp b/mlir/lib/Dialect/ArmSME/Transforms/VectorLegalization.cpp
index e88f82c92eba..26dfb3826337 100644
--- a/mlir/lib/Dialect/ArmSME/Transforms/VectorLegalization.cpp
+++ b/mlir/lib/Dialect/ArmSME/Transforms/VectorLegalization.cpp
@@ -40,12 +40,12 @@ namespace {
//===----------------------------------------------------------------------===//
// Common match failure reasons.
-static constexpr StringLiteral MATCH_FAILURE_NOT_SME_TILE_TYPE_MULTIPLE(
+static constexpr StringLiteral kMatchFailureNotSMETileTypeMultiple(
"op vector size is not multiple of SME tiles");
-static constexpr StringLiteral MATCH_FAILURE_UNSUPPORTED_MASK_OP(
+static constexpr StringLiteral kMatchFailureUnsupportedMaskOp(
"op mask is unsupported for legalization/decomposition");
static constexpr StringLiteral
- MATCH_FAILURE_NON_PERMUTATION_MAP("op affine map is not a permutation");
+ kMatchFailureNonPermutationMap("op affine map is not a permutation");
/// An SMESubTile represents a single SME-sized sub-tile from decomposing a
/// larger vector type. The (`row`, `col`) are the position of the tile in the
@@ -174,8 +174,8 @@ struct LegalizeVectorOuterProductOpsByDecomposition
OneToNPatternRewriter &rewriter) const override {
auto vectorType = outerProductOp.getResultVectorType();
if (!isMultipleOfSMETileVectorType(vectorType))
- return rewriter.notifyMatchFailure(
- outerProductOp, MATCH_FAILURE_NOT_SME_TILE_TYPE_MULTIPLE);
+ return rewriter.notifyMatchFailure(outerProductOp,
+ kMatchFailureNotSMETileTypeMultiple);
Value mask;
Operation *rootOp = outerProductOp;
@@ -188,7 +188,7 @@ struct LegalizeVectorOuterProductOpsByDecomposition
if (!isSupportedMaskOp(mask))
return rewriter.notifyMatchFailure(outerProductOp,
- MATCH_FAILURE_UNSUPPORTED_MASK_OP);
+ kMatchFailureUnsupportedMaskOp);
ValueRange accSMETiles = adaptor.getAcc();
auto smeTileType = getSMETileTypeForElement(vectorType.getElementType());
@@ -252,18 +252,18 @@ struct LegalizeTransferReadOpsByDecomposition
OneToNPatternRewriter &rewriter) const override {
auto vectorType = readOp.getVectorType();
if (!isMultipleOfSMETileVectorType(vectorType))
- return rewriter.notifyMatchFailure(
- readOp, MATCH_FAILURE_NOT_SME_TILE_TYPE_MULTIPLE);
+ return rewriter.notifyMatchFailure(readOp,
+ kMatchFailureNotSMETileTypeMultiple);
auto mask = readOp.getMask();
if (!isSupportedMaskOp(mask))
return rewriter.notifyMatchFailure(readOp,
- MATCH_FAILURE_UNSUPPORTED_MASK_OP);
+ kMatchFailureUnsupportedMaskOp);
auto permutationMap = readOp.getPermutationMap();
if (!permutationMap.isPermutation())
return rewriter.notifyMatchFailure(readOp,
- MATCH_FAILURE_NON_PERMUTATION_MAP);
+ kMatchFailureNonPermutationMap);
// Note: For 2D vector types the only non-identity permutation is a simple
// transpose [1, 0].
@@ -300,18 +300,18 @@ struct LegalizeTransferWriteOpsByDecomposition
OneToNPatternRewriter &rewriter) const override {
auto vectorType = writeOp.getVectorType();
if (!isMultipleOfSMETileVectorType(vectorType))
- return rewriter.notifyMatchFailure(
- writeOp, MATCH_FAILURE_NOT_SME_TILE_TYPE_MULTIPLE);
+ return rewriter.notifyMatchFailure(writeOp,
+ kMatchFailureNotSMETileTypeMultiple);
auto mask = writeOp.getMask();
if (!isSupportedMaskOp(mask))
return rewriter.notifyMatchFailure(writeOp,
- MATCH_FAILURE_UNSUPPORTED_MASK_OP);
+ kMatchFailureUnsupportedMaskOp);
auto permutationMap = writeOp.getPermutationMap();
if (!permutationMap.isPermutation())
return rewriter.notifyMatchFailure(writeOp,
- MATCH_FAILURE_NON_PERMUTATION_MAP);
+ kMatchFailureNonPermutationMap);
// Note: For 2D vector types the only non-identity permutation is a simple
// transpose [1, 0].
diff --git a/mlir/lib/Dialect/GPU/CMakeLists.txt b/mlir/lib/Dialect/GPU/CMakeLists.txt
index e5776e157b61..51cfa2216e0c 100644
--- a/mlir/lib/Dialect/GPU/CMakeLists.txt
+++ b/mlir/lib/Dialect/GPU/CMakeLists.txt
@@ -1,11 +1,3 @@
-if ("NVPTX" IN_LIST LLVM_TARGETS_TO_BUILD)
- set(NVPTX_LIBS
- NVPTXCodeGen
- NVPTXDesc
- NVPTXInfo
- )
-endif()
-
if (MLIR_ENABLE_ROCM_CONVERSIONS)
set(AMDGPU_LIBS
IRReader
@@ -60,7 +52,6 @@ add_mlir_dialect_library(MLIRGPUTransforms
Transforms/ParallelLoopMapper.cpp
Transforms/ROCDLAttachTarget.cpp
Transforms/SerializeToBlob.cpp
- Transforms/SerializeToCubin.cpp
Transforms/SerializeToHsaco.cpp
Transforms/ShuffleRewriter.cpp
Transforms/SPIRVAttachTarget.cpp
@@ -74,7 +65,6 @@ add_mlir_dialect_library(MLIRGPUTransforms
Core
MC
Target
- ${NVPTX_LIBS}
${AMDGPU_LIBS}
DEPENDS
@@ -110,48 +100,6 @@ add_mlir_dialect_library(MLIRGPUTransforms
add_subdirectory(TransformOps)
add_subdirectory(Pipelines)
-if(MLIR_ENABLE_CUDA_RUNNER)
- if(NOT MLIR_ENABLE_CUDA_CONVERSIONS)
- message(SEND_ERROR
- "Building mlir with cuda support requires the NVPTX backend")
- endif()
-
- # Configure CUDA language support. Using check_language first allows us to
- # give a custom error message.
- include(CheckLanguage)
- check_language(CUDA)
- if (CMAKE_CUDA_COMPILER)
- enable_language(CUDA)
- else()
- message(SEND_ERROR
- "Building mlir with cuda support requires a working CUDA install")
- endif()
-
- # Enable gpu-to-cubin pass.
- target_compile_definitions(obj.MLIRGPUTransforms
- PRIVATE
- MLIR_GPU_TO_CUBIN_PASS_ENABLE=1
- )
-
- # Add CUDA headers includes and the libcuda.so library.
- target_include_directories(obj.MLIRGPUTransforms
- PRIVATE
- ${CMAKE_CUDA_TOOLKIT_INCLUDE_DIRECTORIES}
- )
-
- # Add link path for the cuda driver library.
- find_library(CUDA_DRIVER_LIBRARY cuda HINTS ${CMAKE_CUDA_IMPLICIT_LINK_DIRECTORIES} REQUIRED)
- get_filename_component(CUDA_DRIVER_LIBRARY_PATH "${CUDA_DRIVER_LIBRARY}" DIRECTORY)
- target_link_directories(MLIRGPUTransforms PRIVATE ${CUDA_DRIVER_LIBRARY_PATH})
-
- target_link_libraries(MLIRGPUTransforms
- PRIVATE
- MLIRNVVMToLLVMIRTranslation
- cuda
- )
-
-endif()
-
if(MLIR_ENABLE_ROCM_CONVERSIONS)
if (NOT ("AMDGPU" IN_LIST LLVM_TARGETS_TO_BUILD))
message(SEND_ERROR
diff --git a/mlir/lib/Dialect/GPU/IR/GPUDialect.cpp b/mlir/lib/Dialect/GPU/IR/GPUDialect.cpp
index 30b6cd74147e..33ce5c159db4 100644
--- a/mlir/lib/Dialect/GPU/IR/GPUDialect.cpp
+++ b/mlir/lib/Dialect/GPU/IR/GPUDialect.cpp
@@ -648,6 +648,8 @@ void LaunchOp::build(OpBuilder &builder, OperationState &result,
TypeRange workgroupAttributions,
TypeRange privateAttributions, Value clusterSizeX,
Value clusterSizeY, Value clusterSizeZ) {
+ OpBuilder::InsertionGuard g(builder);
+
// Add a WorkGroup attribution attribute. This attribute is required to
// identify private attributions in the list of block arguments.
result.addAttribute(getNumWorkgroupAttributionsAttrName(),
@@ -674,7 +676,7 @@ void LaunchOp::build(OpBuilder &builder, OperationState &result,
// attributions, where the first kNumConfigRegionAttributes arguments have
// `index` type and the rest have the same types as the data operands.
Region *kernelRegion = result.addRegion();
- Block *body = new Block();
+ Block *body = builder.createBlock(kernelRegion);
// TODO: Allow passing in proper locations here.
for (unsigned i = 0; i < kNumConfigRegionAttributes; ++i)
body->addArgument(builder.getIndexType(), result.location);
@@ -683,7 +685,6 @@ void LaunchOp::build(OpBuilder &builder, OperationState &result,
body->addArgument(argTy, result.location);
for (Type argTy : privateAttributions)
body->addArgument(argTy, result.location);
- kernelRegion->push_back(body);
// Fill OperandSegmentSize Attribute.
SmallVector<int32_t, 11> segmentSizes(11, 1);
segmentSizes.front() = asyncDependencies.size();
@@ -1325,6 +1326,8 @@ void GPUFuncOp::build(OpBuilder &builder, OperationState &result,
TypeRange workgroupAttributions,
TypeRange privateAttributions,
ArrayRef<NamedAttribute> attrs) {
+ OpBuilder::InsertionGuard g(builder);
+
result.addAttribute(SymbolTable::getSymbolAttrName(),
builder.getStringAttr(name));
result.addAttribute(getFunctionTypeAttrName(result.name),
@@ -1333,7 +1336,7 @@ void GPUFuncOp::build(OpBuilder &builder, OperationState &result,
builder.getI64IntegerAttr(workgroupAttributions.size()));
result.addAttributes(attrs);
Region *body = result.addRegion();
- Block *entryBlock = new Block;
+ Block *entryBlock = builder.createBlock(body);
// TODO: Allow passing in proper locations here.
for (Type argTy : type.getInputs())
@@ -1342,8 +1345,6 @@ void GPUFuncOp::build(OpBuilder &builder, OperationState &result,
entryBlock->addArgument(argTy, result.location);
for (Type argTy : privateAttributions)
entryBlock->addArgument(argTy, result.location);
-
- body->getBlocks().push_back(entryBlock);
}
/// Parses a GPU function memory attribution.
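
Both builders above switch from allocating a Block with new and pushing it into the region to builder.createBlock(...) under an OpBuilder::InsertionGuard. Creating the block through the builder lets any attached listener (for example a rewriter tracking IR changes) observe the new block, and the guard restores the caller's insertion point once the body has been populated. A minimal sketch of the pattern, with the argument type chosen by the caller:

#include "mlir/IR/Builders.h"
#include "mlir/IR/OperationSupport.h"

static void buildBodyRegion(mlir::OpBuilder &builder,
                            mlir::OperationState &result, mlir::Type argType) {
  mlir::OpBuilder::InsertionGuard guard(builder); // restore insertion point on exit
  mlir::Region *body = result.addRegion();
  // createBlock goes through the builder, so listeners see the new block;
  // this replaces `Block *b = new Block; body->push_back(b);`.
  mlir::Block *entry = builder.createBlock(body);
  entry->addArgument(argType, result.location);
}
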
diff --git a/mlir/lib/Dialect/GPU/Transforms/SerializeToCubin.cpp b/mlir/lib/Dialect/GPU/Transforms/SerializeToCubin.cpp
deleted file mode 100644
index 34ad4e6868e1..000000000000
--- a/mlir/lib/Dialect/GPU/Transforms/SerializeToCubin.cpp
+++ /dev/null
@@ -1,180 +0,0 @@
-//===- LowerGPUToCUBIN.cpp - Convert GPU kernel to CUBIN blob -------------===//
-//
-// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
-// See https://llvm.org/LICENSE.txt for license information.
-// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
-//
-//===----------------------------------------------------------------------===//
-//
-// This file implements a pass that serializes a gpu module into CUBIN blob and
-// adds that blob as a string attribute of the module.
-//
-//===----------------------------------------------------------------------===//
-
-#include "mlir/Dialect/GPU/Transforms/Passes.h"
-#include "mlir/Dialect/LLVMIR/NVVMDialect.h"
-#include "llvm/Support/Debug.h"
-
-#if MLIR_GPU_TO_CUBIN_PASS_ENABLE
-#include "mlir/Pass/Pass.h"
-#include "mlir/Target/LLVMIR/Dialect/NVVM/NVVMToLLVMIRTranslation.h"
-#include "mlir/Target/LLVMIR/Export.h"
-#include "llvm/Support/TargetSelect.h"
-#include "llvm/Support/Threading.h"
-
-#include <cuda.h>
-
-using namespace mlir;
-
-static void emitCudaError(const llvm::Twine &expr, const char *buffer,
- CUresult result, Location loc) {
- const char *error = nullptr;
- cuGetErrorString(result, &error);
- emitError(loc,
- expr.concat(error ? " failed with error code " + llvm::Twine{error}
- : llvm::Twine(" failed with unknown error "))
- .concat("[")
- .concat(buffer)
- .concat("]"));
-}
-
-#define RETURN_ON_CUDA_ERROR(expr) \
- do { \
- if (auto status = (expr)) { \
- emitCudaError(#expr, jitErrorBuffer, status, loc); \
- return {}; \
- } \
- } while (false)
-
-namespace {
-class SerializeToCubinPass
- : public PassWrapper<SerializeToCubinPass, gpu::SerializeToBlobPass> {
- static llvm::once_flag initializeBackendOnce;
-
-public:
- MLIR_DEFINE_EXPLICIT_INTERNAL_INLINE_TYPE_ID(SerializeToCubinPass)
-
- SerializeToCubinPass(StringRef triple = "nvptx64-nvidia-cuda",
- StringRef chip = "sm_35", StringRef features = "+ptx60",
- int optLevel = 2, bool dumpPtx = false);
-
- StringRef getArgument() const override { return "gpu-to-cubin"; }
- StringRef getDescription() const override {
- return "Lower GPU kernel function to CUBIN binary annotations";
- }
-
-private:
- // Serializes PTX to CUBIN.
- std::unique_ptr<std::vector<char>>
- serializeISA(const std::string &isa) override;
-};
-} // namespace
-
-// Sets the 'option' to 'value' unless it already has a value.
-static void maybeSetOption(Pass::Option<std::string> &option, StringRef value) {
- if (!option.hasValue())
- option = value.str();
-}
-
-llvm::once_flag SerializeToCubinPass::initializeBackendOnce;
-
-SerializeToCubinPass::SerializeToCubinPass(StringRef triple, StringRef chip,
- StringRef features, int optLevel,
- bool dumpPtx) {
- // No matter how this pass is constructed, ensure that the NVPTX backend
- // is initialized exactly once.
- llvm::call_once(initializeBackendOnce, []() {
- // Initialize LLVM NVPTX backend.
-#if LLVM_HAS_NVPTX_TARGET
- LLVMInitializeNVPTXTarget();
- LLVMInitializeNVPTXTargetInfo();
- LLVMInitializeNVPTXTargetMC();
- LLVMInitializeNVPTXAsmPrinter();
-#endif
- });
-
- maybeSetOption(this->triple, triple);
- maybeSetOption(this->chip, chip);
- maybeSetOption(this->features, features);
- this->dumpPtx = dumpPtx;
- if (this->optLevel.getNumOccurrences() == 0)
- this->optLevel.setValue(optLevel);
-}
-
-std::unique_ptr<std::vector<char>>
-SerializeToCubinPass::serializeISA(const std::string &isa) {
- Location loc = getOperation().getLoc();
- char jitErrorBuffer[4096] = {0};
-
- RETURN_ON_CUDA_ERROR(cuInit(0));
-
- // Linking requires a device context.
- CUdevice device;
- RETURN_ON_CUDA_ERROR(cuDeviceGet(&device, 0));
- CUcontext context;
- // Use the primary context.
- RETURN_ON_CUDA_ERROR(cuDevicePrimaryCtxRetain(&context, device));
- // Push the primary context so that the next CUDA operations
- // actually use it.
- RETURN_ON_CUDA_ERROR(cuCtxPushCurrent(context));
- CUlinkState linkState;
-
- CUjit_option jitOptions[] = {CU_JIT_ERROR_LOG_BUFFER,
- CU_JIT_ERROR_LOG_BUFFER_SIZE_BYTES};
- void *jitOptionsVals[] = {jitErrorBuffer,
- reinterpret_cast<void *>(sizeof(jitErrorBuffer))};
-
- RETURN_ON_CUDA_ERROR(cuLinkCreate(2, /* number of jit options */
- jitOptions, /* jit options */
- jitOptionsVals, /* jit option values */
- &linkState));
-
- auto kernelName = getOperation().getName().str();
- if (dumpPtx) {
- llvm::dbgs() << " Kernel Name : [" << kernelName << "]\n";
- llvm::dbgs() << isa << "\n";
- }
- RETURN_ON_CUDA_ERROR(cuLinkAddData(
- linkState, CUjitInputType::CU_JIT_INPUT_PTX,
- const_cast<void *>(static_cast<const void *>(isa.c_str())), isa.length(),
- kernelName.c_str(), 0, /* number of jit options */
- nullptr, /* jit options */
- nullptr /* jit option values */
- ));
-
- void *cubinData;
- size_t cubinSize;
- RETURN_ON_CUDA_ERROR(cuLinkComplete(linkState, &cubinData, &cubinSize));
-
- char *cubinAsChar = static_cast<char *>(cubinData);
- auto result =
- std::make_unique<std::vector<char>>(cubinAsChar, cubinAsChar + cubinSize);
-
- // This will also destroy the cubin data.
- RETURN_ON_CUDA_ERROR(cuLinkDestroy(linkState));
- // Pop and release the primary context.
- CUcontext poppedContext;
- RETURN_ON_CUDA_ERROR(cuCtxPopCurrent(&poppedContext));
- RETURN_ON_CUDA_ERROR(cuDevicePrimaryCtxRelease(device));
-
- return result;
-}
-
-// Register pass to serialize GPU kernel functions to a CUBIN binary annotation.
-void mlir::registerGpuSerializeToCubinPass() {
- PassRegistration<SerializeToCubinPass> registerSerializeToCubin(
- [] { return std::make_unique<SerializeToCubinPass>(); });
-}
-
-std::unique_ptr<Pass> mlir::createGpuSerializeToCubinPass(StringRef triple,
- StringRef arch,
- StringRef features,
- int optLevel,
- bool dumpPtx) {
- return std::make_unique<SerializeToCubinPass>(triple, arch, features,
- optLevel, dumpPtx);
-}
-
-#else // MLIR_GPU_TO_CUBIN_PASS_ENABLE
-void mlir::registerGpuSerializeToCubinPass() {}
-#endif // MLIR_GPU_TO_CUBIN_PASS_ENABLE
diff --git a/mlir/lib/Dialect/Linalg/IR/LinalgInterfaces.cpp b/mlir/lib/Dialect/Linalg/IR/LinalgInterfaces.cpp
index 7eed7928456d..3627ff6617ed 100644
--- a/mlir/lib/Dialect/Linalg/IR/LinalgInterfaces.cpp
+++ b/mlir/lib/Dialect/Linalg/IR/LinalgInterfaces.cpp
@@ -1041,6 +1041,11 @@ int64_t LinalgOp::getIndexingMapIndex(OpOperand *opOperand) {
LogicalResult mlir::linalg::detail::verifyStructuredOpInterface(Operation *op) {
LinalgOp linalgOp = cast<LinalgOp>(op);
+ // Mixed tensor/buffer operands are not allowed.
+ if (!linalgOp.hasPureTensorSemantics() &&
+ !linalgOp.hasPureBufferSemantics() && op->getNumOperands() > 0)
+ return op->emitOpError("expected to have pure tensor or buffer semantics");
+
// Before checking indexing maps, we need to make sure the attributes
// referenced by it are valid.
if (linalgOp.hasDynamicIndexingMaps())
diff --git a/mlir/lib/Dialect/Math/Transforms/PolynomialApproximation.cpp b/mlir/lib/Dialect/Math/Transforms/PolynomialApproximation.cpp
index 71e4e13103f5..962cb28b7c2a 100644
--- a/mlir/lib/Dialect/Math/Transforms/PolynomialApproximation.cpp
+++ b/mlir/lib/Dialect/Math/Transforms/PolynomialApproximation.cpp
@@ -1471,6 +1471,16 @@ RsqrtApproximation::matchAndRewrite(math::RsqrtOp op,
//----------------------------------------------------------------------------//
+void mlir::populatePolynomialApproximateTanhPattern(
+ RewritePatternSet &patterns) {
+ patterns.add<TanhApproximation>(patterns.getContext());
+}
+
+void mlir::populatePolynomialApproximateErfPattern(
+ RewritePatternSet &patterns) {
+ patterns.add<ErfPolynomialApproximation>(patterns.getContext());
+}
+
void mlir::populateMathPolynomialApproximationPatterns(
RewritePatternSet &patterns,
const MathPolynomialApproximationOptions &options) {
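
The two new populate* functions expose the tanh and erf approximations individually, so a client can install just one of them instead of the whole populateMathPolynomialApproximationPatterns set. A sketch of a call site, assuming the declarations live in the Math transforms Passes header and that the usual greedy driver is used (neither is part of this patch):

#include "mlir/Dialect/Math/Transforms/Passes.h" // assumed declaration site
#include "mlir/Transforms/GreedyPatternRewriteDriver.h"

static mlir::LogicalResult approximateTanhOnly(mlir::Operation *root) {
  mlir::RewritePatternSet patterns(root->getContext());
  mlir::populatePolynomialApproximateTanhPattern(patterns);
  return mlir::applyPatternsAndFoldGreedily(root, std::move(patterns));
}
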
diff --git a/mlir/lib/Dialect/Mesh/Transforms/Spmdization.cpp b/mlir/lib/Dialect/Mesh/Transforms/Spmdization.cpp
index 7cbe0de04876..c4d8b0b15e46 100644
--- a/mlir/lib/Dialect/Mesh/Transforms/Spmdization.cpp
+++ b/mlir/lib/Dialect/Mesh/Transforms/Spmdization.cpp
@@ -593,7 +593,6 @@ static SmallVector<MeshShardingAttr> getOperandShardings(Operation &op) {
Operation *definingOp = operand.getDefiningOp();
assert(definingOp);
ShardOp shardOp = llvm::cast<ShardOp>(definingOp);
- assert(shardOp.getAnnotateForUsers());
return shardOp.getShard();
});
return res;
@@ -615,34 +614,46 @@ static SmallVector<MeshShardingAttr> getResultShardings(Operation &op) {
assert(result.hasOneUse());
Operation *userOp = *result.getUsers().begin();
ShardOp shardOp = llvm::cast<ShardOp>(userOp);
- assert(!shardOp.getAnnotateForUsers());
return shardOp.getShard();
});
return res;
}
static LogicalResult
-spmdizeOperation(Operation &op, IRMapping &spmdizationMap,
+spmdizeOperation(ShardOp shardOp, IRMapping &spmdizationMap,
SymbolTableCollection &symbolTableCollection,
OpBuilder &builder) {
- ShardOp shardOp = llvm::dyn_cast<ShardOp>(op);
- if (shardOp) {
- if (!shardOp.getAnnotateForUsers()) {
- return success();
- }
-
+ Value targetSpmdValue;
+
+ // Check if 2 shard ops are chained. If not, there is no need for resharding
+ // as the source and target share the same sharding.
+ ShardOp srcShardOp =
+ dyn_cast_or_null<ShardOp>(shardOp.getOperand().getDefiningOp());
+ if (!srcShardOp) {
+ targetSpmdValue = spmdizationMap.lookup(shardOp.getOperand());
+ } else {
// Insert resharding.
- ShardOp srcShardOp =
- llvm::cast<ShardOp>(shardOp.getOperand().getDefiningOp());
- assert(!srcShardOp.getAnnotateForUsers());
+ assert(!srcShardOp.getAnnotateForUsers() && shardOp.getAnnotateForUsers());
TypedValue<ShapedType> srcSpmdValue =
spmdizationMap.lookup(srcShardOp.getOperand())
.cast<TypedValue<ShapedType>>();
- Value targetSpmdValue = reshard(builder, srcShardOp, shardOp, srcSpmdValue,
- symbolTableCollection);
- assert(!spmdizationMap.contains(shardOp.getResult()));
- spmdizationMap.map(shardOp.getResult(), targetSpmdValue);
- return success();
+ targetSpmdValue = reshard(builder, srcShardOp, shardOp, srcSpmdValue,
+ symbolTableCollection);
+ }
+
+ assert(!spmdizationMap.contains(shardOp.getResult()));
+ spmdizationMap.map(shardOp.getResult(), targetSpmdValue);
+ return success();
+}
+
+static LogicalResult
+spmdizeOperation(Operation &op, IRMapping &spmdizationMap,
+ SymbolTableCollection &symbolTableCollection,
+ OpBuilder &builder) {
+ ShardOp shardOp = llvm::dyn_cast<ShardOp>(op);
+ if (shardOp) {
+ return spmdizeOperation(shardOp, spmdizationMap, symbolTableCollection,
+ builder);
}
SmallVector<Value> spmdizedOperands;
diff --git a/mlir/lib/Dialect/SCF/IR/SCF.cpp b/mlir/lib/Dialect/SCF/IR/SCF.cpp
index 119df9acd9e9..233e702dbb22 100644
--- a/mlir/lib/Dialect/SCF/IR/SCF.cpp
+++ b/mlir/lib/Dialect/SCF/IR/SCF.cpp
@@ -306,17 +306,18 @@ void ConditionOp::getSuccessorRegions(
void ForOp::build(OpBuilder &builder, OperationState &result, Value lb,
Value ub, Value step, ValueRange iterArgs,
BodyBuilderFn bodyBuilder) {
+ OpBuilder::InsertionGuard guard(builder);
+
result.addOperands({lb, ub, step});
result.addOperands(iterArgs);
for (Value v : iterArgs)
result.addTypes(v.getType());
Type t = lb.getType();
Region *bodyRegion = result.addRegion();
- bodyRegion->push_back(new Block);
- Block &bodyBlock = bodyRegion->front();
- bodyBlock.addArgument(t, result.location);
+ Block *bodyBlock = builder.createBlock(bodyRegion);
+ bodyBlock->addArgument(t, result.location);
for (Value v : iterArgs)
- bodyBlock.addArgument(v.getType(), v.getLoc());
+ bodyBlock->addArgument(v.getType(), v.getLoc());
// Create the default terminator if the builder is not provided and if the
// iteration arguments are not provided. Otherwise, leave this to the caller
@@ -325,9 +326,9 @@ void ForOp::build(OpBuilder &builder, OperationState &result, Value lb,
ForOp::ensureTerminator(*bodyRegion, builder, result.location);
} else if (bodyBuilder) {
OpBuilder::InsertionGuard guard(builder);
- builder.setInsertionPointToStart(&bodyBlock);
- bodyBuilder(builder, result.location, bodyBlock.getArgument(0),
- bodyBlock.getArguments().drop_front());
+ builder.setInsertionPointToStart(bodyBlock);
+ bodyBuilder(builder, result.location, bodyBlock->getArgument(0),
+ bodyBlock->getArguments().drop_front());
}
}
diff --git a/mlir/lib/Dialect/Tosa/IR/TosaOps.cpp b/mlir/lib/Dialect/Tosa/IR/TosaOps.cpp
index 950ee597b891..62d07859e32f 100644
--- a/mlir/lib/Dialect/Tosa/IR/TosaOps.cpp
+++ b/mlir/lib/Dialect/Tosa/IR/TosaOps.cpp
@@ -1330,6 +1330,7 @@ NARY_SHAPE_INFER(tosa::CastOp)
NARY_SHAPE_INFER(tosa::CeilOp)
NARY_SHAPE_INFER(tosa::ClampOp)
NARY_SHAPE_INFER(tosa::ClzOp)
+NARY_SHAPE_INFER(tosa::CosOp)
NARY_SHAPE_INFER(tosa::DivOp)
NARY_SHAPE_INFER(tosa::ExpOp)
NARY_SHAPE_INFER(tosa::FloorOp)
@@ -1352,6 +1353,7 @@ NARY_SHAPE_INFER(tosa::ReciprocalOp)
NARY_SHAPE_INFER(tosa::RescaleOp)
NARY_SHAPE_INFER(tosa::ReverseOp)
NARY_SHAPE_INFER(tosa::RsqrtOp)
+NARY_SHAPE_INFER(tosa::SinOp)
NARY_SHAPE_INFER(tosa::SelectOp)
NARY_SHAPE_INFER(tosa::SubOp)
NARY_SHAPE_INFER(tosa::TanhOp)
diff --git a/mlir/lib/Dialect/Utils/IndexingUtils.cpp b/mlir/lib/Dialect/Utils/IndexingUtils.cpp
index baaa581ab6f2..4c960659d80c 100644
--- a/mlir/lib/Dialect/Utils/IndexingUtils.cpp
+++ b/mlir/lib/Dialect/Utils/IndexingUtils.cpp
@@ -7,13 +7,12 @@
//===----------------------------------------------------------------------===//
#include "mlir/Dialect/Utils/IndexingUtils.h"
-
+#include "mlir/Dialect/Utils/StaticValueUtils.h"
#include "mlir/IR/AffineExpr.h"
#include "mlir/IR/Builders.h"
#include "mlir/IR/BuiltinAttributes.h"
#include "mlir/IR/MLIRContext.h"
#include "llvm/ADT/STLExtras.h"
-
#include <numeric>
#include <optional>
@@ -306,6 +305,14 @@ mlir::computeLinearIndex(OpFoldResult sourceOffset,
return {expr, values};
}
+std::pair<AffineExpr, SmallVector<OpFoldResult>>
+mlir::computeLinearIndex(OpFoldResult sourceOffset, ArrayRef<int64_t> strides,
+ ArrayRef<Value> indices) {
+ return computeLinearIndex(
+ sourceOffset, getAsIndexOpFoldResult(sourceOffset.getContext(), strides),
+ getAsOpFoldResult(ValueRange(indices)));
+}
+
//===----------------------------------------------------------------------===//
// TileOffsetRange
//===----------------------------------------------------------------------===//
diff --git a/mlir/lib/Dialect/Vector/Transforms/VectorEmulateNarrowType.cpp b/mlir/lib/Dialect/Vector/Transforms/VectorEmulateNarrowType.cpp
index 36fb66708407..fc11ae63e718 100644
--- a/mlir/lib/Dialect/Vector/Transforms/VectorEmulateNarrowType.cpp
+++ b/mlir/lib/Dialect/Vector/Transforms/VectorEmulateNarrowType.cpp
@@ -724,9 +724,8 @@ BitCastRewriter::BitCastRewriter(VectorType sourceVectorType,
static LogicalResult commonConversionPrecondition(PatternRewriter &rewriter,
VectorType preconditionType,
Operation *op) {
- if (!preconditionType || preconditionType.getRank() != 1 ||
- preconditionType.isScalable())
- return rewriter.notifyMatchFailure(op, "scalable or >1-D vector");
+ if (!preconditionType || preconditionType.isScalable())
+ return rewriter.notifyMatchFailure(op, "scalable vector");
// TODO: consider relaxing this restriction in the future if we find ways
// to really work with subbyte elements across the MLIR/LLVM boundary.
@@ -743,6 +742,9 @@ LogicalResult BitCastRewriter::commonPrecondition(PatternRewriter &rewriter,
if (!enumerator.sourceVectorType || !enumerator.targetVectorType)
return rewriter.notifyMatchFailure(op, "types are not vector");
+ if (!preconditionType || preconditionType.getRank() != 1)
+ return rewriter.notifyMatchFailure(op, "unsupported >1-D vector");
+
return commonConversionPrecondition(rewriter, preconditionType, op);
}
@@ -855,7 +857,6 @@ static Value rewriteI4ToI8SignedExt(PatternRewriter &rewriter, Location loc,
"Expected i4 type");
// 1. Generate a bitcast vector<Xxi4> -> vector<X/2xi8>.
- int64_t vecDimSize = srcVecType.getShape().back();
SmallVector<int64_t> i8VecShape = llvm::to_vector(srcVecType.getShape());
constexpr int64_t i4Toi8BitwidthFactor = 2;
i8VecShape.back() = i8VecShape.back() / i4Toi8BitwidthFactor;
@@ -871,16 +872,8 @@ static Value rewriteI4ToI8SignedExt(PatternRewriter &rewriter, Location loc,
Value low = rewriter.create<arith::ShRSIOp>(loc, shl, shiftValues);
Value high = rewriter.create<arith::ShRSIOp>(loc, i8Vector, shiftValues);
- // 3. Interleave low and high i8 elements using a shuffle.
- SmallVector<int64_t> interleaveMaskValues;
- interleaveMaskValues.reserve(vecDimSize);
- for (int i = 0, end = vecDimSize / 2; i < end; ++i) {
- interleaveMaskValues.push_back(i);
- interleaveMaskValues.push_back(i + (vecDimSize / 2));
- }
-
- return rewriter.create<vector::ShuffleOp>(
- loc, low, high, rewriter.getI64ArrayAttr(interleaveMaskValues));
+ // 3. Interleave low and high i8 elements.
+ return rewriter.create<vector::InterleaveOp>(loc, low, high);
}
namespace {
@@ -1008,8 +1001,7 @@ struct RewriteExtOfBitCast : OpRewritePattern<ExtOpType> {
/// %1 = arith.shli %0, 4 : vector<4xi8>
/// %2 = arith.shrsi %1, 4 : vector<4xi8>
/// %3 = arith.shrsi %0, 4 : vector<4xi8>
-/// %4 = vector.shuffle %2, %3 [0, 4, 1, 5, 2, 6, 3, 7]
-/// : vector<4xi8>, vector<4xi8>
+/// %4 = vector.interleave %2, %3 : vector<4xi8>
/// %5 = arith.extsi %4 : vector<8xi8> to vector<8xi32>
///
/// arith.sitofp %in : vector<8xi4> to vector<8xf32>
@@ -1018,8 +1010,7 @@ struct RewriteExtOfBitCast : OpRewritePattern<ExtOpType> {
/// %1 = arith.shli %0, 4 : vector<4xi8>
/// %2 = arith.shrsi %1, 4 : vector<4xi8>
/// %3 = arith.shrsi %0, 4 : vector<4xi8>
-/// %4 = vector.shuffle %2, %3 [0, 4, 1, 5, 2, 6, 3, 7]
-/// : vector<4xi8>, vector<4xi8>
+/// %4 = vector.interleave %2, %3 : vector<4xi8>
/// %5 = arith.sitofp %4 : vector<8xi8> to vector<8xf32>
///
template <typename ConversionOpType>
diff --git a/mlir/lib/Dialect/Vector/Transforms/VectorTransferOpTransforms.cpp b/mlir/lib/Dialect/Vector/Transforms/VectorTransferOpTransforms.cpp
index 04e5a816dd91..0ffef6aabccc 100644
--- a/mlir/lib/Dialect/Vector/Transforms/VectorTransferOpTransforms.cpp
+++ b/mlir/lib/Dialect/Vector/Transforms/VectorTransferOpTransforms.cpp
@@ -15,6 +15,7 @@
#include "mlir/Dialect/Arith/IR/Arith.h"
#include "mlir/Dialect/MemRef/IR/MemRef.h"
#include "mlir/Dialect/Tensor/IR/Tensor.h"
+#include "mlir/Dialect/Utils/IndexingUtils.h"
#include "mlir/Dialect/Vector/IR/VectorOps.h"
#include "mlir/Dialect/Vector/Transforms/LoweringPatterns.h"
#include "mlir/Dialect/Vector/Transforms/VectorTransforms.h"
@@ -577,7 +578,6 @@ public:
if (transferReadOp.getMask())
return failure();
- SmallVector<Value> collapsedIndices;
int64_t firstDimToCollapse = sourceType.getRank() - vectorType.getRank();
// 1. Collapse the source memref
@@ -599,12 +599,14 @@ public:
// 2.2 New indices
// If all the collapsed indices are zero then no extra logic is needed.
// Otherwise, a new offset/index has to be computed.
+ SmallVector<Value> collapsedIndices;
if (failed(checkAndCollapseInnerZeroIndices(transferReadOp.getIndices(),
firstDimToCollapse,
collapsedIndices))) {
- // Copy all the leading indices
- collapsedIndices = transferReadOp.getIndices();
- collapsedIndices.resize(firstDimToCollapse);
+ // Copy all the leading indices.
+ SmallVector<Value> indices = transferReadOp.getIndices();
+ collapsedIndices.append(indices.begin(),
+ indices.begin() + firstDimToCollapse);
// Compute the remaining trailing index/offset required for reading from
// the collapsed memref:
@@ -621,24 +623,26 @@ public:
// memref<1x86xi32>, vector<2xi32>
// one would get the following offset:
// %offset = %arg0 * 43
- AffineExpr offsetExpr, idxExpr;
- bindSymbols(rewriter.getContext(), offsetExpr, idxExpr);
-
- int64_t outputRank = transferReadOp.getIndices().size();
- OpFoldResult offset =
+ OpFoldResult collapsedOffset =
rewriter.create<arith::ConstantIndexOp>(loc, 0).getResult();
- for (int64_t i = firstDimToCollapse; i < outputRank; ++i) {
- int64_t dim = dyn_cast<ShapedType>(source.getType()).getDimSize(i);
- offset = affine::makeComposedFoldedAffineApply(
- rewriter, loc, offsetExpr + dim * idxExpr,
- {offset, transferReadOp.getIndices()[i]});
- }
- if (offset.is<Value>()) {
- collapsedIndices.push_back(offset.get<Value>());
+ auto sourceShape = sourceType.getShape();
+ auto collapsedStrides = computeSuffixProduct(ArrayRef<int64_t>(
+ sourceShape.begin() + firstDimToCollapse, sourceShape.end()));
+
+ // Compute the collapsed offset.
+ ArrayRef<Value> indicesToCollapse(indices.begin() + firstDimToCollapse,
+ indices.end());
+ auto &&[collapsedExpr, collapsedVals] = computeLinearIndex(
+ collapsedOffset, collapsedStrides, indicesToCollapse);
+ collapsedOffset = affine::makeComposedFoldedAffineApply(
+ rewriter, loc, collapsedExpr, collapsedVals);
+
+ if (collapsedOffset.is<Value>()) {
+ collapsedIndices.push_back(collapsedOffset.get<Value>());
} else {
collapsedIndices.push_back(rewriter.create<arith::ConstantIndexOp>(
- loc, *getConstantIntValue(offset)));
+ loc, *getConstantIntValue(collapsedOffset)));
}
}
@@ -710,6 +714,7 @@ public:
firstContiguousInnerDim,
collapsedIndices)))
return failure();
+
Value collapsedSource =
collapseInnerDims(rewriter, loc, source, firstContiguousInnerDim);
MemRefType collapsedSourceType =
diff --git a/mlir/lib/ExecutionEngine/SparseTensor/Storage.cpp b/mlir/lib/ExecutionEngine/SparseTensor/Storage.cpp
index 9e8b240899d8..bbe10b0dcdd4 100644
--- a/mlir/lib/ExecutionEngine/SparseTensor/Storage.cpp
+++ b/mlir/lib/ExecutionEngine/SparseTensor/Storage.cpp
@@ -74,13 +74,6 @@ MLIR_SPARSETENSOR_FOREVERY_FIXED_O(IMPL_GETCOORDINATES)
MLIR_SPARSETENSOR_FOREVERY_V(IMPL_GETVALUES)
#undef IMPL_GETVALUES
-#define IMPL_FORWARDINGINSERT(VNAME, V) \
- void SparseTensorStorageBase::forwardingInsert(const uint64_t *, V) { \
- FATAL_PIV("forwardingInsert" #VNAME); \
- }
-MLIR_SPARSETENSOR_FOREVERY_V(IMPL_FORWARDINGINSERT)
-#undef IMPL_FORWARDINGINSERT
-
#define IMPL_LEXINSERT(VNAME, V) \
void SparseTensorStorageBase::lexInsert(const uint64_t *, V) { \
FATAL_PIV("lexInsert" #VNAME); \
diff --git a/mlir/lib/ExecutionEngine/SparseTensorRuntime.cpp b/mlir/lib/ExecutionEngine/SparseTensorRuntime.cpp
index a5e75a77b4e4..0bc90b281b7c 100644
--- a/mlir/lib/ExecutionEngine/SparseTensorRuntime.cpp
+++ b/mlir/lib/ExecutionEngine/SparseTensorRuntime.cpp
@@ -117,20 +117,7 @@ extern "C" {
switch (action) { \
case Action::kEmpty: { \
return SparseTensorStorage<P, C, V>::newEmpty( \
- dimRank, dimSizes, lvlRank, lvlSizes, lvlTypes, dim2lvl, lvl2dim, \
- false); \
- } \
- case Action::kEmptyForward: { \
- return SparseTensorStorage<P, C, V>::newEmpty( \
- dimRank, dimSizes, lvlRank, lvlSizes, lvlTypes, dim2lvl, lvl2dim, \
- true); \
- } \
- case Action::kFromCOO: { \
- assert(ptr && "Received nullptr for SparseTensorCOO object"); \
- auto &coo = *static_cast<SparseTensorCOO<V> *>(ptr); \
- return SparseTensorStorage<P, C, V>::newFromCOO( \
- dimRank, dimSizes, lvlRank, lvlSizes, lvlTypes, dim2lvl, lvl2dim, \
- coo); \
+ dimRank, dimSizes, lvlRank, lvlSizes, lvlTypes, dim2lvl, lvl2dim); \
} \
case Action::kFromReader: { \
assert(ptr && "Received nullptr for SparseTensorReader object"); \
@@ -138,11 +125,6 @@ extern "C" {
return static_cast<void *>(reader.readSparseTensor<P, C, V>( \
lvlRank, lvlSizes, lvlTypes, dim2lvl, lvl2dim)); \
} \
- case Action::kToCOO: { \
- assert(ptr && "Received nullptr for SparseTensorStorage object"); \
- auto &tensor = *static_cast<SparseTensorStorage<P, C, V> *>(ptr); \
- return tensor.toCOO(); \
- } \
case Action::kPack: { \
assert(ptr && "Received nullptr for SparseTensorStorage object"); \
intptr_t *buffers = static_cast<intptr_t *>(ptr); \
@@ -341,21 +323,6 @@ MLIR_SPARSETENSOR_FOREVERY_O(IMPL_SPARSECOORDINATES)
#undef IMPL_SPARSECOORDINATES
#undef IMPL_GETOVERHEAD
-#define IMPL_FORWARDINGINSERT(VNAME, V) \
- void _mlir_ciface_forwardingInsert##VNAME( \
- void *t, StridedMemRefType<V, 0> *vref, \
- StridedMemRefType<index_type, 1> *dimCoordsRef) { \
- assert(t &&vref); \
- ASSERT_NO_STRIDE(dimCoordsRef); \
- const index_type *dimCoords = MEMREF_GET_PAYLOAD(dimCoordsRef); \
- assert(dimCoords); \
- const V *value = MEMREF_GET_PAYLOAD(vref); \
- static_cast<SparseTensorStorageBase *>(t)->forwardingInsert(dimCoords, \
- *value); \
- }
-MLIR_SPARSETENSOR_FOREVERY_V(IMPL_FORWARDINGINSERT)
-#undef IMPL_FORWARDINGINSERT
-
#define IMPL_LEXINSERT(VNAME, V) \
void _mlir_ciface_lexInsert##VNAME( \
void *t, StridedMemRefType<index_type, 1> *lvlCoordsRef, \
@@ -427,8 +394,8 @@ void _mlir_ciface_getSparseTensorReaderDimSizes(
const uint64_t cSize = MEMREF_GET_USIZE(cref); \
const uint64_t vSize = MEMREF_GET_USIZE(vref); \
ASSERT_USIZE_EQ(lvl2dimRef, dimRank); \
- assert(cSize >= lvlRank * vSize); \
- assert(vSize >= reader.getNSE() && "Not enough space in buffers"); \
+ assert(cSize >= lvlRank * reader.getNSE()); \
+ assert(vSize >= reader.getNSE()); \
(void)dimRank; \
(void)cSize; \
(void)vSize; \
@@ -488,10 +455,6 @@ index_type sparseDimSize(void *tensor, index_type d) {
return static_cast<SparseTensorStorageBase *>(tensor)->getDimSize(d);
}
-void endForwardingInsert(void *tensor) {
- return static_cast<SparseTensorStorageBase *>(tensor)->endForwardingInsert();
-}
-
void endLexInsert(void *tensor) {
return static_cast<SparseTensorStorageBase *>(tensor)->endLexInsert();
}
@@ -500,13 +463,6 @@ void delSparseTensor(void *tensor) {
delete static_cast<SparseTensorStorageBase *>(tensor);
}
-#define IMPL_DELCOO(VNAME, V) \
- void delSparseTensorCOO##VNAME(void *coo) { \
- delete static_cast<SparseTensorCOO<V> *>(coo); \
- }
-MLIR_SPARSETENSOR_FOREVERY_V(IMPL_DELCOO)
-#undef IMPL_DELCOO
-
char *getTensorFilename(index_type id) {
constexpr size_t bufSize = 80;
char var[bufSize];
diff --git a/mlir/lib/Target/LLVMIR/DebugImporter.cpp b/mlir/lib/Target/LLVMIR/DebugImporter.cpp
index 652129523009..c631617f9735 100644
--- a/mlir/lib/Target/LLVMIR/DebugImporter.cpp
+++ b/mlir/lib/Target/LLVMIR/DebugImporter.cpp
@@ -99,21 +99,31 @@ DIFileAttr DebugImporter::translateImpl(llvm::DIFile *node) {
}
DILabelAttr DebugImporter::translateImpl(llvm::DILabel *node) {
- return DILabelAttr::get(context, translate(node->getScope()),
+ // Return nullptr if the scope or type is a cyclic dependency.
+ DIScopeAttr scope = translate(node->getScope());
+ if (node->getScope() && !scope)
+ return nullptr;
+ return DILabelAttr::get(context, scope,
getStringAttrOrNull(node->getRawName()),
translate(node->getFile()), node->getLine());
}
DILexicalBlockAttr DebugImporter::translateImpl(llvm::DILexicalBlock *node) {
- return DILexicalBlockAttr::get(context, translate(node->getScope()),
- translate(node->getFile()), node->getLine(),
- node->getColumn());
+ // Return nullptr if the scope or type is a cyclic dependency.
+ DIScopeAttr scope = translate(node->getScope());
+ if (node->getScope() && !scope)
+ return nullptr;
+ return DILexicalBlockAttr::get(context, scope, translate(node->getFile()),
+ node->getLine(), node->getColumn());
}
DILexicalBlockFileAttr
DebugImporter::translateImpl(llvm::DILexicalBlockFile *node) {
- return DILexicalBlockFileAttr::get(context, translate(node->getScope()),
- translate(node->getFile()),
+ // Return nullptr if the scope or type is a cyclic dependency.
+ DIScopeAttr scope = translate(node->getScope());
+ if (node->getScope() && !scope)
+ return nullptr;
+ return DILexicalBlockFileAttr::get(context, scope, translate(node->getFile()),
node->getDiscriminator());
}
@@ -135,11 +145,14 @@ DebugImporter::translateImpl(llvm::DIGlobalVariable *node) {
}
DILocalVariableAttr DebugImporter::translateImpl(llvm::DILocalVariable *node) {
- return DILocalVariableAttr::get(context, translate(node->getScope()),
- getStringAttrOrNull(node->getRawName()),
- translate(node->getFile()), node->getLine(),
- node->getArg(), node->getAlignInBits(),
- translate(node->getType()));
+ // Return nullptr if the scope or type is a cyclic dependency.
+ DIScopeAttr scope = translate(node->getScope());
+ if (node->getScope() && !scope)
+ return nullptr;
+ return DILocalVariableAttr::get(
+ context, scope, getStringAttrOrNull(node->getRawName()),
+ translate(node->getFile()), node->getLine(), node->getArg(),
+ node->getAlignInBits(), translate(node->getType()));
}
DIScopeAttr DebugImporter::translateImpl(llvm::DIScope *node) {
diff --git a/mlir/lib/Target/LLVMIR/ModuleImport.cpp b/mlir/lib/Target/LLVMIR/ModuleImport.cpp
index 97ccb2b29f3a..d63ea12ecd49 100644
--- a/mlir/lib/Target/LLVMIR/ModuleImport.cpp
+++ b/mlir/lib/Target/LLVMIR/ModuleImport.cpp
@@ -1966,6 +1966,13 @@ ModuleImport::processDebugIntrinsic(llvm::DbgVariableIntrinsic *dbgIntr,
// TODO: find a way to support this case.
if (isMetadataKillLocation(dbgIntr))
return emitUnsupportedWarning();
+ // Drop debug intrinsics if the associated variable information cannot be
+ // translated due to cyclic debug metadata.
+ // TODO: Support cyclic debug metadata.
+ DILocalVariableAttr localVariableAttr =
+ matchLocalVariableAttr(dbgIntr->getArgOperand(1));
+ if (!localVariableAttr)
+ return emitUnsupportedWarning();
FailureOr<Value> argOperand = convertMetadataValue(dbgIntr->getArgOperand(0));
if (failed(argOperand))
return emitError(loc) << "failed to convert a debug intrinsic operand: "
@@ -1991,8 +1998,6 @@ ModuleImport::processDebugIntrinsic(llvm::DbgVariableIntrinsic *dbgIntr,
} else {
builder.setInsertionPointAfterValue(*argOperand);
}
- DILocalVariableAttr localVariableAttr =
- matchLocalVariableAttr(dbgIntr->getArgOperand(1));
auto locationExprAttr =
debugImporter->translateExpression(dbgIntr->getExpression());
Operation *op =
diff --git a/mlir/lib/Transforms/Utils/DialectConversion.cpp b/mlir/lib/Transforms/Utils/DialectConversion.cpp
index 4989ddc3ec94..857b601acbc3 100644
--- a/mlir/lib/Transforms/Utils/DialectConversion.cpp
+++ b/mlir/lib/Transforms/Utils/DialectConversion.cpp
@@ -152,519 +152,25 @@ namespace {
/// This class contains a snapshot of the current conversion rewriter state.
/// This is useful when saving and undoing a set of rewrites.
struct RewriterState {
- RewriterState(unsigned numCreatedOps, unsigned numUnresolvedMaterializations,
- unsigned numReplacements, unsigned numArgReplacements,
- unsigned numRewrites, unsigned numIgnoredOperations)
- : numCreatedOps(numCreatedOps),
- numUnresolvedMaterializations(numUnresolvedMaterializations),
- numReplacements(numReplacements),
- numArgReplacements(numArgReplacements), numRewrites(numRewrites),
- numIgnoredOperations(numIgnoredOperations) {}
-
- /// The current number of created operations.
- unsigned numCreatedOps;
-
- /// The current number of unresolved materializations.
- unsigned numUnresolvedMaterializations;
-
- /// The current number of replacements queued.
- unsigned numReplacements;
-
- /// The current number of argument replacements queued.
- unsigned numArgReplacements;
+ RewriterState(unsigned numRewrites, unsigned numIgnoredOperations,
+ unsigned numErased)
+ : numRewrites(numRewrites), numIgnoredOperations(numIgnoredOperations),
+ numErased(numErased) {}
/// The current number of rewrites performed.
unsigned numRewrites;
/// The current number of ignored operations.
unsigned numIgnoredOperations;
-};
-
-//===----------------------------------------------------------------------===//
-// OpReplacement
-
-/// This class represents one requested operation replacement via 'replaceOp' or
-/// 'eraseOp`.
-struct OpReplacement {
- OpReplacement(const TypeConverter *converter = nullptr)
- : converter(converter) {}
-
- /// An optional type converter that can be used to materialize conversions
- /// between the new and old values if necessary.
- const TypeConverter *converter;
-};
-
-//===----------------------------------------------------------------------===//
-// UnresolvedMaterialization
-
-/// This class represents an unresolved materialization, i.e. a materialization
-/// that was inserted during conversion that needs to be legalized at the end of
-/// the conversion process.
-class UnresolvedMaterialization {
-public:
- /// The type of materialization.
- enum Kind {
- /// This materialization materializes a conversion for an illegal block
- /// argument type, to a legal one.
- Argument,
-
- /// This materialization materializes a conversion from an illegal type to a
- /// legal one.
- Target
- };
-
- UnresolvedMaterialization(UnrealizedConversionCastOp op = nullptr,
- const TypeConverter *converter = nullptr,
- Kind kind = Target, Type origOutputType = nullptr)
- : op(op), converterAndKind(converter, kind),
- origOutputType(origOutputType) {}
-
- /// Return the temporary conversion operation inserted for this
- /// materialization.
- UnrealizedConversionCastOp getOp() const { return op; }
- /// Return the type converter of this materialization (which may be null).
- const TypeConverter *getConverter() const {
- return converterAndKind.getPointer();
- }
-
- /// Return the kind of this materialization.
- Kind getKind() const { return converterAndKind.getInt(); }
-
- /// Set the kind of this materialization.
- void setKind(Kind kind) { converterAndKind.setInt(kind); }
-
- /// Return the original illegal output type of the input values.
- Type getOrigOutputType() const { return origOutputType; }
-
-private:
- /// The unresolved materialization operation created during conversion.
- UnrealizedConversionCastOp op;
-
- /// The corresponding type converter to use when resolving this
- /// materialization, and the kind of this materialization.
- llvm::PointerIntPair<const TypeConverter *, 1, Kind> converterAndKind;
-
- /// The original output type. This is only used for argument conversions.
- Type origOutputType;
+ /// The current number of erased operations/blocks.
+ unsigned numErased;
};
-} // namespace
-
-/// Build an unresolved materialization operation given an output type and set
-/// of input operands.
-static Value buildUnresolvedMaterialization(
- UnresolvedMaterialization::Kind kind, Block *insertBlock,
- Block::iterator insertPt, Location loc, ValueRange inputs, Type outputType,
- Type origOutputType, const TypeConverter *converter,
- SmallVectorImpl<UnresolvedMaterialization> &unresolvedMaterializations) {
- // Avoid materializing an unnecessary cast.
- if (inputs.size() == 1 && inputs.front().getType() == outputType)
- return inputs.front();
-
- // Create an unresolved materialization. We use a new OpBuilder to avoid
- // tracking the materialization like we do for other operations.
- OpBuilder builder(insertBlock, insertPt);
- auto convertOp =
- builder.create<UnrealizedConversionCastOp>(loc, outputType, inputs);
- unresolvedMaterializations.emplace_back(convertOp, converter, kind,
- origOutputType);
- return convertOp.getResult(0);
-}
-static Value buildUnresolvedArgumentMaterialization(
- PatternRewriter &rewriter, Location loc, ValueRange inputs,
- Type origOutputType, Type outputType, const TypeConverter *converter,
- SmallVectorImpl<UnresolvedMaterialization> &unresolvedMaterializations) {
- return buildUnresolvedMaterialization(
- UnresolvedMaterialization::Argument, rewriter.getInsertionBlock(),
- rewriter.getInsertionPoint(), loc, inputs, outputType, origOutputType,
- converter, unresolvedMaterializations);
-}
-static Value buildUnresolvedTargetMaterialization(
- Location loc, Value input, Type outputType, const TypeConverter *converter,
- SmallVectorImpl<UnresolvedMaterialization> &unresolvedMaterializations) {
- Block *insertBlock = input.getParentBlock();
- Block::iterator insertPt = insertBlock->begin();
- if (OpResult inputRes = dyn_cast<OpResult>(input))
- insertPt = ++inputRes.getOwner()->getIterator();
-
- return buildUnresolvedMaterialization(
- UnresolvedMaterialization::Target, insertBlock, insertPt, loc, input,
- outputType, outputType, converter, unresolvedMaterializations);
-}
-
-//===----------------------------------------------------------------------===//
-// ArgConverter
-//===----------------------------------------------------------------------===//
-namespace {
-/// This class provides a simple interface for converting the types of block
-/// arguments. This is done by creating a new block that contains the new legal
-/// types and extracting the block that contains the old illegal types to allow
-/// for undoing pending rewrites in the case of failure.
-struct ArgConverter {
- ArgConverter(
- PatternRewriter &rewriter,
- SmallVectorImpl<UnresolvedMaterialization> &unresolvedMaterializations)
- : rewriter(rewriter),
- unresolvedMaterializations(unresolvedMaterializations) {}
-
- /// This structure contains the information pertaining to an argument that has
- /// been converted.
- struct ConvertedArgInfo {
- ConvertedArgInfo(unsigned newArgIdx, unsigned newArgSize,
- Value castValue = nullptr)
- : newArgIdx(newArgIdx), newArgSize(newArgSize), castValue(castValue) {}
-
- /// The start index of in the new argument list that contains arguments that
- /// replace the original.
- unsigned newArgIdx;
-
- /// The number of arguments that replaced the original argument.
- unsigned newArgSize;
-
- /// The cast value that was created to cast from the new arguments to the
- /// old. This only used if 'newArgSize' > 1.
- Value castValue;
- };
-
- /// This structure contains information pertaining to a block that has had its
- /// signature converted.
- struct ConvertedBlockInfo {
- ConvertedBlockInfo(Block *origBlock, const TypeConverter *converter)
- : origBlock(origBlock), converter(converter) {}
-
- /// The original block that was requested to have its signature converted.
- Block *origBlock;
-
- /// The conversion information for each of the arguments. The information is
- /// std::nullopt if the argument was dropped during conversion.
- SmallVector<std::optional<ConvertedArgInfo>, 1> argInfo;
-
- /// The type converter used to convert the arguments.
- const TypeConverter *converter;
- };
-
- //===--------------------------------------------------------------------===//
- // Rewrite Application
- //===--------------------------------------------------------------------===//
-
- /// Erase any rewrites registered for the blocks within the given operation
- /// which is about to be removed. This merely drops the rewrites without
- /// undoing them.
- void notifyOpRemoved(Operation *op);
-
- /// Cleanup and undo any generated conversions for the arguments of block.
- /// This method replaces the new block with the original, reverting the IR to
- /// its original state.
- void discardRewrites(Block *block);
-
- /// Fully replace uses of the old arguments with the new.
- void applyRewrites(ConversionValueMapping &mapping);
-
- /// Materialize any necessary conversions for converted arguments that have
- /// live users, using the provided `findLiveUser` to search for a user that
- /// survives the conversion process.
- LogicalResult
- materializeLiveConversions(ConversionValueMapping &mapping,
- OpBuilder &builder,
- function_ref<Operation *(Value)> findLiveUser);
-
- //===--------------------------------------------------------------------===//
- // Conversion
- //===--------------------------------------------------------------------===//
-
- /// Attempt to convert the signature of the given block, if successful a new
- /// block is returned containing the new arguments. Returns `block` if it did
- /// not require conversion.
- FailureOr<Block *>
- convertSignature(Block *block, const TypeConverter *converter,
- ConversionValueMapping &mapping,
- SmallVectorImpl<BlockArgument> &argReplacements);
-
- /// Apply the given signature conversion on the given block. The new block
- /// containing the updated signature is returned. If no conversions were
- /// necessary, e.g. if the block has no arguments, `block` is returned.
- /// `converter` is used to generate any necessary cast operations that
- /// translate between the origin argument types and those specified in the
- /// signature conversion.
- Block *applySignatureConversion(
- Block *block, const TypeConverter *converter,
- TypeConverter::SignatureConversion &signatureConversion,
- ConversionValueMapping &mapping,
- SmallVectorImpl<BlockArgument> &argReplacements);
-
- /// A collection of blocks that have had their arguments converted. This is a
- /// map from the new replacement block, back to the original block.
- llvm::MapVector<Block *, ConvertedBlockInfo> conversionInfo;
-
- /// The pattern rewriter to use when materializing conversions.
- PatternRewriter &rewriter;
-
- /// An ordered set of unresolved materializations during conversion.
- SmallVectorImpl<UnresolvedMaterialization> &unresolvedMaterializations;
-};
-} // namespace
-
-//===----------------------------------------------------------------------===//
-// Rewrite Application
-
-void ArgConverter::notifyOpRemoved(Operation *op) {
- if (conversionInfo.empty())
- return;
-
- for (Region &region : op->getRegions()) {
- for (Block &block : region) {
- // Drop any rewrites from within.
- for (Operation &nestedOp : block)
- if (nestedOp.getNumRegions())
- notifyOpRemoved(&nestedOp);
-
- // Check if this block was converted.
- auto *it = conversionInfo.find(&block);
- if (it == conversionInfo.end())
- continue;
-
- // Drop all uses of the original arguments and delete the original block.
- Block *origBlock = it->second.origBlock;
- for (BlockArgument arg : origBlock->getArguments())
- arg.dropAllUses();
- conversionInfo.erase(it);
- }
- }
-}
-
-void ArgConverter::discardRewrites(Block *block) {
- auto *it = conversionInfo.find(block);
- if (it == conversionInfo.end())
- return;
- Block *origBlock = it->second.origBlock;
-
- // Drop all uses of the new block arguments and replace uses of the new block.
- for (int i = block->getNumArguments() - 1; i >= 0; --i)
- block->getArgument(i).dropAllUses();
- block->replaceAllUsesWith(origBlock);
-
- // Move the operations back the original block, move the original block back
- // into its original location and the delete the new block.
- origBlock->getOperations().splice(origBlock->end(), block->getOperations());
- block->getParent()->getBlocks().insert(Region::iterator(block), origBlock);
- block->erase();
-
- conversionInfo.erase(it);
-}
-
-void ArgConverter::applyRewrites(ConversionValueMapping &mapping) {
- for (auto &info : conversionInfo) {
- ConvertedBlockInfo &blockInfo = info.second;
- Block *origBlock = blockInfo.origBlock;
-
- // Process the remapping for each of the original arguments.
- for (unsigned i = 0, e = origBlock->getNumArguments(); i != e; ++i) {
- std::optional<ConvertedArgInfo> &argInfo = blockInfo.argInfo[i];
- BlockArgument origArg = origBlock->getArgument(i);
-
- // Handle the case of a 1->0 value mapping.
- if (!argInfo) {
- if (Value newArg = mapping.lookupOrNull(origArg, origArg.getType()))
- origArg.replaceAllUsesWith(newArg);
- continue;
- }
-
- // Otherwise this is a 1->1+ value mapping.
- Value castValue = argInfo->castValue;
- assert(argInfo->newArgSize >= 1 && castValue && "expected 1->1+ mapping");
-
- // If the argument is still used, replace it with the generated cast.
- if (!origArg.use_empty()) {
- origArg.replaceAllUsesWith(
- mapping.lookupOrDefault(castValue, origArg.getType()));
- }
- }
-
- delete origBlock;
- blockInfo.origBlock = nullptr;
- }
-}
-
-LogicalResult ArgConverter::materializeLiveConversions(
- ConversionValueMapping &mapping, OpBuilder &builder,
- function_ref<Operation *(Value)> findLiveUser) {
- for (auto &info : conversionInfo) {
- Block *newBlock = info.first;
- ConvertedBlockInfo &blockInfo = info.second;
- Block *origBlock = blockInfo.origBlock;
-
- // Process the remapping for each of the original arguments.
- for (unsigned i = 0, e = origBlock->getNumArguments(); i != e; ++i) {
- // If the type of this argument changed and the argument is still live, we
- // need to materialize a conversion.
- BlockArgument origArg = origBlock->getArgument(i);
- if (mapping.lookupOrNull(origArg, origArg.getType()))
- continue;
- Operation *liveUser = findLiveUser(origArg);
- if (!liveUser)
- continue;
-
- Value replacementValue = mapping.lookupOrDefault(origArg);
- bool isDroppedArg = replacementValue == origArg;
- if (isDroppedArg)
- rewriter.setInsertionPointToStart(newBlock);
- else
- rewriter.setInsertionPointAfterValue(replacementValue);
- Value newArg;
- if (blockInfo.converter) {
- newArg = blockInfo.converter->materializeSourceConversion(
- rewriter, origArg.getLoc(), origArg.getType(),
- isDroppedArg ? ValueRange() : ValueRange(replacementValue));
- assert((!newArg || newArg.getType() == origArg.getType()) &&
- "materialization hook did not provide a value of the expected "
- "type");
- }
- if (!newArg) {
- InFlightDiagnostic diag =
- emitError(origArg.getLoc())
- << "failed to materialize conversion for block argument #" << i
- << " that remained live after conversion, type was "
- << origArg.getType();
- if (!isDroppedArg)
- diag << ", with target type " << replacementValue.getType();
- diag.attachNote(liveUser->getLoc())
- << "see existing live user here: " << *liveUser;
- return failure();
- }
- mapping.map(origArg, newArg);
- }
- }
- return success();
-}
-
-//===----------------------------------------------------------------------===//
-// Conversion
-
-FailureOr<Block *> ArgConverter::convertSignature(
- Block *block, const TypeConverter *converter,
- ConversionValueMapping &mapping,
- SmallVectorImpl<BlockArgument> &argReplacements) {
- // Check if the block was already converted.
- // * If the block is mapped in `conversionInfo`, it is a converted block.
- // * If the block is detached, conservatively assume that it is going to be
- // deleted; it is likely the old block (before it was converted).
- if (conversionInfo.count(block) || !block->getParent())
- return block;
- // If a converter wasn't provided, and the block wasn't already converted,
- // there is nothing we can do.
- if (!converter)
- return failure();
-
- // Try to convert the signature for the block with the provided converter.
- if (auto conversion = converter->convertBlockSignature(block))
- return applySignatureConversion(block, converter, *conversion, mapping,
- argReplacements);
- return failure();
-}
-
-Block *ArgConverter::applySignatureConversion(
- Block *block, const TypeConverter *converter,
- TypeConverter::SignatureConversion &signatureConversion,
- ConversionValueMapping &mapping,
- SmallVectorImpl<BlockArgument> &argReplacements) {
- // If no arguments are being changed or added, there is nothing to do.
- unsigned origArgCount = block->getNumArguments();
- auto convertedTypes = signatureConversion.getConvertedTypes();
- if (origArgCount == 0 && convertedTypes.empty())
- return block;
-
- // Split the block at the beginning to get a new block to use for the updated
- // signature.
- Block *newBlock = block->splitBlock(block->begin());
- block->replaceAllUsesWith(newBlock);
- // Unlink the block, but do not erase it yet, so that the change can be rolled
- // back.
- block->getParent()->getBlocks().remove(block);
-
- // Map all new arguments to the location of the argument they originate from.
- SmallVector<Location> newLocs(convertedTypes.size(),
- rewriter.getUnknownLoc());
- for (unsigned i = 0; i < origArgCount; ++i) {
- auto inputMap = signatureConversion.getInputMapping(i);
- if (!inputMap || inputMap->replacementValue)
- continue;
- Location origLoc = block->getArgument(i).getLoc();
- for (unsigned j = 0; j < inputMap->size; ++j)
- newLocs[inputMap->inputNo + j] = origLoc;
- }
-
- SmallVector<Value, 4> newArgRange(
- newBlock->addArguments(convertedTypes, newLocs));
- ArrayRef<Value> newArgs(newArgRange);
-
- // Remap each of the original arguments as determined by the signature
- // conversion.
- ConvertedBlockInfo info(block, converter);
- info.argInfo.resize(origArgCount);
-
- OpBuilder::InsertionGuard guard(rewriter);
- rewriter.setInsertionPointToStart(newBlock);
- for (unsigned i = 0; i != origArgCount; ++i) {
- auto inputMap = signatureConversion.getInputMapping(i);
- if (!inputMap)
- continue;
- BlockArgument origArg = block->getArgument(i);
-
- // If inputMap->replacementValue is not nullptr, then the argument is
- // dropped and a replacement value is provided to be the remappedValue.
- if (inputMap->replacementValue) {
- assert(inputMap->size == 0 &&
- "invalid to provide a replacement value when the argument isn't "
- "dropped");
- mapping.map(origArg, inputMap->replacementValue);
- argReplacements.push_back(origArg);
- continue;
- }
-
- // Otherwise, this is a 1->1+ mapping.
- auto replArgs = newArgs.slice(inputMap->inputNo, inputMap->size);
- Value newArg;
-
- // If this is a 1->1 mapping and the types of new and replacement arguments
- // match (i.e. it's an identity map), then the argument is mapped to its
- // original type.
- // FIXME: We simply pass through the replacement argument if there wasn't a
- // converter, which isn't great as it allows implicit type conversions to
- // appear. We should properly restructure this code to handle cases where a
- // converter isn't provided and also to properly handle the case where an
- // argument materialization is actually a temporary source materialization
- // (e.g. in the case of 1->N).
- if (replArgs.size() == 1 &&
- (!converter || replArgs[0].getType() == origArg.getType())) {
- newArg = replArgs.front();
- } else {
- Type origOutputType = origArg.getType();
-
- // Legalize the argument output type.
- Type outputType = origOutputType;
- if (Type legalOutputType = converter->convertType(outputType))
- outputType = legalOutputType;
-
- newArg = buildUnresolvedArgumentMaterialization(
- rewriter, origArg.getLoc(), replArgs, origOutputType, outputType,
- converter, unresolvedMaterializations);
- }
-
- mapping.map(origArg, newArg);
- argReplacements.push_back(origArg);
- info.argInfo[i] =
- ConvertedArgInfo(inputMap->inputNo, inputMap->size, newArg);
- }
-
- conversionInfo.insert({newBlock, std::move(info)});
- return newBlock;
-}
//===----------------------------------------------------------------------===//
// IR rewrites
//===----------------------------------------------------------------------===//
-namespace {
/// An IR rewrite that can be committed (upon success) or rolled back (upon
/// failure).
///
@@ -685,19 +191,29 @@ public:
MoveBlock,
SplitBlock,
BlockTypeConversion,
+ ReplaceBlockArg,
// Operation rewrites
MoveOperation,
- ModifyOperation
+ ModifyOperation,
+ ReplaceOperation,
+ CreateOperation,
+ UnresolvedMaterialization
};
virtual ~IRRewrite() = default;
- /// Roll back the rewrite.
+ /// Roll back the rewrite. Operations may be erased during rollback.
virtual void rollback() = 0;
- /// Commit the rewrite.
+ /// Commit the rewrite. Operations may be unlinked from their blocks during
+ /// the commit phase, but they must not be erased yet. This is because
+ /// internal dialect conversion state (such as `mapping`) may still be using
+ /// them. Operations must be erased during cleanup.
virtual void commit() {}
+  /// Clean up operations (e.g., erase them). Cleanup is called after commit.

+ virtual void cleanup() {}
+
Kind getKind() const { return kind; }
static bool classof(const IRRewrite *rewrite) { return true; }
@@ -706,6 +222,14 @@ protected:
IRRewrite(Kind kind, ConversionPatternRewriterImpl &rewriterImpl)
: kind(kind), rewriterImpl(rewriterImpl) {}
+ /// Erase the given op (unless it was already erased).
+ void eraseOp(Operation *op);
+
+ /// Erase the given block (unless it was already erased).
+ void eraseBlock(Block *block);
+
+ const ConversionConfig &getConfig() const;
+
const Kind kind;
ConversionPatternRewriterImpl &rewriterImpl;
};
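
A sketch of the driver loop implied by this contract (the actual drivers are
ConversionPatternRewriterImpl::applyRewrites and undoRewrites further below);
`rewrites` is assumed to be the recorded list of rewrites:

static void commitOrRollback(
    MutableArrayRef<std::unique_ptr<IRRewrite>> rewrites, bool succeeded) {
  if (succeeded) {
    // Phase 1: update uses and unlink IR, keeping the objects alive.
    for (auto &rewrite : rewrites)
      rewrite->commit();
    // Phase 2: only now free the unlinked operations/blocks.
    for (auto &rewrite : rewrites)
      rewrite->cleanup();
  } else {
    // Undo in reverse order so later changes are reverted first.
    for (auto &rewrite : llvm::reverse(rewrites))
      rewrite->rollback();
  }
}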
@@ -718,7 +242,7 @@ public:
static bool classof(const IRRewrite *rewrite) {
return rewrite->getKind() >= Kind::CreateBlock &&
- rewrite->getKind() <= Kind::BlockTypeConversion;
+ rewrite->getKind() <= Kind::ReplaceBlockArg;
}
protected:
@@ -748,8 +272,10 @@ public:
auto &blockOps = block->getOperations();
while (!blockOps.empty())
blockOps.remove(blockOps.begin());
- block->dropAllDefinedValueUses();
- block->erase();
+ if (block->getParent())
+ eraseBlock(block);
+ else
+ delete block;
}
};
@@ -787,6 +313,8 @@ public:
void commit() override {
// Erase the block.
assert(block && "expected block");
+ assert(block->empty() && "expected empty block");
+ block->dropAllDefinedValueUses();
delete block;
block = nullptr;
}
@@ -885,8 +413,7 @@ public:
// Merge back the block that was split out.
originalBlock->getOperations().splice(originalBlock->end(),
block->getOperations());
- block->dropAllDefinedValueUses();
- block->erase();
+ eraseBlock(block);
}
private:
@@ -894,20 +421,80 @@ private:
Block *originalBlock;
};
+/// This structure contains the information pertaining to an argument that has
+/// been converted.
+struct ConvertedArgInfo {
+ ConvertedArgInfo(unsigned newArgIdx, unsigned newArgSize,
+ Value castValue = nullptr)
+ : newArgIdx(newArgIdx), newArgSize(newArgSize), castValue(castValue) {}
+
+  /// The start index in the new argument list of the arguments that replace
+  /// the original argument.
+ unsigned newArgIdx;
+
+ /// The number of arguments that replaced the original argument.
+ unsigned newArgSize;
+
+ /// The cast value that was created to cast from the new arguments to the
+  /// old. This is only used if 'newArgSize' > 1.
+ Value castValue;
+};
+
/// Block type conversion. This rewrite is partially reflected in the IR.
class BlockTypeConversionRewrite : public BlockRewrite {
public:
- BlockTypeConversionRewrite(ConversionPatternRewriterImpl &rewriterImpl,
- Block *block)
- : BlockRewrite(Kind::BlockTypeConversion, rewriterImpl, block) {}
+ BlockTypeConversionRewrite(
+ ConversionPatternRewriterImpl &rewriterImpl, Block *block,
+ Block *origBlock, SmallVector<std::optional<ConvertedArgInfo>, 1> argInfo,
+ const TypeConverter *converter)
+ : BlockRewrite(Kind::BlockTypeConversion, rewriterImpl, block),
+ origBlock(origBlock), argInfo(argInfo), converter(converter) {}
static bool classof(const IRRewrite *rewrite) {
return rewrite->getKind() == Kind::BlockTypeConversion;
}
- // TODO: Block type conversions are currently committed in
- // `ArgConverter::applyRewrites`. This should be done in the "commit" method.
+ /// Materialize any necessary conversions for converted arguments that have
+ /// live users, using the provided `findLiveUser` to search for a user that
+ /// survives the conversion process.
+ LogicalResult
+ materializeLiveConversions(function_ref<Operation *(Value)> findLiveUser);
+
+ void commit() override;
+
void rollback() override;
+
+private:
+ /// The original block that was requested to have its signature converted.
+ Block *origBlock;
+
+ /// The conversion information for each of the arguments. The information is
+ /// std::nullopt if the argument was dropped during conversion.
+ SmallVector<std::optional<ConvertedArgInfo>, 1> argInfo;
+
+ /// The type converter used to convert the arguments.
+ const TypeConverter *converter;
+};
+
+/// Replacing a block argument. This rewrite is not immediately reflected in the
+/// IR. An internal IR mapping is updated, but the actual replacement is delayed
+/// until the rewrite is committed.
+class ReplaceBlockArgRewrite : public BlockRewrite {
+public:
+ ReplaceBlockArgRewrite(ConversionPatternRewriterImpl &rewriterImpl,
+ Block *block, BlockArgument arg)
+ : BlockRewrite(Kind::ReplaceBlockArg, rewriterImpl, block), arg(arg) {}
+
+ static bool classof(const IRRewrite *rewrite) {
+ return rewrite->getKind() == Kind::ReplaceBlockArg;
+ }
+
+ void commit() override;
+
+ void rollback() override;
+
+private:
+ BlockArgument arg;
};
/// An operation rewrite.
@@ -918,7 +505,7 @@ public:
static bool classof(const IRRewrite *rewrite) {
return rewrite->getKind() >= Kind::MoveOperation &&
- rewrite->getKind() <= Kind::ModifyOperation;
+ rewrite->getKind() <= Kind::UnresolvedMaterialization;
}
protected:
@@ -953,8 +540,8 @@ private:
// The block in which this operation was previously contained.
Block *block;
- // The original successor of this operation before it was moved. "nullptr" if
- // this operation was the only operation in the region.
+ // The original successor of this operation before it was moved. "nullptr"
+ // if this operation was the only operation in the region.
Operation *insertBeforeOp;
};
@@ -1019,6 +606,118 @@ private:
SmallVector<Block *, 2> successors;
void *propertiesStorage = nullptr;
};
+
+/// Replacing an operation. Erasing an operation is treated as a special case
+/// with "null" replacements. This rewrite is not immediately reflected in the
+/// IR. An internal IR mapping is updated, but values are not replaced and the
+/// original op is not erased until the rewrite is committed.
+class ReplaceOperationRewrite : public OperationRewrite {
+public:
+ ReplaceOperationRewrite(ConversionPatternRewriterImpl &rewriterImpl,
+ Operation *op, const TypeConverter *converter,
+ bool changedResults)
+ : OperationRewrite(Kind::ReplaceOperation, rewriterImpl, op),
+ converter(converter), changedResults(changedResults) {}
+
+ static bool classof(const IRRewrite *rewrite) {
+ return rewrite->getKind() == Kind::ReplaceOperation;
+ }
+
+ void commit() override;
+
+ void rollback() override;
+
+ void cleanup() override;
+
+ const TypeConverter *getConverter() const { return converter; }
+
+ bool hasChangedResults() const { return changedResults; }
+
+private:
+ /// An optional type converter that can be used to materialize conversions
+ /// between the new and old values if necessary.
+ const TypeConverter *converter;
+
+ /// A boolean flag that indicates whether result types have changed or not.
+ bool changedResults;
+};
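
As the class comment notes, erasure is modeled as a replacement with null
values. A rough, illustrative sketch of what erasing an op through the
conversion rewriter boils down to (assuming access to the rewriter's `impl`):

static void eraseAsNullReplacement(detail::ConversionPatternRewriterImpl &impl,
                                   Operation *op) {
  // Every result is "replaced" by a null Value; this records a
  // ReplaceOperationRewrite, and the op is only unlinked/erased on commit.
  SmallVector<Value> nullValues(op->getNumResults(), Value());
  impl.notifyOpReplaced(op, nullValues);
}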
+
+class CreateOperationRewrite : public OperationRewrite {
+public:
+ CreateOperationRewrite(ConversionPatternRewriterImpl &rewriterImpl,
+ Operation *op)
+ : OperationRewrite(Kind::CreateOperation, rewriterImpl, op) {}
+
+ static bool classof(const IRRewrite *rewrite) {
+ return rewrite->getKind() == Kind::CreateOperation;
+ }
+
+ void rollback() override;
+};
+
+/// The type of materialization.
+enum MaterializationKind {
+ /// This materialization materializes a conversion for an illegal block
+ /// argument type, to a legal one.
+ Argument,
+
+ /// This materialization materializes a conversion from an illegal type to a
+ /// legal one.
+ Target
+};
+
+/// An unresolved materialization, i.e., a "builtin.unrealized_conversion_cast"
+/// op. Unresolved materializations are erased at the end of the dialect
+/// conversion.
+class UnresolvedMaterializationRewrite : public OperationRewrite {
+public:
+ UnresolvedMaterializationRewrite(
+ ConversionPatternRewriterImpl &rewriterImpl,
+ UnrealizedConversionCastOp op, const TypeConverter *converter = nullptr,
+ MaterializationKind kind = MaterializationKind::Target,
+ Type origOutputType = nullptr)
+ : OperationRewrite(Kind::UnresolvedMaterialization, rewriterImpl, op),
+ converterAndKind(converter, kind), origOutputType(origOutputType) {}
+
+ static bool classof(const IRRewrite *rewrite) {
+ return rewrite->getKind() == Kind::UnresolvedMaterialization;
+ }
+
+ UnrealizedConversionCastOp getOperation() const {
+ return cast<UnrealizedConversionCastOp>(op);
+ }
+
+ void rollback() override;
+
+ void cleanup() override;
+
+ /// Return the type converter of this materialization (which may be null).
+ const TypeConverter *getConverter() const {
+ return converterAndKind.getPointer();
+ }
+
+ /// Return the kind of this materialization.
+ MaterializationKind getMaterializationKind() const {
+ return converterAndKind.getInt();
+ }
+
+ /// Set the kind of this materialization.
+ void setMaterializationKind(MaterializationKind kind) {
+ converterAndKind.setInt(kind);
+ }
+
+ /// Return the original illegal output type of the input values.
+ Type getOrigOutputType() const { return origOutputType; }
+
+private:
+ /// The corresponding type converter to use when resolving this
+ /// materialization, and the kind of this materialization.
+ llvm::PointerIntPair<const TypeConverter *, 1, MaterializationKind>
+ converterAndKind;
+
+ /// The original output type. This is only used for argument conversions.
+ Type origOutputType;
+};
} // namespace
/// Return "true" if there is an operation rewrite that matches the specified
@@ -1031,23 +730,35 @@ static bool hasRewrite(R &&rewrites, Operation *op) {
});
}
+/// Find the single rewrite object of the specified type and block among the
+/// given rewrites. In debug mode, asserts that there is no more than one such
+/// object. Return "nullptr" if no object was found.
+template <typename RewriteTy, typename R>
+static RewriteTy *findSingleRewrite(R &&rewrites, Block *block) {
+ RewriteTy *result = nullptr;
+ for (auto &rewrite : rewrites) {
+ auto *rewriteTy = dyn_cast<RewriteTy>(rewrite.get());
+ if (rewriteTy && rewriteTy->getBlock() == block) {
+#ifndef NDEBUG
+ assert(!result && "expected single matching rewrite");
+ result = rewriteTy;
+#else
+ return rewriteTy;
+#endif // NDEBUG
+ }
+ }
+ return result;
+}
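
A hypothetical usage sketch: look up the (at most one) pending block type
conversion recorded for a given block.

static BlockTypeConversionRewrite *
getPendingTypeConversion(ArrayRef<std::unique_ptr<IRRewrite>> rewrites,
                         Block *block) {
  // Returns nullptr if no BlockTypeConversionRewrite targets `block`; in debug
  // builds, asserts that at most one such rewrite exists.
  return findSingleRewrite<BlockTypeConversionRewrite>(rewrites, block);
}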
+
//===----------------------------------------------------------------------===//
// ConversionPatternRewriterImpl
//===----------------------------------------------------------------------===//
namespace mlir {
namespace detail {
struct ConversionPatternRewriterImpl : public RewriterBase::Listener {
- explicit ConversionPatternRewriterImpl(PatternRewriter &rewriter)
- : argConverter(rewriter, unresolvedMaterializations),
- notifyCallback(nullptr) {}
-
- /// Cleanup and destroy any generated rewrite operations. This method is
- /// invoked when the conversion process fails.
- void discardRewrites();
-
- /// Apply all requested operation rewrites. This method is invoked when the
- /// conversion process succeeds.
- void applyRewrites();
+ explicit ConversionPatternRewriterImpl(MLIRContext *ctx,
+ const ConversionConfig &config)
+ : eraseRewriter(ctx), config(config) {}
//===--------------------------------------------------------------------===//
// State Management
@@ -1056,6 +767,10 @@ struct ConversionPatternRewriterImpl : public RewriterBase::Listener {
/// Return the current state of the rewriter.
RewriterState getCurrentState();
+ /// Apply all requested operation rewrites. This method is invoked when the
+ /// conversion process succeeds.
+ void applyRewrites();
+
/// Reset the state of the rewriter to a previously saved point.
void resetState(RewriterState state);
@@ -1092,11 +807,18 @@ struct ConversionPatternRewriterImpl : public RewriterBase::Listener {
// Type Conversion
//===--------------------------------------------------------------------===//
- /// Convert the signature of the given block.
+  /// Attempt to convert the signature of the given block. If successful, a new
+  /// block containing the new arguments is returned. Returns `block` if it did
+  /// not require conversion.
FailureOr<Block *> convertBlockSignature(
Block *block, const TypeConverter *converter,
TypeConverter::SignatureConversion *conversion = nullptr);
+ /// Convert the types of non-entry block arguments within the given region.
+ LogicalResult convertNonEntryRegionTypes(
+ Region *region, const TypeConverter &converter,
+ ArrayRef<TypeConverter::SignatureConversion> blockConversions = {});
+
/// Apply a signature conversion on the given region, using `converter` for
/// materializations if not null.
Block *
@@ -1109,10 +831,37 @@ struct ConversionPatternRewriterImpl : public RewriterBase::Listener {
convertRegionTypes(Region *region, const TypeConverter &converter,
TypeConverter::SignatureConversion *entryConversion);
- /// Convert the types of non-entry block arguments within the given region.
- LogicalResult convertNonEntryRegionTypes(
- Region *region, const TypeConverter &converter,
- ArrayRef<TypeConverter::SignatureConversion> blockConversions = {});
+ /// Apply the given signature conversion on the given block. The new block
+ /// containing the updated signature is returned. If no conversions were
+ /// necessary, e.g. if the block has no arguments, `block` is returned.
+ /// `converter` is used to generate any necessary cast operations that
+ /// translate between the origin argument types and those specified in the
+ /// signature conversion.
+ Block *applySignatureConversion(
+ Block *block, const TypeConverter *converter,
+ TypeConverter::SignatureConversion &signatureConversion);
+
+ //===--------------------------------------------------------------------===//
+ // Materializations
+ //===--------------------------------------------------------------------===//
+ /// Build an unresolved materialization operation given an output type and set
+ /// of input operands.
+ Value buildUnresolvedMaterialization(MaterializationKind kind,
+ Block *insertBlock,
+ Block::iterator insertPt, Location loc,
+ ValueRange inputs, Type outputType,
+ Type origOutputType,
+ const TypeConverter *converter);
+
+ Value buildUnresolvedArgumentMaterialization(Block *block, Location loc,
+ ValueRange inputs,
+ Type origOutputType,
+ Type outputType,
+ const TypeConverter *converter);
+
+ Value buildUnresolvedTargetMaterialization(Location loc, Value input,
+ Type outputType,
+ const TypeConverter *converter);
//===--------------------------------------------------------------------===//
// Rewriter Notification Hooks
@@ -1145,28 +894,51 @@ struct ConversionPatternRewriterImpl : public RewriterBase::Listener {
function_ref<void(Diagnostic &)> reasonCallback) override;
//===--------------------------------------------------------------------===//
- // State
+ // IR Erasure
//===--------------------------------------------------------------------===//
- // Mapping between replaced values that differ in type. This happens when
- // replacing a value with one of a different type.
- ConversionValueMapping mapping;
+ /// A rewriter that keeps track of erased ops and blocks. It ensures that no
+ /// operation or block is erased multiple times. This rewriter assumes that
+ /// no new IR is created between calls to `eraseOp`/`eraseBlock`.
+ struct SingleEraseRewriter : public RewriterBase, RewriterBase::Listener {
+ public:
+ SingleEraseRewriter(MLIRContext *context)
+ : RewriterBase(context, /*listener=*/this) {}
+
+ /// Erase the given op (unless it was already erased).
+ void eraseOp(Operation *op) override {
+ if (erased.contains(op))
+ return;
+ op->dropAllUses();
+ RewriterBase::eraseOp(op);
+ }
- /// Utility used to convert block arguments.
- ArgConverter argConverter;
+ /// Erase the given block (unless it was already erased).
+ void eraseBlock(Block *block) override {
+ if (erased.contains(block))
+ return;
+ assert(block->empty() && "expected empty block");
+ block->dropAllDefinedValueUses();
+ RewriterBase::eraseBlock(block);
+ }
- /// Ordered vector of all of the newly created operations during conversion.
- SmallVector<Operation *> createdOps;
+ void notifyOperationErased(Operation *op) override { erased.insert(op); }
+ void notifyBlockErased(Block *block) override { erased.insert(block); }
- /// Ordered vector of all unresolved type conversion materializations during
- /// conversion.
- SmallVector<UnresolvedMaterialization> unresolvedMaterializations;
+ /// Pointers to all erased operations and blocks.
+ SetVector<void *> erased;
+ };
+
+ //===--------------------------------------------------------------------===//
+ // State
+ //===--------------------------------------------------------------------===//
- /// Ordered map of requested operation replacements.
- llvm::MapVector<Operation *, OpReplacement> replacements;
+ /// This rewriter must be used for erasing ops/blocks.
+ SingleEraseRewriter eraseRewriter;
- /// Ordered vector of any requested block argument replacements.
- SmallVector<BlockArgument, 4> argReplacements;
+ // Mapping between replaced values that differ in type. This happens when
+ // replacing a value with one of a different type.
+ ConversionValueMapping mapping;
/// Ordered list of block operations (creations, splits, motions).
SmallVector<std::unique_ptr<IRRewrite>> rewrites;
@@ -1182,11 +954,6 @@ struct ConversionPatternRewriterImpl : public RewriterBase::Listener {
/// operation was ignored.
SetVector<Operation *> ignoredOps;
- /// A vector of indices into `replacements` of operations that were replaced
- /// with values with different result types than the original operation, e.g.
- /// 1->N conversion of some kind.
- SmallVector<unsigned, 4> operationsWithChangedResults;
-
/// The current type converter, or nullptr if no type converter is currently
/// active.
const TypeConverter *currentTypeConverter = nullptr;
@@ -1195,8 +962,8 @@ struct ConversionPatternRewriterImpl : public RewriterBase::Listener {
/// converting the arguments of blocks within that region.
DenseMap<Region *, const TypeConverter *> regionToConverter;
- /// This allows the user to collect the match failure message.
- function_ref<void(Diagnostic &)> notifyCallback;
+ /// Dialect conversion configuration.
+ const ConversionConfig &config;
#ifndef NDEBUG
/// A set of operations that have pending updates. This tracking isn't
@@ -1211,154 +978,193 @@ struct ConversionPatternRewriterImpl : public RewriterBase::Listener {
} // namespace detail
} // namespace mlir
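
A minimal illustration of the guarantee provided by the SingleEraseRewriter
defined above (assuming `impl` is a ConversionPatternRewriterImpl): erasing the
same op twice, e.g. from two different rewrites, is safe because the second
call is a no-op.

static void eraseTwice(detail::ConversionPatternRewriterImpl &impl,
                       Operation *op) {
  impl.eraseRewriter.eraseOp(op); // erased and recorded in `erased`
  impl.eraseRewriter.eraseOp(op); // no-op: the pointer is already tracked
}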
-void BlockTypeConversionRewrite::rollback() {
- // Undo the type conversion.
- rewriterImpl.argConverter.discardRewrites(block);
-}
-
-/// Detach any operations nested in the given operation from their parent
-/// blocks, and erase the given operation. This can be used when the nested
-/// operations are scheduled for erasure themselves, so deleting the regions of
-/// the given operation together with their content would result in double-free.
-/// This happens, for example, when rolling back op creation in the reverse
-/// order and if the nested ops were created before the parent op. This function
-/// does not need to collect nested ops recursively because it is expected to
-/// also be called for each nested op when it is about to be deleted.
-static void detachNestedAndErase(Operation *op) {
- for (Region &region : op->getRegions()) {
- for (Block &block : region.getBlocks()) {
- while (!block.getOperations().empty())
- block.getOperations().remove(block.getOperations().begin());
- block.dropAllDefinedValueUses();
+void IRRewrite::eraseOp(Operation *op) {
+ rewriterImpl.eraseRewriter.eraseOp(op);
+}
+
+void IRRewrite::eraseBlock(Block *block) {
+ rewriterImpl.eraseRewriter.eraseBlock(block);
+}
+
+const ConversionConfig &IRRewrite::getConfig() const {
+ return rewriterImpl.config;
+}
+
+void BlockTypeConversionRewrite::commit() {
+ // Process the remapping for each of the original arguments.
+ for (auto [origArg, info] :
+ llvm::zip_equal(origBlock->getArguments(), argInfo)) {
+ // Handle the case of a 1->0 value mapping.
+ if (!info) {
+ if (Value newArg =
+ rewriterImpl.mapping.lookupOrNull(origArg, origArg.getType()))
+ origArg.replaceAllUsesWith(newArg);
+ continue;
+ }
+
+ // Otherwise this is a 1->1+ value mapping.
+ Value castValue = info->castValue;
+ assert(info->newArgSize >= 1 && castValue && "expected 1->1+ mapping");
+
+ // If the argument is still used, replace it with the generated cast.
+ if (!origArg.use_empty()) {
+ origArg.replaceAllUsesWith(
+ rewriterImpl.mapping.lookupOrDefault(castValue, origArg.getType()));
}
}
- op->dropAllUses();
- op->erase();
+
+ assert(origBlock->empty() && "expected empty block");
+ origBlock->dropAllDefinedValueUses();
+ delete origBlock;
+ origBlock = nullptr;
}
-void ConversionPatternRewriterImpl::discardRewrites() {
- undoRewrites();
+void BlockTypeConversionRewrite::rollback() {
+ // Drop all uses of the new block arguments and replace uses of the new block.
+ for (int i = block->getNumArguments() - 1; i >= 0; --i)
+ block->getArgument(i).dropAllUses();
+ block->replaceAllUsesWith(origBlock);
- // Remove any newly created ops.
- for (UnresolvedMaterialization &materialization : unresolvedMaterializations)
- detachNestedAndErase(materialization.getOp());
- for (auto *op : llvm::reverse(createdOps))
- detachNestedAndErase(op);
+  // Move the operations back to the original block, move the original block
+  // back into its original location, and then erase the new block.
+ origBlock->getOperations().splice(origBlock->end(), block->getOperations());
+ block->getParent()->getBlocks().insert(Region::iterator(block), origBlock);
+ eraseBlock(block);
}
-void ConversionPatternRewriterImpl::applyRewrites() {
- // Apply all of the rewrites replacements requested during conversion.
- for (auto &repl : replacements) {
- for (OpResult result : repl.first->getResults())
- if (Value newValue = mapping.lookupOrNull(result, result.getType()))
- result.replaceAllUsesWith(newValue);
-
- // If this operation defines any regions, drop any pending argument
- // rewrites.
- if (repl.first->getNumRegions())
- argConverter.notifyOpRemoved(repl.first);
- }
-
- // Apply all of the requested argument replacements.
- for (BlockArgument arg : argReplacements) {
- Value repl = mapping.lookupOrNull(arg, arg.getType());
- if (!repl)
+LogicalResult BlockTypeConversionRewrite::materializeLiveConversions(
+ function_ref<Operation *(Value)> findLiveUser) {
+ // Process the remapping for each of the original arguments.
+ for (auto it : llvm::enumerate(origBlock->getArguments())) {
+ BlockArgument origArg = it.value();
+ // Note: `block` may be detached, so OpBuilder::atBlockBegin cannot be used.
+ OpBuilder builder(it.value().getContext(), /*listener=*/&rewriterImpl);
+ builder.setInsertionPointToStart(block);
+
+ // If the type of this argument changed and the argument is still live, we
+ // need to materialize a conversion.
+ if (rewriterImpl.mapping.lookupOrNull(origArg, origArg.getType()))
continue;
-
- if (isa<BlockArgument>(repl)) {
- arg.replaceAllUsesWith(repl);
+ Operation *liveUser = findLiveUser(origArg);
+ if (!liveUser)
continue;
+
+ Value replacementValue = rewriterImpl.mapping.lookupOrDefault(origArg);
+ bool isDroppedArg = replacementValue == origArg;
+ if (!isDroppedArg)
+ builder.setInsertionPointAfterValue(replacementValue);
+ Value newArg;
+ if (converter) {
+ newArg = converter->materializeSourceConversion(
+ builder, origArg.getLoc(), origArg.getType(),
+ isDroppedArg ? ValueRange() : ValueRange(replacementValue));
+ assert((!newArg || newArg.getType() == origArg.getType()) &&
+ "materialization hook did not provide a value of the expected "
+ "type");
}
+ if (!newArg) {
+ InFlightDiagnostic diag =
+ emitError(origArg.getLoc())
+ << "failed to materialize conversion for block argument #"
+ << it.index() << " that remained live after conversion, type was "
+ << origArg.getType();
+ if (!isDroppedArg)
+ diag << ", with target type " << replacementValue.getType();
+ diag.attachNote(liveUser->getLoc())
+ << "see existing live user here: " << *liveUser;
+ return failure();
+ }
+ rewriterImpl.mapping.map(origArg, newArg);
+ }
+ return success();
+}
- // If the replacement value is an operation, we check to make sure that we
- // don't replace uses that are within the parent operation of the
- // replacement value.
- Operation *replOp = cast<OpResult>(repl).getOwner();
- Block *replBlock = replOp->getBlock();
- arg.replaceUsesWithIf(repl, [&](OpOperand &operand) {
- Operation *user = operand.getOwner();
- return user->getBlock() != replBlock || replOp->isBeforeInBlock(user);
- });
+void ReplaceBlockArgRewrite::commit() {
+ Value repl = rewriterImpl.mapping.lookupOrNull(arg, arg.getType());
+ if (!repl)
+ return;
+
+ if (isa<BlockArgument>(repl)) {
+ arg.replaceAllUsesWith(repl);
+ return;
}
- // Drop all of the unresolved materialization operations created during
- // conversion.
- for (auto &mat : unresolvedMaterializations) {
- mat.getOp()->dropAllUses();
- mat.getOp()->erase();
+ // If the replacement value is an operation, we check to make sure that we
+ // don't replace uses that are within the parent operation of the
+ // replacement value.
+ Operation *replOp = cast<OpResult>(repl).getOwner();
+ Block *replBlock = replOp->getBlock();
+ arg.replaceUsesWithIf(repl, [&](OpOperand &operand) {
+ Operation *user = operand.getOwner();
+ return user->getBlock() != replBlock || replOp->isBeforeInBlock(user);
+ });
+}
+
+void ReplaceBlockArgRewrite::rollback() { rewriterImpl.mapping.erase(arg); }
+
+void ReplaceOperationRewrite::commit() {
+ for (OpResult result : op->getResults())
+ if (Value newValue =
+ rewriterImpl.mapping.lookupOrNull(result, result.getType()))
+ result.replaceAllUsesWith(newValue);
+ if (getConfig().unlegalizedOps)
+ getConfig().unlegalizedOps->erase(op);
+ // Do not erase the operation yet. It may still be referenced in `mapping`.
+ op->getBlock()->getOperations().remove(op);
+}
+
+void ReplaceOperationRewrite::rollback() {
+ for (auto result : op->getResults())
+ rewriterImpl.mapping.erase(result);
+}
+
+void ReplaceOperationRewrite::cleanup() { eraseOp(op); }
+
+void CreateOperationRewrite::rollback() {
+ for (Region &region : op->getRegions()) {
+ while (!region.getBlocks().empty())
+ region.getBlocks().remove(region.getBlocks().begin());
}
+ op->dropAllUses();
+ eraseOp(op);
+}
- // In a second pass, erase all of the replaced operations in reverse. This
- // allows processing nested operations before their parent region is
- // destroyed. Because we process in reverse order, producers may be deleted
- // before their users (a pattern deleting a producer and then the consumer)
- // so we first drop all uses explicitly.
- for (auto &repl : llvm::reverse(replacements)) {
- repl.first->dropAllUses();
- repl.first->erase();
+void UnresolvedMaterializationRewrite::rollback() {
+ if (getMaterializationKind() == MaterializationKind::Target) {
+ for (Value input : op->getOperands())
+ rewriterImpl.mapping.erase(input);
}
+ eraseOp(op);
+}
- argConverter.applyRewrites(mapping);
+void UnresolvedMaterializationRewrite::cleanup() { eraseOp(op); }
+void ConversionPatternRewriterImpl::applyRewrites() {
// Commit all rewrites.
for (auto &rewrite : rewrites)
rewrite->commit();
+ for (auto &rewrite : rewrites)
+ rewrite->cleanup();
}
//===----------------------------------------------------------------------===//
// State Management
RewriterState ConversionPatternRewriterImpl::getCurrentState() {
- return RewriterState(createdOps.size(), unresolvedMaterializations.size(),
- replacements.size(), argReplacements.size(),
- rewrites.size(), ignoredOps.size());
+ return RewriterState(rewrites.size(), ignoredOps.size(),
+ eraseRewriter.erased.size());
}
void ConversionPatternRewriterImpl::resetState(RewriterState state) {
- // Reset any replaced arguments.
- for (BlockArgument replacedArg :
- llvm::drop_begin(argReplacements, state.numArgReplacements))
- mapping.erase(replacedArg);
- argReplacements.resize(state.numArgReplacements);
-
// Undo any rewrites.
undoRewrites(state.numRewrites);
- // Reset any replaced operations and undo any saved mappings.
- for (auto &repl : llvm::drop_begin(replacements, state.numReplacements))
- for (auto result : repl.first->getResults())
- mapping.erase(result);
- while (replacements.size() != state.numReplacements)
- replacements.pop_back();
-
- // Pop all of the newly inserted materializations.
- while (unresolvedMaterializations.size() !=
- state.numUnresolvedMaterializations) {
- UnresolvedMaterialization mat = unresolvedMaterializations.pop_back_val();
- UnrealizedConversionCastOp op = mat.getOp();
-
- // If this was a target materialization, drop the mapping that was inserted.
- if (mat.getKind() == UnresolvedMaterialization::Target) {
- for (Value input : op->getOperands())
- mapping.erase(input);
- }
- detachNestedAndErase(op);
- }
-
- // Pop all of the newly created operations.
- while (createdOps.size() != state.numCreatedOps) {
- detachNestedAndErase(createdOps.back());
- createdOps.pop_back();
- }
-
// Pop all of the recorded ignored operations that are no longer valid.
while (ignoredOps.size() != state.numIgnoredOperations)
ignoredOps.pop_back();
- // Reset operations with changed results.
- while (!operationsWithChangedResults.empty() &&
- operationsWithChangedResults.back() >= state.numReplacements)
- operationsWithChangedResults.pop_back();
+ while (eraseRewriter.erased.size() != state.numErased)
+ eraseRewriter.erased.pop_back();
}
void ConversionPatternRewriterImpl::undoRewrites(unsigned numRewritesToKeep) {
@@ -1413,8 +1219,7 @@ LogicalResult ConversionPatternRewriterImpl::remapValues(
if (currentTypeConverter && desiredType && newOperandType != desiredType) {
Location operandLoc = inputLoc ? *inputLoc : operand.getLoc();
Value castValue = buildUnresolvedTargetMaterialization(
- operandLoc, newOperand, desiredType, currentTypeConverter,
- unresolvedMaterializations);
+ operandLoc, newOperand, desiredType, currentTypeConverter);
mapping.map(mapping.lookupOrDefault(newOperand), castValue);
newOperand = castValue;
}
@@ -1425,7 +1230,8 @@ LogicalResult ConversionPatternRewriterImpl::remapValues(
bool ConversionPatternRewriterImpl::isOpIgnored(Operation *op) const {
// Check to see if this operation was replaced or its parent ignored.
- return replacements.count(op) || ignoredOps.count(op->getParentOp());
+ return ignoredOps.count(op->getParentOp()) ||
+ hasRewrite<ReplaceOperationRewrite>(rewrites, op);
}
void ConversionPatternRewriterImpl::markNestedOpsIgnored(Operation *op) {
@@ -1447,18 +1253,18 @@ void ConversionPatternRewriterImpl::markNestedOpsIgnored(Operation *op) {
FailureOr<Block *> ConversionPatternRewriterImpl::convertBlockSignature(
Block *block, const TypeConverter *converter,
TypeConverter::SignatureConversion *conversion) {
- FailureOr<Block *> result =
- conversion ? argConverter.applySignatureConversion(
- block, converter, *conversion, mapping, argReplacements)
- : argConverter.convertSignature(block, converter, mapping,
- argReplacements);
- if (failed(result))
+ if (conversion)
+ return applySignatureConversion(block, converter, *conversion);
+
+ // If a converter wasn't provided, and the block wasn't already converted,
+ // there is nothing we can do.
+ if (!converter)
return failure();
- if (Block *newBlock = *result) {
- if (newBlock != block)
- appendRewrite<BlockTypeConversionRewrite>(newBlock);
- }
- return result;
+
+ // Try to convert the signature for the block with the provided converter.
+ if (auto conversion = converter->convertBlockSignature(block))
+ return applySignatureConversion(block, converter, *conversion);
+ return failure();
}
Block *ConversionPatternRewriterImpl::applySignatureConversion(
@@ -1512,6 +1318,145 @@ LogicalResult ConversionPatternRewriterImpl::convertNonEntryRegionTypes(
return success();
}
+Block *ConversionPatternRewriterImpl::applySignatureConversion(
+ Block *block, const TypeConverter *converter,
+ TypeConverter::SignatureConversion &signatureConversion) {
+ MLIRContext *ctx = eraseRewriter.getContext();
+
+ // If no arguments are being changed or added, there is nothing to do.
+ unsigned origArgCount = block->getNumArguments();
+ auto convertedTypes = signatureConversion.getConvertedTypes();
+ if (llvm::equal(block->getArgumentTypes(), convertedTypes))
+ return block;
+
+ // Split the block at the beginning to get a new block to use for the updated
+ // signature.
+ Block *newBlock = block->splitBlock(block->begin());
+ block->replaceAllUsesWith(newBlock);
+ // Unlink the block, but do not erase it yet, so that the change can be rolled
+ // back.
+ block->getParent()->getBlocks().remove(block);
+
+ // Map all new arguments to the location of the argument they originate from.
+ SmallVector<Location> newLocs(convertedTypes.size(),
+ Builder(ctx).getUnknownLoc());
+ for (unsigned i = 0; i < origArgCount; ++i) {
+ auto inputMap = signatureConversion.getInputMapping(i);
+ if (!inputMap || inputMap->replacementValue)
+ continue;
+ Location origLoc = block->getArgument(i).getLoc();
+ for (unsigned j = 0; j < inputMap->size; ++j)
+ newLocs[inputMap->inputNo + j] = origLoc;
+ }
+
+ SmallVector<Value, 4> newArgRange(
+ newBlock->addArguments(convertedTypes, newLocs));
+ ArrayRef<Value> newArgs(newArgRange);
+
+ // Remap each of the original arguments as determined by the signature
+ // conversion.
+ SmallVector<std::optional<ConvertedArgInfo>, 1> argInfo;
+ argInfo.resize(origArgCount);
+
+ for (unsigned i = 0; i != origArgCount; ++i) {
+ auto inputMap = signatureConversion.getInputMapping(i);
+ if (!inputMap)
+ continue;
+ BlockArgument origArg = block->getArgument(i);
+
+    // If inputMap->replacementValue is not nullptr, then the argument is
+    // dropped and the provided replacement value is used as the remapped value.
+ if (inputMap->replacementValue) {
+ assert(inputMap->size == 0 &&
+ "invalid to provide a replacement value when the argument isn't "
+ "dropped");
+ mapping.map(origArg, inputMap->replacementValue);
+ appendRewrite<ReplaceBlockArgRewrite>(block, origArg);
+ continue;
+ }
+
+ // Otherwise, this is a 1->1+ mapping.
+ auto replArgs = newArgs.slice(inputMap->inputNo, inputMap->size);
+ Value newArg;
+
+ // If this is a 1->1 mapping and the types of new and replacement arguments
+ // match (i.e. it's an identity map), then the argument is mapped to its
+ // original type.
+ // FIXME: We simply pass through the replacement argument if there wasn't a
+ // converter, which isn't great as it allows implicit type conversions to
+ // appear. We should properly restructure this code to handle cases where a
+ // converter isn't provided and also to properly handle the case where an
+ // argument materialization is actually a temporary source materialization
+ // (e.g. in the case of 1->N).
+ if (replArgs.size() == 1 &&
+ (!converter || replArgs[0].getType() == origArg.getType())) {
+ newArg = replArgs.front();
+ } else {
+ Type origOutputType = origArg.getType();
+
+ // Legalize the argument output type.
+ Type outputType = origOutputType;
+ if (Type legalOutputType = converter->convertType(outputType))
+ outputType = legalOutputType;
+
+ newArg = buildUnresolvedArgumentMaterialization(
+ newBlock, origArg.getLoc(), replArgs, origOutputType, outputType,
+ converter);
+ }
+
+ mapping.map(origArg, newArg);
+ appendRewrite<ReplaceBlockArgRewrite>(block, origArg);
+ argInfo[i] = ConvertedArgInfo(inputMap->inputNo, inputMap->size, newArg);
+ }
+
+ appendRewrite<BlockTypeConversionRewrite>(newBlock, block, argInfo,
+ converter);
+ return newBlock;
+}
+
+//===----------------------------------------------------------------------===//
+// Materializations
+//===----------------------------------------------------------------------===//
+
+/// Build an unresolved materialization operation given an output type and set
+/// of input operands.
+Value ConversionPatternRewriterImpl::buildUnresolvedMaterialization(
+ MaterializationKind kind, Block *insertBlock, Block::iterator insertPt,
+ Location loc, ValueRange inputs, Type outputType, Type origOutputType,
+ const TypeConverter *converter) {
+ // Avoid materializing an unnecessary cast.
+ if (inputs.size() == 1 && inputs.front().getType() == outputType)
+ return inputs.front();
+
+ // Create an unresolved materialization. We use a new OpBuilder to avoid
+ // tracking the materialization like we do for other operations.
+ OpBuilder builder(insertBlock, insertPt);
+ auto convertOp =
+ builder.create<UnrealizedConversionCastOp>(loc, outputType, inputs);
+ appendRewrite<UnresolvedMaterializationRewrite>(convertOp, converter, kind,
+ origOutputType);
+ return convertOp.getResult(0);
+}
+Value ConversionPatternRewriterImpl::buildUnresolvedArgumentMaterialization(
+ Block *block, Location loc, ValueRange inputs, Type origOutputType,
+ Type outputType, const TypeConverter *converter) {
+ return buildUnresolvedMaterialization(MaterializationKind::Argument, block,
+ block->begin(), loc, inputs, outputType,
+ origOutputType, converter);
+}
+Value ConversionPatternRewriterImpl::buildUnresolvedTargetMaterialization(
+ Location loc, Value input, Type outputType,
+ const TypeConverter *converter) {
+ Block *insertBlock = input.getParentBlock();
+ Block::iterator insertPt = insertBlock->begin();
+ if (OpResult inputRes = dyn_cast<OpResult>(input))
+ insertPt = ++inputRes.getOwner()->getIterator();
+
+ return buildUnresolvedMaterialization(MaterializationKind::Target,
+ insertBlock, insertPt, loc, input,
+ outputType, outputType, converter);
+}
+
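
A rough sketch of how the two kinds of materializations are requested during
conversion; every name here (`impl`, `newBlock`, `newArgs`, `origType`,
`legalType`, `producedValue`, `desiredType`, `converter`, `loc`) is an
illustrative assumption:

static void materializationExamples(
    detail::ConversionPatternRewriterImpl &impl, Block *newBlock,
    ValueRange newArgs, Type origType, Type legalType, Value producedValue,
    Type desiredType, const TypeConverter *converter, Location loc) {
  // Argument materialization: inserted at the start of the new block, casting
  // the converted block arguments to the (legalized) original argument type.
  // `origType` is recorded so the cast can be resolved later.
  Value argRepl = impl.buildUnresolvedArgumentMaterialization(
      newBlock, loc, newArgs, /*origOutputType=*/origType,
      /*outputType=*/legalType, converter);

  // Target materialization: inserted right after the producer of the value,
  // casting an operand to the type that the applied pattern expects.
  Value operandRepl = impl.buildUnresolvedTargetMaterialization(
      loc, producedValue, desiredType, converter);
  (void)argRepl;
  (void)operandRepl;
}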
//===----------------------------------------------------------------------===//
// Rewriter Notification Hooks
@@ -1523,7 +1468,7 @@ void ConversionPatternRewriterImpl::notifyOperationInserted(
});
if (!previous.isSet()) {
// This is a newly created op.
- createdOps.push_back(op);
+ appendRewrite<CreateOperationRewrite>(op);
return;
}
Operation *prevOp = previous.getPoint() == previous.getBlock()->end()
@@ -1535,7 +1480,12 @@ void ConversionPatternRewriterImpl::notifyOperationInserted(
void ConversionPatternRewriterImpl::notifyOpReplaced(Operation *op,
ValueRange newValues) {
assert(newValues.size() == op->getNumResults());
- assert(!replacements.count(op) && "operation was already replaced");
+#ifndef NDEBUG
+ for (auto &rewrite : rewrites)
+ if (auto *opReplacement = dyn_cast<ReplaceOperationRewrite>(rewrite.get()))
+ assert(opReplacement->getOperation() != op &&
+ "operation was already replaced");
+#endif // NDEBUG
// Track if any of the results changed, e.g. erased and replaced with null.
bool resultChanged = false;
@@ -1550,11 +1500,9 @@ void ConversionPatternRewriterImpl::notifyOpReplaced(Operation *op,
mapping.map(result, newValue);
resultChanged |= (newValue.getType() != result.getType());
}
- if (resultChanged)
- operationsWithChangedResults.push_back(replacements.size());
- // Record the requested operation replacement.
- replacements.insert(std::make_pair(op, OpReplacement(currentTypeConverter)));
+ appendRewrite<ReplaceOperationRewrite>(op, currentTypeConverter,
+ resultChanged);
// Mark this operation as recursively ignored so that we don't need to
// convert any nested operations.
@@ -1594,8 +1542,8 @@ void ConversionPatternRewriterImpl::notifyMatchFailure(
Diagnostic diag(loc, DiagnosticSeverity::Remark);
reasonCallback(diag);
logger.startLine() << "** Failure : " << diag.str() << "\n";
- if (notifyCallback)
- notifyCallback(diag);
+ if (config.notifyCallback)
+ config.notifyCallback(diag);
});
}
@@ -1603,9 +1551,10 @@ void ConversionPatternRewriterImpl::notifyMatchFailure(
// ConversionPatternRewriter
//===----------------------------------------------------------------------===//
-ConversionPatternRewriter::ConversionPatternRewriter(MLIRContext *ctx)
+ConversionPatternRewriter::ConversionPatternRewriter(
+ MLIRContext *ctx, const ConversionConfig &config)
: PatternRewriter(ctx),
- impl(new detail::ConversionPatternRewriterImpl(*this)) {
+ impl(new detail::ConversionPatternRewriterImpl(ctx, config)) {
setListener(impl.get());
}
@@ -1649,8 +1598,6 @@ void ConversionPatternRewriter::eraseOp(Operation *op) {
}
void ConversionPatternRewriter::eraseBlock(Block *block) {
- impl->notifyBlockIsBeingErased(block);
-
// Mark all ops for erasure.
for (Operation &op : *block)
eraseOp(&op);
@@ -1659,6 +1606,7 @@ void ConversionPatternRewriter::eraseBlock(Block *block) {
// object and will be actually destroyed when rewrites are applied. This
// allows us to keep the operations in the block live and undo the removal by
// re-inserting the block.
+ impl->notifyBlockIsBeingErased(block);
block->getParent()->getBlocks().remove(block);
}
@@ -1688,7 +1636,7 @@ void ConversionPatternRewriter::replaceUsesOfBlockArgument(BlockArgument from,
<< "'(in region of '" << parentOp->getName()
<< "'(" << from.getOwner()->getParentOp() << ")\n";
});
- impl->argReplacements.push_back(from);
+ impl->appendRewrite<ReplaceBlockArgRewrite>(from.getOwner(), from);
impl->mapping.map(impl->mapping.lookupOrDefault(from), to);
}
@@ -2022,13 +1970,16 @@ OperationLegalizer::legalizeWithFold(Operation *op,
rewriter.replaceOp(op, replacementValues);
// Recursively legalize any new constant operations.
- for (unsigned i = curState.numCreatedOps, e = rewriterImpl.createdOps.size();
+ for (unsigned i = curState.numRewrites, e = rewriterImpl.rewrites.size();
i != e; ++i) {
- Operation *cstOp = rewriterImpl.createdOps[i];
- if (failed(legalize(cstOp, rewriter))) {
+ auto *createOp =
+ dyn_cast<CreateOperationRewrite>(rewriterImpl.rewrites[i].get());
+ if (!createOp)
+ continue;
+ if (failed(legalize(createOp->getOperation(), rewriter))) {
LLVM_DEBUG(logFailure(rewriterImpl.logger,
"failed to legalize generated constant '{0}'",
- cstOp->getName()));
+ createOp->getOperation()->getName()));
rewriterImpl.resetState(curState);
return failure();
}
@@ -2054,12 +2005,12 @@ OperationLegalizer::legalizeWithPattern(Operation *op,
assert(rewriterImpl.pendingRootUpdates.empty() && "dangling root updates");
LLVM_DEBUG({
logFailure(rewriterImpl.logger, "pattern failed to match");
- if (rewriterImpl.notifyCallback) {
+ if (rewriterImpl.config.notifyCallback) {
Diagnostic diag(op->getLoc(), DiagnosticSeverity::Remark);
diag << "Failed to apply pattern \"" << pattern.getDebugName()
<< "\" on op:\n"
<< *op;
- rewriterImpl.notifyCallback(diag);
+ rewriterImpl.config.notifyCallback(diag);
}
});
rewriterImpl.resetState(curState);
@@ -2112,16 +2063,13 @@ OperationLegalizer::legalizePatternResult(Operation *op, const Pattern &pattern,
#ifndef NDEBUG
assert(impl.pendingRootUpdates.empty() && "dangling root updates");
-
// Check that the root was either replaced or updated in place.
+ auto newRewrites = llvm::drop_begin(impl.rewrites, curState.numRewrites);
auto replacedRoot = [&] {
- return llvm::any_of(
- llvm::drop_begin(impl.replacements, curState.numReplacements),
- [op](auto &it) { return it.first == op; });
+ return hasRewrite<ReplaceOperationRewrite>(newRewrites, op);
};
auto updatedRootInPlace = [&] {
- return hasRewrite<ModifyOperationRewrite>(
- llvm::drop_begin(impl.rewrites, curState.numRewrites), op);
+ return hasRewrite<ModifyOperationRewrite>(newRewrites, op);
};
assert((replacedRoot() || updatedRootInPlace()) &&
"expected pattern to replace the root operation");
@@ -2154,7 +2102,8 @@ LogicalResult OperationLegalizer::legalizePatternBlockRewrites(
if (!rewrite)
continue;
Block *block = rewrite->getBlock();
- if (isa<BlockTypeConversionRewrite, EraseBlockRewrite>(rewrite))
+ if (isa<BlockTypeConversionRewrite, EraseBlockRewrite,
+ ReplaceBlockArgRewrite>(rewrite))
continue;
// Only check blocks outside of the current operation.
Operation *parentOp = block->getParentOp();
@@ -2177,9 +2126,14 @@ LogicalResult OperationLegalizer::legalizePatternBlockRewrites(
// blocks in regions created by this pattern will already be legalized later
// on. If we haven't built the set yet, build it now.
if (operationsToIgnore.empty()) {
- auto createdOps = ArrayRef<Operation *>(impl.createdOps)
- .drop_front(state.numCreatedOps);
- operationsToIgnore.insert(createdOps.begin(), createdOps.end());
+ for (unsigned i = state.numRewrites, e = impl.rewrites.size(); i != e;
+ ++i) {
+ auto *createOp =
+ dyn_cast<CreateOperationRewrite>(impl.rewrites[i].get());
+ if (!createOp)
+ continue;
+ operationsToIgnore.insert(createOp->getOperation());
+ }
}
// If this operation should be considered for re-legalization, try it.
@@ -2197,8 +2151,11 @@ LogicalResult OperationLegalizer::legalizePatternBlockRewrites(
LogicalResult OperationLegalizer::legalizePatternCreatedOperations(
ConversionPatternRewriter &rewriter, ConversionPatternRewriterImpl &impl,
RewriterState &state, RewriterState &newState) {
- for (int i = state.numCreatedOps, e = newState.numCreatedOps; i != e; ++i) {
- Operation *op = impl.createdOps[i];
+ for (int i = state.numRewrites, e = newState.numRewrites; i != e; ++i) {
+ auto *createOp = dyn_cast<CreateOperationRewrite>(impl.rewrites[i].get());
+ if (!createOp)
+ continue;
+ Operation *op = createOp->getOperation();
if (failed(legalize(op, rewriter))) {
LLVM_DEBUG(logFailure(impl.logger,
"failed to legalize generated operation '{0}'({1})",
@@ -2432,21 +2389,21 @@ enum OpConversionMode {
/// applied to the operations on success.
Analysis,
};
+} // namespace
+namespace mlir {
// This class converts operations to a given conversion target via a set of
// rewrite patterns. The conversion behaves differently depending on the
// conversion mode.
struct OperationConverter {
explicit OperationConverter(const ConversionTarget &target,
const FrozenRewritePatternSet &patterns,
- OpConversionMode mode,
- DenseSet<Operation *> *trackedOps = nullptr)
- : opLegalizer(target, patterns), mode(mode), trackedOps(trackedOps) {}
+ const ConversionConfig &config,
+ OpConversionMode mode)
+ : opLegalizer(target, patterns), config(config), mode(mode) {}
/// Converts the given operations to the conversion target.
- LogicalResult
- convertOperations(ArrayRef<Operation *> ops,
- function_ref<void(Diagnostic &)> notifyCallback = nullptr);
+ LogicalResult convertOperations(ArrayRef<Operation *> ops);
private:
/// Converts an operation with the given rewriter.
@@ -2483,16 +2440,13 @@ private:
/// The legalizer to use when converting operations.
OperationLegalizer opLegalizer;
+ /// Dialect conversion configuration.
+ ConversionConfig config;
+
/// The conversion mode to use when legalizing operations.
OpConversionMode mode;
-
- /// A set of pre-existing operations. When mode == OpConversionMode::Analysis,
- /// this is populated with ops found to be legalizable to the target.
- /// When mode == OpConversionMode::Partial, this is populated with ops found
- /// *not* to be legalizable to the target.
- DenseSet<Operation *> *trackedOps;
};
-} // namespace
+} // namespace mlir
LogicalResult OperationConverter::convert(ConversionPatternRewriter &rewriter,
Operation *op) {
@@ -2504,28 +2458,27 @@ LogicalResult OperationConverter::convert(ConversionPatternRewriter &rewriter,
return op->emitError()
<< "failed to legalize operation '" << op->getName() << "'";
// Partial conversions allow conversions to fail iff the operation was not
- // explicitly marked as illegal. If the user provided a nonlegalizableOps
- // set, non-legalizable ops are included.
+  // explicitly marked as illegal. If the user provided an `unlegalizedOps`
+ // set, non-legalizable ops are added to that set.
if (mode == OpConversionMode::Partial) {
if (opLegalizer.isIllegal(op))
return op->emitError()
<< "failed to legalize operation '" << op->getName()
<< "' that was explicitly marked illegal";
- if (trackedOps)
- trackedOps->insert(op);
+ if (config.unlegalizedOps)
+ config.unlegalizedOps->insert(op);
}
} else if (mode == OpConversionMode::Analysis) {
// Analysis conversions don't fail if any operations fail to legalize,
// they are only interested in the operations that were successfully
// legalized.
- trackedOps->insert(op);
+ if (config.legalizableOps)
+ config.legalizableOps->insert(op);
}
return success();
}
-LogicalResult OperationConverter::convertOperations(
- ArrayRef<Operation *> ops,
- function_ref<void(Diagnostic &)> notifyCallback) {
+LogicalResult OperationConverter::convertOperations(ArrayRef<Operation *> ops) {
if (ops.empty())
return success();
const ConversionTarget &target = opLegalizer.getTarget();
@@ -2546,33 +2499,25 @@ LogicalResult OperationConverter::convertOperations(
}
// Convert each operation and discard rewrites on failure.
- ConversionPatternRewriter rewriter(ops.front()->getContext());
+ ConversionPatternRewriter rewriter(ops.front()->getContext(), config);
ConversionPatternRewriterImpl &rewriterImpl = rewriter.getImpl();
- rewriterImpl.notifyCallback = notifyCallback;
for (auto *op : toConvert)
if (failed(convert(rewriter, op)))
- return rewriterImpl.discardRewrites(), failure();
+ return rewriterImpl.undoRewrites(), failure();
// Now that all of the operations have been converted, finalize the conversion
// process to ensure any lingering conversion artifacts are cleaned up and
// legalized.
if (failed(finalize(rewriter)))
- return rewriterImpl.discardRewrites(), failure();
+ return rewriterImpl.undoRewrites(), failure();
// After a successful conversion, apply rewrites if this is not an analysis
// conversion.
if (mode == OpConversionMode::Analysis) {
- rewriterImpl.discardRewrites();
+ rewriterImpl.undoRewrites();
} else {
rewriterImpl.applyRewrites();
-
- // It is possible for a later pattern to erase an op that was originally
- // identified as illegal and added to the trackedOps, remove it now after
- // replacements have been computed.
- if (trackedOps)
- for (auto &repl : rewriterImpl.replacements)
- trackedOps->erase(repl.first);
}
return success();
}
@@ -2586,21 +2531,20 @@ OperationConverter::finalize(ConversionPatternRewriter &rewriter) {
failed(legalizeConvertedArgumentTypes(rewriter, rewriterImpl)))
return failure();
- if (rewriterImpl.operationsWithChangedResults.empty())
- return success();
-
// Process requested operation replacements.
- for (unsigned i = 0, e = rewriterImpl.operationsWithChangedResults.size();
- i != e; ++i) {
- unsigned replIdx = rewriterImpl.operationsWithChangedResults[i];
- auto &repl = *(rewriterImpl.replacements.begin() + replIdx);
- for (OpResult result : repl.first->getResults()) {
+ for (unsigned i = 0; i < rewriterImpl.rewrites.size(); ++i) {
+ auto *opReplacement =
+ dyn_cast<ReplaceOperationRewrite>(rewriterImpl.rewrites[i].get());
+ if (!opReplacement || !opReplacement->hasChangedResults())
+ continue;
+ Operation *op = opReplacement->getOperation();
+ for (OpResult result : op->getResults()) {
Value newValue = rewriterImpl.mapping.lookupOrNull(result);
// If the operation result was replaced with null, all of the uses of this
// value should be replaced.
if (!newValue) {
- if (failed(legalizeErasedResult(repl.first, result, rewriterImpl)))
+ if (failed(legalizeErasedResult(op, result, rewriterImpl)))
return failure();
continue;
}
@@ -2614,15 +2558,11 @@ OperationConverter::finalize(ConversionPatternRewriter &rewriter) {
inverseMapping = rewriterImpl.mapping.getInverse();
// Legalize this result.
- rewriter.setInsertionPoint(repl.first);
- if (failed(legalizeChangedResultType(repl.first, result, newValue,
- repl.second.converter, rewriter,
- rewriterImpl, *inverseMapping)))
+ rewriter.setInsertionPoint(op);
+ if (failed(legalizeChangedResultType(
+ op, result, newValue, opReplacement->getConverter(), rewriter,
+ rewriterImpl, *inverseMapping)))
return failure();
-
- // Update the end iterator for this loop in the case it was updated
- // when legalizing generated conversion operations.
- e = rewriterImpl.operationsWithChangedResults.size();
}
}
return success();
@@ -2639,8 +2579,17 @@ LogicalResult OperationConverter::legalizeConvertedArgumentTypes(
});
return liveUserIt == val.user_end() ? nullptr : *liveUserIt;
};
- return rewriterImpl.argConverter.materializeLiveConversions(
- rewriterImpl.mapping, rewriter, findLiveUser);
+ // Note: `rewrites` may be reallocated as the loop is running.
+ for (int64_t i = 0; i < static_cast<int64_t>(rewriterImpl.rewrites.size());
+ ++i) {
+ auto &rewrite = rewriterImpl.rewrites[i];
+ if (auto *blockTypeConversionRewrite =
+ dyn_cast<BlockTypeConversionRewrite>(rewrite.get()))
+ if (failed(blockTypeConversionRewrite->materializeLiveConversions(
+ findLiveUser)))
+ return failure();
+ }
+ return success();
}
/// Replace the results of a materialization operation with the given values.
@@ -2672,11 +2621,12 @@ replaceMaterialization(ConversionPatternRewriterImpl &rewriterImpl,
/// Compute all of the unresolved materializations that will persist beyond the
/// conversion process and thus require inserting a proper user materialization.
static void computeNecessaryMaterializations(
- DenseMap<Operation *, UnresolvedMaterialization *> &materializationOps,
+ DenseMap<Operation *, UnresolvedMaterializationRewrite *>
+ &materializationOps,
ConversionPatternRewriter &rewriter,
ConversionPatternRewriterImpl &rewriterImpl,
DenseMap<Value, SmallVector<Value>> &inverseMapping,
- SetVector<UnresolvedMaterialization *> &necessaryMaterializations) {
+ SetVector<UnresolvedMaterializationRewrite *> &necessaryMaterializations) {
auto isLive = [&](Value value) {
auto findFn = [&](Operation *user) {
auto matIt = materializationOps.find(user);
@@ -2711,14 +2661,17 @@ static void computeNecessaryMaterializations(
return Value();
};
- SetVector<UnresolvedMaterialization *> worklist;
- for (auto &mat : rewriterImpl.unresolvedMaterializations) {
- materializationOps.try_emplace(mat.getOp(), &mat);
- worklist.insert(&mat);
+ SetVector<UnresolvedMaterializationRewrite *> worklist;
+ for (auto &rewrite : rewriterImpl.rewrites) {
+ auto *mat = dyn_cast<UnresolvedMaterializationRewrite>(rewrite.get());
+ if (!mat)
+ continue;
+ materializationOps.try_emplace(mat->getOperation(), mat);
+ worklist.insert(mat);
}
while (!worklist.empty()) {
- UnresolvedMaterialization *mat = worklist.pop_back_val();
- UnrealizedConversionCastOp op = mat->getOp();
+ UnresolvedMaterializationRewrite *mat = worklist.pop_back_val();
+ UnrealizedConversionCastOp op = mat->getOperation();
// We currently only handle target materializations here.
assert(op->getNumResults() == 1 && "unexpected materialization type");
@@ -2760,7 +2713,7 @@ static void computeNecessaryMaterializations(
auto isBlockArg = [](Value v) { return isa<BlockArgument>(v); };
if (llvm::any_of(op->getOperands(), isBlockArg) ||
llvm::any_of(inverseMapping[op->getResult(0)], isBlockArg)) {
- mat->setKind(UnresolvedMaterialization::Argument);
+ mat->setMaterializationKind(MaterializationKind::Argument);
}
// If the materialization does not have any live users, we don't need to
@@ -2770,7 +2723,7 @@ static void computeNecessaryMaterializations(
// value replacement even if the types differ in some cases. When those
// patterns are fixed, we can drop the argument special case here.
bool isMaterializationLive = isLive(opResult);
- if (mat->getKind() == UnresolvedMaterialization::Argument)
+ if (mat->getMaterializationKind() == MaterializationKind::Argument)
isMaterializationLive |= llvm::any_of(inverseMapping[opResult], isLive);
if (!isMaterializationLive)
continue;
@@ -2790,8 +2743,9 @@ static void computeNecessaryMaterializations(
/// Legalize the given unresolved materialization. Returns success if the
/// materialization was legalized, failure otherwise.
static LogicalResult legalizeUnresolvedMaterialization(
- UnresolvedMaterialization &mat,
- DenseMap<Operation *, UnresolvedMaterialization *> &materializationOps,
+ UnresolvedMaterializationRewrite &mat,
+ DenseMap<Operation *, UnresolvedMaterializationRewrite *>
+ &materializationOps,
ConversionPatternRewriter &rewriter,
ConversionPatternRewriterImpl &rewriterImpl,
DenseMap<Value, SmallVector<Value>> &inverseMapping) {
@@ -2811,7 +2765,7 @@ static LogicalResult legalizeUnresolvedMaterialization(
return Value();
};
- UnrealizedConversionCastOp op = mat.getOp();
+ UnrealizedConversionCastOp op = mat.getOperation();
if (!rewriterImpl.ignoredOps.insert(op))
return success();
@@ -2861,8 +2815,8 @@ static LogicalResult legalizeUnresolvedMaterialization(
rewriter.setInsertionPoint(op);
Value newMaterialization;
- switch (mat.getKind()) {
- case UnresolvedMaterialization::Argument:
+ switch (mat.getMaterializationKind()) {
+ case MaterializationKind::Argument:
// Try to materialize an argument conversion.
// FIXME: The current argument materialization hook expects the original
// output type, even though it doesn't use that as the actual output type
@@ -2879,7 +2833,7 @@ static LogicalResult legalizeUnresolvedMaterialization(
// If an argument materialization failed, fallback to trying a target
// materialization.
[[fallthrough]];
- case UnresolvedMaterialization::Target:
+ case MaterializationKind::Target:
newMaterialization = converter->materializeTargetConversion(
rewriter, op->getLoc(), outputType, inputOperands);
break;
@@ -2907,14 +2861,12 @@ LogicalResult OperationConverter::legalizeUnresolvedMaterializations(
ConversionPatternRewriter &rewriter,
ConversionPatternRewriterImpl &rewriterImpl,
std::optional<DenseMap<Value, SmallVector<Value>>> &inverseMapping) {
- if (rewriterImpl.unresolvedMaterializations.empty())
- return success();
inverseMapping = rewriterImpl.mapping.getInverse();
// As an initial step, compute all of the inserted materializations that we
// expect to persist beyond the conversion process.
- DenseMap<Operation *, UnresolvedMaterialization *> materializationOps;
- SetVector<UnresolvedMaterialization *> necessaryMaterializations;
+ DenseMap<Operation *, UnresolvedMaterializationRewrite *> materializationOps;
+ SetVector<UnresolvedMaterializationRewrite *> necessaryMaterializations;
computeNecessaryMaterializations(materializationOps, rewriter, rewriterImpl,
*inverseMapping, necessaryMaterializations);
@@ -3535,57 +3487,51 @@ void mlir::registerConversionPDLFunctions(RewritePatternSet &patterns) {
//===----------------------------------------------------------------------===//
// Partial Conversion
-LogicalResult
-mlir::applyPartialConversion(ArrayRef<Operation *> ops,
- const ConversionTarget &target,
- const FrozenRewritePatternSet &patterns,
- DenseSet<Operation *> *unconvertedOps) {
- OperationConverter opConverter(target, patterns, OpConversionMode::Partial,
- unconvertedOps);
+LogicalResult mlir::applyPartialConversion(
+ ArrayRef<Operation *> ops, const ConversionTarget &target,
+ const FrozenRewritePatternSet &patterns, ConversionConfig config) {
+ OperationConverter opConverter(target, patterns, config,
+ OpConversionMode::Partial);
return opConverter.convertOperations(ops);
}
LogicalResult
mlir::applyPartialConversion(Operation *op, const ConversionTarget &target,
const FrozenRewritePatternSet &patterns,
- DenseSet<Operation *> *unconvertedOps) {
- return applyPartialConversion(llvm::ArrayRef(op), target, patterns,
- unconvertedOps);
+ ConversionConfig config) {
+ return applyPartialConversion(llvm::ArrayRef(op), target, patterns, config);
}
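Editorial note, not part of the patch: a minimal, hypothetical sketch of how a caller of the new partial-conversion entry point above might look. It assumes `ConversionConfig` exposes the `unlegalizedOps` and `notifyCallback` members referenced elsewhere in this change; the wrapper name and the `target`/`patterns` setup are invented for illustration.

#include "mlir/Transforms/DialectConversion.h"
#include "llvm/Support/Debug.h"

using namespace mlir;

// Sketch: migrate a caller that previously passed a DenseSet out-parameter
// and a notifyCallback argument to the new ConversionConfig-based API.
static LogicalResult
runPartialConversion(Operation *root, const ConversionTarget &target,
                     const FrozenRewritePatternSet &patterns) {
  DenseSet<Operation *> unlegalized;
  auto notify = [](Diagnostic &diag) {
    llvm::dbgs() << diag.str() << "\n"; // surface pattern-failure remarks
  };

  ConversionConfig config;
  config.unlegalizedOps = &unlegalized; // collects ops left unconverted
  config.notifyCallback = notify;       // replaces the old callback parameter

  if (failed(applyPartialConversion(root, target, patterns, config)))
    return root->emitError("partial conversion failed");
  return success();
}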
//===----------------------------------------------------------------------===//
// Full Conversion
-LogicalResult
-mlir::applyFullConversion(ArrayRef<Operation *> ops,
- const ConversionTarget &target,
- const FrozenRewritePatternSet &patterns) {
- OperationConverter opConverter(target, patterns, OpConversionMode::Full);
+LogicalResult mlir::applyFullConversion(ArrayRef<Operation *> ops,
+ const ConversionTarget &target,
+ const FrozenRewritePatternSet &patterns,
+ ConversionConfig config) {
+ OperationConverter opConverter(target, patterns, config,
+ OpConversionMode::Full);
return opConverter.convertOperations(ops);
}
-LogicalResult
-mlir::applyFullConversion(Operation *op, const ConversionTarget &target,
- const FrozenRewritePatternSet &patterns) {
- return applyFullConversion(llvm::ArrayRef(op), target, patterns);
+LogicalResult mlir::applyFullConversion(Operation *op,
+ const ConversionTarget &target,
+ const FrozenRewritePatternSet &patterns,
+ ConversionConfig config) {
+ return applyFullConversion(llvm::ArrayRef(op), target, patterns, config);
}
//===----------------------------------------------------------------------===//
// Analysis Conversion
-LogicalResult
-mlir::applyAnalysisConversion(ArrayRef<Operation *> ops,
- ConversionTarget &target,
- const FrozenRewritePatternSet &patterns,
- DenseSet<Operation *> &convertedOps,
- function_ref<void(Diagnostic &)> notifyCallback) {
- OperationConverter opConverter(target, patterns, OpConversionMode::Analysis,
- &convertedOps);
- return opConverter.convertOperations(ops, notifyCallback);
+LogicalResult mlir::applyAnalysisConversion(
+ ArrayRef<Operation *> ops, ConversionTarget &target,
+ const FrozenRewritePatternSet &patterns, ConversionConfig config) {
+ OperationConverter opConverter(target, patterns, config,
+ OpConversionMode::Analysis);
+ return opConverter.convertOperations(ops);
}
LogicalResult
mlir::applyAnalysisConversion(Operation *op, ConversionTarget &target,
const FrozenRewritePatternSet &patterns,
- DenseSet<Operation *> &convertedOps,
- function_ref<void(Diagnostic &)> notifyCallback) {
- return applyAnalysisConversion(llvm::ArrayRef(op), target, patterns,
- convertedOps, notifyCallback);
+ ConversionConfig config) {
+ return applyAnalysisConversion(llvm::ArrayRef(op), target, patterns, config);
}
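Likewise, a hedged sketch (not part of the patch) of the analysis mode under the new API: `legalizableOps` is the field this change populates when the converter runs in Analysis mode, and rewrites are rolled back rather than applied. The wrapper name and surrounding setup are assumptions for illustration only.

#include "mlir/Transforms/DialectConversion.h"

using namespace mlir;

// Sketch: collect the set of ops that would legalize, without rewriting IR.
static DenseSet<Operation *>
collectLegalizableOps(Operation *root, ConversionTarget &target,
                      const FrozenRewritePatternSet &patterns) {
  DenseSet<Operation *> legalizable;
  ConversionConfig config;
  config.legalizableOps = &legalizable; // filled with ops found legalizable

  // Analysis conversions do not fail merely because an op cannot be
  // legalized; the populated set is the interesting output.
  if (failed(applyAnalysisConversion(root, target, patterns, config)))
    root->emitWarning("analysis conversion did not complete");
  return legalizable;
}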
diff --git a/mlir/test/CAPI/llvm.c b/mlir/test/CAPI/llvm.c
index 5a78fac91a50..1817988dd67d 100644
--- a/mlir/test/CAPI/llvm.c
+++ b/mlir/test/CAPI/llvm.c
@@ -15,6 +15,7 @@
#include "mlir-c/Support.h"
#include <assert.h>
+#include <inttypes.h>
#include <math.h>
#include <stdio.h>
#include <stdlib.h>
@@ -105,7 +106,7 @@ static int testStructTypeCreation(MlirContext ctx) {
// CHECK: i8
// CHECK: i32
// CHECK: i64
- fprintf(stderr, "num elements: %ld\n",
+ fprintf(stderr, "num elements: %" PRIdPTR "\n",
mlirLLVMStructTypeGetNumElementTypes(literal));
for (intptr_t i = 0; i < 3; ++i) {
mlirTypeDump(mlirLLVMStructTypeGetElementType(literal, i));
diff --git a/mlir/test/Conversion/MathToLLVM/math-to-llvm.mlir b/mlir/test/Conversion/MathToLLVM/math-to-llvm.mlir
index 3de2f11d1d12..56129dbd2788 100644
--- a/mlir/test/Conversion/MathToLLVM/math-to-llvm.mlir
+++ b/mlir/test/Conversion/MathToLLVM/math-to-llvm.mlir
@@ -77,6 +77,18 @@ func.func @log1p_2dvector_fmf(%arg0 : vector<4x3xf32>) {
// -----
+// CHECK-LABEL: func @log1p_scalable_vector(
+// CHECK-SAME: %[[VEC:.*]]: vector<[4]xf32>
+func.func @log1p_scalable_vector(%arg0 : vector<[4]xf32>) -> vector<[4]xf32> {
+ // CHECK: %[[ONE:.*]] = llvm.mlir.constant(dense<1.000000e+00> : vector<[4]xf32>) : vector<[4]xf32>
+ // CHECK: %[[ADD:.*]] = llvm.fadd %[[ONE]], %[[VEC]] : vector<[4]xf32>
+ // CHECK: %[[LOG:.*]] = llvm.intr.log(%[[ADD]]) : (vector<[4]xf32>) -> vector<[4]xf32>
+ %0 = math.log1p %arg0 : vector<[4]xf32>
+ func.return %0 : vector<[4]xf32>
+}
+
+// -----
+
// CHECK-LABEL: func @expm1(
// CHECK-SAME: f32
func.func @expm1(%arg0 : f32) {
@@ -113,6 +125,18 @@ func.func @expm1_vector(%arg0 : vector<4xf32>) {
// -----
+// CHECK-LABEL: func @expm1_scalable_vector(
+// CHECK-SAME: %[[VEC:.*]]: vector<[4]xf32>
+func.func @expm1_scalable_vector(%arg0 : vector<[4]xf32>) -> vector<[4]xf32> {
+ // CHECK: %[[ONE:.*]] = llvm.mlir.constant(dense<1.000000e+00> : vector<[4]xf32>) : vector<[4]xf32>
+ // CHECK: %[[EXP:.*]] = llvm.intr.exp(%[[VEC]]) : (vector<[4]xf32>) -> vector<[4]xf32>
+ // CHECK: %[[SUB:.*]] = llvm.fsub %[[EXP]], %[[ONE]] : vector<[4]xf32>
+ %0 = math.expm1 %arg0 : vector<[4]xf32>
+ func.return %0 : vector<[4]xf32>
+}
+
+// -----
+
// CHECK-LABEL: func @expm1_vector_fmf(
// CHECK-SAME: vector<4xf32>
func.func @expm1_vector_fmf(%arg0 : vector<4xf32>) {
@@ -177,6 +201,16 @@ func.func @cttz_vec(%arg0 : vector<4xi32>) {
// -----
+// CHECK-LABEL: func @cttz_scalable_vec(
+// CHECK-SAME: %[[VEC:.*]]: vector<[4]xi32>
+func.func @cttz_scalable_vec(%arg0 : vector<[4]xi32>) -> vector<[4]xi32> {
+ // CHECK: "llvm.intr.cttz"(%[[VEC]]) <{is_zero_poison = false}> : (vector<[4]xi32>) -> vector<[4]xi32>
+ %0 = math.cttz %arg0 : vector<[4]xi32>
+ func.return %0 : vector<[4]xi32>
+}
+
+// -----
+
// CHECK-LABEL: func @ctpop(
// CHECK-SAME: i32
func.func @ctpop(%arg0 : i32) {
@@ -197,6 +231,16 @@ func.func @ctpop_vector(%arg0 : vector<3xi32>) {
// -----
+// CHECK-LABEL: func @ctpop_scalable_vector(
+// CHECK-SAME: %[[VEC:.*]]: vector<[4]xi32>
+func.func @ctpop_scalable_vector(%arg0 : vector<[4]xi32>) -> vector<[4]xi32> {
+ // CHECK: llvm.intr.ctpop(%[[VEC]]) : (vector<[4]xi32>) -> vector<[4]xi32>
+ %0 = math.ctpop %arg0 : vector<[4]xi32>
+ func.return %0 : vector<[4]xi32>
+}
+
+// -----
+
// CHECK-LABEL: func @rsqrt_double(
// CHECK-SAME: f64
func.func @rsqrt_double(%arg0 : f64) {
@@ -233,6 +277,18 @@ func.func @rsqrt_vector(%arg0 : vector<4xf32>) {
// -----
+// CHECK-LABEL: func @rsqrt_scalable_vector(
+// CHECK-SAME: %[[VEC:.*]]: vector<[4]xf32>
+func.func @rsqrt_scalable_vector(%arg0 : vector<[4]xf32>) -> vector<[4]xf32>{
+ // CHECK: %[[ONE:.*]] = llvm.mlir.constant(dense<1.000000e+00> : vector<[4]xf32>) : vector<[4]xf32>
+ // CHECK: %[[SQRT:.*]] = llvm.intr.sqrt(%[[VEC]]) : (vector<[4]xf32>) -> vector<[4]xf32>
+ // CHECK: %[[DIV:.*]] = llvm.fdiv %[[ONE]], %[[SQRT]] : vector<[4]xf32>
+ %0 = math.rsqrt %arg0 : vector<[4]xf32>
+ func.return %0 : vector<[4]xf32>
+}
+
+// -----
+
// CHECK-LABEL: func @rsqrt_vector_fmf(
// CHECK-SAME: vector<4xf32>
func.func @rsqrt_vector_fmf(%arg0 : vector<4xf32>) {
@@ -245,6 +301,18 @@ func.func @rsqrt_vector_fmf(%arg0 : vector<4xf32>) {
// -----
+// CHECK-LABEL: func @rsqrt_scalable_vector_fmf(
+// CHECK-SAME: %[[VEC:.*]]: vector<[4]xf32>
+func.func @rsqrt_scalable_vector_fmf(%arg0 : vector<[4]xf32>) -> vector<[4]xf32> {
+ // CHECK: %[[ONE:.*]] = llvm.mlir.constant(dense<1.000000e+00> : vector<[4]xf32>) : vector<[4]xf32>
+ // CHECK: %[[SQRT:.*]] = llvm.intr.sqrt(%[[VEC]]) {fastmathFlags = #llvm.fastmath<fast>} : (vector<[4]xf32>) -> vector<[4]xf32>
+ // CHECK: %[[DIV:.*]] = llvm.fdiv %[[ONE]], %[[SQRT]] {fastmathFlags = #llvm.fastmath<fast>} : vector<[4]xf32>
+ %0 = math.rsqrt %arg0 fastmath<fast> : vector<[4]xf32>
+ func.return %0 : vector<[4]xf32>
+}
+
+// -----
+
// CHECK-LABEL: func @rsqrt_multidim_vector(
func.func @rsqrt_multidim_vector(%arg0 : vector<4x3xf32>) {
// CHECK: %[[EXTRACT:.*]] = llvm.extractvalue %{{.*}}[0] : !llvm.array<4 x vector<3xf32>>
@@ -258,6 +326,19 @@ func.func @rsqrt_multidim_vector(%arg0 : vector<4x3xf32>) {
// -----
+// CHECK-LABEL: func @rsqrt_multidim_scalable_vector(
+func.func @rsqrt_multidim_scalable_vector(%arg0 : vector<4x[4]xf32>) -> vector<4x[4]xf32> {
+ // CHECK: %[[EXTRACT:.*]] = llvm.extractvalue %{{.*}}[0] : !llvm.array<4 x vector<[4]xf32>>
+ // CHECK: %[[ONE:.*]] = llvm.mlir.constant(dense<1.000000e+00> : vector<[4]xf32>) : vector<[4]xf32>
+ // CHECK: %[[SQRT:.*]] = llvm.intr.sqrt(%[[EXTRACT]]) : (vector<[4]xf32>) -> vector<[4]xf32>
+ // CHECK: %[[DIV:.*]] = llvm.fdiv %[[ONE]], %[[SQRT]] : vector<[4]xf32>
+ // CHECK: %[[INSERT:.*]] = llvm.insertvalue %[[DIV]], %{{.*}}[0] : !llvm.array<4 x vector<[4]xf32>>
+ %0 = math.rsqrt %arg0 : vector<4x[4]xf32>
+ func.return %0 : vector<4x[4]xf32>
+}
+
+// -----
+
// CHECK-LABEL: func @fpowi(
// CHECK-SAME: f64
func.func @fpowi(%arg0 : f64, %arg1 : i32) {
diff --git a/mlir/test/Conversion/TosaToLinalg/tosa-to-linalg.mlir b/mlir/test/Conversion/TosaToLinalg/tosa-to-linalg.mlir
index febe74e87674..1fa783f05f04 100644
--- a/mlir/test/Conversion/TosaToLinalg/tosa-to-linalg.mlir
+++ b/mlir/test/Conversion/TosaToLinalg/tosa-to-linalg.mlir
@@ -759,6 +759,21 @@ func.func @test_i8(%arg0: tensor<1xi8>) -> () {
// -----
+// CHECK-LABEL: @test_i64
+func.func @test_i64(%arg0: tensor<1xi64>) -> () {
+ // CHECK: linalg.generic
+ // CHECK: ^bb0(%[[ARG1:.+]]: i64,
+  // CHECK-DAG: %[[MIN:.+]] = arith.constant -9223372036854775808
+  // CHECK-DAG: %[[MAX:.+]] = arith.constant 9223372036854775807
+  // CHECK-DAG: %[[LOWER:.+]] = arith.maxsi %[[MIN]], %[[ARG1]]
+  // CHECK-DAG: %[[CLAMPED:.+]] = arith.minsi %[[MAX]], %[[LOWER]]
+ %0 = tosa.clamp %arg0 {min_int = -9223372036854775808 : i64, max_int = 9223372036854775807 : i64, min_fp = 0.0 : f32, max_fp = 0.0 : f32} : (tensor<1xi64>) -> tensor<1xi64>
+
+ return
+}
+
+// -----
+
// CHECK-LABEL: @test_clamp_f16
func.func @test_clamp_f16(%arg0: tensor<1xf16>) -> () {
// CHECK: linalg.generic
diff --git a/mlir/test/Dialect/Linalg/canonicalize.mlir b/mlir/test/Dialect/Linalg/canonicalize.mlir
index 7adde3117dee..206d7e9f1ce8 100644
--- a/mlir/test/Dialect/Linalg/canonicalize.mlir
+++ b/mlir/test/Dialect/Linalg/canonicalize.mlir
@@ -102,17 +102,16 @@ func.func @tensor.cast.unranked(%a : tensor<*xf32>, %b : tensor<*xf32>, %c : ten
// -----
// CHECK-LABEL: func @linalg_effects(
-// CHECK-SAME: %[[A:[a-z0-9]*]]: tensor<?x?xf32>
-// CHECK-SAME: %[[B:[a-z0-9]*]]: memref<?x?xf32>
-// CHECK-SAME: %[[C:[a-z0-9]*]]: tensor<?x?xf32>
-func.func @linalg_effects(%a : tensor<?x?xf32>, %b : memref<?x?xf32>, %c : tensor<?x?xf32>) {
+func.func @linalg_effects(
+ %a : tensor<?x?xf32>, %b : tensor<?x?xf32>, %c : tensor<?x?xf32>,
+ %d : memref<?x?xf32>, %e : memref<?x?xf32>, %f : memref<?x?xf32>) {
// CHECK-NOT: %{{.*}} = linalg.matmul
- %t = linalg.matmul ins(%a, %b : tensor<?x?xf32>, memref<?x?xf32>)
+ %t = linalg.matmul ins(%a, %b : tensor<?x?xf32>, tensor<?x?xf32>)
outs(%c : tensor<?x?xf32>) -> tensor<?x?xf32>
// CHECK: linalg.matmul
- linalg.matmul ins(%a, %c : tensor<?x?xf32>, tensor<?x?xf32>)
- outs(%b : memref<?x?xf32>)
+ linalg.matmul ins(%d, %e : memref<?x?xf32>, memref<?x?xf32>)
+ outs(%f : memref<?x?xf32>)
return
}
@@ -889,11 +888,11 @@ func.func @fold_multi_use_generic_op_with_consumer(%arg0 : tensor<?x?x?xf32>) ->
// -----
#map = affine_map<(d0) -> (d0)>
-func.func @identity_mixed(%arg0 : tensor<?xf32>, %arg1: memref<?xf32>) {
+func.func @identity_buffer(%arg0 : memref<?xf32>, %arg1: memref<?xf32>) {
linalg.generic {
indexing_maps = [#map, #map],
iterator_types = ["parallel"]
- } ins(%arg0 : tensor<?xf32>)
+ } ins(%arg0 : memref<?xf32>)
outs(%arg1 : memref<?xf32>) {
^bb0(%arg2 : f32, %arg3 : f32):
linalg.yield %arg2 : f32
@@ -901,14 +900,13 @@ func.func @identity_mixed(%arg0 : tensor<?xf32>, %arg1: memref<?xf32>) {
return
}
-// There was a crash in EraseIdentityGenericOp for generic with mixed semantics.
-// For now, check generic remained unchanged.
-// CHECK-LABEL: func @identity_mixed
-// CHECK-SAME: (%[[ARG1:.*]]: tensor<?xf32>, %[[ARG2:.*]]: memref<?xf32>)
+// Do not erase ops with buffer semantics.
+// CHECK-LABEL: func @identity_buffer
+// CHECK-SAME: (%[[ARG1:.*]]: memref<?xf32>, %[[ARG2:.*]]: memref<?xf32>)
// CHECK: linalg.generic {
// CHECK-SAME: indexing_maps = [#map, #map],
// CHECK-SAME: iterator_types = ["parallel"]
-// CHECK-SAME: } ins(%[[ARG1]] : tensor<?xf32>)
+// CHECK-SAME: } ins(%[[ARG1]] : memref<?xf32>)
// CHECK-SAME: outs(%[[ARG2]] : memref<?xf32>) {
// -----
@@ -916,12 +914,12 @@ func.func @identity_mixed(%arg0 : tensor<?xf32>, %arg1: memref<?xf32>) {
// Just make sure that we don't crash.
// CHECK-LABEL: func @dedeplicate_regression_test
-func.func @dedeplicate_regression_test(%0: tensor<4xf32>, %1: memref<4xf32>) {
+func.func @dedeplicate_regression_test(%0: tensor<4xf32>, %1: tensor<4xf32>) {
%36 = linalg.generic
{indexing_maps = [affine_map<(d0) -> (d0)>,
affine_map<(d0) -> (d0)>, affine_map<(d0) -> (d0)>],
iterator_types = ["parallel"]}
- ins(%1, %1 : memref<4xf32>, memref<4xf32>)
+ ins(%1, %1 : tensor<4xf32>, tensor<4xf32>)
outs(%0 : tensor<4xf32>) {
^bb0(%in: f32, %in_24: f32, %out: f32):
linalg.yield %in : f32
@@ -937,31 +935,6 @@ func.func @dedeplicate_regression_test(%0: tensor<4xf32>, %1: memref<4xf32>) {
// -----
-#map = affine_map<(d0) -> (d0)>
-func.func @cast_producer_mixed(%arg0 : tensor<5xf32>, %arg1: memref<?xf32>) {
- %0 = tensor.cast %arg0 : tensor<5xf32> to tensor<?xf32>
- linalg.generic {
- indexing_maps = [#map, #map],
- iterator_types = ["parallel"]
- } ins(%0 : tensor<?xf32>)
- outs(%arg1 : memref<?xf32>) {
- ^bb0(%arg2 : f32, %arg3 : f32):
- linalg.yield %arg2 : f32
- }
- return
-}
-
-// We need a mixed linalg as a bridge between tensor and memref worlds.
-// CHECK-LABEL: func @cast_producer_mixed
-// CHECK-SAME: (%[[ARG1:.*]]: tensor<5xf32>, %[[ARG2:.*]]: memref<?xf32>)
-// CHECK: linalg.generic {
-// CHECK-SAME: indexing_maps = [#map, #map],
-// CHECK-SAME: iterator_types = ["parallel"]
-// CHECK-SAME: } ins(%[[ARG1]] : tensor<5xf32>)
-// CHECK-SAME: outs(%[[ARG2]] : memref<?xf32>) {
-
-// -----
-
// CHECK-LABEL: dead_softmax
func.func @dead_softmax(%arg0: tensor<16x64x256xf32>) -> tensor<16x64x256xf32> {
%0 = tensor.empty() : tensor<16x64x256xf32>
diff --git a/mlir/test/Dialect/Linalg/fusion-elementwise-ops.mlir b/mlir/test/Dialect/Linalg/fusion-elementwise-ops.mlir
index 9d8421cbab49..15a4f6cdd3bb 100644
--- a/mlir/test/Dialect/Linalg/fusion-elementwise-ops.mlir
+++ b/mlir/test/Dialect/Linalg/fusion-elementwise-ops.mlir
@@ -1110,43 +1110,3 @@ module {
// CHECK-DAG: %[[T3:.+]] = arith.addf %[[T2]], %[[B1]]
// CHECK: linalg.yield %[[T3]] : f32
// CHECK: return %[[GENERIC]]
-
-// -----
-
-// CHECK-DAG: [[$MAP0:#[a-zA-Z0-9_]*]] = affine_map<(d0, d1) -> (d0, d1)>
-#map0 = affine_map<(d0, d1) -> (d0, d1)>
-
-// CHECK-LABEL: @mixed_fusion
-func.func @mixed_fusion(%arg0: tensor<?x?xf32>, %arg1 : tensor<?x?xf32>, %arg2 : tensor<?x?xf32>, %arg8 : memref<?x?xf32>)
-{
- %c0 = arith.constant 0 : index
- %c1 = arith.constant 1 : index
- %0 = tensor.dim %arg0, %c0 : tensor<?x?xf32>
- %1 = tensor.dim %arg0, %c1 : tensor<?x?xf32>
- %2 = tensor.empty(%0, %1) : tensor<?x?xf32>
- %3 = linalg.generic {indexing_maps = [#map0, #map0, #map0], iterator_types = ["parallel", "parallel"]}
- ins(%arg0, %arg1 : tensor<?x?xf32>, tensor<?x?xf32>)
- outs(%2 : tensor<?x?xf32>) {
- ^bb0(%arg3: f32, %arg4: f32, %arg5: f32):
- %4 = arith.addf %arg3, %arg4 : f32
- linalg.yield %4 : f32
- } -> tensor<?x?xf32>
- // CHECK: linalg.generic {
- // CHECK-SAME: indexing_maps = {{\[}}[[$MAP0]], [[$MAP0]], [[$MAP0]], [[$MAP0]]{{\]}}
- linalg.generic {indexing_maps = [#map0, #map0, #map0], iterator_types = ["parallel", "parallel"]}
- ins(%3, %arg2 : tensor<?x?xf32>, tensor<?x?xf32>)
- outs(%arg8 : memref<?x?xf32>) {
- // CHECK: ^{{[a-zA-Z0-9_]*}}
- // CHECK-SAME: [[ARG0:%[a-zA-Z0-9_]*]]
- // CHECK-SAME: [[ARG1:%[a-zA-Z0-9_]*]]
- // CHECK-SAME: [[ARG2:%[a-zA-Z0-9_]*]]
- ^bb0(%arg5: f32, %arg6: f32, %arg7: f32):
- // CHECK: [[T1:%[a-zA-Z0-9_]*]] = arith.addf [[ARG0]], [[ARG1]]
- // CHECK-NOT: linalg.yield
- // CHECK: arith.mulf [[T1]], [[ARG2]]
- // CHECK: linalg.yield
- %5 = arith.mulf %arg5, %arg6 : f32
- linalg.yield %5 : f32
- }
- return
-}
diff --git a/mlir/test/Dialect/Linalg/invalid.mlir b/mlir/test/Dialect/Linalg/invalid.mlir
index 916c04f33e9c..44c81c31ace0 100644
--- a/mlir/test/Dialect/Linalg/invalid.mlir
+++ b/mlir/test/Dialect/Linalg/invalid.mlir
@@ -770,3 +770,13 @@ func.func @mmt4d_rank_mismatch(%A: tensor<16x16x8x1xf32>,
-> tensor<8x8xf32>
return %res : tensor<8x8xf32>
}
+
+// -----
+
+func.func @mixed_semantics(%a: tensor<?x?xf32>, %b: tensor<?x?xf32>, %c: memref<?x?xf32>) {
+ // expected-error @+1 {{expected to have pure tensor or buffer semantics}}
+ linalg.matmul ins(%a, %b: tensor<?x?xf32>, tensor<?x?xf32>)
+ outs(%c: memref<?x?xf32>)
+ return
+}
+
diff --git a/mlir/test/Dialect/Mesh/spmdization.mlir b/mlir/test/Dialect/Mesh/spmdization.mlir
index 2fb8029dfe64..572d3eb55eaa 100644
--- a/mlir/test/Dialect/Mesh/spmdization.mlir
+++ b/mlir/test/Dialect/Mesh/spmdization.mlir
@@ -127,3 +127,17 @@ func.func @multiple_chained_ops(
// CHECK: return %[[RESHARD3]] : tensor<1xi8>
return %7 : tensor<2xi8>
}
+
+// CHECK-LABEL: func @incomplete_sharding
+func.func @incomplete_sharding(
+ // CHECK-SAME: %[[ARG:.*]]: tensor<4x16xf32>
+ %arg0: tensor<8x16xf32>
+// CHECK-SAME: -> tensor<4x16xf32> {
+) -> tensor<8x16xf32> {
+ %0 = mesh.shard %arg0 to <@mesh_1d, [[0]]> annotate_for_users : tensor<8x16xf32>
+ // CHECK: %[[RES:.*]] = tosa.sigmoid %[[ARG]] : (tensor<4x16xf32>) -> tensor<4x16xf32>
+ %1 = tosa.sigmoid %0 : (tensor<8x16xf32>) -> tensor<8x16xf32>
+ %2 = mesh.shard %1 to <@mesh_1d, [[0]]> : tensor<8x16xf32>
+ // CHECK: return %[[RES]] : tensor<4x16xf32>
+ return %2 : tensor<8x16xf32>
+}
diff --git a/mlir/test/Dialect/Tosa/ops.mlir b/mlir/test/Dialect/Tosa/ops.mlir
index 3d68464ebf0b..01b27072a4b6 100644
--- a/mlir/test/Dialect/Tosa/ops.mlir
+++ b/mlir/test/Dialect/Tosa/ops.mlir
@@ -376,6 +376,13 @@ func.func @test_clz(%arg0: tensor<13x21x3xi32>) -> tensor<13x21x3xi32> {
}
// -----
+// CHECK-LABEL: cos
+func.func @test_cos(%arg0: tensor<13x21x3xf32>) -> tensor<13x21x3xf32> {
+ %0 = tosa.cos %arg0 : (tensor<13x21x3xf32>) -> tensor<13x21x3xf32>
+ return %0 : tensor<13x21x3xf32>
+}
+
+// -----
// CHECK-LABEL: exp
func.func @test_exp(%arg0: tensor<13x21x3xf32>) -> tensor<13x21x3xf32> {
%0 = tosa.exp %arg0 : (tensor<13x21x3xf32>) -> tensor<13x21x3xf32>
@@ -425,6 +432,13 @@ func.func @test_rsqrt(%arg0: tensor<13x21x3xf32>) -> tensor<13x21x3xf32> {
}
// -----
+// CHECK-LABEL: sin
+func.func @test_sin(%arg0: tensor<13x21x3xf32>) -> tensor<13x21x3xf32> {
+ %0 = tosa.sin %arg0 : (tensor<13x21x3xf32>) -> tensor<13x21x3xf32>
+ return %0 : tensor<13x21x3xf32>
+}
+
+// -----
// CHECK-LABEL: select
func.func @test_select(%arg0: tensor<1x1x1xi1>, %arg1: tensor<13x21x3xf32>, %arg2: tensor<13x21x3xf32>) -> tensor<13x21x3xf32> {
%0 = tosa.select %arg0, %arg1, %arg2 : (tensor<1x1x1xi1>, tensor<13x21x3xf32>, tensor<13x21x3xf32>) -> tensor<13x21x3xf32>
diff --git a/mlir/test/Dialect/Vector/vector-rewrite-narrow-types.mlir b/mlir/test/Dialect/Vector/vector-rewrite-narrow-types.mlir
index 02063a81664b..94e78ce40a3c 100644
--- a/mlir/test/Dialect/Vector/vector-rewrite-narrow-types.mlir
+++ b/mlir/test/Dialect/Vector/vector-rewrite-narrow-types.mlir
@@ -195,53 +195,89 @@ func.func @f3ext(%a: vector<5xi8>) -> vector<8xi17> {
// CHECK-LABEL: func.func @aligned_extsi(
func.func @aligned_extsi(%a: vector<8xi4>) -> vector<8xi32> {
- // CHECK: arith.shli
- // CHECK: arith.shrsi
- // CHECK: arith.shrsi
- // CHECK: vector.shuffle
- // CHECK: arith.extsi %{{.*}} : vector<8xi8> to vector<8xi32>
+// CHECK-SAME: %[[IN:.*]]: vector<8xi4>) -> vector<8xi32> {
+// CHECK: %[[I4_BITS:.*]] = arith.constant dense<4> : vector<4xi8>
+// CHECK: %[[BITCAST:.*]] = vector.bitcast %[[IN]] : vector<8xi4> to vector<4xi8>
+// CHECK: %[[SHL_LOW:.*]] = arith.shli %[[BITCAST]], %[[I4_BITS]] : vector<4xi8>
+// CHECK: %[[LOW:.*]] = arith.shrsi %[[SHL_LOW]], %[[I4_BITS]] : vector<4xi8>
+// CHECK: %[[HIGH:.*]] = arith.shrsi %[[BITCAST]], %[[I4_BITS]] : vector<4xi8>
+// CHECK: %[[INTERLEAVE:.*]] = vector.interleave %[[LOW]], %[[HIGH]] : vector<4xi8>
+// CHECK: %[[I32:.*]] = arith.extsi %[[INTERLEAVE]] : vector<8xi8> to vector<8xi32>
%0 = arith.extsi %a : vector<8xi4> to vector<8xi32>
return %0 : vector<8xi32>
}
+// CHECK-LABEL: func.func @aligned_extsi_2d(
+func.func @aligned_extsi_2d(%a: vector<8x32xi4>) -> vector<8x32xi32> {
+// CHECK-SAME: %[[IN:.*]]: vector<8x32xi4>) -> vector<8x32xi32> {
+// CHECK: %[[I4_BITS:.*]] = arith.constant dense<4> : vector<8x16xi8>
+// CHECK: %[[BITCAST:.*]] = vector.bitcast %[[IN]] : vector<8x32xi4> to vector<8x16xi8>
+// CHECK: %[[SHL_LOW:.*]] = arith.shli %[[BITCAST]], %[[I4_BITS]] : vector<8x16xi8>
+// CHECK: %[[LOW:.*]] = arith.shrsi %[[SHL_LOW]], %[[I4_BITS]] : vector<8x16xi8>
+// CHECK: %[[HIGH:.*]] = arith.shrsi %[[BITCAST]], %[[I4_BITS]] : vector<8x16xi8>
+// CHECK: %[[INTERLEAVE:.*]] = vector.interleave %[[LOW]], %[[HIGH]] : vector<8x16xi8>
+// CHECK: %[[I32:.*]] = arith.extsi %[[INTERLEAVE]] : vector<8x32xi8> to vector<8x32xi32>
+ %0 = arith.extsi %a : vector<8x32xi4> to vector<8x32xi32>
+ return %0 : vector<8x32xi32>
+}
+
// CHECK-LABEL: func.func @aligned_extsi_base_case(
func.func @aligned_extsi_base_case(%a: vector<8xi4>) -> vector<8xi8> {
- // CHECK: arith.shli
- // CHECK: arith.shrsi
- // CHECK: arith.shrsi
- // CHECK: vector.shuffle
- // CHECK-NOT: arith.extsi
+// CHECK-SAME: %[[IN:.*]]: vector<8xi4>) -> vector<8xi8> {
+// CHECK: %[[I4_BITS:.*]] = arith.constant dense<4> : vector<4xi8>
+// CHECK: %[[BITCAST:.*]] = vector.bitcast %[[IN]] : vector<8xi4> to vector<4xi8>
+// CHECK: %[[SHL_LOW:.*]] = arith.shli %[[BITCAST]], %[[I4_BITS]] : vector<4xi8>
+// CHECK: %[[LOW:.*]] = arith.shrsi %[[SHL_LOW]], %[[I4_BITS]] : vector<4xi8>
+// CHECK: %[[HIGH:.*]] = arith.shrsi %[[BITCAST]], %[[I4_BITS]] : vector<4xi8>
+// CHECK: %[[INTERLEAVE:.*]] = vector.interleave %[[LOW]], %[[HIGH]] : vector<4xi8>
%0 = arith.extsi %a : vector<8xi4> to vector<8xi8>
return %0 : vector<8xi8>
}
// CHECK-LABEL: func.func @aligned_sitofp(
func.func @aligned_sitofp(%a: vector<8xi4>) -> vector<8xf32> {
- // CHECK: arith.shli
- // CHECK: arith.shrsi
- // CHECK: arith.shrsi
- // CHECK: shuffle
- // CHECK: arith.sitofp %{{.*}} : vector<8xi8> to vector<8xf32>
+// CHECK-SAME: %[[IN:.*]]: vector<8xi4>) -> vector<8xf32> {
+// CHECK: %[[I4_BITS:.*]] = arith.constant dense<4> : vector<4xi8>
+// CHECK: %[[BITCAST:.*]] = vector.bitcast %[[IN]] : vector<8xi4> to vector<4xi8>
+// CHECK: %[[SHL_LOW:.*]] = arith.shli %[[BITCAST]], %[[I4_BITS]] : vector<4xi8>
+// CHECK: %[[LOW:.*]] = arith.shrsi %[[SHL_LOW]], %[[I4_BITS]] : vector<4xi8>
+// CHECK: %[[HIGH:.*]] = arith.shrsi %[[BITCAST]], %[[I4_BITS]] : vector<4xi8>
+// CHECK: %[[INTERLEAVE:.*]] = vector.interleave %[[LOW]], %[[HIGH]] : vector<4xi8>
+// CHECK: %[[F32:.*]] = arith.sitofp %[[INTERLEAVE]] : vector<8xi8> to vector<8xf32>
%0 = arith.sitofp %a : vector<8xi4> to vector<8xf32>
return %0 : vector<8xf32>
}
+// CHECK-LABEL: func.func @aligned_sitofp_2d(
+func.func @aligned_sitofp_2d(%a: vector<8x32xi4>) -> vector<8x32xf32> {
+// CHECK-SAME: %[[IN:.*]]: vector<8x32xi4>) -> vector<8x32xf32> {
+// CHECK: %[[I4_BITS:.*]] = arith.constant dense<4> : vector<8x16xi8>
+// CHECK: %[[BITCAST:.*]] = vector.bitcast %[[IN]] : vector<8x32xi4> to vector<8x16xi8>
+// CHECK: %[[SHL_LOW:.*]] = arith.shli %[[BITCAST]], %[[I4_BITS]] : vector<8x16xi8>
+// CHECK: %[[LOW:.*]] = arith.shrsi %[[SHL_LOW]], %[[I4_BITS]] : vector<8x16xi8>
+// CHECK: %[[HIGH:.*]] = arith.shrsi %[[BITCAST]], %[[I4_BITS]] : vector<8x16xi8>
+// CHECK: %[[INTERLEAVE:.*]] = vector.interleave %[[LOW]], %[[HIGH]] : vector<8x16xi8>
+// CHECK: %[[F32:.*]] = arith.sitofp %[[INTERLEAVE]] : vector<8x32xi8> to vector<8x32xf32>
+ %0 = arith.sitofp %a : vector<8x32xi4> to vector<8x32xf32>
+ return %0 : vector<8x32xf32>
+}
+
// CHECK-LABEL: func.func @i4_transpose(
-// CHECK-SAME: %[[A:[0-9a-z]*]]
func.func @i4_transpose(%a: vector<8x16xi4>) -> vector<16x8xi4> {
- // CHECK: %[[EXT:.*]] = arith.extsi %[[A]] : vector<8x16xi4> to vector<8x16xi8>
- // CHECK: %[[TRANS:.*]] = vector.transpose %[[EXT]], [1, 0] : vector<8x16xi8> to vector<16x8xi8>
- // CHECK: %[[TRUNC:.*]] = arith.trunci %[[TRANS]] : vector<16x8xi8> to vector<16x8xi4>
+// CHECK-SAME: %[[IN:.*]]: vector<8x16xi4>) -> vector<16x8xi4> {
+// CHECK: %[[EXT:.*]] = vector.interleave
+// CHECK: %[[TRANS:.*]] = vector.transpose %[[EXT]], [1, 0] : vector<8x16xi8> to vector<16x8xi8>
+// CHECK: %[[TRUNC:.*]] = arith.trunci %[[TRANS]] : vector<16x8xi8> to vector<16x8xi4>
%0 = vector.transpose %a, [1, 0] : vector<8x16xi4> to vector<16x8xi4>
return %0 : vector<16x8xi4>
}
// CHECK-LABEL: func.func @i7_transpose(
-// CHECK-SAME: %[[A:[0-9a-z]*]]
func.func @i7_transpose(%a: vector<8x16xi7>) -> vector<16x8xi7> {
- // CHECK: %[[EXT:.*]] = arith.extsi %[[A]] : vector<8x16xi7> to vector<8x16xi8>
- // CHECK: %[[TRANS:.*]] = vector.transpose %[[EXT]], [1, 0] : vector<8x16xi8> to vector<16x8xi8>
- // CHECK: %[[TRUNC:.*]] = arith.trunci %[[TRANS]] : vector<16x8xi8> to vector<16x8xi7>
+// CHECK-SAME: %[[IN:.*]]: vector<8x16xi7>) -> vector<16x8xi7> {
+// CHECK: %[[EXT:.*]] = arith.extsi %[[IN]] : vector<8x16xi7> to vector<8x16xi8>
+// CHECK: %[[TRANS:.*]] = vector.transpose %[[EXT]], [1, 0] : vector<8x16xi8> to vector<16x8xi8>
+// CHECK: %[[TRUNC:.*]] = arith.trunci %[[TRANS]] : vector<16x8xi8> to vector<16x8xi7>
%0 = vector.transpose %a, [1, 0] : vector<8x16xi7> to vector<16x8xi7>
return %0 : vector<16x8xi7>
}
diff --git a/mlir/test/Dialect/Vector/vector-transfer-flatten.mlir b/mlir/test/Dialect/Vector/vector-transfer-flatten.mlir
index 1775b5fa4a34..788ae9ac044e 100644
--- a/mlir/test/Dialect/Vector/vector-transfer-flatten.mlir
+++ b/mlir/test/Dialect/Vector/vector-transfer-flatten.mlir
@@ -83,7 +83,7 @@ func.func @transfer_read_dims_mismatch_non_zero_indices(
return
}
-// CHECK: #[[$ATTR_0:.+]] = affine_map<()[s0, s1] -> (s0 * 4 + s1 * 43)>
+// CHECK: #[[$ATTR_0:.+]] = affine_map<()[s0, s1] -> (s0 * 24 + s1 * 6)>
// CHECK-LABEL: func.func @transfer_read_dims_mismatch_non_zero_indices(
// CHECK-SAME: %[[IDX_1:.*]]: index, %[[IDX_2:.*]]: index,
@@ -92,7 +92,7 @@ func.func @transfer_read_dims_mismatch_non_zero_indices(
// CHECK: %[[C_0:.*]] = arith.constant 0 : i32
// CHECK: %[[C_0_IDX:.*]] = arith.constant 0 : index
// CHECK: %[[COLLAPSED_IN:.*]] = memref.collapse_shape %[[M_IN]] {{\[}}[0], [1, 2, 3]] : memref<1x43x4x6xi32> into memref<1x1032xi32>
-// CHECK: %[[COLLAPSED_IDX:.*]] = affine.apply #[[$ATTR_0]]()[%[[IDX_2]], %[[IDX_1]]]
+// CHECK: %[[COLLAPSED_IDX:.*]] = affine.apply #[[$ATTR_0]]()[%[[IDX_1]], %[[IDX_2]]]
// CHECK: %[[READ:.*]] = vector.transfer_read %[[COLLAPSED_IN]][%[[C_0_IDX]], %[[COLLAPSED_IDX]]], %[[C_0]] {in_bounds = [true]} : memref<1x1032xi32>, vector<12xi32>
// CHECK: %[[COLLAPSED_OUT:.*]] = memref.collapse_shape %[[M_OUT]] {{\[}}[0, 1, 2]] : memref<1x2x6xi32> into memref<12xi32>
// CHECK: vector.transfer_write %[[READ]], %[[COLLAPSED_OUT]][%[[C_0_IDX]]] {in_bounds = [true]} : vector<12xi32>, memref<12xi32>
@@ -459,3 +459,37 @@ func.func @fold_unit_dims_entirely(%arg0 : vector<8xi32>,
// CHECK-128B-LABEL: func @fold_unit_dims_entirely(
// CHECK-128B-NOT: memref.collapse_shape
+
+// -----
+
+func.func @regression_non_contiguous_dim_read(%subview : memref<1x3x3x2xf32, strided<[40, 10, 2, 1], offset: ?>>,
+ %idx0 : index, %idx1 : index) -> vector<2x2xf32> {
+ %c0 = arith.constant 0 : index
+ %cst_1 = arith.constant 0.000000e+00 : f32
+ %8 = vector.transfer_read %subview[%c0, %idx0, %idx1, %c0], %cst_1 {in_bounds = [true, true]} : memref<1x3x3x2xf32, strided<[40, 10, 2, 1], offset: ?>>, vector<2x2xf32>
+ return %8 : vector<2x2xf32>
+}
+
+// CHECK: #[[$MAP:.+]] = affine_map<()[s0] -> (s0 * 2)>
+// CHECK-LABEL: func.func @regression_non_contiguous_dim_read(
+// CHECK: %[[COLLAPSE:.+]] = memref.collapse_shape %{{.*}} {{\[}}[0], [1], [2, 3]] : memref<1x3x3x2xf32, strided<[40, 10, 2, 1], offset: ?>> into memref<1x3x6xf32, strided<[40, 10, 1], offset: ?>>
+// CHECK: %[[APPLY:.*]] = affine.apply #[[$MAP]]()
+
+// CHECK-128B-LABEL: func @regression_non_contiguous_dim_read(
+// CHECK-128B: memref.collapse_shape
+
+// -----
+
+func.func @unsupported_non_contiguous_dim_write(%value : vector<2x2xf32>,
+ %subview : memref<1x3x3x2xf32, strided<[40, 10, 2, 1], offset: ?>>,
+ %idx0 : index, %idx1 : index) {
+ %c0 = arith.constant 0 : index
+ vector.transfer_write %value, %subview[%c0, %idx0, %idx1, %c0] {in_bounds = [true, true]} : vector<2x2xf32>, memref<1x3x3x2xf32, strided<[40, 10, 2, 1], offset: ?>>
+ return
+}
+
+// CHECK-LABEL: func.func @unsupported_non_contiguous_dim_write(
+// CHECK-NOT: memref.collapse_shape
+
+// CHECK-128B-LABEL: func @unsupported_non_contiguous_dim_write(
+// CHECK-128B-NOT: memref.collapse_shape
diff --git a/mlir/test/Integration/Dialect/Linalg/CPU/ArmSME/fill-2d.mlir b/mlir/test/Integration/Dialect/Linalg/CPU/ArmSME/fill-2d.mlir
index 44ff1afe76d3..12f13e8dbc4a 100644
--- a/mlir/test/Integration/Dialect/Linalg/CPU/ArmSME/fill-2d.mlir
+++ b/mlir/test/Integration/Dialect/Linalg/CPU/ArmSME/fill-2d.mlir
@@ -1,13 +1,8 @@
// RUN: mlir-opt %s \
-// RUN: -transform-interpreter \
-// RUN: -test-transform-dialect-erase-schedule \
+// RUN: -transform-interpreter -test-transform-dialect-erase-schedule \
// RUN: -lower-vector-mask \
// RUN: -one-shot-bufferize="bufferize-function-boundaries" \
-// RUN: -enable-arm-streaming="streaming-mode=streaming-locally za-mode=new-za" \
-// RUN: -convert-vector-to-arm-sme -convert-arith-to-arm-sme \
-// RUN: -allocate-arm-sme-tiles -convert-arm-sme-to-scf \
-// RUN: -convert-arm-sme-to-llvm -cse -canonicalize \
-// RUN: -test-lower-to-llvm | \
+// RUN: -test-lower-to-arm-sme -test-lower-to-llvm | \
// RUN: %mcr_aarch64_cmd \
// RUN: -e=entry -entry-point-result=void \
// RUN: -march=aarch64 -mattr="+sve,+sme" \
diff --git a/mlir/test/Integration/Dialect/Linalg/CPU/ArmSME/matmul-transpose-a.mlir b/mlir/test/Integration/Dialect/Linalg/CPU/ArmSME/matmul-transpose-a.mlir
index c781d5e0af84..34c5351c8703 100644
--- a/mlir/test/Integration/Dialect/Linalg/CPU/ArmSME/matmul-transpose-a.mlir
+++ b/mlir/test/Integration/Dialect/Linalg/CPU/ArmSME/matmul-transpose-a.mlir
@@ -1,12 +1,7 @@
// RUN: mlir-opt %s \
// RUN: -transform-interpreter -test-transform-dialect-erase-schedule \
-// RUN: -one-shot-bufferize="bufferize-function-boundaries" -canonicalize \
-// RUN: -convert-vector-to-arm-sme -allocate-arm-sme-tiles -convert-arm-sme-to-scf \
-// RUN: -enable-arm-streaming="streaming-mode=streaming-locally za-mode=new-za only-if-required-by-ops" \
-// RUN: -convert-vector-to-scf -cse -arm-sve-legalize-vector-storage \
-// RUN: -convert-arm-sme-to-llvm \
-// RUN: -convert-vector-to-llvm=enable-arm-sve \
-// RUN: -cse -canonicalize -test-lower-to-llvm | \
+// RUN: -one-shot-bufferize="bufferize-function-boundaries" \
+// RUN: -test-lower-to-arm-sme -test-lower-to-llvm | \
// RUN: %mcr_aarch64_cmd \
// RUN: -e=main -entry-point-result=void \
// RUN: -march=aarch64 -mattr="+sve,+sme" \
diff --git a/mlir/test/Integration/Dialect/Linalg/CPU/ArmSME/matmul.mlir b/mlir/test/Integration/Dialect/Linalg/CPU/ArmSME/matmul.mlir
index 31c3202c3fc5..2bfdaa8e8a2b 100644
--- a/mlir/test/Integration/Dialect/Linalg/CPU/ArmSME/matmul.mlir
+++ b/mlir/test/Integration/Dialect/Linalg/CPU/ArmSME/matmul.mlir
@@ -1,12 +1,6 @@
// RUN: mlir-opt %s \
// RUN: -transform-interpreter -test-transform-dialect-erase-schedule \
-// RUN: -canonicalize \
-// RUN: -convert-vector-to-arm-sme -allocate-arm-sme-tiles -convert-arm-sme-to-scf \
-// RUN: -enable-arm-streaming="streaming-mode=streaming-locally za-mode=new-za only-if-required-by-ops" \
-// RUN: -convert-vector-to-scf -cse -arm-sve-legalize-vector-storage \
-// RUN: -convert-arm-sme-to-llvm \
-// RUN: -convert-vector-to-llvm=enable-arm-sve \
-// RUN: -cse -canonicalize -test-lower-to-llvm | \
+// RUN: -test-lower-to-arm-sme -test-lower-to-llvm | \
// RUN: %mcr_aarch64_cmd \
// RUN: -e=main -entry-point-result=void \
// RUN: -march=aarch64 -mattr="+sve,+sme" \
diff --git a/mlir/test/Integration/Dialect/Linalg/CPU/ArmSME/multi-tile-matmul.mlir b/mlir/test/Integration/Dialect/Linalg/CPU/ArmSME/multi-tile-matmul.mlir
index d5c35068ccb3..e376bdde24a1 100644
--- a/mlir/test/Integration/Dialect/Linalg/CPU/ArmSME/multi-tile-matmul.mlir
+++ b/mlir/test/Integration/Dialect/Linalg/CPU/ArmSME/multi-tile-matmul.mlir
@@ -1,11 +1,7 @@
// RUN: mlir-opt %s \
// RUN: -transform-interpreter -test-transform-dialect-erase-schedule \
// RUN: -one-shot-bufferize="bufferize-function-boundaries" -canonicalize \
-// RUN: -arm-sme-vector-legalization -canonicalize -cse \
-// RUN: -convert-vector-to-arm-sme -allocate-arm-sme-tiles -convert-arm-sme-to-scf \
-// RUN: -enable-arm-streaming="streaming-mode=streaming-locally za-mode=new-za only-if-required-by-ops" \
-// RUN: -convert-vector-to-scf=full-unroll -convert-arm-sme-to-llvm \
-// RUN: -test-lower-to-llvm | \
+// RUN: -test-lower-to-arm-sme -test-lower-to-llvm | \
// RUN: %mcr_aarch64_cmd \
// RUN: -e=main -entry-point-result=void \
// RUN: -march=aarch64 -mattr="+sve,+sme" \
diff --git a/mlir/test/Integration/Dialect/Linalg/CPU/ArmSME/use-too-many-tiles.mlir b/mlir/test/Integration/Dialect/Linalg/CPU/ArmSME/use-too-many-tiles.mlir
index 42fe21cccd48..ee3866de303e 100644
--- a/mlir/test/Integration/Dialect/Linalg/CPU/ArmSME/use-too-many-tiles.mlir
+++ b/mlir/test/Integration/Dialect/Linalg/CPU/ArmSME/use-too-many-tiles.mlir
@@ -1,10 +1,5 @@
// RUN: mlir-opt %s \
-// RUN: -convert-vector-to-arm-sme -convert-arith-to-arm-sme \
-// RUN: -allocate-arm-sme-tiles -convert-arm-sme-to-scf \
-// RUN: -enable-arm-streaming="streaming-mode=streaming-locally za-mode=new-za only-if-required-by-ops" \
-// RUN: -convert-vector-to-scf -cse -arm-sve-legalize-vector-storage \
-// RUN: -convert-arm-sme-to-llvm -convert-vector-to-llvm=enable-arm-sve -cse \
-// RUN: -canonicalize -test-lower-to-llvm -verify-diagnostics | \
+// RUN: -test-lower-to-arm-sme -test-lower-to-llvm -verify-diagnostics | \
// RUN: %mcr_aarch64_cmd \
// RUN: -e=main -entry-point-result=void \
// RUN: -march=aarch64 -mattr="+sve,+sme" \
diff --git a/mlir/test/Integration/Dialect/Vector/CPU/ArmSME/load-store-128-bit-tile.mlir b/mlir/test/Integration/Dialect/Vector/CPU/ArmSME/load-store-128-bit-tile.mlir
index 59b4a7e6a52f..06b1c107cb2c 100644
--- a/mlir/test/Integration/Dialect/Vector/CPU/ArmSME/load-store-128-bit-tile.mlir
+++ b/mlir/test/Integration/Dialect/Vector/CPU/ArmSME/load-store-128-bit-tile.mlir
@@ -1,9 +1,5 @@
// DEFINE: %{entry_point} = test_load_store_zaq0
-// DEFINE: %{compile} = mlir-opt %s \
-// DEFINE: -enable-arm-streaming="streaming-mode=streaming-locally za-mode=new-za" \
-// DEFINE: -convert-vector-to-arm-sme -convert-arm-sme-to-scf \
-// DEFINE: -convert-arm-sme-to-llvm -cse -canonicalize \
-// DEFINE: -allocate-arm-sme-tiles -test-lower-to-llvm
+// DEFINE: %{compile} = mlir-opt %s -test-lower-to-arm-sme -test-lower-to-llvm
// DEFINE: %{run} = %mcr_aarch64_cmd \
// DEFINE: -march=aarch64 -mattr=+sve,+sme \
// DEFINE: -e %{entry_point} -entry-point-result=void \
diff --git a/mlir/test/Integration/Dialect/Vector/CPU/ArmSME/test-load-vertical.mlir b/mlir/test/Integration/Dialect/Vector/CPU/ArmSME/test-load-vertical.mlir
index 064141c34924..27be801252b8 100644
--- a/mlir/test/Integration/Dialect/Vector/CPU/ArmSME/test-load-vertical.mlir
+++ b/mlir/test/Integration/Dialect/Vector/CPU/ArmSME/test-load-vertical.mlir
@@ -1,9 +1,5 @@
// DEFINE: %{entry_point} = entry
-// DEFINE: %{compile} = mlir-opt %s \
-// DEFINE: -enable-arm-streaming="streaming-mode=streaming-locally za-mode=new-za" \
-// DEFINE: -convert-vector-to-arm-sme -convert-arm-sme-to-scf -allocate-arm-sme-tiles \
-// DEFINE: -convert-arm-sme-to-llvm -cse -canonicalize \
-// DEFINE: -test-lower-to-llvm
+// DEFINE: %{compile} = mlir-opt %s -test-lower-to-arm-sme -test-lower-to-llvm
// DEFINE: %{run} = %mcr_aarch64_cmd \
// DEFINE: -march=aarch64 -mattr=+sve,+sme \
// DEFINE: -e %{entry_point} -entry-point-result=void \
diff --git a/mlir/test/Integration/Dialect/Vector/CPU/ArmSME/test-multi-tile-transpose.mlir b/mlir/test/Integration/Dialect/Vector/CPU/ArmSME/test-multi-tile-transpose.mlir
index 0827d9b7464a..9d836d93c85b 100644
--- a/mlir/test/Integration/Dialect/Vector/CPU/ArmSME/test-multi-tile-transpose.mlir
+++ b/mlir/test/Integration/Dialect/Vector/CPU/ArmSME/test-multi-tile-transpose.mlir
@@ -1,10 +1,4 @@
-// RUN: mlir-opt %s -arm-sme-vector-legalization -cse -canonicalize \
-// RUN: -convert-vector-to-arm-sme -allocate-arm-sme-tiles -convert-arm-sme-to-scf \
-// RUN: -enable-arm-streaming="streaming-mode=streaming-locally za-mode=new-za only-if-required-by-ops" \
-// RUN: -convert-vector-to-scf -cse -arm-sve-legalize-vector-storage \
-// RUN: -convert-arm-sme-to-llvm \
-// RUN: -convert-vector-to-llvm=enable-arm-sve \
-// RUN: -cse -canonicalize -test-lower-to-llvm | \
+// RUN: mlir-opt %s -test-lower-to-arm-sme -test-lower-to-llvm | \
// RUN: %mcr_aarch64_cmd \
// RUN: -e=main -entry-point-result=void \
// RUN: -march=aarch64 -mattr="+sve,+sme" \
diff --git a/mlir/test/Integration/Dialect/Vector/CPU/ArmSME/test-outerproduct-f16f16f32.mlir b/mlir/test/Integration/Dialect/Vector/CPU/ArmSME/test-outerproduct-f16f16f32.mlir
index f081838300a9..a06ad37b054e 100644
--- a/mlir/test/Integration/Dialect/Vector/CPU/ArmSME/test-outerproduct-f16f16f32.mlir
+++ b/mlir/test/Integration/Dialect/Vector/CPU/ArmSME/test-outerproduct-f16f16f32.mlir
@@ -1,11 +1,7 @@
+// DEFINE: %{opts} =
// DEFINE: %{entry} = main
-// DEFINE: %{fusion_opts} = -arm-sme-outer-product-fusion
// DEFINE: %{compile} = mlir-opt %s \
-// DEFINE: -convert-vector-to-arm-sme -convert-arith-to-arm-sme %{fusion_opts} \
-// DEFINE: -enable-arm-streaming="streaming-mode=streaming-locally za-mode=new-za only-if-required-by-ops" \
-// DEFINE: -convert-arm-sme-to-scf -allocate-arm-sme-tiles \
-// DEFINE: -convert-arm-sme-to-llvm -cse -canonicalize \
-// DEFINE: -test-lower-to-llvm -o %t
+// DEFINE: -test-lower-to-arm-sme=%{opts} -test-lower-to-llvm -o %t
// DEFINE: %{run} = %mcr_aarch64_cmd %t \
// DEFINE: -march=aarch64 -mattr=+sve,+sme \
// DEFINE: -e %{entry} -entry-point-result=void \
@@ -18,7 +14,7 @@
// Check result is the same when outerproducts are not combined into widening
// variant.
-// REDEFINE: %{fusion_opts} =
+// REDEFINE: %{opts} = fuse-outer-products=false
// RUN: %{run} | FileCheck %s
func.func @main() {
diff --git a/mlir/test/Integration/Dialect/Vector/CPU/ArmSME/test-outerproduct-f32.mlir b/mlir/test/Integration/Dialect/Vector/CPU/ArmSME/test-outerproduct-f32.mlir
index 5f41b37560e7..7e7869d1c957 100644
--- a/mlir/test/Integration/Dialect/Vector/CPU/ArmSME/test-outerproduct-f32.mlir
+++ b/mlir/test/Integration/Dialect/Vector/CPU/ArmSME/test-outerproduct-f32.mlir
@@ -1,10 +1,6 @@
// DEFINE: %{entry_point} = test_outerproduct_no_accumulator_4x4xf32
// DEFINE: %{compile} = mlir-opt %s \
-// DEFINE: -enable-arm-streaming="streaming-mode=streaming-locally za-mode=new-za" \
-// DEFINE: -convert-vector-to-arm-sme -convert-arith-to-arm-sme \
-// DEFINE: -convert-arm-sme-to-scf -allocate-arm-sme-tiles \
-// DEFINE: -convert-arm-sme-to-llvm -cse -canonicalize \
-// DEFINE: -test-lower-to-llvm -o %t
+// DEFINE: -test-lower-to-arm-sme -test-lower-to-llvm -o %t
// DEFINE: %{run} = %mcr_aarch64_cmd %t \
// DEFINE: -march=aarch64 -mattr=+sve,+sme \
// DEFINE: -e %{entry_point} -entry-point-result=void \
diff --git a/mlir/test/Integration/Dialect/Vector/CPU/ArmSME/test-outerproduct-f64.mlir b/mlir/test/Integration/Dialect/Vector/CPU/ArmSME/test-outerproduct-f64.mlir
index a1bb9b7d6f80..46bf799232ae 100644
--- a/mlir/test/Integration/Dialect/Vector/CPU/ArmSME/test-outerproduct-f64.mlir
+++ b/mlir/test/Integration/Dialect/Vector/CPU/ArmSME/test-outerproduct-f64.mlir
@@ -1,10 +1,6 @@
// DEFINE: %{entry_point} = test_outerproduct_no_accumulator_2x2xf64
// DEFINE: %{compile} = mlir-opt %s \
-// DEFINE: -enable-arm-streaming="streaming-mode=streaming-locally za-mode=new-za" \
-// DEFINE: -convert-vector-to-arm-sme -convert-arith-to-arm-sme \
-// DEFINE: -convert-arm-sme-to-scf -allocate-arm-sme-tiles \
-// DEFINE: -convert-arm-sme-to-llvm -cse -canonicalize \
-// DEFINE: -test-lower-to-llvm -o %t
+// DEFINE: -test-lower-to-arm-sme -test-lower-to-llvm -o %t
// DEFINE: %{run} = %mcr_aarch64_cmd %t \
// DEFINE: -march=aarch64 -mattr=+sve,+sme-f64f64 \
// DEFINE: -e %{entry_point} -entry-point-result=void \
diff --git a/mlir/test/Integration/Dialect/Vector/CPU/ArmSME/test-outerproduct-i8i8i32.mlir b/mlir/test/Integration/Dialect/Vector/CPU/ArmSME/test-outerproduct-i8i8i32.mlir
index 1770e579f0bd..9a353ec2d2f6 100644
--- a/mlir/test/Integration/Dialect/Vector/CPU/ArmSME/test-outerproduct-i8i8i32.mlir
+++ b/mlir/test/Integration/Dialect/Vector/CPU/ArmSME/test-outerproduct-i8i8i32.mlir
@@ -1,11 +1,5 @@
// DEFINE: %{entry} = main
-// DEFINE: %{compile} = mlir-opt %s \
-// DEFINE: -convert-vector-to-arm-sme -convert-arith-to-arm-sme \
-// DEFINE: -arm-sme-outer-product-fusion \
-// DEFINE: -enable-arm-streaming="streaming-mode=streaming-locally za-mode=new-za only-if-required-by-ops" \
-// DEFINE: -convert-arm-sme-to-scf -allocate-arm-sme-tiles \
-// DEFINE: -convert-arm-sme-to-llvm -cse -canonicalize \
-// DEFINE: -test-lower-to-llvm
+// DEFINE: %{compile} = mlir-opt %s -test-lower-to-arm-sme -test-lower-to-llvm
// DEFINE: %{run} = %mcr_aarch64_cmd \
// DEFINE: -march=aarch64 -mattr=+sve,+sme \
// DEFINE: -e %{entry} -entry-point-result=void \
diff --git a/mlir/test/Integration/Dialect/Vector/CPU/ArmSME/test-transfer-read-2d.mlir b/mlir/test/Integration/Dialect/Vector/CPU/ArmSME/test-transfer-read-2d.mlir
index 6e028d5fb836..52f56883cad9 100644
--- a/mlir/test/Integration/Dialect/Vector/CPU/ArmSME/test-transfer-read-2d.mlir
+++ b/mlir/test/Integration/Dialect/Vector/CPU/ArmSME/test-transfer-read-2d.mlir
@@ -1,9 +1,5 @@
// DEFINE: %{entry_point} = entry
-// DEFINE: %{compile} = mlir-opt %s \
-// DEFINE: -convert-vector-to-arm-sme -convert-arm-sme-to-scf -allocate-arm-sme-tiles \
-// DEFINE: -enable-arm-streaming="streaming-mode=streaming-locally za-mode=new-za only-if-required-by-ops" \
-// DEFINE: -convert-arm-sme-to-llvm -cse -canonicalize \
-// DEFINE: -test-lower-to-llvm
+// DEFINE: %{compile} = mlir-opt %s -test-lower-to-arm-sme -test-lower-to-llvm
// DEFINE: %{run} = %mcr_aarch64_cmd \
// DEFINE: -march=aarch64 -mattr=+sve,+sme \
// DEFINE: -e %{entry_point} -entry-point-result=void \
diff --git a/mlir/test/Integration/Dialect/Vector/CPU/ArmSME/test-transfer-write-2d.mlir b/mlir/test/Integration/Dialect/Vector/CPU/ArmSME/test-transfer-write-2d.mlir
index c0c1f55d7ddd..710cc6672f00 100644
--- a/mlir/test/Integration/Dialect/Vector/CPU/ArmSME/test-transfer-write-2d.mlir
+++ b/mlir/test/Integration/Dialect/Vector/CPU/ArmSME/test-transfer-write-2d.mlir
@@ -1,10 +1,5 @@
// DEFINE: %{entry_point} = entry
-// DEFINE: %{compile} = mlir-opt %s \
-// DEFINE: -convert-vector-to-arm-sme -convert-arith-to-arm-sme \
-// DEFINE: -convert-arm-sme-to-scf -allocate-arm-sme-tiles \
-// DEFINE: -enable-arm-streaming="streaming-mode=streaming-locally za-mode=new-za only-if-required-by-ops" \
-// DEFINE: -convert-arm-sme-to-llvm -cse -canonicalize \
-// DEFINE: -test-lower-to-llvm
+// DEFINE: %{compile} = mlir-opt %s -test-lower-to-arm-sme -test-lower-to-llvm
// DEFINE: %{run} = %mcr_aarch64_cmd \
// DEFINE: -march=aarch64 -mattr=+sve,+sme \
// DEFINE: -e %{entry_point} -entry-point-result=void \
diff --git a/mlir/test/Integration/Dialect/Vector/CPU/ArmSME/test-transpose.mlir b/mlir/test/Integration/Dialect/Vector/CPU/ArmSME/test-transpose.mlir
index eee3c56351d8..88bc0d0709d4 100644
--- a/mlir/test/Integration/Dialect/Vector/CPU/ArmSME/test-transpose.mlir
+++ b/mlir/test/Integration/Dialect/Vector/CPU/ArmSME/test-transpose.mlir
@@ -1,9 +1,5 @@
// DEFINE: %{entry_point} = entry
-// DEFINE: %{compile} = mlir-opt %s \
-// DEFINE: -enable-arm-streaming="streaming-mode=streaming-locally za-mode=new-za" \
-// DEFINE: -convert-vector-to-arm-sme -convert-arm-sme-to-scf -allocate-arm-sme-tiles \
-// DEFINE: -convert-arm-sme-to-llvm -cse -canonicalize \
-// DEFINE: -test-lower-to-llvm
+// DEFINE: %{compile} = mlir-opt %s -test-lower-to-arm-sme -test-lower-to-llvm
// DEFINE: %{run} = %mcr_aarch64_cmd \
// DEFINE: -march=aarch64 -mattr=+sve,+sme \
// DEFINE: -e %{entry_point} -entry-point-result=void \
diff --git a/mlir/test/Integration/Dialect/Vector/CPU/ArmSME/tile_fill.mlir b/mlir/test/Integration/Dialect/Vector/CPU/ArmSME/tile_fill.mlir
index 223bc8ce7434..e14917486d84 100644
--- a/mlir/test/Integration/Dialect/Vector/CPU/ArmSME/tile_fill.mlir
+++ b/mlir/test/Integration/Dialect/Vector/CPU/ArmSME/tile_fill.mlir
@@ -1,8 +1,4 @@
-// RUN: mlir-opt %s -enable-arm-streaming="streaming-mode=streaming-locally za-mode=new-za" \
-// RUN: -convert-vector-to-arm-sme -convert-arith-to-arm-sme \
-// RUN: -convert-arm-sme-to-scf -allocate-arm-sme-tiles \
-// RUN: -convert-arm-sme-to-llvm -cse -canonicalize \
-// RUN: -test-lower-to-llvm | \
+// RUN: mlir-opt %s -test-lower-to-arm-sme -test-lower-to-llvm | \
// RUN: %mcr_aarch64_cmd \
// RUN: -march=aarch64 -mattr=+sve,+sme \
// RUN: -e entry -entry-point-result=i32 \
diff --git a/mlir/test/Integration/Dialect/Vector/CPU/ArmSME/vector-load-store.mlir b/mlir/test/Integration/Dialect/Vector/CPU/ArmSME/vector-load-store.mlir
index 2f151e2ec72f..b29790db14dd 100644
--- a/mlir/test/Integration/Dialect/Vector/CPU/ArmSME/vector-load-store.mlir
+++ b/mlir/test/Integration/Dialect/Vector/CPU/ArmSME/vector-load-store.mlir
@@ -1,9 +1,5 @@
// DEFINE: %{entry_point} = za0_d_f64
-// DEFINE: %{compile} = mlir-opt %s \
-// DEFINE: -enable-arm-streaming="streaming-mode=streaming-locally za-mode=new-za" \
-// DEFINE: -convert-vector-to-arm-sme -convert-arm-sme-to-scf -allocate-arm-sme-tiles \
-// DEFINE: -convert-arm-sme-to-llvm -cse -canonicalize \
-// DEFINE: -test-lower-to-llvm
+// DEFINE: %{compile} = mlir-opt %s -test-lower-to-arm-sme -test-lower-to-llvm
// DEFINE: %{run} = %mcr_aarch64_cmd \
// DEFINE: -march=aarch64 -mattr=+sve,+sme \
// DEFINE: -e %{entry_point} -entry-point-result=i32 \
diff --git a/mlir/test/Integration/Dialect/Vector/CPU/ArmSME/vector-ops.mlir b/mlir/test/Integration/Dialect/Vector/CPU/ArmSME/vector-ops.mlir
index f28bf19b2999..c8c401bed144 100644
--- a/mlir/test/Integration/Dialect/Vector/CPU/ArmSME/vector-ops.mlir
+++ b/mlir/test/Integration/Dialect/Vector/CPU/ArmSME/vector-ops.mlir
@@ -1,8 +1,5 @@
// DEFINE: %{entry_point} = entry
-// DEFINE: %{compile} = mlir-opt %s -enable-arm-streaming="streaming-mode=streaming-locally za-mode=new-za" \
-// DEFINE: -convert-vector-to-arm-sme -convert-arith-to-arm-sme \
-// DEFINE: -convert-arm-sme-to-scf -allocate-arm-sme-tiles \
-// DEFINE: -convert-arm-sme-to-llvm -test-lower-to-llvm
+// DEFINE: %{compile} = mlir-opt %s -test-lower-to-arm-sme -test-lower-to-llvm
// DEFINE: %{run} = %mcr_aarch64_cmd \
// DEFINE: -march=aarch64 -mattr=+sve,+sme \
// DEFINE: -e %{entry_point} -entry-point-result=i32 \
diff --git a/mlir/test/Integration/Dialect/Vector/CPU/ArmSVE/test-scalable-interleave.mlir b/mlir/test/Integration/Dialect/Vector/CPU/ArmSVE/test-scalable-interleave.mlir
new file mode 100644
index 000000000000..07989bd71f50
--- /dev/null
+++ b/mlir/test/Integration/Dialect/Vector/CPU/ArmSVE/test-scalable-interleave.mlir
@@ -0,0 +1,25 @@
+// RUN: mlir-opt %s -test-lower-to-llvm | \
+// RUN: %mcr_aarch64_cmd -e entry -entry-point-result=void \
+// RUN: -shared-libs=%mlir_c_runner_utils,%mlir_arm_runner_utils \
+// RUN: -march=aarch64 -mattr=+sve | \
+// RUN: FileCheck %s
+
+func.func @entry() {
+ %f1 = arith.constant 1.0 : f32
+ %f2 = arith.constant 2.0 : f32
+ %v1 = vector.splat %f1 : vector<[4]xf32>
+ %v2 = vector.splat %f2 : vector<[4]xf32>
+ vector.print %v1 : vector<[4]xf32>
+ vector.print %v2 : vector<[4]xf32>
+ //
+ // Test vectors:
+ //
+ // CHECK: ( 1, 1, 1, 1
+ // CHECK: ( 2, 2, 2, 2
+
+ %v3 = vector.interleave %v1, %v2 : vector<[4]xf32>
+ vector.print %v3 : vector<[8]xf32>
+ // CHECK: ( 1, 2, 1, 2, 1, 2, 1, 2
+
+ return
+}
diff --git a/mlir/test/Integration/Dialect/Vector/CPU/test-interleave.mlir b/mlir/test/Integration/Dialect/Vector/CPU/test-interleave.mlir
new file mode 100644
index 000000000000..0bc78af6aba0
--- /dev/null
+++ b/mlir/test/Integration/Dialect/Vector/CPU/test-interleave.mlir
@@ -0,0 +1,24 @@
+// RUN: mlir-opt %s -test-lower-to-llvm | \
+// RUN: mlir-cpu-runner -e entry -entry-point-result=void \
+// RUN: -shared-libs=%mlir_c_runner_utils | \
+// RUN: FileCheck %s
+
+func.func @entry() {
+ %f1 = arith.constant 1.0 : f32
+ %f2 = arith.constant 2.0 : f32
+ %v1 = vector.splat %f1 : vector<2x4xf32>
+ %v2 = vector.splat %f2 : vector<2x4xf32>
+ vector.print %v1 : vector<2x4xf32>
+ vector.print %v2 : vector<2x4xf32>
+ //
+ // Test vectors:
+ //
+ // CHECK: ( ( 1, 1, 1, 1 ), ( 1, 1, 1, 1 ) )
+ // CHECK: ( ( 2, 2, 2, 2 ), ( 2, 2, 2, 2 ) )
+
+ %v3 = vector.interleave %v1, %v2 : vector<2x4xf32>
+ vector.print %v3 : vector<2x8xf32>
+ // CHECK: ( ( 1, 2, 1, 2, 1, 2, 1, 2 ), ( 1, 2, 1, 2, 1, 2, 1, 2 ) )
+
+ return
+}
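
Note (not part of the patch): the two new integration tests above exercise vector.interleave on scalable 1-D and fixed 2-D inputs. As a minimal sketch of the element ordering the 1-D case checks for, written as a plain C++ analogue under the assumption of equally sized operands:

    #include <cassert>
    #include <iostream>
    #include <vector>

    // Sketch of the 1-D interleave ordering: result[2*i] comes from the first
    // operand and result[2*i+1] from the second, matching the
    // "( 1, 2, 1, 2, ... )" FileCheck line above.
    std::vector<float> interleave(const std::vector<float> &a,
                                  const std::vector<float> &b) {
      assert(a.size() == b.size() && "operands must have the same length");
      std::vector<float> result;
      result.reserve(2 * a.size());
      for (size_t i = 0; i < a.size(); ++i) {
        result.push_back(a[i]);
        result.push_back(b[i]);
      }
      return result;
    }

    int main() {
      std::vector<float> v1(4, 1.0f), v2(4, 2.0f);
      for (float x : interleave(v1, v2))
        std::cout << x << ' '; // prints: 1 2 1 2 1 2 1 2
      std::cout << '\n';
    }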
diff --git a/mlir/test/Target/LLVMIR/Import/import-failure.ll b/mlir/test/Target/LLVMIR/Import/import-failure.ll
index 096213466566..9a4e939d1065 100644
--- a/mlir/test/Target/LLVMIR/Import/import-failure.ll
+++ b/mlir/test/Target/LLVMIR/Import/import-failure.ll
@@ -59,13 +59,15 @@ define void @unhandled_intrinsic() gc "example" {
; // -----
+; Check that debug intrinsics with an unsupported argument are dropped.
+
declare void @llvm.dbg.value(metadata, metadata, metadata)
; CHECK: import-failure.ll
-; CHECK-SAME: warning: dropped intrinsic: call void @llvm.dbg.value(metadata !DIArgList(i64 %arg1, i64 undef), metadata !3, metadata !DIExpression(DW_OP_LLVM_arg, 0, DW_OP_LLVM_arg, 1, DW_OP_constu, 1, DW_OP_mul, DW_OP_plus, DW_OP_stack_value)), !dbg !5
+; CHECK-SAME: warning: dropped intrinsic: call void @llvm.dbg.value(metadata !DIArgList(i64 %{{.*}}, i64 undef), metadata !3, metadata !DIExpression(DW_OP_LLVM_arg, 0, DW_OP_LLVM_arg, 1, DW_OP_constu, 1, DW_OP_mul, DW_OP_plus, DW_OP_stack_value))
; CHECK: import-failure.ll
-; CHECK-SAME: warning: dropped intrinsic: call void @llvm.dbg.value(metadata !6, metadata !3, metadata !DIExpression()), !dbg !5
-define void @dropped_instruction(i64 %arg1) {
+; CHECK-SAME: warning: dropped intrinsic: call void @llvm.dbg.value(metadata !6, metadata !3, metadata !DIExpression())
+define void @unsupported_argument(i64 %arg1) {
call void @llvm.dbg.value(metadata !DIArgList(i64 %arg1, i64 undef), metadata !3, metadata !DIExpression(DW_OP_LLVM_arg, 0, DW_OP_LLVM_arg, 1, DW_OP_constu, 1, DW_OP_mul, DW_OP_plus, DW_OP_stack_value)), !dbg !5
call void @llvm.dbg.value(metadata !6, metadata !3, metadata !DIExpression()), !dbg !5
ret void
@@ -83,6 +85,38 @@ define void @dropped_instruction(i64 %arg1) {
; // -----
+; Check that debug intrinsics that depend on cyclic metadata are dropped.
+
+declare void @llvm.dbg.value(metadata, metadata, metadata)
+
+; CHECK: import-failure.ll
+; CHECK-SAME: warning: dropped instruction: call void @llvm.dbg.label(metadata !{{.*}})
+; CHECK: import-failure.ll
+; CHECK-SAME: warning: dropped intrinsic: call void @llvm.dbg.value(metadata i64 %{{.*}}, metadata !3, metadata !DIExpression())
+define void @cyclic_metadata(i64 %arg1) {
+ call void @llvm.dbg.value(metadata i64 %arg1, metadata !10, metadata !DIExpression()), !dbg !14
+ call void @llvm.dbg.label(metadata !13), !dbg !14
+ ret void
+}
+
+!llvm.dbg.cu = !{!1}
+!llvm.module.flags = !{!0}
+!0 = !{i32 2, !"Debug Info Version", i32 3}
+!1 = distinct !DICompileUnit(language: DW_LANG_C, file: !2)
+!2 = !DIFile(filename: "import-failure.ll", directory: "/")
+!3 = !DICompositeType(tag: DW_TAG_array_type, size: 42, baseType: !4)
+!4 = !DIDerivedType(tag: DW_TAG_pointer_type, baseType: !3)
+!5 = distinct !DISubprogram(name: "class_method", scope: !2, file: !2, type: !6, spFlags: DISPFlagDefinition, unit: !1)
+!6 = !DISubroutineType(types: !7)
+!7 = !{!3}
+!10 = !DILocalVariable(scope: !5, name: "arg1", file: !2, line: 1, arg: 1, align: 64);
+!11 = !DILexicalBlock(scope: !5)
+!12 = !DILexicalBlockFile(scope: !11, discriminator: 0)
+!13 = !DILabel(scope: !12, name: "label", file: !2, line: 42)
+!14 = !DILocation(line: 1, column: 2, scope: !5)
+
+; // -----
+
; global_dtors with non-null data fields cannot be represented in MLIR.
; CHECK: <unknown>
; CHECK-SAME: error: unhandled global variable: @llvm.global_dtors
diff --git a/mlir/test/lib/Dialect/ArmSME/CMakeLists.txt b/mlir/test/lib/Dialect/ArmSME/CMakeLists.txt
new file mode 100644
index 000000000000..e942c7b8ac05
--- /dev/null
+++ b/mlir/test/lib/Dialect/ArmSME/CMakeLists.txt
@@ -0,0 +1,18 @@
+# Exclude tests from libMLIR.so
+add_mlir_library(MLIRArmSMETestPasses
+ TestLowerToArmSME.cpp
+
+ EXCLUDE_FROM_LIBMLIR
+
+ LINK_LIBS PUBLIC
+ MLIRArithToArmSME
+ MLIRArmSMEToLLVM
+ MLIRArmSMEToSCF
+ MLIRArmSMETransforms
+ MLIRArmSVETransforms
+ MLIRIR
+ MLIRPass
+ MLIRTransforms
+ MLIRVectorToArmSME
+ MLIRVectorToSCF
+ )
diff --git a/mlir/test/lib/Dialect/ArmSME/TestLowerToArmSME.cpp b/mlir/test/lib/Dialect/ArmSME/TestLowerToArmSME.cpp
new file mode 100644
index 000000000000..48d4a5859f8a
--- /dev/null
+++ b/mlir/test/lib/Dialect/ArmSME/TestLowerToArmSME.cpp
@@ -0,0 +1,99 @@
+//===- TestLowerToArmSME.cpp - Test lowering to ArmSME as a sink pass -----===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements a pass for testing the lowering to ArmSME as a
+// generally usable sink pass.
+//
+//===----------------------------------------------------------------------===//
+
+#include "mlir/Conversion/ArithToArmSME/ArithToArmSME.h"
+#include "mlir/Conversion/ArmSMEToLLVM/ArmSMEToLLVM.h"
+#include "mlir/Conversion/ArmSMEToSCF/ArmSMEToSCF.h"
+#include "mlir/Conversion/VectorToArmSME/VectorToArmSME.h"
+#include "mlir/Conversion/VectorToSCF/VectorToSCF.h"
+#include "mlir/Dialect/ArmSME/Transforms/Passes.h"
+#include "mlir/Dialect/ArmSVE/Transforms/Passes.h"
+#include "mlir/IR/DialectRegistry.h"
+#include "mlir/Pass/Pass.h"
+#include "mlir/Pass/PassManager.h"
+#include "mlir/Pass/PassOptions.h"
+#include "mlir/Transforms/Passes.h"
+
+using namespace mlir;
+
+namespace {
+struct TestLowerToArmSMEOptions
+ : public PassPipelineOptions<TestLowerToArmSMEOptions> {
+ PassOptions::Option<bool> fuseOuterProducts{
+ *this, "fuse-outer-products",
+ llvm::cl::desc("Fuse outer product operations via "
+ "'-arm-sme-outer-product-fusion' pass"),
+ llvm::cl::init(true)};
+};
+
+void buildTestLowerToArmSME(OpPassManager &pm,
+ const TestLowerToArmSMEOptions &options) {
+ // Legalize vector operations so they can be converted to ArmSME.
+ pm.addPass(arm_sme::createVectorLegalizationPass());
+
+ // Sprinkle some cleanups.
+ pm.addPass(createCanonicalizerPass());
+ pm.addPass(createCSEPass());
+
+ // Passes that convert operations on vectors to ArmSME operations.
+
+ // Convert Arith to ArmSME.
+ pm.addPass(createArithToArmSMEConversionPass());
+ // Convert Vector to ArmSME.
+ pm.addPass(createConvertVectorToArmSMEPass());
+
+ // Fuse outer products.
+ if (options.fuseOuterProducts)
+ pm.addPass(arm_sme::createOuterProductFusionPass());
+
+ // Convert operations on high-level vectors to loops.
+
+ // Convert ArmSME to SCF.
+ pm.addPass(createConvertArmSMEToSCFPass());
+
+ // Convert Vector to SCF (with full unroll enabled).
+ pm.addPass(createConvertVectorToSCFPass(
+ VectorTransferToSCFOptions().enableFullUnroll()));
+
+ // Allocate tiles for ArmSME operations.
+ //
+ // Later passes may create further ArmSME ops that implement the
+ // ArmSMETileOpInterface, but tiles are allocated for root operations,
+ // all of which should now exist.
+ pm.addPass(arm_sme::createTileAllocationPass());
+
+ // Enable streaming-mode and ZA.
+ pm.addPass(arm_sme::createEnableArmStreamingPass(
+ arm_sme::ArmStreamingMode::StreamingLocally, arm_sme::ArmZaMode::NewZA,
+ /*onlyIfRequiredByOps=*/true));
+
+ // Convert ArmSME to LLVM.
+ pm.addPass(createConvertArmSMEToLLVMPass());
+
+ // Sprinkle some cleanups.
+ pm.addPass(createCanonicalizerPass());
+ pm.addPass(createCSEPass());
+}
+} // namespace
+
+namespace mlir {
+namespace test {
+void registerTestLowerToArmSME() {
+ PassPipelineRegistration<TestLowerToArmSMEOptions>(
+ "test-lower-to-arm-sme",
+ "An example pipeline to lower operations on vectors (arith, vector) to "
+ "LLVM via ArmSME.",
+ buildTestLowerToArmSME);
+}
+} // namespace test
+} // namespace mlir
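
Note (not part of the patch): once this test library is linked in, the registered pipeline can also be assembled from its textual form, mirroring the RUN-line usage "-test-lower-to-arm-sme=fuse-outer-products=false". A minimal sketch, assuming mlir::test::registerTestLowerToArmSME() has already run (as it does in mlir-opt when tests are enabled) and that the standard pipeline-string syntax for registered pipeline options applies:

    #include "mlir/IR/BuiltinOps.h"
    #include "mlir/IR/MLIRContext.h"
    #include "mlir/Pass/PassManager.h"
    #include "mlir/Pass/PassRegistry.h"

    // Sketch only: parse the registered test pipeline (with outer-product
    // fusion disabled) into a pass manager and run it on a module.
    mlir::LogicalResult lowerToArmSME(mlir::MLIRContext &context,
                                      mlir::ModuleOp module) {
      mlir::PassManager pm(&context);
      if (mlir::failed(mlir::parsePassPipeline(
              "test-lower-to-arm-sme{fuse-outer-products=false}", pm)))
        return mlir::failure();
      return pm.run(module);
    }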
diff --git a/mlir/test/lib/Dialect/CMakeLists.txt b/mlir/test/lib/Dialect/CMakeLists.txt
index 30a17c201ff7..e20cd4473a35 100644
--- a/mlir/test/lib/Dialect/CMakeLists.txt
+++ b/mlir/test/lib/Dialect/CMakeLists.txt
@@ -1,5 +1,6 @@
add_subdirectory(Affine)
add_subdirectory(Arith)
+add_subdirectory(ArmSME)
add_subdirectory(Bufferization)
add_subdirectory(ControlFlow)
add_subdirectory(DLTI)
diff --git a/mlir/test/lib/Dialect/Test/TestPatterns.cpp b/mlir/test/lib/Dialect/Test/TestPatterns.cpp
index 108cfe8950ef..bde4255ee4b3 100644
--- a/mlir/test/lib/Dialect/Test/TestPatterns.cpp
+++ b/mlir/test/lib/Dialect/Test/TestPatterns.cpp
@@ -1152,8 +1152,10 @@ struct TestLegalizePatternDriver
// Handle a partial conversion.
if (mode == ConversionMode::Partial) {
DenseSet<Operation *> unlegalizedOps;
- if (failed(applyPartialConversion(
- getOperation(), target, std::move(patterns), &unlegalizedOps))) {
+ ConversionConfig config;
+ config.unlegalizedOps = &unlegalizedOps;
+ if (failed(applyPartialConversion(getOperation(), target,
+ std::move(patterns), config))) {
getOperation()->emitRemark() << "applyPartialConversion failed";
}
// Emit remarks for each legalizable operation.
@@ -1181,8 +1183,10 @@ struct TestLegalizePatternDriver
// Analyze the convertible operations.
DenseSet<Operation *> legalizedOps;
+ ConversionConfig config;
+ config.legalizableOps = &legalizedOps;
if (failed(applyAnalysisConversion(getOperation(), target,
- std::move(patterns), legalizedOps)))
+ std::move(patterns), config)))
return signalPassFailure();
// Emit remarks for each legalizable operation.
@@ -1806,8 +1810,10 @@ struct TestMergeBlocksPatternDriver
});
DenseSet<Operation *> unlegalizedOps;
+ ConversionConfig config;
+ config.unlegalizedOps = &unlegalizedOps;
(void)applyPartialConversion(getOperation(), target, std::move(patterns),
- &unlegalizedOps);
+ config);
for (auto *op : unlegalizedOps)
op->emitRemark() << "op '" << op->getName() << "' is not legalizable";
}
diff --git a/mlir/tools/mlir-opt/CMakeLists.txt b/mlir/tools/mlir-opt/CMakeLists.txt
index 68aa6bad5f92..701fc461b3b4 100644
--- a/mlir/tools/mlir-opt/CMakeLists.txt
+++ b/mlir/tools/mlir-opt/CMakeLists.txt
@@ -17,6 +17,7 @@ if(MLIR_INCLUDE_TESTS)
MLIRTestFuncToLLVM
MLIRAffineTransformsTestPasses
MLIRArithTestPasses
+ MLIRArmSMETestPasses
MLIRBufferizationTestPasses
MLIRControlFlowTestPasses
MLIRDLTITestPasses
diff --git a/mlir/tools/mlir-opt/mlir-opt.cpp b/mlir/tools/mlir-opt/mlir-opt.cpp
index f11c6b4355fd..4dfa05cc8ca8 100644
--- a/mlir/tools/mlir-opt/mlir-opt.cpp
+++ b/mlir/tools/mlir-opt/mlir-opt.cpp
@@ -109,6 +109,7 @@ void registerTestLoopFusion();
void registerTestCFGLoopInfoPass();
void registerTestLoopMappingPass();
void registerTestLoopUnrollingPass();
+void registerTestLowerToArmSME();
void registerTestLowerToLLVM();
void registerTestMakeIsolatedFromAbovePass();
void registerTestMatchReductionPass();
@@ -233,6 +234,7 @@ void registerTestPasses() {
mlir::test::registerTestCFGLoopInfoPass();
mlir::test::registerTestLoopMappingPass();
mlir::test::registerTestLoopUnrollingPass();
+ mlir::test::registerTestLowerToArmSME();
mlir::test::registerTestLowerToLLVM();
mlir::test::registerTestMakeIsolatedFromAbovePass();
mlir::test::registerTestMatchReductionPass();
diff --git a/mlir/unittests/Dialect/SPIRV/SerializationTest.cpp b/mlir/unittests/Dialect/SPIRV/SerializationTest.cpp
index 3a6bcbd999a5..9d2f690ed898 100644
--- a/mlir/unittests/Dialect/SPIRV/SerializationTest.cpp
+++ b/mlir/unittests/Dialect/SPIRV/SerializationTest.cpp
@@ -77,7 +77,7 @@ protected:
}
// Inserts an Integer or a Vector of Integers constant of value 'val'.
- spirv::ConstantOp AddConstInt(Type type, const APInt &val) {
+ spirv::ConstantOp addConstInt(Type type, const APInt &val) {
OpBuilder builder(module->getRegion());
auto loc = UnknownLoc::get(&context);
@@ -181,8 +181,8 @@ TEST_F(SerializationTest, SignlessVsSignedIntegerConstantBitExtension) {
APInt signedIntConstVal(signedInt16Type.getWidth(), -1,
signedInt16Type.getSignedness());
- AddConstInt(signlessInt16Type, signlessIntConstVal);
- AddConstInt(signedInt16Type, signedIntConstVal);
+ addConstInt(signlessInt16Type, signlessIntConstVal);
+ addConstInt(signedInt16Type, signedIntConstVal);
ASSERT_TRUE(succeeded(spirv::serialize(module.get(), binary)));
auto hasSignlessVal = [&](spirv::Opcode opcode, ArrayRef<uint32_t> operands) {
diff --git a/mlir/unittests/IR/InterfaceAttachmentTest.cpp b/mlir/unittests/IR/InterfaceAttachmentTest.cpp
index 2e1309ad776f..16de34c45ec6 100644
--- a/mlir/unittests/IR/InterfaceAttachmentTest.cpp
+++ b/mlir/unittests/IR/InterfaceAttachmentTest.cpp
@@ -421,7 +421,7 @@ TEST(InterfaceAttachmentTest, PromisedInterfaces) {
// Attribute interfaces use the exact same mechanism as types, so just check
// that the promise mechanism works for attributes.
MLIRContext context;
- auto testDialect = context.getOrLoadDialect<test::TestDialect>();
+ auto *testDialect = context.getOrLoadDialect<test::TestDialect>();
auto attr = test::SimpleAAttr::get(&context);
// `SimpleAAttr` doesn't implement nor promises the
diff --git a/mlir/unittests/IR/OperationSupportTest.cpp b/mlir/unittests/IR/OperationSupportTest.cpp
index 8a4f67b92627..9d75615b39c0 100644
--- a/mlir/unittests/IR/OperationSupportTest.cpp
+++ b/mlir/unittests/IR/OperationSupportTest.cpp
@@ -295,9 +295,9 @@ TEST(OperationEquivalenceTest, HashWorksWithFlags) {
MLIRContext context;
context.getOrLoadDialect<test::TestDialect>();
- auto op1 = createOp(&context);
+ auto *op1 = createOp(&context);
// `op1` has an unknown loc.
- auto op2 = createOp(&context);
+ auto *op2 = createOp(&context);
op2->setLoc(NameLoc::get(StringAttr::get(&context, "foo")));
auto getHash = [](Operation *op, OperationEquivalence::Flags flags) {
return OperationEquivalence::computeHash(
diff --git a/mlir/unittests/Target/LLVM/SerializeNVVMTarget.cpp b/mlir/unittests/Target/LLVM/SerializeNVVMTarget.cpp
index a00ebba7b97e..26bfbd5c11e8 100644
--- a/mlir/unittests/Target/LLVM/SerializeNVVMTarget.cpp
+++ b/mlir/unittests/Target/LLVM/SerializeNVVMTarget.cpp
@@ -37,7 +37,7 @@ using namespace mlir;
class MLIRTargetLLVMNVVM : public ::testing::Test {
protected:
- virtual void SetUp() {
+ void SetUp() override {
registerBuiltinDialectTranslation(registry);
registerLLVMDialectTranslation(registry);
registerGPUDialectTranslation(registry);
@@ -85,7 +85,7 @@ TEST_F(MLIRTargetLLVMNVVM, SKIP_WITHOUT_NVPTX(SerializeNVVMMToLLVM)) {
serializer.serializeToObject(gpuModule, options);
// Check that the serializer was successful.
ASSERT_TRUE(object != std::nullopt);
- ASSERT_TRUE(object->size() > 0);
+ ASSERT_TRUE(!object->empty());
// Read the serialized module.
llvm::MemoryBufferRef buffer(StringRef(object->data(), object->size()),
@@ -121,7 +121,7 @@ TEST_F(MLIRTargetLLVMNVVM, SKIP_WITHOUT_NVPTX(SerializeNVVMToPTX)) {
serializer.serializeToObject(gpuModule, options);
// Check that the serializer was successful.
ASSERT_TRUE(object != std::nullopt);
- ASSERT_TRUE(object->size() > 0);
+ ASSERT_TRUE(!object->empty());
ASSERT_TRUE(
StringRef(object->data(), object->size()).contains("nvvm_kernel"));
@@ -151,6 +151,6 @@ TEST_F(MLIRTargetLLVMNVVM, SKIP_WITHOUT_NVPTX(SerializeNVVMToBinary)) {
serializer.serializeToObject(gpuModule, options);
// Check that the serializer was successful.
ASSERT_TRUE(object != std::nullopt);
- ASSERT_TRUE(object->size() > 0);
+ ASSERT_TRUE(!object->empty());
}
}
diff --git a/openmp/libomptarget/CMakeLists.txt b/openmp/libomptarget/CMakeLists.txt
index 17e61d0bc47d..a74eff0c0beb 100644
--- a/openmp/libomptarget/CMakeLists.txt
+++ b/openmp/libomptarget/CMakeLists.txt
@@ -119,14 +119,7 @@ endif()
pythonize_bool(LIBOMPTARGET_OMPT_SUPPORT)
-# Check if this build supports the GPU libc.
-set(LIBC_GPU_SUPPORT FALSE)
-if("libc" IN_LIST LLVM_ENABLE_RUNTIMES AND (LIBC_GPU_BUILD OR
- LIBC_GPU_ARCHITECTURES))
- set(LIBC_GPU_SUPPORT TRUE)
-endif()
-
-set(LIBOMPTARGET_GPU_LIBC_SUPPORT ${LIBC_GPU_SUPPORT} CACHE BOOL
+set(LIBOMPTARGET_GPU_LIBC_SUPPORT ${LLVM_LIBC_GPU_BUILD} CACHE BOOL
"Libomptarget support for the GPU libc")
pythonize_bool(LIBOMPTARGET_GPU_LIBC_SUPPORT)
diff --git a/openmp/libomptarget/include/PluginManager.h b/openmp/libomptarget/include/PluginManager.h
index ec5d98dc8cd3..77684285ddf1 100644
--- a/openmp/libomptarget/include/PluginManager.h
+++ b/openmp/libomptarget/include/PluginManager.h
@@ -69,7 +69,7 @@ struct PluginAdaptorTy {
/// Access to the shared object file representing the plugin.
std::unique_ptr<llvm::sys::DynamicLibrary> LibraryHandler;
-#define PLUGIN_API_HANDLE(NAME, MANDATORY) \
+#define PLUGIN_API_HANDLE(NAME) \
using NAME##_ty = decltype(__tgt_rtl_##NAME); \
NAME##_ty *NAME = nullptr;
@@ -114,8 +114,10 @@ struct PluginManager {
// Unregister a shared library from all RTLs.
void unregisterLib(__tgt_bin_desc *Desc);
- void addDeviceImage(__tgt_bin_desc &TgtBinDesc, __tgt_device_image &TgtDeviceImage) {
- DeviceImages.emplace_back(std::make_unique<DeviceImageTy>(TgtBinDesc, TgtDeviceImage));
+ void addDeviceImage(__tgt_bin_desc &TgtBinDesc,
+ __tgt_device_image &TgtDeviceImage) {
+ DeviceImages.emplace_back(
+ std::make_unique<DeviceImageTy>(TgtBinDesc, TgtDeviceImage));
}
/// Return the device presented to the user as device \p DeviceNo if it is
@@ -206,6 +208,12 @@ private:
ProtectedObj<DeviceContainerTy> Devices;
};
+/// Initialize the plugin manager and OpenMP runtime.
+void initRuntime();
+
+/// Deinitialize the plugin and delete it.
+void deinitRuntime();
+
extern PluginManager *PM;
#endif // OMPTARGET_PLUGIN_MANAGER_H
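
Note (not part of the patch): with the MANDATORY argument gone, each entry listed in PluginAPI.inc now produces just a type alias and a member pointer inside PluginAdaptorTy. Hand-expanded for one entry, PLUGIN_API_HANDLE(init_plugin) becomes roughly:

    // Hand-expanded form of PLUGIN_API_HANDLE(init_plugin) after this change;
    // the real expansion is produced by the preprocessor in PluginManager.h.
    using init_plugin_ty = decltype(__tgt_rtl_init_plugin);
    init_plugin_ty *init_plugin = nullptr;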
diff --git a/openmp/libomptarget/include/Shared/PluginAPI.inc b/openmp/libomptarget/include/Shared/PluginAPI.inc
index 3b982e30307a..e445da6852f7 100644
--- a/openmp/libomptarget/include/Shared/PluginAPI.inc
+++ b/openmp/libomptarget/include/Shared/PluginAPI.inc
@@ -13,39 +13,39 @@
// No include guards!
-PLUGIN_API_HANDLE(init_plugin, true);
-PLUGIN_API_HANDLE(is_valid_binary, true);
-PLUGIN_API_HANDLE(is_data_exchangable, false);
-PLUGIN_API_HANDLE(number_of_devices, true);
-PLUGIN_API_HANDLE(init_device, true);
-PLUGIN_API_HANDLE(load_binary, true);
-PLUGIN_API_HANDLE(get_global, true);
-PLUGIN_API_HANDLE(get_function, true);
-PLUGIN_API_HANDLE(data_alloc, true);
-PLUGIN_API_HANDLE(data_submit, true);
-PLUGIN_API_HANDLE(data_submit_async, false);
-PLUGIN_API_HANDLE(data_retrieve, true);
-PLUGIN_API_HANDLE(data_retrieve_async, false);
-PLUGIN_API_HANDLE(data_exchange, false);
-PLUGIN_API_HANDLE(data_exchange_async, false);
-PLUGIN_API_HANDLE(data_delete, true);
-PLUGIN_API_HANDLE(launch_kernel, true);
-PLUGIN_API_HANDLE(init_requires, false);
-PLUGIN_API_HANDLE(synchronize, false);
-PLUGIN_API_HANDLE(query_async, false);
-PLUGIN_API_HANDLE(set_info_flag, false);
-PLUGIN_API_HANDLE(print_device_info, false);
-PLUGIN_API_HANDLE(create_event, false);
-PLUGIN_API_HANDLE(record_event, false);
-PLUGIN_API_HANDLE(wait_event, false);
-PLUGIN_API_HANDLE(sync_event, false);
-PLUGIN_API_HANDLE(destroy_event, false);
-PLUGIN_API_HANDLE(init_async_info, false);
-PLUGIN_API_HANDLE(init_device_info, false);
-PLUGIN_API_HANDLE(data_lock, false);
-PLUGIN_API_HANDLE(data_unlock, false);
-PLUGIN_API_HANDLE(data_notify_mapped, false);
-PLUGIN_API_HANDLE(data_notify_unmapped, false);
-PLUGIN_API_HANDLE(set_device_offset, false);
-PLUGIN_API_HANDLE(initialize_record_replay, false);
-PLUGIN_API_HANDLE(use_auto_zero_copy, false);
+PLUGIN_API_HANDLE(init_plugin);
+PLUGIN_API_HANDLE(is_valid_binary);
+PLUGIN_API_HANDLE(is_data_exchangable);
+PLUGIN_API_HANDLE(number_of_devices);
+PLUGIN_API_HANDLE(init_device);
+PLUGIN_API_HANDLE(load_binary);
+PLUGIN_API_HANDLE(get_global);
+PLUGIN_API_HANDLE(get_function);
+PLUGIN_API_HANDLE(data_alloc);
+PLUGIN_API_HANDLE(data_submit);
+PLUGIN_API_HANDLE(data_submit_async);
+PLUGIN_API_HANDLE(data_retrieve);
+PLUGIN_API_HANDLE(data_retrieve_async);
+PLUGIN_API_HANDLE(data_exchange);
+PLUGIN_API_HANDLE(data_exchange_async);
+PLUGIN_API_HANDLE(data_delete);
+PLUGIN_API_HANDLE(launch_kernel);
+PLUGIN_API_HANDLE(init_requires);
+PLUGIN_API_HANDLE(synchronize);
+PLUGIN_API_HANDLE(query_async);
+PLUGIN_API_HANDLE(set_info_flag);
+PLUGIN_API_HANDLE(print_device_info);
+PLUGIN_API_HANDLE(create_event);
+PLUGIN_API_HANDLE(record_event);
+PLUGIN_API_HANDLE(wait_event);
+PLUGIN_API_HANDLE(sync_event);
+PLUGIN_API_HANDLE(destroy_event);
+PLUGIN_API_HANDLE(init_async_info);
+PLUGIN_API_HANDLE(init_device_info);
+PLUGIN_API_HANDLE(data_lock);
+PLUGIN_API_HANDLE(data_unlock);
+PLUGIN_API_HANDLE(data_notify_mapped);
+PLUGIN_API_HANDLE(data_notify_unmapped);
+PLUGIN_API_HANDLE(set_device_offset);
+PLUGIN_API_HANDLE(initialize_record_replay);
+PLUGIN_API_HANDLE(use_auto_zero_copy);
diff --git a/openmp/libomptarget/include/Shared/SourceInfo.h b/openmp/libomptarget/include/Shared/SourceInfo.h
index 7ce5fd43efc0..711f06a04d01 100644
--- a/openmp/libomptarget/include/Shared/SourceInfo.h
+++ b/openmp/libomptarget/include/Shared/SourceInfo.h
@@ -13,6 +13,7 @@
#ifndef OMPTARGET_SHARED_SOURCE_INFO_H
#define OMPTARGET_SHARED_SOURCE_INFO_H
+#include <stdint.h>
#include <string>
#ifdef _WIN32
diff --git a/openmp/libomptarget/include/omptarget.h b/openmp/libomptarget/include/omptarget.h
index c4faa23427f1..9a2bd1340e3b 100644
--- a/openmp/libomptarget/include/omptarget.h
+++ b/openmp/libomptarget/include/omptarget.h
@@ -312,6 +312,12 @@ void *llvm_omp_target_dynamic_shared_alloc();
/// add the clauses of the requires directives in a given file
void __tgt_register_requires(int64_t Flags);
+/// Initializes the runtime library.
+void __tgt_rtl_init();
+
+/// Deinitializes the runtime library.
+void __tgt_rtl_deinit();
+
/// adds a target shared library to the target execution image
void __tgt_register_lib(__tgt_bin_desc *Desc);
diff --git a/openmp/libomptarget/plugins-nextgen/common/CMakeLists.txt b/openmp/libomptarget/plugins-nextgen/common/CMakeLists.txt
index 8ae3ff2a6d29..085d44307165 100644
--- a/openmp/libomptarget/plugins-nextgen/common/CMakeLists.txt
+++ b/openmp/libomptarget/plugins-nextgen/common/CMakeLists.txt
@@ -73,8 +73,12 @@ elseif(${LIBOMPTARGET_GPU_LIBC_SUPPORT})
find_library(llvmlibc_rpc_server NAMES llvmlibc_rpc_server
PATHS ${LIBOMPTARGET_LLVM_LIBRARY_DIR} NO_DEFAULT_PATH)
if(llvmlibc_rpc_server)
- target_link_libraries(PluginCommon PRIVATE llvmlibc_rpc_server)
+ target_link_libraries(PluginCommon PRIVATE ${llvmlibc_rpc_server})
target_compile_definitions(PluginCommon PRIVATE LIBOMPTARGET_RPC_SUPPORT)
+ # We may need to get the headers directly from the 'libc' source directory.
+ target_include_directories(PluginCommon PRIVATE
+ ${CMAKE_SOURCE_DIR}/../libc/utils/gpu/server
+ ${CMAKE_SOURCE_DIR}/../libc/include)
endif()
endif()
diff --git a/openmp/libomptarget/plugins-nextgen/common/src/RPC.cpp b/openmp/libomptarget/plugins-nextgen/common/src/RPC.cpp
index 54aced11b31c..cb6a5086bc4d 100644
--- a/openmp/libomptarget/plugins-nextgen/common/src/RPC.cpp
+++ b/openmp/libomptarget/plugins-nextgen/common/src/RPC.cpp
@@ -18,7 +18,8 @@
#if __has_include(<gpu-none-llvm/rpc_server.h>)
#include <gpu-none-llvm/rpc_server.h>
#elif defined(LIBOMPTARGET_RPC_SUPPORT)
-#include <rpc_server.h>
+// Just pull this out of the source if available.
+#include "rpc_server.h"
#endif
using namespace llvm;
diff --git a/openmp/libomptarget/src/OffloadRTL.cpp b/openmp/libomptarget/src/OffloadRTL.cpp
index 86ef0d5bc91c..dd75b1b18150 100644
--- a/openmp/libomptarget/src/OffloadRTL.cpp
+++ b/openmp/libomptarget/src/OffloadRTL.cpp
@@ -20,25 +20,39 @@
extern void llvm::omp::target::ompt::connectLibrary();
#endif
-__attribute__((constructor(101))) void init() {
+static std::mutex PluginMtx;
+static uint32_t RefCount = 0;
+
+void initRuntime() {
+ std::scoped_lock<decltype(PluginMtx)> Lock(PluginMtx);
Profiler::get();
TIMESCOPE();
- DP("Init offload library!\n");
-
- PM = new PluginManager();
+ if (PM == nullptr)
+ PM = new PluginManager();
+ RefCount++;
+ if (RefCount == 1) {
+ DP("Init offload library!\n");
#ifdef OMPT_SUPPORT
- // Initialize OMPT first
- llvm::omp::target::ompt::connectLibrary();
+ // Initialize OMPT first
+ llvm::omp::target::ompt::connectLibrary();
#endif
- PM->init();
-
- PM->registerDelayedLibraries();
+ PM->init();
+ PM->registerDelayedLibraries();
+ }
}
-__attribute__((destructor(101))) void deinit() {
- DP("Deinit offload library!\n");
- delete PM;
+void deinitRuntime() {
+ std::scoped_lock<decltype(PluginMtx)> Lock(PluginMtx);
+ assert(PM && "Runtime not initialized");
+
+ if (RefCount == 1) {
+ DP("Deinit offload library!\n");
+ delete PM;
+ PM = nullptr;
+ }
+
+ RefCount--;
}
diff --git a/openmp/libomptarget/src/PluginManager.cpp b/openmp/libomptarget/src/PluginManager.cpp
index 34f1f4969da3..928913275332 100644
--- a/openmp/libomptarget/src/PluginManager.cpp
+++ b/openmp/libomptarget/src/PluginManager.cpp
@@ -21,7 +21,7 @@
using namespace llvm;
using namespace llvm::sys;
-PluginManager *PM;
+PluginManager *PM = nullptr;
// List of all plugins that can support offloading.
static const char *RTLNames[] = {ENABLED_OFFLOAD_PLUGINS};
@@ -56,10 +56,10 @@ PluginAdaptorTy::PluginAdaptorTy(const std::string &Name,
Error PluginAdaptorTy::init() {
-#define PLUGIN_API_HANDLE(NAME, MANDATORY) \
+#define PLUGIN_API_HANDLE(NAME) \
NAME = reinterpret_cast<decltype(NAME)>( \
LibraryHandler->getAddressOfSymbol(GETNAME(__tgt_rtl_##NAME))); \
- if (MANDATORY && !NAME) { \
+ if (!NAME) { \
return createStringError(inconvertibleErrorCode(), \
"Invalid plugin as necessary interface function " \
"(%s) was not found.\n", \
diff --git a/openmp/libomptarget/src/exports b/openmp/libomptarget/src/exports
index af882a264264..d5432a9eed38 100644
--- a/openmp/libomptarget/src/exports
+++ b/openmp/libomptarget/src/exports
@@ -1,5 +1,7 @@
VERS1.0 {
global:
+ __tgt_rtl_init;
+ __tgt_rtl_deinit;
__tgt_register_requires;
__tgt_register_lib;
__tgt_unregister_lib;
diff --git a/openmp/libomptarget/src/interface.cpp b/openmp/libomptarget/src/interface.cpp
index d2707f39a1aa..8b89bc3ff712 100644
--- a/openmp/libomptarget/src/interface.cpp
+++ b/openmp/libomptarget/src/interface.cpp
@@ -38,9 +38,13 @@ EXTERN void __tgt_register_requires(int64_t Flags) {
__PRETTY_FUNCTION__);
}
+EXTERN void __tgt_rtl_init() { initRuntime(); }
+EXTERN void __tgt_rtl_deinit() { deinitRuntime(); }
+
////////////////////////////////////////////////////////////////////////////////
/// adds a target shared library to the target execution image
EXTERN void __tgt_register_lib(__tgt_bin_desc *Desc) {
+ initRuntime();
if (PM->delayRegisterLib(Desc))
return;
@@ -49,12 +53,17 @@ EXTERN void __tgt_register_lib(__tgt_bin_desc *Desc) {
////////////////////////////////////////////////////////////////////////////////
/// Initialize all available devices without registering any image
-EXTERN void __tgt_init_all_rtls() { PM->initAllPlugins(); }
+EXTERN void __tgt_init_all_rtls() {
+ assert(PM && "Runtime not initialized");
+ PM->initAllPlugins();
+}
////////////////////////////////////////////////////////////////////////////////
/// unloads a target shared library
EXTERN void __tgt_unregister_lib(__tgt_bin_desc *Desc) {
PM->unregisterLib(Desc);
+
+ deinitRuntime();
}
template <typename TargetAsyncInfoTy>
@@ -64,6 +73,7 @@ targetData(ident_t *Loc, int64_t DeviceId, int32_t ArgNum, void **ArgsBase,
map_var_info_t *ArgNames, void **ArgMappers,
TargetDataFuncPtrTy TargetDataFunction, const char *RegionTypeMsg,
const char *RegionName) {
+ assert(PM && "Runtime not initialized");
static_assert(std::is_convertible_v<TargetAsyncInfoTy, AsyncInfoTy>,
"TargetAsyncInfoTy must be convertible to AsyncInfoTy.");
@@ -239,6 +249,7 @@ template <typename TargetAsyncInfoTy>
static inline int targetKernel(ident_t *Loc, int64_t DeviceId, int32_t NumTeams,
int32_t ThreadLimit, void *HostPtr,
KernelArgsTy *KernelArgs) {
+ assert(PM && "Runtime not initialized");
static_assert(std::is_convertible_v<TargetAsyncInfoTy, AsyncInfoTy>,
"Target AsyncInfoTy must be convertible to AsyncInfoTy.");
DP("Entering target region for device %" PRId64 " with entry point " DPxMOD
@@ -345,6 +356,7 @@ EXTERN int __tgt_activate_record_replay(int64_t DeviceId, uint64_t MemorySize,
void *VAddr, bool IsRecord,
bool SaveOutput,
uint64_t &ReqPtrArgOffset) {
+ assert(PM && "Runtime not initialized");
OMPT_IF_BUILT(ReturnAddressSetterRAII RA(__builtin_return_address(0)));
auto DeviceOrErr = PM->getDevice(DeviceId);
if (!DeviceOrErr)
@@ -380,7 +392,7 @@ EXTERN int __tgt_target_kernel_replay(ident_t *Loc, int64_t DeviceId,
ptrdiff_t *TgtOffsets, int32_t NumArgs,
int32_t NumTeams, int32_t ThreadLimit,
uint64_t LoopTripCount) {
-
+ assert(PM && "Runtime not initialized");
OMPT_IF_BUILT(ReturnAddressSetterRAII RA(__builtin_return_address(0)));
if (checkDeviceAndCtors(DeviceId, Loc)) {
DP("Not offloading to device %" PRId64 "\n", DeviceId);
@@ -431,6 +443,7 @@ EXTERN void __tgt_push_mapper_component(void *RtMapperHandle, void *Base,
}
EXTERN void __tgt_set_info_flag(uint32_t NewInfoLevel) {
+ assert(PM && "Runtime not initialized");
std::atomic<uint32_t> &InfoLevel = getInfoLevelInternal();
InfoLevel.store(NewInfoLevel);
for (auto &R : PM->pluginAdaptors()) {
@@ -440,6 +453,7 @@ EXTERN void __tgt_set_info_flag(uint32_t NewInfoLevel) {
}
EXTERN int __tgt_print_device_info(int64_t DeviceId) {
+ assert(PM && "Runtime not initialized");
auto DeviceOrErr = PM->getDevice(DeviceId);
if (!DeviceOrErr)
FATAL_MESSAGE(DeviceId, "%s", toString(DeviceOrErr.takeError()).c_str());
@@ -448,7 +462,9 @@ EXTERN int __tgt_print_device_info(int64_t DeviceId) {
}
EXTERN void __tgt_target_nowait_query(void **AsyncHandle) {
+ assert(PM && "Runtime not initialized");
OMPT_IF_BUILT(ReturnAddressSetterRAII RA(__builtin_return_address(0)));
+
if (!AsyncHandle || !*AsyncHandle) {
FATAL_MESSAGE0(
1, "Receive an invalid async handle from the current OpenMP task. Is "
diff --git a/openmp/libomptarget/test/lit.cfg b/openmp/libomptarget/test/lit.cfg
index 565556e64ff2..6c590603079c 100644
--- a/openmp/libomptarget/test/lit.cfg
+++ b/openmp/libomptarget/test/lit.cfg
@@ -180,8 +180,12 @@ def remove_suffix_if_present(name):
def add_libraries(source):
if config.libomptarget_has_libc:
- return source + " " + config.llvm_library_dir + "/libcgpu.a " + \
- config.llvm_library_intdir + "/libomptarget.devicertl.a"
+ if config.libomptarget_current_target.startswith('nvptx'):
+ return source + " " + config.llvm_library_dir + "/libcgpu-nvptx.a " + \
+ config.llvm_library_intdir + "/libomptarget.devicertl.a"
+ elif config.libomptarget_current_target.startswith('amdgcn'):
+ return source + " " + config.llvm_library_dir + "/libcgpu-amdgpu.a " + \
+ config.llvm_library_intdir + "/libomptarget.devicertl.a"
return source + " " + config.llvm_library_intdir + "/libomptarget.devicertl.a"
# substitutions
diff --git a/openmp/libomptarget/test/offloading/runtime_init.c b/openmp/libomptarget/test/offloading/runtime_init.c
new file mode 100644
index 000000000000..96fd50f51da1
--- /dev/null
+++ b/openmp/libomptarget/test/offloading/runtime_init.c
@@ -0,0 +1,30 @@
+// RUN: %libomptarget-compile-generic
+// RUN: env LIBOMPTARGET_DEBUG=1 %libomptarget-run-generic 2>&1 \
+// RUN: %fcheck-generic
+
+// REQUIRES: libomptarget-debug
+
+#include <omp.h>
+#include <stdio.h>
+
+extern void __tgt_rtl_init(void);
+extern void __tgt_rtl_deinit(void);
+
+// Sanity checks to make sure that this works and is thread safe.
+int main() {
+ // CHECK: Init offload library!
+ // CHECK: Deinit offload library!
+ __tgt_rtl_init();
+#pragma omp parallel num_threads(8)
+ {
+ __tgt_rtl_init();
+ __tgt_rtl_deinit();
+ }
+ __tgt_rtl_deinit();
+
+ __tgt_rtl_init();
+ __tgt_rtl_deinit();
+
+ // CHECK: PASS
+ printf("PASS\n");
+}
diff --git a/utils/bazel/llvm-project-overlay/libc/BUILD.bazel b/utils/bazel/llvm-project-overlay/libc/BUILD.bazel
index 8d11fb9be188..09c53c9e8a13 100644
--- a/utils/bazel/llvm-project-overlay/libc/BUILD.bazel
+++ b/utils/bazel/llvm-project-overlay/libc/BUILD.bazel
@@ -613,14 +613,15 @@ libc_support_library(
libc_support_library(
name = "__support_fixed_point",
hdrs = [
- "src/__support/fixed_point/fx_rep.h",
"src/__support/fixed_point/fx_bits.h",
+ "src/__support/fixed_point/fx_rep.h",
],
deps = [
":__support_cpp_bit",
":__support_cpp_type_traits",
":__support_macros_attributes",
":__support_macros_optimization",
+ ":__support_math_extras",
":llvm_libc_macros_stdfix_macros",
],
)
diff --git a/utils/bazel/llvm-project-overlay/mlir/BUILD.bazel b/utils/bazel/llvm-project-overlay/mlir/BUILD.bazel
index bb7a34ef7677..853d136d9478 100644
--- a/utils/bazel/llvm-project-overlay/mlir/BUILD.bazel
+++ b/utils/bazel/llvm-project-overlay/mlir/BUILD.bazel
@@ -3231,9 +3231,7 @@ cc_library(
":Transforms",
":VectorToLLVM",
":VectorTransforms",
- ] + if_cuda_available([
- ":SerializeToCubin",
- ]),
+ ],
)
##---------------------------------------------------------------------------##
@@ -5504,9 +5502,6 @@ cc_library(
"lib/Dialect/GPU/Transforms/*.cpp",
"lib/Dialect/GPU/Transforms/*.h",
],
- exclude = [
- "lib/Dialect/GPU/Transforms/SerializeToCubin.cpp",
- ],
),
hdrs = glob(["include/mlir/Dialect/GPU/Transforms/*.h"]),
includes = ["include"],
@@ -5540,7 +5535,6 @@ cc_library(
":SCFDialect",
":SPIRVDialect",
":SPIRVTarget",
- ":SerializeToCubin_stub",
":SideEffectInterfaces",
":Support",
":ToLLVMIRTranslation",
@@ -5557,66 +5551,6 @@ cc_library(
]),
)
-cc_library(
- name = "SerializeToCubin",
- srcs = [
- "lib/Dialect/GPU/Transforms/SerializeToCubin.cpp",
- ],
- local_defines = if_cuda_available(["MLIR_GPU_TO_CUBIN_PASS_ENABLE"]),
- deps = [
- ":GPUDialect",
- ":GPUPassIncGen",
- ":GPUTransforms",
- ":NVVMDialect",
- ":NVVMToLLVMIRTranslation",
- ":Pass",
- ":Support",
- ":ToLLVMIRTranslation",
- "//llvm:Support",
- ] + if_cuda_available([
- "@cuda//:cuda_headers",
- "@cuda//:libcuda",
- ]),
-)
-
-write_file(
- name = "SerializeToCubin_stub_cc",
- out = "SerializeToCubin_stub.cc",
- content = [
- """
-#include "mlir/Dialect/GPU/Transforms/Passes.h"
-
-// Provide a weak registration stub in case the real SerializeToCubin is not
-// linked in.
-
-#if defined(_MSC_VER)
-// This might not work correctly, but it avoids a compilation error because
-// MSVC does not support __attribute__((weak)).
-void mlir::registerGpuSerializeToCubinPass() {}
-#else
-__attribute__((weak)) void mlir::registerGpuSerializeToCubinPass() {}
-#endif
-""",
- ],
-)
-
-cc_library(
- name = "SerializeToCubin_stub",
- srcs = [":SerializeToCubin_stub_cc"],
- hdrs = glob(["include/mlir/Dialect/GPU/Transforms/*.h"]),
- includes = ["include"],
- deps = [
- ":GPUDialect",
- ":GPUPassIncGen",
- ":IR",
- ":Pass",
- ":SPIRVDialect",
- ":Support",
- ":VectorDialect",
- "//llvm:Support",
- ],
-)
-
td_library(
name = "GPUTransformOpsTdFiles",
srcs = [
@@ -9229,7 +9163,6 @@ cc_binary(
":Pass",
":QuantOps",
":SCFToGPU",
- ":SerializeToCubin",
":Support",
":Transforms",
"//llvm:AllTargetsCodeGens",
@@ -9237,6 +9170,7 @@ cc_binary(
"//mlir/test:TestAffine",
"//mlir/test:TestAnalysis",
"//mlir/test:TestArith",
+ "//mlir/test:TestArmSME",
"//mlir/test:TestBufferization",
"//mlir/test:TestControlFlow",
"//mlir/test:TestDLTI",
@@ -13193,7 +13127,6 @@ cc_library(
],
)
-
##---------------------------------------------------------------------------##
# Allocation interfaces
##---------------------------------------------------------------------------##
diff --git a/utils/bazel/llvm-project-overlay/mlir/test/BUILD.bazel b/utils/bazel/llvm-project-overlay/mlir/test/BUILD.bazel
index c3bc3f196c55..68d9b23fd564 100644
--- a/utils/bazel/llvm-project-overlay/mlir/test/BUILD.bazel
+++ b/utils/bazel/llvm-project-overlay/mlir/test/BUILD.bazel
@@ -906,6 +906,25 @@ cc_library(
)
cc_library(
+ name = "TestArmSME",
+ srcs = glob(["lib/Dialect/ArmSME/*.cpp"]),
+ defines = ["MLIR_CUDA_CONVERSIONS_ENABLED"],
+ includes = ["lib/Dialect/Test"],
+ deps = [
+ "//mlir:ArithToArmSME",
+ "//mlir:ArmSMEToLLVM",
+ "//mlir:ArmSMEToSCF",
+ "//mlir:ArmSMETransforms",
+ "//mlir:ArmSVETransforms",
+ "//mlir:IR",
+ "//mlir:Pass",
+ "//mlir:Transforms",
+ "//mlir:VectorToArmSME",
+ "//mlir:VectorToSCF",
+ ],
+)
+
+cc_library(
name = "TestBufferization",
srcs = glob(["lib/Dialect/Bufferization/*.cpp"]),
defines = ["MLIR_CUDA_CONVERSIONS_ENABLED"],