author     Vitaly Buka <vitalybuka@google.com>  2024-01-08 11:48:45 -0800
committer  Vitaly Buka <vitalybuka@google.com>  2024-01-08 11:48:45 -0800
commit     b2845d6945cf560e26f98128d06b22e85953612d (patch)
tree       9a868b16f59dd198c831a1e7f3921efd7dc7d745
parent     c7e4065aad78f77d61be1d1ac674546cc62208d1 (diff)
parent     e7655ad605d77e206ec94b2cef59c41a508edba7 (diff)
Created using spr 1.3.4 [skip ci]
-rw-r--r--.github/workflows/libclang-python-tests.yml39
-rw-r--r--.github/workflows/llvm-project-tests.yml2
-rw-r--r--bolt/test/RISCV/relax.s2
-rw-r--r--clang-tools-extra/clang-tidy/cppcoreguidelines/MissingStdForwardCheck.cpp60
-rw-r--r--clang-tools-extra/docs/ReleaseNotes.rst4
-rw-r--r--clang-tools-extra/test/clang-tidy/checkers/cppcoreguidelines/missing-std-forward.cpp31
-rw-r--r--clang/docs/ReleaseNotes.rst5
-rw-r--r--clang/include/clang/Basic/DiagnosticSemaKinds.td10
-rw-r--r--clang/include/clang/Basic/OpenACCKinds.h3
-rw-r--r--clang/lib/AST/ASTImporter.cpp34
-rw-r--r--clang/lib/AST/FormatString.cpp1
-rw-r--r--clang/lib/AST/Interp/Interp.cpp4
-rw-r--r--clang/lib/AST/Interp/Interp.h6
-rw-r--r--clang/lib/Basic/Targets/RISCV.cpp19
-rw-r--r--clang/lib/Driver/ToolChains/MinGW.cpp25
-rw-r--r--clang/lib/Format/TokenAnnotator.cpp8
-rw-r--r--clang/lib/Lex/ModuleMap.cpp4
-rw-r--r--clang/lib/Parse/ParseOpenACC.cpp26
-rw-r--r--clang/lib/Sema/SemaInit.cpp52
-rw-r--r--clang/lib/Sema/SemaOverload.cpp94
-rw-r--r--clang/lib/Sema/SemaTemplateInstantiate.cpp25
-rw-r--r--clang/test/AST/Interp/arrays.cpp8
-rw-r--r--clang/test/CXX/over/over.load/p2-0x.cpp5
-rw-r--r--clang/test/CXX/temp/temp.arg/temp.arg.template/p3-2a.cpp29
-rw-r--r--clang/test/CodeGen/RISCV/riscv-func-attr-target.c8
-rw-r--r--clang/test/Driver/linker-wrapper-image.c2
-rw-r--r--clang/test/Driver/mingw-sysroot.cpp28
-rw-r--r--clang/test/Driver/riscv-rvv-vector-bits.c2
-rw-r--r--clang/test/Modules/Inputs/AutolinkTBD.framework/AutolinkTBD.tbd1
-rw-r--r--clang/test/Modules/Inputs/AutolinkTBD.framework/Headers/AutolinkTBD.h1
-rw-r--r--clang/test/Modules/autolinkTBD.m16
-rw-r--r--clang/test/Modules/autolink_private_module.m25
-rw-r--r--clang/test/Modules/explicit-specializations.cppm133
-rw-r--r--clang/test/ParserOpenACC/parse-clauses.c84
-rw-r--r--clang/test/Sema/attr-format-Float16.c16
-rw-r--r--clang/test/SemaCXX/attr-format-Float16.cpp24
-rw-r--r--clang/unittests/Format/FormatTest.cpp2
-rw-r--r--clang/unittests/Format/TokenAnnotatorTest.cpp9
-rw-r--r--compiler-rt/lib/msan/msan.h23
-rw-r--r--compiler-rt/lib/msan/msan_allocator.cpp29
-rw-r--r--flang/lib/Frontend/FrontendActions.cpp3
-rw-r--r--flang/lib/Optimizer/Transforms/AddDebugFoundation.cpp18
-rw-r--r--flang/test/Transforms/debug-line-table-existing.fir2
-rw-r--r--flang/test/Transforms/debug-line-table-inc-file.fir6
-rw-r--r--flang/test/Transforms/debug-line-table.fir12
-rw-r--r--flang/unittests/Runtime/ExternalIOTest.cpp6
-rw-r--r--libc/cmake/modules/LLVMLibCObjectRules.cmake4
-rw-r--r--libc/cmake/modules/LLVMLibCTestRules.cmake21
-rw-r--r--libc/docs/dev/code_style.rst8
-rw-r--r--libc/include/CMakeLists.txt40
-rw-r--r--libc/src/__support/HashTable/generic/bitmask_impl.inc3
-rw-r--r--libc/src/__support/HashTable/table.h2
-rw-r--r--libc/src/__support/StringUtil/CMakeLists.txt34
-rw-r--r--libc/src/__support/threads/linux/CMakeLists.txt2
-rw-r--r--libcxx/cmake/caches/Armv7M-picolibc.cmake2
-rw-r--r--libcxx/docs/FeatureTestMacroTable.rst2
-rw-r--r--libcxx/docs/ReleaseNotes/18.rst6
-rw-r--r--libcxx/docs/Status/Cxx20Issues.csv2
-rw-r--r--libcxx/docs/Status/Cxx2cPapers.csv2
-rw-r--r--libcxx/include/fstream50
-rw-r--r--libcxx/include/sstream6
-rw-r--r--libcxx/include/string2
-rw-r--r--libcxx/include/tuple32
-rw-r--r--libcxx/include/version2
-rw-r--r--libcxx/src/CMakeLists.txt1
-rw-r--r--libcxx/src/fstream.cpp37
-rw-r--r--libcxx/test/libcxx/utilities/expected/expected.expected/transform_error.mandates.verify.cpp27
-rw-r--r--libcxx/test/libcxx/utilities/expected/expected.void/transform_error.mandates.verify.cpp27
-rw-r--r--libcxx/test/std/input.output/file.streams/fstreams/filebuf.members/native_handle.assert.pass.cpp32
-rw-r--r--libcxx/test/std/input.output/file.streams/fstreams/filebuf.members/native_handle.pass.cpp58
-rw-r--r--libcxx/test/std/input.output/file.streams/fstreams/filebuf/types.pass.cpp9
-rw-r--r--libcxx/test/std/input.output/file.streams/fstreams/fstream.members/native_handle.assert.pass.cpp32
-rw-r--r--libcxx/test/std/input.output/file.streams/fstreams/fstream.members/native_handle.pass.cpp27
-rw-r--r--libcxx/test/std/input.output/file.streams/fstreams/fstream/types.pass.cpp9
-rw-r--r--libcxx/test/std/input.output/file.streams/fstreams/ifstream.members/native_handle.assert.pass.cpp32
-rw-r--r--libcxx/test/std/input.output/file.streams/fstreams/ifstream.members/native_handle.pass.cpp27
-rw-r--r--libcxx/test/std/input.output/file.streams/fstreams/ifstream/types.pass.cpp9
-rw-r--r--libcxx/test/std/input.output/file.streams/fstreams/native_handle_assert_test_helpers.h28
-rw-r--r--libcxx/test/std/input.output/file.streams/fstreams/native_handle_test_helpers.h84
-rw-r--r--libcxx/test/std/input.output/file.streams/fstreams/ofstream.members/native_handle.assert.pass.cpp32
-rw-r--r--libcxx/test/std/input.output/file.streams/fstreams/ofstream.members/native_handle.pass.cpp27
-rw-r--r--libcxx/test/std/input.output/file.streams/fstreams/ofstream/types.pass.cpp9
-rw-r--r--libcxx/test/std/input.output/file.streams/fstreams/types.h10
-rw-r--r--libcxx/test/std/language.support/support.limits/support.limits.general/fstream.version.compile.pass.cpp16
-rw-r--r--libcxx/test/std/language.support/support.limits/support.limits.general/version.version.compile.pass.cpp16
-rwxr-xr-xlibcxx/utils/generate_feature_test_macro_components.py1
-rwxr-xr-xlibcxx/utils/qemu_baremetal.py6
-rw-r--r--lld/ELF/Arch/AArch64.cpp3
-rw-r--r--lld/ELF/Arch/X86_64.cpp2
-rw-r--r--lld/ELF/SyntheticSections.cpp2
-rw-r--r--lld/ELF/Writer.cpp16
-rw-r--r--lld/ELF/Writer.h1
-rw-r--r--lld/test/ELF/aarch64-memtag-android-abi.s12
-rw-r--r--lld/test/ELF/eh-frame-nonzero-offset-riscv.s53
-rw-r--r--lld/test/ELF/linkerscript/overlay-reject.test13
-rw-r--r--lld/test/ELF/linkerscript/overlay-reject2.test17
-rw-r--r--lld/test/ELF/linkerscript/overlay.test100
-rw-r--r--lld/test/ELF/x86-64-tls-pie.s18
-rw-r--r--lldb/include/lldb/Breakpoint/BreakpointIDList.h6
-rw-r--r--lldb/source/Breakpoint/BreakpointIDList.cpp21
-rw-r--r--lldb/source/Commands/CommandObjectBreakpoint.cpp4
-rw-r--r--lldb/source/Plugins/SymbolFile/DWARF/DWARFASTParserClang.cpp21
-rw-r--r--lldb/source/Plugins/SymbolFile/DWARF/DWARFIndex.cpp20
-rw-r--r--lldb/source/Plugins/SymbolFile/DWARF/DWARFIndex.h14
-rw-r--r--lldb/source/Plugins/SymbolFile/DWARF/SymbolFileDWARF.cpp9
-rw-r--r--lldb/test/API/commands/expression/nested/TestNestedExpressions.py2
-rw-r--r--lldb/test/API/functionalities/inline-sourcefile/TestInlineSourceFiles.py2
-rw-r--r--lldb/test/Shell/SymbolFile/DWARF/Inputs/dwo-static-data-member.cpp8
-rw-r--r--lldb/test/Shell/SymbolFile/DWARF/dwo-static-data-member-access.test24
-rw-r--r--llvm/docs/LangRef.rst2
-rw-r--r--llvm/include/llvm/Analysis/MemoryBuiltins.h28
-rw-r--r--llvm/include/llvm/CodeGen/GlobalISel/IRTranslator.h4
-rw-r--r--llvm/include/llvm/CodeGen/SwitchLoweringUtils.h16
-rw-r--r--llvm/include/llvm/IR/PatternMatch.h2
-rw-r--r--llvm/include/llvm/Object/ELFTypes.h3
-rw-r--r--llvm/include/llvm/Passes/StandardInstrumentations.h2
-rw-r--r--llvm/lib/Analysis/InstructionSimplify.cpp2
-rw-r--r--llvm/lib/Analysis/MemorySSAUpdater.cpp17
-rw-r--r--llvm/lib/Analysis/VFABIDemangling.cpp9
-rw-r--r--llvm/lib/CodeGen/GlobalISel/IRTranslator.cpp79
-rw-r--r--llvm/lib/CodeGen/ReplaceWithVeclib.cpp160
-rw-r--r--llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp80
-rw-r--r--llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.h6
-rw-r--r--llvm/lib/CodeGen/SelectionDAG/TargetLowering.cpp27
-rw-r--r--llvm/lib/CodeGen/SwitchLoweringUtils.cpp81
-rw-r--r--llvm/lib/DWARFLinker/DWARFStreamer.cpp24
-rw-r--r--llvm/lib/DWARFLinkerParallel/DebugLineSectionEmitter.h13
-rw-r--r--llvm/lib/DWARFLinkerParallel/OutputSections.cpp4
-rw-r--r--llvm/lib/DWARFLinkerParallel/OutputSections.h2
-rw-r--r--llvm/lib/Object/ELF.cpp7
-rw-r--r--llvm/lib/Passes/StandardInstrumentations.cpp52
-rw-r--r--llvm/lib/Target/AArch64/AArch64ISelLowering.cpp4
-rw-r--r--llvm/lib/Target/AArch64/GISel/AArch64LegalizerInfo.cpp3
-rw-r--r--llvm/lib/Target/AArch64/GISel/AArch64LegalizerInfo.h1
-rw-r--r--llvm/lib/Target/AMDGPU/AMDGPUCombine.td9
-rw-r--r--llvm/lib/Target/AMDGPU/AMDGPUISelLowering.cpp16
-rw-r--r--llvm/lib/Target/AMDGPU/AMDGPULegalizerInfo.cpp24
-rw-r--r--llvm/lib/Target/AMDGPU/AMDGPULegalizerInfo.h1
-rw-r--r--llvm/lib/Target/AMDGPU/AMDGPUPostLegalizerCombiner.cpp34
-rw-r--r--llvm/lib/Target/AMDGPU/AMDGPURegisterBankInfo.cpp150
-rw-r--r--llvm/lib/Target/AMDGPU/AMDGPURegisterBankInfo.h3
-rw-r--r--llvm/lib/Target/AMDGPU/BUFInstructions.td6
-rw-r--r--llvm/lib/Target/AMDGPU/FLATInstructions.td39
-rw-r--r--llvm/lib/Target/AMDGPU/GCNSubtarget.h2
-rw-r--r--llvm/lib/Target/AMDGPU/SIISelLowering.cpp122
-rw-r--r--llvm/lib/Target/AMDGPU/SIISelLowering.h1
-rw-r--r--llvm/lib/Target/AMDGPU/SIInsertWaitcnts.cpp10
-rw-r--r--llvm/lib/Target/AMDGPU/SIInstrInfo.cpp194
-rw-r--r--llvm/lib/Target/AMDGPU/SIInstrInfo.h6
-rw-r--r--llvm/lib/Target/AMDGPU/SIInstructions.td68
-rw-r--r--llvm/lib/Target/AMDGPU/SIMemoryLegalizer.cpp70
-rw-r--r--llvm/lib/Target/AMDGPU/SOPInstructions.td10
-rw-r--r--llvm/lib/Target/AMDGPU/VOP2Instructions.td2
-rw-r--r--llvm/lib/Target/ARM/ARMLegalizerInfo.h1
-rw-r--r--llvm/lib/Target/LoongArch/LoongArchISelLowering.cpp22
-rw-r--r--llvm/lib/Target/LoongArch/LoongArchISelLowering.h3
-rw-r--r--llvm/lib/Target/M68k/GISel/M68kLegalizerInfo.h1
-rw-r--r--llvm/lib/Target/PowerPC/PPCRegisterInfo.td9
-rw-r--r--llvm/lib/Target/RISCV/AsmParser/RISCVAsmParser.cpp5
-rw-r--r--llvm/lib/Target/RISCV/GISel/RISCVCallLowering.cpp2
-rw-r--r--llvm/lib/Target/RISCV/GISel/RISCVLegalizerInfo.cpp2
-rw-r--r--llvm/lib/Target/RISCV/GISel/RISCVLegalizerInfo.h1
-rw-r--r--llvm/lib/Target/RISCV/MCTargetDesc/RISCVBaseInfo.h1
-rw-r--r--llvm/lib/Target/RISCV/MCTargetDesc/RISCVMCExpr.cpp2
-rw-r--r--llvm/lib/Target/RISCV/RISCVAsmPrinter.cpp3
-rw-r--r--llvm/lib/Target/RISCV/RISCVISelDAGToDAG.cpp4
-rw-r--r--llvm/lib/Target/RISCV/RISCVISelLowering.cpp15
-rw-r--r--llvm/lib/Target/RISCV/RISCVInsertVSETVLI.cpp5
-rw-r--r--llvm/lib/Target/RISCV/RISCVInstrInfo.cpp1
-rw-r--r--llvm/lib/Target/RISCV/RISCVInstrInfo.td17
-rw-r--r--llvm/lib/Target/RISCV/RISCVInstrInfoA.td12
-rw-r--r--llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td2
-rw-r--r--llvm/lib/Target/RISCV/RISCVInstrInfoZa.td44
-rw-r--r--llvm/lib/Target/SPIRV/SPIRVPreLegalizer.cpp35
-rw-r--r--llvm/lib/Target/SPIRV/SPIRVTargetMachine.cpp14
-rw-r--r--llvm/lib/Target/SPIRV/SPIRVUtils.cpp4
-rw-r--r--llvm/lib/Target/SPIRV/SPIRVUtils.h2
-rw-r--r--llvm/lib/Target/X86/CMakeLists.txt4
-rw-r--r--llvm/lib/Target/X86/GISel/X86LegalizerInfo.h1
-rw-r--r--llvm/lib/Target/X86/MCTargetDesc/X86BaseInfo.h16
-rw-r--r--llvm/lib/Target/X86/X86.h8
-rw-r--r--llvm/lib/Target/X86/X86CompressEVEX.cpp (renamed from llvm/lib/Target/X86/X86EvexToVex.cpp)168
-rw-r--r--llvm/lib/Target/X86/X86FlagsCopyLowering.cpp1
-rw-r--r--llvm/lib/Target/X86/X86ISelLowering.cpp2
-rw-r--r--llvm/lib/Target/X86/X86InstrAVX512.td248
-rw-r--r--llvm/lib/Target/X86/X86InstrFormats.td6
-rw-r--r--llvm/lib/Target/X86/X86InstrInfo.h4
-rw-r--r--llvm/lib/Target/X86/X86InstrUtils.td6
-rw-r--r--llvm/lib/Target/X86/X86MCInstLower.cpp7
-rw-r--r--llvm/lib/Target/X86/X86TargetMachine.cpp4
-rw-r--r--llvm/lib/Transforms/InstCombine/InstCombineCompares.cpp5
-rw-r--r--llvm/lib/Transforms/Instrumentation/InstrProfiling.cpp10
-rw-r--r--llvm/lib/Transforms/Scalar/ConstraintElimination.cpp36
-rw-r--r--llvm/lib/Transforms/Utils/SCCPSolver.cpp10
-rw-r--r--llvm/lib/Transforms/Vectorize/LoopVectorize.cpp7
-rw-r--r--llvm/lib/Transforms/Vectorize/SLPVectorizer.cpp16
-rw-r--r--llvm/lib/Transforms/Vectorize/VPlan.h29
-rw-r--r--llvm/lib/Transforms/Vectorize/VPlanRecipes.cpp5
-rw-r--r--llvm/test/CodeGen/AArch64/GlobalISel/combine-ext-debugloc.mir2
-rw-r--r--llvm/test/CodeGen/AArch64/GlobalISel/irtranslator-switch-split.ll87
-rw-r--r--llvm/test/CodeGen/AArch64/GlobalISel/postlegalizercombiner-extending-loads.mir25
-rw-r--r--llvm/test/CodeGen/AArch64/arm64-zip.ll2
-rw-r--r--llvm/test/CodeGen/AArch64/replace-with-veclib-armpl.ll (renamed from llvm/test/CodeGen/AArch64/replace-intrinsics-with-veclib-armpl.ll)42
-rw-r--r--llvm/test/CodeGen/AArch64/replace-with-veclib-sleef-scalable.ll (renamed from llvm/test/CodeGen/AArch64/replace-intrinsics-with-veclib-sleef-scalable.ll)20
-rw-r--r--llvm/test/CodeGen/AArch64/replace-with-veclib-sleef.ll (renamed from llvm/test/CodeGen/AArch64/replace-intrinsics-with-veclib-sleef.ll)20
-rw-r--r--llvm/test/CodeGen/AArch64/vecreduce-add.ll68
-rw-r--r--llvm/test/CodeGen/AArch64/vselect-ext.ll14
-rw-r--r--llvm/test/CodeGen/AMDGPU/GlobalISel/legalize-mul.mir787
-rw-r--r--llvm/test/CodeGen/AMDGPU/GlobalISel/mubuf-global.ll30
-rw-r--r--llvm/test/CodeGen/AMDGPU/GlobalISel/mul.ll819
-rw-r--r--llvm/test/CodeGen/AMDGPU/GlobalISel/postlegalizercombiner-mul.mir60
-rw-r--r--llvm/test/CodeGen/AMDGPU/GlobalISel/regbankselect-mul.mir122
-rw-r--r--llvm/test/CodeGen/AMDGPU/atomic_optimizations_global_pointer.ll196
-rw-r--r--llvm/test/CodeGen/AMDGPU/atomicrmw-expand.ll9
-rw-r--r--llvm/test/CodeGen/AMDGPU/bf16.ll6645
-rw-r--r--llvm/test/CodeGen/AMDGPU/flat_atomics_i64.ll313
-rw-r--r--llvm/test/CodeGen/AMDGPU/global-saddr-load.ll12
-rw-r--r--llvm/test/CodeGen/AMDGPU/global_atomics_i64.ll277
-rw-r--r--llvm/test/CodeGen/AMDGPU/mul.ll831
-rw-r--r--llvm/test/CodeGen/AMDGPU/waitcnt-global-inv-wb.mir29
-rw-r--r--llvm/test/CodeGen/LoongArch/global-variable-code-model.ll44
-rw-r--r--llvm/test/CodeGen/PowerPC/intrinsic-trap.ll10
-rw-r--r--llvm/test/CodeGen/PowerPC/pr47155-47156.ll12
-rw-r--r--llvm/test/CodeGen/RISCV/GlobalISel/instruction-select/bswap-rv32.mir15
-rw-r--r--llvm/test/CodeGen/RISCV/GlobalISel/instruction-select/bswap-rv64.mir15
-rw-r--r--llvm/test/CodeGen/RISCV/GlobalISel/irtranslator/calling-conv-ilp32-ilp32f-common.ll8
-rw-r--r--llvm/test/CodeGen/RISCV/GlobalISel/irtranslator/calling-conv-ilp32-ilp32f-ilp32d-common.ll30
-rw-r--r--llvm/test/CodeGen/RISCV/GlobalISel/irtranslator/calling-conv-ilp32.ll6
-rw-r--r--llvm/test/CodeGen/RISCV/GlobalISel/irtranslator/calling-conv-ilp32d.ll12
-rw-r--r--llvm/test/CodeGen/RISCV/GlobalISel/irtranslator/calling-conv-ilp32f-ilp32d-common.ll20
-rw-r--r--llvm/test/CodeGen/RISCV/GlobalISel/irtranslator/calling-conv-lp64-lp64f-common.ll8
-rw-r--r--llvm/test/CodeGen/RISCV/GlobalISel/irtranslator/calling-conv-lp64-lp64f-lp64d-common.ll30
-rw-r--r--llvm/test/CodeGen/RISCV/GlobalISel/irtranslator/calling-conv-lp64.ll6
-rw-r--r--llvm/test/CodeGen/RISCV/GlobalISel/irtranslator/calling-conv-lp64d.ll10
-rw-r--r--llvm/test/CodeGen/RISCV/GlobalISel/irtranslator/calls.ll64
-rw-r--r--llvm/test/CodeGen/RISCV/GlobalISel/irtranslator/vararg.ll70
-rw-r--r--llvm/test/CodeGen/RISCV/GlobalISel/irtranslator/variadic-call.ll4
-rw-r--r--llvm/test/CodeGen/RISCV/GlobalISel/legalizer/legalize-bswap-rv32.mir58
-rw-r--r--llvm/test/CodeGen/RISCV/GlobalISel/legalizer/legalize-bswap-rv64.mir58
-rw-r--r--llvm/test/CodeGen/RISCV/GlobalISel/legalizer/legalize-div-rv32.mir32
-rw-r--r--llvm/test/CodeGen/RISCV/GlobalISel/legalizer/legalize-div-rv64.mir36
-rw-r--r--llvm/test/CodeGen/RISCV/GlobalISel/legalizer/legalize-fp-ceil-floor.mir8
-rw-r--r--llvm/test/CodeGen/RISCV/GlobalISel/legalizer/legalize-mul-rv32.mir12
-rw-r--r--llvm/test/CodeGen/RISCV/GlobalISel/legalizer/legalize-mul-rv64.mir14
-rw-r--r--llvm/test/CodeGen/RISCV/GlobalISel/legalizer/legalize-mulo-rv32.mir12
-rw-r--r--llvm/test/CodeGen/RISCV/GlobalISel/legalizer/legalize-mulo-rv64.mir16
-rw-r--r--llvm/test/CodeGen/RISCV/GlobalISel/legalizer/legalize-rem-rv32.mir32
-rw-r--r--llvm/test/CodeGen/RISCV/GlobalISel/legalizer/legalize-rem-rv64.mir36
-rw-r--r--llvm/test/CodeGen/RISCV/GlobalISel/vararg.ll24
-rw-r--r--llvm/test/CodeGen/RISCV/addrspacecast.ll4
-rw-r--r--llvm/test/CodeGen/RISCV/aext-to-sext.ll2
-rw-r--r--llvm/test/CodeGen/RISCV/alloca.ll6
-rw-r--r--llvm/test/CodeGen/RISCV/analyze-branch.ll8
-rw-r--r--llvm/test/CodeGen/RISCV/atomic-cmpxchg.ll180
-rw-r--r--llvm/test/CodeGen/RISCV/atomic-load-store.ll144
-rw-r--r--llvm/test/CodeGen/RISCV/atomic-rmw-discard.ll18
-rw-r--r--llvm/test/CodeGen/RISCV/atomic-rmw-sub.ll20
-rw-r--r--llvm/test/CodeGen/RISCV/atomic-rmw.ll1070
-rw-r--r--llvm/test/CodeGen/RISCV/atomic-signext.ll278
-rw-r--r--llvm/test/CodeGen/RISCV/atomicrmw-uinc-udec-wrap.ll36
-rw-r--r--llvm/test/CodeGen/RISCV/bf16-promote.ll14
-rw-r--r--llvm/test/CodeGen/RISCV/bfloat-br-fcmp.ll68
-rw-r--r--llvm/test/CodeGen/RISCV/bfloat-convert.ll74
-rw-r--r--llvm/test/CodeGen/RISCV/bfloat-frem.ll4
-rw-r--r--llvm/test/CodeGen/RISCV/bfloat-mem.ll8
-rw-r--r--llvm/test/CodeGen/RISCV/bfloat.ll76
-rw-r--r--llvm/test/CodeGen/RISCV/bittest.ll200
-rw-r--r--llvm/test/CodeGen/RISCV/byval.ll2
-rw-r--r--llvm/test/CodeGen/RISCV/callee-saved-fpr32s.ll12
-rw-r--r--llvm/test/CodeGen/RISCV/callee-saved-fpr64s.ll8
-rw-r--r--llvm/test/CodeGen/RISCV/callee-saved-gprs.ll32
-rw-r--r--llvm/test/CodeGen/RISCV/calling-conv-half.ll96
-rw-r--r--llvm/test/CodeGen/RISCV/calling-conv-ilp32-ilp32f-common.ll16
-rw-r--r--llvm/test/CodeGen/RISCV/calling-conv-ilp32-ilp32f-ilp32d-common.ll48
-rw-r--r--llvm/test/CodeGen/RISCV/calling-conv-ilp32.ll16
-rw-r--r--llvm/test/CodeGen/RISCV/calling-conv-ilp32d.ll12
-rw-r--r--llvm/test/CodeGen/RISCV/calling-conv-ilp32f-ilp32d-common.ll10
-rw-r--r--llvm/test/CodeGen/RISCV/calling-conv-lp64-lp64f-common.ll6
-rw-r--r--llvm/test/CodeGen/RISCV/calling-conv-lp64-lp64f-lp64d-common.ll24
-rw-r--r--llvm/test/CodeGen/RISCV/calling-conv-lp64.ll16
-rw-r--r--llvm/test/CodeGen/RISCV/calling-conv-rv32f-ilp32.ll4
-rw-r--r--llvm/test/CodeGen/RISCV/calling-conv-sext-zext.ll36
-rw-r--r--llvm/test/CodeGen/RISCV/calling-conv-vector-on-stack.ll2
-rw-r--r--llvm/test/CodeGen/RISCV/calls.ll20
-rw-r--r--llvm/test/CodeGen/RISCV/cm_mvas_mvsa.ll48
-rw-r--r--llvm/test/CodeGen/RISCV/condops.ll36
-rw-r--r--llvm/test/CodeGen/RISCV/copysign-casts.ll10
-rw-r--r--llvm/test/CodeGen/RISCV/ctlz-cttz-ctpop.ll60
-rw-r--r--llvm/test/CodeGen/RISCV/ctz_zero_return_test.ll50
-rw-r--r--llvm/test/CodeGen/RISCV/div-by-constant.ll10
-rw-r--r--llvm/test/CodeGen/RISCV/div.ll112
-rw-r--r--llvm/test/CodeGen/RISCV/double-arith-strict.ll96
-rw-r--r--llvm/test/CodeGen/RISCV/double-arith.ll160
-rw-r--r--llvm/test/CodeGen/RISCV/double-br-fcmp.ll136
-rw-r--r--llvm/test/CodeGen/RISCV/double-calling-conv.ll12
-rw-r--r--llvm/test/CodeGen/RISCV/double-convert-strict.ll92
-rw-r--r--llvm/test/CodeGen/RISCV/double-convert.ll264
-rw-r--r--llvm/test/CodeGen/RISCV/double-fcmp-strict.ll128
-rw-r--r--llvm/test/CodeGen/RISCV/double-fcmp.ll64
-rw-r--r--llvm/test/CodeGen/RISCV/double-frem.ll8
-rw-r--r--llvm/test/CodeGen/RISCV/double-intrinsics-strict.ll284
-rw-r--r--llvm/test/CodeGen/RISCV/double-intrinsics.ll230
-rw-r--r--llvm/test/CodeGen/RISCV/double-mem.ll16
-rw-r--r--llvm/test/CodeGen/RISCV/double-previous-failure.ll12
-rw-r--r--llvm/test/CodeGen/RISCV/double-round-conv-sat.ll96
-rw-r--r--llvm/test/CodeGen/RISCV/double-round-conv.ll100
-rw-r--r--llvm/test/CodeGen/RISCV/double-stack-spill-restore.ll8
-rw-r--r--llvm/test/CodeGen/RISCV/eh-dwarf-cfa.ll4
-rw-r--r--llvm/test/CodeGen/RISCV/emutls.ll12
-rw-r--r--llvm/test/CodeGen/RISCV/exception-pointer-register.ll12
-rw-r--r--llvm/test/CodeGen/RISCV/fastcc-float.ll2
-rw-r--r--llvm/test/CodeGen/RISCV/fastcc-int.ll4
-rw-r--r--llvm/test/CodeGen/RISCV/fastcc-without-f-reg.ll24
-rw-r--r--llvm/test/CodeGen/RISCV/fli-licm.ll4
-rw-r--r--llvm/test/CodeGen/RISCV/float-arith-strict.ll96
-rw-r--r--llvm/test/CodeGen/RISCV/float-arith.ll164
-rw-r--r--llvm/test/CodeGen/RISCV/float-bit-preserving-dagcombines.ll48
-rw-r--r--llvm/test/CodeGen/RISCV/float-br-fcmp.ll160
-rw-r--r--llvm/test/CodeGen/RISCV/float-convert-strict.ll84
-rw-r--r--llvm/test/CodeGen/RISCV/float-convert.ll256
-rw-r--r--llvm/test/CodeGen/RISCV/float-fcmp-strict.ll128
-rw-r--r--llvm/test/CodeGen/RISCV/float-fcmp.ll64
-rw-r--r--llvm/test/CodeGen/RISCV/float-frem.ll12
-rw-r--r--llvm/test/CodeGen/RISCV/float-intrinsics-strict.ll284
-rw-r--r--llvm/test/CodeGen/RISCV/float-intrinsics.ll220
-rw-r--r--llvm/test/CodeGen/RISCV/float-mem.ll16
-rw-r--r--llvm/test/CodeGen/RISCV/float-round-conv-sat.ll48
-rw-r--r--llvm/test/CodeGen/RISCV/float-round-conv.ll40
-rw-r--r--llvm/test/CodeGen/RISCV/float-select-verify.ll4
-rw-r--r--llvm/test/CodeGen/RISCV/float-zfa.ll2
-rw-r--r--llvm/test/CodeGen/RISCV/fmax-fmin.ll36
-rw-r--r--llvm/test/CodeGen/RISCV/fold-addi-loadstore.ll8
-rw-r--r--llvm/test/CodeGen/RISCV/forced-atomics.ll584
-rw-r--r--llvm/test/CodeGen/RISCV/fp128.ll6
-rw-r--r--llvm/test/CodeGen/RISCV/fp16-promote.ll20
-rw-r--r--llvm/test/CodeGen/RISCV/fpclamptosat.ll212
-rw-r--r--llvm/test/CodeGen/RISCV/frame-info.ll24
-rw-r--r--llvm/test/CodeGen/RISCV/frame.ll4
-rw-r--r--llvm/test/CodeGen/RISCV/frameaddr-returnaddr.ll4
-rw-r--r--llvm/test/CodeGen/RISCV/ghccc-rv32.ll2
-rw-r--r--llvm/test/CodeGen/RISCV/ghccc-rv64.ll2
-rw-r--r--llvm/test/CodeGen/RISCV/ghccc-without-f-reg.ll4
-rw-r--r--llvm/test/CodeGen/RISCV/half-arith.ll704
-rw-r--r--llvm/test/CodeGen/RISCV/half-br-fcmp.ll272
-rw-r--r--llvm/test/CodeGen/RISCV/half-convert-strict.ll88
-rw-r--r--llvm/test/CodeGen/RISCV/half-convert.ll874
-rw-r--r--llvm/test/CodeGen/RISCV/half-frem.ll16
-rw-r--r--llvm/test/CodeGen/RISCV/half-intrinsics.ll536
-rw-r--r--llvm/test/CodeGen/RISCV/half-mem.ll32
-rw-r--r--llvm/test/CodeGen/RISCV/half-round-conv-sat.ll96
-rw-r--r--llvm/test/CodeGen/RISCV/half-round-conv.ll80
-rw-r--r--llvm/test/CodeGen/RISCV/hoist-global-addr-base.ll4
-rw-r--r--llvm/test/CodeGen/RISCV/interrupt-attr-callee.ll18
-rw-r--r--llvm/test/CodeGen/RISCV/interrupt-attr-nocall.ll12
-rw-r--r--llvm/test/CodeGen/RISCV/interrupt-attr.ll24
-rw-r--r--llvm/test/CodeGen/RISCV/intrinsic-cttz-elts-vscale.ll2
-rw-r--r--llvm/test/CodeGen/RISCV/libcall-tail-calls.ll136
-rw-r--r--llvm/test/CodeGen/RISCV/live-sp.mir4
-rw-r--r--llvm/test/CodeGen/RISCV/llvm.exp10.ll212
-rw-r--r--llvm/test/CodeGen/RISCV/llvm.frexp.ll348
-rw-r--r--llvm/test/CodeGen/RISCV/machine-outliner-and-machine-copy-propagation.ll20
-rw-r--r--llvm/test/CodeGen/RISCV/machine-outliner-throw.ll8
-rw-r--r--llvm/test/CodeGen/RISCV/machinelicm-address-pseudos.ll4
-rw-r--r--llvm/test/CodeGen/RISCV/macro-fusion-lui-addi.ll6
-rw-r--r--llvm/test/CodeGen/RISCV/make-compressible.mir24
-rw-r--r--llvm/test/CodeGen/RISCV/mem.ll2
-rw-r--r--llvm/test/CodeGen/RISCV/mem64.ll2
-rw-r--r--llvm/test/CodeGen/RISCV/memcpy.ll20
-rw-r--r--llvm/test/CodeGen/RISCV/mir-target-flags.ll8
-rw-r--r--llvm/test/CodeGen/RISCV/miss-sp-restore-eh.ll8
-rw-r--r--llvm/test/CodeGen/RISCV/mul.ll54
-rw-r--r--llvm/test/CodeGen/RISCV/nest-register.ll6
-rw-r--r--llvm/test/CodeGen/RISCV/nomerge.ll10
-rw-r--r--llvm/test/CodeGen/RISCV/opt-w-instrs.mir90
-rw-r--r--llvm/test/CodeGen/RISCV/out-of-reach-emergency-slot.mir6
-rw-r--r--llvm/test/CodeGen/RISCV/overflow-intrinsics.ll8
-rw-r--r--llvm/test/CodeGen/RISCV/pr51206.ll2
-rw-r--r--llvm/test/CodeGen/RISCV/pr63816.ll16
-rw-r--r--llvm/test/CodeGen/RISCV/push-pop-popret.ll144
-rw-r--r--llvm/test/CodeGen/RISCV/reduce-unnecessary-extension.ll16
-rw-r--r--llvm/test/CodeGen/RISCV/regalloc-last-chance-recoloring-failure.ll4
-rw-r--r--llvm/test/CodeGen/RISCV/rem.ll72
-rw-r--r--llvm/test/CodeGen/RISCV/remat.ll8
-rw-r--r--llvm/test/CodeGen/RISCV/rv32i-rv64i-float-double.ll16
-rw-r--r--llvm/test/CodeGen/RISCV/rv32i-rv64i-half.ll28
-rw-r--r--llvm/test/CodeGen/RISCV/rv32xtheadbb.ll12
-rw-r--r--llvm/test/CodeGen/RISCV/rv32zbb.ll30
-rw-r--r--llvm/test/CodeGen/RISCV/rv64-large-stack.ll2
-rw-r--r--llvm/test/CodeGen/RISCV/rv64-legal-i32/div.ll50
-rw-r--r--llvm/test/CodeGen/RISCV/rv64-legal-i32/mem64.ll2
-rw-r--r--llvm/test/CodeGen/RISCV/rv64-legal-i32/rem.ll32
-rw-r--r--llvm/test/CodeGen/RISCV/rv64-legal-i32/rv64xtheadbb.ll30
-rw-r--r--llvm/test/CodeGen/RISCV/rv64-legal-i32/rv64zbb.ll28
-rw-r--r--llvm/test/CodeGen/RISCV/rv64-legal-i32/rv64zbs.ll4
-rw-r--r--llvm/test/CodeGen/RISCV/rv64i-complex-float.ll4
-rw-r--r--llvm/test/CodeGen/RISCV/rv64i-double-softfloat.ll8
-rw-r--r--llvm/test/CodeGen/RISCV/rv64i-single-softfloat.ll4
-rw-r--r--llvm/test/CodeGen/RISCV/rv64xtheadbb.ll22
-rw-r--r--llvm/test/CodeGen/RISCV/rv64zbb.ll36
-rw-r--r--llvm/test/CodeGen/RISCV/rv64zbs.ll4
-rw-r--r--llvm/test/CodeGen/RISCV/rvv/addi-rvv-stack-object.mir2
-rw-r--r--llvm/test/CodeGen/RISCV/rvv/calling-conv-fastcc.ll12
-rw-r--r--llvm/test/CodeGen/RISCV/rvv/calling-conv.ll4
-rw-r--r--llvm/test/CodeGen/RISCV/rvv/fixed-vectors-calling-conv-fastcc.ll16
-rw-r--r--llvm/test/CodeGen/RISCV/rvv/fixed-vectors-calling-conv.ll40
-rw-r--r--llvm/test/CodeGen/RISCV/rvv/fixed-vectors-emergency-slot.mir4
-rw-r--r--llvm/test/CodeGen/RISCV/rvv/fixed-vectors-extract.ll4
-rw-r--r--llvm/test/CodeGen/RISCV/rvv/fixed-vectors-llrint.ll100
-rw-r--r--llvm/test/CodeGen/RISCV/rvv/fixed-vectors-reduction-int-vp.ll28
-rw-r--r--llvm/test/CodeGen/RISCV/rvv/fpclamptosat_vec.ll480
-rw-r--r--llvm/test/CodeGen/RISCV/rvv/large-rvv-stack-size.mir4
-rw-r--r--llvm/test/CodeGen/RISCV/rvv/localvar.ll4
-rw-r--r--llvm/test/CodeGen/RISCV/rvv/memory-args.ll2
-rw-r--r--llvm/test/CodeGen/RISCV/rvv/no-reserved-frame.ll2
-rw-r--r--llvm/test/CodeGen/RISCV/rvv/pr63596.ll8
-rw-r--r--llvm/test/CodeGen/RISCV/rvv/reg-alloc-reserve-bp.ll2
-rw-r--r--llvm/test/CodeGen/RISCV/rvv/rv32-spill-vector-csr.ll4
-rw-r--r--llvm/test/CodeGen/RISCV/rvv/rv64-spill-vector-csr.ll6
-rw-r--r--llvm/test/CodeGen/RISCV/rvv/rvv-args-by-mem.ll2
-rw-r--r--llvm/test/CodeGen/RISCV/rvv/rvv-stack-align.mir18
-rw-r--r--llvm/test/CodeGen/RISCV/rvv/scalar-stack-align.ll4
-rw-r--r--llvm/test/CodeGen/RISCV/rvv/vsetvli-insert-crossbb.ll4
-rw-r--r--llvm/test/CodeGen/RISCV/rvv/vsetvli-insert.mir57
-rw-r--r--llvm/test/CodeGen/RISCV/rvv/vxrm-insert.ll4
-rw-r--r--llvm/test/CodeGen/RISCV/rvv/wrong-stack-offset-for-rvv-object.mir4
-rw-r--r--llvm/test/CodeGen/RISCV/select-and.ll8
-rw-r--r--llvm/test/CodeGen/RISCV/select-cc.ll4
-rw-r--r--llvm/test/CodeGen/RISCV/select-or.ll8
-rw-r--r--llvm/test/CodeGen/RISCV/setcc-logic.ll112
-rw-r--r--llvm/test/CodeGen/RISCV/sextw-removal.ll64
-rw-r--r--llvm/test/CodeGen/RISCV/shadowcallstack.ll28
-rw-r--r--llvm/test/CodeGen/RISCV/shifts.ll6
-rw-r--r--llvm/test/CodeGen/RISCV/short-forward-branch-opt.ll16
-rw-r--r--llvm/test/CodeGen/RISCV/shrinkwrap-jump-table.ll12
-rw-r--r--llvm/test/CodeGen/RISCV/shrinkwrap.ll16
-rw-r--r--llvm/test/CodeGen/RISCV/split-sp-adjust.ll4
-rw-r--r--llvm/test/CodeGen/RISCV/split-udiv-by-constant.ll8
-rw-r--r--llvm/test/CodeGen/RISCV/split-urem-by-constant.ll8
-rw-r--r--llvm/test/CodeGen/RISCV/srem-lkk.ll30
-rw-r--r--llvm/test/CodeGen/RISCV/srem-seteq-illegal-types.ll32
-rw-r--r--llvm/test/CodeGen/RISCV/srem-vector-lkk.ll110
-rw-r--r--llvm/test/CodeGen/RISCV/stack-protector-target.ll4
-rw-r--r--llvm/test/CodeGen/RISCV/stack-realignment-with-variable-sized-objects.ll4
-rw-r--r--llvm/test/CodeGen/RISCV/stack-realignment.ll64
-rw-r--r--llvm/test/CodeGen/RISCV/stack-slot-size.ll12
-rw-r--r--llvm/test/CodeGen/RISCV/stack-store-check.ll24
-rw-r--r--llvm/test/CodeGen/RISCV/tls-models.ll8
-rw-r--r--llvm/test/CodeGen/RISCV/unfold-masked-merge-scalar-variablemask.ll8
-rw-r--r--llvm/test/CodeGen/RISCV/urem-lkk.ll22
-rw-r--r--llvm/test/CodeGen/RISCV/urem-seteq-illegal-types.ll24
-rw-r--r--llvm/test/CodeGen/RISCV/urem-vector-lkk.ll102
-rw-r--r--llvm/test/CodeGen/RISCV/vararg.ll60
-rw-r--r--llvm/test/CodeGen/RISCV/vector-abi.ll4
-rw-r--r--llvm/test/CodeGen/RISCV/vlenb.ll4
-rw-r--r--llvm/test/CodeGen/RISCV/zbb-cmp-combine.ll8
-rw-r--r--llvm/test/CodeGen/RISCV/zcmp-with-float.ll12
-rw-r--r--llvm/test/CodeGen/RISCV/zfh-half-intrinsics-strict.ll96
-rw-r--r--llvm/test/CodeGen/RISCV/zfhmin-half-intrinsics-strict.ll96
-rw-r--r--llvm/test/CodeGen/SPIRV/scfg-add-pre-headers.ll66
-rw-r--r--llvm/test/CodeGen/SystemZ/vec-perm-14.ll8
-rw-r--r--llvm/test/CodeGen/X86/O0-pipeline.ll2
-rw-r--r--llvm/test/CodeGen/X86/avx2-gather.ll106
-rw-r--r--llvm/test/CodeGen/X86/avx2-nontemporal.ll68
-rw-r--r--llvm/test/CodeGen/X86/combine-bextr.ll48
-rw-r--r--llvm/test/CodeGen/X86/crc32-intrinsics-fast-isel-x86.ll6
-rw-r--r--llvm/test/CodeGen/X86/crc32-intrinsics-fast-isel-x86_64.ll4
-rw-r--r--llvm/test/CodeGen/X86/crc32-intrinsics-x86.ll6
-rw-r--r--llvm/test/CodeGen/X86/crc32-intrinsics-x86_64.ll4
-rw-r--r--llvm/test/CodeGen/X86/evex-to-vex-compress.mir2
-rw-r--r--llvm/test/CodeGen/X86/ftrunc.ll720
-rw-r--r--llvm/test/CodeGen/X86/i64-mem-copy.ll186
-rw-r--r--llvm/test/CodeGen/X86/inline-sse.ll28
-rw-r--r--llvm/test/CodeGen/X86/invpcid-intrinsic.ll4
-rw-r--r--llvm/test/CodeGen/X86/lea-2.ll34
-rw-r--r--llvm/test/CodeGen/X86/legalize-shl-vec.ll332
-rw-r--r--llvm/test/CodeGen/X86/movdir-intrinsic-x86.ll4
-rw-r--r--llvm/test/CodeGen/X86/movdir-intrinsic-x86_64.ll2
-rw-r--r--llvm/test/CodeGen/X86/opt-pipeline.ll2
-rw-r--r--llvm/test/CodeGen/X86/sha.ll30
-rw-r--r--llvm/test/CodeGen/X86/test-shrink-bug.ll4
-rw-r--r--llvm/test/CodeGen/X86/vec_extract-avx.ll122
-rw-r--r--llvm/test/CodeGen/X86/vec_extract-mmx.ll66
-rw-r--r--llvm/test/CodeGen/X86/vec_extract-sse4.ll50
-rw-r--r--llvm/test/CodeGen/X86/vec_extract.ll86
-rw-r--r--llvm/test/CodeGen/X86/vec_setcc.ll2
-rw-r--r--llvm/test/CodeGen/X86/vector-lzcnt-256.ll394
-rw-r--r--llvm/test/CodeGen/X86/vector-shuffle-mmx.ll84
-rw-r--r--llvm/test/CodeGen/X86/vector-tzcnt-256.ll302
-rw-r--r--llvm/test/CodeGen/X86/x64-cet-intrinsics.ll8
-rw-r--r--llvm/test/DebugInfo/X86/sdag-dangling-dbgvalue.ll10
-rw-r--r--llvm/test/Instrumentation/AddressSanitizer/asan-stack-safety.ll18
-rw-r--r--llvm/test/Instrumentation/AddressSanitizer/debug_info_noninstrumented_alloca2.ll11
-rw-r--r--llvm/test/Instrumentation/AddressSanitizer/hoist-argument-init-insts.ll23
-rw-r--r--llvm/test/Instrumentation/AddressSanitizer/lifetime.ll34
-rw-r--r--llvm/test/Instrumentation/InstrProfiling/platform.ll3
-rw-r--r--llvm/test/Instrumentation/InstrProfiling/profiling.ll19
-rw-r--r--llvm/test/MC/AMDGPU/gfx12_asm_vflat.s27
-rw-r--r--llvm/test/MC/Disassembler/AMDGPU/gfx12_dasm_vflat.txt27
-rw-r--r--llvm/test/MC/RISCV/function-call.s4
-rw-r--r--llvm/test/MC/RISCV/tail-call.s2
-rw-r--r--llvm/test/Other/print-at-pass-number.ll10
-rw-r--r--llvm/test/Transforms/ConstraintElimination/monotonic-int-phis-signed.ll18
-rw-r--r--llvm/test/Transforms/InstCombine/icmp.ll45
-rw-r--r--llvm/test/Transforms/InstSimplify/select.ll19
-rw-r--r--llvm/test/Transforms/LoopVectorize/X86/drop-poison-generating-flags.ll51
-rw-r--r--llvm/test/Transforms/LoopVectorize/reduction-small-size.ll5
-rw-r--r--llvm/test/Transforms/PhaseOrdering/AArch64/hoisting-sinking-required-for-vectorization.ll21
-rw-r--r--llvm/test/Transforms/SCCP/switch.ll91
-rw-r--r--llvm/test/Transforms/SLPVectorizer/X86/splat-buildvector.ll25
-rw-r--r--llvm/test/Transforms/SimpleLoopUnswitch/memssa-readnone-access.ll117
-rw-r--r--llvm/test/tools/dsymutil/ARM/dwarf5-dwarf4-combination-macho.test12
-rw-r--r--llvm/test/tools/dsymutil/ARM/dwarf5-macho.test9
-rw-r--r--llvm/test/tools/llvm-dwarfdump/AArch64/verify-no-file.yaml1
-rw-r--r--llvm/unittests/Analysis/VectorFunctionABITest.cpp48
-rw-r--r--llvm/unittests/Object/ELFObjectFileTest.cpp37
-rw-r--r--llvm/utils/TableGen/CMakeLists.txt2
-rw-r--r--llvm/utils/TableGen/X86CompressEVEXTablesEmitter.cpp206
-rw-r--r--llvm/utils/TableGen/X86EVEX2VEXTablesEmitter.cpp210
-rw-r--r--llvm/utils/TableGen/X86FoldTablesEmitter.cpp6
-rw-r--r--llvm/utils/TableGen/X86ManualCompressEVEXTables.def331
-rw-r--r--llvm/utils/gn/secondary/libcxx/src/BUILD.gn1
-rw-r--r--llvm/utils/gn/secondary/llvm/lib/Target/X86/BUILD.gn8
-rw-r--r--llvm/utils/gn/secondary/llvm/utils/TableGen/BUILD.gn4
-rw-r--r--mlir/examples/toy/Ch6/toyc.cpp3
-rw-r--r--mlir/examples/toy/Ch7/toyc.cpp3
-rw-r--r--mlir/include/mlir/Analysis/Presburger/Barvinok.h84
-rw-r--r--mlir/include/mlir/Analysis/Presburger/GeneratingFunction.h (renamed from mlir/lib/Analysis/Presburger/GeneratingFunction.h)9
-rw-r--r--mlir/include/mlir/Analysis/Presburger/IntegerRelation.h5
-rw-r--r--mlir/include/mlir/Dialect/Bufferization/Transforms/BufferUtils.h53
-rw-r--r--mlir/include/mlir/Dialect/GPU/IR/GPUOps.td53
-rw-r--r--mlir/include/mlir/Dialect/LLVMIR/LLVMAttrDefs.td12
-rw-r--r--mlir/include/mlir/Dialect/LLVMIR/Transforms/Passes.td2
-rw-r--r--mlir/include/mlir/Dialect/SPIRV/IR/SPIRVAtomicOps.td214
-rw-r--r--mlir/include/mlir/Dialect/SPIRV/IR/SPIRVStructureOps.td23
-rw-r--r--mlir/include/mlir/ExecutionEngine/RunnerUtils.h4
-rw-r--r--mlir/include/mlir/IR/Dominance.h15
-rw-r--r--mlir/include/mlir/Interfaces/ControlFlowInterfaces.td4
-rw-r--r--mlir/lib/Analysis/Presburger/Barvinok.cpp65
-rw-r--r--mlir/lib/Analysis/Presburger/CMakeLists.txt1
-rw-r--r--mlir/lib/Analysis/Presburger/IntegerRelation.cpp34
-rw-r--r--mlir/lib/Conversion/GPUCommon/GPUOpsLowering.cpp20
-rw-r--r--mlir/lib/Conversion/GPUCommon/GPUOpsLowering.h13
-rw-r--r--mlir/lib/Conversion/GPUToNVVM/LowerGpuOpsToNVVMOps.cpp4
-rw-r--r--mlir/lib/Dialect/Bufferization/Transforms/BufferOptimizations.cpp28
-rw-r--r--mlir/lib/Dialect/Bufferization/Transforms/BufferUtils.cpp47
-rw-r--r--mlir/lib/Dialect/Bufferization/Transforms/BufferViewFlowAnalysis.cpp2
-rw-r--r--mlir/lib/Dialect/Bufferization/Transforms/EmptyTensorElimination.cpp7
-rw-r--r--mlir/lib/Dialect/GPU/IR/GPUDialect.cpp88
-rw-r--r--mlir/lib/Dialect/GPU/Transforms/KernelOutlining.cpp20
-rw-r--r--mlir/lib/Dialect/LLVMIR/IR/LLVMInlining.cpp7
-rw-r--r--mlir/lib/Dialect/LLVMIR/Transforms/DIScopeForLLVMFuncOp.cpp121
-rw-r--r--mlir/lib/Dialect/SPIRV/IR/AtomicOps.cpp291
-rw-r--r--mlir/lib/Dialect/SPIRV/IR/SPIRVDialect.cpp43
-rw-r--r--mlir/lib/Dialect/SPIRV/IR/SPIRVOps.cpp67
-rw-r--r--mlir/lib/Interfaces/ControlFlowInterfaces.cpp46
-rw-r--r--mlir/lib/Target/LLVMIR/DebugImporter.cpp15
-rw-r--r--mlir/lib/Target/SPIRV/Deserialization/Deserializer.cpp58
-rw-r--r--mlir/lib/Target/SPIRV/Deserialization/Deserializer.h13
-rw-r--r--mlir/lib/Target/SPIRV/Serialization/SerializeOps.cpp53
-rw-r--r--mlir/lib/Target/SPIRV/Serialization/Serializer.cpp66
-rw-r--r--mlir/lib/Target/SPIRV/Serialization/Serializer.h3
-rw-r--r--mlir/lib/Transforms/Utils/GreedyPatternRewriteDriver.cpp34
-rw-r--r--mlir/python/mlir/ir.py5
-rw-r--r--mlir/test/Conversion/GPUToNVVM/gpu-to-nvvm.mlir9
-rw-r--r--mlir/test/Conversion/MemRefToSPIRV/alloc.mlir4
-rw-r--r--mlir/test/Conversion/MemRefToSPIRV/atomic.mlir14
-rw-r--r--mlir/test/Conversion/MemRefToSPIRV/bitwidth-emulation.mlir28
-rw-r--r--mlir/test/Conversion/SCFToGPU/no_blocks_no_threads.mlir4
-rw-r--r--mlir/test/Conversion/SPIRVToLLVM/spirv-storage-class-mapping.mlir2
-rw-r--r--mlir/test/Dialect/GPU/invalid.mlir2
-rw-r--r--mlir/test/Dialect/GPU/outlining.mlir74
-rw-r--r--mlir/test/Dialect/LLVMIR/add-debuginfo-func-scope.mlir48
-rw-r--r--mlir/test/Dialect/LLVMIR/call-location.mlir2
-rw-r--r--mlir/test/Dialect/LLVMIR/debuginfo.mlir6
-rw-r--r--mlir/test/Dialect/LLVMIR/global.mlir4
-rw-r--r--mlir/test/Dialect/LLVMIR/inlining.mlir25
-rw-r--r--mlir/test/Dialect/LLVMIR/invalid-call-location.mlir4
-rw-r--r--mlir/test/Dialect/LLVMIR/loop-metadata.mlir2
-rw-r--r--mlir/test/Dialect/LLVMIR/mem2reg-dbginfo.mlir2
-rw-r--r--mlir/test/Dialect/SPIRV/IR/atomic-ops.mlir88
-rw-r--r--mlir/test/Dialect/SPIRV/IR/availability.mlir2
-rw-r--r--mlir/test/Dialect/SPIRV/IR/cast-ops.mlir2
-rw-r--r--mlir/test/Dialect/SPIRV/IR/function-decorations.mlir56
-rw-r--r--mlir/test/Dialect/SPIRV/IR/target-env.mlir2
-rw-r--r--mlir/test/Dialect/SPIRV/Transforms/inlining.mlir4
-rw-r--r--mlir/test/Dialect/SPIRV/Transforms/vce-deduction.mlir2
-rw-r--r--mlir/test/Target/LLVMIR/Import/debug-info.ll62
-rw-r--r--mlir/test/Target/LLVMIR/Import/global-variables.ll4
-rw-r--r--mlir/test/Target/LLVMIR/llvmir-debug.mlir22
-rw-r--r--mlir/test/Target/LLVMIR/loop-metadata.mlir2
-rw-r--r--mlir/test/Target/SPIRV/atomic-ops.mlir60
-rw-r--r--mlir/test/Target/SPIRV/cast-ops.mlir2
-rw-r--r--mlir/test/Target/SPIRV/debug.mlir2
-rw-r--r--mlir/test/Target/SPIRV/function-decorations.mlir71
-rw-r--r--mlir/test/Transforms/canonicalize.mlir11
-rw-r--r--mlir/test/python/dialects/memref.py12
-rw-r--r--mlir/unittests/Analysis/Presburger/BarvinokTest.cpp48
-rw-r--r--mlir/unittests/Analysis/Presburger/CMakeLists.txt2
-rw-r--r--mlir/unittests/Analysis/Presburger/GeneratingFunctionTest.cpp39
-rw-r--r--mlir/unittests/Analysis/Presburger/IntegerRelationTest.cpp244
-rw-r--r--mlir/unittests/Analysis/Presburger/Utils.h36
-rw-r--r--openmp/CMakeLists.txt3
-rw-r--r--openmp/cmake/OpenMPTesting.cmake3
-rw-r--r--openmp/libomptarget/plugins-nextgen/amdgpu/CMakeLists.txt8
-rw-r--r--openmp/libomptarget/plugins-nextgen/amdgpu/src/rtl.cpp13
-rw-r--r--openmp/libomptarget/plugins-nextgen/amdgpu/utils/UtilitiesRTL.h4
-rw-r--r--openmp/libomptarget/plugins-nextgen/common/src/PluginInterface.cpp1
-rw-r--r--openmp/runtime/CMakeLists.txt21
-rw-r--r--openmp/runtime/cmake/LibompGetArchitecture.cmake2
-rw-r--r--openmp/runtime/cmake/config-ix.cmake3
-rw-r--r--openmp/runtime/src/CMakeLists.txt6
-rw-r--r--openmp/runtime/src/kmp.h7
-rw-r--r--openmp/runtime/src/kmp_config.h.cmake2
-rw-r--r--openmp/runtime/src/kmp_ftn_entry.h2
-rw-r--r--openmp/runtime/src/kmp_global.cpp2
-rw-r--r--openmp/runtime/src/kmp_gsupport.cpp3
-rw-r--r--openmp/runtime/src/kmp_os.h9
-rw-r--r--openmp/runtime/src/kmp_platform.h32
-rw-r--r--openmp/runtime/src/kmp_runtime.cpp8
-rw-r--r--openmp/runtime/src/kmp_settings.cpp4
-rw-r--r--openmp/runtime/src/kmp_wrapper_getpid.h5
-rw-r--r--openmp/runtime/src/z_Linux_util.cpp21
-rw-r--r--openmp/runtime/test/lit.cfg12
-rw-r--r--polly/lib/Transform/ScheduleOptimizer.cpp25
-rw-r--r--polly/test/ScheduleOptimizer/schedule_computeout.ll99
-rw-r--r--utils/bazel/llvm-project-overlay/llvm/BUILD.bazel3
626 files changed, 20665 insertions, 15240 deletions
diff --git a/.github/workflows/libclang-python-tests.yml b/.github/workflows/libclang-python-tests.yml
new file mode 100644
index 000000000000..73edb6cf3bad
--- /dev/null
+++ b/.github/workflows/libclang-python-tests.yml
@@ -0,0 +1,39 @@
+name: Libclang Python Binding Tests
+
+permissions:
+ contents: read
+
+on:
+ workflow_dispatch:
+ push:
+ paths:
+ - 'clang/bindings/python/**'
+ - 'clang/tools/libclang/**'
+ - 'clang/CMakeList.txt'
+ - '.github/workflows/libclang-python-tests.yml'
+ - '.github/workflows/llvm-project-tests.yml'
+ pull_request:
+ paths:
+ - 'clang/bindings/python/**'
+ - 'clang/tools/libclang/**'
+ - 'clang/CMakeList.txt'
+ - '.github/workflows/libclang-python-tests.yml'
+ - '.github/workflows/llvm-project-tests.yml'
+
+concurrency:
+ # Skip intermediate builds: always.
+ # Cancel intermediate builds: only if it is a pull request build.
+ group: ${{ github.workflow }}-${{ github.ref }}
+ cancel-in-progress: ${{ startsWith(github.ref, 'refs/pull/') }}
+
+jobs:
+ check-clang-python:
+ # Build libclang and then run the libclang Python binding's unit tests.
+ name: Build and run Python unit tests
+ uses: ./.github/workflows/llvm-project-tests.yml
+ with:
+ build_target: check-clang-python
+ projects: clang
+ # There is an issue running on "windows-2019".
+ # See https://github.com/llvm/llvm-project/issues/76601#issuecomment-1873049082.
+ os_list: '["ubuntu-latest"]'
diff --git a/.github/workflows/llvm-project-tests.yml b/.github/workflows/llvm-project-tests.yml
index 02b1ab75e960..fadaea129e50 100644
--- a/.github/workflows/llvm-project-tests.yml
+++ b/.github/workflows/llvm-project-tests.yml
@@ -87,7 +87,7 @@ jobs:
# enough cache space for all the tests to run at once and still
# fit under the 10 GB limit.
max-size: 500M
- key: sccache-${{ matrix.os }}
+ key: ${{ matrix.os }}
variant: sccache
- name: Build and Test
uses: llvm/actions/build-test-llvm-project@main
diff --git a/bolt/test/RISCV/relax.s b/bolt/test/RISCV/relax.s
index bf9287e1d0d8..ec390ea76b5c 100644
--- a/bolt/test/RISCV/relax.s
+++ b/bolt/test/RISCV/relax.s
@@ -6,7 +6,7 @@
// CHECK: Binary Function "_start" after building cfg {
// CHECK: jal ra, near_f
-// CHECK-NEXT: auipc ra, far_f@plt
+// CHECK-NEXT: auipc ra, far_f
// CHECK-NEXT: jalr ra, 0xc(ra)
// CHECK-NEXT: j near_f
diff --git a/clang-tools-extra/clang-tidy/cppcoreguidelines/MissingStdForwardCheck.cpp b/clang-tools-extra/clang-tidy/cppcoreguidelines/MissingStdForwardCheck.cpp
index 0b85ea19735e..370de12999ac 100644
--- a/clang-tools-extra/clang-tidy/cppcoreguidelines/MissingStdForwardCheck.cpp
+++ b/clang-tools-extra/clang-tidy/cppcoreguidelines/MissingStdForwardCheck.cpp
@@ -53,16 +53,72 @@ AST_MATCHER(ParmVarDecl, isTemplateTypeParameter) {
FuncTemplate->getTemplateParameters()->getDepth();
}
+AST_MATCHER_P(NamedDecl, hasSameNameAsBoundNode, std::string, BindingID) {
+ IdentifierInfo *II = Node.getIdentifier();
+ if (nullptr == II)
+ return false;
+ StringRef Name = II->getName();
+
+ return Builder->removeBindings(
+ [this, Name](const ast_matchers::internal::BoundNodesMap &Nodes) {
+ const DynTypedNode &BN = Nodes.getNode(this->BindingID);
+ if (const auto *ND = BN.get<NamedDecl>()) {
+ if (!isa<FieldDecl, CXXMethodDecl, VarDecl>(ND))
+ return true;
+ return ND->getName() != Name;
+ }
+ return true;
+ });
+}
+
+AST_MATCHER_P(LambdaCapture, hasCaptureKind, LambdaCaptureKind, Kind) {
+ return Node.getCaptureKind() == Kind;
+}
+
+AST_MATCHER_P(LambdaExpr, hasCaptureDefaultKind, LambdaCaptureDefault, Kind) {
+ return Node.getCaptureDefault() == Kind;
+}
+
} // namespace
void MissingStdForwardCheck::registerMatchers(MatchFinder *Finder) {
+ auto RefToParmImplicit = allOf(
+ equalsBoundNode("var"), hasInitializer(ignoringParenImpCasts(
+ declRefExpr(to(equalsBoundNode("param"))))));
+ auto RefToParm = capturesVar(
+ varDecl(anyOf(hasSameNameAsBoundNode("param"), RefToParmImplicit)));
+ auto HasRefToParm = hasAnyCapture(RefToParm);
+
+ auto CaptureInRef =
+ allOf(hasCaptureDefaultKind(LambdaCaptureDefault::LCD_ByRef),
+ unless(hasAnyCapture(
+ capturesVar(varDecl(hasSameNameAsBoundNode("param"))))));
+ auto CaptureInCopy = allOf(
+ hasCaptureDefaultKind(LambdaCaptureDefault::LCD_ByCopy), HasRefToParm);
+ auto CaptureByRefExplicit = hasAnyCapture(
+ allOf(hasCaptureKind(LambdaCaptureKind::LCK_ByRef), RefToParm));
+
+ auto CapturedInBody =
+ lambdaExpr(anyOf(CaptureInRef, CaptureInCopy, CaptureByRefExplicit));
+ auto CapturedInCaptureList = hasAnyCapture(capturesVar(
+ varDecl(hasInitializer(ignoringParenImpCasts(equalsBoundNode("call"))))));
+
+ auto CapturedInLambda = hasDeclContext(cxxRecordDecl(
+ isLambda(),
+ hasParent(lambdaExpr(forCallable(equalsBoundNode("func")),
+ anyOf(CapturedInCaptureList, CapturedInBody)))));
+
auto ToParam = hasAnyParameter(parmVarDecl(equalsBoundNode("param")));
auto ForwardCallMatcher = callExpr(
- forCallable(equalsBoundNode("func")), argumentCountIs(1),
+ callExpr().bind("call"), argumentCountIs(1),
+ hasArgument(
+ 0, declRefExpr(to(
+ varDecl(optionally(equalsBoundNode("param"))).bind("var")))),
+ forCallable(anyOf(equalsBoundNode("func"), CapturedInLambda)),
callee(unresolvedLookupExpr(hasAnyDeclaration(
namedDecl(hasUnderlyingDecl(hasName("::std::forward")))))),
- hasArgument(0, declRefExpr(to(equalsBoundNode("param"))).bind("ref")),
+
unless(anyOf(hasAncestor(typeLoc()),
hasAncestor(expr(hasUnevaluatedContext())))));
diff --git a/clang-tools-extra/docs/ReleaseNotes.rst b/clang-tools-extra/docs/ReleaseNotes.rst
index 08ade306b5a0..1bd5a72126c1 100644
--- a/clang-tools-extra/docs/ReleaseNotes.rst
+++ b/clang-tools-extra/docs/ReleaseNotes.rst
@@ -301,6 +301,10 @@ Changes in existing checks
coroutine functions and increase issue detection for cases involving type
aliases with references.
+- Improved :doc:`cppcoreguidelines-missing-std-forward
+ <clang-tidy/checks/cppcoreguidelines/missing-std-forward>` check to
+ address false positives in the capture list and body of lambdas.
+
- Improved :doc:`cppcoreguidelines-narrowing-conversions
<clang-tidy/checks/cppcoreguidelines/narrowing-conversions>` check by
extending the `IgnoreConversionFromTypes` option to include types without a
diff --git a/clang-tools-extra/test/clang-tidy/checkers/cppcoreguidelines/missing-std-forward.cpp b/clang-tools-extra/test/clang-tidy/checkers/cppcoreguidelines/missing-std-forward.cpp
index b9720db272e4..443f338ba204 100644
--- a/clang-tools-extra/test/clang-tidy/checkers/cppcoreguidelines/missing-std-forward.cpp
+++ b/clang-tools-extra/test/clang-tidy/checkers/cppcoreguidelines/missing-std-forward.cpp
@@ -90,9 +90,9 @@ void lambda_value_capture(T&& t) {
}
template <class T>
-void lambda_value_reference(T&& t) {
- // CHECK-MESSAGES: :[[@LINE-1]]:33: warning: forwarding reference parameter 't' is never forwarded inside the function body [cppcoreguidelines-missing-std-forward]
- [&]() { T other = std::forward<T>(t); };
+void lambda_value_capture_copy(T&& t) {
+ // CHECK-MESSAGES: :[[@LINE-1]]:36: warning: forwarding reference parameter 't' is never forwarded inside the function body [cppcoreguidelines-missing-std-forward]
+ [&,t]() { T other = std::forward<T>(t); };
}
} // namespace positive_cases
@@ -147,4 +147,29 @@ class AClass {
T data;
};
+template <class T>
+void lambda_value_reference(T&& t) {
+ [&]() { T other = std::forward<T>(t); };
+}
+
+template<typename T>
+void lambda_value_reference_capture_list_ref_1(T&& t) {
+ [=, &t] { T other = std::forward<T>(t); };
+}
+
+template<typename T>
+void lambda_value_reference_capture_list_ref_2(T&& t) {
+ [&t] { T other = std::forward<T>(t); };
+}
+
+template<typename T>
+void lambda_value_reference_capture_list(T&& t) {
+ [t = std::forward<T>(t)] { t(); };
+}
+
+template <class T>
+void lambda_value_reference_auxiliary_var(T&& t) {
+ [&x = t]() { T other = std::forward<T>(x); };
+}
+
} // namespace negative_cases
diff --git a/clang/docs/ReleaseNotes.rst b/clang/docs/ReleaseNotes.rst
index 9b6e00b23121..c9b577bd549b 100644
--- a/clang/docs/ReleaseNotes.rst
+++ b/clang/docs/ReleaseNotes.rst
@@ -857,6 +857,11 @@ Bug Fixes to C++ Support
(`#64607 <https://github.com/llvm/llvm-project/issues/64607>`_)
(`#64086 <https://github.com/llvm/llvm-project/issues/64086>`_)
+- Fixed a regression where clang forgets how to substitute into constraints on template-template
+ parameters. Fixes:
+ (`#57410 <https://github.com/llvm/llvm-project/issues/57410>`_) and
+ (`#76604 <https://github.com/llvm/llvm-project/issues/57410>`_)
+
Bug Fixes to AST Handling
^^^^^^^^^^^^^^^^^^^^^^^^^
- Fixed an import failure of recursive friend class template.
diff --git a/clang/include/clang/Basic/DiagnosticSemaKinds.td b/clang/include/clang/Basic/DiagnosticSemaKinds.td
index d150e08d5f5e..a97182cad5d5 100644
--- a/clang/include/clang/Basic/DiagnosticSemaKinds.td
+++ b/clang/include/clang/Basic/DiagnosticSemaKinds.td
@@ -6160,23 +6160,19 @@ def err_illegal_initializer_type : Error<"illegal initializer type %0">;
def ext_init_list_type_narrowing : ExtWarn<
"type %0 cannot be narrowed to %1 in initializer list">,
InGroup<CXX11Narrowing>, DefaultError, SFINAEFailure;
-// *_narrowing_const_reference diagnostics have the same messages, but are
-// controlled by -Wc++11-narrowing-const-reference for narrowing involving a
-// const reference.
def ext_init_list_type_narrowing_const_reference : ExtWarn<
- "type %0 cannot be narrowed to %1 in initializer list">,
+ ext_init_list_type_narrowing.Summary>,
InGroup<CXX11NarrowingConstReference>, DefaultError, SFINAEFailure;
def ext_init_list_variable_narrowing : ExtWarn<
"non-constant-expression cannot be narrowed from type %0 to %1 in "
"initializer list">, InGroup<CXX11Narrowing>, DefaultError, SFINAEFailure;
def ext_init_list_variable_narrowing_const_reference : ExtWarn<
- "non-constant-expression cannot be narrowed from type %0 to %1 in "
- "initializer list">, InGroup<CXX11NarrowingConstReference>, DefaultError, SFINAEFailure;
+ ext_init_list_variable_narrowing.Summary>, InGroup<CXX11NarrowingConstReference>, DefaultError, SFINAEFailure;
def ext_init_list_constant_narrowing : ExtWarn<
"constant expression evaluates to %0 which cannot be narrowed to type %1">,
InGroup<CXX11Narrowing>, DefaultError, SFINAEFailure;
def ext_init_list_constant_narrowing_const_reference : ExtWarn<
- "constant expression evaluates to %0 which cannot be narrowed to type %1">,
+ ext_init_list_constant_narrowing.Summary>,
InGroup<CXX11NarrowingConstReference>, DefaultError, SFINAEFailure;
def warn_init_list_type_narrowing : Warning<
"type %0 cannot be narrowed to %1 in initializer list in C++11">,
diff --git a/clang/include/clang/Basic/OpenACCKinds.h b/clang/include/clang/Basic/OpenACCKinds.h
index 3eb0bf84208f..f6a628db29cf 100644
--- a/clang/include/clang/Basic/OpenACCKinds.h
+++ b/clang/include/clang/Basic/OpenACCKinds.h
@@ -93,6 +93,9 @@ enum class OpenACCClauseKind {
/// 'default' clause, allowed on parallel, serial, kernel (and compound)
/// constructs.
Default,
+ /// 'if' clause, allowed on all the Compute Constructs, Data Constructs,
+ /// Executable Constructs, and Combined Constructs.
+ If,
/// Represents an invalid clause, for the purposes of parsing.
Invalid,
};
diff --git a/clang/lib/AST/ASTImporter.cpp b/clang/lib/AST/ASTImporter.cpp
index 9ffae72346f2..5e5570bb42a1 100644
--- a/clang/lib/AST/ASTImporter.cpp
+++ b/clang/lib/AST/ASTImporter.cpp
@@ -2034,23 +2034,25 @@ ASTNodeImporter::ImportDeclContext(DeclContext *FromDC, bool ForceImport) {
return ToDCOrErr.takeError();
}
- DeclContext *ToDC = *ToDCOrErr;
- // Remove all declarations, which may be in wrong order in the
- // lexical DeclContext and then add them in the proper order.
- for (auto *D : FromDC->decls()) {
- if (!MightNeedReordering(D))
- continue;
+ if (const auto *FromRD = dyn_cast<RecordDecl>(FromDC)) {
+ DeclContext *ToDC = *ToDCOrErr;
+ // Remove all declarations, which may be in wrong order in the
+ // lexical DeclContext and then add them in the proper order.
+ for (auto *D : FromRD->decls()) {
+ if (!MightNeedReordering(D))
+ continue;
- assert(D && "DC contains a null decl");
- if (Decl *ToD = Importer.GetAlreadyImportedOrNull(D)) {
- // Remove only the decls which we successfully imported.
- assert(ToDC == ToD->getLexicalDeclContext() && ToDC->containsDecl(ToD));
- // Remove the decl from its wrong place in the linked list.
- ToDC->removeDecl(ToD);
- // Add the decl to the end of the linked list.
- // This time it will be at the proper place because the enclosing for
- // loop iterates in the original (good) order of the decls.
- ToDC->addDeclInternal(ToD);
+ assert(D && "DC contains a null decl");
+ if (Decl *ToD = Importer.GetAlreadyImportedOrNull(D)) {
+ // Remove only the decls which we successfully imported.
+ assert(ToDC == ToD->getLexicalDeclContext() && ToDC->containsDecl(ToD));
+ // Remove the decl from its wrong place in the linked list.
+ ToDC->removeDecl(ToD);
+ // Add the decl to the end of the linked list.
+ // This time it will be at the proper place because the enclosing for
+ // loop iterates in the original (good) order of the decls.
+ ToDC->addDeclInternal(ToD);
+ }
}
}
diff --git a/clang/lib/AST/FormatString.cpp b/clang/lib/AST/FormatString.cpp
index e0c9e18cfe3a..c5d14b4af7ff 100644
--- a/clang/lib/AST/FormatString.cpp
+++ b/clang/lib/AST/FormatString.cpp
@@ -488,7 +488,6 @@ ArgType::matchesType(ASTContext &C, QualType argTy) const {
return NoMatchPromotionTypeConfusion;
break;
case BuiltinType::Half:
- case BuiltinType::Float16:
case BuiltinType::Float:
if (T == C.DoubleTy)
return MatchPromotion;
diff --git a/clang/lib/AST/Interp/Interp.cpp b/clang/lib/AST/Interp/Interp.cpp
index a82d1c3c7c62..21ea2503b94b 100644
--- a/clang/lib/AST/Interp/Interp.cpp
+++ b/clang/lib/AST/Interp/Interp.cpp
@@ -290,10 +290,10 @@ bool CheckInitialized(InterpState &S, CodePtr OpPC, const Pointer &Ptr,
}
bool CheckLoad(InterpState &S, CodePtr OpPC, const Pointer &Ptr) {
- if (!CheckDummy(S, OpPC, Ptr))
- return false;
if (!CheckLive(S, OpPC, Ptr, AK_Read))
return false;
+ if (!CheckDummy(S, OpPC, Ptr))
+ return false;
if (!CheckExtern(S, OpPC, Ptr))
return false;
if (!CheckRange(S, OpPC, Ptr, AK_Read))
diff --git a/clang/lib/AST/Interp/Interp.h b/clang/lib/AST/Interp/Interp.h
index 828d4ea35526..c05dea0cc55d 100644
--- a/clang/lib/AST/Interp/Interp.h
+++ b/clang/lib/AST/Interp/Interp.h
@@ -1813,9 +1813,6 @@ inline bool ArrayElemPtr(InterpState &S, CodePtr OpPC) {
const T &Offset = S.Stk.pop<T>();
const Pointer &Ptr = S.Stk.peek<Pointer>();
- if (!CheckArray(S, OpPC, Ptr))
- return false;
-
if (!OffsetHelper<T, ArithOp::Add>(S, OpPC, Offset, Ptr))
return false;
@@ -1843,9 +1840,6 @@ inline bool ArrayElemPtrPop(InterpState &S, CodePtr OpPC) {
const T &Offset = S.Stk.pop<T>();
const Pointer &Ptr = S.Stk.pop<Pointer>();
- if (!CheckArray(S, OpPC, Ptr))
- return false;
-
if (!OffsetHelper<T, ArithOp::Add>(S, OpPC, Offset, Ptr))
return false;
diff --git a/clang/lib/Basic/Targets/RISCV.cpp b/clang/lib/Basic/Targets/RISCV.cpp
index 6bc57a83a2d5..59ae12eed940 100644
--- a/clang/lib/Basic/Targets/RISCV.cpp
+++ b/clang/lib/Basic/Targets/RISCV.cpp
@@ -237,22 +237,15 @@ ArrayRef<Builtin::Info> RISCVTargetInfo::getTargetBuiltins() const {
static std::vector<std::string>
collectNonISAExtFeature(ArrayRef<std::string> FeaturesNeedOverride, int XLen) {
- auto ParseResult =
- llvm::RISCVISAInfo::parseFeatures(XLen, FeaturesNeedOverride);
-
- if (!ParseResult) {
- consumeError(ParseResult.takeError());
- return std::vector<std::string>();
- }
-
- std::vector<std::string> ImpliedFeatures = (*ParseResult)->toFeatureVector();
-
std::vector<std::string> NonISAExtFeatureVec;
+ auto IsNonISAExtFeature = [](const std::string &Feature) {
+ assert(Feature.size() > 1 && (Feature[0] == '+' || Feature[0] == '-'));
+ StringRef Ext = StringRef(Feature).drop_front(); // drop the +/-
+ return !llvm::RISCVISAInfo::isSupportedExtensionFeature(Ext);
+ };
llvm::copy_if(FeaturesNeedOverride, std::back_inserter(NonISAExtFeatureVec),
- [&](const std::string &Feat) {
- return !llvm::is_contained(ImpliedFeatures, Feat);
- });
+ IsNonISAExtFeature);
return NonISAExtFeatureVec;
}
diff --git a/clang/lib/Driver/ToolChains/MinGW.cpp b/clang/lib/Driver/ToolChains/MinGW.cpp
index 65512f16357d..18fc9d4b6807 100644
--- a/clang/lib/Driver/ToolChains/MinGW.cpp
+++ b/clang/lib/Driver/ToolChains/MinGW.cpp
@@ -471,12 +471,23 @@ findClangRelativeSysroot(const Driver &D, const llvm::Triple &LiteralTriple,
return make_error_code(std::errc::no_such_file_or_directory);
}
+static bool looksLikeMinGWSysroot(const std::string &Directory) {
+ StringRef Sep = llvm::sys::path::get_separator();
+ if (!llvm::sys::fs::exists(Directory + Sep + "include" + Sep + "_mingw.h"))
+ return false;
+ if (!llvm::sys::fs::exists(Directory + Sep + "lib" + Sep + "libkernel32.a"))
+ return false;
+ return true;
+}
+
toolchains::MinGW::MinGW(const Driver &D, const llvm::Triple &Triple,
const ArgList &Args)
: ToolChain(D, Triple, Args), CudaInstallation(D, Triple, Args),
RocmInstallation(D, Triple, Args) {
getProgramPaths().push_back(getDriver().getInstalledDir());
+ std::string InstallBase =
+ std::string(llvm::sys::path::parent_path(getDriver().getInstalledDir()));
// The sequence for detecting a sysroot here should be kept in sync with
// the testTriple function below.
llvm::Triple LiteralTriple = getLiteralTriple(D, getTriple());
@@ -487,13 +498,17 @@ toolchains::MinGW::MinGW(const Driver &D, const llvm::Triple &Triple,
else if (llvm::ErrorOr<std::string> TargetSubdir = findClangRelativeSysroot(
getDriver(), LiteralTriple, getTriple(), SubdirName))
Base = std::string(llvm::sys::path::parent_path(TargetSubdir.get()));
+ // If the install base of Clang seems to have mingw sysroot files directly
+ // in the toplevel include and lib directories, use this as base instead of
+ // looking for a triple prefixed GCC in the path.
+ else if (looksLikeMinGWSysroot(InstallBase))
+ Base = InstallBase;
else if (llvm::ErrorOr<std::string> GPPName =
findGcc(LiteralTriple, getTriple()))
Base = std::string(llvm::sys::path::parent_path(
llvm::sys::path::parent_path(GPPName.get())));
else
- Base = std::string(
- llvm::sys::path::parent_path(getDriver().getInstalledDir()));
+ Base = InstallBase;
Base += llvm::sys::path::get_separator();
findGccLibDir(LiteralTriple);
@@ -778,9 +793,15 @@ static bool testTriple(const Driver &D, const llvm::Triple &Triple,
if (D.SysRoot.size())
return true;
llvm::Triple LiteralTriple = getLiteralTriple(D, Triple);
+ std::string InstallBase =
+ std::string(llvm::sys::path::parent_path(D.getInstalledDir()));
if (llvm::ErrorOr<std::string> TargetSubdir =
findClangRelativeSysroot(D, LiteralTriple, Triple, SubdirName))
return true;
+ // If the install base itself looks like a mingw sysroot, we'll use that
+ // - don't use any potentially unrelated gcc to influence what triple to use.
+ if (looksLikeMinGWSysroot(InstallBase))
+ return false;
if (llvm::ErrorOr<std::string> GPPName = findGcc(LiteralTriple, Triple))
return true;
// If we neither found a colocated sysroot or a matching gcc executable,
diff --git a/clang/lib/Format/TokenAnnotator.cpp b/clang/lib/Format/TokenAnnotator.cpp
index 3ac3aa3c5e3a..8b43438c72df 100644
--- a/clang/lib/Format/TokenAnnotator.cpp
+++ b/clang/lib/Format/TokenAnnotator.cpp
@@ -5151,6 +5151,14 @@ bool TokenAnnotator::mustBreakBefore(const AnnotatedLine &Line,
return true;
if (Left.IsUnterminatedLiteral)
return true;
+ // FIXME: Breaking after newlines seems useful in general. Turn this into an
+ // option and recognize more cases like endl etc., and break independently of
+ // what comes after operator<<.
+ if (Right.is(tok::lessless) && Right.Next &&
+ Right.Next->is(tok::string_literal) && Left.is(tok::string_literal) &&
+ Left.TokenText.ends_with("\\n\"")) {
+ return true;
+ }
if (Right.is(TT_RequiresClause)) {
switch (Style.RequiresClausePosition) {
case FormatStyle::RCPS_OwnLine:
diff --git a/clang/lib/Lex/ModuleMap.cpp b/clang/lib/Lex/ModuleMap.cpp
index ea5d13deb114..42d55d09ea5a 100644
--- a/clang/lib/Lex/ModuleMap.cpp
+++ b/clang/lib/Lex/ModuleMap.cpp
@@ -984,7 +984,9 @@ static void inferFrameworkLink(Module *Mod) {
assert(!Mod->isSubFramework() &&
"Can only infer linking for top-level frameworks");
- Mod->LinkLibraries.push_back(Module::LinkLibrary(Mod->Name,
+ StringRef FrameworkName(Mod->Name);
+ FrameworkName.consume_back("_Private");
+ Mod->LinkLibraries.push_back(Module::LinkLibrary(FrameworkName.str(),
/*IsFramework=*/true));
}
diff --git a/clang/lib/Parse/ParseOpenACC.cpp b/clang/lib/Parse/ParseOpenACC.cpp
index 94c3d0c4e164..84e994ef0081 100644
--- a/clang/lib/Parse/ParseOpenACC.cpp
+++ b/clang/lib/Parse/ParseOpenACC.cpp
@@ -80,6 +80,10 @@ OpenACCClauseKind getOpenACCClauseKind(Token Tok) {
if (Tok.is(tok::kw_default))
return OpenACCClauseKind::Default;
+ // 'if' is also a keyword, so make sure we parse it correctly.
+ if (Tok.is(tok::kw_if))
+ return OpenACCClauseKind::If;
+
if (!Tok.is(tok::identifier))
return OpenACCClauseKind::Invalid;
@@ -88,6 +92,7 @@ OpenACCClauseKind getOpenACCClauseKind(Token Tok) {
.Case("auto", OpenACCClauseKind::Auto)
.Case("default", OpenACCClauseKind::Default)
.Case("finalize", OpenACCClauseKind::Finalize)
+ .Case("if", OpenACCClauseKind::If)
.Case("if_present", OpenACCClauseKind::IfPresent)
.Case("independent", OpenACCClauseKind::Independent)
.Case("nohost", OpenACCClauseKind::NoHost)
@@ -324,7 +329,7 @@ OpenACCDirectiveKind ParseOpenACCDirectiveKind(Parser &P) {
}
bool ClauseHasRequiredParens(OpenACCClauseKind Kind) {
- return Kind == OpenACCClauseKind::Default;
+ return Kind == OpenACCClauseKind::Default || Kind == OpenACCClauseKind::If;
}
bool ParseOpenACCClauseParams(Parser &P, OpenACCClauseKind Kind) {
@@ -356,6 +361,19 @@ bool ParseOpenACCClauseParams(Parser &P, OpenACCClauseKind Kind) {
break;
}
+ case OpenACCClauseKind::If: {
+ // FIXME: It isn't clear whether the spec's 'condition' means the same as
+ // it does in an if/while/etc. (see ParseCXXCondition). However, as the spec
+ // was written with Fortran/C in mind, we assume it just means an
+ // 'expression evaluating to boolean'.
+ ExprResult CondExpr =
+ P.getActions().CorrectDelayedTyposInExpr(P.ParseExpression());
+ // An invalid expression can be just about anything, so just give up on
+ // this clause list.
+ if (CondExpr.isInvalid())
+ return true;
+ break;
+ }
default:
llvm_unreachable("Not a required parens type?");
}
@@ -372,8 +390,10 @@ bool ParseOpenACCClauseParams(Parser &P, OpenACCClauseKind Kind) {
// However, they all are named with a single-identifier (or auto/default!)
// token, followed in some cases by either braces or parens.
bool ParseOpenACCClause(Parser &P) {
- if (!P.getCurToken().isOneOf(tok::identifier, tok::kw_auto, tok::kw_default))
- return P.Diag(P.getCurToken(), diag::err_expected) << tok::identifier;
+ // A number of clause names are actually keywords, so accept a keyword that
+ // can be converted to a name.
+ if (expectIdentifierOrKeyword(P))
+ return true;
OpenACCClauseKind Kind = getOpenACCClauseKind(P.getCurToken());
diff --git a/clang/lib/Sema/SemaInit.cpp b/clang/lib/Sema/SemaInit.cpp
index e469e420f14f..408ee5f77580 100644
--- a/clang/lib/Sema/SemaInit.cpp
+++ b/clang/lib/Sema/SemaInit.cpp
@@ -10377,11 +10377,6 @@ void InitializationSequence::dump() const {
dump(llvm::errs());
}
-static bool NarrowingErrs(const LangOptions &L) {
- return L.CPlusPlus11 &&
- (!L.MicrosoftExt || L.isCompatibleWithMSVC(LangOptions::MSVC2015));
-}
-
static void DiagnoseNarrowingInInitList(Sema &S,
const ImplicitConversionSequence &ICS,
QualType PreNarrowingType,
@@ -10402,6 +10397,19 @@ static void DiagnoseNarrowingInInitList(Sema &S,
return;
}
+ auto MakeDiag = [&](bool IsConstRef, unsigned DefaultDiagID,
+ unsigned ConstRefDiagID, unsigned WarnDiagID) {
+ unsigned DiagID;
+ auto &L = S.getLangOpts();
+ if (L.CPlusPlus11 &&
+ (!L.MicrosoftExt || L.isCompatibleWithMSVC(LangOptions::MSVC2015)))
+ DiagID = IsConstRef ? ConstRefDiagID : DefaultDiagID;
+ else
+ DiagID = WarnDiagID;
+ return S.Diag(PostInit->getBeginLoc(), DiagID)
+ << PostInit->getSourceRange();
+ };
+
// C++11 [dcl.init.list]p7: Check whether this is a narrowing conversion.
APValue ConstantValue;
QualType ConstantType;
@@ -10417,13 +10425,9 @@ static void DiagnoseNarrowingInInitList(Sema &S,
// narrowing conversion even if the value is a constant and can be
// represented exactly as an integer.
QualType T = EntityType.getNonReferenceType();
- S.Diag(PostInit->getBeginLoc(),
- NarrowingErrs(S.getLangOpts())
- ? (T == EntityType
- ? diag::ext_init_list_type_narrowing
- : diag::ext_init_list_type_narrowing_const_reference)
- : diag::warn_init_list_type_narrowing)
- << PostInit->getSourceRange()
+ MakeDiag(T != EntityType, diag::ext_init_list_type_narrowing,
+ diag::ext_init_list_type_narrowing_const_reference,
+ diag::warn_init_list_type_narrowing)
<< PreNarrowingType.getLocalUnqualifiedType()
<< T.getLocalUnqualifiedType();
break;
@@ -10431,14 +10435,10 @@ static void DiagnoseNarrowingInInitList(Sema &S,
case NK_Constant_Narrowing: {
// A constant value was narrowed.
- QualType T = EntityType.getNonReferenceType();
- S.Diag(PostInit->getBeginLoc(),
- NarrowingErrs(S.getLangOpts())
- ? (T == EntityType
- ? diag::ext_init_list_constant_narrowing
- : diag::ext_init_list_constant_narrowing_const_reference)
- : diag::warn_init_list_constant_narrowing)
- << PostInit->getSourceRange()
+ MakeDiag(EntityType.getNonReferenceType() != EntityType,
+ diag::ext_init_list_constant_narrowing,
+ diag::ext_init_list_constant_narrowing_const_reference,
+ diag::warn_init_list_constant_narrowing)
<< ConstantValue.getAsString(S.getASTContext(), ConstantType)
<< EntityType.getNonReferenceType().getLocalUnqualifiedType();
break;
@@ -10446,14 +10446,10 @@ static void DiagnoseNarrowingInInitList(Sema &S,
case NK_Variable_Narrowing: {
// A variable's value may have been narrowed.
- QualType T = EntityType.getNonReferenceType();
- S.Diag(PostInit->getBeginLoc(),
- NarrowingErrs(S.getLangOpts())
- ? (T == EntityType
- ? diag::ext_init_list_variable_narrowing
- : diag::ext_init_list_variable_narrowing_const_reference)
- : diag::warn_init_list_variable_narrowing)
- << PostInit->getSourceRange()
+ MakeDiag(EntityType.getNonReferenceType() != EntityType,
+ diag::ext_init_list_variable_narrowing,
+ diag::ext_init_list_variable_narrowing_const_reference,
+ diag::warn_init_list_variable_narrowing)
<< PreNarrowingType.getLocalUnqualifiedType()
<< EntityType.getNonReferenceType().getLocalUnqualifiedType();
break;
diff --git a/clang/lib/Sema/SemaOverload.cpp b/clang/lib/Sema/SemaOverload.cpp
index 9fb767101e1e..e6c267bb79e6 100644
--- a/clang/lib/Sema/SemaOverload.cpp
+++ b/clang/lib/Sema/SemaOverload.cpp
@@ -1259,6 +1259,43 @@ static bool IsOverloadOrOverrideImpl(Sema &SemaRef, FunctionDecl *New,
if ((OldTemplate == nullptr) != (NewTemplate == nullptr))
return true;
+ if (NewTemplate) {
+ // C++ [temp.over.link]p4:
+ // The signature of a function template consists of its function
+ // signature, its return type and its template parameter list. The names
+ // of the template parameters are significant only for establishing the
+ // relationship between the template parameters and the rest of the
+ // signature.
+ //
+ // We check the return type and template parameter lists for function
+ // templates first; the remaining checks follow.
+ bool SameTemplateParameterList = SemaRef.TemplateParameterListsAreEqual(
+ NewTemplate, NewTemplate->getTemplateParameters(), OldTemplate,
+ OldTemplate->getTemplateParameters(), false, Sema::TPL_TemplateMatch);
+ bool SameReturnType = SemaRef.Context.hasSameType(
+ Old->getDeclaredReturnType(), New->getDeclaredReturnType());
+ // FIXME(GH58571): Match template parameter list even for non-constrained
+ // template heads. This currently ensures that the code prior to C++20 is
+ // not newly broken.
+ bool ConstraintsInTemplateHead =
+ NewTemplate->getTemplateParameters()->hasAssociatedConstraints() ||
+ OldTemplate->getTemplateParameters()->hasAssociatedConstraints();
+ // C++ [namespace.udecl]p11:
+ // The set of declarations named by a using-declarator that inhabits a
+ // class C does not include member functions and member function
+ // templates of a base class that "correspond" to (and thus would
+ // conflict with) a declaration of a function or function template in
+ // C.
+ // Comparing return types is not required for the "correspond" check to
+ // decide whether a member introduced by a shadow declaration is hidden.
+ if (UseMemberUsingDeclRules && ConstraintsInTemplateHead &&
+ !SameTemplateParameterList)
+ return true;
+ if (!UseMemberUsingDeclRules &&
+ (!SameTemplateParameterList || !SameReturnType))
+ return true;
+ }
+
// Is the function New an overload of the function Old?
QualType OldQType = SemaRef.Context.getCanonicalType(Old->getType());
QualType NewQType = SemaRef.Context.getCanonicalType(New->getType());
@@ -1410,43 +1447,6 @@ static bool IsOverloadOrOverrideImpl(Sema &SemaRef, FunctionDecl *New,
}
}
- if (NewTemplate) {
- // C++ [temp.over.link]p4:
- // The signature of a function template consists of its function
- // signature, its return type and its template parameter list. The names
- // of the template parameters are significant only for establishing the
- // relationship between the template parameters and the rest of the
- // signature.
- //
- // We check the return type and template parameter lists for function
- // templates first; the remaining checks follow.
- bool SameTemplateParameterList = SemaRef.TemplateParameterListsAreEqual(
- NewTemplate, NewTemplate->getTemplateParameters(), OldTemplate,
- OldTemplate->getTemplateParameters(), false, Sema::TPL_TemplateMatch);
- bool SameReturnType = SemaRef.Context.hasSameType(
- Old->getDeclaredReturnType(), New->getDeclaredReturnType());
- // FIXME(GH58571): Match template parameter list even for non-constrained
- // template heads. This currently ensures that the code prior to C++20 is
- // not newly broken.
- bool ConstraintsInTemplateHead =
- NewTemplate->getTemplateParameters()->hasAssociatedConstraints() ||
- OldTemplate->getTemplateParameters()->hasAssociatedConstraints();
- // C++ [namespace.udecl]p11:
- // The set of declarations named by a using-declarator that inhabits a
- // class C does not include member functions and member function
- // templates of a base class that "correspond" to (and thus would
- // conflict with) a declaration of a function or function template in
- // C.
- // Comparing return types is not required for the "correspond" check to
- // decide whether a member introduced by a shadow declaration is hidden.
- if (UseMemberUsingDeclRules && ConstraintsInTemplateHead &&
- !SameTemplateParameterList)
- return true;
- if (!UseMemberUsingDeclRules &&
- (!SameTemplateParameterList || !SameReturnType))
- return true;
- }
-
if (!UseOverrideRules) {
Expr *NewRC = New->getTrailingRequiresClause(),
*OldRC = Old->getTrailingRequiresClause();
@@ -13994,21 +13994,19 @@ ExprResult Sema::BuildOverloadedCallExpr(Scope *S, Expr *Fn,
OverloadCandidateSet::iterator Best;
OverloadingResult OverloadResult =
CandidateSet.BestViableFunction(*this, Fn->getBeginLoc(), Best);
- FunctionDecl *FDecl = Best->Function;
// Model the case with a call to a templated function whose definition
// encloses the call and whose return type contains a placeholder type as if
// the UnresolvedLookupExpr was type-dependent.
- if (OverloadResult == OR_Success && FDecl &&
- FDecl->isTemplateInstantiation() &&
- FDecl->getReturnType()->isUndeducedType()) {
- if (auto TP = FDecl->getTemplateInstantiationPattern(false)) {
- if (TP->willHaveBody()) {
- CallExpr *CE =
- CallExpr::Create(Context, Fn, Args, Context.DependentTy, VK_PRValue,
- RParenLoc, CurFPFeatureOverrides());
- result = CE;
- return result;
+ if (OverloadResult == OR_Success) {
+ const FunctionDecl *FDecl = Best->Function;
+ if (FDecl && FDecl->isTemplateInstantiation() &&
+ FDecl->getReturnType()->isUndeducedType()) {
+ if (const auto *TP =
+ FDecl->getTemplateInstantiationPattern(/*ForDefinition=*/false);
+ TP && TP->willHaveBody()) {
+ return CallExpr::Create(Context, Fn, Args, Context.DependentTy,
+ VK_PRValue, RParenLoc, CurFPFeatureOverrides());
}
}
}
diff --git a/clang/lib/Sema/SemaTemplateInstantiate.cpp b/clang/lib/Sema/SemaTemplateInstantiate.cpp
index 09dd119ed1b9..7f20413c104e 100644
--- a/clang/lib/Sema/SemaTemplateInstantiate.cpp
+++ b/clang/lib/Sema/SemaTemplateInstantiate.cpp
@@ -344,15 +344,26 @@ MultiLevelTemplateArgumentList Sema::getTemplateInstantiationArgs(
using namespace TemplateInstArgsHelpers;
const Decl *CurDecl = ND;
+
+ if (!CurDecl)
+ CurDecl = Decl::castFromDeclContext(DC);
+
if (Innermost) {
Result.addOuterTemplateArguments(const_cast<NamedDecl *>(ND),
Innermost->asArray(), Final);
- CurDecl = Response::UseNextDecl(ND).NextDecl;
+ // Populate placeholder template arguments for TemplateTemplateParmDecls.
+ // This is essential for cases such as
+ //
+ // template <class> concept Concept = false;
+ // template <template <Concept C> class T> void foo(T<int>)
+ //
+ // where parameter C has a depth of 1 but the substituting argument `int`
+ // has a depth of 0.
+ if (const auto *TTP = dyn_cast<TemplateTemplateParmDecl>(CurDecl))
+ HandleDefaultTempArgIntoTempTempParam(TTP, Result);
+ CurDecl = Response::UseNextDecl(CurDecl).NextDecl;
}
- if (!ND)
- CurDecl = Decl::castFromDeclContext(DC);
-
while (!CurDecl->isFileContextDecl()) {
Response R;
if (const auto *VarTemplSpec =
@@ -380,10 +391,8 @@ MultiLevelTemplateArgumentList Sema::getTemplateInstantiationArgs(
R = Response::ChangeDecl(CTD->getLexicalDeclContext());
} else if (!isa<DeclContext>(CurDecl)) {
R = Response::DontClearRelativeToPrimaryNextDecl(CurDecl);
- if (CurDecl->getDeclContext()->isTranslationUnit()) {
- if (const auto *TTP = dyn_cast<TemplateTemplateParmDecl>(CurDecl)) {
- R = HandleDefaultTempArgIntoTempTempParam(TTP, Result);
- }
+ if (const auto *TTP = dyn_cast<TemplateTemplateParmDecl>(CurDecl)) {
+ R = HandleDefaultTempArgIntoTempTempParam(TTP, Result);
}
} else {
R = HandleGenericDeclContext(CurDecl);
diff --git a/clang/test/AST/Interp/arrays.cpp b/clang/test/AST/Interp/arrays.cpp
index c455731e7669..4aa10da55dd3 100644
--- a/clang/test/AST/Interp/arrays.cpp
+++ b/clang/test/AST/Interp/arrays.cpp
@@ -72,6 +72,14 @@ constexpr int getElementFromEnd(const int *Arr, int size, int index) {
static_assert(getElementFromEnd(data, 5, 0) == 1, "");
static_assert(getElementFromEnd(data, 5, 4) == 5, "");
+constexpr int getFirstElem(const int *a) {
+ return a[0]; // expected-note {{read of dereferenced null pointer}} \
+ // ref-note {{read of dereferenced null pointer}}
+}
+static_assert(getFirstElem(nullptr) == 1, ""); // expected-error {{not an integral constant expression}} \
+ // expected-note {{in call to}} \
+ // ref-error {{not an integral constant expression}} \
+ // ref-note {{in call to}}
constexpr static int arr[2] = {1,2};
constexpr static int arr2[2] = {3,4};
diff --git a/clang/test/CXX/over/over.load/p2-0x.cpp b/clang/test/CXX/over/over.load/p2-0x.cpp
index 183f3cb322af..8fd9a1ce1e87 100644
--- a/clang/test/CXX/over/over.load/p2-0x.cpp
+++ b/clang/test/CXX/over/over.load/p2-0x.cpp
@@ -24,6 +24,11 @@ class Y {
void k() &&; // expected-error{{cannot overload a member function with ref-qualifier '&&' with a member function without a ref-qualifier}}
};
+struct GH76358 {
+ template<int> void f() && {}
+ template<typename T> void f() const {}
+};
+
#if __cplusplus >= 202002L
namespace GH58962 {
diff --git a/clang/test/CXX/temp/temp.arg/temp.arg.template/p3-2a.cpp b/clang/test/CXX/temp/temp.arg/temp.arg.template/p3-2a.cpp
index 449b6232542e..f58606963861 100644
--- a/clang/test/CXX/temp/temp.arg/temp.arg.template/p3-2a.cpp
+++ b/clang/test/CXX/temp/temp.arg/temp.arg.template/p3-2a.cpp
@@ -59,3 +59,32 @@ struct Nothing {};
// FIXME: Wait the standard to clarify the intent.
template<> template<> Z<Nothing> S5<Z>::V<Nothing>;
+
+namespace GH57410 {
+
+template<typename T>
+concept True = true;
+
+template<typename T>
+concept False = false; // #False
+
+template <class> struct S {};
+
+template<template<True T> typename Wrapper>
+using Test = Wrapper<int>;
+
+template<template<False T> typename Wrapper> // #TTP-Wrapper
+using Test = Wrapper<int>; // expected-error {{constraints not satisfied for template template parameter 'Wrapper' [with T = int]}}
+
+// expected-note@#TTP-Wrapper {{'int' does not satisfy 'False'}}
+// expected-note@#False {{evaluated to false}}
+
+template <typename U, template<False> typename T>
+void foo(T<U>); // #foo
+
+void bar() {
+ foo<int>(S<int>{}); // expected-error {{no matching function for call to 'foo'}}
+ // expected-note@#foo {{substitution failure [with U = int]: constraints not satisfied for template template parameter 'T' [with $0 = int]}}
+}
+
+}
diff --git a/clang/test/CodeGen/RISCV/riscv-func-attr-target.c b/clang/test/CodeGen/RISCV/riscv-func-attr-target.c
index 506acaba6874..759c33a22506 100644
--- a/clang/test/CodeGen/RISCV/riscv-func-attr-target.c
+++ b/clang/test/CodeGen/RISCV/riscv-func-attr-target.c
@@ -40,8 +40,8 @@ __attribute__((target("cpu=sifive-u54"))) void testAttrCpuOnly() {}
// CHECK: attributes #1 = { {{.*}}"target-cpu"="rocket-rv64" "target-features"="+64bit,+a,+d,+f,+m,+save-restore,+v,+zicsr,+zifencei,+zve32f,+zve32x,+zve64d,+zve64f,+zve64x,+zvl128b,+zvl32b,+zvl64b,-relax,-zbb,-zfa" "tune-cpu"="generic-rv64" }
// CHECK: attributes #2 = { {{.*}}"target-features"="+64bit,+a,+m,+save-restore,+zbb,+zifencei,-relax,-zfa" }
// CHECK: attributes #3 = { {{.*}}"target-features"="+64bit,+a,+d,+experimental-zicond,+f,+m,+save-restore,+v,+zbb,+zicsr,+zifencei,+zve32f,+zve32x,+zve64d,+zve64f,+zve64x,+zvl128b,+zvl32b,+zvl64b,-relax,-zfa" }
-// CHECK: attributes #4 = { {{.*}}"target-features"="+64bit,+a,+c,+d,+f,+m,+save-restore,+zbb,+zicsr,+zifencei,-relax,-zfa" }
-// CHECK: attributes #5 = { {{.*}}"target-features"="+64bit,+m,+save-restore,-relax,-zbb,-zfa" }
+// CHECK: attributes #4 = { {{.*}}"target-features"="+64bit,+a,+c,+d,+f,+m,+save-restore,+zbb,+zicsr,+zifencei,-relax" }
+// CHECK: attributes #5 = { {{.*}}"target-features"="+64bit,+m,+save-restore,-relax" }
// CHECK: attributes #6 = { {{.*}}"target-cpu"="sifive-u54" "target-features"="+64bit,+a,+m,+save-restore,+zbb,+zifencei,-relax,-zfa" }
-// CHECK: attributes #7 = { {{.*}}"target-cpu"="sifive-u54" "target-features"="+64bit,+m,+save-restore,-relax,-zbb,-zfa" }
-// CHECK: attributes #8 = { {{.*}}"target-cpu"="sifive-u54" "target-features"="+64bit,+a,+c,+d,+f,+m,+save-restore,+zicsr,+zifencei,-relax,-zbb,-zfa" }
+// CHECK: attributes #7 = { {{.*}}"target-cpu"="sifive-u54" "target-features"="+64bit,+m,+save-restore,-relax" }
+// CHECK: attributes #8 = { {{.*}}"target-cpu"="sifive-u54" "target-features"="+64bit,+a,+c,+d,+f,+m,+save-restore,+zicsr,+zifencei,-relax" }
diff --git a/clang/test/Driver/linker-wrapper-image.c b/clang/test/Driver/linker-wrapper-image.c
index 40dde2e02918..03caa1eb084e 100644
--- a/clang/test/Driver/linker-wrapper-image.c
+++ b/clang/test/Driver/linker-wrapper-image.c
@@ -19,7 +19,7 @@
// OPENMP-COFF: @__start_omp_offloading_entries = hidden constant [0 x %struct.__tgt_offload_entry] zeroinitializer, section "omp_offloading_entries$OA"
// OPENMP-COFF-NEXT: @__stop_omp_offloading_entries = hidden constant [0 x %struct.__tgt_offload_entry] zeroinitializer, section "omp_offloading_entries$OZ"
-// OPENMP-NEXT: @.omp_offloading.device_image = internal unnamed_addr constant [[[SIZE:[0-9]+]] x i8] c"\10\FF\10\AD\01{{.*}}", section ".llvm.offloading", align 8
+// OPENMP: @.omp_offloading.device_image = internal unnamed_addr constant [[[SIZE:[0-9]+]] x i8] c"\10\FF\10\AD{{.*}}", section ".llvm.offloading", align 8
// OPENMP-NEXT: @.omp_offloading.device_images = internal unnamed_addr constant [1 x %__tgt_device_image] [%__tgt_device_image { ptr getelementptr inbounds ([[[BEGIN:[0-9]+]] x i8], ptr @.omp_offloading.device_image, i64 1, i64 0), ptr getelementptr inbounds ([[[END:[0-9]+]] x i8], ptr @.omp_offloading.device_image, i64 1, i64 0), ptr @__start_omp_offloading_entries, ptr @__stop_omp_offloading_entries }]
// OPENMP-NEXT: @.omp_offloading.descriptor = internal constant %__tgt_bin_desc { i32 1, ptr @.omp_offloading.device_images, ptr @__start_omp_offloading_entries, ptr @__stop_omp_offloading_entries }
// OPENMP-NEXT: @llvm.global_ctors = appending global [1 x { i32, ptr, ptr }] [{ i32, ptr, ptr } { i32 1, ptr @.omp_offloading.descriptor_reg, ptr null }]
diff --git a/clang/test/Driver/mingw-sysroot.cpp b/clang/test/Driver/mingw-sysroot.cpp
index 911dab492707..50152b2ca210 100644
--- a/clang/test/Driver/mingw-sysroot.cpp
+++ b/clang/test/Driver/mingw-sysroot.cpp
@@ -14,6 +14,12 @@
// RUN: ln -s %S/Inputs/mingw_ubuntu_posix_tree/usr/x86_64-w64-mingw32 %T/testroot-clang/x86_64-w64-mingw32
// RUN: ln -s %S/Inputs/mingw_arch_tree/usr/i686-w64-mingw32 %T/testroot-clang/i686-w64-mingw32
+// RUN: rm -rf %T/testroot-clang-native
+// RUN: mkdir -p %T/testroot-clang-native/bin
+// RUN: ln -s %clang %T/testroot-clang-native/bin/clang
+// RUN: mkdir -p %T/testroot-clang-native/include/_mingw.h
+// RUN: mkdir -p %T/testroot-clang-native/lib/libkernel32.a
+
// RUN: rm -rf %T/testroot-custom-triple
// RUN: mkdir -p %T/testroot-custom-triple/bin
// RUN: ln -s %clang %T/testroot-custom-triple/bin/clang
@@ -58,6 +64,28 @@
// RUN: env "PATH=%T/testroot-gcc/bin:%PATH%" %T/testroot-gcc/bin/x86_64-w64-mingw32-clang -target x86_64-w64-mingw32 -rtlib=platform -stdlib=libstdc++ --sysroot="" -c -### %s 2>&1 | FileCheck -check-prefix=CHECK_TESTROOT_GCC %s
+// If we're executing clang from a directory with what looks like a mingw sysroot,
+// with headers in <base>/include and libs in <base>/lib, use that rather than looking
+// for another GCC in the path.
+//
+// Note: this test has a surprising quirk. We're testing with an install directory,
+// testroot-clang-native, which lacks the "x86_64-w64-mingw32" subdirectory; it only
+// has the include and lib subdirectories without any triple prefix.
+//
+// Since commit fd15cb935d7aae25ad62bfe06fe9f17cea585978, we avoid using the
+// <base>/include and <base>/lib directories when cross compiling. So technically, this
+// testcase only works exactly as expected when running on x86_64 Windows, where
+// this target isn't considered cross compiling.
+//
+// However, we do still pass the include directory <base>/x86_64-w64-mingw32/include to
+// the -cc1 interface, even if it is missing. Thus, this test looks for that path name,
+// which indicates that we chose the right base, even though this particular directory
+// doesn't actually exist here.
+
+// RUN: env "PATH=%T/testroot-gcc/bin:%PATH%" %T/testroot-clang-native/bin/clang -target x86_64-w64-mingw32 -rtlib=compiler-rt -stdlib=libstdc++ --sysroot="" -c -### %s 2>&1 | FileCheck -check-prefix=CHECK_TESTROOT_CLANG_NATIVE %s
+// CHECK_TESTROOT_CLANG_NATIVE: "{{[^"]+}}/testroot-clang-native{{/|\\\\}}x86_64-w64-mingw32{{/|\\\\}}include"
+
+
// If the user requests a different arch via the -m32 option, which changes
// x86_64 into i386, check that the driver notices that it can't find a
// sysroot for i386 but there is one for i686, and uses that one.
diff --git a/clang/test/Driver/riscv-rvv-vector-bits.c b/clang/test/Driver/riscv-rvv-vector-bits.c
index e92b66c972da..24af5f0c73c6 100644
--- a/clang/test/Driver/riscv-rvv-vector-bits.c
+++ b/clang/test/Driver/riscv-rvv-vector-bits.c
@@ -44,7 +44,7 @@
// CHECK-BAD-VALUE-ERROR: error: unsupported argument '{{.*}}' to option '-mrvv-vector-bits='
-// Error if using attribute without -msve-vector-bits=<bits> or if using -msve-vector-bits=<bits>+ syntax
+// Error if using attribute without -mrvv-vector-bits=<bits> or if using -mrvv-vector-bits=<bits>+ syntax
// -----------------------------------------------------------------------------
// RUN: not %clang -c %s -o /dev/null -target riscv64-linux-gnu \
// RUN: -march=rv64gc_zve64x 2>&1 | FileCheck --check-prefix=CHECK-NO-FLAG-ERROR %s
diff --git a/clang/test/Modules/Inputs/AutolinkTBD.framework/AutolinkTBD.tbd b/clang/test/Modules/Inputs/AutolinkTBD.framework/AutolinkTBD.tbd
deleted file mode 100644
index 4aa0f85d0d56..000000000000
--- a/clang/test/Modules/Inputs/AutolinkTBD.framework/AutolinkTBD.tbd
+++ /dev/null
@@ -1 +0,0 @@
-empty file - clang only needs to check if it exists.
diff --git a/clang/test/Modules/Inputs/AutolinkTBD.framework/Headers/AutolinkTBD.h b/clang/test/Modules/Inputs/AutolinkTBD.framework/Headers/AutolinkTBD.h
deleted file mode 100644
index 914983c49636..000000000000
--- a/clang/test/Modules/Inputs/AutolinkTBD.framework/Headers/AutolinkTBD.h
+++ /dev/null
@@ -1 +0,0 @@
-extern int foo(void);
diff --git a/clang/test/Modules/autolinkTBD.m b/clang/test/Modules/autolinkTBD.m
deleted file mode 100644
index 69253294f7b8..000000000000
--- a/clang/test/Modules/autolinkTBD.m
+++ /dev/null
@@ -1,16 +0,0 @@
-// UNSUPPORTED: target={{.*}}-zos{{.*}}, target={{.*}}-aix{{.*}}
-// RUN: rm -rf %t
-// RUN: %clang_cc1 -emit-llvm -o - -fmodules-cache-path=%t -fmodules -fimplicit-module-maps -F %S/Inputs %s | FileCheck %s
-// RUN: %clang_cc1 -emit-llvm -fno-autolink -o - -fmodules-cache-path=%t -fmodules -fimplicit-module-maps -F %S/Inputs %s | FileCheck --check-prefix=CHECK-AUTOLINK-DISABLED %s
-
-@import AutolinkTBD;
-
-int f(void) {
- return foo();
-}
-
-// CHECK: !llvm.linker.options = !{![[AUTOLINK_FRAMEWORK:[0-9]+]]}
-// CHECK: ![[AUTOLINK_FRAMEWORK]] = !{!"-framework", !"AutolinkTBD"}
-
-// CHECK-AUTOLINK-DISABLED: !llvm.module.flags
-// CHECK-AUTOLINK-DISABLED-NOT: !llvm.linker.options
diff --git a/clang/test/Modules/autolink_private_module.m b/clang/test/Modules/autolink_private_module.m
new file mode 100644
index 000000000000..54bebc3a587b
--- /dev/null
+++ b/clang/test/Modules/autolink_private_module.m
@@ -0,0 +1,25 @@
+// Test that autolink hints for frameworks don't use the private module name.
+// RUN: rm -rf %t && mkdir %t
+// RUN: split-file %s %t
+
+// RUN: %clang_cc1 -emit-llvm -o - -fmodules-cache-path=%t/ModuleCache -fmodules -fimplicit-module-maps -F %t/Frameworks %t/test.m | FileCheck %s
+
+// CHECK: !{!"-framework", !"Autolink"}
+// CHECK-NOT: !{!"-framework", !"Autolink_Private"}
+
+//--- test.m
+#include <Autolink/Autolink.h>
+#include <Autolink/Autolink_Private.h>
+
+//--- Frameworks/Autolink.framework/Headers/Autolink.h
+void public();
+
+//--- Frameworks/Autolink.framework/PrivateHeaders/Autolink_Private.h
+void private();
+
+//--- Frameworks/Autolink.framework/Modules/module.modulemap
+framework module Autolink { header "Autolink.h"}
+
+//--- Frameworks/Autolink.framework/Modules/module.private.modulemap
+framework module Autolink_Private { header "Autolink_Private.h"}
+
diff --git a/clang/test/Modules/explicit-specializations.cppm b/clang/test/Modules/explicit-specializations.cppm
new file mode 100644
index 000000000000..914144018e88
--- /dev/null
+++ b/clang/test/Modules/explicit-specializations.cppm
@@ -0,0 +1,133 @@
+// Testing that the compiler can select the correct template specialization
+// across different template aliases.
+//
+// RUN: rm -rf %t
+// RUN: split-file %s %t
+// RUN: cd %t
+//
+// RUN: %clang_cc1 -std=c++20 %t/a.cppm -emit-module-interface -o %t/a.pcm
+// RUN: %clang_cc1 -std=c++20 %t/b.cpp -fprebuilt-module-path=%t \
+// RUN: -fsyntax-only -verify
+
+//--- a.cppm
+
+// For template type parameters
+export module a;
+export template <class C>
+struct S {
+ static constexpr bool selected = false;
+};
+
+export struct A {};
+
+export template <>
+struct S<A> {
+ static constexpr bool selected = true;
+};
+
+export using B = A;
+
+// For template template parameters
+
+export template <template<typename> typename C>
+struct V {
+ static constexpr bool selected = false;
+};
+
+export template <>
+struct V<S> {
+ static constexpr bool selected = true;
+};
+
+// For template non-type parameters
+export template <int X>
+struct Numbers {
+ static constexpr bool selected = false;
+ static constexpr int value = X;
+};
+
+export template<>
+struct Numbers<43> {
+ static constexpr bool selected = true;
+ static constexpr int value = 43;
+};
+
+export template <const int *>
+struct Pointers {
+ static constexpr bool selected = false;
+};
+
+export int IntegralValue = 0;
+export template<>
+struct Pointers<&IntegralValue> {
+ static constexpr bool selected = true;
+};
+
+export template <void *>
+struct NullPointers {
+ static constexpr bool selected = false;
+};
+
+export template<>
+struct NullPointers<nullptr> {
+ static constexpr bool selected = true;
+};
+
+export template<int (&)[5]>
+struct Array {
+ static constexpr bool selected = false;
+};
+
+export int array[5];
+export template<>
+struct Array<array> {
+ static constexpr bool selected = true;
+};
+
+//--- b.cpp
+// expected-no-diagnostics
+import a;
+
+// Testing for different qualifiers
+static_assert(S<B>::selected);
+static_assert(S<::B>::selected);
+static_assert(::S<B>::selected);
+static_assert(::S<::B>::selected);
+typedef A C;
+static_assert(S<C>::selected);
+static_assert(S<::C>::selected);
+static_assert(::S<C>::selected);
+static_assert(::S<::C>::selected);
+
+namespace D {
+ C getAType();
+ typedef C E;
+}
+
+static_assert(S<D::E>::selected);
+static_assert(S<decltype(D::getAType())>::selected);
+
+// Testing that we can select the correct specialization for different
+// template template argument aliasing.
+
+static_assert(V<S>::selected);
+static_assert(V<::S>::selected);
+static_assert(::V<S>::selected);
+static_assert(::V<::S>::selected);
+
+// Testing for template non-type parameters
+static_assert(Numbers<43>::selected);
+static_assert(Numbers<21 * 2 + 1>::selected);
+static_assert(Numbers<42 + 1>::selected);
+static_assert(Numbers<44 - 1>::selected);
+static_assert(Numbers<Numbers<43>::value>::selected);
+static_assert(!Numbers<44>::selected);
+
+static_assert(Pointers<&IntegralValue>::selected);
+static_assert(!Pointers<nullptr>::selected);
+static_assert(NullPointers<nullptr>::selected);
+static_assert(!NullPointers<(void*)&IntegralValue>::selected);
+
+static_assert(Array<array>::selected);
+int another_array[5];
+static_assert(!Array<another_array>::selected);
diff --git a/clang/test/ParserOpenACC/parse-clauses.c b/clang/test/ParserOpenACC/parse-clauses.c
index aedf0c711ad1..b247210ff6c7 100644
--- a/clang/test/ParserOpenACC/parse-clauses.c
+++ b/clang/test/ParserOpenACC/parse-clauses.c
@@ -148,8 +148,92 @@ void DefaultClause() {
// expected-warning@+1{{OpenACC directives not yet implemented, pragma ignored}}
#pragma acc serial default(present), seq
for(;;){}
+}
+
+void IfClause() {
+ // expected-error@+2{{expected '('}}
+ // expected-warning@+1{{OpenACC directives not yet implemented, pragma ignored}}
+#pragma acc serial loop if
+ for(;;){}
+ // expected-error@+2{{expected '('}}
+ // expected-warning@+1{{OpenACC directives not yet implemented, pragma ignored}}
+#pragma acc serial if seq
+ for(;;){}
+
+ // expected-error@+2{{expected '('}}
+ // expected-warning@+1{{OpenACC directives not yet implemented, pragma ignored}}
+#pragma acc serial if, seq
+ for(;;){}
+
+ // expected-error@+2{{expected expression}}
+ // expected-warning@+1{{OpenACC directives not yet implemented, pragma ignored}}
+#pragma acc serial if(
+ for(;;){}
+ // expected-error@+2{{use of undeclared identifier 'seq'}}
+ // expected-warning@+1{{OpenACC directives not yet implemented, pragma ignored}}
+#pragma acc serial if( seq
+ for(;;){}
+
+ // expected-error@+3{{expected expression}}
+ // expected-error@+2{{use of undeclared identifier 'seq'}}
+ // expected-warning@+1{{OpenACC directives not yet implemented, pragma ignored}}
+#pragma acc serial if(, seq
+ for(;;){}
+
+ // expected-error@+3{{expected '('}}
+ // expected-error@+2{{expected identifier}}
+ // expected-warning@+1{{OpenACC directives not yet implemented, pragma ignored}}
+#pragma acc serial if)
+ for(;;){}
+
+ // expected-error@+3{{expected '('}}
+ // expected-error@+2{{expected identifier}}
+ // expected-warning@+1{{OpenACC directives not yet implemented, pragma ignored}}
+#pragma acc serial if) seq
+ for(;;){}
+
+ // expected-error@+3{{expected '('}}
+ // expected-error@+2{{expected identifier}}
+ // expected-warning@+1{{OpenACC directives not yet implemented, pragma ignored}}
+#pragma acc serial if), seq
+ for(;;){}
+
+ // expected-error@+2{{expected expression}}
+ // expected-warning@+1{{OpenACC directives not yet implemented, pragma ignored}}
+#pragma acc serial if()
+ for(;;){}
+
+ // expected-error@+2{{expected expression}}
+ // expected-warning@+1{{OpenACC directives not yet implemented, pragma ignored}}
+#pragma acc serial if() seq
+ for(;;){}
+
+ // expected-error@+2{{expected expression}}
+ // expected-warning@+1{{OpenACC directives not yet implemented, pragma ignored}}
+#pragma acc serial if(), seq
+ for(;;){}
+
+ // expected-error@+2{{use of undeclared identifier 'invalid_expr'}}
+ // expected-warning@+1{{OpenACC directives not yet implemented, pragma ignored}}
+#pragma acc serial if(invalid_expr)
+ for(;;){}
+
+ // expected-error@+2{{expected expression}}
+ // expected-warning@+1{{OpenACC directives not yet implemented, pragma ignored}}
+#pragma acc serial if() seq
+ for(;;){}
+
+ int i, j;
+
+ // expected-warning@+1{{OpenACC directives not yet implemented, pragma ignored}}
+#pragma acc serial if(i > j)
+ for(;;){}
+
+ // expected-warning@+1{{OpenACC directives not yet implemented, pragma ignored}}
+#pragma acc serial if(1+5>3), seq
+ for(;;){}
}
// expected-warning@+1{{OpenACC directives not yet implemented, pragma ignored}}
diff --git a/clang/test/Sema/attr-format-Float16.c b/clang/test/Sema/attr-format-Float16.c
new file mode 100644
index 000000000000..6c3dfe14cec3
--- /dev/null
+++ b/clang/test/Sema/attr-format-Float16.c
@@ -0,0 +1,16 @@
+// RUN: %clang_cc1 -fsyntax-only -verify -triple i686-linux-pc -target-feature +sse2 %s
+// RUN: %clang_cc1 -fsyntax-only -verify -triple x86_64-linux-pc %s
+// RUN: %clang_cc1 -fsyntax-only -verify -triple spir-unknown-unknown %s
+// RUN: %clang_cc1 -fsyntax-only -verify -triple armv7a-linux-gnu %s
+// RUN: %clang_cc1 -fsyntax-only -verify -triple aarch64-linux-gnu %s
+// RUN: %clang_cc1 -fsyntax-only -verify -triple riscv32 %s
+// RUN: %clang_cc1 -fsyntax-only -verify -triple riscv64 %s
+
+void a(const char *a, ...) __attribute__((format(printf, 1, 2))); // no-error
+
+void b(char *a, _Float16 b) __attribute__((format(printf, 1, 2))); // expected-warning {{GCC requires a function with the 'format' attribute to be variadic}}
+
+void call_no_default_promotion(void) {
+ a("%f", (_Float16)1.0); // expected-warning{{format specifies type 'double' but the argument has type '_Float16'}}
+ b("%f", (_Float16)1.0); // expected-warning{{format specifies type 'double' but the argument has type '_Float16'}}
+}
diff --git a/clang/test/SemaCXX/attr-format-Float16.cpp b/clang/test/SemaCXX/attr-format-Float16.cpp
new file mode 100644
index 000000000000..c61611d6b6a0
--- /dev/null
+++ b/clang/test/SemaCXX/attr-format-Float16.cpp
@@ -0,0 +1,24 @@
+// RUN: %clang_cc1 -fsyntax-only -verify -triple i686-linux-pc -target-feature +sse2 %s
+// RUN: %clang_cc1 -fsyntax-only -verify -triple x86_64-linux-pc %s
+// RUN: %clang_cc1 -fsyntax-only -verify -triple spir-unknown-unknown %s
+// RUN: %clang_cc1 -fsyntax-only -verify -triple armv7a-linux-gnu %s
+// RUN: %clang_cc1 -fsyntax-only -verify -triple aarch64-linux-gnu %s
+// RUN: %clang_cc1 -fsyntax-only -verify -triple riscv32 %s
+// RUN: %clang_cc1 -fsyntax-only -verify -triple riscv64 %s
+
+template <typename... Args>
+__attribute__((format(printf, 1, 2)))
+void format(const char *fmt, Args &&...args); // expected-warning{{GCC requires a function with the 'format' attribute to be variadic}}
+
+template<typename... Args>
+__attribute__((format(scanf, 1, 2)))
+int scan(const char *fmt, Args &&...args); // expected-warning{{GCC requires a function with the 'format' attribute to be variadic}}
+
+void do_format() {
+ format("%f", (_Float16)123.f); // expected-warning{{format specifies type 'double' but the argument has type '_Float16'}}
+
+ _Float16 Float16;
+ scan("%f", &Float16); // expected-warning{{format specifies type 'float *' but the argument has type '_Float16 *'}}
+ scan("%lf", &Float16); // expected-warning{{format specifies type 'double *' but the argument has type '_Float16 *'}}
+ scan("%Lf", &Float16); // expected-warning{{format specifies type 'long double *' but the argument has type '_Float16 *'}}
+}
diff --git a/clang/unittests/Format/FormatTest.cpp b/clang/unittests/Format/FormatTest.cpp
index 881993ede17c..25ef5c680af8 100644
--- a/clang/unittests/Format/FormatTest.cpp
+++ b/clang/unittests/Format/FormatTest.cpp
@@ -26708,6 +26708,8 @@ TEST_F(FormatTest, PPBranchesInBracedInit) {
TEST_F(FormatTest, StreamOutputOperator) {
verifyFormat("std::cout << \"foo\" << \"bar\" << baz;");
+ verifyFormat("std::cout << \"foo\\n\"\n"
+ " << \"bar\";");
}
TEST_F(FormatTest, BreakAdjacentStringLiterals) {
diff --git a/clang/unittests/Format/TokenAnnotatorTest.cpp b/clang/unittests/Format/TokenAnnotatorTest.cpp
index 2cafc0438ffb..decc0785c5cd 100644
--- a/clang/unittests/Format/TokenAnnotatorTest.cpp
+++ b/clang/unittests/Format/TokenAnnotatorTest.cpp
@@ -2499,6 +2499,15 @@ TEST_F(TokenAnnotatorTest, BraceKind) {
EXPECT_BRACE_KIND(Tokens[6], BK_Block);
}
+TEST_F(TokenAnnotatorTest, StreamOperator) {
+ auto Tokens = annotate("\"foo\\n\" << aux << \"foo\\n\" << \"foo\";");
+ ASSERT_EQ(Tokens.size(), 9u) << Tokens;
+ EXPECT_FALSE(Tokens[1]->MustBreakBefore);
+ EXPECT_FALSE(Tokens[3]->MustBreakBefore);
+ // Only break between string literals if the former ends with \n.
+ EXPECT_TRUE(Tokens[5]->MustBreakBefore);
+}
+
} // namespace
} // namespace format
} // namespace clang
diff --git a/compiler-rt/lib/msan/msan.h b/compiler-rt/lib/msan/msan.h
index 25fa2212bdad..753e6b260734 100644
--- a/compiler-rt/lib/msan/msan.h
+++ b/compiler-rt/lib/msan/msan.h
@@ -255,18 +255,19 @@ char *GetProcSelfMaps();
void InitializeInterceptors();
void MsanAllocatorInit();
-void MsanDeallocate(StackTrace *stack, void *ptr);
-
-void *msan_malloc(uptr size, StackTrace *stack);
-void *msan_calloc(uptr nmemb, uptr size, StackTrace *stack);
-void *msan_realloc(void *ptr, uptr size, StackTrace *stack);
-void *msan_reallocarray(void *ptr, uptr nmemb, uptr size, StackTrace *stack);
-void *msan_valloc(uptr size, StackTrace *stack);
-void *msan_pvalloc(uptr size, StackTrace *stack);
-void *msan_aligned_alloc(uptr alignment, uptr size, StackTrace *stack);
-void *msan_memalign(uptr alignment, uptr size, StackTrace *stack);
+void MsanDeallocate(BufferedStackTrace *stack, void *ptr);
+
+void *msan_malloc(uptr size, BufferedStackTrace *stack);
+void *msan_calloc(uptr nmemb, uptr size, BufferedStackTrace *stack);
+void *msan_realloc(void *ptr, uptr size, BufferedStackTrace *stack);
+void *msan_reallocarray(void *ptr, uptr nmemb, uptr size,
+ BufferedStackTrace *stack);
+void *msan_valloc(uptr size, BufferedStackTrace *stack);
+void *msan_pvalloc(uptr size, BufferedStackTrace *stack);
+void *msan_aligned_alloc(uptr alignment, uptr size, BufferedStackTrace *stack);
+void *msan_memalign(uptr alignment, uptr size, BufferedStackTrace *stack);
int msan_posix_memalign(void **memptr, uptr alignment, uptr size,
- StackTrace *stack);
+ BufferedStackTrace *stack);
void InstallTrapHandler();
void InstallAtExitHandler();
diff --git a/compiler-rt/lib/msan/msan_allocator.cpp b/compiler-rt/lib/msan/msan_allocator.cpp
index 72a7f980d39f..987c894c79d4 100644
--- a/compiler-rt/lib/msan/msan_allocator.cpp
+++ b/compiler-rt/lib/msan/msan_allocator.cpp
@@ -178,7 +178,7 @@ void MsanThreadLocalMallocStorage::CommitBack() {
allocator.DestroyCache(GetAllocatorCache(this));
}
-static void *MsanAllocate(StackTrace *stack, uptr size, uptr alignment,
+static void *MsanAllocate(BufferedStackTrace *stack, uptr size, uptr alignment,
bool zeroise) {
if (size > max_malloc_size) {
if (AllocatorMayReturnNull()) {
@@ -229,7 +229,7 @@ static void *MsanAllocate(StackTrace *stack, uptr size, uptr alignment,
return allocated;
}
-void MsanDeallocate(StackTrace *stack, void *p) {
+void MsanDeallocate(BufferedStackTrace *stack, void *p) {
CHECK(p);
UnpoisonParam(1);
RunFreeHooks(p);
@@ -259,8 +259,8 @@ void MsanDeallocate(StackTrace *stack, void *p) {
}
}
-static void *MsanReallocate(StackTrace *stack, void *old_p, uptr new_size,
- uptr alignment) {
+static void *MsanReallocate(BufferedStackTrace *stack, void *old_p,
+ uptr new_size, uptr alignment) {
Metadata *meta = reinterpret_cast<Metadata*>(allocator.GetMetaData(old_p));
uptr old_size = meta->requested_size;
uptr actually_allocated_size = allocator.GetActuallyAllocatedSize(old_p);
@@ -284,7 +284,7 @@ static void *MsanReallocate(StackTrace *stack, void *old_p, uptr new_size,
return new_p;
}
-static void *MsanCalloc(StackTrace *stack, uptr nmemb, uptr size) {
+static void *MsanCalloc(BufferedStackTrace *stack, uptr nmemb, uptr size) {
if (UNLIKELY(CheckForCallocOverflow(size, nmemb))) {
if (AllocatorMayReturnNull())
return nullptr;
@@ -320,15 +320,15 @@ static uptr AllocationSizeFast(const void *p) {
return reinterpret_cast<Metadata *>(allocator.GetMetaData(p))->requested_size;
}
-void *msan_malloc(uptr size, StackTrace *stack) {
+void *msan_malloc(uptr size, BufferedStackTrace *stack) {
return SetErrnoOnNull(MsanAllocate(stack, size, sizeof(u64), false));
}
-void *msan_calloc(uptr nmemb, uptr size, StackTrace *stack) {
+void *msan_calloc(uptr nmemb, uptr size, BufferedStackTrace *stack) {
return SetErrnoOnNull(MsanCalloc(stack, nmemb, size));
}
-void *msan_realloc(void *ptr, uptr size, StackTrace *stack) {
+void *msan_realloc(void *ptr, uptr size, BufferedStackTrace *stack) {
if (!ptr)
return SetErrnoOnNull(MsanAllocate(stack, size, sizeof(u64), false));
if (size == 0) {
@@ -338,7 +338,8 @@ void *msan_realloc(void *ptr, uptr size, StackTrace *stack) {
return SetErrnoOnNull(MsanReallocate(stack, ptr, size, sizeof(u64)));
}
-void *msan_reallocarray(void *ptr, uptr nmemb, uptr size, StackTrace *stack) {
+void *msan_reallocarray(void *ptr, uptr nmemb, uptr size,
+ BufferedStackTrace *stack) {
if (UNLIKELY(CheckForCallocOverflow(size, nmemb))) {
errno = errno_ENOMEM;
if (AllocatorMayReturnNull())
@@ -348,11 +349,11 @@ void *msan_reallocarray(void *ptr, uptr nmemb, uptr size, StackTrace *stack) {
return msan_realloc(ptr, nmemb * size, stack);
}
-void *msan_valloc(uptr size, StackTrace *stack) {
+void *msan_valloc(uptr size, BufferedStackTrace *stack) {
return SetErrnoOnNull(MsanAllocate(stack, size, GetPageSizeCached(), false));
}
-void *msan_pvalloc(uptr size, StackTrace *stack) {
+void *msan_pvalloc(uptr size, BufferedStackTrace *stack) {
uptr PageSize = GetPageSizeCached();
if (UNLIKELY(CheckForPvallocOverflow(size, PageSize))) {
errno = errno_ENOMEM;
@@ -365,7 +366,7 @@ void *msan_pvalloc(uptr size, StackTrace *stack) {
return SetErrnoOnNull(MsanAllocate(stack, size, PageSize, false));
}
-void *msan_aligned_alloc(uptr alignment, uptr size, StackTrace *stack) {
+void *msan_aligned_alloc(uptr alignment, uptr size, BufferedStackTrace *stack) {
if (UNLIKELY(!CheckAlignedAllocAlignmentAndSize(alignment, size))) {
errno = errno_EINVAL;
if (AllocatorMayReturnNull())
@@ -375,7 +376,7 @@ void *msan_aligned_alloc(uptr alignment, uptr size, StackTrace *stack) {
return SetErrnoOnNull(MsanAllocate(stack, size, alignment, false));
}
-void *msan_memalign(uptr alignment, uptr size, StackTrace *stack) {
+void *msan_memalign(uptr alignment, uptr size, BufferedStackTrace *stack) {
if (UNLIKELY(!IsPowerOfTwo(alignment))) {
errno = errno_EINVAL;
if (AllocatorMayReturnNull())
@@ -386,7 +387,7 @@ void *msan_memalign(uptr alignment, uptr size, StackTrace *stack) {
}
int msan_posix_memalign(void **memptr, uptr alignment, uptr size,
- StackTrace *stack) {
+ BufferedStackTrace *stack) {
if (UNLIKELY(!CheckPosixMemalignAlignment(alignment))) {
if (AllocatorMayReturnNull())
return errno_EINVAL;
diff --git a/flang/lib/Frontend/FrontendActions.cpp b/flang/lib/Frontend/FrontendActions.cpp
index d4a3e164d207..1f8174bdcf22 100644
--- a/flang/lib/Frontend/FrontendActions.cpp
+++ b/flang/lib/Frontend/FrontendActions.cpp
@@ -743,9 +743,6 @@ void CodeGenAction::generateLLVMIR() {
MLIRToLLVMPassPipelineConfig config(level, opts);
- const auto targetOpts = ci.getInvocation().getTargetOpts();
- const llvm::Triple triple(targetOpts.triple);
-
if (auto vsr = getVScaleRange(ci)) {
config.VScaleMin = vsr->first;
config.VScaleMax = vsr->second;
diff --git a/flang/lib/Optimizer/Transforms/AddDebugFoundation.cpp b/flang/lib/Optimizer/Transforms/AddDebugFoundation.cpp
index be8f26dc678c..16b8db7ec8c6 100644
--- a/flang/lib/Optimizer/Transforms/AddDebugFoundation.cpp
+++ b/flang/lib/Optimizer/Transforms/AddDebugFoundation.cpp
@@ -65,9 +65,9 @@ void AddDebugFoundationPass::runOnOperation() {
mlir::LLVM::DIFileAttr fileAttr = getFileAttr(inputFilePath);
mlir::StringAttr producer = mlir::StringAttr::get(context, "Flang");
mlir::LLVM::DICompileUnitAttr cuAttr = mlir::LLVM::DICompileUnitAttr::get(
- context, llvm::dwarf::getLanguage("DW_LANG_Fortran95"), fileAttr,
- producer, /*isOptimized=*/false,
- mlir::LLVM::DIEmissionKind::LineTablesOnly);
+ context, mlir::DistinctAttr::create(mlir::UnitAttr::get(context)),
+ llvm::dwarf::getLanguage("DW_LANG_Fortran95"), fileAttr, producer,
+ /*isOptimized=*/false, mlir::LLVM::DIEmissionKind::LineTablesOnly);
module.walk([&](mlir::func::FuncOp funcOp) {
mlir::Location l = funcOp->getLoc();
@@ -93,8 +93,18 @@ void AddDebugFoundationPass::runOnOperation() {
context, llvm::dwarf::getCallingConvention("DW_CC_normal"),
{bT, bT});
mlir::LLVM::DIFileAttr funcFileAttr = getFileAttr(funcFilePath);
+
+ // Only definitions need a distinct identifier and a compilation unit.
+ mlir::DistinctAttr id;
+ mlir::LLVM::DICompileUnitAttr compilationUnit;
+ if (!funcOp.isExternal()) {
+ id = mlir::DistinctAttr::create(mlir::UnitAttr::get(context));
+ compilationUnit = cuAttr;
+ }
mlir::LLVM::DISubprogramAttr spAttr = mlir::LLVM::DISubprogramAttr::get(
- context, cuAttr, fileAttr, funcName, funcName, funcFileAttr, /*line=*/1,
+ context, id, compilationUnit, fileAttr, funcName, funcName,
+ funcFileAttr,
+ /*line=*/1,
/*scopeline=*/1, mlir::LLVM::DISubprogramFlags::Definition,
subTypeAttr);
funcOp->setLoc(builder.getFusedLoc({funcOp->getLoc()}, spAttr));
diff --git a/flang/test/Transforms/debug-line-table-existing.fir b/flang/test/Transforms/debug-line-table-existing.fir
index 3585ef9ee2c4..3c81d75dbd66 100644
--- a/flang/test/Transforms/debug-line-table-existing.fir
+++ b/flang/test/Transforms/debug-line-table-existing.fir
@@ -12,7 +12,7 @@ module attributes {} {
#di_file = #llvm.di_file<"simple.f90" in "/home/user01/llvm-project/build_release">
#loc = loc("/home/user01/llvm-project/build_release/simple.f90":0:0)
#loc1 = loc("/home/user01/llvm-project/build_release/simple.f90":1:1)
-#di_compile_unit = #llvm.di_compile_unit<sourceLanguage = DW_LANG_Fortran95, file = #di_file, producer = "Flang", isOptimized = false, emissionKind = LineTablesOnly>
+#di_compile_unit = #llvm.di_compile_unit<id = distinct[0]<>, sourceLanguage = DW_LANG_Fortran95, file = #di_file, producer = "Flang", isOptimized = false, emissionKind = LineTablesOnly>
#di_subroutine_type = #llvm.di_subroutine_type<callingConvention = DW_CC_normal, types = #di_basic_type, #di_basic_type>
#di_subprogram = #llvm.di_subprogram<compileUnit = #di_compile_unit, scope = #di_file, name = "_QPs1", linkageName = "_QPs1", file = #di_file, line = 1, scopeLine = 1, subprogramFlags = Definition, type = #di_subroutine_type>
#loc2 = loc(fused<#di_subprogram>[#loc1])
diff --git a/flang/test/Transforms/debug-line-table-inc-file.fir b/flang/test/Transforms/debug-line-table-inc-file.fir
index 5fdc384e0bdb..f809ab99b472 100644
--- a/flang/test/Transforms/debug-line-table-inc-file.fir
+++ b/flang/test/Transforms/debug-line-table-inc-file.fir
@@ -30,8 +30,8 @@ module attributes {} {
// CHECK: #[[MODULE_LOC]] = loc("{{.*}}simple.f90":0:0)
// CHECK: #[[LOC_INC_FILE:.*]] = loc("{{.*}}inc.f90":1:1)
// CHECK: #[[LOC_FILE:.*]] = loc("{{.*}}simple.f90":3:1)
-// CHECK: #[[DI_CU:.*]] = #llvm.di_compile_unit<sourceLanguage = DW_LANG_Fortran95, file = #[[DI_FILE]], producer = "Flang", isOptimized = false, emissionKind = LineTablesOnly>
-// CHECK: #[[DI_SP_INC:.*]] = #llvm.di_subprogram<compileUnit = #[[DI_CU]], scope = #[[DI_FILE]], name = "_QPsinc", linkageName = "_QPsinc", file = #[[DI_INC_FILE]], {{.*}}>
-// CHECK: #[[DI_SP:.*]] = #llvm.di_subprogram<compileUnit = #[[DI_CU]], scope = #[[DI_FILE]], name = "_QQmain", linkageName = "_QQmain", file = #[[DI_FILE]], {{.*}}>
+// CHECK: #[[DI_CU:.*]] = #llvm.di_compile_unit<id = distinct[{{.*}}]<>, sourceLanguage = DW_LANG_Fortran95, file = #[[DI_FILE]], producer = "Flang", isOptimized = false, emissionKind = LineTablesOnly>
+// CHECK: #[[DI_SP_INC:.*]] = #llvm.di_subprogram<id = distinct[{{.*}}]<>, compileUnit = #[[DI_CU]], scope = #[[DI_FILE]], name = "_QPsinc", linkageName = "_QPsinc", file = #[[DI_INC_FILE]], {{.*}}>
+// CHECK: #[[DI_SP:.*]] = #llvm.di_subprogram<id = distinct[{{.*}}]<>, compileUnit = #[[DI_CU]], scope = #[[DI_FILE]], name = "_QQmain", linkageName = "_QQmain", file = #[[DI_FILE]], {{.*}}>
// CHECK: #[[FUSED_LOC_INC_FILE]] = loc(fused<#[[DI_SP_INC]]>[#[[LOC_INC_FILE]]])
// CHECK: #[[FUSED_LOC_FILE]] = loc(fused<#[[DI_SP]]>[#[[LOC_FILE]]])
diff --git a/flang/test/Transforms/debug-line-table.fir b/flang/test/Transforms/debug-line-table.fir
index fa59aeb4aa3a..f091d97ce89e 100644
--- a/flang/test/Transforms/debug-line-table.fir
+++ b/flang/test/Transforms/debug-line-table.fir
@@ -5,20 +5,26 @@ module attributes { fir.defaultkind = "a1c4d8i4l4r4", fir.kindmap = "", llvm.dat
func.func @_QPsb() {
return loc(#loc_sb)
} loc(#loc_sb)
+ func.func private @decl() -> i32 loc(#loc_decl)
} loc(#loc_module)
#loc_module = loc("./simple.f90":1:1)
#loc_sb = loc("./simple.f90":2:1)
+#loc_decl = loc("./simple.f90":10:1)
// CHECK: module attributes
// CHECK: func.func @[[SB_NAME:.*]]() {
// CHECK: return loc(#[[SB_LOC:.*]])
// CHECK: } loc(#[[FUSED_SB_LOC:.*]])
+// CHECK: func.func private @[[DECL_NAME:.*]]() -> i32 loc(#[[FUSED_DECL_LOC:.*]])
// CHECK: } loc(#[[MODULE_LOC:.*]])
// CHECK: #di_basic_type = #llvm.di_basic_type<tag = DW_TAG_base_type, name = "void", encoding = DW_ATE_address>
// CHECK: #di_file = #llvm.di_file<"[[FILE_NAME:.*]]" in "[[DIR_NAME:.*]]">
// CHECK: #[[MODULE_LOC]] = loc("[[DIR_NAME]]/[[FILE_NAME]]":1:1)
// CHECK: #[[SB_LOC]] = loc("./simple.f90":2:1)
-// CHECK: #di_compile_unit = #llvm.di_compile_unit<sourceLanguage = DW_LANG_Fortran95, file = #di_file, producer = "Flang", isOptimized = false, emissionKind = LineTablesOnly>
+// CHECK: #[[DECL_LOC:.*]] = loc("./simple.f90":10:1)
+// CHECK: #di_compile_unit = #llvm.di_compile_unit<id = distinct[{{.*}}]<>, sourceLanguage = DW_LANG_Fortran95, file = #di_file, producer = "Flang", isOptimized = false, emissionKind = LineTablesOnly>
// CHECK: #di_subroutine_type = #llvm.di_subroutine_type<callingConvention = DW_CC_normal, types = #di_basic_type, #di_basic_type>
-// CHECK: #di_subprogram = #llvm.di_subprogram<compileUnit = #di_compile_unit, scope = #di_file, name = "[[SB_NAME]]", linkageName = "[[SB_NAME]]", file = #di_file, line = 1, scopeLine = 1, subprogramFlags = Definition, type = #di_subroutine_type>
-// CHECK: #[[FUSED_SB_LOC]] = loc(fused<#di_subprogram>[#[[SB_LOC]]])
+// CHECK: #[[SB_SUBPROGRAM:.*]] = #llvm.di_subprogram<id = distinct[{{.*}}]<>, compileUnit = #di_compile_unit, scope = #di_file, name = "[[SB_NAME]]", linkageName = "[[SB_NAME]]", file = #di_file, line = 1, scopeLine = 1, subprogramFlags = Definition, type = #di_subroutine_type>
+// CHECK: #[[DECL_SUBPROGRAM:.*]] = #llvm.di_subprogram<scope = #di_file, name = "[[DECL_NAME]]", linkageName = "[[DECL_NAME]]", file = #di_file, line = 1, scopeLine = 1, subprogramFlags = Definition, type = #di_subroutine_type>
+// CHECK: #[[FUSED_SB_LOC]] = loc(fused<#[[SB_SUBPROGRAM]]>[#[[SB_LOC]]])
+// CHECK: #[[FUSED_DECL_LOC]] = loc(fused<#[[DECL_SUBPROGRAM]]>[#[[DECL_LOC]]])
diff --git a/flang/unittests/Runtime/ExternalIOTest.cpp b/flang/unittests/Runtime/ExternalIOTest.cpp
index 76fdb6cb68ae..13327964e12a 100644
--- a/flang/unittests/Runtime/ExternalIOTest.cpp
+++ b/flang/unittests/Runtime/ExternalIOTest.cpp
@@ -935,17 +935,13 @@ TEST(ExternalIOTests, BigUnitNumbers) {
IONAME(CheckUnitNumberInRange64)(unit64Bad, true), IostatUnitOverflow);
EXPECT_EQ(
IONAME(CheckUnitNumberInRange64)(unit64Bad2, true), IostatUnitOverflow);
- EXPECT_EQ(
- IONAME(CheckUnitNumberInRange64)(unit64Bad, true), IostatUnitOverflow);
- EXPECT_EQ(
- IONAME(CheckUnitNumberInRange64)(unit64Bad2, true), IostatUnitOverflow);
constexpr std::size_t n{80};
char expectedMsg[n + 1];
expectedMsg[n] = '\0';
std::snprintf(expectedMsg, n, "UNIT number %jd is out of range",
static_cast<std::intmax_t>(unit64Bad));
EXPECT_DEATH(
- IONAME(CheckUnitNumberInRange64)(2147483648, false), expectedMsg);
+ IONAME(CheckUnitNumberInRange64)(unit64Bad, false), expectedMsg);
for (auto i{std::strlen(expectedMsg)}; i < n; ++i) {
expectedMsg[i] = ' ';
}
diff --git a/libc/cmake/modules/LLVMLibCObjectRules.cmake b/libc/cmake/modules/LLVMLibCObjectRules.cmake
index c3e3fa2dccfe..6eba17ae9201 100644
--- a/libc/cmake/modules/LLVMLibCObjectRules.cmake
+++ b/libc/cmake/modules/LLVMLibCObjectRules.cmake
@@ -43,6 +43,10 @@ function(_get_common_compile_options output_var flags)
list(APPEND compile_options "-fno-rtti")
list(APPEND compile_options "-Wall")
list(APPEND compile_options "-Wextra")
+ # Pass -DLIBC_WNO_ERROR=ON if you can't build cleanly with -Werror.
+ if(NOT LIBC_WNO_ERROR)
+ list(APPEND compile_options "-Werror")
+ endif()
list(APPEND compile_options "-Wconversion")
list(APPEND compile_options "-Wno-sign-conversion")
list(APPEND compile_options "-Wimplicit-fallthrough")
diff --git a/libc/cmake/modules/LLVMLibCTestRules.cmake b/libc/cmake/modules/LLVMLibCTestRules.cmake
index b69839afebf8..24f543f6e4c1 100644
--- a/libc/cmake/modules/LLVMLibCTestRules.cmake
+++ b/libc/cmake/modules/LLVMLibCTestRules.cmake
@@ -498,10 +498,14 @@ function(add_integration_test test_name)
libc.src.string.memcpy
libc.src.string.memmove
libc.src.string.memset
- # __stack_chk_fail should always be included to allow building libc with
- # stack protector.
- libc.src.compiler.__stack_chk_fail
)
+
+ if(libc.src.compiler.__stack_chk_fail IN_LIST TARGET_LLVMLIBC_ENTRYPOINTS)
+ # __stack_chk_fail should always be included if supported to allow building
+ # libc with the stack protector enabled.
+ list(APPEND fq_deps_list libc.src.compiler.__stack_chk_fail)
+ endif()
+
list(REMOVE_DUPLICATES fq_deps_list)
# TODO: Instead of gathering internal object files from entrypoints,
@@ -668,12 +672,15 @@ function(add_libc_hermetic_test test_name)
libc.src.string.memmove
libc.src.string.memset
libc.src.__support.StringUtil.error_to_string
- # __stack_chk_fail should always be included to allow building libc with
- # stack protector.
- libc.src.compiler.__stack_chk_fail
)
- if(TARGET libc.src.time.clock)
+ if(libc.src.compiler.__stack_chk_fail IN_LIST TARGET_LLVMLIBC_ENTRYPOINTS)
+ # __stack_chk_fail should always be included if supported to allow building
+ # libc with the stack protector enabled.
+ list(APPEND fq_deps_list libc.src.compiler.__stack_chk_fail)
+ endif()
+
+ if(libc.src.time.clock IN_LIST TARGET_LLVMLIBC_ENTRYPOINTS)
# We will link in the 'clock' implementation if it exists for test timing.
list(APPEND fq_deps_list libc.src.time.clock)
endif()
diff --git a/libc/docs/dev/code_style.rst b/libc/docs/dev/code_style.rst
index a050a4c1d3dd..eeeced0359ad 100644
--- a/libc/docs/dev/code_style.rst
+++ b/libc/docs/dev/code_style.rst
@@ -178,3 +178,11 @@ these functions do not call the constructors and destructors of the
allocated/deallocated objects. So, use these functions carefully and only
when it is absolutely clear that constructor and destructor invocation is
not required.
+
+Warnings in sources
+===================
+
+We expect contributions to be free of warnings from the `minimum supported
+compiler versions`__ (and newer).
+
+.. __: https://libc.llvm.org/compiler_support.html#minimum-supported-versions
diff --git a/libc/include/CMakeLists.txt b/libc/include/CMakeLists.txt
index 59c6c4a9bb42..2c2d1b9b0fd1 100644
--- a/libc/include/CMakeLists.txt
+++ b/libc/include/CMakeLists.txt
@@ -184,24 +184,28 @@ add_gen_header(
.llvm-libc-macros.generic_error_number_macros
)
-add_gen_header(
- signal
- DEF_FILE signal.h.def
- PARAMS
- platform_signal=../config/${LIBC_TARGET_OS}/signal.h.in
- GEN_HDR signal.h
- DATA_FILES
- ../config/${LIBC_TARGET_OS}/signal.h.in
- DEPENDS
- .llvm-libc-macros.signal_macros
- .llvm-libc-types.sig_atomic_t
- .llvm-libc-types.sigset_t
- .llvm-libc-types.struct_sigaction
- .llvm-libc-types.union_sigval
- .llvm-libc-types.siginfo_t
- .llvm-libc-types.stack_t
- .llvm-libc-types.pid_t
-)
+if(EXISTS "${LIBC_SOURCE_DIR}/config/${LIBC_TARGET_OS}/signal.h.in")
+ add_gen_header(
+ signal
+ DEF_FILE signal.h.def
+ PARAMS
+ platform_signal=../config/${LIBC_TARGET_OS}/signal.h.in
+ GEN_HDR signal.h
+ DATA_FILES
+ ../config/${LIBC_TARGET_OS}/signal.h.in
+ DEPENDS
+ .llvm-libc-macros.signal_macros
+ .llvm-libc-types.sig_atomic_t
+ .llvm-libc-types.sigset_t
+ .llvm-libc-types.struct_sigaction
+ .llvm-libc-types.union_sigval
+ .llvm-libc-types.siginfo_t
+ .llvm-libc-types.stack_t
+ .llvm-libc-types.pid_t
+ )
+else()
+ message(STATUS "Skipping header signal.h as the target config is missing")
+endif()
add_gen_header(
stdio
diff --git a/libc/src/__support/HashTable/generic/bitmask_impl.inc b/libc/src/__support/HashTable/generic/bitmask_impl.inc
index b825cb5fbc44..56b540d568d0 100644
--- a/libc/src/__support/HashTable/generic/bitmask_impl.inc
+++ b/libc/src/__support/HashTable/generic/bitmask_impl.inc
@@ -113,7 +113,8 @@ struct Group {
}
LIBC_INLINE IteratableBitMask occupied() const {
- return {static_cast<bitmask_t>(mask_available().word ^ repeat_byte(0x80))};
+ return {
+ {static_cast<bitmask_t>(mask_available().word ^ repeat_byte(0x80))}};
}
};
} // namespace internal
diff --git a/libc/src/__support/HashTable/table.h b/libc/src/__support/HashTable/table.h
index d70ca4d23380..288829b1cac9 100644
--- a/libc/src/__support/HashTable/table.h
+++ b/libc/src/__support/HashTable/table.h
@@ -333,7 +333,7 @@ public:
return {0, full_capacity() - available_slots,
Group::load_aligned(&control(0)).occupied(), *this};
}
- iterator end() const { return {0, 0, {0}, *this}; }
+ iterator end() const { return {0, 0, {BitMask{0}}, *this}; }
LIBC_INLINE ENTRY *find(const char *key) {
uint64_t primary = oneshot_hash(key);
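
The two HashTable hunks above add an extra level of braces around the initializers: IteratableBitMask wraps a BitMask, so the fully-braced form spells out the nested aggregate instead of relying on brace elision, presumably to keep the build clean under the -Werror switch added in LLVMLibCObjectRules.cmake earlier in this patch (-Wmissing-braces fires on elided braces). A minimal stand-alone sketch of the pattern, with hypothetical types standing in for BitMask/IteratableBitMask:

#include <cstdint>

struct Mask {          // innermost aggregate, akin to BitMask
  std::uint64_t word;
};

struct IterableMask {  // wrapper aggregate, akin to IteratableBitMask
  Mask mask;
};

IterableMask occupied_sketch() {
  // return {0x80};    // legal via brace elision, but -Wmissing-braces warns
  return {{0x80}};     // explicit inner braces: quiet under -Wall -Werror
}
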
diff --git a/libc/src/__support/StringUtil/CMakeLists.txt b/libc/src/__support/StringUtil/CMakeLists.txt
index c053966d5418..41b20dc2cb11 100644
--- a/libc/src/__support/StringUtil/CMakeLists.txt
+++ b/libc/src/__support/StringUtil/CMakeLists.txt
@@ -51,19 +51,21 @@ add_object_library(
libc.src.__support.integer_to_string
)
-add_object_library(
- signal_to_string
- HDRS
- signal_to_string.h
- SRCS
- signal_to_string.cpp
- DEPENDS
- .message_mapper
- .platform_signals
- libc.include.signal
- libc.src.__support.common
- libc.src.__support.CPP.span
- libc.src.__support.CPP.string_view
- libc.src.__support.CPP.stringstream
- libc.src.__support.integer_to_string
-)
+if(TARGET libc.include.signal)
+ add_object_library(
+ signal_to_string
+ HDRS
+ signal_to_string.h
+ SRCS
+ signal_to_string.cpp
+ DEPENDS
+ .message_mapper
+ .platform_signals
+ libc.include.signal
+ libc.src.__support.common
+ libc.src.__support.CPP.span
+ libc.src.__support.CPP.string_view
+ libc.src.__support.CPP.stringstream
+ libc.src.__support.integer_to_string
+ )
+endif()
diff --git a/libc/src/__support/threads/linux/CMakeLists.txt b/libc/src/__support/threads/linux/CMakeLists.txt
index 642eead72772..148a0ba061c5 100644
--- a/libc/src/__support/threads/linux/CMakeLists.txt
+++ b/libc/src/__support/threads/linux/CMakeLists.txt
@@ -39,6 +39,8 @@ add_object_library(
-O3
-fno-omit-frame-pointer # This allows us to sniff out the thread args from
# the new thread's stack reliably.
+ -Wno-frame-address # Yes, calling __builtin_return_address with a
+ # value other than 0 is dangerous. We know.
)
add_object_library(
diff --git a/libcxx/cmake/caches/Armv7M-picolibc.cmake b/libcxx/cmake/caches/Armv7M-picolibc.cmake
index 91cc32fd376e..3ab80b960ed4 100644
--- a/libcxx/cmake/caches/Armv7M-picolibc.cmake
+++ b/libcxx/cmake/caches/Armv7M-picolibc.cmake
@@ -39,4 +39,4 @@ set(LIBUNWIND_ENABLE_THREADS OFF CACHE BOOL "")
set(LIBUNWIND_IS_BAREMETAL ON CACHE BOOL "")
set(LIBUNWIND_REMEMBER_HEAP_ALLOC ON CACHE BOOL "")
set(LIBUNWIND_USE_COMPILER_RT ON CACHE BOOL "")
-find_program(QEMU_SYSTEM_ARM qemu-system-arm)
+find_program(QEMU_SYSTEM_ARM qemu-system-arm REQUIRED)
diff --git a/libcxx/docs/FeatureTestMacroTable.rst b/libcxx/docs/FeatureTestMacroTable.rst
index 8ce5ec9f64ef..893a3b13ca06 100644
--- a/libcxx/docs/FeatureTestMacroTable.rst
+++ b/libcxx/docs/FeatureTestMacroTable.rst
@@ -418,7 +418,7 @@ Status
--------------------------------------------------- -----------------
``__cpp_lib_freestanding_variant`` *unimplemented*
--------------------------------------------------- -----------------
- ``__cpp_lib_fstream_native_handle`` *unimplemented*
+ ``__cpp_lib_fstream_native_handle`` ``202306L``
--------------------------------------------------- -----------------
``__cpp_lib_function_ref`` *unimplemented*
--------------------------------------------------- -----------------
diff --git a/libcxx/docs/ReleaseNotes/18.rst b/libcxx/docs/ReleaseNotes/18.rst
index cae2347be5fd..d20c19f3a29f 100644
--- a/libcxx/docs/ReleaseNotes/18.rst
+++ b/libcxx/docs/ReleaseNotes/18.rst
@@ -41,9 +41,8 @@ What's New in Libc++ 18.0.0?
Implemented Papers
------------------
-- P2093R14 Formatted output
-- P2539R4 Should the output of ``std::print`` to a terminal be synchronized with the underlying stream?
-
+- P2093R14 - Formatted output
+- P2539R4 - Should the output of ``std::print`` to a terminal be synchronized with the underlying stream?
- P2497R0 - Testing for success or failure of ``<charconv>`` functions
- P2697R1 - Interfacing ``bitset`` with ``string_view``
- P2443R1 - ``views::chunk_by``
@@ -59,6 +58,7 @@ Implemented Papers
- P2909R4 - Fix formatting of code units as integers (Dude, where’s my ``char``?)
- P2821R5 - span.at()
- P0521R0 - Proposed Resolution for CA 14 (shared_ptr use_count/unique)
+- P1759R6 - Native handles and file streams
Improvements and New Features
diff --git a/libcxx/docs/Status/Cxx20Issues.csv b/libcxx/docs/Status/Cxx20Issues.csv
index e07db8d919dd..964c21df97e2 100644
--- a/libcxx/docs/Status/Cxx20Issues.csv
+++ b/libcxx/docs/Status/Cxx20Issues.csv
@@ -176,7 +176,7 @@
"`3245 <https://wg21.link/LWG3245>`__","Unnecessary restriction on ``'%p'``\ parse specifier","Belfast","","","|chrono|"
"`3244 <https://wg21.link/LWG3244>`__","Constraints for ``Source``\ in |sect|\ [fs.path.req] insufficiently constrainty","Belfast","",""
"`3241 <https://wg21.link/LWG3241>`__","``chrono-spec``\ grammar ambiguity in |sect|\ [time.format]","Belfast","|Complete|","16.0","|chrono| |format|"
-"`3257 <https://wg21.link/LWG3257>`__","Missing feature testing macro update from P0858","Belfast","",""
+"`3257 <https://wg21.link/LWG3257>`__","Missing feature testing macro update from P0858","Belfast","|Complete|","12.0"
"`3256 <https://wg21.link/LWG3256>`__","Feature testing macro for ``constexpr``\ algorithms","Belfast","|Complete|","13.0"
"`3273 <https://wg21.link/LWG3273>`__","Specify ``weekday_indexed``\ to range of ``[0, 7]``\ ","Belfast","|Complete|","16.0","|chrono|"
"`3070 <https://wg21.link/LWG3070>`__","``path::lexically_relative``\ causes surprising results if a filename can also be a *root-name*","Belfast","",""
diff --git a/libcxx/docs/Status/Cxx2cPapers.csv b/libcxx/docs/Status/Cxx2cPapers.csv
index fa4a112d1436..5701717f3976 100644
--- a/libcxx/docs/Status/Cxx2cPapers.csv
+++ b/libcxx/docs/Status/Cxx2cPapers.csv
@@ -19,7 +19,7 @@
"`P2757R3 <https://wg21.link/P2757R3>`__","LWG","Type-checking format args","Varna June 2023","","","|format|"
"`P2637R3 <https://wg21.link/P2637R3>`__","LWG","Member ``visit``","Varna June 2023","","","|format|"
"`P2641R4 <https://wg21.link/P2641R4>`__","CWG, LWG","Checking if a ``union`` alternative is active","Varna June 2023","","",""
-"`P1759R6 <https://wg21.link/P1759R6>`__","LWG","Native handles and file streams","Varna June 2023","","",""
+"`P1759R6 <https://wg21.link/P1759R6>`__","LWG","Native handles and file streams","Varna June 2023","|Complete|","18.0",""
"`P2697R1 <https://wg21.link/P2697R1>`__","LWG","Interfacing ``bitset`` with ``string_view``","Varna June 2023","|Complete|","18.0",""
"`P1383R2 <https://wg21.link/P1383R2>`__","LWG","More ``constexpr`` for ``<cmath>`` and ``<complex>``","Varna June 2023","","",""
"`P2734R0 <https://wg21.link/P2734R0>`__","LWG","Adding the new SI prefixes","Varna June 2023","|Complete|","17.0",""
diff --git a/libcxx/include/fstream b/libcxx/include/fstream
index 7a4e15b55d56..203cc6dfb4b1 100644
--- a/libcxx/include/fstream
+++ b/libcxx/include/fstream
@@ -73,6 +73,7 @@ public:
typedef typename traits_type::int_type int_type;
typedef typename traits_type::pos_type pos_type;
typedef typename traits_type::off_type off_type;
+ using native_handle_type = typename basic_filebuf<charT, traits>::native_handle_type; // Since C++26
basic_ifstream();
explicit basic_ifstream(const char* s, ios_base::openmode mode = ios_base::in);
@@ -85,6 +86,7 @@ public:
void swap(basic_ifstream& rhs);
basic_filebuf<char_type, traits_type>* rdbuf() const;
+ native_handle_type native_handle() const noexcept; // Since C++26
bool is_open() const;
void open(const char* s, ios_base::openmode mode = ios_base::in);
void open(const string& s, ios_base::openmode mode = ios_base::in);
@@ -110,6 +112,7 @@ public:
typedef typename traits_type::int_type int_type;
typedef typename traits_type::pos_type pos_type;
typedef typename traits_type::off_type off_type;
+ using native_handle_type = typename basic_filebuf<charT, traits>::native_handle_type; // Since C++26
basic_ofstream();
explicit basic_ofstream(const char* s, ios_base::openmode mode = ios_base::out);
@@ -122,6 +125,8 @@ public:
void swap(basic_ofstream& rhs);
basic_filebuf<char_type, traits_type>* rdbuf() const;
+ native_handle_type native_handle() const noexcept; // Since C++26
+
bool is_open() const;
void open(const char* s, ios_base::openmode mode = ios_base::out);
void open(const string& s, ios_base::openmode mode = ios_base::out);
@@ -148,6 +153,7 @@ public:
typedef typename traits_type::int_type int_type;
typedef typename traits_type::pos_type pos_type;
typedef typename traits_type::off_type off_type;
+ using native_handle_type = typename basic_filebuf<charT, traits>::native_handle_type; // Since C++26
basic_fstream();
explicit basic_fstream(const char* s, ios_base::openmode mode = ios_base::in|ios_base::out);
@@ -160,6 +166,7 @@ public:
void swap(basic_fstream& rhs);
basic_filebuf<char_type, traits_type>* rdbuf() const;
+ native_handle_type native_handle() const noexcept; // Since C++26
bool is_open() const;
void open(const char* s, ios_base::openmode mode = ios_base::in|ios_base::out);
void open(const string& s, ios_base::openmode mode = ios_base::in|ios_base::out);
@@ -210,6 +217,10 @@ _LIBCPP_PUSH_MACROS
_LIBCPP_BEGIN_NAMESPACE_STD
+# if _LIBCPP_STD_VER >= 26 && defined(_LIBCPP_WIN32API)
+_LIBCPP_EXPORTED_FROM_ABI void* __filebuf_windows_native_handle(FILE* __file) noexcept;
+# endif
+
template <class _CharT, class _Traits>
class _LIBCPP_TEMPLATE_VIS basic_filebuf : public basic_streambuf<_CharT, _Traits> {
public:
@@ -219,6 +230,15 @@ public:
typedef typename traits_type::pos_type pos_type;
typedef typename traits_type::off_type off_type;
typedef typename traits_type::state_type state_type;
+# if _LIBCPP_STD_VER >= 26
+# if defined(_LIBCPP_WIN32API)
+ using native_handle_type = void*; // HANDLE
+# elif __has_include(<unistd.h>)
+ using native_handle_type = int; // POSIX file descriptor
+# else
+# error "Provide a native file handle!"
+# endif
+# endif
// 27.9.1.2 Constructors/destructor:
basic_filebuf();
@@ -245,6 +265,18 @@ public:
# endif
_LIBCPP_HIDE_FROM_ABI basic_filebuf* __open(int __fd, ios_base::openmode __mode);
basic_filebuf* close();
+# if _LIBCPP_STD_VER >= 26
+ _LIBCPP_HIDE_FROM_ABI native_handle_type native_handle() const noexcept {
+ _LIBCPP_ASSERT_UNCATEGORIZED(this->is_open(), "File must be opened");
+# if defined(_LIBCPP_WIN32API)
+ return std::__filebuf_windows_native_handle(__file_);
+# elif __has_include(<unistd.h>)
+ return fileno(__file_);
+# else
+# error "Provide a way to determine the file native handle!"
+# endif
+ }
+# endif // _LIBCPP_STD_VER >= 26
_LIBCPP_HIDE_FROM_ABI inline static const char* __make_mdstring(ios_base::openmode __mode) _NOEXCEPT;
@@ -1024,6 +1056,9 @@ public:
typedef typename traits_type::int_type int_type;
typedef typename traits_type::pos_type pos_type;
typedef typename traits_type::off_type off_type;
+# if _LIBCPP_STD_VER >= 26
+ using native_handle_type = typename basic_filebuf<_CharT, _Traits>::native_handle_type;
+# endif
_LIBCPP_HIDE_FROM_ABI basic_ifstream();
_LIBCPP_HIDE_FROM_ABI explicit basic_ifstream(const char* __s, ios_base::openmode __mode = ios_base::in);
@@ -1041,6 +1076,9 @@ public:
_LIBCPP_HIDE_FROM_ABI void swap(basic_ifstream& __rhs);
_LIBCPP_HIDE_FROM_ABI basic_filebuf<char_type, traits_type>* rdbuf() const;
+# if _LIBCPP_STD_VER >= 26
+ _LIBCPP_HIDE_FROM_ABI native_handle_type native_handle() const noexcept { return rdbuf()->native_handle(); }
+# endif
_LIBCPP_HIDE_FROM_ABI bool is_open() const;
void open(const char* __s, ios_base::openmode __mode = ios_base::in);
# ifdef _LIBCPP_HAS_OPEN_WITH_WCHAR
@@ -1171,6 +1209,9 @@ public:
typedef typename traits_type::int_type int_type;
typedef typename traits_type::pos_type pos_type;
typedef typename traits_type::off_type off_type;
+# if _LIBCPP_STD_VER >= 26
+ using native_handle_type = typename basic_filebuf<_CharT, _Traits>::native_handle_type;
+# endif
_LIBCPP_HIDE_FROM_ABI basic_ofstream();
_LIBCPP_HIDE_FROM_ABI explicit basic_ofstream(const char* __s, ios_base::openmode __mode = ios_base::out);
@@ -1190,6 +1231,9 @@ public:
_LIBCPP_HIDE_FROM_ABI void swap(basic_ofstream& __rhs);
_LIBCPP_HIDE_FROM_ABI basic_filebuf<char_type, traits_type>* rdbuf() const;
+# if _LIBCPP_STD_VER >= 26
+ _LIBCPP_HIDE_FROM_ABI native_handle_type native_handle() const noexcept { return rdbuf()->native_handle(); }
+# endif
_LIBCPP_HIDE_FROM_ABI bool is_open() const;
void open(const char* __s, ios_base::openmode __mode = ios_base::out);
# ifdef _LIBCPP_HAS_OPEN_WITH_WCHAR
@@ -1321,6 +1365,9 @@ public:
typedef typename traits_type::int_type int_type;
typedef typename traits_type::pos_type pos_type;
typedef typename traits_type::off_type off_type;
+# if _LIBCPP_STD_VER >= 26
+ using native_handle_type = typename basic_filebuf<_CharT, _Traits>::native_handle_type;
+# endif
_LIBCPP_HIDE_FROM_ABI basic_fstream();
_LIBCPP_HIDE_FROM_ABI explicit basic_fstream(const char* __s,
@@ -1345,6 +1392,9 @@ public:
_LIBCPP_HIDE_FROM_ABI void swap(basic_fstream& __rhs);
_LIBCPP_HIDE_FROM_ABI basic_filebuf<char_type, traits_type>* rdbuf() const;
+# if _LIBCPP_STD_VER >= 26
+ _LIBCPP_HIDE_FROM_ABI native_handle_type native_handle() const noexcept { return rdbuf()->native_handle(); }
+# endif
_LIBCPP_HIDE_FROM_ABI bool is_open() const;
_LIBCPP_HIDE_FROM_ABI void open(const char* __s, ios_base::openmode __mode = ios_base::in | ios_base::out);
# ifdef _LIBCPP_HAS_OPEN_WITH_WCHAR
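
For orientation, the <fstream> additions above are the implementation side of P1759R6: in C++26 each basic_filebuf and file stream exposes the underlying OS handle via native_handle_type/native_handle() (an int file descriptor on POSIX, a HANDLE spelled void* on Windows), and the handle is only meaningful while the file is open. A hedged usage sketch, assuming a C++26 toolchain and a hypothetical file name:

#include <fstream>

int main() {
  std::ofstream out("log.txt");                            // hypothetical path
  if (!out.is_open())
    return 1;
  // fd on POSIX, HANDLE (void*) on Windows; valid only while the stream is open.
  [[maybe_unused]] std::ofstream::native_handle_type h = out.native_handle();
  // Typical use: hand h to fsync()/FlushFileBuffers() for durability guarantees
  // that the iostreams API itself does not offer.
  return 0;
}
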
diff --git a/libcxx/include/sstream b/libcxx/include/sstream
index bd5cea9a5e94..9f75b7e0ac9e 100644
--- a/libcxx/include/sstream
+++ b/libcxx/include/sstream
@@ -398,9 +398,9 @@ public:
typename string_type::size_type __pos = __view.empty() ? 0 : __view.data() - __str_.data();
// In C++23, this is just string_type(std::move(__str_), __pos, __view.size(), __str_.get_allocator());
// But we need something that works in C++20 also.
- string_type __result(__str_.get_allocator());
- __result.__move_assign(std::move(__str_), __pos, __view.size());
- __str_.clear();
+ string_type __result(std::move(__str_), __str_.get_allocator());
+ __result.resize(__pos + __view.size());
+ __result.erase(0, __pos);
__init_buf_ptrs();
return __result;
}
diff --git a/libcxx/include/string b/libcxx/include/string
index c676182fba8b..e2be53eaee24 100644
--- a/libcxx/include/string
+++ b/libcxx/include/string
@@ -922,7 +922,7 @@ public:
// Turning off ASan instrumentation for variable initialization with _LIBCPP_STRING_INTERNAL_MEMORY_ACCESS
// does not work consistently during initialization of __r_, so we instead unpoison __str's memory manually first.
// __str's memory needs to be unpoisoned only in the case where it's a short string.
- : __r_(((__str.__is_long() ? 0 : (__str.__annotate_delete(), 0)), std::move(__str.__r_))) {
+ : __r_([](basic_string &__s){ if(!__s.__is_long()) __s.__annotate_delete(); return std::move(__s.__r_); }(__str)) {
__str.__r_.first() = __rep();
__str.__annotate_new(0);
if (!__is_long())
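
The basic_string hunk above replaces a comma-expression in the move constructor's member initializer with an immediately invoked lambda, so the conditional ASan unpoison reads as ordinary statements before the move. A small sketch of that initializer-list idiom, using hypothetical types rather than libc++'s internals:

#include <utility>

struct Source { int resource = 0; bool poisoned = false; };

struct Sink {
  int resource_;

  // "Do something to the source, then move from it" inside the member
  // initializer: the immediately invoked lambda keeps the side effect and the
  // move as separate, readable steps (no operator-comma tricks).
  explicit Sink(Source&& src)
      : resource_([](Source& s) {
          if (s.poisoned)        // conditional side effect on the source,
            s.poisoned = false;  // analogous to the conditional __annotate_delete()
          return std::move(s.resource);
        }(src)) {}
};
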
diff --git a/libcxx/include/tuple b/libcxx/include/tuple
index 8c954fc72c37..aa22c320b1ec 100644
--- a/libcxx/include/tuple
+++ b/libcxx/include/tuple
@@ -1416,25 +1416,27 @@ inline constexpr size_t tuple_size_v = tuple_size<_Tp>::value;
# define _LIBCPP_NOEXCEPT_RETURN(...) \
noexcept(noexcept(__VA_ARGS__)) { return __VA_ARGS__; }
+// The _LIBCPP_NOEXCEPT_RETURN macro breaks formatting.
+// clang-format off
template <class _Fn, class _Tuple, size_t... _Id>
inline _LIBCPP_HIDE_FROM_ABI constexpr decltype(auto)
__apply_tuple_impl(_Fn&& __f, _Tuple&& __t, __tuple_indices<_Id...>)
_LIBCPP_NOEXCEPT_RETURN(std::__invoke(std::forward<_Fn>(__f), std::get<_Id>(std::forward<_Tuple>(__t))...))
- template <class _Fn, class _Tuple>
- inline _LIBCPP_HIDE_FROM_ABI constexpr decltype(auto) apply(_Fn&& __f, _Tuple&& __t) _LIBCPP_NOEXCEPT_RETURN(
- std::__apply_tuple_impl(std::forward<_Fn>(__f),
- std::forward<_Tuple>(__t),
- typename __make_tuple_indices<tuple_size_v<remove_reference_t<_Tuple>>>::type{}))
+template <class _Fn, class _Tuple>
+inline _LIBCPP_HIDE_FROM_ABI constexpr decltype(auto) apply(_Fn&& __f, _Tuple&& __t)
+ _LIBCPP_NOEXCEPT_RETURN(std::__apply_tuple_impl(
+ std::forward<_Fn>(__f),
+ std::forward<_Tuple>(__t),
+ typename __make_tuple_indices<tuple_size_v<remove_reference_t<_Tuple>>>::type{}))
- template <class _Tp, class _Tuple, size_t... _Idx>
- inline _LIBCPP_HIDE_FROM_ABI constexpr _Tp
- __make_from_tuple_impl(_Tuple&& __t, __tuple_indices<_Idx...>)
- _LIBCPP_NOEXCEPT_RETURN(_Tp(std::get<_Idx>(std::forward<_Tuple>(__t))...))
+template <class _Tp, class _Tuple, size_t... _Idx>
+inline _LIBCPP_HIDE_FROM_ABI constexpr _Tp __make_from_tuple_impl(_Tuple&& __t, __tuple_indices<_Idx...>)
+ _LIBCPP_NOEXCEPT_RETURN(_Tp(std::get<_Idx>(std::forward<_Tuple>(__t))...))
- template <class _Tp, class _Tuple>
- inline _LIBCPP_HIDE_FROM_ABI constexpr _Tp
- make_from_tuple(_Tuple&& __t) _LIBCPP_NOEXCEPT_RETURN(std::__make_from_tuple_impl<_Tp>(
+template <class _Tp, class _Tuple>
+inline _LIBCPP_HIDE_FROM_ABI constexpr _Tp make_from_tuple(_Tuple&& __t)
+ _LIBCPP_NOEXCEPT_RETURN(std::__make_from_tuple_impl<_Tp>(
std::forward<_Tuple>(__t), typename __make_tuple_indices<tuple_size_v<remove_reference_t<_Tuple>>>::type{}))
# undef _LIBCPP_NOEXCEPT_RETURN
@@ -1443,9 +1445,11 @@ __apply_tuple_impl(_Fn&& __f, _Tuple&& __t, __tuple_indices<_Id...>)
#endif // !defined(_LIBCPP_CXX03_LANG)
- _LIBCPP_END_NAMESPACE_STD
+_LIBCPP_END_NAMESPACE_STD
+
+_LIBCPP_POP_MACROS
- _LIBCPP_POP_MACROS
+// clang-format on
#if !defined(_LIBCPP_REMOVE_TRANSITIVE_INCLUDES) && _LIBCPP_STD_VER <= 20
# include <exception>
diff --git a/libcxx/include/version b/libcxx/include/version
index d3c2791a7d0b..c96647894dce 100644
--- a/libcxx/include/version
+++ b/libcxx/include/version
@@ -496,7 +496,7 @@ __cpp_lib_within_lifetime 202306L <type_traits>
// # define __cpp_lib_freestanding_optional 202311L
// # define __cpp_lib_freestanding_string_view 202311L
// # define __cpp_lib_freestanding_variant 202311L
-// # define __cpp_lib_fstream_native_handle 202306L
+# define __cpp_lib_fstream_native_handle 202306L
// # define __cpp_lib_function_ref 202306L
// # define __cpp_lib_hazard_pointer 202306L
// # define __cpp_lib_linalg 202311L
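
With the <version> entry flipped on, client code can key off the feature-test macro instead of the language level. A minimal sketch, assuming only the macro and a hypothetical helper name:

#include <fstream>
#include <version>

void sync_hint(std::ofstream& out) {              // hypothetical helper
#if defined(__cpp_lib_fstream_native_handle) && __cpp_lib_fstream_native_handle >= 202306L
  auto handle = out.native_handle();              // int fd on POSIX, HANDLE on Windows
  (void)handle;                                   // e.g. pass to fsync()/FlushFileBuffers()
#else
  out.flush();                                    // best effort without a native handle
#endif
}
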
diff --git a/libcxx/src/CMakeLists.txt b/libcxx/src/CMakeLists.txt
index 329964a00136..96e7c6362d8c 100644
--- a/libcxx/src/CMakeLists.txt
+++ b/libcxx/src/CMakeLists.txt
@@ -84,6 +84,7 @@ endif()
if (LIBCXX_ENABLE_LOCALIZATION)
list(APPEND LIBCXX_SOURCES
+ fstream.cpp
include/sso_allocator.h
ios.cpp
ios.instantiations.cpp
diff --git a/libcxx/src/fstream.cpp b/libcxx/src/fstream.cpp
new file mode 100644
index 000000000000..55a4442b9c78
--- /dev/null
+++ b/libcxx/src/fstream.cpp
@@ -0,0 +1,37 @@
+//===----------------------------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include <__config>
+#include <cstdio>
+#include <fstream>
+
+#if defined(_LIBCPP_WIN32API)
+# define WIN32_LEAN_AND_MEAN
+# define NOMINMAX
+# include <io.h>
+# include <windows.h>
+#endif
+
+_LIBCPP_BEGIN_NAMESPACE_STD
+
+#if defined(_LIBCPP_WIN32API)
+
+// Confirm that `HANDLE` is `void*` as implemented in `basic_filebuf`
+static_assert(std::same_as<HANDLE, void*>);
+
+_LIBCPP_EXPORTED_FROM_ABI void* __filebuf_windows_native_handle(FILE* __file) noexcept {
+ // https://learn.microsoft.com/en-us/cpp/c-runtime-library/reference/get-osfhandle?view=msvc-170
+ intptr_t __handle = _get_osfhandle(fileno(__file));
+ if (__handle == -1)
+ return nullptr;
+ return reinterpret_cast<void*>(__handle);
+}
+
+#endif
+
+_LIBCPP_END_NAMESPACE_STD
diff --git a/libcxx/test/libcxx/utilities/expected/expected.expected/transform_error.mandates.verify.cpp b/libcxx/test/libcxx/utilities/expected/expected.expected/transform_error.mandates.verify.cpp
index 318435660c36..46027fb46295 100644
--- a/libcxx/test/libcxx/utilities/expected/expected.expected/transform_error.mandates.verify.cpp
+++ b/libcxx/test/libcxx/utilities/expected/expected.expected/transform_error.mandates.verify.cpp
@@ -6,14 +6,9 @@
//
//===----------------------------------------------------------------------===//
-// https://github.com/llvm/llvm-project/pull/76232 breaks this libc++ test.
-// The fix would be to update this file. The issue is that the CI uses 2
-// versions of Clang-18
-// - An older nightly build as the main compiler
-// - A freshly bootstrap build
-// This means the test can't be used until the nightly build is updated.
-// TODO(mordante) Reenable clang-18.
-// UNSUPPORTED: clang-18
+// Clang-18 fixed some spurious clang diagnostics. Once clang-18 is the
+// minimum required version, these obsolete tests can be removed.
+// TODO(LLVM-20) remove the spurious clang diagnostic tests.
// UNSUPPORTED: c++03, c++11, c++14, c++17, c++20
@@ -55,11 +50,11 @@ void test() {
{
std::expected<int, int> e;
e.transform_error(return_unexpected<int&>); // expected-error-re@*:* {{static assertion failed {{.*}}The result of {{.*}} must be a valid template argument for unexpected}}
- // expected-error-re@*:* {{{{(excess elements in struct initializer|no matching constructor for initialization of)}}{{.*}}}}
+ // expected-error-re@*:* 0-1 {{{{(excess elements in struct initializer|no matching constructor for initialization of)}}{{.*}}}}
// expected-error-re@*:* {{static assertion failed {{.*}}[expected.object.general] A program that instantiates the definition of template expected<T, E> for {{.*}} is ill-formed.}}
e.transform_error(return_no_object<int&>); // expected-error-re@*:* {{static assertion failed {{.*}}The result of {{.*}} must be a valid template argument for unexpected}}
- // expected-error-re@*:* {{{{(excess elements in struct initializer|no matching constructor for initialization of)}}{{.*}}}}
+ // expected-error-re@*:* 0-1 {{{{(excess elements in struct initializer|no matching constructor for initialization of)}}{{.*}}}}
// expected-error-re@*:* {{static assertion failed {{.*}}[expected.object.general] A program that instantiates the definition of template expected<T, E> for {{.*}} is ill-formed.}}
}
@@ -67,27 +62,27 @@ void test() {
{
const std::expected<int, int> e;
e.transform_error(return_unexpected<const int &>); // expected-error-re@*:* {{static assertion failed {{.*}}The result of {{.*}} must be a valid template argument for unexpected}}
- // expected-error-re@*:* 2 {{{{(excess elements in struct initializer|no matching constructor for initialization of)}}{{.*}}}}
+ // expected-error-re@*:* 0-2 {{{{(excess elements in struct initializer|no matching constructor for initialization of)}}{{.*}}}}
e.transform_error(return_no_object<const int &>); // expected-error-re@*:* {{static assertion failed {{.*}}The result of {{.*}} must be a valid template argument for unexpected}}
- // expected-error-re@*:* 2 {{{{(excess elements in struct initializer|no matching constructor for initialization of)}}{{.*}}}}
+ // expected-error-re@*:* 0-2 {{{{(excess elements in struct initializer|no matching constructor for initialization of)}}{{.*}}}}
}
// Test && overload
{
std::expected<int, int> e;
std::move(e).transform_error(return_unexpected<int&&>); // expected-error-re@*:* {{static assertion failed {{.*}}The result of {{.*}} must be a valid template argument for unexpected}}
- // expected-error-re@*:* 2 {{{{(excess elements in struct initializer|no matching constructor for initialization of)}}{{.*}}}}
+ // expected-error-re@*:* 0-2 {{{{(excess elements in struct initializer|no matching constructor for initialization of)}}{{.*}}}}
std::move(e).transform_error(return_no_object<int&&>); // expected-error-re@*:* {{static assertion failed {{.*}}The result of {{.*}} must be a valid template argument for unexpected}}
- // expected-error-re@*:* 2 {{{{(excess elements in struct initializer|no matching constructor for initialization of)}}{{.*}}}}
+ // expected-error-re@*:* 0-2 {{{{(excess elements in struct initializer|no matching constructor for initialization of)}}{{.*}}}}
}
// Test const&& overload
{
const std::expected<int, int> e;
std::move(e).transform_error(return_unexpected<const int&&>); // expected-error-re@*:* {{static assertion failed {{.*}}The result of {{.*}} must be a valid template argument for unexpected}}
- // expected-error-re@*:* 2 {{{{(excess elements in struct initializer|no matching constructor for initialization of)}}{{.*}}}}
+ // expected-error-re@*:* 0-2 {{{{(excess elements in struct initializer|no matching constructor for initialization of)}}{{.*}}}}
std::move(e).transform_error(return_no_object<const int&&>); // expected-error-re@*:* {{static assertion failed {{.*}}The result of {{.*}} must be a valid template argument for unexpected}}
- // expected-error-re@*:* 2 {{{{(excess elements in struct initializer|no matching constructor for initialization of)}}{{.*}}}}
+ // expected-error-re@*:* 0-2 {{{{(excess elements in struct initializer|no matching constructor for initialization of)}}{{.*}}}}
}
}
// clang-format on
diff --git a/libcxx/test/libcxx/utilities/expected/expected.void/transform_error.mandates.verify.cpp b/libcxx/test/libcxx/utilities/expected/expected.void/transform_error.mandates.verify.cpp
index d9f65e9f5919..cce59b99c55a 100644
--- a/libcxx/test/libcxx/utilities/expected/expected.void/transform_error.mandates.verify.cpp
+++ b/libcxx/test/libcxx/utilities/expected/expected.void/transform_error.mandates.verify.cpp
@@ -6,14 +6,9 @@
//
//===----------------------------------------------------------------------===//
-// https://github.com/llvm/llvm-project/pull/76232 breaks this libc++ test.
-// The fix would be to update this file. The issue is that the CI uses 2
-// versions of Clang-18
-// - An older nightly build as the main compiler
-// - A freshly bootstrap build
-// This means the test can't be used until the nightly build is updated.
-// TODO(mordante) Reenable clang-18.
-// UNSUPPORTED: clang-18
+// Clang-18 fixed some spurious clang diagnostics. Once clang-18 is the
+// minimum required version, these obsolete tests can be removed.
+// TODO(LLVM-20) remove the spurious clang diagnostic tests.
// UNSUPPORTED: c++03, c++11, c++14, c++17, c++20
@@ -55,11 +50,11 @@ void test() {
{
std::expected<void, int> e;
e.transform_error(return_unexpected<int&>); // expected-error-re@*:* {{static assertion failed {{.*}}The result of {{.*}} must be a valid template argument for unexpected}}
- // expected-error-re@*:* {{{{(excess elements in struct initializer|no matching constructor for initialization of)}}{{.*}}}}
+ // expected-error-re@*:* 0-1 {{{{(excess elements in struct initializer|no matching constructor for initialization of)}}{{.*}}}}
// expected-error-re@*:* {{static assertion failed {{.*}}A program that instantiates expected<T, E> with a E that is not a valid argument for unexpected<E> is ill-formed}}
e.transform_error(return_no_object<int&>); // expected-error-re@*:* {{static assertion failed {{.*}}The result of {{.*}} must be a valid template argument for unexpected}}
- // expected-error-re@*:* {{{{(excess elements in struct initializer|no matching constructor for initialization of)}}{{.*}}}}
+ // expected-error-re@*:* 0-1 {{{{(excess elements in struct initializer|no matching constructor for initialization of)}}{{.*}}}}
// expected-error-re@*:* {{static assertion failed {{.*}}A program that instantiates expected<T, E> with a E that is not a valid argument for unexpected<E> is ill-formed}}
}
@@ -67,27 +62,27 @@ void test() {
{
const std::expected<void, int> e;
e.transform_error(return_unexpected<const int &>); // expected-error-re@*:* {{static assertion failed {{.*}}The result of {{.*}} must be a valid template argument for unexpected}}
- // expected-error-re@*:* {{{{(excess elements in struct initializer|no matching constructor for initialization of)}}{{.*}}}}
+ // expected-error-re@*:* 0-1 {{{{(excess elements in struct initializer|no matching constructor for initialization of)}}{{.*}}}}
e.transform_error(return_no_object<const int &>); // expected-error-re@*:* {{static assertion failed {{.*}}The result of {{.*}} must be a valid template argument for unexpected}}
- // expected-error-re@*:* {{{{(excess elements in struct initializer|no matching constructor for initialization of)}}{{.*}}}}
+ // expected-error-re@*:* 0-1 {{{{(excess elements in struct initializer|no matching constructor for initialization of)}}{{.*}}}}
}
// Test && overload
{
std::expected<void, int> e;
std::move(e).transform_error(return_unexpected<int&&>); // expected-error-re@*:* {{static assertion failed {{.*}}The result of {{.*}} must be a valid template argument for unexpected}}
- // expected-error-re@*:* {{{{(excess elements in struct initializer|no matching constructor for initialization of)}}{{.*}}}}
+ // expected-error-re@*:* 0-1 {{{{(excess elements in struct initializer|no matching constructor for initialization of)}}{{.*}}}}
std::move(e).transform_error(return_no_object<int&&>); // expected-error-re@*:* {{static assertion failed {{.*}}The result of {{.*}} must be a valid template argument for unexpected}}
- // expected-error-re@*:* {{{{(excess elements in struct initializer|no matching constructor for initialization of)}}{{.*}}}}
+ // expected-error-re@*:* 0-1 {{{{(excess elements in struct initializer|no matching constructor for initialization of)}}{{.*}}}}
}
// Test const&& overload
{
const std::expected<void, int> e;
std::move(e).transform_error(return_unexpected<const int&&>); // expected-error-re@*:* {{static assertion failed {{.*}}The result of {{.*}} must be a valid template argument for unexpected}}
- // expected-error-re@*:* {{{{(excess elements in struct initializer|no matching constructor for initialization of)}}{{.*}}}}
+ // expected-error-re@*:* 0-1 {{{{(excess elements in struct initializer|no matching constructor for initialization of)}}{{.*}}}}
std::move(e).transform_error(return_no_object<const int&&>); // expected-error-re@*:* {{static assertion failed {{.*}}The result of {{.*}} must be a valid template argument for unexpected}}
- // expected-error-re@*:* {{{{(excess elements in struct initializer|no matching constructor for initialization of)}}{{.*}}}}
+ // expected-error-re@*:* 0-1 {{{{(excess elements in struct initializer|no matching constructor for initialization of)}}{{.*}}}}
}
}
// clang-format on
diff --git a/libcxx/test/std/input.output/file.streams/fstreams/filebuf.members/native_handle.assert.pass.cpp b/libcxx/test/std/input.output/file.streams/fstreams/filebuf.members/native_handle.assert.pass.cpp
new file mode 100644
index 000000000000..9e4d88642d49
--- /dev/null
+++ b/libcxx/test/std/input.output/file.streams/fstreams/filebuf.members/native_handle.assert.pass.cpp
@@ -0,0 +1,32 @@
+//===----------------------------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+// UNSUPPORTED: c++03, c++11, c++14, c++17, c++20, c++23
+
+// REQUIRES: has-unix-headers
+// REQUIRES: libcpp-hardening-mode={{extensive|debug}}
+// XFAIL: availability-verbose_abort-missing
+
+// <fstream>
+
+// class basic_filebuf;
+
+// native_handle_type native_handle() const noexcept;
+
+#include <fstream>
+
+#include "../native_handle_assert_test_helpers.h"
+
+int main(int, char**) {
+ test_native_handle_assertion<std::basic_filebuf<char>>();
+#ifndef TEST_HAS_NO_WIDE_CHARACTERS
+ test_native_handle_assertion<std::basic_filebuf<wchar_t>>();
+#endif
+
+ return 0;
+}
diff --git a/libcxx/test/std/input.output/file.streams/fstreams/filebuf.members/native_handle.pass.cpp b/libcxx/test/std/input.output/file.streams/fstreams/filebuf.members/native_handle.pass.cpp
new file mode 100644
index 000000000000..22cc63e909c5
--- /dev/null
+++ b/libcxx/test/std/input.output/file.streams/fstreams/filebuf.members/native_handle.pass.cpp
@@ -0,0 +1,58 @@
+//===----------------------------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+// UNSUPPORTED: c++03, c++11, c++14, c++17, c++20, c++23
+
+// <fstream>
+
+// class basic_filebuf;
+
+// native_handle_type native_handle() const noexcept;
+
+#include <cassert>
+#include <fstream>
+#include <filesystem>
+#include <utility>
+
+#include "platform_support.h"
+#include "test_macros.h"
+#include "../native_handle_test_helpers.h"
+
+template <typename CharT>
+void test() {
+ std::basic_filebuf<CharT> f;
+ std::filesystem::path p = get_temp_file_name();
+
+ // non-const
+ {
+ assert(f.open(p, std::ios_base::in) != nullptr);
+ std::same_as<NativeHandleT> decltype(auto) handle = f.native_handle();
+ assert(is_handle_valid(handle));
+ f.close();
+ assert(!is_handle_valid(handle));
+ static_assert(noexcept(f.native_handle()));
+ }
+ // const
+ {
+ assert(f.open(p, std::ios_base::in) != nullptr);
+ std::same_as<NativeHandleT> decltype(auto) const_handle = std::as_const(f).native_handle();
+ assert(is_handle_valid(const_handle));
+ f.close();
+ assert(!is_handle_valid(const_handle));
+ static_assert(noexcept(std::as_const(f).native_handle()));
+ }
+}
+
+int main(int, char**) {
+ test<char>();
+#ifndef TEST_HAS_NO_WIDE_CHARACTERS
+ test<wchar_t>();
+#endif
+
+ return 0;
+}
diff --git a/libcxx/test/std/input.output/file.streams/fstreams/filebuf/types.pass.cpp b/libcxx/test/std/input.output/file.streams/fstreams/filebuf/types.pass.cpp
index ad4249f4d809..53a5f78d5eff 100644
--- a/libcxx/test/std/input.output/file.streams/fstreams/filebuf/types.pass.cpp
+++ b/libcxx/test/std/input.output/file.streams/fstreams/filebuf/types.pass.cpp
@@ -23,6 +23,7 @@
#include <type_traits>
#include "test_macros.h"
+#include "../native_handle_test_helpers.h"
int main(int, char**)
{
@@ -32,6 +33,12 @@ int main(int, char**)
static_assert((std::is_same<std::basic_filebuf<char>::int_type, std::char_traits<char>::int_type>::value), "");
static_assert((std::is_same<std::basic_filebuf<char>::pos_type, std::char_traits<char>::pos_type>::value), "");
static_assert((std::is_same<std::basic_filebuf<char>::off_type, std::char_traits<char>::off_type>::value), "");
+#if TEST_STD_VER >= 26
+ test_native_handle_type< std::basic_filebuf<char>>();
+# ifndef TEST_HAS_NO_WIDE_CHARACTERS
+ test_native_handle_type< std::basic_filebuf<wchar_t>>();
+# endif
+#endif
- return 0;
+ return 0;
}
diff --git a/libcxx/test/std/input.output/file.streams/fstreams/fstream.members/native_handle.assert.pass.cpp b/libcxx/test/std/input.output/file.streams/fstreams/fstream.members/native_handle.assert.pass.cpp
new file mode 100644
index 000000000000..8fd37f68fc97
--- /dev/null
+++ b/libcxx/test/std/input.output/file.streams/fstreams/fstream.members/native_handle.assert.pass.cpp
@@ -0,0 +1,32 @@
+//===----------------------------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+// UNSUPPORTED: c++03, c++11, c++14, c++17, c++20, c++23
+
+// REQUIRES: has-unix-headers
+// REQUIRES: libcpp-hardening-mode={{extensive|debug}}
+// XFAIL: availability-verbose_abort-missing
+
+// <fstream>
+
+// class basic_fstream;
+
+// native_handle_type native_handle() const noexcept;
+
+#include <fstream>
+
+#include "../native_handle_assert_test_helpers.h"
+
+int main(int, char**) {
+ test_native_handle_assertion<std::basic_fstream<char>>();
+#ifndef TEST_HAS_NO_WIDE_CHARACTERS
+ test_native_handle_assertion<std::basic_fstream<wchar_t>>();
+#endif
+
+ return 0;
+}
diff --git a/libcxx/test/std/input.output/file.streams/fstreams/fstream.members/native_handle.pass.cpp b/libcxx/test/std/input.output/file.streams/fstreams/fstream.members/native_handle.pass.cpp
new file mode 100644
index 000000000000..a7229512385a
--- /dev/null
+++ b/libcxx/test/std/input.output/file.streams/fstreams/fstream.members/native_handle.pass.cpp
@@ -0,0 +1,27 @@
+//===----------------------------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+// UNSUPPORTED: c++03, c++11, c++14, c++17, c++20, c++23
+
+// <fstream>
+
+// class basic_fstream;
+
+// native_handle_type native_handle() const noexcept;
+
+#include "test_macros.h"
+#include "../native_handle_test_helpers.h"
+
+int main(int, char**) {
+ test_native_handle<char, std::basic_fstream<char>>();
+#ifndef TEST_HAS_NO_WIDE_CHARACTERS
+ test_native_handle<wchar_t, std::basic_fstream<wchar_t>>();
+#endif
+
+ return 0;
+}
diff --git a/libcxx/test/std/input.output/file.streams/fstreams/fstream/types.pass.cpp b/libcxx/test/std/input.output/file.streams/fstreams/fstream/types.pass.cpp
index 9274b1854bf8..0e275c53ed18 100644
--- a/libcxx/test/std/input.output/file.streams/fstreams/fstream/types.pass.cpp
+++ b/libcxx/test/std/input.output/file.streams/fstreams/fstream/types.pass.cpp
@@ -23,6 +23,7 @@
#include <type_traits>
#include "test_macros.h"
+#include "../native_handle_test_helpers.h"
int main(int, char**)
{
@@ -32,6 +33,12 @@ int main(int, char**)
static_assert((std::is_same<std::basic_fstream<char>::int_type, std::char_traits<char>::int_type>::value), "");
static_assert((std::is_same<std::basic_fstream<char>::pos_type, std::char_traits<char>::pos_type>::value), "");
static_assert((std::is_same<std::basic_fstream<char>::off_type, std::char_traits<char>::off_type>::value), "");
+#if TEST_STD_VER >= 26
+ test_native_handle_type< std::basic_fstream<char>>();
+# ifndef TEST_HAS_NO_WIDE_CHARACTERS
+ test_native_handle_type< std::basic_fstream<wchar_t>>();
+# endif
+#endif
- return 0;
+ return 0;
}
diff --git a/libcxx/test/std/input.output/file.streams/fstreams/ifstream.members/native_handle.assert.pass.cpp b/libcxx/test/std/input.output/file.streams/fstreams/ifstream.members/native_handle.assert.pass.cpp
new file mode 100644
index 000000000000..c17b778c4afa
--- /dev/null
+++ b/libcxx/test/std/input.output/file.streams/fstreams/ifstream.members/native_handle.assert.pass.cpp
@@ -0,0 +1,32 @@
+//===----------------------------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+// UNSUPPORTED: c++03, c++11, c++14, c++17, c++20, c++23
+
+// REQUIRES: has-unix-headers
+// REQUIRES: libcpp-hardening-mode={{extensive|debug}}
+// XFAIL: availability-verbose_abort-missing
+
+// <fstream>
+
+// class basic_ifstream;
+
+// native_handle_type native_handle() const noexcept;
+
+#include <fstream>
+
+#include "../native_handle_assert_test_helpers.h"
+
+int main(int, char**) {
+ test_native_handle_assertion<std::basic_ifstream<char>>();
+#ifndef TEST_HAS_NO_WIDE_CHARACTERS
+ test_native_handle_assertion<std::basic_ifstream<wchar_t>>();
+#endif
+
+ return 0;
+}
diff --git a/libcxx/test/std/input.output/file.streams/fstreams/ifstream.members/native_handle.pass.cpp b/libcxx/test/std/input.output/file.streams/fstreams/ifstream.members/native_handle.pass.cpp
new file mode 100644
index 000000000000..c2aff949c8fa
--- /dev/null
+++ b/libcxx/test/std/input.output/file.streams/fstreams/ifstream.members/native_handle.pass.cpp
@@ -0,0 +1,27 @@
+//===----------------------------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+// UNSUPPORTED: c++03, c++11, c++14, c++17, c++20, c++23
+
+// <fstream>
+
+// class basic_ifstream;
+
+// native_handle_type native_handle() const noexcept;
+
+#include "test_macros.h"
+#include "../native_handle_test_helpers.h"
+
+int main(int, char**) {
+ test_native_handle<char, std::basic_ifstream<char>>();
+#ifndef TEST_HAS_NO_WIDE_CHARACTERS
+ test_native_handle<wchar_t, std::basic_ifstream<wchar_t>>();
+#endif
+
+ return 0;
+}
diff --git a/libcxx/test/std/input.output/file.streams/fstreams/ifstream/types.pass.cpp b/libcxx/test/std/input.output/file.streams/fstreams/ifstream/types.pass.cpp
index 15a044021f64..580e1ed22cba 100644
--- a/libcxx/test/std/input.output/file.streams/fstreams/ifstream/types.pass.cpp
+++ b/libcxx/test/std/input.output/file.streams/fstreams/ifstream/types.pass.cpp
@@ -23,6 +23,7 @@
#include <type_traits>
#include "test_macros.h"
+#include "../native_handle_test_helpers.h"
int main(int, char**)
{
@@ -32,6 +33,12 @@ int main(int, char**)
static_assert((std::is_same<std::basic_ifstream<char>::int_type, std::char_traits<char>::int_type>::value), "");
static_assert((std::is_same<std::basic_ifstream<char>::pos_type, std::char_traits<char>::pos_type>::value), "");
static_assert((std::is_same<std::basic_ifstream<char>::off_type, std::char_traits<char>::off_type>::value), "");
+#if TEST_STD_VER >= 26
+ test_native_handle_type< std::basic_ifstream<char>>();
+# ifndef TEST_HAS_NO_WIDE_CHARACTERS
+ test_native_handle_type< std::basic_ifstream<wchar_t>>();
+# endif
+#endif
- return 0;
+ return 0;
}
diff --git a/libcxx/test/std/input.output/file.streams/fstreams/native_handle_assert_test_helpers.h b/libcxx/test/std/input.output/file.streams/fstreams/native_handle_assert_test_helpers.h
new file mode 100644
index 000000000000..b46228ac7fba
--- /dev/null
+++ b/libcxx/test/std/input.output/file.streams/fstreams/native_handle_assert_test_helpers.h
@@ -0,0 +1,28 @@
+//===----------------------------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef TEST_STD_INPUT_OUTPUT_FILE_STREAMS_FSTREAMS_ASSERT_TEST_HELPERS_H
+#define TEST_STD_INPUT_OUTPUT_FILE_STREAMS_FSTREAMS_ASSERT_TEST_HELPERS_H
+
+#if !__has_include(<unistd.h>) || !__has_include(<sys/wait.h>)
+# error "Requires UNIX headers"
+#endif
+
+#include "check_assertion.h"
+
+template <typename StreamT>
+inline void test_native_handle_assertion() {
+ StreamT f;
+
+ // non-const
+ TEST_LIBCPP_ASSERT_FAILURE(f.native_handle(), "File must be opened");
+ // const
+ TEST_LIBCPP_ASSERT_FAILURE(std::as_const(f).native_handle(), "File must be opened");
+}
+
+#endif // TEST_STD_INPUT_OUTPUT_FILE_STREAMS_FSTREAMS_ASSERT_TEST_HELPERS_H
diff --git a/libcxx/test/std/input.output/file.streams/fstreams/native_handle_test_helpers.h b/libcxx/test/std/input.output/file.streams/fstreams/native_handle_test_helpers.h
new file mode 100644
index 000000000000..4bc862d78984
--- /dev/null
+++ b/libcxx/test/std/input.output/file.streams/fstreams/native_handle_test_helpers.h
@@ -0,0 +1,84 @@
+//===----------------------------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef TEST_STD_INPUT_OUTPUT_FILE_STREAMS_FSTREAMS_TEST_HELPERS_H
+#define TEST_STD_INPUT_OUTPUT_FILE_STREAMS_FSTREAMS_TEST_HELPERS_H
+
+#include <cassert>
+#include <concepts>
+#include <cstdio>
+#include <fstream>
+#include <filesystem>
+#include <type_traits>
+#include <utility>
+
+#if defined(_WIN32)
+# include <io.h>
+# include <windows.h>
+#else
+# include <fcntl.h>
+#endif
+
+#include "platform_support.h"
+#include "types.h"
+
+#if TEST_STD_VER >= 26
+
+inline bool is_handle_valid(NativeHandleT handle) {
+# if defined(_WIN32)
+ BY_HANDLE_FILE_INFORMATION fileInformation;
+ return GetFileInformationByHandle(handle, &fileInformation);
+# elif __has_include(<unistd.h>) // POSIX
+ return fcntl(handle, F_GETFL) != -1 || errno != EBADF;
+# else
+# error "Provide a native file handle!"
+# endif
+}
+
+template <typename CharT, typename StreamT>
+inline void test_native_handle() {
+ static_assert(
+ std::is_same_v<typename std::basic_filebuf<CharT>::native_handle_type, typename StreamT::native_handle_type>);
+
+ StreamT f;
+ std::filesystem::path p = get_temp_file_name();
+
+ // non-const
+ {
+ f.open(p);
+ std::same_as<NativeHandleT> decltype(auto) handle = f.native_handle();
+ assert(is_handle_valid(handle));
+ assert(f.rdbuf()->native_handle() == handle);
+ assert(std::as_const(f).rdbuf()->native_handle() == handle);
+ f.close();
+ assert(!is_handle_valid(handle));
+ static_assert(noexcept(f.native_handle()));
+ }
+ // const
+ {
+ f.open(p);
+ std::same_as<NativeHandleT> decltype(auto) const_handle = std::as_const(f).native_handle();
+ assert(is_handle_valid(const_handle));
+ assert(f.rdbuf()->native_handle() == const_handle);
+ assert(std::as_const(f).rdbuf()->native_handle() == const_handle);
+ f.close();
+ assert(!is_handle_valid(const_handle));
+ static_assert(noexcept(std::as_const(f).native_handle()));
+ }
+}
+
+template <typename StreamT>
+inline void test_native_handle_type() {
+ static_assert(std::is_trivially_copyable_v<typename StreamT::native_handle_type>);
+ static_assert(std::semiregular<typename StreamT::native_handle_type>);
+ static_assert(std::is_same_v<typename StreamT::native_handle_type, NativeHandleT>);
+}
+
+#endif // #if TEST_STD_VER >= 26
+
+#endif // TEST_STD_INPUT_OUTPUT_FILE_STREAMS_FSTREAMS_TEST_HELPERS_H
diff --git a/libcxx/test/std/input.output/file.streams/fstreams/ofstream.members/native_handle.assert.pass.cpp b/libcxx/test/std/input.output/file.streams/fstreams/ofstream.members/native_handle.assert.pass.cpp
new file mode 100644
index 000000000000..243be70ec945
--- /dev/null
+++ b/libcxx/test/std/input.output/file.streams/fstreams/ofstream.members/native_handle.assert.pass.cpp
@@ -0,0 +1,32 @@
+//===----------------------------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+// UNSUPPORTED: c++03, c++11, c++14, c++17, c++20, c++23
+
+// REQUIRES: has-unix-headers
+// REQUIRES: libcpp-hardening-mode={{extensive|debug}}
+// XFAIL: availability-verbose_abort-missing
+
+// <fstream>
+
+// class basic_ofstream;
+
+// native_handle_type native_handle() const noexcept;
+
+#include <fstream>
+
+#include "../native_handle_assert_test_helpers.h"
+
+int main(int, char**) {
+ test_native_handle_assertion<std::basic_ofstream<char>>();
+#ifndef TEST_HAS_NO_WIDE_CHARACTERS
+ test_native_handle_assertion<std::basic_ofstream<wchar_t>>();
+#endif
+
+ return 0;
+}
diff --git a/libcxx/test/std/input.output/file.streams/fstreams/ofstream.members/native_handle.pass.cpp b/libcxx/test/std/input.output/file.streams/fstreams/ofstream.members/native_handle.pass.cpp
new file mode 100644
index 000000000000..88b3a00934cc
--- /dev/null
+++ b/libcxx/test/std/input.output/file.streams/fstreams/ofstream.members/native_handle.pass.cpp
@@ -0,0 +1,27 @@
+//===----------------------------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+// UNSUPPORTED: c++03, c++11, c++14, c++17, c++20, c++23
+
+// <fstream>
+
+// class basic_ofstream;
+
+// native_handle_type native_handle() const noexcept;
+
+#include "test_macros.h"
+#include "../native_handle_test_helpers.h"
+
+int main(int, char**) {
+ test_native_handle<char, std::basic_ofstream<char>>();
+#ifndef TEST_HAS_NO_WIDE_CHARACTERS
+ test_native_handle<wchar_t, std::basic_ofstream<wchar_t>>();
+#endif
+
+ return 0;
+}
diff --git a/libcxx/test/std/input.output/file.streams/fstreams/ofstream/types.pass.cpp b/libcxx/test/std/input.output/file.streams/fstreams/ofstream/types.pass.cpp
index 0986d5907bac..242b4610af4a 100644
--- a/libcxx/test/std/input.output/file.streams/fstreams/ofstream/types.pass.cpp
+++ b/libcxx/test/std/input.output/file.streams/fstreams/ofstream/types.pass.cpp
@@ -23,6 +23,7 @@
#include <type_traits>
#include "test_macros.h"
+#include "../native_handle_test_helpers.h"
int main(int, char**)
{
@@ -32,6 +33,12 @@ int main(int, char**)
static_assert((std::is_same<std::basic_ofstream<char>::int_type, std::char_traits<char>::int_type>::value), "");
static_assert((std::is_same<std::basic_ofstream<char>::pos_type, std::char_traits<char>::pos_type>::value), "");
static_assert((std::is_same<std::basic_ofstream<char>::off_type, std::char_traits<char>::off_type>::value), "");
+#if TEST_STD_VER >= 26
+ test_native_handle_type< std::basic_ofstream<char>>();
+# ifndef TEST_HAS_NO_WIDE_CHARACTERS
+ test_native_handle_type< std::basic_ofstream<wchar_t>>();
+# endif
+#endif
- return 0;
+ return 0;
}
diff --git a/libcxx/test/std/input.output/file.streams/fstreams/types.h b/libcxx/test/std/input.output/file.streams/fstreams/types.h
index b919ba201853..29c8ad2d06c4 100644
--- a/libcxx/test/std/input.output/file.streams/fstreams/types.h
+++ b/libcxx/test/std/input.output/file.streams/fstreams/types.h
@@ -80,4 +80,14 @@ struct LibraryDefaultBuffer {
void operator()(std::basic_ifstream<CharT>&) const {}
};
+#if TEST_STD_VER >= 26
+# if defined(_WIN32)
+using NativeHandleT = void*; // HANDLE
+# elif __has_include(<unistd.h>)
+using NativeHandleT = int; // POSIX file descriptor
+# else
+# error "Provide a native file handle!"
+# endif
+#endif
+
#endif // TEST_STD_INPUT_OUTPUT_FILE_STREAMS_FSTREAMS_TYPES_H
diff --git a/libcxx/test/std/language.support/support.limits/support.limits.general/fstream.version.compile.pass.cpp b/libcxx/test/std/language.support/support.limits/support.limits.general/fstream.version.compile.pass.cpp
index 981f5420ff7e..eab0313b2c1e 100644
--- a/libcxx/test/std/language.support/support.limits/support.limits.general/fstream.version.compile.pass.cpp
+++ b/libcxx/test/std/language.support/support.limits/support.limits.general/fstream.version.compile.pass.cpp
@@ -56,17 +56,11 @@
#elif TEST_STD_VER > 23
-# if !defined(_LIBCPP_VERSION)
-# ifndef __cpp_lib_fstream_native_handle
-# error "__cpp_lib_fstream_native_handle should be defined in c++26"
-# endif
-# if __cpp_lib_fstream_native_handle != 202306L
-# error "__cpp_lib_fstream_native_handle should have the value 202306L in c++26"
-# endif
-# else // _LIBCPP_VERSION
-# ifdef __cpp_lib_fstream_native_handle
-# error "__cpp_lib_fstream_native_handle should not be defined because it is unimplemented in libc++!"
-# endif
+# ifndef __cpp_lib_fstream_native_handle
+# error "__cpp_lib_fstream_native_handle should be defined in c++26"
+# endif
+# if __cpp_lib_fstream_native_handle != 202306L
+# error "__cpp_lib_fstream_native_handle should have the value 202306L in c++26"
# endif
#endif // TEST_STD_VER > 23
diff --git a/libcxx/test/std/language.support/support.limits/support.limits.general/version.version.compile.pass.cpp b/libcxx/test/std/language.support/support.limits/support.limits.general/version.version.compile.pass.cpp
index 3b7f2d26feeb..d5a0839b30f8 100644
--- a/libcxx/test/std/language.support/support.limits/support.limits.general/version.version.compile.pass.cpp
+++ b/libcxx/test/std/language.support/support.limits/support.limits.general/version.version.compile.pass.cpp
@@ -6511,17 +6511,11 @@
# endif
# endif
-# if !defined(_LIBCPP_VERSION)
-# ifndef __cpp_lib_fstream_native_handle
-# error "__cpp_lib_fstream_native_handle should be defined in c++26"
-# endif
-# if __cpp_lib_fstream_native_handle != 202306L
-# error "__cpp_lib_fstream_native_handle should have the value 202306L in c++26"
-# endif
-# else // _LIBCPP_VERSION
-# ifdef __cpp_lib_fstream_native_handle
-# error "__cpp_lib_fstream_native_handle should not be defined because it is unimplemented in libc++!"
-# endif
+# ifndef __cpp_lib_fstream_native_handle
+# error "__cpp_lib_fstream_native_handle should be defined in c++26"
+# endif
+# if __cpp_lib_fstream_native_handle != 202306L
+# error "__cpp_lib_fstream_native_handle should have the value 202306L in c++26"
# endif
# if !defined(_LIBCPP_VERSION)
diff --git a/libcxx/utils/generate_feature_test_macro_components.py b/libcxx/utils/generate_feature_test_macro_components.py
index 3ad5170d73ff..8ee92909dfa5 100755
--- a/libcxx/utils/generate_feature_test_macro_components.py
+++ b/libcxx/utils/generate_feature_test_macro_components.py
@@ -570,7 +570,6 @@ feature_test_macros = [
"name": "__cpp_lib_fstream_native_handle",
"values": {"c++26": 202306}, # P1759R6 Native handles and file streams
"headers": ["fstream"],
- "unimplemented": True,
},
{
"name": "__cpp_lib_function_ref",
diff --git a/libcxx/utils/qemu_baremetal.py b/libcxx/utils/qemu_baremetal.py
index aaf5b8448906..126031bbb19c 100755
--- a/libcxx/utils/qemu_baremetal.py
+++ b/libcxx/utils/qemu_baremetal.py
@@ -16,6 +16,7 @@ output (if the underlying baremetal environment supports QEMU semihosting).
import argparse
import os
import sys
+import shutil
def main():
@@ -32,8 +33,13 @@ def main():
parser.add_argument("test_binary")
parser.add_argument("test_args", nargs=argparse.ZERO_OR_MORE, default=[])
args = parser.parse_args()
+
+ if not shutil.which(args.qemu):
+ sys.exit(f"Failed to find QEMU binary from --qemu value: '{args.qemu}'")
+
if not os.path.exists(args.test_binary):
sys.exit(f"Expected argument to be a test executable: '{args.test_binary}'")
+
qemu_commandline = [
args.qemu,
"-chardev",
diff --git a/lld/ELF/Arch/AArch64.cpp b/lld/ELF/Arch/AArch64.cpp
index 048f0ec30ebd..54b0a84e5213 100644
--- a/lld/ELF/Arch/AArch64.cpp
+++ b/lld/ELF/Arch/AArch64.cpp
@@ -1025,8 +1025,7 @@ addTaggedSymbolReferences(InputSectionBase &sec,
// symbols should also be built with tagging. But, to handle these cases, we
// demote the symbol to be untagged.
void lld::elf::createTaggedSymbols(const SmallVector<ELFFileBase *, 0> &files) {
- assert(config->emachine == EM_AARCH64 &&
- config->androidMemtagMode != ELF::NT_MEMTAG_LEVEL_NONE);
+ assert(hasMemtag());
// First, collect all symbols that are marked as tagged, and count how many
// times they're marked as tagged.
diff --git a/lld/ELF/Arch/X86_64.cpp b/lld/ELF/Arch/X86_64.cpp
index 2135ac234864..c28e01e48195 100644
--- a/lld/ELF/Arch/X86_64.cpp
+++ b/lld/ELF/Arch/X86_64.cpp
@@ -358,6 +358,7 @@ RelExpr X86_64::getRelExpr(RelType type, const Symbol &s,
case R_X86_64_DTPOFF64:
return R_DTPREL;
case R_X86_64_TPOFF32:
+ case R_X86_64_TPOFF64:
return R_TPREL;
case R_X86_64_TLSDESC_CALL:
return R_TLSDESC_CALL;
@@ -791,6 +792,7 @@ void X86_64::relocate(uint8_t *loc, const Relocation &rel, uint64_t val) const {
write32le(loc, val);
break;
case R_X86_64_64:
+ case R_X86_64_TPOFF64:
case R_X86_64_DTPOFF64:
case R_X86_64_PC64:
case R_X86_64_SIZE64:
diff --git a/lld/ELF/SyntheticSections.cpp b/lld/ELF/SyntheticSections.cpp
index 2b32eb3a0fe3..19fced5aff92 100644
--- a/lld/ELF/SyntheticSections.cpp
+++ b/lld/ELF/SyntheticSections.cpp
@@ -1450,7 +1450,7 @@ DynamicSection<ELFT>::computeContents() {
if (config->zPacPlt)
addInt(DT_AARCH64_PAC_PLT, 0);
- if (config->androidMemtagMode != ELF::NT_MEMTAG_LEVEL_NONE) {
+ if (hasMemtag()) {
addInt(DT_AARCH64_MEMTAG_MODE, config->androidMemtagMode == NT_MEMTAG_LEVEL_ASYNC);
addInt(DT_AARCH64_MEMTAG_HEAP, config->androidMemtagHeap);
addInt(DT_AARCH64_MEMTAG_STACK, config->androidMemtagStack);
diff --git a/lld/ELF/Writer.cpp b/lld/ELF/Writer.cpp
index a84e4864ab0e..7b9880a034bc 100644
--- a/lld/ELF/Writer.cpp
+++ b/lld/ELF/Writer.cpp
@@ -291,6 +291,11 @@ static void demoteSymbolsAndComputeIsPreemptible() {
}
}
+bool elf::hasMemtag() {
+ return config->emachine == EM_AARCH64 &&
+ config->androidMemtagMode != ELF::NT_MEMTAG_LEVEL_NONE;
+}
+
// Fully static executables don't support MTE globals at this point in time, as
// we currently rely on:
// - A dynamic loader to process relocations, and
@@ -298,8 +303,7 @@ static void demoteSymbolsAndComputeIsPreemptible() {
// This restriction could be removed in future by re-using some of the ideas
// that ifuncs use in fully static executables.
bool elf::canHaveMemtagGlobals() {
- return config->emachine == EM_AARCH64 &&
- config->androidMemtagMode != ELF::NT_MEMTAG_LEVEL_NONE &&
+ return hasMemtag() &&
(config->relocatable || config->shared || needsInterpSection());
}
@@ -397,11 +401,13 @@ template <class ELFT> void elf::createSyntheticSections() {
std::make_unique<SymbolTableSection<ELFT>>(*part.dynStrTab);
part.dynamic = std::make_unique<DynamicSection<ELFT>>();
- if (canHaveMemtagGlobals()) {
+ if (hasMemtag()) {
part.memtagAndroidNote = std::make_unique<MemtagAndroidNote>();
add(*part.memtagAndroidNote);
- part.memtagDescriptors = std::make_unique<MemtagDescriptors>();
- add(*part.memtagDescriptors);
+ if (canHaveMemtagGlobals()) {
+ part.memtagDescriptors = std::make_unique<MemtagDescriptors>();
+ add(*part.memtagDescriptors);
+ }
}
if (config->androidPackDynRelocs)
diff --git a/lld/ELF/Writer.h b/lld/ELF/Writer.h
index eaf021aac42e..aac8176d9098 100644
--- a/lld/ELF/Writer.h
+++ b/lld/ELF/Writer.h
@@ -57,6 +57,7 @@ bool isMipsN32Abi(const InputFile *f);
bool isMicroMips();
bool isMipsR6();
+bool hasMemtag();
bool canHaveMemtagGlobals();
} // namespace lld::elf
diff --git a/lld/test/ELF/aarch64-memtag-android-abi.s b/lld/test/ELF/aarch64-memtag-android-abi.s
index e5744483e447..7c6a26aa9524 100644
--- a/lld/test/ELF/aarch64-memtag-android-abi.s
+++ b/lld/test/ELF/aarch64-memtag-android-abi.s
@@ -56,6 +56,18 @@
# BAD-MODE: error: unknown --android-memtag-mode value: "asymm", should be one of
# BAD-MODE: {async, sync, none}
+# RUN: ld.lld -static --android-memtag-mode=sync --android-memtag-heap \
+# RUN: --android-memtag-stack %t.o -o %t
+# RUN: llvm-readelf --memtag %t | FileCheck %s --check-prefixes=STATIC
+
+# STATIC: Memtag Dynamic Entries:
+# STATIC-NEXT: < none found >
+# STATIC: Memtag Android Note:
+# STATIC-NEXT: Tagging Mode: SYNC
+# STATIC-NEXT: Heap: Enabled
+# STATIC-NEXT: Stack: Enabled
+
+
.globl _start
_start:
ret
diff --git a/lld/test/ELF/eh-frame-nonzero-offset-riscv.s b/lld/test/ELF/eh-frame-nonzero-offset-riscv.s
new file mode 100644
index 000000000000..fa78b7686373
--- /dev/null
+++ b/lld/test/ELF/eh-frame-nonzero-offset-riscv.s
@@ -0,0 +1,53 @@
+// REQUIRES: riscv
+// RUN: rm -rf %t && split-file %s %t && cd %t
+
+// RUN: llvm-mc -filetype=obj -triple=riscv64 a.s -o a.o
+// RUN: ld.lld a.o -T eh-frame-non-zero-offset.t -o non-zero
+// RUN: llvm-readelf --program-headers --unwind --symbols -x .eh_frame non-zero | FileCheck --check-prefix=NONZERO %s
+// RUN: ld.lld a.o -T eh-frame-zero-offset.t -o zero
+// RUN: llvm-readelf --program-headers --unwind --symbols -x .eh_frame zero | FileCheck --check-prefix=ZERO %s
+
+// NONZERO: {{[0-9]+}}: 0000000000000088 {{.*}} __eh_frame_start
+// NONZERO-NEXT: {{[0-9]+}}: 00000000000000b4 {{.*}} __eh_frame_end
+
+// NONZERO: 0x00000088 10000000 00000000 017a5200 01780101 .
+// NONZERO-NEXT: 0x00000098 1b0c0200 10000000 18000000 5cffffff .
+// NONZERO-NEXT: 0x000000a8 04000000 00000000 00000000 .
+
+// ZERO: {{[0-9]+}}: 0000000000000008 {{.*}} __eh_frame_start
+// ZERO-NEXT: {{[0-9]+}}: 0000000000000034 {{.*}} __eh_frame_end
+
+// ZERO: 0x00000008 10000000 00000000 017a5200 01780101 .
+// ZERO-NEXT: 0x00000018 1b0c0200 10000000 18000000 dcffffff .
+// ZERO-NEXT: 0x00000028 04000000 00000000 00000000 .
+
+//--- eh-frame-non-zero-offset.t
+SECTIONS {
+ .text : { *(.text .text.*) }
+ .eh_frame : {
+ /* Padding within .eh_frame */
+ . += 128;
+ __eh_frame_start = .;
+ *(.eh_frame) ;
+ __eh_frame_end = .;
+ }
+}
+
+//--- eh-frame-zero-offset.t
+SECTIONS {
+ .text : { *(.text .text.*) }
+ .eh_frame : {
+ __eh_frame_start = .;
+ *(.eh_frame) ;
+ __eh_frame_end = .;
+ }
+}
+
+//--- a.s
+.section .text.01, "ax",%progbits
+.global f1
+.type f1, %function
+f1:
+.cfi_startproc
+.space 4
+.cfi_endproc
diff --git a/lld/test/ELF/linkerscript/overlay-reject.test b/lld/test/ELF/linkerscript/overlay-reject.test
deleted file mode 100644
index fa8a2be37aed..000000000000
--- a/lld/test/ELF/linkerscript/overlay-reject.test
+++ /dev/null
@@ -1,13 +0,0 @@
-# REQUIRES: x86
-# RUN: llvm-mc -filetype=obj -triple=x86_64-unknown-linux /dev/null -o %t.o
-# RUN: not ld.lld %t.o --script %s -o /dev/null 2>&1 | FileCheck %s
-
-# CHECK: {{.*}}.test:{{.*}}: { expected, but got 0x3000
-# CHECK-NEXT: >>> .out.aaa 0x3000 : { *(.aaa) }
-# CHECK-NEXT: >>> ^
-
-SECTIONS {
- OVERLAY 0x1000 : AT ( 0x2000 ) {
- .out.aaa 0x3000 : { *(.aaa) }
- }
-}
diff --git a/lld/test/ELF/linkerscript/overlay-reject2.test b/lld/test/ELF/linkerscript/overlay-reject2.test
deleted file mode 100644
index be886d7c1dac..000000000000
--- a/lld/test/ELF/linkerscript/overlay-reject2.test
+++ /dev/null
@@ -1,17 +0,0 @@
-# REQUIRES: x86
-# RUN: llvm-mc -filetype=obj -triple=x86_64-unknown-linux /dev/null -o %t.o
-# RUN: not ld.lld %t.o --script %s -o /dev/null 2>&1 | FileCheck %s
-
-# CHECK: {{.*}}.test:{{.*}}: { expected, but got AX
-# CHECK-NEXT: >>> .out.aaa { *(.aaa) } > AX AT>FLASH
-# CHECK-NEXT: >>> ^
-
-MEMORY {
- AX (ax) : ORIGIN = 0x3000, LENGTH = 0x4000
-}
-
-SECTIONS {
- OVERLAY 0x1000 : AT ( 0x2000 ) {
- .out.aaa { *(.aaa) } > AX AT>FLASH
- }
-}
diff --git a/lld/test/ELF/linkerscript/overlay.test b/lld/test/ELF/linkerscript/overlay.test
index 2d3c88759c63..942e0a2971be 100644
--- a/lld/test/ELF/linkerscript/overlay.test
+++ b/lld/test/ELF/linkerscript/overlay.test
@@ -1,31 +1,91 @@
# REQUIRES: x86
-# RUN: echo 'nop; .section .small, "a"; .long 0; .section .big, "a"; .quad 1;' \
-# RUN: | llvm-mc -filetype=obj -triple=x86_64-unknown-linux - -o %t.o
-# RUN: ld.lld %t.o --script %s -o %t
+# RUN: rm -rf %t && split-file %s %t && cd %t
+# RUN: llvm-mc -filetype=obj -triple=x86_64 a.s -o a.o
+# RUN: ld.lld a.o -T a.t -o a
-SECTIONS {
- OVERLAY 0x1000 : AT ( 0x4000 ) {
- .out.big { *(.big) }
- .out.small { *(.small) }
- }
-}
-
-## Here we check that can handle OVERLAY which will produce sections
+## Here we check that we can handle OVERLAY, which will produce sections
## .out.big and .out.small with the same starting VAs, but different LMAs.
## Section .big is larger than .small, we check that placing of section
## .text does not cause overlapping error and that
## .text's VA is 0x1000 + max(sizeof(.out.big), sizeof(.out.small)).
-# RUN: llvm-readelf --sections -l %t | FileCheck %s
+# RUN: llvm-readelf --sections -l a | FileCheck %s
-# CHECK: Section Headers:
-# CHECK: Name Type Address Off Size
-# CHECK: .out.big PROGBITS 0000000000001000 001000 000008
-# CHECK: .out.small PROGBITS 0000000000001000 002000 000004
-# CHECK: .text PROGBITS 0000000000001008 002008 000001
+# CHECK: Name Type Address Off Size
+# CHECK: .big1 PROGBITS 0000000000001000 001000 000008
+# CHECK-NEXT: .small1 PROGBITS 0000000000001000 002000 000004
+# CHECK-NEXT: .small2 PROGBITS 0000000000001008 002008 000004
+# CHECK-NEXT: .big2 PROGBITS 0000000000001008 003008 000008
+# CHECK-NEXT: .small3 PROGBITS 0000000000001010 003010 000004
+# CHECK-NEXT: .big3 PROGBITS 0000000000001014 003014 000008
+# CHECK-NEXT: .text PROGBITS 0000000000001024 003024 000001
# CHECK: Program Headers:
# CHECK: Type Offset VirtAddr PhysAddr FileSiz MemSiz Flg Align
-# CHECK-NEXT: LOAD 0x001000 0x0000000000001000 0x0000000000004000 0x000008 0x000008 R 0x1000
-# CHECK-NEXT: LOAD 0x002000 0x0000000000001000 0x0000000000004008 0x000004 0x000004 R 0x1000
-# CHECK-NEXT: LOAD 0x002008 0x0000000000001008 0x0000000000004010 0x000001 0x000001 R E 0x1000
+# CHECK-NEXT: LOAD 0x001000 0x0000000000001000 0x0000000000001000 0x000008 0x000008 R 0x1000
+# CHECK-NEXT: LOAD 0x002000 0x0000000000001000 0x0000000000001008 0x000004 0x000004 R 0x1000
+# CHECK-NEXT: LOAD 0x002008 0x0000000000001008 0x0000000000002008 0x000004 0x000004 R 0x1000
+# CHECK-NEXT: LOAD 0x003008 0x0000000000001008 0x000000000000200c 0x000008 0x000008 R 0x1000
+## FIXME Fix p_paddr when the first section in an overlay is empty and discarded.
+# CHECK-NEXT: LOAD 0x003010 0x0000000000001010 0x0000000000000000 0x000004 0x000004 R 0x1000
+# CHECK-NEXT: LOAD 0x003014 0x0000000000001014 0x0000000000000004 0x000008 0x000008 R 0x1000
+# CHECK-NEXT: LOAD 0x003024 0x0000000000001024 0x0000000000000014 0x000001 0x000001 R E 0x1000
+
+# RUN: not ld.lld a.o -T err1.t 2>&1 | FileCheck %s --check-prefix=ERR1 --match-full-lines --strict-whitespace
+# ERR1:{{.*}}error: err1.t:3: { expected, but got 0x3000
+# ERR1-NEXT:>>> .out.aaa 0x3000 : { *(.aaa) }
+# ERR1-NEXT:>>> ^
+
+# RUN: not ld.lld a.o -T err2.t 2>&1 | FileCheck %s --check-prefix=ERR2 --match-full-lines --strict-whitespace
+# ERR2:{{.*}}error: err2.t:{{.*}}: { expected, but got AX
+# ERR2-NEXT:>>> .out.aaa { *(.aaa) } > AX AT>FLASH
+# ERR2-NEXT:>>> ^
+
+#--- a.s
+.globl _start
+_start:
+ nop
+
+.section .small1, "a"; .long 0
+.section .big1, "a"; .quad 1
+
+.section .small2, "a"; .long 0
+.section .big2, "a"; .quad 1
+
+.section .small3, "a"; .long 0
+.section .big3, "a"; .quad 1
+
+#--- a.t
+SECTIONS {
+ OVERLAY 0x1000 : AT( 0x1000 ) {
+ .big1 { *(.big1) }
+ .small1 { *(.small1) }
+ }
+ OVERLAY 0x1008 : AT (0x2008) {
+ .small2 { *(.small2) }
+ .big2 { *(.big2) }
+ }
+ OVERLAY . : AT (0x2014) {
+ .empty3 { *(.empty3) }
+ .small3 { *(.small3) }
+ .big3 { *(.big3) }
+ }
+ .text : { *(.text) }
+}
+
+#--- err1.t
+SECTIONS {
+ OVERLAY 0x1000 : AT ( 0x2000 ) {
+ .out.aaa 0x3000 : { *(.aaa) }
+ }
+}
+
+#--- err2.t
+MEMORY {
+ AX (ax) : ORIGIN = 0x3000, LENGTH = 0x4000
+}
+SECTIONS {
+ OVERLAY 0x1000 : AT ( 0x2000 ) {
+ .out.aaa { *(.aaa) } > AX AT>FLASH
+ }
+}
diff --git a/lld/test/ELF/x86-64-tls-pie.s b/lld/test/ELF/x86-64-tls-pie.s
index 5ef0f54f435d..71caa0f49803 100644
--- a/lld/test/ELF/x86-64-tls-pie.s
+++ b/lld/test/ELF/x86-64-tls-pie.s
@@ -1,14 +1,23 @@
# REQUIRES: x86
# RUN: llvm-mc -filetype=obj -triple=x86_64-unknown-cloudabi %s -o %t1.o
# RUN: ld.lld -pie %t1.o -o %t
-# RUN: llvm-readobj -r %t | FileCheck %s
+# RUN: llvm-readobj -r %t | FileCheck --check-prefix=RELOCS %s
+# RUN: llvm-objdump -d --no-show-raw-insn --no-print-imm-hex --no-leading-addr %t1.o | FileCheck --check-prefix=DIS %s
# Bug 27174: R_X86_64_TPOFF32 and R_X86_64_GOTTPOFF relocations should
# be eliminated when building a PIE executable, as the static TLS layout
# is fixed.
#
-# CHECK: Relocations [
-# CHECK-NEXT: ]
+# RELOCS: Relocations [
+# RELOCS-NEXT: ]
+#
+# DIS: <_start>:
+# DIS-NEXT: movq %fs:0, %rax
+# DIS-NEXT: movl $3, (%rax)
+# DIS-NEXT: movq %fs:0, %rdx
+# DIS-NEXT: movq (%rip), %rcx
+# DIS-NEXT: movl $3, (%rdx,%rcx)
+# DIS-NEXT: movabsq 0, %rax
.globl _start
_start:
@@ -19,6 +28,9 @@ _start:
movq i@GOTTPOFF(%rip), %rcx
movl $3, (%rdx,%rcx)
+ # This additionally tests support for R_X86_64_TPOFF64 relocations.
+ movabs i@TPOFF, %rax
+
.section .tbss.i,"awT",@nobits
.globl i
i:
diff --git a/lldb/include/lldb/Breakpoint/BreakpointIDList.h b/lldb/include/lldb/Breakpoint/BreakpointIDList.h
index 924cb1f26b8b..6910024695d8 100644
--- a/lldb/include/lldb/Breakpoint/BreakpointIDList.h
+++ b/lldb/include/lldb/Breakpoint/BreakpointIDList.h
@@ -33,7 +33,7 @@ public:
size_t GetSize() const;
- const BreakpointID &GetBreakpointIDAtIndex(size_t index) const;
+ BreakpointID GetBreakpointIDAtIndex(size_t index) const;
bool RemoveBreakpointIDAtIndex(size_t index);
@@ -48,9 +48,6 @@ public:
bool FindBreakpointID(const char *bp_id, size_t *position) const;
- void InsertStringArray(llvm::ArrayRef<const char *> string_array,
- CommandReturnObject &result);
-
// Returns a pair consisting of the beginning and end of a breakpoint
// ID range expression. If the input string is not a valid specification,
// returns an empty pair.
@@ -66,7 +63,6 @@ public:
private:
BreakpointIDArray m_breakpoint_ids;
- BreakpointID m_invalid_id;
BreakpointIDList(const BreakpointIDList &) = delete;
const BreakpointIDList &operator=(const BreakpointIDList &) = delete;
diff --git a/lldb/source/Breakpoint/BreakpointIDList.cpp b/lldb/source/Breakpoint/BreakpointIDList.cpp
index dd16d3b6388c..05c461827cad 100644
--- a/lldb/source/Breakpoint/BreakpointIDList.cpp
+++ b/lldb/source/Breakpoint/BreakpointIDList.cpp
@@ -20,17 +20,15 @@ using namespace lldb_private;
// class BreakpointIDList
-BreakpointIDList::BreakpointIDList()
- : m_invalid_id(LLDB_INVALID_BREAK_ID, LLDB_INVALID_BREAK_ID) {}
+BreakpointIDList::BreakpointIDList() : m_breakpoint_ids() {}
BreakpointIDList::~BreakpointIDList() = default;
size_t BreakpointIDList::GetSize() const { return m_breakpoint_ids.size(); }
-const BreakpointID &
-BreakpointIDList::GetBreakpointIDAtIndex(size_t index) const {
+BreakpointID BreakpointIDList::GetBreakpointIDAtIndex(size_t index) const {
return ((index < m_breakpoint_ids.size()) ? m_breakpoint_ids[index]
- : m_invalid_id);
+ : BreakpointID());
}
bool BreakpointIDList::RemoveBreakpointIDAtIndex(size_t index) {
@@ -82,19 +80,6 @@ bool BreakpointIDList::FindBreakpointID(const char *bp_id_str,
return FindBreakpointID(*bp_id, position);
}
-void BreakpointIDList::InsertStringArray(
- llvm::ArrayRef<const char *> string_array, CommandReturnObject &result) {
- if(string_array.empty())
- return;
-
- for (const char *str : string_array) {
- auto bp_id = BreakpointID::ParseCanonicalReference(str);
- if (bp_id)
- m_breakpoint_ids.push_back(*bp_id);
- }
- result.SetStatus(eReturnStatusSuccessFinishNoResult);
-}
-
// This function takes OLD_ARGS, which is usually the result of breaking the
// command string arguments into
// an array of space-separated strings, and searches through the arguments for
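The change above replaces the reference-returning accessor and its m_invalid_id sentinel with a by-value return. A minimal, self-contained sketch of the same pattern follows (hypothetical names, not LLDB's classes): once the element is a small value type, the out-of-range case can hand back a freshly default-constructed "invalid" object, so no long-lived sentinel member is needed.

#include <cstddef>
#include <vector>

struct Id {
  int site = -1; // -1 marks "invalid"; a default-constructed Id reports it
  int loc = -1;
  bool valid() const { return site >= 0; }
};

class IdList {
  std::vector<Id> ids_;

public:
  void add(Id id) { ids_.push_back(id); }

  // Returning by value: an out-of-range index yields a fresh invalid Id,
  // so the list needs no long-lived sentinel member.
  Id at(std::size_t index) const {
    return index < ids_.size() ? ids_[index] : Id();
  }
};

int main() {
  IdList list;
  list.add(Id{1, 2});
  return list.at(5).valid() ? 1 : 0; // out of range -> invalid -> exit code 0
}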
diff --git a/lldb/source/Commands/CommandObjectBreakpoint.cpp b/lldb/source/Commands/CommandObjectBreakpoint.cpp
index 63492590d32d..f9ba68eda3ff 100644
--- a/lldb/source/Commands/CommandObjectBreakpoint.cpp
+++ b/lldb/source/Commands/CommandObjectBreakpoint.cpp
@@ -2494,7 +2494,9 @@ void CommandObjectMultiwordBreakpoint::VerifyIDs(
// NOW, convert the list of breakpoint id strings in TEMP_ARGS into an actual
// BreakpointIDList:
- valid_ids->InsertStringArray(temp_args.GetArgumentArrayRef(), result);
+ for (llvm::StringRef temp_arg : temp_args.GetArgumentArrayRef())
+ if (auto bp_id = BreakpointID::ParseCanonicalReference(temp_arg))
+ valid_ids->AddBreakpointID(*bp_id);
// At this point, all of the breakpoint ids that the user passed in have
// been converted to breakpoint IDs and put into valid_ids.
diff --git a/lldb/source/Plugins/SymbolFile/DWARF/DWARFASTParserClang.cpp b/lldb/source/Plugins/SymbolFile/DWARF/DWARFASTParserClang.cpp
index 009722b85aa1..54d06b1115a2 100644
--- a/lldb/source/Plugins/SymbolFile/DWARF/DWARFASTParserClang.cpp
+++ b/lldb/source/Plugins/SymbolFile/DWARF/DWARFASTParserClang.cpp
@@ -142,6 +142,18 @@ static bool ShouldIgnoreArtificialField(llvm::StringRef FieldName) {
|| FieldName.starts_with("_vptr.");
}
+/// Returns true for C++ constructs represented by clang::CXXRecordDecl.
+static bool TagIsRecordType(dw_tag_t tag) {
+ switch (tag) {
+ case DW_TAG_class_type:
+ case DW_TAG_structure_type:
+ case DW_TAG_union_type:
+ return true;
+ default:
+ return false;
+ }
+}
+
TypeSP DWARFASTParserClang::ParseTypeFromClangModule(const SymbolContext &sc,
const DWARFDIE &die,
Log *log) {
@@ -3304,12 +3316,19 @@ clang::Decl *DWARFASTParserClang::GetClangDeclForDIE(const DWARFDIE &die) {
return nullptr;
switch (die.Tag()) {
- case DW_TAG_variable:
case DW_TAG_constant:
case DW_TAG_formal_parameter:
case DW_TAG_imported_declaration:
case DW_TAG_imported_module:
break;
+ case DW_TAG_variable:
+    // If 'die' is a C++ static data member (i.e., its parent
+    // is a record type), don't create a decl for it here.
+ if (auto parent = die.GetParent();
+ parent.IsValid() && TagIsRecordType(parent.Tag()))
+ return nullptr;
+ break;
default:
return nullptr;
}
diff --git a/lldb/source/Plugins/SymbolFile/DWARF/DWARFIndex.cpp b/lldb/source/Plugins/SymbolFile/DWARF/DWARFIndex.cpp
index b1c323b101ce..20c07a94b507 100644
--- a/lldb/source/Plugins/SymbolFile/DWARF/DWARFIndex.cpp
+++ b/lldb/source/Plugins/SymbolFile/DWARF/DWARFIndex.cpp
@@ -7,6 +7,8 @@
//===----------------------------------------------------------------------===//
#include "Plugins/SymbolFile/DWARF/DWARFIndex.h"
+#include "DWARFDebugInfoEntry.h"
+#include "DWARFDeclContext.h"
#include "Plugins/Language/ObjC/ObjCLanguage.h"
#include "Plugins/SymbolFile/DWARF/DWARFDIE.h"
#include "Plugins/SymbolFile/DWARF/SymbolFileDWARF.h"
@@ -112,3 +114,21 @@ void DWARFIndex::ReportInvalidDIERef(DIERef ref, llvm::StringRef name) const {
"bad die {0:x16} for '{1}')\n",
ref.die_offset(), name.str().c_str());
}
+
+void DWARFIndex::GetFullyQualifiedType(
+ const DWARFDeclContext &context,
+ llvm::function_ref<bool(DWARFDIE die)> callback) {
+ GetTypes(context, [&](DWARFDIE die) {
+ return GetFullyQualifiedTypeImpl(context, die, callback);
+ });
+}
+
+bool DWARFIndex::GetFullyQualifiedTypeImpl(
+ const DWARFDeclContext &context, DWARFDIE die,
+ llvm::function_ref<bool(DWARFDIE die)> callback) {
+ DWARFDeclContext dwarf_decl_ctx =
+ die.GetDIE()->GetDWARFDeclContext(die.GetCU());
+ if (dwarf_decl_ctx == context)
+ return callback(die);
+ return true;
+}
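The base implementation above is a thin filter over GetTypes: candidates found by name are kept only if their reconstructed declaration context equals the query. A generic, self-contained sketch of that query-then-filter shape follows; the names and string-based matching are illustrative assumptions, not LLDB's API.

#include <functional>
#include <string>
#include <vector>

struct Entry {
  std::string qualified_name; // e.g. "ns::Foo"
};

// Coarse lookup: forward every entry whose name ends in `base` to `callback`;
// a false return from the callback stops the walk (mirroring GetTypes).
void getByBaseName(const std::vector<Entry> &index, const std::string &base,
                   const std::function<bool(const Entry &)> &callback) {
  for (const Entry &e : index) {
    const std::string &q = e.qualified_name;
    if (q.size() >= base.size() &&
        q.compare(q.size() - base.size(), base.size(), base) == 0)
      if (!callback(e))
        return;
  }
}

// Exact lookup: wrap the coarse query and forward only full-name matches.
void getFullyQualified(const std::vector<Entry> &index,
                       const std::string &qualified, const std::string &base,
                       const std::function<bool(const Entry &)> &callback) {
  getByBaseName(index, base, [&](const Entry &e) {
    if (e.qualified_name == qualified)
      return callback(e); // exact match: let the user decide whether to stop
    return true;          // otherwise keep searching
  });
}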
diff --git a/lldb/source/Plugins/SymbolFile/DWARF/DWARFIndex.h b/lldb/source/Plugins/SymbolFile/DWARF/DWARFIndex.h
index 9aadeddbb217..0551b07100a9 100644
--- a/lldb/source/Plugins/SymbolFile/DWARF/DWARFIndex.h
+++ b/lldb/source/Plugins/SymbolFile/DWARF/DWARFIndex.h
@@ -53,6 +53,14 @@ public:
llvm::function_ref<bool(DWARFDIE die)> callback) = 0;
virtual void GetTypes(const DWARFDeclContext &context,
llvm::function_ref<bool(DWARFDIE die)> callback) = 0;
+
+ /// Finds all DIEs whose fully qualified name matches `context`. A base
+ /// implementation is provided, and it uses the entire CU to check the DIE
+ /// parent hierarchy. Specializations should override this if they are able
+ /// to provide a faster implementation.
+ virtual void
+ GetFullyQualifiedType(const DWARFDeclContext &context,
+ llvm::function_ref<bool(DWARFDIE die)> callback);
virtual void
GetNamespaces(ConstString name,
llvm::function_ref<bool(DWARFDIE die)> callback) = 0;
@@ -102,6 +110,12 @@ protected:
}
void ReportInvalidDIERef(DIERef ref, llvm::StringRef name) const;
+
+ /// Implementation of `GetFullyQualifiedType` to check a single entry,
+ /// shareable with derived classes.
+ bool
+ GetFullyQualifiedTypeImpl(const DWARFDeclContext &context, DWARFDIE die,
+ llvm::function_ref<bool(DWARFDIE die)> callback);
};
} // namespace dwarf
} // namespace lldb_private::plugin
diff --git a/lldb/source/Plugins/SymbolFile/DWARF/SymbolFileDWARF.cpp b/lldb/source/Plugins/SymbolFile/DWARF/SymbolFileDWARF.cpp
index 447930ffe07b..737da7798b82 100644
--- a/lldb/source/Plugins/SymbolFile/DWARF/SymbolFileDWARF.cpp
+++ b/lldb/source/Plugins/SymbolFile/DWARF/SymbolFileDWARF.cpp
@@ -3138,7 +3138,7 @@ SymbolFileDWARF::FindDefinitionTypeForDWARFDeclContext(const DWARFDIE &die) {
}
const DWARFDeclContext die_dwarf_decl_ctx = GetDWARFDeclContext(die);
- m_index->GetTypes(die_dwarf_decl_ctx, [&](DWARFDIE type_die) {
+ m_index->GetFullyQualifiedType(die_dwarf_decl_ctx, [&](DWARFDIE type_die) {
// Make sure type_die's language matches the type system we are
// looking for. We don't want to find a "Foo" type from Java if we
// are looking for a "Foo" type for C, C++, ObjC, or ObjC++.
@@ -3165,9 +3165,8 @@ SymbolFileDWARF::FindDefinitionTypeForDWARFDeclContext(const DWARFDIE &die) {
return true;
}
- DWARFDeclContext type_dwarf_decl_ctx = GetDWARFDeclContext(type_die);
-
if (log) {
+ DWARFDeclContext type_dwarf_decl_ctx = GetDWARFDeclContext(type_die);
GetObjectFile()->GetModule()->LogMessage(
log,
"SymbolFileDWARF::"
@@ -3177,10 +3176,6 @@ SymbolFileDWARF::FindDefinitionTypeForDWARFDeclContext(const DWARFDIE &die) {
type_dwarf_decl_ctx.GetQualifiedName());
}
- // Make sure the decl contexts match all the way up
- if (die_dwarf_decl_ctx != type_dwarf_decl_ctx)
- return true;
-
Type *resolved_type = ResolveType(type_die, false);
if (!resolved_type || resolved_type == DIE_IS_BEING_PARSED)
return true;
diff --git a/lldb/test/API/commands/expression/nested/TestNestedExpressions.py b/lldb/test/API/commands/expression/nested/TestNestedExpressions.py
index 7f194e921e56..6a97d4f34e67 100644
--- a/lldb/test/API/commands/expression/nested/TestNestedExpressions.py
+++ b/lldb/test/API/commands/expression/nested/TestNestedExpressions.py
@@ -33,6 +33,8 @@ class NestedExpressions(TestBase):
self.expect_expr("sizeof(A::B::C)", result_value="1")
self.expect_expr("sizeof(A::B)", result_value="2")
+ # Fails on Windows for unknown reasons.
+ @skipIfWindows
def test_static_in_nested_structs(self):
"""
Test expressions that references a static variable in nested structs.
diff --git a/lldb/test/API/functionalities/inline-sourcefile/TestInlineSourceFiles.py b/lldb/test/API/functionalities/inline-sourcefile/TestInlineSourceFiles.py
index 20ed0ce00661..ad10a63e6013 100644
--- a/lldb/test/API/functionalities/inline-sourcefile/TestInlineSourceFiles.py
+++ b/lldb/test/API/functionalities/inline-sourcefile/TestInlineSourceFiles.py
@@ -8,6 +8,8 @@ from lldbsuite.test import lldbutil
class InlineSourceFilesTestCase(TestBase):
@skipIf(compiler="gcc")
@skipIf(compiler="clang", compiler_version=["<", "18.0"])
+ # Fails on Windows for unknown reasons.
+ @skipIfWindows
def test(self):
"""Test DWARF inline source files."""
self.build()
diff --git a/lldb/test/Shell/SymbolFile/DWARF/Inputs/dwo-static-data-member.cpp b/lldb/test/Shell/SymbolFile/DWARF/Inputs/dwo-static-data-member.cpp
new file mode 100644
index 000000000000..fa7c3500df0f
--- /dev/null
+++ b/lldb/test/Shell/SymbolFile/DWARF/Inputs/dwo-static-data-member.cpp
@@ -0,0 +1,8 @@
+struct NoCtor {
+ NoCtor();
+ static int i;
+};
+
+int NoCtor::i = 15;
+
+int main() { return NoCtor::i; }
diff --git a/lldb/test/Shell/SymbolFile/DWARF/dwo-static-data-member-access.test b/lldb/test/Shell/SymbolFile/DWARF/dwo-static-data-member-access.test
new file mode 100644
index 000000000000..6e4deae7b9a0
--- /dev/null
+++ b/lldb/test/Shell/SymbolFile/DWARF/dwo-static-data-member-access.test
@@ -0,0 +1,24 @@
+# In DWARFv5, C++ static data members are represented
+# as DW_TAG_variable. We make sure LLDB's expression
+# evaluator doesn't crash when trying to parse such
+# a DW_TAG_variable DIE, whose parent DIE is only
+# a forward declaration.
+
+# RUN: %clangxx_host %S/Inputs/dwo-static-data-member.cpp \
+# RUN: -g -gdwarf-5 -gsplit-dwarf -flimit-debug-info -o %t
+# RUN: %lldb %t -s %s -o exit 2>&1 | FileCheck %s
+
+breakpoint set -n main
+process launch
+
+# CHECK: Process {{.*}} stopped
+
+# FIXME: The expression evaluator tries to attach
+# the static member's VarDecl to the NoCtor RecordDecl
+# before passing the AST to clang; this requires the
+# RecordDecl to be a full definition. But the debug info
+# only contains a forward declaration for NoCtor, so
+# LLDB fails to evaluate the expression.
+expression NoCtor::i
+# CHECK-LABEL: expression NoCtor::i
+# CHECK: use of undeclared identifier 'NoCtor'
diff --git a/llvm/docs/LangRef.rst b/llvm/docs/LangRef.rst
index b5918e3063d8..15abeb1c984c 100644
--- a/llvm/docs/LangRef.rst
+++ b/llvm/docs/LangRef.rst
@@ -12250,7 +12250,7 @@ This instruction requires several arguments:
#. Arguments with the :ref:`inalloca <attr_inalloca>` or
:ref:`preallocated <attr_preallocated>` attribute are forwarded in place.
#. If the musttail call appears in a function with the ``"thunk"`` attribute
- and the caller and callee both have varargs, than any unprototyped
+ and the caller and callee both have varargs, then any unprototyped
arguments in register or memory are forwarded to the callee. Similarly,
the return value of the callee is returned to the caller's caller, even
if a void return type is in use.
diff --git a/llvm/include/llvm/Analysis/MemoryBuiltins.h b/llvm/include/llvm/Analysis/MemoryBuiltins.h
index d080a5956a4d..37ce1518f00c 100644
--- a/llvm/include/llvm/Analysis/MemoryBuiltins.h
+++ b/llvm/include/llvm/Analysis/MemoryBuiltins.h
@@ -190,7 +190,7 @@ Value *lowerObjectSizeCall(
/// SizeOffsetType - A base template class for the object size visitors. Used
/// here as a self-documenting way to handle the values rather than using a
/// \p std::pair.
-template <typename T, class C> class SizeOffsetType {
+template <typename T, class C> struct SizeOffsetType {
public:
T Size;
T Offset;
@@ -213,13 +213,11 @@ public:
/// SizeOffsetAPInt - Used by \p ObjectSizeOffsetVisitor, which works with
/// \p APInts.
-class SizeOffsetAPInt : public SizeOffsetType<APInt, SizeOffsetAPInt> {
- friend class SizeOffsetType;
- static bool known(APInt V) { return V.getBitWidth() > 1; }
-
-public:
+struct SizeOffsetAPInt : public SizeOffsetType<APInt, SizeOffsetAPInt> {
SizeOffsetAPInt() = default;
SizeOffsetAPInt(APInt Size, APInt Offset) : SizeOffsetType(Size, Offset) {}
+
+ static bool known(APInt V) { return V.getBitWidth() > 1; }
};
/// Evaluate the size and offset of an object pointed to by a Value*
@@ -274,30 +272,26 @@ private:
/// SizeOffsetValue - Used by \p ObjectSizeOffsetEvaluator, which works with
/// \p Values.
-class SizeOffsetWeakTrackingVH;
-class SizeOffsetValue : public SizeOffsetType<Value *, SizeOffsetValue> {
- friend class SizeOffsetType;
- static bool known(Value *V) { return V != nullptr; }
-
-public:
+struct SizeOffsetWeakTrackingVH;
+struct SizeOffsetValue : public SizeOffsetType<Value *, SizeOffsetValue> {
SizeOffsetValue() : SizeOffsetType(nullptr, nullptr) {}
SizeOffsetValue(Value *Size, Value *Offset) : SizeOffsetType(Size, Offset) {}
SizeOffsetValue(const SizeOffsetWeakTrackingVH &SOT);
+
+ static bool known(Value *V) { return V != nullptr; }
};
/// SizeOffsetWeakTrackingVH - Used by \p ObjectSizeOffsetEvaluator in a
/// \p DenseMap.
-class SizeOffsetWeakTrackingVH
+struct SizeOffsetWeakTrackingVH
: public SizeOffsetType<WeakTrackingVH, SizeOffsetWeakTrackingVH> {
- friend class SizeOffsetType;
- static bool known(WeakTrackingVH V) { return V.pointsToAliveValue(); }
-
-public:
SizeOffsetWeakTrackingVH() : SizeOffsetType(nullptr, nullptr) {}
SizeOffsetWeakTrackingVH(Value *Size, Value *Offset)
: SizeOffsetType(Size, Offset) {}
SizeOffsetWeakTrackingVH(const SizeOffsetValue &SOV)
: SizeOffsetType(SOV.Size, SOV.Offset) {}
+
+ static bool known(WeakTrackingVH V) { return V.pointsToAliveValue(); }
};
/// Evaluate the size and offset of an object pointed to by a Value*.
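Turning these types into plain structs works because the CRTP base only needs to call the derived type's static known(); once known() is public, the friend declarations become unnecessary. A self-contained sketch of that shape, with simplified, hypothetical members:

template <typename T, class C> struct SizeOffsetBase {
  T Size{};
  T Offset{};

  SizeOffsetBase() = default;
  SizeOffsetBase(T S, T O) : Size(S), Offset(O) {}

  // The base relies on the derived struct's public static known().
  bool knownSize() const { return C::known(Size); }
  bool knownOffset() const { return C::known(Offset); }
  bool bothKnown() const { return knownSize() && knownOffset(); }
};

struct SizeOffsetInt : SizeOffsetBase<int, SizeOffsetInt> {
  SizeOffsetInt() = default;
  SizeOffsetInt(int S, int O) : SizeOffsetBase(S, O) {}
  static bool known(int V) { return V >= 0; } // negative means "unknown" here
};

int main() {
  SizeOffsetInt SO(8, 0);
  return SO.bothKnown() ? 0 : 1; // both fields known -> exit code 0
}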
diff --git a/llvm/include/llvm/CodeGen/GlobalISel/IRTranslator.h b/llvm/include/llvm/CodeGen/GlobalISel/IRTranslator.h
index bffc03ed0187..1b094d9d9fe7 100644
--- a/llvm/include/llvm/CodeGen/GlobalISel/IRTranslator.h
+++ b/llvm/include/llvm/CodeGen/GlobalISel/IRTranslator.h
@@ -366,6 +366,10 @@ private:
BranchProbability BranchProbToNext, Register Reg,
SwitchCG::BitTestCase &B, MachineBasicBlock *SwitchBB);
+ void splitWorkItem(SwitchCG::SwitchWorkList &WorkList,
+ const SwitchCG::SwitchWorkListItem &W, Value *Cond,
+ MachineBasicBlock *SwitchMBB, MachineIRBuilder &MIB);
+
bool lowerJumpTableWorkItem(
SwitchCG::SwitchWorkListItem W, MachineBasicBlock *SwitchMBB,
MachineBasicBlock *CurMBB, MachineBasicBlock *DefaultMBB,
diff --git a/llvm/include/llvm/CodeGen/SwitchLoweringUtils.h b/llvm/include/llvm/CodeGen/SwitchLoweringUtils.h
index 5d06e21737b8..99478e9f39e2 100644
--- a/llvm/include/llvm/CodeGen/SwitchLoweringUtils.h
+++ b/llvm/include/llvm/CodeGen/SwitchLoweringUtils.h
@@ -293,6 +293,22 @@ public:
MachineBasicBlock *Src, MachineBasicBlock *Dst,
BranchProbability Prob = BranchProbability::getUnknown()) = 0;
+ /// Determine the rank by weight of CC in [First,Last]. If CC has more weight
+ /// than each cluster in the range, its rank is 0.
+ unsigned caseClusterRank(const CaseCluster &CC, CaseClusterIt First,
+ CaseClusterIt Last);
+
+ struct SplitWorkItemInfo {
+ CaseClusterIt LastLeft;
+ CaseClusterIt FirstRight;
+ BranchProbability LeftProb;
+ BranchProbability RightProb;
+ };
+ /// Compute information to balance the tree based on branch probabilities to
+ /// create a near-optimal (in terms of search time given key frequency) binary
+ /// search tree. See e.g. Kurt Mehlhorn "Nearly Optimal Binary Search Trees"
+ /// (1975).
+ SplitWorkItemInfo computeSplitWorkItemInfo(const SwitchWorkListItem &W);
virtual ~SwitchLowering() = default;
private:
diff --git a/llvm/include/llvm/IR/PatternMatch.h b/llvm/include/llvm/IR/PatternMatch.h
index 48afdb867ba6..447ac0f2aa61 100644
--- a/llvm/include/llvm/IR/PatternMatch.h
+++ b/llvm/include/llvm/IR/PatternMatch.h
@@ -1270,7 +1270,7 @@ inline DisjointOr_match<LHS, RHS, true> m_c_DisjointOr(const LHS &L,
return DisjointOr_match<LHS, RHS, true>(L, R);
}
-/// Match either "and" or "or disjoint".
+/// Match either "add" or "or disjoint".
template <typename LHS, typename RHS>
inline match_combine_or<BinaryOp_match<LHS, RHS, Instruction::Add>,
DisjointOr_match<LHS, RHS>>
diff --git a/llvm/include/llvm/Object/ELFTypes.h b/llvm/include/llvm/Object/ELFTypes.h
index d3351a2d1650..956f7811dd6c 100644
--- a/llvm/include/llvm/Object/ELFTypes.h
+++ b/llvm/include/llvm/Object/ELFTypes.h
@@ -885,6 +885,9 @@ struct PGOAnalysisMap {
bool BBFreq : 1;
bool BrProb : 1;
+ // True if at least one feature is enabled
+ bool anyEnabled() const { return FuncEntryCount || BBFreq || BrProb; }
+
// Encodes to minimum bit width representation.
uint8_t encode() const {
return (static_cast<uint8_t>(FuncEntryCount) << 0) |
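For reference, a stand-alone sketch of the flag packing that encode() and the new anyEnabled() operate on; the shifts for BBFreq and BrProb and the decode() helper are assumptions added for illustration, not part of the header above.

#include <cstdint>

struct Features {
  bool FuncEntryCount : 1;
  bool BBFreq : 1;
  bool BrProb : 1;

  bool anyEnabled() const { return FuncEntryCount || BBFreq || BrProb; }

  // Pack the flags into the low bits of a byte.
  uint8_t encode() const {
    return (static_cast<uint8_t>(FuncEntryCount) << 0) |
           (static_cast<uint8_t>(BBFreq) << 1) |
           (static_cast<uint8_t>(BrProb) << 2);
  }

  // Hypothetical inverse, shown only to make the bit layout explicit.
  static Features decode(uint8_t Bits) {
    return Features{static_cast<bool>(Bits & 1), static_cast<bool>(Bits & 2),
                    static_cast<bool>(Bits & 4)};
  }
};

int main() {
  Features F{true, false, true};
  // encode() and decode() round-trip; anyEnabled() reports a non-empty set.
  return Features::decode(F.encode()).anyEnabled() ? 0 : 1;
}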
diff --git a/llvm/include/llvm/Passes/StandardInstrumentations.h b/llvm/include/llvm/Passes/StandardInstrumentations.h
index 2ec36cad244f..8c6a44876d54 100644
--- a/llvm/include/llvm/Passes/StandardInstrumentations.h
+++ b/llvm/include/llvm/Passes/StandardInstrumentations.h
@@ -65,7 +65,7 @@ private:
bool shouldPrintBeforePass(StringRef PassID);
bool shouldPrintAfterPass(StringRef PassID);
bool shouldPrintPassNumbers();
- bool shouldPrintAtPassNumber();
+ bool shouldPrintBeforePassNumber();
void pushPassRunDescriptor(StringRef PassID, Any IR,
std::string &DumpIRFilename);
diff --git a/llvm/lib/Analysis/InstructionSimplify.cpp b/llvm/lib/Analysis/InstructionSimplify.cpp
index 241bdd81b75a..d0c27cae0dff 100644
--- a/llvm/lib/Analysis/InstructionSimplify.cpp
+++ b/llvm/lib/Analysis/InstructionSimplify.cpp
@@ -4313,7 +4313,7 @@ static Value *simplifyWithOpReplaced(Value *V, Value *Op, Value *RepOp,
// For vector types, the simplification must hold per-lane, so forbid
// potentially cross-lane operations like shufflevector.
if (!I->getType()->isVectorTy() || isa<ShuffleVectorInst>(I) ||
- isa<CallBase>(I))
+ isa<CallBase>(I) || isa<BitCastInst>(I))
return nullptr;
}
diff --git a/llvm/lib/Analysis/MemorySSAUpdater.cpp b/llvm/lib/Analysis/MemorySSAUpdater.cpp
index 9ad60f774e9f..e87ae7d71fff 100644
--- a/llvm/lib/Analysis/MemorySSAUpdater.cpp
+++ b/llvm/lib/Analysis/MemorySSAUpdater.cpp
@@ -568,7 +568,6 @@ static MemoryAccess *onlySingleValue(MemoryPhi *MP) {
static MemoryAccess *getNewDefiningAccessForClone(MemoryAccess *MA,
const ValueToValueMapTy &VMap,
PhiToDefMap &MPhiMap,
- bool CloneWasSimplified,
MemorySSA *MSSA) {
MemoryAccess *InsnDefining = MA;
if (MemoryDef *DefMUD = dyn_cast<MemoryDef>(InsnDefining)) {
@@ -578,18 +577,10 @@ static MemoryAccess *getNewDefiningAccessForClone(MemoryAccess *MA,
if (Instruction *NewDefMUDI =
cast_or_null<Instruction>(VMap.lookup(DefMUDI))) {
InsnDefining = MSSA->getMemoryAccess(NewDefMUDI);
- if (!CloneWasSimplified)
- assert(InsnDefining && "Defining instruction cannot be nullptr.");
- else if (!InsnDefining || isa<MemoryUse>(InsnDefining)) {
+ if (!InsnDefining || isa<MemoryUse>(InsnDefining)) {
// The clone was simplified, it's no longer a MemoryDef, look up.
- auto DefIt = DefMUD->getDefsIterator();
- // Since simplified clones only occur in single block cloning, a
- // previous definition must exist, otherwise NewDefMUDI would not
- // have been found in VMap.
- assert(DefIt != MSSA->getBlockDefs(DefMUD->getBlock())->begin() &&
- "Previous def must exist");
InsnDefining = getNewDefiningAccessForClone(
- &*(--DefIt), VMap, MPhiMap, CloneWasSimplified, MSSA);
+ DefMUD->getDefiningAccess(), VMap, MPhiMap, MSSA);
}
}
}
@@ -624,9 +615,9 @@ void MemorySSAUpdater::cloneUsesAndDefs(BasicBlock *BB, BasicBlock *NewBB,
MemoryAccess *NewUseOrDef = MSSA->createDefinedAccess(
NewInsn,
getNewDefiningAccessForClone(MUD->getDefiningAccess(), VMap,
- MPhiMap, CloneWasSimplified, MSSA),
+ MPhiMap, MSSA),
/*Template=*/CloneWasSimplified ? nullptr : MUD,
- /*CreationMustSucceed=*/CloneWasSimplified ? false : true);
+ /*CreationMustSucceed=*/false);
if (NewUseOrDef)
MSSA->insertIntoListsForBlock(NewUseOrDef, NewBB, MemorySSA::End);
}
diff --git a/llvm/lib/Analysis/VFABIDemangling.cpp b/llvm/lib/Analysis/VFABIDemangling.cpp
index 426f98c0c628..8562d8fbfa1e 100644
--- a/llvm/lib/Analysis/VFABIDemangling.cpp
+++ b/llvm/lib/Analysis/VFABIDemangling.cpp
@@ -326,10 +326,6 @@ getScalableECFromSignature(const FunctionType *Signature, const VFISAKind ISA,
// Only vector parameters are used when determining the VF; uniform or
// linear are left as scalars, so do not affect VF.
if (Param.ParamKind == VFParamKind::Vector) {
- // If the scalar function doesn't actually have a corresponding argument,
- // reject the mapping.
- if (Param.ParamPos >= Signature->getNumParams())
- return std::nullopt;
Type *PTy = Signature->getParamType(Param.ParamPos);
std::optional<ElementCount> EC = getElementCountForTy(ISA, PTy);
@@ -427,6 +423,11 @@ std::optional<VFInfo> VFABI::tryDemangleForVFABI(StringRef MangledName,
if (Parameters.empty())
return std::nullopt;
+ // If the number of arguments of the scalar function does not match the
+  // vector variant we have just demangled, then reject the mapping.
+ if (Parameters.size() != FTy->getNumParams())
+ return std::nullopt;
+
// Figure out the number of lanes in vectors for this function variant. This
// is easy for fixed length, as the vlen encoding just gives us the value
// directly. However, if the vlen mangling indicated that this function
diff --git a/llvm/lib/CodeGen/GlobalISel/IRTranslator.cpp b/llvm/lib/CodeGen/GlobalISel/IRTranslator.cpp
index 9c11113902a2..6708f2baa5ed 100644
--- a/llvm/lib/CodeGen/GlobalISel/IRTranslator.cpp
+++ b/llvm/lib/CodeGen/GlobalISel/IRTranslator.cpp
@@ -751,16 +751,91 @@ bool IRTranslator::translateSwitch(const User &U, MachineIRBuilder &MIB) {
auto DefaultProb = getEdgeProbability(SwitchMBB, DefaultMBB);
WorkList.push_back({SwitchMBB, First, Last, nullptr, nullptr, DefaultProb});
- // FIXME: At the moment we don't do any splitting optimizations here like
- // SelectionDAG does, so this worklist only has one entry.
while (!WorkList.empty()) {
SwitchWorkListItem W = WorkList.pop_back_val();
+
+ unsigned NumClusters = W.LastCluster - W.FirstCluster + 1;
+ // For optimized builds, lower large range as a balanced binary tree.
+ if (NumClusters > 3 &&
+ MF->getTarget().getOptLevel() != CodeGenOptLevel::None &&
+ !DefaultMBB->getParent()->getFunction().hasMinSize()) {
+ splitWorkItem(WorkList, W, SI.getCondition(), SwitchMBB, MIB);
+ continue;
+ }
+
if (!lowerSwitchWorkItem(W, SI.getCondition(), SwitchMBB, DefaultMBB, MIB))
return false;
}
return true;
}
+void IRTranslator::splitWorkItem(SwitchCG::SwitchWorkList &WorkList,
+ const SwitchCG::SwitchWorkListItem &W,
+ Value *Cond, MachineBasicBlock *SwitchMBB,
+ MachineIRBuilder &MIB) {
+ using namespace SwitchCG;
+ assert(W.FirstCluster->Low->getValue().slt(W.LastCluster->Low->getValue()) &&
+ "Clusters not sorted?");
+ assert(W.LastCluster - W.FirstCluster + 1 >= 2 && "Too small to split!");
+
+ auto [LastLeft, FirstRight, LeftProb, RightProb] =
+ SL->computeSplitWorkItemInfo(W);
+
+ // Use the first element on the right as pivot since we will make less-than
+ // comparisons against it.
+ CaseClusterIt PivotCluster = FirstRight;
+ assert(PivotCluster > W.FirstCluster);
+ assert(PivotCluster <= W.LastCluster);
+
+ CaseClusterIt FirstLeft = W.FirstCluster;
+ CaseClusterIt LastRight = W.LastCluster;
+
+ const ConstantInt *Pivot = PivotCluster->Low;
+
+ // New blocks will be inserted immediately after the current one.
+ MachineFunction::iterator BBI(W.MBB);
+ ++BBI;
+
+ // We will branch to the LHS if Value < Pivot. If LHS is a single cluster,
+ // we can branch to its destination directly if it's squeezed exactly in
+ // between the known lower bound and Pivot - 1.
+ MachineBasicBlock *LeftMBB;
+ if (FirstLeft == LastLeft && FirstLeft->Kind == CC_Range &&
+ FirstLeft->Low == W.GE &&
+ (FirstLeft->High->getValue() + 1LL) == Pivot->getValue()) {
+ LeftMBB = FirstLeft->MBB;
+ } else {
+ LeftMBB = FuncInfo.MF->CreateMachineBasicBlock(W.MBB->getBasicBlock());
+ FuncInfo.MF->insert(BBI, LeftMBB);
+ WorkList.push_back(
+ {LeftMBB, FirstLeft, LastLeft, W.GE, Pivot, W.DefaultProb / 2});
+ }
+
+ // Similarly, we will branch to the RHS if Value >= Pivot. If RHS is a
+ // single cluster, RHS.Low == Pivot, and we can branch to its destination
+ // directly if RHS.High equals the current upper bound.
+ MachineBasicBlock *RightMBB;
+ if (FirstRight == LastRight && FirstRight->Kind == CC_Range && W.LT &&
+ (FirstRight->High->getValue() + 1ULL) == W.LT->getValue()) {
+ RightMBB = FirstRight->MBB;
+ } else {
+ RightMBB = FuncInfo.MF->CreateMachineBasicBlock(W.MBB->getBasicBlock());
+ FuncInfo.MF->insert(BBI, RightMBB);
+ WorkList.push_back(
+ {RightMBB, FirstRight, LastRight, Pivot, W.LT, W.DefaultProb / 2});
+ }
+
+ // Create the CaseBlock record that will be used to lower the branch.
+ CaseBlock CB(ICmpInst::Predicate::ICMP_SLT, false, Cond, Pivot, nullptr,
+ LeftMBB, RightMBB, W.MBB, MIB.getDebugLoc(), LeftProb,
+ RightProb);
+
+ if (W.MBB == SwitchMBB)
+ emitSwitchCase(CB, SwitchMBB, MIB);
+ else
+ SL->SwitchCases.push_back(CB);
+}
+
void IRTranslator::emitJumpTable(SwitchCG::JumpTable &JT,
MachineBasicBlock *MBB) {
// Emit the code for the jump table
diff --git a/llvm/lib/CodeGen/ReplaceWithVeclib.cpp b/llvm/lib/CodeGen/ReplaceWithVeclib.cpp
index 893aa4a91828..56025aa5c45f 100644
--- a/llvm/lib/CodeGen/ReplaceWithVeclib.cpp
+++ b/llvm/lib/CodeGen/ReplaceWithVeclib.cpp
@@ -6,9 +6,9 @@
//
//===----------------------------------------------------------------------===//
//
-// Replaces calls to LLVM vector intrinsics (i.e., calls to LLVM intrinsics
-// with vector operands) with matching calls to functions from a vector
-// library (e.g., libmvec, SVML) according to TargetLibraryInfo.
+// Replaces LLVM IR instructions with vector operands (i.e., the frem
+// instruction or calls to LLVM intrinsics) with matching calls to functions
+// from a vector library (e.g., libmvec, SVML) via TargetLibraryInfo.
//
//===----------------------------------------------------------------------===//
@@ -69,88 +69,98 @@ Function *getTLIFunction(Module *M, FunctionType *VectorFTy,
return TLIFunc;
}
-/// Replace the call to the vector intrinsic ( \p CalltoReplace ) with a call to
-/// the corresponding function from the vector library ( \p TLIVecFunc ).
-static void replaceWithTLIFunction(CallInst &CalltoReplace, VFInfo &Info,
+/// Replace the instruction \p I with a call to the corresponding function from
+/// the vector library (\p TLIVecFunc).
+static void replaceWithTLIFunction(Instruction &I, VFInfo &Info,
Function *TLIVecFunc) {
- IRBuilder<> IRBuilder(&CalltoReplace);
- SmallVector<Value *> Args(CalltoReplace.args());
+ IRBuilder<> IRBuilder(&I);
+ auto *CI = dyn_cast<CallInst>(&I);
+ SmallVector<Value *> Args(CI ? CI->args() : I.operands());
if (auto OptMaskpos = Info.getParamIndexForOptionalMask()) {
- auto *MaskTy = VectorType::get(Type::getInt1Ty(CalltoReplace.getContext()),
- Info.Shape.VF);
+ auto *MaskTy =
+ VectorType::get(Type::getInt1Ty(I.getContext()), Info.Shape.VF);
Args.insert(Args.begin() + OptMaskpos.value(),
Constant::getAllOnesValue(MaskTy));
}
- // Preserve the operand bundles.
+ // If it is a call instruction, preserve the operand bundles.
SmallVector<OperandBundleDef, 1> OpBundles;
- CalltoReplace.getOperandBundlesAsDefs(OpBundles);
- CallInst *Replacement = IRBuilder.CreateCall(TLIVecFunc, Args, OpBundles);
- CalltoReplace.replaceAllUsesWith(Replacement);
+ if (CI)
+ CI->getOperandBundlesAsDefs(OpBundles);
+
+ auto *Replacement = IRBuilder.CreateCall(TLIVecFunc, Args, OpBundles);
+ I.replaceAllUsesWith(Replacement);
// Preserve fast math flags for FP math.
if (isa<FPMathOperator>(Replacement))
- Replacement->copyFastMathFlags(&CalltoReplace);
+ Replacement->copyFastMathFlags(&I);
}
-/// Returns true when successfully replaced \p CallToReplace with a suitable
-/// function taking vector arguments, based on available mappings in the \p TLI.
-/// Currently only works when \p CallToReplace is a call to vectorized
-/// intrinsic.
+/// Returns true when successfully replaced \p I with a suitable function taking
+/// vector arguments, based on available mappings in the \p TLI. Currently only
+/// works when \p I is a call to a vectorized intrinsic or the frem instruction.
static bool replaceWithCallToVeclib(const TargetLibraryInfo &TLI,
- CallInst &CallToReplace) {
- if (!CallToReplace.getCalledFunction())
- return false;
+ Instruction &I) {
+ // At the moment VFABI assumes the return type is always widened unless it is
+ // a void type.
+ auto *VTy = dyn_cast<VectorType>(I.getType());
+ ElementCount EC(VTy ? VTy->getElementCount() : ElementCount::getFixed(0));
- auto IntrinsicID = CallToReplace.getCalledFunction()->getIntrinsicID();
- // Replacement is only performed for intrinsic functions.
- if (IntrinsicID == Intrinsic::not_intrinsic)
- return false;
-
- // Compute arguments types of the corresponding scalar call. Additionally
- // checks if in the vector call, all vector operands have the same EC.
- ElementCount VF = ElementCount::getFixed(0);
- SmallVector<Type *> ScalarArgTypes;
- for (auto Arg : enumerate(CallToReplace.args())) {
- auto *ArgTy = Arg.value()->getType();
- if (isVectorIntrinsicWithScalarOpAtArg(IntrinsicID, Arg.index())) {
- ScalarArgTypes.push_back(ArgTy);
- } else if (auto *VectorArgTy = dyn_cast<VectorType>(ArgTy)) {
- ScalarArgTypes.push_back(ArgTy->getScalarType());
- // Disallow vector arguments with different VFs. When processing the first
- // vector argument, store it's VF, and for the rest ensure that they match
- // it.
- if (VF.isZero())
- VF = VectorArgTy->getElementCount();
- else if (VF != VectorArgTy->getElementCount())
+ // Compute the argument types of the corresponding scalar call and the scalar
+ // function name. For calls, it additionally finds the function to replace
+ // and checks that all vector operands match the previously found EC.
+ SmallVector<Type *, 8> ScalarArgTypes;
+ std::string ScalarName;
+ Function *FuncToReplace = nullptr;
+ if (auto *CI = dyn_cast<CallInst>(&I)) {
+ FuncToReplace = CI->getCalledFunction();
+ Intrinsic::ID IID = FuncToReplace->getIntrinsicID();
+ assert(IID != Intrinsic::not_intrinsic && "Not an intrinsic");
+ for (auto Arg : enumerate(CI->args())) {
+ auto *ArgTy = Arg.value()->getType();
+ if (isVectorIntrinsicWithScalarOpAtArg(IID, Arg.index())) {
+ ScalarArgTypes.push_back(ArgTy);
+ } else if (auto *VectorArgTy = dyn_cast<VectorType>(ArgTy)) {
+ ScalarArgTypes.push_back(VectorArgTy->getElementType());
+        // When the return type is void, set EC from the first vector
+        // argument, and disallow vector arguments with different ECs.
+ if (EC.isZero())
+ EC = VectorArgTy->getElementCount();
+ else if (EC != VectorArgTy->getElementCount())
+ return false;
+ } else
+ // Exit when it is supposed to be a vector argument but it isn't.
return false;
- } else
- // Exit when it is supposed to be a vector argument but it isn't.
+ }
+ // Try to reconstruct the name for the scalar version of the instruction,
+ // using scalar argument types.
+ ScalarName = Intrinsic::isOverloaded(IID)
+ ? Intrinsic::getName(IID, ScalarArgTypes, I.getModule())
+ : Intrinsic::getName(IID).str();
+ } else {
+ assert(VTy && "Return type must be a vector");
+ auto *ScalarTy = VTy->getScalarType();
+ LibFunc Func;
+ if (!TLI.getLibFunc(I.getOpcode(), ScalarTy, Func))
return false;
+ ScalarName = TLI.getName(Func);
+ ScalarArgTypes = {ScalarTy, ScalarTy};
}
- // Try to reconstruct the name for the scalar version of this intrinsic using
- // the intrinsic ID and the argument types converted to scalar above.
- std::string ScalarName =
- (Intrinsic::isOverloaded(IntrinsicID)
- ? Intrinsic::getName(IntrinsicID, ScalarArgTypes,
- CallToReplace.getModule())
- : Intrinsic::getName(IntrinsicID).str());
-
// Try to find the mapping for the scalar version of this intrinsic and the
// exact vector width of the call operands in the TargetLibraryInfo. First,
// check with a non-masked variant, and if that fails try with a masked one.
const VecDesc *VD =
- TLI.getVectorMappingInfo(ScalarName, VF, /*Masked*/ false);
- if (!VD && !(VD = TLI.getVectorMappingInfo(ScalarName, VF, /*Masked*/ true)))
+ TLI.getVectorMappingInfo(ScalarName, EC, /*Masked*/ false);
+ if (!VD && !(VD = TLI.getVectorMappingInfo(ScalarName, EC, /*Masked*/ true)))
return false;
LLVM_DEBUG(dbgs() << DEBUG_TYPE << ": Found TLI mapping from: `" << ScalarName
- << "` and vector width " << VF << " to: `"
+ << "` and vector width " << EC << " to: `"
<< VD->getVectorFnName() << "`.\n");
// Replace the call to the intrinsic with a call to the vector library
// function.
- Type *ScalarRetTy = CallToReplace.getType()->getScalarType();
+ Type *ScalarRetTy = I.getType()->getScalarType();
FunctionType *ScalarFTy =
FunctionType::get(ScalarRetTy, ScalarArgTypes, /*isVarArg*/ false);
const std::string MangledName = VD->getVectorFunctionABIVariantString();
@@ -162,27 +172,37 @@ static bool replaceWithCallToVeclib(const TargetLibraryInfo &TLI,
if (!VectorFTy)
return false;
- Function *FuncToReplace = CallToReplace.getCalledFunction();
- Function *TLIFunc = getTLIFunction(CallToReplace.getModule(), VectorFTy,
+ Function *TLIFunc = getTLIFunction(I.getModule(), VectorFTy,
VD->getVectorFnName(), FuncToReplace);
- replaceWithTLIFunction(CallToReplace, *OptInfo, TLIFunc);
-
- LLVM_DEBUG(dbgs() << DEBUG_TYPE << ": Replaced call to `"
- << FuncToReplace->getName() << "` with call to `"
- << TLIFunc->getName() << "`.\n");
+ replaceWithTLIFunction(I, *OptInfo, TLIFunc);
+ LLVM_DEBUG(dbgs() << DEBUG_TYPE << ": Replaced call to `" << ScalarName
+ << "` with call to `" << TLIFunc->getName() << "`.\n");
++NumCallsReplaced;
return true;
}
+/// Returns true if \p I is supported: a vectorized frem instruction, or a call
+/// to an intrinsic that returns either void or a vector.
+static bool isSupportedInstruction(Instruction *I) {
+ Type *Ty = I->getType();
+ if (auto *CI = dyn_cast<CallInst>(I))
+ return (Ty->isVectorTy() || Ty->isVoidTy()) && CI->getCalledFunction() &&
+ CI->getCalledFunction()->getIntrinsicID() !=
+ Intrinsic::not_intrinsic;
+ if (I->getOpcode() == Instruction::FRem && Ty->isVectorTy())
+ return true;
+ return false;
+}
+
static bool runImpl(const TargetLibraryInfo &TLI, Function &F) {
bool Changed = false;
- SmallVector<CallInst *> ReplacedCalls;
+ SmallVector<Instruction *> ReplacedCalls;
for (auto &I : instructions(F)) {
- if (auto *CI = dyn_cast<CallInst>(&I)) {
- if (replaceWithCallToVeclib(TLI, *CI)) {
- ReplacedCalls.push_back(CI);
- Changed = true;
- }
+ if (!isSupportedInstruction(&I))
+ continue;
+ if (replaceWithCallToVeclib(TLI, I)) {
+ ReplacedCalls.push_back(&I);
+ Changed = true;
}
}
// Erase the calls to the intrinsics that have been replaced
diff --git a/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp b/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp
index 78ebd2d33459..1ae682eaf251 100644
--- a/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp
@@ -11639,92 +11639,16 @@ void SelectionDAGBuilder::lowerWorkItem(SwitchWorkListItem W, Value *Cond,
}
}
-unsigned SelectionDAGBuilder::caseClusterRank(const CaseCluster &CC,
- CaseClusterIt First,
- CaseClusterIt Last) {
- return std::count_if(First, Last + 1, [&](const CaseCluster &X) {
- if (X.Prob != CC.Prob)
- return X.Prob > CC.Prob;
-
- // Ties are broken by comparing the case value.
- return X.Low->getValue().slt(CC.Low->getValue());
- });
-}
-
void SelectionDAGBuilder::splitWorkItem(SwitchWorkList &WorkList,
const SwitchWorkListItem &W,
Value *Cond,
MachineBasicBlock *SwitchMBB) {
assert(W.FirstCluster->Low->getValue().slt(W.LastCluster->Low->getValue()) &&
"Clusters not sorted?");
-
assert(W.LastCluster - W.FirstCluster + 1 >= 2 && "Too small to split!");
- // Balance the tree based on branch probabilities to create a near-optimal (in
- // terms of search time given key frequency) binary search tree. See e.g. Kurt
- // Mehlhorn "Nearly Optimal Binary Search Trees" (1975).
- CaseClusterIt LastLeft = W.FirstCluster;
- CaseClusterIt FirstRight = W.LastCluster;
- auto LeftProb = LastLeft->Prob + W.DefaultProb / 2;
- auto RightProb = FirstRight->Prob + W.DefaultProb / 2;
-
- // Move LastLeft and FirstRight towards each other from opposite directions to
- // find a partitioning of the clusters which balances the probability on both
- // sides. If LeftProb and RightProb are equal, alternate which side is
- // taken to ensure 0-probability nodes are distributed evenly.
- unsigned I = 0;
- while (LastLeft + 1 < FirstRight) {
- if (LeftProb < RightProb || (LeftProb == RightProb && (I & 1)))
- LeftProb += (++LastLeft)->Prob;
- else
- RightProb += (--FirstRight)->Prob;
- I++;
- }
-
- while (true) {
- // Our binary search tree differs from a typical BST in that ours can have up
- // to three values in each leaf. The pivot selection above doesn't take that
- // into account, which means the tree might require more nodes and be less
- // efficient. We compensate for this here.
-
- unsigned NumLeft = LastLeft - W.FirstCluster + 1;
- unsigned NumRight = W.LastCluster - FirstRight + 1;
-
- if (std::min(NumLeft, NumRight) < 3 && std::max(NumLeft, NumRight) > 3) {
- // If one side has less than 3 clusters, and the other has more than 3,
- // consider taking a cluster from the other side.
-
- if (NumLeft < NumRight) {
- // Consider moving the first cluster on the right to the left side.
- CaseCluster &CC = *FirstRight;
- unsigned RightSideRank = caseClusterRank(CC, FirstRight, W.LastCluster);
- unsigned LeftSideRank = caseClusterRank(CC, W.FirstCluster, LastLeft);
- if (LeftSideRank <= RightSideRank) {
- // Moving the cluster to the left does not demote it.
- ++LastLeft;
- ++FirstRight;
- continue;
- }
- } else {
- assert(NumRight < NumLeft);
- // Consider moving the last element on the left to the right side.
- CaseCluster &CC = *LastLeft;
- unsigned LeftSideRank = caseClusterRank(CC, W.FirstCluster, LastLeft);
- unsigned RightSideRank = caseClusterRank(CC, FirstRight, W.LastCluster);
- if (RightSideRank <= LeftSideRank) {
- // Moving the cluster to the right does not demot it.
- --LastLeft;
- --FirstRight;
- continue;
- }
- }
- }
- break;
- }
-
- assert(LastLeft + 1 == FirstRight);
- assert(LastLeft >= W.FirstCluster);
- assert(FirstRight <= W.LastCluster);
+ auto [LastLeft, FirstRight, LeftProb, RightProb] =
+ SL->computeSplitWorkItemInfo(W);
// Use the first element on the right as pivot since we will make less-than
// comparisons against it.
diff --git a/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.h b/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.h
index 2e102c002c09..6dcb8c816ad0 100644
--- a/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.h
+++ b/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.h
@@ -200,12 +200,6 @@ private:
/// create.
unsigned SDNodeOrder;
- /// Determine the rank by weight of CC in [First,Last]. If CC has more weight
- /// than each cluster in the range, its rank is 0.
- unsigned caseClusterRank(const SwitchCG::CaseCluster &CC,
- SwitchCG::CaseClusterIt First,
- SwitchCG::CaseClusterIt Last);
-
/// Emit comparison and split W into two subtrees.
void splitWorkItem(SwitchCG::SwitchWorkList &WorkList,
const SwitchCG::SwitchWorkListItem &W, Value *Cond,
diff --git a/llvm/lib/CodeGen/SelectionDAG/TargetLowering.cpp b/llvm/lib/CodeGen/SelectionDAG/TargetLowering.cpp
index f8400e8e94df..e3e3e375d6a6 100644
--- a/llvm/lib/CodeGen/SelectionDAG/TargetLowering.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/TargetLowering.cpp
@@ -2444,6 +2444,13 @@ bool TargetLowering::SimplifyDemandedBits(
unsigned InElts = SrcVT.isFixedLengthVector() ? SrcVT.getVectorNumElements() : 1;
bool IsVecInReg = Op.getOpcode() == ISD::SIGN_EXTEND_VECTOR_INREG;
+ APInt InDemandedElts = DemandedElts.zext(InElts);
+ APInt InDemandedBits = DemandedBits.trunc(InBits);
+
+ // Since some of the sign extended bits are demanded, we know that the sign
+ // bit is demanded.
+ InDemandedBits.setBit(InBits - 1);
+
// If none of the top bits are demanded, convert this into an any_extend.
if (DemandedBits.getActiveBits() <= InBits) {
// If we only need the non-extended bits of the bottom element
@@ -2452,19 +2459,17 @@ bool TargetLowering::SimplifyDemandedBits(
VT.getSizeInBits() == SrcVT.getSizeInBits())
return TLO.CombineTo(Op, TLO.DAG.getBitcast(VT, Src));
- unsigned Opc =
- IsVecInReg ? ISD::ANY_EXTEND_VECTOR_INREG : ISD::ANY_EXTEND;
- if (!TLO.LegalOperations() || isOperationLegal(Opc, VT))
- return TLO.CombineTo(Op, TLO.DAG.getNode(Opc, dl, VT, Src));
+ // Don't lose an all signbits 0/-1 splat on targets with 0/-1 booleans.
+ if (getBooleanContents(VT) != ZeroOrNegativeOneBooleanContent ||
+ TLO.DAG.ComputeNumSignBits(Src, InDemandedElts, Depth + 1) !=
+ InBits) {
+ unsigned Opc =
+ IsVecInReg ? ISD::ANY_EXTEND_VECTOR_INREG : ISD::ANY_EXTEND;
+ if (!TLO.LegalOperations() || isOperationLegal(Opc, VT))
+ return TLO.CombineTo(Op, TLO.DAG.getNode(Opc, dl, VT, Src));
+ }
}
- APInt InDemandedBits = DemandedBits.trunc(InBits);
- APInt InDemandedElts = DemandedElts.zext(InElts);
-
- // Since some of the sign extended bits are demanded, we know that the sign
- // bit is demanded.
- InDemandedBits.setBit(InBits - 1);
-
if (SimplifyDemandedBits(Src, InDemandedBits, InDemandedElts, Known, TLO,
Depth + 1))
return true;
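The reordering above lets the new guard consult ComputeNumSignBits before demoting the sign_extend to an any_extend: if every source bit is already a sign bit (a 0/-1 boolean splat), sign extension reproduces exactly 0 or -1, while an any_extend may leave the widened bits arbitrary. A tiny stand-alone illustration of that distinction (plain C++, not DAG code):

#include <cstdint>

int main() {
  int8_t boolSplat = -1;        // every bit is a copy of the sign bit
  int32_t sext = boolSplat;     // sign extension: exactly -1 again
  int32_t anyExt = 0x7A5A00FF;  // one value an "any extend" could legally yield
  // The 0/-1 splat survives the sign extension but not an arbitrary widening.
  return (sext == -1 && anyExt != -1) ? 0 : 1;
}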
diff --git a/llvm/lib/CodeGen/SwitchLoweringUtils.cpp b/llvm/lib/CodeGen/SwitchLoweringUtils.cpp
index 7982d80353bd..8922fa589813 100644
--- a/llvm/lib/CodeGen/SwitchLoweringUtils.cpp
+++ b/llvm/lib/CodeGen/SwitchLoweringUtils.cpp
@@ -494,3 +494,84 @@ void SwitchCG::sortAndRangeify(CaseClusterVector &Clusters) {
}
Clusters.resize(DstIndex);
}
+
+unsigned SwitchCG::SwitchLowering::caseClusterRank(const CaseCluster &CC,
+ CaseClusterIt First,
+ CaseClusterIt Last) {
+ return std::count_if(First, Last + 1, [&](const CaseCluster &X) {
+ if (X.Prob != CC.Prob)
+ return X.Prob > CC.Prob;
+
+ // Ties are broken by comparing the case value.
+ return X.Low->getValue().slt(CC.Low->getValue());
+ });
+}
+
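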
+llvm::SwitchCG::SwitchLowering::SplitWorkItemInfo
+SwitchCG::SwitchLowering::computeSplitWorkItemInfo(
+ const SwitchWorkListItem &W) {
+ CaseClusterIt LastLeft = W.FirstCluster;
+ CaseClusterIt FirstRight = W.LastCluster;
+ auto LeftProb = LastLeft->Prob + W.DefaultProb / 2;
+ auto RightProb = FirstRight->Prob + W.DefaultProb / 2;
+
+ // Move LastLeft and FirstRight towards each other from opposite directions to
+ // find a partitioning of the clusters which balances the probability on both
+ // sides. If LeftProb and RightProb are equal, alternate which side is
+ // taken to ensure 0-probability nodes are distributed evenly.
+ unsigned I = 0;
+ while (LastLeft + 1 < FirstRight) {
+ if (LeftProb < RightProb || (LeftProb == RightProb && (I & 1)))
+ LeftProb += (++LastLeft)->Prob;
+ else
+ RightProb += (--FirstRight)->Prob;
+ I++;
+ }
+
+ while (true) {
+ // Our binary search tree differs from a typical BST in that ours can have
+ // up to three values in each leaf. The pivot selection above doesn't take
+ // that into account, which means the tree might require more nodes and be
+ // less efficient. We compensate for this here.
+
+ unsigned NumLeft = LastLeft - W.FirstCluster + 1;
+ unsigned NumRight = W.LastCluster - FirstRight + 1;
+
+ if (std::min(NumLeft, NumRight) < 3 && std::max(NumLeft, NumRight) > 3) {
+ // If one side has less than 3 clusters, and the other has more than 3,
+ // consider taking a cluster from the other side.
+
+ if (NumLeft < NumRight) {
+ // Consider moving the first cluster on the right to the left side.
+ CaseCluster &CC = *FirstRight;
+ unsigned RightSideRank = caseClusterRank(CC, FirstRight, W.LastCluster);
+ unsigned LeftSideRank = caseClusterRank(CC, W.FirstCluster, LastLeft);
+ if (LeftSideRank <= RightSideRank) {
+ // Moving the cluster to the left does not demote it.
+ ++LastLeft;
+ ++FirstRight;
+ continue;
+ }
+ } else {
+ assert(NumRight < NumLeft);
+ // Consider moving the last element on the left to the right side.
+ CaseCluster &CC = *LastLeft;
+ unsigned LeftSideRank = caseClusterRank(CC, W.FirstCluster, LastLeft);
+ unsigned RightSideRank = caseClusterRank(CC, FirstRight, W.LastCluster);
+ if (RightSideRank <= LeftSideRank) {
+          // Moving the cluster to the right does not demote it.
+ --LastLeft;
+ --FirstRight;
+ continue;
+ }
+ }
+ }
+ break;
+ }
+
+ assert(LastLeft + 1 == FirstRight);
+ assert(LastLeft >= W.FirstCluster);
+ assert(FirstRight <= W.LastCluster);
+
+ return SplitWorkItemInfo{LastLeft, FirstRight, LeftProb, RightProb};
+}
\ No newline at end of file
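
A standalone sketch of the partitioning loop above, using plain integers in place of BranchProbability and ignoring the DefaultProb/2 seed (an assumption made to keep the example self-contained): the two iterators walk toward each other, always growing the lighter side, so the final split balances the probability mass of the two subtrees.

#include <cstdio>
#include <vector>

int main() {
  std::vector<int> Prob = {10, 10, 40, 10, 30}; // per-cluster weights
  size_t LastLeft = 0, FirstRight = Prob.size() - 1;
  int LeftProb = Prob[LastLeft], RightProb = Prob[FirstRight];
  unsigned I = 0;
  while (LastLeft + 1 < FirstRight) {
    if (LeftProb < RightProb || (LeftProb == RightProb && (I & 1)))
      LeftProb += Prob[++LastLeft];
    else
      RightProb += Prob[--FirstRight];
    ++I;
  }
  // Clusters 0..2 (weight 60) go left, clusters 3..4 (weight 40) go right.
  std::printf("split after cluster %zu: left=%d right=%d\n", LastLeft, LeftProb,
              RightProb);
  return 0;
}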
diff --git a/llvm/lib/DWARFLinker/DWARFStreamer.cpp b/llvm/lib/DWARFLinker/DWARFStreamer.cpp
index faa91364ec9c..3ec082f4ea0c 100644
--- a/llvm/lib/DWARFLinker/DWARFStreamer.cpp
+++ b/llvm/lib/DWARFLinker/DWARFStreamer.cpp
@@ -859,10 +859,8 @@ void DwarfStreamer::emitLineTablePrologueV5IncludeAndFileTable(
for (auto Include : P.IncludeDirectories)
emitLineTableString(P, Include, DebugStrPool, DebugLineStrPool);
- bool InlineSources = any_of(P.FileNames, [](auto &File) {
- auto s = dwarf::toString(File.Source);
- return s && !**s;
- });
+ bool HasChecksums = P.ContentTypes.HasMD5;
+ bool HasInlineSources = P.ContentTypes.HasSource;
if (P.FileNames.empty()) {
// file_name_entry_format_count (ubyte).
@@ -870,7 +868,7 @@ void DwarfStreamer::emitLineTablePrologueV5IncludeAndFileTable(
LineSectionSize += 1;
} else {
// file_name_entry_format_count (ubyte).
- MS->emitInt8(2 + (InlineSources ? 1 : 0));
+ MS->emitInt8(2 + (HasChecksums ? 1 : 0) + (HasInlineSources ? 1 : 0));
LineSectionSize += 1;
// file_name_entry_format (sequence of ULEB128 pairs).
@@ -880,7 +878,13 @@ void DwarfStreamer::emitLineTablePrologueV5IncludeAndFileTable(
LineSectionSize += MS->emitULEB128IntValue(dwarf::DW_LNCT_directory_index);
LineSectionSize += MS->emitULEB128IntValue(dwarf::DW_FORM_data1);
- if (InlineSources) {
+
+ if (HasChecksums) {
+ LineSectionSize += MS->emitULEB128IntValue(dwarf::DW_LNCT_MD5);
+ LineSectionSize += MS->emitULEB128IntValue(dwarf::DW_FORM_data16);
+ }
+
+ if (HasInlineSources) {
LineSectionSize += MS->emitULEB128IntValue(dwarf::DW_LNCT_LLVM_source);
LineSectionSize += MS->emitULEB128IntValue(StrForm);
}
@@ -894,7 +898,13 @@ void DwarfStreamer::emitLineTablePrologueV5IncludeAndFileTable(
emitLineTableString(P, File.Name, DebugStrPool, DebugLineStrPool);
MS->emitInt8(File.DirIdx);
LineSectionSize += 1;
- if (InlineSources)
+ if (HasChecksums) {
+ MS->emitBinaryData(
+ StringRef(reinterpret_cast<const char *>(File.Checksum.data()),
+ File.Checksum.size()));
+ LineSectionSize += File.Checksum.size();
+ }
+ if (HasInlineSources)
emitLineTableString(P, File.Source, DebugStrPool, DebugLineStrPool);
}
}
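
For orientation, a sketch of the DWARF v5 file_name_entry layout that the code above emits when a unit carries both MD5 checksums and inline sources (the path/source forms depend on StrForm, which sits outside this hunk, so they are shown only as placeholders):

// file_name_entry_format_count = 2 + HasChecksums + HasInlineSources = 4
//   DW_LNCT_path            : <StrForm>
//   DW_LNCT_directory_index : DW_FORM_data1
//   DW_LNCT_MD5             : DW_FORM_data16
//   DW_LNCT_LLVM_source     : <StrForm>
// Each file entry then emits: the name string, a 1-byte directory index,
// 16 bytes of MD5 checksum, and the inline source string, in that order.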
diff --git a/llvm/lib/DWARFLinkerParallel/DebugLineSectionEmitter.h b/llvm/lib/DWARFLinkerParallel/DebugLineSectionEmitter.h
index fc7f8cbc4a8e..27c63fad712f 100644
--- a/llvm/lib/DWARFLinkerParallel/DebugLineSectionEmitter.h
+++ b/llvm/lib/DWARFLinkerParallel/DebugLineSectionEmitter.h
@@ -197,7 +197,7 @@ private:
Section.emitIntVal(0, 1);
} else {
// file_name_entry_format_count (ubyte).
- Section.emitIntVal(2, 1);
+ Section.emitIntVal(2 + (P.ContentTypes.HasMD5 ? 1 : 0), 1);
// file_name_entry_format (sequence of ULEB128 pairs).
encodeULEB128(dwarf::DW_LNCT_path, Section.OS);
@@ -205,6 +205,11 @@ private:
encodeULEB128(dwarf::DW_LNCT_directory_index, Section.OS);
encodeULEB128(dwarf::DW_FORM_data1, Section.OS);
+
+ if (P.ContentTypes.HasMD5) {
+ encodeULEB128(dwarf::DW_LNCT_MD5, Section.OS);
+ encodeULEB128(dwarf::DW_FORM_data16, Section.OS);
+ }
}
// file_names_count (ULEB128).
@@ -222,6 +227,12 @@ private:
// source file.
Section.emitString(File.Name.getForm(), *FileNameStr);
Section.emitIntVal(File.DirIdx, 1);
+
+ if (P.ContentTypes.HasMD5) {
+ Section.emitBinaryData(
+ StringRef(reinterpret_cast<const char *>(File.Checksum.data()),
+ File.Checksum.size()));
+ }
}
}
diff --git a/llvm/lib/DWARFLinkerParallel/OutputSections.cpp b/llvm/lib/DWARFLinkerParallel/OutputSections.cpp
index 9c3e3ebd220a..730ae0f83d7b 100644
--- a/llvm/lib/DWARFLinkerParallel/OutputSections.cpp
+++ b/llvm/lib/DWARFLinkerParallel/OutputSections.cpp
@@ -227,6 +227,10 @@ void SectionDescriptor::emitIntVal(uint64_t Val, unsigned Size) {
}
}
+void SectionDescriptor::emitBinaryData(llvm::StringRef Data) {
+ OS.write(Data.data(), Data.size());
+}
+
void SectionDescriptor::apply(uint64_t PatchOffset, dwarf::Form AttrForm,
uint64_t Val) {
switch (AttrForm) {
diff --git a/llvm/lib/DWARFLinkerParallel/OutputSections.h b/llvm/lib/DWARFLinkerParallel/OutputSections.h
index f23b2efb869d..0f394b0810ea 100644
--- a/llvm/lib/DWARFLinkerParallel/OutputSections.h
+++ b/llvm/lib/DWARFLinkerParallel/OutputSections.h
@@ -283,6 +283,8 @@ struct SectionDescriptor {
void emitString(dwarf::Form StringForm, const char *StringVal);
+ void emitBinaryData(llvm::StringRef Data);
+
/// Emit specified inplace string value into the current section contents.
void emitInplaceString(StringRef String) {
OS << GlobalData.translateString(String);
diff --git a/llvm/lib/Object/ELF.cpp b/llvm/lib/Object/ELF.cpp
index 300639f2bfa0..f24395b02043 100644
--- a/llvm/lib/Object/ELF.cpp
+++ b/llvm/lib/Object/ELF.cpp
@@ -774,7 +774,7 @@ decodeBBAddrMapImpl(const ELFFile<ELFT> &EF,
}
FunctionEntries.emplace_back(Address, std::move(BBEntries));
- if (FeatEnable.FuncEntryCount || FeatEnable.BBFreq || FeatEnable.BrProb) {
+ if (PGOAnalyses || FeatEnable.anyEnabled()) {
// Function entry count
uint64_t FuncEntryCount =
FeatEnable.FuncEntryCount
@@ -782,8 +782,9 @@ decodeBBAddrMapImpl(const ELFFile<ELFT> &EF,
: 0;
std::vector<PGOAnalysisMap::PGOBBEntry> PGOBBEntries;
- for (uint32_t BlockIndex = 0; !MetadataDecodeErr && !ULEBSizeErr && Cur &&
- (BlockIndex < NumBlocks);
+ for (uint32_t BlockIndex = 0;
+ (FeatEnable.BBFreq || FeatEnable.BrProb) && !MetadataDecodeErr &&
+ !ULEBSizeErr && Cur && (BlockIndex < NumBlocks);
++BlockIndex) {
// Block frequency
uint64_t BBF = FeatEnable.BBFreq
diff --git a/llvm/lib/Passes/StandardInstrumentations.cpp b/llvm/lib/Passes/StandardInstrumentations.cpp
index fd1317e3eb25..d467fe5c9a8e 100644
--- a/llvm/lib/Passes/StandardInstrumentations.cpp
+++ b/llvm/lib/Passes/StandardInstrumentations.cpp
@@ -118,10 +118,10 @@ static cl::opt<bool> PrintPassNumbers(
"print-pass-numbers", cl::init(false), cl::Hidden,
cl::desc("Print pass names and their ordinals"));
-static cl::opt<unsigned>
- PrintAtPassNumber("print-at-pass-number", cl::init(0), cl::Hidden,
- cl::desc("Print IR at pass with this number as "
- "reported by print-passes-names"));
+static cl::opt<unsigned> PrintBeforePassNumber(
+ "print-before-pass-number", cl::init(0), cl::Hidden,
+ cl::desc("Print IR before the pass with this number as "
+ "reported by print-pass-numbers"));
static cl::opt<std::string> IRDumpDirectory(
"ir-dump-directory",
@@ -806,8 +806,7 @@ void PrintIRInstrumentation::printBeforePass(StringRef PassID, Any IR) {
// Note: here we rely on a fact that we do not change modules while
// traversing the pipeline, so the latest captured module is good
// for all print operations that has not happen yet.
- if (shouldPrintPassNumbers() || shouldPrintAtPassNumber() ||
- shouldPrintAfterPass(PassID))
+ if (shouldPrintAfterPass(PassID))
pushPassRunDescriptor(PassID, IR, DumpIRFilename);
if (!shouldPrintIR(IR))
@@ -823,8 +822,10 @@ void PrintIRInstrumentation::printBeforePass(StringRef PassID, Any IR) {
return;
auto WriteIRToStream = [&](raw_ostream &Stream) {
- Stream << "; *** IR Dump Before " << PassID << " on " << getIRName(IR)
- << " ***\n";
+ Stream << "; *** IR Dump Before ";
+ if (shouldPrintBeforePassNumber())
+ Stream << CurrentPassNumber << "-";
+ Stream << PassID << " on " << getIRName(IR) << " ***\n";
unwrapAndPrint(Stream, IR);
};
@@ -842,8 +843,7 @@ void PrintIRInstrumentation::printAfterPass(StringRef PassID, Any IR) {
if (isIgnored(PassID))
return;
- if (!shouldPrintAfterPass(PassID) && !shouldPrintPassNumbers() &&
- !shouldPrintAtPassNumber())
+ if (!shouldPrintAfterPass(PassID))
return;
auto [M, DumpIRFilename, IRName, StoredPassID] = popPassRunDescriptor(PassID);
@@ -853,10 +853,7 @@ void PrintIRInstrumentation::printAfterPass(StringRef PassID, Any IR) {
return;
auto WriteIRToStream = [&](raw_ostream &Stream, const StringRef IRName) {
- Stream << "; *** IR Dump "
- << (shouldPrintAtPassNumber()
- ? StringRef(formatv("At {0}-{1}", CurrentPassNumber, PassID))
- : StringRef(formatv("After {0}", PassID)))
+ Stream << "; *** IR Dump " << StringRef(formatv("After {0}", PassID))
<< " on " << IRName << " ***\n";
unwrapAndPrint(Stream, IR);
};
@@ -879,8 +876,7 @@ void PrintIRInstrumentation::printAfterPassInvalidated(StringRef PassID) {
if (isIgnored(PassID))
return;
- if (!shouldPrintAfterPass(PassID) && !shouldPrintPassNumbers() &&
- !shouldPrintAtPassNumber())
+ if (!shouldPrintAfterPass(PassID))
return;
auto [M, DumpIRFilename, IRName, StoredPassID] = popPassRunDescriptor(PassID);
@@ -893,12 +889,8 @@ void PrintIRInstrumentation::printAfterPassInvalidated(StringRef PassID) {
auto WriteIRToStream = [&](raw_ostream &Stream, const Module *M,
const StringRef IRName) {
SmallString<20> Banner;
- if (shouldPrintAtPassNumber())
- Banner = formatv("; *** IR Dump At {0}-{1} on {2} (invalidated) ***",
- CurrentPassNumber, PassID, IRName);
- else
- Banner = formatv("; *** IR Dump After {0} on {1} (invalidated) ***",
- PassID, IRName);
+ Banner = formatv("; *** IR Dump After {0} on {1} (invalidated) ***", PassID,
+ IRName);
Stream << Banner << "\n";
printIR(Stream, M);
};
@@ -921,6 +913,10 @@ bool PrintIRInstrumentation::shouldPrintBeforePass(StringRef PassID) {
if (shouldPrintBeforeAll())
return true;
+ if (shouldPrintBeforePassNumber() &&
+ CurrentPassNumber == PrintBeforePassNumber)
+ return true;
+
StringRef PassName = PIC->getPassNameForClassName(PassID);
return is_contained(printBeforePasses(), PassName);
}
@@ -929,9 +925,6 @@ bool PrintIRInstrumentation::shouldPrintAfterPass(StringRef PassID) {
if (shouldPrintAfterAll())
return true;
- if (shouldPrintAtPassNumber() && CurrentPassNumber == PrintAtPassNumber)
- return true;
-
StringRef PassName = PIC->getPassNameForClassName(PassID);
return is_contained(printAfterPasses(), PassName);
}
@@ -940,8 +933,8 @@ bool PrintIRInstrumentation::shouldPrintPassNumbers() {
return PrintPassNumbers;
}
-bool PrintIRInstrumentation::shouldPrintAtPassNumber() {
- return PrintAtPassNumber > 0;
+bool PrintIRInstrumentation::shouldPrintBeforePassNumber() {
+ return PrintBeforePassNumber > 0;
}
void PrintIRInstrumentation::registerCallbacks(
@@ -950,13 +943,12 @@ void PrintIRInstrumentation::registerCallbacks(
// BeforePass callback is not just for printing, it also saves a Module
// for later use in AfterPassInvalidated.
- if (shouldPrintPassNumbers() || shouldPrintAtPassNumber() ||
+ if (shouldPrintPassNumbers() || shouldPrintBeforePassNumber() ||
shouldPrintBeforeSomePass() || shouldPrintAfterSomePass())
PIC.registerBeforeNonSkippedPassCallback(
[this](StringRef P, Any IR) { this->printBeforePass(P, IR); });
- if (shouldPrintPassNumbers() || shouldPrintAtPassNumber() ||
- shouldPrintAfterSomePass()) {
+ if (shouldPrintAfterSomePass()) {
PIC.registerAfterPassCallback(
[this](StringRef P, Any IR, const PreservedAnalyses &) {
this->printAfterPass(P, IR);
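
With this rename, the intended workflow (a hedged usage note based on the option descriptions above, not on any documented example) is two-step: run opt once with -print-pass-numbers to learn the ordinal of the pass of interest, then rerun with -print-before-pass-number=<N> to dump the IR immediately before that pass. This replaces the previous -print-at-pass-number option, which dumped the IR after the numbered pass.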
diff --git a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
index 102fd0c3dae2..3583b7d2ce4e 100644
--- a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
+++ b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
@@ -16516,9 +16516,9 @@ static SDValue performUADDVAddCombine(SDValue A, SelectionDAG &DAG) {
if (Ext0.getOperand(0).getValueType().getVectorNumElements() !=
VT.getVectorNumElements() * 2)
return SDValue();
- if ((Ext0.getConstantOperandVal(1) != 0 &&
+ if ((Ext0.getConstantOperandVal(1) != 0 ||
Ext1.getConstantOperandVal(1) != VT.getVectorNumElements()) &&
- (Ext1.getConstantOperandVal(1) != 0 &&
+ (Ext1.getConstantOperandVal(1) != 0 ||
Ext0.getConstantOperandVal(1) != VT.getVectorNumElements()))
return SDValue();
unsigned Opcode = Op0.getOpcode() == ISD::ZERO_EXTEND ? AArch64ISD::UADDLP
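
A standalone sketch of the guard that the && to || change above restores (isLowHighPair is an illustrative name, not part of the patch): the combine should only proceed when one extract_subvector reads the low half (index 0) and the other reads the high half (index NumElts); the old all-&& form almost never bailed out.

// Returns true when the pair of extract indices covers exactly the low and
// high halves of the wider vector.
static bool isLowHighPair(unsigned Idx0, unsigned Idx1, unsigned NumElts) {
  return (Idx0 == 0 && Idx1 == NumElts) || (Idx1 == 0 && Idx0 == NumElts);
}
// The corrected condition returns SDValue() exactly when
// !isLowHighPair(Ext0Idx, Ext1Idx, NumElts).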
diff --git a/llvm/lib/Target/AArch64/GISel/AArch64LegalizerInfo.cpp b/llvm/lib/Target/AArch64/GISel/AArch64LegalizerInfo.cpp
index 470742cdc30e..b657a0954d78 100644
--- a/llvm/lib/Target/AArch64/GISel/AArch64LegalizerInfo.cpp
+++ b/llvm/lib/Target/AArch64/GISel/AArch64LegalizerInfo.cpp
@@ -366,7 +366,8 @@ AArch64LegalizerInfo::AArch64LegalizerInfo(const AArch64Subtarget &ST)
{v4s32, p0, s128, 8},
{v2s64, p0, s128, 8}})
// These extends are also legal
- .legalForTypesWithMemDesc({{s32, p0, s8, 8}, {s32, p0, s16, 8}})
+ .legalForTypesWithMemDesc(
+ {{s32, p0, s8, 8}, {s32, p0, s16, 8}, {s64, p0, s32, 8}})
.widenScalarToNextPow2(0, /* MinSize = */ 8)
.lowerIfMemSizeNotByteSizePow2()
.clampScalar(0, s8, s64)
diff --git a/llvm/lib/Target/AArch64/GISel/AArch64LegalizerInfo.h b/llvm/lib/Target/AArch64/GISel/AArch64LegalizerInfo.h
index e96ec6db3a1b..c62a9d847c52 100644
--- a/llvm/lib/Target/AArch64/GISel/AArch64LegalizerInfo.h
+++ b/llvm/lib/Target/AArch64/GISel/AArch64LegalizerInfo.h
@@ -23,7 +23,6 @@ namespace llvm {
class AArch64Subtarget;
-/// This class provides the information for the target register banks.
class AArch64LegalizerInfo : public LegalizerInfo {
public:
AArch64LegalizerInfo(const AArch64Subtarget &ST);
diff --git a/llvm/lib/Target/AMDGPU/AMDGPUCombine.td b/llvm/lib/Target/AMDGPU/AMDGPUCombine.td
index 8d4cad4c07bc..0c77fe725958 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPUCombine.td
+++ b/llvm/lib/Target/AMDGPU/AMDGPUCombine.td
@@ -104,6 +104,13 @@ def foldable_fneg : GICombineRule<
[{ return Helper.matchFoldableFneg(*${ffn}, ${matchinfo}); }]),
(apply [{ Helper.applyFoldableFneg(*${ffn}, ${matchinfo}); }])>;
+// Detects s_mul_u64 instructions whose higher bits are zero/sign extended.
+def smulu64 : GICombineRule<
+ (defs root:$smul, unsigned_matchinfo:$matchinfo),
+ (match (wip_match_opcode G_MUL):$smul,
+ [{ return matchCombine_s_mul_u64(*${smul}, ${matchinfo}); }]),
+ (apply [{ applyCombine_s_mul_u64(*${smul}, ${matchinfo}); }])>;
+
def sign_exension_in_reg_matchdata : GIDefMatchData<"MachineInstr *">;
def sign_extension_in_reg : GICombineRule<
@@ -149,7 +156,7 @@ def AMDGPUPostLegalizerCombiner: GICombiner<
"AMDGPUPostLegalizerCombinerImpl",
[all_combines, gfx6gfx7_combines, gfx8_combines,
uchar_to_float, cvt_f32_ubyteN, remove_fcanonicalize, foldable_fneg,
- rcp_sqrt_to_rsq, sign_extension_in_reg]> {
+ rcp_sqrt_to_rsq, sign_extension_in_reg, smulu64]> {
let CombineAllMethodName = "tryCombineAllImpl";
}
diff --git a/llvm/lib/Target/AMDGPU/AMDGPUISelLowering.cpp b/llvm/lib/Target/AMDGPU/AMDGPUISelLowering.cpp
index 2f663571a8f9..0dbcaf5a1b13 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPUISelLowering.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPUISelLowering.cpp
@@ -387,18 +387,20 @@ AMDGPUTargetLowering::AMDGPUTargetLowering(const TargetMachine &TM,
MVT::v9i32, MVT::v9f32, MVT::v10i32, MVT::v10f32,
MVT::v11i32, MVT::v11f32, MVT::v12i32, MVT::v12f32},
Custom);
+
+ // FIXME: Why is v8f16/v8bf16 missing?
setOperationAction(
ISD::EXTRACT_SUBVECTOR,
- {MVT::v2f16, MVT::v2i16, MVT::v2bf16, MVT::v4f16, MVT::v4i16,
- MVT::v4bf16, MVT::v2f32, MVT::v2i32, MVT::v3f32, MVT::v3i32,
+ {MVT::v2f16, MVT::v2bf16, MVT::v2i16, MVT::v4f16, MVT::v4bf16,
+ MVT::v4i16, MVT::v2f32, MVT::v2i32, MVT::v3f32, MVT::v3i32,
MVT::v4f32, MVT::v4i32, MVT::v5f32, MVT::v5i32, MVT::v6f32,
MVT::v6i32, MVT::v7f32, MVT::v7i32, MVT::v8f32, MVT::v8i32,
MVT::v9f32, MVT::v9i32, MVT::v10i32, MVT::v10f32, MVT::v11i32,
- MVT::v11f32, MVT::v12i32, MVT::v12f32, MVT::v16f16, MVT::v16i16,
- MVT::v16f32, MVT::v16i32, MVT::v32f32, MVT::v32i32, MVT::v2f64,
- MVT::v2i64, MVT::v3f64, MVT::v3i64, MVT::v4f64, MVT::v4i64,
- MVT::v8f64, MVT::v8i64, MVT::v16f64, MVT::v16i64, MVT::v32i16,
- MVT::v32f16},
+ MVT::v11f32, MVT::v12i32, MVT::v12f32, MVT::v16f16, MVT::v16bf16,
+ MVT::v16i16, MVT::v16f32, MVT::v16i32, MVT::v32f32, MVT::v32i32,
+ MVT::v2f64, MVT::v2i64, MVT::v3f64, MVT::v3i64, MVT::v4f64,
+ MVT::v4i64, MVT::v8f64, MVT::v8i64, MVT::v16f64, MVT::v16i64,
+ MVT::v32i16, MVT::v32f16, MVT::v32bf16},
Custom);
setOperationAction(ISD::FP16_TO_FP, MVT::f64, Expand);
diff --git a/llvm/lib/Target/AMDGPU/AMDGPULegalizerInfo.cpp b/llvm/lib/Target/AMDGPU/AMDGPULegalizerInfo.cpp
index dfbe5c7fed88..aa235c07e995 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPULegalizerInfo.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPULegalizerInfo.cpp
@@ -701,13 +701,23 @@ AMDGPULegalizerInfo::AMDGPULegalizerInfo(const GCNSubtarget &ST_,
.maxScalar(0, S32);
}
- getActionDefinitionsBuilder(G_MUL)
- .legalFor({S32, S16, V2S16})
- .clampMaxNumElementsStrict(0, S16, 2)
- .scalarize(0)
- .minScalar(0, S16)
- .widenScalarToNextMultipleOf(0, 32)
- .custom();
+ if (ST.hasScalarSMulU64()) {
+ getActionDefinitionsBuilder(G_MUL)
+ .legalFor({S64, S32, S16, V2S16})
+ .clampMaxNumElementsStrict(0, S16, 2)
+ .scalarize(0)
+ .minScalar(0, S16)
+ .widenScalarToNextMultipleOf(0, 32)
+ .custom();
+ } else {
+ getActionDefinitionsBuilder(G_MUL)
+ .legalFor({S32, S16, V2S16})
+ .clampMaxNumElementsStrict(0, S16, 2)
+ .scalarize(0)
+ .minScalar(0, S16)
+ .widenScalarToNextMultipleOf(0, 32)
+ .custom();
+ }
assert(ST.hasMad64_32());
getActionDefinitionsBuilder({G_UADDSAT, G_USUBSAT, G_SADDSAT, G_SSUBSAT})
diff --git a/llvm/lib/Target/AMDGPU/AMDGPULegalizerInfo.h b/llvm/lib/Target/AMDGPU/AMDGPULegalizerInfo.h
index 1fa064891a2d..56aabd4f6ab7 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPULegalizerInfo.h
+++ b/llvm/lib/Target/AMDGPU/AMDGPULegalizerInfo.h
@@ -27,7 +27,6 @@ class MachineIRBuilder;
namespace AMDGPU {
struct ImageDimIntrinsicInfo;
}
-/// This class provides the information for the target register banks.
class AMDGPULegalizerInfo final : public LegalizerInfo {
const GCNSubtarget &ST;
diff --git a/llvm/lib/Target/AMDGPU/AMDGPUPostLegalizerCombiner.cpp b/llvm/lib/Target/AMDGPU/AMDGPUPostLegalizerCombiner.cpp
index 7b18e1f805d8..21bfab52c6c4 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPUPostLegalizerCombiner.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPUPostLegalizerCombiner.cpp
@@ -104,6 +104,14 @@ public:
void applyCombineSignExtendInReg(MachineInstr &MI,
MachineInstr *&MatchInfo) const;
+ // Find the s_mul_u64 instructions where the higher bits are either
+ // zero-extended or sign-extended.
+ bool matchCombine_s_mul_u64(MachineInstr &MI, unsigned &NewOpcode) const;
+ // Replace the s_mul_u64 instructions with S_MUL_I64_I32_PSEUDO if the higher
+ // 33 bits are sign extended and with S_MUL_U64_U32_PSEUDO if the higher 32
+ // bits are zero extended.
+ void applyCombine_s_mul_u64(MachineInstr &MI, unsigned &NewOpcode) const;
+
private:
#define GET_GICOMBINER_CLASS_MEMBERS
#define AMDGPUSubtarget GCNSubtarget
@@ -419,6 +427,32 @@ void AMDGPUPostLegalizerCombinerImpl::applyCombineSignExtendInReg(
MI.eraseFromParent();
}
+bool AMDGPUPostLegalizerCombinerImpl::matchCombine_s_mul_u64(
+ MachineInstr &MI, unsigned &NewOpcode) const {
+ Register Src0 = MI.getOperand(1).getReg();
+ Register Src1 = MI.getOperand(2).getReg();
+ if (MRI.getType(Src0) != LLT::scalar(64))
+ return false;
+
+ if (KB->getKnownBits(Src1).countMinLeadingZeros() >= 32 &&
+ KB->getKnownBits(Src0).countMinLeadingZeros() >= 32) {
+ NewOpcode = AMDGPU::G_AMDGPU_S_MUL_U64_U32;
+ return true;
+ }
+
+ if (KB->computeNumSignBits(Src1) >= 33 &&
+ KB->computeNumSignBits(Src0) >= 33) {
+ NewOpcode = AMDGPU::G_AMDGPU_S_MUL_I64_I32;
+ return true;
+ }
+ return false;
+}
+
+void AMDGPUPostLegalizerCombinerImpl::applyCombine_s_mul_u64(
+ MachineInstr &MI, unsigned &NewOpcode) const {
+ Helper.replaceOpcodeWith(MI, NewOpcode);
+}
+
// Pass boilerplate
// ================
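
A standalone arithmetic check (plain C++, independent of KnownBits) of the unsigned condition matched above: when both 64-bit operands have at least 32 known leading zeros they are really 32-bit values, so the full product fits in 64 bits and a 32x32->64 multiply reproduces it exactly.

#include <cassert>
#include <cstdint>

int main() {
  // Both values are zero-extended from 32 bits, i.e. have >= 32 leading zeros.
  uint64_t A = 0x00000000FFFFFFFFull, B = 0x0000000080000001ull;
  assert(A * B == uint64_t(uint32_t(A)) * uint64_t(uint32_t(B)));
  return 0;
}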
diff --git a/llvm/lib/Target/AMDGPU/AMDGPURegisterBankInfo.cpp b/llvm/lib/Target/AMDGPU/AMDGPURegisterBankInfo.cpp
index 92182ec06942..ecb7bb9d1d97 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPURegisterBankInfo.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPURegisterBankInfo.cpp
@@ -2094,6 +2094,74 @@ bool AMDGPURegisterBankInfo::foldInsertEltToCmpSelect(
return true;
}
+// Break s_mul_u64 into 32-bit vector operations.
+void AMDGPURegisterBankInfo::applyMappingSMULU64(
+ MachineIRBuilder &B, const OperandsMapper &OpdMapper) const {
+ SmallVector<Register, 2> DefRegs(OpdMapper.getVRegs(0));
+ SmallVector<Register, 2> Src0Regs(OpdMapper.getVRegs(1));
+ SmallVector<Register, 2> Src1Regs(OpdMapper.getVRegs(2));
+
+ // All inputs are SGPRs, nothing special to do.
+ if (DefRegs.empty()) {
+ assert(Src0Regs.empty() && Src1Regs.empty());
+ applyDefaultMapping(OpdMapper);
+ return;
+ }
+
+ assert(DefRegs.size() == 2);
+ assert(Src0Regs.size() == Src1Regs.size() &&
+ (Src0Regs.empty() || Src0Regs.size() == 2));
+
+ MachineRegisterInfo &MRI = OpdMapper.getMRI();
+ MachineInstr &MI = OpdMapper.getMI();
+ Register DstReg = MI.getOperand(0).getReg();
+ LLT HalfTy = LLT::scalar(32);
+
+ // Depending on where the source registers came from, the generic code may
+ // have decided to split the inputs already or not. If not, we still need to
+ // extract the values.
+
+ if (Src0Regs.empty())
+ split64BitValueForMapping(B, Src0Regs, HalfTy, MI.getOperand(1).getReg());
+ else
+ setRegsToType(MRI, Src0Regs, HalfTy);
+
+ if (Src1Regs.empty())
+ split64BitValueForMapping(B, Src1Regs, HalfTy, MI.getOperand(2).getReg());
+ else
+ setRegsToType(MRI, Src1Regs, HalfTy);
+
+ setRegsToType(MRI, DefRegs, HalfTy);
+
+ // The multiplication is done as follows:
+ //
+ // Op1H Op1L
+ // * Op0H Op0L
+ // --------------------
+ // Op1H*Op0L Op1L*Op0L
+ // + Op1H*Op0H Op1L*Op0H
+ // -----------------------------------------
+ // (Op1H*Op0L + Op1L*Op0H + carry) Op1L*Op0L
+ //
+ // We drop Op1H*Op0H because the result of the multiplication is a 64-bit
+ // value and that would overflow.
+ // The low 32-bit value is Op1L*Op0L.
+ // The high 32-bit value is Op1H*Op0L + Op1L*Op0H + carry (from
+ // Op1L*Op0L).
+
+ ApplyRegBankMapping ApplyBank(B, *this, MRI, &AMDGPU::VGPRRegBank);
+
+ Register Hi = B.buildUMulH(HalfTy, Src0Regs[0], Src1Regs[0]).getReg(0);
+ Register MulLoHi = B.buildMul(HalfTy, Src0Regs[0], Src1Regs[1]).getReg(0);
+ Register Add = B.buildAdd(HalfTy, Hi, MulLoHi).getReg(0);
+ Register MulHiLo = B.buildMul(HalfTy, Src0Regs[1], Src1Regs[0]).getReg(0);
+ B.buildAdd(DefRegs[1], Add, MulHiLo);
+ B.buildMul(DefRegs[0], Src0Regs[0], Src1Regs[0]);
+
+ MRI.setRegBank(DstReg, AMDGPU::VGPRRegBank);
+ MI.eraseFromParent();
+}
+
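
A standalone check of the decomposition in the comment above (plain C++, with the same Op0/Op1 naming; the concrete constants are arbitrary): the low 64 bits of the product need only Op1L*Op0L, the two cross terms, and the carry out of the low product, which is exactly the mul/umulh/add sequence built here.

#include <cassert>
#include <cstdint>

int main() {
  uint64_t Op0 = 0x123456789ABCDEF0ull, Op1 = 0x0FEDCBA987654321ull;
  uint32_t Op0L = uint32_t(Op0), Op0H = uint32_t(Op0 >> 32);
  uint32_t Op1L = uint32_t(Op1), Op1H = uint32_t(Op1 >> 32);

  uint64_t LoProd = uint64_t(Op1L) * Op0L;          // low product (mul + umulh)
  uint32_t Lo = uint32_t(LoProd);
  uint32_t Carry = uint32_t(LoProd >> 32);
  uint32_t Hi = Op1H * Op0L + Op1L * Op0H + Carry;  // cross terms + carry, mod 2^32

  assert(((uint64_t(Hi) << 32) | Lo) == Op0 * Op1); // Op1H*Op0H only affects bits >= 2^64
  return 0;
}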
void AMDGPURegisterBankInfo::applyMappingImpl(
MachineIRBuilder &B, const OperandsMapper &OpdMapper) const {
MachineInstr &MI = OpdMapper.getMI();
@@ -2394,13 +2462,21 @@ void AMDGPURegisterBankInfo::applyMappingImpl(
Register DstReg = MI.getOperand(0).getReg();
LLT DstTy = MRI.getType(DstReg);
+    // Special case for s_mul_u64. There is no vector equivalent of
+    // s_mul_u64, so we have to break it down into 32-bit vector
+    // multiplications.
+ if (Opc == AMDGPU::G_MUL && DstTy.getSizeInBits() == 64) {
+ applyMappingSMULU64(B, OpdMapper);
+ return;
+ }
+
// 16-bit operations are VALU only, but can be promoted to 32-bit SALU.
// Packed 16-bit operations need to be scalarized and promoted.
if (DstTy != LLT::scalar(16) && DstTy != LLT::fixed_vector(2, 16))
break;
const RegisterBank *DstBank =
- OpdMapper.getInstrMapping().getOperandMapping(0).BreakDown[0].RegBank;
+ OpdMapper.getInstrMapping().getOperandMapping(0).BreakDown[0].RegBank;
if (DstBank == &AMDGPU::VGPRRegBank)
break;
@@ -2451,6 +2527,72 @@ void AMDGPURegisterBankInfo::applyMappingImpl(
return;
}
+ case AMDGPU::G_AMDGPU_S_MUL_I64_I32:
+ case AMDGPU::G_AMDGPU_S_MUL_U64_U32: {
+    // This is a special case for s_mul_u64. We use the
+    // G_AMDGPU_S_MUL_I64_I32 opcode to represent an s_mul_u64 operation
+    // where the 33 higher bits are sign-extended and the
+    // G_AMDGPU_S_MUL_U64_U32 opcode to represent an s_mul_u64 operation
+    // where the 32 higher bits are zero-extended. If scalar registers are
+    // selected, both opcodes are lowered as s_mul_u64. If vector registers
+    // are selected, then G_AMDGPU_S_MUL_I64_I32 and
+    // G_AMDGPU_S_MUL_U64_U32 are lowered with a vector mad instruction.
+
+ // Insert basic copies.
+ applyDefaultMapping(OpdMapper);
+
+ Register DstReg = MI.getOperand(0).getReg();
+ Register SrcReg0 = MI.getOperand(1).getReg();
+ Register SrcReg1 = MI.getOperand(2).getReg();
+ const LLT S32 = LLT::scalar(32);
+ const LLT S64 = LLT::scalar(64);
+ assert(MRI.getType(DstReg) == S64 && "This is a special case for s_mul_u64 "
+ "that handles only 64-bit operands.");
+ const RegisterBank *DstBank =
+ OpdMapper.getInstrMapping().getOperandMapping(0).BreakDown[0].RegBank;
+
+ // Replace G_AMDGPU_S_MUL_I64_I32 and G_AMDGPU_S_MUL_U64_U32
+ // with s_mul_u64 operation.
+ if (DstBank == &AMDGPU::SGPRRegBank) {
+ MI.setDesc(TII->get(AMDGPU::S_MUL_U64));
+ MRI.setRegClass(DstReg, &AMDGPU::SGPR_64RegClass);
+ MRI.setRegClass(SrcReg0, &AMDGPU::SGPR_64RegClass);
+ MRI.setRegClass(SrcReg1, &AMDGPU::SGPR_64RegClass);
+ return;
+ }
+
+ // Replace G_AMDGPU_S_MUL_I64_I32 and G_AMDGPU_S_MUL_U64_U32
+ // with a vector mad.
+ assert(MRI.getRegBankOrNull(DstReg) == &AMDGPU::VGPRRegBank &&
+ "The destination operand should be in vector registers.");
+
+ DebugLoc DL = MI.getDebugLoc();
+
+ // Extract the lower subregister from the first operand.
+ Register Op0L = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass);
+ MRI.setRegClass(Op0L, &AMDGPU::VGPR_32RegClass);
+ MRI.setType(Op0L, S32);
+ B.buildTrunc(Op0L, SrcReg0);
+
+ // Extract the lower subregister from the second operand.
+ Register Op1L = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass);
+ MRI.setRegClass(Op1L, &AMDGPU::VGPR_32RegClass);
+ MRI.setType(Op1L, S32);
+ B.buildTrunc(Op1L, SrcReg1);
+
+ unsigned NewOpc = Opc == AMDGPU::G_AMDGPU_S_MUL_U64_U32
+ ? AMDGPU::G_AMDGPU_MAD_U64_U32
+ : AMDGPU::G_AMDGPU_MAD_I64_I32;
+
+ MachineIRBuilder B(MI);
+ Register Zero64 = B.buildConstant(S64, 0).getReg(0);
+ MRI.setRegClass(Zero64, &AMDGPU::VReg_64RegClass);
+ Register CarryOut = MRI.createVirtualRegister(&AMDGPU::VReg_64RegClass);
+ MRI.setRegClass(CarryOut, &AMDGPU::VReg_64RegClass);
+ B.buildInstr(NewOpc, {DstReg, CarryOut}, {Op0L, Op1L, Zero64});
+ MI.eraseFromParent();
+ return;
+ }
case AMDGPU::G_SEXT_INREG: {
SmallVector<Register, 2> SrcRegs(OpdMapper.getVRegs(1));
if (SrcRegs.empty())
@@ -3669,7 +3811,8 @@ AMDGPURegisterBankInfo::getInstrMapping(const MachineInstr &MI) const {
case AMDGPU::G_AND:
case AMDGPU::G_OR:
- case AMDGPU::G_XOR: {
+ case AMDGPU::G_XOR:
+ case AMDGPU::G_MUL: {
unsigned Size = MRI.getType(MI.getOperand(0).getReg()).getSizeInBits();
if (Size == 1) {
const RegisterBank *DstBank
@@ -3737,7 +3880,6 @@ AMDGPURegisterBankInfo::getInstrMapping(const MachineInstr &MI) const {
case AMDGPU::G_PTRMASK:
case AMDGPU::G_ADD:
case AMDGPU::G_SUB:
- case AMDGPU::G_MUL:
case AMDGPU::G_SHL:
case AMDGPU::G_LSHR:
case AMDGPU::G_ASHR:
@@ -3755,6 +3897,8 @@ AMDGPURegisterBankInfo::getInstrMapping(const MachineInstr &MI) const {
case AMDGPU::G_SHUFFLE_VECTOR:
case AMDGPU::G_SBFX:
case AMDGPU::G_UBFX:
+ case AMDGPU::G_AMDGPU_S_MUL_I64_I32:
+ case AMDGPU::G_AMDGPU_S_MUL_U64_U32:
if (isSALUMapping(MI))
return getDefaultMappingSOP(MI);
return getDefaultMappingVOP(MI);
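
A standalone sketch of the VGPR-bank lowering chosen above for G_AMDGPU_S_MUL_U64_U32 (mad_u64_u32 is an illustrative helper mirroring what G_AMDGPU_MAD_U64_U32 computes, ignoring its carry-out operand): a mad with a zero accumulator is just the full 32x32->64 product, so truncating both 64-bit sources to their low halves reproduces the original multiply for the zero-extended case this opcode encodes.

#include <cassert>
#include <cstdint>

static uint64_t mad_u64_u32(uint32_t A, uint32_t B, uint64_t Acc) {
  return uint64_t(A) * B + Acc; // full 32x32->64 product plus the accumulator
}

int main() {
  // Operands known to be zero-extended from 32 bits.
  uint64_t Src0 = 0x00000000DEADBEEFull, Src1 = 0x0000000012345678ull;
  assert(mad_u64_u32(uint32_t(Src0), uint32_t(Src1), 0) == Src0 * Src1);
  return 0;
}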
diff --git a/llvm/lib/Target/AMDGPU/AMDGPURegisterBankInfo.h b/llvm/lib/Target/AMDGPU/AMDGPURegisterBankInfo.h
index b5d16e70ab23..2bb5ef57fe03 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPURegisterBankInfo.h
+++ b/llvm/lib/Target/AMDGPU/AMDGPURegisterBankInfo.h
@@ -84,6 +84,9 @@ public:
bool applyMappingMAD_64_32(MachineIRBuilder &B,
const OperandsMapper &OpdMapper) const;
+ void applyMappingSMULU64(MachineIRBuilder &B,
+ const OperandsMapper &OpdMapper) const;
+
Register handleD16VData(MachineIRBuilder &B, MachineRegisterInfo &MRI,
Register Reg) const;
diff --git a/llvm/lib/Target/AMDGPU/BUFInstructions.td b/llvm/lib/Target/AMDGPU/BUFInstructions.td
index 43d35fa5291c..15a54856cb2e 100644
--- a/llvm/lib/Target/AMDGPU/BUFInstructions.td
+++ b/llvm/lib/Target/AMDGPU/BUFInstructions.td
@@ -1222,8 +1222,10 @@ defm BUFFER_STORE_FORMAT_D16_HI_X : MUBUF_Pseudo_Stores <
} // End HasD16LoadStore
-def BUFFER_WBINVL1 : MUBUF_Invalidate <"buffer_wbinvl1",
- int_amdgcn_buffer_wbinvl1>;
+let SubtargetPredicate = isNotGFX12Plus in
+def BUFFER_WBINVL1 : MUBUF_Invalidate <
+ "buffer_wbinvl1", int_amdgcn_buffer_wbinvl1
+>;
let SubtargetPredicate = HasAtomicFaddNoRtnInsts in
defm BUFFER_ATOMIC_ADD_F32 : MUBUF_Pseudo_Atomics_NO_RTN<
diff --git a/llvm/lib/Target/AMDGPU/FLATInstructions.td b/llvm/lib/Target/AMDGPU/FLATInstructions.td
index 615f8cd54d8f..345564c06af1 100644
--- a/llvm/lib/Target/AMDGPU/FLATInstructions.td
+++ b/llvm/lib/Target/AMDGPU/FLATInstructions.td
@@ -60,6 +60,7 @@ class FLAT_Pseudo<string opName, dag outs, dag ins,
bits<1> has_sve = 0; // Scratch VGPR Enable
bits<1> lds = 0;
bits<1> sve = 0;
+ bits<1> has_offset = 1;
let SubtargetPredicate = !if(is_flat_global, HasFlatGlobalInsts,
!if(is_flat_scratch, HasFlatScratchInsts, HasFlatAddressSpace));
@@ -182,7 +183,7 @@ class VFLAT_Real <bits<8> op, FLAT_Pseudo ps, string opName = ps.Mnemonic> :
let Inst{51-50} = cpol{4-3}; // scope
let Inst{62-55} = !if(ps.has_data, vdata{7-0}, ?);
let Inst{71-64} = !if(ps.has_vaddr, vaddr, ?);
- let Inst{95-72} = offset;
+ let Inst{95-72} = !if(ps.has_offset, offset, ?);
}
class GlobalSaddrTable <bit is_saddr, string Name = ""> {
@@ -340,6 +341,34 @@ multiclass FLAT_Global_Store_AddTid_Pseudo<string opName, RegisterClass regClass
GlobalSaddrTable<1, opName>;
}
+class FLAT_Global_Invalidate_Writeback<string opName, SDPatternOperator node = null_frag> :
+ FLAT_Pseudo<opName, (outs), (ins CPol:$cpol), "$cpol", [(node)]> {
+
+ let AsmMatchConverter = "";
+
+ let hasSideEffects = 1;
+ let mayLoad = 0;
+ let mayStore = 0;
+ let is_flat_global = 1;
+
+ let has_offset = 0;
+ let has_saddr = 0;
+ let enabled_saddr = 0;
+ let saddr_value = 0;
+ let has_vdst = 0;
+ let has_data = 0;
+ let has_vaddr = 0;
+ let has_glc = 0;
+ let has_dlc = 0;
+ let glcValue = 0;
+ let dlcValue = 0;
+ let has_sccb = 0;
+ let sccbValue = 0;
+ let has_sve = 0;
+ let lds = 0;
+ let sve = 0;
+}
+
class FlatScratchInst <string sv_op, string mode> {
string SVOp = sv_op;
string Mode = mode;
@@ -928,6 +957,10 @@ defm GLOBAL_LOAD_LDS_DWORD : FLAT_Global_Load_LDS_Pseudo <"global_load_lds_dwor
let SubtargetPredicate = isGFX12Plus in {
defm GLOBAL_ATOMIC_ORDERED_ADD_B64 : FLAT_Global_Atomic_Pseudo <"global_atomic_ordered_add_b64", VReg_64, i64>;
+
+ def GLOBAL_INV : FLAT_Global_Invalidate_Writeback<"global_inv">;
+ def GLOBAL_WB : FLAT_Global_Invalidate_Writeback<"global_wb">;
+ def GLOBAL_WBINV : FLAT_Global_Invalidate_Writeback<"global_wbinv">;
} // End SubtargetPredicate = isGFX12Plus
} // End is_flat_global = 1
@@ -2662,6 +2695,10 @@ defm GLOBAL_ATOMIC_MAX_NUM_F32 : VGLOBAL_Real_Atomics_gfx12<0x052, "GLOBAL_A
defm GLOBAL_ATOMIC_ADD_F32 : VGLOBAL_Real_Atomics_gfx12<0x056, "GLOBAL_ATOMIC_ADD_F32", "global_atomic_add_f32">;
defm GLOBAL_ATOMIC_ORDERED_ADD_B64 : VGLOBAL_Real_Atomics_gfx12<0x073, "GLOBAL_ATOMIC_ORDERED_ADD_B64", "global_atomic_ordered_add_b64">;
+defm GLOBAL_INV : VFLAT_Real_Base_gfx12<0x02b, "GLOBAL_INV", "global_inv">;
+defm GLOBAL_WB : VFLAT_Real_Base_gfx12<0x02c, "GLOBAL_WB", "global_wb">;
+defm GLOBAL_WBINV : VFLAT_Real_Base_gfx12<0x04f, "GLOBAL_WBINV", "global_wbinv">;
+
// ENC_VSCRATCH.
defm SCRATCH_LOAD_U8 : VSCRATCH_Real_AllAddr_gfx12<0x10, "SCRATCH_LOAD_UBYTE", "scratch_load_u8", true>;
defm SCRATCH_LOAD_I8 : VSCRATCH_Real_AllAddr_gfx12<0x11, "SCRATCH_LOAD_SBYTE", "scratch_load_i8", true>;
diff --git a/llvm/lib/Target/AMDGPU/GCNSubtarget.h b/llvm/lib/Target/AMDGPU/GCNSubtarget.h
index ce3164d7b92e..f6f37f5170a4 100644
--- a/llvm/lib/Target/AMDGPU/GCNSubtarget.h
+++ b/llvm/lib/Target/AMDGPU/GCNSubtarget.h
@@ -683,6 +683,8 @@ public:
bool hasScalarAddSub64() const { return getGeneration() >= GFX12; }
+ bool hasScalarSMulU64() const { return getGeneration() >= GFX12; }
+
bool hasUnpackedD16VMem() const {
return HasUnpackedD16VMem;
}
diff --git a/llvm/lib/Target/AMDGPU/SIISelLowering.cpp b/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
index 079cae06888c..209debb3a105 100644
--- a/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
+++ b/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
@@ -167,8 +167,10 @@ SITargetLowering::SITargetLowering(const TargetMachine &TM,
addRegisterClass(MVT::v4bf16, &AMDGPU::SReg_64RegClass);
addRegisterClass(MVT::v8i16, &AMDGPU::SGPR_128RegClass);
addRegisterClass(MVT::v8f16, &AMDGPU::SGPR_128RegClass);
+ addRegisterClass(MVT::v8bf16, &AMDGPU::SGPR_128RegClass);
addRegisterClass(MVT::v16i16, &AMDGPU::SGPR_256RegClass);
addRegisterClass(MVT::v16f16, &AMDGPU::SGPR_256RegClass);
+ addRegisterClass(MVT::v16bf16, &AMDGPU::SGPR_256RegClass);
addRegisterClass(MVT::v32i16, &AMDGPU::SGPR_512RegClass);
addRegisterClass(MVT::v32f16, &AMDGPU::SGPR_512RegClass);
}
@@ -310,13 +312,14 @@ SITargetLowering::SITargetLowering(const TargetMachine &TM,
// We only support LOAD/STORE and vector manipulation ops for vectors
// with > 4 elements.
for (MVT VT :
- {MVT::v8i32, MVT::v8f32, MVT::v9i32, MVT::v9f32, MVT::v10i32,
- MVT::v10f32, MVT::v11i32, MVT::v11f32, MVT::v12i32, MVT::v12f32,
- MVT::v16i32, MVT::v16f32, MVT::v2i64, MVT::v2f64, MVT::v4i16,
- MVT::v4f16, MVT::v4bf16, MVT::v3i64, MVT::v3f64, MVT::v6i32,
- MVT::v6f32, MVT::v4i64, MVT::v4f64, MVT::v8i64, MVT::v8f64,
- MVT::v8i16, MVT::v8f16, MVT::v16i16, MVT::v16f16, MVT::v16i64,
- MVT::v16f64, MVT::v32i32, MVT::v32f32, MVT::v32i16, MVT::v32f16}) {
+ {MVT::v8i32, MVT::v8f32, MVT::v9i32, MVT::v9f32, MVT::v10i32,
+ MVT::v10f32, MVT::v11i32, MVT::v11f32, MVT::v12i32, MVT::v12f32,
+ MVT::v16i32, MVT::v16f32, MVT::v2i64, MVT::v2f64, MVT::v4i16,
+ MVT::v4f16, MVT::v4bf16, MVT::v3i64, MVT::v3f64, MVT::v6i32,
+ MVT::v6f32, MVT::v4i64, MVT::v4f64, MVT::v8i64, MVT::v8f64,
+ MVT::v8i16, MVT::v8f16, MVT::v8bf16, MVT::v16i16, MVT::v16f16,
+ MVT::v16bf16, MVT::v16i64, MVT::v16f64, MVT::v32i32, MVT::v32f32,
+ MVT::v32i16, MVT::v32f16, MVT::v32bf16}) {
for (unsigned Op = 0; Op < ISD::BUILTIN_OP_END; ++Op) {
switch (Op) {
case ISD::LOAD:
@@ -683,6 +686,8 @@ SITargetLowering::SITargetLowering(const TargetMachine &TM,
AddPromotedToType(ISD::LOAD, MVT::v8i16, MVT::v4i32);
setOperationAction(ISD::LOAD, MVT::v8f16, Promote);
AddPromotedToType(ISD::LOAD, MVT::v8f16, MVT::v4i32);
+ setOperationAction(ISD::LOAD, MVT::v8bf16, Promote);
+ AddPromotedToType(ISD::LOAD, MVT::v8bf16, MVT::v4i32);
setOperationAction(ISD::STORE, MVT::v4i16, Promote);
AddPromotedToType(ISD::STORE, MVT::v4i16, MVT::v2i32);
@@ -693,16 +698,22 @@ SITargetLowering::SITargetLowering(const TargetMachine &TM,
AddPromotedToType(ISD::STORE, MVT::v8i16, MVT::v4i32);
setOperationAction(ISD::STORE, MVT::v8f16, Promote);
AddPromotedToType(ISD::STORE, MVT::v8f16, MVT::v4i32);
+ setOperationAction(ISD::STORE, MVT::v8bf16, Promote);
+ AddPromotedToType(ISD::STORE, MVT::v8bf16, MVT::v4i32);
setOperationAction(ISD::LOAD, MVT::v16i16, Promote);
AddPromotedToType(ISD::LOAD, MVT::v16i16, MVT::v8i32);
setOperationAction(ISD::LOAD, MVT::v16f16, Promote);
AddPromotedToType(ISD::LOAD, MVT::v16f16, MVT::v8i32);
+ setOperationAction(ISD::LOAD, MVT::v16bf16, Promote);
+ AddPromotedToType(ISD::LOAD, MVT::v16bf16, MVT::v8i32);
setOperationAction(ISD::STORE, MVT::v16i16, Promote);
AddPromotedToType(ISD::STORE, MVT::v16i16, MVT::v8i32);
setOperationAction(ISD::STORE, MVT::v16f16, Promote);
AddPromotedToType(ISD::STORE, MVT::v16f16, MVT::v8i32);
+ setOperationAction(ISD::STORE, MVT::v16bf16, Promote);
+ AddPromotedToType(ISD::STORE, MVT::v16bf16, MVT::v8i32);
setOperationAction(ISD::LOAD, MVT::v32i16, Promote);
AddPromotedToType(ISD::LOAD, MVT::v32i16, MVT::v16i32);
@@ -725,7 +736,8 @@ SITargetLowering::SITargetLowering(const TargetMachine &TM,
MVT::v8i32, Expand);
if (!Subtarget->hasVOP3PInsts())
- setOperationAction(ISD::BUILD_VECTOR, {MVT::v2i16, MVT::v2f16}, Custom);
+ setOperationAction(ISD::BUILD_VECTOR,
+ {MVT::v2i16, MVT::v2f16, MVT::v2bf16}, Custom);
setOperationAction(ISD::FNEG, MVT::v2f16, Legal);
// This isn't really legal, but this avoids the legalizer unrolling it (and
@@ -743,8 +755,9 @@ SITargetLowering::SITargetLowering(const TargetMachine &TM,
{MVT::v4f16, MVT::v8f16, MVT::v16f16, MVT::v32f16},
Expand);
- for (MVT Vec16 : {MVT::v8i16, MVT::v8f16, MVT::v16i16, MVT::v16f16,
- MVT::v32i16, MVT::v32f16}) {
+ for (MVT Vec16 :
+ {MVT::v8i16, MVT::v8f16, MVT::v8bf16, MVT::v16i16, MVT::v16f16,
+ MVT::v16bf16, MVT::v32i16, MVT::v32f16, MVT::v32bf16}) {
setOperationAction(
{ISD::BUILD_VECTOR, ISD::EXTRACT_VECTOR_ELT, ISD::SCALAR_TO_VECTOR},
Vec16, Custom);
@@ -814,13 +827,17 @@ SITargetLowering::SITargetLowering(const TargetMachine &TM,
}
setOperationAction(ISD::SELECT,
- {MVT::v4i16, MVT::v4f16, MVT::v2i8, MVT::v4i8, MVT::v8i8,
- MVT::v8i16, MVT::v8f16, MVT::v16i16, MVT::v16f16,
- MVT::v32i16, MVT::v32f16},
+ {MVT::v4i16, MVT::v4f16, MVT::v4bf16, MVT::v2i8, MVT::v4i8,
+ MVT::v8i8, MVT::v8i16, MVT::v8f16, MVT::v8bf16,
+ MVT::v16i16, MVT::v16f16, MVT::v16bf16, MVT::v32i16,
+ MVT::v32f16, MVT::v32bf16},
Custom);
setOperationAction({ISD::SMULO, ISD::UMULO}, MVT::i64, Custom);
+ if (Subtarget->hasScalarSMulU64())
+ setOperationAction(ISD::MUL, MVT::i64, Custom);
+
if (Subtarget->hasMad64_32())
setOperationAction({ISD::SMUL_LOHI, ISD::UMUL_LOHI}, MVT::i32, Custom);
@@ -5431,7 +5448,9 @@ SDValue SITargetLowering::splitTernaryVectorOp(SDValue Op,
assert(VT == MVT::v4i16 || VT == MVT::v4f16 || VT == MVT::v8i16 ||
VT == MVT::v8f16 || VT == MVT::v4f32 || VT == MVT::v16i16 ||
VT == MVT::v16f16 || VT == MVT::v8f32 || VT == MVT::v16f32 ||
- VT == MVT::v32f32 || VT == MVT::v32f16 || VT == MVT::v32i16);
+ VT == MVT::v32f32 || VT == MVT::v32f16 || VT == MVT::v32i16 ||
+ VT == MVT::v4bf16 || VT == MVT::v8bf16 || VT == MVT::v16bf16 ||
+ VT == MVT::v32bf16);
SDValue Lo0, Hi0;
SDValue Op0 = Op.getOperand(0);
@@ -5550,7 +5569,6 @@ SDValue SITargetLowering::LowerOperation(SDValue Op, SelectionDAG &DAG) const {
case ISD::SRL:
case ISD::ADD:
case ISD::SUB:
- case ISD::MUL:
case ISD::SMIN:
case ISD::SMAX:
case ISD::UMIN:
@@ -5564,6 +5582,8 @@ SDValue SITargetLowering::LowerOperation(SDValue Op, SelectionDAG &DAG) const {
case ISD::SADDSAT:
case ISD::SSUBSAT:
return splitBinaryVectorOp(Op, DAG);
+ case ISD::MUL:
+ return lowerMUL(Op, DAG);
case ISD::SMULO:
case ISD::UMULO:
return lowerXMULO(Op, DAG);
@@ -6219,6 +6239,66 @@ SDValue SITargetLowering::lowerFLDEXP(SDValue Op, SelectionDAG &DAG) const {
return DAG.getNode(ISD::FLDEXP, DL, VT, Op.getOperand(0), TruncExp);
}
+// Custom lowering for vector multiplications and s_mul_u64.
+SDValue SITargetLowering::lowerMUL(SDValue Op, SelectionDAG &DAG) const {
+ EVT VT = Op.getValueType();
+
+ // Split vector operands.
+ if (VT.isVector())
+ return splitBinaryVectorOp(Op, DAG);
+
+  assert(VT == MVT::i64 && "The following code is a special case for s_mul_u64");
+
+ // There are four ways to lower s_mul_u64:
+ //
+ // 1. If all the operands are uniform, then we lower it as it is.
+ //
+  // 2. If the operands are divergent, then we have to split s_mul_u64 into
+  // 32-bit multiplications because there is no vector equivalent of s_mul_u64.
+  //
+  // 3. If the cost model decides that it is more efficient to use vector
+  // registers, then we have to split s_mul_u64 into 32-bit multiplications.
+  // This happens in splitScalarSMULU64() in SIInstrInfo.cpp.
+  //
+  // 4. If the cost model decides to use vector registers and both of the
+  // operands are zero-extended/sign-extended from 32 bits, then we split the
+  // s_mul_u64 into two 32-bit multiplications. The problem is that it is not
+  // possible to check if the operands are zero-extended or sign-extended in
+  // SIInstrInfo.cpp. For this reason, here, we replace s_mul_u64 with
+  // s_mul_u64_u32_pseudo if both operands are zero-extended and we replace
+  // s_mul_u64 with s_mul_i64_i32_pseudo if both operands are sign-extended.
+  // If the cost model decides that we have to use vector registers, then
+  // splitScalarSMulPseudo() (in SIInstrInfo.cpp) splits s_mul_u64_u32_pseudo/
+  // s_mul_i64_i32_pseudo into two vector multiplications. If the cost model
+  // decides that we should use scalar registers, then s_mul_u64_u32_pseudo/
+  // s_mul_i64_i32_pseudo is lowered as s_mul_u64 in expandPostRAPseudo() in
+  // SIInstrInfo.cpp.
+
+ if (Op->isDivergent())
+ return SDValue();
+
+ SDValue Op0 = Op.getOperand(0);
+ SDValue Op1 = Op.getOperand(1);
+  // If all the operands are zero-extended to 32 bits, then we replace
+  // s_mul_u64 with s_mul_u64_u32_pseudo. If all the operands are sign-extended
+  // to 32 bits, then we replace s_mul_u64 with s_mul_i64_i32_pseudo.
+ KnownBits Op0KnownBits = DAG.computeKnownBits(Op0);
+ unsigned Op0LeadingZeros = Op0KnownBits.countMinLeadingZeros();
+ KnownBits Op1KnownBits = DAG.computeKnownBits(Op1);
+ unsigned Op1LeadingZeros = Op1KnownBits.countMinLeadingZeros();
+ SDLoc SL(Op);
+ if (Op0LeadingZeros >= 32 && Op1LeadingZeros >= 32)
+ return SDValue(
+ DAG.getMachineNode(AMDGPU::S_MUL_U64_U32_PSEUDO, SL, VT, Op0, Op1), 0);
+ unsigned Op0SignBits = DAG.ComputeNumSignBits(Op0);
+ unsigned Op1SignBits = DAG.ComputeNumSignBits(Op1);
+ if (Op0SignBits >= 33 && Op1SignBits >= 33)
+ return SDValue(
+ DAG.getMachineNode(AMDGPU::S_MUL_I64_I32_PSEUDO, SL, VT, Op0, Op1), 0);
+ // If all the operands are uniform, then we lower s_mul_u64 as it is.
+ return Op;
+}
+
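
A standalone arithmetic check of the signed threshold used above: 33 sign bits (not 32) are required so that each operand fits in int32_t, in which case a signed 32x32->64 multiply reproduces the 64-bit product exactly.

#include <cassert>
#include <cstdint>

int main() {
  int64_t C = -5, D = 0x7FFFFFFF; // 61 and 33 sign bits respectively
  assert(C * D == int64_t(int32_t(C)) * int64_t(int32_t(D)));
  // 0x00000000FFFFFFFF has only 32 sign bits: as an int32_t it reads back as
  // -1, which is why the pseudo is only formed at >= 33 sign bits.
  return 0;
}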
SDValue SITargetLowering::lowerXMULO(SDValue Op, SelectionDAG &DAG) const {
EVT VT = Op.getValueType();
SDLoc SL(Op);
@@ -6854,8 +6934,8 @@ SDValue SITargetLowering::lowerBUILD_VECTOR(SDValue Op,
SDLoc SL(Op);
EVT VT = Op.getValueType();
- if (VT == MVT::v4i16 || VT == MVT::v4f16 || VT == MVT::v4bf16 ||
- VT == MVT::v8i16 || VT == MVT::v8f16) {
+ if (VT == MVT::v4i16 || VT == MVT::v4f16 || VT == MVT::v8i16 ||
+ VT == MVT::v8f16 || VT == MVT::v4bf16 || VT == MVT::v8bf16) {
EVT HalfVT = MVT::getVectorVT(VT.getVectorElementType().getSimpleVT(),
VT.getVectorNumElements() / 2);
MVT HalfIntVT = MVT::getIntegerVT(HalfVT.getSizeInBits());
@@ -6878,7 +6958,7 @@ SDValue SITargetLowering::lowerBUILD_VECTOR(SDValue Op,
return DAG.getNode(ISD::BITCAST, SL, VT, Blend);
}
- if (VT == MVT::v16i16 || VT == MVT::v16f16) {
+ if (VT == MVT::v16i16 || VT == MVT::v16f16 || VT == MVT::v16bf16) {
EVT QuarterVT = MVT::getVectorVT(VT.getVectorElementType().getSimpleVT(),
VT.getVectorNumElements() / 4);
MVT QuarterIntVT = MVT::getIntegerVT(QuarterVT.getSizeInBits());
@@ -6899,7 +6979,7 @@ SDValue SITargetLowering::lowerBUILD_VECTOR(SDValue Op,
return DAG.getNode(ISD::BITCAST, SL, VT, Blend);
}
- if (VT == MVT::v32i16 || VT == MVT::v32f16) {
+ if (VT == MVT::v32i16 || VT == MVT::v32f16 || VT == MVT::v32bf16) {
EVT QuarterVT = MVT::getVectorVT(VT.getVectorElementType().getSimpleVT(),
VT.getVectorNumElements() / 8);
MVT QuarterIntVT = MVT::getIntegerVT(QuarterVT.getSizeInBits());
@@ -14182,11 +14262,11 @@ SDValue SITargetLowering::PerformDAGCombine(SDNode *N,
EVT VT = N->getValueType(0);
// v2i16 (scalar_to_vector i16:x) -> v2i16 (bitcast (any_extend i16:x))
- if (VT == MVT::v2i16 || VT == MVT::v2f16) {
+  if (VT == MVT::v2i16 || VT == MVT::v2f16 || VT == MVT::v2bf16) {
SDLoc SL(N);
SDValue Src = N->getOperand(0);
EVT EltVT = Src.getValueType();
- if (EltVT == MVT::f16)
+ if (EltVT != MVT::i16)
Src = DAG.getNode(ISD::BITCAST, SL, MVT::i16, Src);
SDValue Ext = DAG.getNode(ISD::ANY_EXTEND, SL, MVT::i32, Src);
diff --git a/llvm/lib/Target/AMDGPU/SIISelLowering.h b/llvm/lib/Target/AMDGPU/SIISelLowering.h
index 00f9ddf11ea7..92b38ebade62 100644
--- a/llvm/lib/Target/AMDGPU/SIISelLowering.h
+++ b/llvm/lib/Target/AMDGPU/SIISelLowering.h
@@ -146,6 +146,7 @@ private:
SDValue lowerFP_ROUND(SDValue Op, SelectionDAG &DAG) const;
SDValue lowerFMINNUM_FMAXNUM(SDValue Op, SelectionDAG &DAG) const;
SDValue lowerFLDEXP(SDValue Op, SelectionDAG &DAG) const;
+ SDValue lowerMUL(SDValue Op, SelectionDAG &DAG) const;
SDValue lowerXMULO(SDValue Op, SelectionDAG &DAG) const;
SDValue lowerXMUL_LOHI(SDValue Op, SelectionDAG &DAG) const;
diff --git a/llvm/lib/Target/AMDGPU/SIInsertWaitcnts.cpp b/llvm/lib/Target/AMDGPU/SIInsertWaitcnts.cpp
index 55ddb540c51e..1cb1d32707f2 100644
--- a/llvm/lib/Target/AMDGPU/SIInsertWaitcnts.cpp
+++ b/llvm/lib/Target/AMDGPU/SIInsertWaitcnts.cpp
@@ -1424,6 +1424,12 @@ bool SIInsertWaitcnts::mayAccessScratchThroughFlat(
});
}
+static bool isCacheInvOrWBInst(MachineInstr &Inst) {
+ auto Opc = Inst.getOpcode();
+ return Opc == AMDGPU::GLOBAL_INV || Opc == AMDGPU::GLOBAL_WB ||
+ Opc == AMDGPU::GLOBAL_WBINV;
+}
+
void SIInsertWaitcnts::updateEventWaitcntAfter(MachineInstr &Inst,
WaitcntBrackets *ScoreBrackets) {
// Now look at the instruction opcode. If it is a memory access
@@ -1439,6 +1445,10 @@ void SIInsertWaitcnts::updateEventWaitcntAfter(MachineInstr &Inst,
ScoreBrackets->updateByEvent(TII, TRI, MRI, LDS_ACCESS, Inst);
}
} else if (TII->isFLAT(Inst)) {
+ // TODO: Track this properly.
+ if (isCacheInvOrWBInst(Inst))
+ return;
+
assert(Inst.mayLoadOrStore());
int FlatASCount = 0;
diff --git a/llvm/lib/Target/AMDGPU/SIInstrInfo.cpp b/llvm/lib/Target/AMDGPU/SIInstrInfo.cpp
index 67992929ab35..d4c7a457e9aa 100644
--- a/llvm/lib/Target/AMDGPU/SIInstrInfo.cpp
+++ b/llvm/lib/Target/AMDGPU/SIInstrInfo.cpp
@@ -2475,6 +2475,11 @@ bool SIInstrInfo::expandPostRAPseudo(MachineInstr &MI) const {
MI.eraseFromParent();
break;
}
+
+ case AMDGPU::S_MUL_U64_U32_PSEUDO:
+ case AMDGPU::S_MUL_I64_I32_PSEUDO:
+ MI.setDesc(get(AMDGPU::S_MUL_U64));
+ break;
}
return true;
}
@@ -6845,6 +6850,21 @@ void SIInstrInfo::moveToVALUImpl(SIInstrWorklist &Worklist,
// Default handling
break;
}
+
+ case AMDGPU::S_MUL_U64:
+    // Split s_mul_u64 into 32-bit vector multiplications.
+ splitScalarSMulU64(Worklist, Inst, MDT);
+ Inst.eraseFromParent();
+ return;
+
+ case AMDGPU::S_MUL_U64_U32_PSEUDO:
+ case AMDGPU::S_MUL_I64_I32_PSEUDO:
+ // This is a special case of s_mul_u64 where all the operands are either
+ // zero extended or sign extended.
+ splitScalarSMulPseudo(Worklist, Inst, MDT);
+ Inst.eraseFromParent();
+ return;
+
case AMDGPU::S_AND_B64:
splitScalar64BitBinaryOp(Worklist, Inst, AMDGPU::S_AND_B32, MDT);
Inst.eraseFromParent();
@@ -7654,6 +7674,180 @@ void SIInstrInfo::splitScalar64BitUnaryOp(SIInstrWorklist &Worklist,
addUsersToMoveToVALUWorklist(FullDestReg, MRI, Worklist);
}
+// There is no vector equivalent of s_mul_u64. For this reason, we need to
+// split the s_mul_u64 into 32-bit vector multiplications.
+void SIInstrInfo::splitScalarSMulU64(SIInstrWorklist &Worklist,
+ MachineInstr &Inst,
+ MachineDominatorTree *MDT) const {
+ MachineBasicBlock &MBB = *Inst.getParent();
+ MachineRegisterInfo &MRI = MBB.getParent()->getRegInfo();
+
+ Register FullDestReg = MRI.createVirtualRegister(&AMDGPU::VReg_64RegClass);
+ Register DestSub0 = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass);
+ Register DestSub1 = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass);
+
+ MachineOperand &Dest = Inst.getOperand(0);
+ MachineOperand &Src0 = Inst.getOperand(1);
+ MachineOperand &Src1 = Inst.getOperand(2);
+ const DebugLoc &DL = Inst.getDebugLoc();
+ MachineBasicBlock::iterator MII = Inst;
+
+ const TargetRegisterClass *Src0RC = MRI.getRegClass(Src0.getReg());
+ const TargetRegisterClass *Src1RC = MRI.getRegClass(Src1.getReg());
+ const TargetRegisterClass *Src0SubRC =
+ RI.getSubRegisterClass(Src0RC, AMDGPU::sub0);
+ if (RI.isSGPRClass(Src0SubRC))
+ Src0SubRC = RI.getEquivalentVGPRClass(Src0SubRC);
+ const TargetRegisterClass *Src1SubRC =
+ RI.getSubRegisterClass(Src1RC, AMDGPU::sub0);
+ if (RI.isSGPRClass(Src1SubRC))
+ Src1SubRC = RI.getEquivalentVGPRClass(Src1SubRC);
+
+ // First, we extract the low 32-bit and high 32-bit values from each of the
+ // operands.
+ MachineOperand Op0L =
+ buildExtractSubRegOrImm(MII, MRI, Src0, Src0RC, AMDGPU::sub0, Src0SubRC);
+ MachineOperand Op1L =
+ buildExtractSubRegOrImm(MII, MRI, Src1, Src1RC, AMDGPU::sub0, Src1SubRC);
+ MachineOperand Op0H =
+ buildExtractSubRegOrImm(MII, MRI, Src0, Src0RC, AMDGPU::sub1, Src0SubRC);
+ MachineOperand Op1H =
+ buildExtractSubRegOrImm(MII, MRI, Src1, Src1RC, AMDGPU::sub1, Src1SubRC);
+
+  // The multiplication is done as follows:
+ //
+ // Op1H Op1L
+ // * Op0H Op0L
+ // --------------------
+ // Op1H*Op0L Op1L*Op0L
+ // + Op1H*Op0H Op1L*Op0H
+ // -----------------------------------------
+ // (Op1H*Op0L + Op1L*Op0H + carry) Op1L*Op0L
+ //
+ // We drop Op1H*Op0H because the result of the multiplication is a 64-bit
+ // value and that would overflow.
+ // The low 32-bit value is Op1L*Op0L.
+ // The high 32-bit value is Op1H*Op0L + Op1L*Op0H + carry (from Op1L*Op0L).
+
+ Register Op1L_Op0H_Reg = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass);
+ MachineInstr *Op1L_Op0H =
+ BuildMI(MBB, MII, DL, get(AMDGPU::V_MUL_LO_U32_e64), Op1L_Op0H_Reg)
+ .add(Op1L)
+ .add(Op0H);
+
+ Register Op1H_Op0L_Reg = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass);
+ MachineInstr *Op1H_Op0L =
+ BuildMI(MBB, MII, DL, get(AMDGPU::V_MUL_LO_U32_e64), Op1H_Op0L_Reg)
+ .add(Op1H)
+ .add(Op0L);
+
+ Register CarryReg = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass);
+ MachineInstr *Carry =
+ BuildMI(MBB, MII, DL, get(AMDGPU::V_MUL_HI_U32_e64), CarryReg)
+ .add(Op1L)
+ .add(Op0L);
+
+ MachineInstr *LoHalf =
+ BuildMI(MBB, MII, DL, get(AMDGPU::V_MUL_LO_U32_e64), DestSub0)
+ .add(Op1L)
+ .add(Op0L);
+
+ Register AddReg = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass);
+ MachineInstr *Add = BuildMI(MBB, MII, DL, get(AMDGPU::V_ADD_U32_e32), AddReg)
+ .addReg(Op1L_Op0H_Reg)
+ .addReg(Op1H_Op0L_Reg);
+
+ MachineInstr *HiHalf =
+ BuildMI(MBB, MII, DL, get(AMDGPU::V_ADD_U32_e32), DestSub1)
+ .addReg(AddReg)
+ .addReg(CarryReg);
+
+ BuildMI(MBB, MII, DL, get(TargetOpcode::REG_SEQUENCE), FullDestReg)
+ .addReg(DestSub0)
+ .addImm(AMDGPU::sub0)
+ .addReg(DestSub1)
+ .addImm(AMDGPU::sub1);
+
+ MRI.replaceRegWith(Dest.getReg(), FullDestReg);
+
+ // Try to legalize the operands in case we need to swap the order to keep it
+ // valid.
+ legalizeOperands(*Op1L_Op0H, MDT);
+ legalizeOperands(*Op1H_Op0L, MDT);
+ legalizeOperands(*Carry, MDT);
+ legalizeOperands(*LoHalf, MDT);
+ legalizeOperands(*Add, MDT);
+ legalizeOperands(*HiHalf, MDT);
+
+ // Move all users of this moved value.
+ addUsersToMoveToVALUWorklist(FullDestReg, MRI, Worklist);
+}
+
+// Lower S_MUL_U64_U32_PSEUDO/S_MUL_I64_I32_PSEUDO into two 32-bit vector
+// multiplications.
+void SIInstrInfo::splitScalarSMulPseudo(SIInstrWorklist &Worklist,
+ MachineInstr &Inst,
+ MachineDominatorTree *MDT) const {
+ MachineBasicBlock &MBB = *Inst.getParent();
+ MachineRegisterInfo &MRI = MBB.getParent()->getRegInfo();
+
+ Register FullDestReg = MRI.createVirtualRegister(&AMDGPU::VReg_64RegClass);
+ Register DestSub0 = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass);
+ Register DestSub1 = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass);
+
+ MachineOperand &Dest = Inst.getOperand(0);
+ MachineOperand &Src0 = Inst.getOperand(1);
+ MachineOperand &Src1 = Inst.getOperand(2);
+ const DebugLoc &DL = Inst.getDebugLoc();
+ MachineBasicBlock::iterator MII = Inst;
+
+ const TargetRegisterClass *Src0RC = MRI.getRegClass(Src0.getReg());
+ const TargetRegisterClass *Src1RC = MRI.getRegClass(Src1.getReg());
+ const TargetRegisterClass *Src0SubRC =
+ RI.getSubRegisterClass(Src0RC, AMDGPU::sub0);
+ if (RI.isSGPRClass(Src0SubRC))
+ Src0SubRC = RI.getEquivalentVGPRClass(Src0SubRC);
+ const TargetRegisterClass *Src1SubRC =
+ RI.getSubRegisterClass(Src1RC, AMDGPU::sub0);
+ if (RI.isSGPRClass(Src1SubRC))
+ Src1SubRC = RI.getEquivalentVGPRClass(Src1SubRC);
+
+ // First, we extract the low 32-bit and high 32-bit values from each of the
+ // operands.
+ MachineOperand Op0L =
+ buildExtractSubRegOrImm(MII, MRI, Src0, Src0RC, AMDGPU::sub0, Src0SubRC);
+ MachineOperand Op1L =
+ buildExtractSubRegOrImm(MII, MRI, Src1, Src1RC, AMDGPU::sub0, Src1SubRC);
+
+ unsigned Opc = Inst.getOpcode();
+ unsigned NewOpc = Opc == AMDGPU::S_MUL_U64_U32_PSEUDO
+ ? AMDGPU::V_MUL_HI_U32_e64
+ : AMDGPU::V_MUL_HI_I32_e64;
+ MachineInstr *HiHalf =
+ BuildMI(MBB, MII, DL, get(NewOpc), DestSub1).add(Op1L).add(Op0L);
+
+ MachineInstr *LoHalf =
+ BuildMI(MBB, MII, DL, get(AMDGPU::V_MUL_LO_U32_e64), DestSub0)
+ .add(Op1L)
+ .add(Op0L);
+
+ BuildMI(MBB, MII, DL, get(TargetOpcode::REG_SEQUENCE), FullDestReg)
+ .addReg(DestSub0)
+ .addImm(AMDGPU::sub0)
+ .addReg(DestSub1)
+ .addImm(AMDGPU::sub1);
+
+ MRI.replaceRegWith(Dest.getReg(), FullDestReg);
+
+ // Try to legalize the operands in case we need to swap the order to keep it
+ // valid.
+ legalizeOperands(*HiHalf, MDT);
+ legalizeOperands(*LoHalf, MDT);
+
+ // Move all users of this moved value.
+ addUsersToMoveToVALUWorklist(FullDestReg, MRI, Worklist);
+}
+
void SIInstrInfo::splitScalar64BitBinaryOp(SIInstrWorklist &Worklist,
MachineInstr &Inst, unsigned Opcode,
MachineDominatorTree *MDT) const {
diff --git a/llvm/lib/Target/AMDGPU/SIInstrInfo.h b/llvm/lib/Target/AMDGPU/SIInstrInfo.h
index 46eee6fae0a5..37ee159362a2 100644
--- a/llvm/lib/Target/AMDGPU/SIInstrInfo.h
+++ b/llvm/lib/Target/AMDGPU/SIInstrInfo.h
@@ -138,6 +138,12 @@ private:
unsigned Opcode,
MachineDominatorTree *MDT = nullptr) const;
+ void splitScalarSMulU64(SIInstrWorklist &Worklist, MachineInstr &Inst,
+ MachineDominatorTree *MDT) const;
+
+ void splitScalarSMulPseudo(SIInstrWorklist &Worklist, MachineInstr &Inst,
+ MachineDominatorTree *MDT) const;
+
void splitScalar64BitXnor(SIInstrWorklist &Worklist, MachineInstr &Inst,
MachineDominatorTree *MDT = nullptr) const;
diff --git a/llvm/lib/Target/AMDGPU/SIInstructions.td b/llvm/lib/Target/AMDGPU/SIInstructions.td
index ea2a8b75d074..1cd8a37c3aa9 100644
--- a/llvm/lib/Target/AMDGPU/SIInstructions.td
+++ b/llvm/lib/Target/AMDGPU/SIInstructions.td
@@ -1633,6 +1633,37 @@ def : BitConvert <v2f64, v8i16, SReg_128>;
def : BitConvert <v2i64, v8f16, SReg_128>;
def : BitConvert <v2f64, v8f16, SReg_128>;
+def : BitConvert <v4i32, v8bf16, SReg_128>;
+def : BitConvert <v8bf16, v4i32, SReg_128>;
+def : BitConvert <v4i32, v8bf16, VReg_128>;
+def : BitConvert <v8bf16, v4i32, VReg_128>;
+
+def : BitConvert <v4f32, v8bf16, SReg_128>;
+def : BitConvert <v8bf16, v4f32, SReg_128>;
+def : BitConvert <v4f32, v8bf16, VReg_128>;
+def : BitConvert <v8bf16, v4f32, VReg_128>;
+
+def : BitConvert <v8i16, v8bf16, SReg_128>;
+def : BitConvert <v8bf16, v8i16, SReg_128>;
+def : BitConvert <v8i16, v8bf16, VReg_128>;
+def : BitConvert <v8bf16, v8i16, VReg_128>;
+
+def : BitConvert <v8f16, v8bf16, SReg_128>;
+def : BitConvert <v8bf16, v8f16, SReg_128>;
+def : BitConvert <v8f16, v8bf16, VReg_128>;
+def : BitConvert <v8bf16, v8f16, VReg_128>;
+
+def : BitConvert <v2f64, v8bf16, SReg_128>;
+def : BitConvert <v8bf16, v2f64, SReg_128>;
+def : BitConvert <v2f64, v8bf16, VReg_128>;
+def : BitConvert <v8bf16, v2f64, VReg_128>;
+
+def : BitConvert <v2i64, v8bf16, SReg_128>;
+def : BitConvert <v8bf16, v2i64, SReg_128>;
+def : BitConvert <v2i64, v8bf16, VReg_128>;
+def : BitConvert <v8bf16, v2i64, VReg_128>;
+
+
// 160-bit bitcast
def : BitConvert <v5i32, v5f32, SReg_160>;
def : BitConvert <v5f32, v5i32, SReg_160>;
@@ -1697,6 +1728,31 @@ def : BitConvert <v4i64, v16i16, VReg_256>;
def : BitConvert <v4f64, v16f16, VReg_256>;
def : BitConvert <v4f64, v16i16, VReg_256>;
+
+def : BitConvert <v8i32, v16bf16, VReg_256>;
+def : BitConvert <v16bf16, v8i32, VReg_256>;
+def : BitConvert <v8f32, v16bf16, VReg_256>;
+def : BitConvert <v16bf16, v8f32, VReg_256>;
+def : BitConvert <v4i64, v16bf16, VReg_256>;
+def : BitConvert <v16bf16, v4i64, VReg_256>;
+def : BitConvert <v4f64, v16bf16, VReg_256>;
+def : BitConvert <v16bf16, v4f64, VReg_256>;
+
+
+
+def : BitConvert <v16i16, v16bf16, SReg_256>;
+def : BitConvert <v16bf16, v16i16, SReg_256>;
+def : BitConvert <v16i16, v16bf16, VReg_256>;
+def : BitConvert <v16bf16, v16i16, VReg_256>;
+
+def : BitConvert <v16f16, v16bf16, SReg_256>;
+def : BitConvert <v16bf16, v16f16, SReg_256>;
+def : BitConvert <v16f16, v16bf16, VReg_256>;
+def : BitConvert <v16bf16, v16f16, VReg_256>;
+
+
+
+
// 288-bit bitcast
def : BitConvert <v9i32, v9f32, SReg_288>;
def : BitConvert <v9f32, v9i32, SReg_288>;
@@ -3797,6 +3853,18 @@ def G_AMDGPU_S_BUFFER_LOAD : AMDGPUGenericInstruction {
let mayStore = 0;
}
+def G_AMDGPU_S_MUL_U64_U32 : AMDGPUGenericInstruction {
+ let OutOperandList = (outs type0:$dst);
+ let InOperandList = (ins type0:$src0, type0:$src1);
+ let hasSideEffects = 0;
+}
+
+def G_AMDGPU_S_MUL_I64_I32 : AMDGPUGenericInstruction {
+ let OutOperandList = (outs type0:$dst);
+ let InOperandList = (ins type0:$src0, type0:$src1);
+ let hasSideEffects = 0;
+}
+
// This is equivalent to the G_INTRINSIC*, but the operands may have
// been legalized depending on the subtarget requirements.
def G_AMDGPU_INTRIN_IMAGE_LOAD : AMDGPUGenericInstruction {
diff --git a/llvm/lib/Target/AMDGPU/SIMemoryLegalizer.cpp b/llvm/lib/Target/AMDGPU/SIMemoryLegalizer.cpp
index 10ec54d3317f..6d749ad1ad24 100644
--- a/llvm/lib/Target/AMDGPU/SIMemoryLegalizer.cpp
+++ b/llvm/lib/Target/AMDGPU/SIMemoryLegalizer.cpp
@@ -578,6 +578,14 @@ public:
bool IsNonTemporal) const override;
};
+class SIGfx12CacheControl : public SIGfx11CacheControl {
+public:
+ SIGfx12CacheControl(const GCNSubtarget &ST) : SIGfx11CacheControl(ST) {}
+
+ bool insertAcquire(MachineBasicBlock::iterator &MI, SIAtomicScope Scope,
+ SIAtomicAddrSpace AddrSpace, Position Pos) const override;
+};
+
class SIMemoryLegalizer final : public MachineFunctionPass {
private:
@@ -857,7 +865,9 @@ std::unique_ptr<SICacheControl> SICacheControl::create(const GCNSubtarget &ST) {
return std::make_unique<SIGfx7CacheControl>(ST);
if (Generation < AMDGPUSubtarget::GFX11)
return std::make_unique<SIGfx10CacheControl>(ST);
- return std::make_unique<SIGfx11CacheControl>(ST);
+ if (Generation < AMDGPUSubtarget::GFX12)
+ return std::make_unique<SIGfx11CacheControl>(ST);
+ return std::make_unique<SIGfx12CacheControl>(ST);
}
bool SIGfx6CacheControl::enableLoadCacheBypass(
@@ -1423,7 +1433,7 @@ bool SIGfx90ACacheControl::insertRelease(MachineBasicBlock::iterator &MI,
bool Changed = false;
MachineBasicBlock &MBB = *MI->getParent();
- DebugLoc DL = MI->getDebugLoc();
+ const DebugLoc &DL = MI->getDebugLoc();
if (Pos == Position::AFTER)
++MI;
@@ -2132,6 +2142,62 @@ bool SIGfx11CacheControl::enableVolatileAndOrNonTemporal(
return Changed;
}
+bool SIGfx12CacheControl::insertAcquire(MachineBasicBlock::iterator &MI,
+ SIAtomicScope Scope,
+ SIAtomicAddrSpace AddrSpace,
+ Position Pos) const {
+ if (!InsertCacheInv)
+ return false;
+
+ MachineBasicBlock &MBB = *MI->getParent();
+ DebugLoc DL = MI->getDebugLoc();
+
+ /// The scratch address space does not need the global memory cache
+ /// to be flushed as all memory operations by the same thread are
+ /// sequentially consistent, and no other thread can access scratch
+ /// memory.
+
+ /// Other address spaces do not have a cache.
+ if ((AddrSpace & SIAtomicAddrSpace::GLOBAL) == SIAtomicAddrSpace::NONE)
+ return false;
+
+ AMDGPU::CPol::CPol ScopeImm = AMDGPU::CPol::SCOPE_DEV;
+ switch (Scope) {
+ case SIAtomicScope::SYSTEM:
+ ScopeImm = AMDGPU::CPol::SCOPE_SYS;
+ break;
+ case SIAtomicScope::AGENT:
+ ScopeImm = AMDGPU::CPol::SCOPE_DEV;
+ break;
+ case SIAtomicScope::WORKGROUP:
+ // In WGP mode the waves of a work-group can be executing on either CU of
+ // the WGP. Therefore we need to invalidate the L0 which is per CU.
+ // Otherwise in CU mode all waves of a work-group are on the same CU, and so
+ // the L0 does not need to be invalidated.
+ if (ST.isCuModeEnabled())
+ return false;
+
+ ScopeImm = AMDGPU::CPol::SCOPE_SE;
+ break;
+ case SIAtomicScope::WAVEFRONT:
+ case SIAtomicScope::SINGLETHREAD:
+ // No cache to invalidate.
+ return false;
+ default:
+ llvm_unreachable("Unsupported synchronization scope");
+ }
+
+ if (Pos == Position::AFTER)
+ ++MI;
+
+ BuildMI(MBB, MI, DL, TII->get(AMDGPU::GLOBAL_INV)).addImm(ScopeImm);
+
+ if (Pos == Position::AFTER)
+ --MI;
+
+ return true;
+}
+
bool SIMemoryLegalizer::removeAtomicPseudoMIs() {
if (AtomicPseudoMIs.empty())
return false;
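
The scope handling in SIGfx12CacheControl::insertAcquire above boils down to a small mapping from atomic scope to the GLOBAL_INV cache-policy operand. A standalone sketch of that mapping (illustrative only; the enum values below are stand-ins, not the real AMDGPU::CPol encodings):

#include <cstdio>
#include <optional>

// Scope -> GLOBAL_INV cache-policy mapping (values are illustrative only).
enum class Scope { System, Agent, Workgroup, Wavefront, SingleThread };
enum class CPolScope { SYS, DEV, SE };

static std::optional<CPolScope> acquireInvScope(Scope S, bool CuMode) {
  switch (S) {
  case Scope::System:
    return CPolScope::SYS;
  case Scope::Agent:
    return CPolScope::DEV;
  case Scope::Workgroup:
    // In CU mode all waves of a work-group share one L0, so nothing to do.
    return CuMode ? std::nullopt : std::optional<CPolScope>(CPolScope::SE);
  case Scope::Wavefront:
  case Scope::SingleThread:
    return std::nullopt; // no cache to invalidate
  }
  return std::nullopt;
}

int main() {
  if (auto S = acquireInvScope(Scope::Workgroup, /*CuMode=*/false))
    std::printf("emit GLOBAL_INV with scope %d\n", static_cast<int>(*S));
  return 0;
}
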
diff --git a/llvm/lib/Target/AMDGPU/SOPInstructions.td b/llvm/lib/Target/AMDGPU/SOPInstructions.td
index c9687ac368d3..5f021307e18e 100644
--- a/llvm/lib/Target/AMDGPU/SOPInstructions.td
+++ b/llvm/lib/Target/AMDGPU/SOPInstructions.td
@@ -673,6 +673,16 @@ let SubtargetPredicate = isGFX12Plus in {
let isCommutable = 1;
}
+ // The high 32 bits of the inputs contain the sign-extension bits.
+ def S_MUL_I64_I32_PSEUDO : SPseudoInstSI <
+ (outs SReg_64:$sdst), (ins SSrc_b64:$src0, SSrc_b64:$src1)
+ >;
+
+ // The high 32 bits of the inputs are zero.
+ def S_MUL_U64_U32_PSEUDO : SPseudoInstSI <
+ (outs SReg_64:$sdst), (ins SSrc_b64:$src0, SSrc_b64:$src1)
+ >;
+
} // End SubtargetPredicate = isGFX12Plus
let Uses = [SCC] in {
diff --git a/llvm/lib/Target/AMDGPU/VOP2Instructions.td b/llvm/lib/Target/AMDGPU/VOP2Instructions.td
index 0aa62ea77b11..ecee61daa1c8 100644
--- a/llvm/lib/Target/AMDGPU/VOP2Instructions.td
+++ b/llvm/lib/Target/AMDGPU/VOP2Instructions.td
@@ -1300,7 +1300,7 @@ class VOP2_DPP8<bits<6> op, VOP2_Pseudo ps,
let OtherPredicates = ps.OtherPredicates;
}
-
+
class VOP2_DPP8_Gen<bits<6> op, VOP2_Pseudo ps, GFXGen Gen,
VOPProfile p = ps.Pfl> :
VOP2_DPP8<op, ps, p> {
diff --git a/llvm/lib/Target/ARM/ARMLegalizerInfo.h b/llvm/lib/Target/ARM/ARMLegalizerInfo.h
index 3636cc6402b8..d6ce4eb1055b 100644
--- a/llvm/lib/Target/ARM/ARMLegalizerInfo.h
+++ b/llvm/lib/Target/ARM/ARMLegalizerInfo.h
@@ -23,7 +23,6 @@ namespace llvm {
class ARMSubtarget;
-/// This class provides the information for the target register banks.
class ARMLegalizerInfo : public LegalizerInfo {
public:
ARMLegalizerInfo(const ARMSubtarget &ST);
diff --git a/llvm/lib/Target/LoongArch/LoongArchISelLowering.cpp b/llvm/lib/Target/LoongArch/LoongArchISelLowering.cpp
index 2cfb2c1d7811..3e75b9fa5230 100644
--- a/llvm/lib/Target/LoongArch/LoongArchISelLowering.cpp
+++ b/llvm/lib/Target/LoongArch/LoongArchISelLowering.cpp
@@ -762,12 +762,13 @@ static SDValue getTargetNode(JumpTableSDNode *N, SDLoc DL, EVT Ty,
template <class NodeTy>
SDValue LoongArchTargetLowering::getAddr(NodeTy *N, SelectionDAG &DAG,
+ CodeModel::Model M,
bool IsLocal) const {
SDLoc DL(N);
EVT Ty = getPointerTy(DAG.getDataLayout());
SDValue Addr = getTargetNode(N, DL, Ty, DAG, 0);
- switch (DAG.getTarget().getCodeModel()) {
+ switch (M) {
default:
report_fatal_error("Unsupported code model");
@@ -808,24 +809,35 @@ SDValue LoongArchTargetLowering::getAddr(NodeTy *N, SelectionDAG &DAG,
SDValue LoongArchTargetLowering::lowerBlockAddress(SDValue Op,
SelectionDAG &DAG) const {
- return getAddr(cast<BlockAddressSDNode>(Op), DAG);
+ return getAddr(cast<BlockAddressSDNode>(Op), DAG,
+ DAG.getTarget().getCodeModel());
}
SDValue LoongArchTargetLowering::lowerJumpTable(SDValue Op,
SelectionDAG &DAG) const {
- return getAddr(cast<JumpTableSDNode>(Op), DAG);
+ return getAddr(cast<JumpTableSDNode>(Op), DAG,
+ DAG.getTarget().getCodeModel());
}
SDValue LoongArchTargetLowering::lowerConstantPool(SDValue Op,
SelectionDAG &DAG) const {
- return getAddr(cast<ConstantPoolSDNode>(Op), DAG);
+ return getAddr(cast<ConstantPoolSDNode>(Op), DAG,
+ DAG.getTarget().getCodeModel());
}
SDValue LoongArchTargetLowering::lowerGlobalAddress(SDValue Op,
SelectionDAG &DAG) const {
GlobalAddressSDNode *N = cast<GlobalAddressSDNode>(Op);
assert(N->getOffset() == 0 && "unexpected offset in global node");
- return getAddr(N, DAG, N->getGlobal()->isDSOLocal());
+ auto CM = DAG.getTarget().getCodeModel();
+ const GlobalValue *GV = N->getGlobal();
+
+ if (GV->isDSOLocal() && isa<GlobalVariable>(GV)) {
+ if (auto GCM = dyn_cast<GlobalVariable>(GV)->getCodeModel())
+ CM = *GCM;
+ }
+
+ return getAddr(N, DAG, CM, GV->isDSOLocal());
}
SDValue LoongArchTargetLowering::getStaticTLSAddr(GlobalAddressSDNode *N,
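
The LoongArch change above lets a dso_local global variable carry its own code model, which then overrides the target's code model when its address is materialized. A minimal illustrative model of the selection order (the struct below is a hypothetical stand-in for the GlobalValue/GlobalVariable queries, not the LLVM API):

#include <cstdio>
#include <optional>

enum class CodeModel { Small, Medium, Large };

struct GlobalDesc {                     // stand-in for GlobalValue queries
  bool IsDSOLocal;
  bool IsVariable;
  std::optional<CodeModel> PerGlobalCM; // GlobalVariable::getCodeModel()
};

// A dso_local global variable with an explicit per-variable code model wins;
// everything else keeps the target's code model.
static CodeModel pickCodeModel(const GlobalDesc &GV, CodeModel TargetCM) {
  if (GV.IsDSOLocal && GV.IsVariable && GV.PerGlobalCM)
    return *GV.PerGlobalCM;
  return TargetCM;
}

int main() {
  GlobalDesc G{true, true, CodeModel::Large};
  std::printf("%d\n", static_cast<int>(pickCodeModel(G, CodeModel::Medium)));
  return 0;
}
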
diff --git a/llvm/lib/Target/LoongArch/LoongArchISelLowering.h b/llvm/lib/Target/LoongArch/LoongArchISelLowering.h
index 2875aa82e424..72182623b2c3 100644
--- a/llvm/lib/Target/LoongArch/LoongArchISelLowering.h
+++ b/llvm/lib/Target/LoongArch/LoongArchISelLowering.h
@@ -254,7 +254,8 @@ private:
LoongArchCCAssignFn Fn) const;
template <class NodeTy>
- SDValue getAddr(NodeTy *N, SelectionDAG &DAG, bool IsLocal = true) const;
+ SDValue getAddr(NodeTy *N, SelectionDAG &DAG, CodeModel::Model M,
+ bool IsLocal = true) const;
SDValue getStaticTLSAddr(GlobalAddressSDNode *N, SelectionDAG &DAG,
unsigned Opc, bool Large = false) const;
SDValue getDynamicTLSAddr(GlobalAddressSDNode *N, SelectionDAG &DAG,
diff --git a/llvm/lib/Target/M68k/GISel/M68kLegalizerInfo.h b/llvm/lib/Target/M68k/GISel/M68kLegalizerInfo.h
index a10401ed1a9a..cbe30ec494c9 100644
--- a/llvm/lib/Target/M68k/GISel/M68kLegalizerInfo.h
+++ b/llvm/lib/Target/M68k/GISel/M68kLegalizerInfo.h
@@ -20,7 +20,6 @@ namespace llvm {
class M68kSubtarget;
-/// This struct provides the information for the target register banks.
struct M68kLegalizerInfo : public LegalizerInfo {
public:
M68kLegalizerInfo(const M68kSubtarget &ST);
diff --git a/llvm/lib/Target/PowerPC/PPCRegisterInfo.td b/llvm/lib/Target/PowerPC/PPCRegisterInfo.td
index 375e63654db1..8a37e40414ee 100644
--- a/llvm/lib/Target/PowerPC/PPCRegisterInfo.td
+++ b/llvm/lib/Target/PowerPC/PPCRegisterInfo.td
@@ -270,12 +270,15 @@ def CR7 : CR<7, "cr7", [CR7LT, CR7GT, CR7EQ, CR7UN]>, DwarfRegNum<[75, 75]>;
// Link register
def LR : SPR<8, "lr">, DwarfRegNum<[-2, 65]>;
-//let Aliases = [LR] in
-def LR8 : SPR<8, "lr">, DwarfRegNum<[65, -2]>;
+def LR8 : SPR<8, "lr">, DwarfRegNum<[65, -2]> {
+ let Aliases = [LR];
+}
// Count register
def CTR : SPR<9, "ctr">, DwarfRegNum<[-2, 66]>;
-def CTR8 : SPR<9, "ctr">, DwarfRegNum<[66, -2]>;
+def CTR8 : SPR<9, "ctr">, DwarfRegNum<[66, -2]> {
+ let Aliases = [CTR];
+}
// VRsave register
def VRSAVE: SPR<256, "vrsave">, DwarfRegNum<[109]>;
diff --git a/llvm/lib/Target/RISCV/AsmParser/RISCVAsmParser.cpp b/llvm/lib/Target/RISCV/AsmParser/RISCVAsmParser.cpp
index 23c2b63c8c83..d616aaeddf41 100644
--- a/llvm/lib/Target/RISCV/AsmParser/RISCVAsmParser.cpp
+++ b/llvm/lib/Target/RISCV/AsmParser/RISCVAsmParser.cpp
@@ -2035,9 +2035,8 @@ ParseStatus RISCVAsmParser::parseCallSymbol(OperandVector &Operands) {
SMLoc E = SMLoc::getFromPointer(S.getPointer() + Identifier.size());
- RISCVMCExpr::VariantKind Kind = RISCVMCExpr::VK_RISCV_CALL;
- if (Identifier.consume_back("@plt"))
- Kind = RISCVMCExpr::VK_RISCV_CALL_PLT;
+ RISCVMCExpr::VariantKind Kind = RISCVMCExpr::VK_RISCV_CALL_PLT;
+ (void)Identifier.consume_back("@plt");
MCSymbol *Sym = getContext().getOrCreateSymbol(Identifier);
Res = MCSymbolRefExpr::create(Sym, MCSymbolRefExpr::VK_None, getContext());
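
With the parser change above, `@plt` becomes a no-op spelling: the variant kind is always VK_RISCV_CALL_PLT and a trailing `@plt` is simply stripped from the identifier. A small standalone sketch of that normalization (not the MC parser itself):

#include <cassert>
#include <string>

// Strip a trailing "@plt" from a call-target spelling; the relocation variant
// is unconditionally the PLT-style one regardless of the suffix.
static std::string normalizeCallSymbol(std::string Name) {
  const std::string Suffix = "@plt";
  if (Name.size() >= Suffix.size() &&
      Name.compare(Name.size() - Suffix.size(), Suffix.size(), Suffix) == 0)
    Name.erase(Name.size() - Suffix.size());
  return Name; // variant kind: VK_RISCV_CALL_PLT in all cases
}

int main() {
  assert(normalizeCallSymbol("foo@plt") == "foo");
  assert(normalizeCallSymbol("foo") == "foo");
  return 0;
}
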
diff --git a/llvm/lib/Target/RISCV/GISel/RISCVCallLowering.cpp b/llvm/lib/Target/RISCV/GISel/RISCVCallLowering.cpp
index 50ed85acdec0..697ad476ff8c 100644
--- a/llvm/lib/Target/RISCV/GISel/RISCVCallLowering.cpp
+++ b/llvm/lib/Target/RISCV/GISel/RISCVCallLowering.cpp
@@ -579,7 +579,7 @@ bool RISCVCallLowering::lowerCall(MachineIRBuilder &MIRBuilder,
// Select the recommended relocation type R_RISCV_CALL_PLT.
if (!Info.Callee.isReg())
- Info.Callee.setTargetFlags(RISCVII::MO_PLT);
+ Info.Callee.setTargetFlags(RISCVII::MO_CALL);
MachineInstrBuilder Call =
MIRBuilder
diff --git a/llvm/lib/Target/RISCV/GISel/RISCVLegalizerInfo.cpp b/llvm/lib/Target/RISCV/GISel/RISCVLegalizerInfo.cpp
index 61bae5864925..ab8070772fe5 100644
--- a/llvm/lib/Target/RISCV/GISel/RISCVLegalizerInfo.cpp
+++ b/llvm/lib/Target/RISCV/GISel/RISCVLegalizerInfo.cpp
@@ -113,7 +113,7 @@ RISCVLegalizerInfo::RISCVLegalizerInfo(const RISCVSubtarget &ST)
getActionDefinitionsBuilder(G_BITREVERSE).maxScalar(0, sXLen).lower();
auto &BSWAPActions = getActionDefinitionsBuilder(G_BSWAP);
- if (ST.hasStdExtZbb())
+ if (ST.hasStdExtZbb() || ST.hasStdExtZbkb())
BSWAPActions.legalFor({sXLen}).clampScalar(0, sXLen, sXLen);
else
BSWAPActions.maxScalar(0, sXLen).lower();
diff --git a/llvm/lib/Target/RISCV/GISel/RISCVLegalizerInfo.h b/llvm/lib/Target/RISCV/GISel/RISCVLegalizerInfo.h
index 4335bd0cbbff..f3ec6be16734 100644
--- a/llvm/lib/Target/RISCV/GISel/RISCVLegalizerInfo.h
+++ b/llvm/lib/Target/RISCV/GISel/RISCVLegalizerInfo.h
@@ -21,7 +21,6 @@ class GISelChangeObserver;
class MachineIRBuilder;
class RISCVSubtarget;
-/// This class provides the information for the target register banks.
class RISCVLegalizerInfo : public LegalizerInfo {
const RISCVSubtarget &STI;
const unsigned XLen;
diff --git a/llvm/lib/Target/RISCV/MCTargetDesc/RISCVBaseInfo.h b/llvm/lib/Target/RISCV/MCTargetDesc/RISCVBaseInfo.h
index c32210fc1419..433e2e6f80bd 100644
--- a/llvm/lib/Target/RISCV/MCTargetDesc/RISCVBaseInfo.h
+++ b/llvm/lib/Target/RISCV/MCTargetDesc/RISCVBaseInfo.h
@@ -254,7 +254,6 @@ static inline bool isFirstDefTiedToFirstUse(const MCInstrDesc &Desc) {
enum {
MO_None = 0,
MO_CALL = 1,
- MO_PLT = 2,
MO_LO = 3,
MO_HI = 4,
MO_PCREL_LO = 5,
diff --git a/llvm/lib/Target/RISCV/MCTargetDesc/RISCVMCExpr.cpp b/llvm/lib/Target/RISCV/MCTargetDesc/RISCVMCExpr.cpp
index d67351102bc1..64ddae61b1bc 100644
--- a/llvm/lib/Target/RISCV/MCTargetDesc/RISCVMCExpr.cpp
+++ b/llvm/lib/Target/RISCV/MCTargetDesc/RISCVMCExpr.cpp
@@ -41,8 +41,6 @@ void RISCVMCExpr::printImpl(raw_ostream &OS, const MCAsmInfo *MAI) const {
if (HasVariant)
OS << '%' << getVariantKindName(getKind()) << '(';
Expr->print(OS, MAI);
- if (Kind == VK_RISCV_CALL_PLT)
- OS << "@plt";
if (HasVariant)
OS << ')';
}
diff --git a/llvm/lib/Target/RISCV/RISCVAsmPrinter.cpp b/llvm/lib/Target/RISCV/RISCVAsmPrinter.cpp
index 0fd514fa87cd..f2bd5118fc07 100644
--- a/llvm/lib/Target/RISCV/RISCVAsmPrinter.cpp
+++ b/llvm/lib/Target/RISCV/RISCVAsmPrinter.cpp
@@ -747,9 +747,6 @@ static MCOperand lowerSymbolOperand(const MachineOperand &MO, MCSymbol *Sym,
Kind = RISCVMCExpr::VK_RISCV_None;
break;
case RISCVII::MO_CALL:
- Kind = RISCVMCExpr::VK_RISCV_CALL;
- break;
- case RISCVII::MO_PLT:
Kind = RISCVMCExpr::VK_RISCV_CALL_PLT;
break;
case RISCVII::MO_LO:
diff --git a/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.cpp b/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.cpp
index 7257c2e8fe1f..0d8688ba2eae 100644
--- a/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.cpp
+++ b/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.cpp
@@ -763,8 +763,8 @@ bool RISCVDAGToDAGISel::tryIndexedLoad(SDNode *Node) {
return false;
EVT LoadVT = Ld->getMemoryVT();
- assert(AM == ISD::PRE_INC ||
- AM == ISD::POST_INC && "Unexpected addressing mode");
+ assert((AM == ISD::PRE_INC || AM == ISD::POST_INC) &&
+ "Unexpected addressing mode");
bool IsPre = AM == ISD::PRE_INC;
bool IsPost = AM == ISD::POST_INC;
int64_t Offset = C->getSExtValue();
diff --git a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
index bc4b2b022c0a..79c16cf4c4c3 100644
--- a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
+++ b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
@@ -18215,20 +18215,9 @@ SDValue RISCVTargetLowering::LowerCall(CallLoweringInfo &CLI,
// split it and then direct call can be matched by PseudoCALL.
if (GlobalAddressSDNode *S = dyn_cast<GlobalAddressSDNode>(Callee)) {
const GlobalValue *GV = S->getGlobal();
-
- unsigned OpFlags = RISCVII::MO_CALL;
- if (!getTargetMachine().shouldAssumeDSOLocal(*GV->getParent(), GV))
- OpFlags = RISCVII::MO_PLT;
-
- Callee = DAG.getTargetGlobalAddress(GV, DL, PtrVT, 0, OpFlags);
+ Callee = DAG.getTargetGlobalAddress(GV, DL, PtrVT, 0, RISCVII::MO_CALL);
} else if (ExternalSymbolSDNode *S = dyn_cast<ExternalSymbolSDNode>(Callee)) {
- unsigned OpFlags = RISCVII::MO_CALL;
-
- if (!getTargetMachine().shouldAssumeDSOLocal(*MF.getFunction().getParent(),
- nullptr))
- OpFlags = RISCVII::MO_PLT;
-
- Callee = DAG.getTargetExternalSymbol(S->getSymbol(), PtrVT, OpFlags);
+ Callee = DAG.getTargetExternalSymbol(S->getSymbol(), PtrVT, RISCVII::MO_CALL);
}
// The first call operand is the chain and the second is the target address.
diff --git a/llvm/lib/Target/RISCV/RISCVInsertVSETVLI.cpp b/llvm/lib/Target/RISCV/RISCVInsertVSETVLI.cpp
index 3400b24e0abb..e591aa935c0b 100644
--- a/llvm/lib/Target/RISCV/RISCVInsertVSETVLI.cpp
+++ b/llvm/lib/Target/RISCV/RISCVInsertVSETVLI.cpp
@@ -1381,6 +1381,11 @@ void RISCVInsertVSETVLI::doPRE(MachineBasicBlock &MBB) {
if (!UnavailablePred || !AvailableInfo.isValid())
return;
+ // If we don't know the exact VTYPE, we can't copy the vsetvli to the exit of
+ // the unavailable pred.
+ if (AvailableInfo.hasSEWLMULRatioOnly())
+ return;
+
// Critical edge - TODO: consider splitting?
if (UnavailablePred->succ_size() != 1)
return;
diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfo.cpp b/llvm/lib/Target/RISCV/RISCVInstrInfo.cpp
index cd98438eed88..7f6a045a7d04 100644
--- a/llvm/lib/Target/RISCV/RISCVInstrInfo.cpp
+++ b/llvm/lib/Target/RISCV/RISCVInstrInfo.cpp
@@ -2365,7 +2365,6 @@ RISCVInstrInfo::getSerializableDirectMachineOperandTargetFlags() const {
using namespace RISCVII;
static const std::pair<unsigned, const char *> TargetFlags[] = {
{MO_CALL, "riscv-call"},
- {MO_PLT, "riscv-plt"},
{MO_LO, "riscv-lo"},
{MO_HI, "riscv-hi"},
{MO_PCREL_LO, "riscv-pcrel-lo"},
diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfo.td b/llvm/lib/Target/RISCV/RISCVInstrInfo.td
index 35e8edf5d2fa..2f4744529469 100644
--- a/llvm/lib/Target/RISCV/RISCVInstrInfo.td
+++ b/llvm/lib/Target/RISCV/RISCVInstrInfo.td
@@ -729,22 +729,6 @@ def UNIMP : RVInstI<0b001, OPC_SYSTEM, (outs), (ins), "unimp", "">,
let imm12 = 0b110000000000;
}
-let Predicates = [HasStdExtZawrs] in {
-def WRS_NTO : RVInstI<0b000, OPC_SYSTEM, (outs), (ins), "wrs.nto", "">,
- Sched<[]> {
- let rs1 = 0;
- let rd = 0;
- let imm12 = 0b000000001101;
-}
-
-def WRS_STO : RVInstI<0b000, OPC_SYSTEM, (outs), (ins), "wrs.sto", "">,
- Sched<[]> {
- let rs1 = 0;
- let rd = 0;
- let imm12 = 0b000000011101;
-}
-} // Predicates = [HasStdExtZawrs]
-
} // hasSideEffects = 1, mayLoad = 0, mayStore = 0
def CSRRW : CSR_ir<0b001, "csrrw">;
@@ -2095,6 +2079,7 @@ include "RISCVInstrInfoM.td"
// Atomic
include "RISCVInstrInfoA.td"
+include "RISCVInstrInfoZa.td"
// Scalar FP
include "RISCVInstrInfoF.td"
diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfoA.td b/llvm/lib/Target/RISCV/RISCVInstrInfoA.td
index c8301fcc6b93..4d0567e41abc 100644
--- a/llvm/lib/Target/RISCV/RISCVInstrInfoA.td
+++ b/llvm/lib/Target/RISCV/RISCVInstrInfoA.td
@@ -7,8 +7,7 @@
//===----------------------------------------------------------------------===//
//
// This file describes the RISC-V instructions from the standard 'A', Atomic
-// Instructions extension as well as the experimental 'Zacas' (Atomic
-// Compare-and-Swap) extension.
+// Instructions extension.
//
//===----------------------------------------------------------------------===//
@@ -96,15 +95,6 @@ defm AMOMAXU_D : AMO_rr_aq_rl<0b11100, 0b011, "amomaxu.d">,
Sched<[WriteAtomicD, ReadAtomicDA, ReadAtomicDD]>;
} // Predicates = [HasStdExtA, IsRV64]
-let Predicates = [HasStdExtZacas] in {
-defm AMOCAS_W : AMO_rr_aq_rl<0b00101, 0b010, "amocas.w">;
-defm AMOCAS_D : AMO_rr_aq_rl<0b00101, 0b011, "amocas.d">;
-} // Predicates = [HasStdExtZacas]
-
-let Predicates = [HasStdExtZacas, IsRV64] in {
-defm AMOCAS_Q : AMO_rr_aq_rl<0b00101, 0b100, "amocas.q">;
-} // Predicates = [HasStdExtZacas, IsRV64]
-
//===----------------------------------------------------------------------===//
// Pseudo-instructions and codegen patterns
//===----------------------------------------------------------------------===//
diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td b/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td
index 30deeaa06448..fcb18b67623e 100644
--- a/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td
+++ b/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td
@@ -6719,12 +6719,14 @@ defm PseudoVMSET : VPseudoNullaryPseudoM<"VMXNOR">;
// 15.2. Vector mask population count vcpop
//===----------------------------------------------------------------------===//
+let IsSignExtendingOpW = 1 in
defm PseudoVCPOP: VPseudoVPOP_M;
//===----------------------------------------------------------------------===//
// 15.3. vfirst find-first-set mask bit
//===----------------------------------------------------------------------===//
+let IsSignExtendingOpW = 1 in
defm PseudoVFIRST: VPseudoV1ST_M;
//===----------------------------------------------------------------------===//
diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfoZa.td b/llvm/lib/Target/RISCV/RISCVInstrInfoZa.td
new file mode 100644
index 000000000000..a09f5715b24f
--- /dev/null
+++ b/llvm/lib/Target/RISCV/RISCVInstrInfoZa.td
@@ -0,0 +1,44 @@
+//===-- RISCVInstrInfoZa.td - RISC-V Atomic instructions ---*- tablegen -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file describes the RISC-V instructions from the standard atomic 'Za*'
+// extensions:
+// - Zawrs (v1.0) : Wait-on-Reservation-Set.
+// - Zacas (v1.0-rc1) : Atomic Compare-and-Swap.
+//
+//===----------------------------------------------------------------------===//
+
+//===----------------------------------------------------------------------===//
+// Zacas (Atomic Compare-and-Swap)
+//===----------------------------------------------------------------------===//
+
+let Predicates = [HasStdExtZacas] in {
+defm AMOCAS_W : AMO_rr_aq_rl<0b00101, 0b010, "amocas.w">;
+defm AMOCAS_D : AMO_rr_aq_rl<0b00101, 0b011, "amocas.d">;
+} // Predicates = [HasStdExtZacas]
+
+let Predicates = [HasStdExtZacas, IsRV64] in {
+defm AMOCAS_Q : AMO_rr_aq_rl<0b00101, 0b100, "amocas.q">;
+} // Predicates = [HasStdExtZacas, IsRV64]
+
+//===----------------------------------------------------------------------===//
+// Zawrs (Wait-on-Reservation-Set)
+//===----------------------------------------------------------------------===//
+
+let hasSideEffects = 1, mayLoad = 0, mayStore = 0 in
+class WRSInst<bits<12> funct12, string opcodestr>
+ : RVInstI<0b000, OPC_SYSTEM, (outs), (ins), opcodestr, ""> {
+ let rs1 = 0;
+ let rd = 0;
+ let imm12 = funct12;
+}
+
+let Predicates = [HasStdExtZawrs] in {
+def WRS_NTO : WRSInst<0b000000001101, "wrs.nto">, Sched<[]>;
+def WRS_STO : WRSInst<0b000000011101, "wrs.sto">, Sched<[]>;
+} // Predicates = [HasStdExtZawrs]
diff --git a/llvm/lib/Target/SPIRV/SPIRVPreLegalizer.cpp b/llvm/lib/Target/SPIRV/SPIRVPreLegalizer.cpp
index 429b08e199cd..cbc16fa98661 100644
--- a/llvm/lib/Target/SPIRV/SPIRVPreLegalizer.cpp
+++ b/llvm/lib/Target/SPIRV/SPIRVPreLegalizer.cpp
@@ -607,6 +607,40 @@ static void processSwitches(MachineFunction &MF, SPIRVGlobalRegistry *GR,
}
}
+static bool isImplicitFallthrough(MachineBasicBlock &MBB) {
+ if (MBB.empty())
+ return true;
+
+ // Branching SPIR-V intrinsics are not detected by this generic method.
+ // Thus, we can only trust a negative result.
+ if (!MBB.canFallThrough())
+ return false;
+
+ // Otherwise, we must manually check if we have a SPIR-V intrinsic which
+ // prevents an implicit fallthrough.
+ for (MachineBasicBlock::reverse_iterator It = MBB.rbegin(), E = MBB.rend();
+ It != E; ++It) {
+ if (isSpvIntrinsic(*It, Intrinsic::spv_switch))
+ return false;
+ }
+ return true;
+}
+
+static void removeImplicitFallthroughs(MachineFunction &MF,
+ MachineIRBuilder MIB) {
+ // It is valid for MachineBasicBlocks to not finish with a branch instruction.
+ // In such cases, they simply fall through to their immediate successor.
+ for (MachineBasicBlock &MBB : MF) {
+ if (!isImplicitFallthrough(MBB))
+ continue;
+
+ assert(std::distance(MBB.successors().begin(), MBB.successors().end()) ==
+ 1);
+ MIB.setInsertPt(MBB, MBB.end());
+ MIB.buildBr(**MBB.successors().begin());
+ }
+}
+
bool SPIRVPreLegalizer::runOnMachineFunction(MachineFunction &MF) {
// Initialize the type registry.
const SPIRVSubtarget &ST = MF.getSubtarget<SPIRVSubtarget>();
@@ -619,6 +653,7 @@ bool SPIRVPreLegalizer::runOnMachineFunction(MachineFunction &MF) {
generateAssignInstrs(MF, GR, MIB);
processSwitches(MF, GR, MIB);
processInstrsWithTypeFolding(MF, GR, MIB);
+ removeImplicitFallthroughs(MF, MIB);
return true;
}
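
removeImplicitFallthroughs above gives every fallthrough block an explicit unconditional branch to its single successor, so that each block ends in a terminator as SPIR-V requires. A toy CFG model of the invariant it establishes (the Block struct is an assumed stand-in, not MachineBasicBlock):

#include <cassert>
#include <vector>

struct Block {
  std::vector<int> Succs; // successor block ids
  bool HasTerminator;     // block already ends in an explicit branch
  int BranchTo;           // target of the inserted branch (-1 if none)
};

// Any block that would implicitly fall through gets an explicit branch to its
// single successor (the MIB.buildBr call in the pass above).
static void removeImplicitFallthroughs(std::vector<Block> &Blocks) {
  for (Block &B : Blocks) {
    if (B.HasTerminator)
      continue;
    assert(B.Succs.size() == 1 && "fallthrough block must have one successor");
    B.BranchTo = B.Succs.front();
    B.HasTerminator = true;
  }
}

int main() {
  std::vector<Block> CFG{{{1}, false, -1}, {{}, true, -1}};
  removeImplicitFallthroughs(CFG);
  assert(CFG[0].HasTerminator && CFG[0].BranchTo == 1);
  return 0;
}
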
diff --git a/llvm/lib/Target/SPIRV/SPIRVTargetMachine.cpp b/llvm/lib/Target/SPIRV/SPIRVTargetMachine.cpp
index 1503f263e42c..62d9090d289f 100644
--- a/llvm/lib/Target/SPIRV/SPIRVTargetMachine.cpp
+++ b/llvm/lib/Target/SPIRV/SPIRVTargetMachine.cpp
@@ -29,6 +29,7 @@
#include "llvm/MC/TargetRegistry.h"
#include "llvm/Pass.h"
#include "llvm/Target/TargetOptions.h"
+#include "llvm/Transforms/Utils.h"
#include <optional>
using namespace llvm;
@@ -151,6 +152,19 @@ TargetPassConfig *SPIRVTargetMachine::createPassConfig(PassManagerBase &PM) {
}
void SPIRVPassConfig::addIRPasses() {
+ if (TM.getSubtargetImpl()->isVulkanEnv()) {
+ // Once legalized, we need to structurize the CFG to follow the spec.
+ // This is done through the following 8 steps.
+ // TODO(#75801): add the remaining steps.
+
+ // 1. Simplify loops for subsequent transformations. After this step, loops
+ // have the following properties:
+ // - loops have a single entry edge (pre-header to loop header).
+ // - all loop exits are dominated by the loop pre-header.
+ // - loops have a single back-edge.
+ addPass(createLoopSimplifyPass());
+ }
+
TargetPassConfig::addIRPasses();
addPass(createSPIRVRegularizerPass());
addPass(createSPIRVPrepareFunctionsPass(TM));
diff --git a/llvm/lib/Target/SPIRV/SPIRVUtils.cpp b/llvm/lib/Target/SPIRV/SPIRVUtils.cpp
index 1c0e8d84e2fd..d4f7d8e89af5 100644
--- a/llvm/lib/Target/SPIRV/SPIRVUtils.cpp
+++ b/llvm/lib/Target/SPIRV/SPIRVUtils.cpp
@@ -228,8 +228,8 @@ uint64_t getIConstVal(Register ConstReg, const MachineRegisterInfo *MRI) {
return MI->getOperand(1).getCImm()->getValue().getZExtValue();
}
-bool isSpvIntrinsic(MachineInstr &MI, Intrinsic::ID IntrinsicID) {
- if (auto *GI = dyn_cast<GIntrinsic>(&MI))
+bool isSpvIntrinsic(const MachineInstr &MI, Intrinsic::ID IntrinsicID) {
+ if (const auto *GI = dyn_cast<GIntrinsic>(&MI))
return GI->is(IntrinsicID);
return false;
}
diff --git a/llvm/lib/Target/SPIRV/SPIRVUtils.h b/llvm/lib/Target/SPIRV/SPIRVUtils.h
index 30fae6c7de47..60742e2f2728 100644
--- a/llvm/lib/Target/SPIRV/SPIRVUtils.h
+++ b/llvm/lib/Target/SPIRV/SPIRVUtils.h
@@ -79,7 +79,7 @@ MachineInstr *getDefInstrMaybeConstant(Register &ConstReg,
uint64_t getIConstVal(Register ConstReg, const MachineRegisterInfo *MRI);
// Check if MI is a SPIR-V specific intrinsic call.
-bool isSpvIntrinsic(MachineInstr &MI, Intrinsic::ID IntrinsicID);
+bool isSpvIntrinsic(const MachineInstr &MI, Intrinsic::ID IntrinsicID);
// Get type of i-th operand of the metadata node.
Type *getMDOperandAsType(const MDNode *N, unsigned I);
diff --git a/llvm/lib/Target/X86/CMakeLists.txt b/llvm/lib/Target/X86/CMakeLists.txt
index 0b7a98ad6341..4d6300cad2a7 100644
--- a/llvm/lib/Target/X86/CMakeLists.txt
+++ b/llvm/lib/Target/X86/CMakeLists.txt
@@ -8,7 +8,7 @@ tablegen(LLVM X86GenAsmWriter1.inc -gen-asm-writer -asmwriternum=1)
tablegen(LLVM X86GenCallingConv.inc -gen-callingconv)
tablegen(LLVM X86GenDAGISel.inc -gen-dag-isel)
tablegen(LLVM X86GenDisassemblerTables.inc -gen-disassembler)
-tablegen(LLVM X86GenEVEX2VEXTables.inc -gen-x86-EVEX2VEX-tables)
+tablegen(LLVM X86GenCompressEVEXTables.inc -gen-x86-compress-evex-tables)
tablegen(LLVM X86GenExegesis.inc -gen-exegesis)
tablegen(LLVM X86GenFastISel.inc -gen-fast-isel)
tablegen(LLVM X86GenGlobalISel.inc -gen-global-isel)
@@ -61,7 +61,7 @@ set(sources
X86InstrFMA3Info.cpp
X86InstrFoldTables.cpp
X86InstrInfo.cpp
- X86EvexToVex.cpp
+ X86CompressEVEX.cpp
X86LoadValueInjectionLoadHardening.cpp
X86LoadValueInjectionRetHardening.cpp
X86MCInstLower.cpp
diff --git a/llvm/lib/Target/X86/GISel/X86LegalizerInfo.h b/llvm/lib/Target/X86/GISel/X86LegalizerInfo.h
index 1f69feceae27..12134f7b00f1 100644
--- a/llvm/lib/Target/X86/GISel/X86LegalizerInfo.h
+++ b/llvm/lib/Target/X86/GISel/X86LegalizerInfo.h
@@ -21,7 +21,6 @@ namespace llvm {
class X86Subtarget;
class X86TargetMachine;
-/// This class provides the information for the target register banks.
class X86LegalizerInfo : public LegalizerInfo {
private:
/// Keep a reference to the X86Subtarget around so that we can
diff --git a/llvm/lib/Target/X86/MCTargetDesc/X86BaseInfo.h b/llvm/lib/Target/X86/MCTargetDesc/X86BaseInfo.h
index e006dd877360..304b998e1f26 100644
--- a/llvm/lib/Target/X86/MCTargetDesc/X86BaseInfo.h
+++ b/llvm/lib/Target/X86/MCTargetDesc/X86BaseInfo.h
@@ -148,25 +148,21 @@ classifyFirstOpcodeInMacroFusion(unsigned Opcode) {
case X86::AND16ri8:
case X86::AND16rm:
case X86::AND16rr:
- case X86::AND16rr_REV:
case X86::AND32i32:
case X86::AND32ri:
case X86::AND32ri8:
case X86::AND32rm:
case X86::AND32rr:
- case X86::AND32rr_REV:
case X86::AND64i32:
case X86::AND64ri32:
case X86::AND64ri8:
case X86::AND64rm:
case X86::AND64rr:
- case X86::AND64rr_REV:
case X86::AND8i8:
case X86::AND8ri:
case X86::AND8ri8:
case X86::AND8rm:
case X86::AND8rr:
- case X86::AND8rr_REV:
return FirstMacroFusionInstKind::And;
// CMP
case X86::CMP16i16:
@@ -175,28 +171,24 @@ classifyFirstOpcodeInMacroFusion(unsigned Opcode) {
case X86::CMP16ri8:
case X86::CMP16rm:
case X86::CMP16rr:
- case X86::CMP16rr_REV:
case X86::CMP32i32:
case X86::CMP32mr:
case X86::CMP32ri:
case X86::CMP32ri8:
case X86::CMP32rm:
case X86::CMP32rr:
- case X86::CMP32rr_REV:
case X86::CMP64i32:
case X86::CMP64mr:
case X86::CMP64ri32:
case X86::CMP64ri8:
case X86::CMP64rm:
case X86::CMP64rr:
- case X86::CMP64rr_REV:
case X86::CMP8i8:
case X86::CMP8mr:
case X86::CMP8ri:
case X86::CMP8ri8:
case X86::CMP8rm:
case X86::CMP8rr:
- case X86::CMP8rr_REV:
return FirstMacroFusionInstKind::Cmp;
// ADD
case X86::ADD16i16:
@@ -204,50 +196,42 @@ classifyFirstOpcodeInMacroFusion(unsigned Opcode) {
case X86::ADD16ri8:
case X86::ADD16rm:
case X86::ADD16rr:
- case X86::ADD16rr_REV:
case X86::ADD32i32:
case X86::ADD32ri:
case X86::ADD32ri8:
case X86::ADD32rm:
case X86::ADD32rr:
- case X86::ADD32rr_REV:
case X86::ADD64i32:
case X86::ADD64ri32:
case X86::ADD64ri8:
case X86::ADD64rm:
case X86::ADD64rr:
- case X86::ADD64rr_REV:
case X86::ADD8i8:
case X86::ADD8ri:
case X86::ADD8ri8:
case X86::ADD8rm:
case X86::ADD8rr:
- case X86::ADD8rr_REV:
// SUB
case X86::SUB16i16:
case X86::SUB16ri:
case X86::SUB16ri8:
case X86::SUB16rm:
case X86::SUB16rr:
- case X86::SUB16rr_REV:
case X86::SUB32i32:
case X86::SUB32ri:
case X86::SUB32ri8:
case X86::SUB32rm:
case X86::SUB32rr:
- case X86::SUB32rr_REV:
case X86::SUB64i32:
case X86::SUB64ri32:
case X86::SUB64ri8:
case X86::SUB64rm:
case X86::SUB64rr:
- case X86::SUB64rr_REV:
case X86::SUB8i8:
case X86::SUB8ri:
case X86::SUB8ri8:
case X86::SUB8rm:
case X86::SUB8rr:
- case X86::SUB8rr_REV:
return FirstMacroFusionInstKind::AddSub;
// INC
case X86::INC16r:
diff --git a/llvm/lib/Target/X86/X86.h b/llvm/lib/Target/X86/X86.h
index 485afbc1dfbc..21623a805f55 100644
--- a/llvm/lib/Target/X86/X86.h
+++ b/llvm/lib/Target/X86/X86.h
@@ -131,9 +131,9 @@ FunctionPass *createX86FixupBWInsts();
/// to another, when profitable.
FunctionPass *createX86DomainReassignmentPass();
-/// This pass replaces EVEX encoded of AVX-512 instructiosn by VEX
-/// encoding when possible in order to reduce code size.
-FunctionPass *createX86EvexToVexInsts();
+/// This pass compresses instructions from EVEX space to legacy/VEX/EVEX space
+/// when possible in order to reduce code size or facilitate HW decoding.
+FunctionPass *createX86CompressEVEXPass();
/// This pass creates the thunks for the retpoline feature.
FunctionPass *createX86IndirectThunksPass();
@@ -167,7 +167,7 @@ FunctionPass *createX86SpeculativeLoadHardeningPass();
FunctionPass *createX86SpeculativeExecutionSideEffectSuppression();
FunctionPass *createX86ArgumentStackSlotPass();
-void initializeEvexToVexInstPassPass(PassRegistry &);
+void initializeCompressEVEXPassPass(PassRegistry &);
void initializeFPSPass(PassRegistry &);
void initializeFixupBWInstPassPass(PassRegistry &);
void initializeFixupLEAPassPass(PassRegistry &);
diff --git a/llvm/lib/Target/X86/X86EvexToVex.cpp b/llvm/lib/Target/X86/X86CompressEVEX.cpp
index c425c37b4186..b95baddd9dea 100644
--- a/llvm/lib/Target/X86/X86EvexToVex.cpp
+++ b/llvm/lib/Target/X86/X86CompressEVEX.cpp
@@ -1,5 +1,4 @@
-//===- X86EvexToVex.cpp ---------------------------------------------------===//
-// Compress EVEX instructions to VEX encoding when possible to reduce code size
+//===- X86CompressEVEX.cpp ------------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
@@ -7,17 +6,30 @@
//
//===----------------------------------------------------------------------===//
//
-/// \file
-/// This file defines the pass that goes over all AVX-512 instructions which
-/// are encoded using the EVEX prefix and if possible replaces them by their
-/// corresponding VEX encoding which is usually shorter by 2 bytes.
-/// EVEX instructions may be encoded via the VEX prefix when the AVX-512
-/// instruction has a corresponding AVX/AVX2 opcode, when vector length
-/// accessed by instruction is less than 512 bits and when it does not use
-// the xmm or the mask registers or xmm/ymm registers with indexes higher
-// than 15.
-/// The pass applies code reduction on the generated code for AVX-512 instrs.
+// This pass compresses instructions from EVEX space to legacy/VEX/EVEX space
+// when possible in order to reduce code size or facilitate HW decoding.
//
+// Possible compression:
+// a. AVX512 instruction (EVEX) -> AVX instruction (VEX)
+// b. Promoted instruction (EVEX) -> pre-promotion instruction (legacy/VEX)
+// c. NDD (EVEX) -> non-NDD (legacy)
+// d. NF_ND (EVEX) -> NF (EVEX)
+//
+// Compressions a, b and c never increase code size; in a few cases, such as
+// the promoted 16-bit CRC32, the compressed form is the same length as the
+// legacy version.
+//
+// legacy:
+// crc32w %si, %eax ## encoding: [0x66,0xf2,0x0f,0x38,0xf1,0xc6]
+// promoted:
+// crc32w %si, %eax ## encoding: [0x62,0xf4,0x7d,0x08,0xf1,0xc6]
+//
+// From a performance perspective, the two encodings should behave the same
+// (same uops and same EXE ports). From an FMV perspective, the older legacy
+// encoding is preferred because it can execute in more places (broader HW
+// install base), so we still do the compression.
+//
+// Compression d can help hardware decode (HW may skip reading the NDD
+// register) although the instruction length remains unchanged.
//===----------------------------------------------------------------------===//
#include "MCTargetDesc/X86BaseInfo.h"
@@ -38,37 +50,34 @@
using namespace llvm;
-// Including the generated EVEX2VEX tables.
-struct X86EvexToVexCompressTableEntry {
- uint16_t EvexOpc;
- uint16_t VexOpc;
+// Including the generated EVEX compression tables.
+struct X86CompressEVEXTableEntry {
+ uint16_t OldOpc;
+ uint16_t NewOpc;
- bool operator<(const X86EvexToVexCompressTableEntry &RHS) const {
- return EvexOpc < RHS.EvexOpc;
+ bool operator<(const X86CompressEVEXTableEntry &RHS) const {
+ return OldOpc < RHS.OldOpc;
}
- friend bool operator<(const X86EvexToVexCompressTableEntry &TE,
- unsigned Opc) {
- return TE.EvexOpc < Opc;
+ friend bool operator<(const X86CompressEVEXTableEntry &TE, unsigned Opc) {
+ return TE.OldOpc < Opc;
}
};
-#include "X86GenEVEX2VEXTables.inc"
+#include "X86GenCompressEVEXTables.inc"
-#define EVEX2VEX_DESC "Compressing EVEX instrs to VEX encoding when possible"
-#define EVEX2VEX_NAME "x86-evex-to-vex-compress"
+#define COMP_EVEX_DESC "Compressing EVEX instrs when possible"
+#define COMP_EVEX_NAME "x86-compress-evex"
-#define DEBUG_TYPE EVEX2VEX_NAME
+#define DEBUG_TYPE COMP_EVEX_NAME
namespace {
-class EvexToVexInstPass : public MachineFunctionPass {
+class CompressEVEXPass : public MachineFunctionPass {
public:
static char ID;
- EvexToVexInstPass() : MachineFunctionPass(ID) {}
- StringRef getPassName() const override { return EVEX2VEX_DESC; }
+ CompressEVEXPass() : MachineFunctionPass(ID) {}
+ StringRef getPassName() const override { return COMP_EVEX_DESC; }
- /// Loop over all of the basic blocks, replacing EVEX instructions
- /// by equivalent VEX instructions when possible for reducing code size.
bool runOnMachineFunction(MachineFunction &MF) override;
// This pass runs after regalloc and doesn't support VReg operands.
@@ -80,7 +89,7 @@ public:
} // end anonymous namespace
-char EvexToVexInstPass::ID = 0;
+char CompressEVEXPass::ID = 0;
static bool usesExtendedRegister(const MachineInstr &MI) {
auto isHiRegIdx = [](unsigned Reg) {
@@ -112,8 +121,8 @@ static bool usesExtendedRegister(const MachineInstr &MI) {
return false;
}
-static bool checkVEXInstPredicate(unsigned EvexOpc, const X86Subtarget &ST) {
- switch (EvexOpc) {
+static bool checkVEXInstPredicate(unsigned OldOpc, const X86Subtarget &ST) {
+ switch (OldOpc) {
default:
return true;
case X86::VCVTNEPS2BF16Z128rm:
@@ -151,15 +160,15 @@ static bool checkVEXInstPredicate(unsigned EvexOpc, const X86Subtarget &ST) {
}
// Do any custom cleanup needed to finalize the conversion.
-static bool performCustomAdjustments(MachineInstr &MI, unsigned VexOpc) {
- (void)VexOpc;
+static bool performCustomAdjustments(MachineInstr &MI, unsigned NewOpc) {
+ (void)NewOpc;
unsigned Opc = MI.getOpcode();
switch (Opc) {
case X86::VALIGNDZ128rri:
case X86::VALIGNDZ128rmi:
case X86::VALIGNQZ128rri:
case X86::VALIGNQZ128rmi: {
- assert((VexOpc == X86::VPALIGNRrri || VexOpc == X86::VPALIGNRrmi) &&
+ assert((NewOpc == X86::VPALIGNRrri || NewOpc == X86::VPALIGNRrmi) &&
"Unexpected new opcode!");
unsigned Scale =
(Opc == X86::VALIGNQZ128rri || Opc == X86::VALIGNQZ128rmi) ? 8 : 4;
@@ -175,8 +184,8 @@ static bool performCustomAdjustments(MachineInstr &MI, unsigned VexOpc) {
case X86::VSHUFI32X4Z256rri:
case X86::VSHUFI64X2Z256rmi:
case X86::VSHUFI64X2Z256rri: {
- assert((VexOpc == X86::VPERM2F128rr || VexOpc == X86::VPERM2I128rr ||
- VexOpc == X86::VPERM2F128rm || VexOpc == X86::VPERM2I128rm) &&
+ assert((NewOpc == X86::VPERM2F128rr || NewOpc == X86::VPERM2I128rr ||
+ NewOpc == X86::VPERM2F128rm || NewOpc == X86::VPERM2I128rm) &&
"Unexpected new opcode!");
MachineOperand &Imm = MI.getOperand(MI.getNumExplicitOperands() - 1);
int64_t ImmVal = Imm.getImm();
@@ -200,7 +209,7 @@ static bool performCustomAdjustments(MachineInstr &MI, unsigned VexOpc) {
case X86::VRNDSCALESDZm_Int:
case X86::VRNDSCALESSZr_Int:
case X86::VRNDSCALESSZm_Int:
- const MachineOperand &Imm = MI.getOperand(MI.getNumExplicitOperands()-1);
+ const MachineOperand &Imm = MI.getOperand(MI.getNumExplicitOperands() - 1);
int64_t ImmVal = Imm.getImm();
// Ensure that only bits 3:0 of the immediate are used.
if ((ImmVal & 0xf) != ImmVal)
@@ -211,86 +220,77 @@ static bool performCustomAdjustments(MachineInstr &MI, unsigned VexOpc) {
return true;
}
-// For EVEX instructions that can be encoded using VEX encoding
-// replace them by the VEX encoding in order to reduce size.
-static bool CompressEvexToVexImpl(MachineInstr &MI, const X86Subtarget &ST) {
- // VEX format.
- // # of bytes: 0,2,3 1 1 0,1 0,1,2,4 0,1
- // [Prefixes] [VEX] OPCODE ModR/M [SIB] [DISP] [IMM]
- //
- // EVEX format.
- // # of bytes: 4 1 1 1 4 / 1 1
- // [Prefixes] EVEX Opcode ModR/M [SIB] [Disp32] / [Disp8*N] [Immediate]
- const MCInstrDesc &Desc = MI.getDesc();
+static bool CompressEVEXImpl(MachineInstr &MI, const X86Subtarget &ST) {
+ uint64_t TSFlags = MI.getDesc().TSFlags;
// Check for EVEX instructions only.
- if ((Desc.TSFlags & X86II::EncodingMask) != X86II::EVEX)
+ if ((TSFlags & X86II::EncodingMask) != X86II::EVEX)
return false;
- // Check for EVEX instructions with mask or broadcast as in these cases
- // the EVEX prefix is needed in order to carry this information
- // thus preventing the transformation to VEX encoding.
- if (Desc.TSFlags & (X86II::EVEX_K | X86II::EVEX_B))
+ // Instructions with a mask or a 512-bit vector can't be converted to VEX.
+ if (TSFlags & (X86II::EVEX_K | X86II::EVEX_L2))
return false;
- // Check for EVEX instructions with L2 set. These instructions are 512-bits
- // and can't be converted to VEX.
- if (Desc.TSFlags & X86II::EVEX_L2)
+ // EVEX_B has several meanings.
+ // AVX512:
+ // register form: rounding control or SAE
+ // memory form: broadcast
+ //
+ // APX:
+ // MAP4: NDD
+ //
+ // For AVX512 cases, EVEX prefix is needed in order to carry this information
+ // thus preventing the transformation to VEX encoding.
+ if (TSFlags & X86II::EVEX_B)
return false;
- // Use the VEX.L bit to select the 128 or 256-bit table.
- ArrayRef<X86EvexToVexCompressTableEntry> Table =
- (Desc.TSFlags & X86II::VEX_L) ? ArrayRef(X86EvexToVex256CompressTable)
- : ArrayRef(X86EvexToVex128CompressTable);
+ ArrayRef<X86CompressEVEXTableEntry> Table = ArrayRef(X86CompressEVEXTable);
- unsigned EvexOpc = MI.getOpcode();
- const auto *I = llvm::lower_bound(Table, EvexOpc);
- if (I == Table.end() || I->EvexOpc != EvexOpc)
+ unsigned Opc = MI.getOpcode();
+ const auto *I = llvm::lower_bound(Table, Opc);
+ if (I == Table.end() || I->OldOpc != Opc)
return false;
- if (usesExtendedRegister(MI))
- return false;
- if (!checkVEXInstPredicate(EvexOpc, ST))
- return false;
- if (!performCustomAdjustments(MI, I->VexOpc))
+ if (usesExtendedRegister(MI) || !checkVEXInstPredicate(Opc, ST) ||
+ !performCustomAdjustments(MI, I->NewOpc))
return false;
- MI.setDesc(ST.getInstrInfo()->get(I->VexOpc));
- MI.setAsmPrinterFlag(X86::AC_EVEX_2_VEX);
+ const MCInstrDesc &NewDesc = ST.getInstrInfo()->get(I->NewOpc);
+ MI.setDesc(NewDesc);
+ uint64_t Encoding = NewDesc.TSFlags & X86II::EncodingMask;
+ auto AsmComment =
+ (Encoding == X86II::VEX) ? X86::AC_EVEX_2_VEX : X86::AC_EVEX_2_LEGACY;
+ MI.setAsmPrinterFlag(AsmComment);
return true;
}
-bool EvexToVexInstPass::runOnMachineFunction(MachineFunction &MF) {
+bool CompressEVEXPass::runOnMachineFunction(MachineFunction &MF) {
#ifndef NDEBUG
// Make sure the tables are sorted.
static std::atomic<bool> TableChecked(false);
if (!TableChecked.load(std::memory_order_relaxed)) {
- assert(llvm::is_sorted(X86EvexToVex128CompressTable) &&
- "X86EvexToVex128CompressTable is not sorted!");
- assert(llvm::is_sorted(X86EvexToVex256CompressTable) &&
- "X86EvexToVex256CompressTable is not sorted!");
+ assert(llvm::is_sorted(X86CompressEVEXTable) &&
+ "X86CompressEVEXTable is not sorted!");
TableChecked.store(true, std::memory_order_relaxed);
}
#endif
const X86Subtarget &ST = MF.getSubtarget<X86Subtarget>();
- if (!ST.hasAVX512())
+ if (!ST.hasAVX512() && !ST.hasEGPR() && !ST.hasNDD())
return false;
bool Changed = false;
- /// Go over all basic blocks in function and replace
- /// EVEX encoded instrs by VEX encoding when possible.
for (MachineBasicBlock &MBB : MF) {
// Traverse the basic block.
for (MachineInstr &MI : MBB)
- Changed |= CompressEvexToVexImpl(MI, ST);
+ Changed |= CompressEVEXImpl(MI, ST);
}
return Changed;
}
-INITIALIZE_PASS(EvexToVexInstPass, EVEX2VEX_NAME, EVEX2VEX_DESC, false, false)
+INITIALIZE_PASS(CompressEVEXPass, COMP_EVEX_NAME, COMP_EVEX_DESC, false, false)
-FunctionPass *llvm::createX86EvexToVexInsts() {
- return new EvexToVexInstPass();
+FunctionPass *llvm::createX86CompressEVEXPass() {
+ return new CompressEVEXPass();
}
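
The table lookup at the heart of CompressEVEXImpl above is a binary search in a single table sorted by the original (EVEX) opcode. A self-contained sketch of that lookup, using made-up opcode numbers rather than real X86 opcodes:

#include <algorithm>
#include <cstdint>
#include <cstdio>

struct CompressEntry {
  uint16_t OldOpc;
  uint16_t NewOpc;
  bool operator<(const CompressEntry &RHS) const { return OldOpc < RHS.OldOpc; }
};

// Sorted by OldOpc, as the generated table is required to be.
static const CompressEntry Table[] = {{100, 10}, {205, 20}, {310, 30}};

// Return the replacement opcode, or -1 if this opcode has no compressible form.
static int lookupNewOpcode(uint16_t Opc) {
  const auto *I = std::lower_bound(
      std::begin(Table), std::end(Table), Opc,
      [](const CompressEntry &E, uint16_t O) { return E.OldOpc < O; });
  if (I == std::end(Table) || I->OldOpc != Opc)
    return -1;
  return I->NewOpc;
}

int main() {
  std::printf("%d %d\n", lookupNewOpcode(205), lookupNewOpcode(206));
  return 0;
}
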
diff --git a/llvm/lib/Target/X86/X86FlagsCopyLowering.cpp b/llvm/lib/Target/X86/X86FlagsCopyLowering.cpp
index b13bf361ab79..aad839b83ee1 100644
--- a/llvm/lib/Target/X86/X86FlagsCopyLowering.cpp
+++ b/llvm/lib/Target/X86/X86FlagsCopyLowering.cpp
@@ -173,7 +173,6 @@ static FlagArithMnemonic getMnemonicFromOpcode(unsigned Opcode) {
#define LLVM_EXPAND_ADC_SBB_INSTR(MNEMONIC) \
LLVM_EXPAND_INSTR_SIZES(MNEMONIC, rr) \
- LLVM_EXPAND_INSTR_SIZES(MNEMONIC, rr_REV) \
LLVM_EXPAND_INSTR_SIZES(MNEMONIC, rm) \
LLVM_EXPAND_INSTR_SIZES(MNEMONIC, mr) \
case X86::MNEMONIC##8ri: \
diff --git a/llvm/lib/Target/X86/X86ISelLowering.cpp b/llvm/lib/Target/X86/X86ISelLowering.cpp
index 8c4f091c793d..c14e03197aae 100644
--- a/llvm/lib/Target/X86/X86ISelLowering.cpp
+++ b/llvm/lib/Target/X86/X86ISelLowering.cpp
@@ -57672,7 +57672,7 @@ X86TargetLowering::getStackProbeSize(const MachineFunction &MF) const {
}
Align X86TargetLowering::getPrefLoopAlignment(MachineLoop *ML) const {
- if (ML->isInnermost() &&
+ if (ML && ML->isInnermost() &&
ExperimentalPrefInnermostLoopAlignment.getNumOccurrences())
return Align(1ULL << ExperimentalPrefInnermostLoopAlignment);
return TargetLowering::getPrefLoopAlignment();
diff --git a/llvm/lib/Target/X86/X86InstrAVX512.td b/llvm/lib/Target/X86/X86InstrAVX512.td
index c3a673f97d34..fe7d90fbcdf7 100644
--- a/llvm/lib/Target/X86/X86InstrAVX512.td
+++ b/llvm/lib/Target/X86/X86InstrAVX512.td
@@ -448,7 +448,7 @@ multiclass vinsert_for_type<ValueType EltVT32, int Opcode128,
X86VectorVTInfo< 2, EltVT64, VR128X>,
X86VectorVTInfo< 4, EltVT64, VR256X>,
null_frag, vinsert128_insert, sched>,
- VEX_W1X, EVEX_V256;
+ EVEX_V256, REX_W;
// Even with DQI we'd like to only use these instructions for masking.
let Predicates = [HasDQI] in {
@@ -750,7 +750,7 @@ multiclass vextract_for_type<ValueType EltVT32, int Opcode128,
X86VectorVTInfo< 4, EltVT64, VR256X>,
X86VectorVTInfo< 2, EltVT64, VR128X>,
null_frag, vextract128_extract, SchedRR, SchedMR>,
- VEX_W1X, EVEX_V256, EVEX_CD8<64, CD8VT2>;
+ EVEX_V256, EVEX_CD8<64, CD8VT2>, REX_W;
// Even with DQI we'd like to only use these instructions for masking.
let Predicates = [HasDQI] in {
@@ -1161,7 +1161,7 @@ multiclass avx512_fp_broadcast_ss<bits<8> opc, string OpcodeStr,
defm VBROADCASTSS : avx512_fp_broadcast_ss<0x18, "vbroadcastss",
avx512vl_f32_info>;
defm VBROADCASTSD : avx512_fp_broadcast_sd<0x19, "vbroadcastsd",
- avx512vl_f64_info>, VEX_W1X;
+ avx512vl_f64_info>, REX_W;
multiclass avx512_int_broadcast_reg<bits<8> opc, SchedWrite SchedRR,
X86VectorVTInfo _, SDPatternOperator OpNode,
@@ -1267,7 +1267,7 @@ defm VPBROADCASTW : avx512_int_broadcast_rm_vl<0x79, "vpbroadcastw",
defm VPBROADCASTD : avx512_int_broadcast_rm_vl<0x58, "vpbroadcastd",
avx512vl_i32_info, HasAVX512, 1>;
defm VPBROADCASTQ : avx512_int_broadcast_rm_vl<0x59, "vpbroadcastq",
- avx512vl_i64_info, HasAVX512, 1>, VEX_W1X;
+ avx512vl_i64_info, HasAVX512, 1>, REX_W;
multiclass avx512_subvec_broadcast_rm<bits<8> opc, string OpcodeStr,
SDPatternOperator OpNode,
@@ -1460,11 +1460,11 @@ let Predicates = [HasBF16, HasVLX] in
let Predicates = [HasVLX, HasDQI] in {
defm VBROADCASTI64X2Z128 : avx512_subvec_broadcast_rm_dq<0x5a, "vbroadcasti64x2",
- X86SubVBroadcastld128, v4i64x_info, v2i64x_info>, VEX_W1X,
- EVEX_V256, EVEX_CD8<64, CD8VT2>;
+ X86SubVBroadcastld128, v4i64x_info, v2i64x_info>,
+ EVEX_V256, EVEX_CD8<64, CD8VT2>, REX_W;
defm VBROADCASTF64X2Z128 : avx512_subvec_broadcast_rm_dq<0x1a, "vbroadcastf64x2",
- X86SubVBroadcastld128, v4f64x_info, v2f64x_info>, VEX_W1X,
- EVEX_V256, EVEX_CD8<64, CD8VT2>;
+ X86SubVBroadcastld128, v4f64x_info, v2f64x_info>,
+ EVEX_V256, EVEX_CD8<64, CD8VT2>, REX_W;
// Patterns for selects of bitcasted operations.
def : Pat<(vselect_mask VK4WM:$mask,
@@ -3185,15 +3185,13 @@ defm : operation_subvector_mask_lowering<VK32, v32i1, VK64, v64i1>;
multiclass avx512_load<bits<8> opc, string OpcodeStr, string Name,
X86VectorVTInfo _, PatFrag ld_frag, PatFrag mload,
- X86SchedWriteMoveLS Sched, string EVEX2VEXOvrd,
- bit NoRMPattern = 0,
+ X86SchedWriteMoveLS Sched, bit NoRMPattern = 0,
SDPatternOperator SelectOprr = vselect> {
let hasSideEffects = 0 in {
let isMoveReg = 1 in
def rr : AVX512PI<opc, MRMSrcReg, (outs _.RC:$dst), (ins _.RC:$src),
!strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"), [],
- _.ExeDomain>, EVEX, Sched<[Sched.RR]>,
- EVEX2VEXOverride<EVEX2VEXOvrd#"rr">;
+ _.ExeDomain>, EVEX, Sched<[Sched.RR]>;
def rrkz : AVX512PI<opc, MRMSrcReg, (outs _.RC:$dst),
(ins _.KRCWM:$mask, _.RC:$src),
!strconcat(OpcodeStr, "\t{$src, ${dst} {${mask}} {z}|",
@@ -3209,8 +3207,7 @@ multiclass avx512_load<bits<8> opc, string OpcodeStr, string Name,
!if(NoRMPattern, [],
[(set _.RC:$dst,
(_.VT (ld_frag addr:$src)))]),
- _.ExeDomain>, EVEX, Sched<[Sched.RM]>,
- EVEX2VEXOverride<EVEX2VEXOvrd#"rm">;
+ _.ExeDomain>, EVEX, Sched<[Sched.RM]>;
let Constraints = "$src0 = $dst", isConvertibleToThreeAddress = 1 in {
def rrk : AVX512PI<opc, MRMSrcReg, (outs _.RC:$dst),
@@ -3253,53 +3250,48 @@ multiclass avx512_load<bits<8> opc, string OpcodeStr, string Name,
multiclass avx512_alignedload_vl<bits<8> opc, string OpcodeStr,
AVX512VLVectorVTInfo _, Predicate prd,
X86SchedWriteMoveLSWidths Sched,
- string EVEX2VEXOvrd, bit NoRMPattern = 0> {
+ bit NoRMPattern = 0> {
let Predicates = [prd] in
defm Z : avx512_load<opc, OpcodeStr, NAME, _.info512,
_.info512.AlignedLdFrag, masked_load_aligned,
- Sched.ZMM, "", NoRMPattern>, EVEX_V512;
+ Sched.ZMM, NoRMPattern>, EVEX_V512;
let Predicates = [prd, HasVLX] in {
defm Z256 : avx512_load<opc, OpcodeStr, NAME, _.info256,
_.info256.AlignedLdFrag, masked_load_aligned,
- Sched.YMM, EVEX2VEXOvrd#"Y", NoRMPattern>, EVEX_V256;
+ Sched.YMM, NoRMPattern>, EVEX_V256;
defm Z128 : avx512_load<opc, OpcodeStr, NAME, _.info128,
_.info128.AlignedLdFrag, masked_load_aligned,
- Sched.XMM, EVEX2VEXOvrd, NoRMPattern>, EVEX_V128;
+ Sched.XMM, NoRMPattern>, EVEX_V128;
}
}
multiclass avx512_load_vl<bits<8> opc, string OpcodeStr,
AVX512VLVectorVTInfo _, Predicate prd,
X86SchedWriteMoveLSWidths Sched,
- string EVEX2VEXOvrd, bit NoRMPattern = 0,
+ bit NoRMPattern = 0,
SDPatternOperator SelectOprr = vselect> {
let Predicates = [prd] in
defm Z : avx512_load<opc, OpcodeStr, NAME, _.info512, _.info512.LdFrag,
- masked_load, Sched.ZMM, "",
- NoRMPattern, SelectOprr>, EVEX_V512;
+ masked_load, Sched.ZMM, NoRMPattern, SelectOprr>, EVEX_V512;
let Predicates = [prd, HasVLX] in {
defm Z256 : avx512_load<opc, OpcodeStr, NAME, _.info256, _.info256.LdFrag,
- masked_load, Sched.YMM, EVEX2VEXOvrd#"Y",
- NoRMPattern, SelectOprr>, EVEX_V256;
+ masked_load, Sched.YMM, NoRMPattern, SelectOprr>, EVEX_V256;
defm Z128 : avx512_load<opc, OpcodeStr, NAME, _.info128, _.info128.LdFrag,
- masked_load, Sched.XMM, EVEX2VEXOvrd,
- NoRMPattern, SelectOprr>, EVEX_V128;
+ masked_load, Sched.XMM, NoRMPattern, SelectOprr>, EVEX_V128;
}
}
multiclass avx512_store<bits<8> opc, string OpcodeStr, string BaseName,
X86VectorVTInfo _, PatFrag st_frag, PatFrag mstore,
- X86SchedWriteMoveLS Sched, string EVEX2VEXOvrd,
- bit NoMRPattern = 0> {
+ X86SchedWriteMoveLS Sched, bit NoMRPattern = 0> {
let hasSideEffects = 0, isCodeGenOnly = 1, ForceDisassemble = 1 in {
let isMoveReg = 1 in
def rr_REV : AVX512PI<opc, MRMDestReg, (outs _.RC:$dst), (ins _.RC:$src),
OpcodeStr # "\t{$src, $dst|$dst, $src}",
[], _.ExeDomain>, EVEX,
- Sched<[Sched.RR]>,
- EVEX2VEXOverride<EVEX2VEXOvrd#"rr_REV">;
+ Sched<[Sched.RR]>;
def rrk_REV : AVX512PI<opc, MRMDestReg, (outs _.RC:$dst),
(ins _.KRCWM:$mask, _.RC:$src),
OpcodeStr # "\t{$src, ${dst} {${mask}}|"#
@@ -3319,8 +3311,7 @@ multiclass avx512_store<bits<8> opc, string OpcodeStr, string BaseName,
!strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
!if(NoMRPattern, [],
[(st_frag (_.VT _.RC:$src), addr:$dst)]),
- _.ExeDomain>, EVEX, Sched<[Sched.MR]>,
- EVEX2VEXOverride<EVEX2VEXOvrd#"mr">;
+ _.ExeDomain>, EVEX, Sched<[Sched.MR]>;
def mrk : AVX512PI<opc, MRMDestMem, (outs),
(ins _.MemOp:$dst, _.KRCWM:$mask, _.RC:$src),
OpcodeStr # "\t{$src, ${dst} {${mask}}|${dst} {${mask}}, $src}",
@@ -3344,102 +3335,92 @@ multiclass avx512_store<bits<8> opc, string OpcodeStr, string BaseName,
multiclass avx512_store_vl< bits<8> opc, string OpcodeStr,
AVX512VLVectorVTInfo _, Predicate prd,
X86SchedWriteMoveLSWidths Sched,
- string EVEX2VEXOvrd, bit NoMRPattern = 0> {
+ bit NoMRPattern = 0> {
let Predicates = [prd] in
defm Z : avx512_store<opc, OpcodeStr, NAME, _.info512, store,
- masked_store, Sched.ZMM, "",
- NoMRPattern>, EVEX_V512;
+ masked_store, Sched.ZMM, NoMRPattern>, EVEX_V512;
let Predicates = [prd, HasVLX] in {
defm Z256 : avx512_store<opc, OpcodeStr, NAME, _.info256, store,
- masked_store, Sched.YMM,
- EVEX2VEXOvrd#"Y", NoMRPattern>, EVEX_V256;
+ masked_store, Sched.YMM, NoMRPattern>, EVEX_V256;
defm Z128 : avx512_store<opc, OpcodeStr, NAME, _.info128, store,
- masked_store, Sched.XMM, EVEX2VEXOvrd,
- NoMRPattern>, EVEX_V128;
+ masked_store, Sched.XMM, NoMRPattern>, EVEX_V128;
}
}
multiclass avx512_alignedstore_vl<bits<8> opc, string OpcodeStr,
AVX512VLVectorVTInfo _, Predicate prd,
X86SchedWriteMoveLSWidths Sched,
- string EVEX2VEXOvrd, bit NoMRPattern = 0> {
+ bit NoMRPattern = 0> {
let Predicates = [prd] in
defm Z : avx512_store<opc, OpcodeStr, NAME, _.info512, alignedstore,
- masked_store_aligned, Sched.ZMM, "",
- NoMRPattern>, EVEX_V512;
+ masked_store_aligned, Sched.ZMM, NoMRPattern>, EVEX_V512;
let Predicates = [prd, HasVLX] in {
defm Z256 : avx512_store<opc, OpcodeStr, NAME, _.info256, alignedstore,
- masked_store_aligned, Sched.YMM,
- EVEX2VEXOvrd#"Y", NoMRPattern>, EVEX_V256;
+ masked_store_aligned, Sched.YMM, NoMRPattern>, EVEX_V256;
defm Z128 : avx512_store<opc, OpcodeStr, NAME, _.info128, alignedstore,
- masked_store_aligned, Sched.XMM, EVEX2VEXOvrd,
- NoMRPattern>, EVEX_V128;
+ masked_store_aligned, Sched.XMM, NoMRPattern>, EVEX_V128;
}
}
defm VMOVAPS : avx512_alignedload_vl<0x28, "vmovaps", avx512vl_f32_info,
- HasAVX512, SchedWriteFMoveLS, "VMOVAPS">,
+ HasAVX512, SchedWriteFMoveLS>,
avx512_alignedstore_vl<0x29, "vmovaps", avx512vl_f32_info,
- HasAVX512, SchedWriteFMoveLS, "VMOVAPS">,
+ HasAVX512, SchedWriteFMoveLS>,
TB, EVEX_CD8<32, CD8VF>;
defm VMOVAPD : avx512_alignedload_vl<0x28, "vmovapd", avx512vl_f64_info,
- HasAVX512, SchedWriteFMoveLS, "VMOVAPD">,
+ HasAVX512, SchedWriteFMoveLS>,
avx512_alignedstore_vl<0x29, "vmovapd", avx512vl_f64_info,
- HasAVX512, SchedWriteFMoveLS, "VMOVAPD">,
+ HasAVX512, SchedWriteFMoveLS>,
TB, PD, REX_W, EVEX_CD8<64, CD8VF>;
defm VMOVUPS : avx512_load_vl<0x10, "vmovups", avx512vl_f32_info, HasAVX512,
- SchedWriteFMoveLS, "VMOVUPS", 0, null_frag>,
+ SchedWriteFMoveLS, 0, null_frag>,
avx512_store_vl<0x11, "vmovups", avx512vl_f32_info, HasAVX512,
- SchedWriteFMoveLS, "VMOVUPS">,
+ SchedWriteFMoveLS>,
TB, EVEX_CD8<32, CD8VF>;
defm VMOVUPD : avx512_load_vl<0x10, "vmovupd", avx512vl_f64_info, HasAVX512,
- SchedWriteFMoveLS, "VMOVUPD", 0, null_frag>,
+ SchedWriteFMoveLS, 0, null_frag>,
avx512_store_vl<0x11, "vmovupd", avx512vl_f64_info, HasAVX512,
- SchedWriteFMoveLS, "VMOVUPD">,
+ SchedWriteFMoveLS>,
TB, PD, REX_W, EVEX_CD8<64, CD8VF>;
defm VMOVDQA32 : avx512_alignedload_vl<0x6F, "vmovdqa32", avx512vl_i32_info,
- HasAVX512, SchedWriteVecMoveLS,
- "VMOVDQA", 1>,
+ HasAVX512, SchedWriteVecMoveLS, 1>,
avx512_alignedstore_vl<0x7F, "vmovdqa32", avx512vl_i32_info,
- HasAVX512, SchedWriteVecMoveLS,
- "VMOVDQA", 1>,
+ HasAVX512, SchedWriteVecMoveLS, 1>,
TB, PD, EVEX_CD8<32, CD8VF>;
defm VMOVDQA64 : avx512_alignedload_vl<0x6F, "vmovdqa64", avx512vl_i64_info,
- HasAVX512, SchedWriteVecMoveLS,
- "VMOVDQA">,
+ HasAVX512, SchedWriteVecMoveLS>,
avx512_alignedstore_vl<0x7F, "vmovdqa64", avx512vl_i64_info,
- HasAVX512, SchedWriteVecMoveLS,
- "VMOVDQA">,
+ HasAVX512, SchedWriteVecMoveLS>,
TB, PD, REX_W, EVEX_CD8<64, CD8VF>;
defm VMOVDQU8 : avx512_load_vl<0x6F, "vmovdqu8", avx512vl_i8_info, HasBWI,
- SchedWriteVecMoveLS, "VMOVDQU", 1>,
+ SchedWriteVecMoveLS, 1>,
avx512_store_vl<0x7F, "vmovdqu8", avx512vl_i8_info, HasBWI,
- SchedWriteVecMoveLS, "VMOVDQU", 1>,
+ SchedWriteVecMoveLS, 1>,
TB, XD, EVEX_CD8<8, CD8VF>;
defm VMOVDQU16 : avx512_load_vl<0x6F, "vmovdqu16", avx512vl_i16_info, HasBWI,
- SchedWriteVecMoveLS, "VMOVDQU", 1>,
+ SchedWriteVecMoveLS, 1>,
avx512_store_vl<0x7F, "vmovdqu16", avx512vl_i16_info, HasBWI,
- SchedWriteVecMoveLS, "VMOVDQU", 1>,
+ SchedWriteVecMoveLS, 1>,
TB, XD, REX_W, EVEX_CD8<16, CD8VF>;
defm VMOVDQU32 : avx512_load_vl<0x6F, "vmovdqu32", avx512vl_i32_info, HasAVX512,
- SchedWriteVecMoveLS, "VMOVDQU", 1, null_frag>,
+ SchedWriteVecMoveLS, 1, null_frag>,
avx512_store_vl<0x7F, "vmovdqu32", avx512vl_i32_info, HasAVX512,
- SchedWriteVecMoveLS, "VMOVDQU", 1>,
+ SchedWriteVecMoveLS, 1>,
TB, XS, EVEX_CD8<32, CD8VF>;
defm VMOVDQU64 : avx512_load_vl<0x6F, "vmovdqu64", avx512vl_i64_info, HasAVX512,
- SchedWriteVecMoveLS, "VMOVDQU", 0, null_frag>,
+ SchedWriteVecMoveLS, 0, null_frag>,
avx512_store_vl<0x7F, "vmovdqu64", avx512vl_i64_info, HasAVX512,
- SchedWriteVecMoveLS, "VMOVDQU">,
+ SchedWriteVecMoveLS>,
TB, XS, REX_W, EVEX_CD8<64, CD8VF>;
// Special instructions to help with spilling when we don't have VLX. We need
@@ -4844,8 +4825,7 @@ defm VPMULLD : avx512_binop_rm_vl_d<0x40, "vpmulld", mul,
defm VPMULLW : avx512_binop_rm_vl_w<0xD5, "vpmullw", mul,
SchedWriteVecIMul, HasBWI, 1>;
defm VPMULLQ : avx512_binop_rm_vl_q<0x40, "vpmullq", mul,
- SchedWriteVecIMul, HasDQI, 1>, T8,
- NotEVEX2VEXConvertible;
+ SchedWriteVecIMul, HasDQI, 1>, T8;
defm VPMULHW : avx512_binop_rm_vl_w<0xE5, "vpmulhw", mulhs, SchedWriteVecIMul,
HasBWI, 1>;
defm VPMULHUW : avx512_binop_rm_vl_w<0xE4, "vpmulhuw", mulhu, SchedWriteVecIMul,
@@ -4989,8 +4969,7 @@ defm VPMAXSW : avx512_binop_rm_vl_w<0xEE, "vpmaxsw", smax,
defm VPMAXSD : avx512_binop_rm_vl_d<0x3D, "vpmaxsd", smax,
SchedWriteVecALU, HasAVX512, 1>, T8;
defm VPMAXSQ : avx512_binop_rm_vl_q<0x3D, "vpmaxsq", smax,
- SchedWriteVecALU, HasAVX512, 1>, T8,
- NotEVEX2VEXConvertible;
+ SchedWriteVecALU, HasAVX512, 1>, T8;
defm VPMAXUB : avx512_binop_rm_vl_b<0xDE, "vpmaxub", umax,
SchedWriteVecALU, HasBWI, 1>;
@@ -4999,8 +4978,7 @@ defm VPMAXUW : avx512_binop_rm_vl_w<0x3E, "vpmaxuw", umax,
defm VPMAXUD : avx512_binop_rm_vl_d<0x3F, "vpmaxud", umax,
SchedWriteVecALU, HasAVX512, 1>, T8;
defm VPMAXUQ : avx512_binop_rm_vl_q<0x3F, "vpmaxuq", umax,
- SchedWriteVecALU, HasAVX512, 1>, T8,
- NotEVEX2VEXConvertible;
+ SchedWriteVecALU, HasAVX512, 1>, T8;
defm VPMINSB : avx512_binop_rm_vl_b<0x38, "vpminsb", smin,
SchedWriteVecALU, HasBWI, 1>, T8;
@@ -5009,8 +4987,7 @@ defm VPMINSW : avx512_binop_rm_vl_w<0xEA, "vpminsw", smin,
defm VPMINSD : avx512_binop_rm_vl_d<0x39, "vpminsd", smin,
SchedWriteVecALU, HasAVX512, 1>, T8;
defm VPMINSQ : avx512_binop_rm_vl_q<0x39, "vpminsq", smin,
- SchedWriteVecALU, HasAVX512, 1>, T8,
- NotEVEX2VEXConvertible;
+ SchedWriteVecALU, HasAVX512, 1>, T8;
defm VPMINUB : avx512_binop_rm_vl_b<0xDA, "vpminub", umin,
SchedWriteVecALU, HasBWI, 1>;
@@ -5019,8 +4996,7 @@ defm VPMINUW : avx512_binop_rm_vl_w<0x3A, "vpminuw", umin,
defm VPMINUD : avx512_binop_rm_vl_d<0x3B, "vpminud", umin,
SchedWriteVecALU, HasAVX512, 1>, T8;
defm VPMINUQ : avx512_binop_rm_vl_q<0x3B, "vpminuq", umin,
- SchedWriteVecALU, HasAVX512, 1>, T8,
- NotEVEX2VEXConvertible;
+ SchedWriteVecALU, HasAVX512, 1>, T8;
// PMULLQ: Use 512bit version to implement 128/256 bit in case NoVLX.
let Predicates = [HasDQI, NoVLX] in {
@@ -5405,8 +5381,7 @@ multiclass avx512_fp_scalar_round<bits<8> opc, string OpcodeStr,X86VectorVTInfo
}
multiclass avx512_fp_scalar_sae<bits<8> opc, string OpcodeStr,X86VectorVTInfo _,
SDNode OpNode, SDNode VecNode, SDNode SaeNode,
- X86FoldableSchedWrite sched, bit IsCommutable,
- string EVEX2VexOvrd> {
+ X86FoldableSchedWrite sched, bit IsCommutable> {
let ExeDomain = _.ExeDomain in {
defm rr_Int : AVX512_maskable_scalar<opc, MRMSrcReg, _, (outs _.RC:$dst),
(ins _.RC:$src1, _.RC:$src2), OpcodeStr,
@@ -5427,8 +5402,7 @@ multiclass avx512_fp_scalar_sae<bits<8> opc, string OpcodeStr,X86VectorVTInfo _,
(ins _.FRC:$src1, _.FRC:$src2),
OpcodeStr#"\t{$src2, $src1, $dst|$dst, $src1, $src2}",
[(set _.FRC:$dst, (OpNode _.FRC:$src1, _.FRC:$src2))]>,
- Sched<[sched]>,
- EVEX2VEXOverride<EVEX2VexOvrd#"rr"> {
+ Sched<[sched]> {
let isCommutable = IsCommutable;
}
def rm : I< opc, MRMSrcMem, (outs _.FRC:$dst),
@@ -5436,8 +5410,7 @@ multiclass avx512_fp_scalar_sae<bits<8> opc, string OpcodeStr,X86VectorVTInfo _,
OpcodeStr#"\t{$src2, $src1, $dst|$dst, $src1, $src2}",
[(set _.FRC:$dst, (OpNode _.FRC:$src1,
(_.ScalarLdFrag addr:$src2)))]>,
- Sched<[sched.Folded, sched.ReadAfterFold]>,
- EVEX2VEXOverride<EVEX2VexOvrd#"rm">;
+ Sched<[sched.Folded, sched.ReadAfterFold]>;
}
let Uses = [MXCSR] in
@@ -5474,19 +5447,15 @@ multiclass avx512_binop_s_sae<bits<8> opc, string OpcodeStr, SDNode OpNode,
SDNode VecNode, SDNode SaeNode,
X86SchedWriteSizes sched, bit IsCommutable> {
defm SSZ : avx512_fp_scalar_sae<opc, OpcodeStr#"ss", f32x_info, OpNode,
- VecNode, SaeNode, sched.PS.Scl, IsCommutable,
- NAME#"SS">,
+ VecNode, SaeNode, sched.PS.Scl, IsCommutable>,
TB, XS, EVEX, VVVV, VEX_LIG, EVEX_CD8<32, CD8VT1>;
defm SDZ : avx512_fp_scalar_sae<opc, OpcodeStr#"sd", f64x_info, OpNode,
- VecNode, SaeNode, sched.PD.Scl, IsCommutable,
- NAME#"SD">,
+ VecNode, SaeNode, sched.PD.Scl, IsCommutable>,
TB, XD, REX_W, EVEX, VVVV, VEX_LIG, EVEX_CD8<64, CD8VT1>;
let Predicates = [HasFP16] in {
defm SHZ : avx512_fp_scalar_sae<opc, OpcodeStr#"sh", f16x_info, OpNode,
- VecNode, SaeNode, sched.PH.Scl, IsCommutable,
- NAME#"SH">,
- T_MAP5, XS, EVEX, VVVV, VEX_LIG, EVEX_CD8<16, CD8VT1>,
- NotEVEX2VEXConvertible;
+ VecNode, SaeNode, sched.PH.Scl, IsCommutable>,
+ T_MAP5, XS, EVEX, VVVV, VEX_LIG, EVEX_CD8<16, CD8VT1>;
}
}
defm VADD : avx512_binop_s_round<0x58, "vadd", any_fadd, X86fadds, X86faddRnds,
@@ -5506,14 +5475,13 @@ defm VMAX : avx512_binop_s_sae<0x5F, "vmax", X86fmax, X86fmaxs, X86fmaxSAEs,
// X86fminc and X86fmaxc instead of X86fmin and X86fmax
multiclass avx512_comutable_binop_s<bits<8> opc, string OpcodeStr,
X86VectorVTInfo _, SDNode OpNode,
- X86FoldableSchedWrite sched,
- string EVEX2VEXOvrd> {
+ X86FoldableSchedWrite sched> {
let isCodeGenOnly = 1, Predicates = [HasAVX512], ExeDomain = _.ExeDomain in {
def rr : I< opc, MRMSrcReg, (outs _.FRC:$dst),
(ins _.FRC:$src1, _.FRC:$src2),
OpcodeStr#"\t{$src2, $src1, $dst|$dst, $src1, $src2}",
[(set _.FRC:$dst, (OpNode _.FRC:$src1, _.FRC:$src2))]>,
- Sched<[sched]>, EVEX2VEXOverride<EVEX2VEXOvrd#"rr"> {
+ Sched<[sched]> {
let isCommutable = 1;
}
def rm : I< opc, MRMSrcMem, (outs _.FRC:$dst),
@@ -5521,36 +5489,34 @@ multiclass avx512_comutable_binop_s<bits<8> opc, string OpcodeStr,
OpcodeStr#"\t{$src2, $src1, $dst|$dst, $src1, $src2}",
[(set _.FRC:$dst, (OpNode _.FRC:$src1,
(_.ScalarLdFrag addr:$src2)))]>,
- Sched<[sched.Folded, sched.ReadAfterFold]>,
- EVEX2VEXOverride<EVEX2VEXOvrd#"rm">;
+ Sched<[sched.Folded, sched.ReadAfterFold]>;
}
}
defm VMINCSSZ : avx512_comutable_binop_s<0x5D, "vminss", f32x_info, X86fminc,
- SchedWriteFCmp.Scl, "VMINCSS">, TB, XS,
+ SchedWriteFCmp.Scl>, TB, XS,
EVEX, VVVV, VEX_LIG, EVEX_CD8<32, CD8VT1>, SIMD_EXC;
defm VMINCSDZ : avx512_comutable_binop_s<0x5D, "vminsd", f64x_info, X86fminc,
- SchedWriteFCmp.Scl, "VMINCSD">, TB, XD,
+ SchedWriteFCmp.Scl>, TB, XD,
REX_W, EVEX, VVVV, VEX_LIG,
EVEX_CD8<64, CD8VT1>, SIMD_EXC;
defm VMAXCSSZ : avx512_comutable_binop_s<0x5F, "vmaxss", f32x_info, X86fmaxc,
- SchedWriteFCmp.Scl, "VMAXCSS">, TB, XS,
+ SchedWriteFCmp.Scl>, TB, XS,
EVEX, VVVV, VEX_LIG, EVEX_CD8<32, CD8VT1>, SIMD_EXC;
defm VMAXCSDZ : avx512_comutable_binop_s<0x5F, "vmaxsd", f64x_info, X86fmaxc,
- SchedWriteFCmp.Scl, "VMAXCSD">, TB, XD,
+ SchedWriteFCmp.Scl>, TB, XD,
REX_W, EVEX, VVVV, VEX_LIG,
EVEX_CD8<64, CD8VT1>, SIMD_EXC;
defm VMINCSHZ : avx512_comutable_binop_s<0x5D, "vminsh", f16x_info, X86fminc,
- SchedWriteFCmp.Scl, "VMINCSH">, T_MAP5, XS,
- EVEX, VVVV, VEX_LIG, EVEX_CD8<16, CD8VT1>, SIMD_EXC,
- NotEVEX2VEXConvertible;
+ SchedWriteFCmp.Scl>, T_MAP5, XS,
+ EVEX, VVVV, VEX_LIG, EVEX_CD8<16, CD8VT1>, SIMD_EXC;
+
defm VMAXCSHZ : avx512_comutable_binop_s<0x5F, "vmaxsh", f16x_info, X86fmaxc,
- SchedWriteFCmp.Scl, "VMAXCSH">, T_MAP5, XS,
- EVEX, VVVV, VEX_LIG, EVEX_CD8<16, CD8VT1>, SIMD_EXC,
- NotEVEX2VEXConvertible;
+ SchedWriteFCmp.Scl>, T_MAP5, XS,
+ EVEX, VVVV, VEX_LIG, EVEX_CD8<16, CD8VT1>, SIMD_EXC;
multiclass avx512_fp_packed<bits<8> opc, string OpcodeStr, SDPatternOperator OpNode,
SDPatternOperator MaskOpNode,
@@ -5820,8 +5786,7 @@ multiclass avx512_fp_scalef_all<bits<8> opc, bits<8> opcScaler, string OpcodeStr
EVEX_V256, EVEX_CD8<16, CD8VF>, T_MAP6, PD;
}
}
-defm VSCALEF : avx512_fp_scalef_all<0x2C, 0x2D, "vscalef",
- SchedWriteFAdd>, NotEVEX2VEXConvertible;
+defm VSCALEF : avx512_fp_scalef_all<0x2C, 0x2D, "vscalef", SchedWriteFAdd>;
//===----------------------------------------------------------------------===//
// AVX-512 VPTESTM instructions
@@ -5985,11 +5950,9 @@ multiclass avx512_shift_sizes<bits<8> opc, string OpcodeStr, SDNode OpNode,
multiclass avx512_shift_types<bits<8> opcd, bits<8> opcq, bits<8> opcw,
string OpcodeStr, SDNode OpNode,
- X86SchedWriteWidths sched,
- bit NotEVEX2VEXConvertibleQ = 0> {
+ X86SchedWriteWidths sched> {
defm D : avx512_shift_sizes<opcd, OpcodeStr#"d", OpNode, sched, v4i32,
avx512vl_i32_info, HasAVX512>;
- let notEVEX2VEXConvertible = NotEVEX2VEXConvertibleQ in
defm Q : avx512_shift_sizes<opcq, OpcodeStr#"q", OpNode, sched, v2i64,
avx512vl_i64_info, HasAVX512>, REX_W;
defm W : avx512_shift_sizes<opcw, OpcodeStr#"w", OpNode, sched, v8i16,
@@ -6034,11 +5997,9 @@ multiclass avx512_shift_rmi_w<bits<8> opcw, Format ImmFormR, Format ImmFormM,
multiclass avx512_shift_rmi_dq<bits<8> opcd, bits<8> opcq,
Format ImmFormR, Format ImmFormM,
string OpcodeStr, SDNode OpNode,
- X86SchedWriteWidths sched,
- bit NotEVEX2VEXConvertibleQ = 0> {
+ X86SchedWriteWidths sched> {
defm D: avx512_shift_rmi_sizes<opcd, ImmFormR, ImmFormM, OpcodeStr#"d", OpNode,
sched, avx512vl_i32_info>, EVEX_CD8<32, CD8VF>;
- let notEVEX2VEXConvertible = NotEVEX2VEXConvertibleQ in
defm Q: avx512_shift_rmi_sizes<opcq, ImmFormR, ImmFormM, OpcodeStr#"q", OpNode,
sched, avx512vl_i64_info>, EVEX_CD8<64, CD8VF>, REX_W;
}
@@ -6054,7 +6015,7 @@ defm VPSLL : avx512_shift_rmi_dq<0x72, 0x73, MRM6r, MRM6m, "vpsll", X86vshli,
SchedWriteVecShiftImm>, AVX512BIi8Base, EVEX, VVVV;
defm VPSRA : avx512_shift_rmi_dq<0x72, 0x72, MRM4r, MRM4m, "vpsra", X86vsrai,
- SchedWriteVecShiftImm, 1>,
+ SchedWriteVecShiftImm>,
avx512_shift_rmi_w<0x71, MRM4r, MRM4m, "vpsraw", X86vsrai,
SchedWriteVecShiftImm>, AVX512BIi8Base, EVEX, VVVV;
@@ -6066,7 +6027,7 @@ defm VPROL : avx512_shift_rmi_dq<0x72, 0x72, MRM1r, MRM1m, "vprol", X86vrotli,
defm VPSLL : avx512_shift_types<0xF2, 0xF3, 0xF1, "vpsll", X86vshl,
SchedWriteVecShift>;
defm VPSRA : avx512_shift_types<0xE2, 0xE2, 0xE1, "vpsra", X86vsra,
- SchedWriteVecShift, 1>;
+ SchedWriteVecShift>;
defm VPSRL : avx512_shift_types<0xD2, 0xD3, 0xD1, "vpsrl", X86vsrl,
SchedWriteVecShift>;
@@ -6435,7 +6396,7 @@ defm VPERMILPS : avx512_permil<"vpermilps", 0x04, 0x0C, avx512vl_f32_info,
avx512vl_i32_info>;
let ExeDomain = SSEPackedDouble in
defm VPERMILPD : avx512_permil<"vpermilpd", 0x05, 0x0D, avx512vl_f64_info,
- avx512vl_i64_info>, VEX_W1X;
+ avx512vl_i64_info>, REX_W;
//===----------------------------------------------------------------------===//
// AVX-512 - VPSHUFD, VPSHUFLW, VPSHUFHW
@@ -8443,9 +8404,9 @@ multiclass avx512_cvtqq2pd<bits<8> opc, string OpcodeStr, SDPatternOperator OpNo
}
let Predicates = [HasDQI, HasVLX] in {
defm Z128 : avx512_vcvt_fp<opc, OpcodeStr, v2f64x_info, v2i64x_info, OpNode,
- MaskOpNode, sched.XMM>, EVEX_V128, NotEVEX2VEXConvertible;
+ MaskOpNode, sched.XMM>, EVEX_V128;
defm Z256 : avx512_vcvt_fp<opc, OpcodeStr, v4f64x_info, v4i64x_info, OpNode,
- MaskOpNode, sched.YMM>, EVEX_V256, NotEVEX2VEXConvertible;
+ MaskOpNode, sched.YMM>, EVEX_V256;
}
}
@@ -8524,11 +8485,10 @@ multiclass avx512_cvtqq2ps_dq2ph<bits<8> opc, string OpcodeStr, SDPatternOperato
defm Z128 : avx512_vcvt_fp<opc, OpcodeStr, _dst.info128, _src.info128, null_frag,
null_frag, sched.XMM, _src.info128.BroadcastStr,
"{x}", i128mem, _src.info128.KRCWM>,
- EVEX_V128, NotEVEX2VEXConvertible;
+ EVEX_V128;
defm Z256 : avx512_vcvt_fp<opc, OpcodeStr, _dst.info128, _src.info256, OpNode,
MaskOpNode, sched.YMM, _src.info256.BroadcastStr,
- "{y}">, EVEX_V256,
- NotEVEX2VEXConvertible;
+ "{y}">, EVEX_V256;
// Special patterns to allow use of X86VM[SU]intToFP for masking. Instruction
// patterns have been disabled with null_frag.
@@ -10882,8 +10842,7 @@ defm VGETMANTSH: avx512_common_fp_sae_scalar_imm<"vgetmantsh", f16x_info,
multiclass avx512_shuff_packed_128_common<bits<8> opc, string OpcodeStr,
X86FoldableSchedWrite sched,
X86VectorVTInfo _,
- X86VectorVTInfo CastInfo,
- string EVEX2VEXOvrd> {
+ X86VectorVTInfo CastInfo> {
let ExeDomain = _.ExeDomain in {
defm rri : AVX512_maskable<opc, MRMSrcReg, _, (outs _.RC:$dst),
(ins _.RC:$src1, _.RC:$src2, u8imm:$src3),
@@ -10891,7 +10850,7 @@ multiclass avx512_shuff_packed_128_common<bits<8> opc, string OpcodeStr,
(_.VT (bitconvert
(CastInfo.VT (X86Shuf128 _.RC:$src1, _.RC:$src2,
(i8 timm:$src3)))))>,
- Sched<[sched]>, EVEX2VEXOverride<EVEX2VEXOvrd#"rr">;
+ Sched<[sched]>;
defm rmi : AVX512_maskable<opc, MRMSrcMem, _, (outs _.RC:$dst),
(ins _.RC:$src1, _.MemOp:$src2, u8imm:$src3),
OpcodeStr, "$src3, $src2, $src1", "$src1, $src2, $src3",
@@ -10900,8 +10859,7 @@ multiclass avx512_shuff_packed_128_common<bits<8> opc, string OpcodeStr,
(CastInfo.VT (X86Shuf128 _.RC:$src1,
(CastInfo.LdFrag addr:$src2),
(i8 timm:$src3)))))>,
- Sched<[sched.Folded, sched.ReadAfterFold]>,
- EVEX2VEXOverride<EVEX2VEXOvrd#"rm">;
+ Sched<[sched.Folded, sched.ReadAfterFold]>;
defm rmbi : AVX512_maskable<opc, MRMSrcMem, _, (outs _.RC:$dst),
(ins _.RC:$src1, _.ScalarMemOp:$src2, u8imm:$src3),
OpcodeStr, "$src3, ${src2}"#_.BroadcastStr#", $src1",
@@ -10918,45 +10876,40 @@ multiclass avx512_shuff_packed_128_common<bits<8> opc, string OpcodeStr,
multiclass avx512_shuff_packed_128<string OpcodeStr, X86FoldableSchedWrite sched,
AVX512VLVectorVTInfo _,
- AVX512VLVectorVTInfo CastInfo, bits<8> opc,
- string EVEX2VEXOvrd>{
+ AVX512VLVectorVTInfo CastInfo, bits<8> opc>{
let Predicates = [HasAVX512] in
defm Z : avx512_shuff_packed_128_common<opc, OpcodeStr, sched,
- _.info512, CastInfo.info512, "">, EVEX_V512;
+ _.info512, CastInfo.info512>, EVEX_V512;
let Predicates = [HasAVX512, HasVLX] in
defm Z256 : avx512_shuff_packed_128_common<opc, OpcodeStr, sched,
- _.info256, CastInfo.info256,
- EVEX2VEXOvrd>, EVEX_V256;
+ _.info256, CastInfo.info256>, EVEX_V256;
}
defm VSHUFF32X4 : avx512_shuff_packed_128<"vshuff32x4", WriteFShuffle256,
- avx512vl_f32_info, avx512vl_f64_info, 0x23, "VPERM2F128">, AVX512AIi8Base, EVEX, VVVV, EVEX_CD8<32, CD8VF>;
+ avx512vl_f32_info, avx512vl_f64_info, 0x23>, AVX512AIi8Base, EVEX, VVVV, EVEX_CD8<32, CD8VF>;
defm VSHUFF64X2 : avx512_shuff_packed_128<"vshuff64x2", WriteFShuffle256,
- avx512vl_f64_info, avx512vl_f64_info, 0x23, "VPERM2F128">, AVX512AIi8Base, EVEX, VVVV, EVEX_CD8<64, CD8VF>, REX_W;
+ avx512vl_f64_info, avx512vl_f64_info, 0x23>, AVX512AIi8Base, EVEX, VVVV, EVEX_CD8<64, CD8VF>, REX_W;
defm VSHUFI32X4 : avx512_shuff_packed_128<"vshufi32x4", WriteFShuffle256,
- avx512vl_i32_info, avx512vl_i64_info, 0x43, "VPERM2I128">, AVX512AIi8Base, EVEX, VVVV, EVEX_CD8<32, CD8VF>;
+ avx512vl_i32_info, avx512vl_i64_info, 0x43>, AVX512AIi8Base, EVEX, VVVV, EVEX_CD8<32, CD8VF>;
defm VSHUFI64X2 : avx512_shuff_packed_128<"vshufi64x2", WriteFShuffle256,
- avx512vl_i64_info, avx512vl_i64_info, 0x43, "VPERM2I128">, AVX512AIi8Base, EVEX, VVVV, EVEX_CD8<64, CD8VF>, REX_W;
+ avx512vl_i64_info, avx512vl_i64_info, 0x43>, AVX512AIi8Base, EVEX, VVVV, EVEX_CD8<64, CD8VF>, REX_W;
multiclass avx512_valign<bits<8> opc, string OpcodeStr,
X86FoldableSchedWrite sched, X86VectorVTInfo _>{
- // NOTE: EVEX2VEXOverride changed back to Unset for 256-bit at the
- // instantiation of this class.
let ExeDomain = _.ExeDomain in {
defm rri : AVX512_maskable<opc, MRMSrcReg, _, (outs _.RC:$dst),
(ins _.RC:$src1, _.RC:$src2, u8imm:$src3),
OpcodeStr, "$src3, $src2, $src1", "$src1, $src2, $src3",
(_.VT (X86VAlign _.RC:$src1, _.RC:$src2, (i8 timm:$src3)))>,
- Sched<[sched]>, EVEX2VEXOverride<"VPALIGNRrri">;
+ Sched<[sched]>;
defm rmi : AVX512_maskable<opc, MRMSrcMem, _, (outs _.RC:$dst),
(ins _.RC:$src1, _.MemOp:$src2, u8imm:$src3),
OpcodeStr, "$src3, $src2, $src1", "$src1, $src2, $src3",
(_.VT (X86VAlign _.RC:$src1,
(bitconvert (_.LdFrag addr:$src2)),
(i8 timm:$src3)))>,
- Sched<[sched.Folded, sched.ReadAfterFold]>,
- EVEX2VEXOverride<"VPALIGNRrmi">;
+ Sched<[sched.Folded, sched.ReadAfterFold]>;
defm rmbi : AVX512_maskable<opc, MRMSrcMem, _, (outs _.RC:$dst),
(ins _.RC:$src1, _.ScalarMemOp:$src2, u8imm:$src3),
@@ -10979,7 +10932,6 @@ multiclass avx512_valign_common<string OpcodeStr, X86SchedWriteWidths sched,
defm Z128 : avx512_valign<0x03, OpcodeStr, sched.XMM, _.info128>,
AVX512AIi8Base, EVEX, VVVV, EVEX_V128;
// We can't really override the 256-bit version so change it back to unset.
- let EVEX2VEXOverride = ? in
defm Z256 : avx512_valign<0x03, OpcodeStr, sched.YMM, _.info256>,
AVX512AIi8Base, EVEX, VVVV, EVEX_V256;
}
@@ -11111,7 +11063,7 @@ let Predicates = [HasVLX, HasBWI] in {
defm VDBPSADBW: avx512_common_3Op_rm_imm8<0x42, X86dbpsadbw, "vdbpsadbw",
SchedWritePSADBW, avx512vl_i16_info, avx512vl_i8_info>,
- EVEX_CD8<8, CD8VF>, NotEVEX2VEXConvertible;
+ EVEX_CD8<8, CD8VF>;
multiclass avx512_unary_rm<bits<8> opc, string OpcodeStr, SDNode OpNode,
X86FoldableSchedWrite sched, X86VectorVTInfo _> {
@@ -13088,12 +13040,10 @@ multiclass avx512_cvtqq2ph<bits<8> opc, string OpcodeStr, SDPatternOperator OpNo
let Predicates = [HasFP16, HasVLX] in {
defm Z128 : avx512_vcvt_fp<opc, OpcodeStr, v8f16x_info, v2i64x_info,
null_frag, null_frag, sched.XMM, "{1to2}", "{x}",
- i128mem, VK2WM>,
- EVEX_V128, NotEVEX2VEXConvertible;
+ i128mem, VK2WM>, EVEX_V128;
defm Z256 : avx512_vcvt_fp<opc, OpcodeStr, v8f16x_info, v4i64x_info,
null_frag, null_frag, sched.YMM, "{1to4}", "{y}",
- i256mem, VK4WM>,
- EVEX_V256, NotEVEX2VEXConvertible;
+ i256mem, VK4WM>, EVEX_V256;
}
def : InstAlias<OpcodeStr#"x\t{$src, $dst|$dst, $src}",
diff --git a/llvm/lib/Target/X86/X86InstrFormats.td b/llvm/lib/Target/X86/X86InstrFormats.td
index 6e76b44b66a3..8798b13a1761 100644
--- a/llvm/lib/Target/X86/X86InstrFormats.td
+++ b/llvm/lib/Target/X86/X86InstrFormats.td
@@ -247,8 +247,6 @@ class X86Inst<bits<8> opcod, Format f, ImmType i, dag outs, dag ins,
bit hasREPPrefix = 0; // Does this inst have a REP prefix?
bits<2> OpEncBits = OpEnc.Value;
bit IgnoresW = 0; // Does this inst ignore REX_W field?
- bit EVEX_W1_VEX_W0 = 0; // This EVEX inst with VEX.W==1 can become a VEX
- // instruction with VEX.W == 0.
bit hasVEX_4V = 0; // Does this inst require the VEX.VVVV field?
bit hasVEX_L = 0; // Does this inst use large (256-bit) registers?
bit ignoresVEX_L = 0; // Does this instruction ignore the L-bit
@@ -279,10 +277,6 @@ class X86Inst<bits<8> opcod, Format f, ImmType i, dag outs, dag ins,
CD8_EltSize,
!srl(VectSize, CD8_Form{1-0}))), 0);
- // Used to prevent an explicit EVEX2VEX override for this instruction.
- string EVEX2VEXOverride = ?;
-
- bit notEVEX2VEXConvertible = 0; // Prevent EVEX->VEX conversion.
ExplicitOpPrefix explicitOpPrefix = NoExplicitOpPrefix;
bits<2> explicitOpPrefixBits = explicitOpPrefix.Value;
// TSFlags layout should be kept in sync with X86BaseInfo.h.
diff --git a/llvm/lib/Target/X86/X86InstrInfo.h b/llvm/lib/Target/X86/X86InstrInfo.h
index eac8d79eb8a3..eb0734f9a618 100644
--- a/llvm/lib/Target/X86/X86InstrInfo.h
+++ b/llvm/lib/Target/X86/X86InstrInfo.h
@@ -29,8 +29,10 @@ class X86Subtarget;
namespace X86 {
enum AsmComments {
+ // For instr that was compressed from EVEX to LEGACY.
+ AC_EVEX_2_LEGACY = MachineInstr::TAsmComments,
// For instr that was compressed from EVEX to VEX.
- AC_EVEX_2_VEX = MachineInstr::TAsmComments
+ AC_EVEX_2_VEX = AC_EVEX_2_LEGACY << 1
};
/// Return a pair of condition code for the given predicate and whether
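
(Aside: a standalone C++ sketch, not LLVM code, of why deriving the second tag by shifting keeps the two compression comments on independent bits; the concrete value of TAsmComments is assumed here purely for illustration.)

#include <cassert>

int main() {
  const unsigned TAsmComments = 0x4;                   // assumed value, for illustration only
  const unsigned AC_EVEX_2_LEGACY = TAsmComments;      // first target-specific comment bit
  const unsigned AC_EVEX_2_VEX = AC_EVEX_2_LEGACY << 1; // next free bit
  assert((AC_EVEX_2_LEGACY & AC_EVEX_2_VEX) == 0);     // the two tags never alias
  return 0;
}
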
diff --git a/llvm/lib/Target/X86/X86InstrUtils.td b/llvm/lib/Target/X86/X86InstrUtils.td
index 132941a5734c..f4ae15837fbf 100644
--- a/llvm/lib/Target/X86/X86InstrUtils.td
+++ b/llvm/lib/Target/X86/X86InstrUtils.td
@@ -43,8 +43,6 @@ class XOP { Encoding OpEnc = EncXOP; }
class VEX { Encoding OpEnc = EncVEX; }
class EVEX { Encoding OpEnc = EncEVEX; }
class WIG { bit IgnoresW = 1; }
-// Special version of REX_W that can be changed to VEX.W==0 for EVEX2VEX.
-class VEX_W1X { bit hasREX_W = 1; bit EVEX_W1_VEX_W0 = 1; }
class VEX_L { bit hasVEX_L = 1; }
class VEX_LIG { bit ignoresVEX_L = 1; }
class VVVV { bit hasVEX_4V = 1; }
@@ -66,9 +64,6 @@ class EVEX_CD8<int esize, CD8VForm form> {
}
class NoCD8 { bits<7> CD8_Scale = 0; }
-class EVEX2VEXOverride<string VEXInstrName> {
- string EVEX2VEXOverride = VEXInstrName;
-}
class AVX512BIi8Base : TB, PD {
Domain ExeDomain = SSEPackedInt;
ImmType ImmT = Imm8;
@@ -89,7 +84,6 @@ class AVX512PDIi8Base : TB, PD {
Domain ExeDomain = SSEPackedDouble;
ImmType ImmT = Imm8;
}
-class NotEVEX2VEXConvertible { bit notEVEX2VEXConvertible = 1; }
class ExplicitREX2Prefix { ExplicitOpPrefix explicitOpPrefix = ExplicitREX2; }
class ExplicitVEXPrefix { ExplicitOpPrefix explicitOpPrefix = ExplicitVEX; }
class ExplicitEVEXPrefix { ExplicitOpPrefix explicitOpPrefix = ExplicitEVEX; }
diff --git a/llvm/lib/Target/X86/X86MCInstLower.cpp b/llvm/lib/Target/X86/X86MCInstLower.cpp
index e1a67f61e766..133ee2041565 100644
--- a/llvm/lib/Target/X86/X86MCInstLower.cpp
+++ b/llvm/lib/Target/X86/X86MCInstLower.cpp
@@ -2055,10 +2055,11 @@ void X86AsmPrinter::emitInstruction(const MachineInstr *MI) {
}
}
- // Add a comment about EVEX-2-VEX compression for AVX-512 instrs that
- // are compressed from EVEX encoding to VEX encoding.
+ // Add a comment about EVEX compression
if (TM.Options.MCOptions.ShowMCEncoding) {
- if (MI->getAsmPrinterFlags() & X86::AC_EVEX_2_VEX)
+ if (MI->getAsmPrinterFlags() & X86::AC_EVEX_2_LEGACY)
+ OutStreamer->AddComment("EVEX TO LEGACY Compression ", false);
+ else if (MI->getAsmPrinterFlags() & X86::AC_EVEX_2_VEX)
OutStreamer->AddComment("EVEX TO VEX Compression ", false);
}
diff --git a/llvm/lib/Target/X86/X86TargetMachine.cpp b/llvm/lib/Target/X86/X86TargetMachine.cpp
index 5668b514d6de..b92bffbe6239 100644
--- a/llvm/lib/Target/X86/X86TargetMachine.cpp
+++ b/llvm/lib/Target/X86/X86TargetMachine.cpp
@@ -75,7 +75,7 @@ extern "C" LLVM_EXTERNAL_VISIBILITY void LLVMInitializeX86Target() {
initializeGlobalISel(PR);
initializeWinEHStatePassPass(PR);
initializeFixupBWInstPassPass(PR);
- initializeEvexToVexInstPassPass(PR);
+ initializeCompressEVEXPassPass(PR);
initializeFixupLEAPassPass(PR);
initializeFPSPass(PR);
initializeX86FixupSetCCPassPass(PR);
@@ -575,7 +575,7 @@ void X86PassConfig::addPreEmitPass() {
addPass(createX86FixupInstTuning());
addPass(createX86FixupVectorConstants());
}
- addPass(createX86EvexToVexInsts());
+ addPass(createX86CompressEVEXPass());
addPass(createX86DiscriminateMemOpsPass());
addPass(createX86InsertPrefetchPass());
addPass(createX86InsertX87waitPass());
diff --git a/llvm/lib/Transforms/InstCombine/InstCombineCompares.cpp b/llvm/lib/Transforms/InstCombine/InstCombineCompares.cpp
index a2ff8f3eef81..7c1aff445524 100644
--- a/llvm/lib/Transforms/InstCombine/InstCombineCompares.cpp
+++ b/llvm/lib/Transforms/InstCombine/InstCombineCompares.cpp
@@ -4920,8 +4920,9 @@ Instruction *InstCombinerImpl::foldICmpBinOp(ICmpInst &I,
}
}
- if (BO0 && BO1 && BO0->getOpcode() == BO1->getOpcode() && BO0->hasOneUse() &&
- BO1->hasOneUse() && BO0->getOperand(1) == BO1->getOperand(1)) {
+ if (BO0 && BO1 && BO0->getOpcode() == BO1->getOpcode() &&
+ (BO0->hasOneUse() || BO1->hasOneUse()) &&
+ BO0->getOperand(1) == BO1->getOperand(1)) {
switch (BO0->getOpcode()) {
default:
break;
diff --git a/llvm/lib/Transforms/Instrumentation/InstrProfiling.cpp b/llvm/lib/Transforms/Instrumentation/InstrProfiling.cpp
index fe5a0578bd97..a19b14087254 100644
--- a/llvm/lib/Transforms/Instrumentation/InstrProfiling.cpp
+++ b/llvm/lib/Transforms/Instrumentation/InstrProfiling.cpp
@@ -1189,12 +1189,10 @@ static inline Constant *getFuncAddrForProfData(Function *Fn) {
}
static bool needsRuntimeRegistrationOfSectionRange(const Triple &TT) {
- // Don't do this for Darwin. compiler-rt uses linker magic.
- if (TT.isOSDarwin())
- return false;
- // Use linker script magic to get data/cnts/name start/end.
- if (TT.isOSAIX() || TT.isOSLinux() || TT.isOSFreeBSD() || TT.isOSNetBSD() ||
- TT.isOSSolaris() || TT.isOSFuchsia() || TT.isPS() || TT.isOSWindows())
+ // compiler-rt uses linker support to get data/counters/name start/end for
+ // ELF, COFF, Mach-O and XCOFF.
+ if (TT.isOSBinFormatELF() || TT.isOSBinFormatCOFF() ||
+ TT.isOSBinFormatMachO() || TT.isOSBinFormatXCOFF())
return false;
return true;
diff --git a/llvm/lib/Transforms/Scalar/ConstraintElimination.cpp b/llvm/lib/Transforms/Scalar/ConstraintElimination.cpp
index 5889dab16265..6fec54ac7922 100644
--- a/llvm/lib/Transforms/Scalar/ConstraintElimination.cpp
+++ b/llvm/lib/Transforms/Scalar/ConstraintElimination.cpp
@@ -933,15 +933,20 @@ void State::addInfoForInductions(BasicBlock &BB) {
}
DomTreeNode *DTN = DT.getNode(InLoopSucc);
- auto Inc = SE.getMonotonicPredicateType(AR, CmpInst::ICMP_UGT);
- bool MonotonicallyIncreasing =
- Inc && *Inc == ScalarEvolution::MonotonicallyIncreasing;
- if (MonotonicallyIncreasing) {
- // SCEV guarantees that AR does not wrap, so PN >= StartValue can be added
- // unconditionally.
+ auto IncUnsigned = SE.getMonotonicPredicateType(AR, CmpInst::ICMP_UGT);
+ auto IncSigned = SE.getMonotonicPredicateType(AR, CmpInst::ICMP_SGT);
+ bool MonotonicallyIncreasingUnsigned =
+ IncUnsigned && *IncUnsigned == ScalarEvolution::MonotonicallyIncreasing;
+ bool MonotonicallyIncreasingSigned =
+ IncSigned && *IncSigned == ScalarEvolution::MonotonicallyIncreasing;
+ // If SCEV guarantees that AR does not wrap, PN >= StartValue can be added
+ // unconditionally.
+ if (MonotonicallyIncreasingUnsigned)
WorkList.push_back(
FactOrCheck::getConditionFact(DTN, CmpInst::ICMP_UGE, PN, StartValue));
- }
+ if (MonotonicallyIncreasingSigned)
+ WorkList.push_back(
+ FactOrCheck::getConditionFact(DTN, CmpInst::ICMP_SGE, PN, StartValue));
APInt StepOffset;
if (auto *C = dyn_cast<SCEVConstant>(AR->getStepRecurrence(SE)))
@@ -965,11 +970,17 @@ void State::addInfoForInductions(BasicBlock &BB) {
WorkList.push_back(FactOrCheck::getConditionFact(
DTN, CmpInst::ICMP_UGE, StartValue, PN,
ConditionTy(CmpInst::ICMP_ULE, B, StartValue)));
+ WorkList.push_back(FactOrCheck::getConditionFact(
+ DTN, CmpInst::ICMP_SGE, StartValue, PN,
+ ConditionTy(CmpInst::ICMP_SLE, B, StartValue)));
// Add PN > B conditional on B <= StartValue which guarantees that the loop
// exits when reaching B with a step of -1.
WorkList.push_back(FactOrCheck::getConditionFact(
DTN, CmpInst::ICMP_UGT, PN, B,
ConditionTy(CmpInst::ICMP_ULE, B, StartValue)));
+ WorkList.push_back(FactOrCheck::getConditionFact(
+ DTN, CmpInst::ICMP_SGT, PN, B,
+ ConditionTy(CmpInst::ICMP_SLE, B, StartValue)));
return;
}
@@ -990,14 +1001,21 @@ void State::addInfoForInductions(BasicBlock &BB) {
// AR may wrap. Add PN >= StartValue conditional on StartValue <= B which
// guarantees that the loop exits before wrapping in combination with the
// restrictions on B and the step above.
- if (!MonotonicallyIncreasing) {
+ if (!MonotonicallyIncreasingUnsigned)
WorkList.push_back(FactOrCheck::getConditionFact(
DTN, CmpInst::ICMP_UGE, PN, StartValue,
ConditionTy(CmpInst::ICMP_ULE, StartValue, B)));
- }
+ if (!MonotonicallyIncreasingSigned)
+ WorkList.push_back(FactOrCheck::getConditionFact(
+ DTN, CmpInst::ICMP_SGE, PN, StartValue,
+ ConditionTy(CmpInst::ICMP_SLE, StartValue, B)));
+
WorkList.push_back(FactOrCheck::getConditionFact(
DTN, CmpInst::ICMP_ULT, PN, B,
ConditionTy(CmpInst::ICMP_ULE, StartValue, B)));
+ WorkList.push_back(FactOrCheck::getConditionFact(
+ DTN, CmpInst::ICMP_SLT, PN, B,
+ ConditionTy(CmpInst::ICMP_SLE, StartValue, B)));
}
void State::addInfoFor(BasicBlock &BB) {
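
(Aside: a minimal runnable sketch of the invariants recorded above for a simple non-wrapping induction; the loop bounds are made up, and the asserts mirror the existing unsigned facts plus the newly added signed counterparts.)

#include <cassert>
#include <cstdint>

int main() {
  // Assumed toy loop: PN starts at Start, steps by +1, and exits exactly at B.
  uint32_t Start = 5, B = 12;
  for (uint32_t PN = Start; PN != B; ++PN) {
    assert(PN >= Start);                    // unsigned fact: PN >=u Start
    assert((int32_t)PN >= (int32_t)Start);  // signed counterpart: PN >=s Start
    assert(PN < B);                         // PN <u B, since the loop exits at B
  }
  return 0;
}
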
diff --git a/llvm/lib/Transforms/Utils/SCCPSolver.cpp b/llvm/lib/Transforms/Utils/SCCPSolver.cpp
index ab95698abc43..3dc6016a0a37 100644
--- a/llvm/lib/Transforms/Utils/SCCPSolver.cpp
+++ b/llvm/lib/Transforms/Utils/SCCPSolver.cpp
@@ -310,6 +310,7 @@ bool SCCPSolver::removeNonFeasibleEdges(BasicBlock *BB, DomTreeUpdater &DTU,
new UnreachableInst(DefaultDest->getContext(), NewUnreachableBB);
}
+ DefaultDest->removePredecessor(BB);
SI->setDefaultDest(NewUnreachableBB);
Updates.push_back({DominatorTree::Delete, BB, DefaultDest});
Updates.push_back({DominatorTree::Insert, BB, NewUnreachableBB});
@@ -1063,14 +1064,17 @@ void SCCPInstVisitor::getFeasibleSuccessors(Instruction &TI,
// is ready.
if (SCValue.isConstantRange(/*UndefAllowed=*/false)) {
const ConstantRange &Range = SCValue.getConstantRange();
+ unsigned ReachableCaseCount = 0;
for (const auto &Case : SI->cases()) {
const APInt &CaseValue = Case.getCaseValue()->getValue();
- if (Range.contains(CaseValue))
+ if (Range.contains(CaseValue)) {
Succs[Case.getSuccessorIndex()] = true;
+ ++ReachableCaseCount;
+ }
}
- // TODO: Determine whether default case is reachable.
- Succs[SI->case_default()->getSuccessorIndex()] = true;
+ Succs[SI->case_default()->getSuccessorIndex()] =
+ Range.isSizeLargerThan(ReachableCaseCount);
return;
}
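
(Aside: a minimal arithmetic sketch of the new default-successor rule; the numbers are invented for illustration. The default edge stays feasible only when the proven value range holds more values than there are reachable case labels.)

#include <cstdint>
#include <iostream>

int main() {
  // Assume the switch condition is proven to lie in [0, 3]: four possible values.
  uint64_t RangeSize = 4;
  // Assume all four values are covered by reachable case labels.
  uint64_t ReachableCaseCount = 4;
  // Mirrors Range.isSizeLargerThan(ReachableCaseCount): every value in the
  // range already has a case, so the default successor is not feasible.
  bool DefaultFeasible = RangeSize > ReachableCaseCount;
  std::cout << (DefaultFeasible ? "default feasible\n" : "default infeasible\n");
  return 0;
}
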
diff --git a/llvm/lib/Transforms/Vectorize/LoopVectorize.cpp b/llvm/lib/Transforms/Vectorize/LoopVectorize.cpp
index 10c068e3b589..8b6212aaa358 100644
--- a/llvm/lib/Transforms/Vectorize/LoopVectorize.cpp
+++ b/llvm/lib/Transforms/Vectorize/LoopVectorize.cpp
@@ -8077,8 +8077,11 @@ VPRecipeBase *VPRecipeBuilder::tryToWidenMemory(Instruction *I,
VPValue *Ptr = isa<LoadInst>(I) ? Operands[0] : Operands[1];
if (Consecutive) {
- auto *VectorPtr = new VPVectorPointerRecipe(Ptr, getLoadStoreType(I),
- Reverse, I->getDebugLoc());
+ auto *GEP = dyn_cast<GetElementPtrInst>(
+ Ptr->getUnderlyingValue()->stripPointerCasts());
+ auto *VectorPtr = new VPVectorPointerRecipe(
+ Ptr, getLoadStoreType(I), Reverse, GEP ? GEP->isInBounds() : false,
+ I->getDebugLoc());
Builder.getInsertBlock()->appendRecipe(VectorPtr);
Ptr = VectorPtr;
}
diff --git a/llvm/lib/Transforms/Vectorize/SLPVectorizer.cpp b/llvm/lib/Transforms/Vectorize/SLPVectorizer.cpp
index cbe767537a1d..8e22b54f002d 100644
--- a/llvm/lib/Transforms/Vectorize/SLPVectorizer.cpp
+++ b/llvm/lib/Transforms/Vectorize/SLPVectorizer.cpp
@@ -10596,7 +10596,8 @@ ResTy BoUpSLP::processBuildVector(const TreeEntry *E, Args &...Params) {
inversePermutation(E->ReorderIndices, ReorderMask);
if (!ReorderMask.empty())
reorderScalars(GatheredScalars, ReorderMask);
- auto FindReusedSplat = [&](MutableArrayRef<int> Mask, unsigned InputVF) {
+ auto FindReusedSplat = [&](MutableArrayRef<int> Mask, unsigned InputVF,
+ unsigned I, unsigned SliceSize) {
if (!isSplat(E->Scalars) || none_of(E->Scalars, [](Value *V) {
return isa<UndefValue>(V) && !isa<PoisonValue>(V);
}))
@@ -10619,11 +10620,13 @@ ResTy BoUpSLP::processBuildVector(const TreeEntry *E, Args &...Params) {
Idx == 0) ||
(Mask.size() == InputVF &&
ShuffleVectorInst::isIdentityMask(Mask, Mask.size()))) {
- std::iota(Mask.begin(), Mask.end(), 0);
+ std::iota(std::next(Mask.begin(), I * SliceSize),
+ std::next(Mask.begin(), (I + 1) * SliceSize), 0);
} else {
- unsigned I =
+ unsigned IVal =
*find_if_not(Mask, [](int Idx) { return Idx == PoisonMaskElem; });
- std::fill(Mask.begin(), Mask.end(), I);
+ std::fill(std::next(Mask.begin(), I * SliceSize),
+ std::next(Mask.begin(), (I + 1) * SliceSize), IVal);
}
return true;
};
@@ -10872,7 +10875,8 @@ ResTy BoUpSLP::processBuildVector(const TreeEntry *E, Args &...Params) {
} else if (Vec1) {
IsUsedInExpr &= FindReusedSplat(
ExtractMask,
- cast<FixedVectorType>(Vec1->getType())->getNumElements());
+ cast<FixedVectorType>(Vec1->getType())->getNumElements(), 0,
+ ExtractMask.size());
ShuffleBuilder.add(Vec1, ExtractMask, /*ForExtracts=*/true);
IsNonPoisoned &= isGuaranteedNotToBePoison(Vec1);
} else {
@@ -10898,7 +10902,7 @@ ResTy BoUpSLP::processBuildVector(const TreeEntry *E, Args &...Params) {
copy(SubMask, std::next(VecMask.begin(), I * SliceSize));
if (TEs.size() == 1) {
IsUsedInExpr &=
- FindReusedSplat(VecMask, TEs.front()->getVectorFactor());
+ FindReusedSplat(VecMask, TEs.front()->getVectorFactor(), I, SliceSize);
ShuffleBuilder.add(*TEs.front(), VecMask);
if (TEs.front()->VectorizedValue)
IsNonPoisoned &=
diff --git a/llvm/lib/Transforms/Vectorize/VPlan.h b/llvm/lib/Transforms/Vectorize/VPlan.h
index 9d279da758ec..4b4f4911eb64 100644
--- a/llvm/lib/Transforms/Vectorize/VPlan.h
+++ b/llvm/lib/Transforms/Vectorize/VPlan.h
@@ -842,6 +842,12 @@ public:
WrapFlagsTy(bool HasNUW, bool HasNSW) : HasNUW(HasNUW), HasNSW(HasNSW) {}
};
+protected:
+ struct GEPFlagsTy {
+ char IsInBounds : 1;
+ GEPFlagsTy(bool IsInBounds) : IsInBounds(IsInBounds) {}
+ };
+
private:
struct DisjointFlagsTy {
char IsDisjoint : 1;
@@ -849,9 +855,6 @@ private:
struct ExactFlagsTy {
char IsExact : 1;
};
- struct GEPFlagsTy {
- char IsInBounds : 1;
- };
struct NonNegFlagsTy {
char NonNeg : 1;
};
@@ -933,12 +936,21 @@ public:
: VPRecipeBase(SC, Operands, DL), OpType(OperationType::FPMathOp),
FMFs(FMFs) {}
+protected:
+ template <typename IterT>
+ VPRecipeWithIRFlags(const unsigned char SC, IterT Operands,
+ GEPFlagsTy GEPFlags, DebugLoc DL = {})
+ : VPRecipeBase(SC, Operands, DL), OpType(OperationType::GEPOp),
+ GEPFlags(GEPFlags) {}
+
+public:
static inline bool classof(const VPRecipeBase *R) {
return R->getVPDefID() == VPRecipeBase::VPInstructionSC ||
R->getVPDefID() == VPRecipeBase::VPWidenSC ||
R->getVPDefID() == VPRecipeBase::VPWidenGEPSC ||
R->getVPDefID() == VPRecipeBase::VPWidenCastSC ||
- R->getVPDefID() == VPRecipeBase::VPReplicateSC;
+ R->getVPDefID() == VPRecipeBase::VPReplicateSC ||
+ R->getVPDefID() == VPRecipeBase::VPVectorPointerSC;
}
/// Drop all poison-generating flags.
@@ -1361,15 +1373,16 @@ public:
/// A recipe to compute the pointers for widened memory accesses of IndexTy for
/// all parts. If IsReverse is true, compute pointers for accessing the input in
/// reverse order per part.
-class VPVectorPointerRecipe : public VPRecipeBase, public VPValue {
+class VPVectorPointerRecipe : public VPRecipeWithIRFlags, public VPValue {
Type *IndexedTy;
bool IsReverse;
public:
VPVectorPointerRecipe(VPValue *Ptr, Type *IndexedTy, bool IsReverse,
- DebugLoc DL)
- : VPRecipeBase(VPDef::VPVectorPointerSC, {Ptr}, DL), VPValue(this),
- IndexedTy(IndexedTy), IsReverse(IsReverse) {}
+ bool IsInBounds, DebugLoc DL)
+ : VPRecipeWithIRFlags(VPDef::VPVectorPointerSC, ArrayRef<VPValue *>(Ptr),
+ GEPFlagsTy(IsInBounds), DL),
+ VPValue(this), IndexedTy(IndexedTy), IsReverse(IsReverse) {}
VP_CLASSOF_IMPL(VPDef::VPVectorPointerSC)
diff --git a/llvm/lib/Transforms/Vectorize/VPlanRecipes.cpp b/llvm/lib/Transforms/Vectorize/VPlanRecipes.cpp
index 1e5273bcd748..349544f9e390 100644
--- a/llvm/lib/Transforms/Vectorize/VPlanRecipes.cpp
+++ b/llvm/lib/Transforms/Vectorize/VPlanRecipes.cpp
@@ -121,6 +121,7 @@ bool VPRecipeBase::mayHaveSideEffects() const {
case VPInstructionSC:
switch (cast<VPInstruction>(this)->getOpcode()) {
case Instruction::ICmp:
+ case Instruction::Select:
case VPInstruction::Not:
case VPInstruction::CalculateTripCountMinusVF:
case VPInstruction::CanonicalIVIncrementForPart:
@@ -1307,9 +1308,7 @@ void VPVectorPointerRecipe ::execute(VPTransformState &State) {
? DL.getIndexType(IndexedTy->getPointerTo())
: Builder.getInt32Ty();
Value *Ptr = State.get(getOperand(0), VPIteration(0, 0));
- bool InBounds = false;
- if (auto *GEP = dyn_cast<GetElementPtrInst>(Ptr->stripPointerCasts()))
- InBounds = GEP->isInBounds();
+ bool InBounds = isInBounds();
if (IsReverse) {
// If the address is consecutive but reversed, then the
// wide store needs to start at the last vector element.
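
(Aside: a toy C++ sketch, not the real VPlan classes, of the design change spanning the LoopVectorize, VPlan.h, and VPlanRecipes.cpp hunks above: the inbounds flag is decided once from the underlying GEP when the recipe is created, and execute() only reads the stored flag back.)

#include <iostream>

// Toy stand-in for the recipe, not the actual VPVectorPointerRecipe.
struct VectorPointerSketch {
  bool IsInBounds;
  explicit VectorPointerSketch(bool IsInBounds) : IsInBounds(IsInBounds) {}
  bool isInBounds() const { return IsInBounds; } // execute() reads the stored flag
};

int main() {
  bool GEPIsInBounds = true;                 // assumed result of inspecting the source GEP
  VectorPointerSketch Recipe(GEPIsInBounds); // flag captured when the recipe is built
  std::cout << "inbounds: " << Recipe.isInBounds() << "\n";
  return 0;
}
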
diff --git a/llvm/test/CodeGen/AArch64/GlobalISel/combine-ext-debugloc.mir b/llvm/test/CodeGen/AArch64/GlobalISel/combine-ext-debugloc.mir
index 860df510b211..4c0e191f7196 100644
--- a/llvm/test/CodeGen/AArch64/GlobalISel/combine-ext-debugloc.mir
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/combine-ext-debugloc.mir
@@ -2,7 +2,7 @@
# Check that when we combine ZEXT/ANYEXT we assign the correct location.
# CHECK: !8 = !DILocation(line: 23, column: 5, scope: !4)
-# CHECK: G_AND %15, %16, debug-location !8
+# CHECK: G_AND %14, %15, debug-location !8
--- |
target datalayout = "e-m:o-i64:64-i128:128-n32:64-S128"
diff --git a/llvm/test/CodeGen/AArch64/GlobalISel/irtranslator-switch-split.ll b/llvm/test/CodeGen/AArch64/GlobalISel/irtranslator-switch-split.ll
new file mode 100644
index 000000000000..55cf48ed2245
--- /dev/null
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/irtranslator-switch-split.ll
@@ -0,0 +1,87 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 4
+; RUN: llc -global-isel -o - %s | FileCheck %s
+
+target datalayout = "e-m:o-i64:64-i128:128-n32:64-S128"
+target triple = "arm64-apple-macosx14.0.0"
+
+; Function Attrs: nocallback nofree nosync nounwind willreturn memory(argmem: readwrite)
+declare void @llvm.lifetime.start.p0(i64 immarg, ptr nocapture) #0
+
+declare i32 @logg(...)
+
+define i32 @scanfile(i32 %call148) {
+; CHECK-LABEL: scanfile:
+; CHECK: ; %bb.0: ; %entry
+; CHECK-NEXT: stp x29, x30, [sp, #-16]! ; 16-byte Folded Spill
+; CHECK-NEXT: .cfi_def_cfa_offset 16
+; CHECK-NEXT: .cfi_offset w30, -8
+; CHECK-NEXT: .cfi_offset w29, -16
+; CHECK-NEXT: mov w8, w0
+; CHECK-NEXT: cmp w0, #1
+; CHECK-NEXT: mov w0, wzr
+; CHECK-NEXT: b.ge LBB0_3
+; CHECK-NEXT: ; %bb.1: ; %entry
+; CHECK-NEXT: cbnz w8, LBB0_7
+; CHECK-NEXT: LBB0_2: ; %common.ret1
+; CHECK-NEXT: ldp x29, x30, [sp], #16 ; 16-byte Folded Reload
+; CHECK-NEXT: ret
+; CHECK-NEXT: LBB0_3: ; %entry
+; CHECK-NEXT: b.eq LBB0_2
+; CHECK-NEXT: ; %bb.4: ; %entry
+; CHECK-NEXT: cmp w8, #2
+; CHECK-NEXT: b.eq LBB0_6
+; CHECK-NEXT: ; %bb.5: ; %entry
+; CHECK-NEXT: cmp w8, #3
+; CHECK-NEXT: b.ne LBB0_2
+; CHECK-NEXT: LBB0_6: ; %sw.bb300
+; CHECK-NEXT: bl _logg
+; CHECK-NEXT: ldp x29, x30, [sp], #16 ; 16-byte Folded Reload
+; CHECK-NEXT: ret
+; CHECK-NEXT: LBB0_7: ; %entry
+; CHECK-NEXT: cmn w8, #2
+; CHECK-NEXT: b.eq LBB0_9
+; CHECK-NEXT: ; %bb.8: ; %entry
+; CHECK-NEXT: cmn w8, #1
+; CHECK-NEXT: b.ne LBB0_2
+; CHECK-NEXT: LBB0_9: ; %sw.bb150
+; CHECK-NEXT: bl _logg
+; CHECK-NEXT: brk #0x1
+entry:
+ switch i32 %call148, label %common.ret [
+ i32 -1, label %sw.bb
+ i32 -2, label %sw.bb150
+ i32 0, label %sw.bb152
+ i32 1, label %sw.bb178
+ i32 2, label %sw.bb200
+ i32 3, label %sw.bb300
+ ]
+
+sw.bb: ; preds = %entry
+ %call149 = call i32 (...) @logg()
+ unreachable
+
+sw.bb150: ; preds = %entry
+ %call151 = call i32 (...) @logg()
+ unreachable
+
+sw.bb200:
+ %res = call i32 (...) @logg()
+ ret i32 %res
+
+sw.bb300:
+ %res2 = call i32 (...) @logg()
+ ret i32 %res2
+
+common.ret: ; preds = %sw.bb178, %sw.bb152, %entry
+ ret i32 0
+
+sw.bb152: ; preds = %entry
+ %tobool154.not = icmp eq i32 0, 0
+ br label %common.ret
+
+sw.bb178: ; preds = %entry
+ call void @llvm.lifetime.start.p0(i64 0, ptr null)
+ br label %common.ret
+}
+
+attributes #0 = { nocallback nofree nosync nounwind willreturn memory(argmem: readwrite) }
diff --git a/llvm/test/CodeGen/AArch64/GlobalISel/postlegalizercombiner-extending-loads.mir b/llvm/test/CodeGen/AArch64/GlobalISel/postlegalizercombiner-extending-loads.mir
index db576419a764..7b3547159f18 100644
--- a/llvm/test/CodeGen/AArch64/GlobalISel/postlegalizercombiner-extending-loads.mir
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/postlegalizercombiner-extending-loads.mir
@@ -8,7 +8,7 @@
entry:
ret void
}
- define void @test_no_anyext(i8* %addr) {
+ define void @test_s32_to_s64(i8* %addr) {
entry:
ret void
}
@@ -21,9 +21,11 @@ body: |
bb.0.entry:
liveins: $x0
; CHECK-LABEL: name: test_zeroext
- ; CHECK: [[COPY:%[0-9]+]]:_(p0) = COPY $x0
- ; CHECK: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p0) :: (load (s8) from %ir.addr)
- ; CHECK: $w0 = COPY [[ZEXTLOAD]](s32)
+ ; CHECK: liveins: $x0
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x0
+ ; CHECK-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p0) :: (load (s8) from %ir.addr)
+ ; CHECK-NEXT: $w0 = COPY [[ZEXTLOAD]](s32)
%0:_(p0) = COPY $x0
%1:_(s8) = G_LOAD %0 :: (load (s8) from %ir.addr)
%2:_(s32) = G_ZEXT %1
@@ -31,18 +33,17 @@ body: |
...
---
-name: test_no_anyext
+name: test_s32_to_s64
legalized: true
body: |
bb.0.entry:
liveins: $x0
- ; Check that we don't try to do an anyext combine. We don't want to do this
- ; because an anyexting load like s64 = G_LOAD %p (load 4) isn't legal.
- ; CHECK-LABEL: name: test_no_anyext
- ; CHECK: [[COPY:%[0-9]+]]:_(p0) = COPY $x0
- ; CHECK: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p0) :: (load (s32) from %ir.addr)
- ; CHECK: [[ANYEXT:%[0-9]+]]:_(s64) = G_ANYEXT [[LOAD]](s32)
- ; CHECK: $x0 = COPY [[ANYEXT]](s64)
+ ; CHECK-LABEL: name: test_s32_to_s64
+ ; CHECK: liveins: $x0
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x0
+ ; CHECK-NEXT: [[LOAD:%[0-9]+]]:_(s64) = G_LOAD [[COPY]](p0) :: (load (s32) from %ir.addr)
+ ; CHECK-NEXT: $x0 = COPY [[LOAD]](s64)
%0:_(p0) = COPY $x0
%1:_(s32) = G_LOAD %0 :: (load (s32) from %ir.addr)
%2:_(s64) = G_ANYEXT %1
diff --git a/llvm/test/CodeGen/AArch64/arm64-zip.ll b/llvm/test/CodeGen/AArch64/arm64-zip.ll
index e22b57c8af44..c6e3c3540f6e 100644
--- a/llvm/test/CodeGen/AArch64/arm64-zip.ll
+++ b/llvm/test/CodeGen/AArch64/arm64-zip.ll
@@ -328,7 +328,7 @@ define <4 x i32> @shuffle_zip3(<4 x i32> %arg) {
; CHECK-NEXT: zip2.4h v0, v0, v1
; CHECK-NEXT: movi.4s v1, #1
; CHECK-NEXT: zip1.4h v0, v0, v0
-; CHECK-NEXT: ushll.4s v0, v0, #0
+; CHECK-NEXT: sshll.4s v0, v0, #0
; CHECK-NEXT: and.16b v0, v0, v1
; CHECK-NEXT: ret
bb:
diff --git a/llvm/test/CodeGen/AArch64/replace-intrinsics-with-veclib-armpl.ll b/llvm/test/CodeGen/AArch64/replace-with-veclib-armpl.ll
index d41870ec6e79..4480a90a2728 100644
--- a/llvm/test/CodeGen/AArch64/replace-intrinsics-with-veclib-armpl.ll
+++ b/llvm/test/CodeGen/AArch64/replace-with-veclib-armpl.ll
@@ -15,7 +15,7 @@ declare <vscale x 2 x double> @llvm.cos.nxv2f64(<vscale x 2 x double>)
declare <vscale x 4 x float> @llvm.cos.nxv4f32(<vscale x 4 x float>)
;.
-; CHECK: @llvm.compiler.used = appending global [32 x ptr] [ptr @armpl_vcosq_f64, ptr @armpl_vcosq_f32, ptr @armpl_svcos_f64_x, ptr @armpl_svcos_f32_x, ptr @armpl_vsinq_f64, ptr @armpl_vsinq_f32, ptr @armpl_svsin_f64_x, ptr @armpl_svsin_f32_x, ptr @armpl_vexpq_f64, ptr @armpl_vexpq_f32, ptr @armpl_svexp_f64_x, ptr @armpl_svexp_f32_x, ptr @armpl_vexp2q_f64, ptr @armpl_vexp2q_f32, ptr @armpl_svexp2_f64_x, ptr @armpl_svexp2_f32_x, ptr @armpl_vexp10q_f64, ptr @armpl_vexp10q_f32, ptr @armpl_svexp10_f64_x, ptr @armpl_svexp10_f32_x, ptr @armpl_vlogq_f64, ptr @armpl_vlogq_f32, ptr @armpl_svlog_f64_x, ptr @armpl_svlog_f32_x, ptr @armpl_vlog2q_f64, ptr @armpl_vlog2q_f32, ptr @armpl_svlog2_f64_x, ptr @armpl_svlog2_f32_x, ptr @armpl_vlog10q_f64, ptr @armpl_vlog10q_f32, ptr @armpl_svlog10_f64_x, ptr @armpl_svlog10_f32_x], section "llvm.metadata"
+; CHECK: @llvm.compiler.used = appending global [36 x ptr] [ptr @armpl_vcosq_f64, ptr @armpl_vcosq_f32, ptr @armpl_svcos_f64_x, ptr @armpl_svcos_f32_x, ptr @armpl_vsinq_f64, ptr @armpl_vsinq_f32, ptr @armpl_svsin_f64_x, ptr @armpl_svsin_f32_x, ptr @armpl_vexpq_f64, ptr @armpl_vexpq_f32, ptr @armpl_svexp_f64_x, ptr @armpl_svexp_f32_x, ptr @armpl_vexp2q_f64, ptr @armpl_vexp2q_f32, ptr @armpl_svexp2_f64_x, ptr @armpl_svexp2_f32_x, ptr @armpl_vexp10q_f64, ptr @armpl_vexp10q_f32, ptr @armpl_svexp10_f64_x, ptr @armpl_svexp10_f32_x, ptr @armpl_vlogq_f64, ptr @armpl_vlogq_f32, ptr @armpl_svlog_f64_x, ptr @armpl_svlog_f32_x, ptr @armpl_vlog2q_f64, ptr @armpl_vlog2q_f32, ptr @armpl_svlog2_f64_x, ptr @armpl_svlog2_f32_x, ptr @armpl_vlog10q_f64, ptr @armpl_vlog10q_f32, ptr @armpl_svlog10_f64_x, ptr @armpl_svlog10_f32_x, ptr @armpl_vfmodq_f64, ptr @armpl_vfmodq_f32, ptr @armpl_svfmod_f64_x, ptr @armpl_svfmod_f32_x], section "llvm.metadata"
;.
define <2 x double> @llvm_cos_f64(<2 x double> %in) {
; CHECK-LABEL: define <2 x double> @llvm_cos_f64
@@ -424,6 +424,46 @@ define <vscale x 4 x float> @llvm_pow_vscale_f32(<vscale x 4 x float> %in, <vsca
ret <vscale x 4 x float> %1
}
+define <2 x double> @frem_f64(<2 x double> %in) {
+; CHECK-LABEL: define <2 x double> @frem_f64
+; CHECK-SAME: (<2 x double> [[IN:%.*]]) {
+; CHECK-NEXT: [[TMP1:%.*]] = call <2 x double> @armpl_vfmodq_f64(<2 x double> [[IN]], <2 x double> [[IN]])
+; CHECK-NEXT: ret <2 x double> [[TMP1]]
+;
+ %1= frem <2 x double> %in, %in
+ ret <2 x double> %1
+}
+
+define <4 x float> @frem_f32(<4 x float> %in) {
+; CHECK-LABEL: define <4 x float> @frem_f32
+; CHECK-SAME: (<4 x float> [[IN:%.*]]) {
+; CHECK-NEXT: [[TMP1:%.*]] = call <4 x float> @armpl_vfmodq_f32(<4 x float> [[IN]], <4 x float> [[IN]])
+; CHECK-NEXT: ret <4 x float> [[TMP1]]
+;
+ %1= frem <4 x float> %in, %in
+ ret <4 x float> %1
+}
+
+define <vscale x 2 x double> @frem_vscale_f64(<vscale x 2 x double> %in) #0 {
+; CHECK-LABEL: define <vscale x 2 x double> @frem_vscale_f64
+; CHECK-SAME: (<vscale x 2 x double> [[IN:%.*]]) #[[ATTR1]] {
+; CHECK-NEXT: [[TMP1:%.*]] = call <vscale x 2 x double> @armpl_svfmod_f64_x(<vscale x 2 x double> [[IN]], <vscale x 2 x double> [[IN]], <vscale x 2 x i1> shufflevector (<vscale x 2 x i1> insertelement (<vscale x 2 x i1> poison, i1 true, i64 0), <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer))
+; CHECK-NEXT: ret <vscale x 2 x double> [[TMP1]]
+;
+ %1= frem <vscale x 2 x double> %in, %in
+ ret <vscale x 2 x double> %1
+}
+
+define <vscale x 4 x float> @frem_vscale_f32(<vscale x 4 x float> %in) #0 {
+; CHECK-LABEL: define <vscale x 4 x float> @frem_vscale_f32
+; CHECK-SAME: (<vscale x 4 x float> [[IN:%.*]]) #[[ATTR1]] {
+; CHECK-NEXT: [[TMP1:%.*]] = call <vscale x 4 x float> @armpl_svfmod_f32_x(<vscale x 4 x float> [[IN]], <vscale x 4 x float> [[IN]], <vscale x 4 x i1> shufflevector (<vscale x 4 x i1> insertelement (<vscale x 4 x i1> poison, i1 true, i64 0), <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer))
+; CHECK-NEXT: ret <vscale x 4 x float> [[TMP1]]
+;
+ %1= frem <vscale x 4 x float> %in, %in
+ ret <vscale x 4 x float> %1
+}
+
attributes #0 = { "target-features"="+sve" }
;.
; CHECK: attributes #[[ATTR0:[0-9]+]] = { nocallback nofree nosync nounwind speculatable willreturn memory(none) }
diff --git a/llvm/test/CodeGen/AArch64/replace-intrinsics-with-veclib-sleef-scalable.ll b/llvm/test/CodeGen/AArch64/replace-with-veclib-sleef-scalable.ll
index c2ff6014bc69..590dd9effac0 100644
--- a/llvm/test/CodeGen/AArch64/replace-intrinsics-with-veclib-sleef-scalable.ll
+++ b/llvm/test/CodeGen/AArch64/replace-with-veclib-sleef-scalable.ll
@@ -4,7 +4,7 @@
target triple = "aarch64-unknown-linux-gnu"
;.
-; CHECK: @llvm.compiler.used = appending global [16 x ptr] [ptr @_ZGVsMxv_cos, ptr @_ZGVsMxv_cosf, ptr @_ZGVsMxv_exp, ptr @_ZGVsMxv_expf, ptr @_ZGVsMxv_exp2, ptr @_ZGVsMxv_exp2f, ptr @_ZGVsMxv_exp10, ptr @_ZGVsMxv_exp10f, ptr @_ZGVsMxv_log, ptr @_ZGVsMxv_logf, ptr @_ZGVsMxv_log10, ptr @_ZGVsMxv_log10f, ptr @_ZGVsMxv_log2, ptr @_ZGVsMxv_log2f, ptr @_ZGVsMxv_sin, ptr @_ZGVsMxv_sinf], section "llvm.metadata"
+; CHECK: @llvm.compiler.used = appending global [18 x ptr] [ptr @_ZGVsMxv_cos, ptr @_ZGVsMxv_cosf, ptr @_ZGVsMxv_exp, ptr @_ZGVsMxv_expf, ptr @_ZGVsMxv_exp2, ptr @_ZGVsMxv_exp2f, ptr @_ZGVsMxv_exp10, ptr @_ZGVsMxv_exp10f, ptr @_ZGVsMxv_log, ptr @_ZGVsMxv_logf, ptr @_ZGVsMxv_log10, ptr @_ZGVsMxv_log10f, ptr @_ZGVsMxv_log2, ptr @_ZGVsMxv_log2f, ptr @_ZGVsMxv_sin, ptr @_ZGVsMxv_sinf, ptr @_ZGVsMxvv_fmod, ptr @_ZGVsMxvv_fmodf], section "llvm.metadata"
;.
define <vscale x 2 x double> @llvm_ceil_vscale_f64(<vscale x 2 x double> %in) {
; CHECK-LABEL: @llvm_ceil_vscale_f64(
@@ -384,6 +384,24 @@ define <vscale x 4 x float> @llvm_trunc_vscale_f32(<vscale x 4 x float> %in) {
ret <vscale x 4 x float> %1
}
+define <vscale x 2 x double> @frem_f64(<vscale x 2 x double> %in) {
+; CHECK-LABEL: @frem_f64(
+; CHECK-NEXT: [[TMP1:%.*]] = call <vscale x 2 x double> @_ZGVsMxvv_fmod(<vscale x 2 x double> [[IN:%.*]], <vscale x 2 x double> [[IN]], <vscale x 2 x i1> shufflevector (<vscale x 2 x i1> insertelement (<vscale x 2 x i1> poison, i1 true, i64 0), <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer))
+; CHECK-NEXT: ret <vscale x 2 x double> [[TMP1]]
+;
+ %1= frem <vscale x 2 x double> %in, %in
+ ret <vscale x 2 x double> %1
+}
+
+define <vscale x 4 x float> @frem_f32(<vscale x 4 x float> %in) {
+; CHECK-LABEL: @frem_f32(
+; CHECK-NEXT: [[TMP1:%.*]] = call <vscale x 4 x float> @_ZGVsMxvv_fmodf(<vscale x 4 x float> [[IN:%.*]], <vscale x 4 x float> [[IN]], <vscale x 4 x i1> shufflevector (<vscale x 4 x i1> insertelement (<vscale x 4 x i1> poison, i1 true, i64 0), <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer))
+; CHECK-NEXT: ret <vscale x 4 x float> [[TMP1]]
+;
+ %1= frem <vscale x 4 x float> %in, %in
+ ret <vscale x 4 x float> %1
+}
+
declare <vscale x 2 x double> @llvm.ceil.nxv2f64(<vscale x 2 x double>)
declare <vscale x 4 x float> @llvm.ceil.nxv4f32(<vscale x 4 x float>)
declare <vscale x 2 x double> @llvm.copysign.nxv2f64(<vscale x 2 x double>, <vscale x 2 x double>)
diff --git a/llvm/test/CodeGen/AArch64/replace-intrinsics-with-veclib-sleef.ll b/llvm/test/CodeGen/AArch64/replace-with-veclib-sleef.ll
index be247de36805..865a46009b20 100644
--- a/llvm/test/CodeGen/AArch64/replace-intrinsics-with-veclib-sleef.ll
+++ b/llvm/test/CodeGen/AArch64/replace-with-veclib-sleef.ll
@@ -4,7 +4,7 @@
target triple = "aarch64-unknown-linux-gnu"
;.
-; CHECK: @llvm.compiler.used = appending global [16 x ptr] [ptr @_ZGVnN2v_cos, ptr @_ZGVnN4v_cosf, ptr @_ZGVnN2v_exp, ptr @_ZGVnN4v_expf, ptr @_ZGVnN2v_exp2, ptr @_ZGVnN4v_exp2f, ptr @_ZGVnN2v_exp10, ptr @_ZGVnN4v_exp10f, ptr @_ZGVnN2v_log, ptr @_ZGVnN4v_logf, ptr @_ZGVnN2v_log10, ptr @_ZGVnN4v_log10f, ptr @_ZGVnN2v_log2, ptr @_ZGVnN4v_log2f, ptr @_ZGVnN2v_sin, ptr @_ZGVnN4v_sinf], section "llvm.metadata"
+; CHECK: @llvm.compiler.used = appending global [18 x ptr] [ptr @_ZGVnN2v_cos, ptr @_ZGVnN4v_cosf, ptr @_ZGVnN2v_exp, ptr @_ZGVnN4v_expf, ptr @_ZGVnN2v_exp2, ptr @_ZGVnN4v_exp2f, ptr @_ZGVnN2v_exp10, ptr @_ZGVnN4v_exp10f, ptr @_ZGVnN2v_log, ptr @_ZGVnN4v_logf, ptr @_ZGVnN2v_log10, ptr @_ZGVnN4v_log10f, ptr @_ZGVnN2v_log2, ptr @_ZGVnN4v_log2f, ptr @_ZGVnN2v_sin, ptr @_ZGVnN4v_sinf, ptr @_ZGVnN2vv_fmod, ptr @_ZGVnN4vv_fmodf], section "llvm.metadata"
;.
define <2 x double> @llvm_ceil_f64(<2 x double> %in) {
; CHECK-LABEL: @llvm_ceil_f64(
@@ -384,6 +384,24 @@ define <4 x float> @llvm_trunc_f32(<4 x float> %in) {
ret <4 x float> %1
}
+define <2 x double> @frem_f64(<2 x double> %in) {
+; CHECK-LABEL: @frem_f64(
+; CHECK-NEXT: [[TMP1:%.*]] = call <2 x double> @_ZGVnN2vv_fmod(<2 x double> [[IN:%.*]], <2 x double> [[IN]])
+; CHECK-NEXT: ret <2 x double> [[TMP1]]
+;
+ %1= frem <2 x double> %in, %in
+ ret <2 x double> %1
+}
+
+define <4 x float> @frem_f32(<4 x float> %in) {
+; CHECK-LABEL: @frem_f32(
+; CHECK-NEXT: [[TMP1:%.*]] = call <4 x float> @_ZGVnN4vv_fmodf(<4 x float> [[IN:%.*]], <4 x float> [[IN]])
+; CHECK-NEXT: ret <4 x float> [[TMP1]]
+;
+ %1= frem <4 x float> %in, %in
+ ret <4 x float> %1
+}
+
declare <2 x double> @llvm.ceil.v2f64(<2 x double>)
declare <4 x float> @llvm.ceil.v4f32(<4 x float>)
declare <2 x double> @llvm.copysign.v2f64(<2 x double>, <2 x double>)
diff --git a/llvm/test/CodeGen/AArch64/vecreduce-add.ll b/llvm/test/CodeGen/AArch64/vecreduce-add.ll
index b24967d5a130..5fa28f77dc28 100644
--- a/llvm/test/CodeGen/AArch64/vecreduce-add.ll
+++ b/llvm/test/CodeGen/AArch64/vecreduce-add.ll
@@ -6624,6 +6624,74 @@ entry:
ret i32 %op.rdx.7
}
+define i32 @extract_hi_lo(<8 x i16> %a) {
+; CHECK-SD-BASE-LABEL: extract_hi_lo:
+; CHECK-SD-BASE: // %bb.0: // %entry
+; CHECK-SD-BASE-NEXT: uaddlv s0, v0.8h
+; CHECK-SD-BASE-NEXT: fmov w0, s0
+; CHECK-SD-BASE-NEXT: ret
+;
+; CHECK-SD-DOT-LABEL: extract_hi_lo:
+; CHECK-SD-DOT: // %bb.0: // %entry
+; CHECK-SD-DOT-NEXT: uaddlv s0, v0.8h
+; CHECK-SD-DOT-NEXT: fmov w0, s0
+; CHECK-SD-DOT-NEXT: ret
+;
+; CHECK-GI-BASE-LABEL: extract_hi_lo:
+; CHECK-GI-BASE: // %bb.0: // %entry
+; CHECK-GI-BASE-NEXT: ushll v1.4s, v0.4h, #0
+; CHECK-GI-BASE-NEXT: uaddw2 v0.4s, v1.4s, v0.8h
+; CHECK-GI-BASE-NEXT: addv s0, v0.4s
+; CHECK-GI-BASE-NEXT: fmov w0, s0
+; CHECK-GI-BASE-NEXT: ret
+;
+; CHECK-GI-DOT-LABEL: extract_hi_lo:
+; CHECK-GI-DOT: // %bb.0: // %entry
+; CHECK-GI-DOT-NEXT: ushll v1.4s, v0.4h, #0
+; CHECK-GI-DOT-NEXT: uaddw2 v0.4s, v1.4s, v0.8h
+; CHECK-GI-DOT-NEXT: addv s0, v0.4s
+; CHECK-GI-DOT-NEXT: fmov w0, s0
+; CHECK-GI-DOT-NEXT: ret
+entry:
+ %e1 = shufflevector <8 x i16> %a, <8 x i16> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+ %e2 = shufflevector <8 x i16> %a, <8 x i16> undef, <4 x i32> <i32 4, i32 5, i32 6, i32 7>
+ %z1 = zext <4 x i16> %e1 to <4 x i32>
+ %z2 = zext <4 x i16> %e2 to <4 x i32>
+ %z4 = add <4 x i32> %z1, %z2
+ %z5 = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> %z4)
+ ret i32 %z5
+}
+
+define i32 @extract_hi_hi(<8 x i16> %a) {
+; CHECK-LABEL: extract_hi_hi:
+; CHECK: // %bb.0: // %entry
+; CHECK-NEXT: uaddl2 v0.4s, v0.8h, v0.8h
+; CHECK-NEXT: addv s0, v0.4s
+; CHECK-NEXT: fmov w0, s0
+; CHECK-NEXT: ret
+entry:
+ %e2 = shufflevector <8 x i16> %a, <8 x i16> undef, <4 x i32> <i32 4, i32 5, i32 6, i32 7>
+ %z2 = zext <4 x i16> %e2 to <4 x i32>
+ %z4 = add <4 x i32> %z2, %z2
+ %z5 = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> %z4)
+ ret i32 %z5
+}
+
+define i32 @extract_lo_lo(<8 x i16> %a) {
+; CHECK-LABEL: extract_lo_lo:
+; CHECK: // %bb.0: // %entry
+; CHECK-NEXT: uaddl v0.4s, v0.4h, v0.4h
+; CHECK-NEXT: addv s0, v0.4s
+; CHECK-NEXT: fmov w0, s0
+; CHECK-NEXT: ret
+entry:
+ %e1 = shufflevector <8 x i16> %a, <8 x i16> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+ %z1 = zext <4 x i16> %e1 to <4 x i32>
+ %z4 = add <4 x i32> %z1, %z1
+ %z5 = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> %z4)
+ ret i32 %z5
+}
+
declare <8 x i32> @llvm.abs.v8i32(<8 x i32>, i1 immarg) #1
declare i16 @llvm.vector.reduce.add.v16i16(<16 x i16>)
declare i16 @llvm.vector.reduce.add.v8i16(<8 x i16>)
diff --git a/llvm/test/CodeGen/AArch64/vselect-ext.ll b/llvm/test/CodeGen/AArch64/vselect-ext.ll
index b80955665c74..0b90343a40c8 100644
--- a/llvm/test/CodeGen/AArch64/vselect-ext.ll
+++ b/llvm/test/CodeGen/AArch64/vselect-ext.ll
@@ -219,17 +219,17 @@ define <3 x i32> @same_zext_used_in_cmp_unsigned_pred_and_select_v3i16(<3 x i8>
; CHECK-NEXT: fmov s0, w0
; CHECK-NEXT: Lloh0:
; CHECK-NEXT: adrp x8, lCPI9_0@PAGE
+; CHECK-NEXT: movi.2d v3, #0x0000ff000000ff
; CHECK-NEXT: Lloh1:
; CHECK-NEXT: ldr d2, [x8, lCPI9_0@PAGEOFF]
; CHECK-NEXT: mov.h v0[1], w1
; CHECK-NEXT: mov.h v0[2], w2
-; CHECK-NEXT: fmov d1, d0
-; CHECK-NEXT: bic.4h v1, #255, lsl #8
-; CHECK-NEXT: cmhi.4h v1, v1, v2
-; CHECK-NEXT: and.8b v0, v0, v1
-; CHECK-NEXT: movi.2d v1, #0x0000ff000000ff
-; CHECK-NEXT: ushll.4s v0, v0, #0
-; CHECK-NEXT: and.16b v0, v0, v1
+; CHECK-NEXT: ushll.4s v1, v0, #0
+; CHECK-NEXT: bic.4h v0, #255, lsl #8
+; CHECK-NEXT: cmhi.4h v0, v0, v2
+; CHECK-NEXT: and.16b v1, v1, v3
+; CHECK-NEXT: sshll.4s v0, v0, #0
+; CHECK-NEXT: and.16b v0, v1, v0
; CHECK-NEXT: ret
; CHECK-NEXT: .loh AdrpLdr Lloh0, Lloh1
%ext = zext <3 x i8> %a to <3 x i32>
diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/legalize-mul.mir b/llvm/test/CodeGen/AMDGPU/GlobalISel/legalize-mul.mir
index 5b0ed61a3313..2bf8649e7624 100644
--- a/llvm/test/CodeGen/AMDGPU/GlobalISel/legalize-mul.mir
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/legalize-mul.mir
@@ -1,9 +1,10 @@
# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
-# RUN: llc -mtriple=amdgcn-mesa-mesa3d -mcpu=tahiti -O0 -run-pass=legalizer -o - %s | FileCheck -check-prefix=GFX6 %s
-# RUN: llc -mtriple=amdgcn-mesa-mesa3d -mcpu=fiji -O0 -run-pass=legalizer -o - %s | FileCheck -check-prefix=GFX8 %s
-# RUN: llc -mtriple=amdgcn-mesa-mesa3d -mcpu=gfx900 -O0 -run-pass=legalizer -o - %s | FileCheck -check-prefix=GFX9 %s
-# RUN: llc -mtriple=amdgcn-mesa-mesa3d -mcpu=gfx1010 -O0 -run-pass=legalizer -o - %s | FileCheck -check-prefix=GFX10 %s
-# RUN: llc -mtriple=amdgcn-mesa-mesa3d -mcpu=gfx1100 -O0 -run-pass=legalizer -o - %s | FileCheck -check-prefix=GFX10 %s
+# RUN: llc -mtriple=amdgcn-mesa-mesa3d -mcpu=tahiti -O0 -run-pass=legalizer -o - %s | FileCheck -check-prefixes=GCN,GFX6 %s
+# RUN: llc -mtriple=amdgcn-mesa-mesa3d -mcpu=fiji -O0 -run-pass=legalizer -o - %s | FileCheck -check-prefixes=GCN,GFX8PLUS,GFX89,GFX8 %s
+# RUN: llc -mtriple=amdgcn-mesa-mesa3d -mcpu=gfx900 -O0 -run-pass=legalizer -o - %s | FileCheck -check-prefixes=GCN,GFX8PLUS,GFX89,GFX9PLUS %s
+# RUN: llc -mtriple=amdgcn-mesa-mesa3d -mcpu=gfx1010 -O0 -run-pass=legalizer -o - %s | FileCheck -check-prefixes=GCN,GFX8PLUS,GFX9PLUS,GFX1011 %s
+# RUN: llc -mtriple=amdgcn-mesa-mesa3d -mcpu=gfx1100 -O0 -run-pass=legalizer -o - %s | FileCheck -check-prefixes=GCN,GFX8PLUS,GFX9PLUS,GFX1011 %s
+# RUN: llc -mtriple=amdgcn-mesa-mesa3d -mcpu=gfx1200 -O0 -run-pass=legalizer -o - %s | FileCheck -check-prefixes=GCN,GFX8PLUS,GFX9PLUS,GFX12 %s
---
name: test_mul_s32
@@ -11,34 +12,13 @@ body: |
bb.0:
liveins: $vgpr0, $vgpr1
- ; GFX6-LABEL: name: test_mul_s32
- ; GFX6: liveins: $vgpr0, $vgpr1
- ; GFX6-NEXT: {{ $}}
- ; GFX6-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
- ; GFX6-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr1
- ; GFX6-NEXT: [[MUL:%[0-9]+]]:_(s32) = G_MUL [[COPY]], [[COPY1]]
- ; GFX6-NEXT: $vgpr0 = COPY [[MUL]](s32)
- ; GFX8-LABEL: name: test_mul_s32
- ; GFX8: liveins: $vgpr0, $vgpr1
- ; GFX8-NEXT: {{ $}}
- ; GFX8-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
- ; GFX8-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr1
- ; GFX8-NEXT: [[MUL:%[0-9]+]]:_(s32) = G_MUL [[COPY]], [[COPY1]]
- ; GFX8-NEXT: $vgpr0 = COPY [[MUL]](s32)
- ; GFX9-LABEL: name: test_mul_s32
- ; GFX9: liveins: $vgpr0, $vgpr1
- ; GFX9-NEXT: {{ $}}
- ; GFX9-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
- ; GFX9-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr1
- ; GFX9-NEXT: [[MUL:%[0-9]+]]:_(s32) = G_MUL [[COPY]], [[COPY1]]
- ; GFX9-NEXT: $vgpr0 = COPY [[MUL]](s32)
- ; GFX10-LABEL: name: test_mul_s32
- ; GFX10: liveins: $vgpr0, $vgpr1
- ; GFX10-NEXT: {{ $}}
- ; GFX10-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
- ; GFX10-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr1
- ; GFX10-NEXT: [[MUL:%[0-9]+]]:_(s32) = G_MUL [[COPY]], [[COPY1]]
- ; GFX10-NEXT: $vgpr0 = COPY [[MUL]](s32)
+ ; GCN-LABEL: name: test_mul_s32
+ ; GCN: liveins: $vgpr0, $vgpr1
+ ; GCN-NEXT: {{ $}}
+ ; GCN-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
+ ; GCN-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr1
+ ; GCN-NEXT: [[MUL:%[0-9]+]]:_(s32) = G_MUL [[COPY]], [[COPY1]]
+ ; GCN-NEXT: $vgpr0 = COPY [[MUL]](s32)
%0:_(s32) = COPY $vgpr0
%1:_(s32) = COPY $vgpr1
%2:_(s32) = G_MUL %0, %1
@@ -51,50 +31,17 @@ body: |
bb.0:
liveins: $vgpr0_vgpr1, $vgpr2_vgpr3
- ; GFX6-LABEL: name: test_mul_v2s32
- ; GFX6: liveins: $vgpr0_vgpr1, $vgpr2_vgpr3
- ; GFX6-NEXT: {{ $}}
- ; GFX6-NEXT: [[COPY:%[0-9]+]]:_(<2 x s32>) = COPY $vgpr0_vgpr1
- ; GFX6-NEXT: [[COPY1:%[0-9]+]]:_(<2 x s32>) = COPY $vgpr2_vgpr3
- ; GFX6-NEXT: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[COPY]](<2 x s32>)
- ; GFX6-NEXT: [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[COPY1]](<2 x s32>)
- ; GFX6-NEXT: [[MUL:%[0-9]+]]:_(s32) = G_MUL [[UV]], [[UV2]]
- ; GFX6-NEXT: [[MUL1:%[0-9]+]]:_(s32) = G_MUL [[UV1]], [[UV3]]
- ; GFX6-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s32>) = G_BUILD_VECTOR [[MUL]](s32), [[MUL1]](s32)
- ; GFX6-NEXT: $vgpr0_vgpr1 = COPY [[BUILD_VECTOR]](<2 x s32>)
- ; GFX8-LABEL: name: test_mul_v2s32
- ; GFX8: liveins: $vgpr0_vgpr1, $vgpr2_vgpr3
- ; GFX8-NEXT: {{ $}}
- ; GFX8-NEXT: [[COPY:%[0-9]+]]:_(<2 x s32>) = COPY $vgpr0_vgpr1
- ; GFX8-NEXT: [[COPY1:%[0-9]+]]:_(<2 x s32>) = COPY $vgpr2_vgpr3
- ; GFX8-NEXT: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[COPY]](<2 x s32>)
- ; GFX8-NEXT: [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[COPY1]](<2 x s32>)
- ; GFX8-NEXT: [[MUL:%[0-9]+]]:_(s32) = G_MUL [[UV]], [[UV2]]
- ; GFX8-NEXT: [[MUL1:%[0-9]+]]:_(s32) = G_MUL [[UV1]], [[UV3]]
- ; GFX8-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s32>) = G_BUILD_VECTOR [[MUL]](s32), [[MUL1]](s32)
- ; GFX8-NEXT: $vgpr0_vgpr1 = COPY [[BUILD_VECTOR]](<2 x s32>)
- ; GFX9-LABEL: name: test_mul_v2s32
- ; GFX9: liveins: $vgpr0_vgpr1, $vgpr2_vgpr3
- ; GFX9-NEXT: {{ $}}
- ; GFX9-NEXT: [[COPY:%[0-9]+]]:_(<2 x s32>) = COPY $vgpr0_vgpr1
- ; GFX9-NEXT: [[COPY1:%[0-9]+]]:_(<2 x s32>) = COPY $vgpr2_vgpr3
- ; GFX9-NEXT: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[COPY]](<2 x s32>)
- ; GFX9-NEXT: [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[COPY1]](<2 x s32>)
- ; GFX9-NEXT: [[MUL:%[0-9]+]]:_(s32) = G_MUL [[UV]], [[UV2]]
- ; GFX9-NEXT: [[MUL1:%[0-9]+]]:_(s32) = G_MUL [[UV1]], [[UV3]]
- ; GFX9-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s32>) = G_BUILD_VECTOR [[MUL]](s32), [[MUL1]](s32)
- ; GFX9-NEXT: $vgpr0_vgpr1 = COPY [[BUILD_VECTOR]](<2 x s32>)
- ; GFX10-LABEL: name: test_mul_v2s32
- ; GFX10: liveins: $vgpr0_vgpr1, $vgpr2_vgpr3
- ; GFX10-NEXT: {{ $}}
- ; GFX10-NEXT: [[COPY:%[0-9]+]]:_(<2 x s32>) = COPY $vgpr0_vgpr1
- ; GFX10-NEXT: [[COPY1:%[0-9]+]]:_(<2 x s32>) = COPY $vgpr2_vgpr3
- ; GFX10-NEXT: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[COPY]](<2 x s32>)
- ; GFX10-NEXT: [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[COPY1]](<2 x s32>)
- ; GFX10-NEXT: [[MUL:%[0-9]+]]:_(s32) = G_MUL [[UV]], [[UV2]]
- ; GFX10-NEXT: [[MUL1:%[0-9]+]]:_(s32) = G_MUL [[UV1]], [[UV3]]
- ; GFX10-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s32>) = G_BUILD_VECTOR [[MUL]](s32), [[MUL1]](s32)
- ; GFX10-NEXT: $vgpr0_vgpr1 = COPY [[BUILD_VECTOR]](<2 x s32>)
+ ; GCN-LABEL: name: test_mul_v2s32
+ ; GCN: liveins: $vgpr0_vgpr1, $vgpr2_vgpr3
+ ; GCN-NEXT: {{ $}}
+ ; GCN-NEXT: [[COPY:%[0-9]+]]:_(<2 x s32>) = COPY $vgpr0_vgpr1
+ ; GCN-NEXT: [[COPY1:%[0-9]+]]:_(<2 x s32>) = COPY $vgpr2_vgpr3
+ ; GCN-NEXT: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[COPY]](<2 x s32>)
+ ; GCN-NEXT: [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[COPY1]](<2 x s32>)
+ ; GCN-NEXT: [[MUL:%[0-9]+]]:_(s32) = G_MUL [[UV]], [[UV2]]
+ ; GCN-NEXT: [[MUL1:%[0-9]+]]:_(s32) = G_MUL [[UV1]], [[UV3]]
+ ; GCN-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s32>) = G_BUILD_VECTOR [[MUL]](s32), [[MUL1]](s32)
+ ; GCN-NEXT: $vgpr0_vgpr1 = COPY [[BUILD_VECTOR]](<2 x s32>)
%0:_(<2 x s32>) = COPY $vgpr0_vgpr1
%1:_(<2 x s32>) = COPY $vgpr2_vgpr3
%2:_(<2 x s32>) = G_MUL %0, %1
@@ -122,54 +69,48 @@ body: |
; GFX6-NEXT: [[ADD1:%[0-9]+]]:_(s32) = G_ADD [[ADD]], [[UMULH]]
; GFX6-NEXT: [[MV:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[MUL]](s32), [[ADD1]](s32)
; GFX6-NEXT: $vgpr0_vgpr1 = COPY [[MV]](s64)
- ; GFX8-LABEL: name: test_mul_s64
- ; GFX8: liveins: $vgpr0_vgpr1, $vgpr2_vgpr3
- ; GFX8-NEXT: {{ $}}
- ; GFX8-NEXT: [[COPY:%[0-9]+]]:_(s64) = COPY $vgpr0_vgpr1
- ; GFX8-NEXT: [[COPY1:%[0-9]+]]:_(s64) = COPY $vgpr2_vgpr3
- ; GFX8-NEXT: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[COPY]](s64)
- ; GFX8-NEXT: [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[COPY1]](s64)
- ; GFX8-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
- ; GFX8-NEXT: [[AMDGPU_MAD_U64_U32_:%[0-9]+]]:_(s64), [[AMDGPU_MAD_U64_U32_1:%[0-9]+]]:_(s1) = G_AMDGPU_MAD_U64_U32 [[UV]](s32), [[UV2]], [[C]]
- ; GFX8-NEXT: [[UV4:%[0-9]+]]:_(s32), [[UV5:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[AMDGPU_MAD_U64_U32_]](s64)
- ; GFX8-NEXT: [[ANYEXT:%[0-9]+]]:_(s64) = G_ANYEXT [[UV5]](s32)
- ; GFX8-NEXT: [[AMDGPU_MAD_U64_U32_2:%[0-9]+]]:_(s64), [[AMDGPU_MAD_U64_U32_3:%[0-9]+]]:_(s1) = G_AMDGPU_MAD_U64_U32 [[UV]](s32), [[UV3]], [[ANYEXT]]
- ; GFX8-NEXT: [[AMDGPU_MAD_U64_U32_4:%[0-9]+]]:_(s64), [[AMDGPU_MAD_U64_U32_5:%[0-9]+]]:_(s1) = G_AMDGPU_MAD_U64_U32 [[UV1]](s32), [[UV2]], [[AMDGPU_MAD_U64_U32_2]]
- ; GFX8-NEXT: [[UV6:%[0-9]+]]:_(s32), [[UV7:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[AMDGPU_MAD_U64_U32_4]](s64)
- ; GFX8-NEXT: [[MV:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[UV4]](s32), [[UV6]](s32)
- ; GFX8-NEXT: $vgpr0_vgpr1 = COPY [[MV]](s64)
- ; GFX9-LABEL: name: test_mul_s64
- ; GFX9: liveins: $vgpr0_vgpr1, $vgpr2_vgpr3
- ; GFX9-NEXT: {{ $}}
- ; GFX9-NEXT: [[COPY:%[0-9]+]]:_(s64) = COPY $vgpr0_vgpr1
- ; GFX9-NEXT: [[COPY1:%[0-9]+]]:_(s64) = COPY $vgpr2_vgpr3
- ; GFX9-NEXT: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[COPY]](s64)
- ; GFX9-NEXT: [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[COPY1]](s64)
- ; GFX9-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
- ; GFX9-NEXT: [[AMDGPU_MAD_U64_U32_:%[0-9]+]]:_(s64), [[AMDGPU_MAD_U64_U32_1:%[0-9]+]]:_(s1) = G_AMDGPU_MAD_U64_U32 [[UV]](s32), [[UV2]], [[C]]
- ; GFX9-NEXT: [[UV4:%[0-9]+]]:_(s32), [[UV5:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[AMDGPU_MAD_U64_U32_]](s64)
- ; GFX9-NEXT: [[ANYEXT:%[0-9]+]]:_(s64) = G_ANYEXT [[UV5]](s32)
- ; GFX9-NEXT: [[AMDGPU_MAD_U64_U32_2:%[0-9]+]]:_(s64), [[AMDGPU_MAD_U64_U32_3:%[0-9]+]]:_(s1) = G_AMDGPU_MAD_U64_U32 [[UV]](s32), [[UV3]], [[ANYEXT]]
- ; GFX9-NEXT: [[AMDGPU_MAD_U64_U32_4:%[0-9]+]]:_(s64), [[AMDGPU_MAD_U64_U32_5:%[0-9]+]]:_(s1) = G_AMDGPU_MAD_U64_U32 [[UV1]](s32), [[UV2]], [[AMDGPU_MAD_U64_U32_2]]
- ; GFX9-NEXT: [[UV6:%[0-9]+]]:_(s32), [[UV7:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[AMDGPU_MAD_U64_U32_4]](s64)
- ; GFX9-NEXT: [[MV:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[UV4]](s32), [[UV6]](s32)
- ; GFX9-NEXT: $vgpr0_vgpr1 = COPY [[MV]](s64)
- ; GFX10-LABEL: name: test_mul_s64
- ; GFX10: liveins: $vgpr0_vgpr1, $vgpr2_vgpr3
- ; GFX10-NEXT: {{ $}}
- ; GFX10-NEXT: [[COPY:%[0-9]+]]:_(s64) = COPY $vgpr0_vgpr1
- ; GFX10-NEXT: [[COPY1:%[0-9]+]]:_(s64) = COPY $vgpr2_vgpr3
- ; GFX10-NEXT: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[COPY]](s64)
- ; GFX10-NEXT: [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[COPY1]](s64)
- ; GFX10-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
- ; GFX10-NEXT: [[AMDGPU_MAD_U64_U32_:%[0-9]+]]:_(s64), [[AMDGPU_MAD_U64_U32_1:%[0-9]+]]:_(s1) = G_AMDGPU_MAD_U64_U32 [[UV]](s32), [[UV2]], [[C]]
- ; GFX10-NEXT: [[UV4:%[0-9]+]]:_(s32), [[UV5:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[AMDGPU_MAD_U64_U32_]](s64)
- ; GFX10-NEXT: [[MUL:%[0-9]+]]:_(s32) = G_MUL [[UV]], [[UV3]]
- ; GFX10-NEXT: [[ADD:%[0-9]+]]:_(s32) = G_ADD [[UV5]], [[MUL]]
- ; GFX10-NEXT: [[MUL1:%[0-9]+]]:_(s32) = G_MUL [[UV1]], [[UV2]]
- ; GFX10-NEXT: [[ADD1:%[0-9]+]]:_(s32) = G_ADD [[ADD]], [[MUL1]]
- ; GFX10-NEXT: [[MV:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[UV4]](s32), [[ADD1]](s32)
- ; GFX10-NEXT: $vgpr0_vgpr1 = COPY [[MV]](s64)
+ ;
+ ; GFX89-LABEL: name: test_mul_s64
+ ; GFX89: liveins: $vgpr0_vgpr1, $vgpr2_vgpr3
+ ; GFX89-NEXT: {{ $}}
+ ; GFX89-NEXT: [[COPY:%[0-9]+]]:_(s64) = COPY $vgpr0_vgpr1
+ ; GFX89-NEXT: [[COPY1:%[0-9]+]]:_(s64) = COPY $vgpr2_vgpr3
+ ; GFX89-NEXT: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[COPY]](s64)
+ ; GFX89-NEXT: [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[COPY1]](s64)
+ ; GFX89-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
+ ; GFX89-NEXT: [[AMDGPU_MAD_U64_U32_:%[0-9]+]]:_(s64), [[AMDGPU_MAD_U64_U32_1:%[0-9]+]]:_(s1) = G_AMDGPU_MAD_U64_U32 [[UV]](s32), [[UV2]], [[C]]
+ ; GFX89-NEXT: [[UV4:%[0-9]+]]:_(s32), [[UV5:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[AMDGPU_MAD_U64_U32_]](s64)
+ ; GFX89-NEXT: [[ANYEXT:%[0-9]+]]:_(s64) = G_ANYEXT [[UV5]](s32)
+ ; GFX89-NEXT: [[AMDGPU_MAD_U64_U32_2:%[0-9]+]]:_(s64), [[AMDGPU_MAD_U64_U32_3:%[0-9]+]]:_(s1) = G_AMDGPU_MAD_U64_U32 [[UV]](s32), [[UV3]], [[ANYEXT]]
+ ; GFX89-NEXT: [[AMDGPU_MAD_U64_U32_4:%[0-9]+]]:_(s64), [[AMDGPU_MAD_U64_U32_5:%[0-9]+]]:_(s1) = G_AMDGPU_MAD_U64_U32 [[UV1]](s32), [[UV2]], [[AMDGPU_MAD_U64_U32_2]]
+ ; GFX89-NEXT: [[UV6:%[0-9]+]]:_(s32), [[UV7:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[AMDGPU_MAD_U64_U32_4]](s64)
+ ; GFX89-NEXT: [[MV:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[UV4]](s32), [[UV6]](s32)
+ ; GFX89-NEXT: $vgpr0_vgpr1 = COPY [[MV]](s64)
+ ;
+ ; GFX1011-LABEL: name: test_mul_s64
+ ; GFX1011: liveins: $vgpr0_vgpr1, $vgpr2_vgpr3
+ ; GFX1011-NEXT: {{ $}}
+ ; GFX1011-NEXT: [[COPY:%[0-9]+]]:_(s64) = COPY $vgpr0_vgpr1
+ ; GFX1011-NEXT: [[COPY1:%[0-9]+]]:_(s64) = COPY $vgpr2_vgpr3
+ ; GFX1011-NEXT: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[COPY]](s64)
+ ; GFX1011-NEXT: [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[COPY1]](s64)
+ ; GFX1011-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
+ ; GFX1011-NEXT: [[AMDGPU_MAD_U64_U32_:%[0-9]+]]:_(s64), [[AMDGPU_MAD_U64_U32_1:%[0-9]+]]:_(s1) = G_AMDGPU_MAD_U64_U32 [[UV]](s32), [[UV2]], [[C]]
+ ; GFX1011-NEXT: [[UV4:%[0-9]+]]:_(s32), [[UV5:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[AMDGPU_MAD_U64_U32_]](s64)
+ ; GFX1011-NEXT: [[MUL:%[0-9]+]]:_(s32) = G_MUL [[UV]], [[UV3]]
+ ; GFX1011-NEXT: [[ADD:%[0-9]+]]:_(s32) = G_ADD [[UV5]], [[MUL]]
+ ; GFX1011-NEXT: [[MUL1:%[0-9]+]]:_(s32) = G_MUL [[UV1]], [[UV2]]
+ ; GFX1011-NEXT: [[ADD1:%[0-9]+]]:_(s32) = G_ADD [[ADD]], [[MUL1]]
+ ; GFX1011-NEXT: [[MV:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[UV4]](s32), [[ADD1]](s32)
+ ; GFX1011-NEXT: $vgpr0_vgpr1 = COPY [[MV]](s64)
+ ;
+ ; GFX12-LABEL: name: test_mul_s64
+ ; GFX12: liveins: $vgpr0_vgpr1, $vgpr2_vgpr3
+ ; GFX12-NEXT: {{ $}}
+ ; GFX12-NEXT: [[COPY:%[0-9]+]]:_(s64) = COPY $vgpr0_vgpr1
+ ; GFX12-NEXT: [[COPY1:%[0-9]+]]:_(s64) = COPY $vgpr2_vgpr3
+ ; GFX12-NEXT: [[MUL:%[0-9]+]]:_(s64) = G_MUL [[COPY]], [[COPY1]]
+ ; GFX12-NEXT: $vgpr0_vgpr1 = COPY [[MUL]](s64)
%0:_(s64) = COPY $vgpr0_vgpr1
%1:_(s64) = COPY $vgpr2_vgpr3
%2:_(s64) = G_MUL %0, %1
@@ -209,90 +150,76 @@ body: |
; GFX6-NEXT: [[MV1:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[MUL3]](s32), [[ADD3]](s32)
; GFX6-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s64>) = G_BUILD_VECTOR [[MV]](s64), [[MV1]](s64)
; GFX6-NEXT: $vgpr0_vgpr1_vgpr2_vgpr3 = COPY [[BUILD_VECTOR]](<2 x s64>)
- ; GFX8-LABEL: name: test_mul_v2s64
- ; GFX8: liveins: $vgpr0_vgpr1_vgpr2_vgpr3, $vgpr4_vgpr5_vgpr6_vgpr7
- ; GFX8-NEXT: {{ $}}
- ; GFX8-NEXT: [[COPY:%[0-9]+]]:_(<2 x s64>) = COPY $vgpr0_vgpr1_vgpr2_vgpr3
- ; GFX8-NEXT: [[COPY1:%[0-9]+]]:_(<2 x s64>) = COPY $vgpr4_vgpr5_vgpr6_vgpr7
- ; GFX8-NEXT: [[UV:%[0-9]+]]:_(s64), [[UV1:%[0-9]+]]:_(s64) = G_UNMERGE_VALUES [[COPY]](<2 x s64>)
- ; GFX8-NEXT: [[UV2:%[0-9]+]]:_(s64), [[UV3:%[0-9]+]]:_(s64) = G_UNMERGE_VALUES [[COPY1]](<2 x s64>)
- ; GFX8-NEXT: [[UV4:%[0-9]+]]:_(s32), [[UV5:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[UV]](s64)
- ; GFX8-NEXT: [[UV6:%[0-9]+]]:_(s32), [[UV7:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[UV2]](s64)
- ; GFX8-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
- ; GFX8-NEXT: [[AMDGPU_MAD_U64_U32_:%[0-9]+]]:_(s64), [[AMDGPU_MAD_U64_U32_1:%[0-9]+]]:_(s1) = G_AMDGPU_MAD_U64_U32 [[UV4]](s32), [[UV6]], [[C]]
- ; GFX8-NEXT: [[UV8:%[0-9]+]]:_(s32), [[UV9:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[AMDGPU_MAD_U64_U32_]](s64)
- ; GFX8-NEXT: [[ANYEXT:%[0-9]+]]:_(s64) = G_ANYEXT [[UV9]](s32)
- ; GFX8-NEXT: [[AMDGPU_MAD_U64_U32_2:%[0-9]+]]:_(s64), [[AMDGPU_MAD_U64_U32_3:%[0-9]+]]:_(s1) = G_AMDGPU_MAD_U64_U32 [[UV4]](s32), [[UV7]], [[ANYEXT]]
- ; GFX8-NEXT: [[AMDGPU_MAD_U64_U32_4:%[0-9]+]]:_(s64), [[AMDGPU_MAD_U64_U32_5:%[0-9]+]]:_(s1) = G_AMDGPU_MAD_U64_U32 [[UV5]](s32), [[UV6]], [[AMDGPU_MAD_U64_U32_2]]
- ; GFX8-NEXT: [[UV10:%[0-9]+]]:_(s32), [[UV11:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[AMDGPU_MAD_U64_U32_4]](s64)
- ; GFX8-NEXT: [[MV:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[UV8]](s32), [[UV10]](s32)
- ; GFX8-NEXT: [[UV12:%[0-9]+]]:_(s32), [[UV13:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[UV1]](s64)
- ; GFX8-NEXT: [[UV14:%[0-9]+]]:_(s32), [[UV15:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[UV3]](s64)
- ; GFX8-NEXT: [[AMDGPU_MAD_U64_U32_6:%[0-9]+]]:_(s64), [[AMDGPU_MAD_U64_U32_7:%[0-9]+]]:_(s1) = G_AMDGPU_MAD_U64_U32 [[UV12]](s32), [[UV14]], [[C]]
- ; GFX8-NEXT: [[UV16:%[0-9]+]]:_(s32), [[UV17:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[AMDGPU_MAD_U64_U32_6]](s64)
- ; GFX8-NEXT: [[ANYEXT1:%[0-9]+]]:_(s64) = G_ANYEXT [[UV17]](s32)
- ; GFX8-NEXT: [[AMDGPU_MAD_U64_U32_8:%[0-9]+]]:_(s64), [[AMDGPU_MAD_U64_U32_9:%[0-9]+]]:_(s1) = G_AMDGPU_MAD_U64_U32 [[UV12]](s32), [[UV15]], [[ANYEXT1]]
- ; GFX8-NEXT: [[AMDGPU_MAD_U64_U32_10:%[0-9]+]]:_(s64), [[AMDGPU_MAD_U64_U32_11:%[0-9]+]]:_(s1) = G_AMDGPU_MAD_U64_U32 [[UV13]](s32), [[UV14]], [[AMDGPU_MAD_U64_U32_8]]
- ; GFX8-NEXT: [[UV18:%[0-9]+]]:_(s32), [[UV19:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[AMDGPU_MAD_U64_U32_10]](s64)
- ; GFX8-NEXT: [[MV1:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[UV16]](s32), [[UV18]](s32)
- ; GFX8-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s64>) = G_BUILD_VECTOR [[MV]](s64), [[MV1]](s64)
- ; GFX8-NEXT: $vgpr0_vgpr1_vgpr2_vgpr3 = COPY [[BUILD_VECTOR]](<2 x s64>)
- ; GFX9-LABEL: name: test_mul_v2s64
- ; GFX9: liveins: $vgpr0_vgpr1_vgpr2_vgpr3, $vgpr4_vgpr5_vgpr6_vgpr7
- ; GFX9-NEXT: {{ $}}
- ; GFX9-NEXT: [[COPY:%[0-9]+]]:_(<2 x s64>) = COPY $vgpr0_vgpr1_vgpr2_vgpr3
- ; GFX9-NEXT: [[COPY1:%[0-9]+]]:_(<2 x s64>) = COPY $vgpr4_vgpr5_vgpr6_vgpr7
- ; GFX9-NEXT: [[UV:%[0-9]+]]:_(s64), [[UV1:%[0-9]+]]:_(s64) = G_UNMERGE_VALUES [[COPY]](<2 x s64>)
- ; GFX9-NEXT: [[UV2:%[0-9]+]]:_(s64), [[UV3:%[0-9]+]]:_(s64) = G_UNMERGE_VALUES [[COPY1]](<2 x s64>)
- ; GFX9-NEXT: [[UV4:%[0-9]+]]:_(s32), [[UV5:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[UV]](s64)
- ; GFX9-NEXT: [[UV6:%[0-9]+]]:_(s32), [[UV7:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[UV2]](s64)
- ; GFX9-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
- ; GFX9-NEXT: [[AMDGPU_MAD_U64_U32_:%[0-9]+]]:_(s64), [[AMDGPU_MAD_U64_U32_1:%[0-9]+]]:_(s1) = G_AMDGPU_MAD_U64_U32 [[UV4]](s32), [[UV6]], [[C]]
- ; GFX9-NEXT: [[UV8:%[0-9]+]]:_(s32), [[UV9:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[AMDGPU_MAD_U64_U32_]](s64)
- ; GFX9-NEXT: [[ANYEXT:%[0-9]+]]:_(s64) = G_ANYEXT [[UV9]](s32)
- ; GFX9-NEXT: [[AMDGPU_MAD_U64_U32_2:%[0-9]+]]:_(s64), [[AMDGPU_MAD_U64_U32_3:%[0-9]+]]:_(s1) = G_AMDGPU_MAD_U64_U32 [[UV4]](s32), [[UV7]], [[ANYEXT]]
- ; GFX9-NEXT: [[AMDGPU_MAD_U64_U32_4:%[0-9]+]]:_(s64), [[AMDGPU_MAD_U64_U32_5:%[0-9]+]]:_(s1) = G_AMDGPU_MAD_U64_U32 [[UV5]](s32), [[UV6]], [[AMDGPU_MAD_U64_U32_2]]
- ; GFX9-NEXT: [[UV10:%[0-9]+]]:_(s32), [[UV11:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[AMDGPU_MAD_U64_U32_4]](s64)
- ; GFX9-NEXT: [[MV:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[UV8]](s32), [[UV10]](s32)
- ; GFX9-NEXT: [[UV12:%[0-9]+]]:_(s32), [[UV13:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[UV1]](s64)
- ; GFX9-NEXT: [[UV14:%[0-9]+]]:_(s32), [[UV15:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[UV3]](s64)
- ; GFX9-NEXT: [[AMDGPU_MAD_U64_U32_6:%[0-9]+]]:_(s64), [[AMDGPU_MAD_U64_U32_7:%[0-9]+]]:_(s1) = G_AMDGPU_MAD_U64_U32 [[UV12]](s32), [[UV14]], [[C]]
- ; GFX9-NEXT: [[UV16:%[0-9]+]]:_(s32), [[UV17:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[AMDGPU_MAD_U64_U32_6]](s64)
- ; GFX9-NEXT: [[ANYEXT1:%[0-9]+]]:_(s64) = G_ANYEXT [[UV17]](s32)
- ; GFX9-NEXT: [[AMDGPU_MAD_U64_U32_8:%[0-9]+]]:_(s64), [[AMDGPU_MAD_U64_U32_9:%[0-9]+]]:_(s1) = G_AMDGPU_MAD_U64_U32 [[UV12]](s32), [[UV15]], [[ANYEXT1]]
- ; GFX9-NEXT: [[AMDGPU_MAD_U64_U32_10:%[0-9]+]]:_(s64), [[AMDGPU_MAD_U64_U32_11:%[0-9]+]]:_(s1) = G_AMDGPU_MAD_U64_U32 [[UV13]](s32), [[UV14]], [[AMDGPU_MAD_U64_U32_8]]
- ; GFX9-NEXT: [[UV18:%[0-9]+]]:_(s32), [[UV19:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[AMDGPU_MAD_U64_U32_10]](s64)
- ; GFX9-NEXT: [[MV1:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[UV16]](s32), [[UV18]](s32)
- ; GFX9-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s64>) = G_BUILD_VECTOR [[MV]](s64), [[MV1]](s64)
- ; GFX9-NEXT: $vgpr0_vgpr1_vgpr2_vgpr3 = COPY [[BUILD_VECTOR]](<2 x s64>)
- ; GFX10-LABEL: name: test_mul_v2s64
- ; GFX10: liveins: $vgpr0_vgpr1_vgpr2_vgpr3, $vgpr4_vgpr5_vgpr6_vgpr7
- ; GFX10-NEXT: {{ $}}
- ; GFX10-NEXT: [[COPY:%[0-9]+]]:_(<2 x s64>) = COPY $vgpr0_vgpr1_vgpr2_vgpr3
- ; GFX10-NEXT: [[COPY1:%[0-9]+]]:_(<2 x s64>) = COPY $vgpr4_vgpr5_vgpr6_vgpr7
- ; GFX10-NEXT: [[UV:%[0-9]+]]:_(s64), [[UV1:%[0-9]+]]:_(s64) = G_UNMERGE_VALUES [[COPY]](<2 x s64>)
- ; GFX10-NEXT: [[UV2:%[0-9]+]]:_(s64), [[UV3:%[0-9]+]]:_(s64) = G_UNMERGE_VALUES [[COPY1]](<2 x s64>)
- ; GFX10-NEXT: [[UV4:%[0-9]+]]:_(s32), [[UV5:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[UV]](s64)
- ; GFX10-NEXT: [[UV6:%[0-9]+]]:_(s32), [[UV7:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[UV2]](s64)
- ; GFX10-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
- ; GFX10-NEXT: [[AMDGPU_MAD_U64_U32_:%[0-9]+]]:_(s64), [[AMDGPU_MAD_U64_U32_1:%[0-9]+]]:_(s1) = G_AMDGPU_MAD_U64_U32 [[UV4]](s32), [[UV6]], [[C]]
- ; GFX10-NEXT: [[UV8:%[0-9]+]]:_(s32), [[UV9:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[AMDGPU_MAD_U64_U32_]](s64)
- ; GFX10-NEXT: [[MUL:%[0-9]+]]:_(s32) = G_MUL [[UV4]], [[UV7]]
- ; GFX10-NEXT: [[ADD:%[0-9]+]]:_(s32) = G_ADD [[UV9]], [[MUL]]
- ; GFX10-NEXT: [[MUL1:%[0-9]+]]:_(s32) = G_MUL [[UV5]], [[UV6]]
- ; GFX10-NEXT: [[ADD1:%[0-9]+]]:_(s32) = G_ADD [[ADD]], [[MUL1]]
- ; GFX10-NEXT: [[MV:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[UV8]](s32), [[ADD1]](s32)
- ; GFX10-NEXT: [[UV10:%[0-9]+]]:_(s32), [[UV11:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[UV1]](s64)
- ; GFX10-NEXT: [[UV12:%[0-9]+]]:_(s32), [[UV13:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[UV3]](s64)
- ; GFX10-NEXT: [[AMDGPU_MAD_U64_U32_2:%[0-9]+]]:_(s64), [[AMDGPU_MAD_U64_U32_3:%[0-9]+]]:_(s1) = G_AMDGPU_MAD_U64_U32 [[UV10]](s32), [[UV12]], [[C]]
- ; GFX10-NEXT: [[UV14:%[0-9]+]]:_(s32), [[UV15:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[AMDGPU_MAD_U64_U32_2]](s64)
- ; GFX10-NEXT: [[MUL2:%[0-9]+]]:_(s32) = G_MUL [[UV10]], [[UV13]]
- ; GFX10-NEXT: [[ADD2:%[0-9]+]]:_(s32) = G_ADD [[UV15]], [[MUL2]]
- ; GFX10-NEXT: [[MUL3:%[0-9]+]]:_(s32) = G_MUL [[UV11]], [[UV12]]
- ; GFX10-NEXT: [[ADD3:%[0-9]+]]:_(s32) = G_ADD [[ADD2]], [[MUL3]]
- ; GFX10-NEXT: [[MV1:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[UV14]](s32), [[ADD3]](s32)
- ; GFX10-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s64>) = G_BUILD_VECTOR [[MV]](s64), [[MV1]](s64)
- ; GFX10-NEXT: $vgpr0_vgpr1_vgpr2_vgpr3 = COPY [[BUILD_VECTOR]](<2 x s64>)
+ ;
+ ; GFX89-LABEL: name: test_mul_v2s64
+ ; GFX89: liveins: $vgpr0_vgpr1_vgpr2_vgpr3, $vgpr4_vgpr5_vgpr6_vgpr7
+ ; GFX89-NEXT: {{ $}}
+ ; GFX89-NEXT: [[COPY:%[0-9]+]]:_(<2 x s64>) = COPY $vgpr0_vgpr1_vgpr2_vgpr3
+ ; GFX89-NEXT: [[COPY1:%[0-9]+]]:_(<2 x s64>) = COPY $vgpr4_vgpr5_vgpr6_vgpr7
+ ; GFX89-NEXT: [[UV:%[0-9]+]]:_(s64), [[UV1:%[0-9]+]]:_(s64) = G_UNMERGE_VALUES [[COPY]](<2 x s64>)
+ ; GFX89-NEXT: [[UV2:%[0-9]+]]:_(s64), [[UV3:%[0-9]+]]:_(s64) = G_UNMERGE_VALUES [[COPY1]](<2 x s64>)
+ ; GFX89-NEXT: [[UV4:%[0-9]+]]:_(s32), [[UV5:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[UV]](s64)
+ ; GFX89-NEXT: [[UV6:%[0-9]+]]:_(s32), [[UV7:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[UV2]](s64)
+ ; GFX89-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
+ ; GFX89-NEXT: [[AMDGPU_MAD_U64_U32_:%[0-9]+]]:_(s64), [[AMDGPU_MAD_U64_U32_1:%[0-9]+]]:_(s1) = G_AMDGPU_MAD_U64_U32 [[UV4]](s32), [[UV6]], [[C]]
+ ; GFX89-NEXT: [[UV8:%[0-9]+]]:_(s32), [[UV9:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[AMDGPU_MAD_U64_U32_]](s64)
+ ; GFX89-NEXT: [[ANYEXT:%[0-9]+]]:_(s64) = G_ANYEXT [[UV9]](s32)
+ ; GFX89-NEXT: [[AMDGPU_MAD_U64_U32_2:%[0-9]+]]:_(s64), [[AMDGPU_MAD_U64_U32_3:%[0-9]+]]:_(s1) = G_AMDGPU_MAD_U64_U32 [[UV4]](s32), [[UV7]], [[ANYEXT]]
+ ; GFX89-NEXT: [[AMDGPU_MAD_U64_U32_4:%[0-9]+]]:_(s64), [[AMDGPU_MAD_U64_U32_5:%[0-9]+]]:_(s1) = G_AMDGPU_MAD_U64_U32 [[UV5]](s32), [[UV6]], [[AMDGPU_MAD_U64_U32_2]]
+ ; GFX89-NEXT: [[UV10:%[0-9]+]]:_(s32), [[UV11:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[AMDGPU_MAD_U64_U32_4]](s64)
+ ; GFX89-NEXT: [[MV:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[UV8]](s32), [[UV10]](s32)
+ ; GFX89-NEXT: [[UV12:%[0-9]+]]:_(s32), [[UV13:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[UV1]](s64)
+ ; GFX89-NEXT: [[UV14:%[0-9]+]]:_(s32), [[UV15:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[UV3]](s64)
+ ; GFX89-NEXT: [[AMDGPU_MAD_U64_U32_6:%[0-9]+]]:_(s64), [[AMDGPU_MAD_U64_U32_7:%[0-9]+]]:_(s1) = G_AMDGPU_MAD_U64_U32 [[UV12]](s32), [[UV14]], [[C]]
+ ; GFX89-NEXT: [[UV16:%[0-9]+]]:_(s32), [[UV17:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[AMDGPU_MAD_U64_U32_6]](s64)
+ ; GFX89-NEXT: [[ANYEXT1:%[0-9]+]]:_(s64) = G_ANYEXT [[UV17]](s32)
+ ; GFX89-NEXT: [[AMDGPU_MAD_U64_U32_8:%[0-9]+]]:_(s64), [[AMDGPU_MAD_U64_U32_9:%[0-9]+]]:_(s1) = G_AMDGPU_MAD_U64_U32 [[UV12]](s32), [[UV15]], [[ANYEXT1]]
+ ; GFX89-NEXT: [[AMDGPU_MAD_U64_U32_10:%[0-9]+]]:_(s64), [[AMDGPU_MAD_U64_U32_11:%[0-9]+]]:_(s1) = G_AMDGPU_MAD_U64_U32 [[UV13]](s32), [[UV14]], [[AMDGPU_MAD_U64_U32_8]]
+ ; GFX89-NEXT: [[UV18:%[0-9]+]]:_(s32), [[UV19:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[AMDGPU_MAD_U64_U32_10]](s64)
+ ; GFX89-NEXT: [[MV1:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[UV16]](s32), [[UV18]](s32)
+ ; GFX89-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s64>) = G_BUILD_VECTOR [[MV]](s64), [[MV1]](s64)
+ ; GFX89-NEXT: $vgpr0_vgpr1_vgpr2_vgpr3 = COPY [[BUILD_VECTOR]](<2 x s64>)
+ ;
+ ; GFX1011-LABEL: name: test_mul_v2s64
+ ; GFX1011: liveins: $vgpr0_vgpr1_vgpr2_vgpr3, $vgpr4_vgpr5_vgpr6_vgpr7
+ ; GFX1011-NEXT: {{ $}}
+ ; GFX1011-NEXT: [[COPY:%[0-9]+]]:_(<2 x s64>) = COPY $vgpr0_vgpr1_vgpr2_vgpr3
+ ; GFX1011-NEXT: [[COPY1:%[0-9]+]]:_(<2 x s64>) = COPY $vgpr4_vgpr5_vgpr6_vgpr7
+ ; GFX1011-NEXT: [[UV:%[0-9]+]]:_(s64), [[UV1:%[0-9]+]]:_(s64) = G_UNMERGE_VALUES [[COPY]](<2 x s64>)
+ ; GFX1011-NEXT: [[UV2:%[0-9]+]]:_(s64), [[UV3:%[0-9]+]]:_(s64) = G_UNMERGE_VALUES [[COPY1]](<2 x s64>)
+ ; GFX1011-NEXT: [[UV4:%[0-9]+]]:_(s32), [[UV5:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[UV]](s64)
+ ; GFX1011-NEXT: [[UV6:%[0-9]+]]:_(s32), [[UV7:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[UV2]](s64)
+ ; GFX1011-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
+ ; GFX1011-NEXT: [[AMDGPU_MAD_U64_U32_:%[0-9]+]]:_(s64), [[AMDGPU_MAD_U64_U32_1:%[0-9]+]]:_(s1) = G_AMDGPU_MAD_U64_U32 [[UV4]](s32), [[UV6]], [[C]]
+ ; GFX1011-NEXT: [[UV8:%[0-9]+]]:_(s32), [[UV9:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[AMDGPU_MAD_U64_U32_]](s64)
+ ; GFX1011-NEXT: [[MUL:%[0-9]+]]:_(s32) = G_MUL [[UV4]], [[UV7]]
+ ; GFX1011-NEXT: [[ADD:%[0-9]+]]:_(s32) = G_ADD [[UV9]], [[MUL]]
+ ; GFX1011-NEXT: [[MUL1:%[0-9]+]]:_(s32) = G_MUL [[UV5]], [[UV6]]
+ ; GFX1011-NEXT: [[ADD1:%[0-9]+]]:_(s32) = G_ADD [[ADD]], [[MUL1]]
+ ; GFX1011-NEXT: [[MV:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[UV8]](s32), [[ADD1]](s32)
+ ; GFX1011-NEXT: [[UV10:%[0-9]+]]:_(s32), [[UV11:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[UV1]](s64)
+ ; GFX1011-NEXT: [[UV12:%[0-9]+]]:_(s32), [[UV13:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[UV3]](s64)
+ ; GFX1011-NEXT: [[AMDGPU_MAD_U64_U32_2:%[0-9]+]]:_(s64), [[AMDGPU_MAD_U64_U32_3:%[0-9]+]]:_(s1) = G_AMDGPU_MAD_U64_U32 [[UV10]](s32), [[UV12]], [[C]]
+ ; GFX1011-NEXT: [[UV14:%[0-9]+]]:_(s32), [[UV15:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[AMDGPU_MAD_U64_U32_2]](s64)
+ ; GFX1011-NEXT: [[MUL2:%[0-9]+]]:_(s32) = G_MUL [[UV10]], [[UV13]]
+ ; GFX1011-NEXT: [[ADD2:%[0-9]+]]:_(s32) = G_ADD [[UV15]], [[MUL2]]
+ ; GFX1011-NEXT: [[MUL3:%[0-9]+]]:_(s32) = G_MUL [[UV11]], [[UV12]]
+ ; GFX1011-NEXT: [[ADD3:%[0-9]+]]:_(s32) = G_ADD [[ADD2]], [[MUL3]]
+ ; GFX1011-NEXT: [[MV1:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[UV14]](s32), [[ADD3]](s32)
+ ; GFX1011-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s64>) = G_BUILD_VECTOR [[MV]](s64), [[MV1]](s64)
+ ; GFX1011-NEXT: $vgpr0_vgpr1_vgpr2_vgpr3 = COPY [[BUILD_VECTOR]](<2 x s64>)
+ ;
+ ; GFX12-LABEL: name: test_mul_v2s64
+ ; GFX12: liveins: $vgpr0_vgpr1_vgpr2_vgpr3, $vgpr4_vgpr5_vgpr6_vgpr7
+ ; GFX12-NEXT: {{ $}}
+ ; GFX12-NEXT: [[COPY:%[0-9]+]]:_(<2 x s64>) = COPY $vgpr0_vgpr1_vgpr2_vgpr3
+ ; GFX12-NEXT: [[COPY1:%[0-9]+]]:_(<2 x s64>) = COPY $vgpr4_vgpr5_vgpr6_vgpr7
+ ; GFX12-NEXT: [[UV:%[0-9]+]]:_(s64), [[UV1:%[0-9]+]]:_(s64) = G_UNMERGE_VALUES [[COPY]](<2 x s64>)
+ ; GFX12-NEXT: [[UV2:%[0-9]+]]:_(s64), [[UV3:%[0-9]+]]:_(s64) = G_UNMERGE_VALUES [[COPY1]](<2 x s64>)
+ ; GFX12-NEXT: [[MUL:%[0-9]+]]:_(s64) = G_MUL [[UV]], [[UV2]]
+ ; GFX12-NEXT: [[MUL1:%[0-9]+]]:_(s64) = G_MUL [[UV1]], [[UV3]]
+ ; GFX12-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s64>) = G_BUILD_VECTOR [[MUL]](s64), [[MUL1]](s64)
+ ; GFX12-NEXT: $vgpr0_vgpr1_vgpr2_vgpr3 = COPY [[BUILD_VECTOR]](<2 x s64>)
%0:_(<2 x s64>) = COPY $vgpr0_vgpr1_vgpr2_vgpr3
%1:_(<2 x s64>) = COPY $vgpr4_vgpr5_vgpr6_vgpr7
%2:_(<2 x s64>) = G_MUL %0, %1
@@ -314,36 +241,17 @@ body: |
; GFX6-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 65535
; GFX6-NEXT: [[AND:%[0-9]+]]:_(s32) = G_AND [[MUL]], [[C]]
; GFX6-NEXT: $vgpr0 = COPY [[AND]](s32)
- ; GFX8-LABEL: name: test_mul_s16
- ; GFX8: liveins: $vgpr0, $vgpr1
- ; GFX8-NEXT: {{ $}}
- ; GFX8-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
- ; GFX8-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr1
- ; GFX8-NEXT: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[COPY]](s32)
- ; GFX8-NEXT: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[COPY1]](s32)
- ; GFX8-NEXT: [[MUL:%[0-9]+]]:_(s16) = G_MUL [[TRUNC]], [[TRUNC1]]
- ; GFX8-NEXT: [[ZEXT:%[0-9]+]]:_(s32) = G_ZEXT [[MUL]](s16)
- ; GFX8-NEXT: $vgpr0 = COPY [[ZEXT]](s32)
- ; GFX9-LABEL: name: test_mul_s16
- ; GFX9: liveins: $vgpr0, $vgpr1
- ; GFX9-NEXT: {{ $}}
- ; GFX9-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
- ; GFX9-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr1
- ; GFX9-NEXT: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[COPY]](s32)
- ; GFX9-NEXT: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[COPY1]](s32)
- ; GFX9-NEXT: [[MUL:%[0-9]+]]:_(s16) = G_MUL [[TRUNC]], [[TRUNC1]]
- ; GFX9-NEXT: [[ZEXT:%[0-9]+]]:_(s32) = G_ZEXT [[MUL]](s16)
- ; GFX9-NEXT: $vgpr0 = COPY [[ZEXT]](s32)
- ; GFX10-LABEL: name: test_mul_s16
- ; GFX10: liveins: $vgpr0, $vgpr1
- ; GFX10-NEXT: {{ $}}
- ; GFX10-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
- ; GFX10-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr1
- ; GFX10-NEXT: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[COPY]](s32)
- ; GFX10-NEXT: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[COPY1]](s32)
- ; GFX10-NEXT: [[MUL:%[0-9]+]]:_(s16) = G_MUL [[TRUNC]], [[TRUNC1]]
- ; GFX10-NEXT: [[ZEXT:%[0-9]+]]:_(s32) = G_ZEXT [[MUL]](s16)
- ; GFX10-NEXT: $vgpr0 = COPY [[ZEXT]](s32)
+ ;
+ ; GFX8PLUS-LABEL: name: test_mul_s16
+ ; GFX8PLUS: liveins: $vgpr0, $vgpr1
+ ; GFX8PLUS-NEXT: {{ $}}
+ ; GFX8PLUS-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
+ ; GFX8PLUS-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr1
+ ; GFX8PLUS-NEXT: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[COPY]](s32)
+ ; GFX8PLUS-NEXT: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[COPY1]](s32)
+ ; GFX8PLUS-NEXT: [[MUL:%[0-9]+]]:_(s16) = G_MUL [[TRUNC]], [[TRUNC1]]
+ ; GFX8PLUS-NEXT: [[ZEXT:%[0-9]+]]:_(s32) = G_ZEXT [[MUL]](s16)
+ ; GFX8PLUS-NEXT: $vgpr0 = COPY [[ZEXT]](s32)
%0:_(s32) = COPY $vgpr0
%1:_(s32) = COPY $vgpr1
%2:_(s16) = G_TRUNC %0
@@ -378,6 +286,7 @@ body: |
; GFX6-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[AND]], [[SHL]]
; GFX6-NEXT: [[BITCAST2:%[0-9]+]]:_(<2 x s16>) = G_BITCAST [[OR]](s32)
; GFX6-NEXT: $vgpr0 = COPY [[BITCAST2]](<2 x s16>)
+ ;
; GFX8-LABEL: name: test_mul_v2s16
; GFX8: liveins: $vgpr0, $vgpr1
; GFX8-NEXT: {{ $}}
@@ -400,20 +309,14 @@ body: |
; GFX8-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[ZEXT]], [[SHL]]
; GFX8-NEXT: [[BITCAST2:%[0-9]+]]:_(<2 x s16>) = G_BITCAST [[OR]](s32)
; GFX8-NEXT: $vgpr0 = COPY [[BITCAST2]](<2 x s16>)
- ; GFX9-LABEL: name: test_mul_v2s16
- ; GFX9: liveins: $vgpr0, $vgpr1
- ; GFX9-NEXT: {{ $}}
- ; GFX9-NEXT: [[COPY:%[0-9]+]]:_(<2 x s16>) = COPY $vgpr0
- ; GFX9-NEXT: [[COPY1:%[0-9]+]]:_(<2 x s16>) = COPY $vgpr1
- ; GFX9-NEXT: [[MUL:%[0-9]+]]:_(<2 x s16>) = G_MUL [[COPY]], [[COPY1]]
- ; GFX9-NEXT: $vgpr0 = COPY [[MUL]](<2 x s16>)
- ; GFX10-LABEL: name: test_mul_v2s16
- ; GFX10: liveins: $vgpr0, $vgpr1
- ; GFX10-NEXT: {{ $}}
- ; GFX10-NEXT: [[COPY:%[0-9]+]]:_(<2 x s16>) = COPY $vgpr0
- ; GFX10-NEXT: [[COPY1:%[0-9]+]]:_(<2 x s16>) = COPY $vgpr1
- ; GFX10-NEXT: [[MUL:%[0-9]+]]:_(<2 x s16>) = G_MUL [[COPY]], [[COPY1]]
- ; GFX10-NEXT: $vgpr0 = COPY [[MUL]](<2 x s16>)
+ ;
+ ; GFX9PLUS-LABEL: name: test_mul_v2s16
+ ; GFX9PLUS: liveins: $vgpr0, $vgpr1
+ ; GFX9PLUS-NEXT: {{ $}}
+ ; GFX9PLUS-NEXT: [[COPY:%[0-9]+]]:_(<2 x s16>) = COPY $vgpr0
+ ; GFX9PLUS-NEXT: [[COPY1:%[0-9]+]]:_(<2 x s16>) = COPY $vgpr1
+ ; GFX9PLUS-NEXT: [[MUL:%[0-9]+]]:_(<2 x s16>) = G_MUL [[COPY]], [[COPY1]]
+ ; GFX9PLUS-NEXT: $vgpr0 = COPY [[MUL]](<2 x s16>)
%0:_(<2 x s16>) = COPY $vgpr0
%1:_(<2 x s16>) = COPY $vgpr1
%2:_(<2 x s16>) = G_MUL %0, %1
@@ -441,6 +344,7 @@ body: |
; GFX6-NEXT: [[MUL2:%[0-9]+]]:_(s32) = G_MUL [[COPY2]], [[COPY5]]
; GFX6-NEXT: [[TRUNC2:%[0-9]+]]:_(s16) = G_TRUNC [[MUL2]](s32)
; GFX6-NEXT: S_ENDPGM 0, implicit [[TRUNC]](s16), implicit [[TRUNC1]](s16), implicit [[TRUNC2]](s16)
+ ;
; GFX8-LABEL: name: test_mul_v3s16
; GFX8: liveins: $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4, $vgpr5
; GFX8-NEXT: {{ $}}
@@ -460,66 +364,37 @@ body: |
; GFX8-NEXT: [[MUL1:%[0-9]+]]:_(s16) = G_MUL [[TRUNC1]], [[TRUNC4]]
; GFX8-NEXT: [[MUL2:%[0-9]+]]:_(s16) = G_MUL [[TRUNC2]], [[TRUNC5]]
; GFX8-NEXT: S_ENDPGM 0, implicit [[MUL]](s16), implicit [[MUL1]](s16), implicit [[MUL2]](s16)
- ; GFX9-LABEL: name: test_mul_v3s16
- ; GFX9: liveins: $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4, $vgpr5
- ; GFX9-NEXT: {{ $}}
- ; GFX9-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
- ; GFX9-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr1
- ; GFX9-NEXT: [[COPY2:%[0-9]+]]:_(s32) = COPY $vgpr2
- ; GFX9-NEXT: [[COPY3:%[0-9]+]]:_(s32) = COPY $vgpr3
- ; GFX9-NEXT: [[COPY4:%[0-9]+]]:_(s32) = COPY $vgpr4
- ; GFX9-NEXT: [[COPY5:%[0-9]+]]:_(s32) = COPY $vgpr5
- ; GFX9-NEXT: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[COPY]](s32)
- ; GFX9-NEXT: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[COPY1]](s32)
- ; GFX9-NEXT: [[TRUNC2:%[0-9]+]]:_(s16) = G_TRUNC [[COPY2]](s32)
- ; GFX9-NEXT: [[TRUNC3:%[0-9]+]]:_(s16) = G_TRUNC [[COPY3]](s32)
- ; GFX9-NEXT: [[TRUNC4:%[0-9]+]]:_(s16) = G_TRUNC [[COPY4]](s32)
- ; GFX9-NEXT: [[TRUNC5:%[0-9]+]]:_(s16) = G_TRUNC [[COPY5]](s32)
- ; GFX9-NEXT: [[DEF:%[0-9]+]]:_(s16) = G_IMPLICIT_DEF
- ; GFX9-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC]](s16), [[TRUNC1]](s16)
- ; GFX9-NEXT: [[BUILD_VECTOR1:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC2]](s16), [[DEF]](s16)
- ; GFX9-NEXT: [[BUILD_VECTOR2:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC3]](s16), [[TRUNC4]](s16)
- ; GFX9-NEXT: [[BUILD_VECTOR3:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC5]](s16), [[DEF]](s16)
- ; GFX9-NEXT: [[MUL:%[0-9]+]]:_(<2 x s16>) = G_MUL [[BUILD_VECTOR]], [[BUILD_VECTOR2]]
- ; GFX9-NEXT: [[MUL1:%[0-9]+]]:_(<2 x s16>) = G_MUL [[BUILD_VECTOR1]], [[BUILD_VECTOR3]]
- ; GFX9-NEXT: [[BITCAST:%[0-9]+]]:_(s32) = G_BITCAST [[MUL]](<2 x s16>)
- ; GFX9-NEXT: [[TRUNC6:%[0-9]+]]:_(s16) = G_TRUNC [[BITCAST]](s32)
- ; GFX9-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
- ; GFX9-NEXT: [[LSHR:%[0-9]+]]:_(s32) = G_LSHR [[BITCAST]], [[C]](s32)
- ; GFX9-NEXT: [[TRUNC7:%[0-9]+]]:_(s16) = G_TRUNC [[LSHR]](s32)
- ; GFX9-NEXT: [[BITCAST1:%[0-9]+]]:_(s32) = G_BITCAST [[MUL1]](<2 x s16>)
- ; GFX9-NEXT: [[TRUNC8:%[0-9]+]]:_(s16) = G_TRUNC [[BITCAST1]](s32)
- ; GFX9-NEXT: S_ENDPGM 0, implicit [[TRUNC6]](s16), implicit [[TRUNC7]](s16), implicit [[TRUNC8]](s16)
- ; GFX10-LABEL: name: test_mul_v3s16
- ; GFX10: liveins: $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4, $vgpr5
- ; GFX10-NEXT: {{ $}}
- ; GFX10-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
- ; GFX10-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr1
- ; GFX10-NEXT: [[COPY2:%[0-9]+]]:_(s32) = COPY $vgpr2
- ; GFX10-NEXT: [[COPY3:%[0-9]+]]:_(s32) = COPY $vgpr3
- ; GFX10-NEXT: [[COPY4:%[0-9]+]]:_(s32) = COPY $vgpr4
- ; GFX10-NEXT: [[COPY5:%[0-9]+]]:_(s32) = COPY $vgpr5
- ; GFX10-NEXT: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[COPY]](s32)
- ; GFX10-NEXT: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[COPY1]](s32)
- ; GFX10-NEXT: [[TRUNC2:%[0-9]+]]:_(s16) = G_TRUNC [[COPY2]](s32)
- ; GFX10-NEXT: [[TRUNC3:%[0-9]+]]:_(s16) = G_TRUNC [[COPY3]](s32)
- ; GFX10-NEXT: [[TRUNC4:%[0-9]+]]:_(s16) = G_TRUNC [[COPY4]](s32)
- ; GFX10-NEXT: [[TRUNC5:%[0-9]+]]:_(s16) = G_TRUNC [[COPY5]](s32)
- ; GFX10-NEXT: [[DEF:%[0-9]+]]:_(s16) = G_IMPLICIT_DEF
- ; GFX10-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC]](s16), [[TRUNC1]](s16)
- ; GFX10-NEXT: [[BUILD_VECTOR1:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC2]](s16), [[DEF]](s16)
- ; GFX10-NEXT: [[BUILD_VECTOR2:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC3]](s16), [[TRUNC4]](s16)
- ; GFX10-NEXT: [[BUILD_VECTOR3:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC5]](s16), [[DEF]](s16)
- ; GFX10-NEXT: [[MUL:%[0-9]+]]:_(<2 x s16>) = G_MUL [[BUILD_VECTOR]], [[BUILD_VECTOR2]]
- ; GFX10-NEXT: [[MUL1:%[0-9]+]]:_(<2 x s16>) = G_MUL [[BUILD_VECTOR1]], [[BUILD_VECTOR3]]
- ; GFX10-NEXT: [[BITCAST:%[0-9]+]]:_(s32) = G_BITCAST [[MUL]](<2 x s16>)
- ; GFX10-NEXT: [[TRUNC6:%[0-9]+]]:_(s16) = G_TRUNC [[BITCAST]](s32)
- ; GFX10-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
- ; GFX10-NEXT: [[LSHR:%[0-9]+]]:_(s32) = G_LSHR [[BITCAST]], [[C]](s32)
- ; GFX10-NEXT: [[TRUNC7:%[0-9]+]]:_(s16) = G_TRUNC [[LSHR]](s32)
- ; GFX10-NEXT: [[BITCAST1:%[0-9]+]]:_(s32) = G_BITCAST [[MUL1]](<2 x s16>)
- ; GFX10-NEXT: [[TRUNC8:%[0-9]+]]:_(s16) = G_TRUNC [[BITCAST1]](s32)
- ; GFX10-NEXT: S_ENDPGM 0, implicit [[TRUNC6]](s16), implicit [[TRUNC7]](s16), implicit [[TRUNC8]](s16)
+ ;
+ ; GFX9PLUS-LABEL: name: test_mul_v3s16
+ ; GFX9PLUS: liveins: $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4, $vgpr5
+ ; GFX9PLUS-NEXT: {{ $}}
+ ; GFX9PLUS-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
+ ; GFX9PLUS-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr1
+ ; GFX9PLUS-NEXT: [[COPY2:%[0-9]+]]:_(s32) = COPY $vgpr2
+ ; GFX9PLUS-NEXT: [[COPY3:%[0-9]+]]:_(s32) = COPY $vgpr3
+ ; GFX9PLUS-NEXT: [[COPY4:%[0-9]+]]:_(s32) = COPY $vgpr4
+ ; GFX9PLUS-NEXT: [[COPY5:%[0-9]+]]:_(s32) = COPY $vgpr5
+ ; GFX9PLUS-NEXT: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[COPY]](s32)
+ ; GFX9PLUS-NEXT: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[COPY1]](s32)
+ ; GFX9PLUS-NEXT: [[TRUNC2:%[0-9]+]]:_(s16) = G_TRUNC [[COPY2]](s32)
+ ; GFX9PLUS-NEXT: [[TRUNC3:%[0-9]+]]:_(s16) = G_TRUNC [[COPY3]](s32)
+ ; GFX9PLUS-NEXT: [[TRUNC4:%[0-9]+]]:_(s16) = G_TRUNC [[COPY4]](s32)
+ ; GFX9PLUS-NEXT: [[TRUNC5:%[0-9]+]]:_(s16) = G_TRUNC [[COPY5]](s32)
+ ; GFX9PLUS-NEXT: [[DEF:%[0-9]+]]:_(s16) = G_IMPLICIT_DEF
+ ; GFX9PLUS-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC]](s16), [[TRUNC1]](s16)
+ ; GFX9PLUS-NEXT: [[BUILD_VECTOR1:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC2]](s16), [[DEF]](s16)
+ ; GFX9PLUS-NEXT: [[BUILD_VECTOR2:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC3]](s16), [[TRUNC4]](s16)
+ ; GFX9PLUS-NEXT: [[BUILD_VECTOR3:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC5]](s16), [[DEF]](s16)
+ ; GFX9PLUS-NEXT: [[MUL:%[0-9]+]]:_(<2 x s16>) = G_MUL [[BUILD_VECTOR]], [[BUILD_VECTOR2]]
+ ; GFX9PLUS-NEXT: [[MUL1:%[0-9]+]]:_(<2 x s16>) = G_MUL [[BUILD_VECTOR1]], [[BUILD_VECTOR3]]
+ ; GFX9PLUS-NEXT: [[BITCAST:%[0-9]+]]:_(s32) = G_BITCAST [[MUL]](<2 x s16>)
+ ; GFX9PLUS-NEXT: [[TRUNC6:%[0-9]+]]:_(s16) = G_TRUNC [[BITCAST]](s32)
+ ; GFX9PLUS-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
+ ; GFX9PLUS-NEXT: [[LSHR:%[0-9]+]]:_(s32) = G_LSHR [[BITCAST]], [[C]](s32)
+ ; GFX9PLUS-NEXT: [[TRUNC7:%[0-9]+]]:_(s16) = G_TRUNC [[LSHR]](s32)
+ ; GFX9PLUS-NEXT: [[BITCAST1:%[0-9]+]]:_(s32) = G_BITCAST [[MUL1]](<2 x s16>)
+ ; GFX9PLUS-NEXT: [[TRUNC8:%[0-9]+]]:_(s16) = G_TRUNC [[BITCAST1]](s32)
+ ; GFX9PLUS-NEXT: S_ENDPGM 0, implicit [[TRUNC6]](s16), implicit [[TRUNC7]](s16), implicit [[TRUNC8]](s16)
%0:_(s32) = COPY $vgpr0
%1:_(s32) = COPY $vgpr1
%2:_(s32) = COPY $vgpr2
@@ -578,6 +453,7 @@ body: |
; GFX6-NEXT: [[BITCAST5:%[0-9]+]]:_(<2 x s16>) = G_BITCAST [[OR1]](s32)
; GFX6-NEXT: [[CONCAT_VECTORS:%[0-9]+]]:_(<4 x s16>) = G_CONCAT_VECTORS [[BITCAST4]](<2 x s16>), [[BITCAST5]](<2 x s16>)
; GFX6-NEXT: $vgpr0_vgpr1 = COPY [[CONCAT_VECTORS]](<4 x s16>)
+ ;
; GFX8-LABEL: name: test_mul_v4s16
; GFX8: liveins: $vgpr0_vgpr1, $vgpr2_vgpr3
; GFX8-NEXT: {{ $}}
@@ -618,28 +494,18 @@ body: |
; GFX8-NEXT: [[BITCAST5:%[0-9]+]]:_(<2 x s16>) = G_BITCAST [[OR1]](s32)
; GFX8-NEXT: [[CONCAT_VECTORS:%[0-9]+]]:_(<4 x s16>) = G_CONCAT_VECTORS [[BITCAST4]](<2 x s16>), [[BITCAST5]](<2 x s16>)
; GFX8-NEXT: $vgpr0_vgpr1 = COPY [[CONCAT_VECTORS]](<4 x s16>)
- ; GFX9-LABEL: name: test_mul_v4s16
- ; GFX9: liveins: $vgpr0_vgpr1, $vgpr2_vgpr3
- ; GFX9-NEXT: {{ $}}
- ; GFX9-NEXT: [[COPY:%[0-9]+]]:_(<4 x s16>) = COPY $vgpr0_vgpr1
- ; GFX9-NEXT: [[COPY1:%[0-9]+]]:_(<4 x s16>) = COPY $vgpr2_vgpr3
- ; GFX9-NEXT: [[UV:%[0-9]+]]:_(<2 x s16>), [[UV1:%[0-9]+]]:_(<2 x s16>) = G_UNMERGE_VALUES [[COPY]](<4 x s16>)
- ; GFX9-NEXT: [[UV2:%[0-9]+]]:_(<2 x s16>), [[UV3:%[0-9]+]]:_(<2 x s16>) = G_UNMERGE_VALUES [[COPY1]](<4 x s16>)
- ; GFX9-NEXT: [[MUL:%[0-9]+]]:_(<2 x s16>) = G_MUL [[UV]], [[UV2]]
- ; GFX9-NEXT: [[MUL1:%[0-9]+]]:_(<2 x s16>) = G_MUL [[UV1]], [[UV3]]
- ; GFX9-NEXT: [[CONCAT_VECTORS:%[0-9]+]]:_(<4 x s16>) = G_CONCAT_VECTORS [[MUL]](<2 x s16>), [[MUL1]](<2 x s16>)
- ; GFX9-NEXT: $vgpr0_vgpr1 = COPY [[CONCAT_VECTORS]](<4 x s16>)
- ; GFX10-LABEL: name: test_mul_v4s16
- ; GFX10: liveins: $vgpr0_vgpr1, $vgpr2_vgpr3
- ; GFX10-NEXT: {{ $}}
- ; GFX10-NEXT: [[COPY:%[0-9]+]]:_(<4 x s16>) = COPY $vgpr0_vgpr1
- ; GFX10-NEXT: [[COPY1:%[0-9]+]]:_(<4 x s16>) = COPY $vgpr2_vgpr3
- ; GFX10-NEXT: [[UV:%[0-9]+]]:_(<2 x s16>), [[UV1:%[0-9]+]]:_(<2 x s16>) = G_UNMERGE_VALUES [[COPY]](<4 x s16>)
- ; GFX10-NEXT: [[UV2:%[0-9]+]]:_(<2 x s16>), [[UV3:%[0-9]+]]:_(<2 x s16>) = G_UNMERGE_VALUES [[COPY1]](<4 x s16>)
- ; GFX10-NEXT: [[MUL:%[0-9]+]]:_(<2 x s16>) = G_MUL [[UV]], [[UV2]]
- ; GFX10-NEXT: [[MUL1:%[0-9]+]]:_(<2 x s16>) = G_MUL [[UV1]], [[UV3]]
- ; GFX10-NEXT: [[CONCAT_VECTORS:%[0-9]+]]:_(<4 x s16>) = G_CONCAT_VECTORS [[MUL]](<2 x s16>), [[MUL1]](<2 x s16>)
- ; GFX10-NEXT: $vgpr0_vgpr1 = COPY [[CONCAT_VECTORS]](<4 x s16>)
+ ;
+ ; GFX9PLUS-LABEL: name: test_mul_v4s16
+ ; GFX9PLUS: liveins: $vgpr0_vgpr1, $vgpr2_vgpr3
+ ; GFX9PLUS-NEXT: {{ $}}
+ ; GFX9PLUS-NEXT: [[COPY:%[0-9]+]]:_(<4 x s16>) = COPY $vgpr0_vgpr1
+ ; GFX9PLUS-NEXT: [[COPY1:%[0-9]+]]:_(<4 x s16>) = COPY $vgpr2_vgpr3
+ ; GFX9PLUS-NEXT: [[UV:%[0-9]+]]:_(<2 x s16>), [[UV1:%[0-9]+]]:_(<2 x s16>) = G_UNMERGE_VALUES [[COPY]](<4 x s16>)
+ ; GFX9PLUS-NEXT: [[UV2:%[0-9]+]]:_(<2 x s16>), [[UV3:%[0-9]+]]:_(<2 x s16>) = G_UNMERGE_VALUES [[COPY1]](<4 x s16>)
+ ; GFX9PLUS-NEXT: [[MUL:%[0-9]+]]:_(<2 x s16>) = G_MUL [[UV]], [[UV2]]
+ ; GFX9PLUS-NEXT: [[MUL1:%[0-9]+]]:_(<2 x s16>) = G_MUL [[UV1]], [[UV3]]
+ ; GFX9PLUS-NEXT: [[CONCAT_VECTORS:%[0-9]+]]:_(<4 x s16>) = G_CONCAT_VECTORS [[MUL]](<2 x s16>), [[MUL1]](<2 x s16>)
+ ; GFX9PLUS-NEXT: $vgpr0_vgpr1 = COPY [[CONCAT_VECTORS]](<4 x s16>)
%0:_(<4 x s16>) = COPY $vgpr0_vgpr1
%1:_(<4 x s16>) = COPY $vgpr2_vgpr3
%2:_(<4 x s16>) = G_MUL %0, %1
@@ -652,34 +518,13 @@ body: |
bb.0:
liveins: $vgpr0, $vgpr1
- ; GFX6-LABEL: name: test_mul_s24
- ; GFX6: liveins: $vgpr0, $vgpr1
- ; GFX6-NEXT: {{ $}}
- ; GFX6-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
- ; GFX6-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr1
- ; GFX6-NEXT: [[MUL:%[0-9]+]]:_(s32) = G_MUL [[COPY]], [[COPY1]]
- ; GFX6-NEXT: $vgpr0 = COPY [[MUL]](s32)
- ; GFX8-LABEL: name: test_mul_s24
- ; GFX8: liveins: $vgpr0, $vgpr1
- ; GFX8-NEXT: {{ $}}
- ; GFX8-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
- ; GFX8-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr1
- ; GFX8-NEXT: [[MUL:%[0-9]+]]:_(s32) = G_MUL [[COPY]], [[COPY1]]
- ; GFX8-NEXT: $vgpr0 = COPY [[MUL]](s32)
- ; GFX9-LABEL: name: test_mul_s24
- ; GFX9: liveins: $vgpr0, $vgpr1
- ; GFX9-NEXT: {{ $}}
- ; GFX9-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
- ; GFX9-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr1
- ; GFX9-NEXT: [[MUL:%[0-9]+]]:_(s32) = G_MUL [[COPY]], [[COPY1]]
- ; GFX9-NEXT: $vgpr0 = COPY [[MUL]](s32)
- ; GFX10-LABEL: name: test_mul_s24
- ; GFX10: liveins: $vgpr0, $vgpr1
- ; GFX10-NEXT: {{ $}}
- ; GFX10-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
- ; GFX10-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr1
- ; GFX10-NEXT: [[MUL:%[0-9]+]]:_(s32) = G_MUL [[COPY]], [[COPY1]]
- ; GFX10-NEXT: $vgpr0 = COPY [[MUL]](s32)
+ ; GCN-LABEL: name: test_mul_s24
+ ; GCN: liveins: $vgpr0, $vgpr1
+ ; GCN-NEXT: {{ $}}
+ ; GCN-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
+ ; GCN-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr1
+ ; GCN-NEXT: [[MUL:%[0-9]+]]:_(s32) = G_MUL [[COPY]], [[COPY1]]
+ ; GCN-NEXT: $vgpr0 = COPY [[MUL]](s32)
%0:_(s32) = COPY $vgpr0
%1:_(s32) = COPY $vgpr1
%2:_(s24) = G_TRUNC %0
@@ -709,54 +554,48 @@ body: |
; GFX6-NEXT: [[ADD1:%[0-9]+]]:_(s32) = G_ADD [[ADD]], [[UMULH]]
; GFX6-NEXT: [[MV:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[MUL]](s32), [[ADD1]](s32)
; GFX6-NEXT: $vgpr0_vgpr1 = COPY [[MV]](s64)
- ; GFX8-LABEL: name: test_mul_s33
- ; GFX8: liveins: $vgpr0_vgpr1, $vgpr2_vgpr3
- ; GFX8-NEXT: {{ $}}
- ; GFX8-NEXT: [[COPY:%[0-9]+]]:_(s64) = COPY $vgpr0_vgpr1
- ; GFX8-NEXT: [[COPY1:%[0-9]+]]:_(s64) = COPY $vgpr2_vgpr3
- ; GFX8-NEXT: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[COPY]](s64)
- ; GFX8-NEXT: [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[COPY1]](s64)
- ; GFX8-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
- ; GFX8-NEXT: [[AMDGPU_MAD_U64_U32_:%[0-9]+]]:_(s64), [[AMDGPU_MAD_U64_U32_1:%[0-9]+]]:_(s1) = G_AMDGPU_MAD_U64_U32 [[UV]](s32), [[UV2]], [[C]]
- ; GFX8-NEXT: [[UV4:%[0-9]+]]:_(s32), [[UV5:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[AMDGPU_MAD_U64_U32_]](s64)
- ; GFX8-NEXT: [[ANYEXT:%[0-9]+]]:_(s64) = G_ANYEXT [[UV5]](s32)
- ; GFX8-NEXT: [[AMDGPU_MAD_U64_U32_2:%[0-9]+]]:_(s64), [[AMDGPU_MAD_U64_U32_3:%[0-9]+]]:_(s1) = G_AMDGPU_MAD_U64_U32 [[UV]](s32), [[UV3]], [[ANYEXT]]
- ; GFX8-NEXT: [[AMDGPU_MAD_U64_U32_4:%[0-9]+]]:_(s64), [[AMDGPU_MAD_U64_U32_5:%[0-9]+]]:_(s1) = G_AMDGPU_MAD_U64_U32 [[UV1]](s32), [[UV2]], [[AMDGPU_MAD_U64_U32_2]]
- ; GFX8-NEXT: [[UV6:%[0-9]+]]:_(s32), [[UV7:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[AMDGPU_MAD_U64_U32_4]](s64)
- ; GFX8-NEXT: [[MV:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[UV4]](s32), [[UV6]](s32)
- ; GFX8-NEXT: $vgpr0_vgpr1 = COPY [[MV]](s64)
- ; GFX9-LABEL: name: test_mul_s33
- ; GFX9: liveins: $vgpr0_vgpr1, $vgpr2_vgpr3
- ; GFX9-NEXT: {{ $}}
- ; GFX9-NEXT: [[COPY:%[0-9]+]]:_(s64) = COPY $vgpr0_vgpr1
- ; GFX9-NEXT: [[COPY1:%[0-9]+]]:_(s64) = COPY $vgpr2_vgpr3
- ; GFX9-NEXT: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[COPY]](s64)
- ; GFX9-NEXT: [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[COPY1]](s64)
- ; GFX9-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
- ; GFX9-NEXT: [[AMDGPU_MAD_U64_U32_:%[0-9]+]]:_(s64), [[AMDGPU_MAD_U64_U32_1:%[0-9]+]]:_(s1) = G_AMDGPU_MAD_U64_U32 [[UV]](s32), [[UV2]], [[C]]
- ; GFX9-NEXT: [[UV4:%[0-9]+]]:_(s32), [[UV5:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[AMDGPU_MAD_U64_U32_]](s64)
- ; GFX9-NEXT: [[ANYEXT:%[0-9]+]]:_(s64) = G_ANYEXT [[UV5]](s32)
- ; GFX9-NEXT: [[AMDGPU_MAD_U64_U32_2:%[0-9]+]]:_(s64), [[AMDGPU_MAD_U64_U32_3:%[0-9]+]]:_(s1) = G_AMDGPU_MAD_U64_U32 [[UV]](s32), [[UV3]], [[ANYEXT]]
- ; GFX9-NEXT: [[AMDGPU_MAD_U64_U32_4:%[0-9]+]]:_(s64), [[AMDGPU_MAD_U64_U32_5:%[0-9]+]]:_(s1) = G_AMDGPU_MAD_U64_U32 [[UV1]](s32), [[UV2]], [[AMDGPU_MAD_U64_U32_2]]
- ; GFX9-NEXT: [[UV6:%[0-9]+]]:_(s32), [[UV7:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[AMDGPU_MAD_U64_U32_4]](s64)
- ; GFX9-NEXT: [[MV:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[UV4]](s32), [[UV6]](s32)
- ; GFX9-NEXT: $vgpr0_vgpr1 = COPY [[MV]](s64)
- ; GFX10-LABEL: name: test_mul_s33
- ; GFX10: liveins: $vgpr0_vgpr1, $vgpr2_vgpr3
- ; GFX10-NEXT: {{ $}}
- ; GFX10-NEXT: [[COPY:%[0-9]+]]:_(s64) = COPY $vgpr0_vgpr1
- ; GFX10-NEXT: [[COPY1:%[0-9]+]]:_(s64) = COPY $vgpr2_vgpr3
- ; GFX10-NEXT: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[COPY]](s64)
- ; GFX10-NEXT: [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[COPY1]](s64)
- ; GFX10-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
- ; GFX10-NEXT: [[AMDGPU_MAD_U64_U32_:%[0-9]+]]:_(s64), [[AMDGPU_MAD_U64_U32_1:%[0-9]+]]:_(s1) = G_AMDGPU_MAD_U64_U32 [[UV]](s32), [[UV2]], [[C]]
- ; GFX10-NEXT: [[UV4:%[0-9]+]]:_(s32), [[UV5:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[AMDGPU_MAD_U64_U32_]](s64)
- ; GFX10-NEXT: [[MUL:%[0-9]+]]:_(s32) = G_MUL [[UV]], [[UV3]]
- ; GFX10-NEXT: [[ADD:%[0-9]+]]:_(s32) = G_ADD [[UV5]], [[MUL]]
- ; GFX10-NEXT: [[MUL1:%[0-9]+]]:_(s32) = G_MUL [[UV1]], [[UV2]]
- ; GFX10-NEXT: [[ADD1:%[0-9]+]]:_(s32) = G_ADD [[ADD]], [[MUL1]]
- ; GFX10-NEXT: [[MV:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[UV4]](s32), [[ADD1]](s32)
- ; GFX10-NEXT: $vgpr0_vgpr1 = COPY [[MV]](s64)
+ ;
+ ; GFX89-LABEL: name: test_mul_s33
+ ; GFX89: liveins: $vgpr0_vgpr1, $vgpr2_vgpr3
+ ; GFX89-NEXT: {{ $}}
+ ; GFX89-NEXT: [[COPY:%[0-9]+]]:_(s64) = COPY $vgpr0_vgpr1
+ ; GFX89-NEXT: [[COPY1:%[0-9]+]]:_(s64) = COPY $vgpr2_vgpr3
+ ; GFX89-NEXT: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[COPY]](s64)
+ ; GFX89-NEXT: [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[COPY1]](s64)
+ ; GFX89-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
+ ; GFX89-NEXT: [[AMDGPU_MAD_U64_U32_:%[0-9]+]]:_(s64), [[AMDGPU_MAD_U64_U32_1:%[0-9]+]]:_(s1) = G_AMDGPU_MAD_U64_U32 [[UV]](s32), [[UV2]], [[C]]
+ ; GFX89-NEXT: [[UV4:%[0-9]+]]:_(s32), [[UV5:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[AMDGPU_MAD_U64_U32_]](s64)
+ ; GFX89-NEXT: [[ANYEXT:%[0-9]+]]:_(s64) = G_ANYEXT [[UV5]](s32)
+ ; GFX89-NEXT: [[AMDGPU_MAD_U64_U32_2:%[0-9]+]]:_(s64), [[AMDGPU_MAD_U64_U32_3:%[0-9]+]]:_(s1) = G_AMDGPU_MAD_U64_U32 [[UV]](s32), [[UV3]], [[ANYEXT]]
+ ; GFX89-NEXT: [[AMDGPU_MAD_U64_U32_4:%[0-9]+]]:_(s64), [[AMDGPU_MAD_U64_U32_5:%[0-9]+]]:_(s1) = G_AMDGPU_MAD_U64_U32 [[UV1]](s32), [[UV2]], [[AMDGPU_MAD_U64_U32_2]]
+ ; GFX89-NEXT: [[UV6:%[0-9]+]]:_(s32), [[UV7:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[AMDGPU_MAD_U64_U32_4]](s64)
+ ; GFX89-NEXT: [[MV:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[UV4]](s32), [[UV6]](s32)
+ ; GFX89-NEXT: $vgpr0_vgpr1 = COPY [[MV]](s64)
+ ;
+ ; GFX1011-LABEL: name: test_mul_s33
+ ; GFX1011: liveins: $vgpr0_vgpr1, $vgpr2_vgpr3
+ ; GFX1011-NEXT: {{ $}}
+ ; GFX1011-NEXT: [[COPY:%[0-9]+]]:_(s64) = COPY $vgpr0_vgpr1
+ ; GFX1011-NEXT: [[COPY1:%[0-9]+]]:_(s64) = COPY $vgpr2_vgpr3
+ ; GFX1011-NEXT: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[COPY]](s64)
+ ; GFX1011-NEXT: [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[COPY1]](s64)
+ ; GFX1011-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
+ ; GFX1011-NEXT: [[AMDGPU_MAD_U64_U32_:%[0-9]+]]:_(s64), [[AMDGPU_MAD_U64_U32_1:%[0-9]+]]:_(s1) = G_AMDGPU_MAD_U64_U32 [[UV]](s32), [[UV2]], [[C]]
+ ; GFX1011-NEXT: [[UV4:%[0-9]+]]:_(s32), [[UV5:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[AMDGPU_MAD_U64_U32_]](s64)
+ ; GFX1011-NEXT: [[MUL:%[0-9]+]]:_(s32) = G_MUL [[UV]], [[UV3]]
+ ; GFX1011-NEXT: [[ADD:%[0-9]+]]:_(s32) = G_ADD [[UV5]], [[MUL]]
+ ; GFX1011-NEXT: [[MUL1:%[0-9]+]]:_(s32) = G_MUL [[UV1]], [[UV2]]
+ ; GFX1011-NEXT: [[ADD1:%[0-9]+]]:_(s32) = G_ADD [[ADD]], [[MUL1]]
+ ; GFX1011-NEXT: [[MV:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[UV4]](s32), [[ADD1]](s32)
+ ; GFX1011-NEXT: $vgpr0_vgpr1 = COPY [[MV]](s64)
+ ;
+ ; GFX12-LABEL: name: test_mul_s33
+ ; GFX12: liveins: $vgpr0_vgpr1, $vgpr2_vgpr3
+ ; GFX12-NEXT: {{ $}}
+ ; GFX12-NEXT: [[COPY:%[0-9]+]]:_(s64) = COPY $vgpr0_vgpr1
+ ; GFX12-NEXT: [[COPY1:%[0-9]+]]:_(s64) = COPY $vgpr2_vgpr3
+ ; GFX12-NEXT: [[MUL:%[0-9]+]]:_(s64) = G_MUL [[COPY]], [[COPY1]]
+ ; GFX12-NEXT: $vgpr0_vgpr1 = COPY [[MUL]](s64)
%0:_(s64) = COPY $vgpr0_vgpr1
%1:_(s64) = COPY $vgpr2_vgpr3
%2:_(s33) = G_TRUNC %0
@@ -800,67 +639,71 @@ body: |
; GFX6-NEXT: [[ADD5:%[0-9]+]]:_(s32) = G_ADD [[ADD4]], [[ADD]]
; GFX6-NEXT: [[MV:%[0-9]+]]:_(s96) = G_MERGE_VALUES [[MUL]](s32), [[UADDO2]](s32), [[ADD5]](s32)
; GFX6-NEXT: $vgpr0_vgpr1_vgpr2 = COPY [[MV]](s96)
- ; GFX8-LABEL: name: test_mul_s96
- ; GFX8: liveins: $vgpr0_vgpr1_vgpr2, $vgpr3_vgpr4_vgpr5
- ; GFX8-NEXT: {{ $}}
- ; GFX8-NEXT: [[COPY:%[0-9]+]]:_(s96) = COPY $vgpr0_vgpr1_vgpr2
- ; GFX8-NEXT: [[COPY1:%[0-9]+]]:_(s96) = COPY $vgpr3_vgpr4_vgpr5
- ; GFX8-NEXT: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[COPY]](s96)
- ; GFX8-NEXT: [[UV3:%[0-9]+]]:_(s32), [[UV4:%[0-9]+]]:_(s32), [[UV5:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[COPY1]](s96)
- ; GFX8-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
- ; GFX8-NEXT: [[AMDGPU_MAD_U64_U32_:%[0-9]+]]:_(s64), [[AMDGPU_MAD_U64_U32_1:%[0-9]+]]:_(s1) = G_AMDGPU_MAD_U64_U32 [[UV]](s32), [[UV3]], [[C]]
- ; GFX8-NEXT: [[UV6:%[0-9]+]]:_(s32), [[UV7:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[AMDGPU_MAD_U64_U32_]](s64)
- ; GFX8-NEXT: [[AMDGPU_MAD_U64_U32_2:%[0-9]+]]:_(s64), [[AMDGPU_MAD_U64_U32_3:%[0-9]+]]:_(s1) = G_AMDGPU_MAD_U64_U32 [[UV]](s32), [[UV5]], [[C]]
- ; GFX8-NEXT: [[AMDGPU_MAD_U64_U32_4:%[0-9]+]]:_(s64), [[AMDGPU_MAD_U64_U32_5:%[0-9]+]]:_(s1) = G_AMDGPU_MAD_U64_U32 [[UV1]](s32), [[UV4]], [[AMDGPU_MAD_U64_U32_2]]
- ; GFX8-NEXT: [[AMDGPU_MAD_U64_U32_6:%[0-9]+]]:_(s64), [[AMDGPU_MAD_U64_U32_7:%[0-9]+]]:_(s1) = G_AMDGPU_MAD_U64_U32 [[UV2]](s32), [[UV3]], [[AMDGPU_MAD_U64_U32_4]]
- ; GFX8-NEXT: [[UV8:%[0-9]+]]:_(s32), [[UV9:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[AMDGPU_MAD_U64_U32_6]](s64)
- ; GFX8-NEXT: [[MV:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[UV7]](s32), [[UV8]](s32)
- ; GFX8-NEXT: [[AMDGPU_MAD_U64_U32_8:%[0-9]+]]:_(s64), [[AMDGPU_MAD_U64_U32_9:%[0-9]+]]:_(s1) = G_AMDGPU_MAD_U64_U32 [[UV]](s32), [[UV4]], [[MV]]
- ; GFX8-NEXT: [[AMDGPU_MAD_U64_U32_10:%[0-9]+]]:_(s64), [[AMDGPU_MAD_U64_U32_11:%[0-9]+]]:_(s1) = G_AMDGPU_MAD_U64_U32 [[UV1]](s32), [[UV3]], [[AMDGPU_MAD_U64_U32_8]]
- ; GFX8-NEXT: [[UV10:%[0-9]+]]:_(s32), [[UV11:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[AMDGPU_MAD_U64_U32_10]](s64)
- ; GFX8-NEXT: [[MV1:%[0-9]+]]:_(s96) = G_MERGE_VALUES [[UV6]](s32), [[UV10]](s32), [[UV11]](s32)
- ; GFX8-NEXT: $vgpr0_vgpr1_vgpr2 = COPY [[MV1]](s96)
- ; GFX9-LABEL: name: test_mul_s96
- ; GFX9: liveins: $vgpr0_vgpr1_vgpr2, $vgpr3_vgpr4_vgpr5
- ; GFX9-NEXT: {{ $}}
- ; GFX9-NEXT: [[COPY:%[0-9]+]]:_(s96) = COPY $vgpr0_vgpr1_vgpr2
- ; GFX9-NEXT: [[COPY1:%[0-9]+]]:_(s96) = COPY $vgpr3_vgpr4_vgpr5
- ; GFX9-NEXT: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[COPY]](s96)
- ; GFX9-NEXT: [[UV3:%[0-9]+]]:_(s32), [[UV4:%[0-9]+]]:_(s32), [[UV5:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[COPY1]](s96)
- ; GFX9-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
- ; GFX9-NEXT: [[AMDGPU_MAD_U64_U32_:%[0-9]+]]:_(s64), [[AMDGPU_MAD_U64_U32_1:%[0-9]+]]:_(s1) = G_AMDGPU_MAD_U64_U32 [[UV]](s32), [[UV3]], [[C]]
- ; GFX9-NEXT: [[UV6:%[0-9]+]]:_(s32), [[UV7:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[AMDGPU_MAD_U64_U32_]](s64)
- ; GFX9-NEXT: [[AMDGPU_MAD_U64_U32_2:%[0-9]+]]:_(s64), [[AMDGPU_MAD_U64_U32_3:%[0-9]+]]:_(s1) = G_AMDGPU_MAD_U64_U32 [[UV]](s32), [[UV5]], [[C]]
- ; GFX9-NEXT: [[AMDGPU_MAD_U64_U32_4:%[0-9]+]]:_(s64), [[AMDGPU_MAD_U64_U32_5:%[0-9]+]]:_(s1) = G_AMDGPU_MAD_U64_U32 [[UV1]](s32), [[UV4]], [[AMDGPU_MAD_U64_U32_2]]
- ; GFX9-NEXT: [[AMDGPU_MAD_U64_U32_6:%[0-9]+]]:_(s64), [[AMDGPU_MAD_U64_U32_7:%[0-9]+]]:_(s1) = G_AMDGPU_MAD_U64_U32 [[UV2]](s32), [[UV3]], [[AMDGPU_MAD_U64_U32_4]]
- ; GFX9-NEXT: [[UV8:%[0-9]+]]:_(s32), [[UV9:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[AMDGPU_MAD_U64_U32_6]](s64)
- ; GFX9-NEXT: [[MV:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[UV7]](s32), [[UV8]](s32)
- ; GFX9-NEXT: [[AMDGPU_MAD_U64_U32_8:%[0-9]+]]:_(s64), [[AMDGPU_MAD_U64_U32_9:%[0-9]+]]:_(s1) = G_AMDGPU_MAD_U64_U32 [[UV]](s32), [[UV4]], [[MV]]
- ; GFX9-NEXT: [[AMDGPU_MAD_U64_U32_10:%[0-9]+]]:_(s64), [[AMDGPU_MAD_U64_U32_11:%[0-9]+]]:_(s1) = G_AMDGPU_MAD_U64_U32 [[UV1]](s32), [[UV3]], [[AMDGPU_MAD_U64_U32_8]]
- ; GFX9-NEXT: [[UV10:%[0-9]+]]:_(s32), [[UV11:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[AMDGPU_MAD_U64_U32_10]](s64)
- ; GFX9-NEXT: [[MV1:%[0-9]+]]:_(s96) = G_MERGE_VALUES [[UV6]](s32), [[UV10]](s32), [[UV11]](s32)
- ; GFX9-NEXT: $vgpr0_vgpr1_vgpr2 = COPY [[MV1]](s96)
- ; GFX10-LABEL: name: test_mul_s96
- ; GFX10: liveins: $vgpr0_vgpr1_vgpr2, $vgpr3_vgpr4_vgpr5
- ; GFX10-NEXT: {{ $}}
- ; GFX10-NEXT: [[COPY:%[0-9]+]]:_(s96) = COPY $vgpr0_vgpr1_vgpr2
- ; GFX10-NEXT: [[COPY1:%[0-9]+]]:_(s96) = COPY $vgpr3_vgpr4_vgpr5
- ; GFX10-NEXT: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[COPY]](s96)
- ; GFX10-NEXT: [[UV3:%[0-9]+]]:_(s32), [[UV4:%[0-9]+]]:_(s32), [[UV5:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[COPY1]](s96)
- ; GFX10-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
- ; GFX10-NEXT: [[AMDGPU_MAD_U64_U32_:%[0-9]+]]:_(s64), [[AMDGPU_MAD_U64_U32_1:%[0-9]+]]:_(s1) = G_AMDGPU_MAD_U64_U32 [[UV]](s32), [[UV3]], [[C]]
- ; GFX10-NEXT: [[UV6:%[0-9]+]]:_(s32), [[UV7:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[AMDGPU_MAD_U64_U32_]](s64)
- ; GFX10-NEXT: [[MUL:%[0-9]+]]:_(s32) = G_MUL [[UV]], [[UV5]]
- ; GFX10-NEXT: [[MUL1:%[0-9]+]]:_(s32) = G_MUL [[UV1]], [[UV4]]
- ; GFX10-NEXT: [[ADD:%[0-9]+]]:_(s32) = G_ADD [[MUL]], [[MUL1]]
- ; GFX10-NEXT: [[MUL2:%[0-9]+]]:_(s32) = G_MUL [[UV2]], [[UV3]]
- ; GFX10-NEXT: [[ADD1:%[0-9]+]]:_(s32) = G_ADD [[ADD]], [[MUL2]]
- ; GFX10-NEXT: [[MV:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[UV7]](s32), [[ADD1]](s32)
- ; GFX10-NEXT: [[AMDGPU_MAD_U64_U32_2:%[0-9]+]]:_(s64), [[AMDGPU_MAD_U64_U32_3:%[0-9]+]]:_(s1) = G_AMDGPU_MAD_U64_U32 [[UV]](s32), [[UV4]], [[MV]]
- ; GFX10-NEXT: [[AMDGPU_MAD_U64_U32_4:%[0-9]+]]:_(s64), [[AMDGPU_MAD_U64_U32_5:%[0-9]+]]:_(s1) = G_AMDGPU_MAD_U64_U32 [[UV1]](s32), [[UV3]], [[AMDGPU_MAD_U64_U32_2]]
- ; GFX10-NEXT: [[UV8:%[0-9]+]]:_(s32), [[UV9:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[AMDGPU_MAD_U64_U32_4]](s64)
- ; GFX10-NEXT: [[MV1:%[0-9]+]]:_(s96) = G_MERGE_VALUES [[UV6]](s32), [[UV8]](s32), [[UV9]](s32)
- ; GFX10-NEXT: $vgpr0_vgpr1_vgpr2 = COPY [[MV1]](s96)
+ ;
+ ; GFX89-LABEL: name: test_mul_s96
+ ; GFX89: liveins: $vgpr0_vgpr1_vgpr2, $vgpr3_vgpr4_vgpr5
+ ; GFX89-NEXT: {{ $}}
+ ; GFX89-NEXT: [[COPY:%[0-9]+]]:_(s96) = COPY $vgpr0_vgpr1_vgpr2
+ ; GFX89-NEXT: [[COPY1:%[0-9]+]]:_(s96) = COPY $vgpr3_vgpr4_vgpr5
+ ; GFX89-NEXT: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[COPY]](s96)
+ ; GFX89-NEXT: [[UV3:%[0-9]+]]:_(s32), [[UV4:%[0-9]+]]:_(s32), [[UV5:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[COPY1]](s96)
+ ; GFX89-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
+ ; GFX89-NEXT: [[AMDGPU_MAD_U64_U32_:%[0-9]+]]:_(s64), [[AMDGPU_MAD_U64_U32_1:%[0-9]+]]:_(s1) = G_AMDGPU_MAD_U64_U32 [[UV]](s32), [[UV3]], [[C]]
+ ; GFX89-NEXT: [[UV6:%[0-9]+]]:_(s32), [[UV7:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[AMDGPU_MAD_U64_U32_]](s64)
+ ; GFX89-NEXT: [[AMDGPU_MAD_U64_U32_2:%[0-9]+]]:_(s64), [[AMDGPU_MAD_U64_U32_3:%[0-9]+]]:_(s1) = G_AMDGPU_MAD_U64_U32 [[UV]](s32), [[UV5]], [[C]]
+ ; GFX89-NEXT: [[AMDGPU_MAD_U64_U32_4:%[0-9]+]]:_(s64), [[AMDGPU_MAD_U64_U32_5:%[0-9]+]]:_(s1) = G_AMDGPU_MAD_U64_U32 [[UV1]](s32), [[UV4]], [[AMDGPU_MAD_U64_U32_2]]
+ ; GFX89-NEXT: [[AMDGPU_MAD_U64_U32_6:%[0-9]+]]:_(s64), [[AMDGPU_MAD_U64_U32_7:%[0-9]+]]:_(s1) = G_AMDGPU_MAD_U64_U32 [[UV2]](s32), [[UV3]], [[AMDGPU_MAD_U64_U32_4]]
+ ; GFX89-NEXT: [[UV8:%[0-9]+]]:_(s32), [[UV9:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[AMDGPU_MAD_U64_U32_6]](s64)
+ ; GFX89-NEXT: [[MV:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[UV7]](s32), [[UV8]](s32)
+ ; GFX89-NEXT: [[AMDGPU_MAD_U64_U32_8:%[0-9]+]]:_(s64), [[AMDGPU_MAD_U64_U32_9:%[0-9]+]]:_(s1) = G_AMDGPU_MAD_U64_U32 [[UV]](s32), [[UV4]], [[MV]]
+ ; GFX89-NEXT: [[AMDGPU_MAD_U64_U32_10:%[0-9]+]]:_(s64), [[AMDGPU_MAD_U64_U32_11:%[0-9]+]]:_(s1) = G_AMDGPU_MAD_U64_U32 [[UV1]](s32), [[UV3]], [[AMDGPU_MAD_U64_U32_8]]
+ ; GFX89-NEXT: [[UV10:%[0-9]+]]:_(s32), [[UV11:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[AMDGPU_MAD_U64_U32_10]](s64)
+ ; GFX89-NEXT: [[MV1:%[0-9]+]]:_(s96) = G_MERGE_VALUES [[UV6]](s32), [[UV10]](s32), [[UV11]](s32)
+ ; GFX89-NEXT: $vgpr0_vgpr1_vgpr2 = COPY [[MV1]](s96)
+ ;
+ ; GFX1011-LABEL: name: test_mul_s96
+ ; GFX1011: liveins: $vgpr0_vgpr1_vgpr2, $vgpr3_vgpr4_vgpr5
+ ; GFX1011-NEXT: {{ $}}
+ ; GFX1011-NEXT: [[COPY:%[0-9]+]]:_(s96) = COPY $vgpr0_vgpr1_vgpr2
+ ; GFX1011-NEXT: [[COPY1:%[0-9]+]]:_(s96) = COPY $vgpr3_vgpr4_vgpr5
+ ; GFX1011-NEXT: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[COPY]](s96)
+ ; GFX1011-NEXT: [[UV3:%[0-9]+]]:_(s32), [[UV4:%[0-9]+]]:_(s32), [[UV5:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[COPY1]](s96)
+ ; GFX1011-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
+ ; GFX1011-NEXT: [[AMDGPU_MAD_U64_U32_:%[0-9]+]]:_(s64), [[AMDGPU_MAD_U64_U32_1:%[0-9]+]]:_(s1) = G_AMDGPU_MAD_U64_U32 [[UV]](s32), [[UV3]], [[C]]
+ ; GFX1011-NEXT: [[UV6:%[0-9]+]]:_(s32), [[UV7:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[AMDGPU_MAD_U64_U32_]](s64)
+ ; GFX1011-NEXT: [[MUL:%[0-9]+]]:_(s32) = G_MUL [[UV]], [[UV5]]
+ ; GFX1011-NEXT: [[MUL1:%[0-9]+]]:_(s32) = G_MUL [[UV1]], [[UV4]]
+ ; GFX1011-NEXT: [[ADD:%[0-9]+]]:_(s32) = G_ADD [[MUL]], [[MUL1]]
+ ; GFX1011-NEXT: [[MUL2:%[0-9]+]]:_(s32) = G_MUL [[UV2]], [[UV3]]
+ ; GFX1011-NEXT: [[ADD1:%[0-9]+]]:_(s32) = G_ADD [[ADD]], [[MUL2]]
+ ; GFX1011-NEXT: [[MV:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[UV7]](s32), [[ADD1]](s32)
+ ; GFX1011-NEXT: [[AMDGPU_MAD_U64_U32_2:%[0-9]+]]:_(s64), [[AMDGPU_MAD_U64_U32_3:%[0-9]+]]:_(s1) = G_AMDGPU_MAD_U64_U32 [[UV]](s32), [[UV4]], [[MV]]
+ ; GFX1011-NEXT: [[AMDGPU_MAD_U64_U32_4:%[0-9]+]]:_(s64), [[AMDGPU_MAD_U64_U32_5:%[0-9]+]]:_(s1) = G_AMDGPU_MAD_U64_U32 [[UV1]](s32), [[UV3]], [[AMDGPU_MAD_U64_U32_2]]
+ ; GFX1011-NEXT: [[UV8:%[0-9]+]]:_(s32), [[UV9:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[AMDGPU_MAD_U64_U32_4]](s64)
+ ; GFX1011-NEXT: [[MV1:%[0-9]+]]:_(s96) = G_MERGE_VALUES [[UV6]](s32), [[UV8]](s32), [[UV9]](s32)
+ ; GFX1011-NEXT: $vgpr0_vgpr1_vgpr2 = COPY [[MV1]](s96)
+ ;
+ ; GFX12-LABEL: name: test_mul_s96
+ ; GFX12: liveins: $vgpr0_vgpr1_vgpr2, $vgpr3_vgpr4_vgpr5
+ ; GFX12-NEXT: {{ $}}
+ ; GFX12-NEXT: [[COPY:%[0-9]+]]:_(s96) = COPY $vgpr0_vgpr1_vgpr2
+ ; GFX12-NEXT: [[COPY1:%[0-9]+]]:_(s96) = COPY $vgpr3_vgpr4_vgpr5
+ ; GFX12-NEXT: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[COPY]](s96)
+ ; GFX12-NEXT: [[UV3:%[0-9]+]]:_(s32), [[UV4:%[0-9]+]]:_(s32), [[UV5:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[COPY1]](s96)
+ ; GFX12-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
+ ; GFX12-NEXT: [[AMDGPU_MAD_U64_U32_:%[0-9]+]]:_(s64), [[AMDGPU_MAD_U64_U32_1:%[0-9]+]]:_(s1) = G_AMDGPU_MAD_U64_U32 [[UV]](s32), [[UV3]], [[C]]
+ ; GFX12-NEXT: [[UV6:%[0-9]+]]:_(s32), [[UV7:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[AMDGPU_MAD_U64_U32_]](s64)
+ ; GFX12-NEXT: [[MUL:%[0-9]+]]:_(s32) = G_MUL [[UV]], [[UV5]]
+ ; GFX12-NEXT: [[MUL1:%[0-9]+]]:_(s32) = G_MUL [[UV1]], [[UV4]]
+ ; GFX12-NEXT: [[ADD:%[0-9]+]]:_(s32) = G_ADD [[MUL]], [[MUL1]]
+ ; GFX12-NEXT: [[MUL2:%[0-9]+]]:_(s32) = G_MUL [[UV2]], [[UV3]]
+ ; GFX12-NEXT: [[ADD1:%[0-9]+]]:_(s32) = G_ADD [[ADD]], [[MUL2]]
+ ; GFX12-NEXT: [[MV:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[UV7]](s32), [[ADD1]](s32)
+ ; GFX12-NEXT: [[AMDGPU_MAD_U64_U32_2:%[0-9]+]]:_(s64), [[AMDGPU_MAD_U64_U32_3:%[0-9]+]]:_(s1) = G_AMDGPU_MAD_U64_U32 [[UV]](s32), [[UV4]], [[MV]]
+ ; GFX12-NEXT: [[AMDGPU_MAD_U64_U32_4:%[0-9]+]]:_(s64), [[AMDGPU_MAD_U64_U32_5:%[0-9]+]]:_(s1) = G_AMDGPU_MAD_U64_U32 [[UV1]](s32), [[UV3]], [[AMDGPU_MAD_U64_U32_2]]
+ ; GFX12-NEXT: [[UV8:%[0-9]+]]:_(s32), [[UV9:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[AMDGPU_MAD_U64_U32_4]](s64)
+ ; GFX12-NEXT: [[MV1:%[0-9]+]]:_(s96) = G_MERGE_VALUES [[UV6]](s32), [[UV8]](s32), [[UV9]](s32)
+ ; GFX12-NEXT: $vgpr0_vgpr1_vgpr2 = COPY [[MV1]](s96)
%0:_(s96) = COPY $vgpr0_vgpr1_vgpr2
%1:_(s96) = COPY $vgpr3_vgpr4_vgpr5
%2:_(s96) = G_MUL %0, %1
diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/mubuf-global.ll b/llvm/test/CodeGen/AMDGPU/GlobalISel/mubuf-global.ll
index 8ca09973a8ed..904120e7d118 100644
--- a/llvm/test/CodeGen/AMDGPU/GlobalISel/mubuf-global.ll
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/mubuf-global.ll
@@ -1295,8 +1295,7 @@ define amdgpu_ps float @mubuf_atomicrmw_sgpr_ptr_offset4095(ptr addrspace(1) inr
; GFX12-NEXT: v_dual_mov_b32 v0, 2 :: v_dual_mov_b32 v1, 0
; GFX12-NEXT: global_atomic_add_u32 v0, v1, v0, s[2:3] offset:16380 th:TH_ATOMIC_RETURN
; GFX12-NEXT: s_waitcnt vmcnt(0)
-; GFX12-NEXT: buffer_gl0_inv
-; GFX12-NEXT: buffer_gl1_inv
+; GFX12-NEXT: global_inv scope:SCOPE_DEV
; GFX12-NEXT: ; return to shader part epilog
%gep = getelementptr i32, ptr addrspace(1) %ptr, i64 4095
%result = atomicrmw add ptr addrspace(1) %gep, i32 2 syncscope("agent") seq_cst
@@ -1347,8 +1346,7 @@ define amdgpu_ps float @mubuf_atomicrmw_sgpr_ptr_offset4294967296(ptr addrspace(
; GFX12-NEXT: v_mov_b32_e32 v2, 2
; GFX12-NEXT: global_atomic_add_u32 v0, v[0:1], v2, off th:TH_ATOMIC_RETURN
; GFX12-NEXT: s_waitcnt vmcnt(0)
-; GFX12-NEXT: buffer_gl0_inv
-; GFX12-NEXT: buffer_gl1_inv
+; GFX12-NEXT: global_inv scope:SCOPE_DEV
; GFX12-NEXT: ; return to shader part epilog
%gep = getelementptr i32, ptr addrspace(1) %ptr, i64 4294967296
%result = atomicrmw add ptr addrspace(1) %gep, i32 2 syncscope("agent") seq_cst
@@ -1389,8 +1387,7 @@ define amdgpu_ps float @mubuf_atomicrmw_vgpr_ptr_offset4095(ptr addrspace(1) %pt
; GFX12-NEXT: v_mov_b32_e32 v2, 2
; GFX12-NEXT: global_atomic_add_u32 v0, v[0:1], v2, off offset:16380 th:TH_ATOMIC_RETURN
; GFX12-NEXT: s_waitcnt vmcnt(0)
-; GFX12-NEXT: buffer_gl0_inv
-; GFX12-NEXT: buffer_gl1_inv
+; GFX12-NEXT: global_inv scope:SCOPE_DEV
; GFX12-NEXT: ; return to shader part epilog
%gep = getelementptr i32, ptr addrspace(1) %ptr, i64 4095
%result = atomicrmw add ptr addrspace(1) %gep, i32 2 syncscope("agent") seq_cst
@@ -1438,8 +1435,7 @@ define amdgpu_ps float @mubuf_atomicrmw_vgpr_ptr_offset4294967296(ptr addrspace(
; GFX12-NEXT: v_mov_b32_e32 v2, 2
; GFX12-NEXT: global_atomic_add_u32 v0, v[0:1], v2, off th:TH_ATOMIC_RETURN
; GFX12-NEXT: s_waitcnt vmcnt(0)
-; GFX12-NEXT: buffer_gl0_inv
-; GFX12-NEXT: buffer_gl1_inv
+; GFX12-NEXT: global_inv scope:SCOPE_DEV
; GFX12-NEXT: ; return to shader part epilog
%gep = getelementptr i32, ptr addrspace(1) %ptr, i64 4294967296
%result = atomicrmw add ptr addrspace(1) %gep, i32 2 syncscope("agent") seq_cst
@@ -1491,8 +1487,7 @@ define amdgpu_ps float @mubuf_atomicrmw_sgpr_ptr_vgpr_offset(ptr addrspace(1) in
; GFX12-NEXT: v_add_co_ci_u32_e32 v1, vcc_lo, v3, v1, vcc_lo
; GFX12-NEXT: global_atomic_add_u32 v0, v[0:1], v4, off th:TH_ATOMIC_RETURN
; GFX12-NEXT: s_waitcnt vmcnt(0)
-; GFX12-NEXT: buffer_gl0_inv
-; GFX12-NEXT: buffer_gl1_inv
+; GFX12-NEXT: global_inv scope:SCOPE_DEV
; GFX12-NEXT: ; return to shader part epilog
%gep = getelementptr i32, ptr addrspace(1) %ptr, i32 %voffset
%result = atomicrmw add ptr addrspace(1) %gep, i32 2 syncscope("agent") seq_cst
@@ -1536,8 +1531,7 @@ define amdgpu_ps float @mubuf_cmpxchg_sgpr_ptr_offset4095(ptr addrspace(1) inreg
; GFX12-NEXT: v_mov_b32_e32 v0, 0
; GFX12-NEXT: global_atomic_cmpswap_b32 v0, v0, v[1:2], s[2:3] offset:16380 th:TH_ATOMIC_RETURN
; GFX12-NEXT: s_waitcnt vmcnt(0)
-; GFX12-NEXT: buffer_gl0_inv
-; GFX12-NEXT: buffer_gl1_inv
+; GFX12-NEXT: global_inv scope:SCOPE_DEV
; GFX12-NEXT: ; return to shader part epilog
%gep = getelementptr i32, ptr addrspace(1) %ptr, i64 4095
%result.struct = cmpxchg ptr addrspace(1) %gep, i32 %old, i32 %in syncscope("agent") seq_cst seq_cst
@@ -1590,8 +1584,7 @@ define amdgpu_ps float @mubuf_cmpxchg_sgpr_ptr_offset4294967296(ptr addrspace(1)
; GFX12-NEXT: v_dual_mov_b32 v4, s1 :: v_dual_mov_b32 v3, s0
; GFX12-NEXT: global_atomic_cmpswap_b32 v0, v[3:4], v[1:2], off th:TH_ATOMIC_RETURN
; GFX12-NEXT: s_waitcnt vmcnt(0)
-; GFX12-NEXT: buffer_gl0_inv
-; GFX12-NEXT: buffer_gl1_inv
+; GFX12-NEXT: global_inv scope:SCOPE_DEV
; GFX12-NEXT: ; return to shader part epilog
%gep = getelementptr i32, ptr addrspace(1) %ptr, i64 4294967296
%result.struct = cmpxchg ptr addrspace(1) %gep, i32 %old, i32 %in syncscope("agent") seq_cst seq_cst
@@ -1633,8 +1626,7 @@ define amdgpu_ps float @mubuf_cmpxchg_vgpr_ptr_offset4095(ptr addrspace(1) %ptr,
; GFX12-NEXT: v_mov_b32_e32 v4, v2
; GFX12-NEXT: global_atomic_cmpswap_b32 v0, v[0:1], v[3:4], off offset:16380 th:TH_ATOMIC_RETURN
; GFX12-NEXT: s_waitcnt vmcnt(0)
-; GFX12-NEXT: buffer_gl0_inv
-; GFX12-NEXT: buffer_gl1_inv
+; GFX12-NEXT: global_inv scope:SCOPE_DEV
; GFX12-NEXT: ; return to shader part epilog
%gep = getelementptr i32, ptr addrspace(1) %ptr, i64 4095
%result.struct = cmpxchg ptr addrspace(1) %gep, i32 %old, i32 %in syncscope("agent") seq_cst seq_cst
@@ -1682,8 +1674,7 @@ define amdgpu_ps float @mubuf_cmpxchg_vgpr_ptr_offset4294967296(ptr addrspace(1)
; GFX12-NEXT: v_add_co_ci_u32_e32 v1, vcc_lo, v1, v6, vcc_lo
; GFX12-NEXT: global_atomic_cmpswap_b32 v0, v[0:1], v[3:4], off th:TH_ATOMIC_RETURN
; GFX12-NEXT: s_waitcnt vmcnt(0)
-; GFX12-NEXT: buffer_gl0_inv
-; GFX12-NEXT: buffer_gl1_inv
+; GFX12-NEXT: global_inv scope:SCOPE_DEV
; GFX12-NEXT: ; return to shader part epilog
%gep = getelementptr i32, ptr addrspace(1) %ptr, i64 4294967296
%result.struct = cmpxchg ptr addrspace(1) %gep, i32 %old, i32 %in syncscope("agent") seq_cst seq_cst
@@ -1736,8 +1727,7 @@ define amdgpu_ps float @mubuf_cmpxchg_sgpr_ptr_vgpr_offset(ptr addrspace(1) inre
; GFX12-NEXT: v_add_co_ci_u32_e32 v1, vcc_lo, v5, v1, vcc_lo
; GFX12-NEXT: global_atomic_cmpswap_b32 v0, v[0:1], v[2:3], off th:TH_ATOMIC_RETURN
; GFX12-NEXT: s_waitcnt vmcnt(0)
-; GFX12-NEXT: buffer_gl0_inv
-; GFX12-NEXT: buffer_gl1_inv
+; GFX12-NEXT: global_inv scope:SCOPE_DEV
; GFX12-NEXT: ; return to shader part epilog
%gep = getelementptr i32, ptr addrspace(1) %ptr, i32 %voffset
%result.struct = cmpxchg ptr addrspace(1) %gep, i32 %old, i32 %in syncscope("agent") seq_cst seq_cst
diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/mul.ll b/llvm/test/CodeGen/AMDGPU/GlobalISel/mul.ll
index eb3f74be71de..0840f58ecd1a 100644
--- a/llvm/test/CodeGen/AMDGPU/GlobalISel/mul.ll
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/mul.ll
@@ -4,6 +4,7 @@
; RUN: llc -global-isel -march=amdgcn -mcpu=gfx900 -verify-machineinstrs < %s | FileCheck -check-prefixes=GCN,GFX9 %s
; RUN: llc -global-isel -march=amdgcn -mcpu=gfx1010 -verify-machineinstrs < %s | FileCheck -check-prefixes=GFX10PLUS,GFX10 %s
; RUN: llc -global-isel -march=amdgcn -mcpu=gfx1100 -amdgpu-enable-delay-alu=0 -verify-machineinstrs < %s | FileCheck -check-prefixes=GFX10PLUS,GFX11 %s
+; RUN: llc -global-isel -march=amdgcn -mcpu=gfx1200 -verify-machineinstrs < %s | FileCheck -check-prefixes=GFX12 %s
define amdgpu_ps i16 @s_mul_i16(i16 inreg %num, i16 inreg %den) {
; GFX7-LABEL: s_mul_i16:
@@ -31,6 +32,14 @@ define amdgpu_ps i16 @s_mul_i16(i16 inreg %num, i16 inreg %den) {
; GFX10PLUS-NEXT: s_and_b32 s1, s1, 0xffff
; GFX10PLUS-NEXT: s_mul_i32 s0, s0, s1
; GFX10PLUS-NEXT: ; return to shader part epilog
+;
+; GFX12-LABEL: s_mul_i16:
+; GFX12: ; %bb.0:
+; GFX12-NEXT: s_and_b32 s0, s0, 0xffff
+; GFX12-NEXT: s_and_b32 s1, s1, 0xffff
+; GFX12-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX12-NEXT: s_mul_i32 s0, s0, s1
+; GFX12-NEXT: ; return to shader part epilog
%result = mul i16 %num, %den
ret i16 %result
}
@@ -61,6 +70,12 @@ define i16 @v_mul_i16(i16 %num, i16 %den) {
; GFX10PLUS-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX10PLUS-NEXT: v_mul_lo_u16 v0, v0, v1
; GFX10PLUS-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX12-LABEL: v_mul_i16:
+; GFX12: ; %bb.0:
+; GFX12-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX12-NEXT: v_mul_lo_u16 v0, v0, v1
+; GFX12-NEXT: s_setpc_b64 s[30:31]
%result = mul i16 %num, %den
ret i16 %result
}
@@ -95,6 +110,15 @@ define amdgpu_ps zeroext i16 @s_mul_i16_zeroext(i16 inreg zeroext %num, i16 inre
; GFX10PLUS-NEXT: s_mul_i32 s0, s0, s1
; GFX10PLUS-NEXT: s_and_b32 s0, s0, 0xffff
; GFX10PLUS-NEXT: ; return to shader part epilog
+;
+; GFX12-LABEL: s_mul_i16_zeroext:
+; GFX12: ; %bb.0:
+; GFX12-NEXT: s_and_b32 s0, s0, 0xffff
+; GFX12-NEXT: s_and_b32 s1, s1, 0xffff
+; GFX12-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
+; GFX12-NEXT: s_mul_i32 s0, s0, s1
+; GFX12-NEXT: s_and_b32 s0, s0, 0xffff
+; GFX12-NEXT: ; return to shader part epilog
%result = mul i16 %num, %den
ret i16 %result
}
@@ -125,6 +149,14 @@ define zeroext i16 @v_mul_i16_zeroext(i16 zeroext %num, i16 zeroext %den) {
; GFX10PLUS-NEXT: v_mul_lo_u16 v0, v0, v1
; GFX10PLUS-NEXT: v_and_b32_e32 v0, 0xffff, v0
; GFX10PLUS-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX12-LABEL: v_mul_i16_zeroext:
+; GFX12: ; %bb.0:
+; GFX12-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX12-NEXT: v_mul_lo_u16 v0, v0, v1
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX12-NEXT: v_and_b32_e32 v0, 0xffff, v0
+; GFX12-NEXT: s_setpc_b64 s[30:31]
%result = mul i16 %num, %den
ret i16 %result
}
@@ -159,6 +191,15 @@ define amdgpu_ps signext i16 @s_mul_i16_signext(i16 inreg signext %num, i16 inre
; GFX10PLUS-NEXT: s_mul_i32 s0, s0, s1
; GFX10PLUS-NEXT: s_sext_i32_i16 s0, s0
; GFX10PLUS-NEXT: ; return to shader part epilog
+;
+; GFX12-LABEL: s_mul_i16_signext:
+; GFX12: ; %bb.0:
+; GFX12-NEXT: s_and_b32 s0, s0, 0xffff
+; GFX12-NEXT: s_and_b32 s1, s1, 0xffff
+; GFX12-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
+; GFX12-NEXT: s_mul_i32 s0, s0, s1
+; GFX12-NEXT: s_sext_i32_i16 s0, s0
+; GFX12-NEXT: ; return to shader part epilog
%result = mul i16 %num, %den
ret i16 %result
}
@@ -193,6 +234,14 @@ define signext i16 @v_mul_i16_signext(i16 signext %num, i16 signext %den) {
; GFX10PLUS-NEXT: v_mul_lo_u16 v0, v0, v1
; GFX10PLUS-NEXT: v_bfe_i32 v0, v0, 0, 16
; GFX10PLUS-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX12-LABEL: v_mul_i16_signext:
+; GFX12: ; %bb.0:
+; GFX12-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX12-NEXT: v_mul_lo_u16 v0, v0, v1
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX12-NEXT: v_bfe_i32 v0, v0, 0, 16
+; GFX12-NEXT: s_setpc_b64 s[30:31]
%result = mul i16 %num, %den
ret i16 %result
}
@@ -207,6 +256,11 @@ define amdgpu_ps i32 @s_mul_i32(i32 inreg %num, i32 inreg %den) {
; GFX10PLUS: ; %bb.0:
; GFX10PLUS-NEXT: s_mul_i32 s0, s0, s1
; GFX10PLUS-NEXT: ; return to shader part epilog
+;
+; GFX12-LABEL: s_mul_i32:
+; GFX12: ; %bb.0:
+; GFX12-NEXT: s_mul_i32 s0, s0, s1
+; GFX12-NEXT: ; return to shader part epilog
%result = mul i32 %num, %den
ret i32 %result
}
@@ -223,6 +277,12 @@ define i32 @v_mul_i32(i32 %num, i32 %den) {
; GFX10PLUS-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX10PLUS-NEXT: v_mul_lo_u32 v0, v0, v1
; GFX10PLUS-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX12-LABEL: v_mul_i32:
+; GFX12: ; %bb.0:
+; GFX12-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX12-NEXT: v_mul_lo_u32 v0, v0, v1
+; GFX12-NEXT: s_setpc_b64 s[30:31]
%result = mul i32 %num, %den
ret i32 %result
}
@@ -239,6 +299,12 @@ define amdgpu_ps <2 x i32> @s_mul_v2i32(<2 x i32> inreg %num, <2 x i32> inreg %d
; GFX10PLUS-NEXT: s_mul_i32 s0, s0, s2
; GFX10PLUS-NEXT: s_mul_i32 s1, s1, s3
; GFX10PLUS-NEXT: ; return to shader part epilog
+;
+; GFX12-LABEL: s_mul_v2i32:
+; GFX12: ; %bb.0:
+; GFX12-NEXT: s_mul_i32 s0, s0, s2
+; GFX12-NEXT: s_mul_i32 s1, s1, s3
+; GFX12-NEXT: ; return to shader part epilog
%result = mul <2 x i32> %num, %den
ret <2 x i32> %result
}
@@ -257,6 +323,13 @@ define <2 x i32> @v_mul_v2i32(<2 x i32> %num, <2 x i32> %den) {
; GFX10PLUS-NEXT: v_mul_lo_u32 v0, v0, v2
; GFX10PLUS-NEXT: v_mul_lo_u32 v1, v1, v3
; GFX10PLUS-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX12-LABEL: v_mul_v2i32:
+; GFX12: ; %bb.0:
+; GFX12-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX12-NEXT: v_mul_lo_u32 v0, v0, v2
+; GFX12-NEXT: v_mul_lo_u32 v1, v1, v3
+; GFX12-NEXT: s_setpc_b64 s[30:31]
%result = mul <2 x i32> %num, %den
ret <2 x i32> %result
}
@@ -308,6 +381,11 @@ define amdgpu_cs i33 @s_mul_i33(i33 inreg %num, i33 inreg %den) {
; GFX10PLUS-NEXT: s_mul_i32 s0, s0, s2
; GFX10PLUS-NEXT: s_add_i32 s1, s3, s1
; GFX10PLUS-NEXT: ; return to shader part epilog
+;
+; GFX12-LABEL: s_mul_i33:
+; GFX12: ; %bb.0:
+; GFX12-NEXT: s_mul_u64 s[0:1], s[0:1], s[2:3]
+; GFX12-NEXT: ; return to shader part epilog
%result = mul i33 %num, %den
ret i33 %result
}
@@ -359,6 +437,11 @@ define amdgpu_ps i64 @s_mul_i64(i64 inreg %num, i64 inreg %den) {
; GFX10PLUS-NEXT: s_mul_i32 s0, s0, s2
; GFX10PLUS-NEXT: s_add_i32 s1, s3, s1
; GFX10PLUS-NEXT: ; return to shader part epilog
+;
+; GFX12-LABEL: s_mul_i64:
+; GFX12: ; %bb.0:
+; GFX12-NEXT: s_mul_u64 s[0:1], s[0:1], s[2:3]
+; GFX12-NEXT: ; return to shader part epilog
%result = mul i64 %num, %den
ret i64 %result
}
@@ -394,6 +477,17 @@ define i64 @v_mul_i64(i64 %num, i64 %den) {
; GFX11-NEXT: v_mul_lo_u32 v2, v5, v2
; GFX11-NEXT: v_add3_u32 v1, v1, v3, v2
; GFX11-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX12-LABEL: v_mul_i64:
+; GFX12: ; %bb.0:
+; GFX12-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX12-NEXT: v_mul_hi_u32 v4, v0, v2
+; GFX12-NEXT: v_mul_lo_u32 v3, v0, v3
+; GFX12-NEXT: v_mul_lo_u32 v1, v1, v2
+; GFX12-NEXT: v_mul_lo_u32 v0, v0, v2
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_2)
+; GFX12-NEXT: v_add3_u32 v1, v4, v3, v1
+; GFX12-NEXT: s_setpc_b64 s[30:31]
%result = mul i64 %num, %den
ret i64 %result
}
@@ -490,6 +584,26 @@ define amdgpu_ps <3 x i32> @s_mul_i96(i96 inreg %num, i96 inreg %den) {
; GFX10PLUS-NEXT: s_addc_u32 s2, s3, s0
; GFX10PLUS-NEXT: s_mov_b32 s0, s5
; GFX10PLUS-NEXT: ; return to shader part epilog
+;
+; GFX12-LABEL: s_mul_i96:
+; GFX12: ; %bb.0:
+; GFX12-NEXT: s_mul_i32 s6, s0, s5
+; GFX12-NEXT: s_mul_i32 s7, s1, s4
+; GFX12-NEXT: s_mul_i32 s2, s2, s3
+; GFX12-NEXT: s_add_co_i32 s6, s6, s7
+; GFX12-NEXT: s_mul_hi_u32 s7, s0, s3
+; GFX12-NEXT: s_add_co_i32 s6, s6, s2
+; GFX12-NEXT: s_mul_i32 s2, s0, s4
+; GFX12-NEXT: s_mul_i32 s5, s0, s3
+; GFX12-NEXT: s_mul_hi_u32 s0, s0, s4
+; GFX12-NEXT: s_add_co_u32 s2, s2, s7
+; GFX12-NEXT: s_mul_i32 s4, s1, s3
+; GFX12-NEXT: s_add_co_ci_u32 s0, s0, s6
+; GFX12-NEXT: s_mul_hi_u32 s3, s1, s3
+; GFX12-NEXT: s_add_co_u32 s1, s4, s2
+; GFX12-NEXT: s_add_co_ci_u32 s2, s3, s0
+; GFX12-NEXT: s_mov_b32 s0, s5
+; GFX12-NEXT: ; return to shader part epilog
%result = mul i96 %num, %den
%cast = bitcast i96 %result to <3 x i32>
ret <3 x i32> %cast
@@ -536,6 +650,22 @@ define i96 @v_mul_i96(i96 %num, i96 %den) {
; GFX11-NEXT: v_mad_u64_u32 v[1:2], null, v6, v4, v[1:2]
; GFX11-NEXT: v_mad_u64_u32 v[1:2], null, v7, v3, v[1:2]
; GFX11-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX12-LABEL: v_mul_i96:
+; GFX12: ; %bb.0:
+; GFX12-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX12-NEXT: v_dual_mov_b32 v6, v0 :: v_dual_mov_b32 v7, v1
+; GFX12-NEXT: v_mul_lo_u32 v2, v2, v3
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX12-NEXT: v_mul_lo_u32 v5, v6, v5
+; GFX12-NEXT: v_mul_lo_u32 v8, v7, v4
+; GFX12-NEXT: v_mad_co_u64_u32 v[0:1], null, v6, v3, 0
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-NEXT: v_add3_u32 v2, v5, v8, v2
+; GFX12-NEXT: v_mad_co_u64_u32 v[1:2], null, v6, v4, v[1:2]
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX12-NEXT: v_mad_co_u64_u32 v[1:2], null, v7, v3, v[1:2]
+; GFX12-NEXT: s_setpc_b64 s[30:31]
%result = mul i96 %num, %den
ret i96 %result
}
@@ -709,6 +839,42 @@ define amdgpu_ps <4 x i32> @s_mul_i128(i128 inreg %num, i128 inreg %den) {
; GFX10PLUS-NEXT: s_mov_b32 s1, s8
; GFX10PLUS-NEXT: s_mov_b32 s2, s7
; GFX10PLUS-NEXT: ; return to shader part epilog
+;
+; GFX12-LABEL: s_mul_i128:
+; GFX12: ; %bb.0:
+; GFX12-NEXT: s_mul_i32 s9, s0, s6
+; GFX12-NEXT: s_mul_i32 s11, s1, s5
+; GFX12-NEXT: s_mul_hi_u32 s10, s0, s6
+; GFX12-NEXT: s_mul_hi_u32 s12, s1, s5
+; GFX12-NEXT: s_add_co_u32 s9, s11, s9
+; GFX12-NEXT: s_mul_i32 s11, s2, s4
+; GFX12-NEXT: s_add_co_ci_u32 s10, s12, s10
+; GFX12-NEXT: s_mul_hi_u32 s12, s2, s4
+; GFX12-NEXT: s_mul_hi_u32 s8, s0, s4
+; GFX12-NEXT: s_add_co_u32 s9, s11, s9
+; GFX12-NEXT: s_mul_i32 s11, s0, s5
+; GFX12-NEXT: s_add_co_ci_u32 s10, s12, s10
+; GFX12-NEXT: s_mul_hi_u32 s12, s0, s5
+; GFX12-NEXT: s_add_co_u32 s8, s11, s8
+; GFX12-NEXT: s_add_co_ci_u32 s9, s12, s9
+; GFX12-NEXT: s_mul_i32 s12, s1, s4
+; GFX12-NEXT: s_mul_hi_u32 s13, s1, s4
+; GFX12-NEXT: s_cselect_b32 s11, 1, 0
+; GFX12-NEXT: s_add_co_u32 s8, s12, s8
+; GFX12-NEXT: s_mul_i32 s12, s0, s7
+; GFX12-NEXT: s_add_co_ci_u32 s7, s13, s9
+; GFX12-NEXT: s_add_co_ci_u32 s9, s10, s12
+; GFX12-NEXT: s_mul_i32 s1, s1, s6
+; GFX12-NEXT: s_cmp_lg_u32 s11, 0
+; GFX12-NEXT: s_mul_i32 s2, s2, s5
+; GFX12-NEXT: s_add_co_ci_u32 s1, s9, s1
+; GFX12-NEXT: s_mul_i32 s3, s3, s4
+; GFX12-NEXT: s_add_co_i32 s1, s1, s2
+; GFX12-NEXT: s_mul_i32 s0, s0, s4
+; GFX12-NEXT: s_add_co_i32 s3, s1, s3
+; GFX12-NEXT: s_mov_b32 s1, s8
+; GFX12-NEXT: s_mov_b32 s2, s7
+; GFX12-NEXT: ; return to shader part epilog
%result = mul i128 %num, %den
%cast = bitcast i128 %result to <4 x i32>
ret <4 x i32> %cast
@@ -820,6 +986,32 @@ define i128 @v_mul_i128(i128 %num, i128 %den) {
; GFX11-NEXT: v_add_co_ci_u32_e32 v4, vcc_lo, v7, v6, vcc_lo
; GFX11-NEXT: v_add3_u32 v3, v4, v5, v3
; GFX11-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX12-LABEL: v_mul_i128:
+; GFX12: ; %bb.0:
+; GFX12-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX12-NEXT: v_dual_mov_b32 v8, v0 :: v_dual_mov_b32 v9, v1
+; GFX12-NEXT: v_mov_b32_e32 v10, v2
+; GFX12-NEXT: v_mul_lo_u32 v3, v3, v4
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_2) | instid1(VALU_DEP_3)
+; GFX12-NEXT: v_mad_co_u64_u32 v[0:1], null, v8, v6, 0
+; GFX12-NEXT: v_mul_lo_u32 v7, v8, v7
+; GFX12-NEXT: v_mul_lo_u32 v6, v9, v6
+; GFX12-NEXT: v_mad_co_u64_u32 v[11:12], null, v9, v5, v[0:1]
+; GFX12-NEXT: v_mad_co_u64_u32 v[0:1], null, v8, v4, 0
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-NEXT: v_mad_co_u64_u32 v[11:12], null, v10, v4, v[11:12]
+; GFX12-NEXT: v_mov_b32_e32 v2, v11
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
+; GFX12-NEXT: v_mad_co_u64_u32 v[1:2], vcc_lo, v8, v5, v[1:2]
+; GFX12-NEXT: v_mul_lo_u32 v5, v10, v5
+; GFX12-NEXT: v_mad_co_u64_u32 v[1:2], s0, v9, v4, v[1:2]
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-NEXT: v_add_co_ci_u32_e64 v7, s0, v12, v7, s0
+; GFX12-NEXT: v_add_co_ci_u32_e32 v4, vcc_lo, v7, v6, vcc_lo
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX12-NEXT: v_add3_u32 v3, v4, v5, v3
+; GFX12-NEXT: s_setpc_b64 s[30:31]
%result = mul i128 %num, %den
ret i128 %result
}
@@ -1625,6 +1817,185 @@ define amdgpu_ps <8 x i32> @s_mul_i256(i256 inreg %num, i256 inreg %den) {
; GFX10PLUS-NEXT: s_add_i32 s7, s1, s7
; GFX10PLUS-NEXT: s_mov_b32 s1, s16
; GFX10PLUS-NEXT: ; return to shader part epilog
+;
+; GFX12-LABEL: s_mul_i256:
+; GFX12: ; %bb.0:
+; GFX12-NEXT: s_mul_i32 s17, s0, s10
+; GFX12-NEXT: s_mul_i32 s19, s1, s9
+; GFX12-NEXT: s_mul_hi_u32 s18, s0, s10
+; GFX12-NEXT: s_mul_hi_u32 s20, s1, s9
+; GFX12-NEXT: s_add_co_u32 s17, s19, s17
+; GFX12-NEXT: s_add_co_ci_u32 s18, s20, s18
+; GFX12-NEXT: s_mul_i32 s20, s2, s8
+; GFX12-NEXT: s_mul_hi_u32 s21, s2, s8
+; GFX12-NEXT: s_cselect_b32 s19, 1, 0
+; GFX12-NEXT: s_add_co_u32 s17, s20, s17
+; GFX12-NEXT: s_mul_hi_u32 s16, s0, s8
+; GFX12-NEXT: s_add_co_ci_u32 s18, s21, s18
+; GFX12-NEXT: s_mul_i32 s21, s0, s9
+; GFX12-NEXT: s_mul_hi_u32 s22, s0, s9
+; GFX12-NEXT: s_cselect_b32 s20, 1, 0
+; GFX12-NEXT: s_add_co_u32 s16, s21, s16
+; GFX12-NEXT: s_add_co_ci_u32 s17, s22, s17
+; GFX12-NEXT: s_mul_i32 s22, s1, s8
+; GFX12-NEXT: s_mul_hi_u32 s23, s1, s8
+; GFX12-NEXT: s_cselect_b32 s21, 1, 0
+; GFX12-NEXT: s_add_co_u32 s16, s22, s16
+; GFX12-NEXT: s_add_co_ci_u32 s17, s23, s17
+; GFX12-NEXT: s_mul_i32 s23, s0, s12
+; GFX12-NEXT: s_mul_i32 s25, s1, s11
+; GFX12-NEXT: s_mul_hi_u32 s24, s0, s12
+; GFX12-NEXT: s_mul_hi_u32 s26, s1, s11
+; GFX12-NEXT: s_cselect_b32 s22, 1, 0
+; GFX12-NEXT: s_add_co_u32 s23, s25, s23
+; GFX12-NEXT: s_add_co_ci_u32 s24, s26, s24
+; GFX12-NEXT: s_mul_i32 s26, s2, s10
+; GFX12-NEXT: s_mul_hi_u32 s27, s2, s10
+; GFX12-NEXT: s_cselect_b32 s25, 1, 0
+; GFX12-NEXT: s_add_co_u32 s23, s26, s23
+; GFX12-NEXT: s_add_co_ci_u32 s24, s27, s24
+; GFX12-NEXT: s_mul_i32 s27, s3, s9
+; GFX12-NEXT: s_mul_hi_u32 s28, s3, s9
+; GFX12-NEXT: s_cselect_b32 s26, 1, 0
+; GFX12-NEXT: s_add_co_u32 s23, s27, s23
+; GFX12-NEXT: s_add_co_ci_u32 s24, s28, s24
+; GFX12-NEXT: s_mul_i32 s28, s4, s8
+; GFX12-NEXT: s_mul_hi_u32 s29, s4, s8
+; GFX12-NEXT: s_cselect_b32 s27, 1, 0
+; GFX12-NEXT: s_add_co_u32 s23, s28, s23
+; GFX12-NEXT: s_add_co_ci_u32 s24, s29, s24
+; GFX12-NEXT: s_mul_i32 s29, s0, s11
+; GFX12-NEXT: s_mul_hi_u32 s30, s0, s11
+; GFX12-NEXT: s_cselect_b32 s28, 1, 0
+; GFX12-NEXT: s_add_co_u32 s18, s29, s18
+; GFX12-NEXT: s_add_co_ci_u32 s23, s30, s23
+; GFX12-NEXT: s_mul_i32 s30, s1, s10
+; GFX12-NEXT: s_mul_hi_u32 s31, s1, s10
+; GFX12-NEXT: s_cselect_b32 s29, 1, 0
+; GFX12-NEXT: s_add_co_u32 s18, s30, s18
+; GFX12-NEXT: s_add_co_ci_u32 s23, s31, s23
+; GFX12-NEXT: s_mul_i32 s31, s2, s9
+; GFX12-NEXT: s_mul_hi_u32 s33, s2, s9
+; GFX12-NEXT: s_cselect_b32 s30, 1, 0
+; GFX12-NEXT: s_add_co_u32 s18, s31, s18
+; GFX12-NEXT: s_add_co_ci_u32 s23, s33, s23
+; GFX12-NEXT: s_mul_i32 s33, s3, s8
+; GFX12-NEXT: s_mul_hi_u32 s34, s3, s8
+; GFX12-NEXT: s_cselect_b32 s31, 1, 0
+; GFX12-NEXT: s_add_co_u32 s18, s33, s18
+; GFX12-NEXT: s_add_co_ci_u32 s23, s34, s23
+; GFX12-NEXT: s_cselect_b32 s33, 1, 0
+; GFX12-NEXT: s_cmp_lg_u32 s22, 0
+; GFX12-NEXT: s_mul_hi_u32 s22, s0, s14
+; GFX12-NEXT: s_add_co_ci_u32 s18, s21, s18
+; GFX12-NEXT: s_cselect_b32 s21, 1, 0
+; GFX12-NEXT: s_cmp_lg_u32 s20, 0
+; GFX12-NEXT: s_mul_hi_u32 s34, s1, s13
+; GFX12-NEXT: s_add_co_ci_u32 s19, s19, 0
+; GFX12-NEXT: s_cmp_lg_u32 s21, 0
+; GFX12-NEXT: s_mul_i32 s21, s0, s14
+; GFX12-NEXT: s_add_co_ci_u32 s19, s19, s23
+; GFX12-NEXT: s_mul_i32 s23, s1, s13
+; GFX12-NEXT: s_cselect_b32 s20, 1, 0
+; GFX12-NEXT: s_add_co_u32 s21, s23, s21
+; GFX12-NEXT: s_mul_i32 s23, s2, s12
+; GFX12-NEXT: s_add_co_ci_u32 s22, s34, s22
+; GFX12-NEXT: s_mul_hi_u32 s34, s2, s12
+; GFX12-NEXT: s_add_co_u32 s21, s23, s21
+; GFX12-NEXT: s_mul_i32 s23, s3, s11
+; GFX12-NEXT: s_add_co_ci_u32 s22, s34, s22
+; GFX12-NEXT: s_mul_hi_u32 s34, s3, s11
+; GFX12-NEXT: s_add_co_u32 s21, s23, s21
+; GFX12-NEXT: s_mul_i32 s23, s4, s10
+; GFX12-NEXT: s_add_co_ci_u32 s22, s34, s22
+; GFX12-NEXT: s_mul_hi_u32 s34, s4, s10
+; GFX12-NEXT: s_add_co_u32 s21, s23, s21
+; GFX12-NEXT: s_mul_i32 s23, s5, s9
+; GFX12-NEXT: s_add_co_ci_u32 s22, s34, s22
+; GFX12-NEXT: s_mul_hi_u32 s34, s5, s9
+; GFX12-NEXT: s_add_co_u32 s21, s23, s21
+; GFX12-NEXT: s_mul_i32 s23, s6, s8
+; GFX12-NEXT: s_add_co_ci_u32 s22, s34, s22
+; GFX12-NEXT: s_mul_hi_u32 s34, s6, s8
+; GFX12-NEXT: s_add_co_u32 s21, s23, s21
+; GFX12-NEXT: s_mul_i32 s23, s0, s13
+; GFX12-NEXT: s_add_co_ci_u32 s22, s34, s22
+; GFX12-NEXT: s_mul_hi_u32 s34, s0, s13
+; GFX12-NEXT: s_add_co_u32 s23, s23, s24
+; GFX12-NEXT: s_add_co_ci_u32 s21, s34, s21
+; GFX12-NEXT: s_mul_i32 s34, s1, s12
+; GFX12-NEXT: s_mul_hi_u32 s35, s1, s12
+; GFX12-NEXT: s_cselect_b32 s24, 1, 0
+; GFX12-NEXT: s_add_co_u32 s23, s34, s23
+; GFX12-NEXT: s_add_co_ci_u32 s21, s35, s21
+; GFX12-NEXT: s_mul_i32 s35, s2, s11
+; GFX12-NEXT: s_mul_hi_u32 s36, s2, s11
+; GFX12-NEXT: s_cselect_b32 s34, 1, 0
+; GFX12-NEXT: s_add_co_u32 s23, s35, s23
+; GFX12-NEXT: s_add_co_ci_u32 s21, s36, s21
+; GFX12-NEXT: s_mul_i32 s36, s3, s10
+; GFX12-NEXT: s_mul_hi_u32 s37, s3, s10
+; GFX12-NEXT: s_cselect_b32 s35, 1, 0
+; GFX12-NEXT: s_add_co_u32 s23, s36, s23
+; GFX12-NEXT: s_add_co_ci_u32 s21, s37, s21
+; GFX12-NEXT: s_mul_i32 s37, s4, s9
+; GFX12-NEXT: s_mul_hi_u32 s38, s4, s9
+; GFX12-NEXT: s_cselect_b32 s36, 1, 0
+; GFX12-NEXT: s_add_co_u32 s23, s37, s23
+; GFX12-NEXT: s_add_co_ci_u32 s21, s38, s21
+; GFX12-NEXT: s_mul_i32 s38, s5, s8
+; GFX12-NEXT: s_mul_hi_u32 s39, s5, s8
+; GFX12-NEXT: s_cselect_b32 s37, 1, 0
+; GFX12-NEXT: s_add_co_u32 s23, s38, s23
+; GFX12-NEXT: s_add_co_ci_u32 s21, s39, s21
+; GFX12-NEXT: s_cselect_b32 s38, 1, 0
+; GFX12-NEXT: s_cmp_lg_u32 s30, 0
+; GFX12-NEXT: s_mul_i32 s1, s1, s14
+; GFX12-NEXT: s_add_co_ci_u32 s29, s29, 0
+; GFX12-NEXT: s_cmp_lg_u32 s31, 0
+; GFX12-NEXT: s_mul_i32 s2, s2, s13
+; GFX12-NEXT: s_add_co_ci_u32 s29, s29, 0
+; GFX12-NEXT: s_cmp_lg_u32 s33, 0
+; GFX12-NEXT: s_mul_i32 s3, s3, s12
+; GFX12-NEXT: s_add_co_ci_u32 s29, s29, 0
+; GFX12-NEXT: s_cmp_lg_u32 s20, 0
+; GFX12-NEXT: s_mul_i32 s4, s4, s11
+; GFX12-NEXT: s_add_co_ci_u32 s20, s29, s23
+; GFX12-NEXT: s_cselect_b32 s23, 1, 0
+; GFX12-NEXT: s_cmp_lg_u32 s26, 0
+; GFX12-NEXT: s_mul_i32 s26, s0, s15
+; GFX12-NEXT: s_add_co_ci_u32 s25, s25, 0
+; GFX12-NEXT: s_cmp_lg_u32 s27, 0
+; GFX12-NEXT: s_mul_i32 s5, s5, s10
+; GFX12-NEXT: s_add_co_ci_u32 s25, s25, 0
+; GFX12-NEXT: s_cmp_lg_u32 s28, 0
+; GFX12-NEXT: s_mul_i32 s6, s6, s9
+; GFX12-NEXT: s_add_co_ci_u32 s25, s25, 0
+; GFX12-NEXT: s_cmp_lg_u32 s23, 0
+; GFX12-NEXT: s_mul_i32 s7, s7, s8
+; GFX12-NEXT: s_add_co_ci_u32 s15, s25, s21
+; GFX12-NEXT: s_add_co_ci_u32 s21, s22, s26
+; GFX12-NEXT: s_cmp_lg_u32 s38, 0
+; GFX12-NEXT: s_mul_i32 s0, s0, s8
+; GFX12-NEXT: s_add_co_ci_u32 s1, s21, s1
+; GFX12-NEXT: s_cmp_lg_u32 s37, 0
+; GFX12-NEXT: s_add_co_ci_u32 s1, s1, s2
+; GFX12-NEXT: s_cmp_lg_u32 s36, 0
+; GFX12-NEXT: s_mov_b32 s2, s17
+; GFX12-NEXT: s_add_co_ci_u32 s1, s1, s3
+; GFX12-NEXT: s_cmp_lg_u32 s35, 0
+; GFX12-NEXT: s_mov_b32 s3, s18
+; GFX12-NEXT: s_add_co_ci_u32 s1, s1, s4
+; GFX12-NEXT: s_cmp_lg_u32 s34, 0
+; GFX12-NEXT: s_mov_b32 s4, s19
+; GFX12-NEXT: s_add_co_ci_u32 s1, s1, s5
+; GFX12-NEXT: s_cmp_lg_u32 s24, 0
+; GFX12-NEXT: s_mov_b32 s5, s20
+; GFX12-NEXT: s_add_co_ci_u32 s1, s1, s6
+; GFX12-NEXT: s_mov_b32 s6, s15
+; GFX12-NEXT: s_add_co_i32 s7, s1, s7
+; GFX12-NEXT: s_mov_b32 s1, s16
+; GFX12-NEXT: ; return to shader part epilog
%result = mul i256 %num, %den
%cast = bitcast i256 %result to <8 x i32>
ret <8 x i32> %cast
@@ -1978,6 +2349,454 @@ define i256 @v_mul_i256(i256 %num, i256 %den) {
; GFX11-NEXT: v_add_co_ci_u32_e64 v8, vcc_lo, v9, v27, s0
; GFX11-NEXT: v_add_nc_u32_e32 v7, v8, v7
; GFX11-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX12-LABEL: v_mul_i256:
+; GFX12: ; %bb.0:
+; GFX12-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX12-NEXT: v_dual_mov_b32 v16, v0 :: v_dual_mov_b32 v17, v1
+; GFX12-NEXT: v_mul_lo_u32 v27, v6, v9
+; GFX12-NEXT: v_mul_lo_u32 v7, v7, v8
+; GFX12-NEXT: v_mul_lo_u32 v28, v5, v10
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_2) | instid1(VALU_DEP_3)
+; GFX12-NEXT: v_mad_co_u64_u32 v[0:1], null, v16, v14, 0
+; GFX12-NEXT: v_mad_co_u64_u32 v[18:19], null, v16, v12, 0
+; GFX12-NEXT: v_mul_lo_u32 v30, v17, v14
+; GFX12-NEXT: v_mad_co_u64_u32 v[0:1], null, v17, v13, v[0:1]
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-NEXT: v_mad_co_u64_u32 v[18:19], s0, v17, v11, v[18:19]
+; GFX12-NEXT: v_cndmask_b32_e64 v20, 0, 1, s0
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX12-NEXT: v_mad_co_u64_u32 v[0:1], null, v2, v12, v[0:1]
+; GFX12-NEXT: v_mad_co_u64_u32 v[18:19], vcc_lo, v2, v10, v[18:19]
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_4)
+; GFX12-NEXT: v_add_co_ci_u32_e32 v22, vcc_lo, 0, v20, vcc_lo
+; GFX12-NEXT: v_mad_co_u64_u32 v[20:21], null, v16, v10, 0
+; GFX12-NEXT: v_mad_co_u64_u32 v[0:1], null, v3, v11, v[0:1]
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
+; GFX12-NEXT: v_mad_co_u64_u32 v[18:19], vcc_lo, v3, v9, v[18:19]
+; GFX12-NEXT: v_add_co_ci_u32_e32 v24, vcc_lo, 0, v22, vcc_lo
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX12-NEXT: v_mad_co_u64_u32 v[0:1], null, v4, v10, v[0:1]
+; GFX12-NEXT: v_mad_co_u64_u32 v[18:19], vcc_lo, v4, v8, v[18:19]
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX12-NEXT: v_add_co_ci_u32_e32 v26, vcc_lo, 0, v24, vcc_lo
+; GFX12-NEXT: v_mad_co_u64_u32 v[0:1], null, v5, v9, v[0:1]
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX12-NEXT: v_mad_co_u64_u32 v[22:23], null, v6, v8, v[0:1]
+; GFX12-NEXT: v_mad_co_u64_u32 v[0:1], s0, v17, v9, v[20:21]
+; GFX12-NEXT: v_cndmask_b32_e64 v25, 0, 1, s0
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX12-NEXT: v_mov_b32_e32 v20, v22
+; GFX12-NEXT: v_mad_co_u64_u32 v[21:22], vcc_lo, v2, v8, v[0:1]
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX12-NEXT: v_add_co_ci_u32_e32 v29, vcc_lo, 0, v25, vcc_lo
+; GFX12-NEXT: v_mad_co_u64_u32 v[0:1], s0, v16, v13, v[19:20]
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_3)
+; GFX12-NEXT: v_mov_b32_e32 v19, v22
+; GFX12-NEXT: v_mul_lo_u32 v22, v16, v15
+; GFX12-NEXT: v_mad_co_u64_u32 v[24:25], vcc_lo, v17, v12, v[0:1]
+; GFX12-NEXT: v_mad_co_u64_u32 v[0:1], null, v16, v8, 0
+; GFX12-NEXT: v_mov_b32_e32 v20, v18
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_4)
+; GFX12-NEXT: v_mad_co_u64_u32 v[14:15], s2, v16, v11, v[19:20]
+; GFX12-NEXT: v_mad_co_u64_u32 v[18:19], s1, v2, v11, v[24:25]
+; GFX12-NEXT: v_mul_lo_u32 v20, v4, v11
+; GFX12-NEXT: v_mul_lo_u32 v25, v3, v12
+; GFX12-NEXT: v_cndmask_b32_e64 v6, 0, 1, s2
+; GFX12-NEXT: v_mul_lo_u32 v24, v2, v13
+; GFX12-NEXT: v_mov_b32_e32 v13, v1
+; GFX12-NEXT: v_mad_co_u64_u32 v[11:12], s2, v17, v10, v[14:15]
+; GFX12-NEXT: v_mad_co_u64_u32 v[18:19], s3, v3, v10, v[18:19]
+; GFX12-NEXT: v_add_co_ci_u32_e64 v6, s2, 0, v6, s2
+; GFX12-NEXT: v_mov_b32_e32 v14, v21
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-NEXT: v_mad_co_u64_u32 v[1:2], s2, v2, v9, v[11:12]
+; GFX12-NEXT: v_add_co_ci_u32_e64 v6, s2, 0, v6, s2
+; GFX12-NEXT: v_mad_co_u64_u32 v[10:11], s2, v4, v9, v[18:19]
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-NEXT: v_mad_co_u64_u32 v[12:13], s4, v16, v9, v[13:14]
+; GFX12-NEXT: v_cndmask_b32_e64 v9, 0, 1, s4
+; GFX12-NEXT: v_mad_co_u64_u32 v[3:4], s4, v3, v8, v[1:2]
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_1)
+; GFX12-NEXT: v_add_co_ci_u32_e64 v14, s4, 0, v6, s4
+; GFX12-NEXT: v_mad_co_u64_u32 v[5:6], s4, v5, v8, v[10:11]
+; GFX12-NEXT: v_mad_co_u64_u32 v[1:2], s5, v17, v8, v[12:13]
+; GFX12-NEXT: v_add_co_ci_u32_e64 v3, s5, v9, v3, s5
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-NEXT: v_add_co_ci_u32_e64 v4, s5, v29, v4, s5
+; GFX12-NEXT: v_add_co_ci_u32_e64 v5, s5, v14, v5, s5
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-NEXT: v_add_co_ci_u32_e64 v6, s5, v26, v6, s5
+; GFX12-NEXT: v_add_co_ci_u32_e64 v9, s5, v23, v22, s5
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-NEXT: v_add_co_ci_u32_e64 v9, s4, v9, v30, s4
+; GFX12-NEXT: v_add_co_ci_u32_e64 v9, s2, v9, v24, s2
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-NEXT: v_add_co_ci_u32_e64 v9, s2, v9, v25, s3
+; GFX12-NEXT: v_add_co_ci_u32_e64 v9, s1, v9, v20, s1
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-NEXT: v_add_co_ci_u32_e32 v9, vcc_lo, v9, v28, vcc_lo
+; GFX12-NEXT: v_add_co_ci_u32_e64 v8, vcc_lo, v9, v27, s0
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX12-NEXT: v_add_nc_u32_e32 v7, v8, v7
+; GFX12-NEXT: s_setpc_b64 s[30:31]
%result = mul i256 %num, %den
ret i256 %result
}
+
+define amdgpu_ps void @s_mul_u64_zext_with_vregs(ptr addrspace(1) %out, ptr addrspace(1) %in) {
+; GFX7-LABEL: s_mul_u64_zext_with_vregs:
+; GFX7: ; %bb.0:
+; GFX7-NEXT: s_mov_b32 s2, 0
+; GFX7-NEXT: s_mov_b32 s3, 0xf000
+; GFX7-NEXT: s_mov_b64 s[0:1], 0
+; GFX7-NEXT: buffer_load_dword v2, v[2:3], s[0:3], 0 addr64
+; GFX7-NEXT: v_mov_b32_e32 v3, 0x50
+; GFX7-NEXT: s_waitcnt vmcnt(0)
+; GFX7-NEXT: v_mad_u64_u32 v[2:3], s[4:5], v2, v3, 0
+; GFX7-NEXT: buffer_store_dwordx2 v[2:3], v[0:1], s[0:3], 0 addr64
+; GFX7-NEXT: s_endpgm
+;
+; GFX8-LABEL: s_mul_u64_zext_with_vregs:
+; GFX8: ; %bb.0:
+; GFX8-NEXT: flat_load_dword v2, v[2:3]
+; GFX8-NEXT: v_mov_b32_e32 v3, 0x50
+; GFX8-NEXT: s_waitcnt vmcnt(0)
+; GFX8-NEXT: v_mad_u64_u32 v[2:3], s[0:1], v2, v3, 0
+; GFX8-NEXT: flat_store_dwordx2 v[0:1], v[2:3]
+; GFX8-NEXT: s_endpgm
+;
+; GFX9-LABEL: s_mul_u64_zext_with_vregs:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: global_load_dword v2, v[2:3], off
+; GFX9-NEXT: v_mov_b32_e32 v3, 0x50
+; GFX9-NEXT: s_waitcnt vmcnt(0)
+; GFX9-NEXT: v_mad_u64_u32 v[2:3], s[0:1], v2, v3, 0
+; GFX9-NEXT: global_store_dwordx2 v[0:1], v[2:3], off
+; GFX9-NEXT: s_endpgm
+;
+; GFX10-LABEL: s_mul_u64_zext_with_vregs:
+; GFX10: ; %bb.0:
+; GFX10-NEXT: global_load_dword v2, v[2:3], off
+; GFX10-NEXT: s_waitcnt vmcnt(0)
+; GFX10-NEXT: v_mad_u64_u32 v[2:3], s0, 0x50, v2, 0
+; GFX10-NEXT: global_store_dwordx2 v[0:1], v[2:3], off
+; GFX10-NEXT: s_endpgm
+;
+; GFX11-LABEL: s_mul_u64_zext_with_vregs:
+; GFX11: ; %bb.0:
+; GFX11-NEXT: global_load_b32 v2, v[2:3], off
+; GFX11-NEXT: s_waitcnt vmcnt(0)
+; GFX11-NEXT: v_mad_u64_u32 v[2:3], null, 0x50, v2, 0
+; GFX11-NEXT: global_store_b64 v[0:1], v[2:3], off
+; GFX11-NEXT: s_nop 0
+; GFX11-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
+; GFX11-NEXT: s_endpgm
+;
+; GFX12-LABEL: s_mul_u64_zext_with_vregs:
+; GFX12: ; %bb.0:
+; GFX12-NEXT: global_load_b32 v2, v[2:3], off
+; GFX12-NEXT: s_waitcnt vmcnt(0)
+; GFX12-NEXT: v_mad_co_u64_u32 v[2:3], null, 0x50, v2, 0
+; GFX12-NEXT: global_store_b64 v[0:1], v[2:3], off
+; GFX12-NEXT: s_nop 0
+; GFX12-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
+; GFX12-NEXT: s_endpgm
+ %val = load i32, ptr addrspace(1) %in, align 4
+ %ext = zext i32 %val to i64
+ %mul = mul i64 %ext, 80
+ store i64 %mul, ptr addrspace(1) %out, align 8
+ ret void
+}
+
+define amdgpu_kernel void @s_mul_u64_zext_with_sregs(ptr addrspace(1) %out, ptr addrspace(1) %in) {
+; GFX7-LABEL: s_mul_u64_zext_with_sregs:
+; GFX7: ; %bb.0:
+; GFX7-NEXT: s_load_dwordx4 s[0:3], s[0:1], 0x9
+; GFX7-NEXT: v_mov_b32_e32 v0, 0x50
+; GFX7-NEXT: s_waitcnt lgkmcnt(0)
+; GFX7-NEXT: s_load_dword s3, s[2:3], 0x0
+; GFX7-NEXT: s_mov_b32 s2, -1
+; GFX7-NEXT: s_waitcnt lgkmcnt(0)
+; GFX7-NEXT: v_mul_hi_u32 v0, s3, v0
+; GFX7-NEXT: s_mul_i32 s4, s3, 0x50
+; GFX7-NEXT: s_mov_b32 s3, 0xf000
+; GFX7-NEXT: v_readfirstlane_b32 s5, v0
+; GFX7-NEXT: v_mov_b32_e32 v0, s4
+; GFX7-NEXT: v_mov_b32_e32 v1, s5
+; GFX7-NEXT: buffer_store_dwordx2 v[0:1], off, s[0:3], 0
+; GFX7-NEXT: s_endpgm
+;
+; GFX8-LABEL: s_mul_u64_zext_with_sregs:
+; GFX8: ; %bb.0:
+; GFX8-NEXT: s_load_dwordx4 s[0:3], s[0:1], 0x24
+; GFX8-NEXT: v_mov_b32_e32 v0, 0x50
+; GFX8-NEXT: s_waitcnt lgkmcnt(0)
+; GFX8-NEXT: s_load_dword s2, s[2:3], 0x0
+; GFX8-NEXT: v_mov_b32_e32 v3, s1
+; GFX8-NEXT: v_mov_b32_e32 v2, s0
+; GFX8-NEXT: s_waitcnt lgkmcnt(0)
+; GFX8-NEXT: v_mul_hi_u32 v0, s2, v0
+; GFX8-NEXT: s_mulk_i32 s2, 0x50
+; GFX8-NEXT: v_readfirstlane_b32 s3, v0
+; GFX8-NEXT: v_mov_b32_e32 v0, s2
+; GFX8-NEXT: v_mov_b32_e32 v1, s3
+; GFX8-NEXT: flat_store_dwordx2 v[2:3], v[0:1]
+; GFX8-NEXT: s_endpgm
+;
+; GFX9-LABEL: s_mul_u64_zext_with_sregs:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: s_load_dwordx4 s[0:3], s[0:1], 0x24
+; GFX9-NEXT: v_mov_b32_e32 v2, 0
+; GFX9-NEXT: s_waitcnt lgkmcnt(0)
+; GFX9-NEXT: s_load_dword s3, s[2:3], 0x0
+; GFX9-NEXT: s_waitcnt lgkmcnt(0)
+; GFX9-NEXT: s_mul_i32 s2, s3, 0x50
+; GFX9-NEXT: s_mul_hi_u32 s3, s3, 0x50
+; GFX9-NEXT: v_mov_b32_e32 v0, s2
+; GFX9-NEXT: v_mov_b32_e32 v1, s3
+; GFX9-NEXT: global_store_dwordx2 v2, v[0:1], s[0:1]
+; GFX9-NEXT: s_endpgm
+;
+; GFX10-LABEL: s_mul_u64_zext_with_sregs:
+; GFX10: ; %bb.0:
+; GFX10-NEXT: s_load_dwordx4 s[0:3], s[0:1], 0x24
+; GFX10-NEXT: v_mov_b32_e32 v2, 0
+; GFX10-NEXT: s_waitcnt lgkmcnt(0)
+; GFX10-NEXT: s_load_dword s3, s[2:3], 0x0
+; GFX10-NEXT: s_waitcnt lgkmcnt(0)
+; GFX10-NEXT: s_mul_i32 s2, s3, 0x50
+; GFX10-NEXT: s_mul_hi_u32 s3, s3, 0x50
+; GFX10-NEXT: v_mov_b32_e32 v0, s2
+; GFX10-NEXT: v_mov_b32_e32 v1, s3
+; GFX10-NEXT: global_store_dwordx2 v2, v[0:1], s[0:1]
+; GFX10-NEXT: s_endpgm
+;
+; GFX11-LABEL: s_mul_u64_zext_with_sregs:
+; GFX11: ; %bb.0:
+; GFX11-NEXT: s_load_b128 s[0:3], s[0:1], 0x24
+; GFX11-NEXT: v_mov_b32_e32 v2, 0
+; GFX11-NEXT: s_waitcnt lgkmcnt(0)
+; GFX11-NEXT: s_load_b32 s3, s[2:3], 0x0
+; GFX11-NEXT: s_waitcnt lgkmcnt(0)
+; GFX11-NEXT: s_mul_i32 s2, s3, 0x50
+; GFX11-NEXT: s_mul_hi_u32 s3, s3, 0x50
+; GFX11-NEXT: v_dual_mov_b32 v0, s2 :: v_dual_mov_b32 v1, s3
+; GFX11-NEXT: global_store_b64 v2, v[0:1], s[0:1]
+; GFX11-NEXT: s_nop 0
+; GFX11-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
+; GFX11-NEXT: s_endpgm
+;
+; GFX12-LABEL: s_mul_u64_zext_with_sregs:
+; GFX12: ; %bb.0:
+; GFX12-NEXT: s_load_b128 s[0:3], s[0:1], 0x24
+; GFX12-NEXT: v_mov_b32_e32 v2, 0
+; GFX12-NEXT: s_waitcnt lgkmcnt(0)
+; GFX12-NEXT: s_load_b32 s2, s[2:3], 0x0
+; GFX12-NEXT: s_mov_b32 s3, 0
+; GFX12-NEXT: s_waitcnt lgkmcnt(0)
+; GFX12-NEXT: s_mul_u64 s[2:3], s[2:3], 0x50
+; GFX12-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX12-NEXT: v_dual_mov_b32 v0, s2 :: v_dual_mov_b32 v1, s3
+; GFX12-NEXT: global_store_b64 v2, v[0:1], s[0:1]
+; GFX12-NEXT: s_nop 0
+; GFX12-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
+; GFX12-NEXT: s_endpgm
+ %val = load i32, ptr addrspace(1) %in, align 4
+ %ext = zext i32 %val to i64
+ %mul = mul i64 %ext, 80
+ store i64 %mul, ptr addrspace(1) %out, align 8
+ ret void
+}
+
+define amdgpu_ps void @s_mul_u64_sext_with_vregs(ptr addrspace(1) %out, ptr addrspace(1) %in) {
+; GFX7-LABEL: s_mul_u64_sext_with_vregs:
+; GFX7: ; %bb.0:
+; GFX7-NEXT: s_mov_b32 s2, 0
+; GFX7-NEXT: s_mov_b32 s3, 0xf000
+; GFX7-NEXT: s_mov_b64 s[0:1], 0
+; GFX7-NEXT: buffer_load_dword v4, v[2:3], s[0:3], 0 addr64
+; GFX7-NEXT: v_mov_b32_e32 v5, 0x50
+; GFX7-NEXT: s_waitcnt vmcnt(0)
+; GFX7-NEXT: v_mad_u64_u32 v[2:3], s[4:5], v4, v5, 0
+; GFX7-NEXT: v_ashrrev_i32_e32 v4, 31, v4
+; GFX7-NEXT: v_mad_u64_u32 v[3:4], s[4:5], v4, v5, v[3:4]
+; GFX7-NEXT: buffer_store_dwordx2 v[2:3], v[0:1], s[0:3], 0 addr64
+; GFX7-NEXT: s_endpgm
+;
+; GFX8-LABEL: s_mul_u64_sext_with_vregs:
+; GFX8: ; %bb.0:
+; GFX8-NEXT: flat_load_dword v4, v[2:3]
+; GFX8-NEXT: v_mov_b32_e32 v5, 0x50
+; GFX8-NEXT: s_waitcnt vmcnt(0)
+; GFX8-NEXT: v_mad_u64_u32 v[2:3], s[0:1], v4, v5, 0
+; GFX8-NEXT: v_ashrrev_i32_e32 v4, 31, v4
+; GFX8-NEXT: v_mad_u64_u32 v[3:4], s[0:1], v4, v5, v[3:4]
+; GFX8-NEXT: flat_store_dwordx2 v[0:1], v[2:3]
+; GFX8-NEXT: s_endpgm
+;
+; GFX9-LABEL: s_mul_u64_sext_with_vregs:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: global_load_dword v4, v[2:3], off
+; GFX9-NEXT: v_mov_b32_e32 v5, 0x50
+; GFX9-NEXT: s_waitcnt vmcnt(0)
+; GFX9-NEXT: v_mad_u64_u32 v[2:3], s[0:1], v4, v5, 0
+; GFX9-NEXT: v_ashrrev_i32_e32 v4, 31, v4
+; GFX9-NEXT: v_mad_u64_u32 v[3:4], s[0:1], v4, v5, v[3:4]
+; GFX9-NEXT: global_store_dwordx2 v[0:1], v[2:3], off
+; GFX9-NEXT: s_endpgm
+;
+; GFX10-LABEL: s_mul_u64_sext_with_vregs:
+; GFX10: ; %bb.0:
+; GFX10-NEXT: global_load_dword v2, v[2:3], off
+; GFX10-NEXT: s_waitcnt vmcnt(0)
+; GFX10-NEXT: v_ashrrev_i32_e32 v4, 31, v2
+; GFX10-NEXT: v_mad_u64_u32 v[2:3], s0, 0x50, v2, 0
+; GFX10-NEXT: v_mul_lo_u32 v4, 0x50, v4
+; GFX10-NEXT: v_add_nc_u32_e32 v3, v3, v4
+; GFX10-NEXT: global_store_dwordx2 v[0:1], v[2:3], off
+; GFX10-NEXT: s_endpgm
+;
+; GFX11-LABEL: s_mul_u64_sext_with_vregs:
+; GFX11: ; %bb.0:
+; GFX11-NEXT: global_load_b32 v2, v[2:3], off
+; GFX11-NEXT: s_waitcnt vmcnt(0)
+; GFX11-NEXT: v_ashrrev_i32_e32 v4, 31, v2
+; GFX11-NEXT: v_mad_u64_u32 v[2:3], null, 0x50, v2, 0
+; GFX11-NEXT: v_mul_lo_u32 v4, 0x50, v4
+; GFX11-NEXT: v_add_nc_u32_e32 v3, v3, v4
+; GFX11-NEXT: global_store_b64 v[0:1], v[2:3], off
+; GFX11-NEXT: s_nop 0
+; GFX11-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
+; GFX11-NEXT: s_endpgm
+;
+; GFX12-LABEL: s_mul_u64_sext_with_vregs:
+; GFX12: ; %bb.0:
+; GFX12-NEXT: global_load_b32 v2, v[2:3], off
+; GFX12-NEXT: s_waitcnt vmcnt(0)
+; GFX12-NEXT: v_mad_co_i64_i32 v[2:3], null, 0x50, v2, 0
+; GFX12-NEXT: global_store_b64 v[0:1], v[2:3], off
+; GFX12-NEXT: s_nop 0
+; GFX12-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
+; GFX12-NEXT: s_endpgm
+ %val = load i32, ptr addrspace(1) %in, align 4
+ %ext = sext i32 %val to i64
+ %mul = mul i64 %ext, 80
+ store i64 %mul, ptr addrspace(1) %out, align 8
+ ret void
+}
+
+define amdgpu_kernel void @s_mul_u64_sext_with_sregs(ptr addrspace(1) %out, ptr addrspace(1) %in) {
+; GFX7-LABEL: s_mul_u64_sext_with_sregs:
+; GFX7: ; %bb.0:
+; GFX7-NEXT: s_load_dwordx4 s[0:3], s[0:1], 0x9
+; GFX7-NEXT: v_mov_b32_e32 v0, 0x50
+; GFX7-NEXT: s_waitcnt lgkmcnt(0)
+; GFX7-NEXT: s_load_dword s3, s[2:3], 0x0
+; GFX7-NEXT: s_mov_b32 s2, -1
+; GFX7-NEXT: s_waitcnt lgkmcnt(0)
+; GFX7-NEXT: v_mul_hi_u32 v0, s3, v0
+; GFX7-NEXT: s_ashr_i32 s5, s3, 31
+; GFX7-NEXT: s_mul_i32 s4, s3, 0x50
+; GFX7-NEXT: s_mulk_i32 s5, 0x50
+; GFX7-NEXT: v_readfirstlane_b32 s3, v0
+; GFX7-NEXT: s_add_u32 s5, s5, s3
+; GFX7-NEXT: v_mov_b32_e32 v0, s4
+; GFX7-NEXT: v_mov_b32_e32 v1, s5
+; GFX7-NEXT: s_mov_b32 s3, 0xf000
+; GFX7-NEXT: buffer_store_dwordx2 v[0:1], off, s[0:3], 0
+; GFX7-NEXT: s_endpgm
+;
+; GFX8-LABEL: s_mul_u64_sext_with_sregs:
+; GFX8: ; %bb.0:
+; GFX8-NEXT: s_load_dwordx4 s[0:3], s[0:1], 0x24
+; GFX8-NEXT: v_mov_b32_e32 v0, 0x50
+; GFX8-NEXT: s_waitcnt lgkmcnt(0)
+; GFX8-NEXT: s_load_dword s2, s[2:3], 0x0
+; GFX8-NEXT: v_mov_b32_e32 v3, s1
+; GFX8-NEXT: v_mov_b32_e32 v2, s0
+; GFX8-NEXT: s_waitcnt lgkmcnt(0)
+; GFX8-NEXT: v_mul_hi_u32 v0, s2, v0
+; GFX8-NEXT: s_ashr_i32 s3, s2, 31
+; GFX8-NEXT: s_mulk_i32 s2, 0x50
+; GFX8-NEXT: s_mulk_i32 s3, 0x50
+; GFX8-NEXT: v_readfirstlane_b32 s4, v0
+; GFX8-NEXT: s_add_u32 s3, s3, s4
+; GFX8-NEXT: v_mov_b32_e32 v0, s2
+; GFX8-NEXT: v_mov_b32_e32 v1, s3
+; GFX8-NEXT: flat_store_dwordx2 v[2:3], v[0:1]
+; GFX8-NEXT: s_endpgm
+;
+; GFX9-LABEL: s_mul_u64_sext_with_sregs:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: s_load_dwordx4 s[0:3], s[0:1], 0x24
+; GFX9-NEXT: v_mov_b32_e32 v2, 0
+; GFX9-NEXT: s_waitcnt lgkmcnt(0)
+; GFX9-NEXT: s_load_dword s3, s[2:3], 0x0
+; GFX9-NEXT: s_waitcnt lgkmcnt(0)
+; GFX9-NEXT: s_ashr_i32 s4, s3, 31
+; GFX9-NEXT: s_mul_i32 s2, s3, 0x50
+; GFX9-NEXT: s_mul_hi_u32 s3, s3, 0x50
+; GFX9-NEXT: s_mulk_i32 s4, 0x50
+; GFX9-NEXT: s_add_u32 s3, s4, s3
+; GFX9-NEXT: v_mov_b32_e32 v0, s2
+; GFX9-NEXT: v_mov_b32_e32 v1, s3
+; GFX9-NEXT: global_store_dwordx2 v2, v[0:1], s[0:1]
+; GFX9-NEXT: s_endpgm
+;
+; GFX10-LABEL: s_mul_u64_sext_with_sregs:
+; GFX10: ; %bb.0:
+; GFX10-NEXT: s_load_dwordx4 s[0:3], s[0:1], 0x24
+; GFX10-NEXT: v_mov_b32_e32 v2, 0
+; GFX10-NEXT: s_waitcnt lgkmcnt(0)
+; GFX10-NEXT: s_load_dword s2, s[2:3], 0x0
+; GFX10-NEXT: s_waitcnt lgkmcnt(0)
+; GFX10-NEXT: s_ashr_i32 s3, s2, 31
+; GFX10-NEXT: s_mul_hi_u32 s4, s2, 0x50
+; GFX10-NEXT: s_mulk_i32 s3, 0x50
+; GFX10-NEXT: s_mulk_i32 s2, 0x50
+; GFX10-NEXT: s_add_i32 s3, s4, s3
+; GFX10-NEXT: v_mov_b32_e32 v0, s2
+; GFX10-NEXT: v_mov_b32_e32 v1, s3
+; GFX10-NEXT: global_store_dwordx2 v2, v[0:1], s[0:1]
+; GFX10-NEXT: s_endpgm
+;
+; GFX11-LABEL: s_mul_u64_sext_with_sregs:
+; GFX11: ; %bb.0:
+; GFX11-NEXT: s_load_b128 s[0:3], s[0:1], 0x24
+; GFX11-NEXT: v_mov_b32_e32 v2, 0
+; GFX11-NEXT: s_waitcnt lgkmcnt(0)
+; GFX11-NEXT: s_load_b32 s2, s[2:3], 0x0
+; GFX11-NEXT: s_waitcnt lgkmcnt(0)
+; GFX11-NEXT: s_ashr_i32 s3, s2, 31
+; GFX11-NEXT: s_mul_hi_u32 s4, s2, 0x50
+; GFX11-NEXT: s_mulk_i32 s3, 0x50
+; GFX11-NEXT: s_mulk_i32 s2, 0x50
+; GFX11-NEXT: s_add_i32 s3, s4, s3
+; GFX11-NEXT: v_dual_mov_b32 v0, s2 :: v_dual_mov_b32 v1, s3
+; GFX11-NEXT: global_store_b64 v2, v[0:1], s[0:1]
+; GFX11-NEXT: s_nop 0
+; GFX11-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
+; GFX11-NEXT: s_endpgm
+;
+; GFX12-LABEL: s_mul_u64_sext_with_sregs:
+; GFX12: ; %bb.0:
+; GFX12-NEXT: s_load_b128 s[0:3], s[0:1], 0x24
+; GFX12-NEXT: v_mov_b32_e32 v2, 0
+; GFX12-NEXT: s_waitcnt lgkmcnt(0)
+; GFX12-NEXT: s_load_b32 s2, s[2:3], 0x0
+; GFX12-NEXT: s_waitcnt lgkmcnt(0)
+; GFX12-NEXT: s_ashr_i32 s3, s2, 31
+; GFX12-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
+; GFX12-NEXT: s_mul_u64 s[2:3], s[2:3], 0x50
+; GFX12-NEXT: v_dual_mov_b32 v0, s2 :: v_dual_mov_b32 v1, s3
+; GFX12-NEXT: global_store_b64 v2, v[0:1], s[0:1]
+; GFX12-NEXT: s_nop 0
+; GFX12-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
+; GFX12-NEXT: s_endpgm
+ %val = load i32, ptr addrspace(1) %in, align 4
+ %ext = sext i32 %val to i64
+ %mul = mul i64 %ext, 80
+ store i64 %mul, ptr addrspace(1) %out, align 8
+ ret void
+}
diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/postlegalizercombiner-mul.mir b/llvm/test/CodeGen/AMDGPU/GlobalISel/postlegalizercombiner-mul.mir
new file mode 100644
index 000000000000..f74a575ac931
--- /dev/null
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/postlegalizercombiner-mul.mir
@@ -0,0 +1,60 @@
+# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py UTC_ARGS: --version 2
+# RUN: llc -march=amdgcn -mcpu=gfx1200 -run-pass=amdgpu-postlegalizer-combiner -verify-machineinstrs -o - %s | FileCheck %s
+
+---
+name: mul_s64
+body: |
+ bb.0:
+ liveins: $vgpr0_vgpr1
+ ; CHECK-LABEL: name: mul_s64
+ ; CHECK: liveins: $vgpr0_vgpr1
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(s64) = COPY $vgpr0_vgpr1
+ ; CHECK-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 12345
+ ; CHECK-NEXT: [[MUL:%[0-9]+]]:_(s64) = G_MUL [[COPY]], [[C]]
+ ; CHECK-NEXT: $vgpr0_vgpr1 = COPY [[MUL]](s64)
+ %0:_(s64) = COPY $vgpr0_vgpr1
+ %1:_(s64) = G_CONSTANT i64 12345
+ %2:_(s64) = G_MUL %0, %1
+ $vgpr0_vgpr1 = COPY %2
+...
+
+---
+name: mul_s64_zext
+body: |
+ bb.0:
+ liveins: $vgpr0
+ ; CHECK-LABEL: name: mul_s64_zext
+ ; CHECK: liveins: $vgpr0
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
+ ; CHECK-NEXT: [[ZEXT:%[0-9]+]]:_(s64) = G_ZEXT [[COPY]](s32)
+ ; CHECK-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 12345
+ ; CHECK-NEXT: [[AMDGPU_:%[0-9]+]]:_(s64) = G_AMDGPU_S_MUL_U64_U32 [[ZEXT]], [[C]]
+ ; CHECK-NEXT: $vgpr0_vgpr1 = COPY [[AMDGPU_]](s64)
+ %0:_(s32) = COPY $vgpr0
+ %1:_(s64) = G_ZEXT %0
+ %2:_(s64) = G_CONSTANT i64 12345
+ %3:_(s64) = G_MUL %1, %2
+ $vgpr0_vgpr1 = COPY %3
+...
+
+---
+name: mul_s64_sext
+body: |
+ bb.0:
+ liveins: $vgpr0
+ ; CHECK-LABEL: name: mul_s64_sext
+ ; CHECK: liveins: $vgpr0
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
+ ; CHECK-NEXT: [[SEXT:%[0-9]+]]:_(s64) = G_SEXT [[COPY]](s32)
+ ; CHECK-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 12345
+ ; CHECK-NEXT: [[AMDGPU_:%[0-9]+]]:_(s64) = G_AMDGPU_S_MUL_I64_I32 [[SEXT]], [[C]]
+ ; CHECK-NEXT: $vgpr0_vgpr1 = COPY [[AMDGPU_]](s64)
+ %0:_(s32) = COPY $vgpr0
+ %1:_(s64) = G_SEXT %0
+ %2:_(s64) = G_CONSTANT i64 12345
+ %3:_(s64) = G_MUL %1, %2
+ $vgpr0_vgpr1 = COPY %3
+...
diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/regbankselect-mul.mir b/llvm/test/CodeGen/AMDGPU/GlobalISel/regbankselect-mul.mir
index a5b61641e0c2..a6cc6c92d9f8 100644
--- a/llvm/test/CodeGen/AMDGPU/GlobalISel/regbankselect-mul.mir
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/regbankselect-mul.mir
@@ -74,3 +74,125 @@ body: |
%1:_(s32) = COPY $vgpr1
%2:_(s32) = G_MUL %0, %1
...
+
+---
+name: mul_s64_ss
+legalized: true
+
+body: |
+ bb.0:
+ liveins: $sgpr0_sgpr1, $sgpr2_sgpr3
+ ; CHECK-LABEL: name: mul_s64_ss
+ ; CHECK: liveins: $sgpr0_sgpr1, $sgpr2_sgpr3
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: [[COPY:%[0-9]+]]:sgpr(s64) = COPY $sgpr0_sgpr1
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:sgpr(s64) = COPY $sgpr2_sgpr3
+ ; CHECK-NEXT: [[MUL:%[0-9]+]]:sgpr(s64) = G_MUL [[COPY]], [[COPY1]]
+ %0:_(s64) = COPY $sgpr0_sgpr1
+ %1:_(s64) = COPY $sgpr2_sgpr3
+ %2:_(s64) = G_MUL %0, %1
+...
+
+---
+name: mul_s64_vv
+legalized: true
+
+body: |
+ bb.0:
+ liveins: $vgpr0_vgpr1, $vgpr2_vgpr3
+ ; CHECK-LABEL: name: mul_s64_vv
+ ; CHECK: liveins: $vgpr0_vgpr1, $vgpr2_vgpr3
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: [[COPY:%[0-9]+]]:vgpr(s64) = COPY $vgpr0_vgpr1
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:vgpr(s64) = COPY $vgpr2_vgpr3
+ ; CHECK-NEXT: [[UV:%[0-9]+]]:vgpr(s32), [[UV1:%[0-9]+]]:vgpr(s32) = G_UNMERGE_VALUES [[COPY]](s64)
+ ; CHECK-NEXT: [[UV2:%[0-9]+]]:vgpr(s32), [[UV3:%[0-9]+]]:vgpr(s32) = G_UNMERGE_VALUES [[COPY1]](s64)
+ ; CHECK-NEXT: [[UMULH:%[0-9]+]]:vgpr(s32) = G_UMULH [[UV]], [[UV2]]
+ ; CHECK-NEXT: [[MUL:%[0-9]+]]:vgpr(s32) = G_MUL [[UV]], [[UV3]]
+ ; CHECK-NEXT: [[ADD:%[0-9]+]]:vgpr(s32) = G_ADD [[UMULH]], [[MUL]]
+ ; CHECK-NEXT: [[MUL1:%[0-9]+]]:vgpr(s32) = G_MUL [[UV1]], [[UV2]]
+ ; CHECK-NEXT: [[ADD1:%[0-9]+]]:vgpr(s32) = G_ADD [[ADD]], [[MUL1]]
+ ; CHECK-NEXT: [[MUL2:%[0-9]+]]:vgpr(s32) = G_MUL [[UV]], [[UV2]]
+ ; CHECK-NEXT: [[MV:%[0-9]+]]:vgpr(s64) = G_MERGE_VALUES [[MUL2]](s32), [[ADD1]](s32)
+ %0:_(s64) = COPY $vgpr0_vgpr1
+ %1:_(s64) = COPY $vgpr2_vgpr3
+ %2:_(s64) = G_MUL %0, %1
+...
+
+---
+name: mul_s64_zext_ss
+legalized: true
+
+body: |
+ bb.0:
+ liveins: $sgpr0_sgpr1, $sgpr2_sgpr3
+ ; CHECK-LABEL: name: mul_s64_zext_ss
+ ; CHECK: liveins: $sgpr0_sgpr1, $sgpr2_sgpr3
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: [[COPY:%[0-9]+]]:sgpr_64(s64) = COPY $sgpr0_sgpr1
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:sgpr_64(s64) = COPY $sgpr2_sgpr3
+ ; CHECK-NEXT: [[S_MUL_U64_:%[0-9]+]]:sgpr_64(s64) = S_MUL_U64 [[COPY]](s64), [[COPY1]](s64)
+ %0:_(s64) = COPY $sgpr0_sgpr1
+ %1:_(s64) = COPY $sgpr2_sgpr3
+ %2:_(s64) = G_AMDGPU_S_MUL_U64_U32 %0, %1
+...
+
+---
+name: mul_s64_zext_vv
+legalized: true
+
+body: |
+ bb.0:
+ liveins: $vgpr0_vgpr1, $vgpr2_vgpr3
+ ; CHECK-LABEL: name: mul_s64_zext_vv
+ ; CHECK: liveins: $vgpr0_vgpr1, $vgpr2_vgpr3
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: [[COPY:%[0-9]+]]:vgpr(s64) = COPY $vgpr0_vgpr1
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:vgpr(s64) = COPY $vgpr2_vgpr3
+ ; CHECK-NEXT: [[TRUNC:%[0-9]+]]:vgpr_32(s32) = G_TRUNC [[COPY]](s64)
+ ; CHECK-NEXT: [[TRUNC1:%[0-9]+]]:vgpr_32(s32) = G_TRUNC [[COPY1]](s64)
+ ; CHECK-NEXT: [[C:%[0-9]+]]:vreg_64(s64) = G_CONSTANT i64 0
+ ; CHECK-NEXT: [[AMDGPU_MAD_U64_U32_:%[0-9]+]]:vgpr(s64), [[AMDGPU_MAD_U64_U32_1:%[0-9]+]]:vreg_64 = G_AMDGPU_MAD_U64_U32 [[TRUNC]](s32), [[TRUNC1]], [[C]]
+ %0:_(s64) = COPY $vgpr0_vgpr1
+ %1:_(s64) = COPY $vgpr2_vgpr3
+ %2:_(s64) = G_AMDGPU_S_MUL_U64_U32 %0, %1
+...
+
+---
+name: mul_s64_sext_ss
+legalized: true
+
+body: |
+ bb.0:
+ liveins: $sgpr0_sgpr1, $sgpr2_sgpr3
+ ; CHECK-LABEL: name: mul_s64_sext_ss
+ ; CHECK: liveins: $sgpr0_sgpr1, $sgpr2_sgpr3
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: [[COPY:%[0-9]+]]:sgpr_64(s64) = COPY $sgpr0_sgpr1
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:sgpr_64(s64) = COPY $sgpr2_sgpr3
+ ; CHECK-NEXT: [[S_MUL_U64_:%[0-9]+]]:sgpr_64(s64) = S_MUL_U64 [[COPY]](s64), [[COPY1]](s64)
+ %0:_(s64) = COPY $sgpr0_sgpr1
+ %1:_(s64) = COPY $sgpr2_sgpr3
+ %2:_(s64) = G_AMDGPU_S_MUL_I64_I32 %0, %1
+...
+
+---
+name: mul_s64_sext_vv
+legalized: true
+
+body: |
+ bb.0:
+ liveins: $vgpr0_vgpr1, $vgpr2_vgpr3
+ ; CHECK-LABEL: name: mul_s64_sext_vv
+ ; CHECK: liveins: $vgpr0_vgpr1, $vgpr2_vgpr3
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: [[COPY:%[0-9]+]]:vgpr(s64) = COPY $vgpr0_vgpr1
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:vgpr(s64) = COPY $vgpr2_vgpr3
+ ; CHECK-NEXT: [[TRUNC:%[0-9]+]]:vgpr_32(s32) = G_TRUNC [[COPY]](s64)
+ ; CHECK-NEXT: [[TRUNC1:%[0-9]+]]:vgpr_32(s32) = G_TRUNC [[COPY1]](s64)
+ ; CHECK-NEXT: [[C:%[0-9]+]]:vreg_64(s64) = G_CONSTANT i64 0
+ ; CHECK-NEXT: [[AMDGPU_MAD_I64_I32_:%[0-9]+]]:vgpr(s64), [[AMDGPU_MAD_I64_I32_1:%[0-9]+]]:vreg_64 = G_AMDGPU_MAD_I64_I32 [[TRUNC]](s32), [[TRUNC1]], [[C]]
+ %0:_(s64) = COPY $vgpr0_vgpr1
+ %1:_(s64) = COPY $vgpr2_vgpr3
+ %2:_(s64) = G_AMDGPU_S_MUL_I64_I32 %0, %1
+...
diff --git a/llvm/test/CodeGen/AMDGPU/atomic_optimizations_global_pointer.ll b/llvm/test/CodeGen/AMDGPU/atomic_optimizations_global_pointer.ll
index 9f97f1f4bace..b4c8da44337a 100644
--- a/llvm/test/CodeGen/AMDGPU/atomic_optimizations_global_pointer.ll
+++ b/llvm/test/CodeGen/AMDGPU/atomic_optimizations_global_pointer.ll
@@ -240,8 +240,7 @@ define amdgpu_kernel void @add_i32_constant(ptr addrspace(1) %out, ptr addrspace
; GFX1264-NEXT: s_mov_b32 s9, s3
; GFX1264-NEXT: buffer_atomic_add_u32 v1, off, s[8:11], null th:TH_ATOMIC_RETURN
; GFX1264-NEXT: s_waitcnt vmcnt(0)
-; GFX1264-NEXT: buffer_gl0_inv
-; GFX1264-NEXT: buffer_gl1_inv
+; GFX1264-NEXT: global_inv scope:SCOPE_DEV
; GFX1264-NEXT: .LBB0_2:
; GFX1264-NEXT: s_or_b64 exec, exec, s[4:5]
; GFX1264-NEXT: s_waitcnt lgkmcnt(0)
@@ -276,8 +275,7 @@ define amdgpu_kernel void @add_i32_constant(ptr addrspace(1) %out, ptr addrspace
; GFX1232-NEXT: s_mov_b32 s9, s3
; GFX1232-NEXT: buffer_atomic_add_u32 v1, off, s[8:11], null th:TH_ATOMIC_RETURN
; GFX1232-NEXT: s_waitcnt vmcnt(0)
-; GFX1232-NEXT: buffer_gl0_inv
-; GFX1232-NEXT: buffer_gl1_inv
+; GFX1232-NEXT: global_inv scope:SCOPE_DEV
; GFX1232-NEXT: .LBB0_2:
; GFX1232-NEXT: s_or_b32 exec_lo, exec_lo, s4
; GFX1232-NEXT: s_waitcnt lgkmcnt(0)
@@ -571,8 +569,7 @@ define amdgpu_kernel void @add_i32_uniform(ptr addrspace(1) %out, ptr addrspace(
; GFX1264-NEXT: s_mov_b32 s13, s7
; GFX1264-NEXT: buffer_atomic_add_u32 v1, off, s[12:15], null th:TH_ATOMIC_RETURN
; GFX1264-NEXT: s_waitcnt vmcnt(0)
-; GFX1264-NEXT: buffer_gl0_inv
-; GFX1264-NEXT: buffer_gl1_inv
+; GFX1264-NEXT: global_inv scope:SCOPE_DEV
; GFX1264-NEXT: .LBB1_2:
; GFX1264-NEXT: s_or_b64 exec, exec, s[0:1]
; GFX1264-NEXT: s_waitcnt lgkmcnt(0)
@@ -610,8 +607,7 @@ define amdgpu_kernel void @add_i32_uniform(ptr addrspace(1) %out, ptr addrspace(
; GFX1232-NEXT: s_mov_b32 s9, s7
; GFX1232-NEXT: buffer_atomic_add_u32 v1, off, s[8:11], null th:TH_ATOMIC_RETURN
; GFX1232-NEXT: s_waitcnt vmcnt(0)
-; GFX1232-NEXT: buffer_gl0_inv
-; GFX1232-NEXT: buffer_gl1_inv
+; GFX1232-NEXT: global_inv scope:SCOPE_DEV
; GFX1232-NEXT: .LBB1_2:
; GFX1232-NEXT: s_or_b32 exec_lo, exec_lo, s1
; GFX1232-NEXT: s_waitcnt lgkmcnt(0)
@@ -967,8 +963,7 @@ define amdgpu_kernel void @add_i32_varying(ptr addrspace(1) %out, ptr addrspace(
; GFX1264-NEXT: s_mov_b32 s9, s3
; GFX1264-NEXT: buffer_atomic_add_u32 v0, off, s[8:11], null th:TH_ATOMIC_RETURN
; GFX1264-NEXT: s_waitcnt vmcnt(0)
-; GFX1264-NEXT: buffer_gl0_inv
-; GFX1264-NEXT: buffer_gl1_inv
+; GFX1264-NEXT: global_inv scope:SCOPE_DEV
; GFX1264-NEXT: .LBB2_4:
; GFX1264-NEXT: s_or_b64 exec, exec, s[4:5]
; GFX1264-NEXT: s_waitcnt lgkmcnt(0)
@@ -1016,8 +1011,7 @@ define amdgpu_kernel void @add_i32_varying(ptr addrspace(1) %out, ptr addrspace(
; GFX1232-NEXT: s_mov_b32 s9, s3
; GFX1232-NEXT: buffer_atomic_add_u32 v0, off, s[8:11], null th:TH_ATOMIC_RETURN
; GFX1232-NEXT: s_waitcnt vmcnt(0)
-; GFX1232-NEXT: buffer_gl0_inv
-; GFX1232-NEXT: buffer_gl1_inv
+; GFX1232-NEXT: global_inv scope:SCOPE_DEV
; GFX1232-NEXT: .LBB2_4:
; GFX1232-NEXT: s_or_b32 exec_lo, exec_lo, s5
; GFX1232-NEXT: s_waitcnt lgkmcnt(0)
@@ -1265,27 +1259,27 @@ define amdgpu_kernel void @add_i64_constant(ptr addrspace(1) %out, ptr addrspace
; GFX1264: ; %bb.0: ; %entry
; GFX1264-NEXT: s_load_b128 s[0:3], s[0:1], 0x24
; GFX1264-NEXT: s_mov_b64 s[6:7], exec
-; GFX1264-NEXT: s_mov_b64 s[4:5], exec
+; GFX1264-NEXT: s_mov_b32 s9, 0
; GFX1264-NEXT: v_mbcnt_lo_u32_b32 v0, s6, 0
+; GFX1264-NEXT: s_mov_b64 s[4:5], exec
; GFX1264-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
; GFX1264-NEXT: v_mbcnt_hi_u32_b32 v2, s7, v0
; GFX1264-NEXT: ; implicit-def: $vgpr0_vgpr1
; GFX1264-NEXT: v_cmpx_eq_u32_e32 0, v2
; GFX1264-NEXT: s_cbranch_execz .LBB3_2
; GFX1264-NEXT: ; %bb.1:
-; GFX1264-NEXT: s_bcnt1_i32_b64 s6, s[6:7]
-; GFX1264-NEXT: v_mov_b32_e32 v1, 0
-; GFX1264-NEXT: s_mul_i32 s6, s6, 5
+; GFX1264-NEXT: s_bcnt1_i32_b64 s8, s[6:7]
; GFX1264-NEXT: s_mov_b32 s11, 0x31016000
-; GFX1264-NEXT: v_mov_b32_e32 v0, s6
+; GFX1264-NEXT: s_mul_u64 s[6:7], s[8:9], 5
; GFX1264-NEXT: s_mov_b32 s10, -1
+; GFX1264-NEXT: v_mov_b32_e32 v0, s6
+; GFX1264-NEXT: v_mov_b32_e32 v1, s7
; GFX1264-NEXT: s_waitcnt lgkmcnt(0)
; GFX1264-NEXT: s_mov_b32 s8, s2
; GFX1264-NEXT: s_mov_b32 s9, s3
; GFX1264-NEXT: buffer_atomic_add_u64 v[0:1], off, s[8:11], null th:TH_ATOMIC_RETURN
; GFX1264-NEXT: s_waitcnt vmcnt(0)
-; GFX1264-NEXT: buffer_gl0_inv
-; GFX1264-NEXT: buffer_gl1_inv
+; GFX1264-NEXT: global_inv scope:SCOPE_DEV
; GFX1264-NEXT: .LBB3_2:
; GFX1264-NEXT: s_or_b64 exec, exec, s[4:5]
; GFX1264-NEXT: s_waitcnt lgkmcnt(0)
@@ -1303,28 +1297,28 @@ define amdgpu_kernel void @add_i64_constant(ptr addrspace(1) %out, ptr addrspace
; GFX1232-LABEL: add_i64_constant:
; GFX1232: ; %bb.0: ; %entry
; GFX1232-NEXT: s_load_b128 s[0:3], s[0:1], 0x24
-; GFX1232-NEXT: s_mov_b32 s5, exec_lo
; GFX1232-NEXT: s_mov_b32 s4, exec_lo
-; GFX1232-NEXT: v_mbcnt_lo_u32_b32 v2, s5, 0
+; GFX1232-NEXT: s_mov_b32 s5, 0
+; GFX1232-NEXT: v_mbcnt_lo_u32_b32 v2, s4, 0
+; GFX1232-NEXT: s_mov_b32 s6, exec_lo
; GFX1232-NEXT: ; implicit-def: $vgpr0_vgpr1
; GFX1232-NEXT: s_delay_alu instid0(VALU_DEP_1)
; GFX1232-NEXT: v_cmpx_eq_u32_e32 0, v2
; GFX1232-NEXT: s_cbranch_execz .LBB3_2
; GFX1232-NEXT: ; %bb.1:
-; GFX1232-NEXT: s_bcnt1_i32_b32 s5, s5
+; GFX1232-NEXT: s_bcnt1_i32_b32 s4, s4
; GFX1232-NEXT: s_mov_b32 s11, 0x31016000
-; GFX1232-NEXT: s_mul_i32 s5, s5, 5
+; GFX1232-NEXT: s_mul_u64 s[4:5], s[4:5], 5
; GFX1232-NEXT: s_mov_b32 s10, -1
-; GFX1232-NEXT: v_dual_mov_b32 v0, s5 :: v_dual_mov_b32 v1, 0
+; GFX1232-NEXT: v_dual_mov_b32 v0, s4 :: v_dual_mov_b32 v1, s5
; GFX1232-NEXT: s_waitcnt lgkmcnt(0)
; GFX1232-NEXT: s_mov_b32 s8, s2
; GFX1232-NEXT: s_mov_b32 s9, s3
; GFX1232-NEXT: buffer_atomic_add_u64 v[0:1], off, s[8:11], null th:TH_ATOMIC_RETURN
; GFX1232-NEXT: s_waitcnt vmcnt(0)
-; GFX1232-NEXT: buffer_gl0_inv
-; GFX1232-NEXT: buffer_gl1_inv
+; GFX1232-NEXT: global_inv scope:SCOPE_DEV
; GFX1232-NEXT: .LBB3_2:
-; GFX1232-NEXT: s_or_b32 exec_lo, exec_lo, s4
+; GFX1232-NEXT: s_or_b32 exec_lo, exec_lo, s6
; GFX1232-NEXT: s_waitcnt lgkmcnt(0)
; GFX1232-NEXT: v_readfirstlane_b32 s2, v0
; GFX1232-NEXT: v_readfirstlane_b32 s3, v1
@@ -1651,30 +1645,27 @@ define amdgpu_kernel void @add_i64_uniform(ptr addrspace(1) %out, ptr addrspace(
; GFX1264-NEXT: s_load_b128 s[4:7], s[0:1], 0x24
; GFX1264-NEXT: s_load_b64 s[0:1], s[0:1], 0x34
; GFX1264-NEXT: s_mov_b64 s[8:9], exec
-; GFX1264-NEXT: s_mov_b64 s[2:3], exec
+; GFX1264-NEXT: s_mov_b32 s11, 0
; GFX1264-NEXT: v_mbcnt_lo_u32_b32 v0, s8, 0
+; GFX1264-NEXT: s_mov_b64 s[2:3], exec
; GFX1264-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
; GFX1264-NEXT: v_mbcnt_hi_u32_b32 v2, s9, v0
; GFX1264-NEXT: ; implicit-def: $vgpr0_vgpr1
; GFX1264-NEXT: v_cmpx_eq_u32_e32 0, v2
; GFX1264-NEXT: s_cbranch_execz .LBB4_2
; GFX1264-NEXT: ; %bb.1:
-; GFX1264-NEXT: s_bcnt1_i32_b64 s8, s[8:9]
-; GFX1264-NEXT: s_mov_b32 s11, 0x31016000
+; GFX1264-NEXT: s_bcnt1_i32_b64 s10, s[8:9]
; GFX1264-NEXT: s_waitcnt lgkmcnt(0)
-; GFX1264-NEXT: s_mul_i32 s9, s1, s8
-; GFX1264-NEXT: s_mul_hi_u32 s10, s0, s8
-; GFX1264-NEXT: s_mul_i32 s8, s0, s8
-; GFX1264-NEXT: s_add_co_i32 s10, s10, s9
+; GFX1264-NEXT: s_mul_u64 s[8:9], s[0:1], s[10:11]
+; GFX1264-NEXT: s_mov_b32 s11, 0x31016000
; GFX1264-NEXT: v_mov_b32_e32 v0, s8
-; GFX1264-NEXT: v_mov_b32_e32 v1, s10
+; GFX1264-NEXT: v_mov_b32_e32 v1, s9
; GFX1264-NEXT: s_mov_b32 s10, -1
; GFX1264-NEXT: s_mov_b32 s8, s6
; GFX1264-NEXT: s_mov_b32 s9, s7
; GFX1264-NEXT: buffer_atomic_add_u64 v[0:1], off, s[8:11], null th:TH_ATOMIC_RETURN
; GFX1264-NEXT: s_waitcnt vmcnt(0)
-; GFX1264-NEXT: buffer_gl0_inv
-; GFX1264-NEXT: buffer_gl1_inv
+; GFX1264-NEXT: global_inv scope:SCOPE_DEV
; GFX1264-NEXT: .LBB4_2:
; GFX1264-NEXT: s_or_b64 exec, exec, s[2:3]
; GFX1264-NEXT: v_readfirstlane_b32 s2, v0
@@ -1696,32 +1687,28 @@ define amdgpu_kernel void @add_i64_uniform(ptr addrspace(1) %out, ptr addrspace(
; GFX1232-NEXT: s_clause 0x1
; GFX1232-NEXT: s_load_b128 s[4:7], s[0:1], 0x24
; GFX1232-NEXT: s_load_b64 s[0:1], s[0:1], 0x34
-; GFX1232-NEXT: s_mov_b32 s3, exec_lo
; GFX1232-NEXT: s_mov_b32 s2, exec_lo
-; GFX1232-NEXT: v_mbcnt_lo_u32_b32 v2, s3, 0
+; GFX1232-NEXT: s_mov_b32 s3, 0
+; GFX1232-NEXT: v_mbcnt_lo_u32_b32 v2, s2, 0
+; GFX1232-NEXT: s_mov_b32 s8, exec_lo
; GFX1232-NEXT: ; implicit-def: $vgpr0_vgpr1
; GFX1232-NEXT: s_delay_alu instid0(VALU_DEP_1)
; GFX1232-NEXT: v_cmpx_eq_u32_e32 0, v2
; GFX1232-NEXT: s_cbranch_execz .LBB4_2
; GFX1232-NEXT: ; %bb.1:
-; GFX1232-NEXT: s_bcnt1_i32_b32 s3, s3
-; GFX1232-NEXT: s_mov_b32 s11, 0x31016000
+; GFX1232-NEXT: s_bcnt1_i32_b32 s2, s2
+; GFX1232-NEXT: s_mov_b32 s15, 0x31016000
; GFX1232-NEXT: s_waitcnt lgkmcnt(0)
-; GFX1232-NEXT: s_mul_i32 s8, s1, s3
-; GFX1232-NEXT: s_mul_hi_u32 s9, s0, s3
-; GFX1232-NEXT: s_mul_i32 s3, s0, s3
-; GFX1232-NEXT: s_add_co_i32 s9, s9, s8
-; GFX1232-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; GFX1232-NEXT: v_dual_mov_b32 v0, s3 :: v_dual_mov_b32 v1, s9
-; GFX1232-NEXT: s_mov_b32 s10, -1
-; GFX1232-NEXT: s_mov_b32 s8, s6
-; GFX1232-NEXT: s_mov_b32 s9, s7
-; GFX1232-NEXT: buffer_atomic_add_u64 v[0:1], off, s[8:11], null th:TH_ATOMIC_RETURN
+; GFX1232-NEXT: s_mul_u64 s[2:3], s[0:1], s[2:3]
+; GFX1232-NEXT: s_mov_b32 s14, -1
+; GFX1232-NEXT: v_dual_mov_b32 v0, s2 :: v_dual_mov_b32 v1, s3
+; GFX1232-NEXT: s_mov_b32 s12, s6
+; GFX1232-NEXT: s_mov_b32 s13, s7
+; GFX1232-NEXT: buffer_atomic_add_u64 v[0:1], off, s[12:15], null th:TH_ATOMIC_RETURN
; GFX1232-NEXT: s_waitcnt vmcnt(0)
-; GFX1232-NEXT: buffer_gl0_inv
-; GFX1232-NEXT: buffer_gl1_inv
+; GFX1232-NEXT: global_inv scope:SCOPE_DEV
; GFX1232-NEXT: .LBB4_2:
-; GFX1232-NEXT: s_or_b32 exec_lo, exec_lo, s2
+; GFX1232-NEXT: s_or_b32 exec_lo, exec_lo, s8
; GFX1232-NEXT: v_readfirstlane_b32 s2, v0
; GFX1232-NEXT: v_readfirstlane_b32 s3, v1
; GFX1232-NEXT: s_waitcnt lgkmcnt(0)
@@ -1836,8 +1823,7 @@ define amdgpu_kernel void @add_i64_varying(ptr addrspace(1) %out, ptr addrspace(
; GFX12-NEXT: s_mov_b32 s4, s0
; GFX12-NEXT: buffer_atomic_add_u64 v[0:1], off, s[8:11], null th:TH_ATOMIC_RETURN
; GFX12-NEXT: s_waitcnt vmcnt(0)
-; GFX12-NEXT: buffer_gl0_inv
-; GFX12-NEXT: buffer_gl1_inv
+; GFX12-NEXT: global_inv scope:SCOPE_DEV
; GFX12-NEXT: s_mov_b32 s5, s1
; GFX12-NEXT: buffer_store_b64 v[0:1], off, s[4:7], null
; GFX12-NEXT: s_nop 0
@@ -2117,8 +2103,7 @@ define amdgpu_kernel void @sub_i32_constant(ptr addrspace(1) %out, ptr addrspace
; GFX1264-NEXT: s_mov_b32 s9, s3
; GFX1264-NEXT: buffer_atomic_sub_u32 v1, off, s[8:11], null th:TH_ATOMIC_RETURN
; GFX1264-NEXT: s_waitcnt vmcnt(0)
-; GFX1264-NEXT: buffer_gl0_inv
-; GFX1264-NEXT: buffer_gl1_inv
+; GFX1264-NEXT: global_inv scope:SCOPE_DEV
; GFX1264-NEXT: .LBB6_2:
; GFX1264-NEXT: s_or_b64 exec, exec, s[4:5]
; GFX1264-NEXT: s_waitcnt lgkmcnt(0)
@@ -2154,8 +2139,7 @@ define amdgpu_kernel void @sub_i32_constant(ptr addrspace(1) %out, ptr addrspace
; GFX1232-NEXT: s_mov_b32 s9, s3
; GFX1232-NEXT: buffer_atomic_sub_u32 v1, off, s[8:11], null th:TH_ATOMIC_RETURN
; GFX1232-NEXT: s_waitcnt vmcnt(0)
-; GFX1232-NEXT: buffer_gl0_inv
-; GFX1232-NEXT: buffer_gl1_inv
+; GFX1232-NEXT: global_inv scope:SCOPE_DEV
; GFX1232-NEXT: .LBB6_2:
; GFX1232-NEXT: s_or_b32 exec_lo, exec_lo, s4
; GFX1232-NEXT: s_waitcnt lgkmcnt(0)
@@ -2454,8 +2438,7 @@ define amdgpu_kernel void @sub_i32_uniform(ptr addrspace(1) %out, ptr addrspace(
; GFX1264-NEXT: s_mov_b32 s13, s7
; GFX1264-NEXT: buffer_atomic_sub_u32 v1, off, s[12:15], null th:TH_ATOMIC_RETURN
; GFX1264-NEXT: s_waitcnt vmcnt(0)
-; GFX1264-NEXT: buffer_gl0_inv
-; GFX1264-NEXT: buffer_gl1_inv
+; GFX1264-NEXT: global_inv scope:SCOPE_DEV
; GFX1264-NEXT: .LBB7_2:
; GFX1264-NEXT: s_or_b64 exec, exec, s[0:1]
; GFX1264-NEXT: s_waitcnt lgkmcnt(0)
@@ -2493,8 +2476,7 @@ define amdgpu_kernel void @sub_i32_uniform(ptr addrspace(1) %out, ptr addrspace(
; GFX1232-NEXT: s_mov_b32 s9, s7
; GFX1232-NEXT: buffer_atomic_sub_u32 v1, off, s[8:11], null th:TH_ATOMIC_RETURN
; GFX1232-NEXT: s_waitcnt vmcnt(0)
-; GFX1232-NEXT: buffer_gl0_inv
-; GFX1232-NEXT: buffer_gl1_inv
+; GFX1232-NEXT: global_inv scope:SCOPE_DEV
; GFX1232-NEXT: .LBB7_2:
; GFX1232-NEXT: s_or_b32 exec_lo, exec_lo, s1
; GFX1232-NEXT: s_waitcnt lgkmcnt(0)
@@ -2850,8 +2832,7 @@ define amdgpu_kernel void @sub_i32_varying(ptr addrspace(1) %out, ptr addrspace(
; GFX1264-NEXT: s_mov_b32 s9, s3
; GFX1264-NEXT: buffer_atomic_sub_u32 v0, off, s[8:11], null th:TH_ATOMIC_RETURN
; GFX1264-NEXT: s_waitcnt vmcnt(0)
-; GFX1264-NEXT: buffer_gl0_inv
-; GFX1264-NEXT: buffer_gl1_inv
+; GFX1264-NEXT: global_inv scope:SCOPE_DEV
; GFX1264-NEXT: .LBB8_4:
; GFX1264-NEXT: s_or_b64 exec, exec, s[4:5]
; GFX1264-NEXT: s_waitcnt lgkmcnt(0)
@@ -2899,8 +2880,7 @@ define amdgpu_kernel void @sub_i32_varying(ptr addrspace(1) %out, ptr addrspace(
; GFX1232-NEXT: s_mov_b32 s9, s3
; GFX1232-NEXT: buffer_atomic_sub_u32 v0, off, s[8:11], null th:TH_ATOMIC_RETURN
; GFX1232-NEXT: s_waitcnt vmcnt(0)
-; GFX1232-NEXT: buffer_gl0_inv
-; GFX1232-NEXT: buffer_gl1_inv
+; GFX1232-NEXT: global_inv scope:SCOPE_DEV
; GFX1232-NEXT: .LBB8_4:
; GFX1232-NEXT: s_or_b32 exec_lo, exec_lo, s5
; GFX1232-NEXT: s_waitcnt lgkmcnt(0)
@@ -3199,27 +3179,27 @@ define amdgpu_kernel void @sub_i64_constant(ptr addrspace(1) %out, ptr addrspace
; GFX1264: ; %bb.0: ; %entry
; GFX1264-NEXT: s_load_b128 s[0:3], s[0:1], 0x24
; GFX1264-NEXT: s_mov_b64 s[6:7], exec
-; GFX1264-NEXT: s_mov_b64 s[4:5], exec
+; GFX1264-NEXT: s_mov_b32 s9, 0
; GFX1264-NEXT: v_mbcnt_lo_u32_b32 v0, s6, 0
+; GFX1264-NEXT: s_mov_b64 s[4:5], exec
; GFX1264-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
; GFX1264-NEXT: v_mbcnt_hi_u32_b32 v2, s7, v0
; GFX1264-NEXT: ; implicit-def: $vgpr0_vgpr1
; GFX1264-NEXT: v_cmpx_eq_u32_e32 0, v2
; GFX1264-NEXT: s_cbranch_execz .LBB9_2
; GFX1264-NEXT: ; %bb.1:
-; GFX1264-NEXT: s_bcnt1_i32_b64 s6, s[6:7]
-; GFX1264-NEXT: v_mov_b32_e32 v1, 0
-; GFX1264-NEXT: s_mul_i32 s6, s6, 5
+; GFX1264-NEXT: s_bcnt1_i32_b64 s8, s[6:7]
; GFX1264-NEXT: s_mov_b32 s11, 0x31016000
-; GFX1264-NEXT: v_mov_b32_e32 v0, s6
+; GFX1264-NEXT: s_mul_u64 s[6:7], s[8:9], 5
; GFX1264-NEXT: s_mov_b32 s10, -1
+; GFX1264-NEXT: v_mov_b32_e32 v0, s6
+; GFX1264-NEXT: v_mov_b32_e32 v1, s7
; GFX1264-NEXT: s_waitcnt lgkmcnt(0)
; GFX1264-NEXT: s_mov_b32 s8, s2
; GFX1264-NEXT: s_mov_b32 s9, s3
; GFX1264-NEXT: buffer_atomic_sub_u64 v[0:1], off, s[8:11], null th:TH_ATOMIC_RETURN
; GFX1264-NEXT: s_waitcnt vmcnt(0)
-; GFX1264-NEXT: buffer_gl0_inv
-; GFX1264-NEXT: buffer_gl1_inv
+; GFX1264-NEXT: global_inv scope:SCOPE_DEV
; GFX1264-NEXT: .LBB9_2:
; GFX1264-NEXT: s_or_b64 exec, exec, s[4:5]
; GFX1264-NEXT: s_waitcnt lgkmcnt(0)
@@ -3240,28 +3220,28 @@ define amdgpu_kernel void @sub_i64_constant(ptr addrspace(1) %out, ptr addrspace
; GFX1232-LABEL: sub_i64_constant:
; GFX1232: ; %bb.0: ; %entry
; GFX1232-NEXT: s_load_b128 s[0:3], s[0:1], 0x24
-; GFX1232-NEXT: s_mov_b32 s5, exec_lo
; GFX1232-NEXT: s_mov_b32 s4, exec_lo
-; GFX1232-NEXT: v_mbcnt_lo_u32_b32 v2, s5, 0
+; GFX1232-NEXT: s_mov_b32 s5, 0
+; GFX1232-NEXT: v_mbcnt_lo_u32_b32 v2, s4, 0
+; GFX1232-NEXT: s_mov_b32 s6, exec_lo
; GFX1232-NEXT: ; implicit-def: $vgpr0_vgpr1
; GFX1232-NEXT: s_delay_alu instid0(VALU_DEP_1)
; GFX1232-NEXT: v_cmpx_eq_u32_e32 0, v2
; GFX1232-NEXT: s_cbranch_execz .LBB9_2
; GFX1232-NEXT: ; %bb.1:
-; GFX1232-NEXT: s_bcnt1_i32_b32 s5, s5
+; GFX1232-NEXT: s_bcnt1_i32_b32 s4, s4
; GFX1232-NEXT: s_mov_b32 s11, 0x31016000
-; GFX1232-NEXT: s_mul_i32 s5, s5, 5
+; GFX1232-NEXT: s_mul_u64 s[4:5], s[4:5], 5
; GFX1232-NEXT: s_mov_b32 s10, -1
-; GFX1232-NEXT: v_dual_mov_b32 v0, s5 :: v_dual_mov_b32 v1, 0
+; GFX1232-NEXT: v_dual_mov_b32 v0, s4 :: v_dual_mov_b32 v1, s5
; GFX1232-NEXT: s_waitcnt lgkmcnt(0)
; GFX1232-NEXT: s_mov_b32 s8, s2
; GFX1232-NEXT: s_mov_b32 s9, s3
; GFX1232-NEXT: buffer_atomic_sub_u64 v[0:1], off, s[8:11], null th:TH_ATOMIC_RETURN
; GFX1232-NEXT: s_waitcnt vmcnt(0)
-; GFX1232-NEXT: buffer_gl0_inv
-; GFX1232-NEXT: buffer_gl1_inv
+; GFX1232-NEXT: global_inv scope:SCOPE_DEV
; GFX1232-NEXT: .LBB9_2:
-; GFX1232-NEXT: s_or_b32 exec_lo, exec_lo, s4
+; GFX1232-NEXT: s_or_b32 exec_lo, exec_lo, s6
; GFX1232-NEXT: s_waitcnt lgkmcnt(0)
; GFX1232-NEXT: v_readfirstlane_b32 s2, v0
; GFX1232-NEXT: v_mul_u32_u24_e32 v0, 5, v2
@@ -3604,30 +3584,27 @@ define amdgpu_kernel void @sub_i64_uniform(ptr addrspace(1) %out, ptr addrspace(
; GFX1264-NEXT: s_load_b128 s[4:7], s[0:1], 0x24
; GFX1264-NEXT: s_load_b64 s[0:1], s[0:1], 0x34
; GFX1264-NEXT: s_mov_b64 s[8:9], exec
-; GFX1264-NEXT: s_mov_b64 s[2:3], exec
+; GFX1264-NEXT: s_mov_b32 s11, 0
; GFX1264-NEXT: v_mbcnt_lo_u32_b32 v0, s8, 0
+; GFX1264-NEXT: s_mov_b64 s[2:3], exec
; GFX1264-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
; GFX1264-NEXT: v_mbcnt_hi_u32_b32 v2, s9, v0
; GFX1264-NEXT: ; implicit-def: $vgpr0_vgpr1
; GFX1264-NEXT: v_cmpx_eq_u32_e32 0, v2
; GFX1264-NEXT: s_cbranch_execz .LBB10_2
; GFX1264-NEXT: ; %bb.1:
-; GFX1264-NEXT: s_bcnt1_i32_b64 s8, s[8:9]
-; GFX1264-NEXT: s_mov_b32 s11, 0x31016000
+; GFX1264-NEXT: s_bcnt1_i32_b64 s10, s[8:9]
; GFX1264-NEXT: s_waitcnt lgkmcnt(0)
-; GFX1264-NEXT: s_mul_i32 s9, s1, s8
-; GFX1264-NEXT: s_mul_hi_u32 s10, s0, s8
-; GFX1264-NEXT: s_mul_i32 s8, s0, s8
-; GFX1264-NEXT: s_add_co_i32 s10, s10, s9
+; GFX1264-NEXT: s_mul_u64 s[8:9], s[0:1], s[10:11]
+; GFX1264-NEXT: s_mov_b32 s11, 0x31016000
; GFX1264-NEXT: v_mov_b32_e32 v0, s8
-; GFX1264-NEXT: v_mov_b32_e32 v1, s10
+; GFX1264-NEXT: v_mov_b32_e32 v1, s9
; GFX1264-NEXT: s_mov_b32 s10, -1
; GFX1264-NEXT: s_mov_b32 s8, s6
; GFX1264-NEXT: s_mov_b32 s9, s7
; GFX1264-NEXT: buffer_atomic_sub_u64 v[0:1], off, s[8:11], null th:TH_ATOMIC_RETURN
; GFX1264-NEXT: s_waitcnt vmcnt(0)
-; GFX1264-NEXT: buffer_gl0_inv
-; GFX1264-NEXT: buffer_gl1_inv
+; GFX1264-NEXT: global_inv scope:SCOPE_DEV
; GFX1264-NEXT: .LBB10_2:
; GFX1264-NEXT: s_or_b64 exec, exec, s[2:3]
; GFX1264-NEXT: s_waitcnt lgkmcnt(0)
@@ -3652,32 +3629,28 @@ define amdgpu_kernel void @sub_i64_uniform(ptr addrspace(1) %out, ptr addrspace(
; GFX1232-NEXT: s_clause 0x1
; GFX1232-NEXT: s_load_b128 s[4:7], s[0:1], 0x24
; GFX1232-NEXT: s_load_b64 s[0:1], s[0:1], 0x34
-; GFX1232-NEXT: s_mov_b32 s3, exec_lo
; GFX1232-NEXT: s_mov_b32 s2, exec_lo
-; GFX1232-NEXT: v_mbcnt_lo_u32_b32 v2, s3, 0
+; GFX1232-NEXT: s_mov_b32 s3, 0
+; GFX1232-NEXT: v_mbcnt_lo_u32_b32 v2, s2, 0
+; GFX1232-NEXT: s_mov_b32 s8, exec_lo
; GFX1232-NEXT: ; implicit-def: $vgpr0_vgpr1
; GFX1232-NEXT: s_delay_alu instid0(VALU_DEP_1)
; GFX1232-NEXT: v_cmpx_eq_u32_e32 0, v2
; GFX1232-NEXT: s_cbranch_execz .LBB10_2
; GFX1232-NEXT: ; %bb.1:
-; GFX1232-NEXT: s_bcnt1_i32_b32 s3, s3
-; GFX1232-NEXT: s_mov_b32 s11, 0x31016000
+; GFX1232-NEXT: s_bcnt1_i32_b32 s2, s2
+; GFX1232-NEXT: s_mov_b32 s15, 0x31016000
; GFX1232-NEXT: s_waitcnt lgkmcnt(0)
-; GFX1232-NEXT: s_mul_i32 s8, s1, s3
-; GFX1232-NEXT: s_mul_hi_u32 s9, s0, s3
-; GFX1232-NEXT: s_mul_i32 s3, s0, s3
-; GFX1232-NEXT: s_add_co_i32 s9, s9, s8
-; GFX1232-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; GFX1232-NEXT: v_dual_mov_b32 v0, s3 :: v_dual_mov_b32 v1, s9
-; GFX1232-NEXT: s_mov_b32 s10, -1
-; GFX1232-NEXT: s_mov_b32 s8, s6
-; GFX1232-NEXT: s_mov_b32 s9, s7
-; GFX1232-NEXT: buffer_atomic_sub_u64 v[0:1], off, s[8:11], null th:TH_ATOMIC_RETURN
+; GFX1232-NEXT: s_mul_u64 s[2:3], s[0:1], s[2:3]
+; GFX1232-NEXT: s_mov_b32 s14, -1
+; GFX1232-NEXT: v_dual_mov_b32 v0, s2 :: v_dual_mov_b32 v1, s3
+; GFX1232-NEXT: s_mov_b32 s12, s6
+; GFX1232-NEXT: s_mov_b32 s13, s7
+; GFX1232-NEXT: buffer_atomic_sub_u64 v[0:1], off, s[12:15], null th:TH_ATOMIC_RETURN
; GFX1232-NEXT: s_waitcnt vmcnt(0)
-; GFX1232-NEXT: buffer_gl0_inv
-; GFX1232-NEXT: buffer_gl1_inv
+; GFX1232-NEXT: global_inv scope:SCOPE_DEV
; GFX1232-NEXT: .LBB10_2:
-; GFX1232-NEXT: s_or_b32 exec_lo, exec_lo, s2
+; GFX1232-NEXT: s_or_b32 exec_lo, exec_lo, s8
; GFX1232-NEXT: s_waitcnt lgkmcnt(0)
; GFX1232-NEXT: v_mul_lo_u32 v5, s1, v2
; GFX1232-NEXT: v_mad_co_u64_u32 v[3:4], null, s0, v2, 0
@@ -3795,8 +3768,7 @@ define amdgpu_kernel void @sub_i64_varying(ptr addrspace(1) %out, ptr addrspace(
; GFX12-NEXT: s_mov_b32 s4, s0
; GFX12-NEXT: buffer_atomic_sub_u64 v[0:1], off, s[8:11], null th:TH_ATOMIC_RETURN
; GFX12-NEXT: s_waitcnt vmcnt(0)
-; GFX12-NEXT: buffer_gl0_inv
-; GFX12-NEXT: buffer_gl1_inv
+; GFX12-NEXT: global_inv scope:SCOPE_DEV
; GFX12-NEXT: s_mov_b32 s5, s1
; GFX12-NEXT: buffer_store_b64 v[0:1], off, s[4:7], null
; GFX12-NEXT: s_nop 0
diff --git a/llvm/test/CodeGen/AMDGPU/atomicrmw-expand.ll b/llvm/test/CodeGen/AMDGPU/atomicrmw-expand.ll
index 1df9a250a315..e18bdc89e7d4 100644
--- a/llvm/test/CodeGen/AMDGPU/atomicrmw-expand.ll
+++ b/llvm/test/CodeGen/AMDGPU/atomicrmw-expand.ll
@@ -101,8 +101,7 @@ define float @syncscope_system(ptr %addr, float %val) #0 {
; GFX1200-NEXT: s_waitcnt_vscnt null, 0x0
; GFX1200-NEXT: flat_atomic_cmpswap_b32 v3, v[0:1], v[3:4] th:TH_ATOMIC_RETURN
; GFX1200-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GFX1200-NEXT: buffer_gl0_inv
-; GFX1200-NEXT: buffer_gl1_inv
+; GFX1200-NEXT: global_inv scope:SCOPE_SYS
; GFX1200-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v4
; GFX1200-NEXT: s_or_b32 s0, vcc_lo, s0
; GFX1200-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
@@ -209,7 +208,7 @@ define float @syncscope_workgroup_rtn(ptr %addr, float %val) #0 {
; GFX1200-NEXT: s_waitcnt_vscnt null, 0x0
; GFX1200-NEXT: flat_atomic_add_f32 v0, v[0:1], v2 th:TH_ATOMIC_RETURN
; GFX1200-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GFX1200-NEXT: buffer_gl0_inv
+; GFX1200-NEXT: global_inv scope:SCOPE_SE
; GFX1200-NEXT: s_setpc_b64 s[30:31]
%res = atomicrmw fadd ptr %addr, float %val syncscope("workgroup") seq_cst
ret float %res
@@ -340,7 +339,7 @@ define void @syncscope_workgroup_nortn(ptr %addr, float %val) #0 {
; GFX1200-NEXT: flat_atomic_add_f32 v[0:1], v2
; GFX1200-NEXT: s_waitcnt lgkmcnt(0)
; GFX1200-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX1200-NEXT: buffer_gl0_inv
+; GFX1200-NEXT: global_inv scope:SCOPE_SE
; GFX1200-NEXT: s_setpc_b64 s[30:31]
%res = atomicrmw fadd ptr %addr, float %val syncscope("workgroup") seq_cst
ret void
@@ -435,7 +434,7 @@ define float @no_unsafe(ptr %addr, float %val) {
; GFX1200-NEXT: s_waitcnt_vscnt null, 0x0
; GFX1200-NEXT: flat_atomic_cmpswap_b32 v3, v[0:1], v[3:4] th:TH_ATOMIC_RETURN
; GFX1200-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GFX1200-NEXT: buffer_gl0_inv
+; GFX1200-NEXT: global_inv scope:SCOPE_SE
; GFX1200-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v4
; GFX1200-NEXT: s_or_b32 s0, vcc_lo, s0
; GFX1200-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
diff --git a/llvm/test/CodeGen/AMDGPU/bf16.ll b/llvm/test/CodeGen/AMDGPU/bf16.ll
index 2a3417e24185..4e87b4e82ba3 100644
--- a/llvm/test/CodeGen/AMDGPU/bf16.ll
+++ b/llvm/test/CodeGen/AMDGPU/bf16.ll
@@ -2411,16 +2411,16 @@ define void @test_load_store_v16bf16(ptr addrspace(1) %in, ptr addrspace(1) %out
; GFX8-LABEL: test_load_store_v16bf16:
; GFX8: ; %bb.0:
; GFX8-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX8-NEXT: v_add_u32_e32 v4, vcc, 16, v0
-; GFX8-NEXT: v_addc_u32_e32 v5, vcc, 0, v1, vcc
-; GFX8-NEXT: flat_load_dwordx4 v[4:7], v[4:5]
-; GFX8-NEXT: flat_load_dwordx4 v[8:11], v[0:1]
+; GFX8-NEXT: v_add_u32_e32 v8, vcc, 16, v0
+; GFX8-NEXT: v_addc_u32_e32 v9, vcc, 0, v1, vcc
+; GFX8-NEXT: flat_load_dwordx4 v[4:7], v[0:1]
+; GFX8-NEXT: flat_load_dwordx4 v[8:11], v[8:9]
; GFX8-NEXT: v_add_u32_e32 v0, vcc, 16, v2
; GFX8-NEXT: v_addc_u32_e32 v1, vcc, 0, v3, vcc
; GFX8-NEXT: s_waitcnt vmcnt(1)
-; GFX8-NEXT: flat_store_dwordx4 v[0:1], v[4:7]
+; GFX8-NEXT: flat_store_dwordx4 v[2:3], v[4:7]
; GFX8-NEXT: s_waitcnt vmcnt(1)
-; GFX8-NEXT: flat_store_dwordx4 v[2:3], v[8:11]
+; GFX8-NEXT: flat_store_dwordx4 v[0:1], v[8:11]
; GFX8-NEXT: s_waitcnt vmcnt(0)
; GFX8-NEXT: s_setpc_b64 s[30:31]
;
@@ -4395,9 +4395,7 @@ define void @test_call_v8bf16(<8 x bfloat> %in, ptr addrspace(5) %out) {
; GFX11-NEXT: v_writelane_b32 v5, s31, 1
; GFX11-NEXT: s_waitcnt lgkmcnt(0)
; GFX11-NEXT: s_swappc_b64 s[30:31], s[0:1]
-; GFX11-NEXT: scratch_store_b64 v4, v[2:3], off offset:8 dlc
-; GFX11-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-NEXT: scratch_store_b64 v4, v[0:1], off dlc
+; GFX11-NEXT: scratch_store_b128 v4, v[0:3], off dlc
; GFX11-NEXT: s_waitcnt_vscnt null, 0x0
; GFX11-NEXT: v_readlane_b32 s31, v5, 1
; GFX11-NEXT: v_readlane_b32 s30, v5, 0
@@ -4751,18 +4749,12 @@ define void @test_call_v16bf16(<16 x bfloat> %in, ptr addrspace(5) %out) {
; GFX11-NEXT: v_writelane_b32 v9, s31, 1
; GFX11-NEXT: s_waitcnt lgkmcnt(0)
; GFX11-NEXT: s_swappc_b64 s[30:31], s[0:1]
-; GFX11-NEXT: v_add_nc_u32_e32 v10, 24, v8
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2)
-; GFX11-NEXT: v_readlane_b32 s31, v9, 1
-; GFX11-NEXT: v_readlane_b32 s30, v9, 0
-; GFX11-NEXT: scratch_store_b64 v10, v[6:7], off dlc
-; GFX11-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-NEXT: scratch_store_b64 v8, v[4:5], off offset:16 dlc
+; GFX11-NEXT: scratch_store_b128 v8, v[4:7], off offset:16 dlc
; GFX11-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-NEXT: scratch_store_b64 v8, v[2:3], off offset:8 dlc
-; GFX11-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-NEXT: scratch_store_b64 v8, v[0:1], off dlc
+; GFX11-NEXT: scratch_store_b128 v8, v[0:3], off dlc
; GFX11-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-NEXT: v_readlane_b32 s31, v9, 1
+; GFX11-NEXT: v_readlane_b32 s30, v9, 0
; GFX11-NEXT: s_xor_saveexec_b32 s0, -1
; GFX11-NEXT: scratch_load_b32 v9, off, s33 ; 4-byte Folded Reload
; GFX11-NEXT: s_mov_b32 exec_lo, s0
@@ -5470,60 +5462,48 @@ define <5 x float> @global_extload_v5bf16_to_v5f32(ptr addrspace(1) %ptr) {
; GFX8-LABEL: global_extload_v5bf16_to_v5f32:
; GFX8: ; %bb.0:
; GFX8-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX8-NEXT: flat_load_dwordx2 v[2:3], v[0:1]
-; GFX8-NEXT: v_add_u32_e32 v0, vcc, 8, v0
-; GFX8-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc
-; GFX8-NEXT: flat_load_ushort v4, v[0:1]
-; GFX8-NEXT: s_waitcnt vmcnt(1)
+; GFX8-NEXT: flat_load_dwordx4 v[2:5], v[0:1]
+; GFX8-NEXT: s_waitcnt vmcnt(0)
; GFX8-NEXT: v_lshlrev_b32_e32 v0, 16, v2
; GFX8-NEXT: v_and_b32_e32 v1, 0xffff0000, v2
; GFX8-NEXT: v_lshlrev_b32_e32 v2, 16, v3
; GFX8-NEXT: v_and_b32_e32 v3, 0xffff0000, v3
-; GFX8-NEXT: s_waitcnt vmcnt(0)
; GFX8-NEXT: v_lshlrev_b32_e32 v4, 16, v4
; GFX8-NEXT: s_setpc_b64 s[30:31]
;
; GFX9-LABEL: global_extload_v5bf16_to_v5f32:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX9-NEXT: global_load_ushort v4, v[0:1], off offset:8
-; GFX9-NEXT: global_load_dwordx2 v[2:3], v[0:1], off
-; GFX9-NEXT: s_waitcnt vmcnt(1)
-; GFX9-NEXT: v_lshlrev_b32_e32 v4, 16, v4
+; GFX9-NEXT: global_load_dwordx4 v[2:5], v[0:1], off
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: v_lshlrev_b32_e32 v0, 16, v2
; GFX9-NEXT: v_and_b32_e32 v1, 0xffff0000, v2
; GFX9-NEXT: v_lshlrev_b32_e32 v2, 16, v3
; GFX9-NEXT: v_and_b32_e32 v3, 0xffff0000, v3
+; GFX9-NEXT: v_lshlrev_b32_e32 v4, 16, v4
; GFX9-NEXT: s_setpc_b64 s[30:31]
;
; GFX10-LABEL: global_extload_v5bf16_to_v5f32:
; GFX10: ; %bb.0:
; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX10-NEXT: s_clause 0x1
-; GFX10-NEXT: global_load_dwordx2 v[2:3], v[0:1], off
-; GFX10-NEXT: global_load_ushort v4, v[0:1], off offset:8
-; GFX10-NEXT: s_waitcnt vmcnt(1)
+; GFX10-NEXT: global_load_dwordx4 v[2:5], v[0:1], off
+; GFX10-NEXT: s_waitcnt vmcnt(0)
; GFX10-NEXT: v_lshlrev_b32_e32 v0, 16, v2
; GFX10-NEXT: v_and_b32_e32 v1, 0xffff0000, v2
; GFX10-NEXT: v_lshlrev_b32_e32 v2, 16, v3
; GFX10-NEXT: v_and_b32_e32 v3, 0xffff0000, v3
-; GFX10-NEXT: s_waitcnt vmcnt(0)
; GFX10-NEXT: v_lshlrev_b32_e32 v4, 16, v4
; GFX10-NEXT: s_setpc_b64 s[30:31]
;
; GFX11-LABEL: global_extload_v5bf16_to_v5f32:
; GFX11: ; %bb.0:
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: s_clause 0x1
-; GFX11-NEXT: global_load_b64 v[2:3], v[0:1], off
-; GFX11-NEXT: global_load_u16 v4, v[0:1], off offset:8
-; GFX11-NEXT: s_waitcnt vmcnt(1)
+; GFX11-NEXT: global_load_b128 v[2:5], v[0:1], off
+; GFX11-NEXT: s_waitcnt vmcnt(0)
; GFX11-NEXT: v_lshlrev_b32_e32 v0, 16, v2
; GFX11-NEXT: v_and_b32_e32 v1, 0xffff0000, v2
; GFX11-NEXT: v_lshlrev_b32_e32 v2, 16, v3
; GFX11-NEXT: v_and_b32_e32 v3, 0xffff0000, v3
-; GFX11-NEXT: s_waitcnt vmcnt(0)
; GFX11-NEXT: v_lshlrev_b32_e32 v4, 16, v4
; GFX11-NEXT: s_setpc_b64 s[30:31]
%load = load <5 x bfloat>, ptr addrspace(1) %ptr
@@ -6045,138 +6025,138 @@ define <32 x float> @global_extload_v32bf16_to_v32f32(ptr addrspace(1) %ptr) {
; GFX9-LABEL: global_extload_v32bf16_to_v32f32:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX9-NEXT: global_load_dwordx4 v[4:7], v[0:1], off
-; GFX9-NEXT: global_load_dwordx4 v[12:15], v[0:1], off offset:16
-; GFX9-NEXT: global_load_dwordx4 v[20:23], v[0:1], off offset:32
-; GFX9-NEXT: global_load_dwordx4 v[28:31], v[0:1], off offset:48
+; GFX9-NEXT: global_load_dwordx4 v[16:19], v[0:1], off
+; GFX9-NEXT: global_load_dwordx4 v[20:23], v[0:1], off offset:16
+; GFX9-NEXT: global_load_dwordx4 v[24:27], v[0:1], off offset:32
+; GFX9-NEXT: global_load_dwordx4 v[32:35], v[0:1], off offset:48
; GFX9-NEXT: s_waitcnt vmcnt(3)
-; GFX9-NEXT: v_lshlrev_b32_e32 v0, 16, v4
-; GFX9-NEXT: v_and_b32_e32 v1, 0xffff0000, v4
-; GFX9-NEXT: v_lshlrev_b32_e32 v2, 16, v5
-; GFX9-NEXT: v_and_b32_e32 v3, 0xffff0000, v5
-; GFX9-NEXT: v_lshlrev_b32_e32 v4, 16, v6
-; GFX9-NEXT: v_and_b32_e32 v5, 0xffff0000, v6
-; GFX9-NEXT: v_lshlrev_b32_e32 v6, 16, v7
-; GFX9-NEXT: v_and_b32_e32 v7, 0xffff0000, v7
+; GFX9-NEXT: v_and_b32_e32 v1, 0xffff0000, v16
+; GFX9-NEXT: v_and_b32_e32 v3, 0xffff0000, v17
+; GFX9-NEXT: v_and_b32_e32 v5, 0xffff0000, v18
+; GFX9-NEXT: v_and_b32_e32 v7, 0xffff0000, v19
; GFX9-NEXT: s_waitcnt vmcnt(2)
-; GFX9-NEXT: v_lshlrev_b32_e32 v8, 16, v12
-; GFX9-NEXT: v_and_b32_e32 v9, 0xffff0000, v12
-; GFX9-NEXT: v_lshlrev_b32_e32 v10, 16, v13
-; GFX9-NEXT: v_and_b32_e32 v11, 0xffff0000, v13
-; GFX9-NEXT: v_lshlrev_b32_e32 v12, 16, v14
-; GFX9-NEXT: v_and_b32_e32 v13, 0xffff0000, v14
-; GFX9-NEXT: v_lshlrev_b32_e32 v14, 16, v15
-; GFX9-NEXT: v_and_b32_e32 v15, 0xffff0000, v15
+; GFX9-NEXT: v_and_b32_e32 v9, 0xffff0000, v20
+; GFX9-NEXT: v_and_b32_e32 v11, 0xffff0000, v21
+; GFX9-NEXT: v_and_b32_e32 v13, 0xffff0000, v22
+; GFX9-NEXT: v_and_b32_e32 v15, 0xffff0000, v23
+; GFX9-NEXT: v_lshlrev_b32_e32 v0, 16, v16
+; GFX9-NEXT: v_lshlrev_b32_e32 v2, 16, v17
+; GFX9-NEXT: v_lshlrev_b32_e32 v4, 16, v18
+; GFX9-NEXT: v_lshlrev_b32_e32 v6, 16, v19
+; GFX9-NEXT: v_lshlrev_b32_e32 v8, 16, v20
+; GFX9-NEXT: v_lshlrev_b32_e32 v10, 16, v21
+; GFX9-NEXT: v_lshlrev_b32_e32 v12, 16, v22
+; GFX9-NEXT: v_lshlrev_b32_e32 v14, 16, v23
; GFX9-NEXT: s_waitcnt vmcnt(1)
-; GFX9-NEXT: v_lshlrev_b32_e32 v16, 16, v20
-; GFX9-NEXT: v_and_b32_e32 v17, 0xffff0000, v20
-; GFX9-NEXT: v_lshlrev_b32_e32 v18, 16, v21
-; GFX9-NEXT: v_and_b32_e32 v19, 0xffff0000, v21
-; GFX9-NEXT: v_lshlrev_b32_e32 v20, 16, v22
-; GFX9-NEXT: v_and_b32_e32 v21, 0xffff0000, v22
-; GFX9-NEXT: v_lshlrev_b32_e32 v22, 16, v23
-; GFX9-NEXT: v_and_b32_e32 v23, 0xffff0000, v23
+; GFX9-NEXT: v_and_b32_e32 v17, 0xffff0000, v24
+; GFX9-NEXT: v_and_b32_e32 v19, 0xffff0000, v25
+; GFX9-NEXT: v_and_b32_e32 v21, 0xffff0000, v26
+; GFX9-NEXT: v_and_b32_e32 v23, 0xffff0000, v27
+; GFX9-NEXT: v_lshlrev_b32_e32 v16, 16, v24
+; GFX9-NEXT: v_lshlrev_b32_e32 v18, 16, v25
+; GFX9-NEXT: v_lshlrev_b32_e32 v20, 16, v26
+; GFX9-NEXT: v_lshlrev_b32_e32 v22, 16, v27
; GFX9-NEXT: s_waitcnt vmcnt(0)
-; GFX9-NEXT: v_lshlrev_b32_e32 v24, 16, v28
-; GFX9-NEXT: v_and_b32_e32 v25, 0xffff0000, v28
-; GFX9-NEXT: v_lshlrev_b32_e32 v26, 16, v29
-; GFX9-NEXT: v_and_b32_e32 v27, 0xffff0000, v29
-; GFX9-NEXT: v_lshlrev_b32_e32 v28, 16, v30
-; GFX9-NEXT: v_and_b32_e32 v29, 0xffff0000, v30
-; GFX9-NEXT: v_lshlrev_b32_e32 v30, 16, v31
-; GFX9-NEXT: v_and_b32_e32 v31, 0xffff0000, v31
+; GFX9-NEXT: v_and_b32_e32 v25, 0xffff0000, v32
+; GFX9-NEXT: v_and_b32_e32 v27, 0xffff0000, v33
+; GFX9-NEXT: v_and_b32_e32 v29, 0xffff0000, v34
+; GFX9-NEXT: v_and_b32_e32 v31, 0xffff0000, v35
+; GFX9-NEXT: v_lshlrev_b32_e32 v24, 16, v32
+; GFX9-NEXT: v_lshlrev_b32_e32 v26, 16, v33
+; GFX9-NEXT: v_lshlrev_b32_e32 v28, 16, v34
+; GFX9-NEXT: v_lshlrev_b32_e32 v30, 16, v35
; GFX9-NEXT: s_setpc_b64 s[30:31]
;
; GFX10-LABEL: global_extload_v32bf16_to_v32f32:
; GFX10: ; %bb.0:
; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX10-NEXT: s_clause 0x3
-; GFX10-NEXT: global_load_dwordx4 v[4:7], v[0:1], off
-; GFX10-NEXT: global_load_dwordx4 v[12:15], v[0:1], off offset:16
-; GFX10-NEXT: global_load_dwordx4 v[20:23], v[0:1], off offset:32
-; GFX10-NEXT: global_load_dwordx4 v[28:31], v[0:1], off offset:48
+; GFX10-NEXT: global_load_dwordx4 v[32:35], v[0:1], off
+; GFX10-NEXT: global_load_dwordx4 v[36:39], v[0:1], off offset:16
+; GFX10-NEXT: global_load_dwordx4 v[48:51], v[0:1], off offset:32
+; GFX10-NEXT: global_load_dwordx4 v[52:55], v[0:1], off offset:48
; GFX10-NEXT: s_waitcnt vmcnt(3)
-; GFX10-NEXT: v_lshlrev_b32_e32 v0, 16, v4
-; GFX10-NEXT: v_and_b32_e32 v1, 0xffff0000, v4
-; GFX10-NEXT: v_lshlrev_b32_e32 v2, 16, v5
-; GFX10-NEXT: v_and_b32_e32 v3, 0xffff0000, v5
-; GFX10-NEXT: v_lshlrev_b32_e32 v4, 16, v6
-; GFX10-NEXT: v_and_b32_e32 v5, 0xffff0000, v6
-; GFX10-NEXT: v_lshlrev_b32_e32 v6, 16, v7
-; GFX10-NEXT: v_and_b32_e32 v7, 0xffff0000, v7
+; GFX10-NEXT: v_and_b32_e32 v1, 0xffff0000, v32
+; GFX10-NEXT: v_and_b32_e32 v3, 0xffff0000, v33
+; GFX10-NEXT: v_and_b32_e32 v5, 0xffff0000, v34
+; GFX10-NEXT: v_and_b32_e32 v7, 0xffff0000, v35
; GFX10-NEXT: s_waitcnt vmcnt(2)
-; GFX10-NEXT: v_lshlrev_b32_e32 v8, 16, v12
-; GFX10-NEXT: v_and_b32_e32 v9, 0xffff0000, v12
-; GFX10-NEXT: v_lshlrev_b32_e32 v10, 16, v13
-; GFX10-NEXT: v_and_b32_e32 v11, 0xffff0000, v13
-; GFX10-NEXT: v_lshlrev_b32_e32 v12, 16, v14
-; GFX10-NEXT: v_and_b32_e32 v13, 0xffff0000, v14
-; GFX10-NEXT: v_lshlrev_b32_e32 v14, 16, v15
-; GFX10-NEXT: v_and_b32_e32 v15, 0xffff0000, v15
+; GFX10-NEXT: v_and_b32_e32 v9, 0xffff0000, v36
+; GFX10-NEXT: v_and_b32_e32 v11, 0xffff0000, v37
+; GFX10-NEXT: v_and_b32_e32 v13, 0xffff0000, v38
+; GFX10-NEXT: v_and_b32_e32 v15, 0xffff0000, v39
; GFX10-NEXT: s_waitcnt vmcnt(1)
-; GFX10-NEXT: v_lshlrev_b32_e32 v16, 16, v20
-; GFX10-NEXT: v_and_b32_e32 v17, 0xffff0000, v20
-; GFX10-NEXT: v_lshlrev_b32_e32 v18, 16, v21
-; GFX10-NEXT: v_and_b32_e32 v19, 0xffff0000, v21
-; GFX10-NEXT: v_lshlrev_b32_e32 v20, 16, v22
-; GFX10-NEXT: v_and_b32_e32 v21, 0xffff0000, v22
-; GFX10-NEXT: v_lshlrev_b32_e32 v22, 16, v23
-; GFX10-NEXT: v_and_b32_e32 v23, 0xffff0000, v23
+; GFX10-NEXT: v_and_b32_e32 v17, 0xffff0000, v48
+; GFX10-NEXT: v_and_b32_e32 v19, 0xffff0000, v49
+; GFX10-NEXT: v_and_b32_e32 v21, 0xffff0000, v50
+; GFX10-NEXT: v_and_b32_e32 v23, 0xffff0000, v51
; GFX10-NEXT: s_waitcnt vmcnt(0)
-; GFX10-NEXT: v_lshlrev_b32_e32 v24, 16, v28
-; GFX10-NEXT: v_and_b32_e32 v25, 0xffff0000, v28
-; GFX10-NEXT: v_lshlrev_b32_e32 v26, 16, v29
-; GFX10-NEXT: v_and_b32_e32 v27, 0xffff0000, v29
-; GFX10-NEXT: v_lshlrev_b32_e32 v28, 16, v30
-; GFX10-NEXT: v_and_b32_e32 v29, 0xffff0000, v30
-; GFX10-NEXT: v_lshlrev_b32_e32 v30, 16, v31
-; GFX10-NEXT: v_and_b32_e32 v31, 0xffff0000, v31
+; GFX10-NEXT: v_and_b32_e32 v25, 0xffff0000, v52
+; GFX10-NEXT: v_and_b32_e32 v27, 0xffff0000, v53
+; GFX10-NEXT: v_and_b32_e32 v29, 0xffff0000, v54
+; GFX10-NEXT: v_and_b32_e32 v31, 0xffff0000, v55
+; GFX10-NEXT: v_lshlrev_b32_e32 v0, 16, v32
+; GFX10-NEXT: v_lshlrev_b32_e32 v2, 16, v33
+; GFX10-NEXT: v_lshlrev_b32_e32 v4, 16, v34
+; GFX10-NEXT: v_lshlrev_b32_e32 v6, 16, v35
+; GFX10-NEXT: v_lshlrev_b32_e32 v8, 16, v36
+; GFX10-NEXT: v_lshlrev_b32_e32 v10, 16, v37
+; GFX10-NEXT: v_lshlrev_b32_e32 v12, 16, v38
+; GFX10-NEXT: v_lshlrev_b32_e32 v14, 16, v39
+; GFX10-NEXT: v_lshlrev_b32_e32 v16, 16, v48
+; GFX10-NEXT: v_lshlrev_b32_e32 v18, 16, v49
+; GFX10-NEXT: v_lshlrev_b32_e32 v20, 16, v50
+; GFX10-NEXT: v_lshlrev_b32_e32 v22, 16, v51
+; GFX10-NEXT: v_lshlrev_b32_e32 v24, 16, v52
+; GFX10-NEXT: v_lshlrev_b32_e32 v26, 16, v53
+; GFX10-NEXT: v_lshlrev_b32_e32 v28, 16, v54
+; GFX10-NEXT: v_lshlrev_b32_e32 v30, 16, v55
; GFX10-NEXT: s_setpc_b64 s[30:31]
;
; GFX11-LABEL: global_extload_v32bf16_to_v32f32:
; GFX11: ; %bb.0:
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-NEXT: s_clause 0x3
-; GFX11-NEXT: global_load_b128 v[4:7], v[0:1], off
-; GFX11-NEXT: global_load_b128 v[12:15], v[0:1], off offset:16
-; GFX11-NEXT: global_load_b128 v[20:23], v[0:1], off offset:32
-; GFX11-NEXT: global_load_b128 v[28:31], v[0:1], off offset:48
+; GFX11-NEXT: global_load_b128 v[32:35], v[0:1], off
+; GFX11-NEXT: global_load_b128 v[36:39], v[0:1], off offset:16
+; GFX11-NEXT: global_load_b128 v[48:51], v[0:1], off offset:32
+; GFX11-NEXT: global_load_b128 v[52:55], v[0:1], off offset:48
; GFX11-NEXT: s_waitcnt vmcnt(3)
-; GFX11-NEXT: v_lshlrev_b32_e32 v0, 16, v4
-; GFX11-NEXT: v_and_b32_e32 v1, 0xffff0000, v4
-; GFX11-NEXT: v_lshlrev_b32_e32 v2, 16, v5
-; GFX11-NEXT: v_and_b32_e32 v3, 0xffff0000, v5
-; GFX11-NEXT: v_lshlrev_b32_e32 v4, 16, v6
-; GFX11-NEXT: v_and_b32_e32 v5, 0xffff0000, v6
-; GFX11-NEXT: v_lshlrev_b32_e32 v6, 16, v7
-; GFX11-NEXT: v_and_b32_e32 v7, 0xffff0000, v7
+; GFX11-NEXT: v_and_b32_e32 v1, 0xffff0000, v32
+; GFX11-NEXT: v_and_b32_e32 v3, 0xffff0000, v33
+; GFX11-NEXT: v_and_b32_e32 v5, 0xffff0000, v34
+; GFX11-NEXT: v_and_b32_e32 v7, 0xffff0000, v35
; GFX11-NEXT: s_waitcnt vmcnt(2)
-; GFX11-NEXT: v_lshlrev_b32_e32 v8, 16, v12
-; GFX11-NEXT: v_and_b32_e32 v9, 0xffff0000, v12
-; GFX11-NEXT: v_lshlrev_b32_e32 v10, 16, v13
-; GFX11-NEXT: v_and_b32_e32 v11, 0xffff0000, v13
-; GFX11-NEXT: v_lshlrev_b32_e32 v12, 16, v14
-; GFX11-NEXT: v_and_b32_e32 v13, 0xffff0000, v14
-; GFX11-NEXT: v_lshlrev_b32_e32 v14, 16, v15
-; GFX11-NEXT: v_and_b32_e32 v15, 0xffff0000, v15
+; GFX11-NEXT: v_and_b32_e32 v9, 0xffff0000, v36
+; GFX11-NEXT: v_and_b32_e32 v11, 0xffff0000, v37
+; GFX11-NEXT: v_and_b32_e32 v13, 0xffff0000, v38
+; GFX11-NEXT: v_and_b32_e32 v15, 0xffff0000, v39
; GFX11-NEXT: s_waitcnt vmcnt(1)
-; GFX11-NEXT: v_lshlrev_b32_e32 v16, 16, v20
-; GFX11-NEXT: v_and_b32_e32 v17, 0xffff0000, v20
-; GFX11-NEXT: v_lshlrev_b32_e32 v18, 16, v21
-; GFX11-NEXT: v_and_b32_e32 v19, 0xffff0000, v21
-; GFX11-NEXT: v_lshlrev_b32_e32 v20, 16, v22
-; GFX11-NEXT: v_and_b32_e32 v21, 0xffff0000, v22
-; GFX11-NEXT: v_lshlrev_b32_e32 v22, 16, v23
-; GFX11-NEXT: v_and_b32_e32 v23, 0xffff0000, v23
+; GFX11-NEXT: v_and_b32_e32 v17, 0xffff0000, v48
+; GFX11-NEXT: v_and_b32_e32 v19, 0xffff0000, v49
+; GFX11-NEXT: v_and_b32_e32 v21, 0xffff0000, v50
+; GFX11-NEXT: v_and_b32_e32 v23, 0xffff0000, v51
; GFX11-NEXT: s_waitcnt vmcnt(0)
-; GFX11-NEXT: v_lshlrev_b32_e32 v24, 16, v28
-; GFX11-NEXT: v_and_b32_e32 v25, 0xffff0000, v28
-; GFX11-NEXT: v_lshlrev_b32_e32 v26, 16, v29
-; GFX11-NEXT: v_and_b32_e32 v27, 0xffff0000, v29
-; GFX11-NEXT: v_lshlrev_b32_e32 v28, 16, v30
-; GFX11-NEXT: v_and_b32_e32 v29, 0xffff0000, v30
-; GFX11-NEXT: v_lshlrev_b32_e32 v30, 16, v31
-; GFX11-NEXT: v_and_b32_e32 v31, 0xffff0000, v31
+; GFX11-NEXT: v_and_b32_e32 v25, 0xffff0000, v52
+; GFX11-NEXT: v_and_b32_e32 v27, 0xffff0000, v53
+; GFX11-NEXT: v_and_b32_e32 v29, 0xffff0000, v54
+; GFX11-NEXT: v_and_b32_e32 v31, 0xffff0000, v55
+; GFX11-NEXT: v_lshlrev_b32_e32 v0, 16, v32
+; GFX11-NEXT: v_lshlrev_b32_e32 v2, 16, v33
+; GFX11-NEXT: v_lshlrev_b32_e32 v4, 16, v34
+; GFX11-NEXT: v_lshlrev_b32_e32 v6, 16, v35
+; GFX11-NEXT: v_lshlrev_b32_e32 v8, 16, v36
+; GFX11-NEXT: v_lshlrev_b32_e32 v10, 16, v37
+; GFX11-NEXT: v_lshlrev_b32_e32 v12, 16, v38
+; GFX11-NEXT: v_lshlrev_b32_e32 v14, 16, v39
+; GFX11-NEXT: v_lshlrev_b32_e32 v16, 16, v48
+; GFX11-NEXT: v_lshlrev_b32_e32 v18, 16, v49
+; GFX11-NEXT: v_lshlrev_b32_e32 v20, 16, v50
+; GFX11-NEXT: v_lshlrev_b32_e32 v22, 16, v51
+; GFX11-NEXT: v_lshlrev_b32_e32 v24, 16, v52
+; GFX11-NEXT: v_lshlrev_b32_e32 v26, 16, v53
+; GFX11-NEXT: v_lshlrev_b32_e32 v28, 16, v54
+; GFX11-NEXT: v_lshlrev_b32_e32 v30, 16, v55
; GFX11-NEXT: s_setpc_b64 s[30:31]
%load = load <32 x bfloat>, ptr addrspace(1) %ptr
%fpext = fpext <32 x bfloat> %load to <32 x float>
@@ -6511,20 +6491,16 @@ define <5 x double> @global_extload_v5bf16_to_v5f64(ptr addrspace(1) %ptr) {
; GFX8-LABEL: global_extload_v5bf16_to_v5f64:
; GFX8: ; %bb.0:
; GFX8-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX8-NEXT: flat_load_dwordx2 v[2:3], v[0:1]
-; GFX8-NEXT: v_add_u32_e32 v0, vcc, 8, v0
-; GFX8-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc
-; GFX8-NEXT: flat_load_ushort v8, v[0:1]
-; GFX8-NEXT: s_waitcnt vmcnt(1)
-; GFX8-NEXT: v_lshlrev_b32_e32 v0, 16, v2
-; GFX8-NEXT: v_and_b32_e32 v2, 0xffff0000, v2
-; GFX8-NEXT: v_lshlrev_b32_e32 v4, 16, v3
-; GFX8-NEXT: v_and_b32_e32 v6, 0xffff0000, v3
-; GFX8-NEXT: v_cvt_f64_f32_e32 v[0:1], v0
+; GFX8-NEXT: flat_load_dwordx4 v[0:3], v[0:1]
; GFX8-NEXT: s_waitcnt vmcnt(0)
-; GFX8-NEXT: v_lshlrev_b32_e32 v8, 16, v8
-; GFX8-NEXT: v_cvt_f64_f32_e32 v[2:3], v2
-; GFX8-NEXT: v_cvt_f64_f32_e32 v[4:5], v4
+; GFX8-NEXT: v_lshlrev_b32_e32 v3, 16, v0
+; GFX8-NEXT: v_and_b32_e32 v4, 0xffff0000, v0
+; GFX8-NEXT: v_lshlrev_b32_e32 v5, 16, v1
+; GFX8-NEXT: v_and_b32_e32 v6, 0xffff0000, v1
+; GFX8-NEXT: v_lshlrev_b32_e32 v8, 16, v2
+; GFX8-NEXT: v_cvt_f64_f32_e32 v[0:1], v3
+; GFX8-NEXT: v_cvt_f64_f32_e32 v[2:3], v4
+; GFX8-NEXT: v_cvt_f64_f32_e32 v[4:5], v5
; GFX8-NEXT: v_cvt_f64_f32_e32 v[6:7], v6
; GFX8-NEXT: v_cvt_f64_f32_e32 v[8:9], v8
; GFX8-NEXT: s_setpc_b64 s[30:31]
@@ -6532,34 +6508,29 @@ define <5 x double> @global_extload_v5bf16_to_v5f64(ptr addrspace(1) %ptr) {
; GFX9-LABEL: global_extload_v5bf16_to_v5f64:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX9-NEXT: global_load_ushort v4, v[0:1], off offset:8
-; GFX9-NEXT: global_load_dwordx2 v[2:3], v[0:1], off
-; GFX9-NEXT: s_waitcnt vmcnt(1)
-; GFX9-NEXT: v_lshlrev_b32_e32 v0, 16, v4
+; GFX9-NEXT: global_load_dwordx4 v[0:3], v[0:1], off
; GFX9-NEXT: s_waitcnt vmcnt(0)
-; GFX9-NEXT: v_lshlrev_b32_e32 v1, 16, v2
-; GFX9-NEXT: v_and_b32_e32 v2, 0xffff0000, v2
-; GFX9-NEXT: v_lshlrev_b32_e32 v4, 16, v3
-; GFX9-NEXT: v_and_b32_e32 v6, 0xffff0000, v3
-; GFX9-NEXT: v_cvt_f64_f32_e32 v[8:9], v0
-; GFX9-NEXT: v_cvt_f64_f32_e32 v[0:1], v1
-; GFX9-NEXT: v_cvt_f64_f32_e32 v[2:3], v2
-; GFX9-NEXT: v_cvt_f64_f32_e32 v[4:5], v4
+; GFX9-NEXT: v_lshlrev_b32_e32 v3, 16, v0
+; GFX9-NEXT: v_and_b32_e32 v4, 0xffff0000, v0
+; GFX9-NEXT: v_lshlrev_b32_e32 v5, 16, v1
+; GFX9-NEXT: v_and_b32_e32 v6, 0xffff0000, v1
+; GFX9-NEXT: v_lshlrev_b32_e32 v8, 16, v2
+; GFX9-NEXT: v_cvt_f64_f32_e32 v[0:1], v3
+; GFX9-NEXT: v_cvt_f64_f32_e32 v[2:3], v4
+; GFX9-NEXT: v_cvt_f64_f32_e32 v[4:5], v5
; GFX9-NEXT: v_cvt_f64_f32_e32 v[6:7], v6
+; GFX9-NEXT: v_cvt_f64_f32_e32 v[8:9], v8
; GFX9-NEXT: s_setpc_b64 s[30:31]
;
; GFX10-LABEL: global_extload_v5bf16_to_v5f64:
; GFX10: ; %bb.0:
; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX10-NEXT: s_clause 0x1
-; GFX10-NEXT: global_load_dwordx2 v[2:3], v[0:1], off
-; GFX10-NEXT: global_load_ushort v4, v[0:1], off offset:8
-; GFX10-NEXT: s_waitcnt vmcnt(1)
+; GFX10-NEXT: global_load_dwordx4 v[2:5], v[0:1], off
+; GFX10-NEXT: s_waitcnt vmcnt(0)
; GFX10-NEXT: v_lshlrev_b32_e32 v0, 16, v2
; GFX10-NEXT: v_and_b32_e32 v2, 0xffff0000, v2
; GFX10-NEXT: v_lshlrev_b32_e32 v5, 16, v3
; GFX10-NEXT: v_and_b32_e32 v6, 0xffff0000, v3
-; GFX10-NEXT: s_waitcnt vmcnt(0)
; GFX10-NEXT: v_lshlrev_b32_e32 v8, 16, v4
; GFX10-NEXT: v_cvt_f64_f32_e32 v[0:1], v0
; GFX10-NEXT: v_cvt_f64_f32_e32 v[2:3], v2
@@ -6571,15 +6542,12 @@ define <5 x double> @global_extload_v5bf16_to_v5f64(ptr addrspace(1) %ptr) {
; GFX11-LABEL: global_extload_v5bf16_to_v5f64:
; GFX11: ; %bb.0:
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: s_clause 0x1
-; GFX11-NEXT: global_load_b64 v[2:3], v[0:1], off
-; GFX11-NEXT: global_load_u16 v4, v[0:1], off offset:8
-; GFX11-NEXT: s_waitcnt vmcnt(1)
+; GFX11-NEXT: global_load_b128 v[2:5], v[0:1], off
+; GFX11-NEXT: s_waitcnt vmcnt(0)
; GFX11-NEXT: v_lshlrev_b32_e32 v0, 16, v2
; GFX11-NEXT: v_and_b32_e32 v2, 0xffff0000, v2
; GFX11-NEXT: v_lshlrev_b32_e32 v5, 16, v3
; GFX11-NEXT: v_and_b32_e32 v6, 0xffff0000, v3
-; GFX11-NEXT: s_waitcnt vmcnt(0)
; GFX11-NEXT: v_lshlrev_b32_e32 v8, 16, v4
; GFX11-NEXT: v_cvt_f64_f32_e32 v[0:1], v0
; GFX11-NEXT: v_cvt_f64_f32_e32 v[2:3], v2
@@ -9865,480 +9833,483 @@ define <32 x bfloat> @v_fadd_v32bf16(<32 x bfloat> %a, <32 x bfloat> %b) {
; GFX8-LABEL: v_fadd_v32bf16:
; GFX8: ; %bb.0:
; GFX8-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX8-NEXT: v_lshlrev_b32_e32 v31, 16, v30
-; GFX8-NEXT: v_lshlrev_b32_e32 v32, 16, v14
-; GFX8-NEXT: v_and_b32_e32 v30, 0xffff0000, v30
-; GFX8-NEXT: v_and_b32_e32 v14, 0xffff0000, v14
-; GFX8-NEXT: v_add_f32_e32 v31, v32, v31
-; GFX8-NEXT: v_add_f32_e32 v30, v14, v30
-; GFX8-NEXT: v_lshlrev_b32_e32 v14, 16, v29
-; GFX8-NEXT: v_lshlrev_b32_e32 v32, 16, v13
-; GFX8-NEXT: v_and_b32_e32 v29, 0xffff0000, v29
-; GFX8-NEXT: v_and_b32_e32 v13, 0xffff0000, v13
-; GFX8-NEXT: v_add_f32_e32 v14, v32, v14
-; GFX8-NEXT: v_add_f32_e32 v13, v13, v29
-; GFX8-NEXT: v_lshlrev_b32_e32 v29, 16, v28
-; GFX8-NEXT: v_lshlrev_b32_e32 v32, 16, v12
-; GFX8-NEXT: v_and_b32_e32 v28, 0xffff0000, v28
-; GFX8-NEXT: v_and_b32_e32 v12, 0xffff0000, v12
-; GFX8-NEXT: v_add_f32_e32 v29, v32, v29
-; GFX8-NEXT: v_add_f32_e32 v12, v12, v28
-; GFX8-NEXT: v_lshlrev_b32_e32 v28, 16, v27
-; GFX8-NEXT: v_lshlrev_b32_e32 v32, 16, v11
-; GFX8-NEXT: v_and_b32_e32 v27, 0xffff0000, v27
-; GFX8-NEXT: v_and_b32_e32 v11, 0xffff0000, v11
-; GFX8-NEXT: v_add_f32_e32 v28, v32, v28
-; GFX8-NEXT: v_add_f32_e32 v11, v11, v27
-; GFX8-NEXT: v_lshlrev_b32_e32 v27, 16, v26
-; GFX8-NEXT: v_lshlrev_b32_e32 v32, 16, v10
-; GFX8-NEXT: v_and_b32_e32 v26, 0xffff0000, v26
-; GFX8-NEXT: v_and_b32_e32 v10, 0xffff0000, v10
-; GFX8-NEXT: v_add_f32_e32 v27, v32, v27
-; GFX8-NEXT: v_add_f32_e32 v10, v10, v26
-; GFX8-NEXT: v_lshlrev_b32_e32 v26, 16, v25
-; GFX8-NEXT: v_lshlrev_b32_e32 v32, 16, v9
-; GFX8-NEXT: v_and_b32_e32 v25, 0xffff0000, v25
-; GFX8-NEXT: v_and_b32_e32 v9, 0xffff0000, v9
-; GFX8-NEXT: v_add_f32_e32 v26, v32, v26
-; GFX8-NEXT: v_add_f32_e32 v9, v9, v25
-; GFX8-NEXT: v_lshlrev_b32_e32 v25, 16, v24
-; GFX8-NEXT: v_lshlrev_b32_e32 v32, 16, v8
-; GFX8-NEXT: v_and_b32_e32 v24, 0xffff0000, v24
-; GFX8-NEXT: v_and_b32_e32 v8, 0xffff0000, v8
-; GFX8-NEXT: v_add_f32_e32 v8, v8, v24
-; GFX8-NEXT: buffer_load_dword v24, off, s[0:3], s32
-; GFX8-NEXT: v_add_f32_e32 v25, v32, v25
-; GFX8-NEXT: v_lshlrev_b32_e32 v32, 16, v15
-; GFX8-NEXT: v_and_b32_e32 v15, 0xffff0000, v15
-; GFX8-NEXT: v_lshrrev_b32_e32 v8, 16, v8
-; GFX8-NEXT: v_lshrrev_b32_e32 v9, 16, v9
-; GFX8-NEXT: v_lshrrev_b32_e32 v10, 16, v10
-; GFX8-NEXT: v_lshrrev_b32_e32 v13, 16, v13
-; GFX8-NEXT: v_lshrrev_b32_e32 v12, 16, v12
-; GFX8-NEXT: v_lshrrev_b32_e32 v11, 16, v11
-; GFX8-NEXT: v_alignbit_b32 v8, v8, v25, 16
-; GFX8-NEXT: v_alignbit_b32 v9, v9, v26, 16
-; GFX8-NEXT: v_alignbit_b32 v10, v10, v27, 16
-; GFX8-NEXT: v_alignbit_b32 v11, v11, v28, 16
-; GFX8-NEXT: v_alignbit_b32 v12, v12, v29, 16
-; GFX8-NEXT: v_alignbit_b32 v13, v13, v14, 16
-; GFX8-NEXT: s_waitcnt vmcnt(0)
-; GFX8-NEXT: v_lshlrev_b32_e32 v33, 16, v24
-; GFX8-NEXT: v_and_b32_e32 v24, 0xffff0000, v24
-; GFX8-NEXT: v_add_f32_e32 v32, v32, v33
-; GFX8-NEXT: v_add_f32_e32 v15, v15, v24
-; GFX8-NEXT: v_lshlrev_b32_e32 v24, 16, v23
-; GFX8-NEXT: v_lshlrev_b32_e32 v33, 16, v7
-; GFX8-NEXT: v_and_b32_e32 v23, 0xffff0000, v23
-; GFX8-NEXT: v_and_b32_e32 v7, 0xffff0000, v7
-; GFX8-NEXT: v_add_f32_e32 v24, v33, v24
-; GFX8-NEXT: v_add_f32_e32 v7, v7, v23
-; GFX8-NEXT: v_lshlrev_b32_e32 v23, 16, v22
-; GFX8-NEXT: v_lshlrev_b32_e32 v33, 16, v6
-; GFX8-NEXT: v_and_b32_e32 v22, 0xffff0000, v22
-; GFX8-NEXT: v_and_b32_e32 v6, 0xffff0000, v6
-; GFX8-NEXT: v_add_f32_e32 v23, v33, v23
-; GFX8-NEXT: v_add_f32_e32 v6, v6, v22
-; GFX8-NEXT: v_lshlrev_b32_e32 v22, 16, v21
-; GFX8-NEXT: v_lshlrev_b32_e32 v33, 16, v5
-; GFX8-NEXT: v_and_b32_e32 v21, 0xffff0000, v21
-; GFX8-NEXT: v_and_b32_e32 v5, 0xffff0000, v5
-; GFX8-NEXT: v_add_f32_e32 v22, v33, v22
-; GFX8-NEXT: v_add_f32_e32 v5, v5, v21
-; GFX8-NEXT: v_lshlrev_b32_e32 v21, 16, v20
-; GFX8-NEXT: v_lshlrev_b32_e32 v33, 16, v4
-; GFX8-NEXT: v_and_b32_e32 v20, 0xffff0000, v20
-; GFX8-NEXT: v_and_b32_e32 v4, 0xffff0000, v4
-; GFX8-NEXT: v_add_f32_e32 v21, v33, v21
-; GFX8-NEXT: v_add_f32_e32 v4, v4, v20
-; GFX8-NEXT: v_lshlrev_b32_e32 v20, 16, v19
-; GFX8-NEXT: v_lshlrev_b32_e32 v33, 16, v3
-; GFX8-NEXT: v_and_b32_e32 v19, 0xffff0000, v19
-; GFX8-NEXT: v_and_b32_e32 v3, 0xffff0000, v3
-; GFX8-NEXT: v_add_f32_e32 v20, v33, v20
-; GFX8-NEXT: v_add_f32_e32 v3, v3, v19
-; GFX8-NEXT: v_lshlrev_b32_e32 v19, 16, v18
-; GFX8-NEXT: v_lshlrev_b32_e32 v33, 16, v2
-; GFX8-NEXT: v_and_b32_e32 v18, 0xffff0000, v18
-; GFX8-NEXT: v_and_b32_e32 v2, 0xffff0000, v2
-; GFX8-NEXT: v_add_f32_e32 v19, v33, v19
-; GFX8-NEXT: v_add_f32_e32 v2, v2, v18
-; GFX8-NEXT: v_lshlrev_b32_e32 v18, 16, v17
-; GFX8-NEXT: v_lshlrev_b32_e32 v33, 16, v1
-; GFX8-NEXT: v_and_b32_e32 v17, 0xffff0000, v17
-; GFX8-NEXT: v_and_b32_e32 v1, 0xffff0000, v1
-; GFX8-NEXT: v_add_f32_e32 v18, v33, v18
-; GFX8-NEXT: v_add_f32_e32 v1, v1, v17
-; GFX8-NEXT: v_lshlrev_b32_e32 v17, 16, v16
-; GFX8-NEXT: v_lshlrev_b32_e32 v33, 16, v0
+; GFX8-NEXT: v_lshlrev_b32_e32 v31, 16, v16
+; GFX8-NEXT: v_lshlrev_b32_e32 v32, 16, v0
; GFX8-NEXT: v_and_b32_e32 v16, 0xffff0000, v16
; GFX8-NEXT: v_and_b32_e32 v0, 0xffff0000, v0
; GFX8-NEXT: v_add_f32_e32 v0, v0, v16
-; GFX8-NEXT: v_add_f32_e32 v17, v33, v17
+; GFX8-NEXT: v_add_f32_e32 v31, v32, v31
; GFX8-NEXT: v_lshrrev_b32_e32 v0, 16, v0
+; GFX8-NEXT: v_alignbit_b32 v0, v0, v31, 16
+; GFX8-NEXT: v_lshlrev_b32_e32 v16, 16, v17
+; GFX8-NEXT: v_lshlrev_b32_e32 v31, 16, v1
+; GFX8-NEXT: v_and_b32_e32 v17, 0xffff0000, v17
+; GFX8-NEXT: v_and_b32_e32 v1, 0xffff0000, v1
+; GFX8-NEXT: v_add_f32_e32 v1, v1, v17
+; GFX8-NEXT: v_add_f32_e32 v16, v31, v16
; GFX8-NEXT: v_lshrrev_b32_e32 v1, 16, v1
+; GFX8-NEXT: v_alignbit_b32 v1, v1, v16, 16
+; GFX8-NEXT: v_lshlrev_b32_e32 v16, 16, v18
+; GFX8-NEXT: v_lshlrev_b32_e32 v17, 16, v2
+; GFX8-NEXT: v_add_f32_e32 v16, v17, v16
+; GFX8-NEXT: v_and_b32_e32 v17, 0xffff0000, v18
+; GFX8-NEXT: v_and_b32_e32 v2, 0xffff0000, v2
+; GFX8-NEXT: v_add_f32_e32 v2, v2, v17
; GFX8-NEXT: v_lshrrev_b32_e32 v2, 16, v2
+; GFX8-NEXT: v_alignbit_b32 v2, v2, v16, 16
+; GFX8-NEXT: v_lshlrev_b32_e32 v16, 16, v19
+; GFX8-NEXT: v_lshlrev_b32_e32 v17, 16, v3
+; GFX8-NEXT: v_add_f32_e32 v16, v17, v16
+; GFX8-NEXT: v_and_b32_e32 v17, 0xffff0000, v19
+; GFX8-NEXT: v_and_b32_e32 v3, 0xffff0000, v3
+; GFX8-NEXT: v_add_f32_e32 v3, v3, v17
; GFX8-NEXT: v_lshrrev_b32_e32 v3, 16, v3
+; GFX8-NEXT: v_alignbit_b32 v3, v3, v16, 16
+; GFX8-NEXT: v_lshlrev_b32_e32 v16, 16, v20
+; GFX8-NEXT: v_lshlrev_b32_e32 v17, 16, v4
+; GFX8-NEXT: v_add_f32_e32 v16, v17, v16
+; GFX8-NEXT: v_and_b32_e32 v17, 0xffff0000, v20
+; GFX8-NEXT: v_and_b32_e32 v4, 0xffff0000, v4
+; GFX8-NEXT: v_add_f32_e32 v4, v4, v17
+; GFX8-NEXT: buffer_load_dword v17, off, s[0:3], s32
; GFX8-NEXT: v_lshrrev_b32_e32 v4, 16, v4
+; GFX8-NEXT: v_alignbit_b32 v4, v4, v16, 16
+; GFX8-NEXT: v_lshlrev_b32_e32 v16, 16, v21
+; GFX8-NEXT: v_lshlrev_b32_e32 v18, 16, v5
+; GFX8-NEXT: v_add_f32_e32 v16, v18, v16
+; GFX8-NEXT: v_and_b32_e32 v18, 0xffff0000, v21
+; GFX8-NEXT: v_and_b32_e32 v5, 0xffff0000, v5
+; GFX8-NEXT: v_add_f32_e32 v5, v5, v18
; GFX8-NEXT: v_lshrrev_b32_e32 v5, 16, v5
+; GFX8-NEXT: v_alignbit_b32 v5, v5, v16, 16
+; GFX8-NEXT: v_lshlrev_b32_e32 v16, 16, v22
+; GFX8-NEXT: v_lshlrev_b32_e32 v18, 16, v6
+; GFX8-NEXT: v_add_f32_e32 v16, v18, v16
+; GFX8-NEXT: v_and_b32_e32 v18, 0xffff0000, v22
+; GFX8-NEXT: v_and_b32_e32 v6, 0xffff0000, v6
+; GFX8-NEXT: v_add_f32_e32 v6, v6, v18
; GFX8-NEXT: v_lshrrev_b32_e32 v6, 16, v6
+; GFX8-NEXT: v_alignbit_b32 v6, v6, v16, 16
+; GFX8-NEXT: v_lshlrev_b32_e32 v16, 16, v23
+; GFX8-NEXT: v_lshlrev_b32_e32 v18, 16, v7
+; GFX8-NEXT: v_add_f32_e32 v16, v18, v16
+; GFX8-NEXT: v_and_b32_e32 v18, 0xffff0000, v23
+; GFX8-NEXT: v_and_b32_e32 v7, 0xffff0000, v7
+; GFX8-NEXT: v_add_f32_e32 v7, v7, v18
; GFX8-NEXT: v_lshrrev_b32_e32 v7, 16, v7
+; GFX8-NEXT: v_alignbit_b32 v7, v7, v16, 16
+; GFX8-NEXT: v_lshlrev_b32_e32 v16, 16, v24
+; GFX8-NEXT: v_lshlrev_b32_e32 v18, 16, v8
+; GFX8-NEXT: v_add_f32_e32 v16, v18, v16
+; GFX8-NEXT: v_and_b32_e32 v18, 0xffff0000, v24
+; GFX8-NEXT: v_and_b32_e32 v8, 0xffff0000, v8
+; GFX8-NEXT: v_add_f32_e32 v8, v8, v18
+; GFX8-NEXT: v_lshrrev_b32_e32 v8, 16, v8
+; GFX8-NEXT: v_alignbit_b32 v8, v8, v16, 16
+; GFX8-NEXT: v_lshlrev_b32_e32 v16, 16, v25
+; GFX8-NEXT: v_lshlrev_b32_e32 v18, 16, v9
+; GFX8-NEXT: v_add_f32_e32 v16, v18, v16
+; GFX8-NEXT: v_and_b32_e32 v18, 0xffff0000, v25
+; GFX8-NEXT: v_and_b32_e32 v9, 0xffff0000, v9
+; GFX8-NEXT: v_add_f32_e32 v9, v9, v18
+; GFX8-NEXT: v_lshrrev_b32_e32 v9, 16, v9
+; GFX8-NEXT: v_alignbit_b32 v9, v9, v16, 16
+; GFX8-NEXT: v_lshlrev_b32_e32 v16, 16, v26
+; GFX8-NEXT: v_lshlrev_b32_e32 v18, 16, v10
+; GFX8-NEXT: v_add_f32_e32 v16, v18, v16
+; GFX8-NEXT: v_and_b32_e32 v18, 0xffff0000, v26
+; GFX8-NEXT: v_and_b32_e32 v10, 0xffff0000, v10
+; GFX8-NEXT: v_add_f32_e32 v10, v10, v18
+; GFX8-NEXT: v_lshrrev_b32_e32 v10, 16, v10
+; GFX8-NEXT: v_alignbit_b32 v10, v10, v16, 16
+; GFX8-NEXT: v_lshlrev_b32_e32 v16, 16, v27
+; GFX8-NEXT: v_lshlrev_b32_e32 v18, 16, v11
+; GFX8-NEXT: v_add_f32_e32 v16, v18, v16
+; GFX8-NEXT: v_and_b32_e32 v18, 0xffff0000, v27
+; GFX8-NEXT: v_and_b32_e32 v11, 0xffff0000, v11
+; GFX8-NEXT: v_add_f32_e32 v11, v11, v18
+; GFX8-NEXT: v_lshrrev_b32_e32 v11, 16, v11
+; GFX8-NEXT: v_alignbit_b32 v11, v11, v16, 16
+; GFX8-NEXT: v_lshlrev_b32_e32 v16, 16, v28
+; GFX8-NEXT: v_lshlrev_b32_e32 v18, 16, v12
+; GFX8-NEXT: v_add_f32_e32 v16, v18, v16
+; GFX8-NEXT: v_and_b32_e32 v18, 0xffff0000, v28
+; GFX8-NEXT: v_and_b32_e32 v12, 0xffff0000, v12
+; GFX8-NEXT: v_add_f32_e32 v12, v12, v18
+; GFX8-NEXT: v_lshrrev_b32_e32 v12, 16, v12
+; GFX8-NEXT: v_alignbit_b32 v12, v12, v16, 16
+; GFX8-NEXT: v_lshlrev_b32_e32 v16, 16, v29
+; GFX8-NEXT: v_lshlrev_b32_e32 v18, 16, v13
+; GFX8-NEXT: v_add_f32_e32 v16, v18, v16
+; GFX8-NEXT: v_and_b32_e32 v18, 0xffff0000, v29
+; GFX8-NEXT: v_and_b32_e32 v13, 0xffff0000, v13
+; GFX8-NEXT: v_add_f32_e32 v13, v13, v18
+; GFX8-NEXT: v_lshrrev_b32_e32 v13, 16, v13
+; GFX8-NEXT: v_alignbit_b32 v13, v13, v16, 16
+; GFX8-NEXT: v_lshlrev_b32_e32 v16, 16, v30
+; GFX8-NEXT: v_lshlrev_b32_e32 v18, 16, v14
+; GFX8-NEXT: v_add_f32_e32 v16, v18, v16
+; GFX8-NEXT: v_and_b32_e32 v18, 0xffff0000, v30
+; GFX8-NEXT: v_and_b32_e32 v14, 0xffff0000, v14
+; GFX8-NEXT: v_add_f32_e32 v14, v14, v18
+; GFX8-NEXT: v_lshrrev_b32_e32 v14, 16, v14
+; GFX8-NEXT: v_alignbit_b32 v14, v14, v16, 16
+; GFX8-NEXT: s_waitcnt vmcnt(0)
+; GFX8-NEXT: v_lshlrev_b32_e32 v16, 16, v17
+; GFX8-NEXT: v_lshlrev_b32_e32 v18, 16, v15
+; GFX8-NEXT: v_and_b32_e32 v17, 0xffff0000, v17
+; GFX8-NEXT: v_and_b32_e32 v15, 0xffff0000, v15
+; GFX8-NEXT: v_add_f32_e32 v15, v15, v17
+; GFX8-NEXT: v_add_f32_e32 v16, v18, v16
; GFX8-NEXT: v_lshrrev_b32_e32 v15, 16, v15
-; GFX8-NEXT: v_lshrrev_b32_e32 v16, 16, v30
-; GFX8-NEXT: v_alignbit_b32 v0, v0, v17, 16
-; GFX8-NEXT: v_alignbit_b32 v1, v1, v18, 16
-; GFX8-NEXT: v_alignbit_b32 v2, v2, v19, 16
-; GFX8-NEXT: v_alignbit_b32 v3, v3, v20, 16
-; GFX8-NEXT: v_alignbit_b32 v4, v4, v21, 16
-; GFX8-NEXT: v_alignbit_b32 v5, v5, v22, 16
-; GFX8-NEXT: v_alignbit_b32 v6, v6, v23, 16
-; GFX8-NEXT: v_alignbit_b32 v7, v7, v24, 16
-; GFX8-NEXT: v_alignbit_b32 v14, v16, v31, 16
-; GFX8-NEXT: v_alignbit_b32 v15, v15, v32, 16
+; GFX8-NEXT: v_alignbit_b32 v15, v15, v16, 16
; GFX8-NEXT: s_setpc_b64 s[30:31]
;
; GFX9-LABEL: v_fadd_v32bf16:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX9-NEXT: v_lshlrev_b32_e32 v31, 16, v30
-; GFX9-NEXT: v_lshlrev_b32_e32 v32, 16, v14
-; GFX9-NEXT: v_and_b32_e32 v30, 0xffff0000, v30
-; GFX9-NEXT: v_and_b32_e32 v14, 0xffff0000, v14
+; GFX9-NEXT: v_lshlrev_b32_e32 v31, 16, v16
+; GFX9-NEXT: v_lshlrev_b32_e32 v32, 16, v0
+; GFX9-NEXT: v_and_b32_e32 v16, 0xffff0000, v16
+; GFX9-NEXT: v_and_b32_e32 v0, 0xffff0000, v0
; GFX9-NEXT: v_add_f32_e32 v31, v32, v31
-; GFX9-NEXT: v_add_f32_e32 v14, v14, v30
-; GFX9-NEXT: v_lshlrev_b32_e32 v30, 16, v29
-; GFX9-NEXT: v_lshlrev_b32_e32 v32, 16, v13
-; GFX9-NEXT: v_and_b32_e32 v29, 0xffff0000, v29
-; GFX9-NEXT: v_and_b32_e32 v13, 0xffff0000, v13
-; GFX9-NEXT: v_add_f32_e32 v30, v32, v30
-; GFX9-NEXT: v_add_f32_e32 v13, v13, v29
-; GFX9-NEXT: v_lshlrev_b32_e32 v29, 16, v28
-; GFX9-NEXT: v_lshlrev_b32_e32 v32, 16, v12
-; GFX9-NEXT: v_and_b32_e32 v28, 0xffff0000, v28
-; GFX9-NEXT: v_and_b32_e32 v12, 0xffff0000, v12
-; GFX9-NEXT: v_add_f32_e32 v29, v32, v29
-; GFX9-NEXT: v_add_f32_e32 v12, v12, v28
-; GFX9-NEXT: v_lshlrev_b32_e32 v28, 16, v27
-; GFX9-NEXT: v_lshlrev_b32_e32 v32, 16, v11
-; GFX9-NEXT: v_and_b32_e32 v27, 0xffff0000, v27
-; GFX9-NEXT: v_and_b32_e32 v11, 0xffff0000, v11
-; GFX9-NEXT: v_add_f32_e32 v28, v32, v28
-; GFX9-NEXT: v_add_f32_e32 v11, v11, v27
-; GFX9-NEXT: v_lshlrev_b32_e32 v27, 16, v26
-; GFX9-NEXT: v_lshlrev_b32_e32 v32, 16, v10
-; GFX9-NEXT: v_and_b32_e32 v26, 0xffff0000, v26
-; GFX9-NEXT: v_and_b32_e32 v10, 0xffff0000, v10
-; GFX9-NEXT: v_add_f32_e32 v27, v32, v27
-; GFX9-NEXT: v_add_f32_e32 v10, v10, v26
-; GFX9-NEXT: v_lshlrev_b32_e32 v26, 16, v25
-; GFX9-NEXT: v_lshlrev_b32_e32 v32, 16, v9
-; GFX9-NEXT: v_and_b32_e32 v25, 0xffff0000, v25
-; GFX9-NEXT: v_and_b32_e32 v9, 0xffff0000, v9
-; GFX9-NEXT: v_add_f32_e32 v26, v32, v26
-; GFX9-NEXT: v_add_f32_e32 v9, v9, v25
-; GFX9-NEXT: v_lshlrev_b32_e32 v25, 16, v24
-; GFX9-NEXT: v_lshlrev_b32_e32 v32, 16, v8
-; GFX9-NEXT: v_and_b32_e32 v24, 0xffff0000, v24
-; GFX9-NEXT: v_and_b32_e32 v8, 0xffff0000, v8
-; GFX9-NEXT: v_add_f32_e32 v8, v8, v24
-; GFX9-NEXT: buffer_load_dword v24, off, s[0:3], s32
-; GFX9-NEXT: v_add_f32_e32 v25, v32, v25
-; GFX9-NEXT: v_lshlrev_b32_e32 v32, 16, v15
-; GFX9-NEXT: v_and_b32_e32 v15, 0xffff0000, v15
+; GFX9-NEXT: v_add_f32_e32 v0, v0, v16
; GFX9-NEXT: s_mov_b32 s4, 0x7060302
-; GFX9-NEXT: v_perm_b32 v8, v8, v25, s4
-; GFX9-NEXT: v_perm_b32 v9, v9, v26, s4
-; GFX9-NEXT: v_perm_b32 v10, v10, v27, s4
-; GFX9-NEXT: v_perm_b32 v11, v11, v28, s4
-; GFX9-NEXT: v_perm_b32 v12, v12, v29, s4
-; GFX9-NEXT: v_perm_b32 v13, v13, v30, s4
-; GFX9-NEXT: v_perm_b32 v14, v14, v31, s4
-; GFX9-NEXT: s_waitcnt vmcnt(0)
-; GFX9-NEXT: v_lshlrev_b32_e32 v33, 16, v24
-; GFX9-NEXT: v_and_b32_e32 v24, 0xffff0000, v24
-; GFX9-NEXT: v_add_f32_e32 v32, v32, v33
-; GFX9-NEXT: v_add_f32_e32 v15, v15, v24
-; GFX9-NEXT: v_lshlrev_b32_e32 v24, 16, v23
-; GFX9-NEXT: v_lshlrev_b32_e32 v33, 16, v7
-; GFX9-NEXT: v_and_b32_e32 v23, 0xffff0000, v23
-; GFX9-NEXT: v_and_b32_e32 v7, 0xffff0000, v7
-; GFX9-NEXT: v_add_f32_e32 v24, v33, v24
-; GFX9-NEXT: v_add_f32_e32 v7, v7, v23
-; GFX9-NEXT: v_lshlrev_b32_e32 v23, 16, v22
-; GFX9-NEXT: v_lshlrev_b32_e32 v33, 16, v6
-; GFX9-NEXT: v_and_b32_e32 v22, 0xffff0000, v22
-; GFX9-NEXT: v_and_b32_e32 v6, 0xffff0000, v6
-; GFX9-NEXT: v_add_f32_e32 v23, v33, v23
-; GFX9-NEXT: v_add_f32_e32 v6, v6, v22
-; GFX9-NEXT: v_lshlrev_b32_e32 v22, 16, v21
-; GFX9-NEXT: v_lshlrev_b32_e32 v33, 16, v5
-; GFX9-NEXT: v_and_b32_e32 v21, 0xffff0000, v21
-; GFX9-NEXT: v_and_b32_e32 v5, 0xffff0000, v5
-; GFX9-NEXT: v_add_f32_e32 v22, v33, v22
-; GFX9-NEXT: v_add_f32_e32 v5, v5, v21
-; GFX9-NEXT: v_lshlrev_b32_e32 v21, 16, v20
-; GFX9-NEXT: v_lshlrev_b32_e32 v33, 16, v4
-; GFX9-NEXT: v_and_b32_e32 v20, 0xffff0000, v20
-; GFX9-NEXT: v_and_b32_e32 v4, 0xffff0000, v4
-; GFX9-NEXT: v_add_f32_e32 v21, v33, v21
-; GFX9-NEXT: v_add_f32_e32 v4, v4, v20
-; GFX9-NEXT: v_lshlrev_b32_e32 v20, 16, v19
-; GFX9-NEXT: v_lshlrev_b32_e32 v33, 16, v3
-; GFX9-NEXT: v_and_b32_e32 v19, 0xffff0000, v19
-; GFX9-NEXT: v_and_b32_e32 v3, 0xffff0000, v3
-; GFX9-NEXT: v_add_f32_e32 v20, v33, v20
-; GFX9-NEXT: v_add_f32_e32 v3, v3, v19
-; GFX9-NEXT: v_lshlrev_b32_e32 v19, 16, v18
-; GFX9-NEXT: v_lshlrev_b32_e32 v33, 16, v2
-; GFX9-NEXT: v_and_b32_e32 v18, 0xffff0000, v18
-; GFX9-NEXT: v_and_b32_e32 v2, 0xffff0000, v2
-; GFX9-NEXT: v_add_f32_e32 v19, v33, v19
-; GFX9-NEXT: v_add_f32_e32 v2, v2, v18
-; GFX9-NEXT: v_lshlrev_b32_e32 v18, 16, v17
-; GFX9-NEXT: v_lshlrev_b32_e32 v33, 16, v1
+; GFX9-NEXT: v_perm_b32 v0, v0, v31, s4
+; GFX9-NEXT: v_lshlrev_b32_e32 v16, 16, v17
+; GFX9-NEXT: v_lshlrev_b32_e32 v31, 16, v1
; GFX9-NEXT: v_and_b32_e32 v17, 0xffff0000, v17
; GFX9-NEXT: v_and_b32_e32 v1, 0xffff0000, v1
-; GFX9-NEXT: v_add_f32_e32 v18, v33, v18
+; GFX9-NEXT: v_add_f32_e32 v16, v31, v16
; GFX9-NEXT: v_add_f32_e32 v1, v1, v17
-; GFX9-NEXT: v_lshlrev_b32_e32 v17, 16, v16
-; GFX9-NEXT: v_lshlrev_b32_e32 v33, 16, v0
-; GFX9-NEXT: v_and_b32_e32 v16, 0xffff0000, v16
-; GFX9-NEXT: v_and_b32_e32 v0, 0xffff0000, v0
-; GFX9-NEXT: v_add_f32_e32 v17, v33, v17
-; GFX9-NEXT: v_add_f32_e32 v0, v0, v16
-; GFX9-NEXT: v_perm_b32 v0, v0, v17, s4
-; GFX9-NEXT: v_perm_b32 v1, v1, v18, s4
-; GFX9-NEXT: v_perm_b32 v2, v2, v19, s4
-; GFX9-NEXT: v_perm_b32 v3, v3, v20, s4
-; GFX9-NEXT: v_perm_b32 v4, v4, v21, s4
-; GFX9-NEXT: v_perm_b32 v5, v5, v22, s4
-; GFX9-NEXT: v_perm_b32 v6, v6, v23, s4
-; GFX9-NEXT: v_perm_b32 v7, v7, v24, s4
-; GFX9-NEXT: v_perm_b32 v15, v15, v32, s4
+; GFX9-NEXT: v_perm_b32 v1, v1, v16, s4
+; GFX9-NEXT: v_lshlrev_b32_e32 v16, 16, v18
+; GFX9-NEXT: v_lshlrev_b32_e32 v17, 16, v2
+; GFX9-NEXT: v_add_f32_e32 v16, v17, v16
+; GFX9-NEXT: v_and_b32_e32 v17, 0xffff0000, v18
+; GFX9-NEXT: buffer_load_dword v18, off, s[0:3], s32
+; GFX9-NEXT: v_and_b32_e32 v2, 0xffff0000, v2
+; GFX9-NEXT: v_add_f32_e32 v2, v2, v17
+; GFX9-NEXT: v_perm_b32 v2, v2, v16, s4
+; GFX9-NEXT: v_lshlrev_b32_e32 v16, 16, v19
+; GFX9-NEXT: v_lshlrev_b32_e32 v17, 16, v3
+; GFX9-NEXT: v_add_f32_e32 v16, v17, v16
+; GFX9-NEXT: v_and_b32_e32 v17, 0xffff0000, v19
+; GFX9-NEXT: v_and_b32_e32 v3, 0xffff0000, v3
+; GFX9-NEXT: v_add_f32_e32 v3, v3, v17
+; GFX9-NEXT: v_perm_b32 v3, v3, v16, s4
+; GFX9-NEXT: v_lshlrev_b32_e32 v16, 16, v20
+; GFX9-NEXT: v_lshlrev_b32_e32 v17, 16, v4
+; GFX9-NEXT: v_add_f32_e32 v16, v17, v16
+; GFX9-NEXT: v_and_b32_e32 v17, 0xffff0000, v20
+; GFX9-NEXT: v_and_b32_e32 v4, 0xffff0000, v4
+; GFX9-NEXT: v_add_f32_e32 v4, v4, v17
+; GFX9-NEXT: v_perm_b32 v4, v4, v16, s4
+; GFX9-NEXT: v_lshlrev_b32_e32 v16, 16, v21
+; GFX9-NEXT: v_lshlrev_b32_e32 v17, 16, v5
+; GFX9-NEXT: v_add_f32_e32 v16, v17, v16
+; GFX9-NEXT: v_and_b32_e32 v17, 0xffff0000, v21
+; GFX9-NEXT: v_and_b32_e32 v5, 0xffff0000, v5
+; GFX9-NEXT: v_add_f32_e32 v5, v5, v17
+; GFX9-NEXT: v_perm_b32 v5, v5, v16, s4
+; GFX9-NEXT: v_lshlrev_b32_e32 v16, 16, v22
+; GFX9-NEXT: v_lshlrev_b32_e32 v17, 16, v6
+; GFX9-NEXT: v_add_f32_e32 v16, v17, v16
+; GFX9-NEXT: v_and_b32_e32 v17, 0xffff0000, v22
+; GFX9-NEXT: v_and_b32_e32 v6, 0xffff0000, v6
+; GFX9-NEXT: v_add_f32_e32 v6, v6, v17
+; GFX9-NEXT: v_perm_b32 v6, v6, v16, s4
+; GFX9-NEXT: v_lshlrev_b32_e32 v16, 16, v23
+; GFX9-NEXT: v_lshlrev_b32_e32 v17, 16, v7
+; GFX9-NEXT: v_add_f32_e32 v16, v17, v16
+; GFX9-NEXT: v_and_b32_e32 v17, 0xffff0000, v23
+; GFX9-NEXT: v_and_b32_e32 v7, 0xffff0000, v7
+; GFX9-NEXT: v_add_f32_e32 v7, v7, v17
+; GFX9-NEXT: v_perm_b32 v7, v7, v16, s4
+; GFX9-NEXT: v_lshlrev_b32_e32 v16, 16, v24
+; GFX9-NEXT: v_lshlrev_b32_e32 v17, 16, v8
+; GFX9-NEXT: v_add_f32_e32 v16, v17, v16
+; GFX9-NEXT: v_and_b32_e32 v17, 0xffff0000, v24
+; GFX9-NEXT: v_and_b32_e32 v8, 0xffff0000, v8
+; GFX9-NEXT: v_add_f32_e32 v8, v8, v17
+; GFX9-NEXT: v_perm_b32 v8, v8, v16, s4
+; GFX9-NEXT: v_lshlrev_b32_e32 v16, 16, v25
+; GFX9-NEXT: v_lshlrev_b32_e32 v17, 16, v9
+; GFX9-NEXT: v_add_f32_e32 v16, v17, v16
+; GFX9-NEXT: v_and_b32_e32 v17, 0xffff0000, v25
+; GFX9-NEXT: v_and_b32_e32 v9, 0xffff0000, v9
+; GFX9-NEXT: v_add_f32_e32 v9, v9, v17
+; GFX9-NEXT: v_perm_b32 v9, v9, v16, s4
+; GFX9-NEXT: v_lshlrev_b32_e32 v16, 16, v26
+; GFX9-NEXT: v_lshlrev_b32_e32 v17, 16, v10
+; GFX9-NEXT: v_add_f32_e32 v16, v17, v16
+; GFX9-NEXT: v_and_b32_e32 v17, 0xffff0000, v26
+; GFX9-NEXT: v_and_b32_e32 v10, 0xffff0000, v10
+; GFX9-NEXT: v_add_f32_e32 v10, v10, v17
+; GFX9-NEXT: v_perm_b32 v10, v10, v16, s4
+; GFX9-NEXT: v_lshlrev_b32_e32 v16, 16, v27
+; GFX9-NEXT: v_lshlrev_b32_e32 v17, 16, v11
+; GFX9-NEXT: v_add_f32_e32 v16, v17, v16
+; GFX9-NEXT: v_and_b32_e32 v17, 0xffff0000, v27
+; GFX9-NEXT: v_and_b32_e32 v11, 0xffff0000, v11
+; GFX9-NEXT: v_add_f32_e32 v11, v11, v17
+; GFX9-NEXT: v_perm_b32 v11, v11, v16, s4
+; GFX9-NEXT: v_lshlrev_b32_e32 v16, 16, v28
+; GFX9-NEXT: v_lshlrev_b32_e32 v17, 16, v12
+; GFX9-NEXT: v_add_f32_e32 v16, v17, v16
+; GFX9-NEXT: v_and_b32_e32 v17, 0xffff0000, v28
+; GFX9-NEXT: v_and_b32_e32 v12, 0xffff0000, v12
+; GFX9-NEXT: v_add_f32_e32 v12, v12, v17
+; GFX9-NEXT: v_perm_b32 v12, v12, v16, s4
+; GFX9-NEXT: v_lshlrev_b32_e32 v16, 16, v29
+; GFX9-NEXT: v_lshlrev_b32_e32 v17, 16, v13
+; GFX9-NEXT: v_add_f32_e32 v16, v17, v16
+; GFX9-NEXT: v_and_b32_e32 v17, 0xffff0000, v29
+; GFX9-NEXT: v_and_b32_e32 v13, 0xffff0000, v13
+; GFX9-NEXT: v_add_f32_e32 v13, v13, v17
+; GFX9-NEXT: v_perm_b32 v13, v13, v16, s4
+; GFX9-NEXT: v_lshlrev_b32_e32 v16, 16, v30
+; GFX9-NEXT: v_lshlrev_b32_e32 v17, 16, v14
+; GFX9-NEXT: v_add_f32_e32 v16, v17, v16
+; GFX9-NEXT: v_and_b32_e32 v17, 0xffff0000, v30
+; GFX9-NEXT: v_and_b32_e32 v14, 0xffff0000, v14
+; GFX9-NEXT: v_add_f32_e32 v14, v14, v17
+; GFX9-NEXT: v_perm_b32 v14, v14, v16, s4
+; GFX9-NEXT: s_waitcnt vmcnt(0)
+; GFX9-NEXT: v_lshlrev_b32_e32 v16, 16, v18
+; GFX9-NEXT: v_lshlrev_b32_e32 v17, 16, v15
+; GFX9-NEXT: v_add_f32_e32 v16, v17, v16
+; GFX9-NEXT: v_and_b32_e32 v17, 0xffff0000, v18
+; GFX9-NEXT: v_and_b32_e32 v15, 0xffff0000, v15
+; GFX9-NEXT: v_add_f32_e32 v15, v15, v17
+; GFX9-NEXT: v_perm_b32 v15, v15, v16, s4
; GFX9-NEXT: s_setpc_b64 s[30:31]
;
; GFX10-LABEL: v_fadd_v32bf16:
; GFX10: ; %bb.0:
; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX10-NEXT: buffer_load_dword v31, off, s[0:3], s32
-; GFX10-NEXT: v_lshlrev_b32_e32 v39, 16, v27
-; GFX10-NEXT: v_lshlrev_b32_e32 v48, 16, v11
-; GFX10-NEXT: v_and_b32_e32 v27, 0xffff0000, v27
-; GFX10-NEXT: v_and_b32_e32 v11, 0xffff0000, v11
-; GFX10-NEXT: v_lshlrev_b32_e32 v49, 16, v26
-; GFX10-NEXT: v_lshlrev_b32_e32 v50, 16, v10
-; GFX10-NEXT: v_and_b32_e32 v26, 0xffff0000, v26
-; GFX10-NEXT: v_and_b32_e32 v10, 0xffff0000, v10
-; GFX10-NEXT: v_lshlrev_b32_e32 v33, 16, v30
-; GFX10-NEXT: v_lshlrev_b32_e32 v34, 16, v14
-; GFX10-NEXT: v_and_b32_e32 v30, 0xffff0000, v30
-; GFX10-NEXT: v_and_b32_e32 v14, 0xffff0000, v14
-; GFX10-NEXT: v_lshlrev_b32_e32 v35, 16, v29
-; GFX10-NEXT: v_lshlrev_b32_e32 v36, 16, v13
-; GFX10-NEXT: v_and_b32_e32 v29, 0xffff0000, v29
-; GFX10-NEXT: v_and_b32_e32 v13, 0xffff0000, v13
-; GFX10-NEXT: v_lshlrev_b32_e32 v37, 16, v28
-; GFX10-NEXT: v_lshlrev_b32_e32 v38, 16, v12
-; GFX10-NEXT: v_and_b32_e32 v28, 0xffff0000, v28
-; GFX10-NEXT: v_and_b32_e32 v12, 0xffff0000, v12
-; GFX10-NEXT: v_add_f32_e32 v39, v48, v39
-; GFX10-NEXT: v_lshlrev_b32_e32 v48, 16, v17
-; GFX10-NEXT: v_add_f32_e32 v11, v11, v27
-; GFX10-NEXT: v_lshlrev_b32_e32 v27, 16, v1
-; GFX10-NEXT: v_and_b32_e32 v17, 0xffff0000, v17
-; GFX10-NEXT: v_and_b32_e32 v1, 0xffff0000, v1
-; GFX10-NEXT: v_add_f32_e32 v49, v50, v49
-; GFX10-NEXT: v_lshlrev_b32_e32 v50, 16, v16
-; GFX10-NEXT: v_add_f32_e32 v10, v10, v26
-; GFX10-NEXT: v_lshlrev_b32_e32 v26, 16, v0
-; GFX10-NEXT: v_and_b32_e32 v16, 0xffff0000, v16
-; GFX10-NEXT: v_and_b32_e32 v0, 0xffff0000, v0
-; GFX10-NEXT: v_lshlrev_b32_e32 v32, 16, v15
-; GFX10-NEXT: v_and_b32_e32 v15, 0xffff0000, v15
-; GFX10-NEXT: v_lshlrev_b32_e32 v51, 16, v25
-; GFX10-NEXT: v_lshlrev_b32_e32 v52, 16, v9
-; GFX10-NEXT: v_and_b32_e32 v25, 0xffff0000, v25
-; GFX10-NEXT: v_and_b32_e32 v9, 0xffff0000, v9
-; GFX10-NEXT: v_lshlrev_b32_e32 v53, 16, v24
-; GFX10-NEXT: v_lshlrev_b32_e32 v54, 16, v8
-; GFX10-NEXT: v_and_b32_e32 v24, 0xffff0000, v24
-; GFX10-NEXT: v_and_b32_e32 v8, 0xffff0000, v8
-; GFX10-NEXT: v_lshlrev_b32_e32 v55, 16, v23
-; GFX10-NEXT: v_lshlrev_b32_e32 v64, 16, v7
-; GFX10-NEXT: v_and_b32_e32 v23, 0xffff0000, v23
-; GFX10-NEXT: v_and_b32_e32 v7, 0xffff0000, v7
-; GFX10-NEXT: v_lshlrev_b32_e32 v65, 16, v22
-; GFX10-NEXT: v_lshlrev_b32_e32 v66, 16, v6
-; GFX10-NEXT: v_and_b32_e32 v22, 0xffff0000, v22
-; GFX10-NEXT: v_and_b32_e32 v6, 0xffff0000, v6
-; GFX10-NEXT: v_lshlrev_b32_e32 v67, 16, v21
-; GFX10-NEXT: v_lshlrev_b32_e32 v68, 16, v5
+; GFX10-NEXT: v_lshlrev_b32_e32 v50, 16, v21
+; GFX10-NEXT: v_lshlrev_b32_e32 v51, 16, v5
; GFX10-NEXT: v_and_b32_e32 v21, 0xffff0000, v21
; GFX10-NEXT: v_and_b32_e32 v5, 0xffff0000, v5
-; GFX10-NEXT: v_add_f32_e32 v33, v34, v33
-; GFX10-NEXT: v_lshlrev_b32_e32 v34, 16, v20
-; GFX10-NEXT: v_add_f32_e32 v14, v14, v30
-; GFX10-NEXT: v_lshlrev_b32_e32 v30, 16, v4
-; GFX10-NEXT: v_and_b32_e32 v20, 0xffff0000, v20
-; GFX10-NEXT: v_and_b32_e32 v4, 0xffff0000, v4
-; GFX10-NEXT: v_add_f32_e32 v35, v36, v35
-; GFX10-NEXT: v_lshlrev_b32_e32 v36, 16, v19
-; GFX10-NEXT: v_add_f32_e32 v13, v13, v29
-; GFX10-NEXT: v_lshlrev_b32_e32 v29, 16, v3
-; GFX10-NEXT: v_and_b32_e32 v19, 0xffff0000, v19
-; GFX10-NEXT: v_and_b32_e32 v3, 0xffff0000, v3
-; GFX10-NEXT: v_add_f32_e32 v37, v38, v37
-; GFX10-NEXT: v_lshlrev_b32_e32 v38, 16, v18
-; GFX10-NEXT: v_add_f32_e32 v12, v12, v28
-; GFX10-NEXT: v_lshlrev_b32_e32 v28, 16, v2
+; GFX10-NEXT: v_lshlrev_b32_e32 v52, 16, v22
+; GFX10-NEXT: v_lshlrev_b32_e32 v53, 16, v6
+; GFX10-NEXT: v_and_b32_e32 v22, 0xffff0000, v22
+; GFX10-NEXT: v_and_b32_e32 v6, 0xffff0000, v6
+; GFX10-NEXT: v_lshlrev_b32_e32 v54, 16, v23
+; GFX10-NEXT: v_lshlrev_b32_e32 v55, 16, v7
+; GFX10-NEXT: v_and_b32_e32 v23, 0xffff0000, v23
+; GFX10-NEXT: v_and_b32_e32 v7, 0xffff0000, v7
+; GFX10-NEXT: v_lshlrev_b32_e32 v32, 16, v16
+; GFX10-NEXT: v_lshlrev_b32_e32 v33, 16, v0
+; GFX10-NEXT: v_and_b32_e32 v16, 0xffff0000, v16
+; GFX10-NEXT: v_and_b32_e32 v0, 0xffff0000, v0
+; GFX10-NEXT: v_lshlrev_b32_e32 v34, 16, v17
+; GFX10-NEXT: v_lshlrev_b32_e32 v35, 16, v1
+; GFX10-NEXT: v_and_b32_e32 v17, 0xffff0000, v17
+; GFX10-NEXT: v_and_b32_e32 v1, 0xffff0000, v1
+; GFX10-NEXT: v_lshlrev_b32_e32 v36, 16, v18
+; GFX10-NEXT: v_lshlrev_b32_e32 v37, 16, v2
; GFX10-NEXT: v_and_b32_e32 v18, 0xffff0000, v18
; GFX10-NEXT: v_and_b32_e32 v2, 0xffff0000, v2
+; GFX10-NEXT: v_lshlrev_b32_e32 v38, 16, v19
+; GFX10-NEXT: v_lshlrev_b32_e32 v39, 16, v3
+; GFX10-NEXT: v_and_b32_e32 v19, 0xffff0000, v19
+; GFX10-NEXT: v_and_b32_e32 v3, 0xffff0000, v3
+; GFX10-NEXT: v_lshlrev_b32_e32 v48, 16, v20
+; GFX10-NEXT: v_lshlrev_b32_e32 v49, 16, v4
+; GFX10-NEXT: v_and_b32_e32 v20, 0xffff0000, v20
+; GFX10-NEXT: v_and_b32_e32 v4, 0xffff0000, v4
+; GFX10-NEXT: v_add_f32_e32 v5, v5, v21
+; GFX10-NEXT: v_add_f32_e32 v21, v53, v52
+; GFX10-NEXT: v_add_f32_e32 v6, v6, v22
+; GFX10-NEXT: v_add_f32_e32 v22, v55, v54
+; GFX10-NEXT: v_add_f32_e32 v7, v7, v23
+; GFX10-NEXT: v_lshlrev_b32_e32 v64, 16, v24
+; GFX10-NEXT: v_lshlrev_b32_e32 v65, 16, v8
+; GFX10-NEXT: v_and_b32_e32 v24, 0xffff0000, v24
+; GFX10-NEXT: v_and_b32_e32 v8, 0xffff0000, v8
+; GFX10-NEXT: v_lshlrev_b32_e32 v66, 16, v25
+; GFX10-NEXT: v_lshlrev_b32_e32 v67, 16, v9
+; GFX10-NEXT: v_and_b32_e32 v25, 0xffff0000, v25
+; GFX10-NEXT: v_and_b32_e32 v9, 0xffff0000, v9
+; GFX10-NEXT: v_lshlrev_b32_e32 v68, 16, v26
+; GFX10-NEXT: v_add_f32_e32 v32, v33, v32
+; GFX10-NEXT: v_lshlrev_b32_e32 v33, 16, v10
+; GFX10-NEXT: v_and_b32_e32 v26, 0xffff0000, v26
+; GFX10-NEXT: v_and_b32_e32 v10, 0xffff0000, v10
; GFX10-NEXT: v_add_f32_e32 v0, v0, v16
+; GFX10-NEXT: v_lshlrev_b32_e32 v16, 16, v27
+; GFX10-NEXT: v_add_f32_e32 v34, v35, v34
+; GFX10-NEXT: v_lshlrev_b32_e32 v35, 16, v11
+; GFX10-NEXT: v_and_b32_e32 v27, 0xffff0000, v27
+; GFX10-NEXT: v_and_b32_e32 v11, 0xffff0000, v11
; GFX10-NEXT: v_add_f32_e32 v1, v1, v17
-; GFX10-NEXT: v_add_f32_e32 v51, v52, v51
-; GFX10-NEXT: v_add_f32_e32 v9, v9, v25
-; GFX10-NEXT: v_add_f32_e32 v25, v54, v53
-; GFX10-NEXT: v_add_f32_e32 v8, v8, v24
-; GFX10-NEXT: v_add_f32_e32 v24, v64, v55
-; GFX10-NEXT: v_add_f32_e32 v7, v7, v23
-; GFX10-NEXT: v_add_f32_e32 v23, v66, v65
-; GFX10-NEXT: v_add_f32_e32 v6, v6, v22
-; GFX10-NEXT: v_add_f32_e32 v22, v68, v67
-; GFX10-NEXT: v_add_f32_e32 v5, v5, v21
-; GFX10-NEXT: v_add_f32_e32 v21, v30, v34
-; GFX10-NEXT: v_add_f32_e32 v29, v29, v36
-; GFX10-NEXT: v_add_f32_e32 v28, v28, v38
-; GFX10-NEXT: v_add_f32_e32 v27, v27, v48
-; GFX10-NEXT: v_add_f32_e32 v26, v26, v50
+; GFX10-NEXT: v_lshlrev_b32_e32 v17, 16, v28
+; GFX10-NEXT: v_add_f32_e32 v36, v37, v36
+; GFX10-NEXT: v_lshlrev_b32_e32 v37, 16, v12
+; GFX10-NEXT: v_and_b32_e32 v28, 0xffff0000, v28
+; GFX10-NEXT: v_and_b32_e32 v12, 0xffff0000, v12
; GFX10-NEXT: v_add_f32_e32 v2, v2, v18
+; GFX10-NEXT: v_lshlrev_b32_e32 v18, 16, v29
+; GFX10-NEXT: v_add_f32_e32 v38, v39, v38
+; GFX10-NEXT: v_lshlrev_b32_e32 v39, 16, v13
+; GFX10-NEXT: v_and_b32_e32 v29, 0xffff0000, v29
+; GFX10-NEXT: v_and_b32_e32 v13, 0xffff0000, v13
; GFX10-NEXT: v_add_f32_e32 v3, v3, v19
+; GFX10-NEXT: v_lshlrev_b32_e32 v19, 16, v30
+; GFX10-NEXT: v_add_f32_e32 v48, v49, v48
+; GFX10-NEXT: v_lshlrev_b32_e32 v49, 16, v14
+; GFX10-NEXT: v_and_b32_e32 v30, 0xffff0000, v30
+; GFX10-NEXT: v_and_b32_e32 v14, 0xffff0000, v14
; GFX10-NEXT: v_add_f32_e32 v4, v4, v20
-; GFX10-NEXT: v_perm_b32 v1, v1, v27, 0x7060302
-; GFX10-NEXT: v_perm_b32 v0, v0, v26, 0x7060302
-; GFX10-NEXT: v_perm_b32 v2, v2, v28, 0x7060302
-; GFX10-NEXT: v_perm_b32 v3, v3, v29, 0x7060302
-; GFX10-NEXT: v_perm_b32 v4, v4, v21, 0x7060302
-; GFX10-NEXT: v_perm_b32 v5, v5, v22, 0x7060302
-; GFX10-NEXT: v_perm_b32 v6, v6, v23, 0x7060302
-; GFX10-NEXT: v_perm_b32 v7, v7, v24, 0x7060302
-; GFX10-NEXT: v_perm_b32 v8, v8, v25, 0x7060302
-; GFX10-NEXT: v_perm_b32 v9, v9, v51, 0x7060302
-; GFX10-NEXT: v_perm_b32 v10, v10, v49, 0x7060302
-; GFX10-NEXT: v_perm_b32 v11, v11, v39, 0x7060302
-; GFX10-NEXT: v_perm_b32 v12, v12, v37, 0x7060302
-; GFX10-NEXT: v_perm_b32 v13, v13, v35, 0x7060302
-; GFX10-NEXT: v_perm_b32 v14, v14, v33, 0x7060302
+; GFX10-NEXT: v_lshlrev_b32_e32 v20, 16, v15
+; GFX10-NEXT: v_and_b32_e32 v15, 0xffff0000, v15
+; GFX10-NEXT: v_perm_b32 v6, v6, v21, 0x7060302
+; GFX10-NEXT: v_perm_b32 v7, v7, v22, 0x7060302
+; GFX10-NEXT: v_add_f32_e32 v50, v51, v50
+; GFX10-NEXT: v_add_f32_e32 v23, v65, v64
+; GFX10-NEXT: v_add_f32_e32 v8, v8, v24
+; GFX10-NEXT: v_add_f32_e32 v24, v67, v66
+; GFX10-NEXT: v_add_f32_e32 v9, v9, v25
+; GFX10-NEXT: v_add_f32_e32 v25, v33, v68
+; GFX10-NEXT: v_add_f32_e32 v10, v10, v26
+; GFX10-NEXT: v_add_f32_e32 v16, v35, v16
+; GFX10-NEXT: v_add_f32_e32 v11, v11, v27
+; GFX10-NEXT: v_add_f32_e32 v17, v37, v17
+; GFX10-NEXT: v_add_f32_e32 v12, v12, v28
+; GFX10-NEXT: v_add_f32_e32 v18, v39, v18
+; GFX10-NEXT: v_add_f32_e32 v13, v13, v29
+; GFX10-NEXT: v_add_f32_e32 v19, v49, v19
+; GFX10-NEXT: v_add_f32_e32 v14, v14, v30
+; GFX10-NEXT: v_perm_b32 v0, v0, v32, 0x7060302
+; GFX10-NEXT: v_perm_b32 v1, v1, v34, 0x7060302
+; GFX10-NEXT: v_perm_b32 v2, v2, v36, 0x7060302
+; GFX10-NEXT: v_perm_b32 v3, v3, v38, 0x7060302
+; GFX10-NEXT: v_perm_b32 v4, v4, v48, 0x7060302
+; GFX10-NEXT: v_perm_b32 v5, v5, v50, 0x7060302
+; GFX10-NEXT: v_perm_b32 v8, v8, v23, 0x7060302
+; GFX10-NEXT: v_perm_b32 v9, v9, v24, 0x7060302
+; GFX10-NEXT: v_perm_b32 v10, v10, v25, 0x7060302
+; GFX10-NEXT: v_perm_b32 v11, v11, v16, 0x7060302
+; GFX10-NEXT: v_perm_b32 v12, v12, v17, 0x7060302
+; GFX10-NEXT: v_perm_b32 v13, v13, v18, 0x7060302
+; GFX10-NEXT: v_perm_b32 v14, v14, v19, 0x7060302
; GFX10-NEXT: s_waitcnt vmcnt(0)
-; GFX10-NEXT: v_lshlrev_b32_e32 v16, 16, v31
-; GFX10-NEXT: v_and_b32_e32 v17, 0xffff0000, v31
-; GFX10-NEXT: v_add_f32_e32 v16, v32, v16
-; GFX10-NEXT: v_add_f32_e32 v15, v15, v17
-; GFX10-NEXT: v_perm_b32 v15, v15, v16, 0x7060302
+; GFX10-NEXT: v_lshlrev_b32_e32 v21, 16, v31
+; GFX10-NEXT: v_and_b32_e32 v22, 0xffff0000, v31
+; GFX10-NEXT: v_add_f32_e32 v20, v20, v21
+; GFX10-NEXT: v_add_f32_e32 v15, v15, v22
+; GFX10-NEXT: v_perm_b32 v15, v15, v20, 0x7060302
; GFX10-NEXT: s_setpc_b64 s[30:31]
;
; GFX11-LABEL: v_fadd_v32bf16:
; GFX11: ; %bb.0:
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-NEXT: scratch_load_b32 v31, off, s32
-; GFX11-NEXT: v_lshlrev_b32_e32 v83, 16, v17
-; GFX11-NEXT: v_lshlrev_b32_e32 v84, 16, v1
-; GFX11-NEXT: v_and_b32_e32 v17, 0xffff0000, v17
-; GFX11-NEXT: v_and_b32_e32 v1, 0xffff0000, v1
-; GFX11-NEXT: v_lshlrev_b32_e32 v85, 16, v16
-; GFX11-NEXT: v_lshlrev_b32_e32 v86, 16, v0
-; GFX11-NEXT: v_and_b32_e32 v16, 0xffff0000, v16
-; GFX11-NEXT: v_and_b32_e32 v0, 0xffff0000, v0
-; GFX11-NEXT: v_lshlrev_b32_e32 v54, 16, v8
-; GFX11-NEXT: v_lshlrev_b32_e32 v64, 16, v7
-; GFX11-NEXT: v_and_b32_e32 v7, 0xffff0000, v7
-; GFX11-NEXT: v_lshlrev_b32_e32 v65, 16, v22
-; GFX11-NEXT: v_lshlrev_b32_e32 v66, 16, v6
-; GFX11-NEXT: v_lshlrev_b32_e32 v48, 16, v11
-; GFX11-NEXT: v_dual_add_f32 v0, v0, v16 :: v_dual_and_b32 v11, 0xffff0000, v11
-; GFX11-NEXT: v_and_b32_e32 v22, 0xffff0000, v22
-; GFX11-NEXT: v_and_b32_e32 v6, 0xffff0000, v6
-; GFX11-NEXT: v_lshlrev_b32_e32 v67, 16, v21
-; GFX11-NEXT: v_lshlrev_b32_e32 v68, 16, v5
-; GFX11-NEXT: v_lshlrev_b32_e32 v51, 16, v25
-; GFX11-NEXT: v_and_b32_e32 v21, 0xffff0000, v21
-; GFX11-NEXT: v_and_b32_e32 v5, 0xffff0000, v5
-; GFX11-NEXT: v_lshlrev_b32_e32 v69, 16, v20
-; GFX11-NEXT: v_lshlrev_b32_e32 v70, 16, v4
-; GFX11-NEXT: v_and_b32_e32 v20, 0xffff0000, v20
-; GFX11-NEXT: v_and_b32_e32 v4, 0xffff0000, v4
-; GFX11-NEXT: v_lshlrev_b32_e32 v55, 16, v23
-; GFX11-NEXT: v_lshlrev_b32_e32 v71, 16, v19
-; GFX11-NEXT: v_lshlrev_b32_e32 v80, 16, v3
+; GFX11-NEXT: v_lshlrev_b32_e32 v68, 16, v26
+; GFX11-NEXT: v_lshlrev_b32_e32 v69, 16, v10
+; GFX11-NEXT: v_and_b32_e32 v26, 0xffff0000, v26
+; GFX11-NEXT: v_and_b32_e32 v10, 0xffff0000, v10
+; GFX11-NEXT: v_lshlrev_b32_e32 v70, 16, v27
+; GFX11-NEXT: v_lshlrev_b32_e32 v71, 16, v11
+; GFX11-NEXT: v_lshlrev_b32_e32 v50, 16, v21
+; GFX11-NEXT: v_lshlrev_b32_e32 v54, 16, v23
+; GFX11-NEXT: v_lshlrev_b32_e32 v55, 16, v7
+; GFX11-NEXT: v_lshlrev_b32_e32 v64, 16, v24
+; GFX11-NEXT: v_lshlrev_b32_e32 v65, 16, v8
+; GFX11-NEXT: v_and_b32_e32 v24, 0xffff0000, v24
+; GFX11-NEXT: v_and_b32_e32 v8, 0xffff0000, v8
+; GFX11-NEXT: v_lshlrev_b32_e32 v51, 16, v5
+; GFX11-NEXT: v_dual_add_f32 v10, v10, v26 :: v_dual_and_b32 v5, 0xffff0000, v5
+; GFX11-NEXT: v_lshlrev_b32_e32 v66, 16, v25
; GFX11-NEXT: v_and_b32_e32 v25, 0xffff0000, v25
-; GFX11-NEXT: v_and_b32_e32 v19, 0xffff0000, v19
-; GFX11-NEXT: v_and_b32_e32 v3, 0xffff0000, v3
-; GFX11-NEXT: v_lshlrev_b32_e32 v52, 16, v9
-; GFX11-NEXT: v_and_b32_e32 v9, 0xffff0000, v9
-; GFX11-NEXT: v_lshlrev_b32_e32 v81, 16, v18
-; GFX11-NEXT: v_lshlrev_b32_e32 v82, 16, v2
-; GFX11-NEXT: v_and_b32_e32 v18, 0xffff0000, v18
-; GFX11-NEXT: v_and_b32_e32 v2, 0xffff0000, v2
-; GFX11-NEXT: v_lshlrev_b32_e32 v53, 16, v24
-; GFX11-NEXT: v_dual_add_f32 v1, v1, v17 :: v_dual_and_b32 v24, 0xffff0000, v24
-; GFX11-NEXT: v_dual_add_f32 v5, v5, v21 :: v_dual_lshlrev_b32 v50, 16, v10
-; GFX11-NEXT: v_dual_add_f32 v21, v70, v69 :: v_dual_and_b32 v10, 0xffff0000, v10
-; GFX11-NEXT: v_dual_add_f32 v2, v2, v18 :: v_dual_add_f32 v3, v3, v19
-; GFX11-NEXT: v_dual_add_f32 v4, v4, v20 :: v_dual_lshlrev_b32 v49, 16, v26
-; GFX11-NEXT: v_dual_add_f32 v9, v9, v25 :: v_dual_and_b32 v26, 0xffff0000, v26
-; GFX11-NEXT: v_add_f32_e32 v6, v6, v22
-; GFX11-NEXT: v_dual_add_f32 v22, v68, v67 :: v_dual_lshlrev_b32 v37, 16, v28
+; GFX11-NEXT: v_and_b32_e32 v27, 0xffff0000, v27
+; GFX11-NEXT: v_and_b32_e32 v11, 0xffff0000, v11
+; GFX11-NEXT: v_lshlrev_b32_e32 v80, 16, v28
+; GFX11-NEXT: v_lshlrev_b32_e32 v81, 16, v12
+; GFX11-NEXT: v_lshlrev_b32_e32 v52, 16, v22
; GFX11-NEXT: v_and_b32_e32 v28, 0xffff0000, v28
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_4) | instid1(VALU_DEP_4)
-; GFX11-NEXT: v_add_f32_e32 v10, v10, v26
-; GFX11-NEXT: v_add_f32_e32 v26, v52, v51
-; GFX11-NEXT: v_perm_b32 v4, v4, v21, 0x7060302
-; GFX11-NEXT: v_add_f32_e32 v25, v54, v53
-; GFX11-NEXT: v_perm_b32 v5, v5, v22, 0x7060302
-; GFX11-NEXT: v_perm_b32 v9, v9, v26, 0x7060302
-; GFX11-NEXT: s_waitcnt vmcnt(0)
-; GFX11-NEXT: v_lshlrev_b32_e32 v16, 16, v31
+; GFX11-NEXT: v_and_b32_e32 v12, 0xffff0000, v12
+; GFX11-NEXT: v_lshlrev_b32_e32 v53, 16, v6
+; GFX11-NEXT: v_lshlrev_b32_e32 v82, 16, v29
+; GFX11-NEXT: v_lshlrev_b32_e32 v83, 16, v13
; GFX11-NEXT: v_and_b32_e32 v23, 0xffff0000, v23
-; GFX11-NEXT: v_and_b32_e32 v17, 0xffff0000, v31
-; GFX11-NEXT: v_and_b32_e32 v8, 0xffff0000, v8
-; GFX11-NEXT: v_lshlrev_b32_e32 v36, 16, v13
+; GFX11-NEXT: v_and_b32_e32 v29, 0xffff0000, v29
; GFX11-NEXT: v_and_b32_e32 v13, 0xffff0000, v13
-; GFX11-NEXT: v_lshlrev_b32_e32 v39, 16, v27
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4)
-; GFX11-NEXT: v_dual_add_f32 v8, v8, v24 :: v_dual_and_b32 v27, 0xffff0000, v27
-; GFX11-NEXT: v_add_f32_e32 v24, v64, v55
-; GFX11-NEXT: v_lshlrev_b32_e32 v38, 16, v12
-; GFX11-NEXT: v_and_b32_e32 v12, 0xffff0000, v12
-; GFX11-NEXT: v_lshlrev_b32_e32 v35, 16, v29
-; GFX11-NEXT: v_add_f32_e32 v7, v7, v23
-; GFX11-NEXT: v_add_f32_e32 v23, v66, v65
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_1) | instid1(VALU_DEP_2)
-; GFX11-NEXT: v_dual_add_f32 v12, v12, v28 :: v_dual_and_b32 v29, 0xffff0000, v29
-; GFX11-NEXT: v_dual_add_f32 v28, v48, v39 :: v_dual_lshlrev_b32 v33, 16, v30
-; GFX11-NEXT: v_dual_add_f32 v13, v13, v29 :: v_dual_lshlrev_b32 v34, 16, v14
-; GFX11-NEXT: v_lshlrev_b32_e32 v32, 16, v15
-; GFX11-NEXT: v_dual_add_f32 v11, v11, v27 :: v_dual_and_b32 v14, 0xffff0000, v14
-; GFX11-NEXT: v_dual_add_f32 v27, v50, v49 :: v_dual_and_b32 v30, 0xffff0000, v30
-; GFX11-NEXT: v_add_f32_e32 v29, v38, v37
+; GFX11-NEXT: v_lshlrev_b32_e32 v84, 16, v30
+; GFX11-NEXT: v_lshlrev_b32_e32 v85, 16, v14
+; GFX11-NEXT: v_and_b32_e32 v22, 0xffff0000, v22
+; GFX11-NEXT: v_and_b32_e32 v30, 0xffff0000, v30
+; GFX11-NEXT: v_and_b32_e32 v14, 0xffff0000, v14
+; GFX11-NEXT: v_and_b32_e32 v7, 0xffff0000, v7
+; GFX11-NEXT: v_lshlrev_b32_e32 v86, 16, v15
+; GFX11-NEXT: v_lshlrev_b32_e32 v67, 16, v9
+; GFX11-NEXT: v_and_b32_e32 v9, 0xffff0000, v9
+; GFX11-NEXT: v_lshlrev_b32_e32 v48, 16, v20
+; GFX11-NEXT: v_dual_add_f32 v11, v11, v27 :: v_dual_and_b32 v20, 0xffff0000, v20
; GFX11-NEXT: v_and_b32_e32 v15, 0xffff0000, v15
-; GFX11-NEXT: v_add_f32_e32 v37, v86, v85
-; GFX11-NEXT: v_perm_b32 v6, v6, v23, 0x7060302
+; GFX11-NEXT: v_and_b32_e32 v6, 0xffff0000, v6
+; GFX11-NEXT: v_dual_add_f32 v26, v71, v70 :: v_dual_lshlrev_b32 v49, 16, v4
+; GFX11-NEXT: v_dual_add_f32 v13, v13, v29 :: v_dual_and_b32 v4, 0xffff0000, v4
+; GFX11-NEXT: v_lshlrev_b32_e32 v35, 16, v1
+; GFX11-NEXT: v_lshlrev_b32_e32 v37, 16, v2
+; GFX11-NEXT: v_lshlrev_b32_e32 v38, 16, v19
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4)
+; GFX11-NEXT: v_add_f32_e32 v4, v4, v20
+; GFX11-NEXT: v_dual_add_f32 v8, v8, v24 :: v_dual_add_f32 v9, v9, v25
+; GFX11-NEXT: v_add_f32_e32 v25, v69, v68
+; GFX11-NEXT: v_dual_add_f32 v20, v51, v50 :: v_dual_lshlrev_b32 v39, 16, v3
+; GFX11-NEXT: v_add_f32_e32 v27, v81, v80
+; GFX11-NEXT: v_add_f32_e32 v12, v12, v28
+; GFX11-NEXT: v_dual_add_f32 v28, v83, v82 :: v_dual_add_f32 v29, v85, v84
+; GFX11-NEXT: v_dual_add_f32 v6, v6, v22 :: v_dual_and_b32 v3, 0xffff0000, v3
+; GFX11-NEXT: v_add_f32_e32 v22, v55, v54
+; GFX11-NEXT: v_lshlrev_b32_e32 v36, 16, v18
+; GFX11-NEXT: v_lshlrev_b32_e32 v34, 16, v17
+; GFX11-NEXT: v_and_b32_e32 v17, 0xffff0000, v17
+; GFX11-NEXT: v_and_b32_e32 v18, 0xffff0000, v18
; GFX11-NEXT: v_add_f32_e32 v14, v14, v30
-; GFX11-NEXT: v_dual_add_f32 v30, v36, v35 :: v_dual_add_f32 v33, v34, v33
-; GFX11-NEXT: v_dual_add_f32 v34, v80, v71 :: v_dual_add_f32 v35, v82, v81
-; GFX11-NEXT: v_add_f32_e32 v36, v84, v83
-; GFX11-NEXT: v_dual_add_f32 v16, v32, v16 :: v_dual_add_f32 v15, v15, v17
-; GFX11-NEXT: v_perm_b32 v0, v0, v37, 0x7060302
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
-; GFX11-NEXT: v_perm_b32 v2, v2, v35, 0x7060302
-; GFX11-NEXT: v_perm_b32 v1, v1, v36, 0x7060302
-; GFX11-NEXT: v_perm_b32 v3, v3, v34, 0x7060302
-; GFX11-NEXT: v_perm_b32 v7, v7, v24, 0x7060302
-; GFX11-NEXT: v_perm_b32 v8, v8, v25, 0x7060302
-; GFX11-NEXT: v_perm_b32 v10, v10, v27, 0x7060302
-; GFX11-NEXT: v_perm_b32 v11, v11, v28, 0x7060302
-; GFX11-NEXT: v_perm_b32 v12, v12, v29, 0x7060302
-; GFX11-NEXT: v_perm_b32 v13, v13, v30, 0x7060302
-; GFX11-NEXT: v_perm_b32 v14, v14, v33, 0x7060302
+; GFX11-NEXT: v_dual_add_f32 v7, v7, v23 :: v_dual_and_b32 v2, 0xffff0000, v2
+; GFX11-NEXT: v_and_b32_e32 v1, 0xffff0000, v1
+; GFX11-NEXT: v_add_f32_e32 v23, v65, v64
+; GFX11-NEXT: v_and_b32_e32 v19, 0xffff0000, v19
+; GFX11-NEXT: v_dual_add_f32 v24, v67, v66 :: v_dual_and_b32 v21, 0xffff0000, v21
+; GFX11-NEXT: v_add_f32_e32 v2, v2, v18
+; GFX11-NEXT: v_dual_add_f32 v1, v1, v17 :: v_dual_lshlrev_b32 v32, 16, v16
+; GFX11-NEXT: v_add_f32_e32 v18, v39, v38
+; GFX11-NEXT: v_dual_add_f32 v3, v3, v19 :: v_dual_and_b32 v16, 0xffff0000, v16
+; GFX11-NEXT: v_add_f32_e32 v19, v49, v48
+; GFX11-NEXT: v_add_f32_e32 v17, v37, v36
+; GFX11-NEXT: v_lshlrev_b32_e32 v33, 16, v0
+; GFX11-NEXT: v_dual_add_f32 v5, v5, v21 :: v_dual_and_b32 v0, 0xffff0000, v0
+; GFX11-NEXT: v_add_f32_e32 v21, v53, v52
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_1) | instid1(VALU_DEP_4)
+; GFX11-NEXT: v_perm_b32 v2, v2, v17, 0x7060302
+; GFX11-NEXT: v_perm_b32 v3, v3, v18, 0x7060302
+; GFX11-NEXT: v_add_f32_e32 v0, v0, v16
+; GFX11-NEXT: v_add_f32_e32 v16, v35, v34
+; GFX11-NEXT: v_add_f32_e32 v32, v33, v32
+; GFX11-NEXT: v_perm_b32 v4, v4, v19, 0x7060302
+; GFX11-NEXT: v_perm_b32 v5, v5, v20, 0x7060302
+; GFX11-NEXT: v_perm_b32 v6, v6, v21, 0x7060302
+; GFX11-NEXT: v_perm_b32 v1, v1, v16, 0x7060302
+; GFX11-NEXT: v_perm_b32 v0, v0, v32, 0x7060302
+; GFX11-NEXT: v_perm_b32 v7, v7, v22, 0x7060302
+; GFX11-NEXT: v_perm_b32 v8, v8, v23, 0x7060302
+; GFX11-NEXT: v_perm_b32 v9, v9, v24, 0x7060302
+; GFX11-NEXT: v_perm_b32 v10, v10, v25, 0x7060302
+; GFX11-NEXT: v_perm_b32 v11, v11, v26, 0x7060302
+; GFX11-NEXT: v_perm_b32 v12, v12, v27, 0x7060302
+; GFX11-NEXT: v_perm_b32 v13, v13, v28, 0x7060302
+; GFX11-NEXT: v_perm_b32 v14, v14, v29, 0x7060302
+; GFX11-NEXT: s_waitcnt vmcnt(0)
+; GFX11-NEXT: v_lshlrev_b32_e32 v16, 16, v31
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-NEXT: v_dual_add_f32 v16, v86, v16 :: v_dual_and_b32 v17, 0xffff0000, v31
+; GFX11-NEXT: v_add_f32_e32 v15, v15, v17
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
; GFX11-NEXT: v_perm_b32 v15, v15, v16, 0x7060302
; GFX11-NEXT: s_setpc_b64 s[30:31]
%op = fadd <32 x bfloat> %a, %b
@@ -12177,480 +12148,483 @@ define <32 x bfloat> @v_fmul_v32bf16(<32 x bfloat> %a, <32 x bfloat> %b) {
; GFX8-LABEL: v_fmul_v32bf16:
; GFX8: ; %bb.0:
; GFX8-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX8-NEXT: v_lshlrev_b32_e32 v31, 16, v30
-; GFX8-NEXT: v_lshlrev_b32_e32 v32, 16, v14
-; GFX8-NEXT: v_and_b32_e32 v30, 0xffff0000, v30
-; GFX8-NEXT: v_and_b32_e32 v14, 0xffff0000, v14
-; GFX8-NEXT: v_mul_f32_e32 v31, v32, v31
-; GFX8-NEXT: v_mul_f32_e32 v30, v14, v30
-; GFX8-NEXT: v_lshlrev_b32_e32 v14, 16, v29
-; GFX8-NEXT: v_lshlrev_b32_e32 v32, 16, v13
-; GFX8-NEXT: v_and_b32_e32 v29, 0xffff0000, v29
-; GFX8-NEXT: v_and_b32_e32 v13, 0xffff0000, v13
-; GFX8-NEXT: v_mul_f32_e32 v14, v32, v14
-; GFX8-NEXT: v_mul_f32_e32 v13, v13, v29
-; GFX8-NEXT: v_lshlrev_b32_e32 v29, 16, v28
-; GFX8-NEXT: v_lshlrev_b32_e32 v32, 16, v12
-; GFX8-NEXT: v_and_b32_e32 v28, 0xffff0000, v28
-; GFX8-NEXT: v_and_b32_e32 v12, 0xffff0000, v12
-; GFX8-NEXT: v_mul_f32_e32 v29, v32, v29
-; GFX8-NEXT: v_mul_f32_e32 v12, v12, v28
-; GFX8-NEXT: v_lshlrev_b32_e32 v28, 16, v27
-; GFX8-NEXT: v_lshlrev_b32_e32 v32, 16, v11
-; GFX8-NEXT: v_and_b32_e32 v27, 0xffff0000, v27
-; GFX8-NEXT: v_and_b32_e32 v11, 0xffff0000, v11
-; GFX8-NEXT: v_mul_f32_e32 v28, v32, v28
-; GFX8-NEXT: v_mul_f32_e32 v11, v11, v27
-; GFX8-NEXT: v_lshlrev_b32_e32 v27, 16, v26
-; GFX8-NEXT: v_lshlrev_b32_e32 v32, 16, v10
-; GFX8-NEXT: v_and_b32_e32 v26, 0xffff0000, v26
-; GFX8-NEXT: v_and_b32_e32 v10, 0xffff0000, v10
-; GFX8-NEXT: v_mul_f32_e32 v27, v32, v27
-; GFX8-NEXT: v_mul_f32_e32 v10, v10, v26
-; GFX8-NEXT: v_lshlrev_b32_e32 v26, 16, v25
-; GFX8-NEXT: v_lshlrev_b32_e32 v32, 16, v9
-; GFX8-NEXT: v_and_b32_e32 v25, 0xffff0000, v25
-; GFX8-NEXT: v_and_b32_e32 v9, 0xffff0000, v9
-; GFX8-NEXT: v_mul_f32_e32 v26, v32, v26
-; GFX8-NEXT: v_mul_f32_e32 v9, v9, v25
-; GFX8-NEXT: v_lshlrev_b32_e32 v25, 16, v24
-; GFX8-NEXT: v_lshlrev_b32_e32 v32, 16, v8
-; GFX8-NEXT: v_and_b32_e32 v24, 0xffff0000, v24
-; GFX8-NEXT: v_and_b32_e32 v8, 0xffff0000, v8
-; GFX8-NEXT: v_mul_f32_e32 v8, v8, v24
-; GFX8-NEXT: buffer_load_dword v24, off, s[0:3], s32
-; GFX8-NEXT: v_mul_f32_e32 v25, v32, v25
-; GFX8-NEXT: v_lshlrev_b32_e32 v32, 16, v15
-; GFX8-NEXT: v_and_b32_e32 v15, 0xffff0000, v15
-; GFX8-NEXT: v_lshrrev_b32_e32 v8, 16, v8
-; GFX8-NEXT: v_lshrrev_b32_e32 v9, 16, v9
-; GFX8-NEXT: v_lshrrev_b32_e32 v10, 16, v10
-; GFX8-NEXT: v_lshrrev_b32_e32 v13, 16, v13
-; GFX8-NEXT: v_lshrrev_b32_e32 v12, 16, v12
-; GFX8-NEXT: v_lshrrev_b32_e32 v11, 16, v11
-; GFX8-NEXT: v_alignbit_b32 v8, v8, v25, 16
-; GFX8-NEXT: v_alignbit_b32 v9, v9, v26, 16
-; GFX8-NEXT: v_alignbit_b32 v10, v10, v27, 16
-; GFX8-NEXT: v_alignbit_b32 v11, v11, v28, 16
-; GFX8-NEXT: v_alignbit_b32 v12, v12, v29, 16
-; GFX8-NEXT: v_alignbit_b32 v13, v13, v14, 16
-; GFX8-NEXT: s_waitcnt vmcnt(0)
-; GFX8-NEXT: v_lshlrev_b32_e32 v33, 16, v24
-; GFX8-NEXT: v_and_b32_e32 v24, 0xffff0000, v24
-; GFX8-NEXT: v_mul_f32_e32 v32, v32, v33
-; GFX8-NEXT: v_mul_f32_e32 v15, v15, v24
-; GFX8-NEXT: v_lshlrev_b32_e32 v24, 16, v23
-; GFX8-NEXT: v_lshlrev_b32_e32 v33, 16, v7
-; GFX8-NEXT: v_and_b32_e32 v23, 0xffff0000, v23
-; GFX8-NEXT: v_and_b32_e32 v7, 0xffff0000, v7
-; GFX8-NEXT: v_mul_f32_e32 v24, v33, v24
-; GFX8-NEXT: v_mul_f32_e32 v7, v7, v23
-; GFX8-NEXT: v_lshlrev_b32_e32 v23, 16, v22
-; GFX8-NEXT: v_lshlrev_b32_e32 v33, 16, v6
-; GFX8-NEXT: v_and_b32_e32 v22, 0xffff0000, v22
-; GFX8-NEXT: v_and_b32_e32 v6, 0xffff0000, v6
-; GFX8-NEXT: v_mul_f32_e32 v23, v33, v23
-; GFX8-NEXT: v_mul_f32_e32 v6, v6, v22
-; GFX8-NEXT: v_lshlrev_b32_e32 v22, 16, v21
-; GFX8-NEXT: v_lshlrev_b32_e32 v33, 16, v5
-; GFX8-NEXT: v_and_b32_e32 v21, 0xffff0000, v21
-; GFX8-NEXT: v_and_b32_e32 v5, 0xffff0000, v5
-; GFX8-NEXT: v_mul_f32_e32 v22, v33, v22
-; GFX8-NEXT: v_mul_f32_e32 v5, v5, v21
-; GFX8-NEXT: v_lshlrev_b32_e32 v21, 16, v20
-; GFX8-NEXT: v_lshlrev_b32_e32 v33, 16, v4
-; GFX8-NEXT: v_and_b32_e32 v20, 0xffff0000, v20
-; GFX8-NEXT: v_and_b32_e32 v4, 0xffff0000, v4
-; GFX8-NEXT: v_mul_f32_e32 v21, v33, v21
-; GFX8-NEXT: v_mul_f32_e32 v4, v4, v20
-; GFX8-NEXT: v_lshlrev_b32_e32 v20, 16, v19
-; GFX8-NEXT: v_lshlrev_b32_e32 v33, 16, v3
-; GFX8-NEXT: v_and_b32_e32 v19, 0xffff0000, v19
-; GFX8-NEXT: v_and_b32_e32 v3, 0xffff0000, v3
-; GFX8-NEXT: v_mul_f32_e32 v20, v33, v20
-; GFX8-NEXT: v_mul_f32_e32 v3, v3, v19
-; GFX8-NEXT: v_lshlrev_b32_e32 v19, 16, v18
-; GFX8-NEXT: v_lshlrev_b32_e32 v33, 16, v2
-; GFX8-NEXT: v_and_b32_e32 v18, 0xffff0000, v18
-; GFX8-NEXT: v_and_b32_e32 v2, 0xffff0000, v2
-; GFX8-NEXT: v_mul_f32_e32 v19, v33, v19
-; GFX8-NEXT: v_mul_f32_e32 v2, v2, v18
-; GFX8-NEXT: v_lshlrev_b32_e32 v18, 16, v17
-; GFX8-NEXT: v_lshlrev_b32_e32 v33, 16, v1
-; GFX8-NEXT: v_and_b32_e32 v17, 0xffff0000, v17
-; GFX8-NEXT: v_and_b32_e32 v1, 0xffff0000, v1
-; GFX8-NEXT: v_mul_f32_e32 v18, v33, v18
-; GFX8-NEXT: v_mul_f32_e32 v1, v1, v17
-; GFX8-NEXT: v_lshlrev_b32_e32 v17, 16, v16
-; GFX8-NEXT: v_lshlrev_b32_e32 v33, 16, v0
+; GFX8-NEXT: v_lshlrev_b32_e32 v31, 16, v16
+; GFX8-NEXT: v_lshlrev_b32_e32 v32, 16, v0
; GFX8-NEXT: v_and_b32_e32 v16, 0xffff0000, v16
; GFX8-NEXT: v_and_b32_e32 v0, 0xffff0000, v0
; GFX8-NEXT: v_mul_f32_e32 v0, v0, v16
-; GFX8-NEXT: v_mul_f32_e32 v17, v33, v17
+; GFX8-NEXT: v_mul_f32_e32 v31, v32, v31
; GFX8-NEXT: v_lshrrev_b32_e32 v0, 16, v0
+; GFX8-NEXT: v_alignbit_b32 v0, v0, v31, 16
+; GFX8-NEXT: v_lshlrev_b32_e32 v16, 16, v17
+; GFX8-NEXT: v_lshlrev_b32_e32 v31, 16, v1
+; GFX8-NEXT: v_and_b32_e32 v17, 0xffff0000, v17
+; GFX8-NEXT: v_and_b32_e32 v1, 0xffff0000, v1
+; GFX8-NEXT: v_mul_f32_e32 v1, v1, v17
+; GFX8-NEXT: v_mul_f32_e32 v16, v31, v16
; GFX8-NEXT: v_lshrrev_b32_e32 v1, 16, v1
+; GFX8-NEXT: v_alignbit_b32 v1, v1, v16, 16
+; GFX8-NEXT: v_lshlrev_b32_e32 v16, 16, v18
+; GFX8-NEXT: v_lshlrev_b32_e32 v17, 16, v2
+; GFX8-NEXT: v_mul_f32_e32 v16, v17, v16
+; GFX8-NEXT: v_and_b32_e32 v17, 0xffff0000, v18
+; GFX8-NEXT: v_and_b32_e32 v2, 0xffff0000, v2
+; GFX8-NEXT: v_mul_f32_e32 v2, v2, v17
; GFX8-NEXT: v_lshrrev_b32_e32 v2, 16, v2
+; GFX8-NEXT: v_alignbit_b32 v2, v2, v16, 16
+; GFX8-NEXT: v_lshlrev_b32_e32 v16, 16, v19
+; GFX8-NEXT: v_lshlrev_b32_e32 v17, 16, v3
+; GFX8-NEXT: v_mul_f32_e32 v16, v17, v16
+; GFX8-NEXT: v_and_b32_e32 v17, 0xffff0000, v19
+; GFX8-NEXT: v_and_b32_e32 v3, 0xffff0000, v3
+; GFX8-NEXT: v_mul_f32_e32 v3, v3, v17
; GFX8-NEXT: v_lshrrev_b32_e32 v3, 16, v3
+; GFX8-NEXT: v_alignbit_b32 v3, v3, v16, 16
+; GFX8-NEXT: v_lshlrev_b32_e32 v16, 16, v20
+; GFX8-NEXT: v_lshlrev_b32_e32 v17, 16, v4
+; GFX8-NEXT: v_mul_f32_e32 v16, v17, v16
+; GFX8-NEXT: v_and_b32_e32 v17, 0xffff0000, v20
+; GFX8-NEXT: v_and_b32_e32 v4, 0xffff0000, v4
+; GFX8-NEXT: v_mul_f32_e32 v4, v4, v17
+; GFX8-NEXT: buffer_load_dword v17, off, s[0:3], s32
; GFX8-NEXT: v_lshrrev_b32_e32 v4, 16, v4
+; GFX8-NEXT: v_alignbit_b32 v4, v4, v16, 16
+; GFX8-NEXT: v_lshlrev_b32_e32 v16, 16, v21
+; GFX8-NEXT: v_lshlrev_b32_e32 v18, 16, v5
+; GFX8-NEXT: v_mul_f32_e32 v16, v18, v16
+; GFX8-NEXT: v_and_b32_e32 v18, 0xffff0000, v21
+; GFX8-NEXT: v_and_b32_e32 v5, 0xffff0000, v5
+; GFX8-NEXT: v_mul_f32_e32 v5, v5, v18
; GFX8-NEXT: v_lshrrev_b32_e32 v5, 16, v5
+; GFX8-NEXT: v_alignbit_b32 v5, v5, v16, 16
+; GFX8-NEXT: v_lshlrev_b32_e32 v16, 16, v22
+; GFX8-NEXT: v_lshlrev_b32_e32 v18, 16, v6
+; GFX8-NEXT: v_mul_f32_e32 v16, v18, v16
+; GFX8-NEXT: v_and_b32_e32 v18, 0xffff0000, v22
+; GFX8-NEXT: v_and_b32_e32 v6, 0xffff0000, v6
+; GFX8-NEXT: v_mul_f32_e32 v6, v6, v18
; GFX8-NEXT: v_lshrrev_b32_e32 v6, 16, v6
+; GFX8-NEXT: v_alignbit_b32 v6, v6, v16, 16
+; GFX8-NEXT: v_lshlrev_b32_e32 v16, 16, v23
+; GFX8-NEXT: v_lshlrev_b32_e32 v18, 16, v7
+; GFX8-NEXT: v_mul_f32_e32 v16, v18, v16
+; GFX8-NEXT: v_and_b32_e32 v18, 0xffff0000, v23
+; GFX8-NEXT: v_and_b32_e32 v7, 0xffff0000, v7
+; GFX8-NEXT: v_mul_f32_e32 v7, v7, v18
; GFX8-NEXT: v_lshrrev_b32_e32 v7, 16, v7
+; GFX8-NEXT: v_alignbit_b32 v7, v7, v16, 16
+; GFX8-NEXT: v_lshlrev_b32_e32 v16, 16, v24
+; GFX8-NEXT: v_lshlrev_b32_e32 v18, 16, v8
+; GFX8-NEXT: v_mul_f32_e32 v16, v18, v16
+; GFX8-NEXT: v_and_b32_e32 v18, 0xffff0000, v24
+; GFX8-NEXT: v_and_b32_e32 v8, 0xffff0000, v8
+; GFX8-NEXT: v_mul_f32_e32 v8, v8, v18
+; GFX8-NEXT: v_lshrrev_b32_e32 v8, 16, v8
+; GFX8-NEXT: v_alignbit_b32 v8, v8, v16, 16
+; GFX8-NEXT: v_lshlrev_b32_e32 v16, 16, v25
+; GFX8-NEXT: v_lshlrev_b32_e32 v18, 16, v9
+; GFX8-NEXT: v_mul_f32_e32 v16, v18, v16
+; GFX8-NEXT: v_and_b32_e32 v18, 0xffff0000, v25
+; GFX8-NEXT: v_and_b32_e32 v9, 0xffff0000, v9
+; GFX8-NEXT: v_mul_f32_e32 v9, v9, v18
+; GFX8-NEXT: v_lshrrev_b32_e32 v9, 16, v9
+; GFX8-NEXT: v_alignbit_b32 v9, v9, v16, 16
+; GFX8-NEXT: v_lshlrev_b32_e32 v16, 16, v26
+; GFX8-NEXT: v_lshlrev_b32_e32 v18, 16, v10
+; GFX8-NEXT: v_mul_f32_e32 v16, v18, v16
+; GFX8-NEXT: v_and_b32_e32 v18, 0xffff0000, v26
+; GFX8-NEXT: v_and_b32_e32 v10, 0xffff0000, v10
+; GFX8-NEXT: v_mul_f32_e32 v10, v10, v18
+; GFX8-NEXT: v_lshrrev_b32_e32 v10, 16, v10
+; GFX8-NEXT: v_alignbit_b32 v10, v10, v16, 16
+; GFX8-NEXT: v_lshlrev_b32_e32 v16, 16, v27
+; GFX8-NEXT: v_lshlrev_b32_e32 v18, 16, v11
+; GFX8-NEXT: v_mul_f32_e32 v16, v18, v16
+; GFX8-NEXT: v_and_b32_e32 v18, 0xffff0000, v27
+; GFX8-NEXT: v_and_b32_e32 v11, 0xffff0000, v11
+; GFX8-NEXT: v_mul_f32_e32 v11, v11, v18
+; GFX8-NEXT: v_lshrrev_b32_e32 v11, 16, v11
+; GFX8-NEXT: v_alignbit_b32 v11, v11, v16, 16
+; GFX8-NEXT: v_lshlrev_b32_e32 v16, 16, v28
+; GFX8-NEXT: v_lshlrev_b32_e32 v18, 16, v12
+; GFX8-NEXT: v_mul_f32_e32 v16, v18, v16
+; GFX8-NEXT: v_and_b32_e32 v18, 0xffff0000, v28
+; GFX8-NEXT: v_and_b32_e32 v12, 0xffff0000, v12
+; GFX8-NEXT: v_mul_f32_e32 v12, v12, v18
+; GFX8-NEXT: v_lshrrev_b32_e32 v12, 16, v12
+; GFX8-NEXT: v_alignbit_b32 v12, v12, v16, 16
+; GFX8-NEXT: v_lshlrev_b32_e32 v16, 16, v29
+; GFX8-NEXT: v_lshlrev_b32_e32 v18, 16, v13
+; GFX8-NEXT: v_mul_f32_e32 v16, v18, v16
+; GFX8-NEXT: v_and_b32_e32 v18, 0xffff0000, v29
+; GFX8-NEXT: v_and_b32_e32 v13, 0xffff0000, v13
+; GFX8-NEXT: v_mul_f32_e32 v13, v13, v18
+; GFX8-NEXT: v_lshrrev_b32_e32 v13, 16, v13
+; GFX8-NEXT: v_alignbit_b32 v13, v13, v16, 16
+; GFX8-NEXT: v_lshlrev_b32_e32 v16, 16, v30
+; GFX8-NEXT: v_lshlrev_b32_e32 v18, 16, v14
+; GFX8-NEXT: v_mul_f32_e32 v16, v18, v16
+; GFX8-NEXT: v_and_b32_e32 v18, 0xffff0000, v30
+; GFX8-NEXT: v_and_b32_e32 v14, 0xffff0000, v14
+; GFX8-NEXT: v_mul_f32_e32 v14, v14, v18
+; GFX8-NEXT: v_lshrrev_b32_e32 v14, 16, v14
+; GFX8-NEXT: v_alignbit_b32 v14, v14, v16, 16
+; GFX8-NEXT: s_waitcnt vmcnt(0)
+; GFX8-NEXT: v_lshlrev_b32_e32 v16, 16, v17
+; GFX8-NEXT: v_lshlrev_b32_e32 v18, 16, v15
+; GFX8-NEXT: v_and_b32_e32 v17, 0xffff0000, v17
+; GFX8-NEXT: v_and_b32_e32 v15, 0xffff0000, v15
+; GFX8-NEXT: v_mul_f32_e32 v15, v15, v17
+; GFX8-NEXT: v_mul_f32_e32 v16, v18, v16
; GFX8-NEXT: v_lshrrev_b32_e32 v15, 16, v15
-; GFX8-NEXT: v_lshrrev_b32_e32 v16, 16, v30
-; GFX8-NEXT: v_alignbit_b32 v0, v0, v17, 16
-; GFX8-NEXT: v_alignbit_b32 v1, v1, v18, 16
-; GFX8-NEXT: v_alignbit_b32 v2, v2, v19, 16
-; GFX8-NEXT: v_alignbit_b32 v3, v3, v20, 16
-; GFX8-NEXT: v_alignbit_b32 v4, v4, v21, 16
-; GFX8-NEXT: v_alignbit_b32 v5, v5, v22, 16
-; GFX8-NEXT: v_alignbit_b32 v6, v6, v23, 16
-; GFX8-NEXT: v_alignbit_b32 v7, v7, v24, 16
-; GFX8-NEXT: v_alignbit_b32 v14, v16, v31, 16
-; GFX8-NEXT: v_alignbit_b32 v15, v15, v32, 16
+; GFX8-NEXT: v_alignbit_b32 v15, v15, v16, 16
; GFX8-NEXT: s_setpc_b64 s[30:31]
;
; GFX9-LABEL: v_fmul_v32bf16:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX9-NEXT: v_lshlrev_b32_e32 v31, 16, v30
-; GFX9-NEXT: v_lshlrev_b32_e32 v32, 16, v14
-; GFX9-NEXT: v_and_b32_e32 v30, 0xffff0000, v30
-; GFX9-NEXT: v_and_b32_e32 v14, 0xffff0000, v14
+; GFX9-NEXT: v_lshlrev_b32_e32 v31, 16, v16
+; GFX9-NEXT: v_lshlrev_b32_e32 v32, 16, v0
+; GFX9-NEXT: v_and_b32_e32 v16, 0xffff0000, v16
+; GFX9-NEXT: v_and_b32_e32 v0, 0xffff0000, v0
; GFX9-NEXT: v_mul_f32_e32 v31, v32, v31
-; GFX9-NEXT: v_mul_f32_e32 v14, v14, v30
-; GFX9-NEXT: v_lshlrev_b32_e32 v30, 16, v29
-; GFX9-NEXT: v_lshlrev_b32_e32 v32, 16, v13
-; GFX9-NEXT: v_and_b32_e32 v29, 0xffff0000, v29
-; GFX9-NEXT: v_and_b32_e32 v13, 0xffff0000, v13
-; GFX9-NEXT: v_mul_f32_e32 v30, v32, v30
-; GFX9-NEXT: v_mul_f32_e32 v13, v13, v29
-; GFX9-NEXT: v_lshlrev_b32_e32 v29, 16, v28
-; GFX9-NEXT: v_lshlrev_b32_e32 v32, 16, v12
-; GFX9-NEXT: v_and_b32_e32 v28, 0xffff0000, v28
-; GFX9-NEXT: v_and_b32_e32 v12, 0xffff0000, v12
-; GFX9-NEXT: v_mul_f32_e32 v29, v32, v29
-; GFX9-NEXT: v_mul_f32_e32 v12, v12, v28
-; GFX9-NEXT: v_lshlrev_b32_e32 v28, 16, v27
-; GFX9-NEXT: v_lshlrev_b32_e32 v32, 16, v11
-; GFX9-NEXT: v_and_b32_e32 v27, 0xffff0000, v27
-; GFX9-NEXT: v_and_b32_e32 v11, 0xffff0000, v11
-; GFX9-NEXT: v_mul_f32_e32 v28, v32, v28
-; GFX9-NEXT: v_mul_f32_e32 v11, v11, v27
-; GFX9-NEXT: v_lshlrev_b32_e32 v27, 16, v26
-; GFX9-NEXT: v_lshlrev_b32_e32 v32, 16, v10
-; GFX9-NEXT: v_and_b32_e32 v26, 0xffff0000, v26
-; GFX9-NEXT: v_and_b32_e32 v10, 0xffff0000, v10
-; GFX9-NEXT: v_mul_f32_e32 v27, v32, v27
-; GFX9-NEXT: v_mul_f32_e32 v10, v10, v26
-; GFX9-NEXT: v_lshlrev_b32_e32 v26, 16, v25
-; GFX9-NEXT: v_lshlrev_b32_e32 v32, 16, v9
-; GFX9-NEXT: v_and_b32_e32 v25, 0xffff0000, v25
-; GFX9-NEXT: v_and_b32_e32 v9, 0xffff0000, v9
-; GFX9-NEXT: v_mul_f32_e32 v26, v32, v26
-; GFX9-NEXT: v_mul_f32_e32 v9, v9, v25
-; GFX9-NEXT: v_lshlrev_b32_e32 v25, 16, v24
-; GFX9-NEXT: v_lshlrev_b32_e32 v32, 16, v8
-; GFX9-NEXT: v_and_b32_e32 v24, 0xffff0000, v24
-; GFX9-NEXT: v_and_b32_e32 v8, 0xffff0000, v8
-; GFX9-NEXT: v_mul_f32_e32 v8, v8, v24
-; GFX9-NEXT: buffer_load_dword v24, off, s[0:3], s32
-; GFX9-NEXT: v_mul_f32_e32 v25, v32, v25
-; GFX9-NEXT: v_lshlrev_b32_e32 v32, 16, v15
-; GFX9-NEXT: v_and_b32_e32 v15, 0xffff0000, v15
+; GFX9-NEXT: v_mul_f32_e32 v0, v0, v16
; GFX9-NEXT: s_mov_b32 s4, 0x7060302
-; GFX9-NEXT: v_perm_b32 v8, v8, v25, s4
-; GFX9-NEXT: v_perm_b32 v9, v9, v26, s4
-; GFX9-NEXT: v_perm_b32 v10, v10, v27, s4
-; GFX9-NEXT: v_perm_b32 v11, v11, v28, s4
-; GFX9-NEXT: v_perm_b32 v12, v12, v29, s4
-; GFX9-NEXT: v_perm_b32 v13, v13, v30, s4
-; GFX9-NEXT: v_perm_b32 v14, v14, v31, s4
-; GFX9-NEXT: s_waitcnt vmcnt(0)
-; GFX9-NEXT: v_lshlrev_b32_e32 v33, 16, v24
-; GFX9-NEXT: v_and_b32_e32 v24, 0xffff0000, v24
-; GFX9-NEXT: v_mul_f32_e32 v32, v32, v33
-; GFX9-NEXT: v_mul_f32_e32 v15, v15, v24
-; GFX9-NEXT: v_lshlrev_b32_e32 v24, 16, v23
-; GFX9-NEXT: v_lshlrev_b32_e32 v33, 16, v7
-; GFX9-NEXT: v_and_b32_e32 v23, 0xffff0000, v23
-; GFX9-NEXT: v_and_b32_e32 v7, 0xffff0000, v7
-; GFX9-NEXT: v_mul_f32_e32 v24, v33, v24
-; GFX9-NEXT: v_mul_f32_e32 v7, v7, v23
-; GFX9-NEXT: v_lshlrev_b32_e32 v23, 16, v22
-; GFX9-NEXT: v_lshlrev_b32_e32 v33, 16, v6
-; GFX9-NEXT: v_and_b32_e32 v22, 0xffff0000, v22
-; GFX9-NEXT: v_and_b32_e32 v6, 0xffff0000, v6
-; GFX9-NEXT: v_mul_f32_e32 v23, v33, v23
-; GFX9-NEXT: v_mul_f32_e32 v6, v6, v22
-; GFX9-NEXT: v_lshlrev_b32_e32 v22, 16, v21
-; GFX9-NEXT: v_lshlrev_b32_e32 v33, 16, v5
-; GFX9-NEXT: v_and_b32_e32 v21, 0xffff0000, v21
-; GFX9-NEXT: v_and_b32_e32 v5, 0xffff0000, v5
-; GFX9-NEXT: v_mul_f32_e32 v22, v33, v22
-; GFX9-NEXT: v_mul_f32_e32 v5, v5, v21
-; GFX9-NEXT: v_lshlrev_b32_e32 v21, 16, v20
-; GFX9-NEXT: v_lshlrev_b32_e32 v33, 16, v4
-; GFX9-NEXT: v_and_b32_e32 v20, 0xffff0000, v20
-; GFX9-NEXT: v_and_b32_e32 v4, 0xffff0000, v4
-; GFX9-NEXT: v_mul_f32_e32 v21, v33, v21
-; GFX9-NEXT: v_mul_f32_e32 v4, v4, v20
-; GFX9-NEXT: v_lshlrev_b32_e32 v20, 16, v19
-; GFX9-NEXT: v_lshlrev_b32_e32 v33, 16, v3
-; GFX9-NEXT: v_and_b32_e32 v19, 0xffff0000, v19
-; GFX9-NEXT: v_and_b32_e32 v3, 0xffff0000, v3
-; GFX9-NEXT: v_mul_f32_e32 v20, v33, v20
-; GFX9-NEXT: v_mul_f32_e32 v3, v3, v19
-; GFX9-NEXT: v_lshlrev_b32_e32 v19, 16, v18
-; GFX9-NEXT: v_lshlrev_b32_e32 v33, 16, v2
-; GFX9-NEXT: v_and_b32_e32 v18, 0xffff0000, v18
-; GFX9-NEXT: v_and_b32_e32 v2, 0xffff0000, v2
-; GFX9-NEXT: v_mul_f32_e32 v19, v33, v19
-; GFX9-NEXT: v_mul_f32_e32 v2, v2, v18
-; GFX9-NEXT: v_lshlrev_b32_e32 v18, 16, v17
-; GFX9-NEXT: v_lshlrev_b32_e32 v33, 16, v1
+; GFX9-NEXT: v_perm_b32 v0, v0, v31, s4
+; GFX9-NEXT: v_lshlrev_b32_e32 v16, 16, v17
+; GFX9-NEXT: v_lshlrev_b32_e32 v31, 16, v1
; GFX9-NEXT: v_and_b32_e32 v17, 0xffff0000, v17
; GFX9-NEXT: v_and_b32_e32 v1, 0xffff0000, v1
-; GFX9-NEXT: v_mul_f32_e32 v18, v33, v18
+; GFX9-NEXT: v_mul_f32_e32 v16, v31, v16
; GFX9-NEXT: v_mul_f32_e32 v1, v1, v17
-; GFX9-NEXT: v_lshlrev_b32_e32 v17, 16, v16
-; GFX9-NEXT: v_lshlrev_b32_e32 v33, 16, v0
-; GFX9-NEXT: v_and_b32_e32 v16, 0xffff0000, v16
-; GFX9-NEXT: v_and_b32_e32 v0, 0xffff0000, v0
-; GFX9-NEXT: v_mul_f32_e32 v17, v33, v17
-; GFX9-NEXT: v_mul_f32_e32 v0, v0, v16
-; GFX9-NEXT: v_perm_b32 v0, v0, v17, s4
-; GFX9-NEXT: v_perm_b32 v1, v1, v18, s4
-; GFX9-NEXT: v_perm_b32 v2, v2, v19, s4
-; GFX9-NEXT: v_perm_b32 v3, v3, v20, s4
-; GFX9-NEXT: v_perm_b32 v4, v4, v21, s4
-; GFX9-NEXT: v_perm_b32 v5, v5, v22, s4
-; GFX9-NEXT: v_perm_b32 v6, v6, v23, s4
-; GFX9-NEXT: v_perm_b32 v7, v7, v24, s4
-; GFX9-NEXT: v_perm_b32 v15, v15, v32, s4
+; GFX9-NEXT: v_perm_b32 v1, v1, v16, s4
+; GFX9-NEXT: v_lshlrev_b32_e32 v16, 16, v18
+; GFX9-NEXT: v_lshlrev_b32_e32 v17, 16, v2
+; GFX9-NEXT: v_mul_f32_e32 v16, v17, v16
+; GFX9-NEXT: v_and_b32_e32 v17, 0xffff0000, v18
+; GFX9-NEXT: buffer_load_dword v18, off, s[0:3], s32
+; GFX9-NEXT: v_and_b32_e32 v2, 0xffff0000, v2
+; GFX9-NEXT: v_mul_f32_e32 v2, v2, v17
+; GFX9-NEXT: v_perm_b32 v2, v2, v16, s4
+; GFX9-NEXT: v_lshlrev_b32_e32 v16, 16, v19
+; GFX9-NEXT: v_lshlrev_b32_e32 v17, 16, v3
+; GFX9-NEXT: v_mul_f32_e32 v16, v17, v16
+; GFX9-NEXT: v_and_b32_e32 v17, 0xffff0000, v19
+; GFX9-NEXT: v_and_b32_e32 v3, 0xffff0000, v3
+; GFX9-NEXT: v_mul_f32_e32 v3, v3, v17
+; GFX9-NEXT: v_perm_b32 v3, v3, v16, s4
+; GFX9-NEXT: v_lshlrev_b32_e32 v16, 16, v20
+; GFX9-NEXT: v_lshlrev_b32_e32 v17, 16, v4
+; GFX9-NEXT: v_mul_f32_e32 v16, v17, v16
+; GFX9-NEXT: v_and_b32_e32 v17, 0xffff0000, v20
+; GFX9-NEXT: v_and_b32_e32 v4, 0xffff0000, v4
+; GFX9-NEXT: v_mul_f32_e32 v4, v4, v17
+; GFX9-NEXT: v_perm_b32 v4, v4, v16, s4
+; GFX9-NEXT: v_lshlrev_b32_e32 v16, 16, v21
+; GFX9-NEXT: v_lshlrev_b32_e32 v17, 16, v5
+; GFX9-NEXT: v_mul_f32_e32 v16, v17, v16
+; GFX9-NEXT: v_and_b32_e32 v17, 0xffff0000, v21
+; GFX9-NEXT: v_and_b32_e32 v5, 0xffff0000, v5
+; GFX9-NEXT: v_mul_f32_e32 v5, v5, v17
+; GFX9-NEXT: v_perm_b32 v5, v5, v16, s4
+; GFX9-NEXT: v_lshlrev_b32_e32 v16, 16, v22
+; GFX9-NEXT: v_lshlrev_b32_e32 v17, 16, v6
+; GFX9-NEXT: v_mul_f32_e32 v16, v17, v16
+; GFX9-NEXT: v_and_b32_e32 v17, 0xffff0000, v22
+; GFX9-NEXT: v_and_b32_e32 v6, 0xffff0000, v6
+; GFX9-NEXT: v_mul_f32_e32 v6, v6, v17
+; GFX9-NEXT: v_perm_b32 v6, v6, v16, s4
+; GFX9-NEXT: v_lshlrev_b32_e32 v16, 16, v23
+; GFX9-NEXT: v_lshlrev_b32_e32 v17, 16, v7
+; GFX9-NEXT: v_mul_f32_e32 v16, v17, v16
+; GFX9-NEXT: v_and_b32_e32 v17, 0xffff0000, v23
+; GFX9-NEXT: v_and_b32_e32 v7, 0xffff0000, v7
+; GFX9-NEXT: v_mul_f32_e32 v7, v7, v17
+; GFX9-NEXT: v_perm_b32 v7, v7, v16, s4
+; GFX9-NEXT: v_lshlrev_b32_e32 v16, 16, v24
+; GFX9-NEXT: v_lshlrev_b32_e32 v17, 16, v8
+; GFX9-NEXT: v_mul_f32_e32 v16, v17, v16
+; GFX9-NEXT: v_and_b32_e32 v17, 0xffff0000, v24
+; GFX9-NEXT: v_and_b32_e32 v8, 0xffff0000, v8
+; GFX9-NEXT: v_mul_f32_e32 v8, v8, v17
+; GFX9-NEXT: v_perm_b32 v8, v8, v16, s4
+; GFX9-NEXT: v_lshlrev_b32_e32 v16, 16, v25
+; GFX9-NEXT: v_lshlrev_b32_e32 v17, 16, v9
+; GFX9-NEXT: v_mul_f32_e32 v16, v17, v16
+; GFX9-NEXT: v_and_b32_e32 v17, 0xffff0000, v25
+; GFX9-NEXT: v_and_b32_e32 v9, 0xffff0000, v9
+; GFX9-NEXT: v_mul_f32_e32 v9, v9, v17
+; GFX9-NEXT: v_perm_b32 v9, v9, v16, s4
+; GFX9-NEXT: v_lshlrev_b32_e32 v16, 16, v26
+; GFX9-NEXT: v_lshlrev_b32_e32 v17, 16, v10
+; GFX9-NEXT: v_mul_f32_e32 v16, v17, v16
+; GFX9-NEXT: v_and_b32_e32 v17, 0xffff0000, v26
+; GFX9-NEXT: v_and_b32_e32 v10, 0xffff0000, v10
+; GFX9-NEXT: v_mul_f32_e32 v10, v10, v17
+; GFX9-NEXT: v_perm_b32 v10, v10, v16, s4
+; GFX9-NEXT: v_lshlrev_b32_e32 v16, 16, v27
+; GFX9-NEXT: v_lshlrev_b32_e32 v17, 16, v11
+; GFX9-NEXT: v_mul_f32_e32 v16, v17, v16
+; GFX9-NEXT: v_and_b32_e32 v17, 0xffff0000, v27
+; GFX9-NEXT: v_and_b32_e32 v11, 0xffff0000, v11
+; GFX9-NEXT: v_mul_f32_e32 v11, v11, v17
+; GFX9-NEXT: v_perm_b32 v11, v11, v16, s4
+; GFX9-NEXT: v_lshlrev_b32_e32 v16, 16, v28
+; GFX9-NEXT: v_lshlrev_b32_e32 v17, 16, v12
+; GFX9-NEXT: v_mul_f32_e32 v16, v17, v16
+; GFX9-NEXT: v_and_b32_e32 v17, 0xffff0000, v28
+; GFX9-NEXT: v_and_b32_e32 v12, 0xffff0000, v12
+; GFX9-NEXT: v_mul_f32_e32 v12, v12, v17
+; GFX9-NEXT: v_perm_b32 v12, v12, v16, s4
+; GFX9-NEXT: v_lshlrev_b32_e32 v16, 16, v29
+; GFX9-NEXT: v_lshlrev_b32_e32 v17, 16, v13
+; GFX9-NEXT: v_mul_f32_e32 v16, v17, v16
+; GFX9-NEXT: v_and_b32_e32 v17, 0xffff0000, v29
+; GFX9-NEXT: v_and_b32_e32 v13, 0xffff0000, v13
+; GFX9-NEXT: v_mul_f32_e32 v13, v13, v17
+; GFX9-NEXT: v_perm_b32 v13, v13, v16, s4
+; GFX9-NEXT: v_lshlrev_b32_e32 v16, 16, v30
+; GFX9-NEXT: v_lshlrev_b32_e32 v17, 16, v14
+; GFX9-NEXT: v_mul_f32_e32 v16, v17, v16
+; GFX9-NEXT: v_and_b32_e32 v17, 0xffff0000, v30
+; GFX9-NEXT: v_and_b32_e32 v14, 0xffff0000, v14
+; GFX9-NEXT: v_mul_f32_e32 v14, v14, v17
+; GFX9-NEXT: v_perm_b32 v14, v14, v16, s4
+; GFX9-NEXT: s_waitcnt vmcnt(0)
+; GFX9-NEXT: v_lshlrev_b32_e32 v16, 16, v18
+; GFX9-NEXT: v_lshlrev_b32_e32 v17, 16, v15
+; GFX9-NEXT: v_mul_f32_e32 v16, v17, v16
+; GFX9-NEXT: v_and_b32_e32 v17, 0xffff0000, v18
+; GFX9-NEXT: v_and_b32_e32 v15, 0xffff0000, v15
+; GFX9-NEXT: v_mul_f32_e32 v15, v15, v17
+; GFX9-NEXT: v_perm_b32 v15, v15, v16, s4
; GFX9-NEXT: s_setpc_b64 s[30:31]
;
; GFX10-LABEL: v_fmul_v32bf16:
; GFX10: ; %bb.0:
; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX10-NEXT: buffer_load_dword v31, off, s[0:3], s32
-; GFX10-NEXT: v_lshlrev_b32_e32 v39, 16, v27
-; GFX10-NEXT: v_lshlrev_b32_e32 v48, 16, v11
-; GFX10-NEXT: v_and_b32_e32 v27, 0xffff0000, v27
-; GFX10-NEXT: v_and_b32_e32 v11, 0xffff0000, v11
-; GFX10-NEXT: v_lshlrev_b32_e32 v49, 16, v26
-; GFX10-NEXT: v_lshlrev_b32_e32 v50, 16, v10
-; GFX10-NEXT: v_and_b32_e32 v26, 0xffff0000, v26
-; GFX10-NEXT: v_and_b32_e32 v10, 0xffff0000, v10
-; GFX10-NEXT: v_lshlrev_b32_e32 v33, 16, v30
-; GFX10-NEXT: v_lshlrev_b32_e32 v34, 16, v14
-; GFX10-NEXT: v_and_b32_e32 v30, 0xffff0000, v30
-; GFX10-NEXT: v_and_b32_e32 v14, 0xffff0000, v14
-; GFX10-NEXT: v_lshlrev_b32_e32 v35, 16, v29
-; GFX10-NEXT: v_lshlrev_b32_e32 v36, 16, v13
-; GFX10-NEXT: v_and_b32_e32 v29, 0xffff0000, v29
-; GFX10-NEXT: v_and_b32_e32 v13, 0xffff0000, v13
-; GFX10-NEXT: v_lshlrev_b32_e32 v37, 16, v28
-; GFX10-NEXT: v_lshlrev_b32_e32 v38, 16, v12
-; GFX10-NEXT: v_and_b32_e32 v28, 0xffff0000, v28
-; GFX10-NEXT: v_and_b32_e32 v12, 0xffff0000, v12
-; GFX10-NEXT: v_mul_f32_e32 v39, v48, v39
-; GFX10-NEXT: v_lshlrev_b32_e32 v48, 16, v17
-; GFX10-NEXT: v_mul_f32_e32 v11, v11, v27
-; GFX10-NEXT: v_lshlrev_b32_e32 v27, 16, v1
-; GFX10-NEXT: v_and_b32_e32 v17, 0xffff0000, v17
-; GFX10-NEXT: v_and_b32_e32 v1, 0xffff0000, v1
-; GFX10-NEXT: v_mul_f32_e32 v49, v50, v49
-; GFX10-NEXT: v_lshlrev_b32_e32 v50, 16, v16
-; GFX10-NEXT: v_mul_f32_e32 v10, v10, v26
-; GFX10-NEXT: v_lshlrev_b32_e32 v26, 16, v0
-; GFX10-NEXT: v_and_b32_e32 v16, 0xffff0000, v16
-; GFX10-NEXT: v_and_b32_e32 v0, 0xffff0000, v0
-; GFX10-NEXT: v_lshlrev_b32_e32 v32, 16, v15
-; GFX10-NEXT: v_and_b32_e32 v15, 0xffff0000, v15
-; GFX10-NEXT: v_lshlrev_b32_e32 v51, 16, v25
-; GFX10-NEXT: v_lshlrev_b32_e32 v52, 16, v9
-; GFX10-NEXT: v_and_b32_e32 v25, 0xffff0000, v25
-; GFX10-NEXT: v_and_b32_e32 v9, 0xffff0000, v9
-; GFX10-NEXT: v_lshlrev_b32_e32 v53, 16, v24
-; GFX10-NEXT: v_lshlrev_b32_e32 v54, 16, v8
-; GFX10-NEXT: v_and_b32_e32 v24, 0xffff0000, v24
-; GFX10-NEXT: v_and_b32_e32 v8, 0xffff0000, v8
-; GFX10-NEXT: v_lshlrev_b32_e32 v55, 16, v23
-; GFX10-NEXT: v_lshlrev_b32_e32 v64, 16, v7
-; GFX10-NEXT: v_and_b32_e32 v23, 0xffff0000, v23
-; GFX10-NEXT: v_and_b32_e32 v7, 0xffff0000, v7
-; GFX10-NEXT: v_lshlrev_b32_e32 v65, 16, v22
-; GFX10-NEXT: v_lshlrev_b32_e32 v66, 16, v6
-; GFX10-NEXT: v_and_b32_e32 v22, 0xffff0000, v22
-; GFX10-NEXT: v_and_b32_e32 v6, 0xffff0000, v6
-; GFX10-NEXT: v_lshlrev_b32_e32 v67, 16, v21
-; GFX10-NEXT: v_lshlrev_b32_e32 v68, 16, v5
+; GFX10-NEXT: v_lshlrev_b32_e32 v50, 16, v21
+; GFX10-NEXT: v_lshlrev_b32_e32 v51, 16, v5
; GFX10-NEXT: v_and_b32_e32 v21, 0xffff0000, v21
; GFX10-NEXT: v_and_b32_e32 v5, 0xffff0000, v5
-; GFX10-NEXT: v_mul_f32_e32 v33, v34, v33
-; GFX10-NEXT: v_lshlrev_b32_e32 v34, 16, v20
-; GFX10-NEXT: v_mul_f32_e32 v14, v14, v30
-; GFX10-NEXT: v_lshlrev_b32_e32 v30, 16, v4
-; GFX10-NEXT: v_and_b32_e32 v20, 0xffff0000, v20
-; GFX10-NEXT: v_and_b32_e32 v4, 0xffff0000, v4
-; GFX10-NEXT: v_mul_f32_e32 v35, v36, v35
-; GFX10-NEXT: v_lshlrev_b32_e32 v36, 16, v19
-; GFX10-NEXT: v_mul_f32_e32 v13, v13, v29
-; GFX10-NEXT: v_lshlrev_b32_e32 v29, 16, v3
-; GFX10-NEXT: v_and_b32_e32 v19, 0xffff0000, v19
-; GFX10-NEXT: v_and_b32_e32 v3, 0xffff0000, v3
-; GFX10-NEXT: v_mul_f32_e32 v37, v38, v37
-; GFX10-NEXT: v_lshlrev_b32_e32 v38, 16, v18
-; GFX10-NEXT: v_mul_f32_e32 v12, v12, v28
-; GFX10-NEXT: v_lshlrev_b32_e32 v28, 16, v2
+; GFX10-NEXT: v_lshlrev_b32_e32 v52, 16, v22
+; GFX10-NEXT: v_lshlrev_b32_e32 v53, 16, v6
+; GFX10-NEXT: v_and_b32_e32 v22, 0xffff0000, v22
+; GFX10-NEXT: v_and_b32_e32 v6, 0xffff0000, v6
+; GFX10-NEXT: v_lshlrev_b32_e32 v54, 16, v23
+; GFX10-NEXT: v_lshlrev_b32_e32 v55, 16, v7
+; GFX10-NEXT: v_and_b32_e32 v23, 0xffff0000, v23
+; GFX10-NEXT: v_and_b32_e32 v7, 0xffff0000, v7
+; GFX10-NEXT: v_lshlrev_b32_e32 v32, 16, v16
+; GFX10-NEXT: v_lshlrev_b32_e32 v33, 16, v0
+; GFX10-NEXT: v_and_b32_e32 v16, 0xffff0000, v16
+; GFX10-NEXT: v_and_b32_e32 v0, 0xffff0000, v0
+; GFX10-NEXT: v_lshlrev_b32_e32 v34, 16, v17
+; GFX10-NEXT: v_lshlrev_b32_e32 v35, 16, v1
+; GFX10-NEXT: v_and_b32_e32 v17, 0xffff0000, v17
+; GFX10-NEXT: v_and_b32_e32 v1, 0xffff0000, v1
+; GFX10-NEXT: v_lshlrev_b32_e32 v36, 16, v18
+; GFX10-NEXT: v_lshlrev_b32_e32 v37, 16, v2
; GFX10-NEXT: v_and_b32_e32 v18, 0xffff0000, v18
; GFX10-NEXT: v_and_b32_e32 v2, 0xffff0000, v2
+; GFX10-NEXT: v_lshlrev_b32_e32 v38, 16, v19
+; GFX10-NEXT: v_lshlrev_b32_e32 v39, 16, v3
+; GFX10-NEXT: v_and_b32_e32 v19, 0xffff0000, v19
+; GFX10-NEXT: v_and_b32_e32 v3, 0xffff0000, v3
+; GFX10-NEXT: v_lshlrev_b32_e32 v48, 16, v20
+; GFX10-NEXT: v_lshlrev_b32_e32 v49, 16, v4
+; GFX10-NEXT: v_and_b32_e32 v20, 0xffff0000, v20
+; GFX10-NEXT: v_and_b32_e32 v4, 0xffff0000, v4
+; GFX10-NEXT: v_mul_f32_e32 v5, v5, v21
+; GFX10-NEXT: v_mul_f32_e32 v21, v53, v52
+; GFX10-NEXT: v_mul_f32_e32 v6, v6, v22
+; GFX10-NEXT: v_mul_f32_e32 v22, v55, v54
+; GFX10-NEXT: v_mul_f32_e32 v7, v7, v23
+; GFX10-NEXT: v_lshlrev_b32_e32 v64, 16, v24
+; GFX10-NEXT: v_lshlrev_b32_e32 v65, 16, v8
+; GFX10-NEXT: v_and_b32_e32 v24, 0xffff0000, v24
+; GFX10-NEXT: v_and_b32_e32 v8, 0xffff0000, v8
+; GFX10-NEXT: v_lshlrev_b32_e32 v66, 16, v25
+; GFX10-NEXT: v_lshlrev_b32_e32 v67, 16, v9
+; GFX10-NEXT: v_and_b32_e32 v25, 0xffff0000, v25
+; GFX10-NEXT: v_and_b32_e32 v9, 0xffff0000, v9
+; GFX10-NEXT: v_lshlrev_b32_e32 v68, 16, v26
+; GFX10-NEXT: v_mul_f32_e32 v32, v33, v32
+; GFX10-NEXT: v_lshlrev_b32_e32 v33, 16, v10
+; GFX10-NEXT: v_and_b32_e32 v26, 0xffff0000, v26
+; GFX10-NEXT: v_and_b32_e32 v10, 0xffff0000, v10
; GFX10-NEXT: v_mul_f32_e32 v0, v0, v16
+; GFX10-NEXT: v_lshlrev_b32_e32 v16, 16, v27
+; GFX10-NEXT: v_mul_f32_e32 v34, v35, v34
+; GFX10-NEXT: v_lshlrev_b32_e32 v35, 16, v11
+; GFX10-NEXT: v_and_b32_e32 v27, 0xffff0000, v27
+; GFX10-NEXT: v_and_b32_e32 v11, 0xffff0000, v11
; GFX10-NEXT: v_mul_f32_e32 v1, v1, v17
-; GFX10-NEXT: v_mul_f32_e32 v51, v52, v51
-; GFX10-NEXT: v_mul_f32_e32 v9, v9, v25
-; GFX10-NEXT: v_mul_f32_e32 v25, v54, v53
-; GFX10-NEXT: v_mul_f32_e32 v8, v8, v24
-; GFX10-NEXT: v_mul_f32_e32 v24, v64, v55
-; GFX10-NEXT: v_mul_f32_e32 v7, v7, v23
-; GFX10-NEXT: v_mul_f32_e32 v23, v66, v65
-; GFX10-NEXT: v_mul_f32_e32 v6, v6, v22
-; GFX10-NEXT: v_mul_f32_e32 v22, v68, v67
-; GFX10-NEXT: v_mul_f32_e32 v5, v5, v21
-; GFX10-NEXT: v_mul_f32_e32 v21, v30, v34
-; GFX10-NEXT: v_mul_f32_e32 v29, v29, v36
-; GFX10-NEXT: v_mul_f32_e32 v28, v28, v38
-; GFX10-NEXT: v_mul_f32_e32 v27, v27, v48
-; GFX10-NEXT: v_mul_f32_e32 v26, v26, v50
+; GFX10-NEXT: v_lshlrev_b32_e32 v17, 16, v28
+; GFX10-NEXT: v_mul_f32_e32 v36, v37, v36
+; GFX10-NEXT: v_lshlrev_b32_e32 v37, 16, v12
+; GFX10-NEXT: v_and_b32_e32 v28, 0xffff0000, v28
+; GFX10-NEXT: v_and_b32_e32 v12, 0xffff0000, v12
; GFX10-NEXT: v_mul_f32_e32 v2, v2, v18
+; GFX10-NEXT: v_lshlrev_b32_e32 v18, 16, v29
+; GFX10-NEXT: v_mul_f32_e32 v38, v39, v38
+; GFX10-NEXT: v_lshlrev_b32_e32 v39, 16, v13
+; GFX10-NEXT: v_and_b32_e32 v29, 0xffff0000, v29
+; GFX10-NEXT: v_and_b32_e32 v13, 0xffff0000, v13
; GFX10-NEXT: v_mul_f32_e32 v3, v3, v19
+; GFX10-NEXT: v_lshlrev_b32_e32 v19, 16, v30
+; GFX10-NEXT: v_mul_f32_e32 v48, v49, v48
+; GFX10-NEXT: v_lshlrev_b32_e32 v49, 16, v14
+; GFX10-NEXT: v_and_b32_e32 v30, 0xffff0000, v30
+; GFX10-NEXT: v_and_b32_e32 v14, 0xffff0000, v14
; GFX10-NEXT: v_mul_f32_e32 v4, v4, v20
-; GFX10-NEXT: v_perm_b32 v1, v1, v27, 0x7060302
-; GFX10-NEXT: v_perm_b32 v0, v0, v26, 0x7060302
-; GFX10-NEXT: v_perm_b32 v2, v2, v28, 0x7060302
-; GFX10-NEXT: v_perm_b32 v3, v3, v29, 0x7060302
-; GFX10-NEXT: v_perm_b32 v4, v4, v21, 0x7060302
-; GFX10-NEXT: v_perm_b32 v5, v5, v22, 0x7060302
-; GFX10-NEXT: v_perm_b32 v6, v6, v23, 0x7060302
-; GFX10-NEXT: v_perm_b32 v7, v7, v24, 0x7060302
-; GFX10-NEXT: v_perm_b32 v8, v8, v25, 0x7060302
-; GFX10-NEXT: v_perm_b32 v9, v9, v51, 0x7060302
-; GFX10-NEXT: v_perm_b32 v10, v10, v49, 0x7060302
-; GFX10-NEXT: v_perm_b32 v11, v11, v39, 0x7060302
-; GFX10-NEXT: v_perm_b32 v12, v12, v37, 0x7060302
-; GFX10-NEXT: v_perm_b32 v13, v13, v35, 0x7060302
-; GFX10-NEXT: v_perm_b32 v14, v14, v33, 0x7060302
+; GFX10-NEXT: v_lshlrev_b32_e32 v20, 16, v15
+; GFX10-NEXT: v_and_b32_e32 v15, 0xffff0000, v15
+; GFX10-NEXT: v_perm_b32 v6, v6, v21, 0x7060302
+; GFX10-NEXT: v_perm_b32 v7, v7, v22, 0x7060302
+; GFX10-NEXT: v_mul_f32_e32 v50, v51, v50
+; GFX10-NEXT: v_mul_f32_e32 v23, v65, v64
+; GFX10-NEXT: v_mul_f32_e32 v8, v8, v24
+; GFX10-NEXT: v_mul_f32_e32 v24, v67, v66
+; GFX10-NEXT: v_mul_f32_e32 v9, v9, v25
+; GFX10-NEXT: v_mul_f32_e32 v25, v33, v68
+; GFX10-NEXT: v_mul_f32_e32 v10, v10, v26
+; GFX10-NEXT: v_mul_f32_e32 v16, v35, v16
+; GFX10-NEXT: v_mul_f32_e32 v11, v11, v27
+; GFX10-NEXT: v_mul_f32_e32 v17, v37, v17
+; GFX10-NEXT: v_mul_f32_e32 v12, v12, v28
+; GFX10-NEXT: v_mul_f32_e32 v18, v39, v18
+; GFX10-NEXT: v_mul_f32_e32 v13, v13, v29
+; GFX10-NEXT: v_mul_f32_e32 v19, v49, v19
+; GFX10-NEXT: v_mul_f32_e32 v14, v14, v30
+; GFX10-NEXT: v_perm_b32 v0, v0, v32, 0x7060302
+; GFX10-NEXT: v_perm_b32 v1, v1, v34, 0x7060302
+; GFX10-NEXT: v_perm_b32 v2, v2, v36, 0x7060302
+; GFX10-NEXT: v_perm_b32 v3, v3, v38, 0x7060302
+; GFX10-NEXT: v_perm_b32 v4, v4, v48, 0x7060302
+; GFX10-NEXT: v_perm_b32 v5, v5, v50, 0x7060302
+; GFX10-NEXT: v_perm_b32 v8, v8, v23, 0x7060302
+; GFX10-NEXT: v_perm_b32 v9, v9, v24, 0x7060302
+; GFX10-NEXT: v_perm_b32 v10, v10, v25, 0x7060302
+; GFX10-NEXT: v_perm_b32 v11, v11, v16, 0x7060302
+; GFX10-NEXT: v_perm_b32 v12, v12, v17, 0x7060302
+; GFX10-NEXT: v_perm_b32 v13, v13, v18, 0x7060302
+; GFX10-NEXT: v_perm_b32 v14, v14, v19, 0x7060302
; GFX10-NEXT: s_waitcnt vmcnt(0)
-; GFX10-NEXT: v_lshlrev_b32_e32 v16, 16, v31
-; GFX10-NEXT: v_and_b32_e32 v17, 0xffff0000, v31
-; GFX10-NEXT: v_mul_f32_e32 v16, v32, v16
-; GFX10-NEXT: v_mul_f32_e32 v15, v15, v17
-; GFX10-NEXT: v_perm_b32 v15, v15, v16, 0x7060302
+; GFX10-NEXT: v_lshlrev_b32_e32 v21, 16, v31
+; GFX10-NEXT: v_and_b32_e32 v22, 0xffff0000, v31
+; GFX10-NEXT: v_mul_f32_e32 v20, v20, v21
+; GFX10-NEXT: v_mul_f32_e32 v15, v15, v22
+; GFX10-NEXT: v_perm_b32 v15, v15, v20, 0x7060302
; GFX10-NEXT: s_setpc_b64 s[30:31]
;
; GFX11-LABEL: v_fmul_v32bf16:
; GFX11: ; %bb.0:
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-NEXT: scratch_load_b32 v31, off, s32
-; GFX11-NEXT: v_lshlrev_b32_e32 v83, 16, v17
-; GFX11-NEXT: v_lshlrev_b32_e32 v84, 16, v1
-; GFX11-NEXT: v_and_b32_e32 v17, 0xffff0000, v17
-; GFX11-NEXT: v_and_b32_e32 v1, 0xffff0000, v1
-; GFX11-NEXT: v_lshlrev_b32_e32 v85, 16, v16
-; GFX11-NEXT: v_lshlrev_b32_e32 v86, 16, v0
-; GFX11-NEXT: v_and_b32_e32 v16, 0xffff0000, v16
-; GFX11-NEXT: v_and_b32_e32 v0, 0xffff0000, v0
-; GFX11-NEXT: v_lshlrev_b32_e32 v54, 16, v8
-; GFX11-NEXT: v_lshlrev_b32_e32 v64, 16, v7
-; GFX11-NEXT: v_and_b32_e32 v7, 0xffff0000, v7
-; GFX11-NEXT: v_lshlrev_b32_e32 v65, 16, v22
-; GFX11-NEXT: v_lshlrev_b32_e32 v66, 16, v6
-; GFX11-NEXT: v_lshlrev_b32_e32 v48, 16, v11
-; GFX11-NEXT: v_dual_mul_f32 v0, v0, v16 :: v_dual_and_b32 v11, 0xffff0000, v11
-; GFX11-NEXT: v_and_b32_e32 v22, 0xffff0000, v22
-; GFX11-NEXT: v_and_b32_e32 v6, 0xffff0000, v6
-; GFX11-NEXT: v_lshlrev_b32_e32 v67, 16, v21
-; GFX11-NEXT: v_lshlrev_b32_e32 v68, 16, v5
-; GFX11-NEXT: v_lshlrev_b32_e32 v51, 16, v25
-; GFX11-NEXT: v_and_b32_e32 v21, 0xffff0000, v21
-; GFX11-NEXT: v_and_b32_e32 v5, 0xffff0000, v5
-; GFX11-NEXT: v_lshlrev_b32_e32 v69, 16, v20
-; GFX11-NEXT: v_lshlrev_b32_e32 v70, 16, v4
-; GFX11-NEXT: v_and_b32_e32 v20, 0xffff0000, v20
-; GFX11-NEXT: v_and_b32_e32 v4, 0xffff0000, v4
-; GFX11-NEXT: v_lshlrev_b32_e32 v55, 16, v23
-; GFX11-NEXT: v_lshlrev_b32_e32 v71, 16, v19
-; GFX11-NEXT: v_lshlrev_b32_e32 v80, 16, v3
+; GFX11-NEXT: v_lshlrev_b32_e32 v68, 16, v26
+; GFX11-NEXT: v_lshlrev_b32_e32 v69, 16, v10
+; GFX11-NEXT: v_and_b32_e32 v26, 0xffff0000, v26
+; GFX11-NEXT: v_and_b32_e32 v10, 0xffff0000, v10
+; GFX11-NEXT: v_lshlrev_b32_e32 v70, 16, v27
+; GFX11-NEXT: v_lshlrev_b32_e32 v71, 16, v11
+; GFX11-NEXT: v_lshlrev_b32_e32 v50, 16, v21
+; GFX11-NEXT: v_lshlrev_b32_e32 v54, 16, v23
+; GFX11-NEXT: v_lshlrev_b32_e32 v55, 16, v7
+; GFX11-NEXT: v_lshlrev_b32_e32 v64, 16, v24
+; GFX11-NEXT: v_lshlrev_b32_e32 v65, 16, v8
+; GFX11-NEXT: v_and_b32_e32 v24, 0xffff0000, v24
+; GFX11-NEXT: v_and_b32_e32 v8, 0xffff0000, v8
+; GFX11-NEXT: v_lshlrev_b32_e32 v51, 16, v5
+; GFX11-NEXT: v_dual_mul_f32 v10, v10, v26 :: v_dual_and_b32 v5, 0xffff0000, v5
+; GFX11-NEXT: v_lshlrev_b32_e32 v66, 16, v25
; GFX11-NEXT: v_and_b32_e32 v25, 0xffff0000, v25
-; GFX11-NEXT: v_and_b32_e32 v19, 0xffff0000, v19
-; GFX11-NEXT: v_and_b32_e32 v3, 0xffff0000, v3
-; GFX11-NEXT: v_lshlrev_b32_e32 v52, 16, v9
-; GFX11-NEXT: v_and_b32_e32 v9, 0xffff0000, v9
-; GFX11-NEXT: v_lshlrev_b32_e32 v81, 16, v18
-; GFX11-NEXT: v_lshlrev_b32_e32 v82, 16, v2
-; GFX11-NEXT: v_and_b32_e32 v18, 0xffff0000, v18
-; GFX11-NEXT: v_and_b32_e32 v2, 0xffff0000, v2
-; GFX11-NEXT: v_lshlrev_b32_e32 v53, 16, v24
-; GFX11-NEXT: v_dual_mul_f32 v1, v1, v17 :: v_dual_and_b32 v24, 0xffff0000, v24
-; GFX11-NEXT: v_dual_mul_f32 v5, v5, v21 :: v_dual_lshlrev_b32 v50, 16, v10
-; GFX11-NEXT: v_dual_mul_f32 v21, v70, v69 :: v_dual_and_b32 v10, 0xffff0000, v10
-; GFX11-NEXT: v_dual_mul_f32 v2, v2, v18 :: v_dual_mul_f32 v3, v3, v19
-; GFX11-NEXT: v_dual_mul_f32 v4, v4, v20 :: v_dual_lshlrev_b32 v49, 16, v26
-; GFX11-NEXT: v_dual_mul_f32 v9, v9, v25 :: v_dual_and_b32 v26, 0xffff0000, v26
-; GFX11-NEXT: v_mul_f32_e32 v6, v6, v22
-; GFX11-NEXT: v_dual_mul_f32 v22, v68, v67 :: v_dual_lshlrev_b32 v37, 16, v28
+; GFX11-NEXT: v_and_b32_e32 v27, 0xffff0000, v27
+; GFX11-NEXT: v_and_b32_e32 v11, 0xffff0000, v11
+; GFX11-NEXT: v_lshlrev_b32_e32 v80, 16, v28
+; GFX11-NEXT: v_lshlrev_b32_e32 v81, 16, v12
+; GFX11-NEXT: v_lshlrev_b32_e32 v52, 16, v22
; GFX11-NEXT: v_and_b32_e32 v28, 0xffff0000, v28
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_4) | instid1(VALU_DEP_4)
-; GFX11-NEXT: v_mul_f32_e32 v10, v10, v26
-; GFX11-NEXT: v_mul_f32_e32 v26, v52, v51
-; GFX11-NEXT: v_perm_b32 v4, v4, v21, 0x7060302
-; GFX11-NEXT: v_mul_f32_e32 v25, v54, v53
-; GFX11-NEXT: v_perm_b32 v5, v5, v22, 0x7060302
-; GFX11-NEXT: v_perm_b32 v9, v9, v26, 0x7060302
-; GFX11-NEXT: s_waitcnt vmcnt(0)
-; GFX11-NEXT: v_lshlrev_b32_e32 v16, 16, v31
+; GFX11-NEXT: v_and_b32_e32 v12, 0xffff0000, v12
+; GFX11-NEXT: v_lshlrev_b32_e32 v53, 16, v6
+; GFX11-NEXT: v_lshlrev_b32_e32 v82, 16, v29
+; GFX11-NEXT: v_lshlrev_b32_e32 v83, 16, v13
; GFX11-NEXT: v_and_b32_e32 v23, 0xffff0000, v23
-; GFX11-NEXT: v_and_b32_e32 v17, 0xffff0000, v31
-; GFX11-NEXT: v_and_b32_e32 v8, 0xffff0000, v8
-; GFX11-NEXT: v_lshlrev_b32_e32 v36, 16, v13
+; GFX11-NEXT: v_and_b32_e32 v29, 0xffff0000, v29
; GFX11-NEXT: v_and_b32_e32 v13, 0xffff0000, v13
-; GFX11-NEXT: v_lshlrev_b32_e32 v39, 16, v27
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4)
-; GFX11-NEXT: v_dual_mul_f32 v8, v8, v24 :: v_dual_and_b32 v27, 0xffff0000, v27
-; GFX11-NEXT: v_mul_f32_e32 v24, v64, v55
-; GFX11-NEXT: v_lshlrev_b32_e32 v38, 16, v12
-; GFX11-NEXT: v_and_b32_e32 v12, 0xffff0000, v12
-; GFX11-NEXT: v_lshlrev_b32_e32 v35, 16, v29
-; GFX11-NEXT: v_mul_f32_e32 v7, v7, v23
-; GFX11-NEXT: v_mul_f32_e32 v23, v66, v65
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_1) | instid1(VALU_DEP_2)
-; GFX11-NEXT: v_dual_mul_f32 v12, v12, v28 :: v_dual_and_b32 v29, 0xffff0000, v29
-; GFX11-NEXT: v_dual_mul_f32 v28, v48, v39 :: v_dual_lshlrev_b32 v33, 16, v30
-; GFX11-NEXT: v_dual_mul_f32 v13, v13, v29 :: v_dual_lshlrev_b32 v34, 16, v14
-; GFX11-NEXT: v_lshlrev_b32_e32 v32, 16, v15
-; GFX11-NEXT: v_dual_mul_f32 v11, v11, v27 :: v_dual_and_b32 v14, 0xffff0000, v14
-; GFX11-NEXT: v_dual_mul_f32 v27, v50, v49 :: v_dual_and_b32 v30, 0xffff0000, v30
-; GFX11-NEXT: v_mul_f32_e32 v29, v38, v37
+; GFX11-NEXT: v_lshlrev_b32_e32 v84, 16, v30
+; GFX11-NEXT: v_lshlrev_b32_e32 v85, 16, v14
+; GFX11-NEXT: v_and_b32_e32 v22, 0xffff0000, v22
+; GFX11-NEXT: v_and_b32_e32 v30, 0xffff0000, v30
+; GFX11-NEXT: v_and_b32_e32 v14, 0xffff0000, v14
+; GFX11-NEXT: v_and_b32_e32 v7, 0xffff0000, v7
+; GFX11-NEXT: v_lshlrev_b32_e32 v86, 16, v15
+; GFX11-NEXT: v_lshlrev_b32_e32 v67, 16, v9
+; GFX11-NEXT: v_and_b32_e32 v9, 0xffff0000, v9
+; GFX11-NEXT: v_lshlrev_b32_e32 v48, 16, v20
+; GFX11-NEXT: v_dual_mul_f32 v11, v11, v27 :: v_dual_and_b32 v20, 0xffff0000, v20
; GFX11-NEXT: v_and_b32_e32 v15, 0xffff0000, v15
-; GFX11-NEXT: v_mul_f32_e32 v37, v86, v85
-; GFX11-NEXT: v_perm_b32 v6, v6, v23, 0x7060302
+; GFX11-NEXT: v_and_b32_e32 v6, 0xffff0000, v6
+; GFX11-NEXT: v_dual_mul_f32 v26, v71, v70 :: v_dual_lshlrev_b32 v49, 16, v4
+; GFX11-NEXT: v_dual_mul_f32 v13, v13, v29 :: v_dual_and_b32 v4, 0xffff0000, v4
+; GFX11-NEXT: v_lshlrev_b32_e32 v35, 16, v1
+; GFX11-NEXT: v_lshlrev_b32_e32 v37, 16, v2
+; GFX11-NEXT: v_lshlrev_b32_e32 v38, 16, v19
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4)
+; GFX11-NEXT: v_mul_f32_e32 v4, v4, v20
+; GFX11-NEXT: v_dual_mul_f32 v8, v8, v24 :: v_dual_mul_f32 v9, v9, v25
+; GFX11-NEXT: v_mul_f32_e32 v25, v69, v68
+; GFX11-NEXT: v_dual_mul_f32 v20, v51, v50 :: v_dual_lshlrev_b32 v39, 16, v3
+; GFX11-NEXT: v_mul_f32_e32 v27, v81, v80
+; GFX11-NEXT: v_mul_f32_e32 v12, v12, v28
+; GFX11-NEXT: v_dual_mul_f32 v28, v83, v82 :: v_dual_mul_f32 v29, v85, v84
+; GFX11-NEXT: v_dual_mul_f32 v6, v6, v22 :: v_dual_and_b32 v3, 0xffff0000, v3
+; GFX11-NEXT: v_mul_f32_e32 v22, v55, v54
+; GFX11-NEXT: v_lshlrev_b32_e32 v36, 16, v18
+; GFX11-NEXT: v_lshlrev_b32_e32 v34, 16, v17
+; GFX11-NEXT: v_and_b32_e32 v17, 0xffff0000, v17
+; GFX11-NEXT: v_and_b32_e32 v18, 0xffff0000, v18
; GFX11-NEXT: v_mul_f32_e32 v14, v14, v30
-; GFX11-NEXT: v_dual_mul_f32 v30, v36, v35 :: v_dual_mul_f32 v33, v34, v33
-; GFX11-NEXT: v_dual_mul_f32 v34, v80, v71 :: v_dual_mul_f32 v35, v82, v81
-; GFX11-NEXT: v_mul_f32_e32 v36, v84, v83
-; GFX11-NEXT: v_dual_mul_f32 v16, v32, v16 :: v_dual_mul_f32 v15, v15, v17
-; GFX11-NEXT: v_perm_b32 v0, v0, v37, 0x7060302
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
-; GFX11-NEXT: v_perm_b32 v2, v2, v35, 0x7060302
-; GFX11-NEXT: v_perm_b32 v1, v1, v36, 0x7060302
-; GFX11-NEXT: v_perm_b32 v3, v3, v34, 0x7060302
-; GFX11-NEXT: v_perm_b32 v7, v7, v24, 0x7060302
-; GFX11-NEXT: v_perm_b32 v8, v8, v25, 0x7060302
-; GFX11-NEXT: v_perm_b32 v10, v10, v27, 0x7060302
-; GFX11-NEXT: v_perm_b32 v11, v11, v28, 0x7060302
-; GFX11-NEXT: v_perm_b32 v12, v12, v29, 0x7060302
-; GFX11-NEXT: v_perm_b32 v13, v13, v30, 0x7060302
-; GFX11-NEXT: v_perm_b32 v14, v14, v33, 0x7060302
+; GFX11-NEXT: v_dual_mul_f32 v7, v7, v23 :: v_dual_and_b32 v2, 0xffff0000, v2
+; GFX11-NEXT: v_and_b32_e32 v1, 0xffff0000, v1
+; GFX11-NEXT: v_mul_f32_e32 v23, v65, v64
+; GFX11-NEXT: v_and_b32_e32 v19, 0xffff0000, v19
+; GFX11-NEXT: v_dual_mul_f32 v24, v67, v66 :: v_dual_and_b32 v21, 0xffff0000, v21
+; GFX11-NEXT: v_mul_f32_e32 v2, v2, v18
+; GFX11-NEXT: v_dual_mul_f32 v1, v1, v17 :: v_dual_lshlrev_b32 v32, 16, v16
+; GFX11-NEXT: v_mul_f32_e32 v18, v39, v38
+; GFX11-NEXT: v_dual_mul_f32 v3, v3, v19 :: v_dual_and_b32 v16, 0xffff0000, v16
+; GFX11-NEXT: v_mul_f32_e32 v19, v49, v48
+; GFX11-NEXT: v_mul_f32_e32 v17, v37, v36
+; GFX11-NEXT: v_lshlrev_b32_e32 v33, 16, v0
+; GFX11-NEXT: v_dual_mul_f32 v5, v5, v21 :: v_dual_and_b32 v0, 0xffff0000, v0
+; GFX11-NEXT: v_mul_f32_e32 v21, v53, v52
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_1) | instid1(VALU_DEP_4)
+; GFX11-NEXT: v_perm_b32 v2, v2, v17, 0x7060302
+; GFX11-NEXT: v_perm_b32 v3, v3, v18, 0x7060302
+; GFX11-NEXT: v_mul_f32_e32 v0, v0, v16
+; GFX11-NEXT: v_mul_f32_e32 v16, v35, v34
+; GFX11-NEXT: v_mul_f32_e32 v32, v33, v32
+; GFX11-NEXT: v_perm_b32 v4, v4, v19, 0x7060302
+; GFX11-NEXT: v_perm_b32 v5, v5, v20, 0x7060302
+; GFX11-NEXT: v_perm_b32 v6, v6, v21, 0x7060302
+; GFX11-NEXT: v_perm_b32 v1, v1, v16, 0x7060302
+; GFX11-NEXT: v_perm_b32 v0, v0, v32, 0x7060302
+; GFX11-NEXT: v_perm_b32 v7, v7, v22, 0x7060302
+; GFX11-NEXT: v_perm_b32 v8, v8, v23, 0x7060302
+; GFX11-NEXT: v_perm_b32 v9, v9, v24, 0x7060302
+; GFX11-NEXT: v_perm_b32 v10, v10, v25, 0x7060302
+; GFX11-NEXT: v_perm_b32 v11, v11, v26, 0x7060302
+; GFX11-NEXT: v_perm_b32 v12, v12, v27, 0x7060302
+; GFX11-NEXT: v_perm_b32 v13, v13, v28, 0x7060302
+; GFX11-NEXT: v_perm_b32 v14, v14, v29, 0x7060302
+; GFX11-NEXT: s_waitcnt vmcnt(0)
+; GFX11-NEXT: v_lshlrev_b32_e32 v16, 16, v31
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-NEXT: v_dual_mul_f32 v16, v86, v16 :: v_dual_and_b32 v17, 0xffff0000, v31
+; GFX11-NEXT: v_mul_f32_e32 v15, v15, v17
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
; GFX11-NEXT: v_perm_b32 v15, v15, v16, 0x7060302
; GFX11-NEXT: s_setpc_b64 s[30:31]
%op = fmul <32 x bfloat> %a, %b
@@ -14712,480 +14686,483 @@ define <32 x bfloat> @v_minnum_v32bf16(<32 x bfloat> %a, <32 x bfloat> %b) {
; GFX8-LABEL: v_minnum_v32bf16:
; GFX8: ; %bb.0:
; GFX8-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX8-NEXT: v_lshlrev_b32_e32 v31, 16, v30
-; GFX8-NEXT: v_lshlrev_b32_e32 v32, 16, v14
-; GFX8-NEXT: v_and_b32_e32 v30, 0xffff0000, v30
-; GFX8-NEXT: v_and_b32_e32 v14, 0xffff0000, v14
-; GFX8-NEXT: v_min_f32_e32 v31, v32, v31
-; GFX8-NEXT: v_min_f32_e32 v30, v14, v30
-; GFX8-NEXT: v_lshlrev_b32_e32 v14, 16, v29
-; GFX8-NEXT: v_lshlrev_b32_e32 v32, 16, v13
-; GFX8-NEXT: v_and_b32_e32 v29, 0xffff0000, v29
-; GFX8-NEXT: v_and_b32_e32 v13, 0xffff0000, v13
-; GFX8-NEXT: v_min_f32_e32 v14, v32, v14
-; GFX8-NEXT: v_min_f32_e32 v13, v13, v29
-; GFX8-NEXT: v_lshlrev_b32_e32 v29, 16, v28
-; GFX8-NEXT: v_lshlrev_b32_e32 v32, 16, v12
-; GFX8-NEXT: v_and_b32_e32 v28, 0xffff0000, v28
-; GFX8-NEXT: v_and_b32_e32 v12, 0xffff0000, v12
-; GFX8-NEXT: v_min_f32_e32 v29, v32, v29
-; GFX8-NEXT: v_min_f32_e32 v12, v12, v28
-; GFX8-NEXT: v_lshlrev_b32_e32 v28, 16, v27
-; GFX8-NEXT: v_lshlrev_b32_e32 v32, 16, v11
-; GFX8-NEXT: v_and_b32_e32 v27, 0xffff0000, v27
-; GFX8-NEXT: v_and_b32_e32 v11, 0xffff0000, v11
-; GFX8-NEXT: v_min_f32_e32 v28, v32, v28
-; GFX8-NEXT: v_min_f32_e32 v11, v11, v27
-; GFX8-NEXT: v_lshlrev_b32_e32 v27, 16, v26
-; GFX8-NEXT: v_lshlrev_b32_e32 v32, 16, v10
-; GFX8-NEXT: v_and_b32_e32 v26, 0xffff0000, v26
-; GFX8-NEXT: v_and_b32_e32 v10, 0xffff0000, v10
-; GFX8-NEXT: v_min_f32_e32 v27, v32, v27
-; GFX8-NEXT: v_min_f32_e32 v10, v10, v26
-; GFX8-NEXT: v_lshlrev_b32_e32 v26, 16, v25
-; GFX8-NEXT: v_lshlrev_b32_e32 v32, 16, v9
-; GFX8-NEXT: v_and_b32_e32 v25, 0xffff0000, v25
-; GFX8-NEXT: v_and_b32_e32 v9, 0xffff0000, v9
-; GFX8-NEXT: v_min_f32_e32 v26, v32, v26
-; GFX8-NEXT: v_min_f32_e32 v9, v9, v25
-; GFX8-NEXT: v_lshlrev_b32_e32 v25, 16, v24
-; GFX8-NEXT: v_lshlrev_b32_e32 v32, 16, v8
-; GFX8-NEXT: v_and_b32_e32 v24, 0xffff0000, v24
-; GFX8-NEXT: v_and_b32_e32 v8, 0xffff0000, v8
-; GFX8-NEXT: v_min_f32_e32 v8, v8, v24
-; GFX8-NEXT: buffer_load_dword v24, off, s[0:3], s32
-; GFX8-NEXT: v_min_f32_e32 v25, v32, v25
-; GFX8-NEXT: v_lshlrev_b32_e32 v32, 16, v15
-; GFX8-NEXT: v_and_b32_e32 v15, 0xffff0000, v15
-; GFX8-NEXT: v_lshrrev_b32_e32 v8, 16, v8
-; GFX8-NEXT: v_lshrrev_b32_e32 v9, 16, v9
-; GFX8-NEXT: v_lshrrev_b32_e32 v10, 16, v10
-; GFX8-NEXT: v_lshrrev_b32_e32 v13, 16, v13
-; GFX8-NEXT: v_lshrrev_b32_e32 v12, 16, v12
-; GFX8-NEXT: v_lshrrev_b32_e32 v11, 16, v11
-; GFX8-NEXT: v_alignbit_b32 v8, v8, v25, 16
-; GFX8-NEXT: v_alignbit_b32 v9, v9, v26, 16
-; GFX8-NEXT: v_alignbit_b32 v10, v10, v27, 16
-; GFX8-NEXT: v_alignbit_b32 v11, v11, v28, 16
-; GFX8-NEXT: v_alignbit_b32 v12, v12, v29, 16
-; GFX8-NEXT: v_alignbit_b32 v13, v13, v14, 16
-; GFX8-NEXT: s_waitcnt vmcnt(0)
-; GFX8-NEXT: v_lshlrev_b32_e32 v33, 16, v24
-; GFX8-NEXT: v_and_b32_e32 v24, 0xffff0000, v24
-; GFX8-NEXT: v_min_f32_e32 v32, v32, v33
-; GFX8-NEXT: v_min_f32_e32 v15, v15, v24
-; GFX8-NEXT: v_lshlrev_b32_e32 v24, 16, v23
-; GFX8-NEXT: v_lshlrev_b32_e32 v33, 16, v7
-; GFX8-NEXT: v_and_b32_e32 v23, 0xffff0000, v23
-; GFX8-NEXT: v_and_b32_e32 v7, 0xffff0000, v7
-; GFX8-NEXT: v_min_f32_e32 v24, v33, v24
-; GFX8-NEXT: v_min_f32_e32 v7, v7, v23
-; GFX8-NEXT: v_lshlrev_b32_e32 v23, 16, v22
-; GFX8-NEXT: v_lshlrev_b32_e32 v33, 16, v6
-; GFX8-NEXT: v_and_b32_e32 v22, 0xffff0000, v22
-; GFX8-NEXT: v_and_b32_e32 v6, 0xffff0000, v6
-; GFX8-NEXT: v_min_f32_e32 v23, v33, v23
-; GFX8-NEXT: v_min_f32_e32 v6, v6, v22
-; GFX8-NEXT: v_lshlrev_b32_e32 v22, 16, v21
-; GFX8-NEXT: v_lshlrev_b32_e32 v33, 16, v5
-; GFX8-NEXT: v_and_b32_e32 v21, 0xffff0000, v21
-; GFX8-NEXT: v_and_b32_e32 v5, 0xffff0000, v5
-; GFX8-NEXT: v_min_f32_e32 v22, v33, v22
-; GFX8-NEXT: v_min_f32_e32 v5, v5, v21
-; GFX8-NEXT: v_lshlrev_b32_e32 v21, 16, v20
-; GFX8-NEXT: v_lshlrev_b32_e32 v33, 16, v4
-; GFX8-NEXT: v_and_b32_e32 v20, 0xffff0000, v20
-; GFX8-NEXT: v_and_b32_e32 v4, 0xffff0000, v4
-; GFX8-NEXT: v_min_f32_e32 v21, v33, v21
-; GFX8-NEXT: v_min_f32_e32 v4, v4, v20
-; GFX8-NEXT: v_lshlrev_b32_e32 v20, 16, v19
-; GFX8-NEXT: v_lshlrev_b32_e32 v33, 16, v3
-; GFX8-NEXT: v_and_b32_e32 v19, 0xffff0000, v19
-; GFX8-NEXT: v_and_b32_e32 v3, 0xffff0000, v3
-; GFX8-NEXT: v_min_f32_e32 v20, v33, v20
-; GFX8-NEXT: v_min_f32_e32 v3, v3, v19
-; GFX8-NEXT: v_lshlrev_b32_e32 v19, 16, v18
-; GFX8-NEXT: v_lshlrev_b32_e32 v33, 16, v2
-; GFX8-NEXT: v_and_b32_e32 v18, 0xffff0000, v18
-; GFX8-NEXT: v_and_b32_e32 v2, 0xffff0000, v2
-; GFX8-NEXT: v_min_f32_e32 v19, v33, v19
-; GFX8-NEXT: v_min_f32_e32 v2, v2, v18
-; GFX8-NEXT: v_lshlrev_b32_e32 v18, 16, v17
-; GFX8-NEXT: v_lshlrev_b32_e32 v33, 16, v1
-; GFX8-NEXT: v_and_b32_e32 v17, 0xffff0000, v17
-; GFX8-NEXT: v_and_b32_e32 v1, 0xffff0000, v1
-; GFX8-NEXT: v_min_f32_e32 v18, v33, v18
-; GFX8-NEXT: v_min_f32_e32 v1, v1, v17
-; GFX8-NEXT: v_lshlrev_b32_e32 v17, 16, v16
-; GFX8-NEXT: v_lshlrev_b32_e32 v33, 16, v0
+; GFX8-NEXT: v_lshlrev_b32_e32 v31, 16, v16
+; GFX8-NEXT: v_lshlrev_b32_e32 v32, 16, v0
; GFX8-NEXT: v_and_b32_e32 v16, 0xffff0000, v16
; GFX8-NEXT: v_and_b32_e32 v0, 0xffff0000, v0
; GFX8-NEXT: v_min_f32_e32 v0, v0, v16
-; GFX8-NEXT: v_min_f32_e32 v17, v33, v17
+; GFX8-NEXT: v_min_f32_e32 v31, v32, v31
; GFX8-NEXT: v_lshrrev_b32_e32 v0, 16, v0
+; GFX8-NEXT: v_alignbit_b32 v0, v0, v31, 16
+; GFX8-NEXT: v_lshlrev_b32_e32 v16, 16, v17
+; GFX8-NEXT: v_lshlrev_b32_e32 v31, 16, v1
+; GFX8-NEXT: v_and_b32_e32 v17, 0xffff0000, v17
+; GFX8-NEXT: v_and_b32_e32 v1, 0xffff0000, v1
+; GFX8-NEXT: v_min_f32_e32 v1, v1, v17
+; GFX8-NEXT: v_min_f32_e32 v16, v31, v16
; GFX8-NEXT: v_lshrrev_b32_e32 v1, 16, v1
+; GFX8-NEXT: v_alignbit_b32 v1, v1, v16, 16
+; GFX8-NEXT: v_lshlrev_b32_e32 v16, 16, v18
+; GFX8-NEXT: v_lshlrev_b32_e32 v17, 16, v2
+; GFX8-NEXT: v_min_f32_e32 v16, v17, v16
+; GFX8-NEXT: v_and_b32_e32 v17, 0xffff0000, v18
+; GFX8-NEXT: v_and_b32_e32 v2, 0xffff0000, v2
+; GFX8-NEXT: v_min_f32_e32 v2, v2, v17
; GFX8-NEXT: v_lshrrev_b32_e32 v2, 16, v2
+; GFX8-NEXT: v_alignbit_b32 v2, v2, v16, 16
+; GFX8-NEXT: v_lshlrev_b32_e32 v16, 16, v19
+; GFX8-NEXT: v_lshlrev_b32_e32 v17, 16, v3
+; GFX8-NEXT: v_min_f32_e32 v16, v17, v16
+; GFX8-NEXT: v_and_b32_e32 v17, 0xffff0000, v19
+; GFX8-NEXT: v_and_b32_e32 v3, 0xffff0000, v3
+; GFX8-NEXT: v_min_f32_e32 v3, v3, v17
; GFX8-NEXT: v_lshrrev_b32_e32 v3, 16, v3
+; GFX8-NEXT: v_alignbit_b32 v3, v3, v16, 16
+; GFX8-NEXT: v_lshlrev_b32_e32 v16, 16, v20
+; GFX8-NEXT: v_lshlrev_b32_e32 v17, 16, v4
+; GFX8-NEXT: v_min_f32_e32 v16, v17, v16
+; GFX8-NEXT: v_and_b32_e32 v17, 0xffff0000, v20
+; GFX8-NEXT: v_and_b32_e32 v4, 0xffff0000, v4
+; GFX8-NEXT: v_min_f32_e32 v4, v4, v17
+; GFX8-NEXT: buffer_load_dword v17, off, s[0:3], s32
; GFX8-NEXT: v_lshrrev_b32_e32 v4, 16, v4
+; GFX8-NEXT: v_alignbit_b32 v4, v4, v16, 16
+; GFX8-NEXT: v_lshlrev_b32_e32 v16, 16, v21
+; GFX8-NEXT: v_lshlrev_b32_e32 v18, 16, v5
+; GFX8-NEXT: v_min_f32_e32 v16, v18, v16
+; GFX8-NEXT: v_and_b32_e32 v18, 0xffff0000, v21
+; GFX8-NEXT: v_and_b32_e32 v5, 0xffff0000, v5
+; GFX8-NEXT: v_min_f32_e32 v5, v5, v18
; GFX8-NEXT: v_lshrrev_b32_e32 v5, 16, v5
+; GFX8-NEXT: v_alignbit_b32 v5, v5, v16, 16
+; GFX8-NEXT: v_lshlrev_b32_e32 v16, 16, v22
+; GFX8-NEXT: v_lshlrev_b32_e32 v18, 16, v6
+; GFX8-NEXT: v_min_f32_e32 v16, v18, v16
+; GFX8-NEXT: v_and_b32_e32 v18, 0xffff0000, v22
+; GFX8-NEXT: v_and_b32_e32 v6, 0xffff0000, v6
+; GFX8-NEXT: v_min_f32_e32 v6, v6, v18
; GFX8-NEXT: v_lshrrev_b32_e32 v6, 16, v6
+; GFX8-NEXT: v_alignbit_b32 v6, v6, v16, 16
+; GFX8-NEXT: v_lshlrev_b32_e32 v16, 16, v23
+; GFX8-NEXT: v_lshlrev_b32_e32 v18, 16, v7
+; GFX8-NEXT: v_min_f32_e32 v16, v18, v16
+; GFX8-NEXT: v_and_b32_e32 v18, 0xffff0000, v23
+; GFX8-NEXT: v_and_b32_e32 v7, 0xffff0000, v7
+; GFX8-NEXT: v_min_f32_e32 v7, v7, v18
; GFX8-NEXT: v_lshrrev_b32_e32 v7, 16, v7
+; GFX8-NEXT: v_alignbit_b32 v7, v7, v16, 16
+; GFX8-NEXT: v_lshlrev_b32_e32 v16, 16, v24
+; GFX8-NEXT: v_lshlrev_b32_e32 v18, 16, v8
+; GFX8-NEXT: v_min_f32_e32 v16, v18, v16
+; GFX8-NEXT: v_and_b32_e32 v18, 0xffff0000, v24
+; GFX8-NEXT: v_and_b32_e32 v8, 0xffff0000, v8
+; GFX8-NEXT: v_min_f32_e32 v8, v8, v18
+; GFX8-NEXT: v_lshrrev_b32_e32 v8, 16, v8
+; GFX8-NEXT: v_alignbit_b32 v8, v8, v16, 16
+; GFX8-NEXT: v_lshlrev_b32_e32 v16, 16, v25
+; GFX8-NEXT: v_lshlrev_b32_e32 v18, 16, v9
+; GFX8-NEXT: v_min_f32_e32 v16, v18, v16
+; GFX8-NEXT: v_and_b32_e32 v18, 0xffff0000, v25
+; GFX8-NEXT: v_and_b32_e32 v9, 0xffff0000, v9
+; GFX8-NEXT: v_min_f32_e32 v9, v9, v18
+; GFX8-NEXT: v_lshrrev_b32_e32 v9, 16, v9
+; GFX8-NEXT: v_alignbit_b32 v9, v9, v16, 16
+; GFX8-NEXT: v_lshlrev_b32_e32 v16, 16, v26
+; GFX8-NEXT: v_lshlrev_b32_e32 v18, 16, v10
+; GFX8-NEXT: v_min_f32_e32 v16, v18, v16
+; GFX8-NEXT: v_and_b32_e32 v18, 0xffff0000, v26
+; GFX8-NEXT: v_and_b32_e32 v10, 0xffff0000, v10
+; GFX8-NEXT: v_min_f32_e32 v10, v10, v18
+; GFX8-NEXT: v_lshrrev_b32_e32 v10, 16, v10
+; GFX8-NEXT: v_alignbit_b32 v10, v10, v16, 16
+; GFX8-NEXT: v_lshlrev_b32_e32 v16, 16, v27
+; GFX8-NEXT: v_lshlrev_b32_e32 v18, 16, v11
+; GFX8-NEXT: v_min_f32_e32 v16, v18, v16
+; GFX8-NEXT: v_and_b32_e32 v18, 0xffff0000, v27
+; GFX8-NEXT: v_and_b32_e32 v11, 0xffff0000, v11
+; GFX8-NEXT: v_min_f32_e32 v11, v11, v18
+; GFX8-NEXT: v_lshrrev_b32_e32 v11, 16, v11
+; GFX8-NEXT: v_alignbit_b32 v11, v11, v16, 16
+; GFX8-NEXT: v_lshlrev_b32_e32 v16, 16, v28
+; GFX8-NEXT: v_lshlrev_b32_e32 v18, 16, v12
+; GFX8-NEXT: v_min_f32_e32 v16, v18, v16
+; GFX8-NEXT: v_and_b32_e32 v18, 0xffff0000, v28
+; GFX8-NEXT: v_and_b32_e32 v12, 0xffff0000, v12
+; GFX8-NEXT: v_min_f32_e32 v12, v12, v18
+; GFX8-NEXT: v_lshrrev_b32_e32 v12, 16, v12
+; GFX8-NEXT: v_alignbit_b32 v12, v12, v16, 16
+; GFX8-NEXT: v_lshlrev_b32_e32 v16, 16, v29
+; GFX8-NEXT: v_lshlrev_b32_e32 v18, 16, v13
+; GFX8-NEXT: v_min_f32_e32 v16, v18, v16
+; GFX8-NEXT: v_and_b32_e32 v18, 0xffff0000, v29
+; GFX8-NEXT: v_and_b32_e32 v13, 0xffff0000, v13
+; GFX8-NEXT: v_min_f32_e32 v13, v13, v18
+; GFX8-NEXT: v_lshrrev_b32_e32 v13, 16, v13
+; GFX8-NEXT: v_alignbit_b32 v13, v13, v16, 16
+; GFX8-NEXT: v_lshlrev_b32_e32 v16, 16, v30
+; GFX8-NEXT: v_lshlrev_b32_e32 v18, 16, v14
+; GFX8-NEXT: v_min_f32_e32 v16, v18, v16
+; GFX8-NEXT: v_and_b32_e32 v18, 0xffff0000, v30
+; GFX8-NEXT: v_and_b32_e32 v14, 0xffff0000, v14
+; GFX8-NEXT: v_min_f32_e32 v14, v14, v18
+; GFX8-NEXT: v_lshrrev_b32_e32 v14, 16, v14
+; GFX8-NEXT: v_alignbit_b32 v14, v14, v16, 16
+; GFX8-NEXT: s_waitcnt vmcnt(0)
+; GFX8-NEXT: v_lshlrev_b32_e32 v16, 16, v17
+; GFX8-NEXT: v_lshlrev_b32_e32 v18, 16, v15
+; GFX8-NEXT: v_and_b32_e32 v17, 0xffff0000, v17
+; GFX8-NEXT: v_and_b32_e32 v15, 0xffff0000, v15
+; GFX8-NEXT: v_min_f32_e32 v15, v15, v17
+; GFX8-NEXT: v_min_f32_e32 v16, v18, v16
; GFX8-NEXT: v_lshrrev_b32_e32 v15, 16, v15
-; GFX8-NEXT: v_lshrrev_b32_e32 v16, 16, v30
-; GFX8-NEXT: v_alignbit_b32 v0, v0, v17, 16
-; GFX8-NEXT: v_alignbit_b32 v1, v1, v18, 16
-; GFX8-NEXT: v_alignbit_b32 v2, v2, v19, 16
-; GFX8-NEXT: v_alignbit_b32 v3, v3, v20, 16
-; GFX8-NEXT: v_alignbit_b32 v4, v4, v21, 16
-; GFX8-NEXT: v_alignbit_b32 v5, v5, v22, 16
-; GFX8-NEXT: v_alignbit_b32 v6, v6, v23, 16
-; GFX8-NEXT: v_alignbit_b32 v7, v7, v24, 16
-; GFX8-NEXT: v_alignbit_b32 v14, v16, v31, 16
-; GFX8-NEXT: v_alignbit_b32 v15, v15, v32, 16
+; GFX8-NEXT: v_alignbit_b32 v15, v15, v16, 16
; GFX8-NEXT: s_setpc_b64 s[30:31]
;
; GFX9-LABEL: v_minnum_v32bf16:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX9-NEXT: v_lshlrev_b32_e32 v31, 16, v30
-; GFX9-NEXT: v_lshlrev_b32_e32 v32, 16, v14
-; GFX9-NEXT: v_and_b32_e32 v30, 0xffff0000, v30
-; GFX9-NEXT: v_and_b32_e32 v14, 0xffff0000, v14
+; GFX9-NEXT: v_lshlrev_b32_e32 v31, 16, v16
+; GFX9-NEXT: v_lshlrev_b32_e32 v32, 16, v0
+; GFX9-NEXT: v_and_b32_e32 v16, 0xffff0000, v16
+; GFX9-NEXT: v_and_b32_e32 v0, 0xffff0000, v0
; GFX9-NEXT: v_min_f32_e32 v31, v32, v31
-; GFX9-NEXT: v_min_f32_e32 v14, v14, v30
-; GFX9-NEXT: v_lshlrev_b32_e32 v30, 16, v29
-; GFX9-NEXT: v_lshlrev_b32_e32 v32, 16, v13
-; GFX9-NEXT: v_and_b32_e32 v29, 0xffff0000, v29
-; GFX9-NEXT: v_and_b32_e32 v13, 0xffff0000, v13
-; GFX9-NEXT: v_min_f32_e32 v30, v32, v30
-; GFX9-NEXT: v_min_f32_e32 v13, v13, v29
-; GFX9-NEXT: v_lshlrev_b32_e32 v29, 16, v28
-; GFX9-NEXT: v_lshlrev_b32_e32 v32, 16, v12
-; GFX9-NEXT: v_and_b32_e32 v28, 0xffff0000, v28
-; GFX9-NEXT: v_and_b32_e32 v12, 0xffff0000, v12
-; GFX9-NEXT: v_min_f32_e32 v29, v32, v29
-; GFX9-NEXT: v_min_f32_e32 v12, v12, v28
-; GFX9-NEXT: v_lshlrev_b32_e32 v28, 16, v27
-; GFX9-NEXT: v_lshlrev_b32_e32 v32, 16, v11
-; GFX9-NEXT: v_and_b32_e32 v27, 0xffff0000, v27
-; GFX9-NEXT: v_and_b32_e32 v11, 0xffff0000, v11
-; GFX9-NEXT: v_min_f32_e32 v28, v32, v28
-; GFX9-NEXT: v_min_f32_e32 v11, v11, v27
-; GFX9-NEXT: v_lshlrev_b32_e32 v27, 16, v26
-; GFX9-NEXT: v_lshlrev_b32_e32 v32, 16, v10
-; GFX9-NEXT: v_and_b32_e32 v26, 0xffff0000, v26
-; GFX9-NEXT: v_and_b32_e32 v10, 0xffff0000, v10
-; GFX9-NEXT: v_min_f32_e32 v27, v32, v27
-; GFX9-NEXT: v_min_f32_e32 v10, v10, v26
-; GFX9-NEXT: v_lshlrev_b32_e32 v26, 16, v25
-; GFX9-NEXT: v_lshlrev_b32_e32 v32, 16, v9
-; GFX9-NEXT: v_and_b32_e32 v25, 0xffff0000, v25
-; GFX9-NEXT: v_and_b32_e32 v9, 0xffff0000, v9
-; GFX9-NEXT: v_min_f32_e32 v26, v32, v26
-; GFX9-NEXT: v_min_f32_e32 v9, v9, v25
-; GFX9-NEXT: v_lshlrev_b32_e32 v25, 16, v24
-; GFX9-NEXT: v_lshlrev_b32_e32 v32, 16, v8
-; GFX9-NEXT: v_and_b32_e32 v24, 0xffff0000, v24
-; GFX9-NEXT: v_and_b32_e32 v8, 0xffff0000, v8
-; GFX9-NEXT: v_min_f32_e32 v8, v8, v24
-; GFX9-NEXT: buffer_load_dword v24, off, s[0:3], s32
-; GFX9-NEXT: v_min_f32_e32 v25, v32, v25
-; GFX9-NEXT: v_lshlrev_b32_e32 v32, 16, v15
-; GFX9-NEXT: v_and_b32_e32 v15, 0xffff0000, v15
+; GFX9-NEXT: v_min_f32_e32 v0, v0, v16
; GFX9-NEXT: s_mov_b32 s4, 0x7060302
-; GFX9-NEXT: v_perm_b32 v8, v8, v25, s4
-; GFX9-NEXT: v_perm_b32 v9, v9, v26, s4
-; GFX9-NEXT: v_perm_b32 v10, v10, v27, s4
-; GFX9-NEXT: v_perm_b32 v11, v11, v28, s4
-; GFX9-NEXT: v_perm_b32 v12, v12, v29, s4
-; GFX9-NEXT: v_perm_b32 v13, v13, v30, s4
-; GFX9-NEXT: v_perm_b32 v14, v14, v31, s4
-; GFX9-NEXT: s_waitcnt vmcnt(0)
-; GFX9-NEXT: v_lshlrev_b32_e32 v33, 16, v24
-; GFX9-NEXT: v_and_b32_e32 v24, 0xffff0000, v24
-; GFX9-NEXT: v_min_f32_e32 v32, v32, v33
-; GFX9-NEXT: v_min_f32_e32 v15, v15, v24
-; GFX9-NEXT: v_lshlrev_b32_e32 v24, 16, v23
-; GFX9-NEXT: v_lshlrev_b32_e32 v33, 16, v7
-; GFX9-NEXT: v_and_b32_e32 v23, 0xffff0000, v23
-; GFX9-NEXT: v_and_b32_e32 v7, 0xffff0000, v7
-; GFX9-NEXT: v_min_f32_e32 v24, v33, v24
-; GFX9-NEXT: v_min_f32_e32 v7, v7, v23
-; GFX9-NEXT: v_lshlrev_b32_e32 v23, 16, v22
-; GFX9-NEXT: v_lshlrev_b32_e32 v33, 16, v6
-; GFX9-NEXT: v_and_b32_e32 v22, 0xffff0000, v22
-; GFX9-NEXT: v_and_b32_e32 v6, 0xffff0000, v6
-; GFX9-NEXT: v_min_f32_e32 v23, v33, v23
-; GFX9-NEXT: v_min_f32_e32 v6, v6, v22
-; GFX9-NEXT: v_lshlrev_b32_e32 v22, 16, v21
-; GFX9-NEXT: v_lshlrev_b32_e32 v33, 16, v5
-; GFX9-NEXT: v_and_b32_e32 v21, 0xffff0000, v21
-; GFX9-NEXT: v_and_b32_e32 v5, 0xffff0000, v5
-; GFX9-NEXT: v_min_f32_e32 v22, v33, v22
-; GFX9-NEXT: v_min_f32_e32 v5, v5, v21
-; GFX9-NEXT: v_lshlrev_b32_e32 v21, 16, v20
-; GFX9-NEXT: v_lshlrev_b32_e32 v33, 16, v4
-; GFX9-NEXT: v_and_b32_e32 v20, 0xffff0000, v20
-; GFX9-NEXT: v_and_b32_e32 v4, 0xffff0000, v4
-; GFX9-NEXT: v_min_f32_e32 v21, v33, v21
-; GFX9-NEXT: v_min_f32_e32 v4, v4, v20
-; GFX9-NEXT: v_lshlrev_b32_e32 v20, 16, v19
-; GFX9-NEXT: v_lshlrev_b32_e32 v33, 16, v3
-; GFX9-NEXT: v_and_b32_e32 v19, 0xffff0000, v19
-; GFX9-NEXT: v_and_b32_e32 v3, 0xffff0000, v3
-; GFX9-NEXT: v_min_f32_e32 v20, v33, v20
-; GFX9-NEXT: v_min_f32_e32 v3, v3, v19
-; GFX9-NEXT: v_lshlrev_b32_e32 v19, 16, v18
-; GFX9-NEXT: v_lshlrev_b32_e32 v33, 16, v2
-; GFX9-NEXT: v_and_b32_e32 v18, 0xffff0000, v18
-; GFX9-NEXT: v_and_b32_e32 v2, 0xffff0000, v2
-; GFX9-NEXT: v_min_f32_e32 v19, v33, v19
-; GFX9-NEXT: v_min_f32_e32 v2, v2, v18
-; GFX9-NEXT: v_lshlrev_b32_e32 v18, 16, v17
-; GFX9-NEXT: v_lshlrev_b32_e32 v33, 16, v1
+; GFX9-NEXT: v_perm_b32 v0, v0, v31, s4
+; GFX9-NEXT: v_lshlrev_b32_e32 v16, 16, v17
+; GFX9-NEXT: v_lshlrev_b32_e32 v31, 16, v1
; GFX9-NEXT: v_and_b32_e32 v17, 0xffff0000, v17
; GFX9-NEXT: v_and_b32_e32 v1, 0xffff0000, v1
-; GFX9-NEXT: v_min_f32_e32 v18, v33, v18
+; GFX9-NEXT: v_min_f32_e32 v16, v31, v16
; GFX9-NEXT: v_min_f32_e32 v1, v1, v17
-; GFX9-NEXT: v_lshlrev_b32_e32 v17, 16, v16
-; GFX9-NEXT: v_lshlrev_b32_e32 v33, 16, v0
-; GFX9-NEXT: v_and_b32_e32 v16, 0xffff0000, v16
-; GFX9-NEXT: v_and_b32_e32 v0, 0xffff0000, v0
-; GFX9-NEXT: v_min_f32_e32 v17, v33, v17
-; GFX9-NEXT: v_min_f32_e32 v0, v0, v16
-; GFX9-NEXT: v_perm_b32 v0, v0, v17, s4
-; GFX9-NEXT: v_perm_b32 v1, v1, v18, s4
-; GFX9-NEXT: v_perm_b32 v2, v2, v19, s4
-; GFX9-NEXT: v_perm_b32 v3, v3, v20, s4
-; GFX9-NEXT: v_perm_b32 v4, v4, v21, s4
-; GFX9-NEXT: v_perm_b32 v5, v5, v22, s4
-; GFX9-NEXT: v_perm_b32 v6, v6, v23, s4
-; GFX9-NEXT: v_perm_b32 v7, v7, v24, s4
-; GFX9-NEXT: v_perm_b32 v15, v15, v32, s4
+; GFX9-NEXT: v_perm_b32 v1, v1, v16, s4
+; GFX9-NEXT: v_lshlrev_b32_e32 v16, 16, v18
+; GFX9-NEXT: v_lshlrev_b32_e32 v17, 16, v2
+; GFX9-NEXT: v_min_f32_e32 v16, v17, v16
+; GFX9-NEXT: v_and_b32_e32 v17, 0xffff0000, v18
+; GFX9-NEXT: buffer_load_dword v18, off, s[0:3], s32
+; GFX9-NEXT: v_and_b32_e32 v2, 0xffff0000, v2
+; GFX9-NEXT: v_min_f32_e32 v2, v2, v17
+; GFX9-NEXT: v_perm_b32 v2, v2, v16, s4
+; GFX9-NEXT: v_lshlrev_b32_e32 v16, 16, v19
+; GFX9-NEXT: v_lshlrev_b32_e32 v17, 16, v3
+; GFX9-NEXT: v_min_f32_e32 v16, v17, v16
+; GFX9-NEXT: v_and_b32_e32 v17, 0xffff0000, v19
+; GFX9-NEXT: v_and_b32_e32 v3, 0xffff0000, v3
+; GFX9-NEXT: v_min_f32_e32 v3, v3, v17
+; GFX9-NEXT: v_perm_b32 v3, v3, v16, s4
+; GFX9-NEXT: v_lshlrev_b32_e32 v16, 16, v20
+; GFX9-NEXT: v_lshlrev_b32_e32 v17, 16, v4
+; GFX9-NEXT: v_min_f32_e32 v16, v17, v16
+; GFX9-NEXT: v_and_b32_e32 v17, 0xffff0000, v20
+; GFX9-NEXT: v_and_b32_e32 v4, 0xffff0000, v4
+; GFX9-NEXT: v_min_f32_e32 v4, v4, v17
+; GFX9-NEXT: v_perm_b32 v4, v4, v16, s4
+; GFX9-NEXT: v_lshlrev_b32_e32 v16, 16, v21
+; GFX9-NEXT: v_lshlrev_b32_e32 v17, 16, v5
+; GFX9-NEXT: v_min_f32_e32 v16, v17, v16
+; GFX9-NEXT: v_and_b32_e32 v17, 0xffff0000, v21
+; GFX9-NEXT: v_and_b32_e32 v5, 0xffff0000, v5
+; GFX9-NEXT: v_min_f32_e32 v5, v5, v17
+; GFX9-NEXT: v_perm_b32 v5, v5, v16, s4
+; GFX9-NEXT: v_lshlrev_b32_e32 v16, 16, v22
+; GFX9-NEXT: v_lshlrev_b32_e32 v17, 16, v6
+; GFX9-NEXT: v_min_f32_e32 v16, v17, v16
+; GFX9-NEXT: v_and_b32_e32 v17, 0xffff0000, v22
+; GFX9-NEXT: v_and_b32_e32 v6, 0xffff0000, v6
+; GFX9-NEXT: v_min_f32_e32 v6, v6, v17
+; GFX9-NEXT: v_perm_b32 v6, v6, v16, s4
+; GFX9-NEXT: v_lshlrev_b32_e32 v16, 16, v23
+; GFX9-NEXT: v_lshlrev_b32_e32 v17, 16, v7
+; GFX9-NEXT: v_min_f32_e32 v16, v17, v16
+; GFX9-NEXT: v_and_b32_e32 v17, 0xffff0000, v23
+; GFX9-NEXT: v_and_b32_e32 v7, 0xffff0000, v7
+; GFX9-NEXT: v_min_f32_e32 v7, v7, v17
+; GFX9-NEXT: v_perm_b32 v7, v7, v16, s4
+; GFX9-NEXT: v_lshlrev_b32_e32 v16, 16, v24
+; GFX9-NEXT: v_lshlrev_b32_e32 v17, 16, v8
+; GFX9-NEXT: v_min_f32_e32 v16, v17, v16
+; GFX9-NEXT: v_and_b32_e32 v17, 0xffff0000, v24
+; GFX9-NEXT: v_and_b32_e32 v8, 0xffff0000, v8
+; GFX9-NEXT: v_min_f32_e32 v8, v8, v17
+; GFX9-NEXT: v_perm_b32 v8, v8, v16, s4
+; GFX9-NEXT: v_lshlrev_b32_e32 v16, 16, v25
+; GFX9-NEXT: v_lshlrev_b32_e32 v17, 16, v9
+; GFX9-NEXT: v_min_f32_e32 v16, v17, v16
+; GFX9-NEXT: v_and_b32_e32 v17, 0xffff0000, v25
+; GFX9-NEXT: v_and_b32_e32 v9, 0xffff0000, v9
+; GFX9-NEXT: v_min_f32_e32 v9, v9, v17
+; GFX9-NEXT: v_perm_b32 v9, v9, v16, s4
+; GFX9-NEXT: v_lshlrev_b32_e32 v16, 16, v26
+; GFX9-NEXT: v_lshlrev_b32_e32 v17, 16, v10
+; GFX9-NEXT: v_min_f32_e32 v16, v17, v16
+; GFX9-NEXT: v_and_b32_e32 v17, 0xffff0000, v26
+; GFX9-NEXT: v_and_b32_e32 v10, 0xffff0000, v10
+; GFX9-NEXT: v_min_f32_e32 v10, v10, v17
+; GFX9-NEXT: v_perm_b32 v10, v10, v16, s4
+; GFX9-NEXT: v_lshlrev_b32_e32 v16, 16, v27
+; GFX9-NEXT: v_lshlrev_b32_e32 v17, 16, v11
+; GFX9-NEXT: v_min_f32_e32 v16, v17, v16
+; GFX9-NEXT: v_and_b32_e32 v17, 0xffff0000, v27
+; GFX9-NEXT: v_and_b32_e32 v11, 0xffff0000, v11
+; GFX9-NEXT: v_min_f32_e32 v11, v11, v17
+; GFX9-NEXT: v_perm_b32 v11, v11, v16, s4
+; GFX9-NEXT: v_lshlrev_b32_e32 v16, 16, v28
+; GFX9-NEXT: v_lshlrev_b32_e32 v17, 16, v12
+; GFX9-NEXT: v_min_f32_e32 v16, v17, v16
+; GFX9-NEXT: v_and_b32_e32 v17, 0xffff0000, v28
+; GFX9-NEXT: v_and_b32_e32 v12, 0xffff0000, v12
+; GFX9-NEXT: v_min_f32_e32 v12, v12, v17
+; GFX9-NEXT: v_perm_b32 v12, v12, v16, s4
+; GFX9-NEXT: v_lshlrev_b32_e32 v16, 16, v29
+; GFX9-NEXT: v_lshlrev_b32_e32 v17, 16, v13
+; GFX9-NEXT: v_min_f32_e32 v16, v17, v16
+; GFX9-NEXT: v_and_b32_e32 v17, 0xffff0000, v29
+; GFX9-NEXT: v_and_b32_e32 v13, 0xffff0000, v13
+; GFX9-NEXT: v_min_f32_e32 v13, v13, v17
+; GFX9-NEXT: v_perm_b32 v13, v13, v16, s4
+; GFX9-NEXT: v_lshlrev_b32_e32 v16, 16, v30
+; GFX9-NEXT: v_lshlrev_b32_e32 v17, 16, v14
+; GFX9-NEXT: v_min_f32_e32 v16, v17, v16
+; GFX9-NEXT: v_and_b32_e32 v17, 0xffff0000, v30
+; GFX9-NEXT: v_and_b32_e32 v14, 0xffff0000, v14
+; GFX9-NEXT: v_min_f32_e32 v14, v14, v17
+; GFX9-NEXT: v_perm_b32 v14, v14, v16, s4
+; GFX9-NEXT: s_waitcnt vmcnt(0)
+; GFX9-NEXT: v_lshlrev_b32_e32 v16, 16, v18
+; GFX9-NEXT: v_lshlrev_b32_e32 v17, 16, v15
+; GFX9-NEXT: v_min_f32_e32 v16, v17, v16
+; GFX9-NEXT: v_and_b32_e32 v17, 0xffff0000, v18
+; GFX9-NEXT: v_and_b32_e32 v15, 0xffff0000, v15
+; GFX9-NEXT: v_min_f32_e32 v15, v15, v17
+; GFX9-NEXT: v_perm_b32 v15, v15, v16, s4
; GFX9-NEXT: s_setpc_b64 s[30:31]
;
; GFX10-LABEL: v_minnum_v32bf16:
; GFX10: ; %bb.0:
; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX10-NEXT: buffer_load_dword v31, off, s[0:3], s32
-; GFX10-NEXT: v_lshlrev_b32_e32 v39, 16, v27
-; GFX10-NEXT: v_lshlrev_b32_e32 v48, 16, v11
-; GFX10-NEXT: v_and_b32_e32 v27, 0xffff0000, v27
-; GFX10-NEXT: v_and_b32_e32 v11, 0xffff0000, v11
-; GFX10-NEXT: v_lshlrev_b32_e32 v49, 16, v26
-; GFX10-NEXT: v_lshlrev_b32_e32 v50, 16, v10
-; GFX10-NEXT: v_and_b32_e32 v26, 0xffff0000, v26
-; GFX10-NEXT: v_and_b32_e32 v10, 0xffff0000, v10
-; GFX10-NEXT: v_lshlrev_b32_e32 v33, 16, v30
-; GFX10-NEXT: v_lshlrev_b32_e32 v34, 16, v14
-; GFX10-NEXT: v_and_b32_e32 v30, 0xffff0000, v30
-; GFX10-NEXT: v_and_b32_e32 v14, 0xffff0000, v14
-; GFX10-NEXT: v_lshlrev_b32_e32 v35, 16, v29
-; GFX10-NEXT: v_lshlrev_b32_e32 v36, 16, v13
-; GFX10-NEXT: v_and_b32_e32 v29, 0xffff0000, v29
-; GFX10-NEXT: v_and_b32_e32 v13, 0xffff0000, v13
-; GFX10-NEXT: v_lshlrev_b32_e32 v37, 16, v28
-; GFX10-NEXT: v_lshlrev_b32_e32 v38, 16, v12
-; GFX10-NEXT: v_and_b32_e32 v28, 0xffff0000, v28
-; GFX10-NEXT: v_and_b32_e32 v12, 0xffff0000, v12
-; GFX10-NEXT: v_min_f32_e32 v39, v48, v39
-; GFX10-NEXT: v_lshlrev_b32_e32 v48, 16, v17
-; GFX10-NEXT: v_min_f32_e32 v11, v11, v27
-; GFX10-NEXT: v_lshlrev_b32_e32 v27, 16, v1
-; GFX10-NEXT: v_and_b32_e32 v17, 0xffff0000, v17
-; GFX10-NEXT: v_and_b32_e32 v1, 0xffff0000, v1
-; GFX10-NEXT: v_min_f32_e32 v49, v50, v49
-; GFX10-NEXT: v_lshlrev_b32_e32 v50, 16, v16
-; GFX10-NEXT: v_min_f32_e32 v10, v10, v26
-; GFX10-NEXT: v_lshlrev_b32_e32 v26, 16, v0
-; GFX10-NEXT: v_and_b32_e32 v16, 0xffff0000, v16
-; GFX10-NEXT: v_and_b32_e32 v0, 0xffff0000, v0
-; GFX10-NEXT: v_lshlrev_b32_e32 v32, 16, v15
-; GFX10-NEXT: v_and_b32_e32 v15, 0xffff0000, v15
-; GFX10-NEXT: v_lshlrev_b32_e32 v51, 16, v25
-; GFX10-NEXT: v_lshlrev_b32_e32 v52, 16, v9
-; GFX10-NEXT: v_and_b32_e32 v25, 0xffff0000, v25
-; GFX10-NEXT: v_and_b32_e32 v9, 0xffff0000, v9
-; GFX10-NEXT: v_lshlrev_b32_e32 v53, 16, v24
-; GFX10-NEXT: v_lshlrev_b32_e32 v54, 16, v8
-; GFX10-NEXT: v_and_b32_e32 v24, 0xffff0000, v24
-; GFX10-NEXT: v_and_b32_e32 v8, 0xffff0000, v8
-; GFX10-NEXT: v_lshlrev_b32_e32 v55, 16, v23
-; GFX10-NEXT: v_lshlrev_b32_e32 v64, 16, v7
-; GFX10-NEXT: v_and_b32_e32 v23, 0xffff0000, v23
-; GFX10-NEXT: v_and_b32_e32 v7, 0xffff0000, v7
-; GFX10-NEXT: v_lshlrev_b32_e32 v65, 16, v22
-; GFX10-NEXT: v_lshlrev_b32_e32 v66, 16, v6
-; GFX10-NEXT: v_and_b32_e32 v22, 0xffff0000, v22
-; GFX10-NEXT: v_and_b32_e32 v6, 0xffff0000, v6
-; GFX10-NEXT: v_lshlrev_b32_e32 v67, 16, v21
-; GFX10-NEXT: v_lshlrev_b32_e32 v68, 16, v5
+; GFX10-NEXT: v_lshlrev_b32_e32 v50, 16, v21
+; GFX10-NEXT: v_lshlrev_b32_e32 v51, 16, v5
; GFX10-NEXT: v_and_b32_e32 v21, 0xffff0000, v21
; GFX10-NEXT: v_and_b32_e32 v5, 0xffff0000, v5
-; GFX10-NEXT: v_min_f32_e32 v33, v34, v33
-; GFX10-NEXT: v_lshlrev_b32_e32 v34, 16, v20
-; GFX10-NEXT: v_min_f32_e32 v14, v14, v30
-; GFX10-NEXT: v_lshlrev_b32_e32 v30, 16, v4
-; GFX10-NEXT: v_and_b32_e32 v20, 0xffff0000, v20
-; GFX10-NEXT: v_and_b32_e32 v4, 0xffff0000, v4
-; GFX10-NEXT: v_min_f32_e32 v35, v36, v35
-; GFX10-NEXT: v_lshlrev_b32_e32 v36, 16, v19
-; GFX10-NEXT: v_min_f32_e32 v13, v13, v29
-; GFX10-NEXT: v_lshlrev_b32_e32 v29, 16, v3
-; GFX10-NEXT: v_and_b32_e32 v19, 0xffff0000, v19
-; GFX10-NEXT: v_and_b32_e32 v3, 0xffff0000, v3
-; GFX10-NEXT: v_min_f32_e32 v37, v38, v37
-; GFX10-NEXT: v_lshlrev_b32_e32 v38, 16, v18
-; GFX10-NEXT: v_min_f32_e32 v12, v12, v28
-; GFX10-NEXT: v_lshlrev_b32_e32 v28, 16, v2
+; GFX10-NEXT: v_lshlrev_b32_e32 v52, 16, v22
+; GFX10-NEXT: v_lshlrev_b32_e32 v53, 16, v6
+; GFX10-NEXT: v_and_b32_e32 v22, 0xffff0000, v22
+; GFX10-NEXT: v_and_b32_e32 v6, 0xffff0000, v6
+; GFX10-NEXT: v_lshlrev_b32_e32 v54, 16, v23
+; GFX10-NEXT: v_lshlrev_b32_e32 v55, 16, v7
+; GFX10-NEXT: v_and_b32_e32 v23, 0xffff0000, v23
+; GFX10-NEXT: v_and_b32_e32 v7, 0xffff0000, v7
+; GFX10-NEXT: v_lshlrev_b32_e32 v32, 16, v16
+; GFX10-NEXT: v_lshlrev_b32_e32 v33, 16, v0
+; GFX10-NEXT: v_and_b32_e32 v16, 0xffff0000, v16
+; GFX10-NEXT: v_and_b32_e32 v0, 0xffff0000, v0
+; GFX10-NEXT: v_lshlrev_b32_e32 v34, 16, v17
+; GFX10-NEXT: v_lshlrev_b32_e32 v35, 16, v1
+; GFX10-NEXT: v_and_b32_e32 v17, 0xffff0000, v17
+; GFX10-NEXT: v_and_b32_e32 v1, 0xffff0000, v1
+; GFX10-NEXT: v_lshlrev_b32_e32 v36, 16, v18
+; GFX10-NEXT: v_lshlrev_b32_e32 v37, 16, v2
; GFX10-NEXT: v_and_b32_e32 v18, 0xffff0000, v18
; GFX10-NEXT: v_and_b32_e32 v2, 0xffff0000, v2
+; GFX10-NEXT: v_lshlrev_b32_e32 v38, 16, v19
+; GFX10-NEXT: v_lshlrev_b32_e32 v39, 16, v3
+; GFX10-NEXT: v_and_b32_e32 v19, 0xffff0000, v19
+; GFX10-NEXT: v_and_b32_e32 v3, 0xffff0000, v3
+; GFX10-NEXT: v_lshlrev_b32_e32 v48, 16, v20
+; GFX10-NEXT: v_lshlrev_b32_e32 v49, 16, v4
+; GFX10-NEXT: v_and_b32_e32 v20, 0xffff0000, v20
+; GFX10-NEXT: v_and_b32_e32 v4, 0xffff0000, v4
+; GFX10-NEXT: v_min_f32_e32 v5, v5, v21
+; GFX10-NEXT: v_min_f32_e32 v21, v53, v52
+; GFX10-NEXT: v_min_f32_e32 v6, v6, v22
+; GFX10-NEXT: v_min_f32_e32 v22, v55, v54
+; GFX10-NEXT: v_min_f32_e32 v7, v7, v23
+; GFX10-NEXT: v_lshlrev_b32_e32 v64, 16, v24
+; GFX10-NEXT: v_lshlrev_b32_e32 v65, 16, v8
+; GFX10-NEXT: v_and_b32_e32 v24, 0xffff0000, v24
+; GFX10-NEXT: v_and_b32_e32 v8, 0xffff0000, v8
+; GFX10-NEXT: v_lshlrev_b32_e32 v66, 16, v25
+; GFX10-NEXT: v_lshlrev_b32_e32 v67, 16, v9
+; GFX10-NEXT: v_and_b32_e32 v25, 0xffff0000, v25
+; GFX10-NEXT: v_and_b32_e32 v9, 0xffff0000, v9
+; GFX10-NEXT: v_lshlrev_b32_e32 v68, 16, v26
+; GFX10-NEXT: v_min_f32_e32 v32, v33, v32
+; GFX10-NEXT: v_lshlrev_b32_e32 v33, 16, v10
+; GFX10-NEXT: v_and_b32_e32 v26, 0xffff0000, v26
+; GFX10-NEXT: v_and_b32_e32 v10, 0xffff0000, v10
; GFX10-NEXT: v_min_f32_e32 v0, v0, v16
+; GFX10-NEXT: v_lshlrev_b32_e32 v16, 16, v27
+; GFX10-NEXT: v_min_f32_e32 v34, v35, v34
+; GFX10-NEXT: v_lshlrev_b32_e32 v35, 16, v11
+; GFX10-NEXT: v_and_b32_e32 v27, 0xffff0000, v27
+; GFX10-NEXT: v_and_b32_e32 v11, 0xffff0000, v11
; GFX10-NEXT: v_min_f32_e32 v1, v1, v17
-; GFX10-NEXT: v_min_f32_e32 v51, v52, v51
-; GFX10-NEXT: v_min_f32_e32 v9, v9, v25
-; GFX10-NEXT: v_min_f32_e32 v25, v54, v53
-; GFX10-NEXT: v_min_f32_e32 v8, v8, v24
-; GFX10-NEXT: v_min_f32_e32 v24, v64, v55
-; GFX10-NEXT: v_min_f32_e32 v7, v7, v23
-; GFX10-NEXT: v_min_f32_e32 v23, v66, v65
-; GFX10-NEXT: v_min_f32_e32 v6, v6, v22
-; GFX10-NEXT: v_min_f32_e32 v22, v68, v67
-; GFX10-NEXT: v_min_f32_e32 v5, v5, v21
-; GFX10-NEXT: v_min_f32_e32 v21, v30, v34
-; GFX10-NEXT: v_min_f32_e32 v29, v29, v36
-; GFX10-NEXT: v_min_f32_e32 v28, v28, v38
-; GFX10-NEXT: v_min_f32_e32 v27, v27, v48
-; GFX10-NEXT: v_min_f32_e32 v26, v26, v50
+; GFX10-NEXT: v_lshlrev_b32_e32 v17, 16, v28
+; GFX10-NEXT: v_min_f32_e32 v36, v37, v36
+; GFX10-NEXT: v_lshlrev_b32_e32 v37, 16, v12
+; GFX10-NEXT: v_and_b32_e32 v28, 0xffff0000, v28
+; GFX10-NEXT: v_and_b32_e32 v12, 0xffff0000, v12
; GFX10-NEXT: v_min_f32_e32 v2, v2, v18
+; GFX10-NEXT: v_lshlrev_b32_e32 v18, 16, v29
+; GFX10-NEXT: v_min_f32_e32 v38, v39, v38
+; GFX10-NEXT: v_lshlrev_b32_e32 v39, 16, v13
+; GFX10-NEXT: v_and_b32_e32 v29, 0xffff0000, v29
+; GFX10-NEXT: v_and_b32_e32 v13, 0xffff0000, v13
; GFX10-NEXT: v_min_f32_e32 v3, v3, v19
+; GFX10-NEXT: v_lshlrev_b32_e32 v19, 16, v30
+; GFX10-NEXT: v_min_f32_e32 v48, v49, v48
+; GFX10-NEXT: v_lshlrev_b32_e32 v49, 16, v14
+; GFX10-NEXT: v_and_b32_e32 v30, 0xffff0000, v30
+; GFX10-NEXT: v_and_b32_e32 v14, 0xffff0000, v14
; GFX10-NEXT: v_min_f32_e32 v4, v4, v20
-; GFX10-NEXT: v_perm_b32 v1, v1, v27, 0x7060302
-; GFX10-NEXT: v_perm_b32 v0, v0, v26, 0x7060302
-; GFX10-NEXT: v_perm_b32 v2, v2, v28, 0x7060302
-; GFX10-NEXT: v_perm_b32 v3, v3, v29, 0x7060302
-; GFX10-NEXT: v_perm_b32 v4, v4, v21, 0x7060302
-; GFX10-NEXT: v_perm_b32 v5, v5, v22, 0x7060302
-; GFX10-NEXT: v_perm_b32 v6, v6, v23, 0x7060302
-; GFX10-NEXT: v_perm_b32 v7, v7, v24, 0x7060302
-; GFX10-NEXT: v_perm_b32 v8, v8, v25, 0x7060302
-; GFX10-NEXT: v_perm_b32 v9, v9, v51, 0x7060302
-; GFX10-NEXT: v_perm_b32 v10, v10, v49, 0x7060302
-; GFX10-NEXT: v_perm_b32 v11, v11, v39, 0x7060302
-; GFX10-NEXT: v_perm_b32 v12, v12, v37, 0x7060302
-; GFX10-NEXT: v_perm_b32 v13, v13, v35, 0x7060302
-; GFX10-NEXT: v_perm_b32 v14, v14, v33, 0x7060302
+; GFX10-NEXT: v_lshlrev_b32_e32 v20, 16, v15
+; GFX10-NEXT: v_and_b32_e32 v15, 0xffff0000, v15
+; GFX10-NEXT: v_perm_b32 v6, v6, v21, 0x7060302
+; GFX10-NEXT: v_perm_b32 v7, v7, v22, 0x7060302
+; GFX10-NEXT: v_min_f32_e32 v50, v51, v50
+; GFX10-NEXT: v_min_f32_e32 v23, v65, v64
+; GFX10-NEXT: v_min_f32_e32 v8, v8, v24
+; GFX10-NEXT: v_min_f32_e32 v24, v67, v66
+; GFX10-NEXT: v_min_f32_e32 v9, v9, v25
+; GFX10-NEXT: v_min_f32_e32 v25, v33, v68
+; GFX10-NEXT: v_min_f32_e32 v10, v10, v26
+; GFX10-NEXT: v_min_f32_e32 v16, v35, v16
+; GFX10-NEXT: v_min_f32_e32 v11, v11, v27
+; GFX10-NEXT: v_min_f32_e32 v17, v37, v17
+; GFX10-NEXT: v_min_f32_e32 v12, v12, v28
+; GFX10-NEXT: v_min_f32_e32 v18, v39, v18
+; GFX10-NEXT: v_min_f32_e32 v13, v13, v29
+; GFX10-NEXT: v_min_f32_e32 v19, v49, v19
+; GFX10-NEXT: v_min_f32_e32 v14, v14, v30
+; GFX10-NEXT: v_perm_b32 v0, v0, v32, 0x7060302
+; GFX10-NEXT: v_perm_b32 v1, v1, v34, 0x7060302
+; GFX10-NEXT: v_perm_b32 v2, v2, v36, 0x7060302
+; GFX10-NEXT: v_perm_b32 v3, v3, v38, 0x7060302
+; GFX10-NEXT: v_perm_b32 v4, v4, v48, 0x7060302
+; GFX10-NEXT: v_perm_b32 v5, v5, v50, 0x7060302
+; GFX10-NEXT: v_perm_b32 v8, v8, v23, 0x7060302
+; GFX10-NEXT: v_perm_b32 v9, v9, v24, 0x7060302
+; GFX10-NEXT: v_perm_b32 v10, v10, v25, 0x7060302
+; GFX10-NEXT: v_perm_b32 v11, v11, v16, 0x7060302
+; GFX10-NEXT: v_perm_b32 v12, v12, v17, 0x7060302
+; GFX10-NEXT: v_perm_b32 v13, v13, v18, 0x7060302
+; GFX10-NEXT: v_perm_b32 v14, v14, v19, 0x7060302
; GFX10-NEXT: s_waitcnt vmcnt(0)
-; GFX10-NEXT: v_lshlrev_b32_e32 v16, 16, v31
-; GFX10-NEXT: v_and_b32_e32 v17, 0xffff0000, v31
-; GFX10-NEXT: v_min_f32_e32 v16, v32, v16
-; GFX10-NEXT: v_min_f32_e32 v15, v15, v17
-; GFX10-NEXT: v_perm_b32 v15, v15, v16, 0x7060302
+; GFX10-NEXT: v_lshlrev_b32_e32 v21, 16, v31
+; GFX10-NEXT: v_and_b32_e32 v22, 0xffff0000, v31
+; GFX10-NEXT: v_min_f32_e32 v20, v20, v21
+; GFX10-NEXT: v_min_f32_e32 v15, v15, v22
+; GFX10-NEXT: v_perm_b32 v15, v15, v20, 0x7060302
; GFX10-NEXT: s_setpc_b64 s[30:31]
;
; GFX11-LABEL: v_minnum_v32bf16:
; GFX11: ; %bb.0:
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-NEXT: scratch_load_b32 v31, off, s32
-; GFX11-NEXT: v_lshlrev_b32_e32 v83, 16, v17
-; GFX11-NEXT: v_lshlrev_b32_e32 v84, 16, v1
-; GFX11-NEXT: v_and_b32_e32 v17, 0xffff0000, v17
-; GFX11-NEXT: v_and_b32_e32 v1, 0xffff0000, v1
-; GFX11-NEXT: v_lshlrev_b32_e32 v85, 16, v16
-; GFX11-NEXT: v_lshlrev_b32_e32 v86, 16, v0
-; GFX11-NEXT: v_and_b32_e32 v16, 0xffff0000, v16
-; GFX11-NEXT: v_and_b32_e32 v0, 0xffff0000, v0
-; GFX11-NEXT: v_lshlrev_b32_e32 v54, 16, v8
-; GFX11-NEXT: v_lshlrev_b32_e32 v64, 16, v7
-; GFX11-NEXT: v_and_b32_e32 v7, 0xffff0000, v7
-; GFX11-NEXT: v_lshlrev_b32_e32 v65, 16, v22
-; GFX11-NEXT: v_lshlrev_b32_e32 v66, 16, v6
-; GFX11-NEXT: v_lshlrev_b32_e32 v48, 16, v11
-; GFX11-NEXT: v_dual_min_f32 v0, v0, v16 :: v_dual_and_b32 v11, 0xffff0000, v11
-; GFX11-NEXT: v_and_b32_e32 v22, 0xffff0000, v22
-; GFX11-NEXT: v_and_b32_e32 v6, 0xffff0000, v6
-; GFX11-NEXT: v_lshlrev_b32_e32 v67, 16, v21
-; GFX11-NEXT: v_lshlrev_b32_e32 v68, 16, v5
-; GFX11-NEXT: v_lshlrev_b32_e32 v51, 16, v25
-; GFX11-NEXT: v_and_b32_e32 v21, 0xffff0000, v21
-; GFX11-NEXT: v_and_b32_e32 v5, 0xffff0000, v5
-; GFX11-NEXT: v_lshlrev_b32_e32 v69, 16, v20
-; GFX11-NEXT: v_lshlrev_b32_e32 v70, 16, v4
-; GFX11-NEXT: v_and_b32_e32 v20, 0xffff0000, v20
-; GFX11-NEXT: v_and_b32_e32 v4, 0xffff0000, v4
-; GFX11-NEXT: v_lshlrev_b32_e32 v55, 16, v23
-; GFX11-NEXT: v_lshlrev_b32_e32 v71, 16, v19
-; GFX11-NEXT: v_lshlrev_b32_e32 v80, 16, v3
+; GFX11-NEXT: v_lshlrev_b32_e32 v68, 16, v26
+; GFX11-NEXT: v_lshlrev_b32_e32 v69, 16, v10
+; GFX11-NEXT: v_and_b32_e32 v26, 0xffff0000, v26
+; GFX11-NEXT: v_and_b32_e32 v10, 0xffff0000, v10
+; GFX11-NEXT: v_lshlrev_b32_e32 v70, 16, v27
+; GFX11-NEXT: v_lshlrev_b32_e32 v71, 16, v11
+; GFX11-NEXT: v_lshlrev_b32_e32 v50, 16, v21
+; GFX11-NEXT: v_lshlrev_b32_e32 v54, 16, v23
+; GFX11-NEXT: v_lshlrev_b32_e32 v55, 16, v7
+; GFX11-NEXT: v_lshlrev_b32_e32 v64, 16, v24
+; GFX11-NEXT: v_lshlrev_b32_e32 v65, 16, v8
+; GFX11-NEXT: v_and_b32_e32 v24, 0xffff0000, v24
+; GFX11-NEXT: v_and_b32_e32 v8, 0xffff0000, v8
+; GFX11-NEXT: v_lshlrev_b32_e32 v51, 16, v5
+; GFX11-NEXT: v_dual_min_f32 v10, v10, v26 :: v_dual_and_b32 v5, 0xffff0000, v5
+; GFX11-NEXT: v_lshlrev_b32_e32 v66, 16, v25
; GFX11-NEXT: v_and_b32_e32 v25, 0xffff0000, v25
-; GFX11-NEXT: v_and_b32_e32 v19, 0xffff0000, v19
-; GFX11-NEXT: v_and_b32_e32 v3, 0xffff0000, v3
-; GFX11-NEXT: v_lshlrev_b32_e32 v52, 16, v9
-; GFX11-NEXT: v_and_b32_e32 v9, 0xffff0000, v9
-; GFX11-NEXT: v_lshlrev_b32_e32 v81, 16, v18
-; GFX11-NEXT: v_lshlrev_b32_e32 v82, 16, v2
-; GFX11-NEXT: v_and_b32_e32 v18, 0xffff0000, v18
-; GFX11-NEXT: v_and_b32_e32 v2, 0xffff0000, v2
-; GFX11-NEXT: v_lshlrev_b32_e32 v53, 16, v24
-; GFX11-NEXT: v_dual_min_f32 v1, v1, v17 :: v_dual_and_b32 v24, 0xffff0000, v24
-; GFX11-NEXT: v_dual_min_f32 v5, v5, v21 :: v_dual_lshlrev_b32 v50, 16, v10
-; GFX11-NEXT: v_dual_min_f32 v21, v70, v69 :: v_dual_and_b32 v10, 0xffff0000, v10
-; GFX11-NEXT: v_dual_min_f32 v2, v2, v18 :: v_dual_min_f32 v3, v3, v19
-; GFX11-NEXT: v_dual_min_f32 v4, v4, v20 :: v_dual_lshlrev_b32 v49, 16, v26
-; GFX11-NEXT: v_dual_min_f32 v9, v9, v25 :: v_dual_and_b32 v26, 0xffff0000, v26
-; GFX11-NEXT: v_min_f32_e32 v6, v6, v22
-; GFX11-NEXT: v_dual_min_f32 v22, v68, v67 :: v_dual_lshlrev_b32 v37, 16, v28
+; GFX11-NEXT: v_and_b32_e32 v27, 0xffff0000, v27
+; GFX11-NEXT: v_and_b32_e32 v11, 0xffff0000, v11
+; GFX11-NEXT: v_lshlrev_b32_e32 v80, 16, v28
+; GFX11-NEXT: v_lshlrev_b32_e32 v81, 16, v12
+; GFX11-NEXT: v_lshlrev_b32_e32 v52, 16, v22
; GFX11-NEXT: v_and_b32_e32 v28, 0xffff0000, v28
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_4) | instid1(VALU_DEP_4)
-; GFX11-NEXT: v_min_f32_e32 v10, v10, v26
-; GFX11-NEXT: v_min_f32_e32 v26, v52, v51
-; GFX11-NEXT: v_perm_b32 v4, v4, v21, 0x7060302
-; GFX11-NEXT: v_min_f32_e32 v25, v54, v53
-; GFX11-NEXT: v_perm_b32 v5, v5, v22, 0x7060302
-; GFX11-NEXT: v_perm_b32 v9, v9, v26, 0x7060302
-; GFX11-NEXT: s_waitcnt vmcnt(0)
-; GFX11-NEXT: v_lshlrev_b32_e32 v16, 16, v31
+; GFX11-NEXT: v_and_b32_e32 v12, 0xffff0000, v12
+; GFX11-NEXT: v_lshlrev_b32_e32 v53, 16, v6
+; GFX11-NEXT: v_lshlrev_b32_e32 v82, 16, v29
+; GFX11-NEXT: v_lshlrev_b32_e32 v83, 16, v13
; GFX11-NEXT: v_and_b32_e32 v23, 0xffff0000, v23
-; GFX11-NEXT: v_and_b32_e32 v17, 0xffff0000, v31
-; GFX11-NEXT: v_and_b32_e32 v8, 0xffff0000, v8
-; GFX11-NEXT: v_lshlrev_b32_e32 v36, 16, v13
+; GFX11-NEXT: v_and_b32_e32 v29, 0xffff0000, v29
; GFX11-NEXT: v_and_b32_e32 v13, 0xffff0000, v13
-; GFX11-NEXT: v_lshlrev_b32_e32 v39, 16, v27
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4)
-; GFX11-NEXT: v_dual_min_f32 v8, v8, v24 :: v_dual_and_b32 v27, 0xffff0000, v27
-; GFX11-NEXT: v_min_f32_e32 v24, v64, v55
-; GFX11-NEXT: v_lshlrev_b32_e32 v38, 16, v12
-; GFX11-NEXT: v_and_b32_e32 v12, 0xffff0000, v12
-; GFX11-NEXT: v_lshlrev_b32_e32 v35, 16, v29
-; GFX11-NEXT: v_min_f32_e32 v7, v7, v23
-; GFX11-NEXT: v_min_f32_e32 v23, v66, v65
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_1) | instid1(VALU_DEP_2)
-; GFX11-NEXT: v_dual_min_f32 v12, v12, v28 :: v_dual_and_b32 v29, 0xffff0000, v29
-; GFX11-NEXT: v_dual_min_f32 v28, v48, v39 :: v_dual_lshlrev_b32 v33, 16, v30
-; GFX11-NEXT: v_dual_min_f32 v13, v13, v29 :: v_dual_lshlrev_b32 v34, 16, v14
-; GFX11-NEXT: v_lshlrev_b32_e32 v32, 16, v15
-; GFX11-NEXT: v_dual_min_f32 v11, v11, v27 :: v_dual_and_b32 v14, 0xffff0000, v14
-; GFX11-NEXT: v_dual_min_f32 v27, v50, v49 :: v_dual_and_b32 v30, 0xffff0000, v30
-; GFX11-NEXT: v_min_f32_e32 v29, v38, v37
+; GFX11-NEXT: v_lshlrev_b32_e32 v84, 16, v30
+; GFX11-NEXT: v_lshlrev_b32_e32 v85, 16, v14
+; GFX11-NEXT: v_and_b32_e32 v22, 0xffff0000, v22
+; GFX11-NEXT: v_and_b32_e32 v30, 0xffff0000, v30
+; GFX11-NEXT: v_and_b32_e32 v14, 0xffff0000, v14
+; GFX11-NEXT: v_and_b32_e32 v7, 0xffff0000, v7
+; GFX11-NEXT: v_lshlrev_b32_e32 v86, 16, v15
+; GFX11-NEXT: v_lshlrev_b32_e32 v67, 16, v9
+; GFX11-NEXT: v_and_b32_e32 v9, 0xffff0000, v9
+; GFX11-NEXT: v_lshlrev_b32_e32 v48, 16, v20
+; GFX11-NEXT: v_dual_min_f32 v11, v11, v27 :: v_dual_and_b32 v20, 0xffff0000, v20
; GFX11-NEXT: v_and_b32_e32 v15, 0xffff0000, v15
-; GFX11-NEXT: v_min_f32_e32 v37, v86, v85
-; GFX11-NEXT: v_perm_b32 v6, v6, v23, 0x7060302
+; GFX11-NEXT: v_and_b32_e32 v6, 0xffff0000, v6
+; GFX11-NEXT: v_dual_min_f32 v26, v71, v70 :: v_dual_lshlrev_b32 v49, 16, v4
+; GFX11-NEXT: v_dual_min_f32 v13, v13, v29 :: v_dual_and_b32 v4, 0xffff0000, v4
+; GFX11-NEXT: v_lshlrev_b32_e32 v35, 16, v1
+; GFX11-NEXT: v_lshlrev_b32_e32 v37, 16, v2
+; GFX11-NEXT: v_lshlrev_b32_e32 v38, 16, v19
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4)
+; GFX11-NEXT: v_min_f32_e32 v4, v4, v20
+; GFX11-NEXT: v_dual_min_f32 v8, v8, v24 :: v_dual_min_f32 v9, v9, v25
+; GFX11-NEXT: v_min_f32_e32 v25, v69, v68
+; GFX11-NEXT: v_dual_min_f32 v20, v51, v50 :: v_dual_lshlrev_b32 v39, 16, v3
+; GFX11-NEXT: v_min_f32_e32 v27, v81, v80
+; GFX11-NEXT: v_min_f32_e32 v12, v12, v28
+; GFX11-NEXT: v_dual_min_f32 v28, v83, v82 :: v_dual_min_f32 v29, v85, v84
+; GFX11-NEXT: v_dual_min_f32 v6, v6, v22 :: v_dual_and_b32 v3, 0xffff0000, v3
+; GFX11-NEXT: v_min_f32_e32 v22, v55, v54
+; GFX11-NEXT: v_lshlrev_b32_e32 v36, 16, v18
+; GFX11-NEXT: v_lshlrev_b32_e32 v34, 16, v17
+; GFX11-NEXT: v_and_b32_e32 v17, 0xffff0000, v17
+; GFX11-NEXT: v_and_b32_e32 v18, 0xffff0000, v18
; GFX11-NEXT: v_min_f32_e32 v14, v14, v30
-; GFX11-NEXT: v_dual_min_f32 v30, v36, v35 :: v_dual_min_f32 v33, v34, v33
-; GFX11-NEXT: v_dual_min_f32 v34, v80, v71 :: v_dual_min_f32 v35, v82, v81
-; GFX11-NEXT: v_min_f32_e32 v36, v84, v83
-; GFX11-NEXT: v_dual_min_f32 v16, v32, v16 :: v_dual_min_f32 v15, v15, v17
-; GFX11-NEXT: v_perm_b32 v0, v0, v37, 0x7060302
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
-; GFX11-NEXT: v_perm_b32 v2, v2, v35, 0x7060302
-; GFX11-NEXT: v_perm_b32 v1, v1, v36, 0x7060302
-; GFX11-NEXT: v_perm_b32 v3, v3, v34, 0x7060302
-; GFX11-NEXT: v_perm_b32 v7, v7, v24, 0x7060302
-; GFX11-NEXT: v_perm_b32 v8, v8, v25, 0x7060302
-; GFX11-NEXT: v_perm_b32 v10, v10, v27, 0x7060302
-; GFX11-NEXT: v_perm_b32 v11, v11, v28, 0x7060302
-; GFX11-NEXT: v_perm_b32 v12, v12, v29, 0x7060302
-; GFX11-NEXT: v_perm_b32 v13, v13, v30, 0x7060302
-; GFX11-NEXT: v_perm_b32 v14, v14, v33, 0x7060302
+; GFX11-NEXT: v_dual_min_f32 v7, v7, v23 :: v_dual_and_b32 v2, 0xffff0000, v2
+; GFX11-NEXT: v_and_b32_e32 v1, 0xffff0000, v1
+; GFX11-NEXT: v_min_f32_e32 v23, v65, v64
+; GFX11-NEXT: v_and_b32_e32 v19, 0xffff0000, v19
+; GFX11-NEXT: v_dual_min_f32 v24, v67, v66 :: v_dual_and_b32 v21, 0xffff0000, v21
+; GFX11-NEXT: v_min_f32_e32 v2, v2, v18
+; GFX11-NEXT: v_dual_min_f32 v1, v1, v17 :: v_dual_lshlrev_b32 v32, 16, v16
+; GFX11-NEXT: v_min_f32_e32 v18, v39, v38
+; GFX11-NEXT: v_dual_min_f32 v3, v3, v19 :: v_dual_and_b32 v16, 0xffff0000, v16
+; GFX11-NEXT: v_min_f32_e32 v19, v49, v48
+; GFX11-NEXT: v_min_f32_e32 v17, v37, v36
+; GFX11-NEXT: v_lshlrev_b32_e32 v33, 16, v0
+; GFX11-NEXT: v_dual_min_f32 v5, v5, v21 :: v_dual_and_b32 v0, 0xffff0000, v0
+; GFX11-NEXT: v_min_f32_e32 v21, v53, v52
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_1) | instid1(VALU_DEP_4)
+; GFX11-NEXT: v_perm_b32 v2, v2, v17, 0x7060302
+; GFX11-NEXT: v_perm_b32 v3, v3, v18, 0x7060302
+; GFX11-NEXT: v_min_f32_e32 v0, v0, v16
+; GFX11-NEXT: v_min_f32_e32 v16, v35, v34
+; GFX11-NEXT: v_min_f32_e32 v32, v33, v32
+; GFX11-NEXT: v_perm_b32 v4, v4, v19, 0x7060302
+; GFX11-NEXT: v_perm_b32 v5, v5, v20, 0x7060302
+; GFX11-NEXT: v_perm_b32 v6, v6, v21, 0x7060302
+; GFX11-NEXT: v_perm_b32 v1, v1, v16, 0x7060302
+; GFX11-NEXT: v_perm_b32 v0, v0, v32, 0x7060302
+; GFX11-NEXT: v_perm_b32 v7, v7, v22, 0x7060302
+; GFX11-NEXT: v_perm_b32 v8, v8, v23, 0x7060302
+; GFX11-NEXT: v_perm_b32 v9, v9, v24, 0x7060302
+; GFX11-NEXT: v_perm_b32 v10, v10, v25, 0x7060302
+; GFX11-NEXT: v_perm_b32 v11, v11, v26, 0x7060302
+; GFX11-NEXT: v_perm_b32 v12, v12, v27, 0x7060302
+; GFX11-NEXT: v_perm_b32 v13, v13, v28, 0x7060302
+; GFX11-NEXT: v_perm_b32 v14, v14, v29, 0x7060302
+; GFX11-NEXT: s_waitcnt vmcnt(0)
+; GFX11-NEXT: v_lshlrev_b32_e32 v16, 16, v31
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-NEXT: v_dual_min_f32 v16, v86, v16 :: v_dual_and_b32 v17, 0xffff0000, v31
+; GFX11-NEXT: v_min_f32_e32 v15, v15, v17
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
; GFX11-NEXT: v_perm_b32 v15, v15, v16, 0x7060302
; GFX11-NEXT: s_setpc_b64 s[30:31]
%op = call <32 x bfloat> @llvm.minnum.v32bf16(<32 x bfloat> %a, <32 x bfloat> %b)
@@ -16836,480 +16813,483 @@ define <32 x bfloat> @v_maxnum_v32bf16(<32 x bfloat> %a, <32 x bfloat> %b) {
; GFX8-LABEL: v_maxnum_v32bf16:
; GFX8: ; %bb.0:
; GFX8-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX8-NEXT: v_lshlrev_b32_e32 v31, 16, v30
-; GFX8-NEXT: v_lshlrev_b32_e32 v32, 16, v14
-; GFX8-NEXT: v_and_b32_e32 v30, 0xffff0000, v30
-; GFX8-NEXT: v_and_b32_e32 v14, 0xffff0000, v14
-; GFX8-NEXT: v_max_f32_e32 v31, v32, v31
-; GFX8-NEXT: v_max_f32_e32 v30, v14, v30
-; GFX8-NEXT: v_lshlrev_b32_e32 v14, 16, v29
-; GFX8-NEXT: v_lshlrev_b32_e32 v32, 16, v13
-; GFX8-NEXT: v_and_b32_e32 v29, 0xffff0000, v29
-; GFX8-NEXT: v_and_b32_e32 v13, 0xffff0000, v13
-; GFX8-NEXT: v_max_f32_e32 v14, v32, v14
-; GFX8-NEXT: v_max_f32_e32 v13, v13, v29
-; GFX8-NEXT: v_lshlrev_b32_e32 v29, 16, v28
-; GFX8-NEXT: v_lshlrev_b32_e32 v32, 16, v12
-; GFX8-NEXT: v_and_b32_e32 v28, 0xffff0000, v28
-; GFX8-NEXT: v_and_b32_e32 v12, 0xffff0000, v12
-; GFX8-NEXT: v_max_f32_e32 v29, v32, v29
-; GFX8-NEXT: v_max_f32_e32 v12, v12, v28
-; GFX8-NEXT: v_lshlrev_b32_e32 v28, 16, v27
-; GFX8-NEXT: v_lshlrev_b32_e32 v32, 16, v11
-; GFX8-NEXT: v_and_b32_e32 v27, 0xffff0000, v27
-; GFX8-NEXT: v_and_b32_e32 v11, 0xffff0000, v11
-; GFX8-NEXT: v_max_f32_e32 v28, v32, v28
-; GFX8-NEXT: v_max_f32_e32 v11, v11, v27
-; GFX8-NEXT: v_lshlrev_b32_e32 v27, 16, v26
-; GFX8-NEXT: v_lshlrev_b32_e32 v32, 16, v10
-; GFX8-NEXT: v_and_b32_e32 v26, 0xffff0000, v26
-; GFX8-NEXT: v_and_b32_e32 v10, 0xffff0000, v10
-; GFX8-NEXT: v_max_f32_e32 v27, v32, v27
-; GFX8-NEXT: v_max_f32_e32 v10, v10, v26
-; GFX8-NEXT: v_lshlrev_b32_e32 v26, 16, v25
-; GFX8-NEXT: v_lshlrev_b32_e32 v32, 16, v9
-; GFX8-NEXT: v_and_b32_e32 v25, 0xffff0000, v25
-; GFX8-NEXT: v_and_b32_e32 v9, 0xffff0000, v9
-; GFX8-NEXT: v_max_f32_e32 v26, v32, v26
-; GFX8-NEXT: v_max_f32_e32 v9, v9, v25
-; GFX8-NEXT: v_lshlrev_b32_e32 v25, 16, v24
-; GFX8-NEXT: v_lshlrev_b32_e32 v32, 16, v8
-; GFX8-NEXT: v_and_b32_e32 v24, 0xffff0000, v24
-; GFX8-NEXT: v_and_b32_e32 v8, 0xffff0000, v8
-; GFX8-NEXT: v_max_f32_e32 v8, v8, v24
-; GFX8-NEXT: buffer_load_dword v24, off, s[0:3], s32
-; GFX8-NEXT: v_max_f32_e32 v25, v32, v25
-; GFX8-NEXT: v_lshlrev_b32_e32 v32, 16, v15
-; GFX8-NEXT: v_and_b32_e32 v15, 0xffff0000, v15
-; GFX8-NEXT: v_lshrrev_b32_e32 v8, 16, v8
-; GFX8-NEXT: v_lshrrev_b32_e32 v9, 16, v9
-; GFX8-NEXT: v_lshrrev_b32_e32 v10, 16, v10
-; GFX8-NEXT: v_lshrrev_b32_e32 v13, 16, v13
-; GFX8-NEXT: v_lshrrev_b32_e32 v12, 16, v12
-; GFX8-NEXT: v_lshrrev_b32_e32 v11, 16, v11
-; GFX8-NEXT: v_alignbit_b32 v8, v8, v25, 16
-; GFX8-NEXT: v_alignbit_b32 v9, v9, v26, 16
-; GFX8-NEXT: v_alignbit_b32 v10, v10, v27, 16
-; GFX8-NEXT: v_alignbit_b32 v11, v11, v28, 16
-; GFX8-NEXT: v_alignbit_b32 v12, v12, v29, 16
-; GFX8-NEXT: v_alignbit_b32 v13, v13, v14, 16
-; GFX8-NEXT: s_waitcnt vmcnt(0)
-; GFX8-NEXT: v_lshlrev_b32_e32 v33, 16, v24
-; GFX8-NEXT: v_and_b32_e32 v24, 0xffff0000, v24
-; GFX8-NEXT: v_max_f32_e32 v32, v32, v33
-; GFX8-NEXT: v_max_f32_e32 v15, v15, v24
-; GFX8-NEXT: v_lshlrev_b32_e32 v24, 16, v23
-; GFX8-NEXT: v_lshlrev_b32_e32 v33, 16, v7
-; GFX8-NEXT: v_and_b32_e32 v23, 0xffff0000, v23
-; GFX8-NEXT: v_and_b32_e32 v7, 0xffff0000, v7
-; GFX8-NEXT: v_max_f32_e32 v24, v33, v24
-; GFX8-NEXT: v_max_f32_e32 v7, v7, v23
-; GFX8-NEXT: v_lshlrev_b32_e32 v23, 16, v22
-; GFX8-NEXT: v_lshlrev_b32_e32 v33, 16, v6
-; GFX8-NEXT: v_and_b32_e32 v22, 0xffff0000, v22
-; GFX8-NEXT: v_and_b32_e32 v6, 0xffff0000, v6
-; GFX8-NEXT: v_max_f32_e32 v23, v33, v23
-; GFX8-NEXT: v_max_f32_e32 v6, v6, v22
-; GFX8-NEXT: v_lshlrev_b32_e32 v22, 16, v21
-; GFX8-NEXT: v_lshlrev_b32_e32 v33, 16, v5
-; GFX8-NEXT: v_and_b32_e32 v21, 0xffff0000, v21
-; GFX8-NEXT: v_and_b32_e32 v5, 0xffff0000, v5
-; GFX8-NEXT: v_max_f32_e32 v22, v33, v22
-; GFX8-NEXT: v_max_f32_e32 v5, v5, v21
-; GFX8-NEXT: v_lshlrev_b32_e32 v21, 16, v20
-; GFX8-NEXT: v_lshlrev_b32_e32 v33, 16, v4
-; GFX8-NEXT: v_and_b32_e32 v20, 0xffff0000, v20
-; GFX8-NEXT: v_and_b32_e32 v4, 0xffff0000, v4
-; GFX8-NEXT: v_max_f32_e32 v21, v33, v21
-; GFX8-NEXT: v_max_f32_e32 v4, v4, v20
-; GFX8-NEXT: v_lshlrev_b32_e32 v20, 16, v19
-; GFX8-NEXT: v_lshlrev_b32_e32 v33, 16, v3
-; GFX8-NEXT: v_and_b32_e32 v19, 0xffff0000, v19
-; GFX8-NEXT: v_and_b32_e32 v3, 0xffff0000, v3
-; GFX8-NEXT: v_max_f32_e32 v20, v33, v20
-; GFX8-NEXT: v_max_f32_e32 v3, v3, v19
-; GFX8-NEXT: v_lshlrev_b32_e32 v19, 16, v18
-; GFX8-NEXT: v_lshlrev_b32_e32 v33, 16, v2
-; GFX8-NEXT: v_and_b32_e32 v18, 0xffff0000, v18
-; GFX8-NEXT: v_and_b32_e32 v2, 0xffff0000, v2
-; GFX8-NEXT: v_max_f32_e32 v19, v33, v19
-; GFX8-NEXT: v_max_f32_e32 v2, v2, v18
-; GFX8-NEXT: v_lshlrev_b32_e32 v18, 16, v17
-; GFX8-NEXT: v_lshlrev_b32_e32 v33, 16, v1
-; GFX8-NEXT: v_and_b32_e32 v17, 0xffff0000, v17
-; GFX8-NEXT: v_and_b32_e32 v1, 0xffff0000, v1
-; GFX8-NEXT: v_max_f32_e32 v18, v33, v18
-; GFX8-NEXT: v_max_f32_e32 v1, v1, v17
-; GFX8-NEXT: v_lshlrev_b32_e32 v17, 16, v16
-; GFX8-NEXT: v_lshlrev_b32_e32 v33, 16, v0
+; GFX8-NEXT: v_lshlrev_b32_e32 v31, 16, v16
+; GFX8-NEXT: v_lshlrev_b32_e32 v32, 16, v0
; GFX8-NEXT: v_and_b32_e32 v16, 0xffff0000, v16
; GFX8-NEXT: v_and_b32_e32 v0, 0xffff0000, v0
; GFX8-NEXT: v_max_f32_e32 v0, v0, v16
-; GFX8-NEXT: v_max_f32_e32 v17, v33, v17
+; GFX8-NEXT: v_max_f32_e32 v31, v32, v31
; GFX8-NEXT: v_lshrrev_b32_e32 v0, 16, v0
+; GFX8-NEXT: v_alignbit_b32 v0, v0, v31, 16
+; GFX8-NEXT: v_lshlrev_b32_e32 v16, 16, v17
+; GFX8-NEXT: v_lshlrev_b32_e32 v31, 16, v1
+; GFX8-NEXT: v_and_b32_e32 v17, 0xffff0000, v17
+; GFX8-NEXT: v_and_b32_e32 v1, 0xffff0000, v1
+; GFX8-NEXT: v_max_f32_e32 v1, v1, v17
+; GFX8-NEXT: v_max_f32_e32 v16, v31, v16
; GFX8-NEXT: v_lshrrev_b32_e32 v1, 16, v1
+; GFX8-NEXT: v_alignbit_b32 v1, v1, v16, 16
+; GFX8-NEXT: v_lshlrev_b32_e32 v16, 16, v18
+; GFX8-NEXT: v_lshlrev_b32_e32 v17, 16, v2
+; GFX8-NEXT: v_max_f32_e32 v16, v17, v16
+; GFX8-NEXT: v_and_b32_e32 v17, 0xffff0000, v18
+; GFX8-NEXT: v_and_b32_e32 v2, 0xffff0000, v2
+; GFX8-NEXT: v_max_f32_e32 v2, v2, v17
; GFX8-NEXT: v_lshrrev_b32_e32 v2, 16, v2
+; GFX8-NEXT: v_alignbit_b32 v2, v2, v16, 16
+; GFX8-NEXT: v_lshlrev_b32_e32 v16, 16, v19
+; GFX8-NEXT: v_lshlrev_b32_e32 v17, 16, v3
+; GFX8-NEXT: v_max_f32_e32 v16, v17, v16
+; GFX8-NEXT: v_and_b32_e32 v17, 0xffff0000, v19
+; GFX8-NEXT: v_and_b32_e32 v3, 0xffff0000, v3
+; GFX8-NEXT: v_max_f32_e32 v3, v3, v17
; GFX8-NEXT: v_lshrrev_b32_e32 v3, 16, v3
+; GFX8-NEXT: v_alignbit_b32 v3, v3, v16, 16
+; GFX8-NEXT: v_lshlrev_b32_e32 v16, 16, v20
+; GFX8-NEXT: v_lshlrev_b32_e32 v17, 16, v4
+; GFX8-NEXT: v_max_f32_e32 v16, v17, v16
+; GFX8-NEXT: v_and_b32_e32 v17, 0xffff0000, v20
+; GFX8-NEXT: v_and_b32_e32 v4, 0xffff0000, v4
+; GFX8-NEXT: v_max_f32_e32 v4, v4, v17
+; GFX8-NEXT: buffer_load_dword v17, off, s[0:3], s32
; GFX8-NEXT: v_lshrrev_b32_e32 v4, 16, v4
+; GFX8-NEXT: v_alignbit_b32 v4, v4, v16, 16
+; GFX8-NEXT: v_lshlrev_b32_e32 v16, 16, v21
+; GFX8-NEXT: v_lshlrev_b32_e32 v18, 16, v5
+; GFX8-NEXT: v_max_f32_e32 v16, v18, v16
+; GFX8-NEXT: v_and_b32_e32 v18, 0xffff0000, v21
+; GFX8-NEXT: v_and_b32_e32 v5, 0xffff0000, v5
+; GFX8-NEXT: v_max_f32_e32 v5, v5, v18
; GFX8-NEXT: v_lshrrev_b32_e32 v5, 16, v5
+; GFX8-NEXT: v_alignbit_b32 v5, v5, v16, 16
+; GFX8-NEXT: v_lshlrev_b32_e32 v16, 16, v22
+; GFX8-NEXT: v_lshlrev_b32_e32 v18, 16, v6
+; GFX8-NEXT: v_max_f32_e32 v16, v18, v16
+; GFX8-NEXT: v_and_b32_e32 v18, 0xffff0000, v22
+; GFX8-NEXT: v_and_b32_e32 v6, 0xffff0000, v6
+; GFX8-NEXT: v_max_f32_e32 v6, v6, v18
; GFX8-NEXT: v_lshrrev_b32_e32 v6, 16, v6
+; GFX8-NEXT: v_alignbit_b32 v6, v6, v16, 16
+; GFX8-NEXT: v_lshlrev_b32_e32 v16, 16, v23
+; GFX8-NEXT: v_lshlrev_b32_e32 v18, 16, v7
+; GFX8-NEXT: v_max_f32_e32 v16, v18, v16
+; GFX8-NEXT: v_and_b32_e32 v18, 0xffff0000, v23
+; GFX8-NEXT: v_and_b32_e32 v7, 0xffff0000, v7
+; GFX8-NEXT: v_max_f32_e32 v7, v7, v18
; GFX8-NEXT: v_lshrrev_b32_e32 v7, 16, v7
+; GFX8-NEXT: v_alignbit_b32 v7, v7, v16, 16
+; GFX8-NEXT: v_lshlrev_b32_e32 v16, 16, v24
+; GFX8-NEXT: v_lshlrev_b32_e32 v18, 16, v8
+; GFX8-NEXT: v_max_f32_e32 v16, v18, v16
+; GFX8-NEXT: v_and_b32_e32 v18, 0xffff0000, v24
+; GFX8-NEXT: v_and_b32_e32 v8, 0xffff0000, v8
+; GFX8-NEXT: v_max_f32_e32 v8, v8, v18
+; GFX8-NEXT: v_lshrrev_b32_e32 v8, 16, v8
+; GFX8-NEXT: v_alignbit_b32 v8, v8, v16, 16
+; GFX8-NEXT: v_lshlrev_b32_e32 v16, 16, v25
+; GFX8-NEXT: v_lshlrev_b32_e32 v18, 16, v9
+; GFX8-NEXT: v_max_f32_e32 v16, v18, v16
+; GFX8-NEXT: v_and_b32_e32 v18, 0xffff0000, v25
+; GFX8-NEXT: v_and_b32_e32 v9, 0xffff0000, v9
+; GFX8-NEXT: v_max_f32_e32 v9, v9, v18
+; GFX8-NEXT: v_lshrrev_b32_e32 v9, 16, v9
+; GFX8-NEXT: v_alignbit_b32 v9, v9, v16, 16
+; GFX8-NEXT: v_lshlrev_b32_e32 v16, 16, v26
+; GFX8-NEXT: v_lshlrev_b32_e32 v18, 16, v10
+; GFX8-NEXT: v_max_f32_e32 v16, v18, v16
+; GFX8-NEXT: v_and_b32_e32 v18, 0xffff0000, v26
+; GFX8-NEXT: v_and_b32_e32 v10, 0xffff0000, v10
+; GFX8-NEXT: v_max_f32_e32 v10, v10, v18
+; GFX8-NEXT: v_lshrrev_b32_e32 v10, 16, v10
+; GFX8-NEXT: v_alignbit_b32 v10, v10, v16, 16
+; GFX8-NEXT: v_lshlrev_b32_e32 v16, 16, v27
+; GFX8-NEXT: v_lshlrev_b32_e32 v18, 16, v11
+; GFX8-NEXT: v_max_f32_e32 v16, v18, v16
+; GFX8-NEXT: v_and_b32_e32 v18, 0xffff0000, v27
+; GFX8-NEXT: v_and_b32_e32 v11, 0xffff0000, v11
+; GFX8-NEXT: v_max_f32_e32 v11, v11, v18
+; GFX8-NEXT: v_lshrrev_b32_e32 v11, 16, v11
+; GFX8-NEXT: v_alignbit_b32 v11, v11, v16, 16
+; GFX8-NEXT: v_lshlrev_b32_e32 v16, 16, v28
+; GFX8-NEXT: v_lshlrev_b32_e32 v18, 16, v12
+; GFX8-NEXT: v_max_f32_e32 v16, v18, v16
+; GFX8-NEXT: v_and_b32_e32 v18, 0xffff0000, v28
+; GFX8-NEXT: v_and_b32_e32 v12, 0xffff0000, v12
+; GFX8-NEXT: v_max_f32_e32 v12, v12, v18
+; GFX8-NEXT: v_lshrrev_b32_e32 v12, 16, v12
+; GFX8-NEXT: v_alignbit_b32 v12, v12, v16, 16
+; GFX8-NEXT: v_lshlrev_b32_e32 v16, 16, v29
+; GFX8-NEXT: v_lshlrev_b32_e32 v18, 16, v13
+; GFX8-NEXT: v_max_f32_e32 v16, v18, v16
+; GFX8-NEXT: v_and_b32_e32 v18, 0xffff0000, v29
+; GFX8-NEXT: v_and_b32_e32 v13, 0xffff0000, v13
+; GFX8-NEXT: v_max_f32_e32 v13, v13, v18
+; GFX8-NEXT: v_lshrrev_b32_e32 v13, 16, v13
+; GFX8-NEXT: v_alignbit_b32 v13, v13, v16, 16
+; GFX8-NEXT: v_lshlrev_b32_e32 v16, 16, v30
+; GFX8-NEXT: v_lshlrev_b32_e32 v18, 16, v14
+; GFX8-NEXT: v_max_f32_e32 v16, v18, v16
+; GFX8-NEXT: v_and_b32_e32 v18, 0xffff0000, v30
+; GFX8-NEXT: v_and_b32_e32 v14, 0xffff0000, v14
+; GFX8-NEXT: v_max_f32_e32 v14, v14, v18
+; GFX8-NEXT: v_lshrrev_b32_e32 v14, 16, v14
+; GFX8-NEXT: v_alignbit_b32 v14, v14, v16, 16
+; GFX8-NEXT: s_waitcnt vmcnt(0)
+; GFX8-NEXT: v_lshlrev_b32_e32 v16, 16, v17
+; GFX8-NEXT: v_lshlrev_b32_e32 v18, 16, v15
+; GFX8-NEXT: v_and_b32_e32 v17, 0xffff0000, v17
+; GFX8-NEXT: v_and_b32_e32 v15, 0xffff0000, v15
+; GFX8-NEXT: v_max_f32_e32 v15, v15, v17
+; GFX8-NEXT: v_max_f32_e32 v16, v18, v16
; GFX8-NEXT: v_lshrrev_b32_e32 v15, 16, v15
-; GFX8-NEXT: v_lshrrev_b32_e32 v16, 16, v30
-; GFX8-NEXT: v_alignbit_b32 v0, v0, v17, 16
-; GFX8-NEXT: v_alignbit_b32 v1, v1, v18, 16
-; GFX8-NEXT: v_alignbit_b32 v2, v2, v19, 16
-; GFX8-NEXT: v_alignbit_b32 v3, v3, v20, 16
-; GFX8-NEXT: v_alignbit_b32 v4, v4, v21, 16
-; GFX8-NEXT: v_alignbit_b32 v5, v5, v22, 16
-; GFX8-NEXT: v_alignbit_b32 v6, v6, v23, 16
-; GFX8-NEXT: v_alignbit_b32 v7, v7, v24, 16
-; GFX8-NEXT: v_alignbit_b32 v14, v16, v31, 16
-; GFX8-NEXT: v_alignbit_b32 v15, v15, v32, 16
+; GFX8-NEXT: v_alignbit_b32 v15, v15, v16, 16
; GFX8-NEXT: s_setpc_b64 s[30:31]
;
; GFX9-LABEL: v_maxnum_v32bf16:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX9-NEXT: v_lshlrev_b32_e32 v31, 16, v30
-; GFX9-NEXT: v_lshlrev_b32_e32 v32, 16, v14
-; GFX9-NEXT: v_and_b32_e32 v30, 0xffff0000, v30
-; GFX9-NEXT: v_and_b32_e32 v14, 0xffff0000, v14
+; GFX9-NEXT: v_lshlrev_b32_e32 v31, 16, v16
+; GFX9-NEXT: v_lshlrev_b32_e32 v32, 16, v0
+; GFX9-NEXT: v_and_b32_e32 v16, 0xffff0000, v16
+; GFX9-NEXT: v_and_b32_e32 v0, 0xffff0000, v0
; GFX9-NEXT: v_max_f32_e32 v31, v32, v31
-; GFX9-NEXT: v_max_f32_e32 v14, v14, v30
-; GFX9-NEXT: v_lshlrev_b32_e32 v30, 16, v29
-; GFX9-NEXT: v_lshlrev_b32_e32 v32, 16, v13
-; GFX9-NEXT: v_and_b32_e32 v29, 0xffff0000, v29
-; GFX9-NEXT: v_and_b32_e32 v13, 0xffff0000, v13
-; GFX9-NEXT: v_max_f32_e32 v30, v32, v30
-; GFX9-NEXT: v_max_f32_e32 v13, v13, v29
-; GFX9-NEXT: v_lshlrev_b32_e32 v29, 16, v28
-; GFX9-NEXT: v_lshlrev_b32_e32 v32, 16, v12
-; GFX9-NEXT: v_and_b32_e32 v28, 0xffff0000, v28
-; GFX9-NEXT: v_and_b32_e32 v12, 0xffff0000, v12
-; GFX9-NEXT: v_max_f32_e32 v29, v32, v29
-; GFX9-NEXT: v_max_f32_e32 v12, v12, v28
-; GFX9-NEXT: v_lshlrev_b32_e32 v28, 16, v27
-; GFX9-NEXT: v_lshlrev_b32_e32 v32, 16, v11
-; GFX9-NEXT: v_and_b32_e32 v27, 0xffff0000, v27
-; GFX9-NEXT: v_and_b32_e32 v11, 0xffff0000, v11
-; GFX9-NEXT: v_max_f32_e32 v28, v32, v28
-; GFX9-NEXT: v_max_f32_e32 v11, v11, v27
-; GFX9-NEXT: v_lshlrev_b32_e32 v27, 16, v26
-; GFX9-NEXT: v_lshlrev_b32_e32 v32, 16, v10
-; GFX9-NEXT: v_and_b32_e32 v26, 0xffff0000, v26
-; GFX9-NEXT: v_and_b32_e32 v10, 0xffff0000, v10
-; GFX9-NEXT: v_max_f32_e32 v27, v32, v27
-; GFX9-NEXT: v_max_f32_e32 v10, v10, v26
-; GFX9-NEXT: v_lshlrev_b32_e32 v26, 16, v25
-; GFX9-NEXT: v_lshlrev_b32_e32 v32, 16, v9
-; GFX9-NEXT: v_and_b32_e32 v25, 0xffff0000, v25
-; GFX9-NEXT: v_and_b32_e32 v9, 0xffff0000, v9
-; GFX9-NEXT: v_max_f32_e32 v26, v32, v26
-; GFX9-NEXT: v_max_f32_e32 v9, v9, v25
-; GFX9-NEXT: v_lshlrev_b32_e32 v25, 16, v24
-; GFX9-NEXT: v_lshlrev_b32_e32 v32, 16, v8
-; GFX9-NEXT: v_and_b32_e32 v24, 0xffff0000, v24
-; GFX9-NEXT: v_and_b32_e32 v8, 0xffff0000, v8
-; GFX9-NEXT: v_max_f32_e32 v8, v8, v24
-; GFX9-NEXT: buffer_load_dword v24, off, s[0:3], s32
-; GFX9-NEXT: v_max_f32_e32 v25, v32, v25
-; GFX9-NEXT: v_lshlrev_b32_e32 v32, 16, v15
-; GFX9-NEXT: v_and_b32_e32 v15, 0xffff0000, v15
+; GFX9-NEXT: v_max_f32_e32 v0, v0, v16
; GFX9-NEXT: s_mov_b32 s4, 0x7060302
-; GFX9-NEXT: v_perm_b32 v8, v8, v25, s4
-; GFX9-NEXT: v_perm_b32 v9, v9, v26, s4
-; GFX9-NEXT: v_perm_b32 v10, v10, v27, s4
-; GFX9-NEXT: v_perm_b32 v11, v11, v28, s4
-; GFX9-NEXT: v_perm_b32 v12, v12, v29, s4
-; GFX9-NEXT: v_perm_b32 v13, v13, v30, s4
-; GFX9-NEXT: v_perm_b32 v14, v14, v31, s4
-; GFX9-NEXT: s_waitcnt vmcnt(0)
-; GFX9-NEXT: v_lshlrev_b32_e32 v33, 16, v24
-; GFX9-NEXT: v_and_b32_e32 v24, 0xffff0000, v24
-; GFX9-NEXT: v_max_f32_e32 v32, v32, v33
-; GFX9-NEXT: v_max_f32_e32 v15, v15, v24
-; GFX9-NEXT: v_lshlrev_b32_e32 v24, 16, v23
-; GFX9-NEXT: v_lshlrev_b32_e32 v33, 16, v7
-; GFX9-NEXT: v_and_b32_e32 v23, 0xffff0000, v23
-; GFX9-NEXT: v_and_b32_e32 v7, 0xffff0000, v7
-; GFX9-NEXT: v_max_f32_e32 v24, v33, v24
-; GFX9-NEXT: v_max_f32_e32 v7, v7, v23
-; GFX9-NEXT: v_lshlrev_b32_e32 v23, 16, v22
-; GFX9-NEXT: v_lshlrev_b32_e32 v33, 16, v6
-; GFX9-NEXT: v_and_b32_e32 v22, 0xffff0000, v22
-; GFX9-NEXT: v_and_b32_e32 v6, 0xffff0000, v6
-; GFX9-NEXT: v_max_f32_e32 v23, v33, v23
-; GFX9-NEXT: v_max_f32_e32 v6, v6, v22
-; GFX9-NEXT: v_lshlrev_b32_e32 v22, 16, v21
-; GFX9-NEXT: v_lshlrev_b32_e32 v33, 16, v5
-; GFX9-NEXT: v_and_b32_e32 v21, 0xffff0000, v21
-; GFX9-NEXT: v_and_b32_e32 v5, 0xffff0000, v5
-; GFX9-NEXT: v_max_f32_e32 v22, v33, v22
-; GFX9-NEXT: v_max_f32_e32 v5, v5, v21
-; GFX9-NEXT: v_lshlrev_b32_e32 v21, 16, v20
-; GFX9-NEXT: v_lshlrev_b32_e32 v33, 16, v4
-; GFX9-NEXT: v_and_b32_e32 v20, 0xffff0000, v20
-; GFX9-NEXT: v_and_b32_e32 v4, 0xffff0000, v4
-; GFX9-NEXT: v_max_f32_e32 v21, v33, v21
-; GFX9-NEXT: v_max_f32_e32 v4, v4, v20
-; GFX9-NEXT: v_lshlrev_b32_e32 v20, 16, v19
-; GFX9-NEXT: v_lshlrev_b32_e32 v33, 16, v3
-; GFX9-NEXT: v_and_b32_e32 v19, 0xffff0000, v19
-; GFX9-NEXT: v_and_b32_e32 v3, 0xffff0000, v3
-; GFX9-NEXT: v_max_f32_e32 v20, v33, v20
-; GFX9-NEXT: v_max_f32_e32 v3, v3, v19
-; GFX9-NEXT: v_lshlrev_b32_e32 v19, 16, v18
-; GFX9-NEXT: v_lshlrev_b32_e32 v33, 16, v2
-; GFX9-NEXT: v_and_b32_e32 v18, 0xffff0000, v18
-; GFX9-NEXT: v_and_b32_e32 v2, 0xffff0000, v2
-; GFX9-NEXT: v_max_f32_e32 v19, v33, v19
-; GFX9-NEXT: v_max_f32_e32 v2, v2, v18
-; GFX9-NEXT: v_lshlrev_b32_e32 v18, 16, v17
-; GFX9-NEXT: v_lshlrev_b32_e32 v33, 16, v1
+; GFX9-NEXT: v_perm_b32 v0, v0, v31, s4
+; GFX9-NEXT: v_lshlrev_b32_e32 v16, 16, v17
+; GFX9-NEXT: v_lshlrev_b32_e32 v31, 16, v1
; GFX9-NEXT: v_and_b32_e32 v17, 0xffff0000, v17
; GFX9-NEXT: v_and_b32_e32 v1, 0xffff0000, v1
-; GFX9-NEXT: v_max_f32_e32 v18, v33, v18
+; GFX9-NEXT: v_max_f32_e32 v16, v31, v16
; GFX9-NEXT: v_max_f32_e32 v1, v1, v17
-; GFX9-NEXT: v_lshlrev_b32_e32 v17, 16, v16
-; GFX9-NEXT: v_lshlrev_b32_e32 v33, 16, v0
-; GFX9-NEXT: v_and_b32_e32 v16, 0xffff0000, v16
-; GFX9-NEXT: v_and_b32_e32 v0, 0xffff0000, v0
-; GFX9-NEXT: v_max_f32_e32 v17, v33, v17
-; GFX9-NEXT: v_max_f32_e32 v0, v0, v16
-; GFX9-NEXT: v_perm_b32 v0, v0, v17, s4
-; GFX9-NEXT: v_perm_b32 v1, v1, v18, s4
-; GFX9-NEXT: v_perm_b32 v2, v2, v19, s4
-; GFX9-NEXT: v_perm_b32 v3, v3, v20, s4
-; GFX9-NEXT: v_perm_b32 v4, v4, v21, s4
-; GFX9-NEXT: v_perm_b32 v5, v5, v22, s4
-; GFX9-NEXT: v_perm_b32 v6, v6, v23, s4
-; GFX9-NEXT: v_perm_b32 v7, v7, v24, s4
-; GFX9-NEXT: v_perm_b32 v15, v15, v32, s4
+; GFX9-NEXT: v_perm_b32 v1, v1, v16, s4
+; GFX9-NEXT: v_lshlrev_b32_e32 v16, 16, v18
+; GFX9-NEXT: v_lshlrev_b32_e32 v17, 16, v2
+; GFX9-NEXT: v_max_f32_e32 v16, v17, v16
+; GFX9-NEXT: v_and_b32_e32 v17, 0xffff0000, v18
+; GFX9-NEXT: buffer_load_dword v18, off, s[0:3], s32
+; GFX9-NEXT: v_and_b32_e32 v2, 0xffff0000, v2
+; GFX9-NEXT: v_max_f32_e32 v2, v2, v17
+; GFX9-NEXT: v_perm_b32 v2, v2, v16, s4
+; GFX9-NEXT: v_lshlrev_b32_e32 v16, 16, v19
+; GFX9-NEXT: v_lshlrev_b32_e32 v17, 16, v3
+; GFX9-NEXT: v_max_f32_e32 v16, v17, v16
+; GFX9-NEXT: v_and_b32_e32 v17, 0xffff0000, v19
+; GFX9-NEXT: v_and_b32_e32 v3, 0xffff0000, v3
+; GFX9-NEXT: v_max_f32_e32 v3, v3, v17
+; GFX9-NEXT: v_perm_b32 v3, v3, v16, s4
+; GFX9-NEXT: v_lshlrev_b32_e32 v16, 16, v20
+; GFX9-NEXT: v_lshlrev_b32_e32 v17, 16, v4
+; GFX9-NEXT: v_max_f32_e32 v16, v17, v16
+; GFX9-NEXT: v_and_b32_e32 v17, 0xffff0000, v20
+; GFX9-NEXT: v_and_b32_e32 v4, 0xffff0000, v4
+; GFX9-NEXT: v_max_f32_e32 v4, v4, v17
+; GFX9-NEXT: v_perm_b32 v4, v4, v16, s4
+; GFX9-NEXT: v_lshlrev_b32_e32 v16, 16, v21
+; GFX9-NEXT: v_lshlrev_b32_e32 v17, 16, v5
+; GFX9-NEXT: v_max_f32_e32 v16, v17, v16
+; GFX9-NEXT: v_and_b32_e32 v17, 0xffff0000, v21
+; GFX9-NEXT: v_and_b32_e32 v5, 0xffff0000, v5
+; GFX9-NEXT: v_max_f32_e32 v5, v5, v17
+; GFX9-NEXT: v_perm_b32 v5, v5, v16, s4
+; GFX9-NEXT: v_lshlrev_b32_e32 v16, 16, v22
+; GFX9-NEXT: v_lshlrev_b32_e32 v17, 16, v6
+; GFX9-NEXT: v_max_f32_e32 v16, v17, v16
+; GFX9-NEXT: v_and_b32_e32 v17, 0xffff0000, v22
+; GFX9-NEXT: v_and_b32_e32 v6, 0xffff0000, v6
+; GFX9-NEXT: v_max_f32_e32 v6, v6, v17
+; GFX9-NEXT: v_perm_b32 v6, v6, v16, s4
+; GFX9-NEXT: v_lshlrev_b32_e32 v16, 16, v23
+; GFX9-NEXT: v_lshlrev_b32_e32 v17, 16, v7
+; GFX9-NEXT: v_max_f32_e32 v16, v17, v16
+; GFX9-NEXT: v_and_b32_e32 v17, 0xffff0000, v23
+; GFX9-NEXT: v_and_b32_e32 v7, 0xffff0000, v7
+; GFX9-NEXT: v_max_f32_e32 v7, v7, v17
+; GFX9-NEXT: v_perm_b32 v7, v7, v16, s4
+; GFX9-NEXT: v_lshlrev_b32_e32 v16, 16, v24
+; GFX9-NEXT: v_lshlrev_b32_e32 v17, 16, v8
+; GFX9-NEXT: v_max_f32_e32 v16, v17, v16
+; GFX9-NEXT: v_and_b32_e32 v17, 0xffff0000, v24
+; GFX9-NEXT: v_and_b32_e32 v8, 0xffff0000, v8
+; GFX9-NEXT: v_max_f32_e32 v8, v8, v17
+; GFX9-NEXT: v_perm_b32 v8, v8, v16, s4
+; GFX9-NEXT: v_lshlrev_b32_e32 v16, 16, v25
+; GFX9-NEXT: v_lshlrev_b32_e32 v17, 16, v9
+; GFX9-NEXT: v_max_f32_e32 v16, v17, v16
+; GFX9-NEXT: v_and_b32_e32 v17, 0xffff0000, v25
+; GFX9-NEXT: v_and_b32_e32 v9, 0xffff0000, v9
+; GFX9-NEXT: v_max_f32_e32 v9, v9, v17
+; GFX9-NEXT: v_perm_b32 v9, v9, v16, s4
+; GFX9-NEXT: v_lshlrev_b32_e32 v16, 16, v26
+; GFX9-NEXT: v_lshlrev_b32_e32 v17, 16, v10
+; GFX9-NEXT: v_max_f32_e32 v16, v17, v16
+; GFX9-NEXT: v_and_b32_e32 v17, 0xffff0000, v26
+; GFX9-NEXT: v_and_b32_e32 v10, 0xffff0000, v10
+; GFX9-NEXT: v_max_f32_e32 v10, v10, v17
+; GFX9-NEXT: v_perm_b32 v10, v10, v16, s4
+; GFX9-NEXT: v_lshlrev_b32_e32 v16, 16, v27
+; GFX9-NEXT: v_lshlrev_b32_e32 v17, 16, v11
+; GFX9-NEXT: v_max_f32_e32 v16, v17, v16
+; GFX9-NEXT: v_and_b32_e32 v17, 0xffff0000, v27
+; GFX9-NEXT: v_and_b32_e32 v11, 0xffff0000, v11
+; GFX9-NEXT: v_max_f32_e32 v11, v11, v17
+; GFX9-NEXT: v_perm_b32 v11, v11, v16, s4
+; GFX9-NEXT: v_lshlrev_b32_e32 v16, 16, v28
+; GFX9-NEXT: v_lshlrev_b32_e32 v17, 16, v12
+; GFX9-NEXT: v_max_f32_e32 v16, v17, v16
+; GFX9-NEXT: v_and_b32_e32 v17, 0xffff0000, v28
+; GFX9-NEXT: v_and_b32_e32 v12, 0xffff0000, v12
+; GFX9-NEXT: v_max_f32_e32 v12, v12, v17
+; GFX9-NEXT: v_perm_b32 v12, v12, v16, s4
+; GFX9-NEXT: v_lshlrev_b32_e32 v16, 16, v29
+; GFX9-NEXT: v_lshlrev_b32_e32 v17, 16, v13
+; GFX9-NEXT: v_max_f32_e32 v16, v17, v16
+; GFX9-NEXT: v_and_b32_e32 v17, 0xffff0000, v29
+; GFX9-NEXT: v_and_b32_e32 v13, 0xffff0000, v13
+; GFX9-NEXT: v_max_f32_e32 v13, v13, v17
+; GFX9-NEXT: v_perm_b32 v13, v13, v16, s4
+; GFX9-NEXT: v_lshlrev_b32_e32 v16, 16, v30
+; GFX9-NEXT: v_lshlrev_b32_e32 v17, 16, v14
+; GFX9-NEXT: v_max_f32_e32 v16, v17, v16
+; GFX9-NEXT: v_and_b32_e32 v17, 0xffff0000, v30
+; GFX9-NEXT: v_and_b32_e32 v14, 0xffff0000, v14
+; GFX9-NEXT: v_max_f32_e32 v14, v14, v17
+; GFX9-NEXT: v_perm_b32 v14, v14, v16, s4
+; GFX9-NEXT: s_waitcnt vmcnt(0)
+; GFX9-NEXT: v_lshlrev_b32_e32 v16, 16, v18
+; GFX9-NEXT: v_lshlrev_b32_e32 v17, 16, v15
+; GFX9-NEXT: v_max_f32_e32 v16, v17, v16
+; GFX9-NEXT: v_and_b32_e32 v17, 0xffff0000, v18
+; GFX9-NEXT: v_and_b32_e32 v15, 0xffff0000, v15
+; GFX9-NEXT: v_max_f32_e32 v15, v15, v17
+; GFX9-NEXT: v_perm_b32 v15, v15, v16, s4
; GFX9-NEXT: s_setpc_b64 s[30:31]
;
; GFX10-LABEL: v_maxnum_v32bf16:
; GFX10: ; %bb.0:
; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX10-NEXT: buffer_load_dword v31, off, s[0:3], s32
-; GFX10-NEXT: v_lshlrev_b32_e32 v39, 16, v27
-; GFX10-NEXT: v_lshlrev_b32_e32 v48, 16, v11
-; GFX10-NEXT: v_and_b32_e32 v27, 0xffff0000, v27
-; GFX10-NEXT: v_and_b32_e32 v11, 0xffff0000, v11
-; GFX10-NEXT: v_lshlrev_b32_e32 v49, 16, v26
-; GFX10-NEXT: v_lshlrev_b32_e32 v50, 16, v10
-; GFX10-NEXT: v_and_b32_e32 v26, 0xffff0000, v26
-; GFX10-NEXT: v_and_b32_e32 v10, 0xffff0000, v10
-; GFX10-NEXT: v_lshlrev_b32_e32 v33, 16, v30
-; GFX10-NEXT: v_lshlrev_b32_e32 v34, 16, v14
-; GFX10-NEXT: v_and_b32_e32 v30, 0xffff0000, v30
-; GFX10-NEXT: v_and_b32_e32 v14, 0xffff0000, v14
-; GFX10-NEXT: v_lshlrev_b32_e32 v35, 16, v29
-; GFX10-NEXT: v_lshlrev_b32_e32 v36, 16, v13
-; GFX10-NEXT: v_and_b32_e32 v29, 0xffff0000, v29
-; GFX10-NEXT: v_and_b32_e32 v13, 0xffff0000, v13
-; GFX10-NEXT: v_lshlrev_b32_e32 v37, 16, v28
-; GFX10-NEXT: v_lshlrev_b32_e32 v38, 16, v12
-; GFX10-NEXT: v_and_b32_e32 v28, 0xffff0000, v28
-; GFX10-NEXT: v_and_b32_e32 v12, 0xffff0000, v12
-; GFX10-NEXT: v_max_f32_e32 v39, v48, v39
-; GFX10-NEXT: v_lshlrev_b32_e32 v48, 16, v17
-; GFX10-NEXT: v_max_f32_e32 v11, v11, v27
-; GFX10-NEXT: v_lshlrev_b32_e32 v27, 16, v1
-; GFX10-NEXT: v_and_b32_e32 v17, 0xffff0000, v17
-; GFX10-NEXT: v_and_b32_e32 v1, 0xffff0000, v1
-; GFX10-NEXT: v_max_f32_e32 v49, v50, v49
-; GFX10-NEXT: v_lshlrev_b32_e32 v50, 16, v16
-; GFX10-NEXT: v_max_f32_e32 v10, v10, v26
-; GFX10-NEXT: v_lshlrev_b32_e32 v26, 16, v0
-; GFX10-NEXT: v_and_b32_e32 v16, 0xffff0000, v16
-; GFX10-NEXT: v_and_b32_e32 v0, 0xffff0000, v0
-; GFX10-NEXT: v_lshlrev_b32_e32 v32, 16, v15
-; GFX10-NEXT: v_and_b32_e32 v15, 0xffff0000, v15
-; GFX10-NEXT: v_lshlrev_b32_e32 v51, 16, v25
-; GFX10-NEXT: v_lshlrev_b32_e32 v52, 16, v9
-; GFX10-NEXT: v_and_b32_e32 v25, 0xffff0000, v25
-; GFX10-NEXT: v_and_b32_e32 v9, 0xffff0000, v9
-; GFX10-NEXT: v_lshlrev_b32_e32 v53, 16, v24
-; GFX10-NEXT: v_lshlrev_b32_e32 v54, 16, v8
-; GFX10-NEXT: v_and_b32_e32 v24, 0xffff0000, v24
-; GFX10-NEXT: v_and_b32_e32 v8, 0xffff0000, v8
-; GFX10-NEXT: v_lshlrev_b32_e32 v55, 16, v23
-; GFX10-NEXT: v_lshlrev_b32_e32 v64, 16, v7
-; GFX10-NEXT: v_and_b32_e32 v23, 0xffff0000, v23
-; GFX10-NEXT: v_and_b32_e32 v7, 0xffff0000, v7
-; GFX10-NEXT: v_lshlrev_b32_e32 v65, 16, v22
-; GFX10-NEXT: v_lshlrev_b32_e32 v66, 16, v6
-; GFX10-NEXT: v_and_b32_e32 v22, 0xffff0000, v22
-; GFX10-NEXT: v_and_b32_e32 v6, 0xffff0000, v6
-; GFX10-NEXT: v_lshlrev_b32_e32 v67, 16, v21
-; GFX10-NEXT: v_lshlrev_b32_e32 v68, 16, v5
+; GFX10-NEXT: v_lshlrev_b32_e32 v50, 16, v21
+; GFX10-NEXT: v_lshlrev_b32_e32 v51, 16, v5
; GFX10-NEXT: v_and_b32_e32 v21, 0xffff0000, v21
; GFX10-NEXT: v_and_b32_e32 v5, 0xffff0000, v5
-; GFX10-NEXT: v_max_f32_e32 v33, v34, v33
-; GFX10-NEXT: v_lshlrev_b32_e32 v34, 16, v20
-; GFX10-NEXT: v_max_f32_e32 v14, v14, v30
-; GFX10-NEXT: v_lshlrev_b32_e32 v30, 16, v4
-; GFX10-NEXT: v_and_b32_e32 v20, 0xffff0000, v20
-; GFX10-NEXT: v_and_b32_e32 v4, 0xffff0000, v4
-; GFX10-NEXT: v_max_f32_e32 v35, v36, v35
-; GFX10-NEXT: v_lshlrev_b32_e32 v36, 16, v19
-; GFX10-NEXT: v_max_f32_e32 v13, v13, v29
-; GFX10-NEXT: v_lshlrev_b32_e32 v29, 16, v3
-; GFX10-NEXT: v_and_b32_e32 v19, 0xffff0000, v19
-; GFX10-NEXT: v_and_b32_e32 v3, 0xffff0000, v3
-; GFX10-NEXT: v_max_f32_e32 v37, v38, v37
-; GFX10-NEXT: v_lshlrev_b32_e32 v38, 16, v18
-; GFX10-NEXT: v_max_f32_e32 v12, v12, v28
-; GFX10-NEXT: v_lshlrev_b32_e32 v28, 16, v2
+; GFX10-NEXT: v_lshlrev_b32_e32 v52, 16, v22
+; GFX10-NEXT: v_lshlrev_b32_e32 v53, 16, v6
+; GFX10-NEXT: v_and_b32_e32 v22, 0xffff0000, v22
+; GFX10-NEXT: v_and_b32_e32 v6, 0xffff0000, v6
+; GFX10-NEXT: v_lshlrev_b32_e32 v54, 16, v23
+; GFX10-NEXT: v_lshlrev_b32_e32 v55, 16, v7
+; GFX10-NEXT: v_and_b32_e32 v23, 0xffff0000, v23
+; GFX10-NEXT: v_and_b32_e32 v7, 0xffff0000, v7
+; GFX10-NEXT: v_lshlrev_b32_e32 v32, 16, v16
+; GFX10-NEXT: v_lshlrev_b32_e32 v33, 16, v0
+; GFX10-NEXT: v_and_b32_e32 v16, 0xffff0000, v16
+; GFX10-NEXT: v_and_b32_e32 v0, 0xffff0000, v0
+; GFX10-NEXT: v_lshlrev_b32_e32 v34, 16, v17
+; GFX10-NEXT: v_lshlrev_b32_e32 v35, 16, v1
+; GFX10-NEXT: v_and_b32_e32 v17, 0xffff0000, v17
+; GFX10-NEXT: v_and_b32_e32 v1, 0xffff0000, v1
+; GFX10-NEXT: v_lshlrev_b32_e32 v36, 16, v18
+; GFX10-NEXT: v_lshlrev_b32_e32 v37, 16, v2
; GFX10-NEXT: v_and_b32_e32 v18, 0xffff0000, v18
; GFX10-NEXT: v_and_b32_e32 v2, 0xffff0000, v2
+; GFX10-NEXT: v_lshlrev_b32_e32 v38, 16, v19
+; GFX10-NEXT: v_lshlrev_b32_e32 v39, 16, v3
+; GFX10-NEXT: v_and_b32_e32 v19, 0xffff0000, v19
+; GFX10-NEXT: v_and_b32_e32 v3, 0xffff0000, v3
+; GFX10-NEXT: v_lshlrev_b32_e32 v48, 16, v20
+; GFX10-NEXT: v_lshlrev_b32_e32 v49, 16, v4
+; GFX10-NEXT: v_and_b32_e32 v20, 0xffff0000, v20
+; GFX10-NEXT: v_and_b32_e32 v4, 0xffff0000, v4
+; GFX10-NEXT: v_max_f32_e32 v5, v5, v21
+; GFX10-NEXT: v_max_f32_e32 v21, v53, v52
+; GFX10-NEXT: v_max_f32_e32 v6, v6, v22
+; GFX10-NEXT: v_max_f32_e32 v22, v55, v54
+; GFX10-NEXT: v_max_f32_e32 v7, v7, v23
+; GFX10-NEXT: v_lshlrev_b32_e32 v64, 16, v24
+; GFX10-NEXT: v_lshlrev_b32_e32 v65, 16, v8
+; GFX10-NEXT: v_and_b32_e32 v24, 0xffff0000, v24
+; GFX10-NEXT: v_and_b32_e32 v8, 0xffff0000, v8
+; GFX10-NEXT: v_lshlrev_b32_e32 v66, 16, v25
+; GFX10-NEXT: v_lshlrev_b32_e32 v67, 16, v9
+; GFX10-NEXT: v_and_b32_e32 v25, 0xffff0000, v25
+; GFX10-NEXT: v_and_b32_e32 v9, 0xffff0000, v9
+; GFX10-NEXT: v_lshlrev_b32_e32 v68, 16, v26
+; GFX10-NEXT: v_max_f32_e32 v32, v33, v32
+; GFX10-NEXT: v_lshlrev_b32_e32 v33, 16, v10
+; GFX10-NEXT: v_and_b32_e32 v26, 0xffff0000, v26
+; GFX10-NEXT: v_and_b32_e32 v10, 0xffff0000, v10
; GFX10-NEXT: v_max_f32_e32 v0, v0, v16
+; GFX10-NEXT: v_lshlrev_b32_e32 v16, 16, v27
+; GFX10-NEXT: v_max_f32_e32 v34, v35, v34
+; GFX10-NEXT: v_lshlrev_b32_e32 v35, 16, v11
+; GFX10-NEXT: v_and_b32_e32 v27, 0xffff0000, v27
+; GFX10-NEXT: v_and_b32_e32 v11, 0xffff0000, v11
; GFX10-NEXT: v_max_f32_e32 v1, v1, v17
-; GFX10-NEXT: v_max_f32_e32 v51, v52, v51
-; GFX10-NEXT: v_max_f32_e32 v9, v9, v25
-; GFX10-NEXT: v_max_f32_e32 v25, v54, v53
-; GFX10-NEXT: v_max_f32_e32 v8, v8, v24
-; GFX10-NEXT: v_max_f32_e32 v24, v64, v55
-; GFX10-NEXT: v_max_f32_e32 v7, v7, v23
-; GFX10-NEXT: v_max_f32_e32 v23, v66, v65
-; GFX10-NEXT: v_max_f32_e32 v6, v6, v22
-; GFX10-NEXT: v_max_f32_e32 v22, v68, v67
-; GFX10-NEXT: v_max_f32_e32 v5, v5, v21
-; GFX10-NEXT: v_max_f32_e32 v21, v30, v34
-; GFX10-NEXT: v_max_f32_e32 v29, v29, v36
-; GFX10-NEXT: v_max_f32_e32 v28, v28, v38
-; GFX10-NEXT: v_max_f32_e32 v27, v27, v48
-; GFX10-NEXT: v_max_f32_e32 v26, v26, v50
+; GFX10-NEXT: v_lshlrev_b32_e32 v17, 16, v28
+; GFX10-NEXT: v_max_f32_e32 v36, v37, v36
+; GFX10-NEXT: v_lshlrev_b32_e32 v37, 16, v12
+; GFX10-NEXT: v_and_b32_e32 v28, 0xffff0000, v28
+; GFX10-NEXT: v_and_b32_e32 v12, 0xffff0000, v12
; GFX10-NEXT: v_max_f32_e32 v2, v2, v18
+; GFX10-NEXT: v_lshlrev_b32_e32 v18, 16, v29
+; GFX10-NEXT: v_max_f32_e32 v38, v39, v38
+; GFX10-NEXT: v_lshlrev_b32_e32 v39, 16, v13
+; GFX10-NEXT: v_and_b32_e32 v29, 0xffff0000, v29
+; GFX10-NEXT: v_and_b32_e32 v13, 0xffff0000, v13
; GFX10-NEXT: v_max_f32_e32 v3, v3, v19
+; GFX10-NEXT: v_lshlrev_b32_e32 v19, 16, v30
+; GFX10-NEXT: v_max_f32_e32 v48, v49, v48
+; GFX10-NEXT: v_lshlrev_b32_e32 v49, 16, v14
+; GFX10-NEXT: v_and_b32_e32 v30, 0xffff0000, v30
+; GFX10-NEXT: v_and_b32_e32 v14, 0xffff0000, v14
; GFX10-NEXT: v_max_f32_e32 v4, v4, v20
-; GFX10-NEXT: v_perm_b32 v1, v1, v27, 0x7060302
-; GFX10-NEXT: v_perm_b32 v0, v0, v26, 0x7060302
-; GFX10-NEXT: v_perm_b32 v2, v2, v28, 0x7060302
-; GFX10-NEXT: v_perm_b32 v3, v3, v29, 0x7060302
-; GFX10-NEXT: v_perm_b32 v4, v4, v21, 0x7060302
-; GFX10-NEXT: v_perm_b32 v5, v5, v22, 0x7060302
-; GFX10-NEXT: v_perm_b32 v6, v6, v23, 0x7060302
-; GFX10-NEXT: v_perm_b32 v7, v7, v24, 0x7060302
-; GFX10-NEXT: v_perm_b32 v8, v8, v25, 0x7060302
-; GFX10-NEXT: v_perm_b32 v9, v9, v51, 0x7060302
-; GFX10-NEXT: v_perm_b32 v10, v10, v49, 0x7060302
-; GFX10-NEXT: v_perm_b32 v11, v11, v39, 0x7060302
-; GFX10-NEXT: v_perm_b32 v12, v12, v37, 0x7060302
-; GFX10-NEXT: v_perm_b32 v13, v13, v35, 0x7060302
-; GFX10-NEXT: v_perm_b32 v14, v14, v33, 0x7060302
+; GFX10-NEXT: v_lshlrev_b32_e32 v20, 16, v15
+; GFX10-NEXT: v_and_b32_e32 v15, 0xffff0000, v15
+; GFX10-NEXT: v_perm_b32 v6, v6, v21, 0x7060302
+; GFX10-NEXT: v_perm_b32 v7, v7, v22, 0x7060302
+; GFX10-NEXT: v_max_f32_e32 v50, v51, v50
+; GFX10-NEXT: v_max_f32_e32 v23, v65, v64
+; GFX10-NEXT: v_max_f32_e32 v8, v8, v24
+; GFX10-NEXT: v_max_f32_e32 v24, v67, v66
+; GFX10-NEXT: v_max_f32_e32 v9, v9, v25
+; GFX10-NEXT: v_max_f32_e32 v25, v33, v68
+; GFX10-NEXT: v_max_f32_e32 v10, v10, v26
+; GFX10-NEXT: v_max_f32_e32 v16, v35, v16
+; GFX10-NEXT: v_max_f32_e32 v11, v11, v27
+; GFX10-NEXT: v_max_f32_e32 v17, v37, v17
+; GFX10-NEXT: v_max_f32_e32 v12, v12, v28
+; GFX10-NEXT: v_max_f32_e32 v18, v39, v18
+; GFX10-NEXT: v_max_f32_e32 v13, v13, v29
+; GFX10-NEXT: v_max_f32_e32 v19, v49, v19
+; GFX10-NEXT: v_max_f32_e32 v14, v14, v30
+; GFX10-NEXT: v_perm_b32 v0, v0, v32, 0x7060302
+; GFX10-NEXT: v_perm_b32 v1, v1, v34, 0x7060302
+; GFX10-NEXT: v_perm_b32 v2, v2, v36, 0x7060302
+; GFX10-NEXT: v_perm_b32 v3, v3, v38, 0x7060302
+; GFX10-NEXT: v_perm_b32 v4, v4, v48, 0x7060302
+; GFX10-NEXT: v_perm_b32 v5, v5, v50, 0x7060302
+; GFX10-NEXT: v_perm_b32 v8, v8, v23, 0x7060302
+; GFX10-NEXT: v_perm_b32 v9, v9, v24, 0x7060302
+; GFX10-NEXT: v_perm_b32 v10, v10, v25, 0x7060302
+; GFX10-NEXT: v_perm_b32 v11, v11, v16, 0x7060302
+; GFX10-NEXT: v_perm_b32 v12, v12, v17, 0x7060302
+; GFX10-NEXT: v_perm_b32 v13, v13, v18, 0x7060302
+; GFX10-NEXT: v_perm_b32 v14, v14, v19, 0x7060302
; GFX10-NEXT: s_waitcnt vmcnt(0)
-; GFX10-NEXT: v_lshlrev_b32_e32 v16, 16, v31
-; GFX10-NEXT: v_and_b32_e32 v17, 0xffff0000, v31
-; GFX10-NEXT: v_max_f32_e32 v16, v32, v16
-; GFX10-NEXT: v_max_f32_e32 v15, v15, v17
-; GFX10-NEXT: v_perm_b32 v15, v15, v16, 0x7060302
+; GFX10-NEXT: v_lshlrev_b32_e32 v21, 16, v31
+; GFX10-NEXT: v_and_b32_e32 v22, 0xffff0000, v31
+; GFX10-NEXT: v_max_f32_e32 v20, v20, v21
+; GFX10-NEXT: v_max_f32_e32 v15, v15, v22
+; GFX10-NEXT: v_perm_b32 v15, v15, v20, 0x7060302
; GFX10-NEXT: s_setpc_b64 s[30:31]
;
; GFX11-LABEL: v_maxnum_v32bf16:
; GFX11: ; %bb.0:
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-NEXT: scratch_load_b32 v31, off, s32
-; GFX11-NEXT: v_lshlrev_b32_e32 v83, 16, v17
-; GFX11-NEXT: v_lshlrev_b32_e32 v84, 16, v1
-; GFX11-NEXT: v_and_b32_e32 v17, 0xffff0000, v17
-; GFX11-NEXT: v_and_b32_e32 v1, 0xffff0000, v1
-; GFX11-NEXT: v_lshlrev_b32_e32 v85, 16, v16
-; GFX11-NEXT: v_lshlrev_b32_e32 v86, 16, v0
-; GFX11-NEXT: v_and_b32_e32 v16, 0xffff0000, v16
-; GFX11-NEXT: v_and_b32_e32 v0, 0xffff0000, v0
-; GFX11-NEXT: v_lshlrev_b32_e32 v54, 16, v8
-; GFX11-NEXT: v_lshlrev_b32_e32 v64, 16, v7
-; GFX11-NEXT: v_and_b32_e32 v7, 0xffff0000, v7
-; GFX11-NEXT: v_lshlrev_b32_e32 v65, 16, v22
-; GFX11-NEXT: v_lshlrev_b32_e32 v66, 16, v6
-; GFX11-NEXT: v_lshlrev_b32_e32 v48, 16, v11
-; GFX11-NEXT: v_dual_max_f32 v0, v0, v16 :: v_dual_and_b32 v11, 0xffff0000, v11
-; GFX11-NEXT: v_and_b32_e32 v22, 0xffff0000, v22
-; GFX11-NEXT: v_and_b32_e32 v6, 0xffff0000, v6
-; GFX11-NEXT: v_lshlrev_b32_e32 v67, 16, v21
-; GFX11-NEXT: v_lshlrev_b32_e32 v68, 16, v5
-; GFX11-NEXT: v_lshlrev_b32_e32 v51, 16, v25
-; GFX11-NEXT: v_and_b32_e32 v21, 0xffff0000, v21
-; GFX11-NEXT: v_and_b32_e32 v5, 0xffff0000, v5
-; GFX11-NEXT: v_lshlrev_b32_e32 v69, 16, v20
-; GFX11-NEXT: v_lshlrev_b32_e32 v70, 16, v4
-; GFX11-NEXT: v_and_b32_e32 v20, 0xffff0000, v20
-; GFX11-NEXT: v_and_b32_e32 v4, 0xffff0000, v4
-; GFX11-NEXT: v_lshlrev_b32_e32 v55, 16, v23
-; GFX11-NEXT: v_lshlrev_b32_e32 v71, 16, v19
-; GFX11-NEXT: v_lshlrev_b32_e32 v80, 16, v3
+; GFX11-NEXT: v_lshlrev_b32_e32 v68, 16, v26
+; GFX11-NEXT: v_lshlrev_b32_e32 v69, 16, v10
+; GFX11-NEXT: v_and_b32_e32 v26, 0xffff0000, v26
+; GFX11-NEXT: v_and_b32_e32 v10, 0xffff0000, v10
+; GFX11-NEXT: v_lshlrev_b32_e32 v70, 16, v27
+; GFX11-NEXT: v_lshlrev_b32_e32 v71, 16, v11
+; GFX11-NEXT: v_lshlrev_b32_e32 v50, 16, v21
+; GFX11-NEXT: v_lshlrev_b32_e32 v54, 16, v23
+; GFX11-NEXT: v_lshlrev_b32_e32 v55, 16, v7
+; GFX11-NEXT: v_lshlrev_b32_e32 v64, 16, v24
+; GFX11-NEXT: v_lshlrev_b32_e32 v65, 16, v8
+; GFX11-NEXT: v_and_b32_e32 v24, 0xffff0000, v24
+; GFX11-NEXT: v_and_b32_e32 v8, 0xffff0000, v8
+; GFX11-NEXT: v_lshlrev_b32_e32 v51, 16, v5
+; GFX11-NEXT: v_dual_max_f32 v10, v10, v26 :: v_dual_and_b32 v5, 0xffff0000, v5
+; GFX11-NEXT: v_lshlrev_b32_e32 v66, 16, v25
; GFX11-NEXT: v_and_b32_e32 v25, 0xffff0000, v25
-; GFX11-NEXT: v_and_b32_e32 v19, 0xffff0000, v19
-; GFX11-NEXT: v_and_b32_e32 v3, 0xffff0000, v3
-; GFX11-NEXT: v_lshlrev_b32_e32 v52, 16, v9
-; GFX11-NEXT: v_and_b32_e32 v9, 0xffff0000, v9
-; GFX11-NEXT: v_lshlrev_b32_e32 v81, 16, v18
-; GFX11-NEXT: v_lshlrev_b32_e32 v82, 16, v2
-; GFX11-NEXT: v_and_b32_e32 v18, 0xffff0000, v18
-; GFX11-NEXT: v_and_b32_e32 v2, 0xffff0000, v2
-; GFX11-NEXT: v_lshlrev_b32_e32 v53, 16, v24
-; GFX11-NEXT: v_dual_max_f32 v1, v1, v17 :: v_dual_and_b32 v24, 0xffff0000, v24
-; GFX11-NEXT: v_dual_max_f32 v5, v5, v21 :: v_dual_lshlrev_b32 v50, 16, v10
-; GFX11-NEXT: v_dual_max_f32 v21, v70, v69 :: v_dual_and_b32 v10, 0xffff0000, v10
-; GFX11-NEXT: v_dual_max_f32 v2, v2, v18 :: v_dual_max_f32 v3, v3, v19
-; GFX11-NEXT: v_dual_max_f32 v4, v4, v20 :: v_dual_lshlrev_b32 v49, 16, v26
-; GFX11-NEXT: v_dual_max_f32 v9, v9, v25 :: v_dual_and_b32 v26, 0xffff0000, v26
-; GFX11-NEXT: v_max_f32_e32 v6, v6, v22
-; GFX11-NEXT: v_dual_max_f32 v22, v68, v67 :: v_dual_lshlrev_b32 v37, 16, v28
+; GFX11-NEXT: v_and_b32_e32 v27, 0xffff0000, v27
+; GFX11-NEXT: v_and_b32_e32 v11, 0xffff0000, v11
+; GFX11-NEXT: v_lshlrev_b32_e32 v80, 16, v28
+; GFX11-NEXT: v_lshlrev_b32_e32 v81, 16, v12
+; GFX11-NEXT: v_lshlrev_b32_e32 v52, 16, v22
; GFX11-NEXT: v_and_b32_e32 v28, 0xffff0000, v28
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_4) | instid1(VALU_DEP_4)
-; GFX11-NEXT: v_max_f32_e32 v10, v10, v26
-; GFX11-NEXT: v_max_f32_e32 v26, v52, v51
-; GFX11-NEXT: v_perm_b32 v4, v4, v21, 0x7060302
-; GFX11-NEXT: v_max_f32_e32 v25, v54, v53
-; GFX11-NEXT: v_perm_b32 v5, v5, v22, 0x7060302
-; GFX11-NEXT: v_perm_b32 v9, v9, v26, 0x7060302
-; GFX11-NEXT: s_waitcnt vmcnt(0)
-; GFX11-NEXT: v_lshlrev_b32_e32 v16, 16, v31
+; GFX11-NEXT: v_and_b32_e32 v12, 0xffff0000, v12
+; GFX11-NEXT: v_lshlrev_b32_e32 v53, 16, v6
+; GFX11-NEXT: v_lshlrev_b32_e32 v82, 16, v29
+; GFX11-NEXT: v_lshlrev_b32_e32 v83, 16, v13
; GFX11-NEXT: v_and_b32_e32 v23, 0xffff0000, v23
-; GFX11-NEXT: v_and_b32_e32 v17, 0xffff0000, v31
-; GFX11-NEXT: v_and_b32_e32 v8, 0xffff0000, v8
-; GFX11-NEXT: v_lshlrev_b32_e32 v36, 16, v13
+; GFX11-NEXT: v_and_b32_e32 v29, 0xffff0000, v29
; GFX11-NEXT: v_and_b32_e32 v13, 0xffff0000, v13
-; GFX11-NEXT: v_lshlrev_b32_e32 v39, 16, v27
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4)
-; GFX11-NEXT: v_dual_max_f32 v8, v8, v24 :: v_dual_and_b32 v27, 0xffff0000, v27
-; GFX11-NEXT: v_max_f32_e32 v24, v64, v55
-; GFX11-NEXT: v_lshlrev_b32_e32 v38, 16, v12
-; GFX11-NEXT: v_and_b32_e32 v12, 0xffff0000, v12
-; GFX11-NEXT: v_lshlrev_b32_e32 v35, 16, v29
-; GFX11-NEXT: v_max_f32_e32 v7, v7, v23
-; GFX11-NEXT: v_max_f32_e32 v23, v66, v65
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_1) | instid1(VALU_DEP_2)
-; GFX11-NEXT: v_dual_max_f32 v12, v12, v28 :: v_dual_and_b32 v29, 0xffff0000, v29
-; GFX11-NEXT: v_dual_max_f32 v28, v48, v39 :: v_dual_lshlrev_b32 v33, 16, v30
-; GFX11-NEXT: v_dual_max_f32 v13, v13, v29 :: v_dual_lshlrev_b32 v34, 16, v14
-; GFX11-NEXT: v_lshlrev_b32_e32 v32, 16, v15
-; GFX11-NEXT: v_dual_max_f32 v11, v11, v27 :: v_dual_and_b32 v14, 0xffff0000, v14
-; GFX11-NEXT: v_dual_max_f32 v27, v50, v49 :: v_dual_and_b32 v30, 0xffff0000, v30
-; GFX11-NEXT: v_max_f32_e32 v29, v38, v37
+; GFX11-NEXT: v_lshlrev_b32_e32 v84, 16, v30
+; GFX11-NEXT: v_lshlrev_b32_e32 v85, 16, v14
+; GFX11-NEXT: v_and_b32_e32 v22, 0xffff0000, v22
+; GFX11-NEXT: v_and_b32_e32 v30, 0xffff0000, v30
+; GFX11-NEXT: v_and_b32_e32 v14, 0xffff0000, v14
+; GFX11-NEXT: v_and_b32_e32 v7, 0xffff0000, v7
+; GFX11-NEXT: v_lshlrev_b32_e32 v86, 16, v15
+; GFX11-NEXT: v_lshlrev_b32_e32 v67, 16, v9
+; GFX11-NEXT: v_and_b32_e32 v9, 0xffff0000, v9
+; GFX11-NEXT: v_lshlrev_b32_e32 v48, 16, v20
+; GFX11-NEXT: v_dual_max_f32 v11, v11, v27 :: v_dual_and_b32 v20, 0xffff0000, v20
; GFX11-NEXT: v_and_b32_e32 v15, 0xffff0000, v15
-; GFX11-NEXT: v_max_f32_e32 v37, v86, v85
-; GFX11-NEXT: v_perm_b32 v6, v6, v23, 0x7060302
+; GFX11-NEXT: v_and_b32_e32 v6, 0xffff0000, v6
+; GFX11-NEXT: v_dual_max_f32 v26, v71, v70 :: v_dual_lshlrev_b32 v49, 16, v4
+; GFX11-NEXT: v_dual_max_f32 v13, v13, v29 :: v_dual_and_b32 v4, 0xffff0000, v4
+; GFX11-NEXT: v_lshlrev_b32_e32 v35, 16, v1
+; GFX11-NEXT: v_lshlrev_b32_e32 v37, 16, v2
+; GFX11-NEXT: v_lshlrev_b32_e32 v38, 16, v19
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4)
+; GFX11-NEXT: v_max_f32_e32 v4, v4, v20
+; GFX11-NEXT: v_dual_max_f32 v8, v8, v24 :: v_dual_max_f32 v9, v9, v25
+; GFX11-NEXT: v_max_f32_e32 v25, v69, v68
+; GFX11-NEXT: v_dual_max_f32 v20, v51, v50 :: v_dual_lshlrev_b32 v39, 16, v3
+; GFX11-NEXT: v_max_f32_e32 v27, v81, v80
+; GFX11-NEXT: v_max_f32_e32 v12, v12, v28
+; GFX11-NEXT: v_dual_max_f32 v28, v83, v82 :: v_dual_max_f32 v29, v85, v84
+; GFX11-NEXT: v_dual_max_f32 v6, v6, v22 :: v_dual_and_b32 v3, 0xffff0000, v3
+; GFX11-NEXT: v_max_f32_e32 v22, v55, v54
+; GFX11-NEXT: v_lshlrev_b32_e32 v36, 16, v18
+; GFX11-NEXT: v_lshlrev_b32_e32 v34, 16, v17
+; GFX11-NEXT: v_and_b32_e32 v17, 0xffff0000, v17
+; GFX11-NEXT: v_and_b32_e32 v18, 0xffff0000, v18
; GFX11-NEXT: v_max_f32_e32 v14, v14, v30
-; GFX11-NEXT: v_dual_max_f32 v30, v36, v35 :: v_dual_max_f32 v33, v34, v33
-; GFX11-NEXT: v_dual_max_f32 v34, v80, v71 :: v_dual_max_f32 v35, v82, v81
-; GFX11-NEXT: v_max_f32_e32 v36, v84, v83
-; GFX11-NEXT: v_dual_max_f32 v16, v32, v16 :: v_dual_max_f32 v15, v15, v17
-; GFX11-NEXT: v_perm_b32 v0, v0, v37, 0x7060302
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
-; GFX11-NEXT: v_perm_b32 v2, v2, v35, 0x7060302
-; GFX11-NEXT: v_perm_b32 v1, v1, v36, 0x7060302
-; GFX11-NEXT: v_perm_b32 v3, v3, v34, 0x7060302
-; GFX11-NEXT: v_perm_b32 v7, v7, v24, 0x7060302
-; GFX11-NEXT: v_perm_b32 v8, v8, v25, 0x7060302
-; GFX11-NEXT: v_perm_b32 v10, v10, v27, 0x7060302
-; GFX11-NEXT: v_perm_b32 v11, v11, v28, 0x7060302
-; GFX11-NEXT: v_perm_b32 v12, v12, v29, 0x7060302
-; GFX11-NEXT: v_perm_b32 v13, v13, v30, 0x7060302
-; GFX11-NEXT: v_perm_b32 v14, v14, v33, 0x7060302
+; GFX11-NEXT: v_dual_max_f32 v7, v7, v23 :: v_dual_and_b32 v2, 0xffff0000, v2
+; GFX11-NEXT: v_and_b32_e32 v1, 0xffff0000, v1
+; GFX11-NEXT: v_max_f32_e32 v23, v65, v64
+; GFX11-NEXT: v_and_b32_e32 v19, 0xffff0000, v19
+; GFX11-NEXT: v_dual_max_f32 v24, v67, v66 :: v_dual_and_b32 v21, 0xffff0000, v21
+; GFX11-NEXT: v_max_f32_e32 v2, v2, v18
+; GFX11-NEXT: v_dual_max_f32 v1, v1, v17 :: v_dual_lshlrev_b32 v32, 16, v16
+; GFX11-NEXT: v_max_f32_e32 v18, v39, v38
+; GFX11-NEXT: v_dual_max_f32 v3, v3, v19 :: v_dual_and_b32 v16, 0xffff0000, v16
+; GFX11-NEXT: v_max_f32_e32 v19, v49, v48
+; GFX11-NEXT: v_max_f32_e32 v17, v37, v36
+; GFX11-NEXT: v_lshlrev_b32_e32 v33, 16, v0
+; GFX11-NEXT: v_dual_max_f32 v5, v5, v21 :: v_dual_and_b32 v0, 0xffff0000, v0
+; GFX11-NEXT: v_max_f32_e32 v21, v53, v52
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_1) | instid1(VALU_DEP_4)
+; GFX11-NEXT: v_perm_b32 v2, v2, v17, 0x7060302
+; GFX11-NEXT: v_perm_b32 v3, v3, v18, 0x7060302
+; GFX11-NEXT: v_max_f32_e32 v0, v0, v16
+; GFX11-NEXT: v_max_f32_e32 v16, v35, v34
+; GFX11-NEXT: v_max_f32_e32 v32, v33, v32
+; GFX11-NEXT: v_perm_b32 v4, v4, v19, 0x7060302
+; GFX11-NEXT: v_perm_b32 v5, v5, v20, 0x7060302
+; GFX11-NEXT: v_perm_b32 v6, v6, v21, 0x7060302
+; GFX11-NEXT: v_perm_b32 v1, v1, v16, 0x7060302
+; GFX11-NEXT: v_perm_b32 v0, v0, v32, 0x7060302
+; GFX11-NEXT: v_perm_b32 v7, v7, v22, 0x7060302
+; GFX11-NEXT: v_perm_b32 v8, v8, v23, 0x7060302
+; GFX11-NEXT: v_perm_b32 v9, v9, v24, 0x7060302
+; GFX11-NEXT: v_perm_b32 v10, v10, v25, 0x7060302
+; GFX11-NEXT: v_perm_b32 v11, v11, v26, 0x7060302
+; GFX11-NEXT: v_perm_b32 v12, v12, v27, 0x7060302
+; GFX11-NEXT: v_perm_b32 v13, v13, v28, 0x7060302
+; GFX11-NEXT: v_perm_b32 v14, v14, v29, 0x7060302
+; GFX11-NEXT: s_waitcnt vmcnt(0)
+; GFX11-NEXT: v_lshlrev_b32_e32 v16, 16, v31
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-NEXT: v_dual_max_f32 v16, v86, v16 :: v_dual_and_b32 v17, 0xffff0000, v31
+; GFX11-NEXT: v_max_f32_e32 v15, v15, v17
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
; GFX11-NEXT: v_perm_b32 v15, v15, v16, 0x7060302
; GFX11-NEXT: s_setpc_b64 s[30:31]
%op = call <32 x bfloat> @llvm.maxnum.v32bf16(<32 x bfloat> %a, <32 x bfloat> %b)
@@ -25401,38 +25381,36 @@ define <3 x bfloat> @v_select_v3bf16(i1 %cond, <3 x bfloat> %a, <3 x bfloat> %b)
; GCN-LABEL: v_select_v3bf16:
; GCN: ; %bb.0:
; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GCN-NEXT: v_lshrrev_b32_e32 v1, 16, v1
-; GCN-NEXT: v_lshrrev_b32_e32 v4, 16, v4
; GCN-NEXT: v_lshrrev_b32_e32 v2, 16, v2
; GCN-NEXT: v_lshrrev_b32_e32 v5, 16, v5
; GCN-NEXT: v_lshrrev_b32_e32 v3, 16, v3
; GCN-NEXT: v_lshrrev_b32_e32 v6, 16, v6
; GCN-NEXT: v_and_b32_e32 v0, 1, v0
+; GCN-NEXT: v_alignbit_b32 v1, v2, v1, 16
+; GCN-NEXT: v_alignbit_b32 v2, v5, v4, 16
; GCN-NEXT: v_cmp_eq_u32_e32 vcc, 1, v0
; GCN-NEXT: v_cndmask_b32_e32 v3, v6, v3, vcc
-; GCN-NEXT: v_cndmask_b32_e32 v2, v5, v2, vcc
-; GCN-NEXT: v_cndmask_b32_e32 v0, v4, v1, vcc
-; GCN-NEXT: v_lshlrev_b32_e32 v0, 16, v0
-; GCN-NEXT: v_lshlrev_b32_e32 v1, 16, v2
+; GCN-NEXT: v_cndmask_b32_e32 v1, v2, v1, vcc
+; GCN-NEXT: v_lshlrev_b32_e32 v0, 16, v1
+; GCN-NEXT: v_and_b32_e32 v1, 0xffff0000, v1
; GCN-NEXT: v_lshlrev_b32_e32 v2, 16, v3
; GCN-NEXT: s_setpc_b64 s[30:31]
;
; GFX7-LABEL: v_select_v3bf16:
; GFX7: ; %bb.0:
; GFX7-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX7-NEXT: v_and_b32_e32 v0, 1, v0
-; GFX7-NEXT: v_lshrrev_b32_e32 v1, 16, v1
-; GFX7-NEXT: v_lshrrev_b32_e32 v4, 16, v4
; GFX7-NEXT: v_lshrrev_b32_e32 v2, 16, v2
-; GFX7-NEXT: v_lshrrev_b32_e32 v5, 16, v5
+; GFX7-NEXT: v_alignbit_b32 v1, v2, v1, 16
+; GFX7-NEXT: v_lshrrev_b32_e32 v2, 16, v5
+; GFX7-NEXT: v_and_b32_e32 v0, 1, v0
+; GFX7-NEXT: v_alignbit_b32 v2, v2, v4, 16
; GFX7-NEXT: v_lshrrev_b32_e32 v3, 16, v3
-; GFX7-NEXT: v_lshrrev_b32_e32 v6, 16, v6
+; GFX7-NEXT: v_lshrrev_b32_e32 v4, 16, v6
; GFX7-NEXT: v_cmp_eq_u32_e32 vcc, 1, v0
-; GFX7-NEXT: v_cndmask_b32_e32 v3, v6, v3, vcc
-; GFX7-NEXT: v_cndmask_b32_e32 v2, v5, v2, vcc
-; GFX7-NEXT: v_cndmask_b32_e32 v0, v4, v1, vcc
-; GFX7-NEXT: v_lshlrev_b32_e32 v0, 16, v0
-; GFX7-NEXT: v_lshlrev_b32_e32 v1, 16, v2
+; GFX7-NEXT: v_cndmask_b32_e32 v3, v4, v3, vcc
+; GFX7-NEXT: v_cndmask_b32_e32 v1, v2, v1, vcc
+; GFX7-NEXT: v_lshlrev_b32_e32 v0, 16, v1
+; GFX7-NEXT: v_and_b32_e32 v1, 0xffff0000, v1
; GFX7-NEXT: v_lshlrev_b32_e32 v2, 16, v3
; GFX7-NEXT: s_setpc_b64 s[30:31]
;
@@ -25441,14 +25419,8 @@ define <3 x bfloat> @v_select_v3bf16(i1 %cond, <3 x bfloat> %a, <3 x bfloat> %b)
; GFX8-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX8-NEXT: v_and_b32_e32 v0, 1, v0
; GFX8-NEXT: v_cmp_eq_u32_e32 vcc, 1, v0
-; GFX8-NEXT: v_cndmask_b32_e32 v2, v4, v2, vcc
-; GFX8-NEXT: v_lshrrev_b32_e32 v0, 16, v1
-; GFX8-NEXT: v_lshrrev_b32_e32 v4, 16, v3
-; GFX8-NEXT: v_cndmask_b32_e32 v0, v4, v0, vcc
-; GFX8-NEXT: v_cndmask_b32_e32 v1, v3, v1, vcc
-; GFX8-NEXT: v_lshlrev_b32_e32 v0, 16, v0
-; GFX8-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
-; GFX8-NEXT: v_mov_b32_e32 v1, v2
+; GFX8-NEXT: v_cndmask_b32_e32 v0, v3, v1, vcc
+; GFX8-NEXT: v_cndmask_b32_e32 v1, v4, v2, vcc
; GFX8-NEXT: s_setpc_b64 s[30:31]
;
; GFX9-LABEL: v_select_v3bf16:
@@ -25485,47 +25457,43 @@ define <4 x bfloat> @v_select_v4bf16(i1 %cond, <4 x bfloat> %a, <4 x bfloat> %b)
; GCN-LABEL: v_select_v4bf16:
; GCN: ; %bb.0:
; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GCN-NEXT: v_lshrrev_b32_e32 v1, 16, v1
-; GCN-NEXT: v_lshrrev_b32_e32 v5, 16, v5
; GCN-NEXT: v_lshrrev_b32_e32 v2, 16, v2
; GCN-NEXT: v_lshrrev_b32_e32 v6, 16, v6
-; GCN-NEXT: v_lshrrev_b32_e32 v3, 16, v3
-; GCN-NEXT: v_lshrrev_b32_e32 v7, 16, v7
; GCN-NEXT: v_lshrrev_b32_e32 v4, 16, v4
; GCN-NEXT: v_lshrrev_b32_e32 v8, 16, v8
; GCN-NEXT: v_and_b32_e32 v0, 1, v0
+; GCN-NEXT: v_alignbit_b32 v1, v2, v1, 16
+; GCN-NEXT: v_alignbit_b32 v2, v6, v5, 16
+; GCN-NEXT: v_alignbit_b32 v3, v4, v3, 16
+; GCN-NEXT: v_alignbit_b32 v4, v8, v7, 16
; GCN-NEXT: v_cmp_eq_u32_e32 vcc, 1, v0
-; GCN-NEXT: v_cndmask_b32_e32 v4, v8, v4, vcc
-; GCN-NEXT: v_cndmask_b32_e32 v3, v7, v3, vcc
-; GCN-NEXT: v_cndmask_b32_e32 v2, v6, v2, vcc
-; GCN-NEXT: v_cndmask_b32_e32 v0, v5, v1, vcc
-; GCN-NEXT: v_lshlrev_b32_e32 v0, 16, v0
-; GCN-NEXT: v_lshlrev_b32_e32 v1, 16, v2
+; GCN-NEXT: v_cndmask_b32_e32 v3, v4, v3, vcc
+; GCN-NEXT: v_cndmask_b32_e32 v1, v2, v1, vcc
+; GCN-NEXT: v_lshlrev_b32_e32 v0, 16, v1
+; GCN-NEXT: v_and_b32_e32 v1, 0xffff0000, v1
; GCN-NEXT: v_lshlrev_b32_e32 v2, 16, v3
-; GCN-NEXT: v_lshlrev_b32_e32 v3, 16, v4
+; GCN-NEXT: v_and_b32_e32 v3, 0xffff0000, v3
; GCN-NEXT: s_setpc_b64 s[30:31]
;
; GFX7-LABEL: v_select_v4bf16:
; GFX7: ; %bb.0:
; GFX7-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX7-NEXT: v_and_b32_e32 v0, 1, v0
-; GFX7-NEXT: v_lshrrev_b32_e32 v1, 16, v1
-; GFX7-NEXT: v_lshrrev_b32_e32 v5, 16, v5
; GFX7-NEXT: v_lshrrev_b32_e32 v2, 16, v2
-; GFX7-NEXT: v_lshrrev_b32_e32 v6, 16, v6
-; GFX7-NEXT: v_lshrrev_b32_e32 v3, 16, v3
-; GFX7-NEXT: v_lshrrev_b32_e32 v7, 16, v7
; GFX7-NEXT: v_lshrrev_b32_e32 v4, 16, v4
-; GFX7-NEXT: v_lshrrev_b32_e32 v8, 16, v8
+; GFX7-NEXT: v_alignbit_b32 v1, v2, v1, 16
+; GFX7-NEXT: v_lshrrev_b32_e32 v2, 16, v6
+; GFX7-NEXT: v_alignbit_b32 v3, v4, v3, 16
+; GFX7-NEXT: v_lshrrev_b32_e32 v4, 16, v8
+; GFX7-NEXT: v_and_b32_e32 v0, 1, v0
+; GFX7-NEXT: v_alignbit_b32 v2, v2, v5, 16
+; GFX7-NEXT: v_alignbit_b32 v4, v4, v7, 16
; GFX7-NEXT: v_cmp_eq_u32_e32 vcc, 1, v0
-; GFX7-NEXT: v_cndmask_b32_e32 v4, v8, v4, vcc
-; GFX7-NEXT: v_cndmask_b32_e32 v3, v7, v3, vcc
-; GFX7-NEXT: v_cndmask_b32_e32 v2, v6, v2, vcc
-; GFX7-NEXT: v_cndmask_b32_e32 v0, v5, v1, vcc
-; GFX7-NEXT: v_lshlrev_b32_e32 v0, 16, v0
-; GFX7-NEXT: v_lshlrev_b32_e32 v1, 16, v2
+; GFX7-NEXT: v_cndmask_b32_e32 v3, v4, v3, vcc
+; GFX7-NEXT: v_cndmask_b32_e32 v1, v2, v1, vcc
+; GFX7-NEXT: v_lshlrev_b32_e32 v0, 16, v1
+; GFX7-NEXT: v_and_b32_e32 v1, 0xffff0000, v1
; GFX7-NEXT: v_lshlrev_b32_e32 v2, 16, v3
-; GFX7-NEXT: v_lshlrev_b32_e32 v3, 16, v4
+; GFX7-NEXT: v_and_b32_e32 v3, 0xffff0000, v3
; GFX7-NEXT: s_setpc_b64 s[30:31]
;
; GFX8-LABEL: v_select_v4bf16:
@@ -25533,18 +25501,8 @@ define <4 x bfloat> @v_select_v4bf16(i1 %cond, <4 x bfloat> %a, <4 x bfloat> %b)
; GFX8-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX8-NEXT: v_and_b32_e32 v0, 1, v0
; GFX8-NEXT: v_cmp_eq_u32_e32 vcc, 1, v0
-; GFX8-NEXT: v_lshrrev_b32_e32 v5, 16, v2
-; GFX8-NEXT: v_lshrrev_b32_e32 v6, 16, v4
-; GFX8-NEXT: v_cndmask_b32_e32 v2, v4, v2, vcc
-; GFX8-NEXT: v_lshrrev_b32_e32 v0, 16, v1
-; GFX8-NEXT: v_lshrrev_b32_e32 v4, 16, v3
-; GFX8-NEXT: v_cndmask_b32_e32 v0, v4, v0, vcc
-; GFX8-NEXT: v_cndmask_b32_e32 v5, v6, v5, vcc
-; GFX8-NEXT: v_cndmask_b32_e32 v1, v3, v1, vcc
-; GFX8-NEXT: v_lshlrev_b32_e32 v0, 16, v0
-; GFX8-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
-; GFX8-NEXT: v_lshlrev_b32_e32 v1, 16, v5
-; GFX8-NEXT: v_or_b32_sdwa v1, v2, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
+; GFX8-NEXT: v_cndmask_b32_e32 v0, v3, v1, vcc
+; GFX8-NEXT: v_cndmask_b32_e32 v1, v4, v2, vcc
; GFX8-NEXT: s_setpc_b64 s[30:31]
;
; GFX9-LABEL: v_select_v4bf16:
@@ -25581,63 +25539,57 @@ define <6 x bfloat> @v_select_v6bf16(i1 %cond, <6 x bfloat> %a, <6 x bfloat> %b)
; GCN-LABEL: v_select_v6bf16:
; GCN: ; %bb.0:
; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GCN-NEXT: v_lshrrev_b32_e32 v1, 16, v1
-; GCN-NEXT: v_lshrrev_b32_e32 v7, 16, v7
; GCN-NEXT: v_lshrrev_b32_e32 v2, 16, v2
; GCN-NEXT: v_lshrrev_b32_e32 v8, 16, v8
-; GCN-NEXT: v_lshrrev_b32_e32 v3, 16, v3
-; GCN-NEXT: v_lshrrev_b32_e32 v9, 16, v9
; GCN-NEXT: v_lshrrev_b32_e32 v4, 16, v4
; GCN-NEXT: v_lshrrev_b32_e32 v10, 16, v10
-; GCN-NEXT: v_lshrrev_b32_e32 v5, 16, v5
-; GCN-NEXT: v_lshrrev_b32_e32 v11, 16, v11
; GCN-NEXT: v_lshrrev_b32_e32 v6, 16, v6
; GCN-NEXT: v_lshrrev_b32_e32 v12, 16, v12
; GCN-NEXT: v_and_b32_e32 v0, 1, v0
+; GCN-NEXT: v_alignbit_b32 v1, v2, v1, 16
+; GCN-NEXT: v_alignbit_b32 v2, v8, v7, 16
+; GCN-NEXT: v_alignbit_b32 v3, v4, v3, 16
+; GCN-NEXT: v_alignbit_b32 v4, v10, v9, 16
+; GCN-NEXT: v_alignbit_b32 v5, v6, v5, 16
+; GCN-NEXT: v_alignbit_b32 v6, v12, v11, 16
; GCN-NEXT: v_cmp_eq_u32_e32 vcc, 1, v0
-; GCN-NEXT: v_cndmask_b32_e32 v6, v12, v6, vcc
-; GCN-NEXT: v_cndmask_b32_e32 v5, v11, v5, vcc
-; GCN-NEXT: v_cndmask_b32_e32 v4, v10, v4, vcc
-; GCN-NEXT: v_cndmask_b32_e32 v3, v9, v3, vcc
-; GCN-NEXT: v_cndmask_b32_e32 v2, v8, v2, vcc
-; GCN-NEXT: v_cndmask_b32_e32 v0, v7, v1, vcc
-; GCN-NEXT: v_lshlrev_b32_e32 v0, 16, v0
-; GCN-NEXT: v_lshlrev_b32_e32 v1, 16, v2
+; GCN-NEXT: v_cndmask_b32_e32 v5, v6, v5, vcc
+; GCN-NEXT: v_cndmask_b32_e32 v3, v4, v3, vcc
+; GCN-NEXT: v_cndmask_b32_e32 v1, v2, v1, vcc
+; GCN-NEXT: v_lshlrev_b32_e32 v0, 16, v1
+; GCN-NEXT: v_and_b32_e32 v1, 0xffff0000, v1
; GCN-NEXT: v_lshlrev_b32_e32 v2, 16, v3
-; GCN-NEXT: v_lshlrev_b32_e32 v3, 16, v4
+; GCN-NEXT: v_and_b32_e32 v3, 0xffff0000, v3
; GCN-NEXT: v_lshlrev_b32_e32 v4, 16, v5
-; GCN-NEXT: v_lshlrev_b32_e32 v5, 16, v6
+; GCN-NEXT: v_and_b32_e32 v5, 0xffff0000, v5
; GCN-NEXT: s_setpc_b64 s[30:31]
;
; GFX7-LABEL: v_select_v6bf16:
; GFX7: ; %bb.0:
; GFX7-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX7-NEXT: v_and_b32_e32 v0, 1, v0
-; GFX7-NEXT: v_lshrrev_b32_e32 v1, 16, v1
-; GFX7-NEXT: v_lshrrev_b32_e32 v7, 16, v7
; GFX7-NEXT: v_lshrrev_b32_e32 v2, 16, v2
-; GFX7-NEXT: v_lshrrev_b32_e32 v8, 16, v8
-; GFX7-NEXT: v_lshrrev_b32_e32 v3, 16, v3
-; GFX7-NEXT: v_lshrrev_b32_e32 v9, 16, v9
; GFX7-NEXT: v_lshrrev_b32_e32 v4, 16, v4
-; GFX7-NEXT: v_lshrrev_b32_e32 v10, 16, v10
-; GFX7-NEXT: v_lshrrev_b32_e32 v5, 16, v5
-; GFX7-NEXT: v_lshrrev_b32_e32 v11, 16, v11
; GFX7-NEXT: v_lshrrev_b32_e32 v6, 16, v6
-; GFX7-NEXT: v_lshrrev_b32_e32 v12, 16, v12
+; GFX7-NEXT: v_alignbit_b32 v1, v2, v1, 16
+; GFX7-NEXT: v_lshrrev_b32_e32 v2, 16, v8
+; GFX7-NEXT: v_alignbit_b32 v3, v4, v3, 16
+; GFX7-NEXT: v_lshrrev_b32_e32 v4, 16, v10
+; GFX7-NEXT: v_alignbit_b32 v5, v6, v5, 16
+; GFX7-NEXT: v_lshrrev_b32_e32 v6, 16, v12
+; GFX7-NEXT: v_and_b32_e32 v0, 1, v0
+; GFX7-NEXT: v_alignbit_b32 v2, v2, v7, 16
+; GFX7-NEXT: v_alignbit_b32 v4, v4, v9, 16
+; GFX7-NEXT: v_alignbit_b32 v6, v6, v11, 16
; GFX7-NEXT: v_cmp_eq_u32_e32 vcc, 1, v0
-; GFX7-NEXT: v_cndmask_b32_e32 v6, v12, v6, vcc
-; GFX7-NEXT: v_cndmask_b32_e32 v5, v11, v5, vcc
-; GFX7-NEXT: v_cndmask_b32_e32 v4, v10, v4, vcc
-; GFX7-NEXT: v_cndmask_b32_e32 v3, v9, v3, vcc
-; GFX7-NEXT: v_cndmask_b32_e32 v2, v8, v2, vcc
-; GFX7-NEXT: v_cndmask_b32_e32 v0, v7, v1, vcc
-; GFX7-NEXT: v_lshlrev_b32_e32 v0, 16, v0
-; GFX7-NEXT: v_lshlrev_b32_e32 v1, 16, v2
+; GFX7-NEXT: v_cndmask_b32_e32 v5, v6, v5, vcc
+; GFX7-NEXT: v_cndmask_b32_e32 v3, v4, v3, vcc
+; GFX7-NEXT: v_cndmask_b32_e32 v1, v2, v1, vcc
+; GFX7-NEXT: v_lshlrev_b32_e32 v0, 16, v1
+; GFX7-NEXT: v_and_b32_e32 v1, 0xffff0000, v1
; GFX7-NEXT: v_lshlrev_b32_e32 v2, 16, v3
-; GFX7-NEXT: v_lshlrev_b32_e32 v3, 16, v4
+; GFX7-NEXT: v_and_b32_e32 v3, 0xffff0000, v3
; GFX7-NEXT: v_lshlrev_b32_e32 v4, 16, v5
-; GFX7-NEXT: v_lshlrev_b32_e32 v5, 16, v6
+; GFX7-NEXT: v_and_b32_e32 v5, 0xffff0000, v5
; GFX7-NEXT: s_setpc_b64 s[30:31]
;
; GFX8-LABEL: v_select_v6bf16:
@@ -25645,24 +25597,9 @@ define <6 x bfloat> @v_select_v6bf16(i1 %cond, <6 x bfloat> %a, <6 x bfloat> %b)
; GFX8-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX8-NEXT: v_and_b32_e32 v0, 1, v0
; GFX8-NEXT: v_cmp_eq_u32_e32 vcc, 1, v0
-; GFX8-NEXT: v_lshrrev_b32_e32 v7, 16, v3
-; GFX8-NEXT: v_lshrrev_b32_e32 v8, 16, v6
-; GFX8-NEXT: v_cndmask_b32_e32 v3, v6, v3, vcc
-; GFX8-NEXT: v_lshrrev_b32_e32 v0, 16, v2
-; GFX8-NEXT: v_lshrrev_b32_e32 v6, 16, v5
-; GFX8-NEXT: v_cndmask_b32_e32 v6, v6, v0, vcc
-; GFX8-NEXT: v_cndmask_b32_e32 v2, v5, v2, vcc
-; GFX8-NEXT: v_lshrrev_b32_e32 v0, 16, v1
-; GFX8-NEXT: v_lshrrev_b32_e32 v5, 16, v4
-; GFX8-NEXT: v_cndmask_b32_e32 v0, v5, v0, vcc
-; GFX8-NEXT: v_cndmask_b32_e32 v1, v4, v1, vcc
-; GFX8-NEXT: v_lshlrev_b32_e32 v0, 16, v0
-; GFX8-NEXT: v_cndmask_b32_e32 v7, v8, v7, vcc
-; GFX8-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
-; GFX8-NEXT: v_lshlrev_b32_e32 v1, 16, v6
-; GFX8-NEXT: v_or_b32_sdwa v1, v2, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
-; GFX8-NEXT: v_lshlrev_b32_e32 v2, 16, v7
-; GFX8-NEXT: v_or_b32_sdwa v2, v3, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
+; GFX8-NEXT: v_cndmask_b32_e32 v0, v4, v1, vcc
+; GFX8-NEXT: v_cndmask_b32_e32 v1, v5, v2, vcc
+; GFX8-NEXT: v_cndmask_b32_e32 v2, v6, v3, vcc
; GFX8-NEXT: s_setpc_b64 s[30:31]
;
; GFX9-LABEL: v_select_v6bf16:
@@ -25702,79 +25639,71 @@ define <8 x bfloat> @v_select_v8bf16(i1 %cond, <8 x bfloat> %a, <8 x bfloat> %b)
; GCN-LABEL: v_select_v8bf16:
; GCN: ; %bb.0:
; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GCN-NEXT: v_lshrrev_b32_e32 v1, 16, v1
-; GCN-NEXT: v_lshrrev_b32_e32 v9, 16, v9
; GCN-NEXT: v_lshrrev_b32_e32 v2, 16, v2
; GCN-NEXT: v_lshrrev_b32_e32 v10, 16, v10
-; GCN-NEXT: v_lshrrev_b32_e32 v3, 16, v3
-; GCN-NEXT: v_lshrrev_b32_e32 v11, 16, v11
; GCN-NEXT: v_lshrrev_b32_e32 v4, 16, v4
; GCN-NEXT: v_lshrrev_b32_e32 v12, 16, v12
-; GCN-NEXT: v_lshrrev_b32_e32 v5, 16, v5
-; GCN-NEXT: v_lshrrev_b32_e32 v13, 16, v13
; GCN-NEXT: v_lshrrev_b32_e32 v6, 16, v6
; GCN-NEXT: v_lshrrev_b32_e32 v14, 16, v14
-; GCN-NEXT: v_lshrrev_b32_e32 v7, 16, v7
-; GCN-NEXT: v_lshrrev_b32_e32 v15, 16, v15
; GCN-NEXT: v_lshrrev_b32_e32 v8, 16, v8
; GCN-NEXT: v_lshrrev_b32_e32 v16, 16, v16
; GCN-NEXT: v_and_b32_e32 v0, 1, v0
+; GCN-NEXT: v_alignbit_b32 v1, v2, v1, 16
+; GCN-NEXT: v_alignbit_b32 v2, v10, v9, 16
+; GCN-NEXT: v_alignbit_b32 v3, v4, v3, 16
+; GCN-NEXT: v_alignbit_b32 v4, v12, v11, 16
+; GCN-NEXT: v_alignbit_b32 v5, v6, v5, 16
+; GCN-NEXT: v_alignbit_b32 v6, v14, v13, 16
+; GCN-NEXT: v_alignbit_b32 v7, v8, v7, 16
+; GCN-NEXT: v_alignbit_b32 v8, v16, v15, 16
; GCN-NEXT: v_cmp_eq_u32_e32 vcc, 1, v0
-; GCN-NEXT: v_cndmask_b32_e32 v8, v16, v8, vcc
-; GCN-NEXT: v_cndmask_b32_e32 v7, v15, v7, vcc
-; GCN-NEXT: v_cndmask_b32_e32 v6, v14, v6, vcc
-; GCN-NEXT: v_cndmask_b32_e32 v5, v13, v5, vcc
-; GCN-NEXT: v_cndmask_b32_e32 v4, v12, v4, vcc
-; GCN-NEXT: v_cndmask_b32_e32 v3, v11, v3, vcc
-; GCN-NEXT: v_cndmask_b32_e32 v2, v10, v2, vcc
-; GCN-NEXT: v_cndmask_b32_e32 v0, v9, v1, vcc
-; GCN-NEXT: v_lshlrev_b32_e32 v0, 16, v0
-; GCN-NEXT: v_lshlrev_b32_e32 v1, 16, v2
+; GCN-NEXT: v_cndmask_b32_e32 v7, v8, v7, vcc
+; GCN-NEXT: v_cndmask_b32_e32 v5, v6, v5, vcc
+; GCN-NEXT: v_cndmask_b32_e32 v3, v4, v3, vcc
+; GCN-NEXT: v_cndmask_b32_e32 v1, v2, v1, vcc
+; GCN-NEXT: v_lshlrev_b32_e32 v0, 16, v1
+; GCN-NEXT: v_and_b32_e32 v1, 0xffff0000, v1
; GCN-NEXT: v_lshlrev_b32_e32 v2, 16, v3
-; GCN-NEXT: v_lshlrev_b32_e32 v3, 16, v4
+; GCN-NEXT: v_and_b32_e32 v3, 0xffff0000, v3
; GCN-NEXT: v_lshlrev_b32_e32 v4, 16, v5
-; GCN-NEXT: v_lshlrev_b32_e32 v5, 16, v6
+; GCN-NEXT: v_and_b32_e32 v5, 0xffff0000, v5
; GCN-NEXT: v_lshlrev_b32_e32 v6, 16, v7
-; GCN-NEXT: v_lshlrev_b32_e32 v7, 16, v8
+; GCN-NEXT: v_and_b32_e32 v7, 0xffff0000, v7
; GCN-NEXT: s_setpc_b64 s[30:31]
;
; GFX7-LABEL: v_select_v8bf16:
; GFX7: ; %bb.0:
; GFX7-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX7-NEXT: v_and_b32_e32 v0, 1, v0
-; GFX7-NEXT: v_lshrrev_b32_e32 v1, 16, v1
-; GFX7-NEXT: v_lshrrev_b32_e32 v9, 16, v9
; GFX7-NEXT: v_lshrrev_b32_e32 v2, 16, v2
-; GFX7-NEXT: v_lshrrev_b32_e32 v10, 16, v10
-; GFX7-NEXT: v_lshrrev_b32_e32 v3, 16, v3
-; GFX7-NEXT: v_lshrrev_b32_e32 v11, 16, v11
; GFX7-NEXT: v_lshrrev_b32_e32 v4, 16, v4
-; GFX7-NEXT: v_lshrrev_b32_e32 v12, 16, v12
-; GFX7-NEXT: v_lshrrev_b32_e32 v5, 16, v5
-; GFX7-NEXT: v_lshrrev_b32_e32 v13, 16, v13
; GFX7-NEXT: v_lshrrev_b32_e32 v6, 16, v6
-; GFX7-NEXT: v_lshrrev_b32_e32 v14, 16, v14
-; GFX7-NEXT: v_lshrrev_b32_e32 v7, 16, v7
-; GFX7-NEXT: v_lshrrev_b32_e32 v15, 16, v15
; GFX7-NEXT: v_lshrrev_b32_e32 v8, 16, v8
-; GFX7-NEXT: v_lshrrev_b32_e32 v16, 16, v16
+; GFX7-NEXT: v_alignbit_b32 v1, v2, v1, 16
+; GFX7-NEXT: v_lshrrev_b32_e32 v2, 16, v10
+; GFX7-NEXT: v_alignbit_b32 v3, v4, v3, 16
+; GFX7-NEXT: v_lshrrev_b32_e32 v4, 16, v12
+; GFX7-NEXT: v_alignbit_b32 v5, v6, v5, 16
+; GFX7-NEXT: v_lshrrev_b32_e32 v6, 16, v14
+; GFX7-NEXT: v_alignbit_b32 v7, v8, v7, 16
+; GFX7-NEXT: v_lshrrev_b32_e32 v8, 16, v16
+; GFX7-NEXT: v_and_b32_e32 v0, 1, v0
+; GFX7-NEXT: v_alignbit_b32 v2, v2, v9, 16
+; GFX7-NEXT: v_alignbit_b32 v4, v4, v11, 16
+; GFX7-NEXT: v_alignbit_b32 v6, v6, v13, 16
+; GFX7-NEXT: v_alignbit_b32 v8, v8, v15, 16
; GFX7-NEXT: v_cmp_eq_u32_e32 vcc, 1, v0
-; GFX7-NEXT: v_cndmask_b32_e32 v8, v16, v8, vcc
-; GFX7-NEXT: v_cndmask_b32_e32 v7, v15, v7, vcc
-; GFX7-NEXT: v_cndmask_b32_e32 v6, v14, v6, vcc
-; GFX7-NEXT: v_cndmask_b32_e32 v5, v13, v5, vcc
-; GFX7-NEXT: v_cndmask_b32_e32 v4, v12, v4, vcc
-; GFX7-NEXT: v_cndmask_b32_e32 v3, v11, v3, vcc
-; GFX7-NEXT: v_cndmask_b32_e32 v2, v10, v2, vcc
-; GFX7-NEXT: v_cndmask_b32_e32 v0, v9, v1, vcc
-; GFX7-NEXT: v_lshlrev_b32_e32 v0, 16, v0
-; GFX7-NEXT: v_lshlrev_b32_e32 v1, 16, v2
+; GFX7-NEXT: v_cndmask_b32_e32 v7, v8, v7, vcc
+; GFX7-NEXT: v_cndmask_b32_e32 v5, v6, v5, vcc
+; GFX7-NEXT: v_cndmask_b32_e32 v3, v4, v3, vcc
+; GFX7-NEXT: v_cndmask_b32_e32 v1, v2, v1, vcc
+; GFX7-NEXT: v_lshlrev_b32_e32 v0, 16, v1
+; GFX7-NEXT: v_and_b32_e32 v1, 0xffff0000, v1
; GFX7-NEXT: v_lshlrev_b32_e32 v2, 16, v3
-; GFX7-NEXT: v_lshlrev_b32_e32 v3, 16, v4
+; GFX7-NEXT: v_and_b32_e32 v3, 0xffff0000, v3
; GFX7-NEXT: v_lshlrev_b32_e32 v4, 16, v5
-; GFX7-NEXT: v_lshlrev_b32_e32 v5, 16, v6
+; GFX7-NEXT: v_and_b32_e32 v5, 0xffff0000, v5
; GFX7-NEXT: v_lshlrev_b32_e32 v6, 16, v7
-; GFX7-NEXT: v_lshlrev_b32_e32 v7, 16, v8
+; GFX7-NEXT: v_and_b32_e32 v7, 0xffff0000, v7
; GFX7-NEXT: s_setpc_b64 s[30:31]
;
; GFX8-LABEL: v_select_v8bf16:
@@ -25782,30 +25711,10 @@ define <8 x bfloat> @v_select_v8bf16(i1 %cond, <8 x bfloat> %a, <8 x bfloat> %b)
; GFX8-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX8-NEXT: v_and_b32_e32 v0, 1, v0
; GFX8-NEXT: v_cmp_eq_u32_e32 vcc, 1, v0
-; GFX8-NEXT: v_lshrrev_b32_e32 v9, 16, v4
-; GFX8-NEXT: v_lshrrev_b32_e32 v10, 16, v8
-; GFX8-NEXT: v_cndmask_b32_e32 v4, v8, v4, vcc
-; GFX8-NEXT: v_lshrrev_b32_e32 v0, 16, v3
-; GFX8-NEXT: v_lshrrev_b32_e32 v8, 16, v7
-; GFX8-NEXT: v_cndmask_b32_e32 v8, v8, v0, vcc
-; GFX8-NEXT: v_cndmask_b32_e32 v3, v7, v3, vcc
-; GFX8-NEXT: v_lshrrev_b32_e32 v0, 16, v2
-; GFX8-NEXT: v_lshrrev_b32_e32 v7, 16, v6
-; GFX8-NEXT: v_cndmask_b32_e32 v7, v7, v0, vcc
-; GFX8-NEXT: v_cndmask_b32_e32 v2, v6, v2, vcc
-; GFX8-NEXT: v_lshrrev_b32_e32 v0, 16, v1
-; GFX8-NEXT: v_lshrrev_b32_e32 v6, 16, v5
-; GFX8-NEXT: v_cndmask_b32_e32 v0, v6, v0, vcc
-; GFX8-NEXT: v_cndmask_b32_e32 v1, v5, v1, vcc
-; GFX8-NEXT: v_lshlrev_b32_e32 v0, 16, v0
-; GFX8-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
-; GFX8-NEXT: v_lshlrev_b32_e32 v1, 16, v7
-; GFX8-NEXT: v_cndmask_b32_e32 v9, v10, v9, vcc
-; GFX8-NEXT: v_or_b32_sdwa v1, v2, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
-; GFX8-NEXT: v_lshlrev_b32_e32 v2, 16, v8
-; GFX8-NEXT: v_or_b32_sdwa v2, v3, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
-; GFX8-NEXT: v_lshlrev_b32_e32 v3, 16, v9
-; GFX8-NEXT: v_or_b32_sdwa v3, v4, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
+; GFX8-NEXT: v_cndmask_b32_e32 v0, v5, v1, vcc
+; GFX8-NEXT: v_cndmask_b32_e32 v1, v6, v2, vcc
+; GFX8-NEXT: v_cndmask_b32_e32 v2, v7, v3, vcc
+; GFX8-NEXT: v_cndmask_b32_e32 v3, v8, v4, vcc
; GFX8-NEXT: s_setpc_b64 s[30:31]
;
; GFX9-LABEL: v_select_v8bf16:
@@ -25847,151 +25756,135 @@ define <16 x bfloat> @v_select_v16bf16(i1 %cond, <16 x bfloat> %a, <16 x bfloat>
; GCN-LABEL: v_select_v16bf16:
; GCN: ; %bb.0:
; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GCN-NEXT: v_and_b32_e32 v0, 1, v0
-; GCN-NEXT: v_cmp_eq_u32_e32 vcc, 1, v0
-; GCN-NEXT: v_lshrrev_b32_e32 v0, 16, v14
-; GCN-NEXT: v_lshrrev_b32_e32 v14, 16, v30
-; GCN-NEXT: v_cndmask_b32_e32 v14, v14, v0, vcc
-; GCN-NEXT: v_lshrrev_b32_e32 v0, 16, v13
-; GCN-NEXT: v_lshrrev_b32_e32 v13, 16, v29
-; GCN-NEXT: v_cndmask_b32_e32 v13, v13, v0, vcc
-; GCN-NEXT: v_lshrrev_b32_e32 v0, 16, v12
-; GCN-NEXT: v_lshrrev_b32_e32 v12, 16, v28
-; GCN-NEXT: v_cndmask_b32_e32 v12, v12, v0, vcc
-; GCN-NEXT: v_lshrrev_b32_e32 v0, 16, v11
-; GCN-NEXT: v_lshrrev_b32_e32 v11, 16, v27
-; GCN-NEXT: v_cndmask_b32_e32 v11, v11, v0, vcc
-; GCN-NEXT: v_lshrrev_b32_e32 v0, 16, v10
-; GCN-NEXT: v_lshrrev_b32_e32 v10, 16, v26
-; GCN-NEXT: v_cndmask_b32_e32 v10, v10, v0, vcc
-; GCN-NEXT: v_lshrrev_b32_e32 v0, 16, v9
-; GCN-NEXT: v_lshrrev_b32_e32 v9, 16, v25
-; GCN-NEXT: v_cndmask_b32_e32 v9, v9, v0, vcc
-; GCN-NEXT: v_lshrrev_b32_e32 v0, 16, v8
-; GCN-NEXT: v_lshrrev_b32_e32 v8, 16, v24
-; GCN-NEXT: v_cndmask_b32_e32 v8, v8, v0, vcc
-; GCN-NEXT: v_lshrrev_b32_e32 v0, 16, v7
-; GCN-NEXT: v_lshrrev_b32_e32 v7, 16, v23
-; GCN-NEXT: v_cndmask_b32_e32 v7, v7, v0, vcc
-; GCN-NEXT: v_lshrrev_b32_e32 v0, 16, v6
+; GCN-NEXT: v_lshrrev_b32_e32 v2, 16, v2
+; GCN-NEXT: v_alignbit_b32 v1, v2, v1, 16
+; GCN-NEXT: v_lshrrev_b32_e32 v2, 16, v18
+; GCN-NEXT: v_alignbit_b32 v2, v2, v17, 16
+; GCN-NEXT: v_lshrrev_b32_e32 v4, 16, v4
+; GCN-NEXT: v_alignbit_b32 v3, v4, v3, 16
+; GCN-NEXT: v_lshrrev_b32_e32 v4, 16, v20
+; GCN-NEXT: v_alignbit_b32 v4, v4, v19, 16
+; GCN-NEXT: v_lshrrev_b32_e32 v6, 16, v6
+; GCN-NEXT: v_alignbit_b32 v5, v6, v5, 16
; GCN-NEXT: v_lshrrev_b32_e32 v6, 16, v22
-; GCN-NEXT: v_cndmask_b32_e32 v6, v6, v0, vcc
-; GCN-NEXT: v_lshrrev_b32_e32 v0, 16, v5
-; GCN-NEXT: v_lshrrev_b32_e32 v5, 16, v21
-; GCN-NEXT: v_cndmask_b32_e32 v5, v5, v0, vcc
-; GCN-NEXT: buffer_load_dword v21, off, s[0:3], s32 offset:4
+; GCN-NEXT: v_alignbit_b32 v6, v6, v21, 16
+; GCN-NEXT: v_lshrrev_b32_e32 v8, 16, v8
+; GCN-NEXT: v_alignbit_b32 v7, v8, v7, 16
+; GCN-NEXT: v_lshrrev_b32_e32 v8, 16, v24
+; GCN-NEXT: v_alignbit_b32 v8, v8, v23, 16
+; GCN-NEXT: v_lshrrev_b32_e32 v10, 16, v10
+; GCN-NEXT: v_alignbit_b32 v9, v10, v9, 16
+; GCN-NEXT: v_lshrrev_b32_e32 v10, 16, v26
+; GCN-NEXT: v_alignbit_b32 v10, v10, v25, 16
+; GCN-NEXT: v_lshrrev_b32_e32 v12, 16, v12
+; GCN-NEXT: v_lshrrev_b32_e32 v17, 16, v28
+; GCN-NEXT: v_lshrrev_b32_e32 v14, 16, v14
+; GCN-NEXT: v_lshrrev_b32_e32 v18, 16, v30
; GCN-NEXT: v_lshrrev_b32_e32 v16, 16, v16
-; GCN-NEXT: v_lshrrev_b32_e32 v0, 16, v4
-; GCN-NEXT: v_lshrrev_b32_e32 v4, 16, v20
-; GCN-NEXT: v_cndmask_b32_e32 v4, v4, v0, vcc
-; GCN-NEXT: buffer_load_dword v20, off, s[0:3], s32
-; GCN-NEXT: v_lshrrev_b32_e32 v15, 16, v15
-; GCN-NEXT: v_lshrrev_b32_e32 v0, 16, v1
-; GCN-NEXT: v_lshrrev_b32_e32 v1, 16, v17
-; GCN-NEXT: v_lshrrev_b32_e32 v2, 16, v2
-; GCN-NEXT: v_lshrrev_b32_e32 v17, 16, v18
-; GCN-NEXT: v_lshrrev_b32_e32 v3, 16, v3
-; GCN-NEXT: v_lshrrev_b32_e32 v18, 16, v19
-; GCN-NEXT: v_cndmask_b32_e32 v3, v18, v3, vcc
-; GCN-NEXT: v_cndmask_b32_e32 v2, v17, v2, vcc
-; GCN-NEXT: v_cndmask_b32_e32 v0, v1, v0, vcc
-; GCN-NEXT: v_lshlrev_b32_e32 v0, 16, v0
-; GCN-NEXT: v_lshlrev_b32_e32 v1, 16, v2
+; GCN-NEXT: v_alignbit_b32 v11, v12, v11, 16
+; GCN-NEXT: buffer_load_dword v19, off, s[0:3], s32 offset:4
+; GCN-NEXT: v_alignbit_b32 v12, v17, v27, 16
+; GCN-NEXT: buffer_load_dword v17, off, s[0:3], s32
+; GCN-NEXT: v_and_b32_e32 v0, 1, v0
+; GCN-NEXT: v_alignbit_b32 v13, v14, v13, 16
+; GCN-NEXT: v_alignbit_b32 v14, v18, v29, 16
+; GCN-NEXT: v_alignbit_b32 v15, v16, v15, 16
+; GCN-NEXT: v_cmp_eq_u32_e32 vcc, 1, v0
+; GCN-NEXT: v_cndmask_b32_e32 v13, v14, v13, vcc
+; GCN-NEXT: v_cndmask_b32_e32 v11, v12, v11, vcc
+; GCN-NEXT: v_cndmask_b32_e32 v9, v10, v9, vcc
+; GCN-NEXT: v_cndmask_b32_e32 v7, v8, v7, vcc
+; GCN-NEXT: v_cndmask_b32_e32 v5, v6, v5, vcc
+; GCN-NEXT: v_cndmask_b32_e32 v3, v4, v3, vcc
+; GCN-NEXT: v_cndmask_b32_e32 v1, v2, v1, vcc
+; GCN-NEXT: v_lshlrev_b32_e32 v0, 16, v1
+; GCN-NEXT: v_and_b32_e32 v1, 0xffff0000, v1
; GCN-NEXT: v_lshlrev_b32_e32 v2, 16, v3
-; GCN-NEXT: v_lshlrev_b32_e32 v3, 16, v4
+; GCN-NEXT: v_and_b32_e32 v3, 0xffff0000, v3
; GCN-NEXT: v_lshlrev_b32_e32 v4, 16, v5
-; GCN-NEXT: v_lshlrev_b32_e32 v5, 16, v6
+; GCN-NEXT: v_and_b32_e32 v5, 0xffff0000, v5
; GCN-NEXT: v_lshlrev_b32_e32 v6, 16, v7
-; GCN-NEXT: v_lshlrev_b32_e32 v7, 16, v8
+; GCN-NEXT: v_and_b32_e32 v7, 0xffff0000, v7
; GCN-NEXT: v_lshlrev_b32_e32 v8, 16, v9
-; GCN-NEXT: v_lshlrev_b32_e32 v9, 16, v10
+; GCN-NEXT: v_and_b32_e32 v9, 0xffff0000, v9
; GCN-NEXT: v_lshlrev_b32_e32 v10, 16, v11
-; GCN-NEXT: v_lshlrev_b32_e32 v11, 16, v12
+; GCN-NEXT: v_and_b32_e32 v11, 0xffff0000, v11
; GCN-NEXT: v_lshlrev_b32_e32 v12, 16, v13
-; GCN-NEXT: v_lshlrev_b32_e32 v13, 16, v14
+; GCN-NEXT: v_and_b32_e32 v13, 0xffff0000, v13
; GCN-NEXT: s_waitcnt vmcnt(1)
-; GCN-NEXT: v_lshrrev_b32_e32 v14, 16, v21
+; GCN-NEXT: v_lshrrev_b32_e32 v14, 16, v19
; GCN-NEXT: s_waitcnt vmcnt(0)
-; GCN-NEXT: v_lshrrev_b32_e32 v17, 16, v20
-; GCN-NEXT: v_cndmask_b32_e32 v16, v14, v16, vcc
-; GCN-NEXT: v_cndmask_b32_e32 v14, v17, v15, vcc
-; GCN-NEXT: v_lshlrev_b32_e32 v14, 16, v14
-; GCN-NEXT: v_lshlrev_b32_e32 v15, 16, v16
+; GCN-NEXT: v_alignbit_b32 v14, v14, v17, 16
+; GCN-NEXT: v_cndmask_b32_e32 v15, v14, v15, vcc
+; GCN-NEXT: v_lshlrev_b32_e32 v14, 16, v15
+; GCN-NEXT: v_and_b32_e32 v15, 0xffff0000, v15
; GCN-NEXT: s_setpc_b64 s[30:31]
;
; GFX7-LABEL: v_select_v16bf16:
; GFX7: ; %bb.0:
; GFX7-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX7-NEXT: v_lshrrev_b32_e32 v12, 16, v12
+; GFX7-NEXT: v_lshrrev_b32_e32 v2, 16, v2
+; GFX7-NEXT: v_alignbit_b32 v11, v12, v11, 16
+; GFX7-NEXT: buffer_load_dword v12, off, s[0:3], s32 offset:4
+; GFX7-NEXT: v_alignbit_b32 v1, v2, v1, 16
+; GFX7-NEXT: v_lshrrev_b32_e32 v2, 16, v18
+; GFX7-NEXT: buffer_load_dword v18, off, s[0:3], s32
+; GFX7-NEXT: v_lshrrev_b32_e32 v8, 16, v8
+; GFX7-NEXT: v_lshrrev_b32_e32 v4, 16, v4
+; GFX7-NEXT: v_alignbit_b32 v7, v8, v7, 16
+; GFX7-NEXT: v_lshrrev_b32_e32 v8, 16, v24
; GFX7-NEXT: v_and_b32_e32 v0, 1, v0
+; GFX7-NEXT: v_alignbit_b32 v3, v4, v3, 16
+; GFX7-NEXT: v_lshrrev_b32_e32 v4, 16, v20
+; GFX7-NEXT: v_lshrrev_b32_e32 v6, 16, v6
+; GFX7-NEXT: v_alignbit_b32 v8, v8, v23, 16
+; GFX7-NEXT: v_lshrrev_b32_e32 v10, 16, v10
; GFX7-NEXT: v_cmp_eq_u32_e32 vcc, 1, v0
-; GFX7-NEXT: v_lshrrev_b32_e32 v0, 16, v14
-; GFX7-NEXT: v_lshrrev_b32_e32 v14, 16, v30
-; GFX7-NEXT: v_cndmask_b32_e32 v14, v14, v0, vcc
-; GFX7-NEXT: v_lshrrev_b32_e32 v0, 16, v13
-; GFX7-NEXT: v_lshrrev_b32_e32 v13, 16, v29
-; GFX7-NEXT: v_cndmask_b32_e32 v13, v13, v0, vcc
-; GFX7-NEXT: v_lshrrev_b32_e32 v0, 16, v12
-; GFX7-NEXT: v_lshrrev_b32_e32 v12, 16, v28
-; GFX7-NEXT: v_cndmask_b32_e32 v12, v12, v0, vcc
-; GFX7-NEXT: v_lshrrev_b32_e32 v0, 16, v11
-; GFX7-NEXT: v_lshrrev_b32_e32 v11, 16, v27
-; GFX7-NEXT: v_cndmask_b32_e32 v11, v11, v0, vcc
-; GFX7-NEXT: v_lshrrev_b32_e32 v0, 16, v10
-; GFX7-NEXT: v_lshrrev_b32_e32 v10, 16, v26
-; GFX7-NEXT: v_cndmask_b32_e32 v10, v10, v0, vcc
-; GFX7-NEXT: v_lshrrev_b32_e32 v0, 16, v9
-; GFX7-NEXT: v_lshrrev_b32_e32 v9, 16, v25
-; GFX7-NEXT: v_cndmask_b32_e32 v9, v9, v0, vcc
-; GFX7-NEXT: v_lshrrev_b32_e32 v0, 16, v8
-; GFX7-NEXT: v_lshrrev_b32_e32 v8, 16, v24
-; GFX7-NEXT: v_cndmask_b32_e32 v8, v8, v0, vcc
-; GFX7-NEXT: v_lshrrev_b32_e32 v0, 16, v7
-; GFX7-NEXT: v_lshrrev_b32_e32 v7, 16, v23
-; GFX7-NEXT: v_cndmask_b32_e32 v7, v7, v0, vcc
-; GFX7-NEXT: v_lshrrev_b32_e32 v0, 16, v6
+; GFX7-NEXT: v_alignbit_b32 v2, v2, v17, 16
+; GFX7-NEXT: v_alignbit_b32 v4, v4, v19, 16
+; GFX7-NEXT: v_alignbit_b32 v5, v6, v5, 16
; GFX7-NEXT: v_lshrrev_b32_e32 v6, 16, v22
-; GFX7-NEXT: v_cndmask_b32_e32 v6, v6, v0, vcc
-; GFX7-NEXT: v_lshrrev_b32_e32 v0, 16, v5
-; GFX7-NEXT: v_lshrrev_b32_e32 v5, 16, v21
-; GFX7-NEXT: v_lshrrev_b32_e32 v4, 16, v4
-; GFX7-NEXT: v_lshrrev_b32_e32 v20, 16, v20
-; GFX7-NEXT: v_cndmask_b32_e32 v5, v5, v0, vcc
-; GFX7-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:4
-; GFX7-NEXT: v_cndmask_b32_e32 v4, v20, v4, vcc
-; GFX7-NEXT: buffer_load_dword v20, off, s[0:3], s32
-; GFX7-NEXT: v_lshrrev_b32_e32 v2, 16, v2
-; GFX7-NEXT: v_lshrrev_b32_e32 v18, 16, v18
+; GFX7-NEXT: v_alignbit_b32 v9, v10, v9, 16
+; GFX7-NEXT: v_lshrrev_b32_e32 v10, 16, v26
+; GFX7-NEXT: v_lshrrev_b32_e32 v17, 16, v28
+; GFX7-NEXT: v_lshrrev_b32_e32 v14, 16, v14
+; GFX7-NEXT: v_lshrrev_b32_e32 v19, 16, v30
; GFX7-NEXT: v_lshrrev_b32_e32 v16, 16, v16
-; GFX7-NEXT: v_lshrrev_b32_e32 v15, 16, v15
-; GFX7-NEXT: v_lshrrev_b32_e32 v1, 16, v1
-; GFX7-NEXT: v_lshrrev_b32_e32 v17, 16, v17
-; GFX7-NEXT: v_lshrrev_b32_e32 v3, 16, v3
-; GFX7-NEXT: v_lshrrev_b32_e32 v19, 16, v19
-; GFX7-NEXT: v_cndmask_b32_e32 v2, v18, v2, vcc
-; GFX7-NEXT: v_cndmask_b32_e32 v3, v19, v3, vcc
-; GFX7-NEXT: s_waitcnt vmcnt(1)
-; GFX7-NEXT: v_lshrrev_b32_e32 v0, 16, v0
-; GFX7-NEXT: v_cndmask_b32_e32 v16, v0, v16, vcc
-; GFX7-NEXT: s_waitcnt vmcnt(0)
-; GFX7-NEXT: v_lshrrev_b32_e32 v18, 16, v20
-; GFX7-NEXT: v_cndmask_b32_e32 v15, v18, v15, vcc
-; GFX7-NEXT: v_cndmask_b32_e32 v0, v17, v1, vcc
-; GFX7-NEXT: v_lshlrev_b32_e32 v0, 16, v0
-; GFX7-NEXT: v_lshlrev_b32_e32 v1, 16, v2
+; GFX7-NEXT: v_cndmask_b32_e32 v7, v8, v7, vcc
+; GFX7-NEXT: v_alignbit_b32 v6, v6, v21, 16
+; GFX7-NEXT: v_alignbit_b32 v10, v10, v25, 16
+; GFX7-NEXT: v_alignbit_b32 v17, v17, v27, 16
+; GFX7-NEXT: v_alignbit_b32 v13, v14, v13, 16
+; GFX7-NEXT: v_alignbit_b32 v14, v19, v29, 16
+; GFX7-NEXT: v_alignbit_b32 v15, v16, v15, 16
+; GFX7-NEXT: v_cndmask_b32_e32 v13, v14, v13, vcc
+; GFX7-NEXT: v_cndmask_b32_e32 v11, v17, v11, vcc
+; GFX7-NEXT: v_cndmask_b32_e32 v9, v10, v9, vcc
+; GFX7-NEXT: v_cndmask_b32_e32 v5, v6, v5, vcc
+; GFX7-NEXT: v_cndmask_b32_e32 v3, v4, v3, vcc
+; GFX7-NEXT: v_cndmask_b32_e32 v1, v2, v1, vcc
+; GFX7-NEXT: v_lshlrev_b32_e32 v0, 16, v1
+; GFX7-NEXT: v_and_b32_e32 v1, 0xffff0000, v1
; GFX7-NEXT: v_lshlrev_b32_e32 v2, 16, v3
-; GFX7-NEXT: v_lshlrev_b32_e32 v3, 16, v4
+; GFX7-NEXT: v_and_b32_e32 v3, 0xffff0000, v3
; GFX7-NEXT: v_lshlrev_b32_e32 v4, 16, v5
-; GFX7-NEXT: v_lshlrev_b32_e32 v5, 16, v6
+; GFX7-NEXT: v_and_b32_e32 v5, 0xffff0000, v5
; GFX7-NEXT: v_lshlrev_b32_e32 v6, 16, v7
-; GFX7-NEXT: v_lshlrev_b32_e32 v7, 16, v8
-; GFX7-NEXT: v_lshlrev_b32_e32 v8, 16, v9
-; GFX7-NEXT: v_lshlrev_b32_e32 v9, 16, v10
+; GFX7-NEXT: v_and_b32_e32 v7, 0xffff0000, v7
; GFX7-NEXT: v_lshlrev_b32_e32 v10, 16, v11
-; GFX7-NEXT: v_lshlrev_b32_e32 v11, 16, v12
+; GFX7-NEXT: v_and_b32_e32 v11, 0xffff0000, v11
+; GFX7-NEXT: s_waitcnt vmcnt(1)
+; GFX7-NEXT: v_lshrrev_b32_e32 v8, 16, v12
; GFX7-NEXT: v_lshlrev_b32_e32 v12, 16, v13
-; GFX7-NEXT: v_lshlrev_b32_e32 v13, 16, v14
+; GFX7-NEXT: v_and_b32_e32 v13, 0xffff0000, v13
+; GFX7-NEXT: s_waitcnt vmcnt(0)
+; GFX7-NEXT: v_alignbit_b32 v8, v8, v18, 16
+; GFX7-NEXT: v_cndmask_b32_e32 v15, v8, v15, vcc
+; GFX7-NEXT: v_lshlrev_b32_e32 v8, 16, v9
+; GFX7-NEXT: v_and_b32_e32 v9, 0xffff0000, v9
; GFX7-NEXT: v_lshlrev_b32_e32 v14, 16, v15
-; GFX7-NEXT: v_lshlrev_b32_e32 v15, 16, v16
+; GFX7-NEXT: v_and_b32_e32 v15, 0xffff0000, v15
; GFX7-NEXT: s_setpc_b64 s[30:31]
;
; GFX8-LABEL: v_select_v16bf16:
@@ -25999,54 +25892,14 @@ define <16 x bfloat> @v_select_v16bf16(i1 %cond, <16 x bfloat> %a, <16 x bfloat>
; GFX8-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX8-NEXT: v_and_b32_e32 v0, 1, v0
; GFX8-NEXT: v_cmp_eq_u32_e32 vcc, 1, v0
-; GFX8-NEXT: v_lshrrev_b32_e32 v17, 16, v8
-; GFX8-NEXT: v_lshrrev_b32_e32 v18, 16, v16
-; GFX8-NEXT: v_cndmask_b32_e32 v8, v16, v8, vcc
-; GFX8-NEXT: v_lshrrev_b32_e32 v0, 16, v7
-; GFX8-NEXT: v_lshrrev_b32_e32 v16, 16, v15
-; GFX8-NEXT: v_cndmask_b32_e32 v16, v16, v0, vcc
-; GFX8-NEXT: v_cndmask_b32_e32 v7, v15, v7, vcc
-; GFX8-NEXT: v_lshrrev_b32_e32 v0, 16, v6
-; GFX8-NEXT: v_lshrrev_b32_e32 v15, 16, v14
-; GFX8-NEXT: v_cndmask_b32_e32 v15, v15, v0, vcc
-; GFX8-NEXT: v_cndmask_b32_e32 v6, v14, v6, vcc
-; GFX8-NEXT: v_lshrrev_b32_e32 v0, 16, v5
-; GFX8-NEXT: v_lshrrev_b32_e32 v14, 16, v13
-; GFX8-NEXT: v_cndmask_b32_e32 v14, v14, v0, vcc
-; GFX8-NEXT: v_cndmask_b32_e32 v5, v13, v5, vcc
-; GFX8-NEXT: v_lshrrev_b32_e32 v0, 16, v4
-; GFX8-NEXT: v_lshrrev_b32_e32 v13, 16, v12
-; GFX8-NEXT: v_cndmask_b32_e32 v13, v13, v0, vcc
-; GFX8-NEXT: v_cndmask_b32_e32 v4, v12, v4, vcc
-; GFX8-NEXT: v_lshrrev_b32_e32 v0, 16, v3
-; GFX8-NEXT: v_lshrrev_b32_e32 v12, 16, v11
-; GFX8-NEXT: v_cndmask_b32_e32 v12, v12, v0, vcc
-; GFX8-NEXT: v_cndmask_b32_e32 v3, v11, v3, vcc
-; GFX8-NEXT: v_lshrrev_b32_e32 v0, 16, v2
-; GFX8-NEXT: v_lshrrev_b32_e32 v11, 16, v10
-; GFX8-NEXT: v_cndmask_b32_e32 v11, v11, v0, vcc
-; GFX8-NEXT: v_cndmask_b32_e32 v2, v10, v2, vcc
-; GFX8-NEXT: v_lshrrev_b32_e32 v0, 16, v1
-; GFX8-NEXT: v_lshrrev_b32_e32 v10, 16, v9
-; GFX8-NEXT: v_cndmask_b32_e32 v0, v10, v0, vcc
-; GFX8-NEXT: v_cndmask_b32_e32 v1, v9, v1, vcc
-; GFX8-NEXT: v_lshlrev_b32_e32 v0, 16, v0
-; GFX8-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
-; GFX8-NEXT: v_lshlrev_b32_e32 v1, 16, v11
-; GFX8-NEXT: v_or_b32_sdwa v1, v2, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
-; GFX8-NEXT: v_lshlrev_b32_e32 v2, 16, v12
-; GFX8-NEXT: v_or_b32_sdwa v2, v3, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
-; GFX8-NEXT: v_lshlrev_b32_e32 v3, 16, v13
-; GFX8-NEXT: v_or_b32_sdwa v3, v4, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
-; GFX8-NEXT: v_lshlrev_b32_e32 v4, 16, v14
-; GFX8-NEXT: v_or_b32_sdwa v4, v5, v4 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
-; GFX8-NEXT: v_lshlrev_b32_e32 v5, 16, v15
-; GFX8-NEXT: v_cndmask_b32_e32 v17, v18, v17, vcc
-; GFX8-NEXT: v_or_b32_sdwa v5, v6, v5 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
-; GFX8-NEXT: v_lshlrev_b32_e32 v6, 16, v16
-; GFX8-NEXT: v_or_b32_sdwa v6, v7, v6 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
-; GFX8-NEXT: v_lshlrev_b32_e32 v7, 16, v17
-; GFX8-NEXT: v_or_b32_sdwa v7, v8, v7 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
+; GFX8-NEXT: v_cndmask_b32_e32 v0, v9, v1, vcc
+; GFX8-NEXT: v_cndmask_b32_e32 v1, v10, v2, vcc
+; GFX8-NEXT: v_cndmask_b32_e32 v2, v11, v3, vcc
+; GFX8-NEXT: v_cndmask_b32_e32 v3, v12, v4, vcc
+; GFX8-NEXT: v_cndmask_b32_e32 v4, v13, v5, vcc
+; GFX8-NEXT: v_cndmask_b32_e32 v5, v14, v6, vcc
+; GFX8-NEXT: v_cndmask_b32_e32 v6, v15, v7, vcc
+; GFX8-NEXT: v_cndmask_b32_e32 v7, v16, v8, vcc
; GFX8-NEXT: s_setpc_b64 s[30:31]
;
; GFX9-LABEL: v_select_v16bf16:
@@ -26098,407 +25951,365 @@ define <32 x bfloat> @v_select_v32bf16(i1 %cond, <32 x bfloat> %a, <32 x bfloat>
; GCN-LABEL: v_select_v32bf16:
; GCN: ; %bb.0:
; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GCN-NEXT: buffer_load_dword v31, off, s[0:3], s32 offset:4
+; GCN-NEXT: v_lshrrev_b32_e32 v2, 16, v2
+; GCN-NEXT: v_alignbit_b32 v1, v2, v1, 16
+; GCN-NEXT: v_lshrrev_b32_e32 v2, 16, v4
+; GCN-NEXT: v_alignbit_b32 v2, v2, v3, 16
+; GCN-NEXT: v_lshrrev_b32_e32 v3, 16, v6
+; GCN-NEXT: v_alignbit_b32 v3, v3, v5, 16
+; GCN-NEXT: v_lshrrev_b32_e32 v4, 16, v8
+; GCN-NEXT: v_alignbit_b32 v4, v4, v7, 16
+; GCN-NEXT: v_lshrrev_b32_e32 v5, 16, v10
+; GCN-NEXT: v_alignbit_b32 v5, v5, v9, 16
+; GCN-NEXT: v_lshrrev_b32_e32 v6, 16, v12
+; GCN-NEXT: v_alignbit_b32 v6, v6, v11, 16
+; GCN-NEXT: v_lshrrev_b32_e32 v7, 16, v14
+; GCN-NEXT: v_alignbit_b32 v7, v7, v13, 16
+; GCN-NEXT: v_lshrrev_b32_e32 v8, 16, v16
+; GCN-NEXT: v_alignbit_b32 v8, v8, v15, 16
+; GCN-NEXT: v_lshrrev_b32_e32 v9, 16, v18
+; GCN-NEXT: v_alignbit_b32 v9, v9, v17, 16
+; GCN-NEXT: v_lshrrev_b32_e32 v10, 16, v20
+; GCN-NEXT: v_alignbit_b32 v10, v10, v19, 16
+; GCN-NEXT: v_lshrrev_b32_e32 v11, 16, v22
+; GCN-NEXT: v_alignbit_b32 v11, v11, v21, 16
+; GCN-NEXT: buffer_load_dword v16, off, s[0:3], s32 offset:12
+; GCN-NEXT: v_lshrrev_b32_e32 v12, 16, v24
+; GCN-NEXT: v_alignbit_b32 v12, v12, v23, 16
+; GCN-NEXT: buffer_load_dword v17, off, s[0:3], s32 offset:8
+; GCN-NEXT: v_lshrrev_b32_e32 v13, 16, v26
+; GCN-NEXT: v_alignbit_b32 v13, v13, v25, 16
+; GCN-NEXT: buffer_load_dword v18, off, s[0:3], s32 offset:20
+; GCN-NEXT: v_lshrrev_b32_e32 v14, 16, v28
+; GCN-NEXT: v_alignbit_b32 v14, v14, v27, 16
+; GCN-NEXT: buffer_load_dword v19, off, s[0:3], s32 offset:16
+; GCN-NEXT: v_lshrrev_b32_e32 v15, 16, v30
+; GCN-NEXT: v_alignbit_b32 v15, v15, v29, 16
+; GCN-NEXT: buffer_load_dword v20, off, s[0:3], s32 offset:28
; GCN-NEXT: v_and_b32_e32 v0, 1, v0
; GCN-NEXT: v_cmp_eq_u32_e32 vcc, 1, v0
-; GCN-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:132
-; GCN-NEXT: s_waitcnt vmcnt(1)
-; GCN-NEXT: v_lshrrev_b32_e32 v31, 16, v31
-; GCN-NEXT: buffer_load_dword v32, off, s[0:3], s32
-; GCN-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:128
-; GCN-NEXT: s_waitcnt vmcnt(2)
-; GCN-NEXT: v_lshrrev_b32_e32 v0, 16, v0
-; GCN-NEXT: v_cndmask_b32_e32 v31, v0, v31, vcc
-; GCN-NEXT: s_waitcnt vmcnt(1)
-; GCN-NEXT: v_lshrrev_b32_e32 v0, 16, v32
-; GCN-NEXT: s_waitcnt vmcnt(0)
-; GCN-NEXT: v_lshrrev_b32_e32 v32, 16, v33
-; GCN-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:124
-; GCN-NEXT: v_cndmask_b32_e32 v32, v32, v0, vcc
-; GCN-NEXT: v_lshrrev_b32_e32 v0, 16, v30
-; GCN-NEXT: buffer_load_dword v34, off, s[0:3], s32 offset:120
-; GCN-NEXT: s_waitcnt vmcnt(1)
-; GCN-NEXT: v_lshrrev_b32_e32 v30, 16, v33
-; GCN-NEXT: v_cndmask_b32_e32 v30, v30, v0, vcc
-; GCN-NEXT: v_lshrrev_b32_e32 v0, 16, v29
-; GCN-NEXT: s_waitcnt vmcnt(0)
-; GCN-NEXT: v_lshrrev_b32_e32 v29, 16, v34
-; GCN-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:116
-; GCN-NEXT: v_cndmask_b32_e32 v29, v29, v0, vcc
-; GCN-NEXT: v_lshrrev_b32_e32 v0, 16, v28
-; GCN-NEXT: buffer_load_dword v34, off, s[0:3], s32 offset:112
-; GCN-NEXT: s_waitcnt vmcnt(1)
-; GCN-NEXT: v_lshrrev_b32_e32 v28, 16, v33
-; GCN-NEXT: v_cndmask_b32_e32 v28, v28, v0, vcc
-; GCN-NEXT: v_lshrrev_b32_e32 v0, 16, v27
-; GCN-NEXT: s_waitcnt vmcnt(0)
-; GCN-NEXT: v_lshrrev_b32_e32 v27, 16, v34
-; GCN-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:108
-; GCN-NEXT: v_cndmask_b32_e32 v27, v27, v0, vcc
-; GCN-NEXT: v_lshrrev_b32_e32 v0, 16, v26
-; GCN-NEXT: buffer_load_dword v34, off, s[0:3], s32 offset:104
-; GCN-NEXT: s_waitcnt vmcnt(1)
-; GCN-NEXT: v_lshrrev_b32_e32 v26, 16, v33
-; GCN-NEXT: v_cndmask_b32_e32 v26, v26, v0, vcc
-; GCN-NEXT: v_lshrrev_b32_e32 v0, 16, v25
-; GCN-NEXT: s_waitcnt vmcnt(0)
-; GCN-NEXT: v_lshrrev_b32_e32 v25, 16, v34
-; GCN-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:100
-; GCN-NEXT: v_cndmask_b32_e32 v25, v25, v0, vcc
-; GCN-NEXT: v_lshrrev_b32_e32 v0, 16, v24
-; GCN-NEXT: buffer_load_dword v34, off, s[0:3], s32 offset:96
-; GCN-NEXT: s_waitcnt vmcnt(1)
-; GCN-NEXT: v_lshrrev_b32_e32 v24, 16, v33
-; GCN-NEXT: v_cndmask_b32_e32 v24, v24, v0, vcc
-; GCN-NEXT: v_lshrrev_b32_e32 v0, 16, v23
-; GCN-NEXT: s_waitcnt vmcnt(0)
-; GCN-NEXT: v_lshrrev_b32_e32 v23, 16, v34
-; GCN-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:92
-; GCN-NEXT: v_cndmask_b32_e32 v23, v23, v0, vcc
-; GCN-NEXT: v_lshrrev_b32_e32 v0, 16, v22
-; GCN-NEXT: buffer_load_dword v34, off, s[0:3], s32 offset:88
-; GCN-NEXT: s_waitcnt vmcnt(1)
-; GCN-NEXT: v_lshrrev_b32_e32 v22, 16, v33
-; GCN-NEXT: v_cndmask_b32_e32 v22, v22, v0, vcc
-; GCN-NEXT: v_lshrrev_b32_e32 v0, 16, v21
-; GCN-NEXT: s_waitcnt vmcnt(0)
-; GCN-NEXT: v_lshrrev_b32_e32 v21, 16, v34
-; GCN-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:84
-; GCN-NEXT: v_cndmask_b32_e32 v21, v21, v0, vcc
-; GCN-NEXT: v_lshrrev_b32_e32 v0, 16, v20
-; GCN-NEXT: buffer_load_dword v34, off, s[0:3], s32 offset:80
-; GCN-NEXT: s_waitcnt vmcnt(1)
-; GCN-NEXT: v_lshrrev_b32_e32 v20, 16, v33
-; GCN-NEXT: v_cndmask_b32_e32 v20, v20, v0, vcc
-; GCN-NEXT: v_lshrrev_b32_e32 v0, 16, v19
-; GCN-NEXT: s_waitcnt vmcnt(0)
-; GCN-NEXT: v_lshrrev_b32_e32 v19, 16, v34
-; GCN-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:76
-; GCN-NEXT: v_cndmask_b32_e32 v19, v19, v0, vcc
-; GCN-NEXT: v_lshrrev_b32_e32 v0, 16, v18
-; GCN-NEXT: buffer_load_dword v34, off, s[0:3], s32 offset:72
-; GCN-NEXT: s_waitcnt vmcnt(1)
-; GCN-NEXT: v_lshrrev_b32_e32 v18, 16, v33
-; GCN-NEXT: v_cndmask_b32_e32 v18, v18, v0, vcc
-; GCN-NEXT: v_lshrrev_b32_e32 v0, 16, v17
-; GCN-NEXT: s_waitcnt vmcnt(0)
-; GCN-NEXT: v_lshrrev_b32_e32 v17, 16, v34
-; GCN-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:68
-; GCN-NEXT: v_cndmask_b32_e32 v17, v17, v0, vcc
+; GCN-NEXT: buffer_load_dword v21, off, s[0:3], s32 offset:24
+; GCN-NEXT: s_waitcnt vmcnt(5)
; GCN-NEXT: v_lshrrev_b32_e32 v0, 16, v16
-; GCN-NEXT: buffer_load_dword v34, off, s[0:3], s32 offset:64
-; GCN-NEXT: s_waitcnt vmcnt(1)
-; GCN-NEXT: v_lshrrev_b32_e32 v16, 16, v33
-; GCN-NEXT: v_cndmask_b32_e32 v16, v16, v0, vcc
-; GCN-NEXT: v_lshrrev_b32_e32 v0, 16, v15
-; GCN-NEXT: s_waitcnt vmcnt(0)
-; GCN-NEXT: v_lshrrev_b32_e32 v15, 16, v34
-; GCN-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:60
-; GCN-NEXT: v_cndmask_b32_e32 v15, v15, v0, vcc
-; GCN-NEXT: v_lshrrev_b32_e32 v0, 16, v14
-; GCN-NEXT: buffer_load_dword v14, off, s[0:3], s32 offset:56
+; GCN-NEXT: s_waitcnt vmcnt(4)
+; GCN-NEXT: v_alignbit_b32 v0, v0, v17, 16
+; GCN-NEXT: buffer_load_dword v22, off, s[0:3], s32 offset:36
+; GCN-NEXT: s_waitcnt vmcnt(4)
+; GCN-NEXT: v_lshrrev_b32_e32 v16, 16, v18
+; GCN-NEXT: s_waitcnt vmcnt(3)
+; GCN-NEXT: v_alignbit_b32 v16, v16, v19, 16
+; GCN-NEXT: buffer_load_dword v18, off, s[0:3], s32 offset:32
+; GCN-NEXT: s_waitcnt vmcnt(3)
+; GCN-NEXT: v_lshrrev_b32_e32 v17, 16, v20
+; GCN-NEXT: s_waitcnt vmcnt(2)
+; GCN-NEXT: v_alignbit_b32 v17, v17, v21, 16
+; GCN-NEXT: buffer_load_dword v19, off, s[0:3], s32 offset:44
+; GCN-NEXT: buffer_load_dword v20, off, s[0:3], s32 offset:40
+; GCN-NEXT: s_waitcnt vmcnt(3)
+; GCN-NEXT: v_lshrrev_b32_e32 v21, 16, v22
+; GCN-NEXT: s_waitcnt vmcnt(2)
+; GCN-NEXT: v_alignbit_b32 v18, v21, v18, 16
; GCN-NEXT: s_waitcnt vmcnt(1)
-; GCN-NEXT: v_lshrrev_b32_e32 v33, 16, v33
-; GCN-NEXT: v_cndmask_b32_e32 v33, v33, v0, vcc
-; GCN-NEXT: v_lshrrev_b32_e32 v0, 16, v13
+; GCN-NEXT: v_lshrrev_b32_e32 v19, 16, v19
; GCN-NEXT: s_waitcnt vmcnt(0)
-; GCN-NEXT: v_lshrrev_b32_e32 v13, 16, v14
-; GCN-NEXT: buffer_load_dword v14, off, s[0:3], s32 offset:52
-; GCN-NEXT: v_cndmask_b32_e32 v13, v13, v0, vcc
-; GCN-NEXT: v_lshrrev_b32_e32 v0, 16, v12
-; GCN-NEXT: buffer_load_dword v12, off, s[0:3], s32 offset:48
+; GCN-NEXT: v_alignbit_b32 v19, v19, v20, 16
+; GCN-NEXT: buffer_load_dword v20, off, s[0:3], s32 offset:52
+; GCN-NEXT: buffer_load_dword v21, off, s[0:3], s32 offset:48
+; GCN-NEXT: buffer_load_dword v22, off, s[0:3], s32 offset:60
+; GCN-NEXT: buffer_load_dword v23, off, s[0:3], s32 offset:56
+; GCN-NEXT: s_waitcnt vmcnt(3)
+; GCN-NEXT: v_lshrrev_b32_e32 v20, 16, v20
+; GCN-NEXT: s_waitcnt vmcnt(2)
+; GCN-NEXT: v_alignbit_b32 v20, v20, v21, 16
; GCN-NEXT: s_waitcnt vmcnt(1)
-; GCN-NEXT: v_lshrrev_b32_e32 v14, 16, v14
-; GCN-NEXT: v_cndmask_b32_e32 v14, v14, v0, vcc
-; GCN-NEXT: v_lshrrev_b32_e32 v0, 16, v11
+; GCN-NEXT: v_lshrrev_b32_e32 v21, 16, v22
; GCN-NEXT: s_waitcnt vmcnt(0)
-; GCN-NEXT: v_lshrrev_b32_e32 v11, 16, v12
-; GCN-NEXT: buffer_load_dword v12, off, s[0:3], s32 offset:44
-; GCN-NEXT: v_cndmask_b32_e32 v11, v11, v0, vcc
-; GCN-NEXT: v_lshrrev_b32_e32 v0, 16, v10
-; GCN-NEXT: buffer_load_dword v10, off, s[0:3], s32 offset:40
+; GCN-NEXT: v_alignbit_b32 v21, v21, v23, 16
+; GCN-NEXT: buffer_load_dword v22, off, s[0:3], s32 offset:68
+; GCN-NEXT: buffer_load_dword v23, off, s[0:3], s32 offset:64
+; GCN-NEXT: buffer_load_dword v24, off, s[0:3], s32 offset:76
+; GCN-NEXT: buffer_load_dword v25, off, s[0:3], s32 offset:72
+; GCN-NEXT: s_waitcnt vmcnt(3)
+; GCN-NEXT: v_lshrrev_b32_e32 v22, 16, v22
+; GCN-NEXT: s_waitcnt vmcnt(2)
+; GCN-NEXT: v_alignbit_b32 v22, v22, v23, 16
; GCN-NEXT: s_waitcnt vmcnt(1)
-; GCN-NEXT: v_lshrrev_b32_e32 v12, 16, v12
-; GCN-NEXT: v_cndmask_b32_e32 v12, v12, v0, vcc
-; GCN-NEXT: v_lshrrev_b32_e32 v0, 16, v9
+; GCN-NEXT: v_lshrrev_b32_e32 v23, 16, v24
; GCN-NEXT: s_waitcnt vmcnt(0)
-; GCN-NEXT: v_lshrrev_b32_e32 v9, 16, v10
-; GCN-NEXT: buffer_load_dword v10, off, s[0:3], s32 offset:36
-; GCN-NEXT: v_cndmask_b32_e32 v9, v9, v0, vcc
-; GCN-NEXT: v_lshrrev_b32_e32 v0, 16, v8
-; GCN-NEXT: buffer_load_dword v8, off, s[0:3], s32 offset:32
+; GCN-NEXT: v_alignbit_b32 v23, v23, v25, 16
+; GCN-NEXT: buffer_load_dword v24, off, s[0:3], s32 offset:84
+; GCN-NEXT: buffer_load_dword v25, off, s[0:3], s32 offset:80
+; GCN-NEXT: buffer_load_dword v26, off, s[0:3], s32 offset:92
+; GCN-NEXT: buffer_load_dword v27, off, s[0:3], s32 offset:88
+; GCN-NEXT: s_waitcnt vmcnt(3)
+; GCN-NEXT: v_lshrrev_b32_e32 v24, 16, v24
+; GCN-NEXT: s_waitcnt vmcnt(2)
+; GCN-NEXT: v_alignbit_b32 v24, v24, v25, 16
; GCN-NEXT: s_waitcnt vmcnt(1)
-; GCN-NEXT: v_lshrrev_b32_e32 v10, 16, v10
-; GCN-NEXT: v_cndmask_b32_e32 v10, v10, v0, vcc
-; GCN-NEXT: v_lshrrev_b32_e32 v0, 16, v7
+; GCN-NEXT: v_lshrrev_b32_e32 v25, 16, v26
; GCN-NEXT: s_waitcnt vmcnt(0)
-; GCN-NEXT: v_lshrrev_b32_e32 v7, 16, v8
-; GCN-NEXT: buffer_load_dword v8, off, s[0:3], s32 offset:28
-; GCN-NEXT: v_cndmask_b32_e32 v7, v7, v0, vcc
-; GCN-NEXT: v_lshrrev_b32_e32 v0, 16, v6
-; GCN-NEXT: buffer_load_dword v6, off, s[0:3], s32 offset:24
+; GCN-NEXT: v_alignbit_b32 v25, v25, v27, 16
+; GCN-NEXT: buffer_load_dword v26, off, s[0:3], s32 offset:100
+; GCN-NEXT: buffer_load_dword v27, off, s[0:3], s32 offset:96
+; GCN-NEXT: buffer_load_dword v28, off, s[0:3], s32 offset:108
+; GCN-NEXT: buffer_load_dword v29, off, s[0:3], s32 offset:104
+; GCN-NEXT: s_waitcnt vmcnt(3)
+; GCN-NEXT: v_lshrrev_b32_e32 v26, 16, v26
+; GCN-NEXT: s_waitcnt vmcnt(2)
+; GCN-NEXT: v_alignbit_b32 v26, v26, v27, 16
; GCN-NEXT: s_waitcnt vmcnt(1)
-; GCN-NEXT: v_lshrrev_b32_e32 v8, 16, v8
-; GCN-NEXT: v_cndmask_b32_e32 v8, v8, v0, vcc
-; GCN-NEXT: v_lshrrev_b32_e32 v0, 16, v5
+; GCN-NEXT: v_lshrrev_b32_e32 v27, 16, v28
; GCN-NEXT: s_waitcnt vmcnt(0)
-; GCN-NEXT: v_lshrrev_b32_e32 v5, 16, v6
-; GCN-NEXT: buffer_load_dword v6, off, s[0:3], s32 offset:20
-; GCN-NEXT: v_cndmask_b32_e32 v5, v5, v0, vcc
-; GCN-NEXT: v_lshrrev_b32_e32 v0, 16, v4
-; GCN-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:16
+; GCN-NEXT: v_alignbit_b32 v27, v27, v29, 16
+; GCN-NEXT: buffer_load_dword v28, off, s[0:3], s32 offset:116
+; GCN-NEXT: buffer_load_dword v29, off, s[0:3], s32 offset:112
+; GCN-NEXT: buffer_load_dword v30, off, s[0:3], s32 offset:124
+; GCN-NEXT: buffer_load_dword v31, off, s[0:3], s32 offset:120
+; GCN-NEXT: s_waitcnt vmcnt(3)
+; GCN-NEXT: v_lshrrev_b32_e32 v28, 16, v28
+; GCN-NEXT: s_waitcnt vmcnt(2)
+; GCN-NEXT: v_alignbit_b32 v28, v28, v29, 16
; GCN-NEXT: s_waitcnt vmcnt(1)
-; GCN-NEXT: v_lshrrev_b32_e32 v6, 16, v6
-; GCN-NEXT: v_cndmask_b32_e32 v6, v6, v0, vcc
-; GCN-NEXT: v_lshrrev_b32_e32 v0, 16, v3
+; GCN-NEXT: v_lshrrev_b32_e32 v29, 16, v30
; GCN-NEXT: s_waitcnt vmcnt(0)
-; GCN-NEXT: v_lshrrev_b32_e32 v3, 16, v4
-; GCN-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:12
-; GCN-NEXT: v_cndmask_b32_e32 v3, v3, v0, vcc
-; GCN-NEXT: v_lshrrev_b32_e32 v0, 16, v2
-; GCN-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:8
+; GCN-NEXT: v_alignbit_b32 v29, v29, v31, 16
+; GCN-NEXT: buffer_load_dword v30, off, s[0:3], s32 offset:4
+; GCN-NEXT: buffer_load_dword v31, off, s[0:3], s32
+; GCN-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:132
+; GCN-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:128
+; GCN-NEXT: s_waitcnt vmcnt(3)
+; GCN-NEXT: v_lshrrev_b32_e32 v30, 16, v30
+; GCN-NEXT: s_waitcnt vmcnt(2)
+; GCN-NEXT: v_alignbit_b32 v30, v30, v31, 16
; GCN-NEXT: s_waitcnt vmcnt(1)
-; GCN-NEXT: v_lshrrev_b32_e32 v4, 16, v4
-; GCN-NEXT: v_cndmask_b32_e32 v4, v4, v0, vcc
-; GCN-NEXT: v_lshrrev_b32_e32 v0, 16, v1
+; GCN-NEXT: v_lshrrev_b32_e32 v31, 16, v32
; GCN-NEXT: s_waitcnt vmcnt(0)
-; GCN-NEXT: v_lshrrev_b32_e32 v1, 16, v2
-; GCN-NEXT: v_cndmask_b32_e32 v0, v1, v0, vcc
-; GCN-NEXT: v_lshlrev_b32_e32 v0, 16, v0
-; GCN-NEXT: v_lshlrev_b32_e32 v1, 16, v4
+; GCN-NEXT: v_alignbit_b32 v31, v31, v33, 16
+; GCN-NEXT: v_cndmask_b32_e32 v31, v31, v30, vcc
+; GCN-NEXT: v_cndmask_b32_e32 v29, v29, v15, vcc
+; GCN-NEXT: v_cndmask_b32_e32 v28, v28, v14, vcc
+; GCN-NEXT: v_cndmask_b32_e32 v27, v27, v13, vcc
+; GCN-NEXT: v_cndmask_b32_e32 v26, v26, v12, vcc
+; GCN-NEXT: v_cndmask_b32_e32 v25, v25, v11, vcc
+; GCN-NEXT: v_cndmask_b32_e32 v24, v24, v10, vcc
+; GCN-NEXT: v_cndmask_b32_e32 v23, v23, v9, vcc
+; GCN-NEXT: v_cndmask_b32_e32 v15, v22, v8, vcc
+; GCN-NEXT: v_cndmask_b32_e32 v13, v21, v7, vcc
+; GCN-NEXT: v_cndmask_b32_e32 v11, v20, v6, vcc
+; GCN-NEXT: v_cndmask_b32_e32 v9, v19, v5, vcc
+; GCN-NEXT: v_cndmask_b32_e32 v7, v18, v4, vcc
+; GCN-NEXT: v_cndmask_b32_e32 v5, v17, v3, vcc
+; GCN-NEXT: v_cndmask_b32_e32 v3, v16, v2, vcc
+; GCN-NEXT: v_cndmask_b32_e32 v1, v0, v1, vcc
+; GCN-NEXT: v_lshlrev_b32_e32 v0, 16, v1
+; GCN-NEXT: v_and_b32_e32 v1, 0xffff0000, v1
; GCN-NEXT: v_lshlrev_b32_e32 v2, 16, v3
-; GCN-NEXT: v_lshlrev_b32_e32 v3, 16, v6
+; GCN-NEXT: v_and_b32_e32 v3, 0xffff0000, v3
; GCN-NEXT: v_lshlrev_b32_e32 v4, 16, v5
-; GCN-NEXT: v_lshlrev_b32_e32 v5, 16, v8
+; GCN-NEXT: v_and_b32_e32 v5, 0xffff0000, v5
; GCN-NEXT: v_lshlrev_b32_e32 v6, 16, v7
-; GCN-NEXT: v_lshlrev_b32_e32 v7, 16, v10
+; GCN-NEXT: v_and_b32_e32 v7, 0xffff0000, v7
; GCN-NEXT: v_lshlrev_b32_e32 v8, 16, v9
-; GCN-NEXT: v_lshlrev_b32_e32 v9, 16, v12
+; GCN-NEXT: v_and_b32_e32 v9, 0xffff0000, v9
; GCN-NEXT: v_lshlrev_b32_e32 v10, 16, v11
-; GCN-NEXT: v_lshlrev_b32_e32 v11, 16, v14
+; GCN-NEXT: v_and_b32_e32 v11, 0xffff0000, v11
; GCN-NEXT: v_lshlrev_b32_e32 v12, 16, v13
-; GCN-NEXT: v_lshlrev_b32_e32 v13, 16, v33
+; GCN-NEXT: v_and_b32_e32 v13, 0xffff0000, v13
; GCN-NEXT: v_lshlrev_b32_e32 v14, 16, v15
-; GCN-NEXT: v_lshlrev_b32_e32 v15, 16, v16
-; GCN-NEXT: v_lshlrev_b32_e32 v16, 16, v17
-; GCN-NEXT: v_lshlrev_b32_e32 v17, 16, v18
-; GCN-NEXT: v_lshlrev_b32_e32 v18, 16, v19
-; GCN-NEXT: v_lshlrev_b32_e32 v19, 16, v20
-; GCN-NEXT: v_lshlrev_b32_e32 v20, 16, v21
-; GCN-NEXT: v_lshlrev_b32_e32 v21, 16, v22
-; GCN-NEXT: v_lshlrev_b32_e32 v22, 16, v23
-; GCN-NEXT: v_lshlrev_b32_e32 v23, 16, v24
-; GCN-NEXT: v_lshlrev_b32_e32 v24, 16, v25
-; GCN-NEXT: v_lshlrev_b32_e32 v25, 16, v26
-; GCN-NEXT: v_lshlrev_b32_e32 v26, 16, v27
-; GCN-NEXT: v_lshlrev_b32_e32 v27, 16, v28
+; GCN-NEXT: v_and_b32_e32 v15, 0xffff0000, v15
+; GCN-NEXT: v_lshlrev_b32_e32 v16, 16, v23
+; GCN-NEXT: v_and_b32_e32 v17, 0xffff0000, v23
+; GCN-NEXT: v_lshlrev_b32_e32 v18, 16, v24
+; GCN-NEXT: v_and_b32_e32 v19, 0xffff0000, v24
+; GCN-NEXT: v_lshlrev_b32_e32 v20, 16, v25
+; GCN-NEXT: v_and_b32_e32 v21, 0xffff0000, v25
+; GCN-NEXT: v_lshlrev_b32_e32 v22, 16, v26
+; GCN-NEXT: v_and_b32_e32 v23, 0xffff0000, v26
+; GCN-NEXT: v_lshlrev_b32_e32 v24, 16, v27
+; GCN-NEXT: v_and_b32_e32 v25, 0xffff0000, v27
+; GCN-NEXT: v_lshlrev_b32_e32 v26, 16, v28
+; GCN-NEXT: v_and_b32_e32 v27, 0xffff0000, v28
; GCN-NEXT: v_lshlrev_b32_e32 v28, 16, v29
-; GCN-NEXT: v_lshlrev_b32_e32 v29, 16, v30
-; GCN-NEXT: v_lshlrev_b32_e32 v30, 16, v32
-; GCN-NEXT: v_lshlrev_b32_e32 v31, 16, v31
+; GCN-NEXT: v_and_b32_e32 v29, 0xffff0000, v29
+; GCN-NEXT: v_lshlrev_b32_e32 v30, 16, v31
+; GCN-NEXT: v_and_b32_e32 v31, 0xffff0000, v31
; GCN-NEXT: s_setpc_b64 s[30:31]
;
; GFX7-LABEL: v_select_v32bf16:
; GFX7: ; %bb.0:
; GFX7-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX7-NEXT: v_lshrrev_b32_e32 v2, 16, v2
+; GFX7-NEXT: v_alignbit_b32 v1, v2, v1, 16
+; GFX7-NEXT: v_lshrrev_b32_e32 v2, 16, v4
+; GFX7-NEXT: v_alignbit_b32 v2, v2, v3, 16
+; GFX7-NEXT: v_lshrrev_b32_e32 v3, 16, v6
+; GFX7-NEXT: v_lshrrev_b32_e32 v4, 16, v8
+; GFX7-NEXT: v_alignbit_b32 v3, v3, v5, 16
+; GFX7-NEXT: v_alignbit_b32 v4, v4, v7, 16
+; GFX7-NEXT: v_lshrrev_b32_e32 v5, 16, v10
+; GFX7-NEXT: v_lshrrev_b32_e32 v6, 16, v12
+; GFX7-NEXT: v_lshrrev_b32_e32 v7, 16, v14
+; GFX7-NEXT: v_lshrrev_b32_e32 v8, 16, v16
+; GFX7-NEXT: buffer_load_dword v10, off, s[0:3], s32 offset:12
+; GFX7-NEXT: buffer_load_dword v12, off, s[0:3], s32 offset:16
+; GFX7-NEXT: v_alignbit_b32 v6, v6, v11, 16
+; GFX7-NEXT: v_alignbit_b32 v7, v7, v13, 16
+; GFX7-NEXT: buffer_load_dword v13, off, s[0:3], s32 offset:24
+; GFX7-NEXT: v_alignbit_b32 v8, v8, v15, 16
+; GFX7-NEXT: buffer_load_dword v15, off, s[0:3], s32 offset:40
+; GFX7-NEXT: buffer_load_dword v31, off, s[0:3], s32 offset:72
+; GFX7-NEXT: buffer_load_dword v11, off, s[0:3], s32 offset:8
+; GFX7-NEXT: v_alignbit_b32 v5, v5, v9, 16
+; GFX7-NEXT: v_lshrrev_b32_e32 v9, 16, v18
+; GFX7-NEXT: v_lshrrev_b32_e32 v26, 16, v26
+; GFX7-NEXT: v_alignbit_b32 v9, v9, v17, 16
+; GFX7-NEXT: v_alignbit_b32 v25, v26, v25, 16
+; GFX7-NEXT: v_lshrrev_b32_e32 v22, 16, v22
+; GFX7-NEXT: v_alignbit_b32 v21, v22, v21, 16
+; GFX7-NEXT: v_lshrrev_b32_e32 v30, 16, v30
+; GFX7-NEXT: v_alignbit_b32 v29, v30, v29, 16
+; GFX7-NEXT: v_lshrrev_b32_e32 v20, 16, v20
+; GFX7-NEXT: v_alignbit_b32 v19, v20, v19, 16
+; GFX7-NEXT: v_lshrrev_b32_e32 v24, 16, v24
+; GFX7-NEXT: v_alignbit_b32 v23, v24, v23, 16
+; GFX7-NEXT: v_lshrrev_b32_e32 v28, 16, v28
+; GFX7-NEXT: v_alignbit_b32 v27, v28, v27, 16
; GFX7-NEXT: v_and_b32_e32 v0, 1, v0
; GFX7-NEXT: v_cmp_eq_u32_e32 vcc, 1, v0
-; GFX7-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:4
-; GFX7-NEXT: buffer_load_dword v31, off, s[0:3], s32 offset:132
-; GFX7-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:128
-; GFX7-NEXT: s_waitcnt vmcnt(2)
-; GFX7-NEXT: v_lshrrev_b32_e32 v0, 16, v0
-; GFX7-NEXT: s_waitcnt vmcnt(1)
-; GFX7-NEXT: v_lshrrev_b32_e32 v31, 16, v31
-; GFX7-NEXT: v_cndmask_b32_e32 v31, v31, v0, vcc
-; GFX7-NEXT: buffer_load_dword v0, off, s[0:3], s32
-; GFX7-NEXT: v_lshlrev_b32_e32 v31, 16, v31
-; GFX7-NEXT: s_waitcnt vmcnt(1)
-; GFX7-NEXT: v_lshrrev_b32_e32 v32, 16, v32
-; GFX7-NEXT: s_waitcnt vmcnt(0)
-; GFX7-NEXT: v_lshrrev_b32_e32 v0, 16, v0
-; GFX7-NEXT: v_cndmask_b32_e32 v32, v32, v0, vcc
-; GFX7-NEXT: v_lshrrev_b32_e32 v0, 16, v30
+; GFX7-NEXT: buffer_load_dword v14, off, s[0:3], s32 offset:32
+; GFX7-NEXT: buffer_load_dword v17, off, s[0:3], s32 offset:56
+; GFX7-NEXT: buffer_load_dword v26, off, s[0:3], s32 offset:108
+; GFX7-NEXT: buffer_load_dword v16, off, s[0:3], s32 offset:48
+; GFX7-NEXT: buffer_load_dword v22, off, s[0:3], s32 offset:92
+; GFX7-NEXT: buffer_load_dword v18, off, s[0:3], s32 offset:64
; GFX7-NEXT: buffer_load_dword v30, off, s[0:3], s32 offset:124
-; GFX7-NEXT: s_waitcnt vmcnt(0)
-; GFX7-NEXT: v_lshrrev_b32_e32 v30, 16, v30
-; GFX7-NEXT: v_cndmask_b32_e32 v30, v30, v0, vcc
-; GFX7-NEXT: v_lshrrev_b32_e32 v0, 16, v29
-; GFX7-NEXT: buffer_load_dword v29, off, s[0:3], s32 offset:120
-; GFX7-NEXT: s_waitcnt vmcnt(0)
-; GFX7-NEXT: v_lshrrev_b32_e32 v29, 16, v29
-; GFX7-NEXT: v_cndmask_b32_e32 v29, v29, v0, vcc
-; GFX7-NEXT: v_lshrrev_b32_e32 v0, 16, v28
+; GFX7-NEXT: buffer_load_dword v20, off, s[0:3], s32 offset:84
+; GFX7-NEXT: buffer_load_dword v24, off, s[0:3], s32 offset:100
; GFX7-NEXT: buffer_load_dword v28, off, s[0:3], s32 offset:116
-; GFX7-NEXT: s_waitcnt vmcnt(0)
-; GFX7-NEXT: v_lshrrev_b32_e32 v28, 16, v28
-; GFX7-NEXT: v_cndmask_b32_e32 v28, v28, v0, vcc
-; GFX7-NEXT: v_lshrrev_b32_e32 v0, 16, v27
-; GFX7-NEXT: buffer_load_dword v27, off, s[0:3], s32 offset:112
-; GFX7-NEXT: s_waitcnt vmcnt(0)
-; GFX7-NEXT: v_lshrrev_b32_e32 v27, 16, v27
-; GFX7-NEXT: v_cndmask_b32_e32 v27, v27, v0, vcc
-; GFX7-NEXT: v_lshrrev_b32_e32 v0, 16, v26
-; GFX7-NEXT: buffer_load_dword v26, off, s[0:3], s32 offset:108
-; GFX7-NEXT: s_waitcnt vmcnt(0)
+; GFX7-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:128
+; GFX7-NEXT: buffer_load_dword v32, off, s[0:3], s32
+; GFX7-NEXT: s_waitcnt vmcnt(14)
+; GFX7-NEXT: v_lshrrev_b32_e32 v10, 16, v10
+; GFX7-NEXT: s_waitcnt vmcnt(12)
+; GFX7-NEXT: v_alignbit_b32 v10, v10, v11, 16
+; GFX7-NEXT: buffer_load_dword v11, off, s[0:3], s32 offset:20
+; GFX7-NEXT: v_cndmask_b32_e32 v1, v10, v1, vcc
+; GFX7-NEXT: v_lshlrev_b32_e32 v0, 16, v1
+; GFX7-NEXT: v_and_b32_e32 v1, 0xffff0000, v1
+; GFX7-NEXT: s_waitcnt vmcnt(10)
; GFX7-NEXT: v_lshrrev_b32_e32 v26, 16, v26
-; GFX7-NEXT: v_cndmask_b32_e32 v26, v26, v0, vcc
-; GFX7-NEXT: v_lshrrev_b32_e32 v0, 16, v25
-; GFX7-NEXT: buffer_load_dword v25, off, s[0:3], s32 offset:104
-; GFX7-NEXT: s_waitcnt vmcnt(0)
-; GFX7-NEXT: v_lshrrev_b32_e32 v25, 16, v25
-; GFX7-NEXT: v_cndmask_b32_e32 v25, v25, v0, vcc
-; GFX7-NEXT: v_lshrrev_b32_e32 v0, 16, v24
-; GFX7-NEXT: buffer_load_dword v24, off, s[0:3], s32 offset:100
-; GFX7-NEXT: s_waitcnt vmcnt(0)
-; GFX7-NEXT: v_lshrrev_b32_e32 v24, 16, v24
-; GFX7-NEXT: v_cndmask_b32_e32 v24, v24, v0, vcc
-; GFX7-NEXT: v_lshrrev_b32_e32 v0, 16, v23
-; GFX7-NEXT: buffer_load_dword v23, off, s[0:3], s32 offset:96
-; GFX7-NEXT: s_waitcnt vmcnt(0)
-; GFX7-NEXT: v_lshrrev_b32_e32 v23, 16, v23
-; GFX7-NEXT: v_cndmask_b32_e32 v23, v23, v0, vcc
-; GFX7-NEXT: v_lshrrev_b32_e32 v0, 16, v22
-; GFX7-NEXT: buffer_load_dword v22, off, s[0:3], s32 offset:92
-; GFX7-NEXT: s_waitcnt vmcnt(0)
+; GFX7-NEXT: s_waitcnt vmcnt(8)
; GFX7-NEXT: v_lshrrev_b32_e32 v22, 16, v22
-; GFX7-NEXT: v_cndmask_b32_e32 v22, v22, v0, vcc
-; GFX7-NEXT: v_lshrrev_b32_e32 v0, 16, v21
-; GFX7-NEXT: buffer_load_dword v21, off, s[0:3], s32 offset:88
-; GFX7-NEXT: s_waitcnt vmcnt(0)
-; GFX7-NEXT: v_lshrrev_b32_e32 v21, 16, v21
-; GFX7-NEXT: v_cndmask_b32_e32 v21, v21, v0, vcc
-; GFX7-NEXT: v_lshrrev_b32_e32 v0, 16, v20
-; GFX7-NEXT: buffer_load_dword v20, off, s[0:3], s32 offset:84
-; GFX7-NEXT: s_waitcnt vmcnt(0)
+; GFX7-NEXT: s_waitcnt vmcnt(6)
+; GFX7-NEXT: v_lshrrev_b32_e32 v30, 16, v30
+; GFX7-NEXT: s_waitcnt vmcnt(5)
; GFX7-NEXT: v_lshrrev_b32_e32 v20, 16, v20
-; GFX7-NEXT: v_cndmask_b32_e32 v20, v20, v0, vcc
-; GFX7-NEXT: v_lshrrev_b32_e32 v0, 16, v19
-; GFX7-NEXT: buffer_load_dword v19, off, s[0:3], s32 offset:80
-; GFX7-NEXT: s_waitcnt vmcnt(0)
-; GFX7-NEXT: v_lshrrev_b32_e32 v19, 16, v19
-; GFX7-NEXT: v_cndmask_b32_e32 v19, v19, v0, vcc
-; GFX7-NEXT: v_lshrrev_b32_e32 v0, 16, v18
-; GFX7-NEXT: buffer_load_dword v18, off, s[0:3], s32 offset:76
-; GFX7-NEXT: s_waitcnt vmcnt(0)
-; GFX7-NEXT: v_lshrrev_b32_e32 v18, 16, v18
-; GFX7-NEXT: v_cndmask_b32_e32 v18, v18, v0, vcc
-; GFX7-NEXT: v_lshrrev_b32_e32 v0, 16, v17
-; GFX7-NEXT: buffer_load_dword v17, off, s[0:3], s32 offset:72
-; GFX7-NEXT: s_waitcnt vmcnt(0)
-; GFX7-NEXT: v_lshrrev_b32_e32 v17, 16, v17
-; GFX7-NEXT: v_cndmask_b32_e32 v17, v17, v0, vcc
-; GFX7-NEXT: v_lshrrev_b32_e32 v0, 16, v16
-; GFX7-NEXT: buffer_load_dword v16, off, s[0:3], s32 offset:68
-; GFX7-NEXT: s_waitcnt vmcnt(0)
-; GFX7-NEXT: v_lshrrev_b32_e32 v16, 16, v16
-; GFX7-NEXT: v_cndmask_b32_e32 v16, v16, v0, vcc
-; GFX7-NEXT: v_lshrrev_b32_e32 v0, 16, v15
-; GFX7-NEXT: buffer_load_dword v15, off, s[0:3], s32 offset:64
-; GFX7-NEXT: s_waitcnt vmcnt(0)
-; GFX7-NEXT: v_lshrrev_b32_e32 v15, 16, v15
-; GFX7-NEXT: v_cndmask_b32_e32 v15, v15, v0, vcc
-; GFX7-NEXT: v_lshrrev_b32_e32 v0, 16, v14
-; GFX7-NEXT: buffer_load_dword v14, off, s[0:3], s32 offset:60
-; GFX7-NEXT: s_waitcnt vmcnt(0)
-; GFX7-NEXT: v_lshrrev_b32_e32 v14, 16, v14
-; GFX7-NEXT: v_cndmask_b32_e32 v14, v14, v0, vcc
-; GFX7-NEXT: v_lshrrev_b32_e32 v0, 16, v13
-; GFX7-NEXT: buffer_load_dword v13, off, s[0:3], s32 offset:56
-; GFX7-NEXT: s_waitcnt vmcnt(0)
-; GFX7-NEXT: v_lshrrev_b32_e32 v13, 16, v13
-; GFX7-NEXT: v_cndmask_b32_e32 v13, v13, v0, vcc
-; GFX7-NEXT: v_lshrrev_b32_e32 v0, 16, v12
-; GFX7-NEXT: buffer_load_dword v12, off, s[0:3], s32 offset:52
-; GFX7-NEXT: s_waitcnt vmcnt(0)
-; GFX7-NEXT: v_lshrrev_b32_e32 v12, 16, v12
-; GFX7-NEXT: v_cndmask_b32_e32 v12, v12, v0, vcc
-; GFX7-NEXT: v_lshrrev_b32_e32 v0, 16, v11
-; GFX7-NEXT: buffer_load_dword v11, off, s[0:3], s32 offset:48
+; GFX7-NEXT: s_waitcnt vmcnt(4)
+; GFX7-NEXT: v_lshrrev_b32_e32 v24, 16, v24
+; GFX7-NEXT: s_waitcnt vmcnt(3)
+; GFX7-NEXT: v_lshrrev_b32_e32 v28, 16, v28
; GFX7-NEXT: s_waitcnt vmcnt(0)
; GFX7-NEXT: v_lshrrev_b32_e32 v11, 16, v11
-; GFX7-NEXT: v_cndmask_b32_e32 v11, v11, v0, vcc
-; GFX7-NEXT: v_lshrrev_b32_e32 v0, 16, v10
-; GFX7-NEXT: buffer_load_dword v10, off, s[0:3], s32 offset:44
-; GFX7-NEXT: s_waitcnt vmcnt(0)
-; GFX7-NEXT: v_lshrrev_b32_e32 v10, 16, v10
-; GFX7-NEXT: v_cndmask_b32_e32 v10, v10, v0, vcc
-; GFX7-NEXT: v_lshrrev_b32_e32 v0, 16, v9
-; GFX7-NEXT: buffer_load_dword v9, off, s[0:3], s32 offset:40
-; GFX7-NEXT: s_waitcnt vmcnt(0)
-; GFX7-NEXT: v_lshrrev_b32_e32 v9, 16, v9
-; GFX7-NEXT: v_cndmask_b32_e32 v9, v9, v0, vcc
-; GFX7-NEXT: v_lshrrev_b32_e32 v0, 16, v8
-; GFX7-NEXT: buffer_load_dword v8, off, s[0:3], s32 offset:36
+; GFX7-NEXT: v_alignbit_b32 v11, v11, v12, 16
+; GFX7-NEXT: buffer_load_dword v12, off, s[0:3], s32 offset:28
; GFX7-NEXT: s_waitcnt vmcnt(0)
-; GFX7-NEXT: v_lshrrev_b32_e32 v8, 16, v8
-; GFX7-NEXT: v_cndmask_b32_e32 v8, v8, v0, vcc
-; GFX7-NEXT: v_lshrrev_b32_e32 v0, 16, v7
-; GFX7-NEXT: buffer_load_dword v7, off, s[0:3], s32 offset:32
-; GFX7-NEXT: s_waitcnt vmcnt(0)
-; GFX7-NEXT: v_lshrrev_b32_e32 v7, 16, v7
-; GFX7-NEXT: v_cndmask_b32_e32 v7, v7, v0, vcc
-; GFX7-NEXT: v_lshrrev_b32_e32 v0, 16, v6
-; GFX7-NEXT: buffer_load_dword v6, off, s[0:3], s32 offset:28
+; GFX7-NEXT: v_lshrrev_b32_e32 v12, 16, v12
+; GFX7-NEXT: v_alignbit_b32 v12, v12, v13, 16
+; GFX7-NEXT: buffer_load_dword v13, off, s[0:3], s32 offset:36
; GFX7-NEXT: s_waitcnt vmcnt(0)
-; GFX7-NEXT: v_lshrrev_b32_e32 v6, 16, v6
-; GFX7-NEXT: v_cndmask_b32_e32 v6, v6, v0, vcc
-; GFX7-NEXT: v_lshrrev_b32_e32 v0, 16, v5
-; GFX7-NEXT: buffer_load_dword v5, off, s[0:3], s32 offset:24
+; GFX7-NEXT: v_lshrrev_b32_e32 v13, 16, v13
+; GFX7-NEXT: v_alignbit_b32 v13, v13, v14, 16
+; GFX7-NEXT: buffer_load_dword v14, off, s[0:3], s32 offset:44
; GFX7-NEXT: s_waitcnt vmcnt(0)
-; GFX7-NEXT: v_lshrrev_b32_e32 v5, 16, v5
-; GFX7-NEXT: v_cndmask_b32_e32 v5, v5, v0, vcc
-; GFX7-NEXT: v_lshrrev_b32_e32 v0, 16, v4
-; GFX7-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:20
+; GFX7-NEXT: v_lshrrev_b32_e32 v14, 16, v14
+; GFX7-NEXT: v_alignbit_b32 v14, v14, v15, 16
+; GFX7-NEXT: buffer_load_dword v15, off, s[0:3], s32 offset:52
; GFX7-NEXT: s_waitcnt vmcnt(0)
-; GFX7-NEXT: v_lshrrev_b32_e32 v4, 16, v4
-; GFX7-NEXT: v_cndmask_b32_e32 v4, v4, v0, vcc
-; GFX7-NEXT: v_lshrrev_b32_e32 v0, 16, v3
-; GFX7-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:16
+; GFX7-NEXT: v_lshrrev_b32_e32 v15, 16, v15
+; GFX7-NEXT: v_alignbit_b32 v15, v15, v16, 16
+; GFX7-NEXT: buffer_load_dword v16, off, s[0:3], s32 offset:60
+; GFX7-NEXT: v_cndmask_b32_e32 v15, v15, v6, vcc
+; GFX7-NEXT: v_lshlrev_b32_e32 v10, 16, v15
; GFX7-NEXT: s_waitcnt vmcnt(0)
-; GFX7-NEXT: v_lshrrev_b32_e32 v3, 16, v3
-; GFX7-NEXT: v_cndmask_b32_e32 v3, v3, v0, vcc
-; GFX7-NEXT: v_lshrrev_b32_e32 v0, 16, v2
-; GFX7-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:12
+; GFX7-NEXT: v_lshrrev_b32_e32 v16, 16, v16
+; GFX7-NEXT: v_alignbit_b32 v16, v16, v17, 16
+; GFX7-NEXT: buffer_load_dword v17, off, s[0:3], s32 offset:68
+; GFX7-NEXT: v_cndmask_b32_e32 v16, v16, v7, vcc
+; GFX7-NEXT: v_cndmask_b32_e32 v7, v13, v4, vcc
+; GFX7-NEXT: v_lshlrev_b32_e32 v6, 16, v7
+; GFX7-NEXT: v_and_b32_e32 v7, 0xffff0000, v7
+; GFX7-NEXT: v_and_b32_e32 v13, 0xffff0000, v16
; GFX7-NEXT: s_waitcnt vmcnt(0)
-; GFX7-NEXT: v_lshrrev_b32_e32 v2, 16, v2
-; GFX7-NEXT: v_cndmask_b32_e32 v2, v2, v0, vcc
-; GFX7-NEXT: v_lshrrev_b32_e32 v0, 16, v1
-; GFX7-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:8
+; GFX7-NEXT: v_lshrrev_b32_e32 v17, 16, v17
+; GFX7-NEXT: v_alignbit_b32 v17, v17, v18, 16
+; GFX7-NEXT: buffer_load_dword v18, off, s[0:3], s32 offset:76
+; GFX7-NEXT: v_cndmask_b32_e32 v17, v17, v8, vcc
; GFX7-NEXT: s_waitcnt vmcnt(0)
-; GFX7-NEXT: v_lshrrev_b32_e32 v1, 16, v1
-; GFX7-NEXT: v_cndmask_b32_e32 v0, v1, v0, vcc
-; GFX7-NEXT: v_lshlrev_b32_e32 v0, 16, v0
-; GFX7-NEXT: v_lshlrev_b32_e32 v1, 16, v2
+; GFX7-NEXT: v_lshrrev_b32_e32 v18, 16, v18
+; GFX7-NEXT: v_alignbit_b32 v18, v18, v31, 16
+; GFX7-NEXT: buffer_load_dword v31, off, s[0:3], s32 offset:80
+; GFX7-NEXT: v_cndmask_b32_e32 v18, v18, v9, vcc
+; GFX7-NEXT: v_cndmask_b32_e32 v9, v14, v5, vcc
+; GFX7-NEXT: v_cndmask_b32_e32 v5, v12, v3, vcc
+; GFX7-NEXT: v_cndmask_b32_e32 v3, v11, v2, vcc
; GFX7-NEXT: v_lshlrev_b32_e32 v2, 16, v3
-; GFX7-NEXT: v_lshlrev_b32_e32 v3, 16, v4
+; GFX7-NEXT: v_and_b32_e32 v3, 0xffff0000, v3
; GFX7-NEXT: v_lshlrev_b32_e32 v4, 16, v5
-; GFX7-NEXT: v_lshlrev_b32_e32 v5, 16, v6
-; GFX7-NEXT: v_lshlrev_b32_e32 v6, 16, v7
-; GFX7-NEXT: v_lshlrev_b32_e32 v7, 16, v8
+; GFX7-NEXT: v_and_b32_e32 v5, 0xffff0000, v5
; GFX7-NEXT: v_lshlrev_b32_e32 v8, 16, v9
-; GFX7-NEXT: v_lshlrev_b32_e32 v9, 16, v10
-; GFX7-NEXT: v_lshlrev_b32_e32 v10, 16, v11
-; GFX7-NEXT: v_lshlrev_b32_e32 v11, 16, v12
-; GFX7-NEXT: v_lshlrev_b32_e32 v12, 16, v13
-; GFX7-NEXT: v_lshlrev_b32_e32 v13, 16, v14
-; GFX7-NEXT: v_lshlrev_b32_e32 v14, 16, v15
-; GFX7-NEXT: v_lshlrev_b32_e32 v15, 16, v16
-; GFX7-NEXT: v_lshlrev_b32_e32 v16, 16, v17
-; GFX7-NEXT: v_lshlrev_b32_e32 v17, 16, v18
+; GFX7-NEXT: v_and_b32_e32 v9, 0xffff0000, v9
+; GFX7-NEXT: v_and_b32_e32 v11, 0xffff0000, v15
+; GFX7-NEXT: v_lshlrev_b32_e32 v12, 16, v16
+; GFX7-NEXT: v_lshlrev_b32_e32 v14, 16, v17
+; GFX7-NEXT: v_and_b32_e32 v15, 0xffff0000, v17
+; GFX7-NEXT: v_lshlrev_b32_e32 v16, 16, v18
+; GFX7-NEXT: v_and_b32_e32 v17, 0xffff0000, v18
+; GFX7-NEXT: s_waitcnt vmcnt(0)
+; GFX7-NEXT: v_alignbit_b32 v20, v20, v31, 16
+; GFX7-NEXT: buffer_load_dword v31, off, s[0:3], s32 offset:88
+; GFX7-NEXT: v_cndmask_b32_e32 v19, v20, v19, vcc
; GFX7-NEXT: v_lshlrev_b32_e32 v18, 16, v19
-; GFX7-NEXT: v_lshlrev_b32_e32 v19, 16, v20
+; GFX7-NEXT: v_and_b32_e32 v19, 0xffff0000, v19
+; GFX7-NEXT: s_waitcnt vmcnt(0)
+; GFX7-NEXT: v_alignbit_b32 v22, v22, v31, 16
+; GFX7-NEXT: buffer_load_dword v31, off, s[0:3], s32 offset:96
+; GFX7-NEXT: v_cndmask_b32_e32 v21, v22, v21, vcc
; GFX7-NEXT: v_lshlrev_b32_e32 v20, 16, v21
-; GFX7-NEXT: v_lshlrev_b32_e32 v21, 16, v22
+; GFX7-NEXT: v_and_b32_e32 v21, 0xffff0000, v21
+; GFX7-NEXT: s_waitcnt vmcnt(0)
+; GFX7-NEXT: v_alignbit_b32 v24, v24, v31, 16
+; GFX7-NEXT: buffer_load_dword v31, off, s[0:3], s32 offset:104
+; GFX7-NEXT: v_cndmask_b32_e32 v23, v24, v23, vcc
; GFX7-NEXT: v_lshlrev_b32_e32 v22, 16, v23
-; GFX7-NEXT: v_lshlrev_b32_e32 v23, 16, v24
+; GFX7-NEXT: v_and_b32_e32 v23, 0xffff0000, v23
+; GFX7-NEXT: s_waitcnt vmcnt(0)
+; GFX7-NEXT: v_alignbit_b32 v26, v26, v31, 16
+; GFX7-NEXT: buffer_load_dword v31, off, s[0:3], s32 offset:112
+; GFX7-NEXT: v_cndmask_b32_e32 v25, v26, v25, vcc
; GFX7-NEXT: v_lshlrev_b32_e32 v24, 16, v25
-; GFX7-NEXT: v_lshlrev_b32_e32 v25, 16, v26
+; GFX7-NEXT: v_and_b32_e32 v25, 0xffff0000, v25
+; GFX7-NEXT: s_waitcnt vmcnt(0)
+; GFX7-NEXT: v_alignbit_b32 v28, v28, v31, 16
+; GFX7-NEXT: buffer_load_dword v31, off, s[0:3], s32 offset:120
+; GFX7-NEXT: v_cndmask_b32_e32 v27, v28, v27, vcc
; GFX7-NEXT: v_lshlrev_b32_e32 v26, 16, v27
-; GFX7-NEXT: v_lshlrev_b32_e32 v27, 16, v28
+; GFX7-NEXT: v_and_b32_e32 v27, 0xffff0000, v27
+; GFX7-NEXT: s_waitcnt vmcnt(0)
+; GFX7-NEXT: v_alignbit_b32 v30, v30, v31, 16
+; GFX7-NEXT: buffer_load_dword v31, off, s[0:3], s32 offset:4
+; GFX7-NEXT: v_cndmask_b32_e32 v29, v30, v29, vcc
; GFX7-NEXT: v_lshlrev_b32_e32 v28, 16, v29
-; GFX7-NEXT: v_lshlrev_b32_e32 v29, 16, v30
-; GFX7-NEXT: v_lshlrev_b32_e32 v30, 16, v32
+; GFX7-NEXT: v_and_b32_e32 v29, 0xffff0000, v29
+; GFX7-NEXT: s_waitcnt vmcnt(0)
+; GFX7-NEXT: v_lshrrev_b32_e32 v31, 16, v31
+; GFX7-NEXT: v_alignbit_b32 v31, v31, v32, 16
+; GFX7-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:132
+; GFX7-NEXT: s_waitcnt vmcnt(0)
+; GFX7-NEXT: v_lshrrev_b32_e32 v32, 16, v32
+; GFX7-NEXT: v_alignbit_b32 v32, v32, v33, 16
+; GFX7-NEXT: v_cndmask_b32_e32 v31, v32, v31, vcc
+; GFX7-NEXT: v_lshlrev_b32_e32 v30, 16, v31
+; GFX7-NEXT: v_and_b32_e32 v31, 0xffff0000, v31
; GFX7-NEXT: s_setpc_b64 s[30:31]
;
; GFX8-LABEL: v_select_v32bf16:
@@ -26506,106 +26317,26 @@ define <32 x bfloat> @v_select_v32bf16(i1 %cond, <32 x bfloat> %a, <32 x bfloat>
; GFX8-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX8-NEXT: v_and_b32_e32 v0, 1, v0
; GFX8-NEXT: v_cmp_eq_u32_e32 vcc, 1, v0
-; GFX8-NEXT: v_lshrrev_b32_e32 v31, 16, v14
-; GFX8-NEXT: v_lshrrev_b32_e32 v32, 16, v30
-; GFX8-NEXT: v_cndmask_b32_e32 v14, v30, v14, vcc
-; GFX8-NEXT: v_lshrrev_b32_e32 v0, 16, v13
-; GFX8-NEXT: v_lshrrev_b32_e32 v30, 16, v29
-; GFX8-NEXT: v_cndmask_b32_e32 v30, v30, v0, vcc
-; GFX8-NEXT: v_cndmask_b32_e32 v13, v29, v13, vcc
-; GFX8-NEXT: v_lshrrev_b32_e32 v0, 16, v12
-; GFX8-NEXT: v_lshrrev_b32_e32 v29, 16, v28
-; GFX8-NEXT: v_cndmask_b32_e32 v29, v29, v0, vcc
-; GFX8-NEXT: v_cndmask_b32_e32 v12, v28, v12, vcc
-; GFX8-NEXT: v_lshrrev_b32_e32 v0, 16, v11
-; GFX8-NEXT: v_lshrrev_b32_e32 v28, 16, v27
-; GFX8-NEXT: v_cndmask_b32_e32 v28, v28, v0, vcc
-; GFX8-NEXT: v_cndmask_b32_e32 v11, v27, v11, vcc
-; GFX8-NEXT: v_lshrrev_b32_e32 v0, 16, v10
-; GFX8-NEXT: v_lshrrev_b32_e32 v27, 16, v26
-; GFX8-NEXT: v_cndmask_b32_e32 v27, v27, v0, vcc
-; GFX8-NEXT: v_cndmask_b32_e32 v10, v26, v10, vcc
-; GFX8-NEXT: v_lshrrev_b32_e32 v0, 16, v9
-; GFX8-NEXT: v_lshrrev_b32_e32 v26, 16, v25
-; GFX8-NEXT: v_cndmask_b32_e32 v26, v26, v0, vcc
-; GFX8-NEXT: v_cndmask_b32_e32 v9, v25, v9, vcc
-; GFX8-NEXT: v_lshrrev_b32_e32 v0, 16, v8
-; GFX8-NEXT: v_lshrrev_b32_e32 v25, 16, v24
-; GFX8-NEXT: v_cndmask_b32_e32 v25, v25, v0, vcc
-; GFX8-NEXT: v_cndmask_b32_e32 v8, v24, v8, vcc
-; GFX8-NEXT: v_lshrrev_b32_e32 v0, 16, v7
-; GFX8-NEXT: v_lshrrev_b32_e32 v24, 16, v23
-; GFX8-NEXT: v_cndmask_b32_e32 v24, v24, v0, vcc
-; GFX8-NEXT: v_cndmask_b32_e32 v7, v23, v7, vcc
-; GFX8-NEXT: v_lshrrev_b32_e32 v0, 16, v6
-; GFX8-NEXT: v_lshrrev_b32_e32 v23, 16, v22
-; GFX8-NEXT: v_cndmask_b32_e32 v23, v23, v0, vcc
-; GFX8-NEXT: v_cndmask_b32_e32 v6, v22, v6, vcc
-; GFX8-NEXT: v_lshrrev_b32_e32 v0, 16, v5
-; GFX8-NEXT: v_lshrrev_b32_e32 v22, 16, v21
-; GFX8-NEXT: v_cndmask_b32_e32 v31, v32, v31, vcc
-; GFX8-NEXT: v_cndmask_b32_e32 v22, v22, v0, vcc
-; GFX8-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:4
-; GFX8-NEXT: buffer_load_dword v32, off, s[0:3], s32
-; GFX8-NEXT: v_lshrrev_b32_e32 v33, 16, v16
-; GFX8-NEXT: v_cndmask_b32_e32 v5, v21, v5, vcc
-; GFX8-NEXT: v_lshrrev_b32_e32 v21, 16, v20
+; GFX8-NEXT: v_cndmask_b32_e32 v0, v17, v1, vcc
+; GFX8-NEXT: v_cndmask_b32_e32 v1, v18, v2, vcc
+; GFX8-NEXT: buffer_load_dword v17, off, s[0:3], s32
+; GFX8-NEXT: buffer_load_dword v18, off, s[0:3], s32 offset:4
+; GFX8-NEXT: v_cndmask_b32_e32 v2, v19, v3, vcc
+; GFX8-NEXT: v_cndmask_b32_e32 v3, v20, v4, vcc
+; GFX8-NEXT: v_cndmask_b32_e32 v4, v21, v5, vcc
+; GFX8-NEXT: v_cndmask_b32_e32 v5, v22, v6, vcc
+; GFX8-NEXT: v_cndmask_b32_e32 v6, v23, v7, vcc
+; GFX8-NEXT: v_cndmask_b32_e32 v7, v24, v8, vcc
+; GFX8-NEXT: v_cndmask_b32_e32 v8, v25, v9, vcc
+; GFX8-NEXT: v_cndmask_b32_e32 v9, v26, v10, vcc
+; GFX8-NEXT: v_cndmask_b32_e32 v10, v27, v11, vcc
+; GFX8-NEXT: v_cndmask_b32_e32 v11, v28, v12, vcc
+; GFX8-NEXT: v_cndmask_b32_e32 v12, v29, v13, vcc
+; GFX8-NEXT: v_cndmask_b32_e32 v13, v30, v14, vcc
; GFX8-NEXT: s_waitcnt vmcnt(1)
-; GFX8-NEXT: v_cndmask_b32_e32 v16, v0, v16, vcc
-; GFX8-NEXT: v_lshrrev_b32_e32 v0, 16, v0
-; GFX8-NEXT: v_cndmask_b32_e32 v33, v0, v33, vcc
-; GFX8-NEXT: v_lshrrev_b32_e32 v0, 16, v15
+; GFX8-NEXT: v_cndmask_b32_e32 v14, v17, v15, vcc
; GFX8-NEXT: s_waitcnt vmcnt(0)
-; GFX8-NEXT: v_cndmask_b32_e32 v15, v32, v15, vcc
-; GFX8-NEXT: v_lshrrev_b32_e32 v32, 16, v32
-; GFX8-NEXT: v_cndmask_b32_e32 v32, v32, v0, vcc
-; GFX8-NEXT: v_lshrrev_b32_e32 v0, 16, v4
-; GFX8-NEXT: v_cndmask_b32_e32 v21, v21, v0, vcc
-; GFX8-NEXT: v_cndmask_b32_e32 v4, v20, v4, vcc
-; GFX8-NEXT: v_lshrrev_b32_e32 v0, 16, v3
-; GFX8-NEXT: v_lshrrev_b32_e32 v20, 16, v19
-; GFX8-NEXT: v_cndmask_b32_e32 v20, v20, v0, vcc
-; GFX8-NEXT: v_cndmask_b32_e32 v3, v19, v3, vcc
-; GFX8-NEXT: v_lshrrev_b32_e32 v0, 16, v2
-; GFX8-NEXT: v_lshrrev_b32_e32 v19, 16, v18
-; GFX8-NEXT: v_cndmask_b32_e32 v19, v19, v0, vcc
-; GFX8-NEXT: v_cndmask_b32_e32 v2, v18, v2, vcc
-; GFX8-NEXT: v_lshrrev_b32_e32 v0, 16, v1
-; GFX8-NEXT: v_lshrrev_b32_e32 v18, 16, v17
-; GFX8-NEXT: v_cndmask_b32_e32 v0, v18, v0, vcc
-; GFX8-NEXT: v_cndmask_b32_e32 v1, v17, v1, vcc
-; GFX8-NEXT: v_lshlrev_b32_e32 v0, 16, v0
-; GFX8-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
-; GFX8-NEXT: v_lshlrev_b32_e32 v1, 16, v19
-; GFX8-NEXT: v_or_b32_sdwa v1, v2, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
-; GFX8-NEXT: v_lshlrev_b32_e32 v2, 16, v20
-; GFX8-NEXT: v_or_b32_sdwa v2, v3, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
-; GFX8-NEXT: v_lshlrev_b32_e32 v3, 16, v21
-; GFX8-NEXT: v_or_b32_sdwa v3, v4, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
-; GFX8-NEXT: v_lshlrev_b32_e32 v4, 16, v22
-; GFX8-NEXT: v_or_b32_sdwa v4, v5, v4 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
-; GFX8-NEXT: v_lshlrev_b32_e32 v5, 16, v23
-; GFX8-NEXT: v_or_b32_sdwa v5, v6, v5 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
-; GFX8-NEXT: v_lshlrev_b32_e32 v6, 16, v24
-; GFX8-NEXT: v_or_b32_sdwa v6, v7, v6 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
-; GFX8-NEXT: v_lshlrev_b32_e32 v7, 16, v25
-; GFX8-NEXT: v_or_b32_sdwa v7, v8, v7 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
-; GFX8-NEXT: v_lshlrev_b32_e32 v8, 16, v26
-; GFX8-NEXT: v_or_b32_sdwa v8, v9, v8 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
-; GFX8-NEXT: v_lshlrev_b32_e32 v9, 16, v27
-; GFX8-NEXT: v_or_b32_sdwa v9, v10, v9 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
-; GFX8-NEXT: v_lshlrev_b32_e32 v10, 16, v28
-; GFX8-NEXT: v_or_b32_sdwa v10, v11, v10 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
-; GFX8-NEXT: v_lshlrev_b32_e32 v11, 16, v29
-; GFX8-NEXT: v_or_b32_sdwa v11, v12, v11 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
-; GFX8-NEXT: v_lshlrev_b32_e32 v12, 16, v30
-; GFX8-NEXT: v_or_b32_sdwa v12, v13, v12 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
-; GFX8-NEXT: v_lshlrev_b32_e32 v13, 16, v31
-; GFX8-NEXT: v_or_b32_sdwa v13, v14, v13 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
-; GFX8-NEXT: v_lshlrev_b32_e32 v14, 16, v32
-; GFX8-NEXT: v_or_b32_sdwa v14, v15, v14 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
-; GFX8-NEXT: v_lshlrev_b32_e32 v15, 16, v33
-; GFX8-NEXT: v_or_b32_sdwa v15, v16, v15 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
+; GFX8-NEXT: v_cndmask_b32_e32 v15, v18, v16, vcc
; GFX8-NEXT: s_setpc_b64 s[30:31]
;
; GFX9-LABEL: v_select_v32bf16:
@@ -26689,75 +26420,51 @@ define <32 x bfloat> @v_select_v32bf16(i1 %cond, <32 x bfloat> %a, <32 x bfloat>
define amdgpu_ps <2 x i32> @s_select_v3bf16(<3 x bfloat> inreg %a, <3 x bfloat> inreg %b, i32 %c) {
; GCN-LABEL: s_select_v3bf16:
; GCN: ; %bb.0:
-; GCN-NEXT: s_lshr_b32 s2, s2, 16
-; GCN-NEXT: s_lshr_b32 s5, s5, 16
; GCN-NEXT: s_lshr_b32 s1, s1, 16
-; GCN-NEXT: s_lshr_b32 s0, s0, 16
-; GCN-NEXT: s_lshr_b32 s3, s3, 16
-; GCN-NEXT: s_lshr_b32 s4, s4, 16
-; GCN-NEXT: v_mov_b32_e32 v1, s3
-; GCN-NEXT: v_mov_b32_e32 v2, s0
-; GCN-NEXT: v_cmp_eq_u32_e32 vcc, 0, v0
-; GCN-NEXT: v_cndmask_b32_e32 v0, v1, v2, vcc
-; GCN-NEXT: v_mov_b32_e32 v1, s4
-; GCN-NEXT: v_mov_b32_e32 v2, s1
-; GCN-NEXT: v_mov_b32_e32 v3, s5
+; GCN-NEXT: v_mov_b32_e32 v1, s0
+; GCN-NEXT: s_lshr_b32 s0, s4, 16
+; GCN-NEXT: v_mov_b32_e32 v2, s3
+; GCN-NEXT: s_lshr_b32 s2, s2, 16
+; GCN-NEXT: s_lshr_b32 s3, s5, 16
+; GCN-NEXT: v_alignbit_b32 v1, s1, v1, 16
+; GCN-NEXT: v_alignbit_b32 v2, s0, v2, 16
+; GCN-NEXT: v_mov_b32_e32 v3, s3
; GCN-NEXT: v_mov_b32_e32 v4, s2
-; GCN-NEXT: v_cndmask_b32_e32 v1, v1, v2, vcc
-; GCN-NEXT: v_cndmask_b32_e32 v2, v3, v4, vcc
-; GCN-NEXT: v_lshlrev_b32_e32 v1, 16, v1
-; GCN-NEXT: v_or_b32_e32 v0, v0, v1
-; GCN-NEXT: v_readfirstlane_b32 s0, v0
-; GCN-NEXT: v_readfirstlane_b32 s1, v2
+; GCN-NEXT: v_cmp_eq_u32_e32 vcc, 0, v0
+; GCN-NEXT: v_cndmask_b32_e32 v0, v3, v4, vcc
+; GCN-NEXT: v_cndmask_b32_e32 v1, v2, v1, vcc
+; GCN-NEXT: v_readfirstlane_b32 s0, v1
+; GCN-NEXT: v_readfirstlane_b32 s1, v0
; GCN-NEXT: ; return to shader part epilog
;
; GFX7-LABEL: s_select_v3bf16:
; GFX7: ; %bb.0:
-; GFX7-NEXT: s_lshr_b32 s0, s0, 16
-; GFX7-NEXT: s_lshr_b32 s3, s3, 16
; GFX7-NEXT: s_lshr_b32 s1, s1, 16
-; GFX7-NEXT: s_lshr_b32 s4, s4, 16
-; GFX7-NEXT: v_mov_b32_e32 v1, s3
-; GFX7-NEXT: v_mov_b32_e32 v2, s0
+; GFX7-NEXT: v_mov_b32_e32 v1, s0
+; GFX7-NEXT: s_lshr_b32 s0, s4, 16
+; GFX7-NEXT: v_mov_b32_e32 v2, s3
+; GFX7-NEXT: v_alignbit_b32 v1, s1, v1, 16
+; GFX7-NEXT: v_alignbit_b32 v2, s0, v2, 16
+; GFX7-NEXT: s_lshr_b32 s0, s2, 16
+; GFX7-NEXT: s_lshr_b32 s1, s5, 16
+; GFX7-NEXT: v_mov_b32_e32 v3, s1
+; GFX7-NEXT: v_mov_b32_e32 v4, s0
; GFX7-NEXT: v_cmp_eq_u32_e32 vcc, 0, v0
-; GFX7-NEXT: v_cndmask_b32_e32 v0, v1, v2, vcc
-; GFX7-NEXT: v_mov_b32_e32 v1, s4
-; GFX7-NEXT: v_mov_b32_e32 v2, s1
-; GFX7-NEXT: v_cndmask_b32_e32 v1, v1, v2, vcc
-; GFX7-NEXT: s_lshr_b32 s2, s2, 16
-; GFX7-NEXT: s_lshr_b32 s5, s5, 16
-; GFX7-NEXT: v_lshlrev_b32_e32 v1, 16, v1
-; GFX7-NEXT: v_or_b32_e32 v0, v0, v1
-; GFX7-NEXT: v_mov_b32_e32 v1, s5
-; GFX7-NEXT: v_mov_b32_e32 v2, s2
-; GFX7-NEXT: v_cndmask_b32_e32 v1, v1, v2, vcc
-; GFX7-NEXT: v_readfirstlane_b32 s0, v0
-; GFX7-NEXT: v_readfirstlane_b32 s1, v1
+; GFX7-NEXT: v_cndmask_b32_e32 v0, v3, v4, vcc
+; GFX7-NEXT: v_cndmask_b32_e32 v1, v2, v1, vcc
+; GFX7-NEXT: v_readfirstlane_b32 s0, v1
+; GFX7-NEXT: v_readfirstlane_b32 s1, v0
; GFX7-NEXT: ; return to shader part epilog
;
; GFX8-LABEL: s_select_v3bf16:
; GFX8: ; %bb.0:
-; GFX8-NEXT: s_lshr_b32 s4, s0, 16
-; GFX8-NEXT: s_lshr_b32 s5, s2, 16
-; GFX8-NEXT: v_mov_b32_e32 v1, s5
-; GFX8-NEXT: v_mov_b32_e32 v2, s4
-; GFX8-NEXT: v_cmp_eq_u32_e32 vcc, 0, v0
-; GFX8-NEXT: v_cndmask_b32_e32 v0, v1, v2, vcc
-; GFX8-NEXT: v_mov_b32_e32 v1, s2
-; GFX8-NEXT: v_mov_b32_e32 v2, s0
-; GFX8-NEXT: v_lshlrev_b32_e32 v0, 16, v0
-; GFX8-NEXT: v_cndmask_b32_e32 v1, v1, v2, vcc
-; GFX8-NEXT: s_lshr_b32 s0, s1, 16
-; GFX8-NEXT: s_lshr_b32 s2, s3, 16
-; GFX8-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
; GFX8-NEXT: v_mov_b32_e32 v1, s2
; GFX8-NEXT: v_mov_b32_e32 v2, s0
+; GFX8-NEXT: v_cmp_eq_u32_e32 vcc, 0, v0
+; GFX8-NEXT: v_cndmask_b32_e32 v0, v1, v2, vcc
+; GFX8-NEXT: v_mov_b32_e32 v1, s3
+; GFX8-NEXT: v_mov_b32_e32 v2, s1
; GFX8-NEXT: v_cndmask_b32_e32 v1, v1, v2, vcc
-; GFX8-NEXT: v_mov_b32_e32 v2, s3
-; GFX8-NEXT: v_mov_b32_e32 v3, s1
-; GFX8-NEXT: v_lshlrev_b32_e32 v1, 16, v1
-; GFX8-NEXT: v_cndmask_b32_e32 v2, v2, v3, vcc
-; GFX8-NEXT: v_or_b32_sdwa v1, v2, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
; GFX8-NEXT: v_and_b32_e32 v1, 0xffff, v1
; GFX8-NEXT: v_readfirstlane_b32 s0, v0
; GFX8-NEXT: v_readfirstlane_b32 s1, v1
@@ -26819,88 +26526,54 @@ define amdgpu_ps <2 x i32> @s_select_v4bf16(<4 x bfloat> inreg %a, <4 x bfloat>
; GCN-LABEL: s_select_v4bf16:
; GCN: ; %bb.0:
; GCN-NEXT: s_lshr_b32 s1, s1, 16
-; GCN-NEXT: s_lshr_b32 s5, s5, 16
-; GCN-NEXT: s_lshr_b32 s0, s0, 16
-; GCN-NEXT: s_lshr_b32 s4, s4, 16
+; GCN-NEXT: v_mov_b32_e32 v1, s0
+; GCN-NEXT: s_lshr_b32 s0, s5, 16
+; GCN-NEXT: v_mov_b32_e32 v2, s4
; GCN-NEXT: s_lshr_b32 s3, s3, 16
-; GCN-NEXT: s_lshr_b32 s2, s2, 16
-; GCN-NEXT: s_lshr_b32 s6, s6, 16
-; GCN-NEXT: s_lshr_b32 s7, s7, 16
-; GCN-NEXT: v_mov_b32_e32 v1, s6
-; GCN-NEXT: v_mov_b32_e32 v2, s2
+; GCN-NEXT: v_mov_b32_e32 v3, s2
+; GCN-NEXT: s_lshr_b32 s2, s7, 16
+; GCN-NEXT: v_mov_b32_e32 v4, s6
+; GCN-NEXT: v_alignbit_b32 v1, s1, v1, 16
+; GCN-NEXT: v_alignbit_b32 v2, s0, v2, 16
+; GCN-NEXT: v_alignbit_b32 v3, s3, v3, 16
+; GCN-NEXT: v_alignbit_b32 v4, s2, v4, 16
; GCN-NEXT: v_cmp_eq_u32_e32 vcc, 0, v0
-; GCN-NEXT: v_cndmask_b32_e32 v0, v1, v2, vcc
-; GCN-NEXT: v_mov_b32_e32 v1, s7
-; GCN-NEXT: v_mov_b32_e32 v2, s3
-; GCN-NEXT: v_mov_b32_e32 v3, s4
-; GCN-NEXT: v_mov_b32_e32 v4, s0
-; GCN-NEXT: v_mov_b32_e32 v5, s5
-; GCN-NEXT: v_mov_b32_e32 v6, s1
-; GCN-NEXT: v_cndmask_b32_e32 v1, v1, v2, vcc
-; GCN-NEXT: v_cndmask_b32_e32 v2, v3, v4, vcc
-; GCN-NEXT: v_cndmask_b32_e32 v3, v5, v6, vcc
-; GCN-NEXT: v_lshlrev_b32_e32 v1, 16, v1
-; GCN-NEXT: v_lshlrev_b32_e32 v3, 16, v3
-; GCN-NEXT: v_or_b32_e32 v0, v0, v1
-; GCN-NEXT: v_or_b32_e32 v1, v2, v3
+; GCN-NEXT: v_cndmask_b32_e32 v0, v4, v3, vcc
+; GCN-NEXT: v_cndmask_b32_e32 v1, v2, v1, vcc
; GCN-NEXT: v_readfirstlane_b32 s0, v1
; GCN-NEXT: v_readfirstlane_b32 s1, v0
; GCN-NEXT: ; return to shader part epilog
;
; GFX7-LABEL: s_select_v4bf16:
; GFX7: ; %bb.0:
-; GFX7-NEXT: s_lshr_b32 s2, s2, 16
-; GFX7-NEXT: s_lshr_b32 s6, s6, 16
-; GFX7-NEXT: s_lshr_b32 s3, s3, 16
-; GFX7-NEXT: s_lshr_b32 s7, s7, 16
-; GFX7-NEXT: v_mov_b32_e32 v1, s6
-; GFX7-NEXT: v_mov_b32_e32 v2, s2
-; GFX7-NEXT: v_cmp_eq_u32_e32 vcc, 0, v0
-; GFX7-NEXT: v_cndmask_b32_e32 v0, v1, v2, vcc
-; GFX7-NEXT: v_mov_b32_e32 v1, s7
-; GFX7-NEXT: v_mov_b32_e32 v2, s3
-; GFX7-NEXT: v_cndmask_b32_e32 v1, v1, v2, vcc
-; GFX7-NEXT: s_lshr_b32 s0, s0, 16
-; GFX7-NEXT: s_lshr_b32 s4, s4, 16
-; GFX7-NEXT: v_lshlrev_b32_e32 v1, 16, v1
+; GFX7-NEXT: v_mov_b32_e32 v1, s0
+; GFX7-NEXT: s_lshr_b32 s0, s5, 16
+; GFX7-NEXT: v_mov_b32_e32 v2, s4
+; GFX7-NEXT: v_alignbit_b32 v2, s0, v2, 16
+; GFX7-NEXT: s_lshr_b32 s0, s3, 16
+; GFX7-NEXT: v_mov_b32_e32 v3, s2
; GFX7-NEXT: s_lshr_b32 s1, s1, 16
-; GFX7-NEXT: s_lshr_b32 s5, s5, 16
-; GFX7-NEXT: v_or_b32_e32 v0, v0, v1
-; GFX7-NEXT: v_mov_b32_e32 v1, s4
-; GFX7-NEXT: v_mov_b32_e32 v2, s0
-; GFX7-NEXT: v_cndmask_b32_e32 v1, v1, v2, vcc
-; GFX7-NEXT: v_mov_b32_e32 v2, s5
-; GFX7-NEXT: v_mov_b32_e32 v3, s1
-; GFX7-NEXT: v_cndmask_b32_e32 v2, v2, v3, vcc
-; GFX7-NEXT: v_lshlrev_b32_e32 v2, 16, v2
-; GFX7-NEXT: v_or_b32_e32 v1, v1, v2
+; GFX7-NEXT: v_alignbit_b32 v3, s0, v3, 16
+; GFX7-NEXT: s_lshr_b32 s0, s7, 16
+; GFX7-NEXT: v_mov_b32_e32 v4, s6
+; GFX7-NEXT: v_alignbit_b32 v1, s1, v1, 16
+; GFX7-NEXT: v_alignbit_b32 v4, s0, v4, 16
+; GFX7-NEXT: v_cmp_eq_u32_e32 vcc, 0, v0
+; GFX7-NEXT: v_cndmask_b32_e32 v0, v4, v3, vcc
+; GFX7-NEXT: v_cndmask_b32_e32 v1, v2, v1, vcc
; GFX7-NEXT: v_readfirstlane_b32 s0, v1
; GFX7-NEXT: v_readfirstlane_b32 s1, v0
; GFX7-NEXT: ; return to shader part epilog
;
; GFX8-LABEL: s_select_v4bf16:
; GFX8: ; %bb.0:
-; GFX8-NEXT: s_lshr_b32 s4, s1, 16
-; GFX8-NEXT: s_lshr_b32 s5, s3, 16
-; GFX8-NEXT: v_mov_b32_e32 v1, s5
-; GFX8-NEXT: v_mov_b32_e32 v2, s4
-; GFX8-NEXT: v_cmp_eq_u32_e32 vcc, 0, v0
-; GFX8-NEXT: v_cndmask_b32_e32 v0, v1, v2, vcc
-; GFX8-NEXT: v_mov_b32_e32 v1, s3
-; GFX8-NEXT: v_mov_b32_e32 v2, s1
-; GFX8-NEXT: v_lshlrev_b32_e32 v0, 16, v0
-; GFX8-NEXT: v_cndmask_b32_e32 v1, v1, v2, vcc
-; GFX8-NEXT: s_lshr_b32 s1, s0, 16
-; GFX8-NEXT: s_lshr_b32 s3, s2, 16
-; GFX8-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
; GFX8-NEXT: v_mov_b32_e32 v1, s3
; GFX8-NEXT: v_mov_b32_e32 v2, s1
+; GFX8-NEXT: v_cmp_eq_u32_e32 vcc, 0, v0
+; GFX8-NEXT: v_cndmask_b32_e32 v0, v1, v2, vcc
+; GFX8-NEXT: v_mov_b32_e32 v1, s2
+; GFX8-NEXT: v_mov_b32_e32 v2, s0
; GFX8-NEXT: v_cndmask_b32_e32 v1, v1, v2, vcc
-; GFX8-NEXT: v_mov_b32_e32 v2, s2
-; GFX8-NEXT: v_mov_b32_e32 v3, s0
-; GFX8-NEXT: v_lshlrev_b32_e32 v1, 16, v1
-; GFX8-NEXT: v_cndmask_b32_e32 v2, v2, v3, vcc
-; GFX8-NEXT: v_or_b32_sdwa v1, v2, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
; GFX8-NEXT: v_readfirstlane_b32 s0, v1
; GFX8-NEXT: v_readfirstlane_b32 s1, v0
; GFX8-NEXT: ; return to shader part epilog
@@ -28555,235 +28228,171 @@ define <32 x bfloat> @v_vselect_v32bf16(<32 x i1> %cond, <32 x bfloat> %a, <32 x
; GFX8-NEXT: v_writelane_b32 v31, s30, 0
; GFX8-NEXT: v_writelane_b32 v31, s31, 1
; GFX8-NEXT: v_writelane_b32 v31, s34, 2
-; GFX8-NEXT: v_and_b32_e32 v0, 1, v0
; GFX8-NEXT: v_writelane_b32 v31, s35, 3
-; GFX8-NEXT: v_cmp_eq_u32_e32 vcc, 1, v0
-; GFX8-NEXT: v_and_b32_e32 v0, 1, v1
; GFX8-NEXT: v_writelane_b32 v31, s36, 4
-; GFX8-NEXT: v_cmp_eq_u32_e64 s[4:5], 1, v0
-; GFX8-NEXT: v_and_b32_e32 v0, 1, v2
; GFX8-NEXT: v_writelane_b32 v31, s37, 5
-; GFX8-NEXT: v_cmp_eq_u32_e64 s[6:7], 1, v0
-; GFX8-NEXT: v_and_b32_e32 v0, 1, v3
+; GFX8-NEXT: v_and_b32_e32 v21, 1, v21
+; GFX8-NEXT: v_and_b32_e32 v18, 1, v18
; GFX8-NEXT: v_writelane_b32 v31, s38, 6
-; GFX8-NEXT: v_cmp_eq_u32_e64 s[8:9], 1, v0
-; GFX8-NEXT: v_and_b32_e32 v0, 1, v4
+; GFX8-NEXT: v_cmp_eq_u32_e64 s[22:23], 1, v21
+; GFX8-NEXT: v_cmp_eq_u32_e64 s[28:29], 1, v18
+; GFX8-NEXT: buffer_load_dword v18, off, s[0:3], s32 offset:68
+; GFX8-NEXT: buffer_load_dword v21, off, s[0:3], s32 offset:4
+; GFX8-NEXT: v_and_b32_e32 v17, 1, v17
+; GFX8-NEXT: v_and_b32_e32 v16, 1, v16
; GFX8-NEXT: v_writelane_b32 v31, s39, 7
-; GFX8-NEXT: v_cmp_eq_u32_e64 s[10:11], 1, v0
-; GFX8-NEXT: v_and_b32_e32 v0, 1, v5
+; GFX8-NEXT: v_cmp_eq_u32_e64 s[30:31], 1, v17
+; GFX8-NEXT: v_cmp_eq_u32_e64 s[34:35], 1, v16
+; GFX8-NEXT: buffer_load_dword v16, off, s[0:3], s32 offset:72
+; GFX8-NEXT: buffer_load_dword v17, off, s[0:3], s32 offset:8
+; GFX8-NEXT: v_and_b32_e32 v15, 1, v15
+; GFX8-NEXT: v_and_b32_e32 v14, 1, v14
; GFX8-NEXT: v_writelane_b32 v31, s40, 8
-; GFX8-NEXT: v_cmp_eq_u32_e64 s[12:13], 1, v0
-; GFX8-NEXT: v_and_b32_e32 v0, 1, v6
+; GFX8-NEXT: v_cmp_eq_u32_e64 s[36:37], 1, v15
+; GFX8-NEXT: v_cmp_eq_u32_e64 s[38:39], 1, v14
+; GFX8-NEXT: buffer_load_dword v14, off, s[0:3], s32 offset:76
+; GFX8-NEXT: buffer_load_dword v15, off, s[0:3], s32 offset:12
; GFX8-NEXT: v_writelane_b32 v31, s41, 9
-; GFX8-NEXT: v_cmp_eq_u32_e64 s[14:15], 1, v0
-; GFX8-NEXT: v_and_b32_e32 v0, 1, v7
; GFX8-NEXT: v_writelane_b32 v31, s42, 10
-; GFX8-NEXT: v_cmp_eq_u32_e64 s[16:17], 1, v0
-; GFX8-NEXT: v_and_b32_e32 v0, 1, v8
+; GFX8-NEXT: v_and_b32_e32 v13, 1, v13
+; GFX8-NEXT: v_and_b32_e32 v12, 1, v12
; GFX8-NEXT: v_writelane_b32 v31, s43, 11
-; GFX8-NEXT: v_cmp_eq_u32_e64 s[18:19], 1, v0
-; GFX8-NEXT: v_and_b32_e32 v0, 1, v9
+; GFX8-NEXT: v_and_b32_e32 v20, 1, v20
+; GFX8-NEXT: v_cmp_eq_u32_e64 s[40:41], 1, v13
+; GFX8-NEXT: v_cmp_eq_u32_e64 s[42:43], 1, v12
+; GFX8-NEXT: buffer_load_dword v12, off, s[0:3], s32 offset:80
+; GFX8-NEXT: buffer_load_dword v13, off, s[0:3], s32 offset:16
+; GFX8-NEXT: v_cmp_eq_u32_e64 s[24:25], 1, v20
+; GFX8-NEXT: buffer_load_ushort v20, off, s[0:3], s32
; GFX8-NEXT: v_writelane_b32 v31, s44, 12
-; GFX8-NEXT: v_cmp_eq_u32_e64 s[20:21], 1, v0
-; GFX8-NEXT: v_and_b32_e32 v0, 1, v10
; GFX8-NEXT: v_writelane_b32 v31, s45, 13
-; GFX8-NEXT: v_cmp_eq_u32_e64 s[22:23], 1, v0
-; GFX8-NEXT: v_and_b32_e32 v0, 1, v11
; GFX8-NEXT: v_writelane_b32 v31, s46, 14
-; GFX8-NEXT: v_cmp_eq_u32_e64 s[24:25], 1, v0
-; GFX8-NEXT: v_and_b32_e32 v0, 1, v12
; GFX8-NEXT: v_writelane_b32 v31, s47, 15
-; GFX8-NEXT: v_cmp_eq_u32_e64 s[26:27], 1, v0
-; GFX8-NEXT: v_and_b32_e32 v0, 1, v13
; GFX8-NEXT: v_writelane_b32 v31, s48, 16
-; GFX8-NEXT: v_cmp_eq_u32_e64 s[28:29], 1, v0
-; GFX8-NEXT: v_and_b32_e32 v0, 1, v14
; GFX8-NEXT: v_writelane_b32 v31, s49, 17
-; GFX8-NEXT: v_cmp_eq_u32_e64 s[30:31], 1, v0
-; GFX8-NEXT: v_and_b32_e32 v0, 1, v15
; GFX8-NEXT: v_writelane_b32 v31, s50, 18
-; GFX8-NEXT: v_cmp_eq_u32_e64 s[34:35], 1, v0
-; GFX8-NEXT: v_and_b32_e32 v0, 1, v16
; GFX8-NEXT: v_writelane_b32 v31, s51, 19
-; GFX8-NEXT: v_cmp_eq_u32_e64 s[36:37], 1, v0
-; GFX8-NEXT: v_and_b32_e32 v0, 1, v17
; GFX8-NEXT: v_writelane_b32 v31, s52, 20
-; GFX8-NEXT: v_cmp_eq_u32_e64 s[38:39], 1, v0
-; GFX8-NEXT: v_and_b32_e32 v0, 1, v18
; GFX8-NEXT: v_writelane_b32 v31, s53, 21
-; GFX8-NEXT: v_cmp_eq_u32_e64 s[40:41], 1, v0
-; GFX8-NEXT: v_and_b32_e32 v0, 1, v19
; GFX8-NEXT: v_writelane_b32 v31, s54, 22
-; GFX8-NEXT: v_cmp_eq_u32_e64 s[42:43], 1, v0
-; GFX8-NEXT: v_and_b32_e32 v0, 1, v20
; GFX8-NEXT: v_writelane_b32 v31, s55, 23
-; GFX8-NEXT: v_cmp_eq_u32_e64 s[44:45], 1, v0
-; GFX8-NEXT: v_and_b32_e32 v0, 1, v21
; GFX8-NEXT: v_writelane_b32 v31, s56, 24
-; GFX8-NEXT: v_cmp_eq_u32_e64 s[46:47], 1, v0
-; GFX8-NEXT: v_and_b32_e32 v0, 1, v22
; GFX8-NEXT: v_writelane_b32 v31, s57, 25
-; GFX8-NEXT: v_cmp_eq_u32_e64 s[48:49], 1, v0
-; GFX8-NEXT: v_and_b32_e32 v0, 1, v23
; GFX8-NEXT: v_writelane_b32 v31, s58, 26
-; GFX8-NEXT: v_cmp_eq_u32_e64 s[50:51], 1, v0
-; GFX8-NEXT: v_and_b32_e32 v0, 1, v24
; GFX8-NEXT: v_writelane_b32 v31, s59, 27
-; GFX8-NEXT: v_cmp_eq_u32_e64 s[52:53], 1, v0
-; GFX8-NEXT: v_and_b32_e32 v0, 1, v25
; GFX8-NEXT: v_writelane_b32 v31, s60, 28
-; GFX8-NEXT: v_cmp_eq_u32_e64 s[54:55], 1, v0
-; GFX8-NEXT: v_and_b32_e32 v0, 1, v26
; GFX8-NEXT: v_writelane_b32 v31, s61, 29
-; GFX8-NEXT: v_cmp_eq_u32_e64 s[56:57], 1, v0
-; GFX8-NEXT: v_and_b32_e32 v0, 1, v27
; GFX8-NEXT: v_writelane_b32 v31, s62, 30
-; GFX8-NEXT: v_cmp_eq_u32_e64 s[58:59], 1, v0
-; GFX8-NEXT: v_and_b32_e32 v0, 1, v28
; GFX8-NEXT: v_writelane_b32 v31, s63, 31
-; GFX8-NEXT: v_cmp_eq_u32_e64 s[60:61], 1, v0
-; GFX8-NEXT: v_and_b32_e32 v0, 1, v29
; GFX8-NEXT: v_writelane_b32 v31, s64, 32
-; GFX8-NEXT: v_cmp_eq_u32_e64 s[62:63], 1, v0
-; GFX8-NEXT: v_and_b32_e32 v0, 1, v30
+; GFX8-NEXT: v_and_b32_e32 v8, 1, v8
+; GFX8-NEXT: v_and_b32_e32 v7, 1, v7
; GFX8-NEXT: v_writelane_b32 v31, s65, 33
-; GFX8-NEXT: v_cmp_eq_u32_e64 s[64:65], 1, v0
-; GFX8-NEXT: buffer_load_ushort v0, off, s[0:3], s32
+; GFX8-NEXT: v_cmp_eq_u32_e64 s[50:51], 1, v8
+; GFX8-NEXT: v_cmp_eq_u32_e64 s[52:53], 1, v7
+; GFX8-NEXT: buffer_load_dword v7, off, s[0:3], s32 offset:84
+; GFX8-NEXT: buffer_load_dword v8, off, s[0:3], s32 offset:20
+; GFX8-NEXT: v_and_b32_e32 v2, 1, v2
+; GFX8-NEXT: v_and_b32_e32 v1, 1, v1
; GFX8-NEXT: v_writelane_b32 v31, s66, 34
-; GFX8-NEXT: v_writelane_b32 v31, s67, 35
-; GFX8-NEXT: s_waitcnt vmcnt(0)
+; GFX8-NEXT: v_and_b32_e32 v3, 1, v3
+; GFX8-NEXT: v_cmp_eq_u32_e64 s[62:63], 1, v2
+; GFX8-NEXT: v_cmp_eq_u32_e64 s[64:65], 1, v1
; GFX8-NEXT: v_and_b32_e32 v0, 1, v0
+; GFX8-NEXT: v_writelane_b32 v31, s67, 35
+; GFX8-NEXT: v_and_b32_e32 v6, 1, v6
+; GFX8-NEXT: v_and_b32_e32 v5, 1, v5
+; GFX8-NEXT: v_and_b32_e32 v4, 1, v4
+; GFX8-NEXT: v_cmp_eq_u32_e64 s[60:61], 1, v3
; GFX8-NEXT: v_cmp_eq_u32_e64 s[66:67], 1, v0
-; GFX8-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:68
-; GFX8-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:4
-; GFX8-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:72
-; GFX8-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:8
-; GFX8-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:76
-; GFX8-NEXT: buffer_load_dword v5, off, s[0:3], s32 offset:12
-; GFX8-NEXT: buffer_load_dword v6, off, s[0:3], s32 offset:80
-; GFX8-NEXT: buffer_load_dword v7, off, s[0:3], s32 offset:16
-; GFX8-NEXT: buffer_load_dword v8, off, s[0:3], s32 offset:84
-; GFX8-NEXT: buffer_load_dword v9, off, s[0:3], s32 offset:20
-; GFX8-NEXT: buffer_load_dword v10, off, s[0:3], s32 offset:88
-; GFX8-NEXT: buffer_load_dword v11, off, s[0:3], s32 offset:24
-; GFX8-NEXT: buffer_load_dword v12, off, s[0:3], s32 offset:92
-; GFX8-NEXT: buffer_load_dword v13, off, s[0:3], s32 offset:28
-; GFX8-NEXT: buffer_load_dword v14, off, s[0:3], s32 offset:96
-; GFX8-NEXT: buffer_load_dword v15, off, s[0:3], s32 offset:32
-; GFX8-NEXT: buffer_load_dword v16, off, s[0:3], s32 offset:100
-; GFX8-NEXT: buffer_load_dword v17, off, s[0:3], s32 offset:36
-; GFX8-NEXT: buffer_load_dword v18, off, s[0:3], s32 offset:104
-; GFX8-NEXT: buffer_load_dword v19, off, s[0:3], s32 offset:40
-; GFX8-NEXT: buffer_load_dword v20, off, s[0:3], s32 offset:108
-; GFX8-NEXT: buffer_load_dword v21, off, s[0:3], s32 offset:44
-; GFX8-NEXT: buffer_load_dword v22, off, s[0:3], s32 offset:112
-; GFX8-NEXT: buffer_load_dword v23, off, s[0:3], s32 offset:48
-; GFX8-NEXT: buffer_load_dword v24, off, s[0:3], s32 offset:116
-; GFX8-NEXT: buffer_load_dword v25, off, s[0:3], s32 offset:52
-; GFX8-NEXT: buffer_load_dword v26, off, s[0:3], s32 offset:120
-; GFX8-NEXT: buffer_load_dword v27, off, s[0:3], s32 offset:56
-; GFX8-NEXT: buffer_load_dword v30, off, s[0:3], s32 offset:124
-; GFX8-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:60
-; GFX8-NEXT: buffer_load_dword v29, off, s[0:3], s32 offset:128
-; GFX8-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:64
-; GFX8-NEXT: s_waitcnt vmcnt(1)
-; GFX8-NEXT: v_lshrrev_b32_e32 v34, 16, v29
-; GFX8-NEXT: s_waitcnt vmcnt(0)
-; GFX8-NEXT: v_lshrrev_b32_e32 v28, 16, v33
-; GFX8-NEXT: v_cndmask_b32_e64 v28, v34, v28, s[66:67]
-; GFX8-NEXT: v_cndmask_b32_e64 v29, v29, v33, s[64:65]
-; GFX8-NEXT: v_lshrrev_b32_e32 v33, 16, v32
-; GFX8-NEXT: v_lshrrev_b32_e32 v34, 16, v30
-; GFX8-NEXT: v_cndmask_b32_e64 v33, v34, v33, s[62:63]
-; GFX8-NEXT: v_cndmask_b32_e64 v30, v30, v32, s[60:61]
-; GFX8-NEXT: v_lshrrev_b32_e32 v32, 16, v27
-; GFX8-NEXT: v_lshrrev_b32_e32 v34, 16, v26
-; GFX8-NEXT: v_cndmask_b32_e64 v32, v34, v32, s[58:59]
-; GFX8-NEXT: v_cndmask_b32_e64 v26, v26, v27, s[56:57]
-; GFX8-NEXT: v_lshrrev_b32_e32 v27, 16, v25
-; GFX8-NEXT: v_lshrrev_b32_e32 v34, 16, v24
-; GFX8-NEXT: v_cndmask_b32_e64 v27, v34, v27, s[54:55]
-; GFX8-NEXT: v_cndmask_b32_e64 v24, v24, v25, s[52:53]
-; GFX8-NEXT: v_lshrrev_b32_e32 v25, 16, v23
-; GFX8-NEXT: v_lshrrev_b32_e32 v34, 16, v22
-; GFX8-NEXT: v_cndmask_b32_e64 v25, v34, v25, s[50:51]
-; GFX8-NEXT: v_cndmask_b32_e64 v22, v22, v23, s[48:49]
-; GFX8-NEXT: v_lshrrev_b32_e32 v23, 16, v21
-; GFX8-NEXT: v_lshrrev_b32_e32 v34, 16, v20
-; GFX8-NEXT: v_cndmask_b32_e64 v23, v34, v23, s[46:47]
-; GFX8-NEXT: v_cndmask_b32_e64 v20, v20, v21, s[44:45]
-; GFX8-NEXT: v_lshrrev_b32_e32 v21, 16, v19
-; GFX8-NEXT: v_lshrrev_b32_e32 v34, 16, v18
-; GFX8-NEXT: v_cndmask_b32_e64 v21, v34, v21, s[42:43]
-; GFX8-NEXT: v_cndmask_b32_e64 v18, v18, v19, s[40:41]
-; GFX8-NEXT: v_lshrrev_b32_e32 v19, 16, v17
-; GFX8-NEXT: v_lshrrev_b32_e32 v34, 16, v16
-; GFX8-NEXT: v_cndmask_b32_e64 v19, v34, v19, s[38:39]
-; GFX8-NEXT: v_cndmask_b32_e64 v16, v16, v17, s[36:37]
-; GFX8-NEXT: v_lshrrev_b32_e32 v17, 16, v15
-; GFX8-NEXT: v_lshrrev_b32_e32 v34, 16, v14
-; GFX8-NEXT: v_cndmask_b32_e64 v17, v34, v17, s[34:35]
-; GFX8-NEXT: v_cndmask_b32_e64 v14, v14, v15, s[30:31]
-; GFX8-NEXT: v_lshrrev_b32_e32 v15, 16, v13
-; GFX8-NEXT: v_lshrrev_b32_e32 v34, 16, v12
-; GFX8-NEXT: v_cndmask_b32_e64 v15, v34, v15, s[28:29]
-; GFX8-NEXT: v_cndmask_b32_e64 v12, v12, v13, s[26:27]
-; GFX8-NEXT: v_lshrrev_b32_e32 v13, 16, v11
-; GFX8-NEXT: v_lshrrev_b32_e32 v34, 16, v10
-; GFX8-NEXT: v_cndmask_b32_e64 v13, v34, v13, s[24:25]
-; GFX8-NEXT: v_cndmask_b32_e64 v10, v10, v11, s[22:23]
-; GFX8-NEXT: v_lshrrev_b32_e32 v11, 16, v9
-; GFX8-NEXT: v_lshrrev_b32_e32 v34, 16, v8
-; GFX8-NEXT: v_cndmask_b32_e64 v11, v34, v11, s[20:21]
-; GFX8-NEXT: v_cndmask_b32_e64 v8, v8, v9, s[18:19]
-; GFX8-NEXT: v_lshrrev_b32_e32 v9, 16, v7
-; GFX8-NEXT: v_lshrrev_b32_e32 v34, 16, v6
-; GFX8-NEXT: v_cndmask_b32_e64 v9, v34, v9, s[16:17]
-; GFX8-NEXT: v_cndmask_b32_e64 v6, v6, v7, s[14:15]
-; GFX8-NEXT: v_lshrrev_b32_e32 v7, 16, v5
-; GFX8-NEXT: v_lshrrev_b32_e32 v34, 16, v4
-; GFX8-NEXT: v_cndmask_b32_e64 v7, v34, v7, s[12:13]
-; GFX8-NEXT: v_cndmask_b32_e64 v4, v4, v5, s[10:11]
-; GFX8-NEXT: v_lshrrev_b32_e32 v5, 16, v3
-; GFX8-NEXT: v_lshrrev_b32_e32 v34, 16, v2
-; GFX8-NEXT: v_cndmask_b32_e64 v5, v34, v5, s[8:9]
-; GFX8-NEXT: v_cndmask_b32_e64 v2, v2, v3, s[6:7]
-; GFX8-NEXT: v_lshrrev_b32_e32 v3, 16, v1
-; GFX8-NEXT: v_lshrrev_b32_e32 v34, 16, v0
-; GFX8-NEXT: v_cndmask_b32_e64 v3, v34, v3, s[4:5]
-; GFX8-NEXT: v_cndmask_b32_e32 v0, v0, v1, vcc
-; GFX8-NEXT: v_lshlrev_b32_e32 v1, 16, v3
+; GFX8-NEXT: v_cmp_eq_u32_e64 s[54:55], 1, v6
+; GFX8-NEXT: v_cmp_eq_u32_e64 s[56:57], 1, v5
+; GFX8-NEXT: v_cmp_eq_u32_e64 s[58:59], 1, v4
+; GFX8-NEXT: buffer_load_dword v5, off, s[0:3], s32 offset:88
+; GFX8-NEXT: buffer_load_dword v6, off, s[0:3], s32 offset:24
+; GFX8-NEXT: v_and_b32_e32 v10, 1, v10
+; GFX8-NEXT: v_and_b32_e32 v9, 1, v9
+; GFX8-NEXT: v_and_b32_e32 v11, 1, v11
+; GFX8-NEXT: v_cmp_eq_u32_e64 s[46:47], 1, v10
+; GFX8-NEXT: v_cmp_eq_u32_e64 s[48:49], 1, v9
+; GFX8-NEXT: buffer_load_dword v9, off, s[0:3], s32 offset:92
+; GFX8-NEXT: buffer_load_dword v10, off, s[0:3], s32 offset:28
+; GFX8-NEXT: v_and_b32_e32 v25, 1, v25
+; GFX8-NEXT: v_and_b32_e32 v24, 1, v24
+; GFX8-NEXT: v_and_b32_e32 v23, 1, v23
+; GFX8-NEXT: v_and_b32_e32 v22, 1, v22
+; GFX8-NEXT: v_and_b32_e32 v19, 1, v19
+; GFX8-NEXT: s_waitcnt vmcnt(14)
+; GFX8-NEXT: v_lshrrev_b32_e32 v2, 16, v18
+; GFX8-NEXT: s_waitcnt vmcnt(13)
+; GFX8-NEXT: v_lshrrev_b32_e32 v1, 16, v21
+; GFX8-NEXT: v_cndmask_b32_e64 v1, v2, v1, s[64:65]
+; GFX8-NEXT: v_cndmask_b32_e64 v0, v18, v21, s[66:67]
+; GFX8-NEXT: buffer_load_dword v18, off, s[0:3], s32 offset:36
+; GFX8-NEXT: v_lshlrev_b32_e32 v1, 16, v1
+; GFX8-NEXT: s_waitcnt vmcnt(13)
+; GFX8-NEXT: v_lshrrev_b32_e32 v3, 16, v16
+; GFX8-NEXT: s_waitcnt vmcnt(12)
+; GFX8-NEXT: v_lshrrev_b32_e32 v2, 16, v17
+; GFX8-NEXT: v_cndmask_b32_e64 v2, v3, v2, s[60:61]
; GFX8-NEXT: v_or_b32_sdwa v0, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
-; GFX8-NEXT: v_lshlrev_b32_e32 v1, 16, v5
-; GFX8-NEXT: v_or_b32_sdwa v1, v2, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
-; GFX8-NEXT: v_lshlrev_b32_e32 v2, 16, v7
-; GFX8-NEXT: v_lshlrev_b32_e32 v3, 16, v9
-; GFX8-NEXT: v_or_b32_sdwa v2, v4, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
-; GFX8-NEXT: v_or_b32_sdwa v3, v6, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
-; GFX8-NEXT: v_lshlrev_b32_e32 v4, 16, v11
-; GFX8-NEXT: v_lshlrev_b32_e32 v5, 16, v13
-; GFX8-NEXT: v_lshlrev_b32_e32 v6, 16, v15
-; GFX8-NEXT: v_lshlrev_b32_e32 v7, 16, v17
-; GFX8-NEXT: v_or_b32_sdwa v4, v8, v4 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
-; GFX8-NEXT: v_or_b32_sdwa v5, v10, v5 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
-; GFX8-NEXT: v_or_b32_sdwa v6, v12, v6 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
-; GFX8-NEXT: v_or_b32_sdwa v7, v14, v7 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
-; GFX8-NEXT: v_lshlrev_b32_e32 v8, 16, v19
-; GFX8-NEXT: v_lshlrev_b32_e32 v9, 16, v21
-; GFX8-NEXT: v_lshlrev_b32_e32 v10, 16, v23
-; GFX8-NEXT: v_lshlrev_b32_e32 v11, 16, v25
-; GFX8-NEXT: v_lshlrev_b32_e32 v12, 16, v27
-; GFX8-NEXT: v_lshlrev_b32_e32 v13, 16, v32
-; GFX8-NEXT: v_lshlrev_b32_e32 v14, 16, v33
-; GFX8-NEXT: v_lshlrev_b32_e32 v15, 16, v28
-; GFX8-NEXT: v_or_b32_sdwa v8, v16, v8 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
-; GFX8-NEXT: v_or_b32_sdwa v9, v18, v9 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
-; GFX8-NEXT: v_or_b32_sdwa v10, v20, v10 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
-; GFX8-NEXT: v_or_b32_sdwa v11, v22, v11 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
-; GFX8-NEXT: v_or_b32_sdwa v12, v24, v12 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
-; GFX8-NEXT: v_or_b32_sdwa v13, v26, v13 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
-; GFX8-NEXT: v_or_b32_sdwa v14, v30, v14 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
-; GFX8-NEXT: v_or_b32_sdwa v15, v29, v15 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
+; GFX8-NEXT: v_cndmask_b32_e64 v1, v16, v17, s[62:63]
+; GFX8-NEXT: buffer_load_dword v16, off, s[0:3], s32 offset:40
+; GFX8-NEXT: buffer_load_dword v21, off, s[0:3], s32 offset:44
+; GFX8-NEXT: buffer_load_dword v17, off, s[0:3], s32 offset:56
+; GFX8-NEXT: s_waitcnt vmcnt(13)
+; GFX8-NEXT: v_lshrrev_b32_e32 v3, 16, v15
+; GFX8-NEXT: v_lshrrev_b32_e32 v4, 16, v14
+; GFX8-NEXT: v_lshlrev_b32_e32 v2, 16, v2
+; GFX8-NEXT: v_cndmask_b32_e64 v3, v4, v3, s[56:57]
+; GFX8-NEXT: v_or_b32_sdwa v1, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
+; GFX8-NEXT: v_cndmask_b32_e64 v2, v14, v15, s[58:59]
+; GFX8-NEXT: v_lshlrev_b32_e32 v3, 16, v3
+; GFX8-NEXT: v_or_b32_sdwa v2, v2, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
+; GFX8-NEXT: s_waitcnt vmcnt(11)
+; GFX8-NEXT: v_cndmask_b32_e64 v3, v12, v13, s[54:55]
+; GFX8-NEXT: v_lshrrev_b32_e32 v4, 16, v13
+; GFX8-NEXT: v_lshrrev_b32_e32 v12, 16, v12
+; GFX8-NEXT: v_cmp_eq_u32_e64 s[44:45], 1, v11
+; GFX8-NEXT: s_waitcnt vmcnt(10)
+; GFX8-NEXT: v_and_b32_e32 v11, 1, v20
+; GFX8-NEXT: v_cndmask_b32_e64 v4, v12, v4, s[52:53]
+; GFX8-NEXT: buffer_load_dword v15, off, s[0:3], s32 offset:128
+; GFX8-NEXT: buffer_load_dword v12, off, s[0:3], s32 offset:116
+; GFX8-NEXT: v_cmp_eq_u32_e64 s[12:13], 1, v25
+; GFX8-NEXT: v_cmp_eq_u32_e64 s[14:15], 1, v24
+; GFX8-NEXT: v_cmp_eq_u32_e64 s[16:17], 1, v23
+; GFX8-NEXT: v_cmp_eq_u32_e64 s[20:21], 1, v22
+; GFX8-NEXT: v_cmp_eq_u32_e64 s[26:27], 1, v19
+; GFX8-NEXT: v_cmp_eq_u32_e64 s[18:19], 1, v11
+; GFX8-NEXT: buffer_load_dword v19, off, s[0:3], s32 offset:112
+; GFX8-NEXT: buffer_load_dword v11, off, s[0:3], s32 offset:108
+; GFX8-NEXT: buffer_load_dword v22, off, s[0:3], s32 offset:96
+; GFX8-NEXT: buffer_load_dword v23, off, s[0:3], s32 offset:32
+; GFX8-NEXT: buffer_load_dword v24, off, s[0:3], s32 offset:104
+; GFX8-NEXT: buffer_load_dword v25, off, s[0:3], s32 offset:100
+; GFX8-NEXT: buffer_load_dword v20, off, s[0:3], s32 offset:48
+; GFX8-NEXT: v_lshlrev_b32_e32 v4, 16, v4
+; GFX8-NEXT: v_or_b32_sdwa v3, v3, v4 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
+; GFX8-NEXT: buffer_load_dword v13, off, s[0:3], s32 offset:120
+; GFX8-NEXT: buffer_load_dword v14, off, s[0:3], s32 offset:124
+; GFX8-NEXT: v_and_b32_e32 v26, 1, v26
+; GFX8-NEXT: v_and_b32_e32 v28, 1, v28
+; GFX8-NEXT: v_and_b32_e32 v27, 1, v27
+; GFX8-NEXT: v_cmp_eq_u32_e64 s[10:11], 1, v26
+; GFX8-NEXT: v_and_b32_e32 v29, 1, v29
+; GFX8-NEXT: v_cmp_eq_u32_e64 s[6:7], 1, v28
+; GFX8-NEXT: v_cmp_eq_u32_e64 s[8:9], 1, v27
+; GFX8-NEXT: v_cmp_eq_u32_e64 s[4:5], 1, v29
+; GFX8-NEXT: v_and_b32_e32 v30, 1, v30
+; GFX8-NEXT: s_waitcnt vmcnt(14)
+; GFX8-NEXT: v_cndmask_b32_e64 v4, v7, v8, s[50:51]
+; GFX8-NEXT: v_lshrrev_b32_e32 v8, 16, v8
+; GFX8-NEXT: v_lshrrev_b32_e32 v7, 16, v7
+; GFX8-NEXT: v_cndmask_b32_e64 v7, v7, v8, s[48:49]
+; GFX8-NEXT: v_lshlrev_b32_e32 v7, 16, v7
+; GFX8-NEXT: v_or_b32_sdwa v4, v4, v7 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
+; GFX8-NEXT: v_cmp_eq_u32_e32 vcc, 1, v30
; GFX8-NEXT: v_readlane_b32 s67, v31, 35
; GFX8-NEXT: v_readlane_b32 s66, v31, 34
; GFX8-NEXT: v_readlane_b32 s65, v31, 33
@@ -28794,6 +28403,18 @@ define <32 x bfloat> @v_vselect_v32bf16(<32 x i1> %cond, <32 x bfloat> %a, <32 x
; GFX8-NEXT: v_readlane_b32 s60, v31, 28
; GFX8-NEXT: v_readlane_b32 s59, v31, 27
; GFX8-NEXT: v_readlane_b32 s58, v31, 26
+; GFX8-NEXT: v_cndmask_b32_e64 v7, v5, v6, s[46:47]
+; GFX8-NEXT: v_lshrrev_b32_e32 v6, 16, v6
+; GFX8-NEXT: v_lshrrev_b32_e32 v5, 16, v5
+; GFX8-NEXT: v_cndmask_b32_e64 v5, v5, v6, s[44:45]
+; GFX8-NEXT: v_lshlrev_b32_e32 v5, 16, v5
+; GFX8-NEXT: v_or_b32_sdwa v5, v7, v5 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
+; GFX8-NEXT: v_lshrrev_b32_e32 v7, 16, v10
+; GFX8-NEXT: v_lshrrev_b32_e32 v8, 16, v9
+; GFX8-NEXT: v_cndmask_b32_e64 v6, v9, v10, s[42:43]
+; GFX8-NEXT: v_cndmask_b32_e64 v7, v8, v7, s[40:41]
+; GFX8-NEXT: v_lshlrev_b32_e32 v7, 16, v7
+; GFX8-NEXT: v_or_b32_sdwa v6, v6, v7 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
; GFX8-NEXT: v_readlane_b32 s57, v31, 25
; GFX8-NEXT: v_readlane_b32 s56, v31, 24
; GFX8-NEXT: v_readlane_b32 s55, v31, 23
@@ -28812,6 +28433,43 @@ define <32 x bfloat> @v_vselect_v32bf16(<32 x i1> %cond, <32 x bfloat> %a, <32 x
; GFX8-NEXT: v_readlane_b32 s42, v31, 10
; GFX8-NEXT: v_readlane_b32 s41, v31, 9
; GFX8-NEXT: v_readlane_b32 s40, v31, 8
+; GFX8-NEXT: s_waitcnt vmcnt(6)
+; GFX8-NEXT: v_lshrrev_b32_e32 v9, 16, v22
+; GFX8-NEXT: s_waitcnt vmcnt(5)
+; GFX8-NEXT: v_lshrrev_b32_e32 v8, 16, v23
+; GFX8-NEXT: v_cndmask_b32_e64 v8, v9, v8, s[36:37]
+; GFX8-NEXT: v_lshrrev_b32_e32 v9, 16, v18
+; GFX8-NEXT: s_waitcnt vmcnt(3)
+; GFX8-NEXT: v_lshrrev_b32_e32 v10, 16, v25
+; GFX8-NEXT: v_cndmask_b32_e64 v7, v22, v23, s[38:39]
+; GFX8-NEXT: v_lshlrev_b32_e32 v8, 16, v8
+; GFX8-NEXT: v_cndmask_b32_e64 v9, v10, v9, s[30:31]
+; GFX8-NEXT: v_or_b32_sdwa v7, v7, v8 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
+; GFX8-NEXT: v_cndmask_b32_e64 v8, v25, v18, s[34:35]
+; GFX8-NEXT: v_lshlrev_b32_e32 v9, 16, v9
+; GFX8-NEXT: v_or_b32_sdwa v8, v8, v9 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
+; GFX8-NEXT: v_cndmask_b32_e64 v9, v24, v16, s[28:29]
+; GFX8-NEXT: v_lshrrev_b32_e32 v10, 16, v16
+; GFX8-NEXT: v_lshrrev_b32_e32 v16, 16, v24
+; GFX8-NEXT: v_cndmask_b32_e64 v10, v16, v10, s[26:27]
+; GFX8-NEXT: buffer_load_dword v18, off, s[0:3], s32 offset:52
+; GFX8-NEXT: v_lshlrev_b32_e32 v10, 16, v10
+; GFX8-NEXT: v_or_b32_sdwa v9, v9, v10 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
+; GFX8-NEXT: v_cndmask_b32_e64 v10, v11, v21, s[24:25]
+; GFX8-NEXT: v_lshrrev_b32_e32 v16, 16, v21
+; GFX8-NEXT: v_lshrrev_b32_e32 v11, 16, v11
+; GFX8-NEXT: v_cndmask_b32_e64 v11, v11, v16, s[22:23]
+; GFX8-NEXT: buffer_load_dword v16, off, s[0:3], s32 offset:60
+; GFX8-NEXT: v_lshlrev_b32_e32 v11, 16, v11
+; GFX8-NEXT: v_or_b32_sdwa v10, v10, v11 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
+; GFX8-NEXT: s_waitcnt vmcnt(4)
+; GFX8-NEXT: v_cndmask_b32_e64 v11, v19, v20, s[20:21]
+; GFX8-NEXT: v_lshrrev_b32_e32 v20, 16, v20
+; GFX8-NEXT: v_lshrrev_b32_e32 v19, 16, v19
+; GFX8-NEXT: v_cndmask_b32_e64 v19, v19, v20, s[16:17]
+; GFX8-NEXT: buffer_load_dword v20, off, s[0:3], s32 offset:64
+; GFX8-NEXT: v_lshlrev_b32_e32 v19, 16, v19
+; GFX8-NEXT: v_or_b32_sdwa v11, v11, v19 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
; GFX8-NEXT: v_readlane_b32 s39, v31, 7
; GFX8-NEXT: v_readlane_b32 s38, v31, 6
; GFX8-NEXT: v_readlane_b32 s37, v31, 5
@@ -28820,6 +28478,33 @@ define <32 x bfloat> @v_vselect_v32bf16(<32 x i1> %cond, <32 x bfloat> %a, <32 x
; GFX8-NEXT: v_readlane_b32 s34, v31, 2
; GFX8-NEXT: v_readlane_b32 s31, v31, 1
; GFX8-NEXT: v_readlane_b32 s30, v31, 0
+; GFX8-NEXT: s_waitcnt vmcnt(2)
+; GFX8-NEXT: v_cndmask_b32_e64 v19, v12, v18, s[14:15]
+; GFX8-NEXT: v_lshrrev_b32_e32 v18, 16, v18
+; GFX8-NEXT: v_lshrrev_b32_e32 v12, 16, v12
+; GFX8-NEXT: v_cndmask_b32_e64 v12, v12, v18, s[12:13]
+; GFX8-NEXT: v_cndmask_b32_e64 v18, v13, v17, s[10:11]
+; GFX8-NEXT: v_lshrrev_b32_e32 v17, 16, v17
+; GFX8-NEXT: v_lshrrev_b32_e32 v13, 16, v13
+; GFX8-NEXT: v_cndmask_b32_e64 v13, v13, v17, s[8:9]
+; GFX8-NEXT: s_waitcnt vmcnt(1)
+; GFX8-NEXT: v_cndmask_b32_e64 v17, v14, v16, s[6:7]
+; GFX8-NEXT: v_lshrrev_b32_e32 v16, 16, v16
+; GFX8-NEXT: v_lshrrev_b32_e32 v14, 16, v14
+; GFX8-NEXT: v_cndmask_b32_e64 v14, v14, v16, s[4:5]
+; GFX8-NEXT: v_lshlrev_b32_e32 v14, 16, v14
+; GFX8-NEXT: v_or_b32_sdwa v14, v17, v14 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
+; GFX8-NEXT: s_waitcnt vmcnt(0)
+; GFX8-NEXT: v_cndmask_b32_e32 v16, v15, v20, vcc
+; GFX8-NEXT: v_lshrrev_b32_e32 v17, 16, v20
+; GFX8-NEXT: v_lshrrev_b32_e32 v15, 16, v15
+; GFX8-NEXT: v_cndmask_b32_e64 v15, v15, v17, s[18:19]
+; GFX8-NEXT: v_lshlrev_b32_e32 v12, 16, v12
+; GFX8-NEXT: v_lshlrev_b32_e32 v13, 16, v13
+; GFX8-NEXT: v_lshlrev_b32_e32 v15, 16, v15
+; GFX8-NEXT: v_or_b32_sdwa v12, v19, v12 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
+; GFX8-NEXT: v_or_b32_sdwa v13, v18, v13 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
+; GFX8-NEXT: v_or_b32_sdwa v15, v16, v15 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
; GFX8-NEXT: s_xor_saveexec_b64 s[4:5], -1
; GFX8-NEXT: buffer_load_dword v31, off, s[0:3], s32 offset:132 ; 4-byte Folded Reload
; GFX8-NEXT: s_mov_b64 exec, s[4:5]
@@ -28835,223 +28520,169 @@ define <32 x bfloat> @v_vselect_v32bf16(<32 x i1> %cond, <32 x bfloat> %a, <32 x
; GFX9-NEXT: v_writelane_b32 v31, s30, 0
; GFX9-NEXT: v_writelane_b32 v31, s31, 1
; GFX9-NEXT: v_writelane_b32 v31, s34, 2
-; GFX9-NEXT: v_and_b32_e32 v0, 1, v0
; GFX9-NEXT: v_writelane_b32 v31, s35, 3
-; GFX9-NEXT: v_cmp_eq_u32_e64 s[4:5], 1, v0
-; GFX9-NEXT: v_and_b32_e32 v0, 1, v3
; GFX9-NEXT: v_writelane_b32 v31, s36, 4
-; GFX9-NEXT: v_cmp_eq_u32_e64 s[6:7], 1, v0
-; GFX9-NEXT: v_and_b32_e32 v0, 1, v2
; GFX9-NEXT: v_writelane_b32 v31, s37, 5
-; GFX9-NEXT: v_cmp_eq_u32_e64 s[8:9], 1, v0
-; GFX9-NEXT: v_and_b32_e32 v0, 1, v5
; GFX9-NEXT: v_writelane_b32 v31, s38, 6
-; GFX9-NEXT: v_cmp_eq_u32_e64 s[10:11], 1, v0
-; GFX9-NEXT: v_and_b32_e32 v0, 1, v4
; GFX9-NEXT: v_writelane_b32 v31, s39, 7
-; GFX9-NEXT: v_cmp_eq_u32_e64 s[12:13], 1, v0
-; GFX9-NEXT: v_and_b32_e32 v0, 1, v7
; GFX9-NEXT: v_writelane_b32 v31, s40, 8
-; GFX9-NEXT: v_cmp_eq_u32_e64 s[14:15], 1, v0
-; GFX9-NEXT: v_and_b32_e32 v0, 1, v6
; GFX9-NEXT: v_writelane_b32 v31, s41, 9
-; GFX9-NEXT: v_cmp_eq_u32_e64 s[16:17], 1, v0
-; GFX9-NEXT: v_and_b32_e32 v0, 1, v9
; GFX9-NEXT: v_writelane_b32 v31, s42, 10
-; GFX9-NEXT: v_cmp_eq_u32_e64 s[18:19], 1, v0
-; GFX9-NEXT: v_and_b32_e32 v0, 1, v8
; GFX9-NEXT: v_writelane_b32 v31, s43, 11
-; GFX9-NEXT: v_cmp_eq_u32_e64 s[20:21], 1, v0
-; GFX9-NEXT: v_and_b32_e32 v0, 1, v11
; GFX9-NEXT: v_writelane_b32 v31, s44, 12
-; GFX9-NEXT: v_cmp_eq_u32_e64 s[22:23], 1, v0
-; GFX9-NEXT: v_and_b32_e32 v0, 1, v10
; GFX9-NEXT: v_writelane_b32 v31, s45, 13
-; GFX9-NEXT: v_cmp_eq_u32_e64 s[24:25], 1, v0
-; GFX9-NEXT: v_and_b32_e32 v0, 1, v13
; GFX9-NEXT: v_writelane_b32 v31, s46, 14
-; GFX9-NEXT: v_cmp_eq_u32_e64 s[26:27], 1, v0
-; GFX9-NEXT: v_and_b32_e32 v0, 1, v12
; GFX9-NEXT: v_writelane_b32 v31, s47, 15
-; GFX9-NEXT: v_cmp_eq_u32_e64 s[28:29], 1, v0
-; GFX9-NEXT: v_and_b32_e32 v0, 1, v15
; GFX9-NEXT: v_writelane_b32 v31, s48, 16
-; GFX9-NEXT: v_cmp_eq_u32_e64 s[30:31], 1, v0
-; GFX9-NEXT: v_and_b32_e32 v0, 1, v14
; GFX9-NEXT: v_writelane_b32 v31, s49, 17
-; GFX9-NEXT: v_cmp_eq_u32_e64 s[34:35], 1, v0
-; GFX9-NEXT: v_and_b32_e32 v0, 1, v17
; GFX9-NEXT: v_writelane_b32 v31, s50, 18
-; GFX9-NEXT: v_cmp_eq_u32_e64 s[36:37], 1, v0
-; GFX9-NEXT: v_and_b32_e32 v0, 1, v16
; GFX9-NEXT: v_writelane_b32 v31, s51, 19
-; GFX9-NEXT: v_cmp_eq_u32_e64 s[38:39], 1, v0
-; GFX9-NEXT: v_and_b32_e32 v0, 1, v19
+; GFX9-NEXT: v_and_b32_e32 v21, 1, v21
+; GFX9-NEXT: v_and_b32_e32 v18, 1, v18
; GFX9-NEXT: v_writelane_b32 v31, s52, 20
-; GFX9-NEXT: v_cmp_eq_u32_e64 s[40:41], 1, v0
-; GFX9-NEXT: v_and_b32_e32 v0, 1, v18
+; GFX9-NEXT: v_cmp_eq_u32_e64 s[22:23], 1, v21
+; GFX9-NEXT: v_cmp_eq_u32_e64 s[28:29], 1, v18
+; GFX9-NEXT: buffer_load_dword v18, off, s[0:3], s32 offset:68
+; GFX9-NEXT: buffer_load_dword v21, off, s[0:3], s32 offset:4
+; GFX9-NEXT: v_and_b32_e32 v17, 1, v17
+; GFX9-NEXT: v_and_b32_e32 v16, 1, v16
; GFX9-NEXT: v_writelane_b32 v31, s53, 21
-; GFX9-NEXT: v_cmp_eq_u32_e64 s[42:43], 1, v0
-; GFX9-NEXT: v_and_b32_e32 v0, 1, v21
+; GFX9-NEXT: v_cmp_eq_u32_e64 s[30:31], 1, v17
+; GFX9-NEXT: v_cmp_eq_u32_e64 s[34:35], 1, v16
+; GFX9-NEXT: buffer_load_dword v16, off, s[0:3], s32 offset:72
+; GFX9-NEXT: buffer_load_dword v17, off, s[0:3], s32 offset:8
; GFX9-NEXT: v_writelane_b32 v31, s54, 22
-; GFX9-NEXT: v_cmp_eq_u32_e64 s[44:45], 1, v0
-; GFX9-NEXT: v_and_b32_e32 v0, 1, v20
+; GFX9-NEXT: v_and_b32_e32 v15, 1, v15
+; GFX9-NEXT: v_and_b32_e32 v14, 1, v14
; GFX9-NEXT: v_writelane_b32 v31, s55, 23
-; GFX9-NEXT: v_cmp_eq_u32_e64 s[46:47], 1, v0
-; GFX9-NEXT: v_and_b32_e32 v0, 1, v23
+; GFX9-NEXT: v_cmp_eq_u32_e64 s[36:37], 1, v15
+; GFX9-NEXT: v_cmp_eq_u32_e64 s[38:39], 1, v14
+; GFX9-NEXT: buffer_load_dword v14, off, s[0:3], s32 offset:76
+; GFX9-NEXT: buffer_load_dword v15, off, s[0:3], s32 offset:12
; GFX9-NEXT: v_writelane_b32 v31, s56, 24
-; GFX9-NEXT: v_cmp_eq_u32_e64 s[48:49], 1, v0
-; GFX9-NEXT: v_and_b32_e32 v0, 1, v22
+; GFX9-NEXT: v_and_b32_e32 v13, 1, v13
+; GFX9-NEXT: v_and_b32_e32 v12, 1, v12
; GFX9-NEXT: v_writelane_b32 v31, s57, 25
-; GFX9-NEXT: v_cmp_eq_u32_e64 s[50:51], 1, v0
-; GFX9-NEXT: v_and_b32_e32 v0, 1, v25
+; GFX9-NEXT: v_cmp_eq_u32_e64 s[40:41], 1, v13
+; GFX9-NEXT: v_cmp_eq_u32_e64 s[42:43], 1, v12
+; GFX9-NEXT: buffer_load_dword v12, off, s[0:3], s32 offset:80
+; GFX9-NEXT: buffer_load_dword v13, off, s[0:3], s32 offset:16
; GFX9-NEXT: v_writelane_b32 v31, s58, 26
-; GFX9-NEXT: v_cmp_eq_u32_e64 s[52:53], 1, v0
-; GFX9-NEXT: v_and_b32_e32 v0, 1, v24
+; GFX9-NEXT: v_and_b32_e32 v5, 1, v5
+; GFX9-NEXT: v_and_b32_e32 v4, 1, v4
; GFX9-NEXT: v_writelane_b32 v31, s59, 27
-; GFX9-NEXT: v_cmp_eq_u32_e64 s[54:55], 1, v0
-; GFX9-NEXT: v_and_b32_e32 v0, 1, v27
+; GFX9-NEXT: v_cmp_eq_u32_e64 s[56:57], 1, v5
+; GFX9-NEXT: v_cmp_eq_u32_e64 s[58:59], 1, v4
+; GFX9-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:84
+; GFX9-NEXT: buffer_load_dword v5, off, s[0:3], s32 offset:20
+; GFX9-NEXT: v_and_b32_e32 v20, 1, v20
+; GFX9-NEXT: v_cmp_eq_u32_e64 s[24:25], 1, v20
+; GFX9-NEXT: buffer_load_ushort v20, off, s[0:3], s32
; GFX9-NEXT: v_writelane_b32 v31, s60, 28
-; GFX9-NEXT: v_cmp_eq_u32_e64 s[56:57], 1, v0
-; GFX9-NEXT: v_and_b32_e32 v0, 1, v26
; GFX9-NEXT: v_writelane_b32 v31, s61, 29
-; GFX9-NEXT: v_cmp_eq_u32_e64 s[58:59], 1, v0
-; GFX9-NEXT: v_and_b32_e32 v0, 1, v29
; GFX9-NEXT: v_writelane_b32 v31, s62, 30
-; GFX9-NEXT: v_cmp_eq_u32_e64 s[60:61], 1, v0
-; GFX9-NEXT: v_and_b32_e32 v0, 1, v28
; GFX9-NEXT: v_writelane_b32 v31, s63, 31
-; GFX9-NEXT: v_cmp_eq_u32_e64 s[62:63], 1, v0
-; GFX9-NEXT: buffer_load_ushort v0, off, s[0:3], s32
; GFX9-NEXT: v_writelane_b32 v31, s64, 32
; GFX9-NEXT: v_writelane_b32 v31, s65, 33
; GFX9-NEXT: v_writelane_b32 v31, s66, 34
+; GFX9-NEXT: v_and_b32_e32 v2, 1, v2
; GFX9-NEXT: v_and_b32_e32 v1, 1, v1
-; GFX9-NEXT: v_writelane_b32 v31, s67, 35
-; GFX9-NEXT: v_cmp_eq_u32_e32 vcc, 1, v1
-; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: v_and_b32_e32 v0, 1, v0
-; GFX9-NEXT: v_cmp_eq_u32_e64 s[64:65], 1, v0
-; GFX9-NEXT: v_and_b32_e32 v0, 1, v30
+; GFX9-NEXT: v_writelane_b32 v31, s67, 35
+; GFX9-NEXT: v_and_b32_e32 v3, 1, v3
+; GFX9-NEXT: v_cmp_eq_u32_e64 s[62:63], 1, v2
+; GFX9-NEXT: v_cmp_eq_u32_e64 s[64:65], 1, v1
; GFX9-NEXT: v_cmp_eq_u32_e64 s[66:67], 1, v0
-; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:68
-; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:4
-; GFX9-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:72
-; GFX9-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:8
-; GFX9-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:76
-; GFX9-NEXT: buffer_load_dword v5, off, s[0:3], s32 offset:12
-; GFX9-NEXT: buffer_load_dword v6, off, s[0:3], s32 offset:80
-; GFX9-NEXT: buffer_load_dword v7, off, s[0:3], s32 offset:16
-; GFX9-NEXT: buffer_load_dword v8, off, s[0:3], s32 offset:84
-; GFX9-NEXT: buffer_load_dword v9, off, s[0:3], s32 offset:20
-; GFX9-NEXT: buffer_load_dword v10, off, s[0:3], s32 offset:88
-; GFX9-NEXT: buffer_load_dword v11, off, s[0:3], s32 offset:24
-; GFX9-NEXT: buffer_load_dword v12, off, s[0:3], s32 offset:92
-; GFX9-NEXT: buffer_load_dword v13, off, s[0:3], s32 offset:28
-; GFX9-NEXT: buffer_load_dword v14, off, s[0:3], s32 offset:96
-; GFX9-NEXT: buffer_load_dword v15, off, s[0:3], s32 offset:32
-; GFX9-NEXT: buffer_load_dword v16, off, s[0:3], s32 offset:100
-; GFX9-NEXT: buffer_load_dword v17, off, s[0:3], s32 offset:36
-; GFX9-NEXT: buffer_load_dword v18, off, s[0:3], s32 offset:104
-; GFX9-NEXT: buffer_load_dword v19, off, s[0:3], s32 offset:40
-; GFX9-NEXT: buffer_load_dword v20, off, s[0:3], s32 offset:108
+; GFX9-NEXT: v_cmp_eq_u32_e64 s[60:61], 1, v3
+; GFX9-NEXT: v_and_b32_e32 v6, 1, v6
+; GFX9-NEXT: v_and_b32_e32 v7, 1, v7
+; GFX9-NEXT: v_cmp_eq_u32_e64 s[54:55], 1, v6
+; GFX9-NEXT: v_and_b32_e32 v8, 1, v8
+; GFX9-NEXT: v_cmp_eq_u32_e64 s[52:53], 1, v7
+; GFX9-NEXT: v_and_b32_e32 v9, 1, v9
+; GFX9-NEXT: v_cmp_eq_u32_e64 s[50:51], 1, v8
+; GFX9-NEXT: v_cmp_eq_u32_e64 s[48:49], 1, v9
+; GFX9-NEXT: buffer_load_dword v6, off, s[0:3], s32 offset:88
+; GFX9-NEXT: buffer_load_dword v7, off, s[0:3], s32 offset:24
+; GFX9-NEXT: v_and_b32_e32 v24, 1, v24
+; GFX9-NEXT: v_and_b32_e32 v11, 1, v11
+; GFX9-NEXT: v_cmp_eq_u32_e64 s[14:15], 1, v24
+; GFX9-NEXT: v_and_b32_e32 v23, 1, v23
+; GFX9-NEXT: v_and_b32_e32 v22, 1, v22
+; GFX9-NEXT: v_and_b32_e32 v19, 1, v19
+; GFX9-NEXT: v_cmp_eq_u32_e64 s[44:45], 1, v11
+; GFX9-NEXT: v_and_b32_e32 v10, 1, v10
+; GFX9-NEXT: buffer_load_dword v24, off, s[0:3], s32 offset:48
+; GFX9-NEXT: v_cmp_eq_u32_e64 s[16:17], 1, v23
+; GFX9-NEXT: v_cmp_eq_u32_e64 s[20:21], 1, v22
+; GFX9-NEXT: v_cmp_eq_u32_e64 s[26:27], 1, v19
+; GFX9-NEXT: v_cmp_eq_u32_e64 s[46:47], 1, v10
+; GFX9-NEXT: v_and_b32_e32 v26, 1, v26
+; GFX9-NEXT: v_and_b32_e32 v25, 1, v25
+; GFX9-NEXT: v_and_b32_e32 v28, 1, v28
+; GFX9-NEXT: v_and_b32_e32 v27, 1, v27
+; GFX9-NEXT: v_cmp_eq_u32_e64 s[10:11], 1, v26
+; GFX9-NEXT: v_cmp_eq_u32_e64 s[12:13], 1, v25
+; GFX9-NEXT: v_and_b32_e32 v29, 1, v29
+; GFX9-NEXT: v_cmp_eq_u32_e64 s[6:7], 1, v28
+; GFX9-NEXT: v_cmp_eq_u32_e64 s[8:9], 1, v27
+; GFX9-NEXT: v_and_b32_e32 v30, 1, v30
+; GFX9-NEXT: s_waitcnt vmcnt(13)
+; GFX9-NEXT: v_lshrrev_b32_e32 v2, 16, v18
+; GFX9-NEXT: s_waitcnt vmcnt(12)
+; GFX9-NEXT: v_lshrrev_b32_e32 v1, 16, v21
+; GFX9-NEXT: v_cndmask_b32_e64 v0, v18, v21, s[66:67]
+; GFX9-NEXT: v_cndmask_b32_e64 v1, v2, v1, s[64:65]
+; GFX9-NEXT: s_mov_b32 s64, 0x5040100
+; GFX9-NEXT: v_perm_b32 v0, v1, v0, s64
+; GFX9-NEXT: s_waitcnt vmcnt(11)
+; GFX9-NEXT: v_lshrrev_b32_e32 v3, 16, v16
+; GFX9-NEXT: s_waitcnt vmcnt(10)
+; GFX9-NEXT: v_lshrrev_b32_e32 v2, 16, v17
+; GFX9-NEXT: v_cndmask_b32_e64 v1, v16, v17, s[62:63]
+; GFX9-NEXT: v_cndmask_b32_e64 v2, v3, v2, s[60:61]
+; GFX9-NEXT: v_perm_b32 v1, v2, v1, s64
+; GFX9-NEXT: buffer_load_dword v18, off, s[0:3], s32 offset:36
+; GFX9-NEXT: buffer_load_dword v16, off, s[0:3], s32 offset:40
; GFX9-NEXT: buffer_load_dword v21, off, s[0:3], s32 offset:44
-; GFX9-NEXT: buffer_load_dword v22, off, s[0:3], s32 offset:112
-; GFX9-NEXT: buffer_load_dword v23, off, s[0:3], s32 offset:48
-; GFX9-NEXT: buffer_load_dword v24, off, s[0:3], s32 offset:116
-; GFX9-NEXT: buffer_load_dword v25, off, s[0:3], s32 offset:52
-; GFX9-NEXT: buffer_load_dword v26, off, s[0:3], s32 offset:120
-; GFX9-NEXT: buffer_load_dword v27, off, s[0:3], s32 offset:56
-; GFX9-NEXT: buffer_load_dword v28, off, s[0:3], s32 offset:124
-; GFX9-NEXT: buffer_load_dword v30, off, s[0:3], s32 offset:60
-; GFX9-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:128
-; GFX9-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:64
-; GFX9-NEXT: s_waitcnt vmcnt(0)
-; GFX9-NEXT: v_cndmask_b32_e64 v29, v32, v33, s[66:67]
-; GFX9-NEXT: v_lshrrev_b32_e32 v33, 16, v33
-; GFX9-NEXT: v_lshrrev_b32_e32 v32, 16, v32
-; GFX9-NEXT: v_cndmask_b32_e64 v32, v32, v33, s[64:65]
-; GFX9-NEXT: v_cndmask_b32_e64 v33, v28, v30, s[62:63]
-; GFX9-NEXT: v_lshrrev_b32_e32 v30, 16, v30
-; GFX9-NEXT: v_lshrrev_b32_e32 v28, 16, v28
-; GFX9-NEXT: v_cndmask_b32_e64 v28, v28, v30, s[60:61]
-; GFX9-NEXT: v_cndmask_b32_e64 v30, v26, v27, s[58:59]
-; GFX9-NEXT: v_lshrrev_b32_e32 v27, 16, v27
-; GFX9-NEXT: v_lshrrev_b32_e32 v26, 16, v26
-; GFX9-NEXT: v_cndmask_b32_e64 v26, v26, v27, s[56:57]
-; GFX9-NEXT: v_cndmask_b32_e64 v27, v24, v25, s[54:55]
-; GFX9-NEXT: v_lshrrev_b32_e32 v25, 16, v25
-; GFX9-NEXT: v_lshrrev_b32_e32 v24, 16, v24
-; GFX9-NEXT: v_cndmask_b32_e64 v24, v24, v25, s[52:53]
-; GFX9-NEXT: v_cndmask_b32_e64 v25, v22, v23, s[50:51]
-; GFX9-NEXT: v_lshrrev_b32_e32 v23, 16, v23
-; GFX9-NEXT: v_lshrrev_b32_e32 v22, 16, v22
-; GFX9-NEXT: v_cndmask_b32_e64 v22, v22, v23, s[48:49]
-; GFX9-NEXT: v_cndmask_b32_e64 v23, v20, v21, s[46:47]
-; GFX9-NEXT: v_lshrrev_b32_e32 v21, 16, v21
-; GFX9-NEXT: v_lshrrev_b32_e32 v20, 16, v20
-; GFX9-NEXT: v_cndmask_b32_e64 v20, v20, v21, s[44:45]
-; GFX9-NEXT: v_cndmask_b32_e64 v21, v18, v19, s[42:43]
-; GFX9-NEXT: v_lshrrev_b32_e32 v19, 16, v19
-; GFX9-NEXT: v_lshrrev_b32_e32 v18, 16, v18
-; GFX9-NEXT: v_cndmask_b32_e64 v18, v18, v19, s[40:41]
-; GFX9-NEXT: v_cndmask_b32_e64 v19, v16, v17, s[38:39]
-; GFX9-NEXT: v_lshrrev_b32_e32 v17, 16, v17
-; GFX9-NEXT: v_lshrrev_b32_e32 v16, 16, v16
-; GFX9-NEXT: v_cndmask_b32_e64 v16, v16, v17, s[36:37]
-; GFX9-NEXT: v_cndmask_b32_e64 v17, v14, v15, s[34:35]
-; GFX9-NEXT: v_lshrrev_b32_e32 v15, 16, v15
+; GFX9-NEXT: s_waitcnt vmcnt(11)
+; GFX9-NEXT: v_cndmask_b32_e64 v2, v14, v15, s[58:59]
+; GFX9-NEXT: v_lshrrev_b32_e32 v3, 16, v15
; GFX9-NEXT: v_lshrrev_b32_e32 v14, 16, v14
-; GFX9-NEXT: v_cndmask_b32_e64 v14, v14, v15, s[30:31]
-; GFX9-NEXT: v_cndmask_b32_e64 v15, v12, v13, s[28:29]
+; GFX9-NEXT: v_cndmask_b32_e64 v3, v14, v3, s[56:57]
+; GFX9-NEXT: v_perm_b32 v2, v3, v2, s64
+; GFX9-NEXT: buffer_load_dword v14, off, s[0:3], s32 offset:124
+; GFX9-NEXT: buffer_load_dword v15, off, s[0:3], s32 offset:128
+; GFX9-NEXT: s_waitcnt vmcnt(11)
+; GFX9-NEXT: v_cndmask_b32_e64 v3, v12, v13, s[54:55]
; GFX9-NEXT: v_lshrrev_b32_e32 v13, 16, v13
; GFX9-NEXT: v_lshrrev_b32_e32 v12, 16, v12
-; GFX9-NEXT: v_cndmask_b32_e64 v12, v12, v13, s[26:27]
-; GFX9-NEXT: v_cndmask_b32_e64 v13, v10, v11, s[24:25]
-; GFX9-NEXT: v_lshrrev_b32_e32 v11, 16, v11
-; GFX9-NEXT: v_lshrrev_b32_e32 v10, 16, v10
-; GFX9-NEXT: v_cndmask_b32_e64 v10, v10, v11, s[22:23]
-; GFX9-NEXT: v_cndmask_b32_e64 v11, v8, v9, s[20:21]
-; GFX9-NEXT: v_lshrrev_b32_e32 v9, 16, v9
-; GFX9-NEXT: v_lshrrev_b32_e32 v8, 16, v8
-; GFX9-NEXT: v_cndmask_b32_e64 v8, v8, v9, s[18:19]
-; GFX9-NEXT: v_cndmask_b32_e64 v9, v6, v7, s[16:17]
-; GFX9-NEXT: v_lshrrev_b32_e32 v7, 16, v7
-; GFX9-NEXT: v_lshrrev_b32_e32 v6, 16, v6
-; GFX9-NEXT: v_cndmask_b32_e64 v6, v6, v7, s[14:15]
-; GFX9-NEXT: v_cndmask_b32_e64 v7, v4, v5, s[12:13]
+; GFX9-NEXT: v_cndmask_b32_e64 v12, v12, v13, s[52:53]
+; GFX9-NEXT: buffer_load_dword v13, off, s[0:3], s32 offset:120
+; GFX9-NEXT: v_perm_b32 v3, v12, v3, s64
+; GFX9-NEXT: v_cmp_eq_u32_e64 s[4:5], 1, v29
+; GFX9-NEXT: s_waitcnt vmcnt(10)
+; GFX9-NEXT: v_cndmask_b32_e64 v12, v4, v5, s[50:51]
; GFX9-NEXT: v_lshrrev_b32_e32 v5, 16, v5
; GFX9-NEXT: v_lshrrev_b32_e32 v4, 16, v4
-; GFX9-NEXT: v_cndmask_b32_e64 v4, v4, v5, s[10:11]
-; GFX9-NEXT: v_cndmask_b32_e64 v5, v2, v3, s[8:9]
-; GFX9-NEXT: v_lshrrev_b32_e32 v3, 16, v3
-; GFX9-NEXT: v_lshrrev_b32_e32 v2, 16, v2
-; GFX9-NEXT: v_cndmask_b32_e64 v2, v2, v3, s[6:7]
-; GFX9-NEXT: v_cndmask_b32_e64 v3, v0, v1, s[4:5]
-; GFX9-NEXT: v_lshrrev_b32_e32 v1, 16, v1
-; GFX9-NEXT: v_lshrrev_b32_e32 v0, 16, v0
-; GFX9-NEXT: v_cndmask_b32_e32 v0, v0, v1, vcc
-; GFX9-NEXT: s_mov_b32 s4, 0x5040100
-; GFX9-NEXT: v_perm_b32 v0, v0, v3, s4
-; GFX9-NEXT: v_perm_b32 v1, v2, v5, s4
-; GFX9-NEXT: v_perm_b32 v2, v4, v7, s4
-; GFX9-NEXT: v_perm_b32 v3, v6, v9, s4
-; GFX9-NEXT: v_perm_b32 v4, v8, v11, s4
-; GFX9-NEXT: v_perm_b32 v5, v10, v13, s4
-; GFX9-NEXT: v_perm_b32 v6, v12, v15, s4
-; GFX9-NEXT: v_perm_b32 v7, v14, v17, s4
-; GFX9-NEXT: v_perm_b32 v8, v16, v19, s4
-; GFX9-NEXT: v_perm_b32 v9, v18, v21, s4
-; GFX9-NEXT: v_perm_b32 v10, v20, v23, s4
-; GFX9-NEXT: v_perm_b32 v11, v22, v25, s4
-; GFX9-NEXT: v_perm_b32 v12, v24, v27, s4
-; GFX9-NEXT: v_perm_b32 v13, v26, v30, s4
-; GFX9-NEXT: v_perm_b32 v14, v28, v33, s4
-; GFX9-NEXT: v_perm_b32 v15, v32, v29, s4
+; GFX9-NEXT: v_cndmask_b32_e64 v4, v4, v5, s[48:49]
+; GFX9-NEXT: v_perm_b32 v4, v4, v12, s64
+; GFX9-NEXT: buffer_load_dword v12, off, s[0:3], s32 offset:116
+; GFX9-NEXT: s_waitcnt vmcnt(10)
+; GFX9-NEXT: v_and_b32_e32 v11, 1, v20
+; GFX9-NEXT: v_cmp_eq_u32_e64 s[18:19], 1, v11
+; GFX9-NEXT: buffer_load_dword v8, off, s[0:3], s32 offset:92
+; GFX9-NEXT: buffer_load_dword v9, off, s[0:3], s32 offset:28
+; GFX9-NEXT: buffer_load_dword v19, off, s[0:3], s32 offset:112
+; GFX9-NEXT: buffer_load_dword v10, off, s[0:3], s32 offset:108
+; GFX9-NEXT: buffer_load_dword v11, off, s[0:3], s32 offset:104
+; GFX9-NEXT: buffer_load_dword v20, off, s[0:3], s32 offset:100
+; GFX9-NEXT: buffer_load_dword v22, off, s[0:3], s32 offset:96
+; GFX9-NEXT: buffer_load_dword v23, off, s[0:3], s32 offset:32
+; GFX9-NEXT: v_cmp_eq_u32_e32 vcc, 1, v30
; GFX9-NEXT: v_readlane_b32 s67, v31, 35
; GFX9-NEXT: v_readlane_b32 s66, v31, 34
; GFX9-NEXT: v_readlane_b32 s65, v31, 33
-; GFX9-NEXT: v_readlane_b32 s64, v31, 32
; GFX9-NEXT: v_readlane_b32 s63, v31, 31
; GFX9-NEXT: v_readlane_b32 s62, v31, 30
; GFX9-NEXT: v_readlane_b32 s61, v31, 29
@@ -29067,11 +28698,54 @@ define <32 x bfloat> @v_vselect_v32bf16(<32 x i1> %cond, <32 x bfloat> %a, <32 x
; GFX9-NEXT: v_readlane_b32 s51, v31, 19
; GFX9-NEXT: v_readlane_b32 s50, v31, 18
; GFX9-NEXT: v_readlane_b32 s49, v31, 17
+; GFX9-NEXT: s_waitcnt vmcnt(16)
+; GFX9-NEXT: v_cndmask_b32_e64 v5, v6, v7, s[46:47]
+; GFX9-NEXT: v_lshrrev_b32_e32 v7, 16, v7
+; GFX9-NEXT: v_lshrrev_b32_e32 v6, 16, v6
+; GFX9-NEXT: v_cndmask_b32_e64 v6, v6, v7, s[44:45]
+; GFX9-NEXT: v_perm_b32 v5, v6, v5, s64
; GFX9-NEXT: v_readlane_b32 s48, v31, 16
; GFX9-NEXT: v_readlane_b32 s47, v31, 15
; GFX9-NEXT: v_readlane_b32 s46, v31, 14
; GFX9-NEXT: v_readlane_b32 s45, v31, 13
; GFX9-NEXT: v_readlane_b32 s44, v31, 12
+; GFX9-NEXT: s_waitcnt vmcnt(6)
+; GFX9-NEXT: v_cndmask_b32_e64 v6, v8, v9, s[42:43]
+; GFX9-NEXT: v_lshrrev_b32_e32 v7, 16, v9
+; GFX9-NEXT: v_lshrrev_b32_e32 v8, 16, v8
+; GFX9-NEXT: v_cndmask_b32_e64 v7, v8, v7, s[40:41]
+; GFX9-NEXT: v_perm_b32 v6, v7, v6, s64
+; GFX9-NEXT: s_waitcnt vmcnt(1)
+; GFX9-NEXT: v_lshrrev_b32_e32 v9, 16, v22
+; GFX9-NEXT: s_waitcnt vmcnt(0)
+; GFX9-NEXT: v_lshrrev_b32_e32 v8, 16, v23
+; GFX9-NEXT: v_cndmask_b32_e64 v7, v22, v23, s[38:39]
+; GFX9-NEXT: v_cndmask_b32_e64 v8, v9, v8, s[36:37]
+; GFX9-NEXT: v_lshrrev_b32_e32 v9, 16, v18
+; GFX9-NEXT: v_lshrrev_b32_e32 v17, 16, v20
+; GFX9-NEXT: v_perm_b32 v7, v8, v7, s64
+; GFX9-NEXT: v_cndmask_b32_e64 v8, v20, v18, s[34:35]
+; GFX9-NEXT: v_cndmask_b32_e64 v9, v17, v9, s[30:31]
+; GFX9-NEXT: v_perm_b32 v8, v9, v8, s64
+; GFX9-NEXT: v_cndmask_b32_e64 v9, v11, v16, s[28:29]
+; GFX9-NEXT: v_lshrrev_b32_e32 v16, 16, v16
+; GFX9-NEXT: v_lshrrev_b32_e32 v11, 16, v11
+; GFX9-NEXT: buffer_load_dword v18, off, s[0:3], s32 offset:52
+; GFX9-NEXT: buffer_load_dword v17, off, s[0:3], s32 offset:56
+; GFX9-NEXT: v_cndmask_b32_e64 v11, v11, v16, s[26:27]
+; GFX9-NEXT: v_perm_b32 v9, v11, v9, s64
+; GFX9-NEXT: v_cndmask_b32_e64 v11, v10, v21, s[24:25]
+; GFX9-NEXT: v_lshrrev_b32_e32 v16, 16, v21
+; GFX9-NEXT: v_lshrrev_b32_e32 v10, 16, v10
+; GFX9-NEXT: v_cndmask_b32_e64 v10, v10, v16, s[22:23]
+; GFX9-NEXT: buffer_load_dword v16, off, s[0:3], s32 offset:60
+; GFX9-NEXT: v_perm_b32 v10, v10, v11, s64
+; GFX9-NEXT: v_cndmask_b32_e64 v11, v19, v24, s[20:21]
+; GFX9-NEXT: v_lshrrev_b32_e32 v20, 16, v24
+; GFX9-NEXT: v_lshrrev_b32_e32 v19, 16, v19
+; GFX9-NEXT: v_cndmask_b32_e64 v19, v19, v20, s[16:17]
+; GFX9-NEXT: buffer_load_dword v20, off, s[0:3], s32 offset:64
+; GFX9-NEXT: v_perm_b32 v11, v19, v11, s64
; GFX9-NEXT: v_readlane_b32 s43, v31, 11
; GFX9-NEXT: v_readlane_b32 s42, v31, 10
; GFX9-NEXT: v_readlane_b32 s41, v31, 9
@@ -29084,6 +28758,31 @@ define <32 x bfloat> @v_vselect_v32bf16(<32 x i1> %cond, <32 x bfloat> %a, <32 x
; GFX9-NEXT: v_readlane_b32 s34, v31, 2
; GFX9-NEXT: v_readlane_b32 s31, v31, 1
; GFX9-NEXT: v_readlane_b32 s30, v31, 0
+; GFX9-NEXT: s_waitcnt vmcnt(3)
+; GFX9-NEXT: v_cndmask_b32_e64 v19, v12, v18, s[14:15]
+; GFX9-NEXT: v_lshrrev_b32_e32 v18, 16, v18
+; GFX9-NEXT: v_lshrrev_b32_e32 v12, 16, v12
+; GFX9-NEXT: v_cndmask_b32_e64 v12, v12, v18, s[12:13]
+; GFX9-NEXT: s_waitcnt vmcnt(2)
+; GFX9-NEXT: v_cndmask_b32_e64 v18, v13, v17, s[10:11]
+; GFX9-NEXT: v_lshrrev_b32_e32 v17, 16, v17
+; GFX9-NEXT: v_lshrrev_b32_e32 v13, 16, v13
+; GFX9-NEXT: v_cndmask_b32_e64 v13, v13, v17, s[8:9]
+; GFX9-NEXT: s_waitcnt vmcnt(1)
+; GFX9-NEXT: v_cndmask_b32_e64 v17, v14, v16, s[6:7]
+; GFX9-NEXT: v_lshrrev_b32_e32 v16, 16, v16
+; GFX9-NEXT: v_lshrrev_b32_e32 v14, 16, v14
+; GFX9-NEXT: v_cndmask_b32_e64 v14, v14, v16, s[4:5]
+; GFX9-NEXT: v_perm_b32 v14, v14, v17, s64
+; GFX9-NEXT: v_perm_b32 v12, v12, v19, s64
+; GFX9-NEXT: s_waitcnt vmcnt(0)
+; GFX9-NEXT: v_cndmask_b32_e32 v16, v15, v20, vcc
+; GFX9-NEXT: v_lshrrev_b32_e32 v17, 16, v20
+; GFX9-NEXT: v_lshrrev_b32_e32 v15, 16, v15
+; GFX9-NEXT: v_cndmask_b32_e64 v15, v15, v17, s[18:19]
+; GFX9-NEXT: v_perm_b32 v13, v13, v18, s64
+; GFX9-NEXT: v_perm_b32 v15, v15, v16, s64
+; GFX9-NEXT: v_readlane_b32 s64, v31, 32
; GFX9-NEXT: s_xor_saveexec_b64 s[4:5], -1
; GFX9-NEXT: buffer_load_dword v31, off, s[0:3], s32 offset:132 ; 4-byte Folded Reload
; GFX9-NEXT: s_mov_b64 exec, s[4:5]
@@ -29097,208 +28796,205 @@ define <32 x bfloat> @v_vselect_v32bf16(<32 x i1> %cond, <32 x bfloat> %a, <32 x
; GFX10-NEXT: buffer_store_dword v31, off, s[0:3], s32 offset:132 ; 4-byte Folded Spill
; GFX10-NEXT: s_waitcnt_depctr 0xffe3
; GFX10-NEXT: s_mov_b32 exec_lo, s4
-; GFX10-NEXT: v_and_b32_e32 v29, 1, v29
-; GFX10-NEXT: v_and_b32_e32 v30, 1, v30
-; GFX10-NEXT: v_and_b32_e32 v28, 1, v28
-; GFX10-NEXT: v_and_b32_e32 v26, 1, v26
-; GFX10-NEXT: v_and_b32_e32 v24, 1, v24
-; GFX10-NEXT: v_and_b32_e32 v22, 1, v22
-; GFX10-NEXT: v_and_b32_e32 v20, 1, v20
-; GFX10-NEXT: v_and_b32_e32 v18, 1, v18
-; GFX10-NEXT: v_and_b32_e32 v16, 1, v16
-; GFX10-NEXT: v_and_b32_e32 v14, 1, v14
+; GFX10-NEXT: v_and_b32_e32 v3, 1, v3
+; GFX10-NEXT: v_and_b32_e32 v0, 1, v0
+; GFX10-NEXT: v_and_b32_e32 v2, 1, v2
+; GFX10-NEXT: v_and_b32_e32 v1, 1, v1
+; GFX10-NEXT: v_and_b32_e32 v4, 1, v4
+; GFX10-NEXT: v_cmp_eq_u32_e64 s6, 1, v3
+; GFX10-NEXT: v_and_b32_e32 v3, 1, v6
+; GFX10-NEXT: v_and_b32_e32 v8, 1, v8
+; GFX10-NEXT: v_and_b32_e32 v10, 1, v10
; GFX10-NEXT: v_and_b32_e32 v12, 1, v12
-; GFX10-NEXT: s_clause 0x14
-; GFX10-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:60
-; GFX10-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:124
-; GFX10-NEXT: buffer_load_ushort v34, off, s[0:3], s32
-; GFX10-NEXT: buffer_load_dword v35, off, s[0:3], s32 offset:128
-; GFX10-NEXT: buffer_load_dword v36, off, s[0:3], s32 offset:64
-; GFX10-NEXT: buffer_load_dword v37, off, s[0:3], s32 offset:48
-; GFX10-NEXT: buffer_load_dword v38, off, s[0:3], s32 offset:116
-; GFX10-NEXT: buffer_load_dword v39, off, s[0:3], s32 offset:52
-; GFX10-NEXT: buffer_load_dword v48, off, s[0:3], s32 offset:120
-; GFX10-NEXT: buffer_load_dword v49, off, s[0:3], s32 offset:56
-; GFX10-NEXT: buffer_load_dword v50, off, s[0:3], s32 offset:32
-; GFX10-NEXT: buffer_load_dword v51, off, s[0:3], s32 offset:100
-; GFX10-NEXT: buffer_load_dword v52, off, s[0:3], s32 offset:36
-; GFX10-NEXT: buffer_load_dword v53, off, s[0:3], s32 offset:104
-; GFX10-NEXT: buffer_load_dword v54, off, s[0:3], s32 offset:40
-; GFX10-NEXT: buffer_load_dword v55, off, s[0:3], s32 offset:108
-; GFX10-NEXT: buffer_load_dword v64, off, s[0:3], s32 offset:44
-; GFX10-NEXT: buffer_load_dword v65, off, s[0:3], s32 offset:112
-; GFX10-NEXT: buffer_load_dword v66, off, s[0:3], s32 offset:72
-; GFX10-NEXT: buffer_load_dword v67, off, s[0:3], s32 offset:76
-; GFX10-NEXT: buffer_load_dword v68, off, s[0:3], s32 offset:80
-; GFX10-NEXT: v_cmp_eq_u32_e32 vcc_lo, 1, v29
+; GFX10-NEXT: v_and_b32_e32 v14, 1, v14
+; GFX10-NEXT: v_and_b32_e32 v16, 1, v16
+; GFX10-NEXT: s_clause 0x15
+; GFX10-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:68
+; GFX10-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:4
+; GFX10-NEXT: buffer_load_dword v34, off, s[0:3], s32 offset:72
+; GFX10-NEXT: buffer_load_dword v35, off, s[0:3], s32 offset:8
+; GFX10-NEXT: buffer_load_ushort v36, off, s[0:3], s32
+; GFX10-NEXT: buffer_load_dword v37, off, s[0:3], s32 offset:76
+; GFX10-NEXT: buffer_load_dword v38, off, s[0:3], s32 offset:12
+; GFX10-NEXT: buffer_load_dword v39, off, s[0:3], s32 offset:80
+; GFX10-NEXT: buffer_load_dword v48, off, s[0:3], s32 offset:16
+; GFX10-NEXT: buffer_load_dword v49, off, s[0:3], s32 offset:20
+; GFX10-NEXT: buffer_load_dword v50, off, s[0:3], s32 offset:84
+; GFX10-NEXT: buffer_load_dword v51, off, s[0:3], s32 offset:88
+; GFX10-NEXT: buffer_load_dword v52, off, s[0:3], s32 offset:24
+; GFX10-NEXT: buffer_load_dword v53, off, s[0:3], s32 offset:92
+; GFX10-NEXT: buffer_load_dword v54, off, s[0:3], s32 offset:28
+; GFX10-NEXT: buffer_load_dword v55, off, s[0:3], s32 offset:96
+; GFX10-NEXT: buffer_load_dword v64, off, s[0:3], s32 offset:32
+; GFX10-NEXT: buffer_load_dword v65, off, s[0:3], s32 offset:36
+; GFX10-NEXT: buffer_load_dword v66, off, s[0:3], s32 offset:104
+; GFX10-NEXT: buffer_load_dword v67, off, s[0:3], s32 offset:40
+; GFX10-NEXT: buffer_load_dword v68, off, s[0:3], s32 offset:100
+; GFX10-NEXT: buffer_load_dword v69, off, s[0:3], s32 offset:52
+; GFX10-NEXT: v_cmp_eq_u32_e64 s4, 1, v0
+; GFX10-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:112
+; GFX10-NEXT: v_cmp_eq_u32_e64 s5, 1, v2
+; GFX10-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:48
+; GFX10-NEXT: v_cmp_eq_u32_e32 vcc_lo, 1, v1
+; GFX10-NEXT: v_cmp_eq_u32_e64 s7, 1, v4
+; GFX10-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:120
+; GFX10-NEXT: v_cmp_eq_u32_e64 s8, 1, v3
+; GFX10-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:56
+; GFX10-NEXT: v_cmp_eq_u32_e64 s9, 1, v8
; GFX10-NEXT: s_clause 0x1
-; GFX10-NEXT: buffer_load_dword v29, off, s[0:3], s32 offset:92
-; GFX10-NEXT: buffer_load_dword v69, off, s[0:3], s32 offset:28
-; GFX10-NEXT: v_cmp_eq_u32_e64 s4, 1, v30
-; GFX10-NEXT: buffer_load_dword v30, off, s[0:3], s32 offset:96
-; GFX10-NEXT: v_cmp_eq_u32_e64 s5, 1, v28
-; GFX10-NEXT: buffer_load_dword v28, off, s[0:3], s32 offset:88
-; GFX10-NEXT: v_cmp_eq_u32_e64 s6, 1, v26
-; GFX10-NEXT: v_cmp_eq_u32_e64 s7, 1, v24
-; GFX10-NEXT: buffer_load_dword v24, off, s[0:3], s32 offset:84
-; GFX10-NEXT: v_cmp_eq_u32_e64 s8, 1, v22
-; GFX10-NEXT: buffer_load_dword v22, off, s[0:3], s32 offset:20
-; GFX10-NEXT: v_cmp_eq_u32_e64 s9, 1, v20
-; GFX10-NEXT: buffer_load_dword v20, off, s[0:3], s32 offset:16
-; GFX10-NEXT: v_cmp_eq_u32_e64 s10, 1, v18
-; GFX10-NEXT: buffer_load_dword v18, off, s[0:3], s32 offset:12
-; GFX10-NEXT: v_cmp_eq_u32_e64 s11, 1, v16
-; GFX10-NEXT: buffer_load_dword v16, off, s[0:3], s32 offset:8
+; GFX10-NEXT: buffer_load_dword v8, off, s[0:3], s32 offset:116
+; GFX10-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:108
+; GFX10-NEXT: v_cmp_eq_u32_e64 s10, 1, v10
+; GFX10-NEXT: buffer_load_dword v10, off, s[0:3], s32 offset:124
+; GFX10-NEXT: v_cmp_eq_u32_e64 s11, 1, v12
+; GFX10-NEXT: buffer_load_dword v12, off, s[0:3], s32 offset:60
; GFX10-NEXT: v_cmp_eq_u32_e64 s12, 1, v14
+; GFX10-NEXT: buffer_load_dword v14, off, s[0:3], s32 offset:128
+; GFX10-NEXT: v_cmp_eq_u32_e64 s13, 1, v16
; GFX10-NEXT: s_clause 0x1
-; GFX10-NEXT: buffer_load_dword v14, off, s[0:3], s32 offset:68
-; GFX10-NEXT: buffer_load_dword v26, off, s[0:3], s32 offset:24
-; GFX10-NEXT: v_cmp_eq_u32_e64 s13, 1, v12
-; GFX10-NEXT: buffer_load_dword v12, off, s[0:3], s32 offset:4
+; GFX10-NEXT: buffer_load_dword v16, off, s[0:3], s32 offset:64
+; GFX10-NEXT: buffer_load_dword v6, off, s[0:3], s32 offset:44
; GFX10-NEXT: v_writelane_b32 v31, s30, 0
-; GFX10-NEXT: v_and_b32_e32 v0, 1, v0
-; GFX10-NEXT: v_and_b32_e32 v2, 1, v2
-; GFX10-NEXT: v_and_b32_e32 v4, 1, v4
-; GFX10-NEXT: v_and_b32_e32 v6, 1, v6
+; GFX10-NEXT: v_and_b32_e32 v30, 1, v30
+; GFX10-NEXT: v_and_b32_e32 v28, 1, v28
+; GFX10-NEXT: v_and_b32_e32 v26, 1, v26
+; GFX10-NEXT: v_and_b32_e32 v24, 1, v24
; GFX10-NEXT: v_writelane_b32 v31, s31, 1
-; GFX10-NEXT: v_and_b32_e32 v8, 1, v8
-; GFX10-NEXT: v_and_b32_e32 v10, 1, v10
-; GFX10-NEXT: v_and_b32_e32 v1, 1, v1
-; GFX10-NEXT: v_and_b32_e32 v3, 1, v3
-; GFX10-NEXT: v_writelane_b32 v31, s34, 2
-; GFX10-NEXT: v_and_b32_e32 v5, 1, v5
-; GFX10-NEXT: v_and_b32_e32 v7, 1, v7
-; GFX10-NEXT: v_and_b32_e32 v9, 1, v9
-; GFX10-NEXT: v_and_b32_e32 v11, 1, v11
-; GFX10-NEXT: v_and_b32_e32 v13, 1, v13
-; GFX10-NEXT: v_and_b32_e32 v15, 1, v15
+; GFX10-NEXT: v_and_b32_e32 v22, 1, v22
+; GFX10-NEXT: v_and_b32_e32 v20, 1, v20
; GFX10-NEXT: v_and_b32_e32 v17, 1, v17
-; GFX10-NEXT: v_and_b32_e32 v19, 1, v19
-; GFX10-NEXT: v_and_b32_e32 v21, 1, v21
-; GFX10-NEXT: v_and_b32_e32 v23, 1, v23
-; GFX10-NEXT: v_and_b32_e32 v25, 1, v25
+; GFX10-NEXT: v_and_b32_e32 v9, 1, v9
+; GFX10-NEXT: v_and_b32_e32 v7, 1, v7
+; GFX10-NEXT: v_writelane_b32 v31, s34, 2
+; GFX10-NEXT: v_and_b32_e32 v29, 1, v29
; GFX10-NEXT: v_and_b32_e32 v27, 1, v27
-; GFX10-NEXT: v_cmp_eq_u32_e64 s14, 1, v10
-; GFX10-NEXT: v_cmp_eq_u32_e64 s15, 1, v8
-; GFX10-NEXT: v_cmp_eq_u32_e64 s16, 1, v6
-; GFX10-NEXT: v_cmp_eq_u32_e64 s17, 1, v4
-; GFX10-NEXT: v_cmp_eq_u32_e64 s18, 1, v2
-; GFX10-NEXT: v_cmp_eq_u32_e64 s19, 1, v0
+; GFX10-NEXT: v_and_b32_e32 v25, 1, v25
+; GFX10-NEXT: v_and_b32_e32 v23, 1, v23
+; GFX10-NEXT: v_and_b32_e32 v21, 1, v21
+; GFX10-NEXT: v_and_b32_e32 v19, 1, v19
+; GFX10-NEXT: v_and_b32_e32 v18, 1, v18
+; GFX10-NEXT: v_and_b32_e32 v15, 1, v15
+; GFX10-NEXT: v_and_b32_e32 v13, 1, v13
+; GFX10-NEXT: v_and_b32_e32 v11, 1, v11
+; GFX10-NEXT: v_and_b32_e32 v5, 1, v5
+; GFX10-NEXT: v_cmp_eq_u32_e64 s15, 1, v20
+; GFX10-NEXT: v_cmp_eq_u32_e64 s16, 1, v22
+; GFX10-NEXT: v_cmp_eq_u32_e64 s17, 1, v24
+; GFX10-NEXT: v_cmp_eq_u32_e64 s18, 1, v26
+; GFX10-NEXT: v_cmp_eq_u32_e64 s19, 1, v28
+; GFX10-NEXT: v_cmp_eq_u32_e64 s20, 1, v30
+; GFX10-NEXT: v_cmp_eq_u32_e64 s22, 1, v7
+; GFX10-NEXT: v_cmp_eq_u32_e64 s23, 1, v9
+; GFX10-NEXT: v_cmp_eq_u32_e64 s27, 1, v17
; GFX10-NEXT: v_writelane_b32 v31, s35, 3
-; GFX10-NEXT: v_cmp_eq_u32_e64 s20, 1, v27
-; GFX10-NEXT: v_cmp_eq_u32_e64 s21, 1, v25
-; GFX10-NEXT: v_cmp_eq_u32_e64 s22, 1, v23
-; GFX10-NEXT: v_cmp_eq_u32_e64 s23, 1, v21
-; GFX10-NEXT: v_cmp_eq_u32_e64 s24, 1, v19
-; GFX10-NEXT: v_cmp_eq_u32_e64 s25, 1, v17
+; GFX10-NEXT: v_cmp_eq_u32_e64 s14, 1, v18
+; GFX10-NEXT: v_cmp_eq_u32_e64 s21, 1, v5
+; GFX10-NEXT: v_cmp_eq_u32_e64 s24, 1, v11
+; GFX10-NEXT: v_cmp_eq_u32_e64 s25, 1, v13
; GFX10-NEXT: v_cmp_eq_u32_e64 s26, 1, v15
-; GFX10-NEXT: v_cmp_eq_u32_e64 s27, 1, v13
-; GFX10-NEXT: v_cmp_eq_u32_e64 s28, 1, v11
-; GFX10-NEXT: v_cmp_eq_u32_e64 s29, 1, v7
-; GFX10-NEXT: v_cmp_eq_u32_e64 s30, 1, v3
-; GFX10-NEXT: v_cmp_eq_u32_e64 s31, 1, v1
-; GFX10-NEXT: v_cmp_eq_u32_e64 s34, 1, v5
-; GFX10-NEXT: v_cmp_eq_u32_e64 s35, 1, v9
+; GFX10-NEXT: v_cmp_eq_u32_e64 s28, 1, v19
+; GFX10-NEXT: v_cmp_eq_u32_e64 s29, 1, v21
+; GFX10-NEXT: v_cmp_eq_u32_e64 s30, 1, v23
+; GFX10-NEXT: v_cmp_eq_u32_e64 s31, 1, v25
+; GFX10-NEXT: v_cmp_eq_u32_e64 s34, 1, v27
+; GFX10-NEXT: v_cmp_eq_u32_e64 s35, 1, v29
; GFX10-NEXT: s_waitcnt vmcnt(32)
-; GFX10-NEXT: v_lshrrev_b32_e32 v0, 16, v32
+; GFX10-NEXT: v_lshrrev_b32_e32 v9, 16, v32
; GFX10-NEXT: s_waitcnt vmcnt(31)
-; GFX10-NEXT: v_lshrrev_b32_e32 v1, 16, v33
-; GFX10-NEXT: s_waitcnt vmcnt(30)
-; GFX10-NEXT: v_and_b32_e32 v2, 1, v34
+; GFX10-NEXT: v_lshrrev_b32_e32 v7, 16, v33
+; GFX10-NEXT: v_cndmask_b32_e64 v5, v32, v33, s4
; GFX10-NEXT: s_waitcnt vmcnt(29)
-; GFX10-NEXT: v_lshrrev_b32_e32 v4, 16, v35
+; GFX10-NEXT: v_cndmask_b32_e64 v11, v34, v35, s5
; GFX10-NEXT: s_waitcnt vmcnt(28)
-; GFX10-NEXT: v_cndmask_b32_e64 v15, v35, v36, s4
-; GFX10-NEXT: v_lshrrev_b32_e32 v3, 16, v36
-; GFX10-NEXT: v_cndmask_b32_e64 v17, v33, v32, s5
-; GFX10-NEXT: s_waitcnt vmcnt(25)
-; GFX10-NEXT: v_cndmask_b32_e64 v19, v38, v39, s7
+; GFX10-NEXT: v_and_b32_e32 v17, 1, v36
+; GFX10-NEXT: v_lshrrev_b32_e32 v13, 16, v35
+; GFX10-NEXT: v_lshrrev_b32_e32 v15, 16, v34
+; GFX10-NEXT: s_waitcnt vmcnt(26)
+; GFX10-NEXT: v_cndmask_b32_e64 v18, v37, v38, s7
+; GFX10-NEXT: v_lshrrev_b32_e32 v19, 16, v38
+; GFX10-NEXT: v_lshrrev_b32_e32 v20, 16, v37
; GFX10-NEXT: s_waitcnt vmcnt(24)
-; GFX10-NEXT: v_lshrrev_b32_e32 v6, 16, v48
-; GFX10-NEXT: s_waitcnt vmcnt(23)
-; GFX10-NEXT: v_cndmask_b32_e64 v13, v48, v49, s6
-; GFX10-NEXT: v_lshrrev_b32_e32 v5, 16, v49
-; GFX10-NEXT: v_lshrrev_b32_e32 v7, 16, v39
-; GFX10-NEXT: v_lshrrev_b32_e32 v8, 16, v38
-; GFX10-NEXT: v_lshrrev_b32_e32 v9, 16, v37
+; GFX10-NEXT: v_cndmask_b32_e64 v21, v39, v48, s8
+; GFX10-NEXT: v_lshrrev_b32_e32 v22, 16, v48
+; GFX10-NEXT: v_lshrrev_b32_e32 v23, 16, v39
+; GFX10-NEXT: s_waitcnt vmcnt(22)
+; GFX10-NEXT: v_cndmask_b32_e64 v24, v50, v49, s9
+; GFX10-NEXT: v_lshrrev_b32_e32 v25, 16, v49
+; GFX10-NEXT: v_lshrrev_b32_e32 v26, 16, v50
+; GFX10-NEXT: s_waitcnt vmcnt(20)
+; GFX10-NEXT: v_cndmask_b32_e64 v27, v51, v52, s10
+; GFX10-NEXT: v_lshrrev_b32_e32 v28, 16, v52
+; GFX10-NEXT: v_lshrrev_b32_e32 v29, 16, v51
; GFX10-NEXT: s_waitcnt vmcnt(18)
-; GFX10-NEXT: v_cndmask_b32_e64 v27, v53, v54, s10
-; GFX10-NEXT: s_waitcnt vmcnt(17)
-; GFX10-NEXT: v_lshrrev_b32_e32 v25, 16, v55
-; GFX10-NEXT: s_waitcnt vmcnt(16)
-; GFX10-NEXT: v_cndmask_b32_e64 v21, v55, v64, s9
-; GFX10-NEXT: s_waitcnt vmcnt(15)
-; GFX10-NEXT: v_cndmask_b32_e64 v11, v65, v37, s8
-; GFX10-NEXT: v_lshrrev_b32_e32 v10, 16, v65
-; GFX10-NEXT: v_lshrrev_b32_e32 v23, 16, v64
+; GFX10-NEXT: v_cndmask_b32_e64 v30, v53, v54, s11
; GFX10-NEXT: v_lshrrev_b32_e32 v32, 16, v54
; GFX10-NEXT: v_lshrrev_b32_e32 v33, 16, v53
-; GFX10-NEXT: v_cndmask_b32_e64 v34, v51, v52, s11
-; GFX10-NEXT: v_lshrrev_b32_e32 v35, 16, v52
-; GFX10-NEXT: v_lshrrev_b32_e32 v36, 16, v51
+; GFX10-NEXT: s_waitcnt vmcnt(16)
+; GFX10-NEXT: v_cndmask_b32_e64 v34, v55, v64, s12
+; GFX10-NEXT: v_lshrrev_b32_e32 v35, 16, v64
+; GFX10-NEXT: v_lshrrev_b32_e32 v36, 16, v55
+; GFX10-NEXT: s_waitcnt vmcnt(12)
+; GFX10-NEXT: v_cndmask_b32_e64 v37, v68, v65, s13
+; GFX10-NEXT: v_lshrrev_b32_e32 v38, 16, v65
+; GFX10-NEXT: v_lshrrev_b32_e32 v39, 16, v68
+; GFX10-NEXT: v_lshrrev_b32_e32 v49, 16, v67
+; GFX10-NEXT: v_lshrrev_b32_e32 v50, 16, v66
; GFX10-NEXT: s_waitcnt vmcnt(9)
-; GFX10-NEXT: v_cndmask_b32_e64 v37, v30, v50, s12
-; GFX10-NEXT: v_lshrrev_b32_e32 v38, 16, v50
-; GFX10-NEXT: v_lshrrev_b32_e32 v30, 16, v30
-; GFX10-NEXT: v_cndmask_b32_e64 v39, v29, v69, s13
-; GFX10-NEXT: v_lshrrev_b32_e32 v48, 16, v69
-; GFX10-NEXT: v_lshrrev_b32_e32 v29, 16, v29
+; GFX10-NEXT: v_cndmask_b32_e64 v52, v0, v2, s16
+; GFX10-NEXT: v_lshrrev_b32_e32 v2, 16, v2
+; GFX10-NEXT: v_lshrrev_b32_e32 v0, 16, v0
; GFX10-NEXT: s_waitcnt vmcnt(6)
-; GFX10-NEXT: v_cndmask_b32_e64 v50, v24, v22, s15
-; GFX10-NEXT: v_lshrrev_b32_e32 v22, 16, v22
-; GFX10-NEXT: v_lshrrev_b32_e32 v24, 16, v24
-; GFX10-NEXT: s_waitcnt vmcnt(5)
-; GFX10-NEXT: v_cndmask_b32_e64 v51, v68, v20, s16
-; GFX10-NEXT: v_lshrrev_b32_e32 v20, 16, v20
-; GFX10-NEXT: v_lshrrev_b32_e32 v52, 16, v68
-; GFX10-NEXT: s_waitcnt vmcnt(4)
-; GFX10-NEXT: v_cndmask_b32_e64 v53, v67, v18, s17
-; GFX10-NEXT: v_lshrrev_b32_e32 v18, 16, v18
-; GFX10-NEXT: s_waitcnt vmcnt(1)
-; GFX10-NEXT: v_cndmask_b32_e64 v49, v28, v26, s14
-; GFX10-NEXT: v_lshrrev_b32_e32 v26, 16, v26
-; GFX10-NEXT: v_lshrrev_b32_e32 v28, 16, v28
-; GFX10-NEXT: v_lshrrev_b32_e32 v54, 16, v67
-; GFX10-NEXT: v_cndmask_b32_e64 v55, v66, v16, s18
-; GFX10-NEXT: v_lshrrev_b32_e32 v16, 16, v16
-; GFX10-NEXT: v_lshrrev_b32_e32 v64, 16, v66
-; GFX10-NEXT: s_waitcnt vmcnt(0)
-; GFX10-NEXT: v_cndmask_b32_e64 v65, v14, v12, s19
+; GFX10-NEXT: v_cndmask_b32_e64 v53, v8, v69, s17
+; GFX10-NEXT: v_lshrrev_b32_e32 v54, 16, v69
+; GFX10-NEXT: v_lshrrev_b32_e32 v8, 16, v8
+; GFX10-NEXT: v_cndmask_b32_e64 v55, v4, v3, s18
+; GFX10-NEXT: v_lshrrev_b32_e32 v3, 16, v3
+; GFX10-NEXT: v_lshrrev_b32_e32 v4, 16, v4
+; GFX10-NEXT: s_waitcnt vmcnt(3)
+; GFX10-NEXT: v_cndmask_b32_e64 v64, v10, v12, s19
; GFX10-NEXT: v_lshrrev_b32_e32 v12, 16, v12
+; GFX10-NEXT: s_waitcnt vmcnt(0)
+; GFX10-NEXT: v_cndmask_b32_e64 v51, v1, v6, s15
+; GFX10-NEXT: v_lshrrev_b32_e32 v6, 16, v6
+; GFX10-NEXT: v_lshrrev_b32_e32 v1, 16, v1
+; GFX10-NEXT: v_lshrrev_b32_e32 v10, 16, v10
+; GFX10-NEXT: v_cndmask_b32_e64 v65, v14, v16, s20
+; GFX10-NEXT: v_lshrrev_b32_e32 v16, 16, v16
; GFX10-NEXT: v_lshrrev_b32_e32 v14, 16, v14
-; GFX10-NEXT: v_cmp_eq_u32_e64 s4, 1, v2
-; GFX10-NEXT: v_cndmask_b32_e32 v66, v1, v0, vcc_lo
-; GFX10-NEXT: v_cndmask_b32_e64 v67, v6, v5, s20
-; GFX10-NEXT: v_cndmask_b32_e64 v68, v8, v7, s21
-; GFX10-NEXT: v_cndmask_b32_e64 v69, v10, v9, s22
-; GFX10-NEXT: v_cndmask_b32_e64 v10, v25, v23, s23
-; GFX10-NEXT: v_cndmask_b32_e64 v9, v33, v32, s24
-; GFX10-NEXT: v_cndmask_b32_e64 v8, v36, v35, s25
-; GFX10-NEXT: v_cndmask_b32_e64 v7, v30, v38, s26
-; GFX10-NEXT: v_cndmask_b32_e64 v6, v29, v48, s27
-; GFX10-NEXT: v_cndmask_b32_e64 v5, v28, v26, s28
-; GFX10-NEXT: v_cndmask_b32_e64 v20, v52, v20, s29
-; GFX10-NEXT: v_cndmask_b32_e64 v0, v14, v12, s31
-; GFX10-NEXT: v_cndmask_b32_e64 v1, v64, v16, s30
-; GFX10-NEXT: v_cndmask_b32_e64 v2, v54, v18, s34
-; GFX10-NEXT: v_cndmask_b32_e64 v12, v24, v22, s35
-; GFX10-NEXT: v_cndmask_b32_e64 v16, v4, v3, s4
-; GFX10-NEXT: v_perm_b32 v0, v0, v65, 0x5040100
-; GFX10-NEXT: v_perm_b32 v1, v1, v55, 0x5040100
-; GFX10-NEXT: v_perm_b32 v2, v2, v53, 0x5040100
-; GFX10-NEXT: v_perm_b32 v3, v20, v51, 0x5040100
-; GFX10-NEXT: v_perm_b32 v4, v12, v50, 0x5040100
-; GFX10-NEXT: v_perm_b32 v5, v5, v49, 0x5040100
-; GFX10-NEXT: v_perm_b32 v6, v6, v39, 0x5040100
-; GFX10-NEXT: v_perm_b32 v7, v7, v37, 0x5040100
-; GFX10-NEXT: v_perm_b32 v8, v8, v34, 0x5040100
-; GFX10-NEXT: v_perm_b32 v9, v9, v27, 0x5040100
-; GFX10-NEXT: v_perm_b32 v10, v10, v21, 0x5040100
-; GFX10-NEXT: v_perm_b32 v11, v69, v11, 0x5040100
-; GFX10-NEXT: v_perm_b32 v12, v68, v19, 0x5040100
-; GFX10-NEXT: v_perm_b32 v13, v67, v13, 0x5040100
-; GFX10-NEXT: v_perm_b32 v14, v66, v17, 0x5040100
-; GFX10-NEXT: v_perm_b32 v15, v16, v15, 0x5040100
+; GFX10-NEXT: v_cndmask_b32_e32 v7, v9, v7, vcc_lo
+; GFX10-NEXT: v_cmp_eq_u32_e32 vcc_lo, 1, v17
+; GFX10-NEXT: v_cndmask_b32_e64 v48, v66, v67, s14
+; GFX10-NEXT: v_cndmask_b32_e64 v9, v15, v13, s6
+; GFX10-NEXT: v_cndmask_b32_e64 v13, v20, v19, s21
+; GFX10-NEXT: v_cndmask_b32_e64 v15, v23, v22, s22
+; GFX10-NEXT: v_cndmask_b32_e64 v19, v26, v25, s23
+; GFX10-NEXT: v_cndmask_b32_e64 v20, v29, v28, s24
+; GFX10-NEXT: v_cndmask_b32_e64 v22, v33, v32, s25
+; GFX10-NEXT: v_cndmask_b32_e64 v23, v36, v35, s26
+; GFX10-NEXT: v_cndmask_b32_e64 v25, v39, v38, s27
+; GFX10-NEXT: v_cndmask_b32_e64 v26, v50, v49, s28
+; GFX10-NEXT: v_cndmask_b32_e64 v28, v1, v6, s29
+; GFX10-NEXT: v_cndmask_b32_e64 v17, v0, v2, s30
+; GFX10-NEXT: v_cndmask_b32_e64 v29, v8, v54, s31
+; GFX10-NEXT: v_cndmask_b32_e64 v32, v4, v3, s34
+; GFX10-NEXT: v_cndmask_b32_e64 v33, v10, v12, s35
+; GFX10-NEXT: v_cndmask_b32_e32 v16, v14, v16, vcc_lo
+; GFX10-NEXT: v_perm_b32 v0, v7, v5, 0x5040100
+; GFX10-NEXT: v_perm_b32 v1, v9, v11, 0x5040100
+; GFX10-NEXT: v_perm_b32 v2, v13, v18, 0x5040100
+; GFX10-NEXT: v_perm_b32 v3, v15, v21, 0x5040100
+; GFX10-NEXT: v_perm_b32 v4, v19, v24, 0x5040100
+; GFX10-NEXT: v_perm_b32 v5, v20, v27, 0x5040100
+; GFX10-NEXT: v_perm_b32 v6, v22, v30, 0x5040100
+; GFX10-NEXT: v_perm_b32 v7, v23, v34, 0x5040100
+; GFX10-NEXT: v_perm_b32 v8, v25, v37, 0x5040100
+; GFX10-NEXT: v_perm_b32 v9, v26, v48, 0x5040100
+; GFX10-NEXT: v_perm_b32 v10, v28, v51, 0x5040100
+; GFX10-NEXT: v_perm_b32 v11, v17, v52, 0x5040100
+; GFX10-NEXT: v_perm_b32 v12, v29, v53, 0x5040100
+; GFX10-NEXT: v_perm_b32 v13, v32, v55, 0x5040100
+; GFX10-NEXT: v_perm_b32 v14, v33, v64, 0x5040100
+; GFX10-NEXT: v_perm_b32 v15, v16, v65, 0x5040100
; GFX10-NEXT: v_readlane_b32 s35, v31, 3
; GFX10-NEXT: v_readlane_b32 s34, v31, 2
; GFX10-NEXT: v_readlane_b32 s31, v31, 1
@@ -29315,198 +29011,205 @@ define <32 x bfloat> @v_vselect_v32bf16(<32 x i1> %cond, <32 x bfloat> %a, <32 x
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-NEXT: s_clause 0x20
; GFX11-NEXT: scratch_load_u16 v31, off, s32
-; GFX11-NEXT: scratch_load_b32 v32, off, s32 offset:128
-; GFX11-NEXT: scratch_load_b32 v33, off, s32 offset:64
-; GFX11-NEXT: scratch_load_b32 v34, off, s32 offset:124
-; GFX11-NEXT: scratch_load_b32 v35, off, s32 offset:60
-; GFX11-NEXT: scratch_load_b32 v36, off, s32 offset:120
-; GFX11-NEXT: scratch_load_b32 v37, off, s32 offset:56
-; GFX11-NEXT: scratch_load_b32 v38, off, s32 offset:116
-; GFX11-NEXT: scratch_load_b32 v39, off, s32 offset:52
-; GFX11-NEXT: scratch_load_b32 v48, off, s32 offset:112
-; GFX11-NEXT: scratch_load_b32 v49, off, s32 offset:48
-; GFX11-NEXT: scratch_load_b32 v50, off, s32 offset:108
-; GFX11-NEXT: scratch_load_b32 v51, off, s32 offset:44
-; GFX11-NEXT: scratch_load_b32 v52, off, s32 offset:104
-; GFX11-NEXT: scratch_load_b32 v53, off, s32 offset:40
-; GFX11-NEXT: scratch_load_b32 v54, off, s32 offset:100
-; GFX11-NEXT: scratch_load_b32 v55, off, s32 offset:36
-; GFX11-NEXT: scratch_load_b32 v64, off, s32 offset:96
-; GFX11-NEXT: scratch_load_b32 v65, off, s32 offset:32
-; GFX11-NEXT: scratch_load_b32 v66, off, s32 offset:92
-; GFX11-NEXT: scratch_load_b32 v67, off, s32 offset:28
-; GFX11-NEXT: scratch_load_b32 v68, off, s32 offset:88
-; GFX11-NEXT: scratch_load_b32 v69, off, s32 offset:24
-; GFX11-NEXT: scratch_load_b32 v70, off, s32 offset:84
-; GFX11-NEXT: scratch_load_b32 v71, off, s32 offset:20
-; GFX11-NEXT: scratch_load_b32 v80, off, s32 offset:80
-; GFX11-NEXT: scratch_load_b32 v81, off, s32 offset:16
-; GFX11-NEXT: scratch_load_b32 v82, off, s32 offset:76
-; GFX11-NEXT: scratch_load_b32 v83, off, s32 offset:12
-; GFX11-NEXT: scratch_load_b32 v84, off, s32 offset:72
-; GFX11-NEXT: scratch_load_b32 v85, off, s32 offset:8
-; GFX11-NEXT: scratch_load_b32 v86, off, s32 offset:68
-; GFX11-NEXT: scratch_load_b32 v87, off, s32 offset:4
-; GFX11-NEXT: v_and_b32_e32 v30, 1, v30
-; GFX11-NEXT: v_and_b32_e32 v28, 1, v28
-; GFX11-NEXT: v_and_b32_e32 v26, 1, v26
-; GFX11-NEXT: v_and_b32_e32 v24, 1, v24
-; GFX11-NEXT: v_and_b32_e32 v22, 1, v22
-; GFX11-NEXT: v_cmp_eq_u32_e32 vcc_lo, 1, v30
-; GFX11-NEXT: v_and_b32_e32 v3, 1, v3
-; GFX11-NEXT: v_and_b32_e32 v20, 1, v20
-; GFX11-NEXT: v_and_b32_e32 v18, 1, v18
-; GFX11-NEXT: v_and_b32_e32 v16, 1, v16
+; GFX11-NEXT: scratch_load_b32 v32, off, s32 offset:68
+; GFX11-NEXT: scratch_load_b32 v33, off, s32 offset:4
+; GFX11-NEXT: scratch_load_b32 v34, off, s32 offset:72
+; GFX11-NEXT: scratch_load_b32 v35, off, s32 offset:8
+; GFX11-NEXT: scratch_load_b32 v36, off, s32 offset:76
+; GFX11-NEXT: scratch_load_b32 v37, off, s32 offset:12
+; GFX11-NEXT: scratch_load_b32 v38, off, s32 offset:80
+; GFX11-NEXT: scratch_load_b32 v39, off, s32 offset:16
+; GFX11-NEXT: scratch_load_b32 v48, off, s32 offset:84
+; GFX11-NEXT: scratch_load_b32 v49, off, s32 offset:20
+; GFX11-NEXT: scratch_load_b32 v50, off, s32 offset:88
+; GFX11-NEXT: scratch_load_b32 v51, off, s32 offset:24
+; GFX11-NEXT: scratch_load_b32 v52, off, s32 offset:92
+; GFX11-NEXT: scratch_load_b32 v53, off, s32 offset:28
+; GFX11-NEXT: scratch_load_b32 v54, off, s32 offset:96
+; GFX11-NEXT: scratch_load_b32 v55, off, s32 offset:32
+; GFX11-NEXT: scratch_load_b32 v64, off, s32 offset:100
+; GFX11-NEXT: scratch_load_b32 v65, off, s32 offset:36
+; GFX11-NEXT: scratch_load_b32 v66, off, s32 offset:104
+; GFX11-NEXT: scratch_load_b32 v67, off, s32 offset:40
+; GFX11-NEXT: scratch_load_b32 v68, off, s32 offset:108
+; GFX11-NEXT: scratch_load_b32 v69, off, s32 offset:44
+; GFX11-NEXT: scratch_load_b32 v70, off, s32 offset:112
+; GFX11-NEXT: scratch_load_b32 v71, off, s32 offset:48
+; GFX11-NEXT: scratch_load_b32 v80, off, s32 offset:116
+; GFX11-NEXT: scratch_load_b32 v81, off, s32 offset:52
+; GFX11-NEXT: scratch_load_b32 v82, off, s32 offset:120
+; GFX11-NEXT: scratch_load_b32 v83, off, s32 offset:56
+; GFX11-NEXT: scratch_load_b32 v84, off, s32 offset:124
+; GFX11-NEXT: scratch_load_b32 v85, off, s32 offset:60
+; GFX11-NEXT: scratch_load_b32 v86, off, s32 offset:128
+; GFX11-NEXT: scratch_load_b32 v87, off, s32 offset:64
+; GFX11-NEXT: v_and_b32_e32 v0, 1, v0
+; GFX11-NEXT: v_and_b32_e32 v2, 1, v2
+; GFX11-NEXT: v_and_b32_e32 v4, 1, v4
+; GFX11-NEXT: v_and_b32_e32 v6, 1, v6
+; GFX11-NEXT: v_and_b32_e32 v8, 1, v8
+; GFX11-NEXT: v_cmp_eq_u32_e32 vcc_lo, 1, v0
+; GFX11-NEXT: v_and_b32_e32 v27, 1, v27
+; GFX11-NEXT: v_and_b32_e32 v10, 1, v10
+; GFX11-NEXT: v_and_b32_e32 v12, 1, v12
+; GFX11-NEXT: v_and_b32_e32 v14, 1, v14
; GFX11-NEXT: s_waitcnt vmcnt(30)
-; GFX11-NEXT: v_cndmask_b32_e32 v30, v32, v33, vcc_lo
-; GFX11-NEXT: v_cmp_eq_u32_e32 vcc_lo, 1, v28
-; GFX11-NEXT: v_and_b32_e32 v1, 1, v1
+; GFX11-NEXT: v_cndmask_b32_e32 v0, v32, v33, vcc_lo
+; GFX11-NEXT: v_cmp_eq_u32_e32 vcc_lo, 1, v2
+; GFX11-NEXT: v_and_b32_e32 v29, 1, v29
; GFX11-NEXT: v_lshrrev_b32_e32 v33, 16, v33
; GFX11-NEXT: v_lshrrev_b32_e32 v32, 16, v32
-; GFX11-NEXT: v_and_b32_e32 v0, 1, v0
+; GFX11-NEXT: v_and_b32_e32 v30, 1, v30
; GFX11-NEXT: s_waitcnt vmcnt(28)
-; GFX11-NEXT: v_cndmask_b32_e32 v28, v34, v35, vcc_lo
-; GFX11-NEXT: v_cmp_eq_u32_e32 vcc_lo, 1, v26
-; GFX11-NEXT: v_and_b32_e32 v7, 1, v7
+; GFX11-NEXT: v_cndmask_b32_e32 v2, v34, v35, vcc_lo
+; GFX11-NEXT: v_cmp_eq_u32_e32 vcc_lo, 1, v4
+; GFX11-NEXT: v_and_b32_e32 v23, 1, v23
; GFX11-NEXT: v_lshrrev_b32_e32 v35, 16, v35
; GFX11-NEXT: v_lshrrev_b32_e32 v34, 16, v34
-; GFX11-NEXT: v_and_b32_e32 v2, 1, v2
+; GFX11-NEXT: v_and_b32_e32 v28, 1, v28
; GFX11-NEXT: s_waitcnt vmcnt(26)
-; GFX11-NEXT: v_cndmask_b32_e32 v26, v36, v37, vcc_lo
-; GFX11-NEXT: v_cmp_eq_u32_e32 vcc_lo, 1, v24
-; GFX11-NEXT: v_and_b32_e32 v5, 1, v5
+; GFX11-NEXT: v_cndmask_b32_e32 v4, v36, v37, vcc_lo
+; GFX11-NEXT: v_cmp_eq_u32_e32 vcc_lo, 1, v6
+; GFX11-NEXT: v_and_b32_e32 v25, 1, v25
; GFX11-NEXT: v_lshrrev_b32_e32 v37, 16, v37
; GFX11-NEXT: v_lshrrev_b32_e32 v36, 16, v36
-; GFX11-NEXT: v_and_b32_e32 v4, 1, v4
+; GFX11-NEXT: v_and_b32_e32 v26, 1, v26
; GFX11-NEXT: s_waitcnt vmcnt(24)
-; GFX11-NEXT: v_cndmask_b32_e32 v24, v38, v39, vcc_lo
-; GFX11-NEXT: v_cmp_eq_u32_e32 vcc_lo, 1, v22
-; GFX11-NEXT: v_and_b32_e32 v11, 1, v11
+; GFX11-NEXT: v_cndmask_b32_e32 v6, v38, v39, vcc_lo
+; GFX11-NEXT: v_cmp_eq_u32_e32 vcc_lo, 1, v8
+; GFX11-NEXT: v_and_b32_e32 v19, 1, v19
; GFX11-NEXT: v_lshrrev_b32_e32 v39, 16, v39
; GFX11-NEXT: v_lshrrev_b32_e32 v38, 16, v38
-; GFX11-NEXT: v_and_b32_e32 v6, 1, v6
+; GFX11-NEXT: v_and_b32_e32 v24, 1, v24
; GFX11-NEXT: s_waitcnt vmcnt(22)
-; GFX11-NEXT: v_cndmask_b32_e32 v22, v48, v49, vcc_lo
-; GFX11-NEXT: v_cmp_eq_u32_e32 vcc_lo, 1, v20
-; GFX11-NEXT: v_and_b32_e32 v9, 1, v9
+; GFX11-NEXT: v_cndmask_b32_e32 v8, v48, v49, vcc_lo
+; GFX11-NEXT: v_cmp_eq_u32_e32 vcc_lo, 1, v10
+; GFX11-NEXT: v_and_b32_e32 v21, 1, v21
; GFX11-NEXT: v_lshrrev_b32_e32 v49, 16, v49
; GFX11-NEXT: v_lshrrev_b32_e32 v48, 16, v48
-; GFX11-NEXT: v_and_b32_e32 v8, 1, v8
+; GFX11-NEXT: v_and_b32_e32 v22, 1, v22
; GFX11-NEXT: s_waitcnt vmcnt(20)
-; GFX11-NEXT: v_cndmask_b32_e32 v20, v50, v51, vcc_lo
-; GFX11-NEXT: v_cmp_eq_u32_e32 vcc_lo, 1, v18
+; GFX11-NEXT: v_cndmask_b32_e32 v10, v50, v51, vcc_lo
+; GFX11-NEXT: v_cmp_eq_u32_e32 vcc_lo, 1, v12
; GFX11-NEXT: v_and_b32_e32 v15, 1, v15
; GFX11-NEXT: v_lshrrev_b32_e32 v51, 16, v51
; GFX11-NEXT: v_lshrrev_b32_e32 v50, 16, v50
-; GFX11-NEXT: v_and_b32_e32 v10, 1, v10
+; GFX11-NEXT: v_and_b32_e32 v20, 1, v20
; GFX11-NEXT: s_waitcnt vmcnt(18)
-; GFX11-NEXT: v_cndmask_b32_e32 v18, v52, v53, vcc_lo
-; GFX11-NEXT: v_cmp_eq_u32_e32 vcc_lo, 1, v16
-; GFX11-NEXT: v_and_b32_e32 v13, 1, v13
+; GFX11-NEXT: v_cndmask_b32_e32 v12, v52, v53, vcc_lo
+; GFX11-NEXT: v_cmp_eq_u32_e32 vcc_lo, 1, v14
+; GFX11-NEXT: v_and_b32_e32 v17, 1, v17
; GFX11-NEXT: v_lshrrev_b32_e32 v53, 16, v53
; GFX11-NEXT: v_lshrrev_b32_e32 v52, 16, v52
-; GFX11-NEXT: v_and_b32_e32 v12, 1, v12
+; GFX11-NEXT: v_and_b32_e32 v18, 1, v18
; GFX11-NEXT: s_waitcnt vmcnt(16)
-; GFX11-NEXT: v_cndmask_b32_e32 v16, v54, v55, vcc_lo
+; GFX11-NEXT: v_cndmask_b32_e32 v14, v54, v55, vcc_lo
; GFX11-NEXT: v_lshrrev_b32_e32 v55, 16, v55
; GFX11-NEXT: v_lshrrev_b32_e32 v54, 16, v54
-; GFX11-NEXT: v_and_b32_e32 v14, 1, v14
+; GFX11-NEXT: v_and_b32_e32 v16, 1, v16
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX11-NEXT: v_cmp_eq_u32_e32 vcc_lo, 1, v14
+; GFX11-NEXT: v_cmp_eq_u32_e32 vcc_lo, 1, v16
; GFX11-NEXT: s_waitcnt vmcnt(14)
-; GFX11-NEXT: v_dual_cndmask_b32 v14, v64, v65 :: v_dual_and_b32 v19, 1, v19
-; GFX11-NEXT: v_cmp_eq_u32_e32 vcc_lo, 1, v12
-; GFX11-NEXT: v_and_b32_e32 v17, 1, v17
+; GFX11-NEXT: v_dual_cndmask_b32 v16, v64, v65 :: v_dual_and_b32 v11, 1, v11
+; GFX11-NEXT: v_cmp_eq_u32_e32 vcc_lo, 1, v18
+; GFX11-NEXT: v_and_b32_e32 v13, 1, v13
; GFX11-NEXT: v_lshrrev_b32_e32 v65, 16, v65
; GFX11-NEXT: v_lshrrev_b32_e32 v64, 16, v64
; GFX11-NEXT: s_waitcnt vmcnt(12)
-; GFX11-NEXT: v_cndmask_b32_e32 v12, v66, v67, vcc_lo
-; GFX11-NEXT: v_cmp_eq_u32_e32 vcc_lo, 1, v10
-; GFX11-NEXT: v_and_b32_e32 v23, 1, v23
+; GFX11-NEXT: v_cndmask_b32_e32 v18, v66, v67, vcc_lo
+; GFX11-NEXT: v_cmp_eq_u32_e32 vcc_lo, 1, v20
+; GFX11-NEXT: v_and_b32_e32 v7, 1, v7
; GFX11-NEXT: v_lshrrev_b32_e32 v67, 16, v67
; GFX11-NEXT: v_lshrrev_b32_e32 v66, 16, v66
; GFX11-NEXT: s_waitcnt vmcnt(10)
-; GFX11-NEXT: v_cndmask_b32_e32 v10, v68, v69, vcc_lo
-; GFX11-NEXT: v_cmp_eq_u32_e32 vcc_lo, 1, v8
-; GFX11-NEXT: v_and_b32_e32 v21, 1, v21
+; GFX11-NEXT: v_cndmask_b32_e32 v20, v68, v69, vcc_lo
+; GFX11-NEXT: v_cmp_eq_u32_e32 vcc_lo, 1, v22
+; GFX11-NEXT: v_and_b32_e32 v9, 1, v9
; GFX11-NEXT: v_lshrrev_b32_e32 v69, 16, v69
; GFX11-NEXT: v_lshrrev_b32_e32 v68, 16, v68
; GFX11-NEXT: s_waitcnt vmcnt(8)
-; GFX11-NEXT: v_cndmask_b32_e32 v8, v70, v71, vcc_lo
-; GFX11-NEXT: v_cmp_eq_u32_e32 vcc_lo, 1, v6
-; GFX11-NEXT: v_and_b32_e32 v27, 1, v27
+; GFX11-NEXT: v_cndmask_b32_e32 v22, v70, v71, vcc_lo
+; GFX11-NEXT: v_cmp_eq_u32_e32 vcc_lo, 1, v24
+; GFX11-NEXT: v_and_b32_e32 v3, 1, v3
; GFX11-NEXT: v_lshrrev_b32_e32 v71, 16, v71
; GFX11-NEXT: v_lshrrev_b32_e32 v70, 16, v70
; GFX11-NEXT: s_waitcnt vmcnt(6)
-; GFX11-NEXT: v_cndmask_b32_e32 v6, v80, v81, vcc_lo
-; GFX11-NEXT: v_cmp_eq_u32_e32 vcc_lo, 1, v4
-; GFX11-NEXT: v_and_b32_e32 v25, 1, v25
+; GFX11-NEXT: v_cndmask_b32_e32 v24, v80, v81, vcc_lo
+; GFX11-NEXT: v_cmp_eq_u32_e32 vcc_lo, 1, v26
+; GFX11-NEXT: v_and_b32_e32 v5, 1, v5
; GFX11-NEXT: v_lshrrev_b32_e32 v81, 16, v81
; GFX11-NEXT: v_lshrrev_b32_e32 v80, 16, v80
; GFX11-NEXT: s_waitcnt vmcnt(4)
-; GFX11-NEXT: v_cndmask_b32_e32 v4, v82, v83, vcc_lo
-; GFX11-NEXT: v_cmp_eq_u32_e32 vcc_lo, 1, v2
+; GFX11-NEXT: v_cndmask_b32_e32 v26, v82, v83, vcc_lo
+; GFX11-NEXT: v_cmp_eq_u32_e32 vcc_lo, 1, v28
; GFX11-NEXT: v_and_b32_e32 v31, 1, v31
; GFX11-NEXT: v_lshrrev_b32_e32 v83, 16, v83
; GFX11-NEXT: v_lshrrev_b32_e32 v82, 16, v82
; GFX11-NEXT: s_waitcnt vmcnt(2)
-; GFX11-NEXT: v_cndmask_b32_e32 v2, v84, v85, vcc_lo
-; GFX11-NEXT: v_cmp_eq_u32_e32 vcc_lo, 1, v0
-; GFX11-NEXT: v_and_b32_e32 v29, 1, v29
+; GFX11-NEXT: v_cndmask_b32_e32 v28, v84, v85, vcc_lo
+; GFX11-NEXT: v_cmp_eq_u32_e32 vcc_lo, 1, v30
+; GFX11-NEXT: v_and_b32_e32 v1, 1, v1
; GFX11-NEXT: v_lshrrev_b32_e32 v85, 16, v85
; GFX11-NEXT: v_lshrrev_b32_e32 v84, 16, v84
; GFX11-NEXT: s_waitcnt vmcnt(0)
-; GFX11-NEXT: v_cndmask_b32_e32 v0, v86, v87, vcc_lo
-; GFX11-NEXT: v_cmp_eq_u32_e32 vcc_lo, 1, v31
+; GFX11-NEXT: v_cndmask_b32_e32 v30, v86, v87, vcc_lo
+; GFX11-NEXT: v_cmp_eq_u32_e32 vcc_lo, 1, v1
; GFX11-NEXT: v_lshrrev_b32_e32 v87, 16, v87
; GFX11-NEXT: v_lshrrev_b32_e32 v86, 16, v86
-; GFX11-NEXT: v_cndmask_b32_e32 v31, v32, v33, vcc_lo
-; GFX11-NEXT: v_cmp_eq_u32_e32 vcc_lo, 1, v29
-; GFX11-NEXT: v_cndmask_b32_e32 v29, v34, v35, vcc_lo
-; GFX11-NEXT: v_cmp_eq_u32_e32 vcc_lo, 1, v27
-; GFX11-NEXT: v_cndmask_b32_e32 v27, v36, v37, vcc_lo
-; GFX11-NEXT: v_cmp_eq_u32_e32 vcc_lo, 1, v25
-; GFX11-NEXT: v_cndmask_b32_e32 v25, v38, v39, vcc_lo
-; GFX11-NEXT: v_cmp_eq_u32_e32 vcc_lo, 1, v23
-; GFX11-NEXT: v_cndmask_b32_e32 v23, v48, v49, vcc_lo
-; GFX11-NEXT: v_cmp_eq_u32_e32 vcc_lo, 1, v21
-; GFX11-NEXT: v_cndmask_b32_e32 v21, v50, v51, vcc_lo
-; GFX11-NEXT: v_cmp_eq_u32_e32 vcc_lo, 1, v19
-; GFX11-NEXT: v_cndmask_b32_e32 v19, v52, v53, vcc_lo
-; GFX11-NEXT: v_cmp_eq_u32_e32 vcc_lo, 1, v17
-; GFX11-NEXT: v_cndmask_b32_e32 v17, v54, v55, vcc_lo
-; GFX11-NEXT: v_cmp_eq_u32_e32 vcc_lo, 1, v15
-; GFX11-NEXT: v_cndmask_b32_e32 v15, v64, v65, vcc_lo
-; GFX11-NEXT: v_cmp_eq_u32_e32 vcc_lo, 1, v13
-; GFX11-NEXT: v_cndmask_b32_e32 v13, v66, v67, vcc_lo
-; GFX11-NEXT: v_cmp_eq_u32_e32 vcc_lo, 1, v11
-; GFX11-NEXT: v_cndmask_b32_e32 v11, v68, v69, vcc_lo
-; GFX11-NEXT: v_cmp_eq_u32_e32 vcc_lo, 1, v7
-; GFX11-NEXT: v_cndmask_b32_e32 v7, v80, v81, vcc_lo
+; GFX11-NEXT: v_cndmask_b32_e32 v1, v32, v33, vcc_lo
; GFX11-NEXT: v_cmp_eq_u32_e32 vcc_lo, 1, v3
-; GFX11-NEXT: v_cndmask_b32_e32 v3, v84, v85, vcc_lo
-; GFX11-NEXT: v_cmp_eq_u32_e32 vcc_lo, 1, v1
-; GFX11-NEXT: v_cndmask_b32_e32 v1, v86, v87, vcc_lo
-; GFX11-NEXT: v_cmp_eq_u32_e32 vcc_lo, 1, v5
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2)
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_2) | instid1(VALU_DEP_2)
; GFX11-NEXT: v_perm_b32 v0, v1, v0, 0x5040100
-; GFX11-NEXT: v_cndmask_b32_e32 v5, v82, v83, vcc_lo
-; GFX11-NEXT: v_cmp_eq_u32_e32 vcc_lo, 1, v9
+; GFX11-NEXT: v_cndmask_b32_e32 v3, v34, v35, vcc_lo
+; GFX11-NEXT: v_cmp_eq_u32_e32 vcc_lo, 1, v5
; GFX11-NEXT: v_perm_b32 v1, v3, v2, 0x5040100
-; GFX11-NEXT: v_perm_b32 v3, v7, v6, 0x5040100
-; GFX11-NEXT: v_perm_b32 v6, v13, v12, 0x5040100
+; GFX11-NEXT: v_cndmask_b32_e32 v5, v36, v37, vcc_lo
+; GFX11-NEXT: v_cmp_eq_u32_e32 vcc_lo, 1, v7
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_2) | instid1(VALU_DEP_2)
; GFX11-NEXT: v_perm_b32 v2, v5, v4, 0x5040100
-; GFX11-NEXT: v_cndmask_b32_e32 v9, v70, v71, vcc_lo
+; GFX11-NEXT: v_cndmask_b32_e32 v7, v38, v39, vcc_lo
+; GFX11-NEXT: v_cmp_eq_u32_e32 vcc_lo, 1, v9
+; GFX11-NEXT: v_perm_b32 v3, v7, v6, 0x5040100
+; GFX11-NEXT: v_cndmask_b32_e32 v9, v48, v49, vcc_lo
+; GFX11-NEXT: v_cmp_eq_u32_e32 vcc_lo, 1, v11
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_2) | instid1(VALU_DEP_2)
+; GFX11-NEXT: v_perm_b32 v4, v9, v8, 0x5040100
+; GFX11-NEXT: v_cndmask_b32_e32 v11, v50, v51, vcc_lo
+; GFX11-NEXT: v_cmp_eq_u32_e32 vcc_lo, 1, v13
; GFX11-NEXT: v_perm_b32 v5, v11, v10, 0x5040100
+; GFX11-NEXT: v_cndmask_b32_e32 v13, v52, v53, vcc_lo
+; GFX11-NEXT: v_cmp_eq_u32_e32 vcc_lo, 1, v15
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_2) | instid1(VALU_DEP_2)
+; GFX11-NEXT: v_perm_b32 v6, v13, v12, 0x5040100
+; GFX11-NEXT: v_cndmask_b32_e32 v15, v54, v55, vcc_lo
+; GFX11-NEXT: v_cmp_eq_u32_e32 vcc_lo, 1, v17
; GFX11-NEXT: v_perm_b32 v7, v15, v14, 0x5040100
-; GFX11-NEXT: v_perm_b32 v10, v21, v20, 0x5040100
-; GFX11-NEXT: v_perm_b32 v11, v23, v22, 0x5040100
-; GFX11-NEXT: v_perm_b32 v4, v9, v8, 0x5040100
+; GFX11-NEXT: v_cndmask_b32_e32 v17, v64, v65, vcc_lo
+; GFX11-NEXT: v_cmp_eq_u32_e32 vcc_lo, 1, v19
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_2) | instid1(VALU_DEP_2)
; GFX11-NEXT: v_perm_b32 v8, v17, v16, 0x5040100
+; GFX11-NEXT: v_cndmask_b32_e32 v19, v66, v67, vcc_lo
+; GFX11-NEXT: v_cmp_eq_u32_e32 vcc_lo, 1, v21
; GFX11-NEXT: v_perm_b32 v9, v19, v18, 0x5040100
+; GFX11-NEXT: v_cndmask_b32_e32 v21, v68, v69, vcc_lo
+; GFX11-NEXT: v_cmp_eq_u32_e32 vcc_lo, 1, v23
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_2) | instid1(VALU_DEP_2)
+; GFX11-NEXT: v_perm_b32 v10, v21, v20, 0x5040100
+; GFX11-NEXT: v_cndmask_b32_e32 v23, v70, v71, vcc_lo
+; GFX11-NEXT: v_cmp_eq_u32_e32 vcc_lo, 1, v25
+; GFX11-NEXT: v_perm_b32 v11, v23, v22, 0x5040100
+; GFX11-NEXT: v_cndmask_b32_e32 v25, v80, v81, vcc_lo
+; GFX11-NEXT: v_cmp_eq_u32_e32 vcc_lo, 1, v27
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_2) | instid1(VALU_DEP_2)
; GFX11-NEXT: v_perm_b32 v12, v25, v24, 0x5040100
+; GFX11-NEXT: v_cndmask_b32_e32 v27, v82, v83, vcc_lo
+; GFX11-NEXT: v_cmp_eq_u32_e32 vcc_lo, 1, v29
; GFX11-NEXT: v_perm_b32 v13, v27, v26, 0x5040100
+; GFX11-NEXT: v_cndmask_b32_e32 v29, v84, v85, vcc_lo
+; GFX11-NEXT: v_cmp_eq_u32_e32 vcc_lo, 1, v31
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_1)
; GFX11-NEXT: v_perm_b32 v14, v29, v28, 0x5040100
+; GFX11-NEXT: v_cndmask_b32_e32 v31, v86, v87, vcc_lo
; GFX11-NEXT: v_perm_b32 v15, v31, v30, 0x5040100
; GFX11-NEXT: s_setpc_b64 s[30:31]
%op = select <32 x i1> %cond, <32 x bfloat> %a, <32 x bfloat> %b
diff --git a/llvm/test/CodeGen/AMDGPU/flat_atomics_i64.ll b/llvm/test/CodeGen/AMDGPU/flat_atomics_i64.ll
index f1879f287667..d7f780e414ca 100644
--- a/llvm/test/CodeGen/AMDGPU/flat_atomics_i64.ll
+++ b/llvm/test/CodeGen/AMDGPU/flat_atomics_i64.ll
@@ -43,8 +43,7 @@ define amdgpu_kernel void @atomic_add_i64_offset(ptr %out, i64 %in) {
; GFX12-NEXT: flat_atomic_add_u64 v[0:1], v[2:3] offset:32
; GFX12-NEXT: s_waitcnt lgkmcnt(0)
; GFX12-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX12-NEXT: buffer_gl0_inv
-; GFX12-NEXT: buffer_gl1_inv
+; GFX12-NEXT: global_inv scope:SCOPE_DEV
; GFX12-NEXT: s_endpgm
entry:
%gep = getelementptr i64, ptr %out, i64 4
@@ -101,8 +100,7 @@ define amdgpu_kernel void @atomic_add_i64_ret_offset(ptr %out, ptr %out2, i64 %i
; GFX12-NEXT: v_dual_mov_b32 v2, s0 :: v_dual_mov_b32 v3, s1
; GFX12-NEXT: flat_atomic_add_u64 v[0:1], v[0:1], v[2:3] offset:32 th:TH_ATOMIC_RETURN
; GFX12-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GFX12-NEXT: buffer_gl0_inv
-; GFX12-NEXT: buffer_gl1_inv
+; GFX12-NEXT: global_inv scope:SCOPE_DEV
; GFX12-NEXT: v_dual_mov_b32 v2, s6 :: v_dual_mov_b32 v3, s7
; GFX12-NEXT: flat_store_b64 v[2:3], v[0:1]
; GFX12-NEXT: s_endpgm
@@ -166,8 +164,7 @@ define amdgpu_kernel void @atomic_add_i64_addr64_offset(ptr %out, i64 %in, i64 %
; GFX12-NEXT: flat_atomic_add_u64 v[2:3], v[0:1] offset:32
; GFX12-NEXT: s_waitcnt lgkmcnt(0)
; GFX12-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX12-NEXT: buffer_gl0_inv
-; GFX12-NEXT: buffer_gl1_inv
+; GFX12-NEXT: global_inv scope:SCOPE_DEV
; GFX12-NEXT: s_endpgm
entry:
%ptr = getelementptr i64, ptr %out, i64 %index
@@ -230,8 +227,7 @@ define amdgpu_kernel void @atomic_add_i64_ret_addr64_offset(ptr %out, ptr %out2,
; GFX12-NEXT: v_dual_mov_b32 v3, s1 :: v_dual_mov_b32 v2, s0
; GFX12-NEXT: flat_atomic_add_u64 v[0:1], v[2:3], v[0:1] offset:32 th:TH_ATOMIC_RETURN
; GFX12-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GFX12-NEXT: buffer_gl0_inv
-; GFX12-NEXT: buffer_gl1_inv
+; GFX12-NEXT: global_inv scope:SCOPE_DEV
; GFX12-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
; GFX12-NEXT: flat_store_b64 v[2:3], v[0:1]
; GFX12-NEXT: s_endpgm
@@ -279,8 +275,7 @@ define amdgpu_kernel void @atomic_add_i64(ptr %out, i64 %in) {
; GFX12-NEXT: flat_atomic_add_u64 v[0:1], v[2:3]
; GFX12-NEXT: s_waitcnt lgkmcnt(0)
; GFX12-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX12-NEXT: buffer_gl0_inv
-; GFX12-NEXT: buffer_gl1_inv
+; GFX12-NEXT: global_inv scope:SCOPE_DEV
; GFX12-NEXT: s_endpgm
entry:
%tmp0 = atomicrmw volatile add ptr %out, i64 %in syncscope("agent") seq_cst
@@ -332,8 +327,7 @@ define amdgpu_kernel void @atomic_add_i64_ret(ptr %out, ptr %out2, i64 %in) {
; GFX12-NEXT: v_dual_mov_b32 v2, s0 :: v_dual_mov_b32 v3, s1
; GFX12-NEXT: flat_atomic_add_u64 v[0:1], v[0:1], v[2:3] th:TH_ATOMIC_RETURN
; GFX12-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GFX12-NEXT: buffer_gl0_inv
-; GFX12-NEXT: buffer_gl1_inv
+; GFX12-NEXT: global_inv scope:SCOPE_DEV
; GFX12-NEXT: v_dual_mov_b32 v2, s6 :: v_dual_mov_b32 v3, s7
; GFX12-NEXT: flat_store_b64 v[2:3], v[0:1]
; GFX12-NEXT: s_endpgm
@@ -392,8 +386,7 @@ define amdgpu_kernel void @atomic_add_i64_addr64(ptr %out, i64 %in, i64 %index)
; GFX12-NEXT: flat_atomic_add_u64 v[2:3], v[0:1]
; GFX12-NEXT: s_waitcnt lgkmcnt(0)
; GFX12-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX12-NEXT: buffer_gl0_inv
-; GFX12-NEXT: buffer_gl1_inv
+; GFX12-NEXT: global_inv scope:SCOPE_DEV
; GFX12-NEXT: s_endpgm
entry:
%ptr = getelementptr i64, ptr %out, i64 %index
@@ -451,8 +444,7 @@ define amdgpu_kernel void @atomic_add_i64_ret_addr64(ptr %out, ptr %out2, i64 %i
; GFX12-NEXT: v_dual_mov_b32 v3, s1 :: v_dual_mov_b32 v2, s0
; GFX12-NEXT: flat_atomic_add_u64 v[0:1], v[2:3], v[0:1] th:TH_ATOMIC_RETURN
; GFX12-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GFX12-NEXT: buffer_gl0_inv
-; GFX12-NEXT: buffer_gl1_inv
+; GFX12-NEXT: global_inv scope:SCOPE_DEV
; GFX12-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
; GFX12-NEXT: flat_store_b64 v[2:3], v[0:1]
; GFX12-NEXT: s_endpgm
@@ -503,8 +495,7 @@ define amdgpu_kernel void @atomic_and_i64_offset(ptr %out, i64 %in) {
; GFX12-NEXT: flat_atomic_and_b64 v[0:1], v[2:3] offset:32
; GFX12-NEXT: s_waitcnt lgkmcnt(0)
; GFX12-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX12-NEXT: buffer_gl0_inv
-; GFX12-NEXT: buffer_gl1_inv
+; GFX12-NEXT: global_inv scope:SCOPE_DEV
; GFX12-NEXT: s_endpgm
entry:
%gep = getelementptr i64, ptr %out, i64 4
@@ -561,8 +552,7 @@ define amdgpu_kernel void @atomic_and_i64_ret_offset(ptr %out, ptr %out2, i64 %i
; GFX12-NEXT: v_dual_mov_b32 v2, s0 :: v_dual_mov_b32 v3, s1
; GFX12-NEXT: flat_atomic_and_b64 v[0:1], v[0:1], v[2:3] offset:32 th:TH_ATOMIC_RETURN
; GFX12-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GFX12-NEXT: buffer_gl0_inv
-; GFX12-NEXT: buffer_gl1_inv
+; GFX12-NEXT: global_inv scope:SCOPE_DEV
; GFX12-NEXT: v_dual_mov_b32 v2, s6 :: v_dual_mov_b32 v3, s7
; GFX12-NEXT: flat_store_b64 v[2:3], v[0:1]
; GFX12-NEXT: s_endpgm
@@ -626,8 +616,7 @@ define amdgpu_kernel void @atomic_and_i64_addr64_offset(ptr %out, i64 %in, i64 %
; GFX12-NEXT: flat_atomic_and_b64 v[2:3], v[0:1] offset:32
; GFX12-NEXT: s_waitcnt lgkmcnt(0)
; GFX12-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX12-NEXT: buffer_gl0_inv
-; GFX12-NEXT: buffer_gl1_inv
+; GFX12-NEXT: global_inv scope:SCOPE_DEV
; GFX12-NEXT: s_endpgm
entry:
%ptr = getelementptr i64, ptr %out, i64 %index
@@ -690,8 +679,7 @@ define amdgpu_kernel void @atomic_and_i64_ret_addr64_offset(ptr %out, ptr %out2,
; GFX12-NEXT: v_dual_mov_b32 v3, s1 :: v_dual_mov_b32 v2, s0
; GFX12-NEXT: flat_atomic_and_b64 v[0:1], v[2:3], v[0:1] offset:32 th:TH_ATOMIC_RETURN
; GFX12-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GFX12-NEXT: buffer_gl0_inv
-; GFX12-NEXT: buffer_gl1_inv
+; GFX12-NEXT: global_inv scope:SCOPE_DEV
; GFX12-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
; GFX12-NEXT: flat_store_b64 v[2:3], v[0:1]
; GFX12-NEXT: s_endpgm
@@ -739,8 +727,7 @@ define amdgpu_kernel void @atomic_and_i64(ptr %out, i64 %in) {
; GFX12-NEXT: flat_atomic_and_b64 v[0:1], v[2:3]
; GFX12-NEXT: s_waitcnt lgkmcnt(0)
; GFX12-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX12-NEXT: buffer_gl0_inv
-; GFX12-NEXT: buffer_gl1_inv
+; GFX12-NEXT: global_inv scope:SCOPE_DEV
; GFX12-NEXT: s_endpgm
entry:
%tmp0 = atomicrmw volatile and ptr %out, i64 %in syncscope("agent") seq_cst
@@ -792,8 +779,7 @@ define amdgpu_kernel void @atomic_and_i64_ret(ptr %out, ptr %out2, i64 %in) {
; GFX12-NEXT: v_dual_mov_b32 v2, s0 :: v_dual_mov_b32 v3, s1
; GFX12-NEXT: flat_atomic_and_b64 v[0:1], v[0:1], v[2:3] th:TH_ATOMIC_RETURN
; GFX12-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GFX12-NEXT: buffer_gl0_inv
-; GFX12-NEXT: buffer_gl1_inv
+; GFX12-NEXT: global_inv scope:SCOPE_DEV
; GFX12-NEXT: v_dual_mov_b32 v2, s6 :: v_dual_mov_b32 v3, s7
; GFX12-NEXT: flat_store_b64 v[2:3], v[0:1]
; GFX12-NEXT: s_endpgm
@@ -852,8 +838,7 @@ define amdgpu_kernel void @atomic_and_i64_addr64(ptr %out, i64 %in, i64 %index)
; GFX12-NEXT: flat_atomic_and_b64 v[2:3], v[0:1]
; GFX12-NEXT: s_waitcnt lgkmcnt(0)
; GFX12-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX12-NEXT: buffer_gl0_inv
-; GFX12-NEXT: buffer_gl1_inv
+; GFX12-NEXT: global_inv scope:SCOPE_DEV
; GFX12-NEXT: s_endpgm
entry:
%ptr = getelementptr i64, ptr %out, i64 %index
@@ -911,8 +896,7 @@ define amdgpu_kernel void @atomic_and_i64_ret_addr64(ptr %out, ptr %out2, i64 %i
; GFX12-NEXT: v_dual_mov_b32 v3, s1 :: v_dual_mov_b32 v2, s0
; GFX12-NEXT: flat_atomic_and_b64 v[0:1], v[2:3], v[0:1] th:TH_ATOMIC_RETURN
; GFX12-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GFX12-NEXT: buffer_gl0_inv
-; GFX12-NEXT: buffer_gl1_inv
+; GFX12-NEXT: global_inv scope:SCOPE_DEV
; GFX12-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
; GFX12-NEXT: flat_store_b64 v[2:3], v[0:1]
; GFX12-NEXT: s_endpgm
@@ -963,8 +947,7 @@ define amdgpu_kernel void @atomic_sub_i64_offset(ptr %out, i64 %in) {
; GFX12-NEXT: flat_atomic_sub_u64 v[0:1], v[2:3] offset:32
; GFX12-NEXT: s_waitcnt lgkmcnt(0)
; GFX12-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX12-NEXT: buffer_gl0_inv
-; GFX12-NEXT: buffer_gl1_inv
+; GFX12-NEXT: global_inv scope:SCOPE_DEV
; GFX12-NEXT: s_endpgm
entry:
%gep = getelementptr i64, ptr %out, i64 4
@@ -1021,8 +1004,7 @@ define amdgpu_kernel void @atomic_sub_i64_ret_offset(ptr %out, ptr %out2, i64 %i
; GFX12-NEXT: v_dual_mov_b32 v2, s0 :: v_dual_mov_b32 v3, s1
; GFX12-NEXT: flat_atomic_sub_u64 v[0:1], v[0:1], v[2:3] offset:32 th:TH_ATOMIC_RETURN
; GFX12-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GFX12-NEXT: buffer_gl0_inv
-; GFX12-NEXT: buffer_gl1_inv
+; GFX12-NEXT: global_inv scope:SCOPE_DEV
; GFX12-NEXT: v_dual_mov_b32 v2, s6 :: v_dual_mov_b32 v3, s7
; GFX12-NEXT: flat_store_b64 v[2:3], v[0:1]
; GFX12-NEXT: s_endpgm
@@ -1086,8 +1068,7 @@ define amdgpu_kernel void @atomic_sub_i64_addr64_offset(ptr %out, i64 %in, i64 %
; GFX12-NEXT: flat_atomic_sub_u64 v[2:3], v[0:1] offset:32
; GFX12-NEXT: s_waitcnt lgkmcnt(0)
; GFX12-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX12-NEXT: buffer_gl0_inv
-; GFX12-NEXT: buffer_gl1_inv
+; GFX12-NEXT: global_inv scope:SCOPE_DEV
; GFX12-NEXT: s_endpgm
entry:
%ptr = getelementptr i64, ptr %out, i64 %index
@@ -1150,8 +1131,7 @@ define amdgpu_kernel void @atomic_sub_i64_ret_addr64_offset(ptr %out, ptr %out2,
; GFX12-NEXT: v_dual_mov_b32 v3, s1 :: v_dual_mov_b32 v2, s0
; GFX12-NEXT: flat_atomic_sub_u64 v[0:1], v[2:3], v[0:1] offset:32 th:TH_ATOMIC_RETURN
; GFX12-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GFX12-NEXT: buffer_gl0_inv
-; GFX12-NEXT: buffer_gl1_inv
+; GFX12-NEXT: global_inv scope:SCOPE_DEV
; GFX12-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
; GFX12-NEXT: flat_store_b64 v[2:3], v[0:1]
; GFX12-NEXT: s_endpgm
@@ -1199,8 +1179,7 @@ define amdgpu_kernel void @atomic_sub_i64(ptr %out, i64 %in) {
; GFX12-NEXT: flat_atomic_sub_u64 v[0:1], v[2:3]
; GFX12-NEXT: s_waitcnt lgkmcnt(0)
; GFX12-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX12-NEXT: buffer_gl0_inv
-; GFX12-NEXT: buffer_gl1_inv
+; GFX12-NEXT: global_inv scope:SCOPE_DEV
; GFX12-NEXT: s_endpgm
entry:
%tmp0 = atomicrmw volatile sub ptr %out, i64 %in syncscope("agent") seq_cst
@@ -1252,8 +1231,7 @@ define amdgpu_kernel void @atomic_sub_i64_ret(ptr %out, ptr %out2, i64 %in) {
; GFX12-NEXT: v_dual_mov_b32 v2, s0 :: v_dual_mov_b32 v3, s1
; GFX12-NEXT: flat_atomic_sub_u64 v[0:1], v[0:1], v[2:3] th:TH_ATOMIC_RETURN
; GFX12-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GFX12-NEXT: buffer_gl0_inv
-; GFX12-NEXT: buffer_gl1_inv
+; GFX12-NEXT: global_inv scope:SCOPE_DEV
; GFX12-NEXT: v_dual_mov_b32 v2, s6 :: v_dual_mov_b32 v3, s7
; GFX12-NEXT: flat_store_b64 v[2:3], v[0:1]
; GFX12-NEXT: s_endpgm
@@ -1312,8 +1290,7 @@ define amdgpu_kernel void @atomic_sub_i64_addr64(ptr %out, i64 %in, i64 %index)
; GFX12-NEXT: flat_atomic_sub_u64 v[2:3], v[0:1]
; GFX12-NEXT: s_waitcnt lgkmcnt(0)
; GFX12-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX12-NEXT: buffer_gl0_inv
-; GFX12-NEXT: buffer_gl1_inv
+; GFX12-NEXT: global_inv scope:SCOPE_DEV
; GFX12-NEXT: s_endpgm
entry:
%ptr = getelementptr i64, ptr %out, i64 %index
@@ -1371,8 +1348,7 @@ define amdgpu_kernel void @atomic_sub_i64_ret_addr64(ptr %out, ptr %out2, i64 %i
; GFX12-NEXT: v_dual_mov_b32 v3, s1 :: v_dual_mov_b32 v2, s0
; GFX12-NEXT: flat_atomic_sub_u64 v[0:1], v[2:3], v[0:1] th:TH_ATOMIC_RETURN
; GFX12-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GFX12-NEXT: buffer_gl0_inv
-; GFX12-NEXT: buffer_gl1_inv
+; GFX12-NEXT: global_inv scope:SCOPE_DEV
; GFX12-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
; GFX12-NEXT: flat_store_b64 v[2:3], v[0:1]
; GFX12-NEXT: s_endpgm
@@ -1421,7 +1397,7 @@ define amdgpu_kernel void @atomic_max_i64_offset(ptr %out, i64 %in) {
; GFX12-NEXT: flat_atomic_max_i64 v[0:1], v[2:3] offset:32
; GFX12-NEXT: s_waitcnt lgkmcnt(0)
; GFX12-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX12-NEXT: buffer_gl0_inv
+; GFX12-NEXT: global_inv scope:SCOPE_SE
; GFX12-NEXT: s_endpgm
entry:
%gep = getelementptr i64, ptr %out, i64 4
@@ -1478,7 +1454,7 @@ define amdgpu_kernel void @atomic_max_i64_ret_offset(ptr %out, ptr %out2, i64 %i
; GFX12-NEXT: v_dual_mov_b32 v2, s0 :: v_dual_mov_b32 v3, s1
; GFX12-NEXT: flat_atomic_max_i64 v[0:1], v[0:1], v[2:3] offset:32 th:TH_ATOMIC_RETURN
; GFX12-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GFX12-NEXT: buffer_gl0_inv
+; GFX12-NEXT: global_inv scope:SCOPE_SE
; GFX12-NEXT: v_dual_mov_b32 v2, s6 :: v_dual_mov_b32 v3, s7
; GFX12-NEXT: flat_store_b64 v[2:3], v[0:1]
; GFX12-NEXT: s_endpgm
@@ -1540,7 +1516,7 @@ define amdgpu_kernel void @atomic_max_i64_addr64_offset(ptr %out, i64 %in, i64 %
; GFX12-NEXT: flat_atomic_max_i64 v[2:3], v[0:1] offset:32
; GFX12-NEXT: s_waitcnt lgkmcnt(0)
; GFX12-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX12-NEXT: buffer_gl0_inv
+; GFX12-NEXT: global_inv scope:SCOPE_SE
; GFX12-NEXT: s_endpgm
entry:
%ptr = getelementptr i64, ptr %out, i64 %index
@@ -1603,7 +1579,7 @@ define amdgpu_kernel void @atomic_max_i64_ret_addr64_offset(ptr %out, ptr %out2,
; GFX12-NEXT: v_dual_mov_b32 v3, s1 :: v_dual_mov_b32 v2, s0
; GFX12-NEXT: flat_atomic_max_i64 v[0:1], v[2:3], v[0:1] offset:32 th:TH_ATOMIC_RETURN
; GFX12-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GFX12-NEXT: buffer_gl0_inv
+; GFX12-NEXT: global_inv scope:SCOPE_SE
; GFX12-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
; GFX12-NEXT: flat_store_b64 v[2:3], v[0:1]
; GFX12-NEXT: s_endpgm
@@ -1649,7 +1625,7 @@ define amdgpu_kernel void @atomic_max_i64(ptr %out, i64 %in) {
; GFX12-NEXT: flat_atomic_max_i64 v[0:1], v[2:3]
; GFX12-NEXT: s_waitcnt lgkmcnt(0)
; GFX12-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX12-NEXT: buffer_gl0_inv
+; GFX12-NEXT: global_inv scope:SCOPE_SE
; GFX12-NEXT: s_endpgm
entry:
%tmp0 = atomicrmw volatile max ptr %out, i64 %in syncscope("workgroup") seq_cst
@@ -1701,7 +1677,7 @@ define amdgpu_kernel void @atomic_max_i64_ret(ptr %out, ptr %out2, i64 %in) {
; GFX12-NEXT: v_dual_mov_b32 v2, s0 :: v_dual_mov_b32 v3, s1
; GFX12-NEXT: flat_atomic_max_i64 v[0:1], v[0:1], v[2:3] th:TH_ATOMIC_RETURN
; GFX12-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GFX12-NEXT: buffer_gl0_inv
+; GFX12-NEXT: global_inv scope:SCOPE_SE
; GFX12-NEXT: v_dual_mov_b32 v2, s6 :: v_dual_mov_b32 v3, s7
; GFX12-NEXT: flat_store_b64 v[2:3], v[0:1]
; GFX12-NEXT: s_endpgm
@@ -1758,7 +1734,7 @@ define amdgpu_kernel void @atomic_max_i64_addr64(ptr %out, i64 %in, i64 %index)
; GFX12-NEXT: flat_atomic_max_i64 v[2:3], v[0:1]
; GFX12-NEXT: s_waitcnt lgkmcnt(0)
; GFX12-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX12-NEXT: buffer_gl0_inv
+; GFX12-NEXT: global_inv scope:SCOPE_SE
; GFX12-NEXT: s_endpgm
entry:
%ptr = getelementptr i64, ptr %out, i64 %index
@@ -1816,7 +1792,7 @@ define amdgpu_kernel void @atomic_max_i64_ret_addr64(ptr %out, ptr %out2, i64 %i
; GFX12-NEXT: v_dual_mov_b32 v3, s1 :: v_dual_mov_b32 v2, s0
; GFX12-NEXT: flat_atomic_max_i64 v[0:1], v[2:3], v[0:1] th:TH_ATOMIC_RETURN
; GFX12-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GFX12-NEXT: buffer_gl0_inv
+; GFX12-NEXT: global_inv scope:SCOPE_SE
; GFX12-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
; GFX12-NEXT: flat_store_b64 v[2:3], v[0:1]
; GFX12-NEXT: s_endpgm
@@ -1865,7 +1841,7 @@ define amdgpu_kernel void @atomic_umax_i64_offset(ptr %out, i64 %in) {
; GFX12-NEXT: flat_atomic_max_u64 v[0:1], v[2:3] offset:32
; GFX12-NEXT: s_waitcnt lgkmcnt(0)
; GFX12-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX12-NEXT: buffer_gl0_inv
+; GFX12-NEXT: global_inv scope:SCOPE_SE
; GFX12-NEXT: s_endpgm
entry:
%gep = getelementptr i64, ptr %out, i64 4
@@ -1922,7 +1898,7 @@ define amdgpu_kernel void @atomic_umax_i64_ret_offset(ptr %out, ptr %out2, i64 %
; GFX12-NEXT: v_dual_mov_b32 v2, s0 :: v_dual_mov_b32 v3, s1
; GFX12-NEXT: flat_atomic_max_u64 v[0:1], v[0:1], v[2:3] offset:32 th:TH_ATOMIC_RETURN
; GFX12-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GFX12-NEXT: buffer_gl0_inv
+; GFX12-NEXT: global_inv scope:SCOPE_SE
; GFX12-NEXT: v_dual_mov_b32 v2, s6 :: v_dual_mov_b32 v3, s7
; GFX12-NEXT: flat_store_b64 v[2:3], v[0:1]
; GFX12-NEXT: s_endpgm
@@ -1984,7 +1960,7 @@ define amdgpu_kernel void @atomic_umax_i64_addr64_offset(ptr %out, i64 %in, i64
; GFX12-NEXT: flat_atomic_max_u64 v[2:3], v[0:1] offset:32
; GFX12-NEXT: s_waitcnt lgkmcnt(0)
; GFX12-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX12-NEXT: buffer_gl0_inv
+; GFX12-NEXT: global_inv scope:SCOPE_SE
; GFX12-NEXT: s_endpgm
entry:
%ptr = getelementptr i64, ptr %out, i64 %index
@@ -2047,7 +2023,7 @@ define amdgpu_kernel void @atomic_umax_i64_ret_addr64_offset(ptr %out, ptr %out2
; GFX12-NEXT: v_dual_mov_b32 v3, s1 :: v_dual_mov_b32 v2, s0
; GFX12-NEXT: flat_atomic_max_u64 v[0:1], v[2:3], v[0:1] offset:32 th:TH_ATOMIC_RETURN
; GFX12-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GFX12-NEXT: buffer_gl0_inv
+; GFX12-NEXT: global_inv scope:SCOPE_SE
; GFX12-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
; GFX12-NEXT: flat_store_b64 v[2:3], v[0:1]
; GFX12-NEXT: s_endpgm
@@ -2093,7 +2069,7 @@ define amdgpu_kernel void @atomic_umax_i64(ptr %out, i64 %in) {
; GFX12-NEXT: flat_atomic_max_u64 v[0:1], v[2:3]
; GFX12-NEXT: s_waitcnt lgkmcnt(0)
; GFX12-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX12-NEXT: buffer_gl0_inv
+; GFX12-NEXT: global_inv scope:SCOPE_SE
; GFX12-NEXT: s_endpgm
entry:
%tmp0 = atomicrmw volatile umax ptr %out, i64 %in syncscope("workgroup") seq_cst
@@ -2145,7 +2121,7 @@ define amdgpu_kernel void @atomic_umax_i64_ret(ptr %out, ptr %out2, i64 %in) {
; GFX12-NEXT: v_dual_mov_b32 v2, s0 :: v_dual_mov_b32 v3, s1
; GFX12-NEXT: flat_atomic_max_u64 v[0:1], v[0:1], v[2:3] th:TH_ATOMIC_RETURN
; GFX12-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GFX12-NEXT: buffer_gl0_inv
+; GFX12-NEXT: global_inv scope:SCOPE_SE
; GFX12-NEXT: v_dual_mov_b32 v2, s6 :: v_dual_mov_b32 v3, s7
; GFX12-NEXT: flat_store_b64 v[2:3], v[0:1]
; GFX12-NEXT: s_endpgm
@@ -2202,7 +2178,7 @@ define amdgpu_kernel void @atomic_umax_i64_addr64(ptr %out, i64 %in, i64 %index)
; GFX12-NEXT: flat_atomic_max_u64 v[2:3], v[0:1]
; GFX12-NEXT: s_waitcnt lgkmcnt(0)
; GFX12-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX12-NEXT: buffer_gl0_inv
+; GFX12-NEXT: global_inv scope:SCOPE_SE
; GFX12-NEXT: s_endpgm
entry:
%ptr = getelementptr i64, ptr %out, i64 %index
@@ -2260,7 +2236,7 @@ define amdgpu_kernel void @atomic_umax_i64_ret_addr64(ptr %out, ptr %out2, i64 %
; GFX12-NEXT: v_dual_mov_b32 v3, s1 :: v_dual_mov_b32 v2, s0
; GFX12-NEXT: flat_atomic_max_u64 v[0:1], v[2:3], v[0:1] th:TH_ATOMIC_RETURN
; GFX12-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GFX12-NEXT: buffer_gl0_inv
+; GFX12-NEXT: global_inv scope:SCOPE_SE
; GFX12-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
; GFX12-NEXT: flat_store_b64 v[2:3], v[0:1]
; GFX12-NEXT: s_endpgm
@@ -2309,7 +2285,7 @@ define amdgpu_kernel void @atomic_min_i64_offset(ptr %out, i64 %in) {
; GFX12-NEXT: flat_atomic_min_i64 v[0:1], v[2:3] offset:32
; GFX12-NEXT: s_waitcnt lgkmcnt(0)
; GFX12-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX12-NEXT: buffer_gl0_inv
+; GFX12-NEXT: global_inv scope:SCOPE_SE
; GFX12-NEXT: s_endpgm
entry:
%gep = getelementptr i64, ptr %out, i64 4
@@ -2366,7 +2342,7 @@ define amdgpu_kernel void @atomic_min_i64_ret_offset(ptr %out, ptr %out2, i64 %i
; GFX12-NEXT: v_dual_mov_b32 v2, s0 :: v_dual_mov_b32 v3, s1
; GFX12-NEXT: flat_atomic_min_i64 v[0:1], v[0:1], v[2:3] offset:32 th:TH_ATOMIC_RETURN
; GFX12-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GFX12-NEXT: buffer_gl0_inv
+; GFX12-NEXT: global_inv scope:SCOPE_SE
; GFX12-NEXT: v_dual_mov_b32 v2, s6 :: v_dual_mov_b32 v3, s7
; GFX12-NEXT: flat_store_b64 v[2:3], v[0:1]
; GFX12-NEXT: s_endpgm
@@ -2428,7 +2404,7 @@ define amdgpu_kernel void @atomic_min_i64_addr64_offset(ptr %out, i64 %in, i64 %
; GFX12-NEXT: flat_atomic_min_i64 v[2:3], v[0:1] offset:32
; GFX12-NEXT: s_waitcnt lgkmcnt(0)
; GFX12-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX12-NEXT: buffer_gl0_inv
+; GFX12-NEXT: global_inv scope:SCOPE_SE
; GFX12-NEXT: s_endpgm
entry:
%ptr = getelementptr i64, ptr %out, i64 %index
@@ -2491,7 +2467,7 @@ define amdgpu_kernel void @atomic_min_i64_ret_addr64_offset(ptr %out, ptr %out2,
; GFX12-NEXT: v_dual_mov_b32 v3, s1 :: v_dual_mov_b32 v2, s0
; GFX12-NEXT: flat_atomic_min_i64 v[0:1], v[2:3], v[0:1] offset:32 th:TH_ATOMIC_RETURN
; GFX12-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GFX12-NEXT: buffer_gl0_inv
+; GFX12-NEXT: global_inv scope:SCOPE_SE
; GFX12-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
; GFX12-NEXT: flat_store_b64 v[2:3], v[0:1]
; GFX12-NEXT: s_endpgm
@@ -2537,7 +2513,7 @@ define amdgpu_kernel void @atomic_min_i64(ptr %out, i64 %in) {
; GFX12-NEXT: flat_atomic_min_i64 v[0:1], v[2:3]
; GFX12-NEXT: s_waitcnt lgkmcnt(0)
; GFX12-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX12-NEXT: buffer_gl0_inv
+; GFX12-NEXT: global_inv scope:SCOPE_SE
; GFX12-NEXT: s_endpgm
entry:
%tmp0 = atomicrmw volatile min ptr %out, i64 %in syncscope("workgroup") seq_cst
@@ -2589,7 +2565,7 @@ define amdgpu_kernel void @atomic_min_i64_ret(ptr %out, ptr %out2, i64 %in) {
; GFX12-NEXT: v_dual_mov_b32 v2, s0 :: v_dual_mov_b32 v3, s1
; GFX12-NEXT: flat_atomic_min_i64 v[0:1], v[0:1], v[2:3] th:TH_ATOMIC_RETURN
; GFX12-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GFX12-NEXT: buffer_gl0_inv
+; GFX12-NEXT: global_inv scope:SCOPE_SE
; GFX12-NEXT: v_dual_mov_b32 v2, s6 :: v_dual_mov_b32 v3, s7
; GFX12-NEXT: flat_store_b64 v[2:3], v[0:1]
; GFX12-NEXT: s_endpgm
@@ -2646,7 +2622,7 @@ define amdgpu_kernel void @atomic_min_i64_addr64(ptr %out, i64 %in, i64 %index)
; GFX12-NEXT: flat_atomic_min_i64 v[2:3], v[0:1]
; GFX12-NEXT: s_waitcnt lgkmcnt(0)
; GFX12-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX12-NEXT: buffer_gl0_inv
+; GFX12-NEXT: global_inv scope:SCOPE_SE
; GFX12-NEXT: s_endpgm
entry:
%ptr = getelementptr i64, ptr %out, i64 %index
@@ -2704,7 +2680,7 @@ define amdgpu_kernel void @atomic_min_i64_ret_addr64(ptr %out, ptr %out2, i64 %i
; GFX12-NEXT: v_dual_mov_b32 v3, s1 :: v_dual_mov_b32 v2, s0
; GFX12-NEXT: flat_atomic_min_i64 v[0:1], v[2:3], v[0:1] th:TH_ATOMIC_RETURN
; GFX12-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GFX12-NEXT: buffer_gl0_inv
+; GFX12-NEXT: global_inv scope:SCOPE_SE
; GFX12-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
; GFX12-NEXT: flat_store_b64 v[2:3], v[0:1]
; GFX12-NEXT: s_endpgm
@@ -2753,7 +2729,7 @@ define amdgpu_kernel void @atomic_umin_i64_offset(ptr %out, i64 %in) {
; GFX12-NEXT: flat_atomic_min_u64 v[0:1], v[2:3] offset:32
; GFX12-NEXT: s_waitcnt lgkmcnt(0)
; GFX12-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX12-NEXT: buffer_gl0_inv
+; GFX12-NEXT: global_inv scope:SCOPE_SE
; GFX12-NEXT: s_endpgm
entry:
%gep = getelementptr i64, ptr %out, i64 4
@@ -2810,7 +2786,7 @@ define amdgpu_kernel void @atomic_umin_i64_ret_offset(ptr %out, ptr %out2, i64 %
; GFX12-NEXT: v_dual_mov_b32 v2, s0 :: v_dual_mov_b32 v3, s1
; GFX12-NEXT: flat_atomic_min_u64 v[0:1], v[0:1], v[2:3] offset:32 th:TH_ATOMIC_RETURN
; GFX12-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GFX12-NEXT: buffer_gl0_inv
+; GFX12-NEXT: global_inv scope:SCOPE_SE
; GFX12-NEXT: v_dual_mov_b32 v2, s6 :: v_dual_mov_b32 v3, s7
; GFX12-NEXT: flat_store_b64 v[2:3], v[0:1]
; GFX12-NEXT: s_endpgm
@@ -2872,7 +2848,7 @@ define amdgpu_kernel void @atomic_umin_i64_addr64_offset(ptr %out, i64 %in, i64
; GFX12-NEXT: flat_atomic_min_u64 v[2:3], v[0:1] offset:32
; GFX12-NEXT: s_waitcnt lgkmcnt(0)
; GFX12-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX12-NEXT: buffer_gl0_inv
+; GFX12-NEXT: global_inv scope:SCOPE_SE
; GFX12-NEXT: s_endpgm
entry:
%ptr = getelementptr i64, ptr %out, i64 %index
@@ -2935,7 +2911,7 @@ define amdgpu_kernel void @atomic_umin_i64_ret_addr64_offset(ptr %out, ptr %out2
; GFX12-NEXT: v_dual_mov_b32 v3, s1 :: v_dual_mov_b32 v2, s0
; GFX12-NEXT: flat_atomic_min_u64 v[0:1], v[2:3], v[0:1] offset:32 th:TH_ATOMIC_RETURN
; GFX12-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GFX12-NEXT: buffer_gl0_inv
+; GFX12-NEXT: global_inv scope:SCOPE_SE
; GFX12-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
; GFX12-NEXT: flat_store_b64 v[2:3], v[0:1]
; GFX12-NEXT: s_endpgm
@@ -2981,7 +2957,7 @@ define amdgpu_kernel void @atomic_umin_i64(ptr %out, i64 %in) {
; GFX12-NEXT: flat_atomic_min_u64 v[0:1], v[2:3]
; GFX12-NEXT: s_waitcnt lgkmcnt(0)
; GFX12-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX12-NEXT: buffer_gl0_inv
+; GFX12-NEXT: global_inv scope:SCOPE_SE
; GFX12-NEXT: s_endpgm
entry:
%tmp0 = atomicrmw volatile umin ptr %out, i64 %in syncscope("workgroup") seq_cst
@@ -3033,7 +3009,7 @@ define amdgpu_kernel void @atomic_umin_i64_ret(ptr %out, ptr %out2, i64 %in) {
; GFX12-NEXT: v_dual_mov_b32 v2, s0 :: v_dual_mov_b32 v3, s1
; GFX12-NEXT: flat_atomic_min_u64 v[0:1], v[0:1], v[2:3] th:TH_ATOMIC_RETURN
; GFX12-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GFX12-NEXT: buffer_gl0_inv
+; GFX12-NEXT: global_inv scope:SCOPE_SE
; GFX12-NEXT: v_dual_mov_b32 v2, s6 :: v_dual_mov_b32 v3, s7
; GFX12-NEXT: flat_store_b64 v[2:3], v[0:1]
; GFX12-NEXT: s_endpgm
@@ -3090,7 +3066,7 @@ define amdgpu_kernel void @atomic_umin_i64_addr64(ptr %out, i64 %in, i64 %index)
; GFX12-NEXT: flat_atomic_min_u64 v[2:3], v[0:1]
; GFX12-NEXT: s_waitcnt lgkmcnt(0)
; GFX12-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX12-NEXT: buffer_gl0_inv
+; GFX12-NEXT: global_inv scope:SCOPE_SE
; GFX12-NEXT: s_endpgm
entry:
%ptr = getelementptr i64, ptr %out, i64 %index
@@ -3148,7 +3124,7 @@ define amdgpu_kernel void @atomic_umin_i64_ret_addr64(ptr %out, ptr %out2, i64 %
; GFX12-NEXT: v_dual_mov_b32 v3, s1 :: v_dual_mov_b32 v2, s0
; GFX12-NEXT: flat_atomic_min_u64 v[0:1], v[2:3], v[0:1] th:TH_ATOMIC_RETURN
; GFX12-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GFX12-NEXT: buffer_gl0_inv
+; GFX12-NEXT: global_inv scope:SCOPE_SE
; GFX12-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
; GFX12-NEXT: flat_store_b64 v[2:3], v[0:1]
; GFX12-NEXT: s_endpgm
@@ -3199,8 +3175,7 @@ define amdgpu_kernel void @atomic_or_i64_offset(ptr %out, i64 %in) {
; GFX12-NEXT: flat_atomic_or_b64 v[0:1], v[2:3] offset:32
; GFX12-NEXT: s_waitcnt lgkmcnt(0)
; GFX12-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX12-NEXT: buffer_gl0_inv
-; GFX12-NEXT: buffer_gl1_inv
+; GFX12-NEXT: global_inv scope:SCOPE_DEV
; GFX12-NEXT: s_endpgm
entry:
%gep = getelementptr i64, ptr %out, i64 4
@@ -3257,8 +3232,7 @@ define amdgpu_kernel void @atomic_or_i64_ret_offset(ptr %out, ptr %out2, i64 %in
; GFX12-NEXT: v_dual_mov_b32 v2, s0 :: v_dual_mov_b32 v3, s1
; GFX12-NEXT: flat_atomic_or_b64 v[0:1], v[0:1], v[2:3] offset:32 th:TH_ATOMIC_RETURN
; GFX12-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GFX12-NEXT: buffer_gl0_inv
-; GFX12-NEXT: buffer_gl1_inv
+; GFX12-NEXT: global_inv scope:SCOPE_DEV
; GFX12-NEXT: v_dual_mov_b32 v2, s6 :: v_dual_mov_b32 v3, s7
; GFX12-NEXT: flat_store_b64 v[2:3], v[0:1]
; GFX12-NEXT: s_endpgm
@@ -3322,8 +3296,7 @@ define amdgpu_kernel void @atomic_or_i64_addr64_offset(ptr %out, i64 %in, i64 %i
; GFX12-NEXT: flat_atomic_or_b64 v[2:3], v[0:1] offset:32
; GFX12-NEXT: s_waitcnt lgkmcnt(0)
; GFX12-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX12-NEXT: buffer_gl0_inv
-; GFX12-NEXT: buffer_gl1_inv
+; GFX12-NEXT: global_inv scope:SCOPE_DEV
; GFX12-NEXT: s_endpgm
entry:
%ptr = getelementptr i64, ptr %out, i64 %index
@@ -3386,8 +3359,7 @@ define amdgpu_kernel void @atomic_or_i64_ret_addr64_offset(ptr %out, ptr %out2,
; GFX12-NEXT: v_dual_mov_b32 v3, s1 :: v_dual_mov_b32 v2, s0
; GFX12-NEXT: flat_atomic_or_b64 v[0:1], v[2:3], v[0:1] offset:32 th:TH_ATOMIC_RETURN
; GFX12-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GFX12-NEXT: buffer_gl0_inv
-; GFX12-NEXT: buffer_gl1_inv
+; GFX12-NEXT: global_inv scope:SCOPE_DEV
; GFX12-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
; GFX12-NEXT: flat_store_b64 v[2:3], v[0:1]
; GFX12-NEXT: s_endpgm
@@ -3435,8 +3407,7 @@ define amdgpu_kernel void @atomic_or_i64(ptr %out, i64 %in) {
; GFX12-NEXT: flat_atomic_or_b64 v[0:1], v[2:3]
; GFX12-NEXT: s_waitcnt lgkmcnt(0)
; GFX12-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX12-NEXT: buffer_gl0_inv
-; GFX12-NEXT: buffer_gl1_inv
+; GFX12-NEXT: global_inv scope:SCOPE_DEV
; GFX12-NEXT: s_endpgm
entry:
%tmp0 = atomicrmw volatile or ptr %out, i64 %in syncscope("agent") seq_cst
@@ -3488,8 +3459,7 @@ define amdgpu_kernel void @atomic_or_i64_ret(ptr %out, ptr %out2, i64 %in) {
; GFX12-NEXT: v_dual_mov_b32 v2, s0 :: v_dual_mov_b32 v3, s1
; GFX12-NEXT: flat_atomic_or_b64 v[0:1], v[0:1], v[2:3] th:TH_ATOMIC_RETURN
; GFX12-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GFX12-NEXT: buffer_gl0_inv
-; GFX12-NEXT: buffer_gl1_inv
+; GFX12-NEXT: global_inv scope:SCOPE_DEV
; GFX12-NEXT: v_dual_mov_b32 v2, s6 :: v_dual_mov_b32 v3, s7
; GFX12-NEXT: flat_store_b64 v[2:3], v[0:1]
; GFX12-NEXT: s_endpgm
@@ -3548,8 +3518,7 @@ define amdgpu_kernel void @atomic_or_i64_addr64(ptr %out, i64 %in, i64 %index) {
; GFX12-NEXT: flat_atomic_or_b64 v[2:3], v[0:1]
; GFX12-NEXT: s_waitcnt lgkmcnt(0)
; GFX12-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX12-NEXT: buffer_gl0_inv
-; GFX12-NEXT: buffer_gl1_inv
+; GFX12-NEXT: global_inv scope:SCOPE_DEV
; GFX12-NEXT: s_endpgm
entry:
%ptr = getelementptr i64, ptr %out, i64 %index
@@ -3607,8 +3576,7 @@ define amdgpu_kernel void @atomic_or_i64_ret_addr64(ptr %out, ptr %out2, i64 %in
; GFX12-NEXT: v_dual_mov_b32 v3, s1 :: v_dual_mov_b32 v2, s0
; GFX12-NEXT: flat_atomic_or_b64 v[0:1], v[2:3], v[0:1] th:TH_ATOMIC_RETURN
; GFX12-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GFX12-NEXT: buffer_gl0_inv
-; GFX12-NEXT: buffer_gl1_inv
+; GFX12-NEXT: global_inv scope:SCOPE_DEV
; GFX12-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
; GFX12-NEXT: flat_store_b64 v[2:3], v[0:1]
; GFX12-NEXT: s_endpgm
@@ -3659,8 +3627,7 @@ define amdgpu_kernel void @atomic_xchg_i64_offset(ptr %out, i64 %in) {
; GFX12-NEXT: flat_atomic_swap_b64 v[0:1], v[2:3] offset:32
; GFX12-NEXT: s_waitcnt lgkmcnt(0)
; GFX12-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX12-NEXT: buffer_gl0_inv
-; GFX12-NEXT: buffer_gl1_inv
+; GFX12-NEXT: global_inv scope:SCOPE_DEV
; GFX12-NEXT: s_endpgm
entry:
%gep = getelementptr i64, ptr %out, i64 4
@@ -3708,8 +3675,7 @@ define amdgpu_kernel void @atomic_xchg_f64_offset(ptr %out, double %in) {
; GFX12-NEXT: flat_atomic_swap_b64 v[0:1], v[2:3] offset:32
; GFX12-NEXT: s_waitcnt lgkmcnt(0)
; GFX12-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX12-NEXT: buffer_gl0_inv
-; GFX12-NEXT: buffer_gl1_inv
+; GFX12-NEXT: global_inv scope:SCOPE_SYS
; GFX12-NEXT: s_endpgm
entry:
%gep = getelementptr double, ptr %out, i64 4
@@ -3757,8 +3723,7 @@ define amdgpu_kernel void @atomic_xchg_pointer_offset(ptr %out, ptr %in) {
; GFX12-NEXT: flat_atomic_swap_b64 v[0:1], v[2:3] offset:32
; GFX12-NEXT: s_waitcnt lgkmcnt(0)
; GFX12-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX12-NEXT: buffer_gl0_inv
-; GFX12-NEXT: buffer_gl1_inv
+; GFX12-NEXT: global_inv scope:SCOPE_SYS
; GFX12-NEXT: s_endpgm
entry:
%gep = getelementptr ptr, ptr %out, i32 4
@@ -3815,8 +3780,7 @@ define amdgpu_kernel void @atomic_xchg_i64_ret_offset(ptr %out, ptr %out2, i64 %
; GFX12-NEXT: v_dual_mov_b32 v2, s0 :: v_dual_mov_b32 v3, s1
; GFX12-NEXT: flat_atomic_swap_b64 v[0:1], v[0:1], v[2:3] offset:32 th:TH_ATOMIC_RETURN
; GFX12-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GFX12-NEXT: buffer_gl0_inv
-; GFX12-NEXT: buffer_gl1_inv
+; GFX12-NEXT: global_inv scope:SCOPE_DEV
; GFX12-NEXT: v_dual_mov_b32 v2, s6 :: v_dual_mov_b32 v3, s7
; GFX12-NEXT: flat_store_b64 v[2:3], v[0:1]
; GFX12-NEXT: s_endpgm
@@ -3880,8 +3844,7 @@ define amdgpu_kernel void @atomic_xchg_i64_addr64_offset(ptr %out, i64 %in, i64
; GFX12-NEXT: flat_atomic_swap_b64 v[2:3], v[0:1] offset:32
; GFX12-NEXT: s_waitcnt lgkmcnt(0)
; GFX12-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX12-NEXT: buffer_gl0_inv
-; GFX12-NEXT: buffer_gl1_inv
+; GFX12-NEXT: global_inv scope:SCOPE_DEV
; GFX12-NEXT: s_endpgm
entry:
%ptr = getelementptr i64, ptr %out, i64 %index
@@ -3944,8 +3907,7 @@ define amdgpu_kernel void @atomic_xchg_i64_ret_addr64_offset(ptr %out, ptr %out2
; GFX12-NEXT: v_dual_mov_b32 v3, s1 :: v_dual_mov_b32 v2, s0
; GFX12-NEXT: flat_atomic_swap_b64 v[0:1], v[2:3], v[0:1] offset:32 th:TH_ATOMIC_RETURN
; GFX12-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GFX12-NEXT: buffer_gl0_inv
-; GFX12-NEXT: buffer_gl1_inv
+; GFX12-NEXT: global_inv scope:SCOPE_DEV
; GFX12-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
; GFX12-NEXT: flat_store_b64 v[2:3], v[0:1]
; GFX12-NEXT: s_endpgm
@@ -3993,8 +3955,7 @@ define amdgpu_kernel void @atomic_xchg_i64(ptr %out, i64 %in) {
; GFX12-NEXT: flat_atomic_swap_b64 v[0:1], v[2:3]
; GFX12-NEXT: s_waitcnt lgkmcnt(0)
; GFX12-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX12-NEXT: buffer_gl0_inv
-; GFX12-NEXT: buffer_gl1_inv
+; GFX12-NEXT: global_inv scope:SCOPE_DEV
; GFX12-NEXT: s_endpgm
entry:
%tmp0 = atomicrmw volatile xchg ptr %out, i64 %in syncscope("agent") seq_cst
@@ -4046,8 +4007,7 @@ define amdgpu_kernel void @atomic_xchg_i64_ret(ptr %out, ptr %out2, i64 %in) {
; GFX12-NEXT: v_dual_mov_b32 v2, s0 :: v_dual_mov_b32 v3, s1
; GFX12-NEXT: flat_atomic_swap_b64 v[0:1], v[0:1], v[2:3] th:TH_ATOMIC_RETURN
; GFX12-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GFX12-NEXT: buffer_gl0_inv
-; GFX12-NEXT: buffer_gl1_inv
+; GFX12-NEXT: global_inv scope:SCOPE_DEV
; GFX12-NEXT: v_dual_mov_b32 v2, s6 :: v_dual_mov_b32 v3, s7
; GFX12-NEXT: flat_store_b64 v[2:3], v[0:1]
; GFX12-NEXT: s_endpgm
@@ -4106,8 +4066,7 @@ define amdgpu_kernel void @atomic_xchg_i64_addr64(ptr %out, i64 %in, i64 %index)
; GFX12-NEXT: flat_atomic_swap_b64 v[2:3], v[0:1]
; GFX12-NEXT: s_waitcnt lgkmcnt(0)
; GFX12-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX12-NEXT: buffer_gl0_inv
-; GFX12-NEXT: buffer_gl1_inv
+; GFX12-NEXT: global_inv scope:SCOPE_DEV
; GFX12-NEXT: s_endpgm
entry:
%ptr = getelementptr i64, ptr %out, i64 %index
@@ -4165,8 +4124,7 @@ define amdgpu_kernel void @atomic_xchg_i64_ret_addr64(ptr %out, ptr %out2, i64 %
; GFX12-NEXT: v_dual_mov_b32 v3, s1 :: v_dual_mov_b32 v2, s0
; GFX12-NEXT: flat_atomic_swap_b64 v[0:1], v[2:3], v[0:1] th:TH_ATOMIC_RETURN
; GFX12-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GFX12-NEXT: buffer_gl0_inv
-; GFX12-NEXT: buffer_gl1_inv
+; GFX12-NEXT: global_inv scope:SCOPE_DEV
; GFX12-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
; GFX12-NEXT: flat_store_b64 v[2:3], v[0:1]
; GFX12-NEXT: s_endpgm
@@ -4217,8 +4175,7 @@ define amdgpu_kernel void @atomic_xor_i64_offset(ptr %out, i64 %in) {
; GFX12-NEXT: flat_atomic_xor_b64 v[0:1], v[2:3] offset:32
; GFX12-NEXT: s_waitcnt lgkmcnt(0)
; GFX12-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX12-NEXT: buffer_gl0_inv
-; GFX12-NEXT: buffer_gl1_inv
+; GFX12-NEXT: global_inv scope:SCOPE_DEV
; GFX12-NEXT: s_endpgm
entry:
%gep = getelementptr i64, ptr %out, i64 4
@@ -4275,8 +4232,7 @@ define amdgpu_kernel void @atomic_xor_i64_ret_offset(ptr %out, ptr %out2, i64 %i
; GFX12-NEXT: v_dual_mov_b32 v2, s0 :: v_dual_mov_b32 v3, s1
; GFX12-NEXT: flat_atomic_xor_b64 v[0:1], v[0:1], v[2:3] offset:32 th:TH_ATOMIC_RETURN
; GFX12-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GFX12-NEXT: buffer_gl0_inv
-; GFX12-NEXT: buffer_gl1_inv
+; GFX12-NEXT: global_inv scope:SCOPE_DEV
; GFX12-NEXT: v_dual_mov_b32 v2, s6 :: v_dual_mov_b32 v3, s7
; GFX12-NEXT: flat_store_b64 v[2:3], v[0:1]
; GFX12-NEXT: s_endpgm
@@ -4340,8 +4296,7 @@ define amdgpu_kernel void @atomic_xor_i64_addr64_offset(ptr %out, i64 %in, i64 %
; GFX12-NEXT: flat_atomic_xor_b64 v[2:3], v[0:1] offset:32
; GFX12-NEXT: s_waitcnt lgkmcnt(0)
; GFX12-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX12-NEXT: buffer_gl0_inv
-; GFX12-NEXT: buffer_gl1_inv
+; GFX12-NEXT: global_inv scope:SCOPE_DEV
; GFX12-NEXT: s_endpgm
entry:
%ptr = getelementptr i64, ptr %out, i64 %index
@@ -4404,8 +4359,7 @@ define amdgpu_kernel void @atomic_xor_i64_ret_addr64_offset(ptr %out, ptr %out2,
; GFX12-NEXT: v_dual_mov_b32 v3, s1 :: v_dual_mov_b32 v2, s0
; GFX12-NEXT: flat_atomic_xor_b64 v[0:1], v[2:3], v[0:1] offset:32 th:TH_ATOMIC_RETURN
; GFX12-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GFX12-NEXT: buffer_gl0_inv
-; GFX12-NEXT: buffer_gl1_inv
+; GFX12-NEXT: global_inv scope:SCOPE_DEV
; GFX12-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
; GFX12-NEXT: flat_store_b64 v[2:3], v[0:1]
; GFX12-NEXT: s_endpgm
@@ -4453,8 +4407,7 @@ define amdgpu_kernel void @atomic_xor_i64(ptr %out, i64 %in) {
; GFX12-NEXT: flat_atomic_xor_b64 v[0:1], v[2:3]
; GFX12-NEXT: s_waitcnt lgkmcnt(0)
; GFX12-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX12-NEXT: buffer_gl0_inv
-; GFX12-NEXT: buffer_gl1_inv
+; GFX12-NEXT: global_inv scope:SCOPE_DEV
; GFX12-NEXT: s_endpgm
entry:
%tmp0 = atomicrmw volatile xor ptr %out, i64 %in syncscope("agent") seq_cst
@@ -4506,8 +4459,7 @@ define amdgpu_kernel void @atomic_xor_i64_ret(ptr %out, ptr %out2, i64 %in) {
; GFX12-NEXT: v_dual_mov_b32 v2, s0 :: v_dual_mov_b32 v3, s1
; GFX12-NEXT: flat_atomic_xor_b64 v[0:1], v[0:1], v[2:3] th:TH_ATOMIC_RETURN
; GFX12-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GFX12-NEXT: buffer_gl0_inv
-; GFX12-NEXT: buffer_gl1_inv
+; GFX12-NEXT: global_inv scope:SCOPE_DEV
; GFX12-NEXT: v_dual_mov_b32 v2, s6 :: v_dual_mov_b32 v3, s7
; GFX12-NEXT: flat_store_b64 v[2:3], v[0:1]
; GFX12-NEXT: s_endpgm
@@ -4566,8 +4518,7 @@ define amdgpu_kernel void @atomic_xor_i64_addr64(ptr %out, i64 %in, i64 %index)
; GFX12-NEXT: flat_atomic_xor_b64 v[2:3], v[0:1]
; GFX12-NEXT: s_waitcnt lgkmcnt(0)
; GFX12-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX12-NEXT: buffer_gl0_inv
-; GFX12-NEXT: buffer_gl1_inv
+; GFX12-NEXT: global_inv scope:SCOPE_DEV
; GFX12-NEXT: s_endpgm
entry:
%ptr = getelementptr i64, ptr %out, i64 %index
@@ -4625,8 +4576,7 @@ define amdgpu_kernel void @atomic_xor_i64_ret_addr64(ptr %out, ptr %out2, i64 %i
; GFX12-NEXT: v_dual_mov_b32 v3, s1 :: v_dual_mov_b32 v2, s0
; GFX12-NEXT: flat_atomic_xor_b64 v[0:1], v[2:3], v[0:1] th:TH_ATOMIC_RETURN
; GFX12-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GFX12-NEXT: buffer_gl0_inv
-; GFX12-NEXT: buffer_gl1_inv
+; GFX12-NEXT: global_inv scope:SCOPE_DEV
; GFX12-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
; GFX12-NEXT: flat_store_b64 v[2:3], v[0:1]
; GFX12-NEXT: s_endpgm
@@ -4678,8 +4628,7 @@ define amdgpu_kernel void @atomic_load_i64_offset(ptr %in, ptr %out) {
; GFX12-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
; GFX12-NEXT: flat_load_b64 v[0:1], v[0:1] offset:32 th:TH_LOAD_NT
; GFX12-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GFX12-NEXT: buffer_gl0_inv
-; GFX12-NEXT: buffer_gl1_inv
+; GFX12-NEXT: global_inv scope:SCOPE_SYS
; GFX12-NEXT: flat_store_b64 v[2:3], v[0:1]
; GFX12-NEXT: s_endpgm
entry:
@@ -4726,8 +4675,7 @@ define amdgpu_kernel void @atomic_load_i64(ptr %in, ptr %out) {
; GFX12-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
; GFX12-NEXT: flat_load_b64 v[0:1], v[0:1] th:TH_LOAD_NT
; GFX12-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GFX12-NEXT: buffer_gl0_inv
-; GFX12-NEXT: buffer_gl1_inv
+; GFX12-NEXT: global_inv scope:SCOPE_DEV
; GFX12-NEXT: flat_store_b64 v[2:3], v[0:1]
; GFX12-NEXT: s_endpgm
entry:
@@ -4790,8 +4738,7 @@ define amdgpu_kernel void @atomic_load_i64_addr64_offset(ptr %in, ptr %out, i64
; GFX12-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
; GFX12-NEXT: flat_load_b64 v[0:1], v[0:1] offset:32 th:TH_LOAD_NT
; GFX12-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GFX12-NEXT: buffer_gl0_inv
-; GFX12-NEXT: buffer_gl1_inv
+; GFX12-NEXT: global_inv scope:SCOPE_SYS
; GFX12-NEXT: flat_store_b64 v[2:3], v[0:1]
; GFX12-NEXT: s_endpgm
entry:
@@ -4852,8 +4799,7 @@ define amdgpu_kernel void @atomic_load_i64_addr64(ptr %in, ptr %out, i64 %index)
; GFX12-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
; GFX12-NEXT: flat_load_b64 v[0:1], v[0:1] th:TH_LOAD_NT
; GFX12-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GFX12-NEXT: buffer_gl0_inv
-; GFX12-NEXT: buffer_gl1_inv
+; GFX12-NEXT: global_inv scope:SCOPE_SYS
; GFX12-NEXT: flat_store_b64 v[2:3], v[0:1]
; GFX12-NEXT: s_endpgm
entry:
@@ -5094,8 +5040,7 @@ define amdgpu_kernel void @atomic_cmpxchg_i64_offset(ptr %out, i64 %in, i64 %old
; GFX12-NEXT: flat_atomic_cmpswap_b64 v[4:5], v[0:3] offset:32
; GFX12-NEXT: s_waitcnt lgkmcnt(0)
; GFX12-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX12-NEXT: buffer_gl0_inv
-; GFX12-NEXT: buffer_gl1_inv
+; GFX12-NEXT: global_inv scope:SCOPE_DEV
; GFX12-NEXT: s_endpgm
entry:
%gep = getelementptr i64, ptr %out, i64 4
@@ -5152,8 +5097,7 @@ define amdgpu_kernel void @atomic_cmpxchg_i64_soffset(ptr %out, i64 %in, i64 %ol
; GFX12-NEXT: flat_atomic_cmpswap_b64 v[4:5], v[0:3] offset:72000
; GFX12-NEXT: s_waitcnt lgkmcnt(0)
; GFX12-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX12-NEXT: buffer_gl0_inv
-; GFX12-NEXT: buffer_gl1_inv
+; GFX12-NEXT: global_inv scope:SCOPE_DEV
; GFX12-NEXT: s_endpgm
entry:
%gep = getelementptr i64, ptr %out, i64 9000
@@ -5211,8 +5155,7 @@ define amdgpu_kernel void @atomic_cmpxchg_i64_ret_offset(ptr %out, ptr %out2, i6
; GFX12-NEXT: v_dual_mov_b32 v2, s6 :: v_dual_mov_b32 v3, s7
; GFX12-NEXT: flat_atomic_cmpswap_b64 v[0:1], v[4:5], v[0:3] offset:32 th:TH_ATOMIC_RETURN
; GFX12-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GFX12-NEXT: buffer_gl0_inv
-; GFX12-NEXT: buffer_gl1_inv
+; GFX12-NEXT: global_inv scope:SCOPE_DEV
; GFX12-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
; GFX12-NEXT: flat_store_b64 v[2:3], v[0:1]
; GFX12-NEXT: s_endpgm
@@ -5277,8 +5220,7 @@ define amdgpu_kernel void @atomic_cmpxchg_i64_addr64_offset(ptr %out, i64 %in, i
; GFX12-NEXT: flat_atomic_cmpswap_b64 v[4:5], v[0:3] offset:32
; GFX12-NEXT: s_waitcnt lgkmcnt(0)
; GFX12-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX12-NEXT: buffer_gl0_inv
-; GFX12-NEXT: buffer_gl1_inv
+; GFX12-NEXT: global_inv scope:SCOPE_DEV
; GFX12-NEXT: s_endpgm
entry:
%ptr = getelementptr i64, ptr %out, i64 %index
@@ -5349,8 +5291,7 @@ define amdgpu_kernel void @atomic_cmpxchg_i64_ret_addr64_offset(ptr %out, ptr %o
; GFX12-NEXT: v_dual_mov_b32 v5, s3 :: v_dual_mov_b32 v4, s2
; GFX12-NEXT: flat_atomic_cmpswap_b64 v[0:1], v[4:5], v[0:3] offset:32 th:TH_ATOMIC_RETURN
; GFX12-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GFX12-NEXT: buffer_gl0_inv
-; GFX12-NEXT: buffer_gl1_inv
+; GFX12-NEXT: global_inv scope:SCOPE_DEV
; GFX12-NEXT: v_dual_mov_b32 v2, s6 :: v_dual_mov_b32 v3, s7
; GFX12-NEXT: flat_store_b64 v[2:3], v[0:1]
; GFX12-NEXT: s_endpgm
@@ -5408,8 +5349,7 @@ define amdgpu_kernel void @atomic_cmpxchg_i64(ptr %out, i64 %in, i64 %old) {
; GFX12-NEXT: flat_atomic_cmpswap_b64 v[4:5], v[0:3]
; GFX12-NEXT: s_waitcnt lgkmcnt(0)
; GFX12-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX12-NEXT: buffer_gl0_inv
-; GFX12-NEXT: buffer_gl1_inv
+; GFX12-NEXT: global_inv scope:SCOPE_DEV
; GFX12-NEXT: s_endpgm
entry:
%val = cmpxchg volatile ptr %out, i64 %old, i64 %in syncscope("agent") seq_cst seq_cst
@@ -5462,8 +5402,7 @@ define amdgpu_kernel void @atomic_cmpxchg_i64_ret(ptr %out, ptr %out2, i64 %in,
; GFX12-NEXT: v_dual_mov_b32 v2, s6 :: v_dual_mov_b32 v3, s7
; GFX12-NEXT: flat_atomic_cmpswap_b64 v[0:1], v[4:5], v[0:3] th:TH_ATOMIC_RETURN
; GFX12-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GFX12-NEXT: buffer_gl0_inv
-; GFX12-NEXT: buffer_gl1_inv
+; GFX12-NEXT: global_inv scope:SCOPE_DEV
; GFX12-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
; GFX12-NEXT: flat_store_b64 v[2:3], v[0:1]
; GFX12-NEXT: s_endpgm
@@ -5523,8 +5462,7 @@ define amdgpu_kernel void @atomic_cmpxchg_i64_addr64(ptr %out, i64 %in, i64 %ind
; GFX12-NEXT: flat_atomic_cmpswap_b64 v[4:5], v[0:3]
; GFX12-NEXT: s_waitcnt lgkmcnt(0)
; GFX12-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX12-NEXT: buffer_gl0_inv
-; GFX12-NEXT: buffer_gl1_inv
+; GFX12-NEXT: global_inv scope:SCOPE_DEV
; GFX12-NEXT: s_endpgm
entry:
%ptr = getelementptr i64, ptr %out, i64 %index
@@ -5590,8 +5528,7 @@ define amdgpu_kernel void @atomic_cmpxchg_i64_ret_addr64(ptr %out, ptr %out2, i6
; GFX12-NEXT: v_dual_mov_b32 v5, s3 :: v_dual_mov_b32 v4, s2
; GFX12-NEXT: flat_atomic_cmpswap_b64 v[0:1], v[4:5], v[0:3] th:TH_ATOMIC_RETURN
; GFX12-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GFX12-NEXT: buffer_gl0_inv
-; GFX12-NEXT: buffer_gl1_inv
+; GFX12-NEXT: global_inv scope:SCOPE_DEV
; GFX12-NEXT: v_dual_mov_b32 v2, s6 :: v_dual_mov_b32 v3, s7
; GFX12-NEXT: flat_store_b64 v[2:3], v[0:1]
; GFX12-NEXT: s_endpgm
@@ -5644,8 +5581,7 @@ define amdgpu_kernel void @atomic_load_f64_offset(ptr %in, ptr %out) {
; GFX12-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
; GFX12-NEXT: flat_load_b64 v[0:1], v[0:1] offset:32 th:TH_LOAD_NT
; GFX12-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GFX12-NEXT: buffer_gl0_inv
-; GFX12-NEXT: buffer_gl1_inv
+; GFX12-NEXT: global_inv scope:SCOPE_SYS
; GFX12-NEXT: flat_store_b64 v[2:3], v[0:1]
; GFX12-NEXT: s_endpgm
entry:
@@ -5692,8 +5628,7 @@ define amdgpu_kernel void @atomic_load_f64(ptr %in, ptr %out) {
; GFX12-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
; GFX12-NEXT: flat_load_b64 v[0:1], v[0:1] th:TH_LOAD_NT
; GFX12-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GFX12-NEXT: buffer_gl0_inv
-; GFX12-NEXT: buffer_gl1_inv
+; GFX12-NEXT: global_inv scope:SCOPE_DEV
; GFX12-NEXT: flat_store_b64 v[2:3], v[0:1]
; GFX12-NEXT: s_endpgm
entry:
@@ -5756,8 +5691,7 @@ define amdgpu_kernel void @atomic_load_f64_addr64_offset(ptr %in, ptr %out, i64
; GFX12-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
; GFX12-NEXT: flat_load_b64 v[0:1], v[0:1] offset:32 th:TH_LOAD_NT
; GFX12-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GFX12-NEXT: buffer_gl0_inv
-; GFX12-NEXT: buffer_gl1_inv
+; GFX12-NEXT: global_inv scope:SCOPE_SYS
; GFX12-NEXT: flat_store_b64 v[2:3], v[0:1]
; GFX12-NEXT: s_endpgm
entry:
@@ -5818,8 +5752,7 @@ define amdgpu_kernel void @atomic_load_f64_addr64(ptr %in, ptr %out, i64 %index)
; GFX12-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
; GFX12-NEXT: flat_load_b64 v[0:1], v[0:1] th:TH_LOAD_NT
; GFX12-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GFX12-NEXT: buffer_gl0_inv
-; GFX12-NEXT: buffer_gl1_inv
+; GFX12-NEXT: global_inv scope:SCOPE_SYS
; GFX12-NEXT: flat_store_b64 v[2:3], v[0:1]
; GFX12-NEXT: s_endpgm
entry:
@@ -6051,8 +5984,7 @@ define amdgpu_kernel void @atomic_inc_i64_offset(ptr %out, i64 %in) {
; GFX12-NEXT: flat_atomic_inc_u64 v[0:1], v[2:3] offset:32
; GFX12-NEXT: s_waitcnt lgkmcnt(0)
; GFX12-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX12-NEXT: buffer_gl0_inv
-; GFX12-NEXT: buffer_gl1_inv
+; GFX12-NEXT: global_inv scope:SCOPE_DEV
; GFX12-NEXT: s_endpgm
entry:
%gep = getelementptr i64, ptr %out, i64 4
@@ -6109,8 +6041,7 @@ define amdgpu_kernel void @atomic_inc_i64_ret_offset(ptr %out, ptr %out2, i64 %i
; GFX12-NEXT: v_dual_mov_b32 v2, s0 :: v_dual_mov_b32 v3, s1
; GFX12-NEXT: flat_atomic_inc_u64 v[0:1], v[0:1], v[2:3] offset:32 th:TH_ATOMIC_RETURN
; GFX12-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GFX12-NEXT: buffer_gl0_inv
-; GFX12-NEXT: buffer_gl1_inv
+; GFX12-NEXT: global_inv scope:SCOPE_DEV
; GFX12-NEXT: v_dual_mov_b32 v2, s6 :: v_dual_mov_b32 v3, s7
; GFX12-NEXT: flat_store_b64 v[2:3], v[0:1]
; GFX12-NEXT: s_endpgm
@@ -6174,8 +6105,7 @@ define amdgpu_kernel void @atomic_inc_i64_incr64_offset(ptr %out, i64 %in, i64 %
; GFX12-NEXT: flat_atomic_inc_u64 v[2:3], v[0:1] offset:32
; GFX12-NEXT: s_waitcnt lgkmcnt(0)
; GFX12-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX12-NEXT: buffer_gl0_inv
-; GFX12-NEXT: buffer_gl1_inv
+; GFX12-NEXT: global_inv scope:SCOPE_DEV
; GFX12-NEXT: s_endpgm
entry:
%ptr = getelementptr i64, ptr %out, i64 %index
@@ -6238,8 +6168,7 @@ define amdgpu_kernel void @atomic_inc_i64_ret_incr64_offset(ptr %out, ptr %out2,
; GFX12-NEXT: v_dual_mov_b32 v3, s1 :: v_dual_mov_b32 v2, s0
; GFX12-NEXT: flat_atomic_inc_u64 v[0:1], v[2:3], v[0:1] offset:32 th:TH_ATOMIC_RETURN
; GFX12-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GFX12-NEXT: buffer_gl0_inv
-; GFX12-NEXT: buffer_gl1_inv
+; GFX12-NEXT: global_inv scope:SCOPE_DEV
; GFX12-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
; GFX12-NEXT: flat_store_b64 v[2:3], v[0:1]
; GFX12-NEXT: s_endpgm
@@ -6287,8 +6216,7 @@ define amdgpu_kernel void @atomic_inc_i64(ptr %out, i64 %in) {
; GFX12-NEXT: flat_atomic_inc_u64 v[0:1], v[2:3]
; GFX12-NEXT: s_waitcnt lgkmcnt(0)
; GFX12-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX12-NEXT: buffer_gl0_inv
-; GFX12-NEXT: buffer_gl1_inv
+; GFX12-NEXT: global_inv scope:SCOPE_DEV
; GFX12-NEXT: s_endpgm
entry:
%tmp0 = atomicrmw volatile uinc_wrap ptr %out, i64 %in syncscope("agent") seq_cst
@@ -6340,8 +6268,7 @@ define amdgpu_kernel void @atomic_inc_i64_ret(ptr %out, ptr %out2, i64 %in) {
; GFX12-NEXT: v_dual_mov_b32 v2, s0 :: v_dual_mov_b32 v3, s1
; GFX12-NEXT: flat_atomic_inc_u64 v[0:1], v[0:1], v[2:3] th:TH_ATOMIC_RETURN
; GFX12-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GFX12-NEXT: buffer_gl0_inv
-; GFX12-NEXT: buffer_gl1_inv
+; GFX12-NEXT: global_inv scope:SCOPE_DEV
; GFX12-NEXT: v_dual_mov_b32 v2, s6 :: v_dual_mov_b32 v3, s7
; GFX12-NEXT: flat_store_b64 v[2:3], v[0:1]
; GFX12-NEXT: s_endpgm
@@ -6400,8 +6327,7 @@ define amdgpu_kernel void @atomic_inc_i64_incr64(ptr %out, i64 %in, i64 %index)
; GFX12-NEXT: flat_atomic_inc_u64 v[2:3], v[0:1]
; GFX12-NEXT: s_waitcnt lgkmcnt(0)
; GFX12-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX12-NEXT: buffer_gl0_inv
-; GFX12-NEXT: buffer_gl1_inv
+; GFX12-NEXT: global_inv scope:SCOPE_DEV
; GFX12-NEXT: s_endpgm
entry:
%ptr = getelementptr i64, ptr %out, i64 %index
@@ -6459,8 +6385,7 @@ define amdgpu_kernel void @atomic_inc_i64_ret_incr64(ptr %out, ptr %out2, i64 %i
; GFX12-NEXT: v_dual_mov_b32 v3, s1 :: v_dual_mov_b32 v2, s0
; GFX12-NEXT: flat_atomic_inc_u64 v[0:1], v[2:3], v[0:1] th:TH_ATOMIC_RETURN
; GFX12-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GFX12-NEXT: buffer_gl0_inv
-; GFX12-NEXT: buffer_gl1_inv
+; GFX12-NEXT: global_inv scope:SCOPE_DEV
; GFX12-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
; GFX12-NEXT: flat_store_b64 v[2:3], v[0:1]
; GFX12-NEXT: s_endpgm
@@ -6511,8 +6436,7 @@ define amdgpu_kernel void @atomic_dec_i64_offset(ptr %out, i64 %in) {
; GFX12-NEXT: flat_atomic_dec_u64 v[0:1], v[2:3] offset:32
; GFX12-NEXT: s_waitcnt lgkmcnt(0)
; GFX12-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX12-NEXT: buffer_gl0_inv
-; GFX12-NEXT: buffer_gl1_inv
+; GFX12-NEXT: global_inv scope:SCOPE_DEV
; GFX12-NEXT: s_endpgm
entry:
%gep = getelementptr i64, ptr %out, i64 4
@@ -6569,8 +6493,7 @@ define amdgpu_kernel void @atomic_dec_i64_ret_offset(ptr %out, ptr %out2, i64 %i
; GFX12-NEXT: v_dual_mov_b32 v2, s0 :: v_dual_mov_b32 v3, s1
; GFX12-NEXT: flat_atomic_dec_u64 v[0:1], v[0:1], v[2:3] offset:32 th:TH_ATOMIC_RETURN
; GFX12-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GFX12-NEXT: buffer_gl0_inv
-; GFX12-NEXT: buffer_gl1_inv
+; GFX12-NEXT: global_inv scope:SCOPE_DEV
; GFX12-NEXT: v_dual_mov_b32 v2, s6 :: v_dual_mov_b32 v3, s7
; GFX12-NEXT: flat_store_b64 v[2:3], v[0:1]
; GFX12-NEXT: s_endpgm
@@ -6634,8 +6557,7 @@ define amdgpu_kernel void @atomic_dec_i64_decr64_offset(ptr %out, i64 %in, i64 %
; GFX12-NEXT: flat_atomic_dec_u64 v[2:3], v[0:1] offset:32
; GFX12-NEXT: s_waitcnt lgkmcnt(0)
; GFX12-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX12-NEXT: buffer_gl0_inv
-; GFX12-NEXT: buffer_gl1_inv
+; GFX12-NEXT: global_inv scope:SCOPE_DEV
; GFX12-NEXT: s_endpgm
entry:
%ptr = getelementptr i64, ptr %out, i64 %index
@@ -6698,8 +6620,7 @@ define amdgpu_kernel void @atomic_dec_i64_ret_decr64_offset(ptr %out, ptr %out2,
; GFX12-NEXT: v_dual_mov_b32 v3, s1 :: v_dual_mov_b32 v2, s0
; GFX12-NEXT: flat_atomic_dec_u64 v[0:1], v[2:3], v[0:1] offset:32 th:TH_ATOMIC_RETURN
; GFX12-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GFX12-NEXT: buffer_gl0_inv
-; GFX12-NEXT: buffer_gl1_inv
+; GFX12-NEXT: global_inv scope:SCOPE_DEV
; GFX12-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
; GFX12-NEXT: flat_store_b64 v[2:3], v[0:1]
; GFX12-NEXT: s_endpgm
@@ -6747,8 +6668,7 @@ define amdgpu_kernel void @atomic_dec_i64(ptr %out, i64 %in) {
; GFX12-NEXT: flat_atomic_dec_u64 v[0:1], v[2:3]
; GFX12-NEXT: s_waitcnt lgkmcnt(0)
; GFX12-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX12-NEXT: buffer_gl0_inv
-; GFX12-NEXT: buffer_gl1_inv
+; GFX12-NEXT: global_inv scope:SCOPE_DEV
; GFX12-NEXT: s_endpgm
entry:
%tmp0 = atomicrmw volatile udec_wrap ptr %out, i64 %in syncscope("agent") seq_cst
@@ -6800,8 +6720,7 @@ define amdgpu_kernel void @atomic_dec_i64_ret(ptr %out, ptr %out2, i64 %in) {
; GFX12-NEXT: v_dual_mov_b32 v2, s0 :: v_dual_mov_b32 v3, s1
; GFX12-NEXT: flat_atomic_dec_u64 v[0:1], v[0:1], v[2:3] th:TH_ATOMIC_RETURN
; GFX12-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GFX12-NEXT: buffer_gl0_inv
-; GFX12-NEXT: buffer_gl1_inv
+; GFX12-NEXT: global_inv scope:SCOPE_DEV
; GFX12-NEXT: v_dual_mov_b32 v2, s6 :: v_dual_mov_b32 v3, s7
; GFX12-NEXT: flat_store_b64 v[2:3], v[0:1]
; GFX12-NEXT: s_endpgm
@@ -6860,8 +6779,7 @@ define amdgpu_kernel void @atomic_dec_i64_decr64(ptr %out, i64 %in, i64 %index)
; GFX12-NEXT: flat_atomic_dec_u64 v[2:3], v[0:1]
; GFX12-NEXT: s_waitcnt lgkmcnt(0)
; GFX12-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX12-NEXT: buffer_gl0_inv
-; GFX12-NEXT: buffer_gl1_inv
+; GFX12-NEXT: global_inv scope:SCOPE_DEV
; GFX12-NEXT: s_endpgm
entry:
%ptr = getelementptr i64, ptr %out, i64 %index
@@ -6919,8 +6837,7 @@ define amdgpu_kernel void @atomic_dec_i64_ret_decr64(ptr %out, ptr %out2, i64 %i
; GFX12-NEXT: v_dual_mov_b32 v3, s1 :: v_dual_mov_b32 v2, s0
; GFX12-NEXT: flat_atomic_dec_u64 v[0:1], v[2:3], v[0:1] th:TH_ATOMIC_RETURN
; GFX12-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GFX12-NEXT: buffer_gl0_inv
-; GFX12-NEXT: buffer_gl1_inv
+; GFX12-NEXT: global_inv scope:SCOPE_DEV
; GFX12-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
; GFX12-NEXT: flat_store_b64 v[2:3], v[0:1]
; GFX12-NEXT: s_endpgm
diff --git a/llvm/test/CodeGen/AMDGPU/global-saddr-load.ll b/llvm/test/CodeGen/AMDGPU/global-saddr-load.ll
index de4f748413e6..b2b3f3e1bfbd 100644
--- a/llvm/test/CodeGen/AMDGPU/global-saddr-load.ll
+++ b/llvm/test/CodeGen/AMDGPU/global-saddr-load.ll
@@ -3583,8 +3583,7 @@ define amdgpu_ps float @atomic_global_load_saddr_i32(ptr addrspace(1) inreg %sba
; GFX12: ; %bb.0:
; GFX12-NEXT: global_load_b32 v0, v0, s[2:3] th:TH_LOAD_NT
; GFX12-NEXT: s_waitcnt vmcnt(0)
-; GFX12-NEXT: buffer_gl0_inv
-; GFX12-NEXT: buffer_gl1_inv
+; GFX12-NEXT: global_inv scope:SCOPE_SYS
; GFX12-NEXT: ; return to shader part epilog
%zext.offset = zext i32 %voffset to i64
%gep0 = getelementptr inbounds i8, ptr addrspace(1) %sbase, i64 %zext.offset
@@ -3621,8 +3620,7 @@ define amdgpu_ps float @atomic_global_load_saddr_i32_immneg128(ptr addrspace(1)
; GFX12: ; %bb.0:
; GFX12-NEXT: global_load_b32 v0, v0, s[2:3] offset:-128 th:TH_LOAD_NT
; GFX12-NEXT: s_waitcnt vmcnt(0)
-; GFX12-NEXT: buffer_gl0_inv
-; GFX12-NEXT: buffer_gl1_inv
+; GFX12-NEXT: global_inv scope:SCOPE_SYS
; GFX12-NEXT: ; return to shader part epilog
%zext.offset = zext i32 %voffset to i64
%gep0 = getelementptr inbounds i8, ptr addrspace(1) %sbase, i64 %zext.offset
@@ -3660,8 +3658,7 @@ define amdgpu_ps <2 x float> @atomic_global_load_saddr_i64(ptr addrspace(1) inre
; GFX12: ; %bb.0:
; GFX12-NEXT: global_load_b64 v[0:1], v0, s[2:3] th:TH_LOAD_NT
; GFX12-NEXT: s_waitcnt vmcnt(0)
-; GFX12-NEXT: buffer_gl0_inv
-; GFX12-NEXT: buffer_gl1_inv
+; GFX12-NEXT: global_inv scope:SCOPE_SYS
; GFX12-NEXT: ; return to shader part epilog
%zext.offset = zext i32 %voffset to i64
%gep0 = getelementptr inbounds i8, ptr addrspace(1) %sbase, i64 %zext.offset
@@ -3698,8 +3695,7 @@ define amdgpu_ps <2 x float> @atomic_global_load_saddr_i64_immneg128(ptr addrspa
; GFX12: ; %bb.0:
; GFX12-NEXT: global_load_b64 v[0:1], v0, s[2:3] offset:-128 th:TH_LOAD_NT
; GFX12-NEXT: s_waitcnt vmcnt(0)
-; GFX12-NEXT: buffer_gl0_inv
-; GFX12-NEXT: buffer_gl1_inv
+; GFX12-NEXT: global_inv scope:SCOPE_SYS
; GFX12-NEXT: ; return to shader part epilog
%zext.offset = zext i32 %voffset to i64
%gep0 = getelementptr inbounds i8, ptr addrspace(1) %sbase, i64 %zext.offset
diff --git a/llvm/test/CodeGen/AMDGPU/global_atomics_i64.ll b/llvm/test/CodeGen/AMDGPU/global_atomics_i64.ll
index 3d11c8bc499e..325dae172d52 100644
--- a/llvm/test/CodeGen/AMDGPU/global_atomics_i64.ll
+++ b/llvm/test/CodeGen/AMDGPU/global_atomics_i64.ll
@@ -51,8 +51,7 @@ define amdgpu_kernel void @atomic_add_i64_offset(ptr addrspace(1) %out, i64 %in)
; GFX12-NEXT: v_mov_b32_e32 v0, s2
; GFX12-NEXT: global_atomic_add_u64 v2, v[0:1], s[0:1] offset:32
; GFX12-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX12-NEXT: buffer_gl0_inv
-; GFX12-NEXT: buffer_gl1_inv
+; GFX12-NEXT: global_inv scope:SCOPE_DEV
; GFX12-NEXT: s_endpgm
entry:
%gep = getelementptr i64, ptr addrspace(1) %out, i64 4
@@ -123,8 +122,7 @@ define amdgpu_kernel void @atomic_add_i64_ret_offset(ptr addrspace(1) %out, ptr
; GFX12-NEXT: v_mov_b32_e32 v0, s4
; GFX12-NEXT: global_atomic_add_u64 v[0:1], v2, v[0:1], s[0:1] offset:32 th:TH_ATOMIC_RETURN
; GFX12-NEXT: s_waitcnt vmcnt(0)
-; GFX12-NEXT: buffer_gl0_inv
-; GFX12-NEXT: buffer_gl1_inv
+; GFX12-NEXT: global_inv scope:SCOPE_DEV
; GFX12-NEXT: global_store_b64 v2, v[0:1], s[2:3]
; GFX12-NEXT: s_nop 0
; GFX12-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
@@ -202,8 +200,7 @@ define amdgpu_kernel void @atomic_add_i64_addr64_offset(ptr addrspace(1) %out, i
; GFX12-NEXT: s_add_nc_u64 s[0:1], s[4:5], s[0:1]
; GFX12-NEXT: global_atomic_add_u64 v2, v[0:1], s[0:1] offset:32
; GFX12-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX12-NEXT: buffer_gl0_inv
-; GFX12-NEXT: buffer_gl1_inv
+; GFX12-NEXT: global_inv scope:SCOPE_DEV
; GFX12-NEXT: s_endpgm
entry:
%ptr = getelementptr i64, ptr addrspace(1) %out, i64 %index
@@ -284,8 +281,7 @@ define amdgpu_kernel void @atomic_add_i64_ret_addr64_offset(ptr addrspace(1) %ou
; GFX12-NEXT: s_add_nc_u64 s[0:1], s[0:1], s[4:5]
; GFX12-NEXT: global_atomic_add_u64 v[0:1], v2, v[0:1], s[0:1] offset:32 th:TH_ATOMIC_RETURN
; GFX12-NEXT: s_waitcnt vmcnt(0)
-; GFX12-NEXT: buffer_gl0_inv
-; GFX12-NEXT: buffer_gl1_inv
+; GFX12-NEXT: global_inv scope:SCOPE_DEV
; GFX12-NEXT: global_store_b64 v2, v[0:1], s[2:3]
; GFX12-NEXT: s_nop 0
; GFX12-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
@@ -349,8 +345,7 @@ define amdgpu_kernel void @atomic_add_i64(ptr addrspace(1) %out, i64 %in) {
; GFX12-NEXT: v_mov_b32_e32 v0, s2
; GFX12-NEXT: global_atomic_add_u64 v2, v[0:1], s[0:1]
; GFX12-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX12-NEXT: buffer_gl0_inv
-; GFX12-NEXT: buffer_gl1_inv
+; GFX12-NEXT: global_inv scope:SCOPE_DEV
; GFX12-NEXT: s_endpgm
entry:
%tmp0 = atomicrmw volatile add ptr addrspace(1) %out, i64 %in syncscope("agent") seq_cst
@@ -420,8 +415,7 @@ define amdgpu_kernel void @atomic_add_i64_ret(ptr addrspace(1) %out, ptr addrspa
; GFX12-NEXT: v_mov_b32_e32 v0, s4
; GFX12-NEXT: global_atomic_add_u64 v[0:1], v2, v[0:1], s[0:1] th:TH_ATOMIC_RETURN
; GFX12-NEXT: s_waitcnt vmcnt(0)
-; GFX12-NEXT: buffer_gl0_inv
-; GFX12-NEXT: buffer_gl1_inv
+; GFX12-NEXT: global_inv scope:SCOPE_DEV
; GFX12-NEXT: global_store_b64 v2, v[0:1], s[2:3]
; GFX12-NEXT: s_nop 0
; GFX12-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
@@ -496,8 +490,7 @@ define amdgpu_kernel void @atomic_add_i64_addr64(ptr addrspace(1) %out, i64 %in,
; GFX12-NEXT: s_add_nc_u64 s[0:1], s[4:5], s[0:1]
; GFX12-NEXT: global_atomic_add_u64 v2, v[0:1], s[0:1]
; GFX12-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX12-NEXT: buffer_gl0_inv
-; GFX12-NEXT: buffer_gl1_inv
+; GFX12-NEXT: global_inv scope:SCOPE_DEV
; GFX12-NEXT: s_endpgm
entry:
%ptr = getelementptr i64, ptr addrspace(1) %out, i64 %index
@@ -575,8 +568,7 @@ define amdgpu_kernel void @atomic_add_i64_ret_addr64(ptr addrspace(1) %out, ptr
; GFX12-NEXT: s_add_nc_u64 s[0:1], s[0:1], s[4:5]
; GFX12-NEXT: global_atomic_add_u64 v[0:1], v2, v[0:1], s[0:1] th:TH_ATOMIC_RETURN
; GFX12-NEXT: s_waitcnt vmcnt(0)
-; GFX12-NEXT: buffer_gl0_inv
-; GFX12-NEXT: buffer_gl1_inv
+; GFX12-NEXT: global_inv scope:SCOPE_DEV
; GFX12-NEXT: global_store_b64 v2, v[0:1], s[2:3]
; GFX12-NEXT: s_nop 0
; GFX12-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
@@ -635,8 +627,7 @@ define amdgpu_kernel void @atomic_and_i64_offset(ptr addrspace(1) %out, i64 %in)
; GFX12-NEXT: v_mov_b32_e32 v0, s2
; GFX12-NEXT: global_atomic_and_b64 v2, v[0:1], s[0:1] offset:32
; GFX12-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX12-NEXT: buffer_gl0_inv
-; GFX12-NEXT: buffer_gl1_inv
+; GFX12-NEXT: global_inv scope:SCOPE_DEV
; GFX12-NEXT: s_endpgm
entry:
%gep = getelementptr i64, ptr addrspace(1) %out, i64 4
@@ -707,8 +698,7 @@ define amdgpu_kernel void @atomic_and_i64_ret_offset(ptr addrspace(1) %out, ptr
; GFX12-NEXT: v_mov_b32_e32 v0, s4
; GFX12-NEXT: global_atomic_and_b64 v[0:1], v2, v[0:1], s[0:1] offset:32 th:TH_ATOMIC_RETURN
; GFX12-NEXT: s_waitcnt vmcnt(0)
-; GFX12-NEXT: buffer_gl0_inv
-; GFX12-NEXT: buffer_gl1_inv
+; GFX12-NEXT: global_inv scope:SCOPE_DEV
; GFX12-NEXT: global_store_b64 v2, v[0:1], s[2:3]
; GFX12-NEXT: s_nop 0
; GFX12-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
@@ -786,8 +776,7 @@ define amdgpu_kernel void @atomic_and_i64_addr64_offset(ptr addrspace(1) %out, i
; GFX12-NEXT: s_add_nc_u64 s[0:1], s[4:5], s[0:1]
; GFX12-NEXT: global_atomic_and_b64 v2, v[0:1], s[0:1] offset:32
; GFX12-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX12-NEXT: buffer_gl0_inv
-; GFX12-NEXT: buffer_gl1_inv
+; GFX12-NEXT: global_inv scope:SCOPE_DEV
; GFX12-NEXT: s_endpgm
entry:
%ptr = getelementptr i64, ptr addrspace(1) %out, i64 %index
@@ -868,8 +857,7 @@ define amdgpu_kernel void @atomic_and_i64_ret_addr64_offset(ptr addrspace(1) %ou
; GFX12-NEXT: s_add_nc_u64 s[0:1], s[0:1], s[4:5]
; GFX12-NEXT: global_atomic_and_b64 v[0:1], v2, v[0:1], s[0:1] offset:32 th:TH_ATOMIC_RETURN
; GFX12-NEXT: s_waitcnt vmcnt(0)
-; GFX12-NEXT: buffer_gl0_inv
-; GFX12-NEXT: buffer_gl1_inv
+; GFX12-NEXT: global_inv scope:SCOPE_DEV
; GFX12-NEXT: global_store_b64 v2, v[0:1], s[2:3]
; GFX12-NEXT: s_nop 0
; GFX12-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
@@ -933,8 +921,7 @@ define amdgpu_kernel void @atomic_and_i64(ptr addrspace(1) %out, i64 %in) {
; GFX12-NEXT: v_mov_b32_e32 v0, s2
; GFX12-NEXT: global_atomic_and_b64 v2, v[0:1], s[0:1]
; GFX12-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX12-NEXT: buffer_gl0_inv
-; GFX12-NEXT: buffer_gl1_inv
+; GFX12-NEXT: global_inv scope:SCOPE_DEV
; GFX12-NEXT: s_endpgm
entry:
%tmp0 = atomicrmw volatile and ptr addrspace(1) %out, i64 %in syncscope("agent") seq_cst
@@ -1004,8 +991,7 @@ define amdgpu_kernel void @atomic_and_i64_ret(ptr addrspace(1) %out, ptr addrspa
; GFX12-NEXT: v_mov_b32_e32 v0, s4
; GFX12-NEXT: global_atomic_and_b64 v[0:1], v2, v[0:1], s[0:1] th:TH_ATOMIC_RETURN
; GFX12-NEXT: s_waitcnt vmcnt(0)
-; GFX12-NEXT: buffer_gl0_inv
-; GFX12-NEXT: buffer_gl1_inv
+; GFX12-NEXT: global_inv scope:SCOPE_DEV
; GFX12-NEXT: global_store_b64 v2, v[0:1], s[2:3]
; GFX12-NEXT: s_nop 0
; GFX12-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
@@ -1080,8 +1066,7 @@ define amdgpu_kernel void @atomic_and_i64_addr64(ptr addrspace(1) %out, i64 %in,
; GFX12-NEXT: s_add_nc_u64 s[0:1], s[4:5], s[0:1]
; GFX12-NEXT: global_atomic_and_b64 v2, v[0:1], s[0:1]
; GFX12-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX12-NEXT: buffer_gl0_inv
-; GFX12-NEXT: buffer_gl1_inv
+; GFX12-NEXT: global_inv scope:SCOPE_DEV
; GFX12-NEXT: s_endpgm
entry:
%ptr = getelementptr i64, ptr addrspace(1) %out, i64 %index
@@ -1159,8 +1144,7 @@ define amdgpu_kernel void @atomic_and_i64_ret_addr64(ptr addrspace(1) %out, ptr
; GFX12-NEXT: s_add_nc_u64 s[0:1], s[0:1], s[4:5]
; GFX12-NEXT: global_atomic_and_b64 v[0:1], v2, v[0:1], s[0:1] th:TH_ATOMIC_RETURN
; GFX12-NEXT: s_waitcnt vmcnt(0)
-; GFX12-NEXT: buffer_gl0_inv
-; GFX12-NEXT: buffer_gl1_inv
+; GFX12-NEXT: global_inv scope:SCOPE_DEV
; GFX12-NEXT: global_store_b64 v2, v[0:1], s[2:3]
; GFX12-NEXT: s_nop 0
; GFX12-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
@@ -1219,8 +1203,7 @@ define amdgpu_kernel void @atomic_sub_i64_offset(ptr addrspace(1) %out, i64 %in)
; GFX12-NEXT: v_mov_b32_e32 v0, s2
; GFX12-NEXT: global_atomic_sub_u64 v2, v[0:1], s[0:1] offset:32
; GFX12-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX12-NEXT: buffer_gl0_inv
-; GFX12-NEXT: buffer_gl1_inv
+; GFX12-NEXT: global_inv scope:SCOPE_DEV
; GFX12-NEXT: s_endpgm
entry:
%gep = getelementptr i64, ptr addrspace(1) %out, i64 4
@@ -1291,8 +1274,7 @@ define amdgpu_kernel void @atomic_sub_i64_ret_offset(ptr addrspace(1) %out, ptr
; GFX12-NEXT: v_mov_b32_e32 v0, s4
; GFX12-NEXT: global_atomic_sub_u64 v[0:1], v2, v[0:1], s[0:1] offset:32 th:TH_ATOMIC_RETURN
; GFX12-NEXT: s_waitcnt vmcnt(0)
-; GFX12-NEXT: buffer_gl0_inv
-; GFX12-NEXT: buffer_gl1_inv
+; GFX12-NEXT: global_inv scope:SCOPE_DEV
; GFX12-NEXT: global_store_b64 v2, v[0:1], s[2:3]
; GFX12-NEXT: s_nop 0
; GFX12-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
@@ -1370,8 +1352,7 @@ define amdgpu_kernel void @atomic_sub_i64_addr64_offset(ptr addrspace(1) %out, i
; GFX12-NEXT: s_add_nc_u64 s[0:1], s[4:5], s[0:1]
; GFX12-NEXT: global_atomic_sub_u64 v2, v[0:1], s[0:1] offset:32
; GFX12-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX12-NEXT: buffer_gl0_inv
-; GFX12-NEXT: buffer_gl1_inv
+; GFX12-NEXT: global_inv scope:SCOPE_DEV
; GFX12-NEXT: s_endpgm
entry:
%ptr = getelementptr i64, ptr addrspace(1) %out, i64 %index
@@ -1452,8 +1433,7 @@ define amdgpu_kernel void @atomic_sub_i64_ret_addr64_offset(ptr addrspace(1) %ou
; GFX12-NEXT: s_add_nc_u64 s[0:1], s[0:1], s[4:5]
; GFX12-NEXT: global_atomic_sub_u64 v[0:1], v2, v[0:1], s[0:1] offset:32 th:TH_ATOMIC_RETURN
; GFX12-NEXT: s_waitcnt vmcnt(0)
-; GFX12-NEXT: buffer_gl0_inv
-; GFX12-NEXT: buffer_gl1_inv
+; GFX12-NEXT: global_inv scope:SCOPE_DEV
; GFX12-NEXT: global_store_b64 v2, v[0:1], s[2:3]
; GFX12-NEXT: s_nop 0
; GFX12-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
@@ -1517,8 +1497,7 @@ define amdgpu_kernel void @atomic_sub_i64(ptr addrspace(1) %out, i64 %in) {
; GFX12-NEXT: v_mov_b32_e32 v0, s2
; GFX12-NEXT: global_atomic_sub_u64 v2, v[0:1], s[0:1]
; GFX12-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX12-NEXT: buffer_gl0_inv
-; GFX12-NEXT: buffer_gl1_inv
+; GFX12-NEXT: global_inv scope:SCOPE_DEV
; GFX12-NEXT: s_endpgm
entry:
%tmp0 = atomicrmw volatile sub ptr addrspace(1) %out, i64 %in syncscope("agent") seq_cst
@@ -1588,8 +1567,7 @@ define amdgpu_kernel void @atomic_sub_i64_ret(ptr addrspace(1) %out, ptr addrspa
; GFX12-NEXT: v_mov_b32_e32 v0, s4
; GFX12-NEXT: global_atomic_sub_u64 v[0:1], v2, v[0:1], s[0:1] th:TH_ATOMIC_RETURN
; GFX12-NEXT: s_waitcnt vmcnt(0)
-; GFX12-NEXT: buffer_gl0_inv
-; GFX12-NEXT: buffer_gl1_inv
+; GFX12-NEXT: global_inv scope:SCOPE_DEV
; GFX12-NEXT: global_store_b64 v2, v[0:1], s[2:3]
; GFX12-NEXT: s_nop 0
; GFX12-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
@@ -1664,8 +1642,7 @@ define amdgpu_kernel void @atomic_sub_i64_addr64(ptr addrspace(1) %out, i64 %in,
; GFX12-NEXT: s_add_nc_u64 s[0:1], s[4:5], s[0:1]
; GFX12-NEXT: global_atomic_sub_u64 v2, v[0:1], s[0:1]
; GFX12-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX12-NEXT: buffer_gl0_inv
-; GFX12-NEXT: buffer_gl1_inv
+; GFX12-NEXT: global_inv scope:SCOPE_DEV
; GFX12-NEXT: s_endpgm
entry:
%ptr = getelementptr i64, ptr addrspace(1) %out, i64 %index
@@ -1743,8 +1720,7 @@ define amdgpu_kernel void @atomic_sub_i64_ret_addr64(ptr addrspace(1) %out, ptr
; GFX12-NEXT: s_add_nc_u64 s[0:1], s[0:1], s[4:5]
; GFX12-NEXT: global_atomic_sub_u64 v[0:1], v2, v[0:1], s[0:1] th:TH_ATOMIC_RETURN
; GFX12-NEXT: s_waitcnt vmcnt(0)
-; GFX12-NEXT: buffer_gl0_inv
-; GFX12-NEXT: buffer_gl1_inv
+; GFX12-NEXT: global_inv scope:SCOPE_DEV
; GFX12-NEXT: global_store_b64 v2, v[0:1], s[2:3]
; GFX12-NEXT: s_nop 0
; GFX12-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
@@ -1797,7 +1773,7 @@ define amdgpu_kernel void @atomic_max_i64_offset(ptr addrspace(1) %out, i64 %in)
; GFX12-NEXT: v_mov_b32_e32 v0, s2
; GFX12-NEXT: global_atomic_max_i64 v2, v[0:1], s[0:1] offset:32
; GFX12-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX12-NEXT: buffer_gl0_inv
+; GFX12-NEXT: global_inv scope:SCOPE_SE
; GFX12-NEXT: s_endpgm
entry:
%gep = getelementptr i64, ptr addrspace(1) %out, i64 4
@@ -1865,7 +1841,7 @@ define amdgpu_kernel void @atomic_max_i64_ret_offset(ptr addrspace(1) %out, ptr
; GFX12-NEXT: v_mov_b32_e32 v0, s4
; GFX12-NEXT: global_atomic_max_i64 v[0:1], v2, v[0:1], s[0:1] offset:32 th:TH_ATOMIC_RETURN
; GFX12-NEXT: s_waitcnt vmcnt(0)
-; GFX12-NEXT: buffer_gl0_inv
+; GFX12-NEXT: global_inv scope:SCOPE_SE
; GFX12-NEXT: global_store_b64 v2, v[0:1], s[2:3]
; GFX12-NEXT: s_nop 0
; GFX12-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
@@ -1937,7 +1913,7 @@ define amdgpu_kernel void @atomic_max_i64_addr64_offset(ptr addrspace(1) %out, i
; GFX12-NEXT: s_add_nc_u64 s[0:1], s[4:5], s[0:1]
; GFX12-NEXT: global_atomic_max_i64 v2, v[0:1], s[0:1] offset:32
; GFX12-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX12-NEXT: buffer_gl0_inv
+; GFX12-NEXT: global_inv scope:SCOPE_SE
; GFX12-NEXT: s_endpgm
entry:
%ptr = getelementptr i64, ptr addrspace(1) %out, i64 %index
@@ -2015,7 +1991,7 @@ define amdgpu_kernel void @atomic_max_i64_ret_addr64_offset(ptr addrspace(1) %ou
; GFX12-NEXT: s_add_nc_u64 s[0:1], s[0:1], s[4:5]
; GFX12-NEXT: global_atomic_max_i64 v[0:1], v2, v[0:1], s[0:1] offset:32 th:TH_ATOMIC_RETURN
; GFX12-NEXT: s_waitcnt vmcnt(0)
-; GFX12-NEXT: buffer_gl0_inv
+; GFX12-NEXT: global_inv scope:SCOPE_SE
; GFX12-NEXT: global_store_b64 v2, v[0:1], s[2:3]
; GFX12-NEXT: s_nop 0
; GFX12-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
@@ -2073,7 +2049,7 @@ define amdgpu_kernel void @atomic_max_i64(ptr addrspace(1) %out, i64 %in) {
; GFX12-NEXT: v_mov_b32_e32 v0, s2
; GFX12-NEXT: global_atomic_max_i64 v2, v[0:1], s[0:1]
; GFX12-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX12-NEXT: buffer_gl0_inv
+; GFX12-NEXT: global_inv scope:SCOPE_SE
; GFX12-NEXT: s_endpgm
entry:
%tmp0 = atomicrmw volatile max ptr addrspace(1) %out, i64 %in syncscope("workgroup") seq_cst
@@ -2140,7 +2116,7 @@ define amdgpu_kernel void @atomic_max_i64_ret(ptr addrspace(1) %out, ptr addrspa
; GFX12-NEXT: v_mov_b32_e32 v0, s4
; GFX12-NEXT: global_atomic_max_i64 v[0:1], v2, v[0:1], s[0:1] th:TH_ATOMIC_RETURN
; GFX12-NEXT: s_waitcnt vmcnt(0)
-; GFX12-NEXT: buffer_gl0_inv
+; GFX12-NEXT: global_inv scope:SCOPE_SE
; GFX12-NEXT: global_store_b64 v2, v[0:1], s[2:3]
; GFX12-NEXT: s_nop 0
; GFX12-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
@@ -2209,7 +2185,7 @@ define amdgpu_kernel void @atomic_max_i64_addr64(ptr addrspace(1) %out, i64 %in,
; GFX12-NEXT: s_add_nc_u64 s[0:1], s[4:5], s[0:1]
; GFX12-NEXT: global_atomic_max_i64 v2, v[0:1], s[0:1]
; GFX12-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX12-NEXT: buffer_gl0_inv
+; GFX12-NEXT: global_inv scope:SCOPE_SE
; GFX12-NEXT: s_endpgm
entry:
%ptr = getelementptr i64, ptr addrspace(1) %out, i64 %index
@@ -2284,7 +2260,7 @@ define amdgpu_kernel void @atomic_max_i64_ret_addr64(ptr addrspace(1) %out, ptr
; GFX12-NEXT: s_add_nc_u64 s[0:1], s[0:1], s[4:5]
; GFX12-NEXT: global_atomic_max_i64 v[0:1], v2, v[0:1], s[0:1] th:TH_ATOMIC_RETURN
; GFX12-NEXT: s_waitcnt vmcnt(0)
-; GFX12-NEXT: buffer_gl0_inv
+; GFX12-NEXT: global_inv scope:SCOPE_SE
; GFX12-NEXT: global_store_b64 v2, v[0:1], s[2:3]
; GFX12-NEXT: s_nop 0
; GFX12-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
@@ -2337,7 +2313,7 @@ define amdgpu_kernel void @atomic_umax_i64_offset(ptr addrspace(1) %out, i64 %in
; GFX12-NEXT: v_mov_b32_e32 v0, s2
; GFX12-NEXT: global_atomic_max_u64 v2, v[0:1], s[0:1] offset:32
; GFX12-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX12-NEXT: buffer_gl0_inv
+; GFX12-NEXT: global_inv scope:SCOPE_SE
; GFX12-NEXT: s_endpgm
entry:
%gep = getelementptr i64, ptr addrspace(1) %out, i64 4
@@ -2405,7 +2381,7 @@ define amdgpu_kernel void @atomic_umax_i64_ret_offset(ptr addrspace(1) %out, ptr
; GFX12-NEXT: v_mov_b32_e32 v0, s4
; GFX12-NEXT: global_atomic_max_u64 v[0:1], v2, v[0:1], s[0:1] offset:32 th:TH_ATOMIC_RETURN
; GFX12-NEXT: s_waitcnt vmcnt(0)
-; GFX12-NEXT: buffer_gl0_inv
+; GFX12-NEXT: global_inv scope:SCOPE_SE
; GFX12-NEXT: global_store_b64 v2, v[0:1], s[2:3]
; GFX12-NEXT: s_nop 0
; GFX12-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
@@ -2477,7 +2453,7 @@ define amdgpu_kernel void @atomic_umax_i64_addr64_offset(ptr addrspace(1) %out,
; GFX12-NEXT: s_add_nc_u64 s[0:1], s[4:5], s[0:1]
; GFX12-NEXT: global_atomic_max_u64 v2, v[0:1], s[0:1] offset:32
; GFX12-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX12-NEXT: buffer_gl0_inv
+; GFX12-NEXT: global_inv scope:SCOPE_SE
; GFX12-NEXT: s_endpgm
entry:
%ptr = getelementptr i64, ptr addrspace(1) %out, i64 %index
@@ -2555,7 +2531,7 @@ define amdgpu_kernel void @atomic_umax_i64_ret_addr64_offset(ptr addrspace(1) %o
; GFX12-NEXT: s_add_nc_u64 s[0:1], s[0:1], s[4:5]
; GFX12-NEXT: global_atomic_max_u64 v[0:1], v2, v[0:1], s[0:1] offset:32 th:TH_ATOMIC_RETURN
; GFX12-NEXT: s_waitcnt vmcnt(0)
-; GFX12-NEXT: buffer_gl0_inv
+; GFX12-NEXT: global_inv scope:SCOPE_SE
; GFX12-NEXT: global_store_b64 v2, v[0:1], s[2:3]
; GFX12-NEXT: s_nop 0
; GFX12-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
@@ -2613,7 +2589,7 @@ define amdgpu_kernel void @atomic_umax_i64(ptr addrspace(1) %out, i64 %in) {
; GFX12-NEXT: v_mov_b32_e32 v0, s2
; GFX12-NEXT: global_atomic_max_u64 v2, v[0:1], s[0:1]
; GFX12-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX12-NEXT: buffer_gl0_inv
+; GFX12-NEXT: global_inv scope:SCOPE_SE
; GFX12-NEXT: s_endpgm
entry:
%tmp0 = atomicrmw volatile umax ptr addrspace(1) %out, i64 %in syncscope("workgroup") seq_cst
@@ -2680,7 +2656,7 @@ define amdgpu_kernel void @atomic_umax_i64_ret(ptr addrspace(1) %out, ptr addrsp
; GFX12-NEXT: v_mov_b32_e32 v0, s4
; GFX12-NEXT: global_atomic_max_u64 v[0:1], v2, v[0:1], s[0:1] th:TH_ATOMIC_RETURN
; GFX12-NEXT: s_waitcnt vmcnt(0)
-; GFX12-NEXT: buffer_gl0_inv
+; GFX12-NEXT: global_inv scope:SCOPE_SE
; GFX12-NEXT: global_store_b64 v2, v[0:1], s[2:3]
; GFX12-NEXT: s_nop 0
; GFX12-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
@@ -2749,7 +2725,7 @@ define amdgpu_kernel void @atomic_umax_i64_addr64(ptr addrspace(1) %out, i64 %in
; GFX12-NEXT: s_add_nc_u64 s[0:1], s[4:5], s[0:1]
; GFX12-NEXT: global_atomic_max_u64 v2, v[0:1], s[0:1]
; GFX12-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX12-NEXT: buffer_gl0_inv
+; GFX12-NEXT: global_inv scope:SCOPE_SE
; GFX12-NEXT: s_endpgm
entry:
%ptr = getelementptr i64, ptr addrspace(1) %out, i64 %index
@@ -2824,7 +2800,7 @@ define amdgpu_kernel void @atomic_umax_i64_ret_addr64(ptr addrspace(1) %out, ptr
; GFX12-NEXT: s_add_nc_u64 s[0:1], s[0:1], s[4:5]
; GFX12-NEXT: global_atomic_max_u64 v[0:1], v2, v[0:1], s[0:1] th:TH_ATOMIC_RETURN
; GFX12-NEXT: s_waitcnt vmcnt(0)
-; GFX12-NEXT: buffer_gl0_inv
+; GFX12-NEXT: global_inv scope:SCOPE_SE
; GFX12-NEXT: global_store_b64 v2, v[0:1], s[2:3]
; GFX12-NEXT: s_nop 0
; GFX12-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
@@ -2877,7 +2853,7 @@ define amdgpu_kernel void @atomic_min_i64_offset(ptr addrspace(1) %out, i64 %in)
; GFX12-NEXT: v_mov_b32_e32 v0, s2
; GFX12-NEXT: global_atomic_min_i64 v2, v[0:1], s[0:1] offset:32
; GFX12-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX12-NEXT: buffer_gl0_inv
+; GFX12-NEXT: global_inv scope:SCOPE_SE
; GFX12-NEXT: s_endpgm
entry:
%gep = getelementptr i64, ptr addrspace(1) %out, i64 4
@@ -2945,7 +2921,7 @@ define amdgpu_kernel void @atomic_min_i64_ret_offset(ptr addrspace(1) %out, ptr
; GFX12-NEXT: v_mov_b32_e32 v0, s4
; GFX12-NEXT: global_atomic_min_i64 v[0:1], v2, v[0:1], s[0:1] offset:32 th:TH_ATOMIC_RETURN
; GFX12-NEXT: s_waitcnt vmcnt(0)
-; GFX12-NEXT: buffer_gl0_inv
+; GFX12-NEXT: global_inv scope:SCOPE_SE
; GFX12-NEXT: global_store_b64 v2, v[0:1], s[2:3]
; GFX12-NEXT: s_nop 0
; GFX12-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
@@ -3017,7 +2993,7 @@ define amdgpu_kernel void @atomic_min_i64_addr64_offset(ptr addrspace(1) %out, i
; GFX12-NEXT: s_add_nc_u64 s[0:1], s[4:5], s[0:1]
; GFX12-NEXT: global_atomic_min_i64 v2, v[0:1], s[0:1] offset:32
; GFX12-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX12-NEXT: buffer_gl0_inv
+; GFX12-NEXT: global_inv scope:SCOPE_SE
; GFX12-NEXT: s_endpgm
entry:
%ptr = getelementptr i64, ptr addrspace(1) %out, i64 %index
@@ -3095,7 +3071,7 @@ define amdgpu_kernel void @atomic_min_i64_ret_addr64_offset(ptr addrspace(1) %ou
; GFX12-NEXT: s_add_nc_u64 s[0:1], s[0:1], s[4:5]
; GFX12-NEXT: global_atomic_min_i64 v[0:1], v2, v[0:1], s[0:1] offset:32 th:TH_ATOMIC_RETURN
; GFX12-NEXT: s_waitcnt vmcnt(0)
-; GFX12-NEXT: buffer_gl0_inv
+; GFX12-NEXT: global_inv scope:SCOPE_SE
; GFX12-NEXT: global_store_b64 v2, v[0:1], s[2:3]
; GFX12-NEXT: s_nop 0
; GFX12-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
@@ -3153,7 +3129,7 @@ define amdgpu_kernel void @atomic_min_i64(ptr addrspace(1) %out, i64 %in) {
; GFX12-NEXT: v_mov_b32_e32 v0, s2
; GFX12-NEXT: global_atomic_min_i64 v2, v[0:1], s[0:1]
; GFX12-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX12-NEXT: buffer_gl0_inv
+; GFX12-NEXT: global_inv scope:SCOPE_SE
; GFX12-NEXT: s_endpgm
entry:
%tmp0 = atomicrmw volatile min ptr addrspace(1) %out, i64 %in syncscope("workgroup") seq_cst
@@ -3220,7 +3196,7 @@ define amdgpu_kernel void @atomic_min_i64_ret(ptr addrspace(1) %out, ptr addrspa
; GFX12-NEXT: v_mov_b32_e32 v0, s4
; GFX12-NEXT: global_atomic_min_i64 v[0:1], v2, v[0:1], s[0:1] th:TH_ATOMIC_RETURN
; GFX12-NEXT: s_waitcnt vmcnt(0)
-; GFX12-NEXT: buffer_gl0_inv
+; GFX12-NEXT: global_inv scope:SCOPE_SE
; GFX12-NEXT: global_store_b64 v2, v[0:1], s[2:3]
; GFX12-NEXT: s_nop 0
; GFX12-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
@@ -3289,7 +3265,7 @@ define amdgpu_kernel void @atomic_min_i64_addr64(ptr addrspace(1) %out, i64 %in,
; GFX12-NEXT: s_add_nc_u64 s[0:1], s[4:5], s[0:1]
; GFX12-NEXT: global_atomic_min_i64 v2, v[0:1], s[0:1]
; GFX12-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX12-NEXT: buffer_gl0_inv
+; GFX12-NEXT: global_inv scope:SCOPE_SE
; GFX12-NEXT: s_endpgm
entry:
%ptr = getelementptr i64, ptr addrspace(1) %out, i64 %index
@@ -3364,7 +3340,7 @@ define amdgpu_kernel void @atomic_min_i64_ret_addr64(ptr addrspace(1) %out, ptr
; GFX12-NEXT: s_add_nc_u64 s[0:1], s[0:1], s[4:5]
; GFX12-NEXT: global_atomic_min_i64 v[0:1], v2, v[0:1], s[0:1] th:TH_ATOMIC_RETURN
; GFX12-NEXT: s_waitcnt vmcnt(0)
-; GFX12-NEXT: buffer_gl0_inv
+; GFX12-NEXT: global_inv scope:SCOPE_SE
; GFX12-NEXT: global_store_b64 v2, v[0:1], s[2:3]
; GFX12-NEXT: s_nop 0
; GFX12-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
@@ -3417,7 +3393,7 @@ define amdgpu_kernel void @atomic_umin_i64_offset(ptr addrspace(1) %out, i64 %in
; GFX12-NEXT: v_mov_b32_e32 v0, s2
; GFX12-NEXT: global_atomic_min_u64 v2, v[0:1], s[0:1] offset:32
; GFX12-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX12-NEXT: buffer_gl0_inv
+; GFX12-NEXT: global_inv scope:SCOPE_SE
; GFX12-NEXT: s_endpgm
entry:
%gep = getelementptr i64, ptr addrspace(1) %out, i64 4
@@ -3485,7 +3461,7 @@ define amdgpu_kernel void @atomic_umin_i64_ret_offset(ptr addrspace(1) %out, ptr
; GFX12-NEXT: v_mov_b32_e32 v0, s4
; GFX12-NEXT: global_atomic_min_u64 v[0:1], v2, v[0:1], s[0:1] offset:32 th:TH_ATOMIC_RETURN
; GFX12-NEXT: s_waitcnt vmcnt(0)
-; GFX12-NEXT: buffer_gl0_inv
+; GFX12-NEXT: global_inv scope:SCOPE_SE
; GFX12-NEXT: global_store_b64 v2, v[0:1], s[2:3]
; GFX12-NEXT: s_nop 0
; GFX12-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
@@ -3557,7 +3533,7 @@ define amdgpu_kernel void @atomic_umin_i64_addr64_offset(ptr addrspace(1) %out,
; GFX12-NEXT: s_add_nc_u64 s[0:1], s[4:5], s[0:1]
; GFX12-NEXT: global_atomic_min_u64 v2, v[0:1], s[0:1] offset:32
; GFX12-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX12-NEXT: buffer_gl0_inv
+; GFX12-NEXT: global_inv scope:SCOPE_SE
; GFX12-NEXT: s_endpgm
entry:
%ptr = getelementptr i64, ptr addrspace(1) %out, i64 %index
@@ -3635,7 +3611,7 @@ define amdgpu_kernel void @atomic_umin_i64_ret_addr64_offset(ptr addrspace(1) %o
; GFX12-NEXT: s_add_nc_u64 s[0:1], s[0:1], s[4:5]
; GFX12-NEXT: global_atomic_min_u64 v[0:1], v2, v[0:1], s[0:1] offset:32 th:TH_ATOMIC_RETURN
; GFX12-NEXT: s_waitcnt vmcnt(0)
-; GFX12-NEXT: buffer_gl0_inv
+; GFX12-NEXT: global_inv scope:SCOPE_SE
; GFX12-NEXT: global_store_b64 v2, v[0:1], s[2:3]
; GFX12-NEXT: s_nop 0
; GFX12-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
@@ -3693,7 +3669,7 @@ define amdgpu_kernel void @atomic_umin_i64(ptr addrspace(1) %out, i64 %in) {
; GFX12-NEXT: v_mov_b32_e32 v0, s2
; GFX12-NEXT: global_atomic_min_u64 v2, v[0:1], s[0:1]
; GFX12-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX12-NEXT: buffer_gl0_inv
+; GFX12-NEXT: global_inv scope:SCOPE_SE
; GFX12-NEXT: s_endpgm
entry:
%tmp0 = atomicrmw volatile umin ptr addrspace(1) %out, i64 %in syncscope("workgroup") seq_cst
@@ -3760,7 +3736,7 @@ define amdgpu_kernel void @atomic_umin_i64_ret(ptr addrspace(1) %out, ptr addrsp
; GFX12-NEXT: v_mov_b32_e32 v0, s4
; GFX12-NEXT: global_atomic_min_u64 v[0:1], v2, v[0:1], s[0:1] th:TH_ATOMIC_RETURN
; GFX12-NEXT: s_waitcnt vmcnt(0)
-; GFX12-NEXT: buffer_gl0_inv
+; GFX12-NEXT: global_inv scope:SCOPE_SE
; GFX12-NEXT: global_store_b64 v2, v[0:1], s[2:3]
; GFX12-NEXT: s_nop 0
; GFX12-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
@@ -3829,7 +3805,7 @@ define amdgpu_kernel void @atomic_umin_i64_addr64(ptr addrspace(1) %out, i64 %in
; GFX12-NEXT: s_add_nc_u64 s[0:1], s[4:5], s[0:1]
; GFX12-NEXT: global_atomic_min_u64 v2, v[0:1], s[0:1]
; GFX12-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX12-NEXT: buffer_gl0_inv
+; GFX12-NEXT: global_inv scope:SCOPE_SE
; GFX12-NEXT: s_endpgm
entry:
%ptr = getelementptr i64, ptr addrspace(1) %out, i64 %index
@@ -3904,7 +3880,7 @@ define amdgpu_kernel void @atomic_umin_i64_ret_addr64(ptr addrspace(1) %out, ptr
; GFX12-NEXT: s_add_nc_u64 s[0:1], s[0:1], s[4:5]
; GFX12-NEXT: global_atomic_min_u64 v[0:1], v2, v[0:1], s[0:1] th:TH_ATOMIC_RETURN
; GFX12-NEXT: s_waitcnt vmcnt(0)
-; GFX12-NEXT: buffer_gl0_inv
+; GFX12-NEXT: global_inv scope:SCOPE_SE
; GFX12-NEXT: global_store_b64 v2, v[0:1], s[2:3]
; GFX12-NEXT: s_nop 0
; GFX12-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
@@ -3963,8 +3939,7 @@ define amdgpu_kernel void @atomic_or_i64_offset(ptr addrspace(1) %out, i64 %in)
; GFX12-NEXT: v_mov_b32_e32 v0, s2
; GFX12-NEXT: global_atomic_or_b64 v2, v[0:1], s[0:1] offset:32
; GFX12-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX12-NEXT: buffer_gl0_inv
-; GFX12-NEXT: buffer_gl1_inv
+; GFX12-NEXT: global_inv scope:SCOPE_DEV
; GFX12-NEXT: s_endpgm
entry:
%gep = getelementptr i64, ptr addrspace(1) %out, i64 4
@@ -4035,8 +4010,7 @@ define amdgpu_kernel void @atomic_or_i64_ret_offset(ptr addrspace(1) %out, ptr a
; GFX12-NEXT: v_mov_b32_e32 v0, s4
; GFX12-NEXT: global_atomic_or_b64 v[0:1], v2, v[0:1], s[0:1] offset:32 th:TH_ATOMIC_RETURN
; GFX12-NEXT: s_waitcnt vmcnt(0)
-; GFX12-NEXT: buffer_gl0_inv
-; GFX12-NEXT: buffer_gl1_inv
+; GFX12-NEXT: global_inv scope:SCOPE_DEV
; GFX12-NEXT: global_store_b64 v2, v[0:1], s[2:3]
; GFX12-NEXT: s_nop 0
; GFX12-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
@@ -4114,8 +4088,7 @@ define amdgpu_kernel void @atomic_or_i64_addr64_offset(ptr addrspace(1) %out, i6
; GFX12-NEXT: s_add_nc_u64 s[0:1], s[4:5], s[0:1]
; GFX12-NEXT: global_atomic_or_b64 v2, v[0:1], s[0:1] offset:32
; GFX12-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX12-NEXT: buffer_gl0_inv
-; GFX12-NEXT: buffer_gl1_inv
+; GFX12-NEXT: global_inv scope:SCOPE_DEV
; GFX12-NEXT: s_endpgm
entry:
%ptr = getelementptr i64, ptr addrspace(1) %out, i64 %index
@@ -4196,8 +4169,7 @@ define amdgpu_kernel void @atomic_or_i64_ret_addr64_offset(ptr addrspace(1) %out
; GFX12-NEXT: s_add_nc_u64 s[0:1], s[0:1], s[4:5]
; GFX12-NEXT: global_atomic_or_b64 v[0:1], v2, v[0:1], s[0:1] offset:32 th:TH_ATOMIC_RETURN
; GFX12-NEXT: s_waitcnt vmcnt(0)
-; GFX12-NEXT: buffer_gl0_inv
-; GFX12-NEXT: buffer_gl1_inv
+; GFX12-NEXT: global_inv scope:SCOPE_DEV
; GFX12-NEXT: global_store_b64 v2, v[0:1], s[2:3]
; GFX12-NEXT: s_nop 0
; GFX12-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
@@ -4261,8 +4233,7 @@ define amdgpu_kernel void @atomic_or_i64(ptr addrspace(1) %out, i64 %in) {
; GFX12-NEXT: v_mov_b32_e32 v0, s2
; GFX12-NEXT: global_atomic_or_b64 v2, v[0:1], s[0:1]
; GFX12-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX12-NEXT: buffer_gl0_inv
-; GFX12-NEXT: buffer_gl1_inv
+; GFX12-NEXT: global_inv scope:SCOPE_DEV
; GFX12-NEXT: s_endpgm
entry:
%tmp0 = atomicrmw volatile or ptr addrspace(1) %out, i64 %in syncscope("agent") seq_cst
@@ -4332,8 +4303,7 @@ define amdgpu_kernel void @atomic_or_i64_ret(ptr addrspace(1) %out, ptr addrspac
; GFX12-NEXT: v_mov_b32_e32 v0, s4
; GFX12-NEXT: global_atomic_or_b64 v[0:1], v2, v[0:1], s[0:1] th:TH_ATOMIC_RETURN
; GFX12-NEXT: s_waitcnt vmcnt(0)
-; GFX12-NEXT: buffer_gl0_inv
-; GFX12-NEXT: buffer_gl1_inv
+; GFX12-NEXT: global_inv scope:SCOPE_DEV
; GFX12-NEXT: global_store_b64 v2, v[0:1], s[2:3]
; GFX12-NEXT: s_nop 0
; GFX12-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
@@ -4408,8 +4378,7 @@ define amdgpu_kernel void @atomic_or_i64_addr64(ptr addrspace(1) %out, i64 %in,
; GFX12-NEXT: s_add_nc_u64 s[0:1], s[4:5], s[0:1]
; GFX12-NEXT: global_atomic_or_b64 v2, v[0:1], s[0:1]
; GFX12-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX12-NEXT: buffer_gl0_inv
-; GFX12-NEXT: buffer_gl1_inv
+; GFX12-NEXT: global_inv scope:SCOPE_DEV
; GFX12-NEXT: s_endpgm
entry:
%ptr = getelementptr i64, ptr addrspace(1) %out, i64 %index
@@ -4487,8 +4456,7 @@ define amdgpu_kernel void @atomic_or_i64_ret_addr64(ptr addrspace(1) %out, ptr a
; GFX12-NEXT: s_add_nc_u64 s[0:1], s[0:1], s[4:5]
; GFX12-NEXT: global_atomic_or_b64 v[0:1], v2, v[0:1], s[0:1] th:TH_ATOMIC_RETURN
; GFX12-NEXT: s_waitcnt vmcnt(0)
-; GFX12-NEXT: buffer_gl0_inv
-; GFX12-NEXT: buffer_gl1_inv
+; GFX12-NEXT: global_inv scope:SCOPE_DEV
; GFX12-NEXT: global_store_b64 v2, v[0:1], s[2:3]
; GFX12-NEXT: s_nop 0
; GFX12-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
@@ -4547,8 +4515,7 @@ define amdgpu_kernel void @atomic_xchg_i64_offset(ptr addrspace(1) %out, i64 %in
; GFX12-NEXT: v_mov_b32_e32 v0, s2
; GFX12-NEXT: global_atomic_swap_b64 v2, v[0:1], s[0:1] offset:32
; GFX12-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX12-NEXT: buffer_gl0_inv
-; GFX12-NEXT: buffer_gl1_inv
+; GFX12-NEXT: global_inv scope:SCOPE_DEV
; GFX12-NEXT: s_endpgm
entry:
%gep = getelementptr i64, ptr addrspace(1) %out, i64 4
@@ -4603,8 +4570,7 @@ define amdgpu_kernel void @atomic_xchg_f64_offset(ptr addrspace(1) %out, double
; GFX12-NEXT: v_mov_b32_e32 v0, s2
; GFX12-NEXT: global_atomic_swap_b64 v2, v[0:1], s[0:1] offset:32
; GFX12-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX12-NEXT: buffer_gl0_inv
-; GFX12-NEXT: buffer_gl1_inv
+; GFX12-NEXT: global_inv scope:SCOPE_SYS
; GFX12-NEXT: s_endpgm
entry:
%gep = getelementptr double, ptr addrspace(1) %out, i64 4
@@ -4659,8 +4625,7 @@ define amdgpu_kernel void @atomic_xchg_pointer_offset(ptr addrspace(1) %out, ptr
; GFX12-NEXT: v_mov_b32_e32 v0, s2
; GFX12-NEXT: global_atomic_swap_b64 v2, v[0:1], s[0:1] offset:32
; GFX12-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX12-NEXT: buffer_gl0_inv
-; GFX12-NEXT: buffer_gl1_inv
+; GFX12-NEXT: global_inv scope:SCOPE_SYS
; GFX12-NEXT: s_endpgm
entry:
%gep = getelementptr ptr, ptr addrspace(1) %out, i64 4
@@ -4731,8 +4696,7 @@ define amdgpu_kernel void @atomic_xchg_i64_ret_offset(ptr addrspace(1) %out, ptr
; GFX12-NEXT: v_mov_b32_e32 v0, s4
; GFX12-NEXT: global_atomic_swap_b64 v[0:1], v2, v[0:1], s[0:1] offset:32 th:TH_ATOMIC_RETURN
; GFX12-NEXT: s_waitcnt vmcnt(0)
-; GFX12-NEXT: buffer_gl0_inv
-; GFX12-NEXT: buffer_gl1_inv
+; GFX12-NEXT: global_inv scope:SCOPE_DEV
; GFX12-NEXT: global_store_b64 v2, v[0:1], s[2:3]
; GFX12-NEXT: s_nop 0
; GFX12-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
@@ -4810,8 +4774,7 @@ define amdgpu_kernel void @atomic_xchg_i64_addr64_offset(ptr addrspace(1) %out,
; GFX12-NEXT: s_add_nc_u64 s[0:1], s[4:5], s[0:1]
; GFX12-NEXT: global_atomic_swap_b64 v2, v[0:1], s[0:1] offset:32
; GFX12-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX12-NEXT: buffer_gl0_inv
-; GFX12-NEXT: buffer_gl1_inv
+; GFX12-NEXT: global_inv scope:SCOPE_DEV
; GFX12-NEXT: s_endpgm
entry:
%ptr = getelementptr i64, ptr addrspace(1) %out, i64 %index
@@ -4892,8 +4855,7 @@ define amdgpu_kernel void @atomic_xchg_i64_ret_addr64_offset(ptr addrspace(1) %o
; GFX12-NEXT: s_add_nc_u64 s[0:1], s[0:1], s[4:5]
; GFX12-NEXT: global_atomic_swap_b64 v[0:1], v2, v[0:1], s[0:1] offset:32 th:TH_ATOMIC_RETURN
; GFX12-NEXT: s_waitcnt vmcnt(0)
-; GFX12-NEXT: buffer_gl0_inv
-; GFX12-NEXT: buffer_gl1_inv
+; GFX12-NEXT: global_inv scope:SCOPE_DEV
; GFX12-NEXT: global_store_b64 v2, v[0:1], s[2:3]
; GFX12-NEXT: s_nop 0
; GFX12-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
@@ -4957,8 +4919,7 @@ define amdgpu_kernel void @atomic_xchg_i64(ptr addrspace(1) %out, i64 %in) {
; GFX12-NEXT: v_mov_b32_e32 v0, s2
; GFX12-NEXT: global_atomic_swap_b64 v2, v[0:1], s[0:1]
; GFX12-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX12-NEXT: buffer_gl0_inv
-; GFX12-NEXT: buffer_gl1_inv
+; GFX12-NEXT: global_inv scope:SCOPE_DEV
; GFX12-NEXT: s_endpgm
entry:
%tmp0 = atomicrmw volatile xchg ptr addrspace(1) %out, i64 %in syncscope("agent") seq_cst
@@ -5028,8 +4989,7 @@ define amdgpu_kernel void @atomic_xchg_i64_ret(ptr addrspace(1) %out, ptr addrsp
; GFX12-NEXT: v_mov_b32_e32 v0, s4
; GFX12-NEXT: global_atomic_swap_b64 v[0:1], v2, v[0:1], s[0:1] th:TH_ATOMIC_RETURN
; GFX12-NEXT: s_waitcnt vmcnt(0)
-; GFX12-NEXT: buffer_gl0_inv
-; GFX12-NEXT: buffer_gl1_inv
+; GFX12-NEXT: global_inv scope:SCOPE_DEV
; GFX12-NEXT: global_store_b64 v2, v[0:1], s[2:3]
; GFX12-NEXT: s_nop 0
; GFX12-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
@@ -5104,8 +5064,7 @@ define amdgpu_kernel void @atomic_xchg_i64_addr64(ptr addrspace(1) %out, i64 %in
; GFX12-NEXT: s_add_nc_u64 s[0:1], s[4:5], s[0:1]
; GFX12-NEXT: global_atomic_swap_b64 v2, v[0:1], s[0:1]
; GFX12-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX12-NEXT: buffer_gl0_inv
-; GFX12-NEXT: buffer_gl1_inv
+; GFX12-NEXT: global_inv scope:SCOPE_DEV
; GFX12-NEXT: s_endpgm
entry:
%ptr = getelementptr i64, ptr addrspace(1) %out, i64 %index
@@ -5183,8 +5142,7 @@ define amdgpu_kernel void @atomic_xchg_i64_ret_addr64(ptr addrspace(1) %out, ptr
; GFX12-NEXT: s_add_nc_u64 s[0:1], s[0:1], s[4:5]
; GFX12-NEXT: global_atomic_swap_b64 v[0:1], v2, v[0:1], s[0:1] th:TH_ATOMIC_RETURN
; GFX12-NEXT: s_waitcnt vmcnt(0)
-; GFX12-NEXT: buffer_gl0_inv
-; GFX12-NEXT: buffer_gl1_inv
+; GFX12-NEXT: global_inv scope:SCOPE_DEV
; GFX12-NEXT: global_store_b64 v2, v[0:1], s[2:3]
; GFX12-NEXT: s_nop 0
; GFX12-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
@@ -5243,8 +5201,7 @@ define amdgpu_kernel void @atomic_xor_i64_offset(ptr addrspace(1) %out, i64 %in)
; GFX12-NEXT: v_mov_b32_e32 v0, s2
; GFX12-NEXT: global_atomic_xor_b64 v2, v[0:1], s[0:1] offset:32
; GFX12-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX12-NEXT: buffer_gl0_inv
-; GFX12-NEXT: buffer_gl1_inv
+; GFX12-NEXT: global_inv scope:SCOPE_DEV
; GFX12-NEXT: s_endpgm
entry:
%gep = getelementptr i64, ptr addrspace(1) %out, i64 4
@@ -5315,8 +5272,7 @@ define amdgpu_kernel void @atomic_xor_i64_ret_offset(ptr addrspace(1) %out, ptr
; GFX12-NEXT: v_mov_b32_e32 v0, s4
; GFX12-NEXT: global_atomic_xor_b64 v[0:1], v2, v[0:1], s[0:1] offset:32 th:TH_ATOMIC_RETURN
; GFX12-NEXT: s_waitcnt vmcnt(0)
-; GFX12-NEXT: buffer_gl0_inv
-; GFX12-NEXT: buffer_gl1_inv
+; GFX12-NEXT: global_inv scope:SCOPE_DEV
; GFX12-NEXT: global_store_b64 v2, v[0:1], s[2:3]
; GFX12-NEXT: s_nop 0
; GFX12-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
@@ -5394,8 +5350,7 @@ define amdgpu_kernel void @atomic_xor_i64_addr64_offset(ptr addrspace(1) %out, i
; GFX12-NEXT: s_add_nc_u64 s[0:1], s[4:5], s[0:1]
; GFX12-NEXT: global_atomic_xor_b64 v2, v[0:1], s[0:1] offset:32
; GFX12-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX12-NEXT: buffer_gl0_inv
-; GFX12-NEXT: buffer_gl1_inv
+; GFX12-NEXT: global_inv scope:SCOPE_DEV
; GFX12-NEXT: s_endpgm
entry:
%ptr = getelementptr i64, ptr addrspace(1) %out, i64 %index
@@ -5476,8 +5431,7 @@ define amdgpu_kernel void @atomic_xor_i64_ret_addr64_offset(ptr addrspace(1) %ou
; GFX12-NEXT: s_add_nc_u64 s[0:1], s[0:1], s[4:5]
; GFX12-NEXT: global_atomic_xor_b64 v[0:1], v2, v[0:1], s[0:1] offset:32 th:TH_ATOMIC_RETURN
; GFX12-NEXT: s_waitcnt vmcnt(0)
-; GFX12-NEXT: buffer_gl0_inv
-; GFX12-NEXT: buffer_gl1_inv
+; GFX12-NEXT: global_inv scope:SCOPE_DEV
; GFX12-NEXT: global_store_b64 v2, v[0:1], s[2:3]
; GFX12-NEXT: s_nop 0
; GFX12-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
@@ -5541,8 +5495,7 @@ define amdgpu_kernel void @atomic_xor_i64(ptr addrspace(1) %out, i64 %in) {
; GFX12-NEXT: v_mov_b32_e32 v0, s2
; GFX12-NEXT: global_atomic_xor_b64 v2, v[0:1], s[0:1]
; GFX12-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX12-NEXT: buffer_gl0_inv
-; GFX12-NEXT: buffer_gl1_inv
+; GFX12-NEXT: global_inv scope:SCOPE_DEV
; GFX12-NEXT: s_endpgm
entry:
%tmp0 = atomicrmw volatile xor ptr addrspace(1) %out, i64 %in syncscope("agent") seq_cst
@@ -5612,8 +5565,7 @@ define amdgpu_kernel void @atomic_xor_i64_ret(ptr addrspace(1) %out, ptr addrspa
; GFX12-NEXT: v_mov_b32_e32 v0, s4
; GFX12-NEXT: global_atomic_xor_b64 v[0:1], v2, v[0:1], s[0:1] th:TH_ATOMIC_RETURN
; GFX12-NEXT: s_waitcnt vmcnt(0)
-; GFX12-NEXT: buffer_gl0_inv
-; GFX12-NEXT: buffer_gl1_inv
+; GFX12-NEXT: global_inv scope:SCOPE_DEV
; GFX12-NEXT: global_store_b64 v2, v[0:1], s[2:3]
; GFX12-NEXT: s_nop 0
; GFX12-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
@@ -5688,8 +5640,7 @@ define amdgpu_kernel void @atomic_xor_i64_addr64(ptr addrspace(1) %out, i64 %in,
; GFX12-NEXT: s_add_nc_u64 s[0:1], s[4:5], s[0:1]
; GFX12-NEXT: global_atomic_xor_b64 v2, v[0:1], s[0:1]
; GFX12-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX12-NEXT: buffer_gl0_inv
-; GFX12-NEXT: buffer_gl1_inv
+; GFX12-NEXT: global_inv scope:SCOPE_DEV
; GFX12-NEXT: s_endpgm
entry:
%ptr = getelementptr i64, ptr addrspace(1) %out, i64 %index
@@ -5767,8 +5718,7 @@ define amdgpu_kernel void @atomic_xor_i64_ret_addr64(ptr addrspace(1) %out, ptr
; GFX12-NEXT: s_add_nc_u64 s[0:1], s[0:1], s[4:5]
; GFX12-NEXT: global_atomic_xor_b64 v[0:1], v2, v[0:1], s[0:1] th:TH_ATOMIC_RETURN
; GFX12-NEXT: s_waitcnt vmcnt(0)
-; GFX12-NEXT: buffer_gl0_inv
-; GFX12-NEXT: buffer_gl1_inv
+; GFX12-NEXT: global_inv scope:SCOPE_DEV
; GFX12-NEXT: global_store_b64 v2, v[0:1], s[2:3]
; GFX12-NEXT: s_nop 0
; GFX12-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
@@ -5843,8 +5793,7 @@ define amdgpu_kernel void @atomic_cmpxchg_i64_offset(ptr addrspace(1) %out, i64
; GFX12-NEXT: v_mov_b32_e32 v2, s0
; GFX12-NEXT: global_atomic_cmpswap_b64 v4, v[0:3], s[4:5] offset:32
; GFX12-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX12-NEXT: buffer_gl0_inv
-; GFX12-NEXT: buffer_gl1_inv
+; GFX12-NEXT: global_inv scope:SCOPE_DEV
; GFX12-NEXT: s_endpgm
entry:
%gep = getelementptr i64, ptr addrspace(1) %out, i64 4
@@ -5917,8 +5866,7 @@ define amdgpu_kernel void @atomic_cmpxchg_i64_soffset(ptr addrspace(1) %out, i64
; GFX12-NEXT: v_mov_b32_e32 v2, s0
; GFX12-NEXT: global_atomic_cmpswap_b64 v4, v[0:3], s[4:5] offset:72000
; GFX12-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX12-NEXT: buffer_gl0_inv
-; GFX12-NEXT: buffer_gl1_inv
+; GFX12-NEXT: global_inv scope:SCOPE_DEV
; GFX12-NEXT: s_endpgm
entry:
%gep = getelementptr i64, ptr addrspace(1) %out, i64 9000
@@ -5991,8 +5939,7 @@ define amdgpu_kernel void @atomic_cmpxchg_i64_ret_offset(ptr addrspace(1) %out,
; GFX12-NEXT: v_mov_b32_e32 v2, s6
; GFX12-NEXT: global_atomic_cmpswap_b64 v[0:1], v4, v[0:3], s[0:1] offset:32 th:TH_ATOMIC_RETURN
; GFX12-NEXT: s_waitcnt vmcnt(0)
-; GFX12-NEXT: buffer_gl0_inv
-; GFX12-NEXT: buffer_gl1_inv
+; GFX12-NEXT: global_inv scope:SCOPE_DEV
; GFX12-NEXT: global_store_b64 v4, v[0:1], s[2:3]
; GFX12-NEXT: s_nop 0
; GFX12-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
@@ -6074,8 +6021,7 @@ define amdgpu_kernel void @atomic_cmpxchg_i64_addr64_offset(ptr addrspace(1) %ou
; GFX12-NEXT: s_add_nc_u64 s[0:1], s[0:1], s[2:3]
; GFX12-NEXT: global_atomic_cmpswap_b64 v4, v[0:3], s[0:1] offset:32
; GFX12-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX12-NEXT: buffer_gl0_inv
-; GFX12-NEXT: buffer_gl1_inv
+; GFX12-NEXT: global_inv scope:SCOPE_DEV
; GFX12-NEXT: s_endpgm
entry:
%ptr = getelementptr i64, ptr addrspace(1) %out, i64 %index
@@ -6168,8 +6114,7 @@ define amdgpu_kernel void @atomic_cmpxchg_i64_ret_addr64_offset(ptr addrspace(1)
; GFX12-NEXT: s_add_nc_u64 s[0:1], s[4:5], s[0:1]
; GFX12-NEXT: global_atomic_cmpswap_b64 v[0:1], v4, v[0:3], s[0:1] offset:32 th:TH_ATOMIC_RETURN
; GFX12-NEXT: s_waitcnt vmcnt(0)
-; GFX12-NEXT: buffer_gl0_inv
-; GFX12-NEXT: buffer_gl1_inv
+; GFX12-NEXT: global_inv scope:SCOPE_DEV
; GFX12-NEXT: global_store_b64 v4, v[0:1], s[6:7]
; GFX12-NEXT: s_nop 0
; GFX12-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
@@ -6246,8 +6191,7 @@ define amdgpu_kernel void @atomic_cmpxchg_i64(ptr addrspace(1) %out, i64 %in, i6
; GFX12-NEXT: v_mov_b32_e32 v2, s0
; GFX12-NEXT: global_atomic_cmpswap_b64 v4, v[0:3], s[4:5]
; GFX12-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX12-NEXT: buffer_gl0_inv
-; GFX12-NEXT: buffer_gl1_inv
+; GFX12-NEXT: global_inv scope:SCOPE_DEV
; GFX12-NEXT: s_endpgm
entry:
%val = cmpxchg volatile ptr addrspace(1) %out, i64 %old, i64 %in syncscope("agent") seq_cst seq_cst
@@ -6319,8 +6263,7 @@ define amdgpu_kernel void @atomic_cmpxchg_i64_ret(ptr addrspace(1) %out, ptr add
; GFX12-NEXT: v_mov_b32_e32 v2, s6
; GFX12-NEXT: global_atomic_cmpswap_b64 v[0:1], v4, v[0:3], s[0:1] th:TH_ATOMIC_RETURN
; GFX12-NEXT: s_waitcnt vmcnt(0)
-; GFX12-NEXT: buffer_gl0_inv
-; GFX12-NEXT: buffer_gl1_inv
+; GFX12-NEXT: global_inv scope:SCOPE_DEV
; GFX12-NEXT: global_store_b64 v4, v[0:1], s[2:3]
; GFX12-NEXT: s_nop 0
; GFX12-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
@@ -6399,8 +6342,7 @@ define amdgpu_kernel void @atomic_cmpxchg_i64_addr64(ptr addrspace(1) %out, i64
; GFX12-NEXT: s_add_nc_u64 s[0:1], s[0:1], s[2:3]
; GFX12-NEXT: global_atomic_cmpswap_b64 v4, v[0:3], s[0:1]
; GFX12-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX12-NEXT: buffer_gl0_inv
-; GFX12-NEXT: buffer_gl1_inv
+; GFX12-NEXT: global_inv scope:SCOPE_DEV
; GFX12-NEXT: s_endpgm
entry:
%ptr = getelementptr i64, ptr addrspace(1) %out, i64 %index
@@ -6490,8 +6432,7 @@ define amdgpu_kernel void @atomic_cmpxchg_i64_ret_addr64(ptr addrspace(1) %out,
; GFX12-NEXT: s_add_nc_u64 s[0:1], s[4:5], s[0:1]
; GFX12-NEXT: global_atomic_cmpswap_b64 v[0:1], v4, v[0:3], s[0:1] th:TH_ATOMIC_RETURN
; GFX12-NEXT: s_waitcnt vmcnt(0)
-; GFX12-NEXT: buffer_gl0_inv
-; GFX12-NEXT: buffer_gl1_inv
+; GFX12-NEXT: global_inv scope:SCOPE_DEV
; GFX12-NEXT: global_store_b64 v4, v[0:1], s[6:7]
; GFX12-NEXT: s_nop 0
; GFX12-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
@@ -6557,8 +6498,7 @@ define amdgpu_kernel void @atomic_load_i64_offset(ptr addrspace(1) %in, ptr addr
; GFX12-NEXT: s_waitcnt lgkmcnt(0)
; GFX12-NEXT: global_load_b64 v[0:1], v2, s[0:1] offset:32 th:TH_LOAD_NT
; GFX12-NEXT: s_waitcnt vmcnt(0)
-; GFX12-NEXT: buffer_gl0_inv
-; GFX12-NEXT: buffer_gl1_inv
+; GFX12-NEXT: global_inv scope:SCOPE_SYS
; GFX12-NEXT: global_store_b64 v2, v[0:1], s[2:3]
; GFX12-NEXT: s_nop 0
; GFX12-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
@@ -6625,8 +6565,7 @@ define amdgpu_kernel void @atomic_load_i64_neg_offset(ptr addrspace(1) %in, ptr
; GFX12-NEXT: s_waitcnt lgkmcnt(0)
; GFX12-NEXT: global_load_b64 v[0:1], v2, s[0:1] offset:-32 th:TH_LOAD_NT
; GFX12-NEXT: s_waitcnt vmcnt(0)
-; GFX12-NEXT: buffer_gl0_inv
-; GFX12-NEXT: buffer_gl1_inv
+; GFX12-NEXT: global_inv scope:SCOPE_SYS
; GFX12-NEXT: global_store_b64 v2, v[0:1], s[2:3]
; GFX12-NEXT: s_nop 0
; GFX12-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
@@ -6689,8 +6628,7 @@ define amdgpu_kernel void @atomic_load_i64(ptr addrspace(1) %in, ptr addrspace(1
; GFX12-NEXT: s_waitcnt lgkmcnt(0)
; GFX12-NEXT: global_load_b64 v[0:1], v2, s[0:1] th:TH_LOAD_NT
; GFX12-NEXT: s_waitcnt vmcnt(0)
-; GFX12-NEXT: buffer_gl0_inv
-; GFX12-NEXT: buffer_gl1_inv
+; GFX12-NEXT: global_inv scope:SCOPE_DEV
; GFX12-NEXT: global_store_b64 v2, v[0:1], s[2:3]
; GFX12-NEXT: s_nop 0
; GFX12-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
@@ -6771,8 +6709,7 @@ define amdgpu_kernel void @atomic_load_i64_addr64_offset(ptr addrspace(1) %in, p
; GFX12-NEXT: s_add_nc_u64 s[0:1], s[0:1], s[4:5]
; GFX12-NEXT: global_load_b64 v[0:1], v2, s[0:1] offset:32 th:TH_LOAD_NT
; GFX12-NEXT: s_waitcnt vmcnt(0)
-; GFX12-NEXT: buffer_gl0_inv
-; GFX12-NEXT: buffer_gl1_inv
+; GFX12-NEXT: global_inv scope:SCOPE_SYS
; GFX12-NEXT: global_store_b64 v2, v[0:1], s[2:3]
; GFX12-NEXT: s_nop 0
; GFX12-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
@@ -6853,8 +6790,7 @@ define amdgpu_kernel void @atomic_load_i64_addr64(ptr addrspace(1) %in, ptr addr
; GFX12-NEXT: s_add_nc_u64 s[0:1], s[0:1], s[4:5]
; GFX12-NEXT: global_load_b64 v[0:1], v2, s[0:1] th:TH_LOAD_NT
; GFX12-NEXT: s_waitcnt vmcnt(0)
-; GFX12-NEXT: buffer_gl0_inv
-; GFX12-NEXT: buffer_gl1_inv
+; GFX12-NEXT: global_inv scope:SCOPE_SYS
; GFX12-NEXT: global_store_b64 v2, v[0:1], s[2:3]
; GFX12-NEXT: s_nop 0
; GFX12-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
@@ -6936,8 +6872,7 @@ define amdgpu_kernel void @atomic_load_f64_addr64_offset(ptr addrspace(1) %in, p
; GFX12-NEXT: s_add_nc_u64 s[0:1], s[0:1], s[4:5]
; GFX12-NEXT: global_load_b64 v[0:1], v2, s[0:1] offset:32 th:TH_LOAD_NT
; GFX12-NEXT: s_waitcnt vmcnt(0)
-; GFX12-NEXT: buffer_gl0_inv
-; GFX12-NEXT: buffer_gl1_inv
+; GFX12-NEXT: global_inv scope:SCOPE_SYS
; GFX12-NEXT: global_store_b64 v2, v[0:1], s[2:3]
; GFX12-NEXT: s_nop 0
; GFX12-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
@@ -7307,8 +7242,7 @@ define amdgpu_kernel void @atomic_inc_i64_offset(ptr addrspace(1) %out, i64 %in)
; GFX12-NEXT: v_mov_b32_e32 v0, s2
; GFX12-NEXT: global_atomic_inc_u64 v2, v[0:1], s[0:1] offset:32
; GFX12-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX12-NEXT: buffer_gl0_inv
-; GFX12-NEXT: buffer_gl1_inv
+; GFX12-NEXT: global_inv scope:SCOPE_DEV
; GFX12-NEXT: s_endpgm
entry:
%gep = getelementptr i64, ptr addrspace(1) %out, i64 4
@@ -7379,8 +7313,7 @@ define amdgpu_kernel void @atomic_inc_i64_ret_offset(ptr addrspace(1) %out, ptr
; GFX12-NEXT: v_mov_b32_e32 v0, s4
; GFX12-NEXT: global_atomic_inc_u64 v[0:1], v2, v[0:1], s[0:1] offset:32 th:TH_ATOMIC_RETURN
; GFX12-NEXT: s_waitcnt vmcnt(0)
-; GFX12-NEXT: buffer_gl0_inv
-; GFX12-NEXT: buffer_gl1_inv
+; GFX12-NEXT: global_inv scope:SCOPE_DEV
; GFX12-NEXT: global_store_b64 v2, v[0:1], s[2:3]
; GFX12-NEXT: s_nop 0
; GFX12-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
@@ -7458,8 +7391,7 @@ define amdgpu_kernel void @atomic_inc_i64_incr64_offset(ptr addrspace(1) %out, i
; GFX12-NEXT: s_add_nc_u64 s[0:1], s[4:5], s[0:1]
; GFX12-NEXT: global_atomic_inc_u64 v2, v[0:1], s[0:1] offset:32
; GFX12-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX12-NEXT: buffer_gl0_inv
-; GFX12-NEXT: buffer_gl1_inv
+; GFX12-NEXT: global_inv scope:SCOPE_DEV
; GFX12-NEXT: s_endpgm
entry:
%ptr = getelementptr i64, ptr addrspace(1) %out, i64 %index
@@ -7515,8 +7447,7 @@ define amdgpu_kernel void @atomic_dec_i64_offset(ptr addrspace(1) %out, i64 %in)
; GFX12-NEXT: v_mov_b32_e32 v0, s2
; GFX12-NEXT: global_atomic_dec_u64 v2, v[0:1], s[0:1] offset:32
; GFX12-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX12-NEXT: buffer_gl0_inv
-; GFX12-NEXT: buffer_gl1_inv
+; GFX12-NEXT: global_inv scope:SCOPE_DEV
; GFX12-NEXT: s_endpgm
entry:
%gep = getelementptr i64, ptr addrspace(1) %out, i64 4
@@ -7587,8 +7518,7 @@ define amdgpu_kernel void @atomic_dec_i64_ret_offset(ptr addrspace(1) %out, ptr
; GFX12-NEXT: v_mov_b32_e32 v0, s4
; GFX12-NEXT: global_atomic_dec_u64 v[0:1], v2, v[0:1], s[0:1] offset:32 th:TH_ATOMIC_RETURN
; GFX12-NEXT: s_waitcnt vmcnt(0)
-; GFX12-NEXT: buffer_gl0_inv
-; GFX12-NEXT: buffer_gl1_inv
+; GFX12-NEXT: global_inv scope:SCOPE_DEV
; GFX12-NEXT: global_store_b64 v2, v[0:1], s[2:3]
; GFX12-NEXT: s_nop 0
; GFX12-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
@@ -7666,8 +7596,7 @@ define amdgpu_kernel void @atomic_dec_i64_decr64_offset(ptr addrspace(1) %out, i
; GFX12-NEXT: s_add_nc_u64 s[0:1], s[4:5], s[0:1]
; GFX12-NEXT: global_atomic_dec_u64 v2, v[0:1], s[0:1] offset:32
; GFX12-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX12-NEXT: buffer_gl0_inv
-; GFX12-NEXT: buffer_gl1_inv
+; GFX12-NEXT: global_inv scope:SCOPE_DEV
; GFX12-NEXT: s_endpgm
entry:
%ptr = getelementptr i64, ptr addrspace(1) %out, i64 %index
diff --git a/llvm/test/CodeGen/AMDGPU/mul.ll b/llvm/test/CodeGen/AMDGPU/mul.ll
index 5e90c33f3c8c..e2617fc453b5 100644
--- a/llvm/test/CodeGen/AMDGPU/mul.ll
+++ b/llvm/test/CodeGen/AMDGPU/mul.ll
@@ -4,6 +4,7 @@
; RUN: llc -amdgpu-scalarize-global-loads=false -march=amdgcn -mcpu=gfx900 -mattr=-flat-for-global -verify-machineinstrs < %s | FileCheck -check-prefixes=GFX9 %s
; RUN: llc -amdgpu-scalarize-global-loads=false -march=amdgcn -mcpu=gfx1010 -mattr=-flat-for-global -verify-machineinstrs < %s | FileCheck -check-prefixes=GFX10 %s
; RUN: llc -amdgpu-scalarize-global-loads=false -march=amdgcn -mcpu=gfx1100 -mattr=-flat-for-global -verify-machineinstrs < %s | FileCheck -check-prefixes=GFX11 %s
+; RUN: llc -amdgpu-scalarize-global-loads=false -march=amdgcn -mcpu=gfx1200 -mattr=-flat-for-global -verify-machineinstrs < %s | FileCheck -check-prefixes=GFX12 %s
; RUN: llc -amdgpu-scalarize-global-loads=false -march=r600 -mcpu=redwood < %s | FileCheck -check-prefixes=EG %s

; mul24 and mad24 are affected
@@ -106,6 +107,27 @@ define amdgpu_kernel void @test_mul_v2i32(ptr addrspace(1) %out, ptr addrspace(1
; GFX11-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
; GFX11-NEXT: s_endpgm
;
+; GFX12-LABEL: test_mul_v2i32:
+; GFX12: ; %bb.0: ; %entry
+; GFX12-NEXT: s_load_b128 s[0:3], s[0:1], 0x24
+; GFX12-NEXT: s_mov_b32 s6, -1
+; GFX12-NEXT: s_mov_b32 s7, 0x31016000
+; GFX12-NEXT: s_mov_b32 s10, s6
+; GFX12-NEXT: s_mov_b32 s11, s7
+; GFX12-NEXT: s_waitcnt lgkmcnt(0)
+; GFX12-NEXT: s_mov_b32 s8, s2
+; GFX12-NEXT: s_mov_b32 s9, s3
+; GFX12-NEXT: s_mov_b32 s4, s0
+; GFX12-NEXT: buffer_load_b128 v[0:3], off, s[8:11], null
+; GFX12-NEXT: s_mov_b32 s5, s1
+; GFX12-NEXT: s_waitcnt vmcnt(0)
+; GFX12-NEXT: v_mul_lo_u32 v1, v1, v3
+; GFX12-NEXT: v_mul_lo_u32 v0, v0, v2
+; GFX12-NEXT: buffer_store_b64 v[0:1], off, s[4:7], null
+; GFX12-NEXT: s_nop 0
+; GFX12-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
+; GFX12-NEXT: s_endpgm
+;
; EG-LABEL: test_mul_v2i32:
; EG: ; %bb.0: ; %entry
; EG-NEXT: ALU 0, @8, KC0[CB0:0-32], KC1[]
@@ -247,6 +269,31 @@ define amdgpu_kernel void @v_mul_v4i32(ptr addrspace(1) %out, ptr addrspace(1) %
; GFX11-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
; GFX11-NEXT: s_endpgm
;
+; GFX12-LABEL: v_mul_v4i32:
+; GFX12: ; %bb.0: ; %entry
+; GFX12-NEXT: s_load_b128 s[0:3], s[0:1], 0x24
+; GFX12-NEXT: s_mov_b32 s6, -1
+; GFX12-NEXT: s_mov_b32 s7, 0x31016000
+; GFX12-NEXT: s_mov_b32 s10, s6
+; GFX12-NEXT: s_mov_b32 s11, s7
+; GFX12-NEXT: s_waitcnt lgkmcnt(0)
+; GFX12-NEXT: s_mov_b32 s8, s2
+; GFX12-NEXT: s_mov_b32 s9, s3
+; GFX12-NEXT: s_clause 0x1
+; GFX12-NEXT: buffer_load_b128 v[0:3], off, s[8:11], null
+; GFX12-NEXT: buffer_load_b128 v[4:7], off, s[8:11], null offset:16
+; GFX12-NEXT: s_mov_b32 s4, s0
+; GFX12-NEXT: s_mov_b32 s5, s1
+; GFX12-NEXT: s_waitcnt vmcnt(0)
+; GFX12-NEXT: v_mul_lo_u32 v3, v3, v7
+; GFX12-NEXT: v_mul_lo_u32 v2, v2, v6
+; GFX12-NEXT: v_mul_lo_u32 v1, v1, v5
+; GFX12-NEXT: v_mul_lo_u32 v0, v0, v4
+; GFX12-NEXT: buffer_store_b128 v[0:3], off, s[4:7], null
+; GFX12-NEXT: s_nop 0
+; GFX12-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
+; GFX12-NEXT: s_endpgm
+;
; EG-LABEL: v_mul_v4i32:
; EG: ; %bb.0: ; %entry
; EG-NEXT: ALU 0, @10, KC0[CB0:0-32], KC1[]
@@ -351,6 +398,21 @@ define amdgpu_kernel void @s_trunc_i64_mul_to_i32(ptr addrspace(1) %out, i64 %a,
; GFX11-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
; GFX11-NEXT: s_endpgm
;
+; GFX12-LABEL: s_trunc_i64_mul_to_i32:
+; GFX12: ; %bb.0: ; %entry
+; GFX12-NEXT: s_clause 0x1
+; GFX12-NEXT: s_load_b128 s[4:7], s[0:1], 0x24
+; GFX12-NEXT: s_load_b32 s0, s[0:1], 0x34
+; GFX12-NEXT: s_waitcnt lgkmcnt(0)
+; GFX12-NEXT: s_mov_b32 s7, 0x31016000
+; GFX12-NEXT: s_mul_i32 s0, s0, s6
+; GFX12-NEXT: s_mov_b32 s6, -1
+; GFX12-NEXT: v_mov_b32_e32 v0, s0
+; GFX12-NEXT: buffer_store_b32 v0, off, s[4:7], null
+; GFX12-NEXT: s_nop 0
+; GFX12-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
+; GFX12-NEXT: s_endpgm
+;
; EG-LABEL: s_trunc_i64_mul_to_i32:
; EG: ; %bb.0: ; %entry
; EG-NEXT: ALU 2, @4, KC0[CB0:0-32], KC1[]
@@ -483,6 +545,31 @@ define amdgpu_kernel void @v_trunc_i64_mul_to_i32(ptr addrspace(1) %out, ptr add
; GFX11-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
; GFX11-NEXT: s_endpgm
;
+; GFX12-LABEL: v_trunc_i64_mul_to_i32:
+; GFX12: ; %bb.0: ; %entry
+; GFX12-NEXT: s_clause 0x1
+; GFX12-NEXT: s_load_b128 s[4:7], s[0:1], 0x24
+; GFX12-NEXT: s_load_b64 s[0:1], s[0:1], 0x34
+; GFX12-NEXT: s_mov_b32 s10, -1
+; GFX12-NEXT: s_mov_b32 s11, 0x31016000
+; GFX12-NEXT: s_mov_b32 s14, s10
+; GFX12-NEXT: s_mov_b32 s15, s11
+; GFX12-NEXT: s_mov_b32 s2, s10
+; GFX12-NEXT: s_mov_b32 s3, s11
+; GFX12-NEXT: s_waitcnt lgkmcnt(0)
+; GFX12-NEXT: s_mov_b32 s12, s6
+; GFX12-NEXT: s_mov_b32 s13, s7
+; GFX12-NEXT: buffer_load_b32 v0, off, s[12:15], null
+; GFX12-NEXT: buffer_load_b32 v1, off, s[0:3], null
+; GFX12-NEXT: s_mov_b32 s8, s4
+; GFX12-NEXT: s_mov_b32 s9, s5
+; GFX12-NEXT: s_waitcnt vmcnt(0)
+; GFX12-NEXT: v_mul_lo_u32 v0, v1, v0
+; GFX12-NEXT: buffer_store_b32 v0, off, s[8:11], null
+; GFX12-NEXT: s_nop 0
+; GFX12-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
+; GFX12-NEXT: s_endpgm
+;
; EG-LABEL: v_trunc_i64_mul_to_i32:
; EG: ; %bb.0: ; %entry
; EG-NEXT: ALU 1, @10, KC0[CB0:0-32], KC1[]
@@ -587,6 +674,21 @@ define amdgpu_kernel void @mul64_sext_c(ptr addrspace(1) %out, i32 %in) {
; GFX11-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
; GFX11-NEXT: s_endpgm
;
+; GFX12-LABEL: mul64_sext_c:
+; GFX12: ; %bb.0: ; %entry
+; GFX12-NEXT: s_load_b96 s[0:2], s[0:1], 0x24
+; GFX12-NEXT: s_waitcnt lgkmcnt(0)
+; GFX12-NEXT: s_ashr_i32 s3, s2, 31
+; GFX12-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX12-NEXT: s_mul_u64 s[4:5], s[2:3], 0x50
+; GFX12-NEXT: s_mov_b32 s3, 0x31016000
+; GFX12-NEXT: v_dual_mov_b32 v0, s4 :: v_dual_mov_b32 v1, s5
+; GFX12-NEXT: s_mov_b32 s2, -1
+; GFX12-NEXT: buffer_store_b64 v[0:1], off, s[0:3], null
+; GFX12-NEXT: s_nop 0
+; GFX12-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
+; GFX12-NEXT: s_endpgm
+;
; EG-LABEL: mul64_sext_c:
; EG: ; %bb.0: ; %entry
; EG-NEXT: ALU 4, @4, KC0[CB0:0-32], KC1[]
@@ -606,6 +708,113 @@ entry:
ret void
}

+define amdgpu_kernel void @mul64_zext_c(ptr addrspace(1) %out, i32 %in) {
+; SI-LABEL: mul64_zext_c:
+; SI: ; %bb.0: ; %entry
+; SI-NEXT: s_load_dword s4, s[0:1], 0xb
+; SI-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x9
+; SI-NEXT: v_mov_b32_e32 v0, 0x50
+; SI-NEXT: s_mov_b32 s3, 0xf000
+; SI-NEXT: s_mov_b32 s2, -1
+; SI-NEXT: s_waitcnt lgkmcnt(0)
+; SI-NEXT: v_mul_hi_u32 v1, s4, v0
+; SI-NEXT: s_mulk_i32 s4, 0x50
+; SI-NEXT: v_mov_b32_e32 v0, s4
+; SI-NEXT: buffer_store_dwordx2 v[0:1], off, s[0:3], 0
+; SI-NEXT: s_endpgm
+;
+; VI-LABEL: mul64_zext_c:
+; VI: ; %bb.0: ; %entry
+; VI-NEXT: s_load_dword s2, s[0:1], 0x2c
+; VI-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x24
+; VI-NEXT: v_mov_b32_e32 v0, 0x50
+; VI-NEXT: s_waitcnt lgkmcnt(0)
+; VI-NEXT: v_mad_u64_u32 v[0:1], s[2:3], s2, v0, 0
+; VI-NEXT: s_mov_b32 s3, 0xf000
+; VI-NEXT: s_mov_b32 s2, -1
+; VI-NEXT: s_nop 2
+; VI-NEXT: buffer_store_dwordx2 v[0:1], off, s[0:3], 0
+; VI-NEXT: s_endpgm
+;
+; GFX9-LABEL: mul64_zext_c:
+; GFX9: ; %bb.0: ; %entry
+; GFX9-NEXT: s_load_dword s2, s[0:1], 0x2c
+; GFX9-NEXT: s_load_dwordx2 s[4:5], s[0:1], 0x24
+; GFX9-NEXT: s_mov_b32 s7, 0xf000
+; GFX9-NEXT: s_mov_b32 s6, -1
+; GFX9-NEXT: s_waitcnt lgkmcnt(0)
+; GFX9-NEXT: s_mul_hi_u32 s0, s2, 0x50
+; GFX9-NEXT: s_mulk_i32 s2, 0x50
+; GFX9-NEXT: v_mov_b32_e32 v0, s2
+; GFX9-NEXT: v_mov_b32_e32 v1, s0
+; GFX9-NEXT: buffer_store_dwordx2 v[0:1], off, s[4:7], 0
+; GFX9-NEXT: s_endpgm
+;
+; GFX10-LABEL: mul64_zext_c:
+; GFX10: ; %bb.0: ; %entry
+; GFX10-NEXT: s_clause 0x1
+; GFX10-NEXT: s_load_dword s2, s[0:1], 0x2c
+; GFX10-NEXT: s_load_dwordx2 s[4:5], s[0:1], 0x24
+; GFX10-NEXT: s_mov_b32 s7, 0x31016000
+; GFX10-NEXT: s_mov_b32 s6, -1
+; GFX10-NEXT: s_waitcnt lgkmcnt(0)
+; GFX10-NEXT: s_mul_i32 s0, s2, 0x50
+; GFX10-NEXT: s_mul_hi_u32 s1, s2, 0x50
+; GFX10-NEXT: v_mov_b32_e32 v0, s0
+; GFX10-NEXT: v_mov_b32_e32 v1, s1
+; GFX10-NEXT: buffer_store_dwordx2 v[0:1], off, s[4:7], 0
+; GFX10-NEXT: s_endpgm
+;
+; GFX11-LABEL: mul64_zext_c:
+; GFX11: ; %bb.0: ; %entry
+; GFX11-NEXT: s_clause 0x1
+; GFX11-NEXT: s_load_b32 s2, s[0:1], 0x2c
+; GFX11-NEXT: s_load_b64 s[0:1], s[0:1], 0x24
+; GFX11-NEXT: s_waitcnt lgkmcnt(0)
+; GFX11-NEXT: s_mul_i32 s3, s2, 0x50
+; GFX11-NEXT: s_mul_hi_u32 s2, s2, 0x50
+; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-NEXT: v_dual_mov_b32 v0, s3 :: v_dual_mov_b32 v1, s2
+; GFX11-NEXT: s_mov_b32 s3, 0x31016000
+; GFX11-NEXT: s_mov_b32 s2, -1
+; GFX11-NEXT: buffer_store_b64 v[0:1], off, s[0:3], 0
+; GFX11-NEXT: s_nop 0
+; GFX11-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
+; GFX11-NEXT: s_endpgm
+;
+; GFX12-LABEL: mul64_zext_c:
+; GFX12: ; %bb.0: ; %entry
+; GFX12-NEXT: s_load_b96 s[0:2], s[0:1], 0x24
+; GFX12-NEXT: s_mov_b32 s3, 0
+; GFX12-NEXT: s_waitcnt lgkmcnt(0)
+; GFX12-NEXT: s_mul_u64 s[4:5], s[2:3], 0x50
+; GFX12-NEXT: s_mov_b32 s3, 0x31016000
+; GFX12-NEXT: v_dual_mov_b32 v0, s4 :: v_dual_mov_b32 v1, s5
+; GFX12-NEXT: s_mov_b32 s2, -1
+; GFX12-NEXT: buffer_store_b64 v[0:1], off, s[0:3], null
+; GFX12-NEXT: s_nop 0
+; GFX12-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
+; GFX12-NEXT: s_endpgm
+;
+; EG-LABEL: mul64_zext_c:
+; EG: ; %bb.0: ; %entry
+; EG-NEXT: ALU 4, @4, KC0[CB0:0-32], KC1[]
+; EG-NEXT: MEM_RAT_CACHELESS STORE_RAW T0.XY, T1.X, 1
+; EG-NEXT: CF_END
+; EG-NEXT: PAD
+; EG-NEXT: ALU clause starting at 4:
+; EG-NEXT: MULHI * T0.Y, KC0[2].Z, literal.x,
+; EG-NEXT: 80(1.121039e-43), 0(0.000000e+00)
+; EG-NEXT: LSHR T1.X, KC0[2].Y, literal.x,
+; EG-NEXT: MULLO_INT * T0.X, KC0[2].Z, literal.y,
+; EG-NEXT: 2(2.802597e-45), 80(1.121039e-43)
+entry:
+ %0 = zext i32 %in to i64
+ %1 = mul i64 %0, 80
+ store i64 %1, ptr addrspace(1) %out
+ ret void
+}
+
define amdgpu_kernel void @v_mul64_sext_c(ptr addrspace(1) %out, ptr addrspace(1) %in) {
; SI-LABEL: v_mul64_sext_c:
; SI: ; %bb.0: ; %entry
@@ -706,6 +915,27 @@ define amdgpu_kernel void @v_mul64_sext_c(ptr addrspace(1) %out, ptr addrspace(1
; GFX11-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
; GFX11-NEXT: s_endpgm
;
+; GFX12-LABEL: v_mul64_sext_c:
+; GFX12: ; %bb.0: ; %entry
+; GFX12-NEXT: s_load_b128 s[0:3], s[0:1], 0x24
+; GFX12-NEXT: s_mov_b32 s6, -1
+; GFX12-NEXT: s_mov_b32 s7, 0x31016000
+; GFX12-NEXT: s_mov_b32 s10, s6
+; GFX12-NEXT: s_mov_b32 s11, s7
+; GFX12-NEXT: s_waitcnt lgkmcnt(0)
+; GFX12-NEXT: s_mov_b32 s8, s2
+; GFX12-NEXT: s_mov_b32 s9, s3
+; GFX12-NEXT: s_mov_b32 s4, s0
+; GFX12-NEXT: buffer_load_b32 v0, off, s[8:11], null
+; GFX12-NEXT: s_mov_b32 s5, s1
+; GFX12-NEXT: s_waitcnt vmcnt(0)
+; GFX12-NEXT: v_mul_hi_i32 v1, 0x50, v0
+; GFX12-NEXT: v_mul_lo_u32 v0, 0x50, v0
+; GFX12-NEXT: buffer_store_b64 v[0:1], off, s[4:7], null
+; GFX12-NEXT: s_nop 0
+; GFX12-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
+; GFX12-NEXT: s_endpgm
+;
; EG-LABEL: v_mul64_sext_c:
; EG: ; %bb.0: ; %entry
; EG-NEXT: ALU 0, @8, KC0[CB0:0-32], KC1[]
@@ -732,6 +962,153 @@ entry:
ret void
}

+define amdgpu_kernel void @v_mul64_zext_c(ptr addrspace(1) %out, ptr addrspace(1) %in) {
+; SI-LABEL: v_mul64_zext_c:
+; SI: ; %bb.0: ; %entry
+; SI-NEXT: s_load_dwordx4 s[0:3], s[0:1], 0x9
+; SI-NEXT: s_mov_b32 s7, 0xf000
+; SI-NEXT: s_mov_b32 s6, -1
+; SI-NEXT: s_mov_b32 s10, s6
+; SI-NEXT: s_mov_b32 s11, s7
+; SI-NEXT: s_waitcnt lgkmcnt(0)
+; SI-NEXT: s_mov_b32 s8, s2
+; SI-NEXT: s_mov_b32 s9, s3
+; SI-NEXT: buffer_load_dword v0, off, s[8:11], 0
+; SI-NEXT: s_movk_i32 s2, 0x50
+; SI-NEXT: s_mov_b32 s4, s0
+; SI-NEXT: s_mov_b32 s5, s1
+; SI-NEXT: s_waitcnt vmcnt(0)
+; SI-NEXT: v_mul_hi_u32 v1, v0, s2
+; SI-NEXT: v_mul_lo_u32 v0, v0, s2
+; SI-NEXT: buffer_store_dwordx2 v[0:1], off, s[4:7], 0
+; SI-NEXT: s_endpgm
+;
+; VI-LABEL: v_mul64_zext_c:
+; VI: ; %bb.0: ; %entry
+; VI-NEXT: s_load_dwordx4 s[0:3], s[0:1], 0x24
+; VI-NEXT: s_mov_b32 s7, 0xf000
+; VI-NEXT: s_mov_b32 s6, -1
+; VI-NEXT: s_mov_b32 s10, s6
+; VI-NEXT: s_mov_b32 s11, s7
+; VI-NEXT: s_waitcnt lgkmcnt(0)
+; VI-NEXT: s_mov_b32 s8, s2
+; VI-NEXT: s_mov_b32 s9, s3
+; VI-NEXT: buffer_load_dword v0, off, s[8:11], 0
+; VI-NEXT: s_movk_i32 s2, 0x50
+; VI-NEXT: s_mov_b32 s4, s0
+; VI-NEXT: s_mov_b32 s5, s1
+; VI-NEXT: s_waitcnt vmcnt(0)
+; VI-NEXT: v_mad_u64_u32 v[0:1], s[2:3], v0, s2, 0
+; VI-NEXT: buffer_store_dwordx2 v[0:1], off, s[4:7], 0
+; VI-NEXT: s_endpgm
+;
+; GFX9-LABEL: v_mul64_zext_c:
+; GFX9: ; %bb.0: ; %entry
+; GFX9-NEXT: s_load_dwordx4 s[0:3], s[0:1], 0x24
+; GFX9-NEXT: s_mov_b32 s7, 0xf000
+; GFX9-NEXT: s_mov_b32 s6, -1
+; GFX9-NEXT: s_mov_b32 s10, s6
+; GFX9-NEXT: s_mov_b32 s11, s7
+; GFX9-NEXT: s_waitcnt lgkmcnt(0)
+; GFX9-NEXT: s_mov_b32 s8, s2
+; GFX9-NEXT: s_mov_b32 s9, s3
+; GFX9-NEXT: buffer_load_dword v0, off, s[8:11], 0
+; GFX9-NEXT: s_movk_i32 s2, 0x50
+; GFX9-NEXT: s_mov_b32 s4, s0
+; GFX9-NEXT: s_mov_b32 s5, s1
+; GFX9-NEXT: s_waitcnt vmcnt(0)
+; GFX9-NEXT: v_mul_hi_u32 v1, v0, s2
+; GFX9-NEXT: v_mul_lo_u32 v0, v0, s2
+; GFX9-NEXT: buffer_store_dwordx2 v[0:1], off, s[4:7], 0
+; GFX9-NEXT: s_endpgm
+;
+; GFX10-LABEL: v_mul64_zext_c:
+; GFX10: ; %bb.0: ; %entry
+; GFX10-NEXT: s_load_dwordx4 s[0:3], s[0:1], 0x24
+; GFX10-NEXT: s_mov_b32 s6, -1
+; GFX10-NEXT: s_mov_b32 s7, 0x31016000
+; GFX10-NEXT: s_mov_b32 s10, s6
+; GFX10-NEXT: s_mov_b32 s11, s7
+; GFX10-NEXT: s_waitcnt lgkmcnt(0)
+; GFX10-NEXT: s_mov_b32 s8, s2
+; GFX10-NEXT: s_mov_b32 s9, s3
+; GFX10-NEXT: s_mov_b32 s4, s0
+; GFX10-NEXT: buffer_load_dword v0, off, s[8:11], 0
+; GFX10-NEXT: s_mov_b32 s5, s1
+; GFX10-NEXT: s_waitcnt vmcnt(0)
+; GFX10-NEXT: v_mul_hi_u32 v1, 0x50, v0
+; GFX10-NEXT: v_mul_lo_u32 v0, 0x50, v0
+; GFX10-NEXT: buffer_store_dwordx2 v[0:1], off, s[4:7], 0
+; GFX10-NEXT: s_endpgm
+;
+; GFX11-LABEL: v_mul64_zext_c:
+; GFX11: ; %bb.0: ; %entry
+; GFX11-NEXT: s_load_b128 s[0:3], s[0:1], 0x24
+; GFX11-NEXT: s_mov_b32 s6, -1
+; GFX11-NEXT: s_mov_b32 s7, 0x31016000
+; GFX11-NEXT: s_mov_b32 s10, s6
+; GFX11-NEXT: s_mov_b32 s11, s7
+; GFX11-NEXT: s_waitcnt lgkmcnt(0)
+; GFX11-NEXT: s_mov_b32 s8, s2
+; GFX11-NEXT: s_mov_b32 s9, s3
+; GFX11-NEXT: s_mov_b32 s4, s0
+; GFX11-NEXT: buffer_load_b32 v0, off, s[8:11], 0
+; GFX11-NEXT: s_mov_b32 s5, s1
+; GFX11-NEXT: s_waitcnt vmcnt(0)
+; GFX11-NEXT: v_mul_hi_u32 v1, 0x50, v0
+; GFX11-NEXT: v_mul_lo_u32 v0, 0x50, v0
+; GFX11-NEXT: buffer_store_b64 v[0:1], off, s[4:7], 0
+; GFX11-NEXT: s_nop 0
+; GFX11-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
+; GFX11-NEXT: s_endpgm
+;
+; GFX12-LABEL: v_mul64_zext_c:
+; GFX12: ; %bb.0: ; %entry
+; GFX12-NEXT: s_load_b128 s[0:3], s[0:1], 0x24
+; GFX12-NEXT: s_mov_b32 s6, -1
+; GFX12-NEXT: s_mov_b32 s7, 0x31016000
+; GFX12-NEXT: s_mov_b32 s10, s6
+; GFX12-NEXT: s_mov_b32 s11, s7
+; GFX12-NEXT: s_waitcnt lgkmcnt(0)
+; GFX12-NEXT: s_mov_b32 s8, s2
+; GFX12-NEXT: s_mov_b32 s9, s3
+; GFX12-NEXT: s_mov_b32 s4, s0
+; GFX12-NEXT: buffer_load_b32 v0, off, s[8:11], null
+; GFX12-NEXT: s_mov_b32 s5, s1
+; GFX12-NEXT: s_waitcnt vmcnt(0)
+; GFX12-NEXT: v_mul_hi_u32 v1, 0x50, v0
+; GFX12-NEXT: v_mul_lo_u32 v0, 0x50, v0
+; GFX12-NEXT: buffer_store_b64 v[0:1], off, s[4:7], null
+; GFX12-NEXT: s_nop 0
+; GFX12-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
+; GFX12-NEXT: s_endpgm
+;
+; EG-LABEL: v_mul64_zext_c:
+; EG: ; %bb.0: ; %entry
+; EG-NEXT: ALU 0, @8, KC0[CB0:0-32], KC1[]
+; EG-NEXT: TEX 0 @6
+; EG-NEXT: ALU 4, @9, KC0[CB0:0-32], KC1[]
+; EG-NEXT: MEM_RAT_CACHELESS STORE_RAW T0.XY, T1.X, 1
+; EG-NEXT: CF_END
+; EG-NEXT: PAD
+; EG-NEXT: Fetch clause starting at 6:
+; EG-NEXT: VTX_READ_32 T0.X, T0.X, 0, #1
+; EG-NEXT: ALU clause starting at 8:
+; EG-NEXT: MOV * T0.X, KC0[2].Z,
+; EG-NEXT: ALU clause starting at 9:
+; EG-NEXT: MULHI * T0.Y, T0.X, literal.x,
+; EG-NEXT: 80(1.121039e-43), 0(0.000000e+00)
+; EG-NEXT: LSHR T1.X, KC0[2].Y, literal.x,
+; EG-NEXT: MULLO_INT * T0.X, T0.X, literal.y,
+; EG-NEXT: 2(2.802597e-45), 80(1.121039e-43)
+entry:
+ %val = load i32, ptr addrspace(1) %in, align 4
+ %ext = zext i32 %val to i64
+ %mul = mul i64 %ext, 80
+ store i64 %mul, ptr addrspace(1) %out, align 8
+ ret void
+}
+
define amdgpu_kernel void @v_mul64_sext_inline_imm(ptr addrspace(1) %out, ptr addrspace(1) %in) {
; SI-LABEL: v_mul64_sext_inline_imm:
; SI: ; %bb.0: ; %entry
@@ -829,6 +1206,27 @@ define amdgpu_kernel void @v_mul64_sext_inline_imm(ptr addrspace(1) %out, ptr ad
; GFX11-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
; GFX11-NEXT: s_endpgm
;
+; GFX12-LABEL: v_mul64_sext_inline_imm:
+; GFX12: ; %bb.0: ; %entry
+; GFX12-NEXT: s_load_b128 s[0:3], s[0:1], 0x24
+; GFX12-NEXT: s_mov_b32 s6, -1
+; GFX12-NEXT: s_mov_b32 s7, 0x31016000
+; GFX12-NEXT: s_mov_b32 s10, s6
+; GFX12-NEXT: s_mov_b32 s11, s7
+; GFX12-NEXT: s_waitcnt lgkmcnt(0)
+; GFX12-NEXT: s_mov_b32 s8, s2
+; GFX12-NEXT: s_mov_b32 s9, s3
+; GFX12-NEXT: s_mov_b32 s4, s0
+; GFX12-NEXT: buffer_load_b32 v0, off, s[8:11], null
+; GFX12-NEXT: s_mov_b32 s5, s1
+; GFX12-NEXT: s_waitcnt vmcnt(0)
+; GFX12-NEXT: v_mul_hi_i32 v1, 9, v0
+; GFX12-NEXT: v_mul_lo_u32 v0, 9, v0
+; GFX12-NEXT: buffer_store_b64 v[0:1], off, s[4:7], null
+; GFX12-NEXT: s_nop 0
+; GFX12-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
+; GFX12-NEXT: s_endpgm
+;
; EG-LABEL: v_mul64_sext_inline_imm:
; EG: ; %bb.0: ; %entry
; EG-NEXT: ALU 0, @8, KC0[CB0:0-32], KC1[]
@@ -925,6 +1323,22 @@ define amdgpu_kernel void @s_mul_i32(ptr addrspace(1) %out, [8 x i32], i32 %a, [
; GFX11-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
; GFX11-NEXT: s_endpgm
;
+; GFX12-LABEL: s_mul_i32:
+; GFX12: ; %bb.0: ; %entry
+; GFX12-NEXT: s_clause 0x2
+; GFX12-NEXT: s_load_b32 s2, s[0:1], 0x4c
+; GFX12-NEXT: s_load_b32 s3, s[0:1], 0x70
+; GFX12-NEXT: s_load_b64 s[0:1], s[0:1], 0x24
+; GFX12-NEXT: s_waitcnt lgkmcnt(0)
+; GFX12-NEXT: s_mul_i32 s2, s2, s3
+; GFX12-NEXT: s_mov_b32 s3, 0x31016000
+; GFX12-NEXT: v_mov_b32_e32 v0, s2
+; GFX12-NEXT: s_mov_b32 s2, -1
+; GFX12-NEXT: buffer_store_b32 v0, off, s[0:3], null
+; GFX12-NEXT: s_nop 0
+; GFX12-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
+; GFX12-NEXT: s_endpgm
+;
; EG-LABEL: s_mul_i32:
; EG: ; %bb.0: ; %entry
; EG-NEXT: ALU 2, @4, KC0[CB0:0-32], KC1[]
@@ -1034,6 +1448,26 @@ define amdgpu_kernel void @v_mul_i32(ptr addrspace(1) %out, ptr addrspace(1) %in
; GFX11-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
; GFX11-NEXT: s_endpgm
;
+; GFX12-LABEL: v_mul_i32:
+; GFX12: ; %bb.0: ; %entry
+; GFX12-NEXT: s_load_b128 s[0:3], s[0:1], 0x24
+; GFX12-NEXT: s_mov_b32 s6, -1
+; GFX12-NEXT: s_mov_b32 s7, 0x31016000
+; GFX12-NEXT: s_mov_b32 s10, s6
+; GFX12-NEXT: s_mov_b32 s11, s7
+; GFX12-NEXT: s_waitcnt lgkmcnt(0)
+; GFX12-NEXT: s_mov_b32 s8, s2
+; GFX12-NEXT: s_mov_b32 s9, s3
+; GFX12-NEXT: s_mov_b32 s4, s0
+; GFX12-NEXT: buffer_load_b64 v[0:1], off, s[8:11], null
+; GFX12-NEXT: s_mov_b32 s5, s1
+; GFX12-NEXT: s_waitcnt vmcnt(0)
+; GFX12-NEXT: v_mul_lo_u32 v0, v0, v1
+; GFX12-NEXT: buffer_store_b32 v0, off, s[4:7], null
+; GFX12-NEXT: s_nop 0
+; GFX12-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
+; GFX12-NEXT: s_endpgm
+;
; EG-LABEL: v_mul_i32:
; EG: ; %bb.0: ; %entry
; EG-NEXT: ALU 0, @8, KC0[CB0:0-32], KC1[]
@@ -1133,6 +1567,23 @@ define amdgpu_kernel void @s_mul_i1(ptr addrspace(1) %out, [8 x i32], i1 %a, [8
; GFX11-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
; GFX11-NEXT: s_endpgm
;
+; GFX12-LABEL: s_mul_i1:
+; GFX12: ; %bb.0: ; %entry
+; GFX12-NEXT: s_clause 0x2
+; GFX12-NEXT: s_load_b32 s2, s[0:1], 0x4c
+; GFX12-NEXT: s_load_b32 s3, s[0:1], 0x70
+; GFX12-NEXT: s_load_b64 s[0:1], s[0:1], 0x24
+; GFX12-NEXT: s_waitcnt lgkmcnt(0)
+; GFX12-NEXT: v_mul_lo_u16 v0, s2, s3
+; GFX12-NEXT: s_mov_b32 s3, 0x31016000
+; GFX12-NEXT: s_mov_b32 s2, -1
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX12-NEXT: v_and_b32_e32 v0, 1, v0
+; GFX12-NEXT: buffer_store_b8 v0, off, s[0:3], null
+; GFX12-NEXT: s_nop 0
+; GFX12-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
+; GFX12-NEXT: s_endpgm
+;
; EG-LABEL: s_mul_i1:
; EG: ; %bb.0: ; %entry
; EG-NEXT: ALU 0, @10, KC0[], KC1[]
@@ -1272,6 +1723,30 @@ define amdgpu_kernel void @v_mul_i1(ptr addrspace(1) %out, ptr addrspace(1) %in)
; GFX11-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
; GFX11-NEXT: s_endpgm
;
+; GFX12-LABEL: v_mul_i1:
+; GFX12: ; %bb.0: ; %entry
+; GFX12-NEXT: s_load_b128 s[0:3], s[0:1], 0x24
+; GFX12-NEXT: s_mov_b32 s6, -1
+; GFX12-NEXT: s_mov_b32 s7, 0x31016000
+; GFX12-NEXT: s_mov_b32 s10, s6
+; GFX12-NEXT: s_mov_b32 s11, s7
+; GFX12-NEXT: s_waitcnt lgkmcnt(0)
+; GFX12-NEXT: s_mov_b32 s8, s2
+; GFX12-NEXT: s_mov_b32 s9, s3
+; GFX12-NEXT: s_clause 0x1
+; GFX12-NEXT: buffer_load_u8 v0, off, s[8:11], null
+; GFX12-NEXT: buffer_load_u8 v1, off, s[8:11], null offset:4
+; GFX12-NEXT: s_mov_b32 s4, s0
+; GFX12-NEXT: s_mov_b32 s5, s1
+; GFX12-NEXT: s_waitcnt vmcnt(0)
+; GFX12-NEXT: v_mul_lo_u16 v0, v0, v1
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX12-NEXT: v_and_b32_e32 v0, 1, v0
+; GFX12-NEXT: buffer_store_b8 v0, off, s[4:7], null
+; GFX12-NEXT: s_nop 0
+; GFX12-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
+; GFX12-NEXT: s_endpgm
+;
; EG-LABEL: v_mul_i1:
; EG: ; %bb.0: ; %entry
; EG-NEXT: ALU 0, @10, KC0[CB0:0-32], KC1[]
@@ -1418,6 +1893,21 @@ define amdgpu_kernel void @s_mul_i64(ptr addrspace(1) %out, i64 %a, i64 %b) noun
; GFX11-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
; GFX11-NEXT: s_endpgm
;
+; GFX12-LABEL: s_mul_i64:
+; GFX12: ; %bb.0: ; %entry
+; GFX12-NEXT: s_clause 0x1
+; GFX12-NEXT: s_load_b128 s[4:7], s[0:1], 0x24
+; GFX12-NEXT: s_load_b64 s[0:1], s[0:1], 0x34
+; GFX12-NEXT: s_waitcnt lgkmcnt(0)
+; GFX12-NEXT: s_mul_u64 s[0:1], s[6:7], s[0:1]
+; GFX12-NEXT: s_mov_b32 s7, 0x31016000
+; GFX12-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
+; GFX12-NEXT: s_mov_b32 s6, -1
+; GFX12-NEXT: buffer_store_b64 v[0:1], off, s[4:7], null
+; GFX12-NEXT: s_nop 0
+; GFX12-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
+; GFX12-NEXT: s_endpgm
+;
; EG-LABEL: s_mul_i64:
; EG: ; %bb.0: ; %entry
; EG-NEXT: ALU 7, @4, KC0[CB0:0-32], KC1[]
@@ -1579,6 +2069,37 @@ define amdgpu_kernel void @v_mul_i64(ptr addrspace(1) %out, ptr addrspace(1) %ap
; GFX11-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
; GFX11-NEXT: s_endpgm
;
+; GFX12-LABEL: v_mul_i64:
+; GFX12: ; %bb.0: ; %entry
+; GFX12-NEXT: s_clause 0x1
+; GFX12-NEXT: s_load_b128 s[4:7], s[0:1], 0x24
+; GFX12-NEXT: s_load_b64 s[0:1], s[0:1], 0x34
+; GFX12-NEXT: s_mov_b32 s10, -1
+; GFX12-NEXT: s_mov_b32 s11, 0x31016000
+; GFX12-NEXT: s_mov_b32 s2, s10
+; GFX12-NEXT: s_mov_b32 s3, s11
+; GFX12-NEXT: s_mov_b32 s14, s10
+; GFX12-NEXT: s_mov_b32 s15, s11
+; GFX12-NEXT: s_waitcnt lgkmcnt(0)
+; GFX12-NEXT: s_mov_b32 s12, s6
+; GFX12-NEXT: s_mov_b32 s13, s7
+; GFX12-NEXT: buffer_load_b64 v[0:1], off, s[0:3], null
+; GFX12-NEXT: buffer_load_b64 v[2:3], off, s[12:15], null
+; GFX12-NEXT: s_mov_b32 s8, s4
+; GFX12-NEXT: s_mov_b32 s9, s5
+; GFX12-NEXT: s_waitcnt vmcnt(0)
+; GFX12-NEXT: v_mul_lo_u32 v3, v0, v3
+; GFX12-NEXT: v_mul_lo_u32 v1, v1, v2
+; GFX12-NEXT: v_mul_hi_u32 v4, v0, v2
+; GFX12-NEXT: v_mul_lo_u32 v0, v0, v2
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-NEXT: v_add_nc_u32_e32 v1, v3, v1
+; GFX12-NEXT: v_add_nc_u32_e32 v1, v1, v4
+; GFX12-NEXT: buffer_store_b64 v[0:1], off, s[8:11], null
+; GFX12-NEXT: s_nop 0
+; GFX12-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
+; GFX12-NEXT: s_endpgm
+;
; EG-LABEL: v_mul_i64:
; EG: ; %bb.0: ; %entry
; EG-NEXT: ALU 1, @10, KC0[CB0:0-32], KC1[]
@@ -1616,30 +2137,30 @@ define amdgpu_kernel void @mul32_in_branch(ptr addrspace(1) %out, ptr addrspace(
; SI-NEXT: s_load_dwordx2 s[2:3], s[0:1], 0xd
; SI-NEXT: s_waitcnt lgkmcnt(0)
; SI-NEXT: s_cmp_lg_u32 s2, 0
-; SI-NEXT: s_cbranch_scc0 .LBB13_2
+; SI-NEXT: s_cbranch_scc0 .LBB15_2
; SI-NEXT: ; %bb.1: ; %else
; SI-NEXT: s_mul_i32 s6, s2, s3
; SI-NEXT: s_mov_b64 s[4:5], 0
-; SI-NEXT: s_branch .LBB13_3
-; SI-NEXT: .LBB13_2:
+; SI-NEXT: s_branch .LBB15_3
+; SI-NEXT: .LBB15_2:
; SI-NEXT: s_mov_b64 s[4:5], -1
; SI-NEXT: ; implicit-def: $sgpr6
-; SI-NEXT: .LBB13_3: ; %Flow
+; SI-NEXT: .LBB15_3: ; %Flow
; SI-NEXT: s_load_dwordx4 s[0:3], s[0:1], 0x9
; SI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
; SI-NEXT: s_waitcnt lgkmcnt(0)
; SI-NEXT: s_mov_b64 vcc, vcc
-; SI-NEXT: s_cbranch_vccnz .LBB13_5
+; SI-NEXT: s_cbranch_vccnz .LBB15_5
; SI-NEXT: ; %bb.4: ; %if
; SI-NEXT: s_mov_b32 s7, 0xf000
; SI-NEXT: s_mov_b32 s6, -1
; SI-NEXT: s_mov_b32 s4, s2
; SI-NEXT: s_mov_b32 s5, s3
; SI-NEXT: buffer_load_dword v0, off, s[4:7], 0
-; SI-NEXT: s_branch .LBB13_6
-; SI-NEXT: .LBB13_5:
+; SI-NEXT: s_branch .LBB15_6
+; SI-NEXT: .LBB15_5:
; SI-NEXT: v_mov_b32_e32 v0, s6
-; SI-NEXT: .LBB13_6: ; %endif
+; SI-NEXT: .LBB15_6: ; %endif
; SI-NEXT: s_mov_b32 s3, 0xf000
; SI-NEXT: s_mov_b32 s2, -1
; SI-NEXT: s_waitcnt vmcnt(0)
@@ -1651,18 +2172,18 @@ define amdgpu_kernel void @mul32_in_branch(ptr addrspace(1) %out, ptr addrspace(
; VI-NEXT: s_load_dwordx2 s[2:3], s[0:1], 0x34
; VI-NEXT: s_waitcnt lgkmcnt(0)
; VI-NEXT: s_cmp_lg_u32 s2, 0
-; VI-NEXT: s_cbranch_scc0 .LBB13_2
+; VI-NEXT: s_cbranch_scc0 .LBB15_2
; VI-NEXT: ; %bb.1: ; %else
; VI-NEXT: s_mul_i32 s6, s2, s3
; VI-NEXT: s_mov_b64 s[4:5], 0
-; VI-NEXT: s_branch .LBB13_3
-; VI-NEXT: .LBB13_2:
+; VI-NEXT: s_branch .LBB15_3
+; VI-NEXT: .LBB15_2:
; VI-NEXT: s_mov_b64 s[4:5], -1
; VI-NEXT: ; implicit-def: $sgpr6
-; VI-NEXT: .LBB13_3: ; %Flow
+; VI-NEXT: .LBB15_3: ; %Flow
; VI-NEXT: s_load_dwordx4 s[0:3], s[0:1], 0x24
; VI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
-; VI-NEXT: s_cbranch_vccnz .LBB13_5
+; VI-NEXT: s_cbranch_vccnz .LBB15_5
; VI-NEXT: ; %bb.4: ; %if
; VI-NEXT: s_mov_b32 s7, 0xf000
; VI-NEXT: s_mov_b32 s6, -1
@@ -1670,10 +2191,10 @@ define amdgpu_kernel void @mul32_in_branch(ptr addrspace(1) %out, ptr addrspace(
; VI-NEXT: s_mov_b32 s4, s2
; VI-NEXT: s_mov_b32 s5, s3
; VI-NEXT: buffer_load_dword v0, off, s[4:7], 0
-; VI-NEXT: s_branch .LBB13_6
-; VI-NEXT: .LBB13_5:
+; VI-NEXT: s_branch .LBB15_6
+; VI-NEXT: .LBB15_5:
; VI-NEXT: v_mov_b32_e32 v0, s6
-; VI-NEXT: .LBB13_6: ; %endif
+; VI-NEXT: .LBB15_6: ; %endif
; VI-NEXT: s_waitcnt lgkmcnt(0)
; VI-NEXT: s_mov_b32 s3, 0xf000
; VI-NEXT: s_mov_b32 s2, -1
@@ -1686,18 +2207,18 @@ define amdgpu_kernel void @mul32_in_branch(ptr addrspace(1) %out, ptr addrspace(
; GFX9-NEXT: s_load_dwordx2 s[2:3], s[0:1], 0x34
; GFX9-NEXT: s_waitcnt lgkmcnt(0)
; GFX9-NEXT: s_cmp_lg_u32 s2, 0
-; GFX9-NEXT: s_cbranch_scc0 .LBB13_2
+; GFX9-NEXT: s_cbranch_scc0 .LBB15_2
; GFX9-NEXT: ; %bb.1: ; %else
; GFX9-NEXT: s_mul_i32 s6, s2, s3
; GFX9-NEXT: s_mov_b64 s[4:5], 0
-; GFX9-NEXT: s_branch .LBB13_3
-; GFX9-NEXT: .LBB13_2:
+; GFX9-NEXT: s_branch .LBB15_3
+; GFX9-NEXT: .LBB15_2:
; GFX9-NEXT: s_mov_b64 s[4:5], -1
; GFX9-NEXT: ; implicit-def: $sgpr6
-; GFX9-NEXT: .LBB13_3: ; %Flow
+; GFX9-NEXT: .LBB15_3: ; %Flow
; GFX9-NEXT: s_load_dwordx4 s[0:3], s[0:1], 0x24
; GFX9-NEXT: s_andn2_b64 vcc, exec, s[4:5]
-; GFX9-NEXT: s_cbranch_vccnz .LBB13_5
+; GFX9-NEXT: s_cbranch_vccnz .LBB15_5
; GFX9-NEXT: ; %bb.4: ; %if
; GFX9-NEXT: s_mov_b32 s7, 0xf000
; GFX9-NEXT: s_mov_b32 s6, -1
@@ -1705,10 +2226,10 @@ define amdgpu_kernel void @mul32_in_branch(ptr addrspace(1) %out, ptr addrspace(
; GFX9-NEXT: s_mov_b32 s4, s2
; GFX9-NEXT: s_mov_b32 s5, s3
; GFX9-NEXT: buffer_load_dword v0, off, s[4:7], 0
-; GFX9-NEXT: s_branch .LBB13_6
-; GFX9-NEXT: .LBB13_5:
+; GFX9-NEXT: s_branch .LBB15_6
+; GFX9-NEXT: .LBB15_5:
; GFX9-NEXT: v_mov_b32_e32 v0, s6
-; GFX9-NEXT: .LBB13_6: ; %endif
+; GFX9-NEXT: .LBB15_6: ; %endif
; GFX9-NEXT: s_waitcnt lgkmcnt(0)
; GFX9-NEXT: s_mov_b32 s3, 0xf000
; GFX9-NEXT: s_mov_b32 s2, -1
@@ -1722,17 +2243,17 @@ define amdgpu_kernel void @mul32_in_branch(ptr addrspace(1) %out, ptr addrspace(
; GFX10-NEXT: s_mov_b32 s4, 0
; GFX10-NEXT: s_waitcnt lgkmcnt(0)
; GFX10-NEXT: s_cmp_lg_u32 s2, 0
-; GFX10-NEXT: s_cbranch_scc0 .LBB13_2
+; GFX10-NEXT: s_cbranch_scc0 .LBB15_2
; GFX10-NEXT: ; %bb.1: ; %else
; GFX10-NEXT: s_mul_i32 s5, s2, s3
-; GFX10-NEXT: s_branch .LBB13_3
-; GFX10-NEXT: .LBB13_2:
+; GFX10-NEXT: s_branch .LBB15_3
+; GFX10-NEXT: .LBB15_2:
; GFX10-NEXT: s_mov_b32 s4, -1
; GFX10-NEXT: ; implicit-def: $sgpr5
-; GFX10-NEXT: .LBB13_3: ; %Flow
+; GFX10-NEXT: .LBB15_3: ; %Flow
; GFX10-NEXT: s_load_dwordx4 s[0:3], s[0:1], 0x24
; GFX10-NEXT: s_andn2_b32 vcc_lo, exec_lo, s4
-; GFX10-NEXT: s_cbranch_vccnz .LBB13_5
+; GFX10-NEXT: s_cbranch_vccnz .LBB15_5
; GFX10-NEXT: ; %bb.4: ; %if
; GFX10-NEXT: s_mov_b32 s7, 0x31016000
; GFX10-NEXT: s_mov_b32 s6, -1
@@ -1740,10 +2261,10 @@ define amdgpu_kernel void @mul32_in_branch(ptr addrspace(1) %out, ptr addrspace(
; GFX10-NEXT: s_mov_b32 s4, s2
; GFX10-NEXT: s_mov_b32 s5, s3
; GFX10-NEXT: buffer_load_dword v0, off, s[4:7], 0
-; GFX10-NEXT: s_branch .LBB13_6
-; GFX10-NEXT: .LBB13_5:
+; GFX10-NEXT: s_branch .LBB15_6
+; GFX10-NEXT: .LBB15_5:
; GFX10-NEXT: v_mov_b32_e32 v0, s5
-; GFX10-NEXT: .LBB13_6: ; %endif
+; GFX10-NEXT: .LBB15_6: ; %endif
; GFX10-NEXT: s_waitcnt lgkmcnt(0)
; GFX10-NEXT: s_mov_b32 s3, 0x31016000
; GFX10-NEXT: s_mov_b32 s2, -1
@@ -1757,17 +2278,17 @@ define amdgpu_kernel void @mul32_in_branch(ptr addrspace(1) %out, ptr addrspace(
; GFX11-NEXT: s_mov_b32 s4, 0
; GFX11-NEXT: s_waitcnt lgkmcnt(0)
; GFX11-NEXT: s_cmp_lg_u32 s2, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB13_2
+; GFX11-NEXT: s_cbranch_scc0 .LBB15_2
; GFX11-NEXT: ; %bb.1: ; %else
; GFX11-NEXT: s_mul_i32 s5, s2, s3
-; GFX11-NEXT: s_branch .LBB13_3
-; GFX11-NEXT: .LBB13_2:
+; GFX11-NEXT: s_branch .LBB15_3
+; GFX11-NEXT: .LBB15_2:
; GFX11-NEXT: s_mov_b32 s4, -1
; GFX11-NEXT: ; implicit-def: $sgpr5
-; GFX11-NEXT: .LBB13_3: ; %Flow
+; GFX11-NEXT: .LBB15_3: ; %Flow
; GFX11-NEXT: s_load_b128 s[0:3], s[0:1], 0x24
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s4
-; GFX11-NEXT: s_cbranch_vccnz .LBB13_5
+; GFX11-NEXT: s_cbranch_vccnz .LBB15_5
; GFX11-NEXT: ; %bb.4: ; %if
; GFX11-NEXT: s_mov_b32 s7, 0x31016000
; GFX11-NEXT: s_mov_b32 s6, -1
@@ -1775,10 +2296,10 @@ define amdgpu_kernel void @mul32_in_branch(ptr addrspace(1) %out, ptr addrspace(
; GFX11-NEXT: s_mov_b32 s4, s2
; GFX11-NEXT: s_mov_b32 s5, s3
; GFX11-NEXT: buffer_load_b32 v0, off, s[4:7], 0
-; GFX11-NEXT: s_branch .LBB13_6
-; GFX11-NEXT: .LBB13_5:
+; GFX11-NEXT: s_branch .LBB15_6
+; GFX11-NEXT: .LBB15_5:
; GFX11-NEXT: v_mov_b32_e32 v0, s5
-; GFX11-NEXT: .LBB13_6: ; %endif
+; GFX11-NEXT: .LBB15_6: ; %endif
; GFX11-NEXT: s_waitcnt lgkmcnt(0)
; GFX11-NEXT: s_mov_b32 s3, 0x31016000
; GFX11-NEXT: s_mov_b32 s2, -1
@@ -1788,6 +2309,43 @@ define amdgpu_kernel void @mul32_in_branch(ptr addrspace(1) %out, ptr addrspace(
; GFX11-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
; GFX11-NEXT: s_endpgm
;
+; GFX12-LABEL: mul32_in_branch:
+; GFX12: ; %bb.0: ; %entry
+; GFX12-NEXT: s_load_b64 s[2:3], s[0:1], 0x34
+; GFX12-NEXT: s_mov_b32 s4, 0
+; GFX12-NEXT: s_waitcnt lgkmcnt(0)
+; GFX12-NEXT: s_cmp_lg_u32 s2, 0
+; GFX12-NEXT: s_cbranch_scc0 .LBB15_2
+; GFX12-NEXT: ; %bb.1: ; %else
+; GFX12-NEXT: s_mul_i32 s5, s2, s3
+; GFX12-NEXT: s_branch .LBB15_3
+; GFX12-NEXT: .LBB15_2:
+; GFX12-NEXT: s_mov_b32 s4, -1
+; GFX12-NEXT: ; implicit-def: $sgpr5
+; GFX12-NEXT: .LBB15_3: ; %Flow
+; GFX12-NEXT: s_load_b128 s[0:3], s[0:1], 0x24
+; GFX12-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s4
+; GFX12-NEXT: s_cbranch_vccnz .LBB15_5
+; GFX12-NEXT: ; %bb.4: ; %if
+; GFX12-NEXT: s_mov_b32 s7, 0x31016000
+; GFX12-NEXT: s_mov_b32 s6, -1
+; GFX12-NEXT: s_waitcnt lgkmcnt(0)
+; GFX12-NEXT: s_mov_b32 s4, s2
+; GFX12-NEXT: s_mov_b32 s5, s3
+; GFX12-NEXT: buffer_load_b32 v0, off, s[4:7], null
+; GFX12-NEXT: s_branch .LBB15_6
+; GFX12-NEXT: .LBB15_5:
+; GFX12-NEXT: v_mov_b32_e32 v0, s5
+; GFX12-NEXT: .LBB15_6: ; %endif
+; GFX12-NEXT: s_waitcnt lgkmcnt(0)
+; GFX12-NEXT: s_mov_b32 s3, 0x31016000
+; GFX12-NEXT: s_mov_b32 s2, -1
+; GFX12-NEXT: s_waitcnt vmcnt(0)
+; GFX12-NEXT: buffer_store_b32 v0, off, s[0:3], null
+; GFX12-NEXT: s_nop 0
+; GFX12-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
+; GFX12-NEXT: s_endpgm
+;
; EG-LABEL: mul32_in_branch:
; EG: ; %bb.0: ; %entry
; EG-NEXT: ALU_PUSH_BEFORE 3, @14, KC0[CB0:0-32], KC1[]
@@ -1850,7 +2408,7 @@ define amdgpu_kernel void @mul64_in_branch(ptr addrspace(1) %out, ptr addrspace(
; SI-NEXT: s_waitcnt lgkmcnt(0)
; SI-NEXT: v_cmp_ne_u64_e64 s[10:11], s[4:5], 0
; SI-NEXT: s_and_b64 vcc, exec, s[10:11]
-; SI-NEXT: s_cbranch_vccz .LBB14_4
+; SI-NEXT: s_cbranch_vccz .LBB16_4
; SI-NEXT: ; %bb.1: ; %else
; SI-NEXT: v_mov_b32_e32 v0, s6
; SI-NEXT: v_mul_hi_u32 v0, s4, v0
@@ -1861,22 +2419,22 @@ define amdgpu_kernel void @mul64_in_branch(ptr addrspace(1) %out, ptr addrspace(
; SI-NEXT: v_add_i32_e32 v1, vcc, s5, v0
; SI-NEXT: v_mov_b32_e32 v0, s4
; SI-NEXT: s_andn2_b64 vcc, exec, s[8:9]
-; SI-NEXT: s_cbranch_vccnz .LBB14_3
-; SI-NEXT: .LBB14_2: ; %if
+; SI-NEXT: s_cbranch_vccnz .LBB16_3
+; SI-NEXT: .LBB16_2: ; %if
; SI-NEXT: s_mov_b32 s7, 0xf000
; SI-NEXT: s_mov_b32 s6, -1
; SI-NEXT: s_mov_b32 s4, s2
; SI-NEXT: s_mov_b32 s5, s3
; SI-NEXT: buffer_load_dwordx2 v[0:1], off, s[4:7], 0
-; SI-NEXT: .LBB14_3: ; %endif
+; SI-NEXT: .LBB16_3: ; %endif
; SI-NEXT: s_mov_b32 s3, 0xf000
; SI-NEXT: s_mov_b32 s2, -1
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: buffer_store_dwordx2 v[0:1], off, s[0:3], 0
; SI-NEXT: s_endpgm
-; SI-NEXT: .LBB14_4:
+; SI-NEXT: .LBB16_4:
; SI-NEXT: ; implicit-def: $vgpr0_vgpr1
-; SI-NEXT: s_branch .LBB14_2
+; SI-NEXT: s_branch .LBB16_2
;
; VI-LABEL: mul64_in_branch:
; VI: ; %bb.0: ; %entry
@@ -1884,7 +2442,7 @@ define amdgpu_kernel void @mul64_in_branch(ptr addrspace(1) %out, ptr addrspace(
; VI-NEXT: s_mov_b64 s[8:9], 0
; VI-NEXT: s_waitcnt lgkmcnt(0)
; VI-NEXT: s_cmp_lg_u64 s[4:5], 0
-; VI-NEXT: s_cbranch_scc0 .LBB14_4
+; VI-NEXT: s_cbranch_scc0 .LBB16_4
; VI-NEXT: ; %bb.1: ; %else
; VI-NEXT: v_mov_b32_e32 v0, s6
; VI-NEXT: v_mad_u64_u32 v[0:1], s[10:11], s4, v0, 0
@@ -1893,22 +2451,22 @@ define amdgpu_kernel void @mul64_in_branch(ptr addrspace(1) %out, ptr addrspace(
; VI-NEXT: s_mul_i32 s4, s5, s6
; VI-NEXT: v_add_u32_e32 v1, vcc, s4, v1
; VI-NEXT: s_andn2_b64 vcc, exec, s[8:9]
-; VI-NEXT: s_cbranch_vccnz .LBB14_3
-; VI-NEXT: .LBB14_2: ; %if
+; VI-NEXT: s_cbranch_vccnz .LBB16_3
+; VI-NEXT: .LBB16_2: ; %if
; VI-NEXT: s_mov_b32 s7, 0xf000
; VI-NEXT: s_mov_b32 s6, -1
; VI-NEXT: s_mov_b32 s4, s2
; VI-NEXT: s_mov_b32 s5, s3
; VI-NEXT: buffer_load_dwordx2 v[0:1], off, s[4:7], 0
-; VI-NEXT: .LBB14_3: ; %endif
+; VI-NEXT: .LBB16_3: ; %endif
; VI-NEXT: s_mov_b32 s3, 0xf000
; VI-NEXT: s_mov_b32 s2, -1
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: buffer_store_dwordx2 v[0:1], off, s[0:3], 0
; VI-NEXT: s_endpgm
-; VI-NEXT: .LBB14_4:
+; VI-NEXT: .LBB16_4:
; VI-NEXT: ; implicit-def: $vgpr0_vgpr1
-; VI-NEXT: s_branch .LBB14_2
+; VI-NEXT: s_branch .LBB16_2
;
; GFX9-LABEL: mul64_in_branch:
; GFX9: ; %bb.0: ; %entry
@@ -1916,7 +2474,7 @@ define amdgpu_kernel void @mul64_in_branch(ptr addrspace(1) %out, ptr addrspace(
; GFX9-NEXT: s_mov_b64 s[8:9], 0
; GFX9-NEXT: s_waitcnt lgkmcnt(0)
; GFX9-NEXT: s_cmp_lg_u64 s[4:5], 0
-; GFX9-NEXT: s_cbranch_scc0 .LBB14_3
+; GFX9-NEXT: s_cbranch_scc0 .LBB16_3
; GFX9-NEXT: ; %bb.1: ; %else
; GFX9-NEXT: s_mul_i32 s7, s4, s7
; GFX9-NEXT: s_mul_hi_u32 s10, s4, s6
@@ -1925,21 +2483,21 @@ define amdgpu_kernel void @mul64_in_branch(ptr addrspace(1) %out, ptr addrspace(
; GFX9-NEXT: s_add_i32 s5, s7, s5
; GFX9-NEXT: s_mul_i32 s4, s4, s6
; GFX9-NEXT: s_andn2_b64 vcc, exec, s[8:9]
-; GFX9-NEXT: s_cbranch_vccnz .LBB14_4
-; GFX9-NEXT: .LBB14_2: ; %if
+; GFX9-NEXT: s_cbranch_vccnz .LBB16_4
+; GFX9-NEXT: .LBB16_2: ; %if
; GFX9-NEXT: s_mov_b32 s7, 0xf000
; GFX9-NEXT: s_mov_b32 s6, -1
; GFX9-NEXT: s_mov_b32 s4, s2
; GFX9-NEXT: s_mov_b32 s5, s3
; GFX9-NEXT: buffer_load_dwordx2 v[0:1], off, s[4:7], 0
-; GFX9-NEXT: s_branch .LBB14_5
-; GFX9-NEXT: .LBB14_3:
+; GFX9-NEXT: s_branch .LBB16_5
+; GFX9-NEXT: .LBB16_3:
; GFX9-NEXT: ; implicit-def: $sgpr4_sgpr5
-; GFX9-NEXT: s_branch .LBB14_2
-; GFX9-NEXT: .LBB14_4:
+; GFX9-NEXT: s_branch .LBB16_2
+; GFX9-NEXT: .LBB16_4:
; GFX9-NEXT: v_mov_b32_e32 v0, s4
; GFX9-NEXT: v_mov_b32_e32 v1, s5
-; GFX9-NEXT: .LBB14_5: ; %endif
+; GFX9-NEXT: .LBB16_5: ; %endif
; GFX9-NEXT: s_mov_b32 s3, 0xf000
; GFX9-NEXT: s_mov_b32 s2, -1
; GFX9-NEXT: s_waitcnt vmcnt(0)
@@ -1951,7 +2509,7 @@ define amdgpu_kernel void @mul64_in_branch(ptr addrspace(1) %out, ptr addrspace(
; GFX10-NEXT: s_load_dwordx8 s[0:7], s[0:1], 0x24
; GFX10-NEXT: s_waitcnt lgkmcnt(0)
; GFX10-NEXT: s_cmp_lg_u64 s[4:5], 0
-; GFX10-NEXT: s_cbranch_scc0 .LBB14_3
+; GFX10-NEXT: s_cbranch_scc0 .LBB16_3
; GFX10-NEXT: ; %bb.1: ; %else
; GFX10-NEXT: s_mul_i32 s7, s4, s7
; GFX10-NEXT: s_mul_hi_u32 s8, s4, s6
@@ -1960,22 +2518,22 @@ define amdgpu_kernel void @mul64_in_branch(ptr addrspace(1) %out, ptr addrspace(
; GFX10-NEXT: s_mul_i32 s4, s4, s6
; GFX10-NEXT: s_add_i32 s5, s7, s5
; GFX10-NEXT: s_mov_b32 s6, 0
-; GFX10-NEXT: s_cbranch_execnz .LBB14_4
-; GFX10-NEXT: .LBB14_2: ; %if
+; GFX10-NEXT: s_cbranch_execnz .LBB16_4
+; GFX10-NEXT: .LBB16_2: ; %if
; GFX10-NEXT: s_mov_b32 s7, 0x31016000
; GFX10-NEXT: s_mov_b32 s6, -1
; GFX10-NEXT: s_mov_b32 s4, s2
; GFX10-NEXT: s_mov_b32 s5, s3
; GFX10-NEXT: buffer_load_dwordx2 v[0:1], off, s[4:7], 0
-; GFX10-NEXT: s_branch .LBB14_5
-; GFX10-NEXT: .LBB14_3:
+; GFX10-NEXT: s_branch .LBB16_5
+; GFX10-NEXT: .LBB16_3:
; GFX10-NEXT: s_mov_b32 s6, -1
; GFX10-NEXT: ; implicit-def: $sgpr4_sgpr5
-; GFX10-NEXT: s_branch .LBB14_2
-; GFX10-NEXT: .LBB14_4:
+; GFX10-NEXT: s_branch .LBB16_2
+; GFX10-NEXT: .LBB16_4:
; GFX10-NEXT: v_mov_b32_e32 v0, s4
; GFX10-NEXT: v_mov_b32_e32 v1, s5
-; GFX10-NEXT: .LBB14_5: ; %endif
+; GFX10-NEXT: .LBB16_5: ; %endif
; GFX10-NEXT: s_mov_b32 s3, 0x31016000
; GFX10-NEXT: s_mov_b32 s2, -1
; GFX10-NEXT: s_waitcnt vmcnt(0)
@@ -1987,7 +2545,7 @@ define amdgpu_kernel void @mul64_in_branch(ptr addrspace(1) %out, ptr addrspace(
; GFX11-NEXT: s_load_b256 s[0:7], s[0:1], 0x24
; GFX11-NEXT: s_waitcnt lgkmcnt(0)
; GFX11-NEXT: s_cmp_lg_u64 s[4:5], 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB14_3
+; GFX11-NEXT: s_cbranch_scc0 .LBB16_3
; GFX11-NEXT: ; %bb.1: ; %else
; GFX11-NEXT: s_mul_i32 s7, s4, s7
; GFX11-NEXT: s_mul_hi_u32 s8, s4, s6
@@ -1996,21 +2554,21 @@ define amdgpu_kernel void @mul64_in_branch(ptr addrspace(1) %out, ptr addrspace(
; GFX11-NEXT: s_mul_i32 s4, s4, s6
; GFX11-NEXT: s_add_i32 s5, s7, s5
; GFX11-NEXT: s_mov_b32 s6, 0
-; GFX11-NEXT: s_cbranch_execnz .LBB14_4
-; GFX11-NEXT: .LBB14_2: ; %if
+; GFX11-NEXT: s_cbranch_execnz .LBB16_4
+; GFX11-NEXT: .LBB16_2: ; %if
; GFX11-NEXT: s_mov_b32 s7, 0x31016000
; GFX11-NEXT: s_mov_b32 s6, -1
; GFX11-NEXT: s_mov_b32 s4, s2
; GFX11-NEXT: s_mov_b32 s5, s3
; GFX11-NEXT: buffer_load_b64 v[0:1], off, s[4:7], 0
-; GFX11-NEXT: s_branch .LBB14_5
-; GFX11-NEXT: .LBB14_3:
+; GFX11-NEXT: s_branch .LBB16_5
+; GFX11-NEXT: .LBB16_3:
; GFX11-NEXT: s_mov_b32 s6, -1
; GFX11-NEXT: ; implicit-def: $sgpr4_sgpr5
-; GFX11-NEXT: s_branch .LBB14_2
-; GFX11-NEXT: .LBB14_4:
+; GFX11-NEXT: s_branch .LBB16_2
+; GFX11-NEXT: .LBB16_4:
; GFX11-NEXT: v_dual_mov_b32 v0, s4 :: v_dual_mov_b32 v1, s5
-; GFX11-NEXT: .LBB14_5: ; %endif
+; GFX11-NEXT: .LBB16_5: ; %endif
; GFX11-NEXT: s_mov_b32 s3, 0x31016000
; GFX11-NEXT: s_mov_b32 s2, -1
; GFX11-NEXT: s_waitcnt vmcnt(0)
@@ -2019,6 +2577,38 @@ define amdgpu_kernel void @mul64_in_branch(ptr addrspace(1) %out, ptr addrspace(
; GFX11-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
; GFX11-NEXT: s_endpgm
;
+; GFX12-LABEL: mul64_in_branch:
+; GFX12: ; %bb.0: ; %entry
+; GFX12-NEXT: s_load_b256 s[0:7], s[0:1], 0x24
+; GFX12-NEXT: s_waitcnt lgkmcnt(0)
+; GFX12-NEXT: s_cmp_lg_u64 s[4:5], 0
+; GFX12-NEXT: s_cbranch_scc0 .LBB16_3
+; GFX12-NEXT: ; %bb.1: ; %else
+; GFX12-NEXT: s_mul_u64 s[4:5], s[4:5], s[6:7]
+; GFX12-NEXT: s_mov_b32 s6, 0
+; GFX12-NEXT: s_cbranch_execnz .LBB16_4
+; GFX12-NEXT: .LBB16_2: ; %if
+; GFX12-NEXT: s_mov_b32 s7, 0x31016000
+; GFX12-NEXT: s_mov_b32 s6, -1
+; GFX12-NEXT: s_mov_b32 s4, s2
+; GFX12-NEXT: s_mov_b32 s5, s3
+; GFX12-NEXT: buffer_load_b64 v[0:1], off, s[4:7], null
+; GFX12-NEXT: s_branch .LBB16_5
+; GFX12-NEXT: .LBB16_3:
+; GFX12-NEXT: s_mov_b32 s6, -1
+; GFX12-NEXT: ; implicit-def: $sgpr4_sgpr5
+; GFX12-NEXT: s_branch .LBB16_2
+; GFX12-NEXT: .LBB16_4:
+; GFX12-NEXT: v_dual_mov_b32 v0, s4 :: v_dual_mov_b32 v1, s5
+; GFX12-NEXT: .LBB16_5: ; %endif
+; GFX12-NEXT: s_mov_b32 s3, 0x31016000
+; GFX12-NEXT: s_mov_b32 s2, -1
+; GFX12-NEXT: s_waitcnt vmcnt(0)
+; GFX12-NEXT: buffer_store_b64 v[0:1], off, s[0:3], null
+; GFX12-NEXT: s_nop 0
+; GFX12-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
+; GFX12-NEXT: s_endpgm
+;
; EG-LABEL: mul64_in_branch:
; EG: ; %bb.0: ; %entry
; EG-NEXT: ALU_PUSH_BEFORE 4, @14, KC0[CB0:0-32], KC1[]
@@ -2324,6 +2914,51 @@ define amdgpu_kernel void @s_mul_i128(ptr addrspace(1) %out, [8 x i32], i128 %a,
; GFX11-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
; GFX11-NEXT: s_endpgm
;
+; GFX12-LABEL: s_mul_i128:
+; GFX12: ; %bb.0: ; %entry
+; GFX12-NEXT: s_clause 0x1
+; GFX12-NEXT: s_load_b128 s[4:7], s[0:1], 0x7c
+; GFX12-NEXT: s_load_b128 s[8:11], s[0:1], 0x4c
+; GFX12-NEXT: s_mov_b32 s3, 0
+; GFX12-NEXT: s_load_b64 s[0:1], s[0:1], 0x24
+; GFX12-NEXT: s_mov_b32 s15, s3
+; GFX12-NEXT: s_mov_b32 s13, s3
+; GFX12-NEXT: s_mov_b32 s17, s3
+; GFX12-NEXT: s_mov_b32 s19, s3
+; GFX12-NEXT: s_mov_b32 s24, s3
+; GFX12-NEXT: s_waitcnt lgkmcnt(0)
+; GFX12-NEXT: s_mov_b32 s2, s4
+; GFX12-NEXT: s_mov_b32 s14, s8
+; GFX12-NEXT: s_mov_b32 s12, s9
+; GFX12-NEXT: s_mul_u64 s[22:23], s[14:15], s[2:3]
+; GFX12-NEXT: s_mul_u64 s[20:21], s[12:13], s[2:3]
+; GFX12-NEXT: s_mov_b32 s2, s23
+; GFX12-NEXT: s_mov_b32 s16, s5
+; GFX12-NEXT: s_mul_u64 s[4:5], s[4:5], s[10:11]
+; GFX12-NEXT: s_add_nc_u64 s[10:11], s[20:21], s[2:3]
+; GFX12-NEXT: s_mul_u64 s[6:7], s[6:7], s[8:9]
+; GFX12-NEXT: s_mul_u64 s[8:9], s[14:15], s[16:17]
+; GFX12-NEXT: s_mov_b32 s2, s11
+; GFX12-NEXT: s_mov_b32 s11, s3
+; GFX12-NEXT: s_add_nc_u64 s[4:5], s[6:7], s[4:5]
+; GFX12-NEXT: s_add_nc_u64 s[6:7], s[8:9], s[10:11]
+; GFX12-NEXT: s_mul_u64 s[12:13], s[12:13], s[16:17]
+; GFX12-NEXT: s_mov_b32 s18, s7
+; GFX12-NEXT: s_mov_b32 s23, s3
+; GFX12-NEXT: s_add_nc_u64 s[2:3], s[2:3], s[18:19]
+; GFX12-NEXT: s_mov_b32 s25, s6
+; GFX12-NEXT: s_add_nc_u64 s[2:3], s[12:13], s[2:3]
+; GFX12-NEXT: s_or_b64 s[6:7], s[22:23], s[24:25]
+; GFX12-NEXT: s_add_nc_u64 s[2:3], s[2:3], s[4:5]
+; GFX12-NEXT: v_dual_mov_b32 v0, s6 :: v_dual_mov_b32 v1, s7
+; GFX12-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
+; GFX12-NEXT: s_mov_b32 s3, 0x31016000
+; GFX12-NEXT: s_mov_b32 s2, -1
+; GFX12-NEXT: buffer_store_b128 v[0:3], off, s[0:3], null
+; GFX12-NEXT: s_nop 0
+; GFX12-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
+; GFX12-NEXT: s_endpgm
+;
; EG-LABEL: s_mul_i128:
; EG: ; %bb.0: ; %entry
; EG-NEXT: ALU 41, @4, KC0[CB0:0-32], KC1[]
@@ -2570,6 +3205,44 @@ define amdgpu_kernel void @v_mul_i128(ptr addrspace(1) %out, ptr addrspace(1) %a
; GFX11-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
; GFX11-NEXT: s_endpgm
;
+; GFX12-LABEL: v_mul_i128:
+; GFX12: ; %bb.0: ; %entry
+; GFX12-NEXT: s_load_b128 s[0:3], s[0:1], 0x2c
+; GFX12-NEXT: v_dual_mov_b32 v10, 0 :: v_dual_lshlrev_b32 v15, 4, v0
+; GFX12-NEXT: s_waitcnt lgkmcnt(0)
+; GFX12-NEXT: s_clause 0x1
+; GFX12-NEXT: global_load_b128 v[0:3], v15, s[0:1]
+; GFX12-NEXT: global_load_b128 v[4:7], v15, s[2:3]
+; GFX12-NEXT: s_waitcnt vmcnt(0)
+; GFX12-NEXT: v_mad_co_u64_u32 v[8:9], null, v0, v4, 0
+; GFX12-NEXT: v_mul_lo_u32 v14, v5, v2
+; GFX12-NEXT: v_mul_lo_u32 v3, v4, v3
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-NEXT: v_mad_co_u64_u32 v[11:12], null, v1, v4, v[9:10]
+; GFX12-NEXT: v_dual_mov_b32 v13, v12 :: v_dual_mov_b32 v12, v10
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
+; GFX12-NEXT: v_mad_co_u64_u32 v[9:10], null, v0, v5, v[11:12]
+; GFX12-NEXT: v_mad_co_u64_u32 v[11:12], null, v4, v2, 0
+; GFX12-NEXT: v_mul_lo_u32 v4, v6, v1
+; GFX12-NEXT: v_mov_b32_e32 v2, v10
+; GFX12-NEXT: v_mul_lo_u32 v10, v7, v0
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX12-NEXT: v_add3_u32 v12, v12, v3, v14
+; GFX12-NEXT: v_add_co_u32 v2, s0, v13, v2
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX12-NEXT: v_add_co_ci_u32_e64 v3, null, 0, 0, s0
+; GFX12-NEXT: v_mad_co_u64_u32 v[13:14], null, v6, v0, v[11:12]
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX12-NEXT: v_mad_co_u64_u32 v[6:7], null, v1, v5, v[2:3]
+; GFX12-NEXT: v_add3_u32 v0, v10, v14, v4
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX12-NEXT: v_add_co_u32 v10, vcc_lo, v6, v13
+; GFX12-NEXT: v_add_co_ci_u32_e32 v11, vcc_lo, v7, v0, vcc_lo
+; GFX12-NEXT: global_store_b128 v15, v[8:11], s[2:3]
+; GFX12-NEXT: s_nop 0
+; GFX12-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
+; GFX12-NEXT: s_endpgm
+;
; EG-LABEL: v_mul_i128:
; EG: ; %bb.0: ; %entry
; EG-NEXT: ALU 3, @10, KC0[CB0:0-32], KC1[]
@@ -2672,6 +3345,12 @@ define i32 @mul_pow2_plus_1(i32 %val) {
; GFX11-NEXT: v_lshl_add_u32 v0, v0, 3, v0
; GFX11-NEXT: s_setpc_b64 s[30:31]
;
+; GFX12-LABEL: mul_pow2_plus_1:
+; GFX12: ; %bb.0:
+; GFX12-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX12-NEXT: v_lshl_add_u32 v0, v0, 3, v0
+; GFX12-NEXT: s_setpc_b64 s[30:31]
+;
; EG-LABEL: mul_pow2_plus_1:
; EG: ; %bb.0:
; EG-NEXT: CF_END
diff --git a/llvm/test/CodeGen/AMDGPU/waitcnt-global-inv-wb.mir b/llvm/test/CodeGen/AMDGPU/waitcnt-global-inv-wb.mir
new file mode 100644
index 000000000000..c06e931c65d5
--- /dev/null
+++ b/llvm/test/CodeGen/AMDGPU/waitcnt-global-inv-wb.mir
@@ -0,0 +1,29 @@
+# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
+# RUN: llc -march=amdgcn -mcpu=gfx1200 -verify-machineinstrs -run-pass si-insert-waitcnts -o - %s | FileCheck -check-prefix=GFX12 %s
+
+# Check that we correctly track that GLOBAL_INV increases LOAD_cnt.
+# We use a straightforward dependency between a GLOBAL_LOAD and an instruction
+# that uses its result - the S_WAIT_LOADCNT introduced before the use should
+# reflect the fact that there is a GLOBAL_INV between them.
+# FIXME: We could get away with a S_WAIT_LOADCNT 1 here.
+---
+name: waitcnt-global-inv
+machineFunctionInfo:
+ isEntryFunction: true
+body: |
+ bb.0:
+ liveins: $vgpr0, $vgpr1, $sgpr2_sgpr3
+
+ ; GFX12-LABEL: name: waitcnt-global-inv
+ ; GFX12: liveins: $vgpr0, $vgpr1, $sgpr2_sgpr3
+ ; GFX12-NEXT: {{ $}}
+ ; GFX12-NEXT: renamable $vgpr0 = GLOBAL_LOAD_DWORD_SADDR renamable $sgpr2_sgpr3, killed $vgpr0, 0, 0, implicit $exec :: (load (s32), addrspace 1)
+ ; GFX12-NEXT: GLOBAL_INV 16, implicit $exec
+ ; GFX12-NEXT: S_WAITCNT 1015
+ ; GFX12-NEXT: $vgpr2 = V_MOV_B32_e32 $vgpr0, implicit $exec, implicit $exec
+ renamable $vgpr0 = GLOBAL_LOAD_DWORD_SADDR renamable $sgpr2_sgpr3, killed $vgpr0, 0, 0, implicit $exec :: (load (s32), addrspace 1)
+ GLOBAL_INV 16, implicit $exec
+ $vgpr2 = V_MOV_B32_e32 $vgpr0, implicit $exec, implicit $exec
+...
+
+# TODO: Test for GLOBAL_WB, GLOBAL_WBINV
diff --git a/llvm/test/CodeGen/LoongArch/global-variable-code-model.ll b/llvm/test/CodeGen/LoongArch/global-variable-code-model.ll
new file mode 100644
index 000000000000..aa4780834ac3
--- /dev/null
+++ b/llvm/test/CodeGen/LoongArch/global-variable-code-model.ll
@@ -0,0 +1,44 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc --mtriple=loongarch64 < %s | FileCheck %s
+
+@a= external dso_local global i32, code_model "small", align 4
+
+define dso_local signext i32 @local_small() #0 {
+; CHECK-LABEL: local_small:
+; CHECK: # %bb.0:
+; CHECK-NEXT: pcalau12i $a0, %pc_hi20(a)
+; CHECK-NEXT: addi.d $a0, $a0, %pc_lo12(a)
+; CHECK-NEXT: ld.w $a0, $a0, 0
+; CHECK-NEXT: ret
+ %1 = load i32, ptr @a, align 4
+ ret i32 %1
+}
+
+@b= external dso_local global i32, code_model "large", align 4
+
+define dso_local signext i32 @local_large() #0 {
+; CHECK-LABEL: local_large:
+; CHECK: # %bb.0:
+; CHECK-NEXT: pcalau12i $a0, %pc_hi20(b)
+; CHECK-NEXT: addi.d $t8, $zero, %pc_lo12(b)
+; CHECK-NEXT: lu32i.d $t8, %pc64_lo20(b)
+; CHECK-NEXT: lu52i.d $t8, $t8, %pc64_hi12(b)
+; CHECK-NEXT: add.d $a0, $t8, $a0
+; CHECK-NEXT: ld.w $a0, $a0, 0
+; CHECK-NEXT: ret
+ %1 = load i32, ptr @b, align 4
+ ret i32 %1
+}
+
+@c= external global i32, code_model "large", align 4
+
+define dso_local signext i32 @non_local_large() #0 {
+; CHECK-LABEL: non_local_large:
+; CHECK: # %bb.0:
+; CHECK-NEXT: pcalau12i $a0, %got_pc_hi20(c)
+; CHECK-NEXT: ld.d $a0, $a0, %got_pc_lo12(c)
+; CHECK-NEXT: ld.w $a0, $a0, 0
+; CHECK-NEXT: ret
+ %1 = load i32, ptr @c, align 4
+ ret i32 %1
+}
diff --git a/llvm/test/CodeGen/PowerPC/intrinsic-trap.ll b/llvm/test/CodeGen/PowerPC/intrinsic-trap.ll
new file mode 100644
index 000000000000..b02eb5d8fd27
--- /dev/null
+++ b/llvm/test/CodeGen/PowerPC/intrinsic-trap.ll
@@ -0,0 +1,10 @@
+; REQUIRES: asserts
+; RUN: not --crash llc -verify-machineinstrs -mtriple=powerpc64le-- < %s 2>&1 | FileCheck %s
+; CHECK: Bad machine code: Non-terminator instruction after the first terminator
+
+define i32 @test() {
+ call void @llvm.trap()
+ ret i32 0
+}
+
+declare void @llvm.trap()
diff --git a/llvm/test/CodeGen/PowerPC/pr47155-47156.ll b/llvm/test/CodeGen/PowerPC/pr47155-47156.ll
index 26aa92e83f7a..02f287634578 100644
--- a/llvm/test/CodeGen/PowerPC/pr47155-47156.ll
+++ b/llvm/test/CodeGen/PowerPC/pr47155-47156.ll
@@ -9,9 +9,11 @@ define void @pr47155() {
; CHECK-NEXT: pr47155:%bb.0 entry
; CHECK: SU(0): INLINEASM &"mtlr 31"{{.*}}implicit-def early-clobber $lr
; CHECK: Successors:
+; CHECK-NEXT: SU(1): Out Latency=0
; CHECK-NEXT: SU(1): Ord Latency=0 Barrier
; CHECK-NEXT: SU(1): INLINEASM &"mtlr 31"{{.*}}implicit-def early-clobber $lr8
; CHECK: Predecessors:
+; CHECK-NEXT: SU(0): Out Latency=0
; CHECK-NEXT: SU(0): Ord Latency=0 Barrier
; CHECK-NEXT: ExitSU:
entry:
@@ -25,11 +27,13 @@ define void @pr47156(ptr %fn) {
; CHECK: ********** MI Scheduling **********
; CHECK-NEXT: pr47156:%bb.0 entry
; CHECK: SU(0): INLINEASM &"mtctr 31"{{.*}}implicit-def early-clobber $ctr
-; CHECK-NOT: Successors:
-; CHECK-NOT: Predecessors:
-; CHECK: SU(1): MTCTR8 renamable $x3, implicit-def $ctr8
; CHECK: Successors:
-; CHECK-NEXT: ExitSU:
+; CHECK-NEXT: SU(1): Out Latency=0
+; CHECK-NEXT: SU(1): MTCTR8 renamable $x3, implicit-def $ctr8
+; CHECK: Predecessors:
+; CHECK-NEXT: SU(0): Out Latency=0
+; CHECK-NEXT: Successors:
+; CHECK-NEXT: ExitSU:
; CHECK-NEXT: SU(2):
entry:
call void asm sideeffect "mtctr 31", "~{ctr}"()
diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/instruction-select/bswap-rv32.mir b/llvm/test/CodeGen/RISCV/GlobalISel/instruction-select/bswap-rv32.mir
index 733fd128282e..721721cf361e 100644
--- a/llvm/test/CodeGen/RISCV/GlobalISel/instruction-select/bswap-rv32.mir
+++ b/llvm/test/CodeGen/RISCV/GlobalISel/instruction-select/bswap-rv32.mir
@@ -1,7 +1,8 @@
# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
# RUN: llc -mtriple=riscv32 -mattr=+zbb -run-pass=instruction-select \
-# RUN: -simplify-mir -verify-machineinstrs %s -o - \
-# RUN: | FileCheck -check-prefix=RV32I %s
+# RUN: -simplify-mir -verify-machineinstrs %s -o - | FileCheck %s
+# RUN: llc -mtriple=riscv32 -mattr=+zbkb -run-pass=instruction-select \
+# RUN: -simplify-mir -verify-machineinstrs %s -o - | FileCheck %s
---
name: bswap_s32
@@ -9,11 +10,11 @@ legalized: true
regBankSelected: true
body: |
bb.0.entry:
- ; RV32I-LABEL: name: bswap_s32
- ; RV32I: [[COPY:%[0-9]+]]:gpr = COPY $x10
- ; RV32I-NEXT: [[REV8_RV32_:%[0-9]+]]:gpr = REV8_RV32 [[COPY]]
- ; RV32I-NEXT: $x10 = COPY [[REV8_RV32_]]
- ; RV32I-NEXT: PseudoRET implicit $x10
+ ; CHECK-LABEL: name: bswap_s32
+ ; CHECK: [[COPY:%[0-9]+]]:gpr = COPY $x10
+ ; CHECK-NEXT: [[REV8_RV32_:%[0-9]+]]:gpr = REV8_RV32 [[COPY]]
+ ; CHECK-NEXT: $x10 = COPY [[REV8_RV32_]]
+ ; CHECK-NEXT: PseudoRET implicit $x10
%0:gprb(s32) = COPY $x10
%1:gprb(s32) = G_BSWAP %0
$x10 = COPY %1(s32)
diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/instruction-select/bswap-rv64.mir b/llvm/test/CodeGen/RISCV/GlobalISel/instruction-select/bswap-rv64.mir
index 053abef93a3a..6cdfb76f0b47 100644
--- a/llvm/test/CodeGen/RISCV/GlobalISel/instruction-select/bswap-rv64.mir
+++ b/llvm/test/CodeGen/RISCV/GlobalISel/instruction-select/bswap-rv64.mir
@@ -1,7 +1,8 @@
# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
# RUN: llc -mtriple=riscv64 -mattr=+zbb -run-pass=instruction-select \
-# RUN: -simplify-mir -verify-machineinstrs %s -o - \
-# RUN: | FileCheck -check-prefix=RV64I %s
+# RUN: -simplify-mir -verify-machineinstrs %s -o - | FileCheck %s
+# RUN: llc -mtriple=riscv64 -mattr=+zbkb -run-pass=instruction-select \
+# RUN: -simplify-mir -verify-machineinstrs %s -o - | FileCheck %s
---
name: bswap_s64
@@ -9,11 +10,11 @@ legalized: true
regBankSelected: true
body: |
bb.0.entry:
- ; RV64I-LABEL: name: bswap_s64
- ; RV64I: [[COPY:%[0-9]+]]:gpr = COPY $x10
- ; RV64I-NEXT: [[REV8_RV64_:%[0-9]+]]:gpr = REV8_RV64 [[COPY]]
- ; RV64I-NEXT: $x10 = COPY [[REV8_RV64_]]
- ; RV64I-NEXT: PseudoRET implicit $x10
+ ; CHECK-LABEL: name: bswap_s64
+ ; CHECK: [[COPY:%[0-9]+]]:gpr = COPY $x10
+ ; CHECK-NEXT: [[REV8_RV64_:%[0-9]+]]:gpr = REV8_RV64 [[COPY]]
+ ; CHECK-NEXT: $x10 = COPY [[REV8_RV64_]]
+ ; CHECK-NEXT: PseudoRET implicit $x10
%0:gprb(s64) = COPY $x10
%1:gprb(s64) = G_BSWAP %0
$x10 = COPY %1(s64)
diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/irtranslator/calling-conv-ilp32-ilp32f-common.ll b/llvm/test/CodeGen/RISCV/GlobalISel/irtranslator/calling-conv-ilp32-ilp32f-common.ll
index 226f2eb976e7..b87cc7869a46 100644
--- a/llvm/test/CodeGen/RISCV/GlobalISel/irtranslator/calling-conv-ilp32-ilp32f-common.ll
+++ b/llvm/test/CodeGen/RISCV/GlobalISel/irtranslator/calling-conv-ilp32-ilp32f-common.ll
@@ -40,7 +40,7 @@ define i32 @caller_double_in_regs() nounwind {
; ILP32-NEXT: $x10 = COPY [[C]](s32)
; ILP32-NEXT: $x11 = COPY [[UV]](s32)
; ILP32-NEXT: $x12 = COPY [[UV1]](s32)
- ; ILP32-NEXT: PseudoCALL target-flags(riscv-plt) @callee_double_in_regs, csr_ilp32_lp64, implicit-def $x1, implicit $x10, implicit $x11, implicit $x12, implicit-def $x10
+ ; ILP32-NEXT: PseudoCALL target-flags(riscv-call) @callee_double_in_regs, csr_ilp32_lp64, implicit-def $x1, implicit $x10, implicit $x11, implicit $x12, implicit-def $x10
; ILP32-NEXT: ADJCALLSTACKUP 0, 0, implicit-def $x2, implicit $x2
; ILP32-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $x10
; ILP32-NEXT: $x10 = COPY [[COPY]](s32)
@@ -55,7 +55,7 @@ define i32 @caller_double_in_regs() nounwind {
; ILP32F-NEXT: $x10 = COPY [[C]](s32)
; ILP32F-NEXT: $x11 = COPY [[UV]](s32)
; ILP32F-NEXT: $x12 = COPY [[UV1]](s32)
- ; ILP32F-NEXT: PseudoCALL target-flags(riscv-plt) @callee_double_in_regs, csr_ilp32f_lp64f, implicit-def $x1, implicit $x10, implicit $x11, implicit $x12, implicit-def $x10
+ ; ILP32F-NEXT: PseudoCALL target-flags(riscv-call) @callee_double_in_regs, csr_ilp32f_lp64f, implicit-def $x1, implicit $x10, implicit $x11, implicit $x12, implicit-def $x10
; ILP32F-NEXT: ADJCALLSTACKUP 0, 0, implicit-def $x2, implicit $x2
; ILP32F-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $x10
; ILP32F-NEXT: $x10 = COPY [[COPY]](s32)
@@ -79,7 +79,7 @@ define i64 @caller_small_scalar_ret() nounwind {
; ILP32-LABEL: name: caller_small_scalar_ret
; ILP32: bb.1 (%ir-block.0):
; ILP32-NEXT: ADJCALLSTACKDOWN 0, 0, implicit-def $x2, implicit $x2
- ; ILP32-NEXT: PseudoCALL target-flags(riscv-plt) @callee_small_scalar_ret, csr_ilp32_lp64, implicit-def $x1, implicit-def $x10, implicit-def $x11
+ ; ILP32-NEXT: PseudoCALL target-flags(riscv-call) @callee_small_scalar_ret, csr_ilp32_lp64, implicit-def $x1, implicit-def $x10, implicit-def $x11
; ILP32-NEXT: ADJCALLSTACKUP 0, 0, implicit-def $x2, implicit $x2
; ILP32-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $x10
; ILP32-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $x11
@@ -92,7 +92,7 @@ define i64 @caller_small_scalar_ret() nounwind {
; ILP32F-LABEL: name: caller_small_scalar_ret
; ILP32F: bb.1 (%ir-block.0):
; ILP32F-NEXT: ADJCALLSTACKDOWN 0, 0, implicit-def $x2, implicit $x2
- ; ILP32F-NEXT: PseudoCALL target-flags(riscv-plt) @callee_small_scalar_ret, csr_ilp32f_lp64f, implicit-def $x1, implicit-def $x10, implicit-def $x11
+ ; ILP32F-NEXT: PseudoCALL target-flags(riscv-call) @callee_small_scalar_ret, csr_ilp32f_lp64f, implicit-def $x1, implicit-def $x10, implicit-def $x11
; ILP32F-NEXT: ADJCALLSTACKUP 0, 0, implicit-def $x2, implicit $x2
; ILP32F-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $x10
; ILP32F-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $x11
diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/irtranslator/calling-conv-ilp32-ilp32f-ilp32d-common.ll b/llvm/test/CodeGen/RISCV/GlobalISel/irtranslator/calling-conv-ilp32-ilp32f-ilp32d-common.ll
index cc48392e9ea8..1a3489521af1 100644
--- a/llvm/test/CodeGen/RISCV/GlobalISel/irtranslator/calling-conv-ilp32-ilp32f-ilp32d-common.ll
+++ b/llvm/test/CodeGen/RISCV/GlobalISel/irtranslator/calling-conv-ilp32-ilp32f-ilp32d-common.ll
@@ -44,7 +44,7 @@ define i32 @caller_i64_in_regs() nounwind {
; ILP32-NEXT: $x10 = COPY [[C]](s32)
; ILP32-NEXT: $x11 = COPY [[UV]](s32)
; ILP32-NEXT: $x12 = COPY [[UV1]](s32)
- ; ILP32-NEXT: PseudoCALL target-flags(riscv-plt) @callee_i64_in_regs, csr_ilp32_lp64, implicit-def $x1, implicit $x10, implicit $x11, implicit $x12, implicit-def $x10
+ ; ILP32-NEXT: PseudoCALL target-flags(riscv-call) @callee_i64_in_regs, csr_ilp32_lp64, implicit-def $x1, implicit $x10, implicit $x11, implicit $x12, implicit-def $x10
; ILP32-NEXT: ADJCALLSTACKUP 0, 0, implicit-def $x2, implicit $x2
; ILP32-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $x10
; ILP32-NEXT: $x10 = COPY [[COPY]](s32)
@@ -59,7 +59,7 @@ define i32 @caller_i64_in_regs() nounwind {
; ILP32F-NEXT: $x10 = COPY [[C]](s32)
; ILP32F-NEXT: $x11 = COPY [[UV]](s32)
; ILP32F-NEXT: $x12 = COPY [[UV1]](s32)
- ; ILP32F-NEXT: PseudoCALL target-flags(riscv-plt) @callee_i64_in_regs, csr_ilp32f_lp64f, implicit-def $x1, implicit $x10, implicit $x11, implicit $x12, implicit-def $x10
+ ; ILP32F-NEXT: PseudoCALL target-flags(riscv-call) @callee_i64_in_regs, csr_ilp32f_lp64f, implicit-def $x1, implicit $x10, implicit $x11, implicit $x12, implicit-def $x10
; ILP32F-NEXT: ADJCALLSTACKUP 0, 0, implicit-def $x2, implicit $x2
; ILP32F-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $x10
; ILP32F-NEXT: $x10 = COPY [[COPY]](s32)
@@ -74,7 +74,7 @@ define i32 @caller_i64_in_regs() nounwind {
; ILP32D-NEXT: $x10 = COPY [[C]](s32)
; ILP32D-NEXT: $x11 = COPY [[UV]](s32)
; ILP32D-NEXT: $x12 = COPY [[UV1]](s32)
- ; ILP32D-NEXT: PseudoCALL target-flags(riscv-plt) @callee_i64_in_regs, csr_ilp32d_lp64d, implicit-def $x1, implicit $x10, implicit $x11, implicit $x12, implicit-def $x10
+ ; ILP32D-NEXT: PseudoCALL target-flags(riscv-call) @callee_i64_in_regs, csr_ilp32d_lp64d, implicit-def $x1, implicit $x10, implicit $x11, implicit $x12, implicit-def $x10
; ILP32D-NEXT: ADJCALLSTACKUP 0, 0, implicit-def $x2, implicit $x2
; ILP32D-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $x10
; ILP32D-NEXT: $x10 = COPY [[COPY]](s32)
@@ -162,7 +162,7 @@ define i32 @caller_many_scalars() nounwind {
; ILP32-NEXT: $x15 = COPY [[C4]](s32)
; ILP32-NEXT: $x16 = COPY [[C5]](s32)
; ILP32-NEXT: $x17 = COPY [[UV2]](s32)
- ; ILP32-NEXT: PseudoCALL target-flags(riscv-plt) @callee_many_scalars, csr_ilp32_lp64, implicit-def $x1, implicit $x10, implicit $x11, implicit $x12, implicit $x13, implicit $x14, implicit $x15, implicit $x16, implicit $x17, implicit-def $x10
+ ; ILP32-NEXT: PseudoCALL target-flags(riscv-call) @callee_many_scalars, csr_ilp32_lp64, implicit-def $x1, implicit $x10, implicit $x11, implicit $x12, implicit $x13, implicit $x14, implicit $x15, implicit $x16, implicit $x17, implicit-def $x10
; ILP32-NEXT: ADJCALLSTACKUP 8, 0, implicit-def $x2, implicit $x2
; ILP32-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $x10
; ILP32-NEXT: $x10 = COPY [[COPY1]](s32)
@@ -198,7 +198,7 @@ define i32 @caller_many_scalars() nounwind {
; ILP32F-NEXT: $x15 = COPY [[C4]](s32)
; ILP32F-NEXT: $x16 = COPY [[C5]](s32)
; ILP32F-NEXT: $x17 = COPY [[UV2]](s32)
- ; ILP32F-NEXT: PseudoCALL target-flags(riscv-plt) @callee_many_scalars, csr_ilp32f_lp64f, implicit-def $x1, implicit $x10, implicit $x11, implicit $x12, implicit $x13, implicit $x14, implicit $x15, implicit $x16, implicit $x17, implicit-def $x10
+ ; ILP32F-NEXT: PseudoCALL target-flags(riscv-call) @callee_many_scalars, csr_ilp32f_lp64f, implicit-def $x1, implicit $x10, implicit $x11, implicit $x12, implicit $x13, implicit $x14, implicit $x15, implicit $x16, implicit $x17, implicit-def $x10
; ILP32F-NEXT: ADJCALLSTACKUP 8, 0, implicit-def $x2, implicit $x2
; ILP32F-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $x10
; ILP32F-NEXT: $x10 = COPY [[COPY1]](s32)
@@ -234,7 +234,7 @@ define i32 @caller_many_scalars() nounwind {
; ILP32D-NEXT: $x15 = COPY [[C4]](s32)
; ILP32D-NEXT: $x16 = COPY [[C5]](s32)
; ILP32D-NEXT: $x17 = COPY [[UV2]](s32)
- ; ILP32D-NEXT: PseudoCALL target-flags(riscv-plt) @callee_many_scalars, csr_ilp32d_lp64d, implicit-def $x1, implicit $x10, implicit $x11, implicit $x12, implicit $x13, implicit $x14, implicit $x15, implicit $x16, implicit $x17, implicit-def $x10
+ ; ILP32D-NEXT: PseudoCALL target-flags(riscv-call) @callee_many_scalars, csr_ilp32d_lp64d, implicit-def $x1, implicit $x10, implicit $x11, implicit $x12, implicit $x13, implicit $x14, implicit $x15, implicit $x16, implicit $x17, implicit-def $x10
; ILP32D-NEXT: ADJCALLSTACKUP 8, 0, implicit-def $x2, implicit $x2
; ILP32D-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $x10
; ILP32D-NEXT: $x10 = COPY [[COPY1]](s32)
@@ -261,7 +261,7 @@ define i32 @caller_small_scalar_ret() nounwind {
; ILP32: bb.1 (%ir-block.0):
; ILP32-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 987654321234567
; ILP32-NEXT: ADJCALLSTACKDOWN 0, 0, implicit-def $x2, implicit $x2
- ; ILP32-NEXT: PseudoCALL target-flags(riscv-plt) @callee_small_scalar_ret, csr_ilp32_lp64, implicit-def $x1, implicit-def $x10, implicit-def $x11
+ ; ILP32-NEXT: PseudoCALL target-flags(riscv-call) @callee_small_scalar_ret, csr_ilp32_lp64, implicit-def $x1, implicit-def $x10, implicit-def $x11
; ILP32-NEXT: ADJCALLSTACKUP 0, 0, implicit-def $x2, implicit $x2
; ILP32-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $x10
; ILP32-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $x11
@@ -275,7 +275,7 @@ define i32 @caller_small_scalar_ret() nounwind {
; ILP32F: bb.1 (%ir-block.0):
; ILP32F-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 987654321234567
; ILP32F-NEXT: ADJCALLSTACKDOWN 0, 0, implicit-def $x2, implicit $x2
- ; ILP32F-NEXT: PseudoCALL target-flags(riscv-plt) @callee_small_scalar_ret, csr_ilp32f_lp64f, implicit-def $x1, implicit-def $x10, implicit-def $x11
+ ; ILP32F-NEXT: PseudoCALL target-flags(riscv-call) @callee_small_scalar_ret, csr_ilp32f_lp64f, implicit-def $x1, implicit-def $x10, implicit-def $x11
; ILP32F-NEXT: ADJCALLSTACKUP 0, 0, implicit-def $x2, implicit $x2
; ILP32F-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $x10
; ILP32F-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $x11
@@ -289,7 +289,7 @@ define i32 @caller_small_scalar_ret() nounwind {
; ILP32D: bb.1 (%ir-block.0):
; ILP32D-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 987654321234567
; ILP32D-NEXT: ADJCALLSTACKDOWN 0, 0, implicit-def $x2, implicit $x2
- ; ILP32D-NEXT: PseudoCALL target-flags(riscv-plt) @callee_small_scalar_ret, csr_ilp32d_lp64d, implicit-def $x1, implicit-def $x10, implicit-def $x11
+ ; ILP32D-NEXT: PseudoCALL target-flags(riscv-call) @callee_small_scalar_ret, csr_ilp32d_lp64d, implicit-def $x1, implicit-def $x10, implicit-def $x11
; ILP32D-NEXT: ADJCALLSTACKUP 0, 0, implicit-def $x2, implicit $x2
; ILP32D-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $x10
; ILP32D-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $x11
@@ -323,7 +323,7 @@ define i32 @caller_small_struct_ret() nounwind {
; ILP32-LABEL: name: caller_small_struct_ret
; ILP32: bb.1 (%ir-block.0):
; ILP32-NEXT: ADJCALLSTACKDOWN 0, 0, implicit-def $x2, implicit $x2
- ; ILP32-NEXT: PseudoCALL target-flags(riscv-plt) @callee_small_struct_ret, csr_ilp32_lp64, implicit-def $x1, implicit-def $x10, implicit-def $x11
+ ; ILP32-NEXT: PseudoCALL target-flags(riscv-call) @callee_small_struct_ret, csr_ilp32_lp64, implicit-def $x1, implicit-def $x10, implicit-def $x11
; ILP32-NEXT: ADJCALLSTACKUP 0, 0, implicit-def $x2, implicit $x2
; ILP32-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $x10
; ILP32-NEXT: [[COPY1:%[0-9]+]]:_(p0) = COPY $x11
@@ -335,7 +335,7 @@ define i32 @caller_small_struct_ret() nounwind {
; ILP32F-LABEL: name: caller_small_struct_ret
; ILP32F: bb.1 (%ir-block.0):
; ILP32F-NEXT: ADJCALLSTACKDOWN 0, 0, implicit-def $x2, implicit $x2
- ; ILP32F-NEXT: PseudoCALL target-flags(riscv-plt) @callee_small_struct_ret, csr_ilp32f_lp64f, implicit-def $x1, implicit-def $x10, implicit-def $x11
+ ; ILP32F-NEXT: PseudoCALL target-flags(riscv-call) @callee_small_struct_ret, csr_ilp32f_lp64f, implicit-def $x1, implicit-def $x10, implicit-def $x11
; ILP32F-NEXT: ADJCALLSTACKUP 0, 0, implicit-def $x2, implicit $x2
; ILP32F-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $x10
; ILP32F-NEXT: [[COPY1:%[0-9]+]]:_(p0) = COPY $x11
@@ -347,7 +347,7 @@ define i32 @caller_small_struct_ret() nounwind {
; ILP32D-LABEL: name: caller_small_struct_ret
; ILP32D: bb.1 (%ir-block.0):
; ILP32D-NEXT: ADJCALLSTACKDOWN 0, 0, implicit-def $x2, implicit $x2
- ; ILP32D-NEXT: PseudoCALL target-flags(riscv-plt) @callee_small_struct_ret, csr_ilp32d_lp64d, implicit-def $x1, implicit-def $x10, implicit-def $x11
+ ; ILP32D-NEXT: PseudoCALL target-flags(riscv-call) @callee_small_struct_ret, csr_ilp32d_lp64d, implicit-def $x1, implicit-def $x10, implicit-def $x11
; ILP32D-NEXT: ADJCALLSTACKUP 0, 0, implicit-def $x2, implicit $x2
; ILP32D-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $x10
; ILP32D-NEXT: [[COPY1:%[0-9]+]]:_(p0) = COPY $x11
@@ -404,7 +404,7 @@ define i32 @caller_large_struct_ret() nounwind {
; ILP32-NEXT: [[FRAME_INDEX:%[0-9]+]]:_(p0) = G_FRAME_INDEX %stack.0
; ILP32-NEXT: ADJCALLSTACKDOWN 0, 0, implicit-def $x2, implicit $x2
; ILP32-NEXT: $x10 = COPY [[FRAME_INDEX]](p0)
- ; ILP32-NEXT: PseudoCALL target-flags(riscv-plt) @callee_large_struct_ret, csr_ilp32_lp64, implicit-def $x1, implicit $x10
+ ; ILP32-NEXT: PseudoCALL target-flags(riscv-call) @callee_large_struct_ret, csr_ilp32_lp64, implicit-def $x1, implicit $x10
; ILP32-NEXT: ADJCALLSTACKUP 0, 0, implicit-def $x2, implicit $x2
; ILP32-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[FRAME_INDEX]](p0) :: (dereferenceable load (s32) from %ir.1)
; ILP32-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 12
@@ -419,7 +419,7 @@ define i32 @caller_large_struct_ret() nounwind {
; ILP32F-NEXT: [[FRAME_INDEX:%[0-9]+]]:_(p0) = G_FRAME_INDEX %stack.0
; ILP32F-NEXT: ADJCALLSTACKDOWN 0, 0, implicit-def $x2, implicit $x2
; ILP32F-NEXT: $x10 = COPY [[FRAME_INDEX]](p0)
- ; ILP32F-NEXT: PseudoCALL target-flags(riscv-plt) @callee_large_struct_ret, csr_ilp32f_lp64f, implicit-def $x1, implicit $x10
+ ; ILP32F-NEXT: PseudoCALL target-flags(riscv-call) @callee_large_struct_ret, csr_ilp32f_lp64f, implicit-def $x1, implicit $x10
; ILP32F-NEXT: ADJCALLSTACKUP 0, 0, implicit-def $x2, implicit $x2
; ILP32F-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[FRAME_INDEX]](p0) :: (dereferenceable load (s32) from %ir.1)
; ILP32F-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 12
@@ -434,7 +434,7 @@ define i32 @caller_large_struct_ret() nounwind {
; ILP32D-NEXT: [[FRAME_INDEX:%[0-9]+]]:_(p0) = G_FRAME_INDEX %stack.0
; ILP32D-NEXT: ADJCALLSTACKDOWN 0, 0, implicit-def $x2, implicit $x2
; ILP32D-NEXT: $x10 = COPY [[FRAME_INDEX]](p0)
- ; ILP32D-NEXT: PseudoCALL target-flags(riscv-plt) @callee_large_struct_ret, csr_ilp32d_lp64d, implicit-def $x1, implicit $x10
+ ; ILP32D-NEXT: PseudoCALL target-flags(riscv-call) @callee_large_struct_ret, csr_ilp32d_lp64d, implicit-def $x1, implicit $x10
; ILP32D-NEXT: ADJCALLSTACKUP 0, 0, implicit-def $x2, implicit $x2
; ILP32D-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[FRAME_INDEX]](p0) :: (dereferenceable load (s32) from %ir.1)
; ILP32D-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 12
diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/irtranslator/calling-conv-ilp32.ll b/llvm/test/CodeGen/RISCV/GlobalISel/irtranslator/calling-conv-ilp32.ll
index 9426c77081e4..93649b5f60f7 100644
--- a/llvm/test/CodeGen/RISCV/GlobalISel/irtranslator/calling-conv-ilp32.ll
+++ b/llvm/test/CodeGen/RISCV/GlobalISel/irtranslator/calling-conv-ilp32.ll
@@ -31,7 +31,7 @@ define i32 @caller_float_in_regs() nounwind {
; RV32I-NEXT: ADJCALLSTACKDOWN 0, 0, implicit-def $x2, implicit $x2
; RV32I-NEXT: $x10 = COPY [[C]](s32)
; RV32I-NEXT: $x11 = COPY [[C1]](s32)
- ; RV32I-NEXT: PseudoCALL target-flags(riscv-plt) @callee_float_in_regs, csr_ilp32_lp64, implicit-def $x1, implicit $x10, implicit $x11, implicit-def $x10
+ ; RV32I-NEXT: PseudoCALL target-flags(riscv-call) @callee_float_in_regs, csr_ilp32_lp64, implicit-def $x1, implicit $x10, implicit $x11, implicit-def $x10
; RV32I-NEXT: ADJCALLSTACKUP 0, 0, implicit-def $x2, implicit $x2
; RV32I-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $x10
; RV32I-NEXT: $x10 = COPY [[COPY]](s32)
@@ -94,7 +94,7 @@ define i32 @caller_float_on_stack() nounwind {
; RV32I-NEXT: $x15 = COPY [[UV5]](s32)
; RV32I-NEXT: $x16 = COPY [[UV6]](s32)
; RV32I-NEXT: $x17 = COPY [[UV7]](s32)
- ; RV32I-NEXT: PseudoCALL target-flags(riscv-plt) @callee_float_on_stack, csr_ilp32_lp64, implicit-def $x1, implicit $x10, implicit $x11, implicit $x12, implicit $x13, implicit $x14, implicit $x15, implicit $x16, implicit $x17, implicit-def $x10
+ ; RV32I-NEXT: PseudoCALL target-flags(riscv-call) @callee_float_on_stack, csr_ilp32_lp64, implicit-def $x1, implicit $x10, implicit $x11, implicit $x12, implicit $x13, implicit $x14, implicit $x15, implicit $x16, implicit $x17, implicit-def $x10
; RV32I-NEXT: ADJCALLSTACKUP 4, 0, implicit-def $x2, implicit $x2
; RV32I-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $x10
; RV32I-NEXT: $x10 = COPY [[COPY1]](s32)
@@ -116,7 +116,7 @@ define i32 @caller_tiny_scalar_ret() nounwind {
; RV32I-LABEL: name: caller_tiny_scalar_ret
; RV32I: bb.1 (%ir-block.0):
; RV32I-NEXT: ADJCALLSTACKDOWN 0, 0, implicit-def $x2, implicit $x2
- ; RV32I-NEXT: PseudoCALL target-flags(riscv-plt) @callee_tiny_scalar_ret, csr_ilp32_lp64, implicit-def $x1, implicit-def $x10
+ ; RV32I-NEXT: PseudoCALL target-flags(riscv-call) @callee_tiny_scalar_ret, csr_ilp32_lp64, implicit-def $x1, implicit-def $x10
; RV32I-NEXT: ADJCALLSTACKUP 0, 0, implicit-def $x2, implicit $x2
; RV32I-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $x10
; RV32I-NEXT: $x10 = COPY [[COPY]](s32)
diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/irtranslator/calling-conv-ilp32d.ll b/llvm/test/CodeGen/RISCV/GlobalISel/irtranslator/calling-conv-ilp32d.ll
index 8aaf9abaf364..4d487eb5cda2 100644
--- a/llvm/test/CodeGen/RISCV/GlobalISel/irtranslator/calling-conv-ilp32d.ll
+++ b/llvm/test/CodeGen/RISCV/GlobalISel/irtranslator/calling-conv-ilp32d.ll
@@ -30,7 +30,7 @@ define i32 @caller_double_in_fpr() nounwind {
; RV32-ILP32D-NEXT: ADJCALLSTACKDOWN 0, 0, implicit-def $x2, implicit $x2
; RV32-ILP32D-NEXT: $x10 = COPY [[C]](s32)
; RV32-ILP32D-NEXT: $f10_d = COPY [[C1]](s64)
- ; RV32-ILP32D-NEXT: PseudoCALL target-flags(riscv-plt) @callee_double_in_fpr, csr_ilp32d_lp64d, implicit-def $x1, implicit $x10, implicit $f10_d, implicit-def $x10
+ ; RV32-ILP32D-NEXT: PseudoCALL target-flags(riscv-call) @callee_double_in_fpr, csr_ilp32d_lp64d, implicit-def $x1, implicit $x10, implicit $f10_d, implicit-def $x10
; RV32-ILP32D-NEXT: ADJCALLSTACKUP 0, 0, implicit-def $x2, implicit $x2
; RV32-ILP32D-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $x10
; RV32-ILP32D-NEXT: $x10 = COPY [[COPY]](s32)
@@ -96,7 +96,7 @@ define i32 @caller_double_in_fpr_exhausted_gprs() nounwind {
; RV32-ILP32D-NEXT: $x16 = COPY [[UV6]](s32)
; RV32-ILP32D-NEXT: $x17 = COPY [[UV7]](s32)
; RV32-ILP32D-NEXT: $f10_d = COPY [[C5]](s64)
- ; RV32-ILP32D-NEXT: PseudoCALL target-flags(riscv-plt) @callee_double_in_fpr_exhausted_gprs, csr_ilp32d_lp64d, implicit-def $x1, implicit $x10, implicit $x11, implicit $x12, implicit $x13, implicit $x14, implicit $x15, implicit $x16, implicit $x17, implicit $f10_d, implicit-def $x10
+ ; RV32-ILP32D-NEXT: PseudoCALL target-flags(riscv-call) @callee_double_in_fpr_exhausted_gprs, csr_ilp32d_lp64d, implicit-def $x1, implicit $x10, implicit $x11, implicit $x12, implicit $x13, implicit $x14, implicit $x15, implicit $x16, implicit $x17, implicit $f10_d, implicit-def $x10
; RV32-ILP32D-NEXT: ADJCALLSTACKUP 4, 0, implicit-def $x2, implicit $x2
; RV32-ILP32D-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $x10
; RV32-ILP32D-NEXT: $x10 = COPY [[COPY1]](s32)
@@ -158,7 +158,7 @@ define i32 @caller_double_in_gpr_exhausted_fprs() nounwind {
; RV32-ILP32D-NEXT: $f17_d = COPY [[C7]](s64)
; RV32-ILP32D-NEXT: $x10 = COPY [[UV]](s32)
; RV32-ILP32D-NEXT: $x11 = COPY [[UV1]](s32)
- ; RV32-ILP32D-NEXT: PseudoCALL target-flags(riscv-plt) @callee_double_in_gpr_exhausted_fprs, csr_ilp32d_lp64d, implicit-def $x1, implicit $f10_d, implicit $f11_d, implicit $f12_d, implicit $f13_d, implicit $f14_d, implicit $f15_d, implicit $f16_d, implicit $f17_d, implicit $x10, implicit $x11, implicit-def $x10
+ ; RV32-ILP32D-NEXT: PseudoCALL target-flags(riscv-call) @callee_double_in_gpr_exhausted_fprs, csr_ilp32d_lp64d, implicit-def $x1, implicit $f10_d, implicit $f11_d, implicit $f12_d, implicit $f13_d, implicit $f14_d, implicit $f15_d, implicit $f16_d, implicit $f17_d, implicit $x10, implicit $x11, implicit-def $x10
; RV32-ILP32D-NEXT: ADJCALLSTACKUP 0, 0, implicit-def $x2, implicit $x2
; RV32-ILP32D-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $x10
; RV32-ILP32D-NEXT: $x10 = COPY [[COPY]](s32)
@@ -247,7 +247,7 @@ define i32 @caller_double_in_gpr_and_stack_almost_exhausted_gprs_fprs() nounwind
; RV32-ILP32D-NEXT: $f16_d = COPY [[C10]](s64)
; RV32-ILP32D-NEXT: $f17_d = COPY [[C11]](s64)
; RV32-ILP32D-NEXT: $x17 = COPY [[UV6]](s32)
- ; RV32-ILP32D-NEXT: PseudoCALL target-flags(riscv-plt) @callee_double_in_gpr_and_stack_almost_exhausted_gprs_fprs, csr_ilp32d_lp64d, implicit-def $x1, implicit $x10, implicit $x11, implicit $f10_d, implicit $x12, implicit $x13, implicit $f11_d, implicit $x14, implicit $x15, implicit $f12_d, implicit $x16, implicit $f13_d, implicit $f14_d, implicit $f15_d, implicit $f16_d, implicit $f17_d, implicit $x17, implicit-def $x10
+ ; RV32-ILP32D-NEXT: PseudoCALL target-flags(riscv-call) @callee_double_in_gpr_and_stack_almost_exhausted_gprs_fprs, csr_ilp32d_lp64d, implicit-def $x1, implicit $x10, implicit $x11, implicit $f10_d, implicit $x12, implicit $x13, implicit $f11_d, implicit $x14, implicit $x15, implicit $f12_d, implicit $x16, implicit $f13_d, implicit $f14_d, implicit $f15_d, implicit $f16_d, implicit $f17_d, implicit $x17, implicit-def $x10
; RV32-ILP32D-NEXT: ADJCALLSTACKUP 4, 0, implicit-def $x2, implicit $x2
; RV32-ILP32D-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $x10
; RV32-ILP32D-NEXT: $x10 = COPY [[COPY1]](s32)
@@ -339,7 +339,7 @@ define i32 @caller_double_on_stack_exhausted_gprs_fprs() nounwind {
; RV32-ILP32D-NEXT: $f15_d = COPY [[C9]](s64)
; RV32-ILP32D-NEXT: $f16_d = COPY [[C10]](s64)
; RV32-ILP32D-NEXT: $f17_d = COPY [[C11]](s64)
- ; RV32-ILP32D-NEXT: PseudoCALL target-flags(riscv-plt) @callee_double_on_stack_exhausted_gprs_fprs, csr_ilp32d_lp64d, implicit-def $x1, implicit $x10, implicit $x11, implicit $f10_d, implicit $x12, implicit $x13, implicit $f11_d, implicit $x14, implicit $x15, implicit $f12_d, implicit $x16, implicit $x17, implicit $f13_d, implicit $f14_d, implicit $f15_d, implicit $f16_d, implicit $f17_d, implicit-def $x10
+ ; RV32-ILP32D-NEXT: PseudoCALL target-flags(riscv-call) @callee_double_on_stack_exhausted_gprs_fprs, csr_ilp32d_lp64d, implicit-def $x1, implicit $x10, implicit $x11, implicit $f10_d, implicit $x12, implicit $x13, implicit $f11_d, implicit $x14, implicit $x15, implicit $f12_d, implicit $x16, implicit $x17, implicit $f13_d, implicit $f14_d, implicit $f15_d, implicit $f16_d, implicit $f17_d, implicit-def $x10
; RV32-ILP32D-NEXT: ADJCALLSTACKUP 8, 0, implicit-def $x2, implicit $x2
; RV32-ILP32D-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $x10
; RV32-ILP32D-NEXT: $x10 = COPY [[COPY1]](s32)
@@ -363,7 +363,7 @@ define i32 @caller_double_ret() nounwind {
; RV32-ILP32D-LABEL: name: caller_double_ret
; RV32-ILP32D: bb.1 (%ir-block.0):
; RV32-ILP32D-NEXT: ADJCALLSTACKDOWN 0, 0, implicit-def $x2, implicit $x2
- ; RV32-ILP32D-NEXT: PseudoCALL target-flags(riscv-plt) @callee_double_ret, csr_ilp32d_lp64d, implicit-def $x1, implicit-def $f10_d
+ ; RV32-ILP32D-NEXT: PseudoCALL target-flags(riscv-call) @callee_double_ret, csr_ilp32d_lp64d, implicit-def $x1, implicit-def $f10_d
; RV32-ILP32D-NEXT: ADJCALLSTACKUP 0, 0, implicit-def $x2, implicit $x2
; RV32-ILP32D-NEXT: [[COPY:%[0-9]+]]:_(s64) = COPY $f10_d
; RV32-ILP32D-NEXT: [[TRUNC:%[0-9]+]]:_(s32) = G_TRUNC [[COPY]](s64)
diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/irtranslator/calling-conv-ilp32f-ilp32d-common.ll b/llvm/test/CodeGen/RISCV/GlobalISel/irtranslator/calling-conv-ilp32f-ilp32d-common.ll
index 9443b8b2fefd..a9c603bfdd74 100644
--- a/llvm/test/CodeGen/RISCV/GlobalISel/irtranslator/calling-conv-ilp32f-ilp32d-common.ll
+++ b/llvm/test/CodeGen/RISCV/GlobalISel/irtranslator/calling-conv-ilp32f-ilp32d-common.ll
@@ -33,7 +33,7 @@ define i32 @caller_float_in_fpr() nounwind {
; RV32-ILP32F-NEXT: ADJCALLSTACKDOWN 0, 0, implicit-def $x2, implicit $x2
; RV32-ILP32F-NEXT: $x10 = COPY [[C]](s32)
; RV32-ILP32F-NEXT: $f10_f = COPY [[C1]](s32)
- ; RV32-ILP32F-NEXT: PseudoCALL target-flags(riscv-plt) @callee_float_in_fpr, csr_ilp32f_lp64f, implicit-def $x1, implicit $x10, implicit $f10_f, implicit-def $x10
+ ; RV32-ILP32F-NEXT: PseudoCALL target-flags(riscv-call) @callee_float_in_fpr, csr_ilp32f_lp64f, implicit-def $x1, implicit $x10, implicit $f10_f, implicit-def $x10
; RV32-ILP32F-NEXT: ADJCALLSTACKUP 0, 0, implicit-def $x2, implicit $x2
; RV32-ILP32F-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $x10
; RV32-ILP32F-NEXT: $x10 = COPY [[COPY]](s32)
@@ -46,7 +46,7 @@ define i32 @caller_float_in_fpr() nounwind {
; RV32-ILP32D-NEXT: ADJCALLSTACKDOWN 0, 0, implicit-def $x2, implicit $x2
; RV32-ILP32D-NEXT: $x10 = COPY [[C]](s32)
; RV32-ILP32D-NEXT: $f10_f = COPY [[C1]](s32)
- ; RV32-ILP32D-NEXT: PseudoCALL target-flags(riscv-plt) @callee_float_in_fpr, csr_ilp32d_lp64d, implicit-def $x1, implicit $x10, implicit $f10_f, implicit-def $x10
+ ; RV32-ILP32D-NEXT: PseudoCALL target-flags(riscv-call) @callee_float_in_fpr, csr_ilp32d_lp64d, implicit-def $x1, implicit $x10, implicit $f10_f, implicit-def $x10
; RV32-ILP32D-NEXT: ADJCALLSTACKUP 0, 0, implicit-def $x2, implicit $x2
; RV32-ILP32D-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $x10
; RV32-ILP32D-NEXT: $x10 = COPY [[COPY]](s32)
@@ -112,7 +112,7 @@ define i32 @caller_float_in_fpr_exhausted_gprs() nounwind {
; RV32-ILP32F-NEXT: $x16 = COPY [[UV6]](s32)
; RV32-ILP32F-NEXT: $x17 = COPY [[UV7]](s32)
; RV32-ILP32F-NEXT: $f10_f = COPY [[C5]](s32)
- ; RV32-ILP32F-NEXT: PseudoCALL target-flags(riscv-plt) @callee_float_in_fpr_exhausted_gprs, csr_ilp32f_lp64f, implicit-def $x1, implicit $x10, implicit $x11, implicit $x12, implicit $x13, implicit $x14, implicit $x15, implicit $x16, implicit $x17, implicit $f10_f, implicit-def $x10
+ ; RV32-ILP32F-NEXT: PseudoCALL target-flags(riscv-call) @callee_float_in_fpr_exhausted_gprs, csr_ilp32f_lp64f, implicit-def $x1, implicit $x10, implicit $x11, implicit $x12, implicit $x13, implicit $x14, implicit $x15, implicit $x16, implicit $x17, implicit $f10_f, implicit-def $x10
; RV32-ILP32F-NEXT: ADJCALLSTACKUP 4, 0, implicit-def $x2, implicit $x2
; RV32-ILP32F-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $x10
; RV32-ILP32F-NEXT: $x10 = COPY [[COPY1]](s32)
@@ -144,7 +144,7 @@ define i32 @caller_float_in_fpr_exhausted_gprs() nounwind {
; RV32-ILP32D-NEXT: $x16 = COPY [[UV6]](s32)
; RV32-ILP32D-NEXT: $x17 = COPY [[UV7]](s32)
; RV32-ILP32D-NEXT: $f10_f = COPY [[C5]](s32)
- ; RV32-ILP32D-NEXT: PseudoCALL target-flags(riscv-plt) @callee_float_in_fpr_exhausted_gprs, csr_ilp32d_lp64d, implicit-def $x1, implicit $x10, implicit $x11, implicit $x12, implicit $x13, implicit $x14, implicit $x15, implicit $x16, implicit $x17, implicit $f10_f, implicit-def $x10
+ ; RV32-ILP32D-NEXT: PseudoCALL target-flags(riscv-call) @callee_float_in_fpr_exhausted_gprs, csr_ilp32d_lp64d, implicit-def $x1, implicit $x10, implicit $x11, implicit $x12, implicit $x13, implicit $x14, implicit $x15, implicit $x16, implicit $x17, implicit $f10_f, implicit-def $x10
; RV32-ILP32D-NEXT: ADJCALLSTACKUP 4, 0, implicit-def $x2, implicit $x2
; RV32-ILP32D-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $x10
; RV32-ILP32D-NEXT: $x10 = COPY [[COPY1]](s32)
@@ -202,7 +202,7 @@ define i32 @caller_float_in_gpr_exhausted_fprs() nounwind {
; RV32-ILP32F-NEXT: $f16_f = COPY [[C6]](s32)
; RV32-ILP32F-NEXT: $f17_f = COPY [[C7]](s32)
; RV32-ILP32F-NEXT: $x10 = COPY [[C8]](s32)
- ; RV32-ILP32F-NEXT: PseudoCALL target-flags(riscv-plt) @callee_float_in_gpr_exhausted_fprs, csr_ilp32f_lp64f, implicit-def $x1, implicit $f10_f, implicit $f11_f, implicit $f12_f, implicit $f13_f, implicit $f14_f, implicit $f15_f, implicit $f16_f, implicit $f17_f, implicit $x10, implicit-def $x10
+ ; RV32-ILP32F-NEXT: PseudoCALL target-flags(riscv-call) @callee_float_in_gpr_exhausted_fprs, csr_ilp32f_lp64f, implicit-def $x1, implicit $f10_f, implicit $f11_f, implicit $f12_f, implicit $f13_f, implicit $f14_f, implicit $f15_f, implicit $f16_f, implicit $f17_f, implicit $x10, implicit-def $x10
; RV32-ILP32F-NEXT: ADJCALLSTACKUP 0, 0, implicit-def $x2, implicit $x2
; RV32-ILP32F-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $x10
; RV32-ILP32F-NEXT: $x10 = COPY [[COPY]](s32)
@@ -229,7 +229,7 @@ define i32 @caller_float_in_gpr_exhausted_fprs() nounwind {
; RV32-ILP32D-NEXT: $f16_f = COPY [[C6]](s32)
; RV32-ILP32D-NEXT: $f17_f = COPY [[C7]](s32)
; RV32-ILP32D-NEXT: $x10 = COPY [[C8]](s32)
- ; RV32-ILP32D-NEXT: PseudoCALL target-flags(riscv-plt) @callee_float_in_gpr_exhausted_fprs, csr_ilp32d_lp64d, implicit-def $x1, implicit $f10_f, implicit $f11_f, implicit $f12_f, implicit $f13_f, implicit $f14_f, implicit $f15_f, implicit $f16_f, implicit $f17_f, implicit $x10, implicit-def $x10
+ ; RV32-ILP32D-NEXT: PseudoCALL target-flags(riscv-call) @callee_float_in_gpr_exhausted_fprs, csr_ilp32d_lp64d, implicit-def $x1, implicit $f10_f, implicit $f11_f, implicit $f12_f, implicit $f13_f, implicit $f14_f, implicit $f15_f, implicit $f16_f, implicit $f17_f, implicit $x10, implicit-def $x10
; RV32-ILP32D-NEXT: ADJCALLSTACKUP 0, 0, implicit-def $x2, implicit $x2
; RV32-ILP32D-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $x10
; RV32-ILP32D-NEXT: $x10 = COPY [[COPY]](s32)
@@ -320,7 +320,7 @@ define i32 @caller_float_on_stack_exhausted_gprs_fprs() nounwind {
; RV32-ILP32F-NEXT: $f15_f = COPY [[C9]](s32)
; RV32-ILP32F-NEXT: $f16_f = COPY [[C10]](s32)
; RV32-ILP32F-NEXT: $f17_f = COPY [[C11]](s32)
- ; RV32-ILP32F-NEXT: PseudoCALL target-flags(riscv-plt) @callee_float_on_stack_exhausted_gprs_fprs, csr_ilp32f_lp64f, implicit-def $x1, implicit $x10, implicit $x11, implicit $f10_f, implicit $x12, implicit $x13, implicit $f11_f, implicit $x14, implicit $x15, implicit $f12_f, implicit $x16, implicit $x17, implicit $f13_f, implicit $f14_f, implicit $f15_f, implicit $f16_f, implicit $f17_f, implicit-def $x10
+ ; RV32-ILP32F-NEXT: PseudoCALL target-flags(riscv-call) @callee_float_on_stack_exhausted_gprs_fprs, csr_ilp32f_lp64f, implicit-def $x1, implicit $x10, implicit $x11, implicit $f10_f, implicit $x12, implicit $x13, implicit $f11_f, implicit $x14, implicit $x15, implicit $f12_f, implicit $x16, implicit $x17, implicit $f13_f, implicit $f14_f, implicit $f15_f, implicit $f16_f, implicit $f17_f, implicit-def $x10
; RV32-ILP32F-NEXT: ADJCALLSTACKUP 4, 0, implicit-def $x2, implicit $x2
; RV32-ILP32F-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $x10
; RV32-ILP32F-NEXT: $x10 = COPY [[COPY1]](s32)
@@ -366,7 +366,7 @@ define i32 @caller_float_on_stack_exhausted_gprs_fprs() nounwind {
; RV32-ILP32D-NEXT: $f15_f = COPY [[C9]](s32)
; RV32-ILP32D-NEXT: $f16_f = COPY [[C10]](s32)
; RV32-ILP32D-NEXT: $f17_f = COPY [[C11]](s32)
- ; RV32-ILP32D-NEXT: PseudoCALL target-flags(riscv-plt) @callee_float_on_stack_exhausted_gprs_fprs, csr_ilp32d_lp64d, implicit-def $x1, implicit $x10, implicit $x11, implicit $f10_f, implicit $x12, implicit $x13, implicit $f11_f, implicit $x14, implicit $x15, implicit $f12_f, implicit $x16, implicit $x17, implicit $f13_f, implicit $f14_f, implicit $f15_f, implicit $f16_f, implicit $f17_f, implicit-def $x10
+ ; RV32-ILP32D-NEXT: PseudoCALL target-flags(riscv-call) @callee_float_on_stack_exhausted_gprs_fprs, csr_ilp32d_lp64d, implicit-def $x1, implicit $x10, implicit $x11, implicit $f10_f, implicit $x12, implicit $x13, implicit $f11_f, implicit $x14, implicit $x15, implicit $f12_f, implicit $x16, implicit $x17, implicit $f13_f, implicit $f14_f, implicit $f15_f, implicit $f16_f, implicit $f17_f, implicit-def $x10
; RV32-ILP32D-NEXT: ADJCALLSTACKUP 4, 0, implicit-def $x2, implicit $x2
; RV32-ILP32D-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $x10
; RV32-ILP32D-NEXT: $x10 = COPY [[COPY1]](s32)
@@ -390,7 +390,7 @@ define i32 @caller_float_ret() nounwind {
; RV32-ILP32F-LABEL: name: caller_float_ret
; RV32-ILP32F: bb.1 (%ir-block.0):
; RV32-ILP32F-NEXT: ADJCALLSTACKDOWN 0, 0, implicit-def $x2, implicit $x2
- ; RV32-ILP32F-NEXT: PseudoCALL target-flags(riscv-plt) @callee_float_ret, csr_ilp32f_lp64f, implicit-def $x1, implicit-def $f10_f
+ ; RV32-ILP32F-NEXT: PseudoCALL target-flags(riscv-call) @callee_float_ret, csr_ilp32f_lp64f, implicit-def $x1, implicit-def $f10_f
; RV32-ILP32F-NEXT: ADJCALLSTACKUP 0, 0, implicit-def $x2, implicit $x2
; RV32-ILP32F-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $f10_f
; RV32-ILP32F-NEXT: $x10 = COPY [[COPY]](s32)
@@ -399,7 +399,7 @@ define i32 @caller_float_ret() nounwind {
; RV32-ILP32D-LABEL: name: caller_float_ret
; RV32-ILP32D: bb.1 (%ir-block.0):
; RV32-ILP32D-NEXT: ADJCALLSTACKDOWN 0, 0, implicit-def $x2, implicit $x2
- ; RV32-ILP32D-NEXT: PseudoCALL target-flags(riscv-plt) @callee_float_ret, csr_ilp32d_lp64d, implicit-def $x1, implicit-def $f10_f
+ ; RV32-ILP32D-NEXT: PseudoCALL target-flags(riscv-call) @callee_float_ret, csr_ilp32d_lp64d, implicit-def $x1, implicit-def $f10_f
; RV32-ILP32D-NEXT: ADJCALLSTACKUP 0, 0, implicit-def $x2, implicit $x2
; RV32-ILP32D-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $f10_f
; RV32-ILP32D-NEXT: $x10 = COPY [[COPY]](s32)
diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/irtranslator/calling-conv-lp64-lp64f-common.ll b/llvm/test/CodeGen/RISCV/GlobalISel/irtranslator/calling-conv-lp64-lp64f-common.ll
index 72f523f089da..e4d1d3132ea4 100644
--- a/llvm/test/CodeGen/RISCV/GlobalISel/irtranslator/calling-conv-lp64-lp64f-common.ll
+++ b/llvm/test/CodeGen/RISCV/GlobalISel/irtranslator/calling-conv-lp64-lp64f-common.ll
@@ -33,7 +33,7 @@ define i64 @caller_double_in_regs() nounwind {
; LP64-NEXT: ADJCALLSTACKDOWN 0, 0, implicit-def $x2, implicit $x2
; LP64-NEXT: $x10 = COPY [[C]](s64)
; LP64-NEXT: $x11 = COPY [[C1]](s64)
- ; LP64-NEXT: PseudoCALL target-flags(riscv-plt) @callee_double_in_regs, csr_ilp32_lp64, implicit-def $x1, implicit $x10, implicit $x11, implicit-def $x10
+ ; LP64-NEXT: PseudoCALL target-flags(riscv-call) @callee_double_in_regs, csr_ilp32_lp64, implicit-def $x1, implicit $x10, implicit $x11, implicit-def $x10
; LP64-NEXT: ADJCALLSTACKUP 0, 0, implicit-def $x2, implicit $x2
; LP64-NEXT: [[COPY:%[0-9]+]]:_(s64) = COPY $x10
; LP64-NEXT: $x10 = COPY [[COPY]](s64)
@@ -46,7 +46,7 @@ define i64 @caller_double_in_regs() nounwind {
; LP64F-NEXT: ADJCALLSTACKDOWN 0, 0, implicit-def $x2, implicit $x2
; LP64F-NEXT: $x10 = COPY [[C]](s64)
; LP64F-NEXT: $x11 = COPY [[C1]](s64)
- ; LP64F-NEXT: PseudoCALL target-flags(riscv-plt) @callee_double_in_regs, csr_ilp32f_lp64f, implicit-def $x1, implicit $x10, implicit $x11, implicit-def $x10
+ ; LP64F-NEXT: PseudoCALL target-flags(riscv-call) @callee_double_in_regs, csr_ilp32f_lp64f, implicit-def $x1, implicit $x10, implicit $x11, implicit-def $x10
; LP64F-NEXT: ADJCALLSTACKUP 0, 0, implicit-def $x2, implicit $x2
; LP64F-NEXT: [[COPY:%[0-9]+]]:_(s64) = COPY $x10
; LP64F-NEXT: $x10 = COPY [[COPY]](s64)
@@ -68,7 +68,7 @@ define i64 @caller_double_ret() nounwind {
; LP64-LABEL: name: caller_double_ret
; LP64: bb.1 (%ir-block.0):
; LP64-NEXT: ADJCALLSTACKDOWN 0, 0, implicit-def $x2, implicit $x2
- ; LP64-NEXT: PseudoCALL target-flags(riscv-plt) @callee_double_ret, csr_ilp32_lp64, implicit-def $x1, implicit-def $x10
+ ; LP64-NEXT: PseudoCALL target-flags(riscv-call) @callee_double_ret, csr_ilp32_lp64, implicit-def $x1, implicit-def $x10
; LP64-NEXT: ADJCALLSTACKUP 0, 0, implicit-def $x2, implicit $x2
; LP64-NEXT: [[COPY:%[0-9]+]]:_(s64) = COPY $x10
; LP64-NEXT: $x10 = COPY [[COPY]](s64)
@@ -77,7 +77,7 @@ define i64 @caller_double_ret() nounwind {
; LP64F-LABEL: name: caller_double_ret
; LP64F: bb.1 (%ir-block.0):
; LP64F-NEXT: ADJCALLSTACKDOWN 0, 0, implicit-def $x2, implicit $x2
- ; LP64F-NEXT: PseudoCALL target-flags(riscv-plt) @callee_double_ret, csr_ilp32f_lp64f, implicit-def $x1, implicit-def $x10
+ ; LP64F-NEXT: PseudoCALL target-flags(riscv-call) @callee_double_ret, csr_ilp32f_lp64f, implicit-def $x1, implicit-def $x10
; LP64F-NEXT: ADJCALLSTACKUP 0, 0, implicit-def $x2, implicit $x2
; LP64F-NEXT: [[COPY:%[0-9]+]]:_(s64) = COPY $x10
; LP64F-NEXT: $x10 = COPY [[COPY]](s64)
diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/irtranslator/calling-conv-lp64-lp64f-lp64d-common.ll b/llvm/test/CodeGen/RISCV/GlobalISel/irtranslator/calling-conv-lp64-lp64f-lp64d-common.ll
index d55c0140b831..b175b8d92e6c 100644
--- a/llvm/test/CodeGen/RISCV/GlobalISel/irtranslator/calling-conv-lp64-lp64f-lp64d-common.ll
+++ b/llvm/test/CodeGen/RISCV/GlobalISel/irtranslator/calling-conv-lp64-lp64f-lp64d-common.ll
@@ -44,7 +44,7 @@ define i64 @caller_i128_in_regs() nounwind {
; LP64-NEXT: $x10 = COPY [[C]](s64)
; LP64-NEXT: $x11 = COPY [[UV]](s64)
; LP64-NEXT: $x12 = COPY [[UV1]](s64)
- ; LP64-NEXT: PseudoCALL target-flags(riscv-plt) @callee_i128_in_regs, csr_ilp32_lp64, implicit-def $x1, implicit $x10, implicit $x11, implicit $x12, implicit-def $x10
+ ; LP64-NEXT: PseudoCALL target-flags(riscv-call) @callee_i128_in_regs, csr_ilp32_lp64, implicit-def $x1, implicit $x10, implicit $x11, implicit $x12, implicit-def $x10
; LP64-NEXT: ADJCALLSTACKUP 0, 0, implicit-def $x2, implicit $x2
; LP64-NEXT: [[COPY:%[0-9]+]]:_(s64) = COPY $x10
; LP64-NEXT: $x10 = COPY [[COPY]](s64)
@@ -59,7 +59,7 @@ define i64 @caller_i128_in_regs() nounwind {
; LP64F-NEXT: $x10 = COPY [[C]](s64)
; LP64F-NEXT: $x11 = COPY [[UV]](s64)
; LP64F-NEXT: $x12 = COPY [[UV1]](s64)
- ; LP64F-NEXT: PseudoCALL target-flags(riscv-plt) @callee_i128_in_regs, csr_ilp32f_lp64f, implicit-def $x1, implicit $x10, implicit $x11, implicit $x12, implicit-def $x10
+ ; LP64F-NEXT: PseudoCALL target-flags(riscv-call) @callee_i128_in_regs, csr_ilp32f_lp64f, implicit-def $x1, implicit $x10, implicit $x11, implicit $x12, implicit-def $x10
; LP64F-NEXT: ADJCALLSTACKUP 0, 0, implicit-def $x2, implicit $x2
; LP64F-NEXT: [[COPY:%[0-9]+]]:_(s64) = COPY $x10
; LP64F-NEXT: $x10 = COPY [[COPY]](s64)
@@ -74,7 +74,7 @@ define i64 @caller_i128_in_regs() nounwind {
; LP64D-NEXT: $x10 = COPY [[C]](s64)
; LP64D-NEXT: $x11 = COPY [[UV]](s64)
; LP64D-NEXT: $x12 = COPY [[UV1]](s64)
- ; LP64D-NEXT: PseudoCALL target-flags(riscv-plt) @callee_i128_in_regs, csr_ilp32d_lp64d, implicit-def $x1, implicit $x10, implicit $x11, implicit $x12, implicit-def $x10
+ ; LP64D-NEXT: PseudoCALL target-flags(riscv-call) @callee_i128_in_regs, csr_ilp32d_lp64d, implicit-def $x1, implicit $x10, implicit $x11, implicit $x12, implicit-def $x10
; LP64D-NEXT: ADJCALLSTACKUP 0, 0, implicit-def $x2, implicit $x2
; LP64D-NEXT: [[COPY:%[0-9]+]]:_(s64) = COPY $x10
; LP64D-NEXT: $x10 = COPY [[COPY]](s64)
@@ -171,7 +171,7 @@ define i32 @caller_many_scalars() nounwind {
; LP64-NEXT: $x15 = COPY [[ANYEXT3]](s64)
; LP64-NEXT: $x16 = COPY [[ANYEXT4]](s64)
; LP64-NEXT: $x17 = COPY [[UV2]](s64)
- ; LP64-NEXT: PseudoCALL target-flags(riscv-plt) @callee_many_scalars, csr_ilp32_lp64, implicit-def $x1, implicit $x10, implicit $x11, implicit $x12, implicit $x13, implicit $x14, implicit $x15, implicit $x16, implicit $x17, implicit-def $x10
+ ; LP64-NEXT: PseudoCALL target-flags(riscv-call) @callee_many_scalars, csr_ilp32_lp64, implicit-def $x1, implicit $x10, implicit $x11, implicit $x12, implicit $x13, implicit $x14, implicit $x15, implicit $x16, implicit $x17, implicit-def $x10
; LP64-NEXT: ADJCALLSTACKUP 16, 0, implicit-def $x2, implicit $x2
; LP64-NEXT: [[COPY1:%[0-9]+]]:_(s64) = COPY $x10
; LP64-NEXT: [[TRUNC:%[0-9]+]]:_(s32) = G_TRUNC [[COPY1]](s64)
@@ -213,7 +213,7 @@ define i32 @caller_many_scalars() nounwind {
; LP64F-NEXT: $x15 = COPY [[ANYEXT3]](s64)
; LP64F-NEXT: $x16 = COPY [[ANYEXT4]](s64)
; LP64F-NEXT: $x17 = COPY [[UV2]](s64)
- ; LP64F-NEXT: PseudoCALL target-flags(riscv-plt) @callee_many_scalars, csr_ilp32f_lp64f, implicit-def $x1, implicit $x10, implicit $x11, implicit $x12, implicit $x13, implicit $x14, implicit $x15, implicit $x16, implicit $x17, implicit-def $x10
+ ; LP64F-NEXT: PseudoCALL target-flags(riscv-call) @callee_many_scalars, csr_ilp32f_lp64f, implicit-def $x1, implicit $x10, implicit $x11, implicit $x12, implicit $x13, implicit $x14, implicit $x15, implicit $x16, implicit $x17, implicit-def $x10
; LP64F-NEXT: ADJCALLSTACKUP 16, 0, implicit-def $x2, implicit $x2
; LP64F-NEXT: [[COPY1:%[0-9]+]]:_(s64) = COPY $x10
; LP64F-NEXT: [[TRUNC:%[0-9]+]]:_(s32) = G_TRUNC [[COPY1]](s64)
@@ -255,7 +255,7 @@ define i32 @caller_many_scalars() nounwind {
; LP64D-NEXT: $x15 = COPY [[ANYEXT3]](s64)
; LP64D-NEXT: $x16 = COPY [[ANYEXT4]](s64)
; LP64D-NEXT: $x17 = COPY [[UV2]](s64)
- ; LP64D-NEXT: PseudoCALL target-flags(riscv-plt) @callee_many_scalars, csr_ilp32d_lp64d, implicit-def $x1, implicit $x10, implicit $x11, implicit $x12, implicit $x13, implicit $x14, implicit $x15, implicit $x16, implicit $x17, implicit-def $x10
+ ; LP64D-NEXT: PseudoCALL target-flags(riscv-call) @callee_many_scalars, csr_ilp32d_lp64d, implicit-def $x1, implicit $x10, implicit $x11, implicit $x12, implicit $x13, implicit $x14, implicit $x15, implicit $x16, implicit $x17, implicit-def $x10
; LP64D-NEXT: ADJCALLSTACKUP 16, 0, implicit-def $x2, implicit $x2
; LP64D-NEXT: [[COPY1:%[0-9]+]]:_(s64) = COPY $x10
; LP64D-NEXT: [[TRUNC:%[0-9]+]]:_(s32) = G_TRUNC [[COPY1]](s64)
@@ -284,7 +284,7 @@ define i64 @caller_small_scalar_ret() nounwind {
; LP64: bb.1 (%ir-block.0):
; LP64-NEXT: [[C:%[0-9]+]]:_(s128) = G_CONSTANT i128 -2
; LP64-NEXT: ADJCALLSTACKDOWN 0, 0, implicit-def $x2, implicit $x2
- ; LP64-NEXT: PseudoCALL target-flags(riscv-plt) @callee_small_scalar_ret, csr_ilp32_lp64, implicit-def $x1, implicit-def $x10, implicit-def $x11
+ ; LP64-NEXT: PseudoCALL target-flags(riscv-call) @callee_small_scalar_ret, csr_ilp32_lp64, implicit-def $x1, implicit-def $x10, implicit-def $x11
; LP64-NEXT: ADJCALLSTACKUP 0, 0, implicit-def $x2, implicit $x2
; LP64-NEXT: [[COPY:%[0-9]+]]:_(s64) = COPY $x10
; LP64-NEXT: [[COPY1:%[0-9]+]]:_(s64) = COPY $x11
@@ -298,7 +298,7 @@ define i64 @caller_small_scalar_ret() nounwind {
; LP64F: bb.1 (%ir-block.0):
; LP64F-NEXT: [[C:%[0-9]+]]:_(s128) = G_CONSTANT i128 -2
; LP64F-NEXT: ADJCALLSTACKDOWN 0, 0, implicit-def $x2, implicit $x2
- ; LP64F-NEXT: PseudoCALL target-flags(riscv-plt) @callee_small_scalar_ret, csr_ilp32f_lp64f, implicit-def $x1, implicit-def $x10, implicit-def $x11
+ ; LP64F-NEXT: PseudoCALL target-flags(riscv-call) @callee_small_scalar_ret, csr_ilp32f_lp64f, implicit-def $x1, implicit-def $x10, implicit-def $x11
; LP64F-NEXT: ADJCALLSTACKUP 0, 0, implicit-def $x2, implicit $x2
; LP64F-NEXT: [[COPY:%[0-9]+]]:_(s64) = COPY $x10
; LP64F-NEXT: [[COPY1:%[0-9]+]]:_(s64) = COPY $x11
@@ -312,7 +312,7 @@ define i64 @caller_small_scalar_ret() nounwind {
; LP64D: bb.1 (%ir-block.0):
; LP64D-NEXT: [[C:%[0-9]+]]:_(s128) = G_CONSTANT i128 -2
; LP64D-NEXT: ADJCALLSTACKDOWN 0, 0, implicit-def $x2, implicit $x2
- ; LP64D-NEXT: PseudoCALL target-flags(riscv-plt) @callee_small_scalar_ret, csr_ilp32d_lp64d, implicit-def $x1, implicit-def $x10, implicit-def $x11
+ ; LP64D-NEXT: PseudoCALL target-flags(riscv-call) @callee_small_scalar_ret, csr_ilp32d_lp64d, implicit-def $x1, implicit-def $x10, implicit-def $x11
; LP64D-NEXT: ADJCALLSTACKUP 0, 0, implicit-def $x2, implicit $x2
; LP64D-NEXT: [[COPY:%[0-9]+]]:_(s64) = COPY $x10
; LP64D-NEXT: [[COPY1:%[0-9]+]]:_(s64) = COPY $x11
@@ -346,7 +346,7 @@ define i64 @caller_small_struct_ret() nounwind {
; LP64-LABEL: name: caller_small_struct_ret
; LP64: bb.1 (%ir-block.0):
; LP64-NEXT: ADJCALLSTACKDOWN 0, 0, implicit-def $x2, implicit $x2
- ; LP64-NEXT: PseudoCALL target-flags(riscv-plt) @callee_small_struct_ret, csr_ilp32_lp64, implicit-def $x1, implicit-def $x10, implicit-def $x11
+ ; LP64-NEXT: PseudoCALL target-flags(riscv-call) @callee_small_struct_ret, csr_ilp32_lp64, implicit-def $x1, implicit-def $x10, implicit-def $x11
; LP64-NEXT: ADJCALLSTACKUP 0, 0, implicit-def $x2, implicit $x2
; LP64-NEXT: [[COPY:%[0-9]+]]:_(s64) = COPY $x10
; LP64-NEXT: [[COPY1:%[0-9]+]]:_(p0) = COPY $x11
@@ -358,7 +358,7 @@ define i64 @caller_small_struct_ret() nounwind {
; LP64F-LABEL: name: caller_small_struct_ret
; LP64F: bb.1 (%ir-block.0):
; LP64F-NEXT: ADJCALLSTACKDOWN 0, 0, implicit-def $x2, implicit $x2
- ; LP64F-NEXT: PseudoCALL target-flags(riscv-plt) @callee_small_struct_ret, csr_ilp32f_lp64f, implicit-def $x1, implicit-def $x10, implicit-def $x11
+ ; LP64F-NEXT: PseudoCALL target-flags(riscv-call) @callee_small_struct_ret, csr_ilp32f_lp64f, implicit-def $x1, implicit-def $x10, implicit-def $x11
; LP64F-NEXT: ADJCALLSTACKUP 0, 0, implicit-def $x2, implicit $x2
; LP64F-NEXT: [[COPY:%[0-9]+]]:_(s64) = COPY $x10
; LP64F-NEXT: [[COPY1:%[0-9]+]]:_(p0) = COPY $x11
@@ -370,7 +370,7 @@ define i64 @caller_small_struct_ret() nounwind {
; LP64D-LABEL: name: caller_small_struct_ret
; LP64D: bb.1 (%ir-block.0):
; LP64D-NEXT: ADJCALLSTACKDOWN 0, 0, implicit-def $x2, implicit $x2
- ; LP64D-NEXT: PseudoCALL target-flags(riscv-plt) @callee_small_struct_ret, csr_ilp32d_lp64d, implicit-def $x1, implicit-def $x10, implicit-def $x11
+ ; LP64D-NEXT: PseudoCALL target-flags(riscv-call) @callee_small_struct_ret, csr_ilp32d_lp64d, implicit-def $x1, implicit-def $x10, implicit-def $x11
; LP64D-NEXT: ADJCALLSTACKUP 0, 0, implicit-def $x2, implicit $x2
; LP64D-NEXT: [[COPY:%[0-9]+]]:_(s64) = COPY $x10
; LP64D-NEXT: [[COPY1:%[0-9]+]]:_(p0) = COPY $x11
@@ -427,7 +427,7 @@ define i64 @caller_large_struct_ret() nounwind {
; LP64-NEXT: [[FRAME_INDEX:%[0-9]+]]:_(p0) = G_FRAME_INDEX %stack.0
; LP64-NEXT: ADJCALLSTACKDOWN 0, 0, implicit-def $x2, implicit $x2
; LP64-NEXT: $x10 = COPY [[FRAME_INDEX]](p0)
- ; LP64-NEXT: PseudoCALL target-flags(riscv-plt) @callee_large_struct_ret, csr_ilp32_lp64, implicit-def $x1, implicit $x10
+ ; LP64-NEXT: PseudoCALL target-flags(riscv-call) @callee_large_struct_ret, csr_ilp32_lp64, implicit-def $x1, implicit $x10
; LP64-NEXT: ADJCALLSTACKUP 0, 0, implicit-def $x2, implicit $x2
; LP64-NEXT: [[LOAD:%[0-9]+]]:_(s64) = G_LOAD [[FRAME_INDEX]](p0) :: (dereferenceable load (s64) from %ir.1)
; LP64-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 24
@@ -442,7 +442,7 @@ define i64 @caller_large_struct_ret() nounwind {
; LP64F-NEXT: [[FRAME_INDEX:%[0-9]+]]:_(p0) = G_FRAME_INDEX %stack.0
; LP64F-NEXT: ADJCALLSTACKDOWN 0, 0, implicit-def $x2, implicit $x2
; LP64F-NEXT: $x10 = COPY [[FRAME_INDEX]](p0)
- ; LP64F-NEXT: PseudoCALL target-flags(riscv-plt) @callee_large_struct_ret, csr_ilp32f_lp64f, implicit-def $x1, implicit $x10
+ ; LP64F-NEXT: PseudoCALL target-flags(riscv-call) @callee_large_struct_ret, csr_ilp32f_lp64f, implicit-def $x1, implicit $x10
; LP64F-NEXT: ADJCALLSTACKUP 0, 0, implicit-def $x2, implicit $x2
; LP64F-NEXT: [[LOAD:%[0-9]+]]:_(s64) = G_LOAD [[FRAME_INDEX]](p0) :: (dereferenceable load (s64) from %ir.1)
; LP64F-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 24
@@ -457,7 +457,7 @@ define i64 @caller_large_struct_ret() nounwind {
; LP64D-NEXT: [[FRAME_INDEX:%[0-9]+]]:_(p0) = G_FRAME_INDEX %stack.0
; LP64D-NEXT: ADJCALLSTACKDOWN 0, 0, implicit-def $x2, implicit $x2
; LP64D-NEXT: $x10 = COPY [[FRAME_INDEX]](p0)
- ; LP64D-NEXT: PseudoCALL target-flags(riscv-plt) @callee_large_struct_ret, csr_ilp32d_lp64d, implicit-def $x1, implicit $x10
+ ; LP64D-NEXT: PseudoCALL target-flags(riscv-call) @callee_large_struct_ret, csr_ilp32d_lp64d, implicit-def $x1, implicit $x10
; LP64D-NEXT: ADJCALLSTACKUP 0, 0, implicit-def $x2, implicit $x2
; LP64D-NEXT: [[LOAD:%[0-9]+]]:_(s64) = G_LOAD [[FRAME_INDEX]](p0) :: (dereferenceable load (s64) from %ir.1)
; LP64D-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 24
diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/irtranslator/calling-conv-lp64.ll b/llvm/test/CodeGen/RISCV/GlobalISel/irtranslator/calling-conv-lp64.ll
index 93b6747a779e..9283f1f090ed 100644
--- a/llvm/test/CodeGen/RISCV/GlobalISel/irtranslator/calling-conv-lp64.ll
+++ b/llvm/test/CodeGen/RISCV/GlobalISel/irtranslator/calling-conv-lp64.ll
@@ -37,7 +37,7 @@ define i64 @caller_float_in_regs() nounwind {
; RV64I-NEXT: [[ANYEXT:%[0-9]+]]:_(s64) = G_ANYEXT [[C1]](s32)
; RV64I-NEXT: $x10 = COPY [[C]](s64)
; RV64I-NEXT: $x11 = COPY [[ANYEXT]](s64)
- ; RV64I-NEXT: PseudoCALL target-flags(riscv-plt) @callee_float_in_regs, csr_ilp32_lp64, implicit-def $x1, implicit $x10, implicit $x11, implicit-def $x10
+ ; RV64I-NEXT: PseudoCALL target-flags(riscv-call) @callee_float_in_regs, csr_ilp32_lp64, implicit-def $x1, implicit $x10, implicit $x11, implicit-def $x10
; RV64I-NEXT: ADJCALLSTACKUP 0, 0, implicit-def $x2, implicit $x2
; RV64I-NEXT: [[COPY:%[0-9]+]]:_(s64) = COPY $x10
; RV64I-NEXT: $x10 = COPY [[COPY]](s64)
@@ -51,7 +51,7 @@ define i64 @caller_float_in_regs() nounwind {
; RV64F-NEXT: $x10 = COPY [[C]](s64)
; RV64F-NEXT: [[ANYEXT:%[0-9]+]]:_(s64) = G_ANYEXT [[C1]](s32)
; RV64F-NEXT: $x11 = COPY [[ANYEXT]](s64)
- ; RV64F-NEXT: PseudoCALL target-flags(riscv-plt) @callee_float_in_regs, csr_ilp32_lp64, implicit-def $x1, implicit $x10, implicit $x11, implicit-def $x10
+ ; RV64F-NEXT: PseudoCALL target-flags(riscv-call) @callee_float_in_regs, csr_ilp32_lp64, implicit-def $x1, implicit $x10, implicit $x11, implicit-def $x10
; RV64F-NEXT: ADJCALLSTACKUP 0, 0, implicit-def $x2, implicit $x2
; RV64F-NEXT: [[COPY:%[0-9]+]]:_(s64) = COPY $x10
; RV64F-NEXT: $x10 = COPY [[COPY]](s64)
@@ -74,7 +74,7 @@ define i64 @caller_tiny_scalar_ret() nounwind {
; RV64-LABEL: name: caller_tiny_scalar_ret
; RV64: bb.1 (%ir-block.0):
; RV64-NEXT: ADJCALLSTACKDOWN 0, 0, implicit-def $x2, implicit $x2
- ; RV64-NEXT: PseudoCALL target-flags(riscv-plt) @callee_tiny_scalar_ret, csr_ilp32_lp64, implicit-def $x1, implicit-def $x10
+ ; RV64-NEXT: PseudoCALL target-flags(riscv-call) @callee_tiny_scalar_ret, csr_ilp32_lp64, implicit-def $x1, implicit-def $x10
; RV64-NEXT: ADJCALLSTACKUP 0, 0, implicit-def $x2, implicit $x2
; RV64-NEXT: [[COPY:%[0-9]+]]:_(s64) = COPY $x10
; RV64-NEXT: [[TRUNC:%[0-9]+]]:_(s32) = G_TRUNC [[COPY]](s64)
diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/irtranslator/calling-conv-lp64d.ll b/llvm/test/CodeGen/RISCV/GlobalISel/irtranslator/calling-conv-lp64d.ll
index 81ff2fcadc74..3d7ae6802fc4 100644
--- a/llvm/test/CodeGen/RISCV/GlobalISel/irtranslator/calling-conv-lp64d.ll
+++ b/llvm/test/CodeGen/RISCV/GlobalISel/irtranslator/calling-conv-lp64d.ll
@@ -30,7 +30,7 @@ define i64 @caller_double_in_regs() nounwind {
; RV64I-NEXT: ADJCALLSTACKDOWN 0, 0, implicit-def $x2, implicit $x2
; RV64I-NEXT: $x10 = COPY [[C]](s64)
; RV64I-NEXT: $f10_d = COPY [[C1]](s64)
- ; RV64I-NEXT: PseudoCALL target-flags(riscv-plt) @callee_double_in_regs, csr_ilp32d_lp64d, implicit-def $x1, implicit $x10, implicit $f10_d, implicit-def $x10
+ ; RV64I-NEXT: PseudoCALL target-flags(riscv-call) @callee_double_in_regs, csr_ilp32d_lp64d, implicit-def $x1, implicit $x10, implicit $f10_d, implicit-def $x10
; RV64I-NEXT: ADJCALLSTACKUP 0, 0, implicit-def $x2, implicit $x2
; RV64I-NEXT: [[COPY:%[0-9]+]]:_(s64) = COPY $x10
; RV64I-NEXT: $x10 = COPY [[COPY]](s64)
@@ -96,7 +96,7 @@ define i64 @caller_double_in_fpr_exhausted_gprs() nounwind {
; RV64I-NEXT: $x16 = COPY [[UV6]](s64)
; RV64I-NEXT: $x17 = COPY [[UV7]](s64)
; RV64I-NEXT: $f10_d = COPY [[C5]](s64)
- ; RV64I-NEXT: PseudoCALL target-flags(riscv-plt) @callee_double_in_fpr_exhausted_gprs, csr_ilp32d_lp64d, implicit-def $x1, implicit $x10, implicit $x11, implicit $x12, implicit $x13, implicit $x14, implicit $x15, implicit $x16, implicit $x17, implicit $f10_d, implicit-def $x10
+ ; RV64I-NEXT: PseudoCALL target-flags(riscv-call) @callee_double_in_fpr_exhausted_gprs, csr_ilp32d_lp64d, implicit-def $x1, implicit $x10, implicit $x11, implicit $x12, implicit $x13, implicit $x14, implicit $x15, implicit $x16, implicit $x17, implicit $f10_d, implicit-def $x10
; RV64I-NEXT: ADJCALLSTACKUP 8, 0, implicit-def $x2, implicit $x2
; RV64I-NEXT: [[COPY1:%[0-9]+]]:_(s64) = COPY $x10
; RV64I-NEXT: $x10 = COPY [[COPY1]](s64)
@@ -155,7 +155,7 @@ define i32 @caller_double_in_gpr_exhausted_fprs() nounwind {
; RV64I-NEXT: $f16_d = COPY [[C6]](s64)
; RV64I-NEXT: $f17_d = COPY [[C7]](s64)
; RV64I-NEXT: $x10 = COPY [[C8]](s64)
- ; RV64I-NEXT: PseudoCALL target-flags(riscv-plt) @callee_double_in_gpr_exhausted_fprs, csr_ilp32d_lp64d, implicit-def $x1, implicit $f10_d, implicit $f11_d, implicit $f12_d, implicit $f13_d, implicit $f14_d, implicit $f15_d, implicit $f16_d, implicit $f17_d, implicit $x10, implicit-def $x10
+ ; RV64I-NEXT: PseudoCALL target-flags(riscv-call) @callee_double_in_gpr_exhausted_fprs, csr_ilp32d_lp64d, implicit-def $x1, implicit $f10_d, implicit $f11_d, implicit $f12_d, implicit $f13_d, implicit $f14_d, implicit $f15_d, implicit $f16_d, implicit $f17_d, implicit $x10, implicit-def $x10
; RV64I-NEXT: ADJCALLSTACKUP 0, 0, implicit-def $x2, implicit $x2
; RV64I-NEXT: [[COPY:%[0-9]+]]:_(s64) = COPY $x10
; RV64I-NEXT: [[TRUNC:%[0-9]+]]:_(s32) = G_TRUNC [[COPY]](s64)
@@ -248,7 +248,7 @@ define i64 @caller_double_on_stack_exhausted_gprs_fprs() nounwind {
; RV64I-NEXT: $f15_d = COPY [[C9]](s64)
; RV64I-NEXT: $f16_d = COPY [[C10]](s64)
; RV64I-NEXT: $f17_d = COPY [[C11]](s64)
- ; RV64I-NEXT: PseudoCALL target-flags(riscv-plt) @callee_double_on_stack_exhausted_gprs_fprs, csr_ilp32d_lp64d, implicit-def $x1, implicit $x10, implicit $x11, implicit $f10_d, implicit $x12, implicit $x13, implicit $f11_d, implicit $x14, implicit $x15, implicit $f12_d, implicit $x16, implicit $x17, implicit $f13_d, implicit $f14_d, implicit $f15_d, implicit $f16_d, implicit $f17_d, implicit-def $x10
+ ; RV64I-NEXT: PseudoCALL target-flags(riscv-call) @callee_double_on_stack_exhausted_gprs_fprs, csr_ilp32d_lp64d, implicit-def $x1, implicit $x10, implicit $x11, implicit $f10_d, implicit $x12, implicit $x13, implicit $f11_d, implicit $x14, implicit $x15, implicit $f12_d, implicit $x16, implicit $x17, implicit $f13_d, implicit $f14_d, implicit $f15_d, implicit $f16_d, implicit $f17_d, implicit-def $x10
; RV64I-NEXT: ADJCALLSTACKUP 8, 0, implicit-def $x2, implicit $x2
; RV64I-NEXT: [[COPY1:%[0-9]+]]:_(s64) = COPY $x10
; RV64I-NEXT: $x10 = COPY [[COPY1]](s64)
@@ -272,7 +272,7 @@ define i64 @caller_double_ret() nounwind {
; RV64I-LABEL: name: caller_double_ret
; RV64I: bb.1 (%ir-block.0):
; RV64I-NEXT: ADJCALLSTACKDOWN 0, 0, implicit-def $x2, implicit $x2
- ; RV64I-NEXT: PseudoCALL target-flags(riscv-plt) @callee_double_ret, csr_ilp32d_lp64d, implicit-def $x1, implicit-def $f10_d
+ ; RV64I-NEXT: PseudoCALL target-flags(riscv-call) @callee_double_ret, csr_ilp32d_lp64d, implicit-def $x1, implicit-def $f10_d
; RV64I-NEXT: ADJCALLSTACKUP 0, 0, implicit-def $x2, implicit $x2
; RV64I-NEXT: [[COPY:%[0-9]+]]:_(s64) = COPY $f10_d
; RV64I-NEXT: $x10 = COPY [[COPY]](s64)
diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/irtranslator/calls.ll b/llvm/test/CodeGen/RISCV/GlobalISel/irtranslator/calls.ll
index e7e093f7110b..b06b539ded19 100644
--- a/llvm/test/CodeGen/RISCV/GlobalISel/irtranslator/calls.ll
+++ b/llvm/test/CodeGen/RISCV/GlobalISel/irtranslator/calls.ll
@@ -11,14 +11,14 @@ define void @test_call_void_noargs() {
; RV32I-LABEL: name: test_call_void_noargs
; RV32I: bb.1.entry:
; RV32I-NEXT: ADJCALLSTACKDOWN 0, 0, implicit-def $x2, implicit $x2
- ; RV32I-NEXT: PseudoCALL target-flags(riscv-plt) @void_noargs, csr_ilp32_lp64, implicit-def $x1
+ ; RV32I-NEXT: PseudoCALL target-flags(riscv-call) @void_noargs, csr_ilp32_lp64, implicit-def $x1
; RV32I-NEXT: ADJCALLSTACKUP 0, 0, implicit-def $x2, implicit $x2
; RV32I-NEXT: PseudoRET
;
; RV64I-LABEL: name: test_call_void_noargs
; RV64I: bb.1.entry:
; RV64I-NEXT: ADJCALLSTACKDOWN 0, 0, implicit-def $x2, implicit $x2
- ; RV64I-NEXT: PseudoCALL target-flags(riscv-plt) @void_noargs, csr_ilp32_lp64, implicit-def $x1
+ ; RV64I-NEXT: PseudoCALL target-flags(riscv-call) @void_noargs, csr_ilp32_lp64, implicit-def $x1
; RV64I-NEXT: ADJCALLSTACKUP 0, 0, implicit-def $x2, implicit $x2
; RV64I-NEXT: PseudoRET
entry:
@@ -39,7 +39,7 @@ define void @test_call_void_args_i8() {
; RV32I-NEXT: [[ANYEXT1:%[0-9]+]]:_(s32) = G_ANYEXT [[C1]](s8)
; RV32I-NEXT: $x10 = COPY [[ANYEXT]](s32)
; RV32I-NEXT: $x11 = COPY [[ANYEXT1]](s32)
- ; RV32I-NEXT: PseudoCALL target-flags(riscv-plt) @void_args_i8, csr_ilp32_lp64, implicit-def $x1, implicit $x10, implicit $x11
+ ; RV32I-NEXT: PseudoCALL target-flags(riscv-call) @void_args_i8, csr_ilp32_lp64, implicit-def $x1, implicit $x10, implicit $x11
; RV32I-NEXT: ADJCALLSTACKUP 0, 0, implicit-def $x2, implicit $x2
; RV32I-NEXT: PseudoRET
;
@@ -52,7 +52,7 @@ define void @test_call_void_args_i8() {
; RV64I-NEXT: [[ANYEXT1:%[0-9]+]]:_(s64) = G_ANYEXT [[C1]](s8)
; RV64I-NEXT: $x10 = COPY [[ANYEXT]](s64)
; RV64I-NEXT: $x11 = COPY [[ANYEXT1]](s64)
- ; RV64I-NEXT: PseudoCALL target-flags(riscv-plt) @void_args_i8, csr_ilp32_lp64, implicit-def $x1, implicit $x10, implicit $x11
+ ; RV64I-NEXT: PseudoCALL target-flags(riscv-call) @void_args_i8, csr_ilp32_lp64, implicit-def $x1, implicit $x10, implicit $x11
; RV64I-NEXT: ADJCALLSTACKUP 0, 0, implicit-def $x2, implicit $x2
; RV64I-NEXT: PseudoRET
entry:
@@ -73,7 +73,7 @@ define void @test_call_void_args_i8_zext() {
; RV32I-NEXT: [[ZEXT1:%[0-9]+]]:_(s32) = G_ZEXT [[C1]](s8)
; RV32I-NEXT: $x10 = COPY [[ZEXT]](s32)
; RV32I-NEXT: $x11 = COPY [[ZEXT1]](s32)
- ; RV32I-NEXT: PseudoCALL target-flags(riscv-plt) @void_args_i8_zext, csr_ilp32_lp64, implicit-def $x1, implicit $x10, implicit $x11
+ ; RV32I-NEXT: PseudoCALL target-flags(riscv-call) @void_args_i8_zext, csr_ilp32_lp64, implicit-def $x1, implicit $x10, implicit $x11
; RV32I-NEXT: ADJCALLSTACKUP 0, 0, implicit-def $x2, implicit $x2
; RV32I-NEXT: PseudoRET
;
@@ -86,7 +86,7 @@ define void @test_call_void_args_i8_zext() {
; RV64I-NEXT: [[ZEXT1:%[0-9]+]]:_(s64) = G_ZEXT [[C1]](s8)
; RV64I-NEXT: $x10 = COPY [[ZEXT]](s64)
; RV64I-NEXT: $x11 = COPY [[ZEXT1]](s64)
- ; RV64I-NEXT: PseudoCALL target-flags(riscv-plt) @void_args_i8_zext, csr_ilp32_lp64, implicit-def $x1, implicit $x10, implicit $x11
+ ; RV64I-NEXT: PseudoCALL target-flags(riscv-call) @void_args_i8_zext, csr_ilp32_lp64, implicit-def $x1, implicit $x10, implicit $x11
; RV64I-NEXT: ADJCALLSTACKUP 0, 0, implicit-def $x2, implicit $x2
; RV64I-NEXT: PseudoRET
entry:
@@ -107,7 +107,7 @@ define void @test_call_void_args_i16_sext() {
; RV32I-NEXT: [[SEXT1:%[0-9]+]]:_(s32) = G_SEXT [[C1]](s16)
; RV32I-NEXT: $x10 = COPY [[SEXT]](s32)
; RV32I-NEXT: $x11 = COPY [[SEXT1]](s32)
- ; RV32I-NEXT: PseudoCALL target-flags(riscv-plt) @void_args_i16_sext, csr_ilp32_lp64, implicit-def $x1, implicit $x10, implicit $x11
+ ; RV32I-NEXT: PseudoCALL target-flags(riscv-call) @void_args_i16_sext, csr_ilp32_lp64, implicit-def $x1, implicit $x10, implicit $x11
; RV32I-NEXT: ADJCALLSTACKUP 0, 0, implicit-def $x2, implicit $x2
; RV32I-NEXT: PseudoRET
;
@@ -120,7 +120,7 @@ define void @test_call_void_args_i16_sext() {
; RV64I-NEXT: [[SEXT1:%[0-9]+]]:_(s64) = G_SEXT [[C1]](s16)
; RV64I-NEXT: $x10 = COPY [[SEXT]](s64)
; RV64I-NEXT: $x11 = COPY [[SEXT1]](s64)
- ; RV64I-NEXT: PseudoCALL target-flags(riscv-plt) @void_args_i16_sext, csr_ilp32_lp64, implicit-def $x1, implicit $x10, implicit $x11
+ ; RV64I-NEXT: PseudoCALL target-flags(riscv-call) @void_args_i16_sext, csr_ilp32_lp64, implicit-def $x1, implicit $x10, implicit $x11
; RV64I-NEXT: ADJCALLSTACKUP 0, 0, implicit-def $x2, implicit $x2
; RV64I-NEXT: PseudoRET
entry:
@@ -139,7 +139,7 @@ define void @test_call_void_args_i32() {
; RV32I-NEXT: ADJCALLSTACKDOWN 0, 0, implicit-def $x2, implicit $x2
; RV32I-NEXT: $x10 = COPY [[C]](s32)
; RV32I-NEXT: $x11 = COPY [[C1]](s32)
- ; RV32I-NEXT: PseudoCALL target-flags(riscv-plt) @void_args_i32, csr_ilp32_lp64, implicit-def $x1, implicit $x10, implicit $x11
+ ; RV32I-NEXT: PseudoCALL target-flags(riscv-call) @void_args_i32, csr_ilp32_lp64, implicit-def $x1, implicit $x10, implicit $x11
; RV32I-NEXT: ADJCALLSTACKUP 0, 0, implicit-def $x2, implicit $x2
; RV32I-NEXT: PseudoRET
;
@@ -152,7 +152,7 @@ define void @test_call_void_args_i32() {
; RV64I-NEXT: [[ANYEXT1:%[0-9]+]]:_(s64) = G_ANYEXT [[C1]](s32)
; RV64I-NEXT: $x10 = COPY [[ANYEXT]](s64)
; RV64I-NEXT: $x11 = COPY [[ANYEXT1]](s64)
- ; RV64I-NEXT: PseudoCALL target-flags(riscv-plt) @void_args_i32, csr_ilp32_lp64, implicit-def $x1, implicit $x10, implicit $x11
+ ; RV64I-NEXT: PseudoCALL target-flags(riscv-call) @void_args_i32, csr_ilp32_lp64, implicit-def $x1, implicit $x10, implicit $x11
; RV64I-NEXT: ADJCALLSTACKUP 0, 0, implicit-def $x2, implicit $x2
; RV64I-NEXT: PseudoRET
entry:
@@ -175,7 +175,7 @@ define void @test_call_void_args_i64() {
; RV32I-NEXT: $x11 = COPY [[UV1]](s32)
; RV32I-NEXT: $x12 = COPY [[UV2]](s32)
; RV32I-NEXT: $x13 = COPY [[UV3]](s32)
- ; RV32I-NEXT: PseudoCALL target-flags(riscv-plt) @void_args_i64, csr_ilp32_lp64, implicit-def $x1, implicit $x10, implicit $x11, implicit $x12, implicit $x13
+ ; RV32I-NEXT: PseudoCALL target-flags(riscv-call) @void_args_i64, csr_ilp32_lp64, implicit-def $x1, implicit $x10, implicit $x11, implicit $x12, implicit $x13
; RV32I-NEXT: ADJCALLSTACKUP 0, 0, implicit-def $x2, implicit $x2
; RV32I-NEXT: PseudoRET
;
@@ -186,7 +186,7 @@ define void @test_call_void_args_i64() {
; RV64I-NEXT: ADJCALLSTACKDOWN 0, 0, implicit-def $x2, implicit $x2
; RV64I-NEXT: $x10 = COPY [[C]](s64)
; RV64I-NEXT: $x11 = COPY [[C1]](s64)
- ; RV64I-NEXT: PseudoCALL target-flags(riscv-plt) @void_args_i64, csr_ilp32_lp64, implicit-def $x1, implicit $x10, implicit $x11
+ ; RV64I-NEXT: PseudoCALL target-flags(riscv-call) @void_args_i64, csr_ilp32_lp64, implicit-def $x1, implicit $x10, implicit $x11
; RV64I-NEXT: ADJCALLSTACKUP 0, 0, implicit-def $x2, implicit $x2
; RV64I-NEXT: PseudoRET
entry:
@@ -201,7 +201,7 @@ define void @test_call_i8_noargs() {
; RV32I-LABEL: name: test_call_i8_noargs
; RV32I: bb.1.entry:
; RV32I-NEXT: ADJCALLSTACKDOWN 0, 0, implicit-def $x2, implicit $x2
- ; RV32I-NEXT: PseudoCALL target-flags(riscv-plt) @i8_noargs, csr_ilp32_lp64, implicit-def $x1, implicit-def $x10
+ ; RV32I-NEXT: PseudoCALL target-flags(riscv-call) @i8_noargs, csr_ilp32_lp64, implicit-def $x1, implicit-def $x10
; RV32I-NEXT: ADJCALLSTACKUP 0, 0, implicit-def $x2, implicit $x2
; RV32I-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $x10
; RV32I-NEXT: [[TRUNC:%[0-9]+]]:_(s8) = G_TRUNC [[COPY]](s32)
@@ -210,7 +210,7 @@ define void @test_call_i8_noargs() {
; RV64I-LABEL: name: test_call_i8_noargs
; RV64I: bb.1.entry:
; RV64I-NEXT: ADJCALLSTACKDOWN 0, 0, implicit-def $x2, implicit $x2
- ; RV64I-NEXT: PseudoCALL target-flags(riscv-plt) @i8_noargs, csr_ilp32_lp64, implicit-def $x1, implicit-def $x10
+ ; RV64I-NEXT: PseudoCALL target-flags(riscv-call) @i8_noargs, csr_ilp32_lp64, implicit-def $x1, implicit-def $x10
; RV64I-NEXT: ADJCALLSTACKUP 0, 0, implicit-def $x2, implicit $x2
; RV64I-NEXT: [[COPY:%[0-9]+]]:_(s64) = COPY $x10
; RV64I-NEXT: [[TRUNC:%[0-9]+]]:_(s8) = G_TRUNC [[COPY]](s64)
@@ -227,7 +227,7 @@ define void @test_call_i16_noargs() {
; RV32I-LABEL: name: test_call_i16_noargs
; RV32I: bb.1.entry:
; RV32I-NEXT: ADJCALLSTACKDOWN 0, 0, implicit-def $x2, implicit $x2
- ; RV32I-NEXT: PseudoCALL target-flags(riscv-plt) @i16_noargs, csr_ilp32_lp64, implicit-def $x1, implicit-def $x10
+ ; RV32I-NEXT: PseudoCALL target-flags(riscv-call) @i16_noargs, csr_ilp32_lp64, implicit-def $x1, implicit-def $x10
; RV32I-NEXT: ADJCALLSTACKUP 0, 0, implicit-def $x2, implicit $x2
; RV32I-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $x10
; RV32I-NEXT: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[COPY]](s32)
@@ -236,7 +236,7 @@ define void @test_call_i16_noargs() {
; RV64I-LABEL: name: test_call_i16_noargs
; RV64I: bb.1.entry:
; RV64I-NEXT: ADJCALLSTACKDOWN 0, 0, implicit-def $x2, implicit $x2
- ; RV64I-NEXT: PseudoCALL target-flags(riscv-plt) @i16_noargs, csr_ilp32_lp64, implicit-def $x1, implicit-def $x10
+ ; RV64I-NEXT: PseudoCALL target-flags(riscv-call) @i16_noargs, csr_ilp32_lp64, implicit-def $x1, implicit-def $x10
; RV64I-NEXT: ADJCALLSTACKUP 0, 0, implicit-def $x2, implicit $x2
; RV64I-NEXT: [[COPY:%[0-9]+]]:_(s64) = COPY $x10
; RV64I-NEXT: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[COPY]](s64)
@@ -253,7 +253,7 @@ define void @test_call_i32_noargs() {
; RV32I-LABEL: name: test_call_i32_noargs
; RV32I: bb.1.entry:
; RV32I-NEXT: ADJCALLSTACKDOWN 0, 0, implicit-def $x2, implicit $x2
- ; RV32I-NEXT: PseudoCALL target-flags(riscv-plt) @i32_noargs, csr_ilp32_lp64, implicit-def $x1, implicit-def $x10
+ ; RV32I-NEXT: PseudoCALL target-flags(riscv-call) @i32_noargs, csr_ilp32_lp64, implicit-def $x1, implicit-def $x10
; RV32I-NEXT: ADJCALLSTACKUP 0, 0, implicit-def $x2, implicit $x2
; RV32I-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $x10
; RV32I-NEXT: PseudoRET
@@ -261,7 +261,7 @@ define void @test_call_i32_noargs() {
; RV64I-LABEL: name: test_call_i32_noargs
; RV64I: bb.1.entry:
; RV64I-NEXT: ADJCALLSTACKDOWN 0, 0, implicit-def $x2, implicit $x2
- ; RV64I-NEXT: PseudoCALL target-flags(riscv-plt) @i32_noargs, csr_ilp32_lp64, implicit-def $x1, implicit-def $x10
+ ; RV64I-NEXT: PseudoCALL target-flags(riscv-call) @i32_noargs, csr_ilp32_lp64, implicit-def $x1, implicit-def $x10
; RV64I-NEXT: ADJCALLSTACKUP 0, 0, implicit-def $x2, implicit $x2
; RV64I-NEXT: [[COPY:%[0-9]+]]:_(s64) = COPY $x10
; RV64I-NEXT: [[TRUNC:%[0-9]+]]:_(s32) = G_TRUNC [[COPY]](s64)
@@ -278,7 +278,7 @@ define void @test_call_i64_noargs() {
; RV32I-LABEL: name: test_call_i64_noargs
; RV32I: bb.1.entry:
; RV32I-NEXT: ADJCALLSTACKDOWN 0, 0, implicit-def $x2, implicit $x2
- ; RV32I-NEXT: PseudoCALL target-flags(riscv-plt) @i64_noargs, csr_ilp32_lp64, implicit-def $x1, implicit-def $x10, implicit-def $x11
+ ; RV32I-NEXT: PseudoCALL target-flags(riscv-call) @i64_noargs, csr_ilp32_lp64, implicit-def $x1, implicit-def $x10, implicit-def $x11
; RV32I-NEXT: ADJCALLSTACKUP 0, 0, implicit-def $x2, implicit $x2
; RV32I-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $x10
; RV32I-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $x11
@@ -288,7 +288,7 @@ define void @test_call_i64_noargs() {
; RV64I-LABEL: name: test_call_i64_noargs
; RV64I: bb.1.entry:
; RV64I-NEXT: ADJCALLSTACKDOWN 0, 0, implicit-def $x2, implicit $x2
- ; RV64I-NEXT: PseudoCALL target-flags(riscv-plt) @i64_noargs, csr_ilp32_lp64, implicit-def $x1, implicit-def $x10
+ ; RV64I-NEXT: PseudoCALL target-flags(riscv-call) @i64_noargs, csr_ilp32_lp64, implicit-def $x1, implicit-def $x10
; RV64I-NEXT: ADJCALLSTACKUP 0, 0, implicit-def $x2, implicit $x2
; RV64I-NEXT: [[COPY:%[0-9]+]]:_(s64) = COPY $x10
; RV64I-NEXT: PseudoRET
@@ -303,7 +303,7 @@ define void @test_call_ptr_noargs() {
; RV32I-LABEL: name: test_call_ptr_noargs
; RV32I: bb.1.entry:
; RV32I-NEXT: ADJCALLSTACKDOWN 0, 0, implicit-def $x2, implicit $x2
- ; RV32I-NEXT: PseudoCALL target-flags(riscv-plt) @ptr_noargs, csr_ilp32_lp64, implicit-def $x1, implicit-def $x10
+ ; RV32I-NEXT: PseudoCALL target-flags(riscv-call) @ptr_noargs, csr_ilp32_lp64, implicit-def $x1, implicit-def $x10
; RV32I-NEXT: ADJCALLSTACKUP 0, 0, implicit-def $x2, implicit $x2
; RV32I-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
; RV32I-NEXT: PseudoRET
@@ -311,7 +311,7 @@ define void @test_call_ptr_noargs() {
; RV64I-LABEL: name: test_call_ptr_noargs
; RV64I: bb.1.entry:
; RV64I-NEXT: ADJCALLSTACKDOWN 0, 0, implicit-def $x2, implicit $x2
- ; RV64I-NEXT: PseudoCALL target-flags(riscv-plt) @ptr_noargs, csr_ilp32_lp64, implicit-def $x1, implicit-def $x10
+ ; RV64I-NEXT: PseudoCALL target-flags(riscv-call) @ptr_noargs, csr_ilp32_lp64, implicit-def $x1, implicit-def $x10
; RV64I-NEXT: ADJCALLSTACKUP 0, 0, implicit-def $x2, implicit $x2
; RV64I-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
; RV64I-NEXT: PseudoRET
@@ -326,7 +326,7 @@ define void @test_call_i32x2_noargs() {
; RV32I-LABEL: name: test_call_i32x2_noargs
; RV32I: bb.1.entry:
; RV32I-NEXT: ADJCALLSTACKDOWN 0, 0, implicit-def $x2, implicit $x2
- ; RV32I-NEXT: PseudoCALL target-flags(riscv-plt) @i32x2_noargs, csr_ilp32_lp64, implicit-def $x1, implicit-def $x10, implicit-def $x11
+ ; RV32I-NEXT: PseudoCALL target-flags(riscv-call) @i32x2_noargs, csr_ilp32_lp64, implicit-def $x1, implicit-def $x10, implicit-def $x11
; RV32I-NEXT: ADJCALLSTACKUP 0, 0, implicit-def $x2, implicit $x2
; RV32I-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $x10
; RV32I-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $x11
@@ -335,7 +335,7 @@ define void @test_call_i32x2_noargs() {
; RV64I-LABEL: name: test_call_i32x2_noargs
; RV64I: bb.1.entry:
; RV64I-NEXT: ADJCALLSTACKDOWN 0, 0, implicit-def $x2, implicit $x2
- ; RV64I-NEXT: PseudoCALL target-flags(riscv-plt) @i32x2_noargs, csr_ilp32_lp64, implicit-def $x1, implicit-def $x10, implicit-def $x11
+ ; RV64I-NEXT: PseudoCALL target-flags(riscv-call) @i32x2_noargs, csr_ilp32_lp64, implicit-def $x1, implicit-def $x10, implicit-def $x11
; RV64I-NEXT: ADJCALLSTACKUP 0, 0, implicit-def $x2, implicit $x2
; RV64I-NEXT: [[COPY:%[0-9]+]]:_(s64) = COPY $x10
; RV64I-NEXT: [[TRUNC:%[0-9]+]]:_(s32) = G_TRUNC [[COPY]](s64)
@@ -358,7 +358,7 @@ define void @test_void_byval_args() {
; RV32I-NEXT: [[GV:%[0-9]+]]:_(p0) = G_GLOBAL_VALUE @foo
; RV32I-NEXT: ADJCALLSTACKDOWN 0, 0, implicit-def $x2, implicit $x2
; RV32I-NEXT: $x10 = COPY [[GV]](p0)
- ; RV32I-NEXT: PseudoCALL target-flags(riscv-plt) @void_byval_args, csr_ilp32_lp64, implicit-def $x1, implicit $x10
+ ; RV32I-NEXT: PseudoCALL target-flags(riscv-call) @void_byval_args, csr_ilp32_lp64, implicit-def $x1, implicit $x10
; RV32I-NEXT: ADJCALLSTACKUP 0, 0, implicit-def $x2, implicit $x2
; RV32I-NEXT: PseudoRET
;
@@ -367,7 +367,7 @@ define void @test_void_byval_args() {
; RV64I-NEXT: [[GV:%[0-9]+]]:_(p0) = G_GLOBAL_VALUE @foo
; RV64I-NEXT: ADJCALLSTACKDOWN 0, 0, implicit-def $x2, implicit $x2
; RV64I-NEXT: $x10 = COPY [[GV]](p0)
- ; RV64I-NEXT: PseudoCALL target-flags(riscv-plt) @void_byval_args, csr_ilp32_lp64, implicit-def $x1, implicit $x10
+ ; RV64I-NEXT: PseudoCALL target-flags(riscv-call) @void_byval_args, csr_ilp32_lp64, implicit-def $x1, implicit $x10
; RV64I-NEXT: ADJCALLSTACKUP 0, 0, implicit-def $x2, implicit $x2
; RV64I-NEXT: PseudoRET
entry:
@@ -383,7 +383,7 @@ define void @test_void_sret_args() {
; RV32I-NEXT: [[GV:%[0-9]+]]:_(p0) = G_GLOBAL_VALUE @foo
; RV32I-NEXT: ADJCALLSTACKDOWN 0, 0, implicit-def $x2, implicit $x2
; RV32I-NEXT: $x10 = COPY [[GV]](p0)
- ; RV32I-NEXT: PseudoCALL target-flags(riscv-plt) @void_sret_args, csr_ilp32_lp64, implicit-def $x1, implicit $x10
+ ; RV32I-NEXT: PseudoCALL target-flags(riscv-call) @void_sret_args, csr_ilp32_lp64, implicit-def $x1, implicit $x10
; RV32I-NEXT: ADJCALLSTACKUP 0, 0, implicit-def $x2, implicit $x2
; RV32I-NEXT: PseudoRET
;
@@ -392,7 +392,7 @@ define void @test_void_sret_args() {
; RV64I-NEXT: [[GV:%[0-9]+]]:_(p0) = G_GLOBAL_VALUE @foo
; RV64I-NEXT: ADJCALLSTACKDOWN 0, 0, implicit-def $x2, implicit $x2
; RV64I-NEXT: $x10 = COPY [[GV]](p0)
- ; RV64I-NEXT: PseudoCALL target-flags(riscv-plt) @void_sret_args, csr_ilp32_lp64, implicit-def $x1, implicit $x10
+ ; RV64I-NEXT: PseudoCALL target-flags(riscv-call) @void_sret_args, csr_ilp32_lp64, implicit-def $x1, implicit $x10
; RV64I-NEXT: ADJCALLSTACKUP 0, 0, implicit-def $x2, implicit $x2
; RV64I-NEXT: PseudoRET
entry:
@@ -406,14 +406,14 @@ define void @test_call_external() {
; RV32I-LABEL: name: test_call_external
; RV32I: bb.1.entry:
; RV32I-NEXT: ADJCALLSTACKDOWN 0, 0, implicit-def $x2, implicit $x2
- ; RV32I-NEXT: PseudoCALL target-flags(riscv-plt) @external_function, csr_ilp32_lp64, implicit-def $x1
+ ; RV32I-NEXT: PseudoCALL target-flags(riscv-call) @external_function, csr_ilp32_lp64, implicit-def $x1
; RV32I-NEXT: ADJCALLSTACKUP 0, 0, implicit-def $x2, implicit $x2
; RV32I-NEXT: PseudoRET
;
; RV64I-LABEL: name: test_call_external
; RV64I: bb.1.entry:
; RV64I-NEXT: ADJCALLSTACKDOWN 0, 0, implicit-def $x2, implicit $x2
- ; RV64I-NEXT: PseudoCALL target-flags(riscv-plt) @external_function, csr_ilp32_lp64, implicit-def $x1
+ ; RV64I-NEXT: PseudoCALL target-flags(riscv-call) @external_function, csr_ilp32_lp64, implicit-def $x1
; RV64I-NEXT: ADJCALLSTACKUP 0, 0, implicit-def $x2, implicit $x2
; RV64I-NEXT: PseudoRET
entry:
@@ -427,14 +427,14 @@ define void @test_call_local() {
; RV32I-LABEL: name: test_call_local
; RV32I: bb.1.entry:
; RV32I-NEXT: ADJCALLSTACKDOWN 0, 0, implicit-def $x2, implicit $x2
- ; RV32I-NEXT: PseudoCALL target-flags(riscv-plt) @dso_local_function, csr_ilp32_lp64, implicit-def $x1
+ ; RV32I-NEXT: PseudoCALL target-flags(riscv-call) @dso_local_function, csr_ilp32_lp64, implicit-def $x1
; RV32I-NEXT: ADJCALLSTACKUP 0, 0, implicit-def $x2, implicit $x2
; RV32I-NEXT: PseudoRET
;
; RV64I-LABEL: name: test_call_local
; RV64I: bb.1.entry:
; RV64I-NEXT: ADJCALLSTACKDOWN 0, 0, implicit-def $x2, implicit $x2
- ; RV64I-NEXT: PseudoCALL target-flags(riscv-plt) @dso_local_function, csr_ilp32_lp64, implicit-def $x1
+ ; RV64I-NEXT: PseudoCALL target-flags(riscv-call) @dso_local_function, csr_ilp32_lp64, implicit-def $x1
; RV64I-NEXT: ADJCALLSTACKUP 0, 0, implicit-def $x2, implicit $x2
; RV64I-NEXT: PseudoRET
entry:
diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/irtranslator/vararg.ll b/llvm/test/CodeGen/RISCV/GlobalISel/irtranslator/vararg.ll
index ff30ebd3a8c7..d26b3ecff7d3 100644
--- a/llvm/test/CodeGen/RISCV/GlobalISel/irtranslator/vararg.ll
+++ b/llvm/test/CodeGen/RISCV/GlobalISel/irtranslator/vararg.ll
@@ -164,7 +164,7 @@ define i32 @va1_va_arg_alloca(ptr %fmt, ...) nounwind {
; ILP32-NEXT: [[DYN_STACKALLOC:%[0-9]+]]:_(p0) = G_DYN_STACKALLOC [[AND]](s32), 1
; ILP32-NEXT: ADJCALLSTACKDOWN 0, 0, implicit-def $x2, implicit $x2
; ILP32-NEXT: $x10 = COPY [[DYN_STACKALLOC]](p0)
- ; ILP32-NEXT: PseudoCALL target-flags(riscv-plt) @notdead, csr_ilp32_lp64, implicit-def $x1, implicit $x10
+ ; ILP32-NEXT: PseudoCALL target-flags(riscv-call) @notdead, csr_ilp32_lp64, implicit-def $x1, implicit $x10
; ILP32-NEXT: ADJCALLSTACKUP 0, 0, implicit-def $x2, implicit $x2
; ILP32-NEXT: $x10 = COPY [[VAARG]](s32)
; ILP32-NEXT: PseudoRET implicit $x10
@@ -209,7 +209,7 @@ define i32 @va1_va_arg_alloca(ptr %fmt, ...) nounwind {
; RV32D-ILP32-NEXT: [[DYN_STACKALLOC:%[0-9]+]]:_(p0) = G_DYN_STACKALLOC [[AND]](s32), 1
; RV32D-ILP32-NEXT: ADJCALLSTACKDOWN 0, 0, implicit-def $x2, implicit $x2
; RV32D-ILP32-NEXT: $x10 = COPY [[DYN_STACKALLOC]](p0)
- ; RV32D-ILP32-NEXT: PseudoCALL target-flags(riscv-plt) @notdead, csr_ilp32d_lp64d, implicit-def $x1, implicit $x10
+ ; RV32D-ILP32-NEXT: PseudoCALL target-flags(riscv-call) @notdead, csr_ilp32d_lp64d, implicit-def $x1, implicit $x10
; RV32D-ILP32-NEXT: ADJCALLSTACKUP 0, 0, implicit-def $x2, implicit $x2
; RV32D-ILP32-NEXT: $x10 = COPY [[VAARG]](s32)
; RV32D-ILP32-NEXT: PseudoRET implicit $x10
@@ -254,7 +254,7 @@ define i32 @va1_va_arg_alloca(ptr %fmt, ...) nounwind {
; RV32D-ILP32F-NEXT: [[DYN_STACKALLOC:%[0-9]+]]:_(p0) = G_DYN_STACKALLOC [[AND]](s32), 1
; RV32D-ILP32F-NEXT: ADJCALLSTACKDOWN 0, 0, implicit-def $x2, implicit $x2
; RV32D-ILP32F-NEXT: $x10 = COPY [[DYN_STACKALLOC]](p0)
- ; RV32D-ILP32F-NEXT: PseudoCALL target-flags(riscv-plt) @notdead, csr_ilp32f_lp64f, implicit-def $x1, implicit $x10
+ ; RV32D-ILP32F-NEXT: PseudoCALL target-flags(riscv-call) @notdead, csr_ilp32f_lp64f, implicit-def $x1, implicit $x10
; RV32D-ILP32F-NEXT: ADJCALLSTACKUP 0, 0, implicit-def $x2, implicit $x2
; RV32D-ILP32F-NEXT: $x10 = COPY [[VAARG]](s32)
; RV32D-ILP32F-NEXT: PseudoRET implicit $x10
@@ -299,7 +299,7 @@ define i32 @va1_va_arg_alloca(ptr %fmt, ...) nounwind {
; RV32D-ILP32D-NEXT: [[DYN_STACKALLOC:%[0-9]+]]:_(p0) = G_DYN_STACKALLOC [[AND]](s32), 1
; RV32D-ILP32D-NEXT: ADJCALLSTACKDOWN 0, 0, implicit-def $x2, implicit $x2
; RV32D-ILP32D-NEXT: $x10 = COPY [[DYN_STACKALLOC]](p0)
- ; RV32D-ILP32D-NEXT: PseudoCALL target-flags(riscv-plt) @notdead, csr_ilp32d_lp64d, implicit-def $x1, implicit $x10
+ ; RV32D-ILP32D-NEXT: PseudoCALL target-flags(riscv-call) @notdead, csr_ilp32d_lp64d, implicit-def $x1, implicit $x10
; RV32D-ILP32D-NEXT: ADJCALLSTACKUP 0, 0, implicit-def $x2, implicit $x2
; RV32D-ILP32D-NEXT: $x10 = COPY [[VAARG]](s32)
; RV32D-ILP32D-NEXT: PseudoRET implicit $x10
@@ -345,7 +345,7 @@ define i32 @va1_va_arg_alloca(ptr %fmt, ...) nounwind {
; LP64-NEXT: [[DYN_STACKALLOC:%[0-9]+]]:_(p0) = G_DYN_STACKALLOC [[AND]](s64), 1
; LP64-NEXT: ADJCALLSTACKDOWN 0, 0, implicit-def $x2, implicit $x2
; LP64-NEXT: $x10 = COPY [[DYN_STACKALLOC]](p0)
- ; LP64-NEXT: PseudoCALL target-flags(riscv-plt) @notdead, csr_ilp32_lp64, implicit-def $x1, implicit $x10
+ ; LP64-NEXT: PseudoCALL target-flags(riscv-call) @notdead, csr_ilp32_lp64, implicit-def $x1, implicit $x10
; LP64-NEXT: ADJCALLSTACKUP 0, 0, implicit-def $x2, implicit $x2
; LP64-NEXT: [[ANYEXT:%[0-9]+]]:_(s64) = G_ANYEXT [[VAARG]](s32)
; LP64-NEXT: $x10 = COPY [[ANYEXT]](s64)
@@ -392,7 +392,7 @@ define i32 @va1_va_arg_alloca(ptr %fmt, ...) nounwind {
; LP64F-NEXT: [[DYN_STACKALLOC:%[0-9]+]]:_(p0) = G_DYN_STACKALLOC [[AND]](s64), 1
; LP64F-NEXT: ADJCALLSTACKDOWN 0, 0, implicit-def $x2, implicit $x2
; LP64F-NEXT: $x10 = COPY [[DYN_STACKALLOC]](p0)
- ; LP64F-NEXT: PseudoCALL target-flags(riscv-plt) @notdead, csr_ilp32f_lp64f, implicit-def $x1, implicit $x10
+ ; LP64F-NEXT: PseudoCALL target-flags(riscv-call) @notdead, csr_ilp32f_lp64f, implicit-def $x1, implicit $x10
; LP64F-NEXT: ADJCALLSTACKUP 0, 0, implicit-def $x2, implicit $x2
; LP64F-NEXT: [[ANYEXT:%[0-9]+]]:_(s64) = G_ANYEXT [[VAARG]](s32)
; LP64F-NEXT: $x10 = COPY [[ANYEXT]](s64)
@@ -439,7 +439,7 @@ define i32 @va1_va_arg_alloca(ptr %fmt, ...) nounwind {
; LP64D-NEXT: [[DYN_STACKALLOC:%[0-9]+]]:_(p0) = G_DYN_STACKALLOC [[AND]](s64), 1
; LP64D-NEXT: ADJCALLSTACKDOWN 0, 0, implicit-def $x2, implicit $x2
; LP64D-NEXT: $x10 = COPY [[DYN_STACKALLOC]](p0)
- ; LP64D-NEXT: PseudoCALL target-flags(riscv-plt) @notdead, csr_ilp32d_lp64d, implicit-def $x1, implicit $x10
+ ; LP64D-NEXT: PseudoCALL target-flags(riscv-call) @notdead, csr_ilp32d_lp64d, implicit-def $x1, implicit $x10
; LP64D-NEXT: ADJCALLSTACKUP 0, 0, implicit-def $x2, implicit $x2
; LP64D-NEXT: [[ANYEXT:%[0-9]+]]:_(s64) = G_ANYEXT [[VAARG]](s32)
; LP64D-NEXT: $x10 = COPY [[ANYEXT]](s64)
@@ -542,7 +542,7 @@ define void @va1_caller() nounwind {
; ILP32-NEXT: $x12 = COPY [[UV]](s32)
; ILP32-NEXT: $x13 = COPY [[UV1]](s32)
; ILP32-NEXT: $x14 = COPY [[C1]](s32)
- ; ILP32-NEXT: PseudoCALL target-flags(riscv-plt) @va1, csr_ilp32_lp64, implicit-def $x1, implicit $x10, implicit $x12, implicit $x13, implicit $x14, implicit-def $x10
+ ; ILP32-NEXT: PseudoCALL target-flags(riscv-call) @va1, csr_ilp32_lp64, implicit-def $x1, implicit $x10, implicit $x12, implicit $x13, implicit $x14, implicit-def $x10
; ILP32-NEXT: ADJCALLSTACKUP 0, 0, implicit-def $x2, implicit $x2
; ILP32-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $x10
; ILP32-NEXT: PseudoRET
@@ -558,7 +558,7 @@ define void @va1_caller() nounwind {
; RV32D-ILP32-NEXT: $x12 = COPY [[UV]](s32)
; RV32D-ILP32-NEXT: $x13 = COPY [[UV1]](s32)
; RV32D-ILP32-NEXT: $x14 = COPY [[C1]](s32)
- ; RV32D-ILP32-NEXT: PseudoCALL target-flags(riscv-plt) @va1, csr_ilp32d_lp64d, implicit-def $x1, implicit $x10, implicit $x12, implicit $x13, implicit $x14, implicit-def $x10
+ ; RV32D-ILP32-NEXT: PseudoCALL target-flags(riscv-call) @va1, csr_ilp32d_lp64d, implicit-def $x1, implicit $x10, implicit $x12, implicit $x13, implicit $x14, implicit-def $x10
; RV32D-ILP32-NEXT: ADJCALLSTACKUP 0, 0, implicit-def $x2, implicit $x2
; RV32D-ILP32-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $x10
; RV32D-ILP32-NEXT: PseudoRET
@@ -574,7 +574,7 @@ define void @va1_caller() nounwind {
; RV32D-ILP32F-NEXT: $x12 = COPY [[UV]](s32)
; RV32D-ILP32F-NEXT: $x13 = COPY [[UV1]](s32)
; RV32D-ILP32F-NEXT: $x14 = COPY [[C1]](s32)
- ; RV32D-ILP32F-NEXT: PseudoCALL target-flags(riscv-plt) @va1, csr_ilp32f_lp64f, implicit-def $x1, implicit $x10, implicit $x12, implicit $x13, implicit $x14, implicit-def $x10
+ ; RV32D-ILP32F-NEXT: PseudoCALL target-flags(riscv-call) @va1, csr_ilp32f_lp64f, implicit-def $x1, implicit $x10, implicit $x12, implicit $x13, implicit $x14, implicit-def $x10
; RV32D-ILP32F-NEXT: ADJCALLSTACKUP 0, 0, implicit-def $x2, implicit $x2
; RV32D-ILP32F-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $x10
; RV32D-ILP32F-NEXT: PseudoRET
@@ -590,7 +590,7 @@ define void @va1_caller() nounwind {
; RV32D-ILP32D-NEXT: $x12 = COPY [[UV]](s32)
; RV32D-ILP32D-NEXT: $x13 = COPY [[UV1]](s32)
; RV32D-ILP32D-NEXT: $x14 = COPY [[C1]](s32)
- ; RV32D-ILP32D-NEXT: PseudoCALL target-flags(riscv-plt) @va1, csr_ilp32d_lp64d, implicit-def $x1, implicit $x10, implicit $x12, implicit $x13, implicit $x14, implicit-def $x10
+ ; RV32D-ILP32D-NEXT: PseudoCALL target-flags(riscv-call) @va1, csr_ilp32d_lp64d, implicit-def $x1, implicit $x10, implicit $x12, implicit $x13, implicit $x14, implicit-def $x10
; RV32D-ILP32D-NEXT: ADJCALLSTACKUP 0, 0, implicit-def $x2, implicit $x2
; RV32D-ILP32D-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $x10
; RV32D-ILP32D-NEXT: PseudoRET
@@ -605,7 +605,7 @@ define void @va1_caller() nounwind {
; LP64-NEXT: $x10 = COPY [[DEF]](p0)
; LP64-NEXT: $x11 = COPY [[C]](s64)
; LP64-NEXT: $x12 = COPY [[ANYEXT]](s64)
- ; LP64-NEXT: PseudoCALL target-flags(riscv-plt) @va1, csr_ilp32_lp64, implicit-def $x1, implicit $x10, implicit $x11, implicit $x12, implicit-def $x10
+ ; LP64-NEXT: PseudoCALL target-flags(riscv-call) @va1, csr_ilp32_lp64, implicit-def $x1, implicit $x10, implicit $x11, implicit $x12, implicit-def $x10
; LP64-NEXT: ADJCALLSTACKUP 0, 0, implicit-def $x2, implicit $x2
; LP64-NEXT: [[COPY:%[0-9]+]]:_(s64) = COPY $x10
; LP64-NEXT: [[TRUNC:%[0-9]+]]:_(s32) = G_TRUNC [[COPY]](s64)
@@ -621,7 +621,7 @@ define void @va1_caller() nounwind {
; LP64F-NEXT: $x10 = COPY [[DEF]](p0)
; LP64F-NEXT: $x11 = COPY [[C]](s64)
; LP64F-NEXT: $x12 = COPY [[ANYEXT]](s64)
- ; LP64F-NEXT: PseudoCALL target-flags(riscv-plt) @va1, csr_ilp32f_lp64f, implicit-def $x1, implicit $x10, implicit $x11, implicit $x12, implicit-def $x10
+ ; LP64F-NEXT: PseudoCALL target-flags(riscv-call) @va1, csr_ilp32f_lp64f, implicit-def $x1, implicit $x10, implicit $x11, implicit $x12, implicit-def $x10
; LP64F-NEXT: ADJCALLSTACKUP 0, 0, implicit-def $x2, implicit $x2
; LP64F-NEXT: [[COPY:%[0-9]+]]:_(s64) = COPY $x10
; LP64F-NEXT: [[TRUNC:%[0-9]+]]:_(s32) = G_TRUNC [[COPY]](s64)
@@ -637,7 +637,7 @@ define void @va1_caller() nounwind {
; LP64D-NEXT: $x10 = COPY [[DEF]](p0)
; LP64D-NEXT: $x11 = COPY [[C]](s64)
; LP64D-NEXT: $x12 = COPY [[ANYEXT]](s64)
- ; LP64D-NEXT: PseudoCALL target-flags(riscv-plt) @va1, csr_ilp32d_lp64d, implicit-def $x1, implicit $x10, implicit $x11, implicit $x12, implicit-def $x10
+ ; LP64D-NEXT: PseudoCALL target-flags(riscv-call) @va1, csr_ilp32d_lp64d, implicit-def $x1, implicit $x10, implicit $x11, implicit $x12, implicit-def $x10
; LP64D-NEXT: ADJCALLSTACKUP 0, 0, implicit-def $x2, implicit $x2
; LP64D-NEXT: [[COPY:%[0-9]+]]:_(s64) = COPY $x10
; LP64D-NEXT: [[TRUNC:%[0-9]+]]:_(s32) = G_TRUNC [[COPY]](s64)
@@ -842,7 +842,7 @@ define void @va2_caller() nounwind {
; ILP32-NEXT: $x10 = COPY [[DEF]](p0)
; ILP32-NEXT: $x12 = COPY [[UV]](s32)
; ILP32-NEXT: $x13 = COPY [[UV1]](s32)
- ; ILP32-NEXT: PseudoCALL target-flags(riscv-plt) @va2, csr_ilp32_lp64, implicit-def $x1, implicit $x10, implicit $x12, implicit $x13, implicit-def $x10, implicit-def $x11
+ ; ILP32-NEXT: PseudoCALL target-flags(riscv-call) @va2, csr_ilp32_lp64, implicit-def $x1, implicit $x10, implicit $x12, implicit $x13, implicit-def $x10, implicit-def $x11
; ILP32-NEXT: ADJCALLSTACKUP 0, 0, implicit-def $x2, implicit $x2
; ILP32-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $x10
; ILP32-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $x11
@@ -858,7 +858,7 @@ define void @va2_caller() nounwind {
; RV32D-ILP32-NEXT: $x10 = COPY [[DEF]](p0)
; RV32D-ILP32-NEXT: $x12 = COPY [[UV]](s32)
; RV32D-ILP32-NEXT: $x13 = COPY [[UV1]](s32)
- ; RV32D-ILP32-NEXT: PseudoCALL target-flags(riscv-plt) @va2, csr_ilp32d_lp64d, implicit-def $x1, implicit $x10, implicit $x12, implicit $x13, implicit-def $x10, implicit-def $x11
+ ; RV32D-ILP32-NEXT: PseudoCALL target-flags(riscv-call) @va2, csr_ilp32d_lp64d, implicit-def $x1, implicit $x10, implicit $x12, implicit $x13, implicit-def $x10, implicit-def $x11
; RV32D-ILP32-NEXT: ADJCALLSTACKUP 0, 0, implicit-def $x2, implicit $x2
; RV32D-ILP32-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $x10
; RV32D-ILP32-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $x11
@@ -874,7 +874,7 @@ define void @va2_caller() nounwind {
; RV32D-ILP32F-NEXT: $x10 = COPY [[DEF]](p0)
; RV32D-ILP32F-NEXT: $x12 = COPY [[UV]](s32)
; RV32D-ILP32F-NEXT: $x13 = COPY [[UV1]](s32)
- ; RV32D-ILP32F-NEXT: PseudoCALL target-flags(riscv-plt) @va2, csr_ilp32f_lp64f, implicit-def $x1, implicit $x10, implicit $x12, implicit $x13, implicit-def $x10, implicit-def $x11
+ ; RV32D-ILP32F-NEXT: PseudoCALL target-flags(riscv-call) @va2, csr_ilp32f_lp64f, implicit-def $x1, implicit $x10, implicit $x12, implicit $x13, implicit-def $x10, implicit-def $x11
; RV32D-ILP32F-NEXT: ADJCALLSTACKUP 0, 0, implicit-def $x2, implicit $x2
; RV32D-ILP32F-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $x10
; RV32D-ILP32F-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $x11
@@ -890,7 +890,7 @@ define void @va2_caller() nounwind {
; RV32D-ILP32D-NEXT: $x10 = COPY [[DEF]](p0)
; RV32D-ILP32D-NEXT: $x12 = COPY [[UV]](s32)
; RV32D-ILP32D-NEXT: $x13 = COPY [[UV1]](s32)
- ; RV32D-ILP32D-NEXT: PseudoCALL target-flags(riscv-plt) @va2, csr_ilp32d_lp64d, implicit-def $x1, implicit $x10, implicit $x12, implicit $x13, implicit-def $x10, implicit-def $x11
+ ; RV32D-ILP32D-NEXT: PseudoCALL target-flags(riscv-call) @va2, csr_ilp32d_lp64d, implicit-def $x1, implicit $x10, implicit $x12, implicit $x13, implicit-def $x10, implicit-def $x11
; RV32D-ILP32D-NEXT: ADJCALLSTACKUP 0, 0, implicit-def $x2, implicit $x2
; RV32D-ILP32D-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $x10
; RV32D-ILP32D-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $x11
@@ -904,7 +904,7 @@ define void @va2_caller() nounwind {
; LP64-NEXT: ADJCALLSTACKDOWN 0, 0, implicit-def $x2, implicit $x2
; LP64-NEXT: $x10 = COPY [[DEF]](p0)
; LP64-NEXT: $x11 = COPY [[C]](s64)
- ; LP64-NEXT: PseudoCALL target-flags(riscv-plt) @va2, csr_ilp32_lp64, implicit-def $x1, implicit $x10, implicit $x11, implicit-def $x10
+ ; LP64-NEXT: PseudoCALL target-flags(riscv-call) @va2, csr_ilp32_lp64, implicit-def $x1, implicit $x10, implicit $x11, implicit-def $x10
; LP64-NEXT: ADJCALLSTACKUP 0, 0, implicit-def $x2, implicit $x2
; LP64-NEXT: [[COPY:%[0-9]+]]:_(s64) = COPY $x10
; LP64-NEXT: PseudoRET
@@ -916,7 +916,7 @@ define void @va2_caller() nounwind {
; LP64F-NEXT: ADJCALLSTACKDOWN 0, 0, implicit-def $x2, implicit $x2
; LP64F-NEXT: $x10 = COPY [[DEF]](p0)
; LP64F-NEXT: $x11 = COPY [[C]](s64)
- ; LP64F-NEXT: PseudoCALL target-flags(riscv-plt) @va2, csr_ilp32f_lp64f, implicit-def $x1, implicit $x10, implicit $x11, implicit-def $x10
+ ; LP64F-NEXT: PseudoCALL target-flags(riscv-call) @va2, csr_ilp32f_lp64f, implicit-def $x1, implicit $x10, implicit $x11, implicit-def $x10
; LP64F-NEXT: ADJCALLSTACKUP 0, 0, implicit-def $x2, implicit $x2
; LP64F-NEXT: [[COPY:%[0-9]+]]:_(s64) = COPY $x10
; LP64F-NEXT: PseudoRET
@@ -928,7 +928,7 @@ define void @va2_caller() nounwind {
; LP64D-NEXT: ADJCALLSTACKDOWN 0, 0, implicit-def $x2, implicit $x2
; LP64D-NEXT: $x10 = COPY [[DEF]](p0)
; LP64D-NEXT: $x11 = COPY [[C]](s64)
- ; LP64D-NEXT: PseudoCALL target-flags(riscv-plt) @va2, csr_ilp32d_lp64d, implicit-def $x1, implicit $x10, implicit $x11, implicit-def $x10
+ ; LP64D-NEXT: PseudoCALL target-flags(riscv-call) @va2, csr_ilp32d_lp64d, implicit-def $x1, implicit $x10, implicit $x11, implicit-def $x10
; LP64D-NEXT: ADJCALLSTACKUP 0, 0, implicit-def $x2, implicit $x2
; LP64D-NEXT: [[COPY:%[0-9]+]]:_(s64) = COPY $x10
; LP64D-NEXT: PseudoRET
@@ -1134,7 +1134,7 @@ define void @va3_caller() nounwind {
; ILP32-NEXT: $x12 = COPY [[UV1]](s32)
; ILP32-NEXT: $x14 = COPY [[UV2]](s32)
; ILP32-NEXT: $x15 = COPY [[UV3]](s32)
- ; ILP32-NEXT: PseudoCALL target-flags(riscv-plt) @va3, csr_ilp32_lp64, implicit-def $x1, implicit $x10, implicit $x11, implicit $x12, implicit $x14, implicit $x15, implicit-def $x10, implicit-def $x11
+ ; ILP32-NEXT: PseudoCALL target-flags(riscv-call) @va3, csr_ilp32_lp64, implicit-def $x1, implicit $x10, implicit $x11, implicit $x12, implicit $x14, implicit $x15, implicit-def $x10, implicit-def $x11
; ILP32-NEXT: ADJCALLSTACKUP 0, 0, implicit-def $x2, implicit $x2
; ILP32-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $x10
; ILP32-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $x11
@@ -1154,7 +1154,7 @@ define void @va3_caller() nounwind {
; RV32D-ILP32-NEXT: $x12 = COPY [[UV1]](s32)
; RV32D-ILP32-NEXT: $x14 = COPY [[UV2]](s32)
; RV32D-ILP32-NEXT: $x15 = COPY [[UV3]](s32)
- ; RV32D-ILP32-NEXT: PseudoCALL target-flags(riscv-plt) @va3, csr_ilp32d_lp64d, implicit-def $x1, implicit $x10, implicit $x11, implicit $x12, implicit $x14, implicit $x15, implicit-def $x10, implicit-def $x11
+ ; RV32D-ILP32-NEXT: PseudoCALL target-flags(riscv-call) @va3, csr_ilp32d_lp64d, implicit-def $x1, implicit $x10, implicit $x11, implicit $x12, implicit $x14, implicit $x15, implicit-def $x10, implicit-def $x11
; RV32D-ILP32-NEXT: ADJCALLSTACKUP 0, 0, implicit-def $x2, implicit $x2
; RV32D-ILP32-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $x10
; RV32D-ILP32-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $x11
@@ -1174,7 +1174,7 @@ define void @va3_caller() nounwind {
; RV32D-ILP32F-NEXT: $x12 = COPY [[UV1]](s32)
; RV32D-ILP32F-NEXT: $x14 = COPY [[UV2]](s32)
; RV32D-ILP32F-NEXT: $x15 = COPY [[UV3]](s32)
- ; RV32D-ILP32F-NEXT: PseudoCALL target-flags(riscv-plt) @va3, csr_ilp32f_lp64f, implicit-def $x1, implicit $x10, implicit $x11, implicit $x12, implicit $x14, implicit $x15, implicit-def $x10, implicit-def $x11
+ ; RV32D-ILP32F-NEXT: PseudoCALL target-flags(riscv-call) @va3, csr_ilp32f_lp64f, implicit-def $x1, implicit $x10, implicit $x11, implicit $x12, implicit $x14, implicit $x15, implicit-def $x10, implicit-def $x11
; RV32D-ILP32F-NEXT: ADJCALLSTACKUP 0, 0, implicit-def $x2, implicit $x2
; RV32D-ILP32F-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $x10
; RV32D-ILP32F-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $x11
@@ -1194,7 +1194,7 @@ define void @va3_caller() nounwind {
; RV32D-ILP32D-NEXT: $x12 = COPY [[UV1]](s32)
; RV32D-ILP32D-NEXT: $x14 = COPY [[UV2]](s32)
; RV32D-ILP32D-NEXT: $x15 = COPY [[UV3]](s32)
- ; RV32D-ILP32D-NEXT: PseudoCALL target-flags(riscv-plt) @va3, csr_ilp32d_lp64d, implicit-def $x1, implicit $x10, implicit $x11, implicit $x12, implicit $x14, implicit $x15, implicit-def $x10, implicit-def $x11
+ ; RV32D-ILP32D-NEXT: PseudoCALL target-flags(riscv-call) @va3, csr_ilp32d_lp64d, implicit-def $x1, implicit $x10, implicit $x11, implicit $x12, implicit $x14, implicit $x15, implicit-def $x10, implicit-def $x11
; RV32D-ILP32D-NEXT: ADJCALLSTACKUP 0, 0, implicit-def $x2, implicit $x2
; RV32D-ILP32D-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $x10
; RV32D-ILP32D-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $x11
@@ -1211,7 +1211,7 @@ define void @va3_caller() nounwind {
; LP64-NEXT: $x10 = COPY [[ANYEXT]](s64)
; LP64-NEXT: $x11 = COPY [[C1]](s64)
; LP64-NEXT: $x12 = COPY [[C2]](s64)
- ; LP64-NEXT: PseudoCALL target-flags(riscv-plt) @va3, csr_ilp32_lp64, implicit-def $x1, implicit $x10, implicit $x11, implicit $x12, implicit-def $x10
+ ; LP64-NEXT: PseudoCALL target-flags(riscv-call) @va3, csr_ilp32_lp64, implicit-def $x1, implicit $x10, implicit $x11, implicit $x12, implicit-def $x10
; LP64-NEXT: ADJCALLSTACKUP 0, 0, implicit-def $x2, implicit $x2
; LP64-NEXT: [[COPY:%[0-9]+]]:_(s64) = COPY $x10
; LP64-NEXT: PseudoRET
@@ -1226,7 +1226,7 @@ define void @va3_caller() nounwind {
; LP64F-NEXT: $x10 = COPY [[ANYEXT]](s64)
; LP64F-NEXT: $x11 = COPY [[C1]](s64)
; LP64F-NEXT: $x12 = COPY [[C2]](s64)
- ; LP64F-NEXT: PseudoCALL target-flags(riscv-plt) @va3, csr_ilp32f_lp64f, implicit-def $x1, implicit $x10, implicit $x11, implicit $x12, implicit-def $x10
+ ; LP64F-NEXT: PseudoCALL target-flags(riscv-call) @va3, csr_ilp32f_lp64f, implicit-def $x1, implicit $x10, implicit $x11, implicit $x12, implicit-def $x10
; LP64F-NEXT: ADJCALLSTACKUP 0, 0, implicit-def $x2, implicit $x2
; LP64F-NEXT: [[COPY:%[0-9]+]]:_(s64) = COPY $x10
; LP64F-NEXT: PseudoRET
@@ -1241,7 +1241,7 @@ define void @va3_caller() nounwind {
; LP64D-NEXT: $x10 = COPY [[ANYEXT]](s64)
; LP64D-NEXT: $x11 = COPY [[C1]](s64)
; LP64D-NEXT: $x12 = COPY [[C2]](s64)
- ; LP64D-NEXT: PseudoCALL target-flags(riscv-plt) @va3, csr_ilp32d_lp64d, implicit-def $x1, implicit $x10, implicit $x11, implicit $x12, implicit-def $x10
+ ; LP64D-NEXT: PseudoCALL target-flags(riscv-call) @va3, csr_ilp32d_lp64d, implicit-def $x1, implicit $x10, implicit $x11, implicit $x12, implicit-def $x10
; LP64D-NEXT: ADJCALLSTACKUP 0, 0, implicit-def $x2, implicit $x2
; LP64D-NEXT: [[COPY:%[0-9]+]]:_(s64) = COPY $x10
; LP64D-NEXT: PseudoRET
@@ -1288,7 +1288,7 @@ define i32 @va4_va_copy(i32 %argno, ...) nounwind {
; ILP32-NEXT: [[LOAD:%[0-9]+]]:_(p0) = G_LOAD [[FRAME_INDEX2]](p0) :: (dereferenceable load (p0) from %ir.wargs)
; ILP32-NEXT: ADJCALLSTACKDOWN 0, 0, implicit-def $x2, implicit $x2
; ILP32-NEXT: $x10 = COPY [[LOAD]](p0)
- ; ILP32-NEXT: PseudoCALL target-flags(riscv-plt) @notdead, csr_ilp32_lp64, implicit-def $x1, implicit $x10
+ ; ILP32-NEXT: PseudoCALL target-flags(riscv-call) @notdead, csr_ilp32_lp64, implicit-def $x1, implicit $x10
; ILP32-NEXT: ADJCALLSTACKUP 0, 0, implicit-def $x2, implicit $x2
; ILP32-NEXT: [[VAARG1:%[0-9]+]]:_(s32) = G_VAARG [[FRAME_INDEX1]](p0), 4
; ILP32-NEXT: [[VAARG2:%[0-9]+]]:_(s32) = G_VAARG [[FRAME_INDEX1]](p0), 4
@@ -1335,7 +1335,7 @@ define i32 @va4_va_copy(i32 %argno, ...) nounwind {
; RV32D-ILP32-NEXT: [[LOAD:%[0-9]+]]:_(p0) = G_LOAD [[FRAME_INDEX2]](p0) :: (dereferenceable load (p0) from %ir.wargs)
; RV32D-ILP32-NEXT: ADJCALLSTACKDOWN 0, 0, implicit-def $x2, implicit $x2
; RV32D-ILP32-NEXT: $x10 = COPY [[LOAD]](p0)
- ; RV32D-ILP32-NEXT: PseudoCALL target-flags(riscv-plt) @notdead, csr_ilp32d_lp64d, implicit-def $x1, implicit $x10
+ ; RV32D-ILP32-NEXT: PseudoCALL target-flags(riscv-call) @notdead, csr_ilp32d_lp64d, implicit-def $x1, implicit $x10
; RV32D-ILP32-NEXT: ADJCALLSTACKUP 0, 0, implicit-def $x2, implicit $x2
; RV32D-ILP32-NEXT: [[VAARG1:%[0-9]+]]:_(s32) = G_VAARG [[FRAME_INDEX1]](p0), 4
; RV32D-ILP32-NEXT: [[VAARG2:%[0-9]+]]:_(s32) = G_VAARG [[FRAME_INDEX1]](p0), 4
@@ -1382,7 +1382,7 @@ define i32 @va4_va_copy(i32 %argno, ...) nounwind {
; RV32D-ILP32F-NEXT: [[LOAD:%[0-9]+]]:_(p0) = G_LOAD [[FRAME_INDEX2]](p0) :: (dereferenceable load (p0) from %ir.wargs)
; RV32D-ILP32F-NEXT: ADJCALLSTACKDOWN 0, 0, implicit-def $x2, implicit $x2
; RV32D-ILP32F-NEXT: $x10 = COPY [[LOAD]](p0)
- ; RV32D-ILP32F-NEXT: PseudoCALL target-flags(riscv-plt) @notdead, csr_ilp32f_lp64f, implicit-def $x1, implicit $x10
+ ; RV32D-ILP32F-NEXT: PseudoCALL target-flags(riscv-call) @notdead, csr_ilp32f_lp64f, implicit-def $x1, implicit $x10
; RV32D-ILP32F-NEXT: ADJCALLSTACKUP 0, 0, implicit-def $x2, implicit $x2
; RV32D-ILP32F-NEXT: [[VAARG1:%[0-9]+]]:_(s32) = G_VAARG [[FRAME_INDEX1]](p0), 4
; RV32D-ILP32F-NEXT: [[VAARG2:%[0-9]+]]:_(s32) = G_VAARG [[FRAME_INDEX1]](p0), 4
@@ -1429,7 +1429,7 @@ define i32 @va4_va_copy(i32 %argno, ...) nounwind {
; RV32D-ILP32D-NEXT: [[LOAD:%[0-9]+]]:_(p0) = G_LOAD [[FRAME_INDEX2]](p0) :: (dereferenceable load (p0) from %ir.wargs)
; RV32D-ILP32D-NEXT: ADJCALLSTACKDOWN 0, 0, implicit-def $x2, implicit $x2
; RV32D-ILP32D-NEXT: $x10 = COPY [[LOAD]](p0)
- ; RV32D-ILP32D-NEXT: PseudoCALL target-flags(riscv-plt) @notdead, csr_ilp32d_lp64d, implicit-def $x1, implicit $x10
+ ; RV32D-ILP32D-NEXT: PseudoCALL target-flags(riscv-call) @notdead, csr_ilp32d_lp64d, implicit-def $x1, implicit $x10
; RV32D-ILP32D-NEXT: ADJCALLSTACKUP 0, 0, implicit-def $x2, implicit $x2
; RV32D-ILP32D-NEXT: [[VAARG1:%[0-9]+]]:_(s32) = G_VAARG [[FRAME_INDEX1]](p0), 4
; RV32D-ILP32D-NEXT: [[VAARG2:%[0-9]+]]:_(s32) = G_VAARG [[FRAME_INDEX1]](p0), 4
@@ -1477,7 +1477,7 @@ define i32 @va4_va_copy(i32 %argno, ...) nounwind {
; LP64-NEXT: [[LOAD:%[0-9]+]]:_(p0) = G_LOAD [[FRAME_INDEX2]](p0) :: (dereferenceable load (p0) from %ir.wargs, align 4)
; LP64-NEXT: ADJCALLSTACKDOWN 0, 0, implicit-def $x2, implicit $x2
; LP64-NEXT: $x10 = COPY [[LOAD]](p0)
- ; LP64-NEXT: PseudoCALL target-flags(riscv-plt) @notdead, csr_ilp32_lp64, implicit-def $x1, implicit $x10
+ ; LP64-NEXT: PseudoCALL target-flags(riscv-call) @notdead, csr_ilp32_lp64, implicit-def $x1, implicit $x10
; LP64-NEXT: ADJCALLSTACKUP 0, 0, implicit-def $x2, implicit $x2
; LP64-NEXT: [[VAARG1:%[0-9]+]]:_(s32) = G_VAARG [[FRAME_INDEX1]](p0), 4
; LP64-NEXT: [[VAARG2:%[0-9]+]]:_(s32) = G_VAARG [[FRAME_INDEX1]](p0), 4
@@ -1526,7 +1526,7 @@ define i32 @va4_va_copy(i32 %argno, ...) nounwind {
; LP64F-NEXT: [[LOAD:%[0-9]+]]:_(p0) = G_LOAD [[FRAME_INDEX2]](p0) :: (dereferenceable load (p0) from %ir.wargs, align 4)
; LP64F-NEXT: ADJCALLSTACKDOWN 0, 0, implicit-def $x2, implicit $x2
; LP64F-NEXT: $x10 = COPY [[LOAD]](p0)
- ; LP64F-NEXT: PseudoCALL target-flags(riscv-plt) @notdead, csr_ilp32f_lp64f, implicit-def $x1, implicit $x10
+ ; LP64F-NEXT: PseudoCALL target-flags(riscv-call) @notdead, csr_ilp32f_lp64f, implicit-def $x1, implicit $x10
; LP64F-NEXT: ADJCALLSTACKUP 0, 0, implicit-def $x2, implicit $x2
; LP64F-NEXT: [[VAARG1:%[0-9]+]]:_(s32) = G_VAARG [[FRAME_INDEX1]](p0), 4
; LP64F-NEXT: [[VAARG2:%[0-9]+]]:_(s32) = G_VAARG [[FRAME_INDEX1]](p0), 4
@@ -1575,7 +1575,7 @@ define i32 @va4_va_copy(i32 %argno, ...) nounwind {
; LP64D-NEXT: [[LOAD:%[0-9]+]]:_(p0) = G_LOAD [[FRAME_INDEX2]](p0) :: (dereferenceable load (p0) from %ir.wargs, align 4)
; LP64D-NEXT: ADJCALLSTACKDOWN 0, 0, implicit-def $x2, implicit $x2
; LP64D-NEXT: $x10 = COPY [[LOAD]](p0)
- ; LP64D-NEXT: PseudoCALL target-flags(riscv-plt) @notdead, csr_ilp32d_lp64d, implicit-def $x1, implicit $x10
+ ; LP64D-NEXT: PseudoCALL target-flags(riscv-call) @notdead, csr_ilp32d_lp64d, implicit-def $x1, implicit $x10
; LP64D-NEXT: ADJCALLSTACKUP 0, 0, implicit-def $x2, implicit $x2
; LP64D-NEXT: [[VAARG1:%[0-9]+]]:_(s32) = G_VAARG [[FRAME_INDEX1]](p0), 4
; LP64D-NEXT: [[VAARG2:%[0-9]+]]:_(s32) = G_VAARG [[FRAME_INDEX1]](p0), 4
diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/irtranslator/variadic-call.ll b/llvm/test/CodeGen/RISCV/GlobalISel/irtranslator/variadic-call.ll
index 27674ada9c28..7c156f5f01fd 100644
--- a/llvm/test/CodeGen/RISCV/GlobalISel/irtranslator/variadic-call.ll
+++ b/llvm/test/CodeGen/RISCV/GlobalISel/irtranslator/variadic-call.ll
@@ -20,7 +20,7 @@ define i32 @main() {
; RV32I-NEXT: $x11 = COPY [[C1]](s32)
; RV32I-NEXT: $x12 = COPY [[C2]](s32)
; RV32I-NEXT: $x13 = COPY [[C3]](s32)
- ; RV32I-NEXT: PseudoCALL target-flags(riscv-plt) @foo, csr_ilp32_lp64, implicit-def $x1, implicit $x10, implicit $x11, implicit $x12, implicit $x13, implicit-def $x10
+ ; RV32I-NEXT: PseudoCALL target-flags(riscv-call) @foo, csr_ilp32_lp64, implicit-def $x1, implicit $x10, implicit $x11, implicit $x12, implicit $x13, implicit-def $x10
; RV32I-NEXT: ADJCALLSTACKUP 0, 0, implicit-def $x2, implicit $x2
; RV32I-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $x10
; RV32I-NEXT: $x10 = COPY [[COPY]](s32)
@@ -40,7 +40,7 @@ define i32 @main() {
; RV64I-NEXT: $x11 = COPY [[C2]](s64)
; RV64I-NEXT: $x12 = COPY [[C3]](s64)
; RV64I-NEXT: $x13 = COPY [[C4]](s64)
- ; RV64I-NEXT: PseudoCALL target-flags(riscv-plt) @foo, csr_ilp32_lp64, implicit-def $x1, implicit $x10, implicit $x11, implicit $x12, implicit $x13, implicit-def $x10
+ ; RV64I-NEXT: PseudoCALL target-flags(riscv-call) @foo, csr_ilp32_lp64, implicit-def $x1, implicit $x10, implicit $x11, implicit $x12, implicit $x13, implicit-def $x10
; RV64I-NEXT: ADJCALLSTACKUP 0, 0, implicit-def $x2, implicit $x2
; RV64I-NEXT: [[COPY:%[0-9]+]]:_(s64) = COPY $x10
; RV64I-NEXT: [[ASSERT_SEXT:%[0-9]+]]:_(s64) = G_ASSERT_SEXT [[COPY]], 32
diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/legalize-bswap-rv32.mir b/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/legalize-bswap-rv32.mir
index e66dbfa4c821..d6598c89b39a 100644
--- a/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/legalize-bswap-rv32.mir
+++ b/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/legalize-bswap-rv32.mir
@@ -2,7 +2,9 @@
# RUN: llc -mtriple=riscv32 -run-pass=legalizer %s -o - \
# RUN: | FileCheck %s --check-prefix=RV32I
# RUN: llc -mtriple=riscv32 -mattr=+zbb -run-pass=legalizer %s -o - \
-# RUN: | FileCheck %s --check-prefix=RV32ZBB
+# RUN: | FileCheck %s --check-prefix=RV32ZBB_OR_RV32ZBKB
+# RUN: llc -mtriple=riscv32 -mattr=+zbkb -run-pass=legalizer %s -o - \
+# RUN: | FileCheck %s --check-prefix=RV32ZBB_OR_RV32ZBKB
---
name: bswap_i16
@@ -23,16 +25,16 @@ body: |
; RV32I-NEXT: $x10 = COPY [[AND]](s32)
; RV32I-NEXT: PseudoRET implicit $x10
;
- ; RV32ZBB-LABEL: name: bswap_i16
- ; RV32ZBB: liveins: $x10
- ; RV32ZBB-NEXT: {{ $}}
- ; RV32ZBB-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $x10
- ; RV32ZBB-NEXT: [[ASSERT_ZEXT:%[0-9]+]]:_(s32) = G_ASSERT_ZEXT [[COPY]], 16
- ; RV32ZBB-NEXT: [[BSWAP:%[0-9]+]]:_(s32) = G_BSWAP [[ASSERT_ZEXT]]
- ; RV32ZBB-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
- ; RV32ZBB-NEXT: [[LSHR:%[0-9]+]]:_(s32) = G_LSHR [[BSWAP]], [[C]](s32)
- ; RV32ZBB-NEXT: $x10 = COPY [[LSHR]](s32)
- ; RV32ZBB-NEXT: PseudoRET implicit $x10
+ ; RV32ZBB_OR_RV32ZBKB-LABEL: name: bswap_i16
+ ; RV32ZBB_OR_RV32ZBKB: liveins: $x10
+ ; RV32ZBB_OR_RV32ZBKB-NEXT: {{ $}}
+ ; RV32ZBB_OR_RV32ZBKB-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $x10
+ ; RV32ZBB_OR_RV32ZBKB-NEXT: [[ASSERT_ZEXT:%[0-9]+]]:_(s32) = G_ASSERT_ZEXT [[COPY]], 16
+ ; RV32ZBB_OR_RV32ZBKB-NEXT: [[BSWAP:%[0-9]+]]:_(s32) = G_BSWAP [[ASSERT_ZEXT]]
+ ; RV32ZBB_OR_RV32ZBKB-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
+ ; RV32ZBB_OR_RV32ZBKB-NEXT: [[LSHR:%[0-9]+]]:_(s32) = G_LSHR [[BSWAP]], [[C]](s32)
+ ; RV32ZBB_OR_RV32ZBKB-NEXT: $x10 = COPY [[LSHR]](s32)
+ ; RV32ZBB_OR_RV32ZBKB-NEXT: PseudoRET implicit $x10
%0:_(s32) = COPY $x10
%1:_(s32) = G_ASSERT_ZEXT %0, 16
%2:_(s16) = G_TRUNC %1(s32)
@@ -65,13 +67,13 @@ body: |
; RV32I-NEXT: $x10 = COPY [[OR2]](s32)
; RV32I-NEXT: PseudoRET implicit $x10
;
- ; RV32ZBB-LABEL: name: bswap_i32
- ; RV32ZBB: liveins: $x10
- ; RV32ZBB-NEXT: {{ $}}
- ; RV32ZBB-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $x10
- ; RV32ZBB-NEXT: [[BSWAP:%[0-9]+]]:_(s32) = G_BSWAP [[COPY]]
- ; RV32ZBB-NEXT: $x10 = COPY [[BSWAP]](s32)
- ; RV32ZBB-NEXT: PseudoRET implicit $x10
+ ; RV32ZBB_OR_RV32ZBKB-LABEL: name: bswap_i32
+ ; RV32ZBB_OR_RV32ZBKB: liveins: $x10
+ ; RV32ZBB_OR_RV32ZBKB-NEXT: {{ $}}
+ ; RV32ZBB_OR_RV32ZBKB-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $x10
+ ; RV32ZBB_OR_RV32ZBKB-NEXT: [[BSWAP:%[0-9]+]]:_(s32) = G_BSWAP [[COPY]]
+ ; RV32ZBB_OR_RV32ZBKB-NEXT: $x10 = COPY [[BSWAP]](s32)
+ ; RV32ZBB_OR_RV32ZBKB-NEXT: PseudoRET implicit $x10
%0:_(s32) = COPY $x10
%1:_(s32) = G_BSWAP %0
$x10 = COPY %1(s32)
@@ -115,16 +117,16 @@ body: |
; RV32I-NEXT: $x11 = COPY [[OR5]](s32)
; RV32I-NEXT: PseudoRET implicit $x10, implicit $x11
;
- ; RV32ZBB-LABEL: name: bswap_i64
- ; RV32ZBB: liveins: $x10, $x11
- ; RV32ZBB-NEXT: {{ $}}
- ; RV32ZBB-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $x10
- ; RV32ZBB-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $x11
- ; RV32ZBB-NEXT: [[BSWAP:%[0-9]+]]:_(s32) = G_BSWAP [[COPY1]]
- ; RV32ZBB-NEXT: [[BSWAP1:%[0-9]+]]:_(s32) = G_BSWAP [[COPY]]
- ; RV32ZBB-NEXT: $x10 = COPY [[BSWAP]](s32)
- ; RV32ZBB-NEXT: $x11 = COPY [[BSWAP1]](s32)
- ; RV32ZBB-NEXT: PseudoRET implicit $x10, implicit $x11
+ ; RV32ZBB_OR_RV32ZBKB-LABEL: name: bswap_i64
+ ; RV32ZBB_OR_RV32ZBKB: liveins: $x10, $x11
+ ; RV32ZBB_OR_RV32ZBKB-NEXT: {{ $}}
+ ; RV32ZBB_OR_RV32ZBKB-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $x10
+ ; RV32ZBB_OR_RV32ZBKB-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $x11
+ ; RV32ZBB_OR_RV32ZBKB-NEXT: [[BSWAP:%[0-9]+]]:_(s32) = G_BSWAP [[COPY1]]
+ ; RV32ZBB_OR_RV32ZBKB-NEXT: [[BSWAP1:%[0-9]+]]:_(s32) = G_BSWAP [[COPY]]
+ ; RV32ZBB_OR_RV32ZBKB-NEXT: $x10 = COPY [[BSWAP]](s32)
+ ; RV32ZBB_OR_RV32ZBKB-NEXT: $x11 = COPY [[BSWAP1]](s32)
+ ; RV32ZBB_OR_RV32ZBKB-NEXT: PseudoRET implicit $x10, implicit $x11
%0:_(s32) = COPY $x10
%1:_(s32) = COPY $x11
%2:_(s64) = G_MERGE_VALUES %0(s32), %1(s32)
diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/legalize-bswap-rv64.mir b/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/legalize-bswap-rv64.mir
index b73a22c1a07e..61a0de973984 100644
--- a/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/legalize-bswap-rv64.mir
+++ b/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/legalize-bswap-rv64.mir
@@ -2,7 +2,9 @@
# RUN: llc -mtriple=riscv64 -run-pass=legalizer %s -o - \
# RUN: | FileCheck %s --check-prefix=RV64I
# RUN: llc -mtriple=riscv64 -mattr=+zbb -run-pass=legalizer %s -o - \
-# RUN: | FileCheck %s --check-prefix=RV64ZBB
+# RUN: | FileCheck %s --check-prefix=RV64ZBB_OR_RV64ZBKB
+# RUN: llc -mtriple=riscv64 -mattr=+zbkb -run-pass=legalizer %s -o - \
+# RUN: | FileCheck %s --check-prefix=RV64ZBB_OR_RV64ZBKB
---
name: bswap_i16
@@ -27,16 +29,16 @@ body: |
; RV64I-NEXT: $x10 = COPY [[AND]](s64)
; RV64I-NEXT: PseudoRET implicit $x10
;
- ; RV64ZBB-LABEL: name: bswap_i16
- ; RV64ZBB: liveins: $x10
- ; RV64ZBB-NEXT: {{ $}}
- ; RV64ZBB-NEXT: [[COPY:%[0-9]+]]:_(s64) = COPY $x10
- ; RV64ZBB-NEXT: [[ASSERT_ZEXT:%[0-9]+]]:_(s64) = G_ASSERT_ZEXT [[COPY]], 16
- ; RV64ZBB-NEXT: [[BSWAP:%[0-9]+]]:_(s64) = G_BSWAP [[ASSERT_ZEXT]]
- ; RV64ZBB-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 48
- ; RV64ZBB-NEXT: [[LSHR:%[0-9]+]]:_(s64) = G_LSHR [[BSWAP]], [[C]](s64)
- ; RV64ZBB-NEXT: $x10 = COPY [[LSHR]](s64)
- ; RV64ZBB-NEXT: PseudoRET implicit $x10
+ ; RV64ZBB_OR_RV64ZBKB-LABEL: name: bswap_i16
+ ; RV64ZBB_OR_RV64ZBKB: liveins: $x10
+ ; RV64ZBB_OR_RV64ZBKB-NEXT: {{ $}}
+ ; RV64ZBB_OR_RV64ZBKB-NEXT: [[COPY:%[0-9]+]]:_(s64) = COPY $x10
+ ; RV64ZBB_OR_RV64ZBKB-NEXT: [[ASSERT_ZEXT:%[0-9]+]]:_(s64) = G_ASSERT_ZEXT [[COPY]], 16
+ ; RV64ZBB_OR_RV64ZBKB-NEXT: [[BSWAP:%[0-9]+]]:_(s64) = G_BSWAP [[ASSERT_ZEXT]]
+ ; RV64ZBB_OR_RV64ZBKB-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 48
+ ; RV64ZBB_OR_RV64ZBKB-NEXT: [[LSHR:%[0-9]+]]:_(s64) = G_LSHR [[BSWAP]], [[C]](s64)
+ ; RV64ZBB_OR_RV64ZBKB-NEXT: $x10 = COPY [[LSHR]](s64)
+ ; RV64ZBB_OR_RV64ZBKB-NEXT: PseudoRET implicit $x10
%0:_(s64) = COPY $x10
%1:_(s64) = G_ASSERT_ZEXT %0, 16
%2:_(s16) = G_TRUNC %1(s64)
@@ -74,16 +76,16 @@ body: |
; RV64I-NEXT: $x10 = COPY [[ZEXT]](s64)
; RV64I-NEXT: PseudoRET implicit $x10
;
- ; RV64ZBB-LABEL: name: bswap_i32
- ; RV64ZBB: liveins: $x10
- ; RV64ZBB-NEXT: {{ $}}
- ; RV64ZBB-NEXT: [[COPY:%[0-9]+]]:_(s64) = COPY $x10
- ; RV64ZBB-NEXT: [[ASSERT_ZEXT:%[0-9]+]]:_(s64) = G_ASSERT_ZEXT [[COPY]], 32
- ; RV64ZBB-NEXT: [[BSWAP:%[0-9]+]]:_(s64) = G_BSWAP [[ASSERT_ZEXT]]
- ; RV64ZBB-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 32
- ; RV64ZBB-NEXT: [[LSHR:%[0-9]+]]:_(s64) = G_LSHR [[BSWAP]], [[C]](s64)
- ; RV64ZBB-NEXT: $x10 = COPY [[LSHR]](s64)
- ; RV64ZBB-NEXT: PseudoRET implicit $x10
+ ; RV64ZBB_OR_RV64ZBKB-LABEL: name: bswap_i32
+ ; RV64ZBB_OR_RV64ZBKB: liveins: $x10
+ ; RV64ZBB_OR_RV64ZBKB-NEXT: {{ $}}
+ ; RV64ZBB_OR_RV64ZBKB-NEXT: [[COPY:%[0-9]+]]:_(s64) = COPY $x10
+ ; RV64ZBB_OR_RV64ZBKB-NEXT: [[ASSERT_ZEXT:%[0-9]+]]:_(s64) = G_ASSERT_ZEXT [[COPY]], 32
+ ; RV64ZBB_OR_RV64ZBKB-NEXT: [[BSWAP:%[0-9]+]]:_(s64) = G_BSWAP [[ASSERT_ZEXT]]
+ ; RV64ZBB_OR_RV64ZBKB-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 32
+ ; RV64ZBB_OR_RV64ZBKB-NEXT: [[LSHR:%[0-9]+]]:_(s64) = G_LSHR [[BSWAP]], [[C]](s64)
+ ; RV64ZBB_OR_RV64ZBKB-NEXT: $x10 = COPY [[LSHR]](s64)
+ ; RV64ZBB_OR_RV64ZBKB-NEXT: PseudoRET implicit $x10
%0:_(s64) = COPY $x10
%1:_(s64) = G_ASSERT_ZEXT %0, 32
%2:_(s32) = G_TRUNC %1(s64)
@@ -132,13 +134,13 @@ body: |
; RV64I-NEXT: $x10 = COPY [[OR6]](s64)
; RV64I-NEXT: PseudoRET implicit $x10
;
- ; RV64ZBB-LABEL: name: bswap_i64
- ; RV64ZBB: liveins: $x10
- ; RV64ZBB-NEXT: {{ $}}
- ; RV64ZBB-NEXT: [[COPY:%[0-9]+]]:_(s64) = COPY $x10
- ; RV64ZBB-NEXT: [[BSWAP:%[0-9]+]]:_(s64) = G_BSWAP [[COPY]]
- ; RV64ZBB-NEXT: $x10 = COPY [[BSWAP]](s64)
- ; RV64ZBB-NEXT: PseudoRET implicit $x10
+ ; RV64ZBB_OR_RV64ZBKB-LABEL: name: bswap_i64
+ ; RV64ZBB_OR_RV64ZBKB: liveins: $x10
+ ; RV64ZBB_OR_RV64ZBKB-NEXT: {{ $}}
+ ; RV64ZBB_OR_RV64ZBKB-NEXT: [[COPY:%[0-9]+]]:_(s64) = COPY $x10
+ ; RV64ZBB_OR_RV64ZBKB-NEXT: [[BSWAP:%[0-9]+]]:_(s64) = G_BSWAP [[COPY]]
+ ; RV64ZBB_OR_RV64ZBKB-NEXT: $x10 = COPY [[BSWAP]](s64)
+ ; RV64ZBB_OR_RV64ZBKB-NEXT: PseudoRET implicit $x10
%0:_(s64) = COPY $x10
%1:_(s64) = G_BSWAP %0
$x10 = COPY %1(s64)
diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/legalize-div-rv32.mir b/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/legalize-div-rv32.mir
index 747d579f5070..4177a40e3826 100644
--- a/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/legalize-div-rv32.mir
+++ b/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/legalize-div-rv32.mir
@@ -19,7 +19,7 @@ body: |
; CHECK-I-NEXT: ADJCALLSTACKDOWN 0, 0, implicit-def $x2, implicit $x2
; CHECK-I-NEXT: $x10 = COPY [[ASHR]](s32)
; CHECK-I-NEXT: $x11 = COPY [[ASHR1]](s32)
- ; CHECK-I-NEXT: PseudoCALL target-flags(riscv-plt) &__divsi3, csr_ilp32_lp64, implicit-def $x1, implicit $x10, implicit $x11, implicit-def $x10
+ ; CHECK-I-NEXT: PseudoCALL target-flags(riscv-call) &__divsi3, csr_ilp32_lp64, implicit-def $x1, implicit $x10, implicit $x11, implicit-def $x10
; CHECK-I-NEXT: ADJCALLSTACKUP 0, 0, implicit-def $x2, implicit $x2
; CHECK-I-NEXT: [[COPY2:%[0-9]+]]:_(s32) = COPY $x10
; CHECK-I-NEXT: $x10 = COPY [[COPY2]](s32)
@@ -63,7 +63,7 @@ body: |
; CHECK-I-NEXT: ADJCALLSTACKDOWN 0, 0, implicit-def $x2, implicit $x2
; CHECK-I-NEXT: $x10 = COPY [[ASHR]](s32)
; CHECK-I-NEXT: $x11 = COPY [[ASHR1]](s32)
- ; CHECK-I-NEXT: PseudoCALL target-flags(riscv-plt) &__divsi3, csr_ilp32_lp64, implicit-def $x1, implicit $x10, implicit $x11, implicit-def $x10
+ ; CHECK-I-NEXT: PseudoCALL target-flags(riscv-call) &__divsi3, csr_ilp32_lp64, implicit-def $x1, implicit $x10, implicit $x11, implicit-def $x10
; CHECK-I-NEXT: ADJCALLSTACKUP 0, 0, implicit-def $x2, implicit $x2
; CHECK-I-NEXT: [[COPY2:%[0-9]+]]:_(s32) = COPY $x10
; CHECK-I-NEXT: $x10 = COPY [[COPY2]](s32)
@@ -107,7 +107,7 @@ body: |
; CHECK-I-NEXT: ADJCALLSTACKDOWN 0, 0, implicit-def $x2, implicit $x2
; CHECK-I-NEXT: $x10 = COPY [[ASHR]](s32)
; CHECK-I-NEXT: $x11 = COPY [[ASHR1]](s32)
- ; CHECK-I-NEXT: PseudoCALL target-flags(riscv-plt) &__divsi3, csr_ilp32_lp64, implicit-def $x1, implicit $x10, implicit $x11, implicit-def $x10
+ ; CHECK-I-NEXT: PseudoCALL target-flags(riscv-call) &__divsi3, csr_ilp32_lp64, implicit-def $x1, implicit $x10, implicit $x11, implicit-def $x10
; CHECK-I-NEXT: ADJCALLSTACKUP 0, 0, implicit-def $x2, implicit $x2
; CHECK-I-NEXT: [[COPY2:%[0-9]+]]:_(s32) = COPY $x10
; CHECK-I-NEXT: $x10 = COPY [[COPY2]](s32)
@@ -145,7 +145,7 @@ body: |
; CHECK-I-NEXT: ADJCALLSTACKDOWN 0, 0, implicit-def $x2, implicit $x2
; CHECK-I-NEXT: $x10 = COPY [[COPY]](s32)
; CHECK-I-NEXT: $x11 = COPY [[COPY1]](s32)
- ; CHECK-I-NEXT: PseudoCALL target-flags(riscv-plt) &__divsi3, csr_ilp32_lp64, implicit-def $x1, implicit $x10, implicit $x11, implicit-def $x10
+ ; CHECK-I-NEXT: PseudoCALL target-flags(riscv-call) &__divsi3, csr_ilp32_lp64, implicit-def $x1, implicit $x10, implicit $x11, implicit-def $x10
; CHECK-I-NEXT: ADJCALLSTACKUP 0, 0, implicit-def $x2, implicit $x2
; CHECK-I-NEXT: [[COPY2:%[0-9]+]]:_(s32) = COPY $x10
; CHECK-I-NEXT: $x10 = COPY [[COPY2]](s32)
@@ -184,7 +184,7 @@ body: |
; CHECK-I-NEXT: $x11 = COPY [[ASHR]](s32)
; CHECK-I-NEXT: $x12 = COPY %ylo(s32)
; CHECK-I-NEXT: $x13 = COPY [[ASHR1]](s32)
- ; CHECK-I-NEXT: PseudoCALL target-flags(riscv-plt) &__divdi3, csr_ilp32_lp64, implicit-def $x1, implicit $x10, implicit $x11, implicit $x12, implicit $x13, implicit-def $x10, implicit-def $x11
+ ; CHECK-I-NEXT: PseudoCALL target-flags(riscv-call) &__divdi3, csr_ilp32_lp64, implicit-def $x1, implicit $x10, implicit $x11, implicit $x12, implicit $x13, implicit-def $x10, implicit-def $x11
; CHECK-I-NEXT: ADJCALLSTACKUP 0, 0, implicit-def $x2, implicit $x2
; CHECK-I-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $x10
; CHECK-I-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $x11
@@ -208,7 +208,7 @@ body: |
; CHECK-M-NEXT: $x11 = COPY [[ASHR]](s32)
; CHECK-M-NEXT: $x12 = COPY %ylo(s32)
; CHECK-M-NEXT: $x13 = COPY [[ASHR1]](s32)
- ; CHECK-M-NEXT: PseudoCALL target-flags(riscv-plt) &__divdi3, csr_ilp32_lp64, implicit-def $x1, implicit $x10, implicit $x11, implicit $x12, implicit $x13, implicit-def $x10, implicit-def $x11
+ ; CHECK-M-NEXT: PseudoCALL target-flags(riscv-call) &__divdi3, csr_ilp32_lp64, implicit-def $x1, implicit $x10, implicit $x11, implicit $x12, implicit $x13, implicit-def $x10, implicit-def $x11
; CHECK-M-NEXT: ADJCALLSTACKUP 0, 0, implicit-def $x2, implicit $x2
; CHECK-M-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $x10
; CHECK-M-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $x11
@@ -245,7 +245,7 @@ body: |
; CHECK-I-NEXT: $x11 = COPY %hi1(s32)
; CHECK-I-NEXT: $x12 = COPY %lo2(s32)
; CHECK-I-NEXT: $x13 = COPY %hi2(s32)
- ; CHECK-I-NEXT: PseudoCALL target-flags(riscv-plt) &__divdi3, csr_ilp32_lp64, implicit-def $x1, implicit $x10, implicit $x11, implicit $x12, implicit $x13, implicit-def $x10, implicit-def $x11
+ ; CHECK-I-NEXT: PseudoCALL target-flags(riscv-call) &__divdi3, csr_ilp32_lp64, implicit-def $x1, implicit $x10, implicit $x11, implicit $x12, implicit $x13, implicit-def $x10, implicit-def $x11
; CHECK-I-NEXT: ADJCALLSTACKUP 0, 0, implicit-def $x2, implicit $x2
; CHECK-I-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $x10
; CHECK-I-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $x11
@@ -263,7 +263,7 @@ body: |
; CHECK-M-NEXT: $x11 = COPY %hi1(s32)
; CHECK-M-NEXT: $x12 = COPY %lo2(s32)
; CHECK-M-NEXT: $x13 = COPY %hi2(s32)
- ; CHECK-M-NEXT: PseudoCALL target-flags(riscv-plt) &__divdi3, csr_ilp32_lp64, implicit-def $x1, implicit $x10, implicit $x11, implicit $x12, implicit $x13, implicit-def $x10, implicit-def $x11
+ ; CHECK-M-NEXT: PseudoCALL target-flags(riscv-call) &__divdi3, csr_ilp32_lp64, implicit-def $x1, implicit $x10, implicit $x11, implicit $x12, implicit $x13, implicit-def $x10, implicit-def $x11
; CHECK-M-NEXT: ADJCALLSTACKUP 0, 0, implicit-def $x2, implicit $x2
; CHECK-M-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $x10
; CHECK-M-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $x11
@@ -297,7 +297,7 @@ body: |
; CHECK-I-NEXT: ADJCALLSTACKDOWN 0, 0, implicit-def $x2, implicit $x2
; CHECK-I-NEXT: $x10 = COPY [[AND]](s32)
; CHECK-I-NEXT: $x11 = COPY [[AND1]](s32)
- ; CHECK-I-NEXT: PseudoCALL target-flags(riscv-plt) &__udivsi3, csr_ilp32_lp64, implicit-def $x1, implicit $x10, implicit $x11, implicit-def $x10
+ ; CHECK-I-NEXT: PseudoCALL target-flags(riscv-call) &__udivsi3, csr_ilp32_lp64, implicit-def $x1, implicit $x10, implicit $x11, implicit-def $x10
; CHECK-I-NEXT: ADJCALLSTACKUP 0, 0, implicit-def $x2, implicit $x2
; CHECK-I-NEXT: [[COPY2:%[0-9]+]]:_(s32) = COPY $x10
; CHECK-I-NEXT: $x10 = COPY [[COPY2]](s32)
@@ -337,7 +337,7 @@ body: |
; CHECK-I-NEXT: ADJCALLSTACKDOWN 0, 0, implicit-def $x2, implicit $x2
; CHECK-I-NEXT: $x10 = COPY [[AND]](s32)
; CHECK-I-NEXT: $x11 = COPY [[AND1]](s32)
- ; CHECK-I-NEXT: PseudoCALL target-flags(riscv-plt) &__udivsi3, csr_ilp32_lp64, implicit-def $x1, implicit $x10, implicit $x11, implicit-def $x10
+ ; CHECK-I-NEXT: PseudoCALL target-flags(riscv-call) &__udivsi3, csr_ilp32_lp64, implicit-def $x1, implicit $x10, implicit $x11, implicit-def $x10
; CHECK-I-NEXT: ADJCALLSTACKUP 0, 0, implicit-def $x2, implicit $x2
; CHECK-I-NEXT: [[COPY2:%[0-9]+]]:_(s32) = COPY $x10
; CHECK-I-NEXT: $x10 = COPY [[COPY2]](s32)
@@ -377,7 +377,7 @@ body: |
; CHECK-I-NEXT: ADJCALLSTACKDOWN 0, 0, implicit-def $x2, implicit $x2
; CHECK-I-NEXT: $x10 = COPY [[AND]](s32)
; CHECK-I-NEXT: $x11 = COPY [[AND1]](s32)
- ; CHECK-I-NEXT: PseudoCALL target-flags(riscv-plt) &__udivsi3, csr_ilp32_lp64, implicit-def $x1, implicit $x10, implicit $x11, implicit-def $x10
+ ; CHECK-I-NEXT: PseudoCALL target-flags(riscv-call) &__udivsi3, csr_ilp32_lp64, implicit-def $x1, implicit $x10, implicit $x11, implicit-def $x10
; CHECK-I-NEXT: ADJCALLSTACKUP 0, 0, implicit-def $x2, implicit $x2
; CHECK-I-NEXT: [[COPY2:%[0-9]+]]:_(s32) = COPY $x10
; CHECK-I-NEXT: $x10 = COPY [[COPY2]](s32)
@@ -413,7 +413,7 @@ body: |
; CHECK-I-NEXT: ADJCALLSTACKDOWN 0, 0, implicit-def $x2, implicit $x2
; CHECK-I-NEXT: $x10 = COPY [[COPY]](s32)
; CHECK-I-NEXT: $x11 = COPY [[COPY1]](s32)
- ; CHECK-I-NEXT: PseudoCALL target-flags(riscv-plt) &__udivsi3, csr_ilp32_lp64, implicit-def $x1, implicit $x10, implicit $x11, implicit-def $x10
+ ; CHECK-I-NEXT: PseudoCALL target-flags(riscv-call) &__udivsi3, csr_ilp32_lp64, implicit-def $x1, implicit $x10, implicit $x11, implicit-def $x10
; CHECK-I-NEXT: ADJCALLSTACKUP 0, 0, implicit-def $x2, implicit $x2
; CHECK-I-NEXT: [[COPY2:%[0-9]+]]:_(s32) = COPY $x10
; CHECK-I-NEXT: $x10 = COPY [[COPY2]](s32)
@@ -454,7 +454,7 @@ body: |
; CHECK-I-NEXT: $x11 = COPY [[AND1]](s32)
; CHECK-I-NEXT: $x12 = COPY [[AND2]](s32)
; CHECK-I-NEXT: $x13 = COPY [[AND3]](s32)
- ; CHECK-I-NEXT: PseudoCALL target-flags(riscv-plt) &__udivdi3, csr_ilp32_lp64, implicit-def $x1, implicit $x10, implicit $x11, implicit $x12, implicit $x13, implicit-def $x10, implicit-def $x11
+ ; CHECK-I-NEXT: PseudoCALL target-flags(riscv-call) &__udivdi3, csr_ilp32_lp64, implicit-def $x1, implicit $x10, implicit $x11, implicit $x12, implicit $x13, implicit-def $x10, implicit-def $x11
; CHECK-I-NEXT: ADJCALLSTACKUP 0, 0, implicit-def $x2, implicit $x2
; CHECK-I-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $x10
; CHECK-I-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $x11
@@ -480,7 +480,7 @@ body: |
; CHECK-M-NEXT: $x11 = COPY [[AND1]](s32)
; CHECK-M-NEXT: $x12 = COPY [[AND2]](s32)
; CHECK-M-NEXT: $x13 = COPY [[AND3]](s32)
- ; CHECK-M-NEXT: PseudoCALL target-flags(riscv-plt) &__udivdi3, csr_ilp32_lp64, implicit-def $x1, implicit $x10, implicit $x11, implicit $x12, implicit $x13, implicit-def $x10, implicit-def $x11
+ ; CHECK-M-NEXT: PseudoCALL target-flags(riscv-call) &__udivdi3, csr_ilp32_lp64, implicit-def $x1, implicit $x10, implicit $x11, implicit $x12, implicit $x13, implicit-def $x10, implicit-def $x11
; CHECK-M-NEXT: ADJCALLSTACKUP 0, 0, implicit-def $x2, implicit $x2
; CHECK-M-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $x10
; CHECK-M-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $x11
@@ -517,7 +517,7 @@ body: |
; CHECK-I-NEXT: $x11 = COPY %hi1(s32)
; CHECK-I-NEXT: $x12 = COPY %lo2(s32)
; CHECK-I-NEXT: $x13 = COPY %hi2(s32)
- ; CHECK-I-NEXT: PseudoCALL target-flags(riscv-plt) &__udivdi3, csr_ilp32_lp64, implicit-def $x1, implicit $x10, implicit $x11, implicit $x12, implicit $x13, implicit-def $x10, implicit-def $x11
+ ; CHECK-I-NEXT: PseudoCALL target-flags(riscv-call) &__udivdi3, csr_ilp32_lp64, implicit-def $x1, implicit $x10, implicit $x11, implicit $x12, implicit $x13, implicit-def $x10, implicit-def $x11
; CHECK-I-NEXT: ADJCALLSTACKUP 0, 0, implicit-def $x2, implicit $x2
; CHECK-I-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $x10
; CHECK-I-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $x11
@@ -535,7 +535,7 @@ body: |
; CHECK-M-NEXT: $x11 = COPY %hi1(s32)
; CHECK-M-NEXT: $x12 = COPY %lo2(s32)
; CHECK-M-NEXT: $x13 = COPY %hi2(s32)
- ; CHECK-M-NEXT: PseudoCALL target-flags(riscv-plt) &__udivdi3, csr_ilp32_lp64, implicit-def $x1, implicit $x10, implicit $x11, implicit $x12, implicit $x13, implicit-def $x10, implicit-def $x11
+ ; CHECK-M-NEXT: PseudoCALL target-flags(riscv-call) &__udivdi3, csr_ilp32_lp64, implicit-def $x1, implicit $x10, implicit $x11, implicit $x12, implicit $x13, implicit-def $x10, implicit-def $x11
; CHECK-M-NEXT: ADJCALLSTACKUP 0, 0, implicit-def $x2, implicit $x2
; CHECK-M-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $x10
; CHECK-M-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $x11
diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/legalize-div-rv64.mir b/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/legalize-div-rv64.mir
index 09bb86bac45d..492f9530997c 100644
--- a/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/legalize-div-rv64.mir
+++ b/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/legalize-div-rv64.mir
@@ -19,7 +19,7 @@ body: |
; CHECK-I-NEXT: ADJCALLSTACKDOWN 0, 0, implicit-def $x2, implicit $x2
; CHECK-I-NEXT: $x10 = COPY [[ASHR]](s64)
; CHECK-I-NEXT: $x11 = COPY [[ASHR1]](s64)
- ; CHECK-I-NEXT: PseudoCALL target-flags(riscv-plt) &__divdi3, csr_ilp32_lp64, implicit-def $x1, implicit $x10, implicit $x11, implicit-def $x10
+ ; CHECK-I-NEXT: PseudoCALL target-flags(riscv-call) &__divdi3, csr_ilp32_lp64, implicit-def $x1, implicit $x10, implicit $x11, implicit-def $x10
; CHECK-I-NEXT: ADJCALLSTACKUP 0, 0, implicit-def $x2, implicit $x2
; CHECK-I-NEXT: [[COPY2:%[0-9]+]]:_(s64) = COPY $x10
; CHECK-I-NEXT: $x10 = COPY [[COPY2]](s64)
@@ -68,7 +68,7 @@ body: |
; CHECK-I-NEXT: ADJCALLSTACKDOWN 0, 0, implicit-def $x2, implicit $x2
; CHECK-I-NEXT: $x10 = COPY [[ASHR]](s64)
; CHECK-I-NEXT: $x11 = COPY [[ASHR1]](s64)
- ; CHECK-I-NEXT: PseudoCALL target-flags(riscv-plt) &__divdi3, csr_ilp32_lp64, implicit-def $x1, implicit $x10, implicit $x11, implicit-def $x10
+ ; CHECK-I-NEXT: PseudoCALL target-flags(riscv-call) &__divdi3, csr_ilp32_lp64, implicit-def $x1, implicit $x10, implicit $x11, implicit-def $x10
; CHECK-I-NEXT: ADJCALLSTACKUP 0, 0, implicit-def $x2, implicit $x2
; CHECK-I-NEXT: [[COPY2:%[0-9]+]]:_(s64) = COPY $x10
; CHECK-I-NEXT: $x10 = COPY [[COPY2]](s64)
@@ -117,7 +117,7 @@ body: |
; CHECK-I-NEXT: ADJCALLSTACKDOWN 0, 0, implicit-def $x2, implicit $x2
; CHECK-I-NEXT: $x10 = COPY [[ASHR]](s64)
; CHECK-I-NEXT: $x11 = COPY [[ASHR1]](s64)
- ; CHECK-I-NEXT: PseudoCALL target-flags(riscv-plt) &__divdi3, csr_ilp32_lp64, implicit-def $x1, implicit $x10, implicit $x11, implicit-def $x10
+ ; CHECK-I-NEXT: PseudoCALL target-flags(riscv-call) &__divdi3, csr_ilp32_lp64, implicit-def $x1, implicit $x10, implicit $x11, implicit-def $x10
; CHECK-I-NEXT: ADJCALLSTACKUP 0, 0, implicit-def $x2, implicit $x2
; CHECK-I-NEXT: [[COPY2:%[0-9]+]]:_(s64) = COPY $x10
; CHECK-I-NEXT: $x10 = COPY [[COPY2]](s64)
@@ -162,7 +162,7 @@ body: |
; CHECK-I-NEXT: ADJCALLSTACKDOWN 0, 0, implicit-def $x2, implicit $x2
; CHECK-I-NEXT: $x10 = COPY [[SEXT_INREG]](s64)
; CHECK-I-NEXT: $x11 = COPY [[SEXT_INREG1]](s64)
- ; CHECK-I-NEXT: PseudoCALL target-flags(riscv-plt) &__divdi3, csr_ilp32_lp64, implicit-def $x1, implicit $x10, implicit $x11, implicit-def $x10
+ ; CHECK-I-NEXT: PseudoCALL target-flags(riscv-call) &__divdi3, csr_ilp32_lp64, implicit-def $x1, implicit $x10, implicit $x11, implicit-def $x10
; CHECK-I-NEXT: ADJCALLSTACKUP 0, 0, implicit-def $x2, implicit $x2
; CHECK-I-NEXT: [[COPY2:%[0-9]+]]:_(s64) = COPY $x10
; CHECK-I-NEXT: $x10 = COPY [[COPY2]](s64)
@@ -197,7 +197,7 @@ body: |
; CHECK-I-NEXT: ADJCALLSTACKDOWN 0, 0, implicit-def $x2, implicit $x2
; CHECK-I-NEXT: $x10 = COPY [[COPY]](s64)
; CHECK-I-NEXT: $x11 = COPY [[COPY1]](s64)
- ; CHECK-I-NEXT: PseudoCALL target-flags(riscv-plt) &__divdi3, csr_ilp32_lp64, implicit-def $x1, implicit $x10, implicit $x11, implicit-def $x10
+ ; CHECK-I-NEXT: PseudoCALL target-flags(riscv-call) &__divdi3, csr_ilp32_lp64, implicit-def $x1, implicit $x10, implicit $x11, implicit-def $x10
; CHECK-I-NEXT: ADJCALLSTACKUP 0, 0, implicit-def $x2, implicit $x2
; CHECK-I-NEXT: [[COPY2:%[0-9]+]]:_(s64) = COPY $x10
; CHECK-I-NEXT: $x10 = COPY [[COPY2]](s64)
@@ -236,7 +236,7 @@ body: |
; CHECK-I-NEXT: $x11 = COPY [[ASHR]](s64)
; CHECK-I-NEXT: $x12 = COPY %ylo(s64)
; CHECK-I-NEXT: $x13 = COPY [[ASHR1]](s64)
- ; CHECK-I-NEXT: PseudoCALL target-flags(riscv-plt) &__divti3, csr_ilp32_lp64, implicit-def $x1, implicit $x10, implicit $x11, implicit $x12, implicit $x13, implicit-def $x10, implicit-def $x11
+ ; CHECK-I-NEXT: PseudoCALL target-flags(riscv-call) &__divti3, csr_ilp32_lp64, implicit-def $x1, implicit $x10, implicit $x11, implicit $x12, implicit $x13, implicit-def $x10, implicit-def $x11
; CHECK-I-NEXT: ADJCALLSTACKUP 0, 0, implicit-def $x2, implicit $x2
; CHECK-I-NEXT: [[COPY:%[0-9]+]]:_(s64) = COPY $x10
; CHECK-I-NEXT: [[COPY1:%[0-9]+]]:_(s64) = COPY $x11
@@ -260,7 +260,7 @@ body: |
; CHECK-M-NEXT: $x11 = COPY [[ASHR]](s64)
; CHECK-M-NEXT: $x12 = COPY %ylo(s64)
; CHECK-M-NEXT: $x13 = COPY [[ASHR1]](s64)
- ; CHECK-M-NEXT: PseudoCALL target-flags(riscv-plt) &__divti3, csr_ilp32_lp64, implicit-def $x1, implicit $x10, implicit $x11, implicit $x12, implicit $x13, implicit-def $x10, implicit-def $x11
+ ; CHECK-M-NEXT: PseudoCALL target-flags(riscv-call) &__divti3, csr_ilp32_lp64, implicit-def $x1, implicit $x10, implicit $x11, implicit $x12, implicit $x13, implicit-def $x10, implicit-def $x11
; CHECK-M-NEXT: ADJCALLSTACKUP 0, 0, implicit-def $x2, implicit $x2
; CHECK-M-NEXT: [[COPY:%[0-9]+]]:_(s64) = COPY $x10
; CHECK-M-NEXT: [[COPY1:%[0-9]+]]:_(s64) = COPY $x11
@@ -297,7 +297,7 @@ body: |
; CHECK-I-NEXT: $x11 = COPY %hi1(s64)
; CHECK-I-NEXT: $x12 = COPY %lo2(s64)
; CHECK-I-NEXT: $x13 = COPY %hi2(s64)
- ; CHECK-I-NEXT: PseudoCALL target-flags(riscv-plt) &__divti3, csr_ilp32_lp64, implicit-def $x1, implicit $x10, implicit $x11, implicit $x12, implicit $x13, implicit-def $x10, implicit-def $x11
+ ; CHECK-I-NEXT: PseudoCALL target-flags(riscv-call) &__divti3, csr_ilp32_lp64, implicit-def $x1, implicit $x10, implicit $x11, implicit $x12, implicit $x13, implicit-def $x10, implicit-def $x11
; CHECK-I-NEXT: ADJCALLSTACKUP 0, 0, implicit-def $x2, implicit $x2
; CHECK-I-NEXT: [[COPY:%[0-9]+]]:_(s64) = COPY $x10
; CHECK-I-NEXT: [[COPY1:%[0-9]+]]:_(s64) = COPY $x11
@@ -315,7 +315,7 @@ body: |
; CHECK-M-NEXT: $x11 = COPY %hi1(s64)
; CHECK-M-NEXT: $x12 = COPY %lo2(s64)
; CHECK-M-NEXT: $x13 = COPY %hi2(s64)
- ; CHECK-M-NEXT: PseudoCALL target-flags(riscv-plt) &__divti3, csr_ilp32_lp64, implicit-def $x1, implicit $x10, implicit $x11, implicit $x12, implicit $x13, implicit-def $x10, implicit-def $x11
+ ; CHECK-M-NEXT: PseudoCALL target-flags(riscv-call) &__divti3, csr_ilp32_lp64, implicit-def $x1, implicit $x10, implicit $x11, implicit $x12, implicit $x13, implicit-def $x10, implicit-def $x11
; CHECK-M-NEXT: ADJCALLSTACKUP 0, 0, implicit-def $x2, implicit $x2
; CHECK-M-NEXT: [[COPY:%[0-9]+]]:_(s64) = COPY $x10
; CHECK-M-NEXT: [[COPY1:%[0-9]+]]:_(s64) = COPY $x11
@@ -349,7 +349,7 @@ body: |
; CHECK-I-NEXT: ADJCALLSTACKDOWN 0, 0, implicit-def $x2, implicit $x2
; CHECK-I-NEXT: $x10 = COPY [[AND]](s64)
; CHECK-I-NEXT: $x11 = COPY [[AND1]](s64)
- ; CHECK-I-NEXT: PseudoCALL target-flags(riscv-plt) &__udivdi3, csr_ilp32_lp64, implicit-def $x1, implicit $x10, implicit $x11, implicit-def $x10
+ ; CHECK-I-NEXT: PseudoCALL target-flags(riscv-call) &__udivdi3, csr_ilp32_lp64, implicit-def $x1, implicit $x10, implicit $x11, implicit-def $x10
; CHECK-I-NEXT: ADJCALLSTACKUP 0, 0, implicit-def $x2, implicit $x2
; CHECK-I-NEXT: [[COPY2:%[0-9]+]]:_(s64) = COPY $x10
; CHECK-I-NEXT: $x10 = COPY [[COPY2]](s64)
@@ -392,7 +392,7 @@ body: |
; CHECK-I-NEXT: ADJCALLSTACKDOWN 0, 0, implicit-def $x2, implicit $x2
; CHECK-I-NEXT: $x10 = COPY [[AND]](s64)
; CHECK-I-NEXT: $x11 = COPY [[AND1]](s64)
- ; CHECK-I-NEXT: PseudoCALL target-flags(riscv-plt) &__udivdi3, csr_ilp32_lp64, implicit-def $x1, implicit $x10, implicit $x11, implicit-def $x10
+ ; CHECK-I-NEXT: PseudoCALL target-flags(riscv-call) &__udivdi3, csr_ilp32_lp64, implicit-def $x1, implicit $x10, implicit $x11, implicit-def $x10
; CHECK-I-NEXT: ADJCALLSTACKUP 0, 0, implicit-def $x2, implicit $x2
; CHECK-I-NEXT: [[COPY2:%[0-9]+]]:_(s64) = COPY $x10
; CHECK-I-NEXT: $x10 = COPY [[COPY2]](s64)
@@ -435,7 +435,7 @@ body: |
; CHECK-I-NEXT: ADJCALLSTACKDOWN 0, 0, implicit-def $x2, implicit $x2
; CHECK-I-NEXT: $x10 = COPY [[AND]](s64)
; CHECK-I-NEXT: $x11 = COPY [[AND1]](s64)
- ; CHECK-I-NEXT: PseudoCALL target-flags(riscv-plt) &__udivdi3, csr_ilp32_lp64, implicit-def $x1, implicit $x10, implicit $x11, implicit-def $x10
+ ; CHECK-I-NEXT: PseudoCALL target-flags(riscv-call) &__udivdi3, csr_ilp32_lp64, implicit-def $x1, implicit $x10, implicit $x11, implicit-def $x10
; CHECK-I-NEXT: ADJCALLSTACKUP 0, 0, implicit-def $x2, implicit $x2
; CHECK-I-NEXT: [[COPY2:%[0-9]+]]:_(s64) = COPY $x10
; CHECK-I-NEXT: $x10 = COPY [[COPY2]](s64)
@@ -478,7 +478,7 @@ body: |
; CHECK-I-NEXT: ADJCALLSTACKDOWN 0, 0, implicit-def $x2, implicit $x2
; CHECK-I-NEXT: $x10 = COPY [[AND]](s64)
; CHECK-I-NEXT: $x11 = COPY [[AND1]](s64)
- ; CHECK-I-NEXT: PseudoCALL target-flags(riscv-plt) &__udivdi3, csr_ilp32_lp64, implicit-def $x1, implicit $x10, implicit $x11, implicit-def $x10
+ ; CHECK-I-NEXT: PseudoCALL target-flags(riscv-call) &__udivdi3, csr_ilp32_lp64, implicit-def $x1, implicit $x10, implicit $x11, implicit-def $x10
; CHECK-I-NEXT: ADJCALLSTACKUP 0, 0, implicit-def $x2, implicit $x2
; CHECK-I-NEXT: [[COPY2:%[0-9]+]]:_(s64) = COPY $x10
; CHECK-I-NEXT: $x10 = COPY [[COPY2]](s64)
@@ -513,7 +513,7 @@ body: |
; CHECK-I-NEXT: ADJCALLSTACKDOWN 0, 0, implicit-def $x2, implicit $x2
; CHECK-I-NEXT: $x10 = COPY [[COPY]](s64)
; CHECK-I-NEXT: $x11 = COPY [[COPY1]](s64)
- ; CHECK-I-NEXT: PseudoCALL target-flags(riscv-plt) &__udivdi3, csr_ilp32_lp64, implicit-def $x1, implicit $x10, implicit $x11, implicit-def $x10
+ ; CHECK-I-NEXT: PseudoCALL target-flags(riscv-call) &__udivdi3, csr_ilp32_lp64, implicit-def $x1, implicit $x10, implicit $x11, implicit-def $x10
; CHECK-I-NEXT: ADJCALLSTACKUP 0, 0, implicit-def $x2, implicit $x2
; CHECK-I-NEXT: [[COPY2:%[0-9]+]]:_(s64) = COPY $x10
; CHECK-I-NEXT: $x10 = COPY [[COPY2]](s64)
@@ -554,7 +554,7 @@ body: |
; CHECK-I-NEXT: $x11 = COPY [[AND1]](s64)
; CHECK-I-NEXT: $x12 = COPY [[AND2]](s64)
; CHECK-I-NEXT: $x13 = COPY [[AND3]](s64)
- ; CHECK-I-NEXT: PseudoCALL target-flags(riscv-plt) &__udivti3, csr_ilp32_lp64, implicit-def $x1, implicit $x10, implicit $x11, implicit $x12, implicit $x13, implicit-def $x10, implicit-def $x11
+ ; CHECK-I-NEXT: PseudoCALL target-flags(riscv-call) &__udivti3, csr_ilp32_lp64, implicit-def $x1, implicit $x10, implicit $x11, implicit $x12, implicit $x13, implicit-def $x10, implicit-def $x11
; CHECK-I-NEXT: ADJCALLSTACKUP 0, 0, implicit-def $x2, implicit $x2
; CHECK-I-NEXT: [[COPY:%[0-9]+]]:_(s64) = COPY $x10
; CHECK-I-NEXT: [[COPY1:%[0-9]+]]:_(s64) = COPY $x11
@@ -580,7 +580,7 @@ body: |
; CHECK-M-NEXT: $x11 = COPY [[AND1]](s64)
; CHECK-M-NEXT: $x12 = COPY [[AND2]](s64)
; CHECK-M-NEXT: $x13 = COPY [[AND3]](s64)
- ; CHECK-M-NEXT: PseudoCALL target-flags(riscv-plt) &__udivti3, csr_ilp32_lp64, implicit-def $x1, implicit $x10, implicit $x11, implicit $x12, implicit $x13, implicit-def $x10, implicit-def $x11
+ ; CHECK-M-NEXT: PseudoCALL target-flags(riscv-call) &__udivti3, csr_ilp32_lp64, implicit-def $x1, implicit $x10, implicit $x11, implicit $x12, implicit $x13, implicit-def $x10, implicit-def $x11
; CHECK-M-NEXT: ADJCALLSTACKUP 0, 0, implicit-def $x2, implicit $x2
; CHECK-M-NEXT: [[COPY:%[0-9]+]]:_(s64) = COPY $x10
; CHECK-M-NEXT: [[COPY1:%[0-9]+]]:_(s64) = COPY $x11
@@ -617,7 +617,7 @@ body: |
; CHECK-I-NEXT: $x11 = COPY %hi1(s64)
; CHECK-I-NEXT: $x12 = COPY %lo2(s64)
; CHECK-I-NEXT: $x13 = COPY %hi2(s64)
- ; CHECK-I-NEXT: PseudoCALL target-flags(riscv-plt) &__udivti3, csr_ilp32_lp64, implicit-def $x1, implicit $x10, implicit $x11, implicit $x12, implicit $x13, implicit-def $x10, implicit-def $x11
+ ; CHECK-I-NEXT: PseudoCALL target-flags(riscv-call) &__udivti3, csr_ilp32_lp64, implicit-def $x1, implicit $x10, implicit $x11, implicit $x12, implicit $x13, implicit-def $x10, implicit-def $x11
; CHECK-I-NEXT: ADJCALLSTACKUP 0, 0, implicit-def $x2, implicit $x2
; CHECK-I-NEXT: [[COPY:%[0-9]+]]:_(s64) = COPY $x10
; CHECK-I-NEXT: [[COPY1:%[0-9]+]]:_(s64) = COPY $x11
@@ -635,7 +635,7 @@ body: |
; CHECK-M-NEXT: $x11 = COPY %hi1(s64)
; CHECK-M-NEXT: $x12 = COPY %lo2(s64)
; CHECK-M-NEXT: $x13 = COPY %hi2(s64)
- ; CHECK-M-NEXT: PseudoCALL target-flags(riscv-plt) &__udivti3, csr_ilp32_lp64, implicit-def $x1, implicit $x10, implicit $x11, implicit $x12, implicit $x13, implicit-def $x10, implicit-def $x11
+ ; CHECK-M-NEXT: PseudoCALL target-flags(riscv-call) &__udivti3, csr_ilp32_lp64, implicit-def $x1, implicit $x10, implicit $x11, implicit $x12, implicit $x13, implicit-def $x10, implicit-def $x11
; CHECK-M-NEXT: ADJCALLSTACKUP 0, 0, implicit-def $x2, implicit $x2
; CHECK-M-NEXT: [[COPY:%[0-9]+]]:_(s64) = COPY $x10
; CHECK-M-NEXT: [[COPY1:%[0-9]+]]:_(s64) = COPY $x11
diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/legalize-fp-ceil-floor.mir b/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/legalize-fp-ceil-floor.mir
index 956989480c5b..1e184bd0c112 100644
--- a/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/legalize-fp-ceil-floor.mir
+++ b/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/legalize-fp-ceil-floor.mir
@@ -16,7 +16,7 @@ body: |
; CHECK-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $f10_f
; CHECK-NEXT: ADJCALLSTACKDOWN 0, 0, implicit-def $x2, implicit $x2
; CHECK-NEXT: $f10_f = COPY [[COPY]](s32)
- ; CHECK-NEXT: PseudoCALL target-flags(riscv-plt) &ceilf, csr_ilp32d_lp64d, implicit-def $x1, implicit $f10_f, implicit-def $f10_f
+ ; CHECK-NEXT: PseudoCALL target-flags(riscv-call) &ceilf, csr_ilp32d_lp64d, implicit-def $x1, implicit $f10_f, implicit-def $f10_f
; CHECK-NEXT: ADJCALLSTACKUP 0, 0, implicit-def $x2, implicit $x2
; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $f10_f
; CHECK-NEXT: $f10_f = COPY [[COPY1]](s32)
@@ -39,7 +39,7 @@ body: |
; CHECK-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $f10_f
; CHECK-NEXT: ADJCALLSTACKDOWN 0, 0, implicit-def $x2, implicit $x2
; CHECK-NEXT: $f10_f = COPY [[COPY]](s32)
- ; CHECK-NEXT: PseudoCALL target-flags(riscv-plt) &floorf, csr_ilp32d_lp64d, implicit-def $x1, implicit $f10_f, implicit-def $f10_f
+ ; CHECK-NEXT: PseudoCALL target-flags(riscv-call) &floorf, csr_ilp32d_lp64d, implicit-def $x1, implicit $f10_f, implicit-def $f10_f
; CHECK-NEXT: ADJCALLSTACKUP 0, 0, implicit-def $x2, implicit $x2
; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $f10_f
; CHECK-NEXT: $f10_f = COPY [[COPY1]](s32)
@@ -62,7 +62,7 @@ body: |
; CHECK-NEXT: [[COPY:%[0-9]+]]:_(s64) = COPY $f10_d
; CHECK-NEXT: ADJCALLSTACKDOWN 0, 0, implicit-def $x2, implicit $x2
; CHECK-NEXT: $f10_d = COPY [[COPY]](s64)
- ; CHECK-NEXT: PseudoCALL target-flags(riscv-plt) &ceil, csr_ilp32d_lp64d, implicit-def $x1, implicit $f10_d, implicit-def $f10_d
+ ; CHECK-NEXT: PseudoCALL target-flags(riscv-call) &ceil, csr_ilp32d_lp64d, implicit-def $x1, implicit $f10_d, implicit-def $f10_d
; CHECK-NEXT: ADJCALLSTACKUP 0, 0, implicit-def $x2, implicit $x2
; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(s64) = COPY $f10_d
; CHECK-NEXT: $f10_d = COPY [[COPY1]](s64)
@@ -85,7 +85,7 @@ body: |
; CHECK-NEXT: [[COPY:%[0-9]+]]:_(s64) = COPY $f10_d
; CHECK-NEXT: ADJCALLSTACKDOWN 0, 0, implicit-def $x2, implicit $x2
; CHECK-NEXT: $f10_d = COPY [[COPY]](s64)
- ; CHECK-NEXT: PseudoCALL target-flags(riscv-plt) &floor, csr_ilp32d_lp64d, implicit-def $x1, implicit $f10_d, implicit-def $f10_d
+ ; CHECK-NEXT: PseudoCALL target-flags(riscv-call) &floor, csr_ilp32d_lp64d, implicit-def $x1, implicit $f10_d, implicit-def $f10_d
; CHECK-NEXT: ADJCALLSTACKUP 0, 0, implicit-def $x2, implicit $x2
; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(s64) = COPY $f10_d
; CHECK-NEXT: $f10_d = COPY [[COPY1]](s64)
diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/legalize-mul-rv32.mir b/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/legalize-mul-rv32.mir
index 617471313356..1af5b686a526 100644
--- a/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/legalize-mul-rv32.mir
+++ b/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/legalize-mul-rv32.mir
@@ -11,7 +11,7 @@ body: |
; CHECK-NEXT: ADJCALLSTACKDOWN 0, 0, implicit-def $x2, implicit $x2
; CHECK-NEXT: $x10 = COPY [[COPY]](s32)
; CHECK-NEXT: $x11 = COPY [[COPY1]](s32)
- ; CHECK-NEXT: PseudoCALL target-flags(riscv-plt) &__mulsi3, csr_ilp32_lp64, implicit-def $x1, implicit $x10, implicit $x11, implicit-def $x10
+ ; CHECK-NEXT: PseudoCALL target-flags(riscv-call) &__mulsi3, csr_ilp32_lp64, implicit-def $x1, implicit $x10, implicit $x11, implicit-def $x10
; CHECK-NEXT: ADJCALLSTACKUP 0, 0, implicit-def $x2, implicit $x2
; CHECK-NEXT: [[COPY2:%[0-9]+]]:_(s32) = COPY $x10
; CHECK-NEXT: $x10 = COPY [[COPY2]](s32)
@@ -36,7 +36,7 @@ body: |
; CHECK-NEXT: ADJCALLSTACKDOWN 0, 0, implicit-def $x2, implicit $x2
; CHECK-NEXT: $x10 = COPY [[COPY]](s32)
; CHECK-NEXT: $x11 = COPY [[COPY1]](s32)
- ; CHECK-NEXT: PseudoCALL target-flags(riscv-plt) &__mulsi3, csr_ilp32_lp64, implicit-def $x1, implicit $x10, implicit $x11, implicit-def $x10
+ ; CHECK-NEXT: PseudoCALL target-flags(riscv-call) &__mulsi3, csr_ilp32_lp64, implicit-def $x1, implicit $x10, implicit $x11, implicit-def $x10
; CHECK-NEXT: ADJCALLSTACKUP 0, 0, implicit-def $x2, implicit $x2
; CHECK-NEXT: [[COPY2:%[0-9]+]]:_(s32) = COPY $x10
; CHECK-NEXT: $x10 = COPY [[COPY2]](s32)
@@ -61,7 +61,7 @@ body: |
; CHECK-NEXT: ADJCALLSTACKDOWN 0, 0, implicit-def $x2, implicit $x2
; CHECK-NEXT: $x10 = COPY [[COPY]](s32)
; CHECK-NEXT: $x11 = COPY [[COPY1]](s32)
- ; CHECK-NEXT: PseudoCALL target-flags(riscv-plt) &__mulsi3, csr_ilp32_lp64, implicit-def $x1, implicit $x10, implicit $x11, implicit-def $x10
+ ; CHECK-NEXT: PseudoCALL target-flags(riscv-call) &__mulsi3, csr_ilp32_lp64, implicit-def $x1, implicit $x10, implicit $x11, implicit-def $x10
; CHECK-NEXT: ADJCALLSTACKUP 0, 0, implicit-def $x2, implicit $x2
; CHECK-NEXT: [[COPY2:%[0-9]+]]:_(s32) = COPY $x10
; CHECK-NEXT: $x10 = COPY [[COPY2]](s32)
@@ -86,7 +86,7 @@ body: |
; CHECK-NEXT: ADJCALLSTACKDOWN 0, 0, implicit-def $x2, implicit $x2
; CHECK-NEXT: $x10 = COPY [[COPY]](s32)
; CHECK-NEXT: $x11 = COPY [[COPY1]](s32)
- ; CHECK-NEXT: PseudoCALL target-flags(riscv-plt) &__mulsi3, csr_ilp32_lp64, implicit-def $x1, implicit $x10, implicit $x11, implicit-def $x10
+ ; CHECK-NEXT: PseudoCALL target-flags(riscv-call) &__mulsi3, csr_ilp32_lp64, implicit-def $x1, implicit $x10, implicit $x11, implicit-def $x10
; CHECK-NEXT: ADJCALLSTACKUP 0, 0, implicit-def $x2, implicit $x2
; CHECK-NEXT: [[COPY2:%[0-9]+]]:_(s32) = COPY $x10
; CHECK-NEXT: $x10 = COPY [[COPY2]](s32)
@@ -112,7 +112,7 @@ body: |
; CHECK-NEXT: $x11 = COPY %hi1(s32)
; CHECK-NEXT: $x12 = COPY %lo2(s32)
; CHECK-NEXT: $x13 = COPY %hi2(s32)
- ; CHECK-NEXT: PseudoCALL target-flags(riscv-plt) &__muldi3, csr_ilp32_lp64, implicit-def $x1, implicit $x10, implicit $x11, implicit $x12, implicit $x13, implicit-def $x10, implicit-def $x11
+ ; CHECK-NEXT: PseudoCALL target-flags(riscv-call) &__muldi3, csr_ilp32_lp64, implicit-def $x1, implicit $x10, implicit $x11, implicit $x12, implicit $x13, implicit-def $x10, implicit-def $x11
; CHECK-NEXT: ADJCALLSTACKUP 0, 0, implicit-def $x2, implicit $x2
; CHECK-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $x10
; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $x11
@@ -148,7 +148,7 @@ body: |
; CHECK-NEXT: $x11 = COPY [[ASHR]](s32)
; CHECK-NEXT: $x12 = COPY [[COPY1]](s32)
; CHECK-NEXT: $x13 = COPY [[ASHR1]](s32)
- ; CHECK-NEXT: PseudoCALL target-flags(riscv-plt) &__muldi3, csr_ilp32_lp64, implicit-def $x1, implicit $x10, implicit $x11, implicit $x12, implicit $x13, implicit-def $x10, implicit-def $x11
+ ; CHECK-NEXT: PseudoCALL target-flags(riscv-call) &__muldi3, csr_ilp32_lp64, implicit-def $x1, implicit $x10, implicit $x11, implicit $x12, implicit $x13, implicit-def $x10, implicit-def $x11
; CHECK-NEXT: ADJCALLSTACKUP 0, 0, implicit-def $x2, implicit $x2
; CHECK-NEXT: [[COPY2:%[0-9]+]]:_(s32) = COPY $x11
; CHECK-NEXT: $x10 = COPY [[COPY2]](s32)
diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/legalize-mul-rv64.mir b/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/legalize-mul-rv64.mir
index fb41ee5bafc6..478a652dbf82 100644
--- a/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/legalize-mul-rv64.mir
+++ b/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/legalize-mul-rv64.mir
@@ -11,7 +11,7 @@ body: |
; CHECK-NEXT: ADJCALLSTACKDOWN 0, 0, implicit-def $x2, implicit $x2
; CHECK-NEXT: $x10 = COPY [[COPY]](s64)
; CHECK-NEXT: $x11 = COPY [[COPY1]](s64)
- ; CHECK-NEXT: PseudoCALL target-flags(riscv-plt) &__muldi3, csr_ilp32_lp64, implicit-def $x1, implicit $x10, implicit $x11, implicit-def $x10
+ ; CHECK-NEXT: PseudoCALL target-flags(riscv-call) &__muldi3, csr_ilp32_lp64, implicit-def $x1, implicit $x10, implicit $x11, implicit-def $x10
; CHECK-NEXT: ADJCALLSTACKUP 0, 0, implicit-def $x2, implicit $x2
; CHECK-NEXT: [[COPY2:%[0-9]+]]:_(s64) = COPY $x10
; CHECK-NEXT: $x10 = COPY [[COPY2]](s64)
@@ -36,7 +36,7 @@ body: |
; CHECK-NEXT: ADJCALLSTACKDOWN 0, 0, implicit-def $x2, implicit $x2
; CHECK-NEXT: $x10 = COPY [[COPY]](s64)
; CHECK-NEXT: $x11 = COPY [[COPY1]](s64)
- ; CHECK-NEXT: PseudoCALL target-flags(riscv-plt) &__muldi3, csr_ilp32_lp64, implicit-def $x1, implicit $x10, implicit $x11, implicit-def $x10
+ ; CHECK-NEXT: PseudoCALL target-flags(riscv-call) &__muldi3, csr_ilp32_lp64, implicit-def $x1, implicit $x10, implicit $x11, implicit-def $x10
; CHECK-NEXT: ADJCALLSTACKUP 0, 0, implicit-def $x2, implicit $x2
; CHECK-NEXT: [[COPY2:%[0-9]+]]:_(s64) = COPY $x10
; CHECK-NEXT: $x10 = COPY [[COPY2]](s64)
@@ -61,7 +61,7 @@ body: |
; CHECK-NEXT: ADJCALLSTACKDOWN 0, 0, implicit-def $x2, implicit $x2
; CHECK-NEXT: $x10 = COPY [[COPY]](s64)
; CHECK-NEXT: $x11 = COPY [[COPY1]](s64)
- ; CHECK-NEXT: PseudoCALL target-flags(riscv-plt) &__muldi3, csr_ilp32_lp64, implicit-def $x1, implicit $x10, implicit $x11, implicit-def $x10
+ ; CHECK-NEXT: PseudoCALL target-flags(riscv-call) &__muldi3, csr_ilp32_lp64, implicit-def $x1, implicit $x10, implicit $x11, implicit-def $x10
; CHECK-NEXT: ADJCALLSTACKUP 0, 0, implicit-def $x2, implicit $x2
; CHECK-NEXT: [[COPY2:%[0-9]+]]:_(s64) = COPY $x10
; CHECK-NEXT: $x10 = COPY [[COPY2]](s64)
@@ -86,7 +86,7 @@ body: |
; CHECK-NEXT: ADJCALLSTACKDOWN 0, 0, implicit-def $x2, implicit $x2
; CHECK-NEXT: $x10 = COPY [[COPY]](s64)
; CHECK-NEXT: $x11 = COPY [[COPY1]](s64)
- ; CHECK-NEXT: PseudoCALL target-flags(riscv-plt) &__muldi3, csr_ilp32_lp64, implicit-def $x1, implicit $x10, implicit $x11, implicit-def $x10
+ ; CHECK-NEXT: PseudoCALL target-flags(riscv-call) &__muldi3, csr_ilp32_lp64, implicit-def $x1, implicit $x10, implicit $x11, implicit-def $x10
; CHECK-NEXT: ADJCALLSTACKUP 0, 0, implicit-def $x2, implicit $x2
; CHECK-NEXT: [[COPY2:%[0-9]+]]:_(s64) = COPY $x10
; CHECK-NEXT: $x10 = COPY [[COPY2]](s64)
@@ -111,7 +111,7 @@ body: |
; CHECK-NEXT: ADJCALLSTACKDOWN 0, 0, implicit-def $x2, implicit $x2
; CHECK-NEXT: $x10 = COPY [[COPY]](s64)
; CHECK-NEXT: $x11 = COPY [[COPY1]](s64)
- ; CHECK-NEXT: PseudoCALL target-flags(riscv-plt) &__muldi3, csr_ilp32_lp64, implicit-def $x1, implicit $x10, implicit $x11, implicit-def $x10
+ ; CHECK-NEXT: PseudoCALL target-flags(riscv-call) &__muldi3, csr_ilp32_lp64, implicit-def $x1, implicit $x10, implicit $x11, implicit-def $x10
; CHECK-NEXT: ADJCALLSTACKUP 0, 0, implicit-def $x2, implicit $x2
; CHECK-NEXT: [[COPY2:%[0-9]+]]:_(s64) = COPY $x10
; CHECK-NEXT: $x10 = COPY [[COPY2]](s64)
@@ -137,7 +137,7 @@ body: |
; CHECK-NEXT: $x11 = COPY %hi1(s64)
; CHECK-NEXT: $x12 = COPY %lo2(s64)
; CHECK-NEXT: $x13 = COPY %hi2(s64)
- ; CHECK-NEXT: PseudoCALL target-flags(riscv-plt) &__multi3, csr_ilp32_lp64, implicit-def $x1, implicit $x10, implicit $x11, implicit $x12, implicit $x13, implicit-def $x10, implicit-def $x11
+ ; CHECK-NEXT: PseudoCALL target-flags(riscv-call) &__multi3, csr_ilp32_lp64, implicit-def $x1, implicit $x10, implicit $x11, implicit $x12, implicit $x13, implicit-def $x10, implicit-def $x11
; CHECK-NEXT: ADJCALLSTACKUP 0, 0, implicit-def $x2, implicit $x2
; CHECK-NEXT: [[COPY:%[0-9]+]]:_(s64) = COPY $x10
; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(s64) = COPY $x11
@@ -173,7 +173,7 @@ body: |
; CHECK-NEXT: $x11 = COPY [[ASHR]](s64)
; CHECK-NEXT: $x12 = COPY [[COPY1]](s64)
; CHECK-NEXT: $x13 = COPY [[ASHR1]](s64)
- ; CHECK-NEXT: PseudoCALL target-flags(riscv-plt) &__multi3, csr_ilp32_lp64, implicit-def $x1, implicit $x10, implicit $x11, implicit $x12, implicit $x13, implicit-def $x10, implicit-def $x11
+ ; CHECK-NEXT: PseudoCALL target-flags(riscv-call) &__multi3, csr_ilp32_lp64, implicit-def $x1, implicit $x10, implicit $x11, implicit $x12, implicit $x13, implicit-def $x10, implicit-def $x11
; CHECK-NEXT: ADJCALLSTACKUP 0, 0, implicit-def $x2, implicit $x2
; CHECK-NEXT: [[COPY2:%[0-9]+]]:_(s64) = COPY $x11
; CHECK-NEXT: $x10 = COPY [[COPY2]](s64)
diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/legalize-mulo-rv32.mir b/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/legalize-mulo-rv32.mir
index d0929fd19eb9..2e46893b8cf9 100644
--- a/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/legalize-mulo-rv32.mir
+++ b/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/legalize-mulo-rv32.mir
@@ -46,7 +46,7 @@ body: |
; LIBCALL-NEXT: ADJCALLSTACKDOWN 0, 0, implicit-def $x2, implicit $x2
; LIBCALL-NEXT: $x10 = COPY [[ASHR]](s32)
; LIBCALL-NEXT: $x11 = COPY [[ASHR1]](s32)
- ; LIBCALL-NEXT: PseudoCALL target-flags(riscv-plt) &__mulsi3, csr_ilp32_lp64, implicit-def $x1, implicit $x10, implicit $x11, implicit-def $x10
+ ; LIBCALL-NEXT: PseudoCALL target-flags(riscv-call) &__mulsi3, csr_ilp32_lp64, implicit-def $x1, implicit $x10, implicit $x11, implicit-def $x10
; LIBCALL-NEXT: ADJCALLSTACKUP 0, 0, implicit-def $x2, implicit $x2
; LIBCALL-NEXT: [[COPY2:%[0-9]+]]:_(s32) = COPY $x10
; LIBCALL-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 24
@@ -108,7 +108,7 @@ body: |
; LIBCALL-NEXT: ADJCALLSTACKDOWN 0, 0, implicit-def $x2, implicit $x2
; LIBCALL-NEXT: $x10 = COPY [[ASHR]](s32)
; LIBCALL-NEXT: $x11 = COPY [[ASHR1]](s32)
- ; LIBCALL-NEXT: PseudoCALL target-flags(riscv-plt) &__mulsi3, csr_ilp32_lp64, implicit-def $x1, implicit $x10, implicit $x11, implicit-def $x10
+ ; LIBCALL-NEXT: PseudoCALL target-flags(riscv-call) &__mulsi3, csr_ilp32_lp64, implicit-def $x1, implicit $x10, implicit $x11, implicit-def $x10
; LIBCALL-NEXT: ADJCALLSTACKUP 0, 0, implicit-def $x2, implicit $x2
; LIBCALL-NEXT: [[COPY2:%[0-9]+]]:_(s32) = COPY $x10
; LIBCALL-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
@@ -164,7 +164,7 @@ body: |
; LIBCALL-NEXT: $x11 = COPY [[ASHR]](s32)
; LIBCALL-NEXT: $x12 = COPY [[COPY1]](s32)
; LIBCALL-NEXT: $x13 = COPY [[ASHR1]](s32)
- ; LIBCALL-NEXT: PseudoCALL target-flags(riscv-plt) &__muldi3, csr_ilp32_lp64, implicit-def $x1, implicit $x10, implicit $x11, implicit $x12, implicit $x13, implicit-def $x10, implicit-def $x11
+ ; LIBCALL-NEXT: PseudoCALL target-flags(riscv-call) &__muldi3, csr_ilp32_lp64, implicit-def $x1, implicit $x10, implicit $x11, implicit $x12, implicit $x13, implicit-def $x10, implicit-def $x11
; LIBCALL-NEXT: ADJCALLSTACKUP 0, 0, implicit-def $x2, implicit $x2
; LIBCALL-NEXT: [[COPY2:%[0-9]+]]:_(s32) = COPY $x10
; LIBCALL-NEXT: [[COPY3:%[0-9]+]]:_(s32) = COPY $x11
@@ -222,7 +222,7 @@ body: |
; LIBCALL-NEXT: ADJCALLSTACKDOWN 0, 0, implicit-def $x2, implicit $x2
; LIBCALL-NEXT: $x10 = COPY [[AND]](s32)
; LIBCALL-NEXT: $x11 = COPY [[AND1]](s32)
- ; LIBCALL-NEXT: PseudoCALL target-flags(riscv-plt) &__mulsi3, csr_ilp32_lp64, implicit-def $x1, implicit $x10, implicit $x11, implicit-def $x10
+ ; LIBCALL-NEXT: PseudoCALL target-flags(riscv-call) &__mulsi3, csr_ilp32_lp64, implicit-def $x1, implicit $x10, implicit $x11, implicit-def $x10
; LIBCALL-NEXT: ADJCALLSTACKUP 0, 0, implicit-def $x2, implicit $x2
; LIBCALL-NEXT: [[COPY2:%[0-9]+]]:_(s32) = COPY $x10
; LIBCALL-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 255
@@ -278,7 +278,7 @@ body: |
; LIBCALL-NEXT: ADJCALLSTACKDOWN 0, 0, implicit-def $x2, implicit $x2
; LIBCALL-NEXT: $x10 = COPY [[AND]](s32)
; LIBCALL-NEXT: $x11 = COPY [[AND1]](s32)
- ; LIBCALL-NEXT: PseudoCALL target-flags(riscv-plt) &__mulsi3, csr_ilp32_lp64, implicit-def $x1, implicit $x10, implicit $x11, implicit-def $x10
+ ; LIBCALL-NEXT: PseudoCALL target-flags(riscv-call) &__mulsi3, csr_ilp32_lp64, implicit-def $x1, implicit $x10, implicit $x11, implicit-def $x10
; LIBCALL-NEXT: ADJCALLSTACKUP 0, 0, implicit-def $x2, implicit $x2
; LIBCALL-NEXT: [[COPY2:%[0-9]+]]:_(s32) = COPY $x10
; LIBCALL-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 65535
@@ -330,7 +330,7 @@ body: |
; LIBCALL-NEXT: $x11 = COPY [[C]](s32)
; LIBCALL-NEXT: $x12 = COPY [[COPY1]](s32)
; LIBCALL-NEXT: $x13 = COPY [[C1]](s32)
- ; LIBCALL-NEXT: PseudoCALL target-flags(riscv-plt) &__muldi3, csr_ilp32_lp64, implicit-def $x1, implicit $x10, implicit $x11, implicit $x12, implicit $x13, implicit-def $x10, implicit-def $x11
+ ; LIBCALL-NEXT: PseudoCALL target-flags(riscv-call) &__muldi3, csr_ilp32_lp64, implicit-def $x1, implicit $x10, implicit $x11, implicit $x12, implicit $x13, implicit-def $x10, implicit-def $x11
; LIBCALL-NEXT: ADJCALLSTACKUP 0, 0, implicit-def $x2, implicit $x2
; LIBCALL-NEXT: [[COPY2:%[0-9]+]]:_(s32) = COPY $x10
; LIBCALL-NEXT: [[COPY3:%[0-9]+]]:_(s32) = COPY $x11
diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/legalize-mulo-rv64.mir b/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/legalize-mulo-rv64.mir
index c2bf9ff2a61e..29f4458d5f7f 100644
--- a/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/legalize-mulo-rv64.mir
+++ b/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/legalize-mulo-rv64.mir
@@ -46,7 +46,7 @@ body: |
; LIBCALL-NEXT: ADJCALLSTACKDOWN 0, 0, implicit-def $x2, implicit $x2
; LIBCALL-NEXT: $x10 = COPY [[ASHR]](s64)
; LIBCALL-NEXT: $x11 = COPY [[ASHR1]](s64)
- ; LIBCALL-NEXT: PseudoCALL target-flags(riscv-plt) &__muldi3, csr_ilp32_lp64, implicit-def $x1, implicit $x10, implicit $x11, implicit-def $x10
+ ; LIBCALL-NEXT: PseudoCALL target-flags(riscv-call) &__muldi3, csr_ilp32_lp64, implicit-def $x1, implicit $x10, implicit $x11, implicit-def $x10
; LIBCALL-NEXT: ADJCALLSTACKUP 0, 0, implicit-def $x2, implicit $x2
; LIBCALL-NEXT: [[COPY2:%[0-9]+]]:_(s64) = COPY $x10
; LIBCALL-NEXT: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 56
@@ -108,7 +108,7 @@ body: |
; LIBCALL-NEXT: ADJCALLSTACKDOWN 0, 0, implicit-def $x2, implicit $x2
; LIBCALL-NEXT: $x10 = COPY [[ASHR]](s64)
; LIBCALL-NEXT: $x11 = COPY [[ASHR1]](s64)
- ; LIBCALL-NEXT: PseudoCALL target-flags(riscv-plt) &__muldi3, csr_ilp32_lp64, implicit-def $x1, implicit $x10, implicit $x11, implicit-def $x10
+ ; LIBCALL-NEXT: PseudoCALL target-flags(riscv-call) &__muldi3, csr_ilp32_lp64, implicit-def $x1, implicit $x10, implicit $x11, implicit-def $x10
; LIBCALL-NEXT: ADJCALLSTACKUP 0, 0, implicit-def $x2, implicit $x2
; LIBCALL-NEXT: [[COPY2:%[0-9]+]]:_(s64) = COPY $x10
; LIBCALL-NEXT: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 48
@@ -160,7 +160,7 @@ body: |
; LIBCALL-NEXT: ADJCALLSTACKDOWN 0, 0, implicit-def $x2, implicit $x2
; LIBCALL-NEXT: $x10 = COPY [[SEXT_INREG]](s64)
; LIBCALL-NEXT: $x11 = COPY [[SEXT_INREG1]](s64)
- ; LIBCALL-NEXT: PseudoCALL target-flags(riscv-plt) &__muldi3, csr_ilp32_lp64, implicit-def $x1, implicit $x10, implicit $x11, implicit-def $x10
+ ; LIBCALL-NEXT: PseudoCALL target-flags(riscv-call) &__muldi3, csr_ilp32_lp64, implicit-def $x1, implicit $x10, implicit $x11, implicit-def $x10
; LIBCALL-NEXT: ADJCALLSTACKUP 0, 0, implicit-def $x2, implicit $x2
; LIBCALL-NEXT: [[COPY2:%[0-9]+]]:_(s64) = COPY $x10
; LIBCALL-NEXT: [[SEXT_INREG2:%[0-9]+]]:_(s64) = G_SEXT_INREG [[COPY2]], 32
@@ -214,7 +214,7 @@ body: |
; LIBCALL-NEXT: $x11 = COPY [[ASHR]](s64)
; LIBCALL-NEXT: $x12 = COPY [[COPY1]](s64)
; LIBCALL-NEXT: $x13 = COPY [[ASHR1]](s64)
- ; LIBCALL-NEXT: PseudoCALL target-flags(riscv-plt) &__multi3, csr_ilp32_lp64, implicit-def $x1, implicit $x10, implicit $x11, implicit $x12, implicit $x13, implicit-def $x10, implicit-def $x11
+ ; LIBCALL-NEXT: PseudoCALL target-flags(riscv-call) &__multi3, csr_ilp32_lp64, implicit-def $x1, implicit $x10, implicit $x11, implicit $x12, implicit $x13, implicit-def $x10, implicit-def $x11
; LIBCALL-NEXT: ADJCALLSTACKUP 0, 0, implicit-def $x2, implicit $x2
; LIBCALL-NEXT: [[COPY2:%[0-9]+]]:_(s64) = COPY $x10
; LIBCALL-NEXT: [[COPY3:%[0-9]+]]:_(s64) = COPY $x11
@@ -272,7 +272,7 @@ body: |
; LIBCALL-NEXT: ADJCALLSTACKDOWN 0, 0, implicit-def $x2, implicit $x2
; LIBCALL-NEXT: $x10 = COPY [[AND]](s64)
; LIBCALL-NEXT: $x11 = COPY [[AND1]](s64)
- ; LIBCALL-NEXT: PseudoCALL target-flags(riscv-plt) &__muldi3, csr_ilp32_lp64, implicit-def $x1, implicit $x10, implicit $x11, implicit-def $x10
+ ; LIBCALL-NEXT: PseudoCALL target-flags(riscv-call) &__muldi3, csr_ilp32_lp64, implicit-def $x1, implicit $x10, implicit $x11, implicit-def $x10
; LIBCALL-NEXT: ADJCALLSTACKUP 0, 0, implicit-def $x2, implicit $x2
; LIBCALL-NEXT: [[COPY2:%[0-9]+]]:_(s64) = COPY $x10
; LIBCALL-NEXT: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 255
@@ -328,7 +328,7 @@ body: |
; LIBCALL-NEXT: ADJCALLSTACKDOWN 0, 0, implicit-def $x2, implicit $x2
; LIBCALL-NEXT: $x10 = COPY [[AND]](s64)
; LIBCALL-NEXT: $x11 = COPY [[AND1]](s64)
- ; LIBCALL-NEXT: PseudoCALL target-flags(riscv-plt) &__muldi3, csr_ilp32_lp64, implicit-def $x1, implicit $x10, implicit $x11, implicit-def $x10
+ ; LIBCALL-NEXT: PseudoCALL target-flags(riscv-call) &__muldi3, csr_ilp32_lp64, implicit-def $x1, implicit $x10, implicit $x11, implicit-def $x10
; LIBCALL-NEXT: ADJCALLSTACKUP 0, 0, implicit-def $x2, implicit $x2
; LIBCALL-NEXT: [[COPY2:%[0-9]+]]:_(s64) = COPY $x10
; LIBCALL-NEXT: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 65535
@@ -384,7 +384,7 @@ body: |
; LIBCALL-NEXT: ADJCALLSTACKDOWN 0, 0, implicit-def $x2, implicit $x2
; LIBCALL-NEXT: $x10 = COPY [[AND]](s64)
; LIBCALL-NEXT: $x11 = COPY [[AND1]](s64)
- ; LIBCALL-NEXT: PseudoCALL target-flags(riscv-plt) &__muldi3, csr_ilp32_lp64, implicit-def $x1, implicit $x10, implicit $x11, implicit-def $x10
+ ; LIBCALL-NEXT: PseudoCALL target-flags(riscv-call) &__muldi3, csr_ilp32_lp64, implicit-def $x1, implicit $x10, implicit $x11, implicit-def $x10
; LIBCALL-NEXT: ADJCALLSTACKUP 0, 0, implicit-def $x2, implicit $x2
; LIBCALL-NEXT: [[COPY2:%[0-9]+]]:_(s64) = COPY $x10
; LIBCALL-NEXT: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 4294967295
@@ -436,7 +436,7 @@ body: |
; LIBCALL-NEXT: $x11 = COPY [[C]](s64)
; LIBCALL-NEXT: $x12 = COPY [[COPY1]](s64)
; LIBCALL-NEXT: $x13 = COPY [[C1]](s64)
- ; LIBCALL-NEXT: PseudoCALL target-flags(riscv-plt) &__multi3, csr_ilp32_lp64, implicit-def $x1, implicit $x10, implicit $x11, implicit $x12, implicit $x13, implicit-def $x10, implicit-def $x11
+ ; LIBCALL-NEXT: PseudoCALL target-flags(riscv-call) &__multi3, csr_ilp32_lp64, implicit-def $x1, implicit $x10, implicit $x11, implicit $x12, implicit $x13, implicit-def $x10, implicit-def $x11
; LIBCALL-NEXT: ADJCALLSTACKUP 0, 0, implicit-def $x2, implicit $x2
; LIBCALL-NEXT: [[COPY2:%[0-9]+]]:_(s64) = COPY $x10
; LIBCALL-NEXT: [[COPY3:%[0-9]+]]:_(s64) = COPY $x11
diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/legalize-rem-rv32.mir b/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/legalize-rem-rv32.mir
index cb7f0eaea59b..99ca07d954ff 100644
--- a/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/legalize-rem-rv32.mir
+++ b/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/legalize-rem-rv32.mir
@@ -19,7 +19,7 @@ body: |
; CHECK-I-NEXT: ADJCALLSTACKDOWN 0, 0, implicit-def $x2, implicit $x2
; CHECK-I-NEXT: $x10 = COPY [[ASHR]](s32)
; CHECK-I-NEXT: $x11 = COPY [[ASHR1]](s32)
- ; CHECK-I-NEXT: PseudoCALL target-flags(riscv-plt) &__modsi3, csr_ilp32_lp64, implicit-def $x1, implicit $x10, implicit $x11, implicit-def $x10
+ ; CHECK-I-NEXT: PseudoCALL target-flags(riscv-call) &__modsi3, csr_ilp32_lp64, implicit-def $x1, implicit $x10, implicit $x11, implicit-def $x10
; CHECK-I-NEXT: ADJCALLSTACKUP 0, 0, implicit-def $x2, implicit $x2
; CHECK-I-NEXT: [[COPY2:%[0-9]+]]:_(s32) = COPY $x10
; CHECK-I-NEXT: $x10 = COPY [[COPY2]](s32)
@@ -63,7 +63,7 @@ body: |
; CHECK-I-NEXT: ADJCALLSTACKDOWN 0, 0, implicit-def $x2, implicit $x2
; CHECK-I-NEXT: $x10 = COPY [[ASHR]](s32)
; CHECK-I-NEXT: $x11 = COPY [[ASHR1]](s32)
- ; CHECK-I-NEXT: PseudoCALL target-flags(riscv-plt) &__modsi3, csr_ilp32_lp64, implicit-def $x1, implicit $x10, implicit $x11, implicit-def $x10
+ ; CHECK-I-NEXT: PseudoCALL target-flags(riscv-call) &__modsi3, csr_ilp32_lp64, implicit-def $x1, implicit $x10, implicit $x11, implicit-def $x10
; CHECK-I-NEXT: ADJCALLSTACKUP 0, 0, implicit-def $x2, implicit $x2
; CHECK-I-NEXT: [[COPY2:%[0-9]+]]:_(s32) = COPY $x10
; CHECK-I-NEXT: $x10 = COPY [[COPY2]](s32)
@@ -107,7 +107,7 @@ body: |
; CHECK-I-NEXT: ADJCALLSTACKDOWN 0, 0, implicit-def $x2, implicit $x2
; CHECK-I-NEXT: $x10 = COPY [[ASHR]](s32)
; CHECK-I-NEXT: $x11 = COPY [[ASHR1]](s32)
- ; CHECK-I-NEXT: PseudoCALL target-flags(riscv-plt) &__modsi3, csr_ilp32_lp64, implicit-def $x1, implicit $x10, implicit $x11, implicit-def $x10
+ ; CHECK-I-NEXT: PseudoCALL target-flags(riscv-call) &__modsi3, csr_ilp32_lp64, implicit-def $x1, implicit $x10, implicit $x11, implicit-def $x10
; CHECK-I-NEXT: ADJCALLSTACKUP 0, 0, implicit-def $x2, implicit $x2
; CHECK-I-NEXT: [[COPY2:%[0-9]+]]:_(s32) = COPY $x10
; CHECK-I-NEXT: $x10 = COPY [[COPY2]](s32)
@@ -145,7 +145,7 @@ body: |
; CHECK-I-NEXT: ADJCALLSTACKDOWN 0, 0, implicit-def $x2, implicit $x2
; CHECK-I-NEXT: $x10 = COPY [[COPY]](s32)
; CHECK-I-NEXT: $x11 = COPY [[COPY1]](s32)
- ; CHECK-I-NEXT: PseudoCALL target-flags(riscv-plt) &__modsi3, csr_ilp32_lp64, implicit-def $x1, implicit $x10, implicit $x11, implicit-def $x10
+ ; CHECK-I-NEXT: PseudoCALL target-flags(riscv-call) &__modsi3, csr_ilp32_lp64, implicit-def $x1, implicit $x10, implicit $x11, implicit-def $x10
; CHECK-I-NEXT: ADJCALLSTACKUP 0, 0, implicit-def $x2, implicit $x2
; CHECK-I-NEXT: [[COPY2:%[0-9]+]]:_(s32) = COPY $x10
; CHECK-I-NEXT: $x10 = COPY [[COPY2]](s32)
@@ -184,7 +184,7 @@ body: |
; CHECK-I-NEXT: $x11 = COPY [[ASHR]](s32)
; CHECK-I-NEXT: $x12 = COPY %ylo(s32)
; CHECK-I-NEXT: $x13 = COPY [[ASHR1]](s32)
- ; CHECK-I-NEXT: PseudoCALL target-flags(riscv-plt) &__moddi3, csr_ilp32_lp64, implicit-def $x1, implicit $x10, implicit $x11, implicit $x12, implicit $x13, implicit-def $x10, implicit-def $x11
+ ; CHECK-I-NEXT: PseudoCALL target-flags(riscv-call) &__moddi3, csr_ilp32_lp64, implicit-def $x1, implicit $x10, implicit $x11, implicit $x12, implicit $x13, implicit-def $x10, implicit-def $x11
; CHECK-I-NEXT: ADJCALLSTACKUP 0, 0, implicit-def $x2, implicit $x2
; CHECK-I-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $x10
; CHECK-I-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $x11
@@ -208,7 +208,7 @@ body: |
; CHECK-M-NEXT: $x11 = COPY [[ASHR]](s32)
; CHECK-M-NEXT: $x12 = COPY %ylo(s32)
; CHECK-M-NEXT: $x13 = COPY [[ASHR1]](s32)
- ; CHECK-M-NEXT: PseudoCALL target-flags(riscv-plt) &__moddi3, csr_ilp32_lp64, implicit-def $x1, implicit $x10, implicit $x11, implicit $x12, implicit $x13, implicit-def $x10, implicit-def $x11
+ ; CHECK-M-NEXT: PseudoCALL target-flags(riscv-call) &__moddi3, csr_ilp32_lp64, implicit-def $x1, implicit $x10, implicit $x11, implicit $x12, implicit $x13, implicit-def $x10, implicit-def $x11
; CHECK-M-NEXT: ADJCALLSTACKUP 0, 0, implicit-def $x2, implicit $x2
; CHECK-M-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $x10
; CHECK-M-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $x11
@@ -245,7 +245,7 @@ body: |
; CHECK-I-NEXT: $x11 = COPY %hi1(s32)
; CHECK-I-NEXT: $x12 = COPY %lo2(s32)
; CHECK-I-NEXT: $x13 = COPY %hi2(s32)
- ; CHECK-I-NEXT: PseudoCALL target-flags(riscv-plt) &__moddi3, csr_ilp32_lp64, implicit-def $x1, implicit $x10, implicit $x11, implicit $x12, implicit $x13, implicit-def $x10, implicit-def $x11
+ ; CHECK-I-NEXT: PseudoCALL target-flags(riscv-call) &__moddi3, csr_ilp32_lp64, implicit-def $x1, implicit $x10, implicit $x11, implicit $x12, implicit $x13, implicit-def $x10, implicit-def $x11
; CHECK-I-NEXT: ADJCALLSTACKUP 0, 0, implicit-def $x2, implicit $x2
; CHECK-I-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $x10
; CHECK-I-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $x11
@@ -263,7 +263,7 @@ body: |
; CHECK-M-NEXT: $x11 = COPY %hi1(s32)
; CHECK-M-NEXT: $x12 = COPY %lo2(s32)
; CHECK-M-NEXT: $x13 = COPY %hi2(s32)
- ; CHECK-M-NEXT: PseudoCALL target-flags(riscv-plt) &__moddi3, csr_ilp32_lp64, implicit-def $x1, implicit $x10, implicit $x11, implicit $x12, implicit $x13, implicit-def $x10, implicit-def $x11
+ ; CHECK-M-NEXT: PseudoCALL target-flags(riscv-call) &__moddi3, csr_ilp32_lp64, implicit-def $x1, implicit $x10, implicit $x11, implicit $x12, implicit $x13, implicit-def $x10, implicit-def $x11
; CHECK-M-NEXT: ADJCALLSTACKUP 0, 0, implicit-def $x2, implicit $x2
; CHECK-M-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $x10
; CHECK-M-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $x11
@@ -297,7 +297,7 @@ body: |
; CHECK-I-NEXT: ADJCALLSTACKDOWN 0, 0, implicit-def $x2, implicit $x2
; CHECK-I-NEXT: $x10 = COPY [[AND]](s32)
; CHECK-I-NEXT: $x11 = COPY [[AND1]](s32)
- ; CHECK-I-NEXT: PseudoCALL target-flags(riscv-plt) &__umodsi3, csr_ilp32_lp64, implicit-def $x1, implicit $x10, implicit $x11, implicit-def $x10
+ ; CHECK-I-NEXT: PseudoCALL target-flags(riscv-call) &__umodsi3, csr_ilp32_lp64, implicit-def $x1, implicit $x10, implicit $x11, implicit-def $x10
; CHECK-I-NEXT: ADJCALLSTACKUP 0, 0, implicit-def $x2, implicit $x2
; CHECK-I-NEXT: [[COPY2:%[0-9]+]]:_(s32) = COPY $x10
; CHECK-I-NEXT: $x10 = COPY [[COPY2]](s32)
@@ -337,7 +337,7 @@ body: |
; CHECK-I-NEXT: ADJCALLSTACKDOWN 0, 0, implicit-def $x2, implicit $x2
; CHECK-I-NEXT: $x10 = COPY [[AND]](s32)
; CHECK-I-NEXT: $x11 = COPY [[AND1]](s32)
- ; CHECK-I-NEXT: PseudoCALL target-flags(riscv-plt) &__umodsi3, csr_ilp32_lp64, implicit-def $x1, implicit $x10, implicit $x11, implicit-def $x10
+ ; CHECK-I-NEXT: PseudoCALL target-flags(riscv-call) &__umodsi3, csr_ilp32_lp64, implicit-def $x1, implicit $x10, implicit $x11, implicit-def $x10
; CHECK-I-NEXT: ADJCALLSTACKUP 0, 0, implicit-def $x2, implicit $x2
; CHECK-I-NEXT: [[COPY2:%[0-9]+]]:_(s32) = COPY $x10
; CHECK-I-NEXT: $x10 = COPY [[COPY2]](s32)
@@ -377,7 +377,7 @@ body: |
; CHECK-I-NEXT: ADJCALLSTACKDOWN 0, 0, implicit-def $x2, implicit $x2
; CHECK-I-NEXT: $x10 = COPY [[AND]](s32)
; CHECK-I-NEXT: $x11 = COPY [[AND1]](s32)
- ; CHECK-I-NEXT: PseudoCALL target-flags(riscv-plt) &__umodsi3, csr_ilp32_lp64, implicit-def $x1, implicit $x10, implicit $x11, implicit-def $x10
+ ; CHECK-I-NEXT: PseudoCALL target-flags(riscv-call) &__umodsi3, csr_ilp32_lp64, implicit-def $x1, implicit $x10, implicit $x11, implicit-def $x10
; CHECK-I-NEXT: ADJCALLSTACKUP 0, 0, implicit-def $x2, implicit $x2
; CHECK-I-NEXT: [[COPY2:%[0-9]+]]:_(s32) = COPY $x10
; CHECK-I-NEXT: $x10 = COPY [[COPY2]](s32)
@@ -413,7 +413,7 @@ body: |
; CHECK-I-NEXT: ADJCALLSTACKDOWN 0, 0, implicit-def $x2, implicit $x2
; CHECK-I-NEXT: $x10 = COPY [[COPY]](s32)
; CHECK-I-NEXT: $x11 = COPY [[COPY1]](s32)
- ; CHECK-I-NEXT: PseudoCALL target-flags(riscv-plt) &__umodsi3, csr_ilp32_lp64, implicit-def $x1, implicit $x10, implicit $x11, implicit-def $x10
+ ; CHECK-I-NEXT: PseudoCALL target-flags(riscv-call) &__umodsi3, csr_ilp32_lp64, implicit-def $x1, implicit $x10, implicit $x11, implicit-def $x10
; CHECK-I-NEXT: ADJCALLSTACKUP 0, 0, implicit-def $x2, implicit $x2
; CHECK-I-NEXT: [[COPY2:%[0-9]+]]:_(s32) = COPY $x10
; CHECK-I-NEXT: $x10 = COPY [[COPY2]](s32)
@@ -454,7 +454,7 @@ body: |
; CHECK-I-NEXT: $x11 = COPY [[AND1]](s32)
; CHECK-I-NEXT: $x12 = COPY [[AND2]](s32)
; CHECK-I-NEXT: $x13 = COPY [[AND3]](s32)
- ; CHECK-I-NEXT: PseudoCALL target-flags(riscv-plt) &__umoddi3, csr_ilp32_lp64, implicit-def $x1, implicit $x10, implicit $x11, implicit $x12, implicit $x13, implicit-def $x10, implicit-def $x11
+ ; CHECK-I-NEXT: PseudoCALL target-flags(riscv-call) &__umoddi3, csr_ilp32_lp64, implicit-def $x1, implicit $x10, implicit $x11, implicit $x12, implicit $x13, implicit-def $x10, implicit-def $x11
; CHECK-I-NEXT: ADJCALLSTACKUP 0, 0, implicit-def $x2, implicit $x2
; CHECK-I-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $x10
; CHECK-I-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $x11
@@ -480,7 +480,7 @@ body: |
; CHECK-M-NEXT: $x11 = COPY [[AND1]](s32)
; CHECK-M-NEXT: $x12 = COPY [[AND2]](s32)
; CHECK-M-NEXT: $x13 = COPY [[AND3]](s32)
- ; CHECK-M-NEXT: PseudoCALL target-flags(riscv-plt) &__umoddi3, csr_ilp32_lp64, implicit-def $x1, implicit $x10, implicit $x11, implicit $x12, implicit $x13, implicit-def $x10, implicit-def $x11
+ ; CHECK-M-NEXT: PseudoCALL target-flags(riscv-call) &__umoddi3, csr_ilp32_lp64, implicit-def $x1, implicit $x10, implicit $x11, implicit $x12, implicit $x13, implicit-def $x10, implicit-def $x11
; CHECK-M-NEXT: ADJCALLSTACKUP 0, 0, implicit-def $x2, implicit $x2
; CHECK-M-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $x10
; CHECK-M-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $x11
@@ -517,7 +517,7 @@ body: |
; CHECK-I-NEXT: $x11 = COPY %hi1(s32)
; CHECK-I-NEXT: $x12 = COPY %lo2(s32)
; CHECK-I-NEXT: $x13 = COPY %hi2(s32)
- ; CHECK-I-NEXT: PseudoCALL target-flags(riscv-plt) &__umoddi3, csr_ilp32_lp64, implicit-def $x1, implicit $x10, implicit $x11, implicit $x12, implicit $x13, implicit-def $x10, implicit-def $x11
+ ; CHECK-I-NEXT: PseudoCALL target-flags(riscv-call) &__umoddi3, csr_ilp32_lp64, implicit-def $x1, implicit $x10, implicit $x11, implicit $x12, implicit $x13, implicit-def $x10, implicit-def $x11
; CHECK-I-NEXT: ADJCALLSTACKUP 0, 0, implicit-def $x2, implicit $x2
; CHECK-I-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $x10
; CHECK-I-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $x11
@@ -535,7 +535,7 @@ body: |
; CHECK-M-NEXT: $x11 = COPY %hi1(s32)
; CHECK-M-NEXT: $x12 = COPY %lo2(s32)
; CHECK-M-NEXT: $x13 = COPY %hi2(s32)
- ; CHECK-M-NEXT: PseudoCALL target-flags(riscv-plt) &__umoddi3, csr_ilp32_lp64, implicit-def $x1, implicit $x10, implicit $x11, implicit $x12, implicit $x13, implicit-def $x10, implicit-def $x11
+ ; CHECK-M-NEXT: PseudoCALL target-flags(riscv-call) &__umoddi3, csr_ilp32_lp64, implicit-def $x1, implicit $x10, implicit $x11, implicit $x12, implicit $x13, implicit-def $x10, implicit-def $x11
; CHECK-M-NEXT: ADJCALLSTACKUP 0, 0, implicit-def $x2, implicit $x2
; CHECK-M-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $x10
; CHECK-M-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $x11
diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/legalize-rem-rv64.mir b/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/legalize-rem-rv64.mir
index fb008bae9024..64458c40f446 100644
--- a/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/legalize-rem-rv64.mir
+++ b/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/legalize-rem-rv64.mir
@@ -19,7 +19,7 @@ body: |
; CHECK-I-NEXT: ADJCALLSTACKDOWN 0, 0, implicit-def $x2, implicit $x2
; CHECK-I-NEXT: $x10 = COPY [[ASHR]](s64)
; CHECK-I-NEXT: $x11 = COPY [[ASHR1]](s64)
- ; CHECK-I-NEXT: PseudoCALL target-flags(riscv-plt) &__moddi3, csr_ilp32_lp64, implicit-def $x1, implicit $x10, implicit $x11, implicit-def $x10
+ ; CHECK-I-NEXT: PseudoCALL target-flags(riscv-call) &__moddi3, csr_ilp32_lp64, implicit-def $x1, implicit $x10, implicit $x11, implicit-def $x10
; CHECK-I-NEXT: ADJCALLSTACKUP 0, 0, implicit-def $x2, implicit $x2
; CHECK-I-NEXT: [[COPY2:%[0-9]+]]:_(s64) = COPY $x10
; CHECK-I-NEXT: $x10 = COPY [[COPY2]](s64)
@@ -68,7 +68,7 @@ body: |
; CHECK-I-NEXT: ADJCALLSTACKDOWN 0, 0, implicit-def $x2, implicit $x2
; CHECK-I-NEXT: $x10 = COPY [[ASHR]](s64)
; CHECK-I-NEXT: $x11 = COPY [[ASHR1]](s64)
- ; CHECK-I-NEXT: PseudoCALL target-flags(riscv-plt) &__moddi3, csr_ilp32_lp64, implicit-def $x1, implicit $x10, implicit $x11, implicit-def $x10
+ ; CHECK-I-NEXT: PseudoCALL target-flags(riscv-call) &__moddi3, csr_ilp32_lp64, implicit-def $x1, implicit $x10, implicit $x11, implicit-def $x10
; CHECK-I-NEXT: ADJCALLSTACKUP 0, 0, implicit-def $x2, implicit $x2
; CHECK-I-NEXT: [[COPY2:%[0-9]+]]:_(s64) = COPY $x10
; CHECK-I-NEXT: $x10 = COPY [[COPY2]](s64)
@@ -117,7 +117,7 @@ body: |
; CHECK-I-NEXT: ADJCALLSTACKDOWN 0, 0, implicit-def $x2, implicit $x2
; CHECK-I-NEXT: $x10 = COPY [[ASHR]](s64)
; CHECK-I-NEXT: $x11 = COPY [[ASHR1]](s64)
- ; CHECK-I-NEXT: PseudoCALL target-flags(riscv-plt) &__moddi3, csr_ilp32_lp64, implicit-def $x1, implicit $x10, implicit $x11, implicit-def $x10
+ ; CHECK-I-NEXT: PseudoCALL target-flags(riscv-call) &__moddi3, csr_ilp32_lp64, implicit-def $x1, implicit $x10, implicit $x11, implicit-def $x10
; CHECK-I-NEXT: ADJCALLSTACKUP 0, 0, implicit-def $x2, implicit $x2
; CHECK-I-NEXT: [[COPY2:%[0-9]+]]:_(s64) = COPY $x10
; CHECK-I-NEXT: $x10 = COPY [[COPY2]](s64)
@@ -162,7 +162,7 @@ body: |
; CHECK-I-NEXT: ADJCALLSTACKDOWN 0, 0, implicit-def $x2, implicit $x2
; CHECK-I-NEXT: $x10 = COPY [[SEXT_INREG]](s64)
; CHECK-I-NEXT: $x11 = COPY [[SEXT_INREG1]](s64)
- ; CHECK-I-NEXT: PseudoCALL target-flags(riscv-plt) &__moddi3, csr_ilp32_lp64, implicit-def $x1, implicit $x10, implicit $x11, implicit-def $x10
+ ; CHECK-I-NEXT: PseudoCALL target-flags(riscv-call) &__moddi3, csr_ilp32_lp64, implicit-def $x1, implicit $x10, implicit $x11, implicit-def $x10
; CHECK-I-NEXT: ADJCALLSTACKUP 0, 0, implicit-def $x2, implicit $x2
; CHECK-I-NEXT: [[COPY2:%[0-9]+]]:_(s64) = COPY $x10
; CHECK-I-NEXT: $x10 = COPY [[COPY2]](s64)
@@ -197,7 +197,7 @@ body: |
; CHECK-I-NEXT: ADJCALLSTACKDOWN 0, 0, implicit-def $x2, implicit $x2
; CHECK-I-NEXT: $x10 = COPY [[COPY]](s64)
; CHECK-I-NEXT: $x11 = COPY [[COPY1]](s64)
- ; CHECK-I-NEXT: PseudoCALL target-flags(riscv-plt) &__moddi3, csr_ilp32_lp64, implicit-def $x1, implicit $x10, implicit $x11, implicit-def $x10
+ ; CHECK-I-NEXT: PseudoCALL target-flags(riscv-call) &__moddi3, csr_ilp32_lp64, implicit-def $x1, implicit $x10, implicit $x11, implicit-def $x10
; CHECK-I-NEXT: ADJCALLSTACKUP 0, 0, implicit-def $x2, implicit $x2
; CHECK-I-NEXT: [[COPY2:%[0-9]+]]:_(s64) = COPY $x10
; CHECK-I-NEXT: $x10 = COPY [[COPY2]](s64)
@@ -236,7 +236,7 @@ body: |
; CHECK-I-NEXT: $x11 = COPY [[ASHR]](s64)
; CHECK-I-NEXT: $x12 = COPY %ylo(s64)
; CHECK-I-NEXT: $x13 = COPY [[ASHR1]](s64)
- ; CHECK-I-NEXT: PseudoCALL target-flags(riscv-plt) &__modti3, csr_ilp32_lp64, implicit-def $x1, implicit $x10, implicit $x11, implicit $x12, implicit $x13, implicit-def $x10, implicit-def $x11
+ ; CHECK-I-NEXT: PseudoCALL target-flags(riscv-call) &__modti3, csr_ilp32_lp64, implicit-def $x1, implicit $x10, implicit $x11, implicit $x12, implicit $x13, implicit-def $x10, implicit-def $x11
; CHECK-I-NEXT: ADJCALLSTACKUP 0, 0, implicit-def $x2, implicit $x2
; CHECK-I-NEXT: [[COPY:%[0-9]+]]:_(s64) = COPY $x10
; CHECK-I-NEXT: [[COPY1:%[0-9]+]]:_(s64) = COPY $x11
@@ -260,7 +260,7 @@ body: |
; CHECK-M-NEXT: $x11 = COPY [[ASHR]](s64)
; CHECK-M-NEXT: $x12 = COPY %ylo(s64)
; CHECK-M-NEXT: $x13 = COPY [[ASHR1]](s64)
- ; CHECK-M-NEXT: PseudoCALL target-flags(riscv-plt) &__modti3, csr_ilp32_lp64, implicit-def $x1, implicit $x10, implicit $x11, implicit $x12, implicit $x13, implicit-def $x10, implicit-def $x11
+ ; CHECK-M-NEXT: PseudoCALL target-flags(riscv-call) &__modti3, csr_ilp32_lp64, implicit-def $x1, implicit $x10, implicit $x11, implicit $x12, implicit $x13, implicit-def $x10, implicit-def $x11
; CHECK-M-NEXT: ADJCALLSTACKUP 0, 0, implicit-def $x2, implicit $x2
; CHECK-M-NEXT: [[COPY:%[0-9]+]]:_(s64) = COPY $x10
; CHECK-M-NEXT: [[COPY1:%[0-9]+]]:_(s64) = COPY $x11
@@ -297,7 +297,7 @@ body: |
; CHECK-I-NEXT: $x11 = COPY %hi1(s64)
; CHECK-I-NEXT: $x12 = COPY %lo2(s64)
; CHECK-I-NEXT: $x13 = COPY %hi2(s64)
- ; CHECK-I-NEXT: PseudoCALL target-flags(riscv-plt) &__modti3, csr_ilp32_lp64, implicit-def $x1, implicit $x10, implicit $x11, implicit $x12, implicit $x13, implicit-def $x10, implicit-def $x11
+ ; CHECK-I-NEXT: PseudoCALL target-flags(riscv-call) &__modti3, csr_ilp32_lp64, implicit-def $x1, implicit $x10, implicit $x11, implicit $x12, implicit $x13, implicit-def $x10, implicit-def $x11
; CHECK-I-NEXT: ADJCALLSTACKUP 0, 0, implicit-def $x2, implicit $x2
; CHECK-I-NEXT: [[COPY:%[0-9]+]]:_(s64) = COPY $x10
; CHECK-I-NEXT: [[COPY1:%[0-9]+]]:_(s64) = COPY $x11
@@ -315,7 +315,7 @@ body: |
; CHECK-M-NEXT: $x11 = COPY %hi1(s64)
; CHECK-M-NEXT: $x12 = COPY %lo2(s64)
; CHECK-M-NEXT: $x13 = COPY %hi2(s64)
- ; CHECK-M-NEXT: PseudoCALL target-flags(riscv-plt) &__modti3, csr_ilp32_lp64, implicit-def $x1, implicit $x10, implicit $x11, implicit $x12, implicit $x13, implicit-def $x10, implicit-def $x11
+ ; CHECK-M-NEXT: PseudoCALL target-flags(riscv-call) &__modti3, csr_ilp32_lp64, implicit-def $x1, implicit $x10, implicit $x11, implicit $x12, implicit $x13, implicit-def $x10, implicit-def $x11
; CHECK-M-NEXT: ADJCALLSTACKUP 0, 0, implicit-def $x2, implicit $x2
; CHECK-M-NEXT: [[COPY:%[0-9]+]]:_(s64) = COPY $x10
; CHECK-M-NEXT: [[COPY1:%[0-9]+]]:_(s64) = COPY $x11
@@ -349,7 +349,7 @@ body: |
; CHECK-I-NEXT: ADJCALLSTACKDOWN 0, 0, implicit-def $x2, implicit $x2
; CHECK-I-NEXT: $x10 = COPY [[AND]](s64)
; CHECK-I-NEXT: $x11 = COPY [[AND1]](s64)
- ; CHECK-I-NEXT: PseudoCALL target-flags(riscv-plt) &__umoddi3, csr_ilp32_lp64, implicit-def $x1, implicit $x10, implicit $x11, implicit-def $x10
+ ; CHECK-I-NEXT: PseudoCALL target-flags(riscv-call) &__umoddi3, csr_ilp32_lp64, implicit-def $x1, implicit $x10, implicit $x11, implicit-def $x10
; CHECK-I-NEXT: ADJCALLSTACKUP 0, 0, implicit-def $x2, implicit $x2
; CHECK-I-NEXT: [[COPY2:%[0-9]+]]:_(s64) = COPY $x10
; CHECK-I-NEXT: $x10 = COPY [[COPY2]](s64)
@@ -392,7 +392,7 @@ body: |
; CHECK-I-NEXT: ADJCALLSTACKDOWN 0, 0, implicit-def $x2, implicit $x2
; CHECK-I-NEXT: $x10 = COPY [[AND]](s64)
; CHECK-I-NEXT: $x11 = COPY [[AND1]](s64)
- ; CHECK-I-NEXT: PseudoCALL target-flags(riscv-plt) &__umoddi3, csr_ilp32_lp64, implicit-def $x1, implicit $x10, implicit $x11, implicit-def $x10
+ ; CHECK-I-NEXT: PseudoCALL target-flags(riscv-call) &__umoddi3, csr_ilp32_lp64, implicit-def $x1, implicit $x10, implicit $x11, implicit-def $x10
; CHECK-I-NEXT: ADJCALLSTACKUP 0, 0, implicit-def $x2, implicit $x2
; CHECK-I-NEXT: [[COPY2:%[0-9]+]]:_(s64) = COPY $x10
; CHECK-I-NEXT: $x10 = COPY [[COPY2]](s64)
@@ -435,7 +435,7 @@ body: |
; CHECK-I-NEXT: ADJCALLSTACKDOWN 0, 0, implicit-def $x2, implicit $x2
; CHECK-I-NEXT: $x10 = COPY [[AND]](s64)
; CHECK-I-NEXT: $x11 = COPY [[AND1]](s64)
- ; CHECK-I-NEXT: PseudoCALL target-flags(riscv-plt) &__umoddi3, csr_ilp32_lp64, implicit-def $x1, implicit $x10, implicit $x11, implicit-def $x10
+ ; CHECK-I-NEXT: PseudoCALL target-flags(riscv-call) &__umoddi3, csr_ilp32_lp64, implicit-def $x1, implicit $x10, implicit $x11, implicit-def $x10
; CHECK-I-NEXT: ADJCALLSTACKUP 0, 0, implicit-def $x2, implicit $x2
; CHECK-I-NEXT: [[COPY2:%[0-9]+]]:_(s64) = COPY $x10
; CHECK-I-NEXT: $x10 = COPY [[COPY2]](s64)
@@ -478,7 +478,7 @@ body: |
; CHECK-I-NEXT: ADJCALLSTACKDOWN 0, 0, implicit-def $x2, implicit $x2
; CHECK-I-NEXT: $x10 = COPY [[AND]](s64)
; CHECK-I-NEXT: $x11 = COPY [[AND1]](s64)
- ; CHECK-I-NEXT: PseudoCALL target-flags(riscv-plt) &__umoddi3, csr_ilp32_lp64, implicit-def $x1, implicit $x10, implicit $x11, implicit-def $x10
+ ; CHECK-I-NEXT: PseudoCALL target-flags(riscv-call) &__umoddi3, csr_ilp32_lp64, implicit-def $x1, implicit $x10, implicit $x11, implicit-def $x10
; CHECK-I-NEXT: ADJCALLSTACKUP 0, 0, implicit-def $x2, implicit $x2
; CHECK-I-NEXT: [[COPY2:%[0-9]+]]:_(s64) = COPY $x10
; CHECK-I-NEXT: $x10 = COPY [[COPY2]](s64)
@@ -513,7 +513,7 @@ body: |
; CHECK-I-NEXT: ADJCALLSTACKDOWN 0, 0, implicit-def $x2, implicit $x2
; CHECK-I-NEXT: $x10 = COPY [[COPY]](s64)
; CHECK-I-NEXT: $x11 = COPY [[COPY1]](s64)
- ; CHECK-I-NEXT: PseudoCALL target-flags(riscv-plt) &__umoddi3, csr_ilp32_lp64, implicit-def $x1, implicit $x10, implicit $x11, implicit-def $x10
+ ; CHECK-I-NEXT: PseudoCALL target-flags(riscv-call) &__umoddi3, csr_ilp32_lp64, implicit-def $x1, implicit $x10, implicit $x11, implicit-def $x10
; CHECK-I-NEXT: ADJCALLSTACKUP 0, 0, implicit-def $x2, implicit $x2
; CHECK-I-NEXT: [[COPY2:%[0-9]+]]:_(s64) = COPY $x10
; CHECK-I-NEXT: $x10 = COPY [[COPY2]](s64)
@@ -554,7 +554,7 @@ body: |
; CHECK-I-NEXT: $x11 = COPY [[AND1]](s64)
; CHECK-I-NEXT: $x12 = COPY [[AND2]](s64)
; CHECK-I-NEXT: $x13 = COPY [[AND3]](s64)
- ; CHECK-I-NEXT: PseudoCALL target-flags(riscv-plt) &__umodti3, csr_ilp32_lp64, implicit-def $x1, implicit $x10, implicit $x11, implicit $x12, implicit $x13, implicit-def $x10, implicit-def $x11
+ ; CHECK-I-NEXT: PseudoCALL target-flags(riscv-call) &__umodti3, csr_ilp32_lp64, implicit-def $x1, implicit $x10, implicit $x11, implicit $x12, implicit $x13, implicit-def $x10, implicit-def $x11
; CHECK-I-NEXT: ADJCALLSTACKUP 0, 0, implicit-def $x2, implicit $x2
; CHECK-I-NEXT: [[COPY:%[0-9]+]]:_(s64) = COPY $x10
; CHECK-I-NEXT: [[COPY1:%[0-9]+]]:_(s64) = COPY $x11
@@ -580,7 +580,7 @@ body: |
; CHECK-M-NEXT: $x11 = COPY [[AND1]](s64)
; CHECK-M-NEXT: $x12 = COPY [[AND2]](s64)
; CHECK-M-NEXT: $x13 = COPY [[AND3]](s64)
- ; CHECK-M-NEXT: PseudoCALL target-flags(riscv-plt) &__umodti3, csr_ilp32_lp64, implicit-def $x1, implicit $x10, implicit $x11, implicit $x12, implicit $x13, implicit-def $x10, implicit-def $x11
+ ; CHECK-M-NEXT: PseudoCALL target-flags(riscv-call) &__umodti3, csr_ilp32_lp64, implicit-def $x1, implicit $x10, implicit $x11, implicit $x12, implicit $x13, implicit-def $x10, implicit-def $x11
; CHECK-M-NEXT: ADJCALLSTACKUP 0, 0, implicit-def $x2, implicit $x2
; CHECK-M-NEXT: [[COPY:%[0-9]+]]:_(s64) = COPY $x10
; CHECK-M-NEXT: [[COPY1:%[0-9]+]]:_(s64) = COPY $x11
@@ -617,7 +617,7 @@ body: |
; CHECK-I-NEXT: $x11 = COPY %hi1(s64)
; CHECK-I-NEXT: $x12 = COPY %lo2(s64)
; CHECK-I-NEXT: $x13 = COPY %hi2(s64)
- ; CHECK-I-NEXT: PseudoCALL target-flags(riscv-plt) &__umodti3, csr_ilp32_lp64, implicit-def $x1, implicit $x10, implicit $x11, implicit $x12, implicit $x13, implicit-def $x10, implicit-def $x11
+ ; CHECK-I-NEXT: PseudoCALL target-flags(riscv-call) &__umodti3, csr_ilp32_lp64, implicit-def $x1, implicit $x10, implicit $x11, implicit $x12, implicit $x13, implicit-def $x10, implicit-def $x11
; CHECK-I-NEXT: ADJCALLSTACKUP 0, 0, implicit-def $x2, implicit $x2
; CHECK-I-NEXT: [[COPY:%[0-9]+]]:_(s64) = COPY $x10
; CHECK-I-NEXT: [[COPY1:%[0-9]+]]:_(s64) = COPY $x11
@@ -635,7 +635,7 @@ body: |
; CHECK-M-NEXT: $x11 = COPY %hi1(s64)
; CHECK-M-NEXT: $x12 = COPY %lo2(s64)
; CHECK-M-NEXT: $x13 = COPY %hi2(s64)
- ; CHECK-M-NEXT: PseudoCALL target-flags(riscv-plt) &__umodti3, csr_ilp32_lp64, implicit-def $x1, implicit $x10, implicit $x11, implicit $x12, implicit $x13, implicit-def $x10, implicit-def $x11
+ ; CHECK-M-NEXT: PseudoCALL target-flags(riscv-call) &__umodti3, csr_ilp32_lp64, implicit-def $x1, implicit $x10, implicit $x11, implicit $x12, implicit $x13, implicit-def $x10, implicit-def $x11
; CHECK-M-NEXT: ADJCALLSTACKUP 0, 0, implicit-def $x2, implicit $x2
; CHECK-M-NEXT: [[COPY:%[0-9]+]]:_(s64) = COPY $x10
; CHECK-M-NEXT: [[COPY1:%[0-9]+]]:_(s64) = COPY $x11
diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/vararg.ll b/llvm/test/CodeGen/RISCV/GlobalISel/vararg.ll
index 501a3c0ce743..7b110e562e05 100644
--- a/llvm/test/CodeGen/RISCV/GlobalISel/vararg.ll
+++ b/llvm/test/CodeGen/RISCV/GlobalISel/vararg.ll
@@ -167,7 +167,7 @@ define i32 @va1_va_arg_alloca(ptr %fmt, ...) nounwind {
; RV32-NEXT: andi a0, a0, -16
; RV32-NEXT: sub a0, sp, a0
; RV32-NEXT: mv sp, a0
-; RV32-NEXT: call notdead@plt
+; RV32-NEXT: call notdead
; RV32-NEXT: mv a0, s1
; RV32-NEXT: addi sp, s0, -16
; RV32-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
@@ -204,7 +204,7 @@ define i32 @va1_va_arg_alloca(ptr %fmt, ...) nounwind {
; RV64-NEXT: andi a0, a0, -16
; RV64-NEXT: sub a0, sp, a0
; RV64-NEXT: mv sp, a0
-; RV64-NEXT: call notdead@plt
+; RV64-NEXT: call notdead
; RV64-NEXT: mv a0, s1
; RV64-NEXT: addi sp, s0, -32
; RV64-NEXT: ld ra, 24(sp) # 8-byte Folded Reload
@@ -229,7 +229,7 @@ define void @va1_caller() nounwind {
; RV32-NEXT: lui a3, 261888
; RV32-NEXT: li a4, 2
; RV32-NEXT: li a2, 0
-; RV32-NEXT: call va1@plt
+; RV32-NEXT: call va1
; RV32-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32-NEXT: addi sp, sp, 16
; RV32-NEXT: ret
@@ -241,7 +241,7 @@ define void @va1_caller() nounwind {
; LP64-NEXT: lui a0, %hi(.LCPI3_0)
; LP64-NEXT: ld a1, %lo(.LCPI3_0)(a0)
; LP64-NEXT: li a2, 2
-; LP64-NEXT: call va1@plt
+; LP64-NEXT: call va1
; LP64-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; LP64-NEXT: addi sp, sp, 16
; LP64-NEXT: ret
@@ -255,7 +255,7 @@ define void @va1_caller() nounwind {
; LP64F-NEXT: fmv.d.x fa5, a0
; LP64F-NEXT: li a2, 2
; LP64F-NEXT: fmv.x.d a1, fa5
-; LP64F-NEXT: call va1@plt
+; LP64F-NEXT: call va1
; LP64F-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; LP64F-NEXT: addi sp, sp, 16
; LP64F-NEXT: ret
@@ -269,7 +269,7 @@ define void @va1_caller() nounwind {
; LP64D-NEXT: fmv.d.x fa5, a0
; LP64D-NEXT: li a2, 2
; LP64D-NEXT: fmv.x.d a1, fa5
-; LP64D-NEXT: call va1@plt
+; LP64D-NEXT: call va1
; LP64D-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; LP64D-NEXT: addi sp, sp, 16
; LP64D-NEXT: ret
@@ -473,7 +473,7 @@ define void @va2_caller() nounwind {
; RV32-NEXT: addi sp, sp, -16
; RV32-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32-NEXT: li a1, 1
-; RV32-NEXT: call va2@plt
+; RV32-NEXT: call va2
; RV32-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32-NEXT: addi sp, sp, 16
; RV32-NEXT: ret
@@ -483,7 +483,7 @@ define void @va2_caller() nounwind {
; RV64-NEXT: addi sp, sp, -16
; RV64-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64-NEXT: li a1, 1
-; RV64-NEXT: call va2@plt
+; RV64-NEXT: call va2
; RV64-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64-NEXT: addi sp, sp, 16
; RV64-NEXT: ret
@@ -701,7 +701,7 @@ define void @va3_caller() nounwind {
; RV32-NEXT: li a0, 2
; RV32-NEXT: li a1, 1111
; RV32-NEXT: li a2, 0
-; RV32-NEXT: call va3@plt
+; RV32-NEXT: call va3
; RV32-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32-NEXT: addi sp, sp, 16
; RV32-NEXT: ret
@@ -714,7 +714,7 @@ define void @va3_caller() nounwind {
; RV64-NEXT: addiw a2, a0, -480
; RV64-NEXT: li a0, 2
; RV64-NEXT: li a1, 1111
-; RV64-NEXT: call va3@plt
+; RV64-NEXT: call va3
; RV64-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64-NEXT: addi sp, sp, 16
; RV64-NEXT: ret
@@ -749,7 +749,7 @@ define i32 @va4_va_copy(i32 %argno, ...) nounwind {
; RV32-NEXT: lw s0, 0(a0)
; RV32-NEXT: sw a2, 0(a1)
; RV32-NEXT: lw a0, 0(sp)
-; RV32-NEXT: call notdead@plt
+; RV32-NEXT: call notdead
; RV32-NEXT: lw a0, 4(sp)
; RV32-NEXT: addi a0, a0, 3
; RV32-NEXT: andi a0, a0, -4
@@ -803,7 +803,7 @@ define i32 @va4_va_copy(i32 %argno, ...) nounwind {
; RV64-NEXT: lwu a1, 0(sp)
; RV64-NEXT: slli a0, a0, 32
; RV64-NEXT: or a0, a0, a1
-; RV64-NEXT: call notdead@plt
+; RV64-NEXT: call notdead
; RV64-NEXT: ld a0, 8(sp)
; RV64-NEXT: addi a0, a0, 3
; RV64-NEXT: andi a0, a0, -4
diff --git a/llvm/test/CodeGen/RISCV/addrspacecast.ll b/llvm/test/CodeGen/RISCV/addrspacecast.ll
index 7fe041a8ac6a..e55a57a51678 100644
--- a/llvm/test/CodeGen/RISCV/addrspacecast.ll
+++ b/llvm/test/CodeGen/RISCV/addrspacecast.ll
@@ -26,7 +26,7 @@ define void @cast1(ptr %ptr) {
; RV32I-NEXT: .cfi_def_cfa_offset 16
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT: .cfi_offset ra, -4
-; RV32I-NEXT: call foo@plt
+; RV32I-NEXT: call foo
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
@@ -37,7 +37,7 @@ define void @cast1(ptr %ptr) {
; RV64I-NEXT: .cfi_def_cfa_offset 16
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT: .cfi_offset ra, -8
-; RV64I-NEXT: call foo@plt
+; RV64I-NEXT: call foo
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
diff --git a/llvm/test/CodeGen/RISCV/aext-to-sext.ll b/llvm/test/CodeGen/RISCV/aext-to-sext.ll
index 09803012001c..888ea666d713 100644
--- a/llvm/test/CodeGen/RISCV/aext-to-sext.ll
+++ b/llvm/test/CodeGen/RISCV/aext-to-sext.ll
@@ -19,7 +19,7 @@ define void @quux(i32 signext %arg, i32 signext %arg1) nounwind {
; RV64I-NEXT: subw s0, a1, a0
; RV64I-NEXT: .LBB0_2: # %bb2
; RV64I-NEXT: # =>This Inner Loop Header: Depth=1
-; RV64I-NEXT: call hoge@plt
+; RV64I-NEXT: call hoge
; RV64I-NEXT: addiw s0, s0, -1
; RV64I-NEXT: bnez s0, .LBB0_2
; RV64I-NEXT: # %bb.3:
diff --git a/llvm/test/CodeGen/RISCV/alloca.ll b/llvm/test/CodeGen/RISCV/alloca.ll
index 34cac429f305..bcb0592c18f5 100644
--- a/llvm/test/CodeGen/RISCV/alloca.ll
+++ b/llvm/test/CodeGen/RISCV/alloca.ll
@@ -18,7 +18,7 @@ define void @simple_alloca(i32 %n) nounwind {
; RV32I-NEXT: andi a0, a0, -16
; RV32I-NEXT: sub a0, sp, a0
; RV32I-NEXT: mv sp, a0
-; RV32I-NEXT: call notdead@plt
+; RV32I-NEXT: call notdead
; RV32I-NEXT: addi sp, s0, -16
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: lw s0, 8(sp) # 4-byte Folded Reload
@@ -45,7 +45,7 @@ define void @scoped_alloca(i32 %n) nounwind {
; RV32I-NEXT: andi a0, a0, -16
; RV32I-NEXT: sub a0, sp, a0
; RV32I-NEXT: mv sp, a0
-; RV32I-NEXT: call notdead@plt
+; RV32I-NEXT: call notdead
; RV32I-NEXT: mv sp, s1
; RV32I-NEXT: addi sp, s0, -16
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
@@ -91,7 +91,7 @@ define void @alloca_callframe(i32 %n) nounwind {
; RV32I-NEXT: li a6, 7
; RV32I-NEXT: li a7, 8
; RV32I-NEXT: sw t0, 0(sp)
-; RV32I-NEXT: call func@plt
+; RV32I-NEXT: call func
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: addi sp, s0, -16
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
diff --git a/llvm/test/CodeGen/RISCV/analyze-branch.ll b/llvm/test/CodeGen/RISCV/analyze-branch.ll
index e33e6b65c6f1..768a11a71706 100644
--- a/llvm/test/CodeGen/RISCV/analyze-branch.ll
+++ b/llvm/test/CodeGen/RISCV/analyze-branch.ll
@@ -20,13 +20,13 @@ define void @test_bcc_fallthrough_taken(i32 %in) nounwind {
; RV32I-NEXT: li a1, 42
; RV32I-NEXT: bne a0, a1, .LBB0_3
; RV32I-NEXT: # %bb.1: # %true
-; RV32I-NEXT: call test_true@plt
+; RV32I-NEXT: call test_true
; RV32I-NEXT: .LBB0_2: # %true
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
; RV32I-NEXT: .LBB0_3: # %false
-; RV32I-NEXT: call test_false@plt
+; RV32I-NEXT: call test_false
; RV32I-NEXT: j .LBB0_2
%tst = icmp eq i32 %in, 42
br i1 %tst, label %true, label %false, !prof !0
@@ -52,13 +52,13 @@ define void @test_bcc_fallthrough_nottaken(i32 %in) nounwind {
; RV32I-NEXT: li a1, 42
; RV32I-NEXT: beq a0, a1, .LBB1_3
; RV32I-NEXT: # %bb.1: # %false
-; RV32I-NEXT: call test_false@plt
+; RV32I-NEXT: call test_false
; RV32I-NEXT: .LBB1_2: # %true
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
; RV32I-NEXT: .LBB1_3: # %true
-; RV32I-NEXT: call test_true@plt
+; RV32I-NEXT: call test_true
; RV32I-NEXT: j .LBB1_2
%tst = icmp eq i32 %in, 42
br i1 %tst, label %true, label %false, !prof !1
diff --git a/llvm/test/CodeGen/RISCV/atomic-cmpxchg.ll b/llvm/test/CodeGen/RISCV/atomic-cmpxchg.ll
index eea4cb72938a..46ed01b11584 100644
--- a/llvm/test/CodeGen/RISCV/atomic-cmpxchg.ll
+++ b/llvm/test/CodeGen/RISCV/atomic-cmpxchg.ll
@@ -21,7 +21,7 @@ define void @cmpxchg_i8_monotonic_monotonic(ptr %ptr, i8 %cmp, i8 %val) nounwind
; RV32I-NEXT: addi a1, sp, 11
; RV32I-NEXT: li a3, 0
; RV32I-NEXT: li a4, 0
-; RV32I-NEXT: call __atomic_compare_exchange_1@plt
+; RV32I-NEXT: call __atomic_compare_exchange_1
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
@@ -57,7 +57,7 @@ define void @cmpxchg_i8_monotonic_monotonic(ptr %ptr, i8 %cmp, i8 %val) nounwind
; RV64I-NEXT: addi a1, sp, 7
; RV64I-NEXT: li a3, 0
; RV64I-NEXT: li a4, 0
-; RV64I-NEXT: call __atomic_compare_exchange_1@plt
+; RV64I-NEXT: call __atomic_compare_exchange_1
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
@@ -97,7 +97,7 @@ define void @cmpxchg_i8_acquire_monotonic(ptr %ptr, i8 %cmp, i8 %val) nounwind {
; RV32I-NEXT: addi a1, sp, 11
; RV32I-NEXT: li a3, 2
; RV32I-NEXT: li a4, 0
-; RV32I-NEXT: call __atomic_compare_exchange_1@plt
+; RV32I-NEXT: call __atomic_compare_exchange_1
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
@@ -156,7 +156,7 @@ define void @cmpxchg_i8_acquire_monotonic(ptr %ptr, i8 %cmp, i8 %val) nounwind {
; RV64I-NEXT: addi a1, sp, 7
; RV64I-NEXT: li a3, 2
; RV64I-NEXT: li a4, 0
-; RV64I-NEXT: call __atomic_compare_exchange_1@plt
+; RV64I-NEXT: call __atomic_compare_exchange_1
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
@@ -219,7 +219,7 @@ define void @cmpxchg_i8_acquire_acquire(ptr %ptr, i8 %cmp, i8 %val) nounwind {
; RV32I-NEXT: addi a1, sp, 11
; RV32I-NEXT: li a3, 2
; RV32I-NEXT: li a4, 2
-; RV32I-NEXT: call __atomic_compare_exchange_1@plt
+; RV32I-NEXT: call __atomic_compare_exchange_1
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
@@ -278,7 +278,7 @@ define void @cmpxchg_i8_acquire_acquire(ptr %ptr, i8 %cmp, i8 %val) nounwind {
; RV64I-NEXT: addi a1, sp, 7
; RV64I-NEXT: li a3, 2
; RV64I-NEXT: li a4, 2
-; RV64I-NEXT: call __atomic_compare_exchange_1@plt
+; RV64I-NEXT: call __atomic_compare_exchange_1
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
@@ -341,7 +341,7 @@ define void @cmpxchg_i8_release_monotonic(ptr %ptr, i8 %cmp, i8 %val) nounwind {
; RV32I-NEXT: addi a1, sp, 11
; RV32I-NEXT: li a3, 3
; RV32I-NEXT: li a4, 0
-; RV32I-NEXT: call __atomic_compare_exchange_1@plt
+; RV32I-NEXT: call __atomic_compare_exchange_1
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
@@ -400,7 +400,7 @@ define void @cmpxchg_i8_release_monotonic(ptr %ptr, i8 %cmp, i8 %val) nounwind {
; RV64I-NEXT: addi a1, sp, 7
; RV64I-NEXT: li a3, 3
; RV64I-NEXT: li a4, 0
-; RV64I-NEXT: call __atomic_compare_exchange_1@plt
+; RV64I-NEXT: call __atomic_compare_exchange_1
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
@@ -463,7 +463,7 @@ define void @cmpxchg_i8_release_acquire(ptr %ptr, i8 %cmp, i8 %val) nounwind {
; RV32I-NEXT: addi a1, sp, 11
; RV32I-NEXT: li a3, 3
; RV32I-NEXT: li a4, 2
-; RV32I-NEXT: call __atomic_compare_exchange_1@plt
+; RV32I-NEXT: call __atomic_compare_exchange_1
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
@@ -522,7 +522,7 @@ define void @cmpxchg_i8_release_acquire(ptr %ptr, i8 %cmp, i8 %val) nounwind {
; RV64I-NEXT: addi a1, sp, 7
; RV64I-NEXT: li a3, 3
; RV64I-NEXT: li a4, 2
-; RV64I-NEXT: call __atomic_compare_exchange_1@plt
+; RV64I-NEXT: call __atomic_compare_exchange_1
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
@@ -585,7 +585,7 @@ define void @cmpxchg_i8_acq_rel_monotonic(ptr %ptr, i8 %cmp, i8 %val) nounwind {
; RV32I-NEXT: addi a1, sp, 11
; RV32I-NEXT: li a3, 4
; RV32I-NEXT: li a4, 0
-; RV32I-NEXT: call __atomic_compare_exchange_1@plt
+; RV32I-NEXT: call __atomic_compare_exchange_1
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
@@ -644,7 +644,7 @@ define void @cmpxchg_i8_acq_rel_monotonic(ptr %ptr, i8 %cmp, i8 %val) nounwind {
; RV64I-NEXT: addi a1, sp, 7
; RV64I-NEXT: li a3, 4
; RV64I-NEXT: li a4, 0
-; RV64I-NEXT: call __atomic_compare_exchange_1@plt
+; RV64I-NEXT: call __atomic_compare_exchange_1
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
@@ -707,7 +707,7 @@ define void @cmpxchg_i8_acq_rel_acquire(ptr %ptr, i8 %cmp, i8 %val) nounwind {
; RV32I-NEXT: addi a1, sp, 11
; RV32I-NEXT: li a3, 4
; RV32I-NEXT: li a4, 2
-; RV32I-NEXT: call __atomic_compare_exchange_1@plt
+; RV32I-NEXT: call __atomic_compare_exchange_1
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
@@ -766,7 +766,7 @@ define void @cmpxchg_i8_acq_rel_acquire(ptr %ptr, i8 %cmp, i8 %val) nounwind {
; RV64I-NEXT: addi a1, sp, 7
; RV64I-NEXT: li a3, 4
; RV64I-NEXT: li a4, 2
-; RV64I-NEXT: call __atomic_compare_exchange_1@plt
+; RV64I-NEXT: call __atomic_compare_exchange_1
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
@@ -829,7 +829,7 @@ define void @cmpxchg_i8_seq_cst_monotonic(ptr %ptr, i8 %cmp, i8 %val) nounwind {
; RV32I-NEXT: addi a1, sp, 11
; RV32I-NEXT: li a3, 5
; RV32I-NEXT: li a4, 0
-; RV32I-NEXT: call __atomic_compare_exchange_1@plt
+; RV32I-NEXT: call __atomic_compare_exchange_1
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
@@ -865,7 +865,7 @@ define void @cmpxchg_i8_seq_cst_monotonic(ptr %ptr, i8 %cmp, i8 %val) nounwind {
; RV64I-NEXT: addi a1, sp, 7
; RV64I-NEXT: li a3, 5
; RV64I-NEXT: li a4, 0
-; RV64I-NEXT: call __atomic_compare_exchange_1@plt
+; RV64I-NEXT: call __atomic_compare_exchange_1
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
@@ -905,7 +905,7 @@ define void @cmpxchg_i8_seq_cst_acquire(ptr %ptr, i8 %cmp, i8 %val) nounwind {
; RV32I-NEXT: addi a1, sp, 11
; RV32I-NEXT: li a3, 5
; RV32I-NEXT: li a4, 2
-; RV32I-NEXT: call __atomic_compare_exchange_1@plt
+; RV32I-NEXT: call __atomic_compare_exchange_1
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
@@ -941,7 +941,7 @@ define void @cmpxchg_i8_seq_cst_acquire(ptr %ptr, i8 %cmp, i8 %val) nounwind {
; RV64I-NEXT: addi a1, sp, 7
; RV64I-NEXT: li a3, 5
; RV64I-NEXT: li a4, 2
-; RV64I-NEXT: call __atomic_compare_exchange_1@plt
+; RV64I-NEXT: call __atomic_compare_exchange_1
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
@@ -981,7 +981,7 @@ define void @cmpxchg_i8_seq_cst_seq_cst(ptr %ptr, i8 %cmp, i8 %val) nounwind {
; RV32I-NEXT: addi a1, sp, 11
; RV32I-NEXT: li a3, 5
; RV32I-NEXT: li a4, 5
-; RV32I-NEXT: call __atomic_compare_exchange_1@plt
+; RV32I-NEXT: call __atomic_compare_exchange_1
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
@@ -1017,7 +1017,7 @@ define void @cmpxchg_i8_seq_cst_seq_cst(ptr %ptr, i8 %cmp, i8 %val) nounwind {
; RV64I-NEXT: addi a1, sp, 7
; RV64I-NEXT: li a3, 5
; RV64I-NEXT: li a4, 5
-; RV64I-NEXT: call __atomic_compare_exchange_1@plt
+; RV64I-NEXT: call __atomic_compare_exchange_1
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
@@ -1057,7 +1057,7 @@ define void @cmpxchg_i16_monotonic_monotonic(ptr %ptr, i16 %cmp, i16 %val) nounw
; RV32I-NEXT: addi a1, sp, 10
; RV32I-NEXT: li a3, 0
; RV32I-NEXT: li a4, 0
-; RV32I-NEXT: call __atomic_compare_exchange_2@plt
+; RV32I-NEXT: call __atomic_compare_exchange_2
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
@@ -1094,7 +1094,7 @@ define void @cmpxchg_i16_monotonic_monotonic(ptr %ptr, i16 %cmp, i16 %val) nounw
; RV64I-NEXT: addi a1, sp, 6
; RV64I-NEXT: li a3, 0
; RV64I-NEXT: li a4, 0
-; RV64I-NEXT: call __atomic_compare_exchange_2@plt
+; RV64I-NEXT: call __atomic_compare_exchange_2
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
@@ -1135,7 +1135,7 @@ define void @cmpxchg_i16_acquire_monotonic(ptr %ptr, i16 %cmp, i16 %val) nounwin
; RV32I-NEXT: addi a1, sp, 10
; RV32I-NEXT: li a3, 2
; RV32I-NEXT: li a4, 0
-; RV32I-NEXT: call __atomic_compare_exchange_2@plt
+; RV32I-NEXT: call __atomic_compare_exchange_2
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
@@ -1196,7 +1196,7 @@ define void @cmpxchg_i16_acquire_monotonic(ptr %ptr, i16 %cmp, i16 %val) nounwin
; RV64I-NEXT: addi a1, sp, 6
; RV64I-NEXT: li a3, 2
; RV64I-NEXT: li a4, 0
-; RV64I-NEXT: call __atomic_compare_exchange_2@plt
+; RV64I-NEXT: call __atomic_compare_exchange_2
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
@@ -1261,7 +1261,7 @@ define void @cmpxchg_i16_acquire_acquire(ptr %ptr, i16 %cmp, i16 %val) nounwind
; RV32I-NEXT: addi a1, sp, 10
; RV32I-NEXT: li a3, 2
; RV32I-NEXT: li a4, 2
-; RV32I-NEXT: call __atomic_compare_exchange_2@plt
+; RV32I-NEXT: call __atomic_compare_exchange_2
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
@@ -1322,7 +1322,7 @@ define void @cmpxchg_i16_acquire_acquire(ptr %ptr, i16 %cmp, i16 %val) nounwind
; RV64I-NEXT: addi a1, sp, 6
; RV64I-NEXT: li a3, 2
; RV64I-NEXT: li a4, 2
-; RV64I-NEXT: call __atomic_compare_exchange_2@plt
+; RV64I-NEXT: call __atomic_compare_exchange_2
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
@@ -1387,7 +1387,7 @@ define void @cmpxchg_i16_release_monotonic(ptr %ptr, i16 %cmp, i16 %val) nounwin
; RV32I-NEXT: addi a1, sp, 10
; RV32I-NEXT: li a3, 3
; RV32I-NEXT: li a4, 0
-; RV32I-NEXT: call __atomic_compare_exchange_2@plt
+; RV32I-NEXT: call __atomic_compare_exchange_2
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
@@ -1448,7 +1448,7 @@ define void @cmpxchg_i16_release_monotonic(ptr %ptr, i16 %cmp, i16 %val) nounwin
; RV64I-NEXT: addi a1, sp, 6
; RV64I-NEXT: li a3, 3
; RV64I-NEXT: li a4, 0
-; RV64I-NEXT: call __atomic_compare_exchange_2@plt
+; RV64I-NEXT: call __atomic_compare_exchange_2
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
@@ -1513,7 +1513,7 @@ define void @cmpxchg_i16_release_acquire(ptr %ptr, i16 %cmp, i16 %val) nounwind
; RV32I-NEXT: addi a1, sp, 10
; RV32I-NEXT: li a3, 3
; RV32I-NEXT: li a4, 2
-; RV32I-NEXT: call __atomic_compare_exchange_2@plt
+; RV32I-NEXT: call __atomic_compare_exchange_2
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
@@ -1574,7 +1574,7 @@ define void @cmpxchg_i16_release_acquire(ptr %ptr, i16 %cmp, i16 %val) nounwind
; RV64I-NEXT: addi a1, sp, 6
; RV64I-NEXT: li a3, 3
; RV64I-NEXT: li a4, 2
-; RV64I-NEXT: call __atomic_compare_exchange_2@plt
+; RV64I-NEXT: call __atomic_compare_exchange_2
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
@@ -1639,7 +1639,7 @@ define void @cmpxchg_i16_acq_rel_monotonic(ptr %ptr, i16 %cmp, i16 %val) nounwin
; RV32I-NEXT: addi a1, sp, 10
; RV32I-NEXT: li a3, 4
; RV32I-NEXT: li a4, 0
-; RV32I-NEXT: call __atomic_compare_exchange_2@plt
+; RV32I-NEXT: call __atomic_compare_exchange_2
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
@@ -1700,7 +1700,7 @@ define void @cmpxchg_i16_acq_rel_monotonic(ptr %ptr, i16 %cmp, i16 %val) nounwin
; RV64I-NEXT: addi a1, sp, 6
; RV64I-NEXT: li a3, 4
; RV64I-NEXT: li a4, 0
-; RV64I-NEXT: call __atomic_compare_exchange_2@plt
+; RV64I-NEXT: call __atomic_compare_exchange_2
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
@@ -1765,7 +1765,7 @@ define void @cmpxchg_i16_acq_rel_acquire(ptr %ptr, i16 %cmp, i16 %val) nounwind
; RV32I-NEXT: addi a1, sp, 10
; RV32I-NEXT: li a3, 4
; RV32I-NEXT: li a4, 2
-; RV32I-NEXT: call __atomic_compare_exchange_2@plt
+; RV32I-NEXT: call __atomic_compare_exchange_2
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
@@ -1826,7 +1826,7 @@ define void @cmpxchg_i16_acq_rel_acquire(ptr %ptr, i16 %cmp, i16 %val) nounwind
; RV64I-NEXT: addi a1, sp, 6
; RV64I-NEXT: li a3, 4
; RV64I-NEXT: li a4, 2
-; RV64I-NEXT: call __atomic_compare_exchange_2@plt
+; RV64I-NEXT: call __atomic_compare_exchange_2
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
@@ -1891,7 +1891,7 @@ define void @cmpxchg_i16_seq_cst_monotonic(ptr %ptr, i16 %cmp, i16 %val) nounwin
; RV32I-NEXT: addi a1, sp, 10
; RV32I-NEXT: li a3, 5
; RV32I-NEXT: li a4, 0
-; RV32I-NEXT: call __atomic_compare_exchange_2@plt
+; RV32I-NEXT: call __atomic_compare_exchange_2
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
@@ -1928,7 +1928,7 @@ define void @cmpxchg_i16_seq_cst_monotonic(ptr %ptr, i16 %cmp, i16 %val) nounwin
; RV64I-NEXT: addi a1, sp, 6
; RV64I-NEXT: li a3, 5
; RV64I-NEXT: li a4, 0
-; RV64I-NEXT: call __atomic_compare_exchange_2@plt
+; RV64I-NEXT: call __atomic_compare_exchange_2
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
@@ -1969,7 +1969,7 @@ define void @cmpxchg_i16_seq_cst_acquire(ptr %ptr, i16 %cmp, i16 %val) nounwind
; RV32I-NEXT: addi a1, sp, 10
; RV32I-NEXT: li a3, 5
; RV32I-NEXT: li a4, 2
-; RV32I-NEXT: call __atomic_compare_exchange_2@plt
+; RV32I-NEXT: call __atomic_compare_exchange_2
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
@@ -2006,7 +2006,7 @@ define void @cmpxchg_i16_seq_cst_acquire(ptr %ptr, i16 %cmp, i16 %val) nounwind
; RV64I-NEXT: addi a1, sp, 6
; RV64I-NEXT: li a3, 5
; RV64I-NEXT: li a4, 2
-; RV64I-NEXT: call __atomic_compare_exchange_2@plt
+; RV64I-NEXT: call __atomic_compare_exchange_2
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
@@ -2047,7 +2047,7 @@ define void @cmpxchg_i16_seq_cst_seq_cst(ptr %ptr, i16 %cmp, i16 %val) nounwind
; RV32I-NEXT: addi a1, sp, 10
; RV32I-NEXT: li a3, 5
; RV32I-NEXT: li a4, 5
-; RV32I-NEXT: call __atomic_compare_exchange_2@plt
+; RV32I-NEXT: call __atomic_compare_exchange_2
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
@@ -2084,7 +2084,7 @@ define void @cmpxchg_i16_seq_cst_seq_cst(ptr %ptr, i16 %cmp, i16 %val) nounwind
; RV64I-NEXT: addi a1, sp, 6
; RV64I-NEXT: li a3, 5
; RV64I-NEXT: li a4, 5
-; RV64I-NEXT: call __atomic_compare_exchange_2@plt
+; RV64I-NEXT: call __atomic_compare_exchange_2
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
@@ -2125,7 +2125,7 @@ define void @cmpxchg_i32_monotonic_monotonic(ptr %ptr, i32 %cmp, i32 %val) nounw
; RV32I-NEXT: addi a1, sp, 8
; RV32I-NEXT: li a3, 0
; RV32I-NEXT: li a4, 0
-; RV32I-NEXT: call __atomic_compare_exchange_4@plt
+; RV32I-NEXT: call __atomic_compare_exchange_4
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
@@ -2149,7 +2149,7 @@ define void @cmpxchg_i32_monotonic_monotonic(ptr %ptr, i32 %cmp, i32 %val) nounw
; RV64I-NEXT: addi a1, sp, 4
; RV64I-NEXT: li a3, 0
; RV64I-NEXT: li a4, 0
-; RV64I-NEXT: call __atomic_compare_exchange_4@plt
+; RV64I-NEXT: call __atomic_compare_exchange_4
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
@@ -2178,7 +2178,7 @@ define void @cmpxchg_i32_acquire_monotonic(ptr %ptr, i32 %cmp, i32 %val) nounwin
; RV32I-NEXT: addi a1, sp, 8
; RV32I-NEXT: li a3, 2
; RV32I-NEXT: li a4, 0
-; RV32I-NEXT: call __atomic_compare_exchange_4@plt
+; RV32I-NEXT: call __atomic_compare_exchange_4
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
@@ -2213,7 +2213,7 @@ define void @cmpxchg_i32_acquire_monotonic(ptr %ptr, i32 %cmp, i32 %val) nounwin
; RV64I-NEXT: addi a1, sp, 4
; RV64I-NEXT: li a3, 2
; RV64I-NEXT: li a4, 0
-; RV64I-NEXT: call __atomic_compare_exchange_4@plt
+; RV64I-NEXT: call __atomic_compare_exchange_4
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
@@ -2254,7 +2254,7 @@ define void @cmpxchg_i32_acquire_acquire(ptr %ptr, i32 %cmp, i32 %val) nounwind
; RV32I-NEXT: addi a1, sp, 8
; RV32I-NEXT: li a3, 2
; RV32I-NEXT: li a4, 2
-; RV32I-NEXT: call __atomic_compare_exchange_4@plt
+; RV32I-NEXT: call __atomic_compare_exchange_4
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
@@ -2289,7 +2289,7 @@ define void @cmpxchg_i32_acquire_acquire(ptr %ptr, i32 %cmp, i32 %val) nounwind
; RV64I-NEXT: addi a1, sp, 4
; RV64I-NEXT: li a3, 2
; RV64I-NEXT: li a4, 2
-; RV64I-NEXT: call __atomic_compare_exchange_4@plt
+; RV64I-NEXT: call __atomic_compare_exchange_4
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
@@ -2330,7 +2330,7 @@ define void @cmpxchg_i32_release_monotonic(ptr %ptr, i32 %cmp, i32 %val) nounwin
; RV32I-NEXT: addi a1, sp, 8
; RV32I-NEXT: li a3, 3
; RV32I-NEXT: li a4, 0
-; RV32I-NEXT: call __atomic_compare_exchange_4@plt
+; RV32I-NEXT: call __atomic_compare_exchange_4
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
@@ -2365,7 +2365,7 @@ define void @cmpxchg_i32_release_monotonic(ptr %ptr, i32 %cmp, i32 %val) nounwin
; RV64I-NEXT: addi a1, sp, 4
; RV64I-NEXT: li a3, 3
; RV64I-NEXT: li a4, 0
-; RV64I-NEXT: call __atomic_compare_exchange_4@plt
+; RV64I-NEXT: call __atomic_compare_exchange_4
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
@@ -2406,7 +2406,7 @@ define void @cmpxchg_i32_release_acquire(ptr %ptr, i32 %cmp, i32 %val) nounwind
; RV32I-NEXT: addi a1, sp, 8
; RV32I-NEXT: li a3, 3
; RV32I-NEXT: li a4, 2
-; RV32I-NEXT: call __atomic_compare_exchange_4@plt
+; RV32I-NEXT: call __atomic_compare_exchange_4
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
@@ -2441,7 +2441,7 @@ define void @cmpxchg_i32_release_acquire(ptr %ptr, i32 %cmp, i32 %val) nounwind
; RV64I-NEXT: addi a1, sp, 4
; RV64I-NEXT: li a3, 3
; RV64I-NEXT: li a4, 2
-; RV64I-NEXT: call __atomic_compare_exchange_4@plt
+; RV64I-NEXT: call __atomic_compare_exchange_4
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
@@ -2482,7 +2482,7 @@ define void @cmpxchg_i32_acq_rel_monotonic(ptr %ptr, i32 %cmp, i32 %val) nounwin
; RV32I-NEXT: addi a1, sp, 8
; RV32I-NEXT: li a3, 4
; RV32I-NEXT: li a4, 0
-; RV32I-NEXT: call __atomic_compare_exchange_4@plt
+; RV32I-NEXT: call __atomic_compare_exchange_4
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
@@ -2517,7 +2517,7 @@ define void @cmpxchg_i32_acq_rel_monotonic(ptr %ptr, i32 %cmp, i32 %val) nounwin
; RV64I-NEXT: addi a1, sp, 4
; RV64I-NEXT: li a3, 4
; RV64I-NEXT: li a4, 0
-; RV64I-NEXT: call __atomic_compare_exchange_4@plt
+; RV64I-NEXT: call __atomic_compare_exchange_4
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
@@ -2558,7 +2558,7 @@ define void @cmpxchg_i32_acq_rel_acquire(ptr %ptr, i32 %cmp, i32 %val) nounwind
; RV32I-NEXT: addi a1, sp, 8
; RV32I-NEXT: li a3, 4
; RV32I-NEXT: li a4, 2
-; RV32I-NEXT: call __atomic_compare_exchange_4@plt
+; RV32I-NEXT: call __atomic_compare_exchange_4
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
@@ -2593,7 +2593,7 @@ define void @cmpxchg_i32_acq_rel_acquire(ptr %ptr, i32 %cmp, i32 %val) nounwind
; RV64I-NEXT: addi a1, sp, 4
; RV64I-NEXT: li a3, 4
; RV64I-NEXT: li a4, 2
-; RV64I-NEXT: call __atomic_compare_exchange_4@plt
+; RV64I-NEXT: call __atomic_compare_exchange_4
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
@@ -2634,7 +2634,7 @@ define void @cmpxchg_i32_seq_cst_monotonic(ptr %ptr, i32 %cmp, i32 %val) nounwin
; RV32I-NEXT: addi a1, sp, 8
; RV32I-NEXT: li a3, 5
; RV32I-NEXT: li a4, 0
-; RV32I-NEXT: call __atomic_compare_exchange_4@plt
+; RV32I-NEXT: call __atomic_compare_exchange_4
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
@@ -2658,7 +2658,7 @@ define void @cmpxchg_i32_seq_cst_monotonic(ptr %ptr, i32 %cmp, i32 %val) nounwin
; RV64I-NEXT: addi a1, sp, 4
; RV64I-NEXT: li a3, 5
; RV64I-NEXT: li a4, 0
-; RV64I-NEXT: call __atomic_compare_exchange_4@plt
+; RV64I-NEXT: call __atomic_compare_exchange_4
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
@@ -2687,7 +2687,7 @@ define void @cmpxchg_i32_seq_cst_acquire(ptr %ptr, i32 %cmp, i32 %val) nounwind
; RV32I-NEXT: addi a1, sp, 8
; RV32I-NEXT: li a3, 5
; RV32I-NEXT: li a4, 2
-; RV32I-NEXT: call __atomic_compare_exchange_4@plt
+; RV32I-NEXT: call __atomic_compare_exchange_4
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
@@ -2711,7 +2711,7 @@ define void @cmpxchg_i32_seq_cst_acquire(ptr %ptr, i32 %cmp, i32 %val) nounwind
; RV64I-NEXT: addi a1, sp, 4
; RV64I-NEXT: li a3, 5
; RV64I-NEXT: li a4, 2
-; RV64I-NEXT: call __atomic_compare_exchange_4@plt
+; RV64I-NEXT: call __atomic_compare_exchange_4
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
@@ -2740,7 +2740,7 @@ define void @cmpxchg_i32_seq_cst_seq_cst(ptr %ptr, i32 %cmp, i32 %val) nounwind
; RV32I-NEXT: addi a1, sp, 8
; RV32I-NEXT: li a3, 5
; RV32I-NEXT: li a4, 5
-; RV32I-NEXT: call __atomic_compare_exchange_4@plt
+; RV32I-NEXT: call __atomic_compare_exchange_4
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
@@ -2764,7 +2764,7 @@ define void @cmpxchg_i32_seq_cst_seq_cst(ptr %ptr, i32 %cmp, i32 %val) nounwind
; RV64I-NEXT: addi a1, sp, 4
; RV64I-NEXT: li a3, 5
; RV64I-NEXT: li a4, 5
-; RV64I-NEXT: call __atomic_compare_exchange_4@plt
+; RV64I-NEXT: call __atomic_compare_exchange_4
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
@@ -2796,7 +2796,7 @@ define void @cmpxchg_i64_monotonic_monotonic(ptr %ptr, i64 %cmp, i64 %val) nounw
; RV32I-NEXT: mv a3, a4
; RV32I-NEXT: li a4, 0
; RV32I-NEXT: li a5, 0
-; RV32I-NEXT: call __atomic_compare_exchange_8@plt
+; RV32I-NEXT: call __atomic_compare_exchange_8
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
@@ -2812,7 +2812,7 @@ define void @cmpxchg_i64_monotonic_monotonic(ptr %ptr, i64 %cmp, i64 %val) nounw
; RV32IA-NEXT: mv a3, a4
; RV32IA-NEXT: li a4, 0
; RV32IA-NEXT: li a5, 0
-; RV32IA-NEXT: call __atomic_compare_exchange_8@plt
+; RV32IA-NEXT: call __atomic_compare_exchange_8
; RV32IA-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32IA-NEXT: addi sp, sp, 16
; RV32IA-NEXT: ret
@@ -2825,7 +2825,7 @@ define void @cmpxchg_i64_monotonic_monotonic(ptr %ptr, i64 %cmp, i64 %val) nounw
; RV64I-NEXT: mv a1, sp
; RV64I-NEXT: li a3, 0
; RV64I-NEXT: li a4, 0
-; RV64I-NEXT: call __atomic_compare_exchange_8@plt
+; RV64I-NEXT: call __atomic_compare_exchange_8
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
@@ -2857,7 +2857,7 @@ define void @cmpxchg_i64_acquire_monotonic(ptr %ptr, i64 %cmp, i64 %val) nounwin
; RV32I-NEXT: mv a2, a3
; RV32I-NEXT: mv a3, a5
; RV32I-NEXT: li a5, 0
-; RV32I-NEXT: call __atomic_compare_exchange_8@plt
+; RV32I-NEXT: call __atomic_compare_exchange_8
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
@@ -2874,7 +2874,7 @@ define void @cmpxchg_i64_acquire_monotonic(ptr %ptr, i64 %cmp, i64 %val) nounwin
; RV32IA-NEXT: mv a2, a3
; RV32IA-NEXT: mv a3, a5
; RV32IA-NEXT: li a5, 0
-; RV32IA-NEXT: call __atomic_compare_exchange_8@plt
+; RV32IA-NEXT: call __atomic_compare_exchange_8
; RV32IA-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32IA-NEXT: addi sp, sp, 16
; RV32IA-NEXT: ret
@@ -2887,7 +2887,7 @@ define void @cmpxchg_i64_acquire_monotonic(ptr %ptr, i64 %cmp, i64 %val) nounwin
; RV64I-NEXT: mv a1, sp
; RV64I-NEXT: li a3, 2
; RV64I-NEXT: li a4, 0
-; RV64I-NEXT: call __atomic_compare_exchange_8@plt
+; RV64I-NEXT: call __atomic_compare_exchange_8
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
@@ -2930,7 +2930,7 @@ define void @cmpxchg_i64_acquire_acquire(ptr %ptr, i64 %cmp, i64 %val) nounwind
; RV32I-NEXT: li a5, 2
; RV32I-NEXT: mv a2, a3
; RV32I-NEXT: mv a3, a6
-; RV32I-NEXT: call __atomic_compare_exchange_8@plt
+; RV32I-NEXT: call __atomic_compare_exchange_8
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
@@ -2947,7 +2947,7 @@ define void @cmpxchg_i64_acquire_acquire(ptr %ptr, i64 %cmp, i64 %val) nounwind
; RV32IA-NEXT: li a5, 2
; RV32IA-NEXT: mv a2, a3
; RV32IA-NEXT: mv a3, a6
-; RV32IA-NEXT: call __atomic_compare_exchange_8@plt
+; RV32IA-NEXT: call __atomic_compare_exchange_8
; RV32IA-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32IA-NEXT: addi sp, sp, 16
; RV32IA-NEXT: ret
@@ -2960,7 +2960,7 @@ define void @cmpxchg_i64_acquire_acquire(ptr %ptr, i64 %cmp, i64 %val) nounwind
; RV64I-NEXT: mv a1, sp
; RV64I-NEXT: li a3, 2
; RV64I-NEXT: li a4, 2
-; RV64I-NEXT: call __atomic_compare_exchange_8@plt
+; RV64I-NEXT: call __atomic_compare_exchange_8
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
@@ -3003,7 +3003,7 @@ define void @cmpxchg_i64_release_monotonic(ptr %ptr, i64 %cmp, i64 %val) nounwin
; RV32I-NEXT: mv a2, a3
; RV32I-NEXT: mv a3, a5
; RV32I-NEXT: li a5, 0
-; RV32I-NEXT: call __atomic_compare_exchange_8@plt
+; RV32I-NEXT: call __atomic_compare_exchange_8
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
@@ -3020,7 +3020,7 @@ define void @cmpxchg_i64_release_monotonic(ptr %ptr, i64 %cmp, i64 %val) nounwin
; RV32IA-NEXT: mv a2, a3
; RV32IA-NEXT: mv a3, a5
; RV32IA-NEXT: li a5, 0
-; RV32IA-NEXT: call __atomic_compare_exchange_8@plt
+; RV32IA-NEXT: call __atomic_compare_exchange_8
; RV32IA-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32IA-NEXT: addi sp, sp, 16
; RV32IA-NEXT: ret
@@ -3033,7 +3033,7 @@ define void @cmpxchg_i64_release_monotonic(ptr %ptr, i64 %cmp, i64 %val) nounwin
; RV64I-NEXT: mv a1, sp
; RV64I-NEXT: li a3, 3
; RV64I-NEXT: li a4, 0
-; RV64I-NEXT: call __atomic_compare_exchange_8@plt
+; RV64I-NEXT: call __atomic_compare_exchange_8
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
@@ -3076,7 +3076,7 @@ define void @cmpxchg_i64_release_acquire(ptr %ptr, i64 %cmp, i64 %val) nounwind
; RV32I-NEXT: li a5, 2
; RV32I-NEXT: mv a2, a3
; RV32I-NEXT: mv a3, a6
-; RV32I-NEXT: call __atomic_compare_exchange_8@plt
+; RV32I-NEXT: call __atomic_compare_exchange_8
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
@@ -3093,7 +3093,7 @@ define void @cmpxchg_i64_release_acquire(ptr %ptr, i64 %cmp, i64 %val) nounwind
; RV32IA-NEXT: li a5, 2
; RV32IA-NEXT: mv a2, a3
; RV32IA-NEXT: mv a3, a6
-; RV32IA-NEXT: call __atomic_compare_exchange_8@plt
+; RV32IA-NEXT: call __atomic_compare_exchange_8
; RV32IA-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32IA-NEXT: addi sp, sp, 16
; RV32IA-NEXT: ret
@@ -3106,7 +3106,7 @@ define void @cmpxchg_i64_release_acquire(ptr %ptr, i64 %cmp, i64 %val) nounwind
; RV64I-NEXT: mv a1, sp
; RV64I-NEXT: li a3, 3
; RV64I-NEXT: li a4, 2
-; RV64I-NEXT: call __atomic_compare_exchange_8@plt
+; RV64I-NEXT: call __atomic_compare_exchange_8
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
@@ -3149,7 +3149,7 @@ define void @cmpxchg_i64_acq_rel_monotonic(ptr %ptr, i64 %cmp, i64 %val) nounwin
; RV32I-NEXT: mv a2, a3
; RV32I-NEXT: mv a3, a5
; RV32I-NEXT: li a5, 0
-; RV32I-NEXT: call __atomic_compare_exchange_8@plt
+; RV32I-NEXT: call __atomic_compare_exchange_8
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
@@ -3166,7 +3166,7 @@ define void @cmpxchg_i64_acq_rel_monotonic(ptr %ptr, i64 %cmp, i64 %val) nounwin
; RV32IA-NEXT: mv a2, a3
; RV32IA-NEXT: mv a3, a5
; RV32IA-NEXT: li a5, 0
-; RV32IA-NEXT: call __atomic_compare_exchange_8@plt
+; RV32IA-NEXT: call __atomic_compare_exchange_8
; RV32IA-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32IA-NEXT: addi sp, sp, 16
; RV32IA-NEXT: ret
@@ -3179,7 +3179,7 @@ define void @cmpxchg_i64_acq_rel_monotonic(ptr %ptr, i64 %cmp, i64 %val) nounwin
; RV64I-NEXT: mv a1, sp
; RV64I-NEXT: li a3, 4
; RV64I-NEXT: li a4, 0
-; RV64I-NEXT: call __atomic_compare_exchange_8@plt
+; RV64I-NEXT: call __atomic_compare_exchange_8
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
@@ -3222,7 +3222,7 @@ define void @cmpxchg_i64_acq_rel_acquire(ptr %ptr, i64 %cmp, i64 %val) nounwind
; RV32I-NEXT: li a5, 2
; RV32I-NEXT: mv a2, a3
; RV32I-NEXT: mv a3, a6
-; RV32I-NEXT: call __atomic_compare_exchange_8@plt
+; RV32I-NEXT: call __atomic_compare_exchange_8
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
@@ -3239,7 +3239,7 @@ define void @cmpxchg_i64_acq_rel_acquire(ptr %ptr, i64 %cmp, i64 %val) nounwind
; RV32IA-NEXT: li a5, 2
; RV32IA-NEXT: mv a2, a3
; RV32IA-NEXT: mv a3, a6
-; RV32IA-NEXT: call __atomic_compare_exchange_8@plt
+; RV32IA-NEXT: call __atomic_compare_exchange_8
; RV32IA-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32IA-NEXT: addi sp, sp, 16
; RV32IA-NEXT: ret
@@ -3252,7 +3252,7 @@ define void @cmpxchg_i64_acq_rel_acquire(ptr %ptr, i64 %cmp, i64 %val) nounwind
; RV64I-NEXT: mv a1, sp
; RV64I-NEXT: li a3, 4
; RV64I-NEXT: li a4, 2
-; RV64I-NEXT: call __atomic_compare_exchange_8@plt
+; RV64I-NEXT: call __atomic_compare_exchange_8
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
@@ -3295,7 +3295,7 @@ define void @cmpxchg_i64_seq_cst_monotonic(ptr %ptr, i64 %cmp, i64 %val) nounwin
; RV32I-NEXT: mv a2, a3
; RV32I-NEXT: mv a3, a5
; RV32I-NEXT: li a5, 0
-; RV32I-NEXT: call __atomic_compare_exchange_8@plt
+; RV32I-NEXT: call __atomic_compare_exchange_8
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
@@ -3312,7 +3312,7 @@ define void @cmpxchg_i64_seq_cst_monotonic(ptr %ptr, i64 %cmp, i64 %val) nounwin
; RV32IA-NEXT: mv a2, a3
; RV32IA-NEXT: mv a3, a5
; RV32IA-NEXT: li a5, 0
-; RV32IA-NEXT: call __atomic_compare_exchange_8@plt
+; RV32IA-NEXT: call __atomic_compare_exchange_8
; RV32IA-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32IA-NEXT: addi sp, sp, 16
; RV32IA-NEXT: ret
@@ -3325,7 +3325,7 @@ define void @cmpxchg_i64_seq_cst_monotonic(ptr %ptr, i64 %cmp, i64 %val) nounwin
; RV64I-NEXT: mv a1, sp
; RV64I-NEXT: li a3, 5
; RV64I-NEXT: li a4, 0
-; RV64I-NEXT: call __atomic_compare_exchange_8@plt
+; RV64I-NEXT: call __atomic_compare_exchange_8
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
@@ -3357,7 +3357,7 @@ define void @cmpxchg_i64_seq_cst_acquire(ptr %ptr, i64 %cmp, i64 %val) nounwind
; RV32I-NEXT: li a5, 2
; RV32I-NEXT: mv a2, a3
; RV32I-NEXT: mv a3, a6
-; RV32I-NEXT: call __atomic_compare_exchange_8@plt
+; RV32I-NEXT: call __atomic_compare_exchange_8
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
@@ -3374,7 +3374,7 @@ define void @cmpxchg_i64_seq_cst_acquire(ptr %ptr, i64 %cmp, i64 %val) nounwind
; RV32IA-NEXT: li a5, 2
; RV32IA-NEXT: mv a2, a3
; RV32IA-NEXT: mv a3, a6
-; RV32IA-NEXT: call __atomic_compare_exchange_8@plt
+; RV32IA-NEXT: call __atomic_compare_exchange_8
; RV32IA-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32IA-NEXT: addi sp, sp, 16
; RV32IA-NEXT: ret
@@ -3387,7 +3387,7 @@ define void @cmpxchg_i64_seq_cst_acquire(ptr %ptr, i64 %cmp, i64 %val) nounwind
; RV64I-NEXT: mv a1, sp
; RV64I-NEXT: li a3, 5
; RV64I-NEXT: li a4, 2
-; RV64I-NEXT: call __atomic_compare_exchange_8@plt
+; RV64I-NEXT: call __atomic_compare_exchange_8
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
@@ -3419,7 +3419,7 @@ define void @cmpxchg_i64_seq_cst_seq_cst(ptr %ptr, i64 %cmp, i64 %val) nounwind
; RV32I-NEXT: li a5, 5
; RV32I-NEXT: mv a2, a3
; RV32I-NEXT: mv a3, a6
-; RV32I-NEXT: call __atomic_compare_exchange_8@plt
+; RV32I-NEXT: call __atomic_compare_exchange_8
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
@@ -3436,7 +3436,7 @@ define void @cmpxchg_i64_seq_cst_seq_cst(ptr %ptr, i64 %cmp, i64 %val) nounwind
; RV32IA-NEXT: li a5, 5
; RV32IA-NEXT: mv a2, a3
; RV32IA-NEXT: mv a3, a6
-; RV32IA-NEXT: call __atomic_compare_exchange_8@plt
+; RV32IA-NEXT: call __atomic_compare_exchange_8
; RV32IA-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32IA-NEXT: addi sp, sp, 16
; RV32IA-NEXT: ret
@@ -3449,7 +3449,7 @@ define void @cmpxchg_i64_seq_cst_seq_cst(ptr %ptr, i64 %cmp, i64 %val) nounwind
; RV64I-NEXT: mv a1, sp
; RV64I-NEXT: li a3, 5
; RV64I-NEXT: li a4, 5
-; RV64I-NEXT: call __atomic_compare_exchange_8@plt
+; RV64I-NEXT: call __atomic_compare_exchange_8
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
diff --git a/llvm/test/CodeGen/RISCV/atomic-load-store.ll b/llvm/test/CodeGen/RISCV/atomic-load-store.ll
index d3488ebed89f..2d1fc21cda89 100644
--- a/llvm/test/CodeGen/RISCV/atomic-load-store.ll
+++ b/llvm/test/CodeGen/RISCV/atomic-load-store.ll
@@ -30,7 +30,7 @@ define i8 @atomic_load_i8_unordered(ptr %a) nounwind {
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT: li a1, 0
-; RV32I-NEXT: call __atomic_load_1@plt
+; RV32I-NEXT: call __atomic_load_1
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
@@ -45,7 +45,7 @@ define i8 @atomic_load_i8_unordered(ptr %a) nounwind {
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT: li a1, 0
-; RV64I-NEXT: call __atomic_load_1@plt
+; RV64I-NEXT: call __atomic_load_1
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
@@ -64,7 +64,7 @@ define i8 @atomic_load_i8_monotonic(ptr %a) nounwind {
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT: li a1, 0
-; RV32I-NEXT: call __atomic_load_1@plt
+; RV32I-NEXT: call __atomic_load_1
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
@@ -79,7 +79,7 @@ define i8 @atomic_load_i8_monotonic(ptr %a) nounwind {
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT: li a1, 0
-; RV64I-NEXT: call __atomic_load_1@plt
+; RV64I-NEXT: call __atomic_load_1
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
@@ -98,7 +98,7 @@ define i8 @atomic_load_i8_acquire(ptr %a) nounwind {
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT: li a1, 2
-; RV32I-NEXT: call __atomic_load_1@plt
+; RV32I-NEXT: call __atomic_load_1
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
@@ -119,7 +119,7 @@ define i8 @atomic_load_i8_acquire(ptr %a) nounwind {
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT: li a1, 2
-; RV64I-NEXT: call __atomic_load_1@plt
+; RV64I-NEXT: call __atomic_load_1
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
@@ -166,7 +166,7 @@ define i8 @atomic_load_i8_seq_cst(ptr %a) nounwind {
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT: li a1, 5
-; RV32I-NEXT: call __atomic_load_1@plt
+; RV32I-NEXT: call __atomic_load_1
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
@@ -189,7 +189,7 @@ define i8 @atomic_load_i8_seq_cst(ptr %a) nounwind {
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT: li a1, 5
-; RV64I-NEXT: call __atomic_load_1@plt
+; RV64I-NEXT: call __atomic_load_1
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
@@ -242,7 +242,7 @@ define i16 @atomic_load_i16_unordered(ptr %a) nounwind {
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT: li a1, 0
-; RV32I-NEXT: call __atomic_load_2@plt
+; RV32I-NEXT: call __atomic_load_2
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
@@ -257,7 +257,7 @@ define i16 @atomic_load_i16_unordered(ptr %a) nounwind {
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT: li a1, 0
-; RV64I-NEXT: call __atomic_load_2@plt
+; RV64I-NEXT: call __atomic_load_2
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
@@ -276,7 +276,7 @@ define i16 @atomic_load_i16_monotonic(ptr %a) nounwind {
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT: li a1, 0
-; RV32I-NEXT: call __atomic_load_2@plt
+; RV32I-NEXT: call __atomic_load_2
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
@@ -291,7 +291,7 @@ define i16 @atomic_load_i16_monotonic(ptr %a) nounwind {
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT: li a1, 0
-; RV64I-NEXT: call __atomic_load_2@plt
+; RV64I-NEXT: call __atomic_load_2
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
@@ -310,7 +310,7 @@ define i16 @atomic_load_i16_acquire(ptr %a) nounwind {
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT: li a1, 2
-; RV32I-NEXT: call __atomic_load_2@plt
+; RV32I-NEXT: call __atomic_load_2
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
@@ -331,7 +331,7 @@ define i16 @atomic_load_i16_acquire(ptr %a) nounwind {
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT: li a1, 2
-; RV64I-NEXT: call __atomic_load_2@plt
+; RV64I-NEXT: call __atomic_load_2
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
@@ -378,7 +378,7 @@ define i16 @atomic_load_i16_seq_cst(ptr %a) nounwind {
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT: li a1, 5
-; RV32I-NEXT: call __atomic_load_2@plt
+; RV32I-NEXT: call __atomic_load_2
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
@@ -401,7 +401,7 @@ define i16 @atomic_load_i16_seq_cst(ptr %a) nounwind {
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT: li a1, 5
-; RV64I-NEXT: call __atomic_load_2@plt
+; RV64I-NEXT: call __atomic_load_2
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
@@ -454,7 +454,7 @@ define i32 @atomic_load_i32_unordered(ptr %a) nounwind {
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT: li a1, 0
-; RV32I-NEXT: call __atomic_load_4@plt
+; RV32I-NEXT: call __atomic_load_4
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
@@ -469,7 +469,7 @@ define i32 @atomic_load_i32_unordered(ptr %a) nounwind {
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT: li a1, 0
-; RV64I-NEXT: call __atomic_load_4@plt
+; RV64I-NEXT: call __atomic_load_4
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
@@ -488,7 +488,7 @@ define i32 @atomic_load_i32_monotonic(ptr %a) nounwind {
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT: li a1, 0
-; RV32I-NEXT: call __atomic_load_4@plt
+; RV32I-NEXT: call __atomic_load_4
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
@@ -503,7 +503,7 @@ define i32 @atomic_load_i32_monotonic(ptr %a) nounwind {
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT: li a1, 0
-; RV64I-NEXT: call __atomic_load_4@plt
+; RV64I-NEXT: call __atomic_load_4
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
@@ -522,7 +522,7 @@ define i32 @atomic_load_i32_acquire(ptr %a) nounwind {
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT: li a1, 2
-; RV32I-NEXT: call __atomic_load_4@plt
+; RV32I-NEXT: call __atomic_load_4
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
@@ -543,7 +543,7 @@ define i32 @atomic_load_i32_acquire(ptr %a) nounwind {
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT: li a1, 2
-; RV64I-NEXT: call __atomic_load_4@plt
+; RV64I-NEXT: call __atomic_load_4
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
@@ -590,7 +590,7 @@ define i32 @atomic_load_i32_seq_cst(ptr %a) nounwind {
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT: li a1, 5
-; RV32I-NEXT: call __atomic_load_4@plt
+; RV32I-NEXT: call __atomic_load_4
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
@@ -613,7 +613,7 @@ define i32 @atomic_load_i32_seq_cst(ptr %a) nounwind {
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT: li a1, 5
-; RV64I-NEXT: call __atomic_load_4@plt
+; RV64I-NEXT: call __atomic_load_4
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
@@ -666,7 +666,7 @@ define i64 @atomic_load_i64_unordered(ptr %a) nounwind {
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT: li a1, 0
-; RV32I-NEXT: call __atomic_load_8@plt
+; RV32I-NEXT: call __atomic_load_8
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
@@ -676,7 +676,7 @@ define i64 @atomic_load_i64_unordered(ptr %a) nounwind {
; RV32IA-NEXT: addi sp, sp, -16
; RV32IA-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32IA-NEXT: li a1, 0
-; RV32IA-NEXT: call __atomic_load_8@plt
+; RV32IA-NEXT: call __atomic_load_8
; RV32IA-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32IA-NEXT: addi sp, sp, 16
; RV32IA-NEXT: ret
@@ -686,7 +686,7 @@ define i64 @atomic_load_i64_unordered(ptr %a) nounwind {
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT: li a1, 0
-; RV64I-NEXT: call __atomic_load_8@plt
+; RV64I-NEXT: call __atomic_load_8
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
@@ -705,7 +705,7 @@ define i64 @atomic_load_i64_monotonic(ptr %a) nounwind {
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT: li a1, 0
-; RV32I-NEXT: call __atomic_load_8@plt
+; RV32I-NEXT: call __atomic_load_8
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
@@ -715,7 +715,7 @@ define i64 @atomic_load_i64_monotonic(ptr %a) nounwind {
; RV32IA-NEXT: addi sp, sp, -16
; RV32IA-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32IA-NEXT: li a1, 0
-; RV32IA-NEXT: call __atomic_load_8@plt
+; RV32IA-NEXT: call __atomic_load_8
; RV32IA-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32IA-NEXT: addi sp, sp, 16
; RV32IA-NEXT: ret
@@ -725,7 +725,7 @@ define i64 @atomic_load_i64_monotonic(ptr %a) nounwind {
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT: li a1, 0
-; RV64I-NEXT: call __atomic_load_8@plt
+; RV64I-NEXT: call __atomic_load_8
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
@@ -744,7 +744,7 @@ define i64 @atomic_load_i64_acquire(ptr %a) nounwind {
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT: li a1, 2
-; RV32I-NEXT: call __atomic_load_8@plt
+; RV32I-NEXT: call __atomic_load_8
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
@@ -754,7 +754,7 @@ define i64 @atomic_load_i64_acquire(ptr %a) nounwind {
; RV32IA-NEXT: addi sp, sp, -16
; RV32IA-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32IA-NEXT: li a1, 2
-; RV32IA-NEXT: call __atomic_load_8@plt
+; RV32IA-NEXT: call __atomic_load_8
; RV32IA-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32IA-NEXT: addi sp, sp, 16
; RV32IA-NEXT: ret
@@ -764,7 +764,7 @@ define i64 @atomic_load_i64_acquire(ptr %a) nounwind {
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT: li a1, 2
-; RV64I-NEXT: call __atomic_load_8@plt
+; RV64I-NEXT: call __atomic_load_8
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
@@ -800,7 +800,7 @@ define i64 @atomic_load_i64_seq_cst(ptr %a) nounwind {
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT: li a1, 5
-; RV32I-NEXT: call __atomic_load_8@plt
+; RV32I-NEXT: call __atomic_load_8
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
@@ -810,7 +810,7 @@ define i64 @atomic_load_i64_seq_cst(ptr %a) nounwind {
; RV32IA-NEXT: addi sp, sp, -16
; RV32IA-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32IA-NEXT: li a1, 5
-; RV32IA-NEXT: call __atomic_load_8@plt
+; RV32IA-NEXT: call __atomic_load_8
; RV32IA-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32IA-NEXT: addi sp, sp, 16
; RV32IA-NEXT: ret
@@ -820,7 +820,7 @@ define i64 @atomic_load_i64_seq_cst(ptr %a) nounwind {
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT: li a1, 5
-; RV64I-NEXT: call __atomic_load_8@plt
+; RV64I-NEXT: call __atomic_load_8
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
@@ -860,7 +860,7 @@ define void @atomic_store_i8_unordered(ptr %a, i8 %b) nounwind {
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT: li a2, 0
-; RV32I-NEXT: call __atomic_store_1@plt
+; RV32I-NEXT: call __atomic_store_1
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
@@ -875,7 +875,7 @@ define void @atomic_store_i8_unordered(ptr %a, i8 %b) nounwind {
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT: li a2, 0
-; RV64I-NEXT: call __atomic_store_1@plt
+; RV64I-NEXT: call __atomic_store_1
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
@@ -894,7 +894,7 @@ define void @atomic_store_i8_monotonic(ptr %a, i8 %b) nounwind {
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT: li a2, 0
-; RV32I-NEXT: call __atomic_store_1@plt
+; RV32I-NEXT: call __atomic_store_1
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
@@ -909,7 +909,7 @@ define void @atomic_store_i8_monotonic(ptr %a, i8 %b) nounwind {
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT: li a2, 0
-; RV64I-NEXT: call __atomic_store_1@plt
+; RV64I-NEXT: call __atomic_store_1
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
@@ -928,7 +928,7 @@ define void @atomic_store_i8_release(ptr %a, i8 %b) nounwind {
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT: li a2, 3
-; RV32I-NEXT: call __atomic_store_1@plt
+; RV32I-NEXT: call __atomic_store_1
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
@@ -949,7 +949,7 @@ define void @atomic_store_i8_release(ptr %a, i8 %b) nounwind {
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT: li a2, 3
-; RV64I-NEXT: call __atomic_store_1@plt
+; RV64I-NEXT: call __atomic_store_1
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
@@ -996,7 +996,7 @@ define void @atomic_store_i8_seq_cst(ptr %a, i8 %b) nounwind {
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT: li a2, 5
-; RV32I-NEXT: call __atomic_store_1@plt
+; RV32I-NEXT: call __atomic_store_1
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
@@ -1018,7 +1018,7 @@ define void @atomic_store_i8_seq_cst(ptr %a, i8 %b) nounwind {
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT: li a2, 5
-; RV64I-NEXT: call __atomic_store_1@plt
+; RV64I-NEXT: call __atomic_store_1
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
@@ -1070,7 +1070,7 @@ define void @atomic_store_i16_unordered(ptr %a, i16 %b) nounwind {
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT: li a2, 0
-; RV32I-NEXT: call __atomic_store_2@plt
+; RV32I-NEXT: call __atomic_store_2
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
@@ -1085,7 +1085,7 @@ define void @atomic_store_i16_unordered(ptr %a, i16 %b) nounwind {
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT: li a2, 0
-; RV64I-NEXT: call __atomic_store_2@plt
+; RV64I-NEXT: call __atomic_store_2
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
@@ -1104,7 +1104,7 @@ define void @atomic_store_i16_monotonic(ptr %a, i16 %b) nounwind {
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT: li a2, 0
-; RV32I-NEXT: call __atomic_store_2@plt
+; RV32I-NEXT: call __atomic_store_2
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
@@ -1119,7 +1119,7 @@ define void @atomic_store_i16_monotonic(ptr %a, i16 %b) nounwind {
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT: li a2, 0
-; RV64I-NEXT: call __atomic_store_2@plt
+; RV64I-NEXT: call __atomic_store_2
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
@@ -1138,7 +1138,7 @@ define void @atomic_store_i16_release(ptr %a, i16 %b) nounwind {
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT: li a2, 3
-; RV32I-NEXT: call __atomic_store_2@plt
+; RV32I-NEXT: call __atomic_store_2
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
@@ -1159,7 +1159,7 @@ define void @atomic_store_i16_release(ptr %a, i16 %b) nounwind {
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT: li a2, 3
-; RV64I-NEXT: call __atomic_store_2@plt
+; RV64I-NEXT: call __atomic_store_2
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
@@ -1206,7 +1206,7 @@ define void @atomic_store_i16_seq_cst(ptr %a, i16 %b) nounwind {
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT: li a2, 5
-; RV32I-NEXT: call __atomic_store_2@plt
+; RV32I-NEXT: call __atomic_store_2
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
@@ -1228,7 +1228,7 @@ define void @atomic_store_i16_seq_cst(ptr %a, i16 %b) nounwind {
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT: li a2, 5
-; RV64I-NEXT: call __atomic_store_2@plt
+; RV64I-NEXT: call __atomic_store_2
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
@@ -1280,7 +1280,7 @@ define void @atomic_store_i32_unordered(ptr %a, i32 %b) nounwind {
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT: li a2, 0
-; RV32I-NEXT: call __atomic_store_4@plt
+; RV32I-NEXT: call __atomic_store_4
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
@@ -1295,7 +1295,7 @@ define void @atomic_store_i32_unordered(ptr %a, i32 %b) nounwind {
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT: li a2, 0
-; RV64I-NEXT: call __atomic_store_4@plt
+; RV64I-NEXT: call __atomic_store_4
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
@@ -1314,7 +1314,7 @@ define void @atomic_store_i32_monotonic(ptr %a, i32 %b) nounwind {
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT: li a2, 0
-; RV32I-NEXT: call __atomic_store_4@plt
+; RV32I-NEXT: call __atomic_store_4
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
@@ -1329,7 +1329,7 @@ define void @atomic_store_i32_monotonic(ptr %a, i32 %b) nounwind {
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT: li a2, 0
-; RV64I-NEXT: call __atomic_store_4@plt
+; RV64I-NEXT: call __atomic_store_4
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
@@ -1348,7 +1348,7 @@ define void @atomic_store_i32_release(ptr %a, i32 %b) nounwind {
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT: li a2, 3
-; RV32I-NEXT: call __atomic_store_4@plt
+; RV32I-NEXT: call __atomic_store_4
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
@@ -1369,7 +1369,7 @@ define void @atomic_store_i32_release(ptr %a, i32 %b) nounwind {
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT: li a2, 3
-; RV64I-NEXT: call __atomic_store_4@plt
+; RV64I-NEXT: call __atomic_store_4
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
@@ -1416,7 +1416,7 @@ define void @atomic_store_i32_seq_cst(ptr %a, i32 %b) nounwind {
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT: li a2, 5
-; RV32I-NEXT: call __atomic_store_4@plt
+; RV32I-NEXT: call __atomic_store_4
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
@@ -1438,7 +1438,7 @@ define void @atomic_store_i32_seq_cst(ptr %a, i32 %b) nounwind {
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT: li a2, 5
-; RV64I-NEXT: call __atomic_store_4@plt
+; RV64I-NEXT: call __atomic_store_4
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
@@ -1490,7 +1490,7 @@ define void @atomic_store_i64_unordered(ptr %a, i64 %b) nounwind {
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT: li a3, 0
-; RV32I-NEXT: call __atomic_store_8@plt
+; RV32I-NEXT: call __atomic_store_8
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
@@ -1500,7 +1500,7 @@ define void @atomic_store_i64_unordered(ptr %a, i64 %b) nounwind {
; RV32IA-NEXT: addi sp, sp, -16
; RV32IA-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32IA-NEXT: li a3, 0
-; RV32IA-NEXT: call __atomic_store_8@plt
+; RV32IA-NEXT: call __atomic_store_8
; RV32IA-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32IA-NEXT: addi sp, sp, 16
; RV32IA-NEXT: ret
@@ -1510,7 +1510,7 @@ define void @atomic_store_i64_unordered(ptr %a, i64 %b) nounwind {
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT: li a2, 0
-; RV64I-NEXT: call __atomic_store_8@plt
+; RV64I-NEXT: call __atomic_store_8
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
@@ -1529,7 +1529,7 @@ define void @atomic_store_i64_monotonic(ptr %a, i64 %b) nounwind {
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT: li a3, 0
-; RV32I-NEXT: call __atomic_store_8@plt
+; RV32I-NEXT: call __atomic_store_8
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
@@ -1539,7 +1539,7 @@ define void @atomic_store_i64_monotonic(ptr %a, i64 %b) nounwind {
; RV32IA-NEXT: addi sp, sp, -16
; RV32IA-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32IA-NEXT: li a3, 0
-; RV32IA-NEXT: call __atomic_store_8@plt
+; RV32IA-NEXT: call __atomic_store_8
; RV32IA-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32IA-NEXT: addi sp, sp, 16
; RV32IA-NEXT: ret
@@ -1549,7 +1549,7 @@ define void @atomic_store_i64_monotonic(ptr %a, i64 %b) nounwind {
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT: li a2, 0
-; RV64I-NEXT: call __atomic_store_8@plt
+; RV64I-NEXT: call __atomic_store_8
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
@@ -1568,7 +1568,7 @@ define void @atomic_store_i64_release(ptr %a, i64 %b) nounwind {
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT: li a3, 3
-; RV32I-NEXT: call __atomic_store_8@plt
+; RV32I-NEXT: call __atomic_store_8
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
@@ -1578,7 +1578,7 @@ define void @atomic_store_i64_release(ptr %a, i64 %b) nounwind {
; RV32IA-NEXT: addi sp, sp, -16
; RV32IA-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32IA-NEXT: li a3, 3
-; RV32IA-NEXT: call __atomic_store_8@plt
+; RV32IA-NEXT: call __atomic_store_8
; RV32IA-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32IA-NEXT: addi sp, sp, 16
; RV32IA-NEXT: ret
@@ -1588,7 +1588,7 @@ define void @atomic_store_i64_release(ptr %a, i64 %b) nounwind {
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT: li a2, 3
-; RV64I-NEXT: call __atomic_store_8@plt
+; RV64I-NEXT: call __atomic_store_8
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
@@ -1624,7 +1624,7 @@ define void @atomic_store_i64_seq_cst(ptr %a, i64 %b) nounwind {
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT: li a3, 5
-; RV32I-NEXT: call __atomic_store_8@plt
+; RV32I-NEXT: call __atomic_store_8
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
@@ -1634,7 +1634,7 @@ define void @atomic_store_i64_seq_cst(ptr %a, i64 %b) nounwind {
; RV32IA-NEXT: addi sp, sp, -16
; RV32IA-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32IA-NEXT: li a3, 5
-; RV32IA-NEXT: call __atomic_store_8@plt
+; RV32IA-NEXT: call __atomic_store_8
; RV32IA-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32IA-NEXT: addi sp, sp, 16
; RV32IA-NEXT: ret
@@ -1644,7 +1644,7 @@ define void @atomic_store_i64_seq_cst(ptr %a, i64 %b) nounwind {
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT: li a2, 5
-; RV64I-NEXT: call __atomic_store_8@plt
+; RV64I-NEXT: call __atomic_store_8
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
diff --git a/llvm/test/CodeGen/RISCV/atomic-rmw-discard.ll b/llvm/test/CodeGen/RISCV/atomic-rmw-discard.ll
index 895852b84e00..8d3fc9610926 100644
--- a/llvm/test/CodeGen/RISCV/atomic-rmw-discard.ll
+++ b/llvm/test/CodeGen/RISCV/atomic-rmw-discard.ll
@@ -24,7 +24,7 @@ define void @amoswap_d_discard(ptr %a, i64 %b) nounwind {
; RV32-NEXT: addi sp, sp, -16
; RV32-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32-NEXT: li a3, 5
-; RV32-NEXT: call __atomic_exchange_8@plt
+; RV32-NEXT: call __atomic_exchange_8
; RV32-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32-NEXT: addi sp, sp, 16
; RV32-NEXT: ret
@@ -57,7 +57,7 @@ define void @amoadd_d_discard(ptr %a, i64 %b) nounwind {
; RV32-NEXT: addi sp, sp, -16
; RV32-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32-NEXT: li a3, 5
-; RV32-NEXT: call __atomic_fetch_add_8@plt
+; RV32-NEXT: call __atomic_fetch_add_8
; RV32-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32-NEXT: addi sp, sp, 16
; RV32-NEXT: ret
@@ -90,7 +90,7 @@ define void @amoand_d_discard(ptr %a, i64 %b) nounwind {
; RV32-NEXT: addi sp, sp, -16
; RV32-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32-NEXT: li a3, 5
-; RV32-NEXT: call __atomic_fetch_and_8@plt
+; RV32-NEXT: call __atomic_fetch_and_8
; RV32-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32-NEXT: addi sp, sp, 16
; RV32-NEXT: ret
@@ -123,7 +123,7 @@ define void @amoor_d_discard(ptr %a, i64 %b) nounwind {
; RV32-NEXT: addi sp, sp, -16
; RV32-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32-NEXT: li a3, 5
-; RV32-NEXT: call __atomic_fetch_or_8@plt
+; RV32-NEXT: call __atomic_fetch_or_8
; RV32-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32-NEXT: addi sp, sp, 16
; RV32-NEXT: ret
@@ -156,7 +156,7 @@ define void @amoxor_d_discard(ptr %a, i64 %b) nounwind {
; RV32-NEXT: addi sp, sp, -16
; RV32-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32-NEXT: li a3, 5
-; RV32-NEXT: call __atomic_fetch_or_8@plt
+; RV32-NEXT: call __atomic_fetch_or_8
; RV32-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32-NEXT: addi sp, sp, 16
; RV32-NEXT: ret
@@ -205,7 +205,7 @@ define void @amomax_d_discard(ptr %a, i64 %b) nounwind {
; RV32-NEXT: li a4, 5
; RV32-NEXT: li a5, 5
; RV32-NEXT: mv a0, s0
-; RV32-NEXT: call __atomic_compare_exchange_8@plt
+; RV32-NEXT: call __atomic_compare_exchange_8
; RV32-NEXT: lw a4, 12(sp)
; RV32-NEXT: lw a5, 8(sp)
; RV32-NEXT: bnez a0, .LBB11_6
@@ -281,7 +281,7 @@ define void @amomaxu_d_discard(ptr %a, i64 %b) nounwind {
; RV32-NEXT: li a4, 5
; RV32-NEXT: li a5, 5
; RV32-NEXT: mv a0, s0
-; RV32-NEXT: call __atomic_compare_exchange_8@plt
+; RV32-NEXT: call __atomic_compare_exchange_8
; RV32-NEXT: lw a4, 12(sp)
; RV32-NEXT: lw a5, 8(sp)
; RV32-NEXT: bnez a0, .LBB13_6
@@ -357,7 +357,7 @@ define void @amomin_d_discard(ptr %a, i64 %b) nounwind {
; RV32-NEXT: li a4, 5
; RV32-NEXT: li a5, 5
; RV32-NEXT: mv a0, s0
-; RV32-NEXT: call __atomic_compare_exchange_8@plt
+; RV32-NEXT: call __atomic_compare_exchange_8
; RV32-NEXT: lw a4, 12(sp)
; RV32-NEXT: lw a5, 8(sp)
; RV32-NEXT: bnez a0, .LBB15_6
@@ -433,7 +433,7 @@ define void @amominu_d_discard(ptr %a, i64 %b) nounwind {
; RV32-NEXT: li a4, 5
; RV32-NEXT: li a5, 5
; RV32-NEXT: mv a0, s0
-; RV32-NEXT: call __atomic_compare_exchange_8@plt
+; RV32-NEXT: call __atomic_compare_exchange_8
; RV32-NEXT: lw a4, 12(sp)
; RV32-NEXT: lw a5, 8(sp)
; RV32-NEXT: bnez a0, .LBB17_6
diff --git a/llvm/test/CodeGen/RISCV/atomic-rmw-sub.ll b/llvm/test/CodeGen/RISCV/atomic-rmw-sub.ll
index 9fcf4c1b0541..4dafd6a08d97 100644
--- a/llvm/test/CodeGen/RISCV/atomic-rmw-sub.ll
+++ b/llvm/test/CodeGen/RISCV/atomic-rmw-sub.ll
@@ -15,7 +15,7 @@ define i32 @atomicrmw_sub_i32_constant(ptr %a) nounwind {
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT: li a1, 1
; RV32I-NEXT: li a2, 5
-; RV32I-NEXT: call __atomic_fetch_sub_4@plt
+; RV32I-NEXT: call __atomic_fetch_sub_4
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
@@ -32,7 +32,7 @@ define i32 @atomicrmw_sub_i32_constant(ptr %a) nounwind {
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT: li a1, 1
; RV64I-NEXT: li a2, 5
-; RV64I-NEXT: call __atomic_fetch_sub_4@plt
+; RV64I-NEXT: call __atomic_fetch_sub_4
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
@@ -54,7 +54,7 @@ define i64 @atomicrmw_sub_i64_constant(ptr %a) nounwind {
; RV32I-NEXT: li a1, 1
; RV32I-NEXT: li a3, 5
; RV32I-NEXT: li a2, 0
-; RV32I-NEXT: call __atomic_fetch_sub_8@plt
+; RV32I-NEXT: call __atomic_fetch_sub_8
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
@@ -66,7 +66,7 @@ define i64 @atomicrmw_sub_i64_constant(ptr %a) nounwind {
; RV32IA-NEXT: li a1, 1
; RV32IA-NEXT: li a3, 5
; RV32IA-NEXT: li a2, 0
-; RV32IA-NEXT: call __atomic_fetch_sub_8@plt
+; RV32IA-NEXT: call __atomic_fetch_sub_8
; RV32IA-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32IA-NEXT: addi sp, sp, 16
; RV32IA-NEXT: ret
@@ -77,7 +77,7 @@ define i64 @atomicrmw_sub_i64_constant(ptr %a) nounwind {
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT: li a1, 1
; RV64I-NEXT: li a2, 5
-; RV64I-NEXT: call __atomic_fetch_sub_8@plt
+; RV64I-NEXT: call __atomic_fetch_sub_8
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
@@ -98,7 +98,7 @@ define i32 @atomicrmw_sub_i32_neg(ptr %a, i32 %x, i32 %y) nounwind {
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT: sub a1, a1, a2
; RV32I-NEXT: li a2, 5
-; RV32I-NEXT: call __atomic_fetch_sub_4@plt
+; RV32I-NEXT: call __atomic_fetch_sub_4
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
@@ -115,7 +115,7 @@ define i32 @atomicrmw_sub_i32_neg(ptr %a, i32 %x, i32 %y) nounwind {
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT: subw a1, a1, a2
; RV64I-NEXT: li a2, 5
-; RV64I-NEXT: call __atomic_fetch_sub_4@plt
+; RV64I-NEXT: call __atomic_fetch_sub_4
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
@@ -140,7 +140,7 @@ define i64 @atomicrmw_sub_i64_neg(ptr %a, i64 %x, i64 %y) nounwind {
; RV32I-NEXT: sub a2, a2, a5
; RV32I-NEXT: sub a1, a1, a3
; RV32I-NEXT: li a3, 5
-; RV32I-NEXT: call __atomic_fetch_sub_8@plt
+; RV32I-NEXT: call __atomic_fetch_sub_8
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
@@ -154,7 +154,7 @@ define i64 @atomicrmw_sub_i64_neg(ptr %a, i64 %x, i64 %y) nounwind {
; RV32IA-NEXT: sub a2, a2, a5
; RV32IA-NEXT: sub a1, a1, a3
; RV32IA-NEXT: li a3, 5
-; RV32IA-NEXT: call __atomic_fetch_sub_8@plt
+; RV32IA-NEXT: call __atomic_fetch_sub_8
; RV32IA-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32IA-NEXT: addi sp, sp, 16
; RV32IA-NEXT: ret
@@ -165,7 +165,7 @@ define i64 @atomicrmw_sub_i64_neg(ptr %a, i64 %x, i64 %y) nounwind {
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT: sub a1, a1, a2
; RV64I-NEXT: li a2, 5
-; RV64I-NEXT: call __atomic_fetch_sub_8@plt
+; RV64I-NEXT: call __atomic_fetch_sub_8
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
diff --git a/llvm/test/CodeGen/RISCV/atomic-rmw.ll b/llvm/test/CodeGen/RISCV/atomic-rmw.ll
index e97a1ea5dfca..d4c067b7b8a4 100644
--- a/llvm/test/CodeGen/RISCV/atomic-rmw.ll
+++ b/llvm/test/CodeGen/RISCV/atomic-rmw.ll
@@ -18,7 +18,7 @@ define i8 @atomicrmw_xchg_i8_monotonic(ptr %a, i8 %b) nounwind {
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT: li a2, 0
-; RV32I-NEXT: call __atomic_exchange_1@plt
+; RV32I-NEXT: call __atomic_exchange_1
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
@@ -48,7 +48,7 @@ define i8 @atomicrmw_xchg_i8_monotonic(ptr %a, i8 %b) nounwind {
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT: li a2, 0
-; RV64I-NEXT: call __atomic_exchange_1@plt
+; RV64I-NEXT: call __atomic_exchange_1
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
@@ -82,7 +82,7 @@ define i8 @atomicrmw_xchg_i8_acquire(ptr %a, i8 %b) nounwind {
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT: li a2, 2
-; RV32I-NEXT: call __atomic_exchange_1@plt
+; RV32I-NEXT: call __atomic_exchange_1
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
@@ -132,7 +132,7 @@ define i8 @atomicrmw_xchg_i8_acquire(ptr %a, i8 %b) nounwind {
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT: li a2, 2
-; RV64I-NEXT: call __atomic_exchange_1@plt
+; RV64I-NEXT: call __atomic_exchange_1
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
@@ -186,7 +186,7 @@ define i8 @atomicrmw_xchg_i8_release(ptr %a, i8 %b) nounwind {
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT: li a2, 3
-; RV32I-NEXT: call __atomic_exchange_1@plt
+; RV32I-NEXT: call __atomic_exchange_1
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
@@ -236,7 +236,7 @@ define i8 @atomicrmw_xchg_i8_release(ptr %a, i8 %b) nounwind {
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT: li a2, 3
-; RV64I-NEXT: call __atomic_exchange_1@plt
+; RV64I-NEXT: call __atomic_exchange_1
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
@@ -290,7 +290,7 @@ define i8 @atomicrmw_xchg_i8_acq_rel(ptr %a, i8 %b) nounwind {
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT: li a2, 4
-; RV32I-NEXT: call __atomic_exchange_1@plt
+; RV32I-NEXT: call __atomic_exchange_1
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
@@ -340,7 +340,7 @@ define i8 @atomicrmw_xchg_i8_acq_rel(ptr %a, i8 %b) nounwind {
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT: li a2, 4
-; RV64I-NEXT: call __atomic_exchange_1@plt
+; RV64I-NEXT: call __atomic_exchange_1
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
@@ -394,7 +394,7 @@ define i8 @atomicrmw_xchg_i8_seq_cst(ptr %a, i8 %b) nounwind {
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT: li a2, 5
-; RV32I-NEXT: call __atomic_exchange_1@plt
+; RV32I-NEXT: call __atomic_exchange_1
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
@@ -424,7 +424,7 @@ define i8 @atomicrmw_xchg_i8_seq_cst(ptr %a, i8 %b) nounwind {
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT: li a2, 5
-; RV64I-NEXT: call __atomic_exchange_1@plt
+; RV64I-NEXT: call __atomic_exchange_1
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
@@ -462,7 +462,7 @@ define i8 @atomicrmw_xchg_0_i8_monotonic(ptr %a) nounwind {
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT: li a1, 0
; RV32I-NEXT: li a2, 0
-; RV32I-NEXT: call __atomic_exchange_1@plt
+; RV32I-NEXT: call __atomic_exchange_1
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
@@ -484,7 +484,7 @@ define i8 @atomicrmw_xchg_0_i8_monotonic(ptr %a) nounwind {
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT: li a1, 0
; RV64I-NEXT: li a2, 0
-; RV64I-NEXT: call __atomic_exchange_1@plt
+; RV64I-NEXT: call __atomic_exchange_1
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
@@ -510,7 +510,7 @@ define i8 @atomicrmw_xchg_0_i8_acquire(ptr %a) nounwind {
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT: li a2, 2
; RV32I-NEXT: li a1, 0
-; RV32I-NEXT: call __atomic_exchange_1@plt
+; RV32I-NEXT: call __atomic_exchange_1
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
@@ -543,7 +543,7 @@ define i8 @atomicrmw_xchg_0_i8_acquire(ptr %a) nounwind {
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT: li a2, 2
; RV64I-NEXT: li a1, 0
-; RV64I-NEXT: call __atomic_exchange_1@plt
+; RV64I-NEXT: call __atomic_exchange_1
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
@@ -580,7 +580,7 @@ define i8 @atomicrmw_xchg_0_i8_release(ptr %a) nounwind {
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT: li a2, 3
; RV32I-NEXT: li a1, 0
-; RV32I-NEXT: call __atomic_exchange_1@plt
+; RV32I-NEXT: call __atomic_exchange_1
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
@@ -613,7 +613,7 @@ define i8 @atomicrmw_xchg_0_i8_release(ptr %a) nounwind {
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT: li a2, 3
; RV64I-NEXT: li a1, 0
-; RV64I-NEXT: call __atomic_exchange_1@plt
+; RV64I-NEXT: call __atomic_exchange_1
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
@@ -650,7 +650,7 @@ define i8 @atomicrmw_xchg_0_i8_acq_rel(ptr %a) nounwind {
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT: li a2, 4
; RV32I-NEXT: li a1, 0
-; RV32I-NEXT: call __atomic_exchange_1@plt
+; RV32I-NEXT: call __atomic_exchange_1
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
@@ -683,7 +683,7 @@ define i8 @atomicrmw_xchg_0_i8_acq_rel(ptr %a) nounwind {
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT: li a2, 4
; RV64I-NEXT: li a1, 0
-; RV64I-NEXT: call __atomic_exchange_1@plt
+; RV64I-NEXT: call __atomic_exchange_1
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
@@ -720,7 +720,7 @@ define i8 @atomicrmw_xchg_0_i8_seq_cst(ptr %a) nounwind {
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT: li a2, 5
; RV32I-NEXT: li a1, 0
-; RV32I-NEXT: call __atomic_exchange_1@plt
+; RV32I-NEXT: call __atomic_exchange_1
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
@@ -753,7 +753,7 @@ define i8 @atomicrmw_xchg_0_i8_seq_cst(ptr %a) nounwind {
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT: li a2, 5
; RV64I-NEXT: li a1, 0
-; RV64I-NEXT: call __atomic_exchange_1@plt
+; RV64I-NEXT: call __atomic_exchange_1
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
@@ -790,7 +790,7 @@ define i8 @atomicrmw_xchg_minus_1_i8_monotonic(ptr %a) nounwind {
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT: li a1, 255
; RV32I-NEXT: li a2, 0
-; RV32I-NEXT: call __atomic_exchange_1@plt
+; RV32I-NEXT: call __atomic_exchange_1
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
@@ -811,7 +811,7 @@ define i8 @atomicrmw_xchg_minus_1_i8_monotonic(ptr %a) nounwind {
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT: li a1, 255
; RV64I-NEXT: li a2, 0
-; RV64I-NEXT: call __atomic_exchange_1@plt
+; RV64I-NEXT: call __atomic_exchange_1
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
@@ -836,7 +836,7 @@ define i8 @atomicrmw_xchg_minus_1_i8_acquire(ptr %a) nounwind {
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT: li a1, 255
; RV32I-NEXT: li a2, 2
-; RV32I-NEXT: call __atomic_exchange_1@plt
+; RV32I-NEXT: call __atomic_exchange_1
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
@@ -867,7 +867,7 @@ define i8 @atomicrmw_xchg_minus_1_i8_acquire(ptr %a) nounwind {
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT: li a1, 255
; RV64I-NEXT: li a2, 2
-; RV64I-NEXT: call __atomic_exchange_1@plt
+; RV64I-NEXT: call __atomic_exchange_1
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
@@ -902,7 +902,7 @@ define i8 @atomicrmw_xchg_minus_1_i8_release(ptr %a) nounwind {
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT: li a1, 255
; RV32I-NEXT: li a2, 3
-; RV32I-NEXT: call __atomic_exchange_1@plt
+; RV32I-NEXT: call __atomic_exchange_1
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
@@ -933,7 +933,7 @@ define i8 @atomicrmw_xchg_minus_1_i8_release(ptr %a) nounwind {
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT: li a1, 255
; RV64I-NEXT: li a2, 3
-; RV64I-NEXT: call __atomic_exchange_1@plt
+; RV64I-NEXT: call __atomic_exchange_1
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
@@ -968,7 +968,7 @@ define i8 @atomicrmw_xchg_minus_1_i8_acq_rel(ptr %a) nounwind {
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT: li a1, 255
; RV32I-NEXT: li a2, 4
-; RV32I-NEXT: call __atomic_exchange_1@plt
+; RV32I-NEXT: call __atomic_exchange_1
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
@@ -999,7 +999,7 @@ define i8 @atomicrmw_xchg_minus_1_i8_acq_rel(ptr %a) nounwind {
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT: li a1, 255
; RV64I-NEXT: li a2, 4
-; RV64I-NEXT: call __atomic_exchange_1@plt
+; RV64I-NEXT: call __atomic_exchange_1
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
@@ -1034,7 +1034,7 @@ define i8 @atomicrmw_xchg_minus_1_i8_seq_cst(ptr %a) nounwind {
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT: li a1, 255
; RV32I-NEXT: li a2, 5
-; RV32I-NEXT: call __atomic_exchange_1@plt
+; RV32I-NEXT: call __atomic_exchange_1
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
@@ -1065,7 +1065,7 @@ define i8 @atomicrmw_xchg_minus_1_i8_seq_cst(ptr %a) nounwind {
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT: li a1, 255
; RV64I-NEXT: li a2, 5
-; RV64I-NEXT: call __atomic_exchange_1@plt
+; RV64I-NEXT: call __atomic_exchange_1
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
@@ -1099,7 +1099,7 @@ define i8 @atomicrmw_add_i8_monotonic(ptr %a, i8 %b) nounwind {
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT: li a2, 0
-; RV32I-NEXT: call __atomic_fetch_add_1@plt
+; RV32I-NEXT: call __atomic_fetch_add_1
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
@@ -1129,7 +1129,7 @@ define i8 @atomicrmw_add_i8_monotonic(ptr %a, i8 %b) nounwind {
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT: li a2, 0
-; RV64I-NEXT: call __atomic_fetch_add_1@plt
+; RV64I-NEXT: call __atomic_fetch_add_1
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
@@ -1163,7 +1163,7 @@ define i8 @atomicrmw_add_i8_acquire(ptr %a, i8 %b) nounwind {
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT: li a2, 2
-; RV32I-NEXT: call __atomic_fetch_add_1@plt
+; RV32I-NEXT: call __atomic_fetch_add_1
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
@@ -1213,7 +1213,7 @@ define i8 @atomicrmw_add_i8_acquire(ptr %a, i8 %b) nounwind {
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT: li a2, 2
-; RV64I-NEXT: call __atomic_fetch_add_1@plt
+; RV64I-NEXT: call __atomic_fetch_add_1
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
@@ -1267,7 +1267,7 @@ define i8 @atomicrmw_add_i8_release(ptr %a, i8 %b) nounwind {
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT: li a2, 3
-; RV32I-NEXT: call __atomic_fetch_add_1@plt
+; RV32I-NEXT: call __atomic_fetch_add_1
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
@@ -1317,7 +1317,7 @@ define i8 @atomicrmw_add_i8_release(ptr %a, i8 %b) nounwind {
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT: li a2, 3
-; RV64I-NEXT: call __atomic_fetch_add_1@plt
+; RV64I-NEXT: call __atomic_fetch_add_1
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
@@ -1371,7 +1371,7 @@ define i8 @atomicrmw_add_i8_acq_rel(ptr %a, i8 %b) nounwind {
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT: li a2, 4
-; RV32I-NEXT: call __atomic_fetch_add_1@plt
+; RV32I-NEXT: call __atomic_fetch_add_1
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
@@ -1421,7 +1421,7 @@ define i8 @atomicrmw_add_i8_acq_rel(ptr %a, i8 %b) nounwind {
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT: li a2, 4
-; RV64I-NEXT: call __atomic_fetch_add_1@plt
+; RV64I-NEXT: call __atomic_fetch_add_1
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
@@ -1475,7 +1475,7 @@ define i8 @atomicrmw_add_i8_seq_cst(ptr %a, i8 %b) nounwind {
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT: li a2, 5
-; RV32I-NEXT: call __atomic_fetch_add_1@plt
+; RV32I-NEXT: call __atomic_fetch_add_1
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
@@ -1505,7 +1505,7 @@ define i8 @atomicrmw_add_i8_seq_cst(ptr %a, i8 %b) nounwind {
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT: li a2, 5
-; RV64I-NEXT: call __atomic_fetch_add_1@plt
+; RV64I-NEXT: call __atomic_fetch_add_1
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
@@ -1539,7 +1539,7 @@ define i8 @atomicrmw_sub_i8_monotonic(ptr %a, i8 %b) nounwind {
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT: li a2, 0
-; RV32I-NEXT: call __atomic_fetch_sub_1@plt
+; RV32I-NEXT: call __atomic_fetch_sub_1
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
@@ -1569,7 +1569,7 @@ define i8 @atomicrmw_sub_i8_monotonic(ptr %a, i8 %b) nounwind {
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT: li a2, 0
-; RV64I-NEXT: call __atomic_fetch_sub_1@plt
+; RV64I-NEXT: call __atomic_fetch_sub_1
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
@@ -1603,7 +1603,7 @@ define i8 @atomicrmw_sub_i8_acquire(ptr %a, i8 %b) nounwind {
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT: li a2, 2
-; RV32I-NEXT: call __atomic_fetch_sub_1@plt
+; RV32I-NEXT: call __atomic_fetch_sub_1
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
@@ -1653,7 +1653,7 @@ define i8 @atomicrmw_sub_i8_acquire(ptr %a, i8 %b) nounwind {
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT: li a2, 2
-; RV64I-NEXT: call __atomic_fetch_sub_1@plt
+; RV64I-NEXT: call __atomic_fetch_sub_1
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
@@ -1707,7 +1707,7 @@ define i8 @atomicrmw_sub_i8_release(ptr %a, i8 %b) nounwind {
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT: li a2, 3
-; RV32I-NEXT: call __atomic_fetch_sub_1@plt
+; RV32I-NEXT: call __atomic_fetch_sub_1
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
@@ -1757,7 +1757,7 @@ define i8 @atomicrmw_sub_i8_release(ptr %a, i8 %b) nounwind {
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT: li a2, 3
-; RV64I-NEXT: call __atomic_fetch_sub_1@plt
+; RV64I-NEXT: call __atomic_fetch_sub_1
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
@@ -1811,7 +1811,7 @@ define i8 @atomicrmw_sub_i8_acq_rel(ptr %a, i8 %b) nounwind {
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT: li a2, 4
-; RV32I-NEXT: call __atomic_fetch_sub_1@plt
+; RV32I-NEXT: call __atomic_fetch_sub_1
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
@@ -1861,7 +1861,7 @@ define i8 @atomicrmw_sub_i8_acq_rel(ptr %a, i8 %b) nounwind {
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT: li a2, 4
-; RV64I-NEXT: call __atomic_fetch_sub_1@plt
+; RV64I-NEXT: call __atomic_fetch_sub_1
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
@@ -1915,7 +1915,7 @@ define i8 @atomicrmw_sub_i8_seq_cst(ptr %a, i8 %b) nounwind {
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT: li a2, 5
-; RV32I-NEXT: call __atomic_fetch_sub_1@plt
+; RV32I-NEXT: call __atomic_fetch_sub_1
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
@@ -1945,7 +1945,7 @@ define i8 @atomicrmw_sub_i8_seq_cst(ptr %a, i8 %b) nounwind {
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT: li a2, 5
-; RV64I-NEXT: call __atomic_fetch_sub_1@plt
+; RV64I-NEXT: call __atomic_fetch_sub_1
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
@@ -1979,7 +1979,7 @@ define i8 @atomicrmw_and_i8_monotonic(ptr %a, i8 %b) nounwind {
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT: li a2, 0
-; RV32I-NEXT: call __atomic_fetch_and_1@plt
+; RV32I-NEXT: call __atomic_fetch_and_1
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
@@ -2003,7 +2003,7 @@ define i8 @atomicrmw_and_i8_monotonic(ptr %a, i8 %b) nounwind {
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT: li a2, 0
-; RV64I-NEXT: call __atomic_fetch_and_1@plt
+; RV64I-NEXT: call __atomic_fetch_and_1
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
@@ -2031,7 +2031,7 @@ define i8 @atomicrmw_and_i8_acquire(ptr %a, i8 %b) nounwind {
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT: li a2, 2
-; RV32I-NEXT: call __atomic_fetch_and_1@plt
+; RV32I-NEXT: call __atomic_fetch_and_1
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
@@ -2069,7 +2069,7 @@ define i8 @atomicrmw_and_i8_acquire(ptr %a, i8 %b) nounwind {
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT: li a2, 2
-; RV64I-NEXT: call __atomic_fetch_and_1@plt
+; RV64I-NEXT: call __atomic_fetch_and_1
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
@@ -2111,7 +2111,7 @@ define i8 @atomicrmw_and_i8_release(ptr %a, i8 %b) nounwind {
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT: li a2, 3
-; RV32I-NEXT: call __atomic_fetch_and_1@plt
+; RV32I-NEXT: call __atomic_fetch_and_1
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
@@ -2149,7 +2149,7 @@ define i8 @atomicrmw_and_i8_release(ptr %a, i8 %b) nounwind {
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT: li a2, 3
-; RV64I-NEXT: call __atomic_fetch_and_1@plt
+; RV64I-NEXT: call __atomic_fetch_and_1
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
@@ -2191,7 +2191,7 @@ define i8 @atomicrmw_and_i8_acq_rel(ptr %a, i8 %b) nounwind {
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT: li a2, 4
-; RV32I-NEXT: call __atomic_fetch_and_1@plt
+; RV32I-NEXT: call __atomic_fetch_and_1
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
@@ -2229,7 +2229,7 @@ define i8 @atomicrmw_and_i8_acq_rel(ptr %a, i8 %b) nounwind {
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT: li a2, 4
-; RV64I-NEXT: call __atomic_fetch_and_1@plt
+; RV64I-NEXT: call __atomic_fetch_and_1
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
@@ -2271,7 +2271,7 @@ define i8 @atomicrmw_and_i8_seq_cst(ptr %a, i8 %b) nounwind {
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT: li a2, 5
-; RV32I-NEXT: call __atomic_fetch_and_1@plt
+; RV32I-NEXT: call __atomic_fetch_and_1
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
@@ -2309,7 +2309,7 @@ define i8 @atomicrmw_and_i8_seq_cst(ptr %a, i8 %b) nounwind {
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT: li a2, 5
-; RV64I-NEXT: call __atomic_fetch_and_1@plt
+; RV64I-NEXT: call __atomic_fetch_and_1
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
@@ -2351,7 +2351,7 @@ define i8 @atomicrmw_nand_i8_monotonic(ptr %a, i8 %b) nounwind {
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT: li a2, 0
-; RV32I-NEXT: call __atomic_fetch_nand_1@plt
+; RV32I-NEXT: call __atomic_fetch_nand_1
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
@@ -2382,7 +2382,7 @@ define i8 @atomicrmw_nand_i8_monotonic(ptr %a, i8 %b) nounwind {
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT: li a2, 0
-; RV64I-NEXT: call __atomic_fetch_nand_1@plt
+; RV64I-NEXT: call __atomic_fetch_nand_1
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
@@ -2417,7 +2417,7 @@ define i8 @atomicrmw_nand_i8_acquire(ptr %a, i8 %b) nounwind {
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT: li a2, 2
-; RV32I-NEXT: call __atomic_fetch_nand_1@plt
+; RV32I-NEXT: call __atomic_fetch_nand_1
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
@@ -2469,7 +2469,7 @@ define i8 @atomicrmw_nand_i8_acquire(ptr %a, i8 %b) nounwind {
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT: li a2, 2
-; RV64I-NEXT: call __atomic_fetch_nand_1@plt
+; RV64I-NEXT: call __atomic_fetch_nand_1
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
@@ -2525,7 +2525,7 @@ define i8 @atomicrmw_nand_i8_release(ptr %a, i8 %b) nounwind {
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT: li a2, 3
-; RV32I-NEXT: call __atomic_fetch_nand_1@plt
+; RV32I-NEXT: call __atomic_fetch_nand_1
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
@@ -2577,7 +2577,7 @@ define i8 @atomicrmw_nand_i8_release(ptr %a, i8 %b) nounwind {
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT: li a2, 3
-; RV64I-NEXT: call __atomic_fetch_nand_1@plt
+; RV64I-NEXT: call __atomic_fetch_nand_1
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
@@ -2633,7 +2633,7 @@ define i8 @atomicrmw_nand_i8_acq_rel(ptr %a, i8 %b) nounwind {
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT: li a2, 4
-; RV32I-NEXT: call __atomic_fetch_nand_1@plt
+; RV32I-NEXT: call __atomic_fetch_nand_1
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
@@ -2685,7 +2685,7 @@ define i8 @atomicrmw_nand_i8_acq_rel(ptr %a, i8 %b) nounwind {
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT: li a2, 4
-; RV64I-NEXT: call __atomic_fetch_nand_1@plt
+; RV64I-NEXT: call __atomic_fetch_nand_1
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
@@ -2741,7 +2741,7 @@ define i8 @atomicrmw_nand_i8_seq_cst(ptr %a, i8 %b) nounwind {
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT: li a2, 5
-; RV32I-NEXT: call __atomic_fetch_nand_1@plt
+; RV32I-NEXT: call __atomic_fetch_nand_1
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
@@ -2772,7 +2772,7 @@ define i8 @atomicrmw_nand_i8_seq_cst(ptr %a, i8 %b) nounwind {
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT: li a2, 5
-; RV64I-NEXT: call __atomic_fetch_nand_1@plt
+; RV64I-NEXT: call __atomic_fetch_nand_1
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
@@ -2807,7 +2807,7 @@ define i8 @atomicrmw_or_i8_monotonic(ptr %a, i8 %b) nounwind {
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT: li a2, 0
-; RV32I-NEXT: call __atomic_fetch_or_1@plt
+; RV32I-NEXT: call __atomic_fetch_or_1
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
@@ -2827,7 +2827,7 @@ define i8 @atomicrmw_or_i8_monotonic(ptr %a, i8 %b) nounwind {
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT: li a2, 0
-; RV64I-NEXT: call __atomic_fetch_or_1@plt
+; RV64I-NEXT: call __atomic_fetch_or_1
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
@@ -2851,7 +2851,7 @@ define i8 @atomicrmw_or_i8_acquire(ptr %a, i8 %b) nounwind {
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT: li a2, 2
-; RV32I-NEXT: call __atomic_fetch_or_1@plt
+; RV32I-NEXT: call __atomic_fetch_or_1
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
@@ -2881,7 +2881,7 @@ define i8 @atomicrmw_or_i8_acquire(ptr %a, i8 %b) nounwind {
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT: li a2, 2
-; RV64I-NEXT: call __atomic_fetch_or_1@plt
+; RV64I-NEXT: call __atomic_fetch_or_1
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
@@ -2915,7 +2915,7 @@ define i8 @atomicrmw_or_i8_release(ptr %a, i8 %b) nounwind {
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT: li a2, 3
-; RV32I-NEXT: call __atomic_fetch_or_1@plt
+; RV32I-NEXT: call __atomic_fetch_or_1
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
@@ -2945,7 +2945,7 @@ define i8 @atomicrmw_or_i8_release(ptr %a, i8 %b) nounwind {
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT: li a2, 3
-; RV64I-NEXT: call __atomic_fetch_or_1@plt
+; RV64I-NEXT: call __atomic_fetch_or_1
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
@@ -2979,7 +2979,7 @@ define i8 @atomicrmw_or_i8_acq_rel(ptr %a, i8 %b) nounwind {
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT: li a2, 4
-; RV32I-NEXT: call __atomic_fetch_or_1@plt
+; RV32I-NEXT: call __atomic_fetch_or_1
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
@@ -3009,7 +3009,7 @@ define i8 @atomicrmw_or_i8_acq_rel(ptr %a, i8 %b) nounwind {
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT: li a2, 4
-; RV64I-NEXT: call __atomic_fetch_or_1@plt
+; RV64I-NEXT: call __atomic_fetch_or_1
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
@@ -3043,7 +3043,7 @@ define i8 @atomicrmw_or_i8_seq_cst(ptr %a, i8 %b) nounwind {
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT: li a2, 5
-; RV32I-NEXT: call __atomic_fetch_or_1@plt
+; RV32I-NEXT: call __atomic_fetch_or_1
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
@@ -3073,7 +3073,7 @@ define i8 @atomicrmw_or_i8_seq_cst(ptr %a, i8 %b) nounwind {
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT: li a2, 5
-; RV64I-NEXT: call __atomic_fetch_or_1@plt
+; RV64I-NEXT: call __atomic_fetch_or_1
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
@@ -3107,7 +3107,7 @@ define i8 @atomicrmw_xor_i8_monotonic(ptr %a, i8 %b) nounwind {
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT: li a2, 0
-; RV32I-NEXT: call __atomic_fetch_xor_1@plt
+; RV32I-NEXT: call __atomic_fetch_xor_1
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
@@ -3127,7 +3127,7 @@ define i8 @atomicrmw_xor_i8_monotonic(ptr %a, i8 %b) nounwind {
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT: li a2, 0
-; RV64I-NEXT: call __atomic_fetch_xor_1@plt
+; RV64I-NEXT: call __atomic_fetch_xor_1
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
@@ -3151,7 +3151,7 @@ define i8 @atomicrmw_xor_i8_acquire(ptr %a, i8 %b) nounwind {
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT: li a2, 2
-; RV32I-NEXT: call __atomic_fetch_xor_1@plt
+; RV32I-NEXT: call __atomic_fetch_xor_1
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
@@ -3181,7 +3181,7 @@ define i8 @atomicrmw_xor_i8_acquire(ptr %a, i8 %b) nounwind {
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT: li a2, 2
-; RV64I-NEXT: call __atomic_fetch_xor_1@plt
+; RV64I-NEXT: call __atomic_fetch_xor_1
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
@@ -3215,7 +3215,7 @@ define i8 @atomicrmw_xor_i8_release(ptr %a, i8 %b) nounwind {
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT: li a2, 3
-; RV32I-NEXT: call __atomic_fetch_xor_1@plt
+; RV32I-NEXT: call __atomic_fetch_xor_1
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
@@ -3245,7 +3245,7 @@ define i8 @atomicrmw_xor_i8_release(ptr %a, i8 %b) nounwind {
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT: li a2, 3
-; RV64I-NEXT: call __atomic_fetch_xor_1@plt
+; RV64I-NEXT: call __atomic_fetch_xor_1
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
@@ -3279,7 +3279,7 @@ define i8 @atomicrmw_xor_i8_acq_rel(ptr %a, i8 %b) nounwind {
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT: li a2, 4
-; RV32I-NEXT: call __atomic_fetch_xor_1@plt
+; RV32I-NEXT: call __atomic_fetch_xor_1
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
@@ -3309,7 +3309,7 @@ define i8 @atomicrmw_xor_i8_acq_rel(ptr %a, i8 %b) nounwind {
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT: li a2, 4
-; RV64I-NEXT: call __atomic_fetch_xor_1@plt
+; RV64I-NEXT: call __atomic_fetch_xor_1
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
@@ -3343,7 +3343,7 @@ define i8 @atomicrmw_xor_i8_seq_cst(ptr %a, i8 %b) nounwind {
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT: li a2, 5
-; RV32I-NEXT: call __atomic_fetch_xor_1@plt
+; RV32I-NEXT: call __atomic_fetch_xor_1
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
@@ -3373,7 +3373,7 @@ define i8 @atomicrmw_xor_i8_seq_cst(ptr %a, i8 %b) nounwind {
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT: li a2, 5
-; RV64I-NEXT: call __atomic_fetch_xor_1@plt
+; RV64I-NEXT: call __atomic_fetch_xor_1
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
@@ -3422,7 +3422,7 @@ define i8 @atomicrmw_max_i8_monotonic(ptr %a, i8 %b) nounwind {
; RV32I-NEXT: mv a0, s0
; RV32I-NEXT: li a3, 0
; RV32I-NEXT: li a4, 0
-; RV32I-NEXT: call __atomic_compare_exchange_1@plt
+; RV32I-NEXT: call __atomic_compare_exchange_1
; RV32I-NEXT: lbu a3, 15(sp)
; RV32I-NEXT: bnez a0, .LBB45_4
; RV32I-NEXT: .LBB45_2: # %atomicrmw.start
@@ -3493,7 +3493,7 @@ define i8 @atomicrmw_max_i8_monotonic(ptr %a, i8 %b) nounwind {
; RV64I-NEXT: mv a0, s0
; RV64I-NEXT: li a3, 0
; RV64I-NEXT: li a4, 0
-; RV64I-NEXT: call __atomic_compare_exchange_1@plt
+; RV64I-NEXT: call __atomic_compare_exchange_1
; RV64I-NEXT: lbu a3, 15(sp)
; RV64I-NEXT: bnez a0, .LBB45_4
; RV64I-NEXT: .LBB45_2: # %atomicrmw.start
@@ -3568,7 +3568,7 @@ define i8 @atomicrmw_max_i8_acquire(ptr %a, i8 %b) nounwind {
; RV32I-NEXT: li a3, 2
; RV32I-NEXT: li a4, 2
; RV32I-NEXT: mv a0, s0
-; RV32I-NEXT: call __atomic_compare_exchange_1@plt
+; RV32I-NEXT: call __atomic_compare_exchange_1
; RV32I-NEXT: lbu a3, 15(sp)
; RV32I-NEXT: bnez a0, .LBB46_4
; RV32I-NEXT: .LBB46_2: # %atomicrmw.start
@@ -3668,7 +3668,7 @@ define i8 @atomicrmw_max_i8_acquire(ptr %a, i8 %b) nounwind {
; RV64I-NEXT: li a3, 2
; RV64I-NEXT: li a4, 2
; RV64I-NEXT: mv a0, s0
-; RV64I-NEXT: call __atomic_compare_exchange_1@plt
+; RV64I-NEXT: call __atomic_compare_exchange_1
; RV64I-NEXT: lbu a3, 15(sp)
; RV64I-NEXT: bnez a0, .LBB46_4
; RV64I-NEXT: .LBB46_2: # %atomicrmw.start
@@ -3772,7 +3772,7 @@ define i8 @atomicrmw_max_i8_release(ptr %a, i8 %b) nounwind {
; RV32I-NEXT: li a3, 3
; RV32I-NEXT: mv a0, s0
; RV32I-NEXT: li a4, 0
-; RV32I-NEXT: call __atomic_compare_exchange_1@plt
+; RV32I-NEXT: call __atomic_compare_exchange_1
; RV32I-NEXT: lbu a3, 15(sp)
; RV32I-NEXT: bnez a0, .LBB47_4
; RV32I-NEXT: .LBB47_2: # %atomicrmw.start
@@ -3872,7 +3872,7 @@ define i8 @atomicrmw_max_i8_release(ptr %a, i8 %b) nounwind {
; RV64I-NEXT: li a3, 3
; RV64I-NEXT: mv a0, s0
; RV64I-NEXT: li a4, 0
-; RV64I-NEXT: call __atomic_compare_exchange_1@plt
+; RV64I-NEXT: call __atomic_compare_exchange_1
; RV64I-NEXT: lbu a3, 15(sp)
; RV64I-NEXT: bnez a0, .LBB47_4
; RV64I-NEXT: .LBB47_2: # %atomicrmw.start
@@ -3976,7 +3976,7 @@ define i8 @atomicrmw_max_i8_acq_rel(ptr %a, i8 %b) nounwind {
; RV32I-NEXT: li a3, 4
; RV32I-NEXT: li a4, 2
; RV32I-NEXT: mv a0, s0
-; RV32I-NEXT: call __atomic_compare_exchange_1@plt
+; RV32I-NEXT: call __atomic_compare_exchange_1
; RV32I-NEXT: lbu a3, 15(sp)
; RV32I-NEXT: bnez a0, .LBB48_4
; RV32I-NEXT: .LBB48_2: # %atomicrmw.start
@@ -4076,7 +4076,7 @@ define i8 @atomicrmw_max_i8_acq_rel(ptr %a, i8 %b) nounwind {
; RV64I-NEXT: li a3, 4
; RV64I-NEXT: li a4, 2
; RV64I-NEXT: mv a0, s0
-; RV64I-NEXT: call __atomic_compare_exchange_1@plt
+; RV64I-NEXT: call __atomic_compare_exchange_1
; RV64I-NEXT: lbu a3, 15(sp)
; RV64I-NEXT: bnez a0, .LBB48_4
; RV64I-NEXT: .LBB48_2: # %atomicrmw.start
@@ -4180,7 +4180,7 @@ define i8 @atomicrmw_max_i8_seq_cst(ptr %a, i8 %b) nounwind {
; RV32I-NEXT: li a3, 5
; RV32I-NEXT: li a4, 5
; RV32I-NEXT: mv a0, s0
-; RV32I-NEXT: call __atomic_compare_exchange_1@plt
+; RV32I-NEXT: call __atomic_compare_exchange_1
; RV32I-NEXT: lbu a3, 15(sp)
; RV32I-NEXT: bnez a0, .LBB49_4
; RV32I-NEXT: .LBB49_2: # %atomicrmw.start
@@ -4251,7 +4251,7 @@ define i8 @atomicrmw_max_i8_seq_cst(ptr %a, i8 %b) nounwind {
; RV64I-NEXT: li a3, 5
; RV64I-NEXT: li a4, 5
; RV64I-NEXT: mv a0, s0
-; RV64I-NEXT: call __atomic_compare_exchange_1@plt
+; RV64I-NEXT: call __atomic_compare_exchange_1
; RV64I-NEXT: lbu a3, 15(sp)
; RV64I-NEXT: bnez a0, .LBB49_4
; RV64I-NEXT: .LBB49_2: # %atomicrmw.start
@@ -4326,7 +4326,7 @@ define i8 @atomicrmw_min_i8_monotonic(ptr %a, i8 %b) nounwind {
; RV32I-NEXT: mv a0, s0
; RV32I-NEXT: li a3, 0
; RV32I-NEXT: li a4, 0
-; RV32I-NEXT: call __atomic_compare_exchange_1@plt
+; RV32I-NEXT: call __atomic_compare_exchange_1
; RV32I-NEXT: lbu a3, 15(sp)
; RV32I-NEXT: bnez a0, .LBB50_4
; RV32I-NEXT: .LBB50_2: # %atomicrmw.start
@@ -4397,7 +4397,7 @@ define i8 @atomicrmw_min_i8_monotonic(ptr %a, i8 %b) nounwind {
; RV64I-NEXT: mv a0, s0
; RV64I-NEXT: li a3, 0
; RV64I-NEXT: li a4, 0
-; RV64I-NEXT: call __atomic_compare_exchange_1@plt
+; RV64I-NEXT: call __atomic_compare_exchange_1
; RV64I-NEXT: lbu a3, 15(sp)
; RV64I-NEXT: bnez a0, .LBB50_4
; RV64I-NEXT: .LBB50_2: # %atomicrmw.start
@@ -4472,7 +4472,7 @@ define i8 @atomicrmw_min_i8_acquire(ptr %a, i8 %b) nounwind {
; RV32I-NEXT: li a3, 2
; RV32I-NEXT: li a4, 2
; RV32I-NEXT: mv a0, s0
-; RV32I-NEXT: call __atomic_compare_exchange_1@plt
+; RV32I-NEXT: call __atomic_compare_exchange_1
; RV32I-NEXT: lbu a3, 15(sp)
; RV32I-NEXT: bnez a0, .LBB51_4
; RV32I-NEXT: .LBB51_2: # %atomicrmw.start
@@ -4572,7 +4572,7 @@ define i8 @atomicrmw_min_i8_acquire(ptr %a, i8 %b) nounwind {
; RV64I-NEXT: li a3, 2
; RV64I-NEXT: li a4, 2
; RV64I-NEXT: mv a0, s0
-; RV64I-NEXT: call __atomic_compare_exchange_1@plt
+; RV64I-NEXT: call __atomic_compare_exchange_1
; RV64I-NEXT: lbu a3, 15(sp)
; RV64I-NEXT: bnez a0, .LBB51_4
; RV64I-NEXT: .LBB51_2: # %atomicrmw.start
@@ -4676,7 +4676,7 @@ define i8 @atomicrmw_min_i8_release(ptr %a, i8 %b) nounwind {
; RV32I-NEXT: li a3, 3
; RV32I-NEXT: mv a0, s0
; RV32I-NEXT: li a4, 0
-; RV32I-NEXT: call __atomic_compare_exchange_1@plt
+; RV32I-NEXT: call __atomic_compare_exchange_1
; RV32I-NEXT: lbu a3, 15(sp)
; RV32I-NEXT: bnez a0, .LBB52_4
; RV32I-NEXT: .LBB52_2: # %atomicrmw.start
@@ -4776,7 +4776,7 @@ define i8 @atomicrmw_min_i8_release(ptr %a, i8 %b) nounwind {
; RV64I-NEXT: li a3, 3
; RV64I-NEXT: mv a0, s0
; RV64I-NEXT: li a4, 0
-; RV64I-NEXT: call __atomic_compare_exchange_1@plt
+; RV64I-NEXT: call __atomic_compare_exchange_1
; RV64I-NEXT: lbu a3, 15(sp)
; RV64I-NEXT: bnez a0, .LBB52_4
; RV64I-NEXT: .LBB52_2: # %atomicrmw.start
@@ -4880,7 +4880,7 @@ define i8 @atomicrmw_min_i8_acq_rel(ptr %a, i8 %b) nounwind {
; RV32I-NEXT: li a3, 4
; RV32I-NEXT: li a4, 2
; RV32I-NEXT: mv a0, s0
-; RV32I-NEXT: call __atomic_compare_exchange_1@plt
+; RV32I-NEXT: call __atomic_compare_exchange_1
; RV32I-NEXT: lbu a3, 15(sp)
; RV32I-NEXT: bnez a0, .LBB53_4
; RV32I-NEXT: .LBB53_2: # %atomicrmw.start
@@ -4980,7 +4980,7 @@ define i8 @atomicrmw_min_i8_acq_rel(ptr %a, i8 %b) nounwind {
; RV64I-NEXT: li a3, 4
; RV64I-NEXT: li a4, 2
; RV64I-NEXT: mv a0, s0
-; RV64I-NEXT: call __atomic_compare_exchange_1@plt
+; RV64I-NEXT: call __atomic_compare_exchange_1
; RV64I-NEXT: lbu a3, 15(sp)
; RV64I-NEXT: bnez a0, .LBB53_4
; RV64I-NEXT: .LBB53_2: # %atomicrmw.start
@@ -5084,7 +5084,7 @@ define i8 @atomicrmw_min_i8_seq_cst(ptr %a, i8 %b) nounwind {
; RV32I-NEXT: li a3, 5
; RV32I-NEXT: li a4, 5
; RV32I-NEXT: mv a0, s0
-; RV32I-NEXT: call __atomic_compare_exchange_1@plt
+; RV32I-NEXT: call __atomic_compare_exchange_1
; RV32I-NEXT: lbu a3, 15(sp)
; RV32I-NEXT: bnez a0, .LBB54_4
; RV32I-NEXT: .LBB54_2: # %atomicrmw.start
@@ -5155,7 +5155,7 @@ define i8 @atomicrmw_min_i8_seq_cst(ptr %a, i8 %b) nounwind {
; RV64I-NEXT: li a3, 5
; RV64I-NEXT: li a4, 5
; RV64I-NEXT: mv a0, s0
-; RV64I-NEXT: call __atomic_compare_exchange_1@plt
+; RV64I-NEXT: call __atomic_compare_exchange_1
; RV64I-NEXT: lbu a3, 15(sp)
; RV64I-NEXT: bnez a0, .LBB54_4
; RV64I-NEXT: .LBB54_2: # %atomicrmw.start
@@ -5229,7 +5229,7 @@ define i8 @atomicrmw_umax_i8_monotonic(ptr %a, i8 %b) nounwind {
; RV32I-NEXT: mv a0, s0
; RV32I-NEXT: li a3, 0
; RV32I-NEXT: li a4, 0
-; RV32I-NEXT: call __atomic_compare_exchange_1@plt
+; RV32I-NEXT: call __atomic_compare_exchange_1
; RV32I-NEXT: lbu a3, 15(sp)
; RV32I-NEXT: bnez a0, .LBB55_4
; RV32I-NEXT: .LBB55_2: # %atomicrmw.start
@@ -5293,7 +5293,7 @@ define i8 @atomicrmw_umax_i8_monotonic(ptr %a, i8 %b) nounwind {
; RV64I-NEXT: mv a0, s0
; RV64I-NEXT: li a3, 0
; RV64I-NEXT: li a4, 0
-; RV64I-NEXT: call __atomic_compare_exchange_1@plt
+; RV64I-NEXT: call __atomic_compare_exchange_1
; RV64I-NEXT: lbu a3, 15(sp)
; RV64I-NEXT: bnez a0, .LBB55_4
; RV64I-NEXT: .LBB55_2: # %atomicrmw.start
@@ -5361,7 +5361,7 @@ define i8 @atomicrmw_umax_i8_acquire(ptr %a, i8 %b) nounwind {
; RV32I-NEXT: li a3, 2
; RV32I-NEXT: li a4, 2
; RV32I-NEXT: mv a0, s0
-; RV32I-NEXT: call __atomic_compare_exchange_1@plt
+; RV32I-NEXT: call __atomic_compare_exchange_1
; RV32I-NEXT: lbu a3, 15(sp)
; RV32I-NEXT: bnez a0, .LBB56_4
; RV32I-NEXT: .LBB56_2: # %atomicrmw.start
@@ -5449,7 +5449,7 @@ define i8 @atomicrmw_umax_i8_acquire(ptr %a, i8 %b) nounwind {
; RV64I-NEXT: li a3, 2
; RV64I-NEXT: li a4, 2
; RV64I-NEXT: mv a0, s0
-; RV64I-NEXT: call __atomic_compare_exchange_1@plt
+; RV64I-NEXT: call __atomic_compare_exchange_1
; RV64I-NEXT: lbu a3, 15(sp)
; RV64I-NEXT: bnez a0, .LBB56_4
; RV64I-NEXT: .LBB56_2: # %atomicrmw.start
@@ -5541,7 +5541,7 @@ define i8 @atomicrmw_umax_i8_release(ptr %a, i8 %b) nounwind {
; RV32I-NEXT: li a3, 3
; RV32I-NEXT: mv a0, s0
; RV32I-NEXT: li a4, 0
-; RV32I-NEXT: call __atomic_compare_exchange_1@plt
+; RV32I-NEXT: call __atomic_compare_exchange_1
; RV32I-NEXT: lbu a3, 15(sp)
; RV32I-NEXT: bnez a0, .LBB57_4
; RV32I-NEXT: .LBB57_2: # %atomicrmw.start
@@ -5629,7 +5629,7 @@ define i8 @atomicrmw_umax_i8_release(ptr %a, i8 %b) nounwind {
; RV64I-NEXT: li a3, 3
; RV64I-NEXT: mv a0, s0
; RV64I-NEXT: li a4, 0
-; RV64I-NEXT: call __atomic_compare_exchange_1@plt
+; RV64I-NEXT: call __atomic_compare_exchange_1
; RV64I-NEXT: lbu a3, 15(sp)
; RV64I-NEXT: bnez a0, .LBB57_4
; RV64I-NEXT: .LBB57_2: # %atomicrmw.start
@@ -5721,7 +5721,7 @@ define i8 @atomicrmw_umax_i8_acq_rel(ptr %a, i8 %b) nounwind {
; RV32I-NEXT: li a3, 4
; RV32I-NEXT: li a4, 2
; RV32I-NEXT: mv a0, s0
-; RV32I-NEXT: call __atomic_compare_exchange_1@plt
+; RV32I-NEXT: call __atomic_compare_exchange_1
; RV32I-NEXT: lbu a3, 15(sp)
; RV32I-NEXT: bnez a0, .LBB58_4
; RV32I-NEXT: .LBB58_2: # %atomicrmw.start
@@ -5809,7 +5809,7 @@ define i8 @atomicrmw_umax_i8_acq_rel(ptr %a, i8 %b) nounwind {
; RV64I-NEXT: li a3, 4
; RV64I-NEXT: li a4, 2
; RV64I-NEXT: mv a0, s0
-; RV64I-NEXT: call __atomic_compare_exchange_1@plt
+; RV64I-NEXT: call __atomic_compare_exchange_1
; RV64I-NEXT: lbu a3, 15(sp)
; RV64I-NEXT: bnez a0, .LBB58_4
; RV64I-NEXT: .LBB58_2: # %atomicrmw.start
@@ -5901,7 +5901,7 @@ define i8 @atomicrmw_umax_i8_seq_cst(ptr %a, i8 %b) nounwind {
; RV32I-NEXT: li a3, 5
; RV32I-NEXT: li a4, 5
; RV32I-NEXT: mv a0, s0
-; RV32I-NEXT: call __atomic_compare_exchange_1@plt
+; RV32I-NEXT: call __atomic_compare_exchange_1
; RV32I-NEXT: lbu a3, 15(sp)
; RV32I-NEXT: bnez a0, .LBB59_4
; RV32I-NEXT: .LBB59_2: # %atomicrmw.start
@@ -5965,7 +5965,7 @@ define i8 @atomicrmw_umax_i8_seq_cst(ptr %a, i8 %b) nounwind {
; RV64I-NEXT: li a3, 5
; RV64I-NEXT: li a4, 5
; RV64I-NEXT: mv a0, s0
-; RV64I-NEXT: call __atomic_compare_exchange_1@plt
+; RV64I-NEXT: call __atomic_compare_exchange_1
; RV64I-NEXT: lbu a3, 15(sp)
; RV64I-NEXT: bnez a0, .LBB59_4
; RV64I-NEXT: .LBB59_2: # %atomicrmw.start
@@ -6033,7 +6033,7 @@ define i8 @atomicrmw_umin_i8_monotonic(ptr %a, i8 %b) nounwind {
; RV32I-NEXT: mv a0, s0
; RV32I-NEXT: li a3, 0
; RV32I-NEXT: li a4, 0
-; RV32I-NEXT: call __atomic_compare_exchange_1@plt
+; RV32I-NEXT: call __atomic_compare_exchange_1
; RV32I-NEXT: lbu a3, 15(sp)
; RV32I-NEXT: bnez a0, .LBB60_4
; RV32I-NEXT: .LBB60_2: # %atomicrmw.start
@@ -6097,7 +6097,7 @@ define i8 @atomicrmw_umin_i8_monotonic(ptr %a, i8 %b) nounwind {
; RV64I-NEXT: mv a0, s0
; RV64I-NEXT: li a3, 0
; RV64I-NEXT: li a4, 0
-; RV64I-NEXT: call __atomic_compare_exchange_1@plt
+; RV64I-NEXT: call __atomic_compare_exchange_1
; RV64I-NEXT: lbu a3, 15(sp)
; RV64I-NEXT: bnez a0, .LBB60_4
; RV64I-NEXT: .LBB60_2: # %atomicrmw.start
@@ -6165,7 +6165,7 @@ define i8 @atomicrmw_umin_i8_acquire(ptr %a, i8 %b) nounwind {
; RV32I-NEXT: li a3, 2
; RV32I-NEXT: li a4, 2
; RV32I-NEXT: mv a0, s0
-; RV32I-NEXT: call __atomic_compare_exchange_1@plt
+; RV32I-NEXT: call __atomic_compare_exchange_1
; RV32I-NEXT: lbu a3, 15(sp)
; RV32I-NEXT: bnez a0, .LBB61_4
; RV32I-NEXT: .LBB61_2: # %atomicrmw.start
@@ -6253,7 +6253,7 @@ define i8 @atomicrmw_umin_i8_acquire(ptr %a, i8 %b) nounwind {
; RV64I-NEXT: li a3, 2
; RV64I-NEXT: li a4, 2
; RV64I-NEXT: mv a0, s0
-; RV64I-NEXT: call __atomic_compare_exchange_1@plt
+; RV64I-NEXT: call __atomic_compare_exchange_1
; RV64I-NEXT: lbu a3, 15(sp)
; RV64I-NEXT: bnez a0, .LBB61_4
; RV64I-NEXT: .LBB61_2: # %atomicrmw.start
@@ -6345,7 +6345,7 @@ define i8 @atomicrmw_umin_i8_release(ptr %a, i8 %b) nounwind {
; RV32I-NEXT: li a3, 3
; RV32I-NEXT: mv a0, s0
; RV32I-NEXT: li a4, 0
-; RV32I-NEXT: call __atomic_compare_exchange_1@plt
+; RV32I-NEXT: call __atomic_compare_exchange_1
; RV32I-NEXT: lbu a3, 15(sp)
; RV32I-NEXT: bnez a0, .LBB62_4
; RV32I-NEXT: .LBB62_2: # %atomicrmw.start
@@ -6433,7 +6433,7 @@ define i8 @atomicrmw_umin_i8_release(ptr %a, i8 %b) nounwind {
; RV64I-NEXT: li a3, 3
; RV64I-NEXT: mv a0, s0
; RV64I-NEXT: li a4, 0
-; RV64I-NEXT: call __atomic_compare_exchange_1@plt
+; RV64I-NEXT: call __atomic_compare_exchange_1
; RV64I-NEXT: lbu a3, 15(sp)
; RV64I-NEXT: bnez a0, .LBB62_4
; RV64I-NEXT: .LBB62_2: # %atomicrmw.start
@@ -6525,7 +6525,7 @@ define i8 @atomicrmw_umin_i8_acq_rel(ptr %a, i8 %b) nounwind {
; RV32I-NEXT: li a3, 4
; RV32I-NEXT: li a4, 2
; RV32I-NEXT: mv a0, s0
-; RV32I-NEXT: call __atomic_compare_exchange_1@plt
+; RV32I-NEXT: call __atomic_compare_exchange_1
; RV32I-NEXT: lbu a3, 15(sp)
; RV32I-NEXT: bnez a0, .LBB63_4
; RV32I-NEXT: .LBB63_2: # %atomicrmw.start
@@ -6613,7 +6613,7 @@ define i8 @atomicrmw_umin_i8_acq_rel(ptr %a, i8 %b) nounwind {
; RV64I-NEXT: li a3, 4
; RV64I-NEXT: li a4, 2
; RV64I-NEXT: mv a0, s0
-; RV64I-NEXT: call __atomic_compare_exchange_1@plt
+; RV64I-NEXT: call __atomic_compare_exchange_1
; RV64I-NEXT: lbu a3, 15(sp)
; RV64I-NEXT: bnez a0, .LBB63_4
; RV64I-NEXT: .LBB63_2: # %atomicrmw.start
@@ -6705,7 +6705,7 @@ define i8 @atomicrmw_umin_i8_seq_cst(ptr %a, i8 %b) nounwind {
; RV32I-NEXT: li a3, 5
; RV32I-NEXT: li a4, 5
; RV32I-NEXT: mv a0, s0
-; RV32I-NEXT: call __atomic_compare_exchange_1@plt
+; RV32I-NEXT: call __atomic_compare_exchange_1
; RV32I-NEXT: lbu a3, 15(sp)
; RV32I-NEXT: bnez a0, .LBB64_4
; RV32I-NEXT: .LBB64_2: # %atomicrmw.start
@@ -6769,7 +6769,7 @@ define i8 @atomicrmw_umin_i8_seq_cst(ptr %a, i8 %b) nounwind {
; RV64I-NEXT: li a3, 5
; RV64I-NEXT: li a4, 5
; RV64I-NEXT: mv a0, s0
-; RV64I-NEXT: call __atomic_compare_exchange_1@plt
+; RV64I-NEXT: call __atomic_compare_exchange_1
; RV64I-NEXT: lbu a3, 15(sp)
; RV64I-NEXT: bnez a0, .LBB64_4
; RV64I-NEXT: .LBB64_2: # %atomicrmw.start
@@ -6823,7 +6823,7 @@ define i16 @atomicrmw_xchg_i16_monotonic(ptr %a, i16 %b) nounwind {
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT: li a2, 0
-; RV32I-NEXT: call __atomic_exchange_2@plt
+; RV32I-NEXT: call __atomic_exchange_2
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
@@ -6854,7 +6854,7 @@ define i16 @atomicrmw_xchg_i16_monotonic(ptr %a, i16 %b) nounwind {
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT: li a2, 0
-; RV64I-NEXT: call __atomic_exchange_2@plt
+; RV64I-NEXT: call __atomic_exchange_2
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
@@ -6889,7 +6889,7 @@ define i16 @atomicrmw_xchg_i16_acquire(ptr %a, i16 %b) nounwind {
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT: li a2, 2
-; RV32I-NEXT: call __atomic_exchange_2@plt
+; RV32I-NEXT: call __atomic_exchange_2
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
@@ -6941,7 +6941,7 @@ define i16 @atomicrmw_xchg_i16_acquire(ptr %a, i16 %b) nounwind {
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT: li a2, 2
-; RV64I-NEXT: call __atomic_exchange_2@plt
+; RV64I-NEXT: call __atomic_exchange_2
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
@@ -6997,7 +6997,7 @@ define i16 @atomicrmw_xchg_i16_release(ptr %a, i16 %b) nounwind {
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT: li a2, 3
-; RV32I-NEXT: call __atomic_exchange_2@plt
+; RV32I-NEXT: call __atomic_exchange_2
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
@@ -7049,7 +7049,7 @@ define i16 @atomicrmw_xchg_i16_release(ptr %a, i16 %b) nounwind {
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT: li a2, 3
-; RV64I-NEXT: call __atomic_exchange_2@plt
+; RV64I-NEXT: call __atomic_exchange_2
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
@@ -7105,7 +7105,7 @@ define i16 @atomicrmw_xchg_i16_acq_rel(ptr %a, i16 %b) nounwind {
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT: li a2, 4
-; RV32I-NEXT: call __atomic_exchange_2@plt
+; RV32I-NEXT: call __atomic_exchange_2
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
@@ -7157,7 +7157,7 @@ define i16 @atomicrmw_xchg_i16_acq_rel(ptr %a, i16 %b) nounwind {
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT: li a2, 4
-; RV64I-NEXT: call __atomic_exchange_2@plt
+; RV64I-NEXT: call __atomic_exchange_2
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
@@ -7213,7 +7213,7 @@ define i16 @atomicrmw_xchg_i16_seq_cst(ptr %a, i16 %b) nounwind {
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT: li a2, 5
-; RV32I-NEXT: call __atomic_exchange_2@plt
+; RV32I-NEXT: call __atomic_exchange_2
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
@@ -7244,7 +7244,7 @@ define i16 @atomicrmw_xchg_i16_seq_cst(ptr %a, i16 %b) nounwind {
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT: li a2, 5
-; RV64I-NEXT: call __atomic_exchange_2@plt
+; RV64I-NEXT: call __atomic_exchange_2
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
@@ -7283,7 +7283,7 @@ define i16 @atomicrmw_xchg_0_i16_monotonic(ptr %a) nounwind {
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT: li a1, 0
; RV32I-NEXT: li a2, 0
-; RV32I-NEXT: call __atomic_exchange_2@plt
+; RV32I-NEXT: call __atomic_exchange_2
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
@@ -7306,7 +7306,7 @@ define i16 @atomicrmw_xchg_0_i16_monotonic(ptr %a) nounwind {
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT: li a1, 0
; RV64I-NEXT: li a2, 0
-; RV64I-NEXT: call __atomic_exchange_2@plt
+; RV64I-NEXT: call __atomic_exchange_2
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
@@ -7333,7 +7333,7 @@ define i16 @atomicrmw_xchg_0_i16_acquire(ptr %a) nounwind {
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT: li a2, 2
; RV32I-NEXT: li a1, 0
-; RV32I-NEXT: call __atomic_exchange_2@plt
+; RV32I-NEXT: call __atomic_exchange_2
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
@@ -7368,7 +7368,7 @@ define i16 @atomicrmw_xchg_0_i16_acquire(ptr %a) nounwind {
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT: li a2, 2
; RV64I-NEXT: li a1, 0
-; RV64I-NEXT: call __atomic_exchange_2@plt
+; RV64I-NEXT: call __atomic_exchange_2
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
@@ -7407,7 +7407,7 @@ define i16 @atomicrmw_xchg_0_i16_release(ptr %a) nounwind {
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT: li a2, 3
; RV32I-NEXT: li a1, 0
-; RV32I-NEXT: call __atomic_exchange_2@plt
+; RV32I-NEXT: call __atomic_exchange_2
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
@@ -7442,7 +7442,7 @@ define i16 @atomicrmw_xchg_0_i16_release(ptr %a) nounwind {
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT: li a2, 3
; RV64I-NEXT: li a1, 0
-; RV64I-NEXT: call __atomic_exchange_2@plt
+; RV64I-NEXT: call __atomic_exchange_2
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
@@ -7481,7 +7481,7 @@ define i16 @atomicrmw_xchg_0_i16_acq_rel(ptr %a) nounwind {
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT: li a2, 4
; RV32I-NEXT: li a1, 0
-; RV32I-NEXT: call __atomic_exchange_2@plt
+; RV32I-NEXT: call __atomic_exchange_2
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
@@ -7516,7 +7516,7 @@ define i16 @atomicrmw_xchg_0_i16_acq_rel(ptr %a) nounwind {
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT: li a2, 4
; RV64I-NEXT: li a1, 0
-; RV64I-NEXT: call __atomic_exchange_2@plt
+; RV64I-NEXT: call __atomic_exchange_2
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
@@ -7555,7 +7555,7 @@ define i16 @atomicrmw_xchg_0_i16_seq_cst(ptr %a) nounwind {
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT: li a2, 5
; RV32I-NEXT: li a1, 0
-; RV32I-NEXT: call __atomic_exchange_2@plt
+; RV32I-NEXT: call __atomic_exchange_2
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
@@ -7590,7 +7590,7 @@ define i16 @atomicrmw_xchg_0_i16_seq_cst(ptr %a) nounwind {
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT: li a2, 5
; RV64I-NEXT: li a1, 0
-; RV64I-NEXT: call __atomic_exchange_2@plt
+; RV64I-NEXT: call __atomic_exchange_2
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
@@ -7630,7 +7630,7 @@ define i16 @atomicrmw_xchg_minus_1_i16_monotonic(ptr %a) nounwind {
; RV32I-NEXT: lui a1, 16
; RV32I-NEXT: addi a1, a1, -1
; RV32I-NEXT: li a2, 0
-; RV32I-NEXT: call __atomic_exchange_2@plt
+; RV32I-NEXT: call __atomic_exchange_2
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
@@ -7653,7 +7653,7 @@ define i16 @atomicrmw_xchg_minus_1_i16_monotonic(ptr %a) nounwind {
; RV64I-NEXT: lui a1, 16
; RV64I-NEXT: addiw a1, a1, -1
; RV64I-NEXT: li a2, 0
-; RV64I-NEXT: call __atomic_exchange_2@plt
+; RV64I-NEXT: call __atomic_exchange_2
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
@@ -7680,7 +7680,7 @@ define i16 @atomicrmw_xchg_minus_1_i16_acquire(ptr %a) nounwind {
; RV32I-NEXT: lui a1, 16
; RV32I-NEXT: addi a1, a1, -1
; RV32I-NEXT: li a2, 2
-; RV32I-NEXT: call __atomic_exchange_2@plt
+; RV32I-NEXT: call __atomic_exchange_2
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
@@ -7714,7 +7714,7 @@ define i16 @atomicrmw_xchg_minus_1_i16_acquire(ptr %a) nounwind {
; RV64I-NEXT: lui a1, 16
; RV64I-NEXT: addiw a1, a1, -1
; RV64I-NEXT: li a2, 2
-; RV64I-NEXT: call __atomic_exchange_2@plt
+; RV64I-NEXT: call __atomic_exchange_2
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
@@ -7752,7 +7752,7 @@ define i16 @atomicrmw_xchg_minus_1_i16_release(ptr %a) nounwind {
; RV32I-NEXT: lui a1, 16
; RV32I-NEXT: addi a1, a1, -1
; RV32I-NEXT: li a2, 3
-; RV32I-NEXT: call __atomic_exchange_2@plt
+; RV32I-NEXT: call __atomic_exchange_2
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
@@ -7786,7 +7786,7 @@ define i16 @atomicrmw_xchg_minus_1_i16_release(ptr %a) nounwind {
; RV64I-NEXT: lui a1, 16
; RV64I-NEXT: addiw a1, a1, -1
; RV64I-NEXT: li a2, 3
-; RV64I-NEXT: call __atomic_exchange_2@plt
+; RV64I-NEXT: call __atomic_exchange_2
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
@@ -7824,7 +7824,7 @@ define i16 @atomicrmw_xchg_minus_1_i16_acq_rel(ptr %a) nounwind {
; RV32I-NEXT: lui a1, 16
; RV32I-NEXT: addi a1, a1, -1
; RV32I-NEXT: li a2, 4
-; RV32I-NEXT: call __atomic_exchange_2@plt
+; RV32I-NEXT: call __atomic_exchange_2
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
@@ -7858,7 +7858,7 @@ define i16 @atomicrmw_xchg_minus_1_i16_acq_rel(ptr %a) nounwind {
; RV64I-NEXT: lui a1, 16
; RV64I-NEXT: addiw a1, a1, -1
; RV64I-NEXT: li a2, 4
-; RV64I-NEXT: call __atomic_exchange_2@plt
+; RV64I-NEXT: call __atomic_exchange_2
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
@@ -7896,7 +7896,7 @@ define i16 @atomicrmw_xchg_minus_1_i16_seq_cst(ptr %a) nounwind {
; RV32I-NEXT: lui a1, 16
; RV32I-NEXT: addi a1, a1, -1
; RV32I-NEXT: li a2, 5
-; RV32I-NEXT: call __atomic_exchange_2@plt
+; RV32I-NEXT: call __atomic_exchange_2
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
@@ -7930,7 +7930,7 @@ define i16 @atomicrmw_xchg_minus_1_i16_seq_cst(ptr %a) nounwind {
; RV64I-NEXT: lui a1, 16
; RV64I-NEXT: addiw a1, a1, -1
; RV64I-NEXT: li a2, 5
-; RV64I-NEXT: call __atomic_exchange_2@plt
+; RV64I-NEXT: call __atomic_exchange_2
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
@@ -7966,7 +7966,7 @@ define i16 @atomicrmw_add_i16_monotonic(ptr %a, i16 %b) nounwind {
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT: li a2, 0
-; RV32I-NEXT: call __atomic_fetch_add_2@plt
+; RV32I-NEXT: call __atomic_fetch_add_2
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
@@ -7997,7 +7997,7 @@ define i16 @atomicrmw_add_i16_monotonic(ptr %a, i16 %b) nounwind {
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT: li a2, 0
-; RV64I-NEXT: call __atomic_fetch_add_2@plt
+; RV64I-NEXT: call __atomic_fetch_add_2
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
@@ -8032,7 +8032,7 @@ define i16 @atomicrmw_add_i16_acquire(ptr %a, i16 %b) nounwind {
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT: li a2, 2
-; RV32I-NEXT: call __atomic_fetch_add_2@plt
+; RV32I-NEXT: call __atomic_fetch_add_2
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
@@ -8084,7 +8084,7 @@ define i16 @atomicrmw_add_i16_acquire(ptr %a, i16 %b) nounwind {
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT: li a2, 2
-; RV64I-NEXT: call __atomic_fetch_add_2@plt
+; RV64I-NEXT: call __atomic_fetch_add_2
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
@@ -8140,7 +8140,7 @@ define i16 @atomicrmw_add_i16_release(ptr %a, i16 %b) nounwind {
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT: li a2, 3
-; RV32I-NEXT: call __atomic_fetch_add_2@plt
+; RV32I-NEXT: call __atomic_fetch_add_2
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
@@ -8192,7 +8192,7 @@ define i16 @atomicrmw_add_i16_release(ptr %a, i16 %b) nounwind {
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT: li a2, 3
-; RV64I-NEXT: call __atomic_fetch_add_2@plt
+; RV64I-NEXT: call __atomic_fetch_add_2
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
@@ -8248,7 +8248,7 @@ define i16 @atomicrmw_add_i16_acq_rel(ptr %a, i16 %b) nounwind {
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT: li a2, 4
-; RV32I-NEXT: call __atomic_fetch_add_2@plt
+; RV32I-NEXT: call __atomic_fetch_add_2
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
@@ -8300,7 +8300,7 @@ define i16 @atomicrmw_add_i16_acq_rel(ptr %a, i16 %b) nounwind {
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT: li a2, 4
-; RV64I-NEXT: call __atomic_fetch_add_2@plt
+; RV64I-NEXT: call __atomic_fetch_add_2
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
@@ -8356,7 +8356,7 @@ define i16 @atomicrmw_add_i16_seq_cst(ptr %a, i16 %b) nounwind {
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT: li a2, 5
-; RV32I-NEXT: call __atomic_fetch_add_2@plt
+; RV32I-NEXT: call __atomic_fetch_add_2
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
@@ -8387,7 +8387,7 @@ define i16 @atomicrmw_add_i16_seq_cst(ptr %a, i16 %b) nounwind {
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT: li a2, 5
-; RV64I-NEXT: call __atomic_fetch_add_2@plt
+; RV64I-NEXT: call __atomic_fetch_add_2
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
@@ -8422,7 +8422,7 @@ define i16 @atomicrmw_sub_i16_monotonic(ptr %a, i16 %b) nounwind {
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT: li a2, 0
-; RV32I-NEXT: call __atomic_fetch_sub_2@plt
+; RV32I-NEXT: call __atomic_fetch_sub_2
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
@@ -8453,7 +8453,7 @@ define i16 @atomicrmw_sub_i16_monotonic(ptr %a, i16 %b) nounwind {
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT: li a2, 0
-; RV64I-NEXT: call __atomic_fetch_sub_2@plt
+; RV64I-NEXT: call __atomic_fetch_sub_2
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
@@ -8488,7 +8488,7 @@ define i16 @atomicrmw_sub_i16_acquire(ptr %a, i16 %b) nounwind {
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT: li a2, 2
-; RV32I-NEXT: call __atomic_fetch_sub_2@plt
+; RV32I-NEXT: call __atomic_fetch_sub_2
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
@@ -8540,7 +8540,7 @@ define i16 @atomicrmw_sub_i16_acquire(ptr %a, i16 %b) nounwind {
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT: li a2, 2
-; RV64I-NEXT: call __atomic_fetch_sub_2@plt
+; RV64I-NEXT: call __atomic_fetch_sub_2
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
@@ -8596,7 +8596,7 @@ define i16 @atomicrmw_sub_i16_release(ptr %a, i16 %b) nounwind {
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT: li a2, 3
-; RV32I-NEXT: call __atomic_fetch_sub_2@plt
+; RV32I-NEXT: call __atomic_fetch_sub_2
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
@@ -8648,7 +8648,7 @@ define i16 @atomicrmw_sub_i16_release(ptr %a, i16 %b) nounwind {
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT: li a2, 3
-; RV64I-NEXT: call __atomic_fetch_sub_2@plt
+; RV64I-NEXT: call __atomic_fetch_sub_2
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
@@ -8704,7 +8704,7 @@ define i16 @atomicrmw_sub_i16_acq_rel(ptr %a, i16 %b) nounwind {
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT: li a2, 4
-; RV32I-NEXT: call __atomic_fetch_sub_2@plt
+; RV32I-NEXT: call __atomic_fetch_sub_2
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
@@ -8756,7 +8756,7 @@ define i16 @atomicrmw_sub_i16_acq_rel(ptr %a, i16 %b) nounwind {
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT: li a2, 4
-; RV64I-NEXT: call __atomic_fetch_sub_2@plt
+; RV64I-NEXT: call __atomic_fetch_sub_2
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
@@ -8812,7 +8812,7 @@ define i16 @atomicrmw_sub_i16_seq_cst(ptr %a, i16 %b) nounwind {
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT: li a2, 5
-; RV32I-NEXT: call __atomic_fetch_sub_2@plt
+; RV32I-NEXT: call __atomic_fetch_sub_2
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
@@ -8843,7 +8843,7 @@ define i16 @atomicrmw_sub_i16_seq_cst(ptr %a, i16 %b) nounwind {
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT: li a2, 5
-; RV64I-NEXT: call __atomic_fetch_sub_2@plt
+; RV64I-NEXT: call __atomic_fetch_sub_2
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
@@ -8878,7 +8878,7 @@ define i16 @atomicrmw_and_i16_monotonic(ptr %a, i16 %b) nounwind {
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT: li a2, 0
-; RV32I-NEXT: call __atomic_fetch_and_2@plt
+; RV32I-NEXT: call __atomic_fetch_and_2
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
@@ -8903,7 +8903,7 @@ define i16 @atomicrmw_and_i16_monotonic(ptr %a, i16 %b) nounwind {
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT: li a2, 0
-; RV64I-NEXT: call __atomic_fetch_and_2@plt
+; RV64I-NEXT: call __atomic_fetch_and_2
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
@@ -8932,7 +8932,7 @@ define i16 @atomicrmw_and_i16_acquire(ptr %a, i16 %b) nounwind {
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT: li a2, 2
-; RV32I-NEXT: call __atomic_fetch_and_2@plt
+; RV32I-NEXT: call __atomic_fetch_and_2
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
@@ -8972,7 +8972,7 @@ define i16 @atomicrmw_and_i16_acquire(ptr %a, i16 %b) nounwind {
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT: li a2, 2
-; RV64I-NEXT: call __atomic_fetch_and_2@plt
+; RV64I-NEXT: call __atomic_fetch_and_2
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
@@ -9016,7 +9016,7 @@ define i16 @atomicrmw_and_i16_release(ptr %a, i16 %b) nounwind {
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT: li a2, 3
-; RV32I-NEXT: call __atomic_fetch_and_2@plt
+; RV32I-NEXT: call __atomic_fetch_and_2
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
@@ -9056,7 +9056,7 @@ define i16 @atomicrmw_and_i16_release(ptr %a, i16 %b) nounwind {
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT: li a2, 3
-; RV64I-NEXT: call __atomic_fetch_and_2@plt
+; RV64I-NEXT: call __atomic_fetch_and_2
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
@@ -9100,7 +9100,7 @@ define i16 @atomicrmw_and_i16_acq_rel(ptr %a, i16 %b) nounwind {
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT: li a2, 4
-; RV32I-NEXT: call __atomic_fetch_and_2@plt
+; RV32I-NEXT: call __atomic_fetch_and_2
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
@@ -9140,7 +9140,7 @@ define i16 @atomicrmw_and_i16_acq_rel(ptr %a, i16 %b) nounwind {
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT: li a2, 4
-; RV64I-NEXT: call __atomic_fetch_and_2@plt
+; RV64I-NEXT: call __atomic_fetch_and_2
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
@@ -9184,7 +9184,7 @@ define i16 @atomicrmw_and_i16_seq_cst(ptr %a, i16 %b) nounwind {
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT: li a2, 5
-; RV32I-NEXT: call __atomic_fetch_and_2@plt
+; RV32I-NEXT: call __atomic_fetch_and_2
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
@@ -9224,7 +9224,7 @@ define i16 @atomicrmw_and_i16_seq_cst(ptr %a, i16 %b) nounwind {
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT: li a2, 5
-; RV64I-NEXT: call __atomic_fetch_and_2@plt
+; RV64I-NEXT: call __atomic_fetch_and_2
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
@@ -9268,7 +9268,7 @@ define i16 @atomicrmw_nand_i16_monotonic(ptr %a, i16 %b) nounwind {
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT: li a2, 0
-; RV32I-NEXT: call __atomic_fetch_nand_2@plt
+; RV32I-NEXT: call __atomic_fetch_nand_2
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
@@ -9300,7 +9300,7 @@ define i16 @atomicrmw_nand_i16_monotonic(ptr %a, i16 %b) nounwind {
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT: li a2, 0
-; RV64I-NEXT: call __atomic_fetch_nand_2@plt
+; RV64I-NEXT: call __atomic_fetch_nand_2
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
@@ -9336,7 +9336,7 @@ define i16 @atomicrmw_nand_i16_acquire(ptr %a, i16 %b) nounwind {
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT: li a2, 2
-; RV32I-NEXT: call __atomic_fetch_nand_2@plt
+; RV32I-NEXT: call __atomic_fetch_nand_2
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
@@ -9390,7 +9390,7 @@ define i16 @atomicrmw_nand_i16_acquire(ptr %a, i16 %b) nounwind {
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT: li a2, 2
-; RV64I-NEXT: call __atomic_fetch_nand_2@plt
+; RV64I-NEXT: call __atomic_fetch_nand_2
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
@@ -9448,7 +9448,7 @@ define i16 @atomicrmw_nand_i16_release(ptr %a, i16 %b) nounwind {
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT: li a2, 3
-; RV32I-NEXT: call __atomic_fetch_nand_2@plt
+; RV32I-NEXT: call __atomic_fetch_nand_2
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
@@ -9502,7 +9502,7 @@ define i16 @atomicrmw_nand_i16_release(ptr %a, i16 %b) nounwind {
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT: li a2, 3
-; RV64I-NEXT: call __atomic_fetch_nand_2@plt
+; RV64I-NEXT: call __atomic_fetch_nand_2
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
@@ -9560,7 +9560,7 @@ define i16 @atomicrmw_nand_i16_acq_rel(ptr %a, i16 %b) nounwind {
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT: li a2, 4
-; RV32I-NEXT: call __atomic_fetch_nand_2@plt
+; RV32I-NEXT: call __atomic_fetch_nand_2
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
@@ -9614,7 +9614,7 @@ define i16 @atomicrmw_nand_i16_acq_rel(ptr %a, i16 %b) nounwind {
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT: li a2, 4
-; RV64I-NEXT: call __atomic_fetch_nand_2@plt
+; RV64I-NEXT: call __atomic_fetch_nand_2
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
@@ -9672,7 +9672,7 @@ define i16 @atomicrmw_nand_i16_seq_cst(ptr %a, i16 %b) nounwind {
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT: li a2, 5
-; RV32I-NEXT: call __atomic_fetch_nand_2@plt
+; RV32I-NEXT: call __atomic_fetch_nand_2
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
@@ -9704,7 +9704,7 @@ define i16 @atomicrmw_nand_i16_seq_cst(ptr %a, i16 %b) nounwind {
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT: li a2, 5
-; RV64I-NEXT: call __atomic_fetch_nand_2@plt
+; RV64I-NEXT: call __atomic_fetch_nand_2
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
@@ -9740,7 +9740,7 @@ define i16 @atomicrmw_or_i16_monotonic(ptr %a, i16 %b) nounwind {
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT: li a2, 0
-; RV32I-NEXT: call __atomic_fetch_or_2@plt
+; RV32I-NEXT: call __atomic_fetch_or_2
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
@@ -9761,7 +9761,7 @@ define i16 @atomicrmw_or_i16_monotonic(ptr %a, i16 %b) nounwind {
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT: li a2, 0
-; RV64I-NEXT: call __atomic_fetch_or_2@plt
+; RV64I-NEXT: call __atomic_fetch_or_2
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
@@ -9786,7 +9786,7 @@ define i16 @atomicrmw_or_i16_acquire(ptr %a, i16 %b) nounwind {
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT: li a2, 2
-; RV32I-NEXT: call __atomic_fetch_or_2@plt
+; RV32I-NEXT: call __atomic_fetch_or_2
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
@@ -9818,7 +9818,7 @@ define i16 @atomicrmw_or_i16_acquire(ptr %a, i16 %b) nounwind {
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT: li a2, 2
-; RV64I-NEXT: call __atomic_fetch_or_2@plt
+; RV64I-NEXT: call __atomic_fetch_or_2
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
@@ -9854,7 +9854,7 @@ define i16 @atomicrmw_or_i16_release(ptr %a, i16 %b) nounwind {
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT: li a2, 3
-; RV32I-NEXT: call __atomic_fetch_or_2@plt
+; RV32I-NEXT: call __atomic_fetch_or_2
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
@@ -9886,7 +9886,7 @@ define i16 @atomicrmw_or_i16_release(ptr %a, i16 %b) nounwind {
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT: li a2, 3
-; RV64I-NEXT: call __atomic_fetch_or_2@plt
+; RV64I-NEXT: call __atomic_fetch_or_2
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
@@ -9922,7 +9922,7 @@ define i16 @atomicrmw_or_i16_acq_rel(ptr %a, i16 %b) nounwind {
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT: li a2, 4
-; RV32I-NEXT: call __atomic_fetch_or_2@plt
+; RV32I-NEXT: call __atomic_fetch_or_2
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
@@ -9954,7 +9954,7 @@ define i16 @atomicrmw_or_i16_acq_rel(ptr %a, i16 %b) nounwind {
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT: li a2, 4
-; RV64I-NEXT: call __atomic_fetch_or_2@plt
+; RV64I-NEXT: call __atomic_fetch_or_2
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
@@ -9990,7 +9990,7 @@ define i16 @atomicrmw_or_i16_seq_cst(ptr %a, i16 %b) nounwind {
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT: li a2, 5
-; RV32I-NEXT: call __atomic_fetch_or_2@plt
+; RV32I-NEXT: call __atomic_fetch_or_2
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
@@ -10022,7 +10022,7 @@ define i16 @atomicrmw_or_i16_seq_cst(ptr %a, i16 %b) nounwind {
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT: li a2, 5
-; RV64I-NEXT: call __atomic_fetch_or_2@plt
+; RV64I-NEXT: call __atomic_fetch_or_2
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
@@ -10058,7 +10058,7 @@ define i16 @atomicrmw_xor_i16_monotonic(ptr %a, i16 %b) nounwind {
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT: li a2, 0
-; RV32I-NEXT: call __atomic_fetch_xor_2@plt
+; RV32I-NEXT: call __atomic_fetch_xor_2
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
@@ -10079,7 +10079,7 @@ define i16 @atomicrmw_xor_i16_monotonic(ptr %a, i16 %b) nounwind {
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT: li a2, 0
-; RV64I-NEXT: call __atomic_fetch_xor_2@plt
+; RV64I-NEXT: call __atomic_fetch_xor_2
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
@@ -10104,7 +10104,7 @@ define i16 @atomicrmw_xor_i16_acquire(ptr %a, i16 %b) nounwind {
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT: li a2, 2
-; RV32I-NEXT: call __atomic_fetch_xor_2@plt
+; RV32I-NEXT: call __atomic_fetch_xor_2
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
@@ -10136,7 +10136,7 @@ define i16 @atomicrmw_xor_i16_acquire(ptr %a, i16 %b) nounwind {
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT: li a2, 2
-; RV64I-NEXT: call __atomic_fetch_xor_2@plt
+; RV64I-NEXT: call __atomic_fetch_xor_2
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
@@ -10172,7 +10172,7 @@ define i16 @atomicrmw_xor_i16_release(ptr %a, i16 %b) nounwind {
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT: li a2, 3
-; RV32I-NEXT: call __atomic_fetch_xor_2@plt
+; RV32I-NEXT: call __atomic_fetch_xor_2
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
@@ -10204,7 +10204,7 @@ define i16 @atomicrmw_xor_i16_release(ptr %a, i16 %b) nounwind {
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT: li a2, 3
-; RV64I-NEXT: call __atomic_fetch_xor_2@plt
+; RV64I-NEXT: call __atomic_fetch_xor_2
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
@@ -10240,7 +10240,7 @@ define i16 @atomicrmw_xor_i16_acq_rel(ptr %a, i16 %b) nounwind {
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT: li a2, 4
-; RV32I-NEXT: call __atomic_fetch_xor_2@plt
+; RV32I-NEXT: call __atomic_fetch_xor_2
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
@@ -10272,7 +10272,7 @@ define i16 @atomicrmw_xor_i16_acq_rel(ptr %a, i16 %b) nounwind {
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT: li a2, 4
-; RV64I-NEXT: call __atomic_fetch_xor_2@plt
+; RV64I-NEXT: call __atomic_fetch_xor_2
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
@@ -10308,7 +10308,7 @@ define i16 @atomicrmw_xor_i16_seq_cst(ptr %a, i16 %b) nounwind {
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT: li a2, 5
-; RV32I-NEXT: call __atomic_fetch_xor_2@plt
+; RV32I-NEXT: call __atomic_fetch_xor_2
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
@@ -10340,7 +10340,7 @@ define i16 @atomicrmw_xor_i16_seq_cst(ptr %a, i16 %b) nounwind {
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT: li a2, 5
-; RV64I-NEXT: call __atomic_fetch_xor_2@plt
+; RV64I-NEXT: call __atomic_fetch_xor_2
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
@@ -10391,7 +10391,7 @@ define i16 @atomicrmw_max_i16_monotonic(ptr %a, i16 %b) nounwind {
; RV32I-NEXT: mv a0, s0
; RV32I-NEXT: li a3, 0
; RV32I-NEXT: li a4, 0
-; RV32I-NEXT: call __atomic_compare_exchange_2@plt
+; RV32I-NEXT: call __atomic_compare_exchange_2
; RV32I-NEXT: lh a3, 14(sp)
; RV32I-NEXT: bnez a0, .LBB110_4
; RV32I-NEXT: .LBB110_2: # %atomicrmw.start
@@ -10464,7 +10464,7 @@ define i16 @atomicrmw_max_i16_monotonic(ptr %a, i16 %b) nounwind {
; RV64I-NEXT: mv a0, s0
; RV64I-NEXT: li a3, 0
; RV64I-NEXT: li a4, 0
-; RV64I-NEXT: call __atomic_compare_exchange_2@plt
+; RV64I-NEXT: call __atomic_compare_exchange_2
; RV64I-NEXT: lh a3, 14(sp)
; RV64I-NEXT: bnez a0, .LBB110_4
; RV64I-NEXT: .LBB110_2: # %atomicrmw.start
@@ -10541,7 +10541,7 @@ define i16 @atomicrmw_max_i16_acquire(ptr %a, i16 %b) nounwind {
; RV32I-NEXT: li a3, 2
; RV32I-NEXT: li a4, 2
; RV32I-NEXT: mv a0, s0
-; RV32I-NEXT: call __atomic_compare_exchange_2@plt
+; RV32I-NEXT: call __atomic_compare_exchange_2
; RV32I-NEXT: lh a3, 14(sp)
; RV32I-NEXT: bnez a0, .LBB111_4
; RV32I-NEXT: .LBB111_2: # %atomicrmw.start
@@ -10645,7 +10645,7 @@ define i16 @atomicrmw_max_i16_acquire(ptr %a, i16 %b) nounwind {
; RV64I-NEXT: li a3, 2
; RV64I-NEXT: li a4, 2
; RV64I-NEXT: mv a0, s0
-; RV64I-NEXT: call __atomic_compare_exchange_2@plt
+; RV64I-NEXT: call __atomic_compare_exchange_2
; RV64I-NEXT: lh a3, 14(sp)
; RV64I-NEXT: bnez a0, .LBB111_4
; RV64I-NEXT: .LBB111_2: # %atomicrmw.start
@@ -10753,7 +10753,7 @@ define i16 @atomicrmw_max_i16_release(ptr %a, i16 %b) nounwind {
; RV32I-NEXT: li a3, 3
; RV32I-NEXT: mv a0, s0
; RV32I-NEXT: li a4, 0
-; RV32I-NEXT: call __atomic_compare_exchange_2@plt
+; RV32I-NEXT: call __atomic_compare_exchange_2
; RV32I-NEXT: lh a3, 14(sp)
; RV32I-NEXT: bnez a0, .LBB112_4
; RV32I-NEXT: .LBB112_2: # %atomicrmw.start
@@ -10857,7 +10857,7 @@ define i16 @atomicrmw_max_i16_release(ptr %a, i16 %b) nounwind {
; RV64I-NEXT: li a3, 3
; RV64I-NEXT: mv a0, s0
; RV64I-NEXT: li a4, 0
-; RV64I-NEXT: call __atomic_compare_exchange_2@plt
+; RV64I-NEXT: call __atomic_compare_exchange_2
; RV64I-NEXT: lh a3, 14(sp)
; RV64I-NEXT: bnez a0, .LBB112_4
; RV64I-NEXT: .LBB112_2: # %atomicrmw.start
@@ -10965,7 +10965,7 @@ define i16 @atomicrmw_max_i16_acq_rel(ptr %a, i16 %b) nounwind {
; RV32I-NEXT: li a3, 4
; RV32I-NEXT: li a4, 2
; RV32I-NEXT: mv a0, s0
-; RV32I-NEXT: call __atomic_compare_exchange_2@plt
+; RV32I-NEXT: call __atomic_compare_exchange_2
; RV32I-NEXT: lh a3, 14(sp)
; RV32I-NEXT: bnez a0, .LBB113_4
; RV32I-NEXT: .LBB113_2: # %atomicrmw.start
@@ -11069,7 +11069,7 @@ define i16 @atomicrmw_max_i16_acq_rel(ptr %a, i16 %b) nounwind {
; RV64I-NEXT: li a3, 4
; RV64I-NEXT: li a4, 2
; RV64I-NEXT: mv a0, s0
-; RV64I-NEXT: call __atomic_compare_exchange_2@plt
+; RV64I-NEXT: call __atomic_compare_exchange_2
; RV64I-NEXT: lh a3, 14(sp)
; RV64I-NEXT: bnez a0, .LBB113_4
; RV64I-NEXT: .LBB113_2: # %atomicrmw.start
@@ -11177,7 +11177,7 @@ define i16 @atomicrmw_max_i16_seq_cst(ptr %a, i16 %b) nounwind {
; RV32I-NEXT: li a3, 5
; RV32I-NEXT: li a4, 5
; RV32I-NEXT: mv a0, s0
-; RV32I-NEXT: call __atomic_compare_exchange_2@plt
+; RV32I-NEXT: call __atomic_compare_exchange_2
; RV32I-NEXT: lh a3, 14(sp)
; RV32I-NEXT: bnez a0, .LBB114_4
; RV32I-NEXT: .LBB114_2: # %atomicrmw.start
@@ -11250,7 +11250,7 @@ define i16 @atomicrmw_max_i16_seq_cst(ptr %a, i16 %b) nounwind {
; RV64I-NEXT: li a3, 5
; RV64I-NEXT: li a4, 5
; RV64I-NEXT: mv a0, s0
-; RV64I-NEXT: call __atomic_compare_exchange_2@plt
+; RV64I-NEXT: call __atomic_compare_exchange_2
; RV64I-NEXT: lh a3, 14(sp)
; RV64I-NEXT: bnez a0, .LBB114_4
; RV64I-NEXT: .LBB114_2: # %atomicrmw.start
@@ -11327,7 +11327,7 @@ define i16 @atomicrmw_min_i16_monotonic(ptr %a, i16 %b) nounwind {
; RV32I-NEXT: mv a0, s0
; RV32I-NEXT: li a3, 0
; RV32I-NEXT: li a4, 0
-; RV32I-NEXT: call __atomic_compare_exchange_2@plt
+; RV32I-NEXT: call __atomic_compare_exchange_2
; RV32I-NEXT: lh a3, 14(sp)
; RV32I-NEXT: bnez a0, .LBB115_4
; RV32I-NEXT: .LBB115_2: # %atomicrmw.start
@@ -11400,7 +11400,7 @@ define i16 @atomicrmw_min_i16_monotonic(ptr %a, i16 %b) nounwind {
; RV64I-NEXT: mv a0, s0
; RV64I-NEXT: li a3, 0
; RV64I-NEXT: li a4, 0
-; RV64I-NEXT: call __atomic_compare_exchange_2@plt
+; RV64I-NEXT: call __atomic_compare_exchange_2
; RV64I-NEXT: lh a3, 14(sp)
; RV64I-NEXT: bnez a0, .LBB115_4
; RV64I-NEXT: .LBB115_2: # %atomicrmw.start
@@ -11477,7 +11477,7 @@ define i16 @atomicrmw_min_i16_acquire(ptr %a, i16 %b) nounwind {
; RV32I-NEXT: li a3, 2
; RV32I-NEXT: li a4, 2
; RV32I-NEXT: mv a0, s0
-; RV32I-NEXT: call __atomic_compare_exchange_2@plt
+; RV32I-NEXT: call __atomic_compare_exchange_2
; RV32I-NEXT: lh a3, 14(sp)
; RV32I-NEXT: bnez a0, .LBB116_4
; RV32I-NEXT: .LBB116_2: # %atomicrmw.start
@@ -11581,7 +11581,7 @@ define i16 @atomicrmw_min_i16_acquire(ptr %a, i16 %b) nounwind {
; RV64I-NEXT: li a3, 2
; RV64I-NEXT: li a4, 2
; RV64I-NEXT: mv a0, s0
-; RV64I-NEXT: call __atomic_compare_exchange_2@plt
+; RV64I-NEXT: call __atomic_compare_exchange_2
; RV64I-NEXT: lh a3, 14(sp)
; RV64I-NEXT: bnez a0, .LBB116_4
; RV64I-NEXT: .LBB116_2: # %atomicrmw.start
@@ -11689,7 +11689,7 @@ define i16 @atomicrmw_min_i16_release(ptr %a, i16 %b) nounwind {
; RV32I-NEXT: li a3, 3
; RV32I-NEXT: mv a0, s0
; RV32I-NEXT: li a4, 0
-; RV32I-NEXT: call __atomic_compare_exchange_2@plt
+; RV32I-NEXT: call __atomic_compare_exchange_2
; RV32I-NEXT: lh a3, 14(sp)
; RV32I-NEXT: bnez a0, .LBB117_4
; RV32I-NEXT: .LBB117_2: # %atomicrmw.start
@@ -11793,7 +11793,7 @@ define i16 @atomicrmw_min_i16_release(ptr %a, i16 %b) nounwind {
; RV64I-NEXT: li a3, 3
; RV64I-NEXT: mv a0, s0
; RV64I-NEXT: li a4, 0
-; RV64I-NEXT: call __atomic_compare_exchange_2@plt
+; RV64I-NEXT: call __atomic_compare_exchange_2
; RV64I-NEXT: lh a3, 14(sp)
; RV64I-NEXT: bnez a0, .LBB117_4
; RV64I-NEXT: .LBB117_2: # %atomicrmw.start
@@ -11901,7 +11901,7 @@ define i16 @atomicrmw_min_i16_acq_rel(ptr %a, i16 %b) nounwind {
; RV32I-NEXT: li a3, 4
; RV32I-NEXT: li a4, 2
; RV32I-NEXT: mv a0, s0
-; RV32I-NEXT: call __atomic_compare_exchange_2@plt
+; RV32I-NEXT: call __atomic_compare_exchange_2
; RV32I-NEXT: lh a3, 14(sp)
; RV32I-NEXT: bnez a0, .LBB118_4
; RV32I-NEXT: .LBB118_2: # %atomicrmw.start
@@ -12005,7 +12005,7 @@ define i16 @atomicrmw_min_i16_acq_rel(ptr %a, i16 %b) nounwind {
; RV64I-NEXT: li a3, 4
; RV64I-NEXT: li a4, 2
; RV64I-NEXT: mv a0, s0
-; RV64I-NEXT: call __atomic_compare_exchange_2@plt
+; RV64I-NEXT: call __atomic_compare_exchange_2
; RV64I-NEXT: lh a3, 14(sp)
; RV64I-NEXT: bnez a0, .LBB118_4
; RV64I-NEXT: .LBB118_2: # %atomicrmw.start
@@ -12113,7 +12113,7 @@ define i16 @atomicrmw_min_i16_seq_cst(ptr %a, i16 %b) nounwind {
; RV32I-NEXT: li a3, 5
; RV32I-NEXT: li a4, 5
; RV32I-NEXT: mv a0, s0
-; RV32I-NEXT: call __atomic_compare_exchange_2@plt
+; RV32I-NEXT: call __atomic_compare_exchange_2
; RV32I-NEXT: lh a3, 14(sp)
; RV32I-NEXT: bnez a0, .LBB119_4
; RV32I-NEXT: .LBB119_2: # %atomicrmw.start
@@ -12186,7 +12186,7 @@ define i16 @atomicrmw_min_i16_seq_cst(ptr %a, i16 %b) nounwind {
; RV64I-NEXT: li a3, 5
; RV64I-NEXT: li a4, 5
; RV64I-NEXT: mv a0, s0
-; RV64I-NEXT: call __atomic_compare_exchange_2@plt
+; RV64I-NEXT: call __atomic_compare_exchange_2
; RV64I-NEXT: lh a3, 14(sp)
; RV64I-NEXT: bnez a0, .LBB119_4
; RV64I-NEXT: .LBB119_2: # %atomicrmw.start
@@ -12265,7 +12265,7 @@ define i16 @atomicrmw_umax_i16_monotonic(ptr %a, i16 %b) nounwind {
; RV32I-NEXT: mv a0, s1
; RV32I-NEXT: li a3, 0
; RV32I-NEXT: li a4, 0
-; RV32I-NEXT: call __atomic_compare_exchange_2@plt
+; RV32I-NEXT: call __atomic_compare_exchange_2
; RV32I-NEXT: lh a1, 10(sp)
; RV32I-NEXT: bnez a0, .LBB120_4
; RV32I-NEXT: .LBB120_2: # %atomicrmw.start
@@ -12334,7 +12334,7 @@ define i16 @atomicrmw_umax_i16_monotonic(ptr %a, i16 %b) nounwind {
; RV64I-NEXT: mv a0, s1
; RV64I-NEXT: li a3, 0
; RV64I-NEXT: li a4, 0
-; RV64I-NEXT: call __atomic_compare_exchange_2@plt
+; RV64I-NEXT: call __atomic_compare_exchange_2
; RV64I-NEXT: lh a1, 6(sp)
; RV64I-NEXT: bnez a0, .LBB120_4
; RV64I-NEXT: .LBB120_2: # %atomicrmw.start
@@ -12407,7 +12407,7 @@ define i16 @atomicrmw_umax_i16_acquire(ptr %a, i16 %b) nounwind {
; RV32I-NEXT: li a3, 2
; RV32I-NEXT: li a4, 2
; RV32I-NEXT: mv a0, s1
-; RV32I-NEXT: call __atomic_compare_exchange_2@plt
+; RV32I-NEXT: call __atomic_compare_exchange_2
; RV32I-NEXT: lh a1, 10(sp)
; RV32I-NEXT: bnez a0, .LBB121_4
; RV32I-NEXT: .LBB121_2: # %atomicrmw.start
@@ -12501,7 +12501,7 @@ define i16 @atomicrmw_umax_i16_acquire(ptr %a, i16 %b) nounwind {
; RV64I-NEXT: li a3, 2
; RV64I-NEXT: li a4, 2
; RV64I-NEXT: mv a0, s1
-; RV64I-NEXT: call __atomic_compare_exchange_2@plt
+; RV64I-NEXT: call __atomic_compare_exchange_2
; RV64I-NEXT: lh a1, 6(sp)
; RV64I-NEXT: bnez a0, .LBB121_4
; RV64I-NEXT: .LBB121_2: # %atomicrmw.start
@@ -12599,7 +12599,7 @@ define i16 @atomicrmw_umax_i16_release(ptr %a, i16 %b) nounwind {
; RV32I-NEXT: li a3, 3
; RV32I-NEXT: mv a0, s1
; RV32I-NEXT: li a4, 0
-; RV32I-NEXT: call __atomic_compare_exchange_2@plt
+; RV32I-NEXT: call __atomic_compare_exchange_2
; RV32I-NEXT: lh a1, 10(sp)
; RV32I-NEXT: bnez a0, .LBB122_4
; RV32I-NEXT: .LBB122_2: # %atomicrmw.start
@@ -12693,7 +12693,7 @@ define i16 @atomicrmw_umax_i16_release(ptr %a, i16 %b) nounwind {
; RV64I-NEXT: li a3, 3
; RV64I-NEXT: mv a0, s1
; RV64I-NEXT: li a4, 0
-; RV64I-NEXT: call __atomic_compare_exchange_2@plt
+; RV64I-NEXT: call __atomic_compare_exchange_2
; RV64I-NEXT: lh a1, 6(sp)
; RV64I-NEXT: bnez a0, .LBB122_4
; RV64I-NEXT: .LBB122_2: # %atomicrmw.start
@@ -12791,7 +12791,7 @@ define i16 @atomicrmw_umax_i16_acq_rel(ptr %a, i16 %b) nounwind {
; RV32I-NEXT: li a3, 4
; RV32I-NEXT: li a4, 2
; RV32I-NEXT: mv a0, s1
-; RV32I-NEXT: call __atomic_compare_exchange_2@plt
+; RV32I-NEXT: call __atomic_compare_exchange_2
; RV32I-NEXT: lh a1, 10(sp)
; RV32I-NEXT: bnez a0, .LBB123_4
; RV32I-NEXT: .LBB123_2: # %atomicrmw.start
@@ -12885,7 +12885,7 @@ define i16 @atomicrmw_umax_i16_acq_rel(ptr %a, i16 %b) nounwind {
; RV64I-NEXT: li a3, 4
; RV64I-NEXT: li a4, 2
; RV64I-NEXT: mv a0, s1
-; RV64I-NEXT: call __atomic_compare_exchange_2@plt
+; RV64I-NEXT: call __atomic_compare_exchange_2
; RV64I-NEXT: lh a1, 6(sp)
; RV64I-NEXT: bnez a0, .LBB123_4
; RV64I-NEXT: .LBB123_2: # %atomicrmw.start
@@ -12983,7 +12983,7 @@ define i16 @atomicrmw_umax_i16_seq_cst(ptr %a, i16 %b) nounwind {
; RV32I-NEXT: li a3, 5
; RV32I-NEXT: li a4, 5
; RV32I-NEXT: mv a0, s1
-; RV32I-NEXT: call __atomic_compare_exchange_2@plt
+; RV32I-NEXT: call __atomic_compare_exchange_2
; RV32I-NEXT: lh a1, 10(sp)
; RV32I-NEXT: bnez a0, .LBB124_4
; RV32I-NEXT: .LBB124_2: # %atomicrmw.start
@@ -13052,7 +13052,7 @@ define i16 @atomicrmw_umax_i16_seq_cst(ptr %a, i16 %b) nounwind {
; RV64I-NEXT: li a3, 5
; RV64I-NEXT: li a4, 5
; RV64I-NEXT: mv a0, s1
-; RV64I-NEXT: call __atomic_compare_exchange_2@plt
+; RV64I-NEXT: call __atomic_compare_exchange_2
; RV64I-NEXT: lh a1, 6(sp)
; RV64I-NEXT: bnez a0, .LBB124_4
; RV64I-NEXT: .LBB124_2: # %atomicrmw.start
@@ -13125,7 +13125,7 @@ define i16 @atomicrmw_umin_i16_monotonic(ptr %a, i16 %b) nounwind {
; RV32I-NEXT: mv a0, s1
; RV32I-NEXT: li a3, 0
; RV32I-NEXT: li a4, 0
-; RV32I-NEXT: call __atomic_compare_exchange_2@plt
+; RV32I-NEXT: call __atomic_compare_exchange_2
; RV32I-NEXT: lh a1, 10(sp)
; RV32I-NEXT: bnez a0, .LBB125_4
; RV32I-NEXT: .LBB125_2: # %atomicrmw.start
@@ -13194,7 +13194,7 @@ define i16 @atomicrmw_umin_i16_monotonic(ptr %a, i16 %b) nounwind {
; RV64I-NEXT: mv a0, s1
; RV64I-NEXT: li a3, 0
; RV64I-NEXT: li a4, 0
-; RV64I-NEXT: call __atomic_compare_exchange_2@plt
+; RV64I-NEXT: call __atomic_compare_exchange_2
; RV64I-NEXT: lh a1, 6(sp)
; RV64I-NEXT: bnez a0, .LBB125_4
; RV64I-NEXT: .LBB125_2: # %atomicrmw.start
@@ -13267,7 +13267,7 @@ define i16 @atomicrmw_umin_i16_acquire(ptr %a, i16 %b) nounwind {
; RV32I-NEXT: li a3, 2
; RV32I-NEXT: li a4, 2
; RV32I-NEXT: mv a0, s1
-; RV32I-NEXT: call __atomic_compare_exchange_2@plt
+; RV32I-NEXT: call __atomic_compare_exchange_2
; RV32I-NEXT: lh a1, 10(sp)
; RV32I-NEXT: bnez a0, .LBB126_4
; RV32I-NEXT: .LBB126_2: # %atomicrmw.start
@@ -13361,7 +13361,7 @@ define i16 @atomicrmw_umin_i16_acquire(ptr %a, i16 %b) nounwind {
; RV64I-NEXT: li a3, 2
; RV64I-NEXT: li a4, 2
; RV64I-NEXT: mv a0, s1
-; RV64I-NEXT: call __atomic_compare_exchange_2@plt
+; RV64I-NEXT: call __atomic_compare_exchange_2
; RV64I-NEXT: lh a1, 6(sp)
; RV64I-NEXT: bnez a0, .LBB126_4
; RV64I-NEXT: .LBB126_2: # %atomicrmw.start
@@ -13459,7 +13459,7 @@ define i16 @atomicrmw_umin_i16_release(ptr %a, i16 %b) nounwind {
; RV32I-NEXT: li a3, 3
; RV32I-NEXT: mv a0, s1
; RV32I-NEXT: li a4, 0
-; RV32I-NEXT: call __atomic_compare_exchange_2@plt
+; RV32I-NEXT: call __atomic_compare_exchange_2
; RV32I-NEXT: lh a1, 10(sp)
; RV32I-NEXT: bnez a0, .LBB127_4
; RV32I-NEXT: .LBB127_2: # %atomicrmw.start
@@ -13553,7 +13553,7 @@ define i16 @atomicrmw_umin_i16_release(ptr %a, i16 %b) nounwind {
; RV64I-NEXT: li a3, 3
; RV64I-NEXT: mv a0, s1
; RV64I-NEXT: li a4, 0
-; RV64I-NEXT: call __atomic_compare_exchange_2@plt
+; RV64I-NEXT: call __atomic_compare_exchange_2
; RV64I-NEXT: lh a1, 6(sp)
; RV64I-NEXT: bnez a0, .LBB127_4
; RV64I-NEXT: .LBB127_2: # %atomicrmw.start
@@ -13651,7 +13651,7 @@ define i16 @atomicrmw_umin_i16_acq_rel(ptr %a, i16 %b) nounwind {
; RV32I-NEXT: li a3, 4
; RV32I-NEXT: li a4, 2
; RV32I-NEXT: mv a0, s1
-; RV32I-NEXT: call __atomic_compare_exchange_2@plt
+; RV32I-NEXT: call __atomic_compare_exchange_2
; RV32I-NEXT: lh a1, 10(sp)
; RV32I-NEXT: bnez a0, .LBB128_4
; RV32I-NEXT: .LBB128_2: # %atomicrmw.start
@@ -13745,7 +13745,7 @@ define i16 @atomicrmw_umin_i16_acq_rel(ptr %a, i16 %b) nounwind {
; RV64I-NEXT: li a3, 4
; RV64I-NEXT: li a4, 2
; RV64I-NEXT: mv a0, s1
-; RV64I-NEXT: call __atomic_compare_exchange_2@plt
+; RV64I-NEXT: call __atomic_compare_exchange_2
; RV64I-NEXT: lh a1, 6(sp)
; RV64I-NEXT: bnez a0, .LBB128_4
; RV64I-NEXT: .LBB128_2: # %atomicrmw.start
@@ -13843,7 +13843,7 @@ define i16 @atomicrmw_umin_i16_seq_cst(ptr %a, i16 %b) nounwind {
; RV32I-NEXT: li a3, 5
; RV32I-NEXT: li a4, 5
; RV32I-NEXT: mv a0, s1
-; RV32I-NEXT: call __atomic_compare_exchange_2@plt
+; RV32I-NEXT: call __atomic_compare_exchange_2
; RV32I-NEXT: lh a1, 10(sp)
; RV32I-NEXT: bnez a0, .LBB129_4
; RV32I-NEXT: .LBB129_2: # %atomicrmw.start
@@ -13912,7 +13912,7 @@ define i16 @atomicrmw_umin_i16_seq_cst(ptr %a, i16 %b) nounwind {
; RV64I-NEXT: li a3, 5
; RV64I-NEXT: li a4, 5
; RV64I-NEXT: mv a0, s1
-; RV64I-NEXT: call __atomic_compare_exchange_2@plt
+; RV64I-NEXT: call __atomic_compare_exchange_2
; RV64I-NEXT: lh a1, 6(sp)
; RV64I-NEXT: bnez a0, .LBB129_4
; RV64I-NEXT: .LBB129_2: # %atomicrmw.start
@@ -13968,7 +13968,7 @@ define i32 @atomicrmw_xchg_i32_monotonic(ptr %a, i32 %b) nounwind {
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT: li a2, 0
-; RV32I-NEXT: call __atomic_exchange_4@plt
+; RV32I-NEXT: call __atomic_exchange_4
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
@@ -13983,7 +13983,7 @@ define i32 @atomicrmw_xchg_i32_monotonic(ptr %a, i32 %b) nounwind {
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT: li a2, 0
-; RV64I-NEXT: call __atomic_exchange_4@plt
+; RV64I-NEXT: call __atomic_exchange_4
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
@@ -14002,7 +14002,7 @@ define i32 @atomicrmw_xchg_i32_acquire(ptr %a, i32 %b) nounwind {
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT: li a2, 2
-; RV32I-NEXT: call __atomic_exchange_4@plt
+; RV32I-NEXT: call __atomic_exchange_4
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
@@ -14022,7 +14022,7 @@ define i32 @atomicrmw_xchg_i32_acquire(ptr %a, i32 %b) nounwind {
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT: li a2, 2
-; RV64I-NEXT: call __atomic_exchange_4@plt
+; RV64I-NEXT: call __atomic_exchange_4
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
@@ -14046,7 +14046,7 @@ define i32 @atomicrmw_xchg_i32_release(ptr %a, i32 %b) nounwind {
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT: li a2, 3
-; RV32I-NEXT: call __atomic_exchange_4@plt
+; RV32I-NEXT: call __atomic_exchange_4
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
@@ -14066,7 +14066,7 @@ define i32 @atomicrmw_xchg_i32_release(ptr %a, i32 %b) nounwind {
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT: li a2, 3
-; RV64I-NEXT: call __atomic_exchange_4@plt
+; RV64I-NEXT: call __atomic_exchange_4
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
@@ -14090,7 +14090,7 @@ define i32 @atomicrmw_xchg_i32_acq_rel(ptr %a, i32 %b) nounwind {
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT: li a2, 4
-; RV32I-NEXT: call __atomic_exchange_4@plt
+; RV32I-NEXT: call __atomic_exchange_4
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
@@ -14110,7 +14110,7 @@ define i32 @atomicrmw_xchg_i32_acq_rel(ptr %a, i32 %b) nounwind {
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT: li a2, 4
-; RV64I-NEXT: call __atomic_exchange_4@plt
+; RV64I-NEXT: call __atomic_exchange_4
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
@@ -14134,7 +14134,7 @@ define i32 @atomicrmw_xchg_i32_seq_cst(ptr %a, i32 %b) nounwind {
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT: li a2, 5
-; RV32I-NEXT: call __atomic_exchange_4@plt
+; RV32I-NEXT: call __atomic_exchange_4
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
@@ -14154,7 +14154,7 @@ define i32 @atomicrmw_xchg_i32_seq_cst(ptr %a, i32 %b) nounwind {
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT: li a2, 5
-; RV64I-NEXT: call __atomic_exchange_4@plt
+; RV64I-NEXT: call __atomic_exchange_4
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
@@ -14178,7 +14178,7 @@ define i32 @atomicrmw_add_i32_monotonic(ptr %a, i32 %b) nounwind {
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT: li a2, 0
-; RV32I-NEXT: call __atomic_fetch_add_4@plt
+; RV32I-NEXT: call __atomic_fetch_add_4
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
@@ -14193,7 +14193,7 @@ define i32 @atomicrmw_add_i32_monotonic(ptr %a, i32 %b) nounwind {
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT: li a2, 0
-; RV64I-NEXT: call __atomic_fetch_add_4@plt
+; RV64I-NEXT: call __atomic_fetch_add_4
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
@@ -14212,7 +14212,7 @@ define i32 @atomicrmw_add_i32_acquire(ptr %a, i32 %b) nounwind {
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT: li a2, 2
-; RV32I-NEXT: call __atomic_fetch_add_4@plt
+; RV32I-NEXT: call __atomic_fetch_add_4
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
@@ -14232,7 +14232,7 @@ define i32 @atomicrmw_add_i32_acquire(ptr %a, i32 %b) nounwind {
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT: li a2, 2
-; RV64I-NEXT: call __atomic_fetch_add_4@plt
+; RV64I-NEXT: call __atomic_fetch_add_4
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
@@ -14256,7 +14256,7 @@ define i32 @atomicrmw_add_i32_release(ptr %a, i32 %b) nounwind {
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT: li a2, 3
-; RV32I-NEXT: call __atomic_fetch_add_4@plt
+; RV32I-NEXT: call __atomic_fetch_add_4
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
@@ -14276,7 +14276,7 @@ define i32 @atomicrmw_add_i32_release(ptr %a, i32 %b) nounwind {
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT: li a2, 3
-; RV64I-NEXT: call __atomic_fetch_add_4@plt
+; RV64I-NEXT: call __atomic_fetch_add_4
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
@@ -14300,7 +14300,7 @@ define i32 @atomicrmw_add_i32_acq_rel(ptr %a, i32 %b) nounwind {
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT: li a2, 4
-; RV32I-NEXT: call __atomic_fetch_add_4@plt
+; RV32I-NEXT: call __atomic_fetch_add_4
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
@@ -14320,7 +14320,7 @@ define i32 @atomicrmw_add_i32_acq_rel(ptr %a, i32 %b) nounwind {
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT: li a2, 4
-; RV64I-NEXT: call __atomic_fetch_add_4@plt
+; RV64I-NEXT: call __atomic_fetch_add_4
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
@@ -14344,7 +14344,7 @@ define i32 @atomicrmw_add_i32_seq_cst(ptr %a, i32 %b) nounwind {
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT: li a2, 5
-; RV32I-NEXT: call __atomic_fetch_add_4@plt
+; RV32I-NEXT: call __atomic_fetch_add_4
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
@@ -14364,7 +14364,7 @@ define i32 @atomicrmw_add_i32_seq_cst(ptr %a, i32 %b) nounwind {
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT: li a2, 5
-; RV64I-NEXT: call __atomic_fetch_add_4@plt
+; RV64I-NEXT: call __atomic_fetch_add_4
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
@@ -14388,7 +14388,7 @@ define i32 @atomicrmw_sub_i32_monotonic(ptr %a, i32 %b) nounwind {
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT: li a2, 0
-; RV32I-NEXT: call __atomic_fetch_sub_4@plt
+; RV32I-NEXT: call __atomic_fetch_sub_4
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
@@ -14404,7 +14404,7 @@ define i32 @atomicrmw_sub_i32_monotonic(ptr %a, i32 %b) nounwind {
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT: li a2, 0
-; RV64I-NEXT: call __atomic_fetch_sub_4@plt
+; RV64I-NEXT: call __atomic_fetch_sub_4
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
@@ -14424,7 +14424,7 @@ define i32 @atomicrmw_sub_i32_acquire(ptr %a, i32 %b) nounwind {
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT: li a2, 2
-; RV32I-NEXT: call __atomic_fetch_sub_4@plt
+; RV32I-NEXT: call __atomic_fetch_sub_4
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
@@ -14446,7 +14446,7 @@ define i32 @atomicrmw_sub_i32_acquire(ptr %a, i32 %b) nounwind {
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT: li a2, 2
-; RV64I-NEXT: call __atomic_fetch_sub_4@plt
+; RV64I-NEXT: call __atomic_fetch_sub_4
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
@@ -14472,7 +14472,7 @@ define i32 @atomicrmw_sub_i32_release(ptr %a, i32 %b) nounwind {
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT: li a2, 3
-; RV32I-NEXT: call __atomic_fetch_sub_4@plt
+; RV32I-NEXT: call __atomic_fetch_sub_4
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
@@ -14494,7 +14494,7 @@ define i32 @atomicrmw_sub_i32_release(ptr %a, i32 %b) nounwind {
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT: li a2, 3
-; RV64I-NEXT: call __atomic_fetch_sub_4@plt
+; RV64I-NEXT: call __atomic_fetch_sub_4
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
@@ -14520,7 +14520,7 @@ define i32 @atomicrmw_sub_i32_acq_rel(ptr %a, i32 %b) nounwind {
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT: li a2, 4
-; RV32I-NEXT: call __atomic_fetch_sub_4@plt
+; RV32I-NEXT: call __atomic_fetch_sub_4
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
@@ -14542,7 +14542,7 @@ define i32 @atomicrmw_sub_i32_acq_rel(ptr %a, i32 %b) nounwind {
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT: li a2, 4
-; RV64I-NEXT: call __atomic_fetch_sub_4@plt
+; RV64I-NEXT: call __atomic_fetch_sub_4
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
@@ -14568,7 +14568,7 @@ define i32 @atomicrmw_sub_i32_seq_cst(ptr %a, i32 %b) nounwind {
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT: li a2, 5
-; RV32I-NEXT: call __atomic_fetch_sub_4@plt
+; RV32I-NEXT: call __atomic_fetch_sub_4
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
@@ -14590,7 +14590,7 @@ define i32 @atomicrmw_sub_i32_seq_cst(ptr %a, i32 %b) nounwind {
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT: li a2, 5
-; RV64I-NEXT: call __atomic_fetch_sub_4@plt
+; RV64I-NEXT: call __atomic_fetch_sub_4
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
@@ -14616,7 +14616,7 @@ define i32 @atomicrmw_and_i32_monotonic(ptr %a, i32 %b) nounwind {
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT: li a2, 0
-; RV32I-NEXT: call __atomic_fetch_and_4@plt
+; RV32I-NEXT: call __atomic_fetch_and_4
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
@@ -14631,7 +14631,7 @@ define i32 @atomicrmw_and_i32_monotonic(ptr %a, i32 %b) nounwind {
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT: li a2, 0
-; RV64I-NEXT: call __atomic_fetch_and_4@plt
+; RV64I-NEXT: call __atomic_fetch_and_4
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
@@ -14650,7 +14650,7 @@ define i32 @atomicrmw_and_i32_acquire(ptr %a, i32 %b) nounwind {
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT: li a2, 2
-; RV32I-NEXT: call __atomic_fetch_and_4@plt
+; RV32I-NEXT: call __atomic_fetch_and_4
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
@@ -14670,7 +14670,7 @@ define i32 @atomicrmw_and_i32_acquire(ptr %a, i32 %b) nounwind {
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT: li a2, 2
-; RV64I-NEXT: call __atomic_fetch_and_4@plt
+; RV64I-NEXT: call __atomic_fetch_and_4
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
@@ -14694,7 +14694,7 @@ define i32 @atomicrmw_and_i32_release(ptr %a, i32 %b) nounwind {
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT: li a2, 3
-; RV32I-NEXT: call __atomic_fetch_and_4@plt
+; RV32I-NEXT: call __atomic_fetch_and_4
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
@@ -14714,7 +14714,7 @@ define i32 @atomicrmw_and_i32_release(ptr %a, i32 %b) nounwind {
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT: li a2, 3
-; RV64I-NEXT: call __atomic_fetch_and_4@plt
+; RV64I-NEXT: call __atomic_fetch_and_4
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
@@ -14738,7 +14738,7 @@ define i32 @atomicrmw_and_i32_acq_rel(ptr %a, i32 %b) nounwind {
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT: li a2, 4
-; RV32I-NEXT: call __atomic_fetch_and_4@plt
+; RV32I-NEXT: call __atomic_fetch_and_4
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
@@ -14758,7 +14758,7 @@ define i32 @atomicrmw_and_i32_acq_rel(ptr %a, i32 %b) nounwind {
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT: li a2, 4
-; RV64I-NEXT: call __atomic_fetch_and_4@plt
+; RV64I-NEXT: call __atomic_fetch_and_4
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
@@ -14782,7 +14782,7 @@ define i32 @atomicrmw_and_i32_seq_cst(ptr %a, i32 %b) nounwind {
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT: li a2, 5
-; RV32I-NEXT: call __atomic_fetch_and_4@plt
+; RV32I-NEXT: call __atomic_fetch_and_4
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
@@ -14802,7 +14802,7 @@ define i32 @atomicrmw_and_i32_seq_cst(ptr %a, i32 %b) nounwind {
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT: li a2, 5
-; RV64I-NEXT: call __atomic_fetch_and_4@plt
+; RV64I-NEXT: call __atomic_fetch_and_4
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
@@ -14826,7 +14826,7 @@ define i32 @atomicrmw_nand_i32_monotonic(ptr %a, i32 %b) nounwind {
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT: li a2, 0
-; RV32I-NEXT: call __atomic_fetch_nand_4@plt
+; RV32I-NEXT: call __atomic_fetch_nand_4
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
@@ -14848,7 +14848,7 @@ define i32 @atomicrmw_nand_i32_monotonic(ptr %a, i32 %b) nounwind {
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT: li a2, 0
-; RV64I-NEXT: call __atomic_fetch_nand_4@plt
+; RV64I-NEXT: call __atomic_fetch_nand_4
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
@@ -14874,7 +14874,7 @@ define i32 @atomicrmw_nand_i32_acquire(ptr %a, i32 %b) nounwind {
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT: li a2, 2
-; RV32I-NEXT: call __atomic_fetch_nand_4@plt
+; RV32I-NEXT: call __atomic_fetch_nand_4
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
@@ -14908,7 +14908,7 @@ define i32 @atomicrmw_nand_i32_acquire(ptr %a, i32 %b) nounwind {
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT: li a2, 2
-; RV64I-NEXT: call __atomic_fetch_nand_4@plt
+; RV64I-NEXT: call __atomic_fetch_nand_4
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
@@ -14946,7 +14946,7 @@ define i32 @atomicrmw_nand_i32_release(ptr %a, i32 %b) nounwind {
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT: li a2, 3
-; RV32I-NEXT: call __atomic_fetch_nand_4@plt
+; RV32I-NEXT: call __atomic_fetch_nand_4
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
@@ -14980,7 +14980,7 @@ define i32 @atomicrmw_nand_i32_release(ptr %a, i32 %b) nounwind {
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT: li a2, 3
-; RV64I-NEXT: call __atomic_fetch_nand_4@plt
+; RV64I-NEXT: call __atomic_fetch_nand_4
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
@@ -15018,7 +15018,7 @@ define i32 @atomicrmw_nand_i32_acq_rel(ptr %a, i32 %b) nounwind {
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT: li a2, 4
-; RV32I-NEXT: call __atomic_fetch_nand_4@plt
+; RV32I-NEXT: call __atomic_fetch_nand_4
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
@@ -15052,7 +15052,7 @@ define i32 @atomicrmw_nand_i32_acq_rel(ptr %a, i32 %b) nounwind {
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT: li a2, 4
-; RV64I-NEXT: call __atomic_fetch_nand_4@plt
+; RV64I-NEXT: call __atomic_fetch_nand_4
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
@@ -15090,7 +15090,7 @@ define i32 @atomicrmw_nand_i32_seq_cst(ptr %a, i32 %b) nounwind {
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT: li a2, 5
-; RV32I-NEXT: call __atomic_fetch_nand_4@plt
+; RV32I-NEXT: call __atomic_fetch_nand_4
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
@@ -15112,7 +15112,7 @@ define i32 @atomicrmw_nand_i32_seq_cst(ptr %a, i32 %b) nounwind {
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT: li a2, 5
-; RV64I-NEXT: call __atomic_fetch_nand_4@plt
+; RV64I-NEXT: call __atomic_fetch_nand_4
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
@@ -15138,7 +15138,7 @@ define i32 @atomicrmw_or_i32_monotonic(ptr %a, i32 %b) nounwind {
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT: li a2, 0
-; RV32I-NEXT: call __atomic_fetch_or_4@plt
+; RV32I-NEXT: call __atomic_fetch_or_4
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
@@ -15153,7 +15153,7 @@ define i32 @atomicrmw_or_i32_monotonic(ptr %a, i32 %b) nounwind {
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT: li a2, 0
-; RV64I-NEXT: call __atomic_fetch_or_4@plt
+; RV64I-NEXT: call __atomic_fetch_or_4
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
@@ -15172,7 +15172,7 @@ define i32 @atomicrmw_or_i32_acquire(ptr %a, i32 %b) nounwind {
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT: li a2, 2
-; RV32I-NEXT: call __atomic_fetch_or_4@plt
+; RV32I-NEXT: call __atomic_fetch_or_4
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
@@ -15192,7 +15192,7 @@ define i32 @atomicrmw_or_i32_acquire(ptr %a, i32 %b) nounwind {
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT: li a2, 2
-; RV64I-NEXT: call __atomic_fetch_or_4@plt
+; RV64I-NEXT: call __atomic_fetch_or_4
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
@@ -15216,7 +15216,7 @@ define i32 @atomicrmw_or_i32_release(ptr %a, i32 %b) nounwind {
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT: li a2, 3
-; RV32I-NEXT: call __atomic_fetch_or_4@plt
+; RV32I-NEXT: call __atomic_fetch_or_4
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
@@ -15236,7 +15236,7 @@ define i32 @atomicrmw_or_i32_release(ptr %a, i32 %b) nounwind {
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT: li a2, 3
-; RV64I-NEXT: call __atomic_fetch_or_4@plt
+; RV64I-NEXT: call __atomic_fetch_or_4
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
@@ -15260,7 +15260,7 @@ define i32 @atomicrmw_or_i32_acq_rel(ptr %a, i32 %b) nounwind {
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT: li a2, 4
-; RV32I-NEXT: call __atomic_fetch_or_4@plt
+; RV32I-NEXT: call __atomic_fetch_or_4
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
@@ -15280,7 +15280,7 @@ define i32 @atomicrmw_or_i32_acq_rel(ptr %a, i32 %b) nounwind {
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT: li a2, 4
-; RV64I-NEXT: call __atomic_fetch_or_4@plt
+; RV64I-NEXT: call __atomic_fetch_or_4
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
@@ -15304,7 +15304,7 @@ define i32 @atomicrmw_or_i32_seq_cst(ptr %a, i32 %b) nounwind {
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT: li a2, 5
-; RV32I-NEXT: call __atomic_fetch_or_4@plt
+; RV32I-NEXT: call __atomic_fetch_or_4
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
@@ -15324,7 +15324,7 @@ define i32 @atomicrmw_or_i32_seq_cst(ptr %a, i32 %b) nounwind {
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT: li a2, 5
-; RV64I-NEXT: call __atomic_fetch_or_4@plt
+; RV64I-NEXT: call __atomic_fetch_or_4
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
@@ -15348,7 +15348,7 @@ define i32 @atomicrmw_xor_i32_monotonic(ptr %a, i32 %b) nounwind {
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT: li a2, 0
-; RV32I-NEXT: call __atomic_fetch_xor_4@plt
+; RV32I-NEXT: call __atomic_fetch_xor_4
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
@@ -15363,7 +15363,7 @@ define i32 @atomicrmw_xor_i32_monotonic(ptr %a, i32 %b) nounwind {
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT: li a2, 0
-; RV64I-NEXT: call __atomic_fetch_xor_4@plt
+; RV64I-NEXT: call __atomic_fetch_xor_4
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
@@ -15382,7 +15382,7 @@ define i32 @atomicrmw_xor_i32_acquire(ptr %a, i32 %b) nounwind {
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT: li a2, 2
-; RV32I-NEXT: call __atomic_fetch_xor_4@plt
+; RV32I-NEXT: call __atomic_fetch_xor_4
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
@@ -15402,7 +15402,7 @@ define i32 @atomicrmw_xor_i32_acquire(ptr %a, i32 %b) nounwind {
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT: li a2, 2
-; RV64I-NEXT: call __atomic_fetch_xor_4@plt
+; RV64I-NEXT: call __atomic_fetch_xor_4
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
@@ -15426,7 +15426,7 @@ define i32 @atomicrmw_xor_i32_release(ptr %a, i32 %b) nounwind {
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT: li a2, 3
-; RV32I-NEXT: call __atomic_fetch_xor_4@plt
+; RV32I-NEXT: call __atomic_fetch_xor_4
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
@@ -15446,7 +15446,7 @@ define i32 @atomicrmw_xor_i32_release(ptr %a, i32 %b) nounwind {
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT: li a2, 3
-; RV64I-NEXT: call __atomic_fetch_xor_4@plt
+; RV64I-NEXT: call __atomic_fetch_xor_4
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
@@ -15470,7 +15470,7 @@ define i32 @atomicrmw_xor_i32_acq_rel(ptr %a, i32 %b) nounwind {
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT: li a2, 4
-; RV32I-NEXT: call __atomic_fetch_xor_4@plt
+; RV32I-NEXT: call __atomic_fetch_xor_4
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
@@ -15490,7 +15490,7 @@ define i32 @atomicrmw_xor_i32_acq_rel(ptr %a, i32 %b) nounwind {
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT: li a2, 4
-; RV64I-NEXT: call __atomic_fetch_xor_4@plt
+; RV64I-NEXT: call __atomic_fetch_xor_4
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
@@ -15514,7 +15514,7 @@ define i32 @atomicrmw_xor_i32_seq_cst(ptr %a, i32 %b) nounwind {
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT: li a2, 5
-; RV32I-NEXT: call __atomic_fetch_xor_4@plt
+; RV32I-NEXT: call __atomic_fetch_xor_4
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
@@ -15534,7 +15534,7 @@ define i32 @atomicrmw_xor_i32_seq_cst(ptr %a, i32 %b) nounwind {
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT: li a2, 5
-; RV64I-NEXT: call __atomic_fetch_xor_4@plt
+; RV64I-NEXT: call __atomic_fetch_xor_4
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
@@ -15570,7 +15570,7 @@ define i32 @atomicrmw_max_i32_monotonic(ptr %a, i32 %b) nounwind {
; RV32I-NEXT: mv a0, s0
; RV32I-NEXT: li a3, 0
; RV32I-NEXT: li a4, 0
-; RV32I-NEXT: call __atomic_compare_exchange_4@plt
+; RV32I-NEXT: call __atomic_compare_exchange_4
; RV32I-NEXT: lw a3, 0(sp)
; RV32I-NEXT: bnez a0, .LBB165_4
; RV32I-NEXT: .LBB165_2: # %atomicrmw.start
@@ -15613,7 +15613,7 @@ define i32 @atomicrmw_max_i32_monotonic(ptr %a, i32 %b) nounwind {
; RV64I-NEXT: mv a0, s0
; RV64I-NEXT: li a3, 0
; RV64I-NEXT: li a4, 0
-; RV64I-NEXT: call __atomic_compare_exchange_4@plt
+; RV64I-NEXT: call __atomic_compare_exchange_4
; RV64I-NEXT: lw a3, 12(sp)
; RV64I-NEXT: bnez a0, .LBB165_4
; RV64I-NEXT: .LBB165_2: # %atomicrmw.start
@@ -15659,7 +15659,7 @@ define i32 @atomicrmw_max_i32_acquire(ptr %a, i32 %b) nounwind {
; RV32I-NEXT: li a3, 2
; RV32I-NEXT: li a4, 2
; RV32I-NEXT: mv a0, s0
-; RV32I-NEXT: call __atomic_compare_exchange_4@plt
+; RV32I-NEXT: call __atomic_compare_exchange_4
; RV32I-NEXT: lw a3, 0(sp)
; RV32I-NEXT: bnez a0, .LBB166_4
; RV32I-NEXT: .LBB166_2: # %atomicrmw.start
@@ -15707,7 +15707,7 @@ define i32 @atomicrmw_max_i32_acquire(ptr %a, i32 %b) nounwind {
; RV64I-NEXT: li a3, 2
; RV64I-NEXT: li a4, 2
; RV64I-NEXT: mv a0, s0
-; RV64I-NEXT: call __atomic_compare_exchange_4@plt
+; RV64I-NEXT: call __atomic_compare_exchange_4
; RV64I-NEXT: lw a3, 12(sp)
; RV64I-NEXT: bnez a0, .LBB166_4
; RV64I-NEXT: .LBB166_2: # %atomicrmw.start
@@ -15758,7 +15758,7 @@ define i32 @atomicrmw_max_i32_release(ptr %a, i32 %b) nounwind {
; RV32I-NEXT: li a3, 3
; RV32I-NEXT: mv a0, s0
; RV32I-NEXT: li a4, 0
-; RV32I-NEXT: call __atomic_compare_exchange_4@plt
+; RV32I-NEXT: call __atomic_compare_exchange_4
; RV32I-NEXT: lw a3, 0(sp)
; RV32I-NEXT: bnez a0, .LBB167_4
; RV32I-NEXT: .LBB167_2: # %atomicrmw.start
@@ -15806,7 +15806,7 @@ define i32 @atomicrmw_max_i32_release(ptr %a, i32 %b) nounwind {
; RV64I-NEXT: li a3, 3
; RV64I-NEXT: mv a0, s0
; RV64I-NEXT: li a4, 0
-; RV64I-NEXT: call __atomic_compare_exchange_4@plt
+; RV64I-NEXT: call __atomic_compare_exchange_4
; RV64I-NEXT: lw a3, 12(sp)
; RV64I-NEXT: bnez a0, .LBB167_4
; RV64I-NEXT: .LBB167_2: # %atomicrmw.start
@@ -15857,7 +15857,7 @@ define i32 @atomicrmw_max_i32_acq_rel(ptr %a, i32 %b) nounwind {
; RV32I-NEXT: li a3, 4
; RV32I-NEXT: li a4, 2
; RV32I-NEXT: mv a0, s0
-; RV32I-NEXT: call __atomic_compare_exchange_4@plt
+; RV32I-NEXT: call __atomic_compare_exchange_4
; RV32I-NEXT: lw a3, 0(sp)
; RV32I-NEXT: bnez a0, .LBB168_4
; RV32I-NEXT: .LBB168_2: # %atomicrmw.start
@@ -15905,7 +15905,7 @@ define i32 @atomicrmw_max_i32_acq_rel(ptr %a, i32 %b) nounwind {
; RV64I-NEXT: li a3, 4
; RV64I-NEXT: li a4, 2
; RV64I-NEXT: mv a0, s0
-; RV64I-NEXT: call __atomic_compare_exchange_4@plt
+; RV64I-NEXT: call __atomic_compare_exchange_4
; RV64I-NEXT: lw a3, 12(sp)
; RV64I-NEXT: bnez a0, .LBB168_4
; RV64I-NEXT: .LBB168_2: # %atomicrmw.start
@@ -15956,7 +15956,7 @@ define i32 @atomicrmw_max_i32_seq_cst(ptr %a, i32 %b) nounwind {
; RV32I-NEXT: li a3, 5
; RV32I-NEXT: li a4, 5
; RV32I-NEXT: mv a0, s0
-; RV32I-NEXT: call __atomic_compare_exchange_4@plt
+; RV32I-NEXT: call __atomic_compare_exchange_4
; RV32I-NEXT: lw a3, 0(sp)
; RV32I-NEXT: bnez a0, .LBB169_4
; RV32I-NEXT: .LBB169_2: # %atomicrmw.start
@@ -16004,7 +16004,7 @@ define i32 @atomicrmw_max_i32_seq_cst(ptr %a, i32 %b) nounwind {
; RV64I-NEXT: li a3, 5
; RV64I-NEXT: li a4, 5
; RV64I-NEXT: mv a0, s0
-; RV64I-NEXT: call __atomic_compare_exchange_4@plt
+; RV64I-NEXT: call __atomic_compare_exchange_4
; RV64I-NEXT: lw a3, 12(sp)
; RV64I-NEXT: bnez a0, .LBB169_4
; RV64I-NEXT: .LBB169_2: # %atomicrmw.start
@@ -16055,7 +16055,7 @@ define i32 @atomicrmw_min_i32_monotonic(ptr %a, i32 %b) nounwind {
; RV32I-NEXT: mv a0, s0
; RV32I-NEXT: li a3, 0
; RV32I-NEXT: li a4, 0
-; RV32I-NEXT: call __atomic_compare_exchange_4@plt
+; RV32I-NEXT: call __atomic_compare_exchange_4
; RV32I-NEXT: lw a3, 0(sp)
; RV32I-NEXT: bnez a0, .LBB170_4
; RV32I-NEXT: .LBB170_2: # %atomicrmw.start
@@ -16098,7 +16098,7 @@ define i32 @atomicrmw_min_i32_monotonic(ptr %a, i32 %b) nounwind {
; RV64I-NEXT: mv a0, s0
; RV64I-NEXT: li a3, 0
; RV64I-NEXT: li a4, 0
-; RV64I-NEXT: call __atomic_compare_exchange_4@plt
+; RV64I-NEXT: call __atomic_compare_exchange_4
; RV64I-NEXT: lw a3, 12(sp)
; RV64I-NEXT: bnez a0, .LBB170_4
; RV64I-NEXT: .LBB170_2: # %atomicrmw.start
@@ -16144,7 +16144,7 @@ define i32 @atomicrmw_min_i32_acquire(ptr %a, i32 %b) nounwind {
; RV32I-NEXT: li a3, 2
; RV32I-NEXT: li a4, 2
; RV32I-NEXT: mv a0, s0
-; RV32I-NEXT: call __atomic_compare_exchange_4@plt
+; RV32I-NEXT: call __atomic_compare_exchange_4
; RV32I-NEXT: lw a3, 0(sp)
; RV32I-NEXT: bnez a0, .LBB171_4
; RV32I-NEXT: .LBB171_2: # %atomicrmw.start
@@ -16192,7 +16192,7 @@ define i32 @atomicrmw_min_i32_acquire(ptr %a, i32 %b) nounwind {
; RV64I-NEXT: li a3, 2
; RV64I-NEXT: li a4, 2
; RV64I-NEXT: mv a0, s0
-; RV64I-NEXT: call __atomic_compare_exchange_4@plt
+; RV64I-NEXT: call __atomic_compare_exchange_4
; RV64I-NEXT: lw a3, 12(sp)
; RV64I-NEXT: bnez a0, .LBB171_4
; RV64I-NEXT: .LBB171_2: # %atomicrmw.start
@@ -16243,7 +16243,7 @@ define i32 @atomicrmw_min_i32_release(ptr %a, i32 %b) nounwind {
; RV32I-NEXT: li a3, 3
; RV32I-NEXT: mv a0, s0
; RV32I-NEXT: li a4, 0
-; RV32I-NEXT: call __atomic_compare_exchange_4@plt
+; RV32I-NEXT: call __atomic_compare_exchange_4
; RV32I-NEXT: lw a3, 0(sp)
; RV32I-NEXT: bnez a0, .LBB172_4
; RV32I-NEXT: .LBB172_2: # %atomicrmw.start
@@ -16291,7 +16291,7 @@ define i32 @atomicrmw_min_i32_release(ptr %a, i32 %b) nounwind {
; RV64I-NEXT: li a3, 3
; RV64I-NEXT: mv a0, s0
; RV64I-NEXT: li a4, 0
-; RV64I-NEXT: call __atomic_compare_exchange_4@plt
+; RV64I-NEXT: call __atomic_compare_exchange_4
; RV64I-NEXT: lw a3, 12(sp)
; RV64I-NEXT: bnez a0, .LBB172_4
; RV64I-NEXT: .LBB172_2: # %atomicrmw.start
@@ -16342,7 +16342,7 @@ define i32 @atomicrmw_min_i32_acq_rel(ptr %a, i32 %b) nounwind {
; RV32I-NEXT: li a3, 4
; RV32I-NEXT: li a4, 2
; RV32I-NEXT: mv a0, s0
-; RV32I-NEXT: call __atomic_compare_exchange_4@plt
+; RV32I-NEXT: call __atomic_compare_exchange_4
; RV32I-NEXT: lw a3, 0(sp)
; RV32I-NEXT: bnez a0, .LBB173_4
; RV32I-NEXT: .LBB173_2: # %atomicrmw.start
@@ -16390,7 +16390,7 @@ define i32 @atomicrmw_min_i32_acq_rel(ptr %a, i32 %b) nounwind {
; RV64I-NEXT: li a3, 4
; RV64I-NEXT: li a4, 2
; RV64I-NEXT: mv a0, s0
-; RV64I-NEXT: call __atomic_compare_exchange_4@plt
+; RV64I-NEXT: call __atomic_compare_exchange_4
; RV64I-NEXT: lw a3, 12(sp)
; RV64I-NEXT: bnez a0, .LBB173_4
; RV64I-NEXT: .LBB173_2: # %atomicrmw.start
@@ -16441,7 +16441,7 @@ define i32 @atomicrmw_min_i32_seq_cst(ptr %a, i32 %b) nounwind {
; RV32I-NEXT: li a3, 5
; RV32I-NEXT: li a4, 5
; RV32I-NEXT: mv a0, s0
-; RV32I-NEXT: call __atomic_compare_exchange_4@plt
+; RV32I-NEXT: call __atomic_compare_exchange_4
; RV32I-NEXT: lw a3, 0(sp)
; RV32I-NEXT: bnez a0, .LBB174_4
; RV32I-NEXT: .LBB174_2: # %atomicrmw.start
@@ -16489,7 +16489,7 @@ define i32 @atomicrmw_min_i32_seq_cst(ptr %a, i32 %b) nounwind {
; RV64I-NEXT: li a3, 5
; RV64I-NEXT: li a4, 5
; RV64I-NEXT: mv a0, s0
-; RV64I-NEXT: call __atomic_compare_exchange_4@plt
+; RV64I-NEXT: call __atomic_compare_exchange_4
; RV64I-NEXT: lw a3, 12(sp)
; RV64I-NEXT: bnez a0, .LBB174_4
; RV64I-NEXT: .LBB174_2: # %atomicrmw.start
@@ -16540,7 +16540,7 @@ define i32 @atomicrmw_umax_i32_monotonic(ptr %a, i32 %b) nounwind {
; RV32I-NEXT: mv a0, s0
; RV32I-NEXT: li a3, 0
; RV32I-NEXT: li a4, 0
-; RV32I-NEXT: call __atomic_compare_exchange_4@plt
+; RV32I-NEXT: call __atomic_compare_exchange_4
; RV32I-NEXT: lw a3, 0(sp)
; RV32I-NEXT: bnez a0, .LBB175_4
; RV32I-NEXT: .LBB175_2: # %atomicrmw.start
@@ -16583,7 +16583,7 @@ define i32 @atomicrmw_umax_i32_monotonic(ptr %a, i32 %b) nounwind {
; RV64I-NEXT: mv a0, s0
; RV64I-NEXT: li a3, 0
; RV64I-NEXT: li a4, 0
-; RV64I-NEXT: call __atomic_compare_exchange_4@plt
+; RV64I-NEXT: call __atomic_compare_exchange_4
; RV64I-NEXT: lw a3, 12(sp)
; RV64I-NEXT: bnez a0, .LBB175_4
; RV64I-NEXT: .LBB175_2: # %atomicrmw.start
@@ -16629,7 +16629,7 @@ define i32 @atomicrmw_umax_i32_acquire(ptr %a, i32 %b) nounwind {
; RV32I-NEXT: li a3, 2
; RV32I-NEXT: li a4, 2
; RV32I-NEXT: mv a0, s0
-; RV32I-NEXT: call __atomic_compare_exchange_4@plt
+; RV32I-NEXT: call __atomic_compare_exchange_4
; RV32I-NEXT: lw a3, 0(sp)
; RV32I-NEXT: bnez a0, .LBB176_4
; RV32I-NEXT: .LBB176_2: # %atomicrmw.start
@@ -16677,7 +16677,7 @@ define i32 @atomicrmw_umax_i32_acquire(ptr %a, i32 %b) nounwind {
; RV64I-NEXT: li a3, 2
; RV64I-NEXT: li a4, 2
; RV64I-NEXT: mv a0, s0
-; RV64I-NEXT: call __atomic_compare_exchange_4@plt
+; RV64I-NEXT: call __atomic_compare_exchange_4
; RV64I-NEXT: lw a3, 12(sp)
; RV64I-NEXT: bnez a0, .LBB176_4
; RV64I-NEXT: .LBB176_2: # %atomicrmw.start
@@ -16728,7 +16728,7 @@ define i32 @atomicrmw_umax_i32_release(ptr %a, i32 %b) nounwind {
; RV32I-NEXT: li a3, 3
; RV32I-NEXT: mv a0, s0
; RV32I-NEXT: li a4, 0
-; RV32I-NEXT: call __atomic_compare_exchange_4@plt
+; RV32I-NEXT: call __atomic_compare_exchange_4
; RV32I-NEXT: lw a3, 0(sp)
; RV32I-NEXT: bnez a0, .LBB177_4
; RV32I-NEXT: .LBB177_2: # %atomicrmw.start
@@ -16776,7 +16776,7 @@ define i32 @atomicrmw_umax_i32_release(ptr %a, i32 %b) nounwind {
; RV64I-NEXT: li a3, 3
; RV64I-NEXT: mv a0, s0
; RV64I-NEXT: li a4, 0
-; RV64I-NEXT: call __atomic_compare_exchange_4@plt
+; RV64I-NEXT: call __atomic_compare_exchange_4
; RV64I-NEXT: lw a3, 12(sp)
; RV64I-NEXT: bnez a0, .LBB177_4
; RV64I-NEXT: .LBB177_2: # %atomicrmw.start
@@ -16827,7 +16827,7 @@ define i32 @atomicrmw_umax_i32_acq_rel(ptr %a, i32 %b) nounwind {
; RV32I-NEXT: li a3, 4
; RV32I-NEXT: li a4, 2
; RV32I-NEXT: mv a0, s0
-; RV32I-NEXT: call __atomic_compare_exchange_4@plt
+; RV32I-NEXT: call __atomic_compare_exchange_4
; RV32I-NEXT: lw a3, 0(sp)
; RV32I-NEXT: bnez a0, .LBB178_4
; RV32I-NEXT: .LBB178_2: # %atomicrmw.start
@@ -16875,7 +16875,7 @@ define i32 @atomicrmw_umax_i32_acq_rel(ptr %a, i32 %b) nounwind {
; RV64I-NEXT: li a3, 4
; RV64I-NEXT: li a4, 2
; RV64I-NEXT: mv a0, s0
-; RV64I-NEXT: call __atomic_compare_exchange_4@plt
+; RV64I-NEXT: call __atomic_compare_exchange_4
; RV64I-NEXT: lw a3, 12(sp)
; RV64I-NEXT: bnez a0, .LBB178_4
; RV64I-NEXT: .LBB178_2: # %atomicrmw.start
@@ -16926,7 +16926,7 @@ define i32 @atomicrmw_umax_i32_seq_cst(ptr %a, i32 %b) nounwind {
; RV32I-NEXT: li a3, 5
; RV32I-NEXT: li a4, 5
; RV32I-NEXT: mv a0, s0
-; RV32I-NEXT: call __atomic_compare_exchange_4@plt
+; RV32I-NEXT: call __atomic_compare_exchange_4
; RV32I-NEXT: lw a3, 0(sp)
; RV32I-NEXT: bnez a0, .LBB179_4
; RV32I-NEXT: .LBB179_2: # %atomicrmw.start
@@ -16974,7 +16974,7 @@ define i32 @atomicrmw_umax_i32_seq_cst(ptr %a, i32 %b) nounwind {
; RV64I-NEXT: li a3, 5
; RV64I-NEXT: li a4, 5
; RV64I-NEXT: mv a0, s0
-; RV64I-NEXT: call __atomic_compare_exchange_4@plt
+; RV64I-NEXT: call __atomic_compare_exchange_4
; RV64I-NEXT: lw a3, 12(sp)
; RV64I-NEXT: bnez a0, .LBB179_4
; RV64I-NEXT: .LBB179_2: # %atomicrmw.start
@@ -17025,7 +17025,7 @@ define i32 @atomicrmw_umin_i32_monotonic(ptr %a, i32 %b) nounwind {
; RV32I-NEXT: mv a0, s0
; RV32I-NEXT: li a3, 0
; RV32I-NEXT: li a4, 0
-; RV32I-NEXT: call __atomic_compare_exchange_4@plt
+; RV32I-NEXT: call __atomic_compare_exchange_4
; RV32I-NEXT: lw a3, 0(sp)
; RV32I-NEXT: bnez a0, .LBB180_4
; RV32I-NEXT: .LBB180_2: # %atomicrmw.start
@@ -17068,7 +17068,7 @@ define i32 @atomicrmw_umin_i32_monotonic(ptr %a, i32 %b) nounwind {
; RV64I-NEXT: mv a0, s0
; RV64I-NEXT: li a3, 0
; RV64I-NEXT: li a4, 0
-; RV64I-NEXT: call __atomic_compare_exchange_4@plt
+; RV64I-NEXT: call __atomic_compare_exchange_4
; RV64I-NEXT: lw a3, 12(sp)
; RV64I-NEXT: bnez a0, .LBB180_4
; RV64I-NEXT: .LBB180_2: # %atomicrmw.start
@@ -17114,7 +17114,7 @@ define i32 @atomicrmw_umin_i32_acquire(ptr %a, i32 %b) nounwind {
; RV32I-NEXT: li a3, 2
; RV32I-NEXT: li a4, 2
; RV32I-NEXT: mv a0, s0
-; RV32I-NEXT: call __atomic_compare_exchange_4@plt
+; RV32I-NEXT: call __atomic_compare_exchange_4
; RV32I-NEXT: lw a3, 0(sp)
; RV32I-NEXT: bnez a0, .LBB181_4
; RV32I-NEXT: .LBB181_2: # %atomicrmw.start
@@ -17162,7 +17162,7 @@ define i32 @atomicrmw_umin_i32_acquire(ptr %a, i32 %b) nounwind {
; RV64I-NEXT: li a3, 2
; RV64I-NEXT: li a4, 2
; RV64I-NEXT: mv a0, s0
-; RV64I-NEXT: call __atomic_compare_exchange_4@plt
+; RV64I-NEXT: call __atomic_compare_exchange_4
; RV64I-NEXT: lw a3, 12(sp)
; RV64I-NEXT: bnez a0, .LBB181_4
; RV64I-NEXT: .LBB181_2: # %atomicrmw.start
@@ -17213,7 +17213,7 @@ define i32 @atomicrmw_umin_i32_release(ptr %a, i32 %b) nounwind {
; RV32I-NEXT: li a3, 3
; RV32I-NEXT: mv a0, s0
; RV32I-NEXT: li a4, 0
-; RV32I-NEXT: call __atomic_compare_exchange_4@plt
+; RV32I-NEXT: call __atomic_compare_exchange_4
; RV32I-NEXT: lw a3, 0(sp)
; RV32I-NEXT: bnez a0, .LBB182_4
; RV32I-NEXT: .LBB182_2: # %atomicrmw.start
@@ -17261,7 +17261,7 @@ define i32 @atomicrmw_umin_i32_release(ptr %a, i32 %b) nounwind {
; RV64I-NEXT: li a3, 3
; RV64I-NEXT: mv a0, s0
; RV64I-NEXT: li a4, 0
-; RV64I-NEXT: call __atomic_compare_exchange_4@plt
+; RV64I-NEXT: call __atomic_compare_exchange_4
; RV64I-NEXT: lw a3, 12(sp)
; RV64I-NEXT: bnez a0, .LBB182_4
; RV64I-NEXT: .LBB182_2: # %atomicrmw.start
@@ -17312,7 +17312,7 @@ define i32 @atomicrmw_umin_i32_acq_rel(ptr %a, i32 %b) nounwind {
; RV32I-NEXT: li a3, 4
; RV32I-NEXT: li a4, 2
; RV32I-NEXT: mv a0, s0
-; RV32I-NEXT: call __atomic_compare_exchange_4@plt
+; RV32I-NEXT: call __atomic_compare_exchange_4
; RV32I-NEXT: lw a3, 0(sp)
; RV32I-NEXT: bnez a0, .LBB183_4
; RV32I-NEXT: .LBB183_2: # %atomicrmw.start
@@ -17360,7 +17360,7 @@ define i32 @atomicrmw_umin_i32_acq_rel(ptr %a, i32 %b) nounwind {
; RV64I-NEXT: li a3, 4
; RV64I-NEXT: li a4, 2
; RV64I-NEXT: mv a0, s0
-; RV64I-NEXT: call __atomic_compare_exchange_4@plt
+; RV64I-NEXT: call __atomic_compare_exchange_4
; RV64I-NEXT: lw a3, 12(sp)
; RV64I-NEXT: bnez a0, .LBB183_4
; RV64I-NEXT: .LBB183_2: # %atomicrmw.start
@@ -17411,7 +17411,7 @@ define i32 @atomicrmw_umin_i32_seq_cst(ptr %a, i32 %b) nounwind {
; RV32I-NEXT: li a3, 5
; RV32I-NEXT: li a4, 5
; RV32I-NEXT: mv a0, s0
-; RV32I-NEXT: call __atomic_compare_exchange_4@plt
+; RV32I-NEXT: call __atomic_compare_exchange_4
; RV32I-NEXT: lw a3, 0(sp)
; RV32I-NEXT: bnez a0, .LBB184_4
; RV32I-NEXT: .LBB184_2: # %atomicrmw.start
@@ -17459,7 +17459,7 @@ define i32 @atomicrmw_umin_i32_seq_cst(ptr %a, i32 %b) nounwind {
; RV64I-NEXT: li a3, 5
; RV64I-NEXT: li a4, 5
; RV64I-NEXT: mv a0, s0
-; RV64I-NEXT: call __atomic_compare_exchange_4@plt
+; RV64I-NEXT: call __atomic_compare_exchange_4
; RV64I-NEXT: lw a3, 12(sp)
; RV64I-NEXT: bnez a0, .LBB184_4
; RV64I-NEXT: .LBB184_2: # %atomicrmw.start
@@ -17498,7 +17498,7 @@ define i64 @atomicrmw_xchg_i64_monotonic(ptr %a, i64 %b) nounwind {
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT: li a3, 0
-; RV32I-NEXT: call __atomic_exchange_8@plt
+; RV32I-NEXT: call __atomic_exchange_8
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
@@ -17508,7 +17508,7 @@ define i64 @atomicrmw_xchg_i64_monotonic(ptr %a, i64 %b) nounwind {
; RV32IA-NEXT: addi sp, sp, -16
; RV32IA-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32IA-NEXT: li a3, 0
-; RV32IA-NEXT: call __atomic_exchange_8@plt
+; RV32IA-NEXT: call __atomic_exchange_8
; RV32IA-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32IA-NEXT: addi sp, sp, 16
; RV32IA-NEXT: ret
@@ -17518,7 +17518,7 @@ define i64 @atomicrmw_xchg_i64_monotonic(ptr %a, i64 %b) nounwind {
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT: li a2, 0
-; RV64I-NEXT: call __atomic_exchange_8@plt
+; RV64I-NEXT: call __atomic_exchange_8
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
@@ -17537,7 +17537,7 @@ define i64 @atomicrmw_xchg_i64_acquire(ptr %a, i64 %b) nounwind {
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT: li a3, 2
-; RV32I-NEXT: call __atomic_exchange_8@plt
+; RV32I-NEXT: call __atomic_exchange_8
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
@@ -17547,7 +17547,7 @@ define i64 @atomicrmw_xchg_i64_acquire(ptr %a, i64 %b) nounwind {
; RV32IA-NEXT: addi sp, sp, -16
; RV32IA-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32IA-NEXT: li a3, 2
-; RV32IA-NEXT: call __atomic_exchange_8@plt
+; RV32IA-NEXT: call __atomic_exchange_8
; RV32IA-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32IA-NEXT: addi sp, sp, 16
; RV32IA-NEXT: ret
@@ -17557,7 +17557,7 @@ define i64 @atomicrmw_xchg_i64_acquire(ptr %a, i64 %b) nounwind {
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT: li a2, 2
-; RV64I-NEXT: call __atomic_exchange_8@plt
+; RV64I-NEXT: call __atomic_exchange_8
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
@@ -17581,7 +17581,7 @@ define i64 @atomicrmw_xchg_i64_release(ptr %a, i64 %b) nounwind {
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT: li a3, 3
-; RV32I-NEXT: call __atomic_exchange_8@plt
+; RV32I-NEXT: call __atomic_exchange_8
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
@@ -17591,7 +17591,7 @@ define i64 @atomicrmw_xchg_i64_release(ptr %a, i64 %b) nounwind {
; RV32IA-NEXT: addi sp, sp, -16
; RV32IA-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32IA-NEXT: li a3, 3
-; RV32IA-NEXT: call __atomic_exchange_8@plt
+; RV32IA-NEXT: call __atomic_exchange_8
; RV32IA-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32IA-NEXT: addi sp, sp, 16
; RV32IA-NEXT: ret
@@ -17601,7 +17601,7 @@ define i64 @atomicrmw_xchg_i64_release(ptr %a, i64 %b) nounwind {
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT: li a2, 3
-; RV64I-NEXT: call __atomic_exchange_8@plt
+; RV64I-NEXT: call __atomic_exchange_8
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
@@ -17625,7 +17625,7 @@ define i64 @atomicrmw_xchg_i64_acq_rel(ptr %a, i64 %b) nounwind {
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT: li a3, 4
-; RV32I-NEXT: call __atomic_exchange_8@plt
+; RV32I-NEXT: call __atomic_exchange_8
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
@@ -17635,7 +17635,7 @@ define i64 @atomicrmw_xchg_i64_acq_rel(ptr %a, i64 %b) nounwind {
; RV32IA-NEXT: addi sp, sp, -16
; RV32IA-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32IA-NEXT: li a3, 4
-; RV32IA-NEXT: call __atomic_exchange_8@plt
+; RV32IA-NEXT: call __atomic_exchange_8
; RV32IA-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32IA-NEXT: addi sp, sp, 16
; RV32IA-NEXT: ret
@@ -17645,7 +17645,7 @@ define i64 @atomicrmw_xchg_i64_acq_rel(ptr %a, i64 %b) nounwind {
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT: li a2, 4
-; RV64I-NEXT: call __atomic_exchange_8@plt
+; RV64I-NEXT: call __atomic_exchange_8
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
@@ -17669,7 +17669,7 @@ define i64 @atomicrmw_xchg_i64_seq_cst(ptr %a, i64 %b) nounwind {
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT: li a3, 5
-; RV32I-NEXT: call __atomic_exchange_8@plt
+; RV32I-NEXT: call __atomic_exchange_8
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
@@ -17679,7 +17679,7 @@ define i64 @atomicrmw_xchg_i64_seq_cst(ptr %a, i64 %b) nounwind {
; RV32IA-NEXT: addi sp, sp, -16
; RV32IA-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32IA-NEXT: li a3, 5
-; RV32IA-NEXT: call __atomic_exchange_8@plt
+; RV32IA-NEXT: call __atomic_exchange_8
; RV32IA-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32IA-NEXT: addi sp, sp, 16
; RV32IA-NEXT: ret
@@ -17689,7 +17689,7 @@ define i64 @atomicrmw_xchg_i64_seq_cst(ptr %a, i64 %b) nounwind {
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT: li a2, 5
-; RV64I-NEXT: call __atomic_exchange_8@plt
+; RV64I-NEXT: call __atomic_exchange_8
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
@@ -17713,7 +17713,7 @@ define i64 @atomicrmw_add_i64_monotonic(ptr %a, i64 %b) nounwind {
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT: li a3, 0
-; RV32I-NEXT: call __atomic_fetch_add_8@plt
+; RV32I-NEXT: call __atomic_fetch_add_8
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
@@ -17723,7 +17723,7 @@ define i64 @atomicrmw_add_i64_monotonic(ptr %a, i64 %b) nounwind {
; RV32IA-NEXT: addi sp, sp, -16
; RV32IA-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32IA-NEXT: li a3, 0
-; RV32IA-NEXT: call __atomic_fetch_add_8@plt
+; RV32IA-NEXT: call __atomic_fetch_add_8
; RV32IA-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32IA-NEXT: addi sp, sp, 16
; RV32IA-NEXT: ret
@@ -17733,7 +17733,7 @@ define i64 @atomicrmw_add_i64_monotonic(ptr %a, i64 %b) nounwind {
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT: li a2, 0
-; RV64I-NEXT: call __atomic_fetch_add_8@plt
+; RV64I-NEXT: call __atomic_fetch_add_8
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
@@ -17752,7 +17752,7 @@ define i64 @atomicrmw_add_i64_acquire(ptr %a, i64 %b) nounwind {
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT: li a3, 2
-; RV32I-NEXT: call __atomic_fetch_add_8@plt
+; RV32I-NEXT: call __atomic_fetch_add_8
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
@@ -17762,7 +17762,7 @@ define i64 @atomicrmw_add_i64_acquire(ptr %a, i64 %b) nounwind {
; RV32IA-NEXT: addi sp, sp, -16
; RV32IA-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32IA-NEXT: li a3, 2
-; RV32IA-NEXT: call __atomic_fetch_add_8@plt
+; RV32IA-NEXT: call __atomic_fetch_add_8
; RV32IA-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32IA-NEXT: addi sp, sp, 16
; RV32IA-NEXT: ret
@@ -17772,7 +17772,7 @@ define i64 @atomicrmw_add_i64_acquire(ptr %a, i64 %b) nounwind {
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT: li a2, 2
-; RV64I-NEXT: call __atomic_fetch_add_8@plt
+; RV64I-NEXT: call __atomic_fetch_add_8
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
@@ -17796,7 +17796,7 @@ define i64 @atomicrmw_add_i64_release(ptr %a, i64 %b) nounwind {
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT: li a3, 3
-; RV32I-NEXT: call __atomic_fetch_add_8@plt
+; RV32I-NEXT: call __atomic_fetch_add_8
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
@@ -17806,7 +17806,7 @@ define i64 @atomicrmw_add_i64_release(ptr %a, i64 %b) nounwind {
; RV32IA-NEXT: addi sp, sp, -16
; RV32IA-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32IA-NEXT: li a3, 3
-; RV32IA-NEXT: call __atomic_fetch_add_8@plt
+; RV32IA-NEXT: call __atomic_fetch_add_8
; RV32IA-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32IA-NEXT: addi sp, sp, 16
; RV32IA-NEXT: ret
@@ -17816,7 +17816,7 @@ define i64 @atomicrmw_add_i64_release(ptr %a, i64 %b) nounwind {
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT: li a2, 3
-; RV64I-NEXT: call __atomic_fetch_add_8@plt
+; RV64I-NEXT: call __atomic_fetch_add_8
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
@@ -17840,7 +17840,7 @@ define i64 @atomicrmw_add_i64_acq_rel(ptr %a, i64 %b) nounwind {
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT: li a3, 4
-; RV32I-NEXT: call __atomic_fetch_add_8@plt
+; RV32I-NEXT: call __atomic_fetch_add_8
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
@@ -17850,7 +17850,7 @@ define i64 @atomicrmw_add_i64_acq_rel(ptr %a, i64 %b) nounwind {
; RV32IA-NEXT: addi sp, sp, -16
; RV32IA-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32IA-NEXT: li a3, 4
-; RV32IA-NEXT: call __atomic_fetch_add_8@plt
+; RV32IA-NEXT: call __atomic_fetch_add_8
; RV32IA-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32IA-NEXT: addi sp, sp, 16
; RV32IA-NEXT: ret
@@ -17860,7 +17860,7 @@ define i64 @atomicrmw_add_i64_acq_rel(ptr %a, i64 %b) nounwind {
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT: li a2, 4
-; RV64I-NEXT: call __atomic_fetch_add_8@plt
+; RV64I-NEXT: call __atomic_fetch_add_8
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
@@ -17884,7 +17884,7 @@ define i64 @atomicrmw_add_i64_seq_cst(ptr %a, i64 %b) nounwind {
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT: li a3, 5
-; RV32I-NEXT: call __atomic_fetch_add_8@plt
+; RV32I-NEXT: call __atomic_fetch_add_8
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
@@ -17894,7 +17894,7 @@ define i64 @atomicrmw_add_i64_seq_cst(ptr %a, i64 %b) nounwind {
; RV32IA-NEXT: addi sp, sp, -16
; RV32IA-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32IA-NEXT: li a3, 5
-; RV32IA-NEXT: call __atomic_fetch_add_8@plt
+; RV32IA-NEXT: call __atomic_fetch_add_8
; RV32IA-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32IA-NEXT: addi sp, sp, 16
; RV32IA-NEXT: ret
@@ -17904,7 +17904,7 @@ define i64 @atomicrmw_add_i64_seq_cst(ptr %a, i64 %b) nounwind {
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT: li a2, 5
-; RV64I-NEXT: call __atomic_fetch_add_8@plt
+; RV64I-NEXT: call __atomic_fetch_add_8
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
@@ -17928,7 +17928,7 @@ define i64 @atomicrmw_sub_i64_monotonic(ptr %a, i64 %b) nounwind {
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT: li a3, 0
-; RV32I-NEXT: call __atomic_fetch_sub_8@plt
+; RV32I-NEXT: call __atomic_fetch_sub_8
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
@@ -17938,7 +17938,7 @@ define i64 @atomicrmw_sub_i64_monotonic(ptr %a, i64 %b) nounwind {
; RV32IA-NEXT: addi sp, sp, -16
; RV32IA-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32IA-NEXT: li a3, 0
-; RV32IA-NEXT: call __atomic_fetch_sub_8@plt
+; RV32IA-NEXT: call __atomic_fetch_sub_8
; RV32IA-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32IA-NEXT: addi sp, sp, 16
; RV32IA-NEXT: ret
@@ -17948,7 +17948,7 @@ define i64 @atomicrmw_sub_i64_monotonic(ptr %a, i64 %b) nounwind {
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT: li a2, 0
-; RV64I-NEXT: call __atomic_fetch_sub_8@plt
+; RV64I-NEXT: call __atomic_fetch_sub_8
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
@@ -17968,7 +17968,7 @@ define i64 @atomicrmw_sub_i64_acquire(ptr %a, i64 %b) nounwind {
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT: li a3, 2
-; RV32I-NEXT: call __atomic_fetch_sub_8@plt
+; RV32I-NEXT: call __atomic_fetch_sub_8
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
@@ -17978,7 +17978,7 @@ define i64 @atomicrmw_sub_i64_acquire(ptr %a, i64 %b) nounwind {
; RV32IA-NEXT: addi sp, sp, -16
; RV32IA-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32IA-NEXT: li a3, 2
-; RV32IA-NEXT: call __atomic_fetch_sub_8@plt
+; RV32IA-NEXT: call __atomic_fetch_sub_8
; RV32IA-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32IA-NEXT: addi sp, sp, 16
; RV32IA-NEXT: ret
@@ -17988,7 +17988,7 @@ define i64 @atomicrmw_sub_i64_acquire(ptr %a, i64 %b) nounwind {
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT: li a2, 2
-; RV64I-NEXT: call __atomic_fetch_sub_8@plt
+; RV64I-NEXT: call __atomic_fetch_sub_8
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
@@ -18014,7 +18014,7 @@ define i64 @atomicrmw_sub_i64_release(ptr %a, i64 %b) nounwind {
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT: li a3, 3
-; RV32I-NEXT: call __atomic_fetch_sub_8@plt
+; RV32I-NEXT: call __atomic_fetch_sub_8
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
@@ -18024,7 +18024,7 @@ define i64 @atomicrmw_sub_i64_release(ptr %a, i64 %b) nounwind {
; RV32IA-NEXT: addi sp, sp, -16
; RV32IA-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32IA-NEXT: li a3, 3
-; RV32IA-NEXT: call __atomic_fetch_sub_8@plt
+; RV32IA-NEXT: call __atomic_fetch_sub_8
; RV32IA-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32IA-NEXT: addi sp, sp, 16
; RV32IA-NEXT: ret
@@ -18034,7 +18034,7 @@ define i64 @atomicrmw_sub_i64_release(ptr %a, i64 %b) nounwind {
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT: li a2, 3
-; RV64I-NEXT: call __atomic_fetch_sub_8@plt
+; RV64I-NEXT: call __atomic_fetch_sub_8
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
@@ -18060,7 +18060,7 @@ define i64 @atomicrmw_sub_i64_acq_rel(ptr %a, i64 %b) nounwind {
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT: li a3, 4
-; RV32I-NEXT: call __atomic_fetch_sub_8@plt
+; RV32I-NEXT: call __atomic_fetch_sub_8
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
@@ -18070,7 +18070,7 @@ define i64 @atomicrmw_sub_i64_acq_rel(ptr %a, i64 %b) nounwind {
; RV32IA-NEXT: addi sp, sp, -16
; RV32IA-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32IA-NEXT: li a3, 4
-; RV32IA-NEXT: call __atomic_fetch_sub_8@plt
+; RV32IA-NEXT: call __atomic_fetch_sub_8
; RV32IA-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32IA-NEXT: addi sp, sp, 16
; RV32IA-NEXT: ret
@@ -18080,7 +18080,7 @@ define i64 @atomicrmw_sub_i64_acq_rel(ptr %a, i64 %b) nounwind {
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT: li a2, 4
-; RV64I-NEXT: call __atomic_fetch_sub_8@plt
+; RV64I-NEXT: call __atomic_fetch_sub_8
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
@@ -18106,7 +18106,7 @@ define i64 @atomicrmw_sub_i64_seq_cst(ptr %a, i64 %b) nounwind {
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT: li a3, 5
-; RV32I-NEXT: call __atomic_fetch_sub_8@plt
+; RV32I-NEXT: call __atomic_fetch_sub_8
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
@@ -18116,7 +18116,7 @@ define i64 @atomicrmw_sub_i64_seq_cst(ptr %a, i64 %b) nounwind {
; RV32IA-NEXT: addi sp, sp, -16
; RV32IA-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32IA-NEXT: li a3, 5
-; RV32IA-NEXT: call __atomic_fetch_sub_8@plt
+; RV32IA-NEXT: call __atomic_fetch_sub_8
; RV32IA-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32IA-NEXT: addi sp, sp, 16
; RV32IA-NEXT: ret
@@ -18126,7 +18126,7 @@ define i64 @atomicrmw_sub_i64_seq_cst(ptr %a, i64 %b) nounwind {
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT: li a2, 5
-; RV64I-NEXT: call __atomic_fetch_sub_8@plt
+; RV64I-NEXT: call __atomic_fetch_sub_8
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
@@ -18152,7 +18152,7 @@ define i64 @atomicrmw_and_i64_monotonic(ptr %a, i64 %b) nounwind {
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT: li a3, 0
-; RV32I-NEXT: call __atomic_fetch_and_8@plt
+; RV32I-NEXT: call __atomic_fetch_and_8
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
@@ -18162,7 +18162,7 @@ define i64 @atomicrmw_and_i64_monotonic(ptr %a, i64 %b) nounwind {
; RV32IA-NEXT: addi sp, sp, -16
; RV32IA-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32IA-NEXT: li a3, 0
-; RV32IA-NEXT: call __atomic_fetch_and_8@plt
+; RV32IA-NEXT: call __atomic_fetch_and_8
; RV32IA-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32IA-NEXT: addi sp, sp, 16
; RV32IA-NEXT: ret
@@ -18172,7 +18172,7 @@ define i64 @atomicrmw_and_i64_monotonic(ptr %a, i64 %b) nounwind {
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT: li a2, 0
-; RV64I-NEXT: call __atomic_fetch_and_8@plt
+; RV64I-NEXT: call __atomic_fetch_and_8
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
@@ -18191,7 +18191,7 @@ define i64 @atomicrmw_and_i64_acquire(ptr %a, i64 %b) nounwind {
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT: li a3, 2
-; RV32I-NEXT: call __atomic_fetch_and_8@plt
+; RV32I-NEXT: call __atomic_fetch_and_8
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
@@ -18201,7 +18201,7 @@ define i64 @atomicrmw_and_i64_acquire(ptr %a, i64 %b) nounwind {
; RV32IA-NEXT: addi sp, sp, -16
; RV32IA-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32IA-NEXT: li a3, 2
-; RV32IA-NEXT: call __atomic_fetch_and_8@plt
+; RV32IA-NEXT: call __atomic_fetch_and_8
; RV32IA-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32IA-NEXT: addi sp, sp, 16
; RV32IA-NEXT: ret
@@ -18211,7 +18211,7 @@ define i64 @atomicrmw_and_i64_acquire(ptr %a, i64 %b) nounwind {
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT: li a2, 2
-; RV64I-NEXT: call __atomic_fetch_and_8@plt
+; RV64I-NEXT: call __atomic_fetch_and_8
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
@@ -18235,7 +18235,7 @@ define i64 @atomicrmw_and_i64_release(ptr %a, i64 %b) nounwind {
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT: li a3, 3
-; RV32I-NEXT: call __atomic_fetch_and_8@plt
+; RV32I-NEXT: call __atomic_fetch_and_8
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
@@ -18245,7 +18245,7 @@ define i64 @atomicrmw_and_i64_release(ptr %a, i64 %b) nounwind {
; RV32IA-NEXT: addi sp, sp, -16
; RV32IA-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32IA-NEXT: li a3, 3
-; RV32IA-NEXT: call __atomic_fetch_and_8@plt
+; RV32IA-NEXT: call __atomic_fetch_and_8
; RV32IA-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32IA-NEXT: addi sp, sp, 16
; RV32IA-NEXT: ret
@@ -18255,7 +18255,7 @@ define i64 @atomicrmw_and_i64_release(ptr %a, i64 %b) nounwind {
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT: li a2, 3
-; RV64I-NEXT: call __atomic_fetch_and_8@plt
+; RV64I-NEXT: call __atomic_fetch_and_8
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
@@ -18279,7 +18279,7 @@ define i64 @atomicrmw_and_i64_acq_rel(ptr %a, i64 %b) nounwind {
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT: li a3, 4
-; RV32I-NEXT: call __atomic_fetch_and_8@plt
+; RV32I-NEXT: call __atomic_fetch_and_8
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
@@ -18289,7 +18289,7 @@ define i64 @atomicrmw_and_i64_acq_rel(ptr %a, i64 %b) nounwind {
; RV32IA-NEXT: addi sp, sp, -16
; RV32IA-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32IA-NEXT: li a3, 4
-; RV32IA-NEXT: call __atomic_fetch_and_8@plt
+; RV32IA-NEXT: call __atomic_fetch_and_8
; RV32IA-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32IA-NEXT: addi sp, sp, 16
; RV32IA-NEXT: ret
@@ -18299,7 +18299,7 @@ define i64 @atomicrmw_and_i64_acq_rel(ptr %a, i64 %b) nounwind {
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT: li a2, 4
-; RV64I-NEXT: call __atomic_fetch_and_8@plt
+; RV64I-NEXT: call __atomic_fetch_and_8
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
@@ -18323,7 +18323,7 @@ define i64 @atomicrmw_and_i64_seq_cst(ptr %a, i64 %b) nounwind {
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT: li a3, 5
-; RV32I-NEXT: call __atomic_fetch_and_8@plt
+; RV32I-NEXT: call __atomic_fetch_and_8
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
@@ -18333,7 +18333,7 @@ define i64 @atomicrmw_and_i64_seq_cst(ptr %a, i64 %b) nounwind {
; RV32IA-NEXT: addi sp, sp, -16
; RV32IA-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32IA-NEXT: li a3, 5
-; RV32IA-NEXT: call __atomic_fetch_and_8@plt
+; RV32IA-NEXT: call __atomic_fetch_and_8
; RV32IA-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32IA-NEXT: addi sp, sp, 16
; RV32IA-NEXT: ret
@@ -18343,7 +18343,7 @@ define i64 @atomicrmw_and_i64_seq_cst(ptr %a, i64 %b) nounwind {
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT: li a2, 5
-; RV64I-NEXT: call __atomic_fetch_and_8@plt
+; RV64I-NEXT: call __atomic_fetch_and_8
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
@@ -18367,7 +18367,7 @@ define i64 @atomicrmw_nand_i64_monotonic(ptr %a, i64 %b) nounwind {
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT: li a3, 0
-; RV32I-NEXT: call __atomic_fetch_nand_8@plt
+; RV32I-NEXT: call __atomic_fetch_nand_8
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
@@ -18377,7 +18377,7 @@ define i64 @atomicrmw_nand_i64_monotonic(ptr %a, i64 %b) nounwind {
; RV32IA-NEXT: addi sp, sp, -16
; RV32IA-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32IA-NEXT: li a3, 0
-; RV32IA-NEXT: call __atomic_fetch_nand_8@plt
+; RV32IA-NEXT: call __atomic_fetch_nand_8
; RV32IA-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32IA-NEXT: addi sp, sp, 16
; RV32IA-NEXT: ret
@@ -18387,7 +18387,7 @@ define i64 @atomicrmw_nand_i64_monotonic(ptr %a, i64 %b) nounwind {
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT: li a2, 0
-; RV64I-NEXT: call __atomic_fetch_nand_8@plt
+; RV64I-NEXT: call __atomic_fetch_nand_8
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
@@ -18413,7 +18413,7 @@ define i64 @atomicrmw_nand_i64_acquire(ptr %a, i64 %b) nounwind {
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT: li a3, 2
-; RV32I-NEXT: call __atomic_fetch_nand_8@plt
+; RV32I-NEXT: call __atomic_fetch_nand_8
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
@@ -18423,7 +18423,7 @@ define i64 @atomicrmw_nand_i64_acquire(ptr %a, i64 %b) nounwind {
; RV32IA-NEXT: addi sp, sp, -16
; RV32IA-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32IA-NEXT: li a3, 2
-; RV32IA-NEXT: call __atomic_fetch_nand_8@plt
+; RV32IA-NEXT: call __atomic_fetch_nand_8
; RV32IA-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32IA-NEXT: addi sp, sp, 16
; RV32IA-NEXT: ret
@@ -18433,7 +18433,7 @@ define i64 @atomicrmw_nand_i64_acquire(ptr %a, i64 %b) nounwind {
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT: li a2, 2
-; RV64I-NEXT: call __atomic_fetch_nand_8@plt
+; RV64I-NEXT: call __atomic_fetch_nand_8
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
@@ -18471,7 +18471,7 @@ define i64 @atomicrmw_nand_i64_release(ptr %a, i64 %b) nounwind {
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT: li a3, 3
-; RV32I-NEXT: call __atomic_fetch_nand_8@plt
+; RV32I-NEXT: call __atomic_fetch_nand_8
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
@@ -18481,7 +18481,7 @@ define i64 @atomicrmw_nand_i64_release(ptr %a, i64 %b) nounwind {
; RV32IA-NEXT: addi sp, sp, -16
; RV32IA-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32IA-NEXT: li a3, 3
-; RV32IA-NEXT: call __atomic_fetch_nand_8@plt
+; RV32IA-NEXT: call __atomic_fetch_nand_8
; RV32IA-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32IA-NEXT: addi sp, sp, 16
; RV32IA-NEXT: ret
@@ -18491,7 +18491,7 @@ define i64 @atomicrmw_nand_i64_release(ptr %a, i64 %b) nounwind {
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT: li a2, 3
-; RV64I-NEXT: call __atomic_fetch_nand_8@plt
+; RV64I-NEXT: call __atomic_fetch_nand_8
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
@@ -18529,7 +18529,7 @@ define i64 @atomicrmw_nand_i64_acq_rel(ptr %a, i64 %b) nounwind {
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT: li a3, 4
-; RV32I-NEXT: call __atomic_fetch_nand_8@plt
+; RV32I-NEXT: call __atomic_fetch_nand_8
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
@@ -18539,7 +18539,7 @@ define i64 @atomicrmw_nand_i64_acq_rel(ptr %a, i64 %b) nounwind {
; RV32IA-NEXT: addi sp, sp, -16
; RV32IA-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32IA-NEXT: li a3, 4
-; RV32IA-NEXT: call __atomic_fetch_nand_8@plt
+; RV32IA-NEXT: call __atomic_fetch_nand_8
; RV32IA-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32IA-NEXT: addi sp, sp, 16
; RV32IA-NEXT: ret
@@ -18549,7 +18549,7 @@ define i64 @atomicrmw_nand_i64_acq_rel(ptr %a, i64 %b) nounwind {
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT: li a2, 4
-; RV64I-NEXT: call __atomic_fetch_nand_8@plt
+; RV64I-NEXT: call __atomic_fetch_nand_8
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
@@ -18587,7 +18587,7 @@ define i64 @atomicrmw_nand_i64_seq_cst(ptr %a, i64 %b) nounwind {
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT: li a3, 5
-; RV32I-NEXT: call __atomic_fetch_nand_8@plt
+; RV32I-NEXT: call __atomic_fetch_nand_8
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
@@ -18597,7 +18597,7 @@ define i64 @atomicrmw_nand_i64_seq_cst(ptr %a, i64 %b) nounwind {
; RV32IA-NEXT: addi sp, sp, -16
; RV32IA-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32IA-NEXT: li a3, 5
-; RV32IA-NEXT: call __atomic_fetch_nand_8@plt
+; RV32IA-NEXT: call __atomic_fetch_nand_8
; RV32IA-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32IA-NEXT: addi sp, sp, 16
; RV32IA-NEXT: ret
@@ -18607,7 +18607,7 @@ define i64 @atomicrmw_nand_i64_seq_cst(ptr %a, i64 %b) nounwind {
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT: li a2, 5
-; RV64I-NEXT: call __atomic_fetch_nand_8@plt
+; RV64I-NEXT: call __atomic_fetch_nand_8
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
@@ -18633,7 +18633,7 @@ define i64 @atomicrmw_or_i64_monotonic(ptr %a, i64 %b) nounwind {
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT: li a3, 0
-; RV32I-NEXT: call __atomic_fetch_or_8@plt
+; RV32I-NEXT: call __atomic_fetch_or_8
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
@@ -18643,7 +18643,7 @@ define i64 @atomicrmw_or_i64_monotonic(ptr %a, i64 %b) nounwind {
; RV32IA-NEXT: addi sp, sp, -16
; RV32IA-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32IA-NEXT: li a3, 0
-; RV32IA-NEXT: call __atomic_fetch_or_8@plt
+; RV32IA-NEXT: call __atomic_fetch_or_8
; RV32IA-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32IA-NEXT: addi sp, sp, 16
; RV32IA-NEXT: ret
@@ -18653,7 +18653,7 @@ define i64 @atomicrmw_or_i64_monotonic(ptr %a, i64 %b) nounwind {
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT: li a2, 0
-; RV64I-NEXT: call __atomic_fetch_or_8@plt
+; RV64I-NEXT: call __atomic_fetch_or_8
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
@@ -18672,7 +18672,7 @@ define i64 @atomicrmw_or_i64_acquire(ptr %a, i64 %b) nounwind {
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT: li a3, 2
-; RV32I-NEXT: call __atomic_fetch_or_8@plt
+; RV32I-NEXT: call __atomic_fetch_or_8
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
@@ -18682,7 +18682,7 @@ define i64 @atomicrmw_or_i64_acquire(ptr %a, i64 %b) nounwind {
; RV32IA-NEXT: addi sp, sp, -16
; RV32IA-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32IA-NEXT: li a3, 2
-; RV32IA-NEXT: call __atomic_fetch_or_8@plt
+; RV32IA-NEXT: call __atomic_fetch_or_8
; RV32IA-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32IA-NEXT: addi sp, sp, 16
; RV32IA-NEXT: ret
@@ -18692,7 +18692,7 @@ define i64 @atomicrmw_or_i64_acquire(ptr %a, i64 %b) nounwind {
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT: li a2, 2
-; RV64I-NEXT: call __atomic_fetch_or_8@plt
+; RV64I-NEXT: call __atomic_fetch_or_8
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
@@ -18716,7 +18716,7 @@ define i64 @atomicrmw_or_i64_release(ptr %a, i64 %b) nounwind {
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT: li a3, 3
-; RV32I-NEXT: call __atomic_fetch_or_8@plt
+; RV32I-NEXT: call __atomic_fetch_or_8
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
@@ -18726,7 +18726,7 @@ define i64 @atomicrmw_or_i64_release(ptr %a, i64 %b) nounwind {
; RV32IA-NEXT: addi sp, sp, -16
; RV32IA-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32IA-NEXT: li a3, 3
-; RV32IA-NEXT: call __atomic_fetch_or_8@plt
+; RV32IA-NEXT: call __atomic_fetch_or_8
; RV32IA-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32IA-NEXT: addi sp, sp, 16
; RV32IA-NEXT: ret
@@ -18736,7 +18736,7 @@ define i64 @atomicrmw_or_i64_release(ptr %a, i64 %b) nounwind {
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT: li a2, 3
-; RV64I-NEXT: call __atomic_fetch_or_8@plt
+; RV64I-NEXT: call __atomic_fetch_or_8
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
@@ -18760,7 +18760,7 @@ define i64 @atomicrmw_or_i64_acq_rel(ptr %a, i64 %b) nounwind {
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT: li a3, 4
-; RV32I-NEXT: call __atomic_fetch_or_8@plt
+; RV32I-NEXT: call __atomic_fetch_or_8
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
@@ -18770,7 +18770,7 @@ define i64 @atomicrmw_or_i64_acq_rel(ptr %a, i64 %b) nounwind {
; RV32IA-NEXT: addi sp, sp, -16
; RV32IA-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32IA-NEXT: li a3, 4
-; RV32IA-NEXT: call __atomic_fetch_or_8@plt
+; RV32IA-NEXT: call __atomic_fetch_or_8
; RV32IA-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32IA-NEXT: addi sp, sp, 16
; RV32IA-NEXT: ret
@@ -18780,7 +18780,7 @@ define i64 @atomicrmw_or_i64_acq_rel(ptr %a, i64 %b) nounwind {
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT: li a2, 4
-; RV64I-NEXT: call __atomic_fetch_or_8@plt
+; RV64I-NEXT: call __atomic_fetch_or_8
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
@@ -18804,7 +18804,7 @@ define i64 @atomicrmw_or_i64_seq_cst(ptr %a, i64 %b) nounwind {
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT: li a3, 5
-; RV32I-NEXT: call __atomic_fetch_or_8@plt
+; RV32I-NEXT: call __atomic_fetch_or_8
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
@@ -18814,7 +18814,7 @@ define i64 @atomicrmw_or_i64_seq_cst(ptr %a, i64 %b) nounwind {
; RV32IA-NEXT: addi sp, sp, -16
; RV32IA-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32IA-NEXT: li a3, 5
-; RV32IA-NEXT: call __atomic_fetch_or_8@plt
+; RV32IA-NEXT: call __atomic_fetch_or_8
; RV32IA-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32IA-NEXT: addi sp, sp, 16
; RV32IA-NEXT: ret
@@ -18824,7 +18824,7 @@ define i64 @atomicrmw_or_i64_seq_cst(ptr %a, i64 %b) nounwind {
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT: li a2, 5
-; RV64I-NEXT: call __atomic_fetch_or_8@plt
+; RV64I-NEXT: call __atomic_fetch_or_8
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
@@ -18848,7 +18848,7 @@ define i64 @atomicrmw_xor_i64_monotonic(ptr %a, i64 %b) nounwind {
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT: li a3, 0
-; RV32I-NEXT: call __atomic_fetch_xor_8@plt
+; RV32I-NEXT: call __atomic_fetch_xor_8
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
@@ -18858,7 +18858,7 @@ define i64 @atomicrmw_xor_i64_monotonic(ptr %a, i64 %b) nounwind {
; RV32IA-NEXT: addi sp, sp, -16
; RV32IA-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32IA-NEXT: li a3, 0
-; RV32IA-NEXT: call __atomic_fetch_xor_8@plt
+; RV32IA-NEXT: call __atomic_fetch_xor_8
; RV32IA-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32IA-NEXT: addi sp, sp, 16
; RV32IA-NEXT: ret
@@ -18868,7 +18868,7 @@ define i64 @atomicrmw_xor_i64_monotonic(ptr %a, i64 %b) nounwind {
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT: li a2, 0
-; RV64I-NEXT: call __atomic_fetch_xor_8@plt
+; RV64I-NEXT: call __atomic_fetch_xor_8
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
@@ -18887,7 +18887,7 @@ define i64 @atomicrmw_xor_i64_acquire(ptr %a, i64 %b) nounwind {
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT: li a3, 2
-; RV32I-NEXT: call __atomic_fetch_xor_8@plt
+; RV32I-NEXT: call __atomic_fetch_xor_8
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
@@ -18897,7 +18897,7 @@ define i64 @atomicrmw_xor_i64_acquire(ptr %a, i64 %b) nounwind {
; RV32IA-NEXT: addi sp, sp, -16
; RV32IA-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32IA-NEXT: li a3, 2
-; RV32IA-NEXT: call __atomic_fetch_xor_8@plt
+; RV32IA-NEXT: call __atomic_fetch_xor_8
; RV32IA-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32IA-NEXT: addi sp, sp, 16
; RV32IA-NEXT: ret
@@ -18907,7 +18907,7 @@ define i64 @atomicrmw_xor_i64_acquire(ptr %a, i64 %b) nounwind {
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT: li a2, 2
-; RV64I-NEXT: call __atomic_fetch_xor_8@plt
+; RV64I-NEXT: call __atomic_fetch_xor_8
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
@@ -18931,7 +18931,7 @@ define i64 @atomicrmw_xor_i64_release(ptr %a, i64 %b) nounwind {
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT: li a3, 3
-; RV32I-NEXT: call __atomic_fetch_xor_8@plt
+; RV32I-NEXT: call __atomic_fetch_xor_8
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
@@ -18941,7 +18941,7 @@ define i64 @atomicrmw_xor_i64_release(ptr %a, i64 %b) nounwind {
; RV32IA-NEXT: addi sp, sp, -16
; RV32IA-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32IA-NEXT: li a3, 3
-; RV32IA-NEXT: call __atomic_fetch_xor_8@plt
+; RV32IA-NEXT: call __atomic_fetch_xor_8
; RV32IA-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32IA-NEXT: addi sp, sp, 16
; RV32IA-NEXT: ret
@@ -18951,7 +18951,7 @@ define i64 @atomicrmw_xor_i64_release(ptr %a, i64 %b) nounwind {
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT: li a2, 3
-; RV64I-NEXT: call __atomic_fetch_xor_8@plt
+; RV64I-NEXT: call __atomic_fetch_xor_8
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
@@ -18975,7 +18975,7 @@ define i64 @atomicrmw_xor_i64_acq_rel(ptr %a, i64 %b) nounwind {
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT: li a3, 4
-; RV32I-NEXT: call __atomic_fetch_xor_8@plt
+; RV32I-NEXT: call __atomic_fetch_xor_8
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
@@ -18985,7 +18985,7 @@ define i64 @atomicrmw_xor_i64_acq_rel(ptr %a, i64 %b) nounwind {
; RV32IA-NEXT: addi sp, sp, -16
; RV32IA-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32IA-NEXT: li a3, 4
-; RV32IA-NEXT: call __atomic_fetch_xor_8@plt
+; RV32IA-NEXT: call __atomic_fetch_xor_8
; RV32IA-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32IA-NEXT: addi sp, sp, 16
; RV32IA-NEXT: ret
@@ -18995,7 +18995,7 @@ define i64 @atomicrmw_xor_i64_acq_rel(ptr %a, i64 %b) nounwind {
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT: li a2, 4
-; RV64I-NEXT: call __atomic_fetch_xor_8@plt
+; RV64I-NEXT: call __atomic_fetch_xor_8
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
@@ -19019,7 +19019,7 @@ define i64 @atomicrmw_xor_i64_seq_cst(ptr %a, i64 %b) nounwind {
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT: li a3, 5
-; RV32I-NEXT: call __atomic_fetch_xor_8@plt
+; RV32I-NEXT: call __atomic_fetch_xor_8
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
@@ -19029,7 +19029,7 @@ define i64 @atomicrmw_xor_i64_seq_cst(ptr %a, i64 %b) nounwind {
; RV32IA-NEXT: addi sp, sp, -16
; RV32IA-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32IA-NEXT: li a3, 5
-; RV32IA-NEXT: call __atomic_fetch_xor_8@plt
+; RV32IA-NEXT: call __atomic_fetch_xor_8
; RV32IA-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32IA-NEXT: addi sp, sp, 16
; RV32IA-NEXT: ret
@@ -19039,7 +19039,7 @@ define i64 @atomicrmw_xor_i64_seq_cst(ptr %a, i64 %b) nounwind {
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT: li a2, 5
-; RV64I-NEXT: call __atomic_fetch_xor_8@plt
+; RV64I-NEXT: call __atomic_fetch_xor_8
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
@@ -19079,7 +19079,7 @@ define i64 @atomicrmw_max_i64_monotonic(ptr %a, i64 %b) nounwind {
; RV32I-NEXT: mv a0, s0
; RV32I-NEXT: li a4, 0
; RV32I-NEXT: li a5, 0
-; RV32I-NEXT: call __atomic_compare_exchange_8@plt
+; RV32I-NEXT: call __atomic_compare_exchange_8
; RV32I-NEXT: lw a5, 12(sp)
; RV32I-NEXT: lw a4, 8(sp)
; RV32I-NEXT: bnez a0, .LBB220_7
@@ -19133,7 +19133,7 @@ define i64 @atomicrmw_max_i64_monotonic(ptr %a, i64 %b) nounwind {
; RV32IA-NEXT: mv a0, s0
; RV32IA-NEXT: li a4, 0
; RV32IA-NEXT: li a5, 0
-; RV32IA-NEXT: call __atomic_compare_exchange_8@plt
+; RV32IA-NEXT: call __atomic_compare_exchange_8
; RV32IA-NEXT: lw a5, 12(sp)
; RV32IA-NEXT: lw a4, 8(sp)
; RV32IA-NEXT: bnez a0, .LBB220_7
@@ -19183,7 +19183,7 @@ define i64 @atomicrmw_max_i64_monotonic(ptr %a, i64 %b) nounwind {
; RV64I-NEXT: mv a0, s0
; RV64I-NEXT: li a3, 0
; RV64I-NEXT: li a4, 0
-; RV64I-NEXT: call __atomic_compare_exchange_8@plt
+; RV64I-NEXT: call __atomic_compare_exchange_8
; RV64I-NEXT: ld a3, 0(sp)
; RV64I-NEXT: bnez a0, .LBB220_4
; RV64I-NEXT: .LBB220_2: # %atomicrmw.start
@@ -19232,7 +19232,7 @@ define i64 @atomicrmw_max_i64_acquire(ptr %a, i64 %b) nounwind {
; RV32I-NEXT: li a4, 2
; RV32I-NEXT: li a5, 2
; RV32I-NEXT: mv a0, s0
-; RV32I-NEXT: call __atomic_compare_exchange_8@plt
+; RV32I-NEXT: call __atomic_compare_exchange_8
; RV32I-NEXT: lw a5, 12(sp)
; RV32I-NEXT: lw a4, 8(sp)
; RV32I-NEXT: bnez a0, .LBB221_7
@@ -19286,7 +19286,7 @@ define i64 @atomicrmw_max_i64_acquire(ptr %a, i64 %b) nounwind {
; RV32IA-NEXT: li a4, 2
; RV32IA-NEXT: li a5, 2
; RV32IA-NEXT: mv a0, s0
-; RV32IA-NEXT: call __atomic_compare_exchange_8@plt
+; RV32IA-NEXT: call __atomic_compare_exchange_8
; RV32IA-NEXT: lw a5, 12(sp)
; RV32IA-NEXT: lw a4, 8(sp)
; RV32IA-NEXT: bnez a0, .LBB221_7
@@ -19336,7 +19336,7 @@ define i64 @atomicrmw_max_i64_acquire(ptr %a, i64 %b) nounwind {
; RV64I-NEXT: li a3, 2
; RV64I-NEXT: li a4, 2
; RV64I-NEXT: mv a0, s0
-; RV64I-NEXT: call __atomic_compare_exchange_8@plt
+; RV64I-NEXT: call __atomic_compare_exchange_8
; RV64I-NEXT: ld a3, 0(sp)
; RV64I-NEXT: bnez a0, .LBB221_4
; RV64I-NEXT: .LBB221_2: # %atomicrmw.start
@@ -19390,7 +19390,7 @@ define i64 @atomicrmw_max_i64_release(ptr %a, i64 %b) nounwind {
; RV32I-NEXT: li a4, 3
; RV32I-NEXT: mv a0, s0
; RV32I-NEXT: li a5, 0
-; RV32I-NEXT: call __atomic_compare_exchange_8@plt
+; RV32I-NEXT: call __atomic_compare_exchange_8
; RV32I-NEXT: lw a5, 12(sp)
; RV32I-NEXT: lw a4, 8(sp)
; RV32I-NEXT: bnez a0, .LBB222_7
@@ -19444,7 +19444,7 @@ define i64 @atomicrmw_max_i64_release(ptr %a, i64 %b) nounwind {
; RV32IA-NEXT: li a4, 3
; RV32IA-NEXT: mv a0, s0
; RV32IA-NEXT: li a5, 0
-; RV32IA-NEXT: call __atomic_compare_exchange_8@plt
+; RV32IA-NEXT: call __atomic_compare_exchange_8
; RV32IA-NEXT: lw a5, 12(sp)
; RV32IA-NEXT: lw a4, 8(sp)
; RV32IA-NEXT: bnez a0, .LBB222_7
@@ -19494,7 +19494,7 @@ define i64 @atomicrmw_max_i64_release(ptr %a, i64 %b) nounwind {
; RV64I-NEXT: li a3, 3
; RV64I-NEXT: mv a0, s0
; RV64I-NEXT: li a4, 0
-; RV64I-NEXT: call __atomic_compare_exchange_8@plt
+; RV64I-NEXT: call __atomic_compare_exchange_8
; RV64I-NEXT: ld a3, 0(sp)
; RV64I-NEXT: bnez a0, .LBB222_4
; RV64I-NEXT: .LBB222_2: # %atomicrmw.start
@@ -19548,7 +19548,7 @@ define i64 @atomicrmw_max_i64_acq_rel(ptr %a, i64 %b) nounwind {
; RV32I-NEXT: li a4, 4
; RV32I-NEXT: li a5, 2
; RV32I-NEXT: mv a0, s0
-; RV32I-NEXT: call __atomic_compare_exchange_8@plt
+; RV32I-NEXT: call __atomic_compare_exchange_8
; RV32I-NEXT: lw a5, 12(sp)
; RV32I-NEXT: lw a4, 8(sp)
; RV32I-NEXT: bnez a0, .LBB223_7
@@ -19602,7 +19602,7 @@ define i64 @atomicrmw_max_i64_acq_rel(ptr %a, i64 %b) nounwind {
; RV32IA-NEXT: li a4, 4
; RV32IA-NEXT: li a5, 2
; RV32IA-NEXT: mv a0, s0
-; RV32IA-NEXT: call __atomic_compare_exchange_8@plt
+; RV32IA-NEXT: call __atomic_compare_exchange_8
; RV32IA-NEXT: lw a5, 12(sp)
; RV32IA-NEXT: lw a4, 8(sp)
; RV32IA-NEXT: bnez a0, .LBB223_7
@@ -19652,7 +19652,7 @@ define i64 @atomicrmw_max_i64_acq_rel(ptr %a, i64 %b) nounwind {
; RV64I-NEXT: li a3, 4
; RV64I-NEXT: li a4, 2
; RV64I-NEXT: mv a0, s0
-; RV64I-NEXT: call __atomic_compare_exchange_8@plt
+; RV64I-NEXT: call __atomic_compare_exchange_8
; RV64I-NEXT: ld a3, 0(sp)
; RV64I-NEXT: bnez a0, .LBB223_4
; RV64I-NEXT: .LBB223_2: # %atomicrmw.start
@@ -19706,7 +19706,7 @@ define i64 @atomicrmw_max_i64_seq_cst(ptr %a, i64 %b) nounwind {
; RV32I-NEXT: li a4, 5
; RV32I-NEXT: li a5, 5
; RV32I-NEXT: mv a0, s0
-; RV32I-NEXT: call __atomic_compare_exchange_8@plt
+; RV32I-NEXT: call __atomic_compare_exchange_8
; RV32I-NEXT: lw a5, 12(sp)
; RV32I-NEXT: lw a4, 8(sp)
; RV32I-NEXT: bnez a0, .LBB224_7
@@ -19760,7 +19760,7 @@ define i64 @atomicrmw_max_i64_seq_cst(ptr %a, i64 %b) nounwind {
; RV32IA-NEXT: li a4, 5
; RV32IA-NEXT: li a5, 5
; RV32IA-NEXT: mv a0, s0
-; RV32IA-NEXT: call __atomic_compare_exchange_8@plt
+; RV32IA-NEXT: call __atomic_compare_exchange_8
; RV32IA-NEXT: lw a5, 12(sp)
; RV32IA-NEXT: lw a4, 8(sp)
; RV32IA-NEXT: bnez a0, .LBB224_7
@@ -19810,7 +19810,7 @@ define i64 @atomicrmw_max_i64_seq_cst(ptr %a, i64 %b) nounwind {
; RV64I-NEXT: li a3, 5
; RV64I-NEXT: li a4, 5
; RV64I-NEXT: mv a0, s0
-; RV64I-NEXT: call __atomic_compare_exchange_8@plt
+; RV64I-NEXT: call __atomic_compare_exchange_8
; RV64I-NEXT: ld a3, 0(sp)
; RV64I-NEXT: bnez a0, .LBB224_4
; RV64I-NEXT: .LBB224_2: # %atomicrmw.start
@@ -19864,7 +19864,7 @@ define i64 @atomicrmw_min_i64_monotonic(ptr %a, i64 %b) nounwind {
; RV32I-NEXT: mv a0, s0
; RV32I-NEXT: li a4, 0
; RV32I-NEXT: li a5, 0
-; RV32I-NEXT: call __atomic_compare_exchange_8@plt
+; RV32I-NEXT: call __atomic_compare_exchange_8
; RV32I-NEXT: lw a5, 12(sp)
; RV32I-NEXT: lw a4, 8(sp)
; RV32I-NEXT: bnez a0, .LBB225_7
@@ -19918,7 +19918,7 @@ define i64 @atomicrmw_min_i64_monotonic(ptr %a, i64 %b) nounwind {
; RV32IA-NEXT: mv a0, s0
; RV32IA-NEXT: li a4, 0
; RV32IA-NEXT: li a5, 0
-; RV32IA-NEXT: call __atomic_compare_exchange_8@plt
+; RV32IA-NEXT: call __atomic_compare_exchange_8
; RV32IA-NEXT: lw a5, 12(sp)
; RV32IA-NEXT: lw a4, 8(sp)
; RV32IA-NEXT: bnez a0, .LBB225_7
@@ -19968,7 +19968,7 @@ define i64 @atomicrmw_min_i64_monotonic(ptr %a, i64 %b) nounwind {
; RV64I-NEXT: mv a0, s0
; RV64I-NEXT: li a3, 0
; RV64I-NEXT: li a4, 0
-; RV64I-NEXT: call __atomic_compare_exchange_8@plt
+; RV64I-NEXT: call __atomic_compare_exchange_8
; RV64I-NEXT: ld a3, 0(sp)
; RV64I-NEXT: bnez a0, .LBB225_4
; RV64I-NEXT: .LBB225_2: # %atomicrmw.start
@@ -20017,7 +20017,7 @@ define i64 @atomicrmw_min_i64_acquire(ptr %a, i64 %b) nounwind {
; RV32I-NEXT: li a4, 2
; RV32I-NEXT: li a5, 2
; RV32I-NEXT: mv a0, s0
-; RV32I-NEXT: call __atomic_compare_exchange_8@plt
+; RV32I-NEXT: call __atomic_compare_exchange_8
; RV32I-NEXT: lw a5, 12(sp)
; RV32I-NEXT: lw a4, 8(sp)
; RV32I-NEXT: bnez a0, .LBB226_7
@@ -20071,7 +20071,7 @@ define i64 @atomicrmw_min_i64_acquire(ptr %a, i64 %b) nounwind {
; RV32IA-NEXT: li a4, 2
; RV32IA-NEXT: li a5, 2
; RV32IA-NEXT: mv a0, s0
-; RV32IA-NEXT: call __atomic_compare_exchange_8@plt
+; RV32IA-NEXT: call __atomic_compare_exchange_8
; RV32IA-NEXT: lw a5, 12(sp)
; RV32IA-NEXT: lw a4, 8(sp)
; RV32IA-NEXT: bnez a0, .LBB226_7
@@ -20121,7 +20121,7 @@ define i64 @atomicrmw_min_i64_acquire(ptr %a, i64 %b) nounwind {
; RV64I-NEXT: li a3, 2
; RV64I-NEXT: li a4, 2
; RV64I-NEXT: mv a0, s0
-; RV64I-NEXT: call __atomic_compare_exchange_8@plt
+; RV64I-NEXT: call __atomic_compare_exchange_8
; RV64I-NEXT: ld a3, 0(sp)
; RV64I-NEXT: bnez a0, .LBB226_4
; RV64I-NEXT: .LBB226_2: # %atomicrmw.start
@@ -20175,7 +20175,7 @@ define i64 @atomicrmw_min_i64_release(ptr %a, i64 %b) nounwind {
; RV32I-NEXT: li a4, 3
; RV32I-NEXT: mv a0, s0
; RV32I-NEXT: li a5, 0
-; RV32I-NEXT: call __atomic_compare_exchange_8@plt
+; RV32I-NEXT: call __atomic_compare_exchange_8
; RV32I-NEXT: lw a5, 12(sp)
; RV32I-NEXT: lw a4, 8(sp)
; RV32I-NEXT: bnez a0, .LBB227_7
@@ -20229,7 +20229,7 @@ define i64 @atomicrmw_min_i64_release(ptr %a, i64 %b) nounwind {
; RV32IA-NEXT: li a4, 3
; RV32IA-NEXT: mv a0, s0
; RV32IA-NEXT: li a5, 0
-; RV32IA-NEXT: call __atomic_compare_exchange_8@plt
+; RV32IA-NEXT: call __atomic_compare_exchange_8
; RV32IA-NEXT: lw a5, 12(sp)
; RV32IA-NEXT: lw a4, 8(sp)
; RV32IA-NEXT: bnez a0, .LBB227_7
@@ -20279,7 +20279,7 @@ define i64 @atomicrmw_min_i64_release(ptr %a, i64 %b) nounwind {
; RV64I-NEXT: li a3, 3
; RV64I-NEXT: mv a0, s0
; RV64I-NEXT: li a4, 0
-; RV64I-NEXT: call __atomic_compare_exchange_8@plt
+; RV64I-NEXT: call __atomic_compare_exchange_8
; RV64I-NEXT: ld a3, 0(sp)
; RV64I-NEXT: bnez a0, .LBB227_4
; RV64I-NEXT: .LBB227_2: # %atomicrmw.start
@@ -20333,7 +20333,7 @@ define i64 @atomicrmw_min_i64_acq_rel(ptr %a, i64 %b) nounwind {
; RV32I-NEXT: li a4, 4
; RV32I-NEXT: li a5, 2
; RV32I-NEXT: mv a0, s0
-; RV32I-NEXT: call __atomic_compare_exchange_8@plt
+; RV32I-NEXT: call __atomic_compare_exchange_8
; RV32I-NEXT: lw a5, 12(sp)
; RV32I-NEXT: lw a4, 8(sp)
; RV32I-NEXT: bnez a0, .LBB228_7
@@ -20387,7 +20387,7 @@ define i64 @atomicrmw_min_i64_acq_rel(ptr %a, i64 %b) nounwind {
; RV32IA-NEXT: li a4, 4
; RV32IA-NEXT: li a5, 2
; RV32IA-NEXT: mv a0, s0
-; RV32IA-NEXT: call __atomic_compare_exchange_8@plt
+; RV32IA-NEXT: call __atomic_compare_exchange_8
; RV32IA-NEXT: lw a5, 12(sp)
; RV32IA-NEXT: lw a4, 8(sp)
; RV32IA-NEXT: bnez a0, .LBB228_7
@@ -20437,7 +20437,7 @@ define i64 @atomicrmw_min_i64_acq_rel(ptr %a, i64 %b) nounwind {
; RV64I-NEXT: li a3, 4
; RV64I-NEXT: li a4, 2
; RV64I-NEXT: mv a0, s0
-; RV64I-NEXT: call __atomic_compare_exchange_8@plt
+; RV64I-NEXT: call __atomic_compare_exchange_8
; RV64I-NEXT: ld a3, 0(sp)
; RV64I-NEXT: bnez a0, .LBB228_4
; RV64I-NEXT: .LBB228_2: # %atomicrmw.start
@@ -20491,7 +20491,7 @@ define i64 @atomicrmw_min_i64_seq_cst(ptr %a, i64 %b) nounwind {
; RV32I-NEXT: li a4, 5
; RV32I-NEXT: li a5, 5
; RV32I-NEXT: mv a0, s0
-; RV32I-NEXT: call __atomic_compare_exchange_8@plt
+; RV32I-NEXT: call __atomic_compare_exchange_8
; RV32I-NEXT: lw a5, 12(sp)
; RV32I-NEXT: lw a4, 8(sp)
; RV32I-NEXT: bnez a0, .LBB229_7
@@ -20545,7 +20545,7 @@ define i64 @atomicrmw_min_i64_seq_cst(ptr %a, i64 %b) nounwind {
; RV32IA-NEXT: li a4, 5
; RV32IA-NEXT: li a5, 5
; RV32IA-NEXT: mv a0, s0
-; RV32IA-NEXT: call __atomic_compare_exchange_8@plt
+; RV32IA-NEXT: call __atomic_compare_exchange_8
; RV32IA-NEXT: lw a5, 12(sp)
; RV32IA-NEXT: lw a4, 8(sp)
; RV32IA-NEXT: bnez a0, .LBB229_7
@@ -20595,7 +20595,7 @@ define i64 @atomicrmw_min_i64_seq_cst(ptr %a, i64 %b) nounwind {
; RV64I-NEXT: li a3, 5
; RV64I-NEXT: li a4, 5
; RV64I-NEXT: mv a0, s0
-; RV64I-NEXT: call __atomic_compare_exchange_8@plt
+; RV64I-NEXT: call __atomic_compare_exchange_8
; RV64I-NEXT: ld a3, 0(sp)
; RV64I-NEXT: bnez a0, .LBB229_4
; RV64I-NEXT: .LBB229_2: # %atomicrmw.start
@@ -20649,7 +20649,7 @@ define i64 @atomicrmw_umax_i64_monotonic(ptr %a, i64 %b) nounwind {
; RV32I-NEXT: mv a0, s0
; RV32I-NEXT: li a4, 0
; RV32I-NEXT: li a5, 0
-; RV32I-NEXT: call __atomic_compare_exchange_8@plt
+; RV32I-NEXT: call __atomic_compare_exchange_8
; RV32I-NEXT: lw a5, 12(sp)
; RV32I-NEXT: lw a4, 8(sp)
; RV32I-NEXT: bnez a0, .LBB230_7
@@ -20703,7 +20703,7 @@ define i64 @atomicrmw_umax_i64_monotonic(ptr %a, i64 %b) nounwind {
; RV32IA-NEXT: mv a0, s0
; RV32IA-NEXT: li a4, 0
; RV32IA-NEXT: li a5, 0
-; RV32IA-NEXT: call __atomic_compare_exchange_8@plt
+; RV32IA-NEXT: call __atomic_compare_exchange_8
; RV32IA-NEXT: lw a5, 12(sp)
; RV32IA-NEXT: lw a4, 8(sp)
; RV32IA-NEXT: bnez a0, .LBB230_7
@@ -20753,7 +20753,7 @@ define i64 @atomicrmw_umax_i64_monotonic(ptr %a, i64 %b) nounwind {
; RV64I-NEXT: mv a0, s0
; RV64I-NEXT: li a3, 0
; RV64I-NEXT: li a4, 0
-; RV64I-NEXT: call __atomic_compare_exchange_8@plt
+; RV64I-NEXT: call __atomic_compare_exchange_8
; RV64I-NEXT: ld a3, 0(sp)
; RV64I-NEXT: bnez a0, .LBB230_4
; RV64I-NEXT: .LBB230_2: # %atomicrmw.start
@@ -20802,7 +20802,7 @@ define i64 @atomicrmw_umax_i64_acquire(ptr %a, i64 %b) nounwind {
; RV32I-NEXT: li a4, 2
; RV32I-NEXT: li a5, 2
; RV32I-NEXT: mv a0, s0
-; RV32I-NEXT: call __atomic_compare_exchange_8@plt
+; RV32I-NEXT: call __atomic_compare_exchange_8
; RV32I-NEXT: lw a5, 12(sp)
; RV32I-NEXT: lw a4, 8(sp)
; RV32I-NEXT: bnez a0, .LBB231_7
@@ -20856,7 +20856,7 @@ define i64 @atomicrmw_umax_i64_acquire(ptr %a, i64 %b) nounwind {
; RV32IA-NEXT: li a4, 2
; RV32IA-NEXT: li a5, 2
; RV32IA-NEXT: mv a0, s0
-; RV32IA-NEXT: call __atomic_compare_exchange_8@plt
+; RV32IA-NEXT: call __atomic_compare_exchange_8
; RV32IA-NEXT: lw a5, 12(sp)
; RV32IA-NEXT: lw a4, 8(sp)
; RV32IA-NEXT: bnez a0, .LBB231_7
@@ -20906,7 +20906,7 @@ define i64 @atomicrmw_umax_i64_acquire(ptr %a, i64 %b) nounwind {
; RV64I-NEXT: li a3, 2
; RV64I-NEXT: li a4, 2
; RV64I-NEXT: mv a0, s0
-; RV64I-NEXT: call __atomic_compare_exchange_8@plt
+; RV64I-NEXT: call __atomic_compare_exchange_8
; RV64I-NEXT: ld a3, 0(sp)
; RV64I-NEXT: bnez a0, .LBB231_4
; RV64I-NEXT: .LBB231_2: # %atomicrmw.start
@@ -20960,7 +20960,7 @@ define i64 @atomicrmw_umax_i64_release(ptr %a, i64 %b) nounwind {
; RV32I-NEXT: li a4, 3
; RV32I-NEXT: mv a0, s0
; RV32I-NEXT: li a5, 0
-; RV32I-NEXT: call __atomic_compare_exchange_8@plt
+; RV32I-NEXT: call __atomic_compare_exchange_8
; RV32I-NEXT: lw a5, 12(sp)
; RV32I-NEXT: lw a4, 8(sp)
; RV32I-NEXT: bnez a0, .LBB232_7
@@ -21014,7 +21014,7 @@ define i64 @atomicrmw_umax_i64_release(ptr %a, i64 %b) nounwind {
; RV32IA-NEXT: li a4, 3
; RV32IA-NEXT: mv a0, s0
; RV32IA-NEXT: li a5, 0
-; RV32IA-NEXT: call __atomic_compare_exchange_8@plt
+; RV32IA-NEXT: call __atomic_compare_exchange_8
; RV32IA-NEXT: lw a5, 12(sp)
; RV32IA-NEXT: lw a4, 8(sp)
; RV32IA-NEXT: bnez a0, .LBB232_7
@@ -21064,7 +21064,7 @@ define i64 @atomicrmw_umax_i64_release(ptr %a, i64 %b) nounwind {
; RV64I-NEXT: li a3, 3
; RV64I-NEXT: mv a0, s0
; RV64I-NEXT: li a4, 0
-; RV64I-NEXT: call __atomic_compare_exchange_8@plt
+; RV64I-NEXT: call __atomic_compare_exchange_8
; RV64I-NEXT: ld a3, 0(sp)
; RV64I-NEXT: bnez a0, .LBB232_4
; RV64I-NEXT: .LBB232_2: # %atomicrmw.start
@@ -21118,7 +21118,7 @@ define i64 @atomicrmw_umax_i64_acq_rel(ptr %a, i64 %b) nounwind {
; RV32I-NEXT: li a4, 4
; RV32I-NEXT: li a5, 2
; RV32I-NEXT: mv a0, s0
-; RV32I-NEXT: call __atomic_compare_exchange_8@plt
+; RV32I-NEXT: call __atomic_compare_exchange_8
; RV32I-NEXT: lw a5, 12(sp)
; RV32I-NEXT: lw a4, 8(sp)
; RV32I-NEXT: bnez a0, .LBB233_7
@@ -21172,7 +21172,7 @@ define i64 @atomicrmw_umax_i64_acq_rel(ptr %a, i64 %b) nounwind {
; RV32IA-NEXT: li a4, 4
; RV32IA-NEXT: li a5, 2
; RV32IA-NEXT: mv a0, s0
-; RV32IA-NEXT: call __atomic_compare_exchange_8@plt
+; RV32IA-NEXT: call __atomic_compare_exchange_8
; RV32IA-NEXT: lw a5, 12(sp)
; RV32IA-NEXT: lw a4, 8(sp)
; RV32IA-NEXT: bnez a0, .LBB233_7
@@ -21222,7 +21222,7 @@ define i64 @atomicrmw_umax_i64_acq_rel(ptr %a, i64 %b) nounwind {
; RV64I-NEXT: li a3, 4
; RV64I-NEXT: li a4, 2
; RV64I-NEXT: mv a0, s0
-; RV64I-NEXT: call __atomic_compare_exchange_8@plt
+; RV64I-NEXT: call __atomic_compare_exchange_8
; RV64I-NEXT: ld a3, 0(sp)
; RV64I-NEXT: bnez a0, .LBB233_4
; RV64I-NEXT: .LBB233_2: # %atomicrmw.start
@@ -21276,7 +21276,7 @@ define i64 @atomicrmw_umax_i64_seq_cst(ptr %a, i64 %b) nounwind {
; RV32I-NEXT: li a4, 5
; RV32I-NEXT: li a5, 5
; RV32I-NEXT: mv a0, s0
-; RV32I-NEXT: call __atomic_compare_exchange_8@plt
+; RV32I-NEXT: call __atomic_compare_exchange_8
; RV32I-NEXT: lw a5, 12(sp)
; RV32I-NEXT: lw a4, 8(sp)
; RV32I-NEXT: bnez a0, .LBB234_7
@@ -21330,7 +21330,7 @@ define i64 @atomicrmw_umax_i64_seq_cst(ptr %a, i64 %b) nounwind {
; RV32IA-NEXT: li a4, 5
; RV32IA-NEXT: li a5, 5
; RV32IA-NEXT: mv a0, s0
-; RV32IA-NEXT: call __atomic_compare_exchange_8@plt
+; RV32IA-NEXT: call __atomic_compare_exchange_8
; RV32IA-NEXT: lw a5, 12(sp)
; RV32IA-NEXT: lw a4, 8(sp)
; RV32IA-NEXT: bnez a0, .LBB234_7
@@ -21380,7 +21380,7 @@ define i64 @atomicrmw_umax_i64_seq_cst(ptr %a, i64 %b) nounwind {
; RV64I-NEXT: li a3, 5
; RV64I-NEXT: li a4, 5
; RV64I-NEXT: mv a0, s0
-; RV64I-NEXT: call __atomic_compare_exchange_8@plt
+; RV64I-NEXT: call __atomic_compare_exchange_8
; RV64I-NEXT: ld a3, 0(sp)
; RV64I-NEXT: bnez a0, .LBB234_4
; RV64I-NEXT: .LBB234_2: # %atomicrmw.start
@@ -21434,7 +21434,7 @@ define i64 @atomicrmw_umin_i64_monotonic(ptr %a, i64 %b) nounwind {
; RV32I-NEXT: mv a0, s0
; RV32I-NEXT: li a4, 0
; RV32I-NEXT: li a5, 0
-; RV32I-NEXT: call __atomic_compare_exchange_8@plt
+; RV32I-NEXT: call __atomic_compare_exchange_8
; RV32I-NEXT: lw a5, 12(sp)
; RV32I-NEXT: lw a4, 8(sp)
; RV32I-NEXT: bnez a0, .LBB235_7
@@ -21488,7 +21488,7 @@ define i64 @atomicrmw_umin_i64_monotonic(ptr %a, i64 %b) nounwind {
; RV32IA-NEXT: mv a0, s0
; RV32IA-NEXT: li a4, 0
; RV32IA-NEXT: li a5, 0
-; RV32IA-NEXT: call __atomic_compare_exchange_8@plt
+; RV32IA-NEXT: call __atomic_compare_exchange_8
; RV32IA-NEXT: lw a5, 12(sp)
; RV32IA-NEXT: lw a4, 8(sp)
; RV32IA-NEXT: bnez a0, .LBB235_7
@@ -21538,7 +21538,7 @@ define i64 @atomicrmw_umin_i64_monotonic(ptr %a, i64 %b) nounwind {
; RV64I-NEXT: mv a0, s0
; RV64I-NEXT: li a3, 0
; RV64I-NEXT: li a4, 0
-; RV64I-NEXT: call __atomic_compare_exchange_8@plt
+; RV64I-NEXT: call __atomic_compare_exchange_8
; RV64I-NEXT: ld a3, 0(sp)
; RV64I-NEXT: bnez a0, .LBB235_4
; RV64I-NEXT: .LBB235_2: # %atomicrmw.start
@@ -21587,7 +21587,7 @@ define i64 @atomicrmw_umin_i64_acquire(ptr %a, i64 %b) nounwind {
; RV32I-NEXT: li a4, 2
; RV32I-NEXT: li a5, 2
; RV32I-NEXT: mv a0, s0
-; RV32I-NEXT: call __atomic_compare_exchange_8@plt
+; RV32I-NEXT: call __atomic_compare_exchange_8
; RV32I-NEXT: lw a5, 12(sp)
; RV32I-NEXT: lw a4, 8(sp)
; RV32I-NEXT: bnez a0, .LBB236_7
@@ -21641,7 +21641,7 @@ define i64 @atomicrmw_umin_i64_acquire(ptr %a, i64 %b) nounwind {
; RV32IA-NEXT: li a4, 2
; RV32IA-NEXT: li a5, 2
; RV32IA-NEXT: mv a0, s0
-; RV32IA-NEXT: call __atomic_compare_exchange_8@plt
+; RV32IA-NEXT: call __atomic_compare_exchange_8
; RV32IA-NEXT: lw a5, 12(sp)
; RV32IA-NEXT: lw a4, 8(sp)
; RV32IA-NEXT: bnez a0, .LBB236_7
@@ -21691,7 +21691,7 @@ define i64 @atomicrmw_umin_i64_acquire(ptr %a, i64 %b) nounwind {
; RV64I-NEXT: li a3, 2
; RV64I-NEXT: li a4, 2
; RV64I-NEXT: mv a0, s0
-; RV64I-NEXT: call __atomic_compare_exchange_8@plt
+; RV64I-NEXT: call __atomic_compare_exchange_8
; RV64I-NEXT: ld a3, 0(sp)
; RV64I-NEXT: bnez a0, .LBB236_4
; RV64I-NEXT: .LBB236_2: # %atomicrmw.start
@@ -21745,7 +21745,7 @@ define i64 @atomicrmw_umin_i64_release(ptr %a, i64 %b) nounwind {
; RV32I-NEXT: li a4, 3
; RV32I-NEXT: mv a0, s0
; RV32I-NEXT: li a5, 0
-; RV32I-NEXT: call __atomic_compare_exchange_8@plt
+; RV32I-NEXT: call __atomic_compare_exchange_8
; RV32I-NEXT: lw a5, 12(sp)
; RV32I-NEXT: lw a4, 8(sp)
; RV32I-NEXT: bnez a0, .LBB237_7
@@ -21799,7 +21799,7 @@ define i64 @atomicrmw_umin_i64_release(ptr %a, i64 %b) nounwind {
; RV32IA-NEXT: li a4, 3
; RV32IA-NEXT: mv a0, s0
; RV32IA-NEXT: li a5, 0
-; RV32IA-NEXT: call __atomic_compare_exchange_8@plt
+; RV32IA-NEXT: call __atomic_compare_exchange_8
; RV32IA-NEXT: lw a5, 12(sp)
; RV32IA-NEXT: lw a4, 8(sp)
; RV32IA-NEXT: bnez a0, .LBB237_7
@@ -21849,7 +21849,7 @@ define i64 @atomicrmw_umin_i64_release(ptr %a, i64 %b) nounwind {
; RV64I-NEXT: li a3, 3
; RV64I-NEXT: mv a0, s0
; RV64I-NEXT: li a4, 0
-; RV64I-NEXT: call __atomic_compare_exchange_8@plt
+; RV64I-NEXT: call __atomic_compare_exchange_8
; RV64I-NEXT: ld a3, 0(sp)
; RV64I-NEXT: bnez a0, .LBB237_4
; RV64I-NEXT: .LBB237_2: # %atomicrmw.start
@@ -21903,7 +21903,7 @@ define i64 @atomicrmw_umin_i64_acq_rel(ptr %a, i64 %b) nounwind {
; RV32I-NEXT: li a4, 4
; RV32I-NEXT: li a5, 2
; RV32I-NEXT: mv a0, s0
-; RV32I-NEXT: call __atomic_compare_exchange_8@plt
+; RV32I-NEXT: call __atomic_compare_exchange_8
; RV32I-NEXT: lw a5, 12(sp)
; RV32I-NEXT: lw a4, 8(sp)
; RV32I-NEXT: bnez a0, .LBB238_7
@@ -21957,7 +21957,7 @@ define i64 @atomicrmw_umin_i64_acq_rel(ptr %a, i64 %b) nounwind {
; RV32IA-NEXT: li a4, 4
; RV32IA-NEXT: li a5, 2
; RV32IA-NEXT: mv a0, s0
-; RV32IA-NEXT: call __atomic_compare_exchange_8@plt
+; RV32IA-NEXT: call __atomic_compare_exchange_8
; RV32IA-NEXT: lw a5, 12(sp)
; RV32IA-NEXT: lw a4, 8(sp)
; RV32IA-NEXT: bnez a0, .LBB238_7
@@ -22007,7 +22007,7 @@ define i64 @atomicrmw_umin_i64_acq_rel(ptr %a, i64 %b) nounwind {
; RV64I-NEXT: li a3, 4
; RV64I-NEXT: li a4, 2
; RV64I-NEXT: mv a0, s0
-; RV64I-NEXT: call __atomic_compare_exchange_8@plt
+; RV64I-NEXT: call __atomic_compare_exchange_8
; RV64I-NEXT: ld a3, 0(sp)
; RV64I-NEXT: bnez a0, .LBB238_4
; RV64I-NEXT: .LBB238_2: # %atomicrmw.start
@@ -22061,7 +22061,7 @@ define i64 @atomicrmw_umin_i64_seq_cst(ptr %a, i64 %b) nounwind {
; RV32I-NEXT: li a4, 5
; RV32I-NEXT: li a5, 5
; RV32I-NEXT: mv a0, s0
-; RV32I-NEXT: call __atomic_compare_exchange_8@plt
+; RV32I-NEXT: call __atomic_compare_exchange_8
; RV32I-NEXT: lw a5, 12(sp)
; RV32I-NEXT: lw a4, 8(sp)
; RV32I-NEXT: bnez a0, .LBB239_7
@@ -22115,7 +22115,7 @@ define i64 @atomicrmw_umin_i64_seq_cst(ptr %a, i64 %b) nounwind {
; RV32IA-NEXT: li a4, 5
; RV32IA-NEXT: li a5, 5
; RV32IA-NEXT: mv a0, s0
-; RV32IA-NEXT: call __atomic_compare_exchange_8@plt
+; RV32IA-NEXT: call __atomic_compare_exchange_8
; RV32IA-NEXT: lw a5, 12(sp)
; RV32IA-NEXT: lw a4, 8(sp)
; RV32IA-NEXT: bnez a0, .LBB239_7
@@ -22165,7 +22165,7 @@ define i64 @atomicrmw_umin_i64_seq_cst(ptr %a, i64 %b) nounwind {
; RV64I-NEXT: li a3, 5
; RV64I-NEXT: li a4, 5
; RV64I-NEXT: mv a0, s0
-; RV64I-NEXT: call __atomic_compare_exchange_8@plt
+; RV64I-NEXT: call __atomic_compare_exchange_8
; RV64I-NEXT: ld a3, 0(sp)
; RV64I-NEXT: bnez a0, .LBB239_4
; RV64I-NEXT: .LBB239_2: # %atomicrmw.start
diff --git a/llvm/test/CodeGen/RISCV/atomic-signext.ll b/llvm/test/CodeGen/RISCV/atomic-signext.ll
index 2739fde250ee..ef0c27f32801 100644
--- a/llvm/test/CodeGen/RISCV/atomic-signext.ll
+++ b/llvm/test/CodeGen/RISCV/atomic-signext.ll
@@ -14,7 +14,7 @@ define signext i8 @atomic_load_i8_unordered(ptr %a) nounwind {
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT: li a1, 0
-; RV32I-NEXT: call __atomic_load_1@plt
+; RV32I-NEXT: call __atomic_load_1
; RV32I-NEXT: slli a0, a0, 24
; RV32I-NEXT: srai a0, a0, 24
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
@@ -31,7 +31,7 @@ define signext i8 @atomic_load_i8_unordered(ptr %a) nounwind {
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT: li a1, 0
-; RV64I-NEXT: call __atomic_load_1@plt
+; RV64I-NEXT: call __atomic_load_1
; RV64I-NEXT: slli a0, a0, 56
; RV64I-NEXT: srai a0, a0, 56
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
@@ -52,7 +52,7 @@ define signext i16 @atomic_load_i16_unordered(ptr %a) nounwind {
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT: li a1, 0
-; RV32I-NEXT: call __atomic_load_2@plt
+; RV32I-NEXT: call __atomic_load_2
; RV32I-NEXT: slli a0, a0, 16
; RV32I-NEXT: srai a0, a0, 16
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
@@ -69,7 +69,7 @@ define signext i16 @atomic_load_i16_unordered(ptr %a) nounwind {
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT: li a1, 0
-; RV64I-NEXT: call __atomic_load_2@plt
+; RV64I-NEXT: call __atomic_load_2
; RV64I-NEXT: slli a0, a0, 48
; RV64I-NEXT: srai a0, a0, 48
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
@@ -90,7 +90,7 @@ define signext i32 @atomic_load_i32_unordered(ptr %a) nounwind {
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT: li a1, 0
-; RV32I-NEXT: call __atomic_load_4@plt
+; RV32I-NEXT: call __atomic_load_4
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
@@ -105,7 +105,7 @@ define signext i32 @atomic_load_i32_unordered(ptr %a) nounwind {
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT: li a1, 0
-; RV64I-NEXT: call __atomic_load_4@plt
+; RV64I-NEXT: call __atomic_load_4
; RV64I-NEXT: sext.w a0, a0
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
@@ -126,7 +126,7 @@ define signext i8 @atomicrmw_xchg_i8_monotonic(ptr %a, i8 %b) nounwind {
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT: li a2, 0
-; RV32I-NEXT: call __atomic_exchange_1@plt
+; RV32I-NEXT: call __atomic_exchange_1
; RV32I-NEXT: slli a0, a0, 24
; RV32I-NEXT: srai a0, a0, 24
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
@@ -160,7 +160,7 @@ define signext i8 @atomicrmw_xchg_i8_monotonic(ptr %a, i8 %b) nounwind {
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT: li a2, 0
-; RV64I-NEXT: call __atomic_exchange_1@plt
+; RV64I-NEXT: call __atomic_exchange_1
; RV64I-NEXT: slli a0, a0, 56
; RV64I-NEXT: srai a0, a0, 56
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
@@ -198,7 +198,7 @@ define signext i8 @atomicrmw_add_i8_monotonic(ptr %a, i8 %b) nounwind {
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT: li a2, 0
-; RV32I-NEXT: call __atomic_fetch_add_1@plt
+; RV32I-NEXT: call __atomic_fetch_add_1
; RV32I-NEXT: slli a0, a0, 24
; RV32I-NEXT: srai a0, a0, 24
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
@@ -232,7 +232,7 @@ define signext i8 @atomicrmw_add_i8_monotonic(ptr %a, i8 %b) nounwind {
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT: li a2, 0
-; RV64I-NEXT: call __atomic_fetch_add_1@plt
+; RV64I-NEXT: call __atomic_fetch_add_1
; RV64I-NEXT: slli a0, a0, 56
; RV64I-NEXT: srai a0, a0, 56
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
@@ -270,7 +270,7 @@ define signext i8 @atomicrmw_sub_i8_monotonic(ptr %a, i8 %b) nounwind {
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT: li a2, 0
-; RV32I-NEXT: call __atomic_fetch_sub_1@plt
+; RV32I-NEXT: call __atomic_fetch_sub_1
; RV32I-NEXT: slli a0, a0, 24
; RV32I-NEXT: srai a0, a0, 24
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
@@ -304,7 +304,7 @@ define signext i8 @atomicrmw_sub_i8_monotonic(ptr %a, i8 %b) nounwind {
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT: li a2, 0
-; RV64I-NEXT: call __atomic_fetch_sub_1@plt
+; RV64I-NEXT: call __atomic_fetch_sub_1
; RV64I-NEXT: slli a0, a0, 56
; RV64I-NEXT: srai a0, a0, 56
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
@@ -342,7 +342,7 @@ define signext i8 @atomicrmw_and_i8_monotonic(ptr %a, i8 %b) nounwind {
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT: li a2, 0
-; RV32I-NEXT: call __atomic_fetch_and_1@plt
+; RV32I-NEXT: call __atomic_fetch_and_1
; RV32I-NEXT: slli a0, a0, 24
; RV32I-NEXT: srai a0, a0, 24
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
@@ -370,7 +370,7 @@ define signext i8 @atomicrmw_and_i8_monotonic(ptr %a, i8 %b) nounwind {
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT: li a2, 0
-; RV64I-NEXT: call __atomic_fetch_and_1@plt
+; RV64I-NEXT: call __atomic_fetch_and_1
; RV64I-NEXT: slli a0, a0, 56
; RV64I-NEXT: srai a0, a0, 56
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
@@ -402,7 +402,7 @@ define signext i8 @atomicrmw_nand_i8_monotonic(ptr %a, i8 %b) nounwind {
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT: li a2, 0
-; RV32I-NEXT: call __atomic_fetch_nand_1@plt
+; RV32I-NEXT: call __atomic_fetch_nand_1
; RV32I-NEXT: slli a0, a0, 24
; RV32I-NEXT: srai a0, a0, 24
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
@@ -437,7 +437,7 @@ define signext i8 @atomicrmw_nand_i8_monotonic(ptr %a, i8 %b) nounwind {
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT: li a2, 0
-; RV64I-NEXT: call __atomic_fetch_nand_1@plt
+; RV64I-NEXT: call __atomic_fetch_nand_1
; RV64I-NEXT: slli a0, a0, 56
; RV64I-NEXT: srai a0, a0, 56
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
@@ -476,7 +476,7 @@ define signext i8 @atomicrmw_or_i8_monotonic(ptr %a, i8 %b) nounwind {
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT: li a2, 0
-; RV32I-NEXT: call __atomic_fetch_or_1@plt
+; RV32I-NEXT: call __atomic_fetch_or_1
; RV32I-NEXT: slli a0, a0, 24
; RV32I-NEXT: srai a0, a0, 24
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
@@ -500,7 +500,7 @@ define signext i8 @atomicrmw_or_i8_monotonic(ptr %a, i8 %b) nounwind {
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT: li a2, 0
-; RV64I-NEXT: call __atomic_fetch_or_1@plt
+; RV64I-NEXT: call __atomic_fetch_or_1
; RV64I-NEXT: slli a0, a0, 56
; RV64I-NEXT: srai a0, a0, 56
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
@@ -528,7 +528,7 @@ define signext i8 @atomicrmw_xor_i8_monotonic(ptr %a, i8 %b) nounwind {
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT: li a2, 0
-; RV32I-NEXT: call __atomic_fetch_xor_1@plt
+; RV32I-NEXT: call __atomic_fetch_xor_1
; RV32I-NEXT: slli a0, a0, 24
; RV32I-NEXT: srai a0, a0, 24
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
@@ -552,7 +552,7 @@ define signext i8 @atomicrmw_xor_i8_monotonic(ptr %a, i8 %b) nounwind {
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT: li a2, 0
-; RV64I-NEXT: call __atomic_fetch_xor_1@plt
+; RV64I-NEXT: call __atomic_fetch_xor_1
; RV64I-NEXT: slli a0, a0, 56
; RV64I-NEXT: srai a0, a0, 56
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
@@ -595,7 +595,7 @@ define signext i8 @atomicrmw_max_i8_monotonic(ptr %a, i8 %b) nounwind {
; RV32I-NEXT: mv a0, s0
; RV32I-NEXT: li a3, 0
; RV32I-NEXT: li a4, 0
-; RV32I-NEXT: call __atomic_compare_exchange_1@plt
+; RV32I-NEXT: call __atomic_compare_exchange_1
; RV32I-NEXT: lbu a3, 15(sp)
; RV32I-NEXT: bnez a0, .LBB10_4
; RV32I-NEXT: .LBB10_2: # %atomicrmw.start
@@ -669,7 +669,7 @@ define signext i8 @atomicrmw_max_i8_monotonic(ptr %a, i8 %b) nounwind {
; RV64I-NEXT: mv a0, s0
; RV64I-NEXT: li a3, 0
; RV64I-NEXT: li a4, 0
-; RV64I-NEXT: call __atomic_compare_exchange_1@plt
+; RV64I-NEXT: call __atomic_compare_exchange_1
; RV64I-NEXT: lbu a3, 15(sp)
; RV64I-NEXT: bnez a0, .LBB10_4
; RV64I-NEXT: .LBB10_2: # %atomicrmw.start
@@ -747,7 +747,7 @@ define signext i8 @atomicrmw_min_i8_monotonic(ptr %a, i8 %b) nounwind {
; RV32I-NEXT: mv a0, s0
; RV32I-NEXT: li a3, 0
; RV32I-NEXT: li a4, 0
-; RV32I-NEXT: call __atomic_compare_exchange_1@plt
+; RV32I-NEXT: call __atomic_compare_exchange_1
; RV32I-NEXT: lbu a3, 15(sp)
; RV32I-NEXT: bnez a0, .LBB11_4
; RV32I-NEXT: .LBB11_2: # %atomicrmw.start
@@ -821,7 +821,7 @@ define signext i8 @atomicrmw_min_i8_monotonic(ptr %a, i8 %b) nounwind {
; RV64I-NEXT: mv a0, s0
; RV64I-NEXT: li a3, 0
; RV64I-NEXT: li a4, 0
-; RV64I-NEXT: call __atomic_compare_exchange_1@plt
+; RV64I-NEXT: call __atomic_compare_exchange_1
; RV64I-NEXT: lbu a3, 15(sp)
; RV64I-NEXT: bnez a0, .LBB11_4
; RV64I-NEXT: .LBB11_2: # %atomicrmw.start
@@ -898,7 +898,7 @@ define signext i8 @atomicrmw_umax_i8_monotonic(ptr %a, i8 %b) nounwind {
; RV32I-NEXT: mv a0, s0
; RV32I-NEXT: li a3, 0
; RV32I-NEXT: li a4, 0
-; RV32I-NEXT: call __atomic_compare_exchange_1@plt
+; RV32I-NEXT: call __atomic_compare_exchange_1
; RV32I-NEXT: lbu a3, 15(sp)
; RV32I-NEXT: bnez a0, .LBB12_4
; RV32I-NEXT: .LBB12_2: # %atomicrmw.start
@@ -965,7 +965,7 @@ define signext i8 @atomicrmw_umax_i8_monotonic(ptr %a, i8 %b) nounwind {
; RV64I-NEXT: mv a0, s0
; RV64I-NEXT: li a3, 0
; RV64I-NEXT: li a4, 0
-; RV64I-NEXT: call __atomic_compare_exchange_1@plt
+; RV64I-NEXT: call __atomic_compare_exchange_1
; RV64I-NEXT: lbu a3, 15(sp)
; RV64I-NEXT: bnez a0, .LBB12_4
; RV64I-NEXT: .LBB12_2: # %atomicrmw.start
@@ -1036,7 +1036,7 @@ define signext i8 @atomicrmw_umin_i8_monotonic(ptr %a, i8 %b) nounwind {
; RV32I-NEXT: mv a0, s0
; RV32I-NEXT: li a3, 0
; RV32I-NEXT: li a4, 0
-; RV32I-NEXT: call __atomic_compare_exchange_1@plt
+; RV32I-NEXT: call __atomic_compare_exchange_1
; RV32I-NEXT: lbu a3, 15(sp)
; RV32I-NEXT: bnez a0, .LBB13_4
; RV32I-NEXT: .LBB13_2: # %atomicrmw.start
@@ -1103,7 +1103,7 @@ define signext i8 @atomicrmw_umin_i8_monotonic(ptr %a, i8 %b) nounwind {
; RV64I-NEXT: mv a0, s0
; RV64I-NEXT: li a3, 0
; RV64I-NEXT: li a4, 0
-; RV64I-NEXT: call __atomic_compare_exchange_1@plt
+; RV64I-NEXT: call __atomic_compare_exchange_1
; RV64I-NEXT: lbu a3, 15(sp)
; RV64I-NEXT: bnez a0, .LBB13_4
; RV64I-NEXT: .LBB13_2: # %atomicrmw.start
@@ -1160,7 +1160,7 @@ define signext i16 @atomicrmw_xchg_i16_monotonic(ptr %a, i16 %b) nounwind {
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT: li a2, 0
-; RV32I-NEXT: call __atomic_exchange_2@plt
+; RV32I-NEXT: call __atomic_exchange_2
; RV32I-NEXT: slli a0, a0, 16
; RV32I-NEXT: srai a0, a0, 16
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
@@ -1195,7 +1195,7 @@ define signext i16 @atomicrmw_xchg_i16_monotonic(ptr %a, i16 %b) nounwind {
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT: li a2, 0
-; RV64I-NEXT: call __atomic_exchange_2@plt
+; RV64I-NEXT: call __atomic_exchange_2
; RV64I-NEXT: slli a0, a0, 48
; RV64I-NEXT: srai a0, a0, 48
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
@@ -1234,7 +1234,7 @@ define signext i16 @atomicrmw_add_i16_monotonic(ptr %a, i16 %b) nounwind {
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT: li a2, 0
-; RV32I-NEXT: call __atomic_fetch_add_2@plt
+; RV32I-NEXT: call __atomic_fetch_add_2
; RV32I-NEXT: slli a0, a0, 16
; RV32I-NEXT: srai a0, a0, 16
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
@@ -1269,7 +1269,7 @@ define signext i16 @atomicrmw_add_i16_monotonic(ptr %a, i16 %b) nounwind {
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT: li a2, 0
-; RV64I-NEXT: call __atomic_fetch_add_2@plt
+; RV64I-NEXT: call __atomic_fetch_add_2
; RV64I-NEXT: slli a0, a0, 48
; RV64I-NEXT: srai a0, a0, 48
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
@@ -1308,7 +1308,7 @@ define signext i16 @atomicrmw_sub_i16_monotonic(ptr %a, i16 %b) nounwind {
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT: li a2, 0
-; RV32I-NEXT: call __atomic_fetch_sub_2@plt
+; RV32I-NEXT: call __atomic_fetch_sub_2
; RV32I-NEXT: slli a0, a0, 16
; RV32I-NEXT: srai a0, a0, 16
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
@@ -1343,7 +1343,7 @@ define signext i16 @atomicrmw_sub_i16_monotonic(ptr %a, i16 %b) nounwind {
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT: li a2, 0
-; RV64I-NEXT: call __atomic_fetch_sub_2@plt
+; RV64I-NEXT: call __atomic_fetch_sub_2
; RV64I-NEXT: slli a0, a0, 48
; RV64I-NEXT: srai a0, a0, 48
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
@@ -1382,7 +1382,7 @@ define signext i16 @atomicrmw_and_i16_monotonic(ptr %a, i16 %b) nounwind {
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT: li a2, 0
-; RV32I-NEXT: call __atomic_fetch_and_2@plt
+; RV32I-NEXT: call __atomic_fetch_and_2
; RV32I-NEXT: slli a0, a0, 16
; RV32I-NEXT: srai a0, a0, 16
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
@@ -1411,7 +1411,7 @@ define signext i16 @atomicrmw_and_i16_monotonic(ptr %a, i16 %b) nounwind {
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT: li a2, 0
-; RV64I-NEXT: call __atomic_fetch_and_2@plt
+; RV64I-NEXT: call __atomic_fetch_and_2
; RV64I-NEXT: slli a0, a0, 48
; RV64I-NEXT: srai a0, a0, 48
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
@@ -1444,7 +1444,7 @@ define signext i16 @atomicrmw_nand_i16_monotonic(ptr %a, i16 %b) nounwind {
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT: li a2, 0
-; RV32I-NEXT: call __atomic_fetch_nand_2@plt
+; RV32I-NEXT: call __atomic_fetch_nand_2
; RV32I-NEXT: slli a0, a0, 16
; RV32I-NEXT: srai a0, a0, 16
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
@@ -1480,7 +1480,7 @@ define signext i16 @atomicrmw_nand_i16_monotonic(ptr %a, i16 %b) nounwind {
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT: li a2, 0
-; RV64I-NEXT: call __atomic_fetch_nand_2@plt
+; RV64I-NEXT: call __atomic_fetch_nand_2
; RV64I-NEXT: slli a0, a0, 48
; RV64I-NEXT: srai a0, a0, 48
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
@@ -1520,7 +1520,7 @@ define signext i16 @atomicrmw_or_i16_monotonic(ptr %a, i16 %b) nounwind {
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT: li a2, 0
-; RV32I-NEXT: call __atomic_fetch_or_2@plt
+; RV32I-NEXT: call __atomic_fetch_or_2
; RV32I-NEXT: slli a0, a0, 16
; RV32I-NEXT: srai a0, a0, 16
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
@@ -1545,7 +1545,7 @@ define signext i16 @atomicrmw_or_i16_monotonic(ptr %a, i16 %b) nounwind {
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT: li a2, 0
-; RV64I-NEXT: call __atomic_fetch_or_2@plt
+; RV64I-NEXT: call __atomic_fetch_or_2
; RV64I-NEXT: slli a0, a0, 48
; RV64I-NEXT: srai a0, a0, 48
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
@@ -1574,7 +1574,7 @@ define signext i16 @atomicrmw_xor_i16_monotonic(ptr %a, i16 %b) nounwind {
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT: li a2, 0
-; RV32I-NEXT: call __atomic_fetch_xor_2@plt
+; RV32I-NEXT: call __atomic_fetch_xor_2
; RV32I-NEXT: slli a0, a0, 16
; RV32I-NEXT: srai a0, a0, 16
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
@@ -1599,7 +1599,7 @@ define signext i16 @atomicrmw_xor_i16_monotonic(ptr %a, i16 %b) nounwind {
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT: li a2, 0
-; RV64I-NEXT: call __atomic_fetch_xor_2@plt
+; RV64I-NEXT: call __atomic_fetch_xor_2
; RV64I-NEXT: slli a0, a0, 48
; RV64I-NEXT: srai a0, a0, 48
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
@@ -1643,7 +1643,7 @@ define signext i16 @atomicrmw_max_i16_monotonic(ptr %a, i16 %b) nounwind {
; RV32I-NEXT: mv a0, s0
; RV32I-NEXT: li a3, 0
; RV32I-NEXT: li a4, 0
-; RV32I-NEXT: call __atomic_compare_exchange_2@plt
+; RV32I-NEXT: call __atomic_compare_exchange_2
; RV32I-NEXT: lh a3, 14(sp)
; RV32I-NEXT: bnez a0, .LBB21_4
; RV32I-NEXT: .LBB21_2: # %atomicrmw.start
@@ -1719,7 +1719,7 @@ define signext i16 @atomicrmw_max_i16_monotonic(ptr %a, i16 %b) nounwind {
; RV64I-NEXT: mv a0, s0
; RV64I-NEXT: li a3, 0
; RV64I-NEXT: li a4, 0
-; RV64I-NEXT: call __atomic_compare_exchange_2@plt
+; RV64I-NEXT: call __atomic_compare_exchange_2
; RV64I-NEXT: lh a3, 14(sp)
; RV64I-NEXT: bnez a0, .LBB21_4
; RV64I-NEXT: .LBB21_2: # %atomicrmw.start
@@ -1799,7 +1799,7 @@ define signext i16 @atomicrmw_min_i16_monotonic(ptr %a, i16 %b) nounwind {
; RV32I-NEXT: mv a0, s0
; RV32I-NEXT: li a3, 0
; RV32I-NEXT: li a4, 0
-; RV32I-NEXT: call __atomic_compare_exchange_2@plt
+; RV32I-NEXT: call __atomic_compare_exchange_2
; RV32I-NEXT: lh a3, 14(sp)
; RV32I-NEXT: bnez a0, .LBB22_4
; RV32I-NEXT: .LBB22_2: # %atomicrmw.start
@@ -1875,7 +1875,7 @@ define signext i16 @atomicrmw_min_i16_monotonic(ptr %a, i16 %b) nounwind {
; RV64I-NEXT: mv a0, s0
; RV64I-NEXT: li a3, 0
; RV64I-NEXT: li a4, 0
-; RV64I-NEXT: call __atomic_compare_exchange_2@plt
+; RV64I-NEXT: call __atomic_compare_exchange_2
; RV64I-NEXT: lh a3, 14(sp)
; RV64I-NEXT: bnez a0, .LBB22_4
; RV64I-NEXT: .LBB22_2: # %atomicrmw.start
@@ -1957,7 +1957,7 @@ define signext i16 @atomicrmw_umax_i16_monotonic(ptr %a, i16 %b) nounwind {
; RV32I-NEXT: mv a0, s1
; RV32I-NEXT: li a3, 0
; RV32I-NEXT: li a4, 0
-; RV32I-NEXT: call __atomic_compare_exchange_2@plt
+; RV32I-NEXT: call __atomic_compare_exchange_2
; RV32I-NEXT: lh a1, 10(sp)
; RV32I-NEXT: bnez a0, .LBB23_4
; RV32I-NEXT: .LBB23_2: # %atomicrmw.start
@@ -2029,7 +2029,7 @@ define signext i16 @atomicrmw_umax_i16_monotonic(ptr %a, i16 %b) nounwind {
; RV64I-NEXT: mv a0, s1
; RV64I-NEXT: li a3, 0
; RV64I-NEXT: li a4, 0
-; RV64I-NEXT: call __atomic_compare_exchange_2@plt
+; RV64I-NEXT: call __atomic_compare_exchange_2
; RV64I-NEXT: lh a1, 6(sp)
; RV64I-NEXT: bnez a0, .LBB23_4
; RV64I-NEXT: .LBB23_2: # %atomicrmw.start
@@ -2105,7 +2105,7 @@ define signext i16 @atomicrmw_umin_i16_monotonic(ptr %a, i16 %b) nounwind {
; RV32I-NEXT: mv a0, s1
; RV32I-NEXT: li a3, 0
; RV32I-NEXT: li a4, 0
-; RV32I-NEXT: call __atomic_compare_exchange_2@plt
+; RV32I-NEXT: call __atomic_compare_exchange_2
; RV32I-NEXT: lh a1, 10(sp)
; RV32I-NEXT: bnez a0, .LBB24_4
; RV32I-NEXT: .LBB24_2: # %atomicrmw.start
@@ -2177,7 +2177,7 @@ define signext i16 @atomicrmw_umin_i16_monotonic(ptr %a, i16 %b) nounwind {
; RV64I-NEXT: mv a0, s1
; RV64I-NEXT: li a3, 0
; RV64I-NEXT: li a4, 0
-; RV64I-NEXT: call __atomic_compare_exchange_2@plt
+; RV64I-NEXT: call __atomic_compare_exchange_2
; RV64I-NEXT: lh a1, 6(sp)
; RV64I-NEXT: bnez a0, .LBB24_4
; RV64I-NEXT: .LBB24_2: # %atomicrmw.start
@@ -2236,7 +2236,7 @@ define signext i32 @atomicrmw_xchg_i32_monotonic(ptr %a, i32 %b) nounwind {
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT: li a2, 0
-; RV32I-NEXT: call __atomic_exchange_4@plt
+; RV32I-NEXT: call __atomic_exchange_4
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
@@ -2251,7 +2251,7 @@ define signext i32 @atomicrmw_xchg_i32_monotonic(ptr %a, i32 %b) nounwind {
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT: li a2, 0
-; RV64I-NEXT: call __atomic_exchange_4@plt
+; RV64I-NEXT: call __atomic_exchange_4
; RV64I-NEXT: sext.w a0, a0
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
@@ -2271,7 +2271,7 @@ define signext i32 @atomicrmw_add_i32_monotonic(ptr %a, i32 %b) nounwind {
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT: li a2, 0
-; RV32I-NEXT: call __atomic_fetch_add_4@plt
+; RV32I-NEXT: call __atomic_fetch_add_4
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
@@ -2286,7 +2286,7 @@ define signext i32 @atomicrmw_add_i32_monotonic(ptr %a, i32 %b) nounwind {
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT: li a2, 0
-; RV64I-NEXT: call __atomic_fetch_add_4@plt
+; RV64I-NEXT: call __atomic_fetch_add_4
; RV64I-NEXT: sext.w a0, a0
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
@@ -2306,7 +2306,7 @@ define signext i32 @atomicrmw_sub_i32_monotonic(ptr %a, i32 %b) nounwind {
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT: li a2, 0
-; RV32I-NEXT: call __atomic_fetch_sub_4@plt
+; RV32I-NEXT: call __atomic_fetch_sub_4
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
@@ -2322,7 +2322,7 @@ define signext i32 @atomicrmw_sub_i32_monotonic(ptr %a, i32 %b) nounwind {
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT: li a2, 0
-; RV64I-NEXT: call __atomic_fetch_sub_4@plt
+; RV64I-NEXT: call __atomic_fetch_sub_4
; RV64I-NEXT: sext.w a0, a0
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
@@ -2343,7 +2343,7 @@ define signext i32 @atomicrmw_and_i32_monotonic(ptr %a, i32 %b) nounwind {
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT: li a2, 0
-; RV32I-NEXT: call __atomic_fetch_and_4@plt
+; RV32I-NEXT: call __atomic_fetch_and_4
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
@@ -2358,7 +2358,7 @@ define signext i32 @atomicrmw_and_i32_monotonic(ptr %a, i32 %b) nounwind {
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT: li a2, 0
-; RV64I-NEXT: call __atomic_fetch_and_4@plt
+; RV64I-NEXT: call __atomic_fetch_and_4
; RV64I-NEXT: sext.w a0, a0
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
@@ -2378,7 +2378,7 @@ define signext i32 @atomicrmw_nand_i32_monotonic(ptr %a, i32 %b) nounwind {
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT: li a2, 0
-; RV32I-NEXT: call __atomic_fetch_nand_4@plt
+; RV32I-NEXT: call __atomic_fetch_nand_4
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
@@ -2400,7 +2400,7 @@ define signext i32 @atomicrmw_nand_i32_monotonic(ptr %a, i32 %b) nounwind {
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT: li a2, 0
-; RV64I-NEXT: call __atomic_fetch_nand_4@plt
+; RV64I-NEXT: call __atomic_fetch_nand_4
; RV64I-NEXT: sext.w a0, a0
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
@@ -2427,7 +2427,7 @@ define signext i32 @atomicrmw_or_i32_monotonic(ptr %a, i32 %b) nounwind {
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT: li a2, 0
-; RV32I-NEXT: call __atomic_fetch_or_4@plt
+; RV32I-NEXT: call __atomic_fetch_or_4
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
@@ -2442,7 +2442,7 @@ define signext i32 @atomicrmw_or_i32_monotonic(ptr %a, i32 %b) nounwind {
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT: li a2, 0
-; RV64I-NEXT: call __atomic_fetch_or_4@plt
+; RV64I-NEXT: call __atomic_fetch_or_4
; RV64I-NEXT: sext.w a0, a0
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
@@ -2462,7 +2462,7 @@ define signext i32 @atomicrmw_xor_i32_monotonic(ptr %a, i32 %b) nounwind {
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT: li a2, 0
-; RV32I-NEXT: call __atomic_fetch_xor_4@plt
+; RV32I-NEXT: call __atomic_fetch_xor_4
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
@@ -2477,7 +2477,7 @@ define signext i32 @atomicrmw_xor_i32_monotonic(ptr %a, i32 %b) nounwind {
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT: li a2, 0
-; RV64I-NEXT: call __atomic_fetch_xor_4@plt
+; RV64I-NEXT: call __atomic_fetch_xor_4
; RV64I-NEXT: sext.w a0, a0
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
@@ -2509,7 +2509,7 @@ define signext i32 @atomicrmw_max_i32_monotonic(ptr %a, i32 %b) nounwind {
; RV32I-NEXT: mv a0, s0
; RV32I-NEXT: li a3, 0
; RV32I-NEXT: li a4, 0
-; RV32I-NEXT: call __atomic_compare_exchange_4@plt
+; RV32I-NEXT: call __atomic_compare_exchange_4
; RV32I-NEXT: lw a3, 0(sp)
; RV32I-NEXT: bnez a0, .LBB32_4
; RV32I-NEXT: .LBB32_2: # %atomicrmw.start
@@ -2552,7 +2552,7 @@ define signext i32 @atomicrmw_max_i32_monotonic(ptr %a, i32 %b) nounwind {
; RV64I-NEXT: mv a0, s0
; RV64I-NEXT: li a3, 0
; RV64I-NEXT: li a4, 0
-; RV64I-NEXT: call __atomic_compare_exchange_4@plt
+; RV64I-NEXT: call __atomic_compare_exchange_4
; RV64I-NEXT: lw a3, 12(sp)
; RV64I-NEXT: bnez a0, .LBB32_4
; RV64I-NEXT: .LBB32_2: # %atomicrmw.start
@@ -2598,7 +2598,7 @@ define signext i32 @atomicrmw_min_i32_monotonic(ptr %a, i32 %b) nounwind {
; RV32I-NEXT: mv a0, s0
; RV32I-NEXT: li a3, 0
; RV32I-NEXT: li a4, 0
-; RV32I-NEXT: call __atomic_compare_exchange_4@plt
+; RV32I-NEXT: call __atomic_compare_exchange_4
; RV32I-NEXT: lw a3, 0(sp)
; RV32I-NEXT: bnez a0, .LBB33_4
; RV32I-NEXT: .LBB33_2: # %atomicrmw.start
@@ -2641,7 +2641,7 @@ define signext i32 @atomicrmw_min_i32_monotonic(ptr %a, i32 %b) nounwind {
; RV64I-NEXT: mv a0, s0
; RV64I-NEXT: li a3, 0
; RV64I-NEXT: li a4, 0
-; RV64I-NEXT: call __atomic_compare_exchange_4@plt
+; RV64I-NEXT: call __atomic_compare_exchange_4
; RV64I-NEXT: lw a3, 12(sp)
; RV64I-NEXT: bnez a0, .LBB33_4
; RV64I-NEXT: .LBB33_2: # %atomicrmw.start
@@ -2687,7 +2687,7 @@ define signext i32 @atomicrmw_umax_i32_monotonic(ptr %a, i32 %b) nounwind {
; RV32I-NEXT: mv a0, s0
; RV32I-NEXT: li a3, 0
; RV32I-NEXT: li a4, 0
-; RV32I-NEXT: call __atomic_compare_exchange_4@plt
+; RV32I-NEXT: call __atomic_compare_exchange_4
; RV32I-NEXT: lw a3, 0(sp)
; RV32I-NEXT: bnez a0, .LBB34_4
; RV32I-NEXT: .LBB34_2: # %atomicrmw.start
@@ -2730,7 +2730,7 @@ define signext i32 @atomicrmw_umax_i32_monotonic(ptr %a, i32 %b) nounwind {
; RV64I-NEXT: mv a0, s0
; RV64I-NEXT: li a3, 0
; RV64I-NEXT: li a4, 0
-; RV64I-NEXT: call __atomic_compare_exchange_4@plt
+; RV64I-NEXT: call __atomic_compare_exchange_4
; RV64I-NEXT: lw a3, 12(sp)
; RV64I-NEXT: bnez a0, .LBB34_4
; RV64I-NEXT: .LBB34_2: # %atomicrmw.start
@@ -2776,7 +2776,7 @@ define signext i32 @atomicrmw_umin_i32_monotonic(ptr %a, i32 %b) nounwind {
; RV32I-NEXT: mv a0, s0
; RV32I-NEXT: li a3, 0
; RV32I-NEXT: li a4, 0
-; RV32I-NEXT: call __atomic_compare_exchange_4@plt
+; RV32I-NEXT: call __atomic_compare_exchange_4
; RV32I-NEXT: lw a3, 0(sp)
; RV32I-NEXT: bnez a0, .LBB35_4
; RV32I-NEXT: .LBB35_2: # %atomicrmw.start
@@ -2819,7 +2819,7 @@ define signext i32 @atomicrmw_umin_i32_monotonic(ptr %a, i32 %b) nounwind {
; RV64I-NEXT: mv a0, s0
; RV64I-NEXT: li a3, 0
; RV64I-NEXT: li a4, 0
-; RV64I-NEXT: call __atomic_compare_exchange_4@plt
+; RV64I-NEXT: call __atomic_compare_exchange_4
; RV64I-NEXT: lw a3, 12(sp)
; RV64I-NEXT: bnez a0, .LBB35_4
; RV64I-NEXT: .LBB35_2: # %atomicrmw.start
@@ -2853,7 +2853,7 @@ define signext i64 @atomicrmw_xchg_i64_monotonic(ptr %a, i64 %b) nounwind {
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT: li a3, 0
-; RV32I-NEXT: call __atomic_exchange_8@plt
+; RV32I-NEXT: call __atomic_exchange_8
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
@@ -2863,7 +2863,7 @@ define signext i64 @atomicrmw_xchg_i64_monotonic(ptr %a, i64 %b) nounwind {
; RV32IA-NEXT: addi sp, sp, -16
; RV32IA-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32IA-NEXT: li a3, 0
-; RV32IA-NEXT: call __atomic_exchange_8@plt
+; RV32IA-NEXT: call __atomic_exchange_8
; RV32IA-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32IA-NEXT: addi sp, sp, 16
; RV32IA-NEXT: ret
@@ -2873,7 +2873,7 @@ define signext i64 @atomicrmw_xchg_i64_monotonic(ptr %a, i64 %b) nounwind {
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT: li a2, 0
-; RV64I-NEXT: call __atomic_exchange_8@plt
+; RV64I-NEXT: call __atomic_exchange_8
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
@@ -2892,7 +2892,7 @@ define signext i64 @atomicrmw_add_i64_monotonic(ptr %a, i64 %b) nounwind {
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT: li a3, 0
-; RV32I-NEXT: call __atomic_fetch_add_8@plt
+; RV32I-NEXT: call __atomic_fetch_add_8
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
@@ -2902,7 +2902,7 @@ define signext i64 @atomicrmw_add_i64_monotonic(ptr %a, i64 %b) nounwind {
; RV32IA-NEXT: addi sp, sp, -16
; RV32IA-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32IA-NEXT: li a3, 0
-; RV32IA-NEXT: call __atomic_fetch_add_8@plt
+; RV32IA-NEXT: call __atomic_fetch_add_8
; RV32IA-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32IA-NEXT: addi sp, sp, 16
; RV32IA-NEXT: ret
@@ -2912,7 +2912,7 @@ define signext i64 @atomicrmw_add_i64_monotonic(ptr %a, i64 %b) nounwind {
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT: li a2, 0
-; RV64I-NEXT: call __atomic_fetch_add_8@plt
+; RV64I-NEXT: call __atomic_fetch_add_8
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
@@ -2931,7 +2931,7 @@ define signext i64 @atomicrmw_sub_i64_monotonic(ptr %a, i64 %b) nounwind {
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT: li a3, 0
-; RV32I-NEXT: call __atomic_fetch_sub_8@plt
+; RV32I-NEXT: call __atomic_fetch_sub_8
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
@@ -2941,7 +2941,7 @@ define signext i64 @atomicrmw_sub_i64_monotonic(ptr %a, i64 %b) nounwind {
; RV32IA-NEXT: addi sp, sp, -16
; RV32IA-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32IA-NEXT: li a3, 0
-; RV32IA-NEXT: call __atomic_fetch_sub_8@plt
+; RV32IA-NEXT: call __atomic_fetch_sub_8
; RV32IA-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32IA-NEXT: addi sp, sp, 16
; RV32IA-NEXT: ret
@@ -2951,7 +2951,7 @@ define signext i64 @atomicrmw_sub_i64_monotonic(ptr %a, i64 %b) nounwind {
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT: li a2, 0
-; RV64I-NEXT: call __atomic_fetch_sub_8@plt
+; RV64I-NEXT: call __atomic_fetch_sub_8
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
@@ -2971,7 +2971,7 @@ define signext i64 @atomicrmw_and_i64_monotonic(ptr %a, i64 %b) nounwind {
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT: li a3, 0
-; RV32I-NEXT: call __atomic_fetch_and_8@plt
+; RV32I-NEXT: call __atomic_fetch_and_8
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
@@ -2981,7 +2981,7 @@ define signext i64 @atomicrmw_and_i64_monotonic(ptr %a, i64 %b) nounwind {
; RV32IA-NEXT: addi sp, sp, -16
; RV32IA-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32IA-NEXT: li a3, 0
-; RV32IA-NEXT: call __atomic_fetch_and_8@plt
+; RV32IA-NEXT: call __atomic_fetch_and_8
; RV32IA-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32IA-NEXT: addi sp, sp, 16
; RV32IA-NEXT: ret
@@ -2991,7 +2991,7 @@ define signext i64 @atomicrmw_and_i64_monotonic(ptr %a, i64 %b) nounwind {
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT: li a2, 0
-; RV64I-NEXT: call __atomic_fetch_and_8@plt
+; RV64I-NEXT: call __atomic_fetch_and_8
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
@@ -3010,7 +3010,7 @@ define signext i64 @atomicrmw_nand_i64_monotonic(ptr %a, i64 %b) nounwind {
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT: li a3, 0
-; RV32I-NEXT: call __atomic_fetch_nand_8@plt
+; RV32I-NEXT: call __atomic_fetch_nand_8
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
@@ -3020,7 +3020,7 @@ define signext i64 @atomicrmw_nand_i64_monotonic(ptr %a, i64 %b) nounwind {
; RV32IA-NEXT: addi sp, sp, -16
; RV32IA-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32IA-NEXT: li a3, 0
-; RV32IA-NEXT: call __atomic_fetch_nand_8@plt
+; RV32IA-NEXT: call __atomic_fetch_nand_8
; RV32IA-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32IA-NEXT: addi sp, sp, 16
; RV32IA-NEXT: ret
@@ -3030,7 +3030,7 @@ define signext i64 @atomicrmw_nand_i64_monotonic(ptr %a, i64 %b) nounwind {
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT: li a2, 0
-; RV64I-NEXT: call __atomic_fetch_nand_8@plt
+; RV64I-NEXT: call __atomic_fetch_nand_8
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
@@ -3056,7 +3056,7 @@ define signext i64 @atomicrmw_or_i64_monotonic(ptr %a, i64 %b) nounwind {
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT: li a3, 0
-; RV32I-NEXT: call __atomic_fetch_or_8@plt
+; RV32I-NEXT: call __atomic_fetch_or_8
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
@@ -3066,7 +3066,7 @@ define signext i64 @atomicrmw_or_i64_monotonic(ptr %a, i64 %b) nounwind {
; RV32IA-NEXT: addi sp, sp, -16
; RV32IA-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32IA-NEXT: li a3, 0
-; RV32IA-NEXT: call __atomic_fetch_or_8@plt
+; RV32IA-NEXT: call __atomic_fetch_or_8
; RV32IA-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32IA-NEXT: addi sp, sp, 16
; RV32IA-NEXT: ret
@@ -3076,7 +3076,7 @@ define signext i64 @atomicrmw_or_i64_monotonic(ptr %a, i64 %b) nounwind {
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT: li a2, 0
-; RV64I-NEXT: call __atomic_fetch_or_8@plt
+; RV64I-NEXT: call __atomic_fetch_or_8
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
@@ -3095,7 +3095,7 @@ define signext i64 @atomicrmw_xor_i64_monotonic(ptr %a, i64 %b) nounwind {
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT: li a3, 0
-; RV32I-NEXT: call __atomic_fetch_xor_8@plt
+; RV32I-NEXT: call __atomic_fetch_xor_8
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
@@ -3105,7 +3105,7 @@ define signext i64 @atomicrmw_xor_i64_monotonic(ptr %a, i64 %b) nounwind {
; RV32IA-NEXT: addi sp, sp, -16
; RV32IA-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32IA-NEXT: li a3, 0
-; RV32IA-NEXT: call __atomic_fetch_xor_8@plt
+; RV32IA-NEXT: call __atomic_fetch_xor_8
; RV32IA-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32IA-NEXT: addi sp, sp, 16
; RV32IA-NEXT: ret
@@ -3115,7 +3115,7 @@ define signext i64 @atomicrmw_xor_i64_monotonic(ptr %a, i64 %b) nounwind {
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT: li a2, 0
-; RV64I-NEXT: call __atomic_fetch_xor_8@plt
+; RV64I-NEXT: call __atomic_fetch_xor_8
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
@@ -3150,7 +3150,7 @@ define signext i64 @atomicrmw_max_i64_monotonic(ptr %a, i64 %b) nounwind {
; RV32I-NEXT: mv a0, s0
; RV32I-NEXT: li a4, 0
; RV32I-NEXT: li a5, 0
-; RV32I-NEXT: call __atomic_compare_exchange_8@plt
+; RV32I-NEXT: call __atomic_compare_exchange_8
; RV32I-NEXT: lw a5, 12(sp)
; RV32I-NEXT: lw a4, 8(sp)
; RV32I-NEXT: bnez a0, .LBB43_7
@@ -3204,7 +3204,7 @@ define signext i64 @atomicrmw_max_i64_monotonic(ptr %a, i64 %b) nounwind {
; RV32IA-NEXT: mv a0, s0
; RV32IA-NEXT: li a4, 0
; RV32IA-NEXT: li a5, 0
-; RV32IA-NEXT: call __atomic_compare_exchange_8@plt
+; RV32IA-NEXT: call __atomic_compare_exchange_8
; RV32IA-NEXT: lw a5, 12(sp)
; RV32IA-NEXT: lw a4, 8(sp)
; RV32IA-NEXT: bnez a0, .LBB43_7
@@ -3254,7 +3254,7 @@ define signext i64 @atomicrmw_max_i64_monotonic(ptr %a, i64 %b) nounwind {
; RV64I-NEXT: mv a0, s0
; RV64I-NEXT: li a3, 0
; RV64I-NEXT: li a4, 0
-; RV64I-NEXT: call __atomic_compare_exchange_8@plt
+; RV64I-NEXT: call __atomic_compare_exchange_8
; RV64I-NEXT: ld a3, 0(sp)
; RV64I-NEXT: bnez a0, .LBB43_4
; RV64I-NEXT: .LBB43_2: # %atomicrmw.start
@@ -3303,7 +3303,7 @@ define signext i64 @atomicrmw_min_i64_monotonic(ptr %a, i64 %b) nounwind {
; RV32I-NEXT: mv a0, s0
; RV32I-NEXT: li a4, 0
; RV32I-NEXT: li a5, 0
-; RV32I-NEXT: call __atomic_compare_exchange_8@plt
+; RV32I-NEXT: call __atomic_compare_exchange_8
; RV32I-NEXT: lw a5, 12(sp)
; RV32I-NEXT: lw a4, 8(sp)
; RV32I-NEXT: bnez a0, .LBB44_7
@@ -3357,7 +3357,7 @@ define signext i64 @atomicrmw_min_i64_monotonic(ptr %a, i64 %b) nounwind {
; RV32IA-NEXT: mv a0, s0
; RV32IA-NEXT: li a4, 0
; RV32IA-NEXT: li a5, 0
-; RV32IA-NEXT: call __atomic_compare_exchange_8@plt
+; RV32IA-NEXT: call __atomic_compare_exchange_8
; RV32IA-NEXT: lw a5, 12(sp)
; RV32IA-NEXT: lw a4, 8(sp)
; RV32IA-NEXT: bnez a0, .LBB44_7
@@ -3407,7 +3407,7 @@ define signext i64 @atomicrmw_min_i64_monotonic(ptr %a, i64 %b) nounwind {
; RV64I-NEXT: mv a0, s0
; RV64I-NEXT: li a3, 0
; RV64I-NEXT: li a4, 0
-; RV64I-NEXT: call __atomic_compare_exchange_8@plt
+; RV64I-NEXT: call __atomic_compare_exchange_8
; RV64I-NEXT: ld a3, 0(sp)
; RV64I-NEXT: bnez a0, .LBB44_4
; RV64I-NEXT: .LBB44_2: # %atomicrmw.start
@@ -3456,7 +3456,7 @@ define signext i64 @atomicrmw_umax_i64_monotonic(ptr %a, i64 %b) nounwind {
; RV32I-NEXT: mv a0, s0
; RV32I-NEXT: li a4, 0
; RV32I-NEXT: li a5, 0
-; RV32I-NEXT: call __atomic_compare_exchange_8@plt
+; RV32I-NEXT: call __atomic_compare_exchange_8
; RV32I-NEXT: lw a5, 12(sp)
; RV32I-NEXT: lw a4, 8(sp)
; RV32I-NEXT: bnez a0, .LBB45_7
@@ -3510,7 +3510,7 @@ define signext i64 @atomicrmw_umax_i64_monotonic(ptr %a, i64 %b) nounwind {
; RV32IA-NEXT: mv a0, s0
; RV32IA-NEXT: li a4, 0
; RV32IA-NEXT: li a5, 0
-; RV32IA-NEXT: call __atomic_compare_exchange_8@plt
+; RV32IA-NEXT: call __atomic_compare_exchange_8
; RV32IA-NEXT: lw a5, 12(sp)
; RV32IA-NEXT: lw a4, 8(sp)
; RV32IA-NEXT: bnez a0, .LBB45_7
@@ -3560,7 +3560,7 @@ define signext i64 @atomicrmw_umax_i64_monotonic(ptr %a, i64 %b) nounwind {
; RV64I-NEXT: mv a0, s0
; RV64I-NEXT: li a3, 0
; RV64I-NEXT: li a4, 0
-; RV64I-NEXT: call __atomic_compare_exchange_8@plt
+; RV64I-NEXT: call __atomic_compare_exchange_8
; RV64I-NEXT: ld a3, 0(sp)
; RV64I-NEXT: bnez a0, .LBB45_4
; RV64I-NEXT: .LBB45_2: # %atomicrmw.start
@@ -3609,7 +3609,7 @@ define signext i64 @atomicrmw_umin_i64_monotonic(ptr %a, i64 %b) nounwind {
; RV32I-NEXT: mv a0, s0
; RV32I-NEXT: li a4, 0
; RV32I-NEXT: li a5, 0
-; RV32I-NEXT: call __atomic_compare_exchange_8@plt
+; RV32I-NEXT: call __atomic_compare_exchange_8
; RV32I-NEXT: lw a5, 12(sp)
; RV32I-NEXT: lw a4, 8(sp)
; RV32I-NEXT: bnez a0, .LBB46_7
@@ -3663,7 +3663,7 @@ define signext i64 @atomicrmw_umin_i64_monotonic(ptr %a, i64 %b) nounwind {
; RV32IA-NEXT: mv a0, s0
; RV32IA-NEXT: li a4, 0
; RV32IA-NEXT: li a5, 0
-; RV32IA-NEXT: call __atomic_compare_exchange_8@plt
+; RV32IA-NEXT: call __atomic_compare_exchange_8
; RV32IA-NEXT: lw a5, 12(sp)
; RV32IA-NEXT: lw a4, 8(sp)
; RV32IA-NEXT: bnez a0, .LBB46_7
@@ -3713,7 +3713,7 @@ define signext i64 @atomicrmw_umin_i64_monotonic(ptr %a, i64 %b) nounwind {
; RV64I-NEXT: mv a0, s0
; RV64I-NEXT: li a3, 0
; RV64I-NEXT: li a4, 0
-; RV64I-NEXT: call __atomic_compare_exchange_8@plt
+; RV64I-NEXT: call __atomic_compare_exchange_8
; RV64I-NEXT: ld a3, 0(sp)
; RV64I-NEXT: bnez a0, .LBB46_4
; RV64I-NEXT: .LBB46_2: # %atomicrmw.start
@@ -3749,7 +3749,7 @@ define signext i8 @cmpxchg_i8_monotonic_monotonic_val0(ptr %ptr, i8 signext %cmp
; RV32I-NEXT: addi a1, sp, 11
; RV32I-NEXT: li a3, 0
; RV32I-NEXT: li a4, 0
-; RV32I-NEXT: call __atomic_compare_exchange_1@plt
+; RV32I-NEXT: call __atomic_compare_exchange_1
; RV32I-NEXT: lb a0, 11(sp)
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
@@ -3789,7 +3789,7 @@ define signext i8 @cmpxchg_i8_monotonic_monotonic_val0(ptr %ptr, i8 signext %cmp
; RV64I-NEXT: addi a1, sp, 7
; RV64I-NEXT: li a3, 0
; RV64I-NEXT: li a4, 0
-; RV64I-NEXT: call __atomic_compare_exchange_1@plt
+; RV64I-NEXT: call __atomic_compare_exchange_1
; RV64I-NEXT: lb a0, 7(sp)
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
@@ -3834,7 +3834,7 @@ define i1 @cmpxchg_i8_monotonic_monotonic_val1(ptr %ptr, i8 signext %cmp, i8 sig
; RV32I-NEXT: addi a1, sp, 11
; RV32I-NEXT: li a3, 0
; RV32I-NEXT: li a4, 0
-; RV32I-NEXT: call __atomic_compare_exchange_1@plt
+; RV32I-NEXT: call __atomic_compare_exchange_1
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
@@ -3873,7 +3873,7 @@ define i1 @cmpxchg_i8_monotonic_monotonic_val1(ptr %ptr, i8 signext %cmp, i8 sig
; RV64I-NEXT: addi a1, sp, 7
; RV64I-NEXT: li a3, 0
; RV64I-NEXT: li a4, 0
-; RV64I-NEXT: call __atomic_compare_exchange_1@plt
+; RV64I-NEXT: call __atomic_compare_exchange_1
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
@@ -3917,7 +3917,7 @@ define signext i16 @cmpxchg_i16_monotonic_monotonic_val0(ptr %ptr, i16 signext %
; RV32I-NEXT: addi a1, sp, 10
; RV32I-NEXT: li a3, 0
; RV32I-NEXT: li a4, 0
-; RV32I-NEXT: call __atomic_compare_exchange_2@plt
+; RV32I-NEXT: call __atomic_compare_exchange_2
; RV32I-NEXT: lh a0, 10(sp)
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
@@ -3958,7 +3958,7 @@ define signext i16 @cmpxchg_i16_monotonic_monotonic_val0(ptr %ptr, i16 signext %
; RV64I-NEXT: addi a1, sp, 6
; RV64I-NEXT: li a3, 0
; RV64I-NEXT: li a4, 0
-; RV64I-NEXT: call __atomic_compare_exchange_2@plt
+; RV64I-NEXT: call __atomic_compare_exchange_2
; RV64I-NEXT: lh a0, 6(sp)
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
@@ -4004,7 +4004,7 @@ define i1 @cmpxchg_i16_monotonic_monotonic_val1(ptr %ptr, i16 signext %cmp, i16
; RV32I-NEXT: addi a1, sp, 10
; RV32I-NEXT: li a3, 0
; RV32I-NEXT: li a4, 0
-; RV32I-NEXT: call __atomic_compare_exchange_2@plt
+; RV32I-NEXT: call __atomic_compare_exchange_2
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
@@ -4044,7 +4044,7 @@ define i1 @cmpxchg_i16_monotonic_monotonic_val1(ptr %ptr, i16 signext %cmp, i16
; RV64I-NEXT: addi a1, sp, 6
; RV64I-NEXT: li a3, 0
; RV64I-NEXT: li a4, 0
-; RV64I-NEXT: call __atomic_compare_exchange_2@plt
+; RV64I-NEXT: call __atomic_compare_exchange_2
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
@@ -4089,7 +4089,7 @@ define signext i32 @cmpxchg_i32_monotonic_monotonic_val0(ptr %ptr, i32 signext %
; RV32I-NEXT: addi a1, sp, 8
; RV32I-NEXT: li a3, 0
; RV32I-NEXT: li a4, 0
-; RV32I-NEXT: call __atomic_compare_exchange_4@plt
+; RV32I-NEXT: call __atomic_compare_exchange_4
; RV32I-NEXT: lw a0, 8(sp)
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
@@ -4115,7 +4115,7 @@ define signext i32 @cmpxchg_i32_monotonic_monotonic_val0(ptr %ptr, i32 signext %
; RV64I-NEXT: addi a1, sp, 4
; RV64I-NEXT: li a3, 0
; RV64I-NEXT: li a4, 0
-; RV64I-NEXT: call __atomic_compare_exchange_4@plt
+; RV64I-NEXT: call __atomic_compare_exchange_4
; RV64I-NEXT: lw a0, 4(sp)
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
@@ -4146,7 +4146,7 @@ define i1 @cmpxchg_i32_monotonic_monotonic_val1(ptr %ptr, i32 signext %cmp, i32
; RV32I-NEXT: addi a1, sp, 8
; RV32I-NEXT: li a3, 0
; RV32I-NEXT: li a4, 0
-; RV32I-NEXT: call __atomic_compare_exchange_4@plt
+; RV32I-NEXT: call __atomic_compare_exchange_4
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
@@ -4172,7 +4172,7 @@ define i1 @cmpxchg_i32_monotonic_monotonic_val1(ptr %ptr, i32 signext %cmp, i32
; RV64I-NEXT: addi a1, sp, 4
; RV64I-NEXT: li a3, 0
; RV64I-NEXT: li a4, 0
-; RV64I-NEXT: call __atomic_compare_exchange_4@plt
+; RV64I-NEXT: call __atomic_compare_exchange_4
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
@@ -4204,7 +4204,7 @@ define signext i32 @atomicrmw_xchg_i32_monotonic_crossbb(ptr %a, i1 %c) nounwind
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT: li a1, 1
; RV32I-NEXT: li a2, 0
-; RV32I-NEXT: call __atomic_exchange_4@plt
+; RV32I-NEXT: call __atomic_exchange_4
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
@@ -4239,7 +4239,7 @@ define signext i32 @atomicrmw_xchg_i32_monotonic_crossbb(ptr %a, i1 %c) nounwind
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT: li a1, 1
; RV64I-NEXT: li a2, 0
-; RV64I-NEXT: call __atomic_exchange_4@plt
+; RV64I-NEXT: call __atomic_exchange_4
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: sext.w a0, a0
@@ -4291,7 +4291,7 @@ define signext i32 @atomicrmw_add_i32_monotonic_crossbb(ptr %a, i1 %c) nounwind
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT: li a1, 1
; RV32I-NEXT: li a2, 0
-; RV32I-NEXT: call __atomic_fetch_add_4@plt
+; RV32I-NEXT: call __atomic_fetch_add_4
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
@@ -4326,7 +4326,7 @@ define signext i32 @atomicrmw_add_i32_monotonic_crossbb(ptr %a, i1 %c) nounwind
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT: li a1, 1
; RV64I-NEXT: li a2, 0
-; RV64I-NEXT: call __atomic_fetch_add_4@plt
+; RV64I-NEXT: call __atomic_fetch_add_4
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: sext.w a0, a0
@@ -4379,7 +4379,7 @@ define signext i32 @atomicrmw_sub_i32_monotonic_crossbb(ptr %a, i1 %c) nounwind
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT: li a1, 1
; RV32I-NEXT: li a2, 0
-; RV32I-NEXT: call __atomic_fetch_sub_4@plt
+; RV32I-NEXT: call __atomic_fetch_sub_4
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
@@ -4414,7 +4414,7 @@ define signext i32 @atomicrmw_sub_i32_monotonic_crossbb(ptr %a, i1 %c) nounwind
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT: li a1, 1
; RV64I-NEXT: li a2, 0
-; RV64I-NEXT: call __atomic_fetch_sub_4@plt
+; RV64I-NEXT: call __atomic_fetch_sub_4
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: sext.w a0, a0
@@ -4467,7 +4467,7 @@ define signext i32 @atomicrmw_and_i32_monotonic_crossbb(ptr %a, i1 %c) nounwind
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT: li a1, 1
; RV32I-NEXT: li a2, 0
-; RV32I-NEXT: call __atomic_fetch_and_4@plt
+; RV32I-NEXT: call __atomic_fetch_and_4
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
@@ -4502,7 +4502,7 @@ define signext i32 @atomicrmw_and_i32_monotonic_crossbb(ptr %a, i1 %c) nounwind
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT: li a1, 1
; RV64I-NEXT: li a2, 0
-; RV64I-NEXT: call __atomic_fetch_and_4@plt
+; RV64I-NEXT: call __atomic_fetch_and_4
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: sext.w a0, a0
@@ -4555,7 +4555,7 @@ define signext i32 @atomicrmw_nand_i32_monotonic_crossbb(ptr %a, i1 %c) nounwind
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT: li a1, 1
; RV32I-NEXT: li a2, 0
-; RV32I-NEXT: call __atomic_fetch_nand_4@plt
+; RV32I-NEXT: call __atomic_fetch_nand_4
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
@@ -4597,7 +4597,7 @@ define signext i32 @atomicrmw_nand_i32_monotonic_crossbb(ptr %a, i1 %c) nounwind
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT: li a1, 1
; RV64I-NEXT: li a2, 0
-; RV64I-NEXT: call __atomic_fetch_nand_4@plt
+; RV64I-NEXT: call __atomic_fetch_nand_4
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: sext.w a0, a0
@@ -4657,7 +4657,7 @@ define signext i32 @atomicrmw_or_i32_monotonic_crossbb(ptr %a, i1 %c) nounwind {
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT: li a1, 1
; RV32I-NEXT: li a2, 0
-; RV32I-NEXT: call __atomic_fetch_or_4@plt
+; RV32I-NEXT: call __atomic_fetch_or_4
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
@@ -4692,7 +4692,7 @@ define signext i32 @atomicrmw_or_i32_monotonic_crossbb(ptr %a, i1 %c) nounwind {
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT: li a1, 1
; RV64I-NEXT: li a2, 0
-; RV64I-NEXT: call __atomic_fetch_or_4@plt
+; RV64I-NEXT: call __atomic_fetch_or_4
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: sext.w a0, a0
@@ -4745,7 +4745,7 @@ define signext i32 @atomicrmw_xor_i32_monotonic_crossbb(ptr %a, i1 %c) nounwind
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT: li a1, 1
; RV32I-NEXT: li a2, 0
-; RV32I-NEXT: call __atomic_fetch_xor_4@plt
+; RV32I-NEXT: call __atomic_fetch_xor_4
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
@@ -4780,7 +4780,7 @@ define signext i32 @atomicrmw_xor_i32_monotonic_crossbb(ptr %a, i1 %c) nounwind
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT: li a1, 1
; RV64I-NEXT: li a2, 0
-; RV64I-NEXT: call __atomic_fetch_xor_4@plt
+; RV64I-NEXT: call __atomic_fetch_xor_4
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: sext.w a0, a0
@@ -4842,7 +4842,7 @@ define signext i32 @atomicrmw_max_i32_monotonic_crossbb(ptr %a, i1 %c) nounwind
; RV32I-NEXT: mv a0, s0
; RV32I-NEXT: li a3, 0
; RV32I-NEXT: li a4, 0
-; RV32I-NEXT: call __atomic_compare_exchange_4@plt
+; RV32I-NEXT: call __atomic_compare_exchange_4
; RV32I-NEXT: lw a1, 4(sp)
; RV32I-NEXT: bnez a0, .LBB60_8
; RV32I-NEXT: .LBB60_3: # %atomicrmw.start
@@ -4905,7 +4905,7 @@ define signext i32 @atomicrmw_max_i32_monotonic_crossbb(ptr %a, i1 %c) nounwind
; RV64I-NEXT: mv a0, s0
; RV64I-NEXT: li a3, 0
; RV64I-NEXT: li a4, 0
-; RV64I-NEXT: call __atomic_compare_exchange_4@plt
+; RV64I-NEXT: call __atomic_compare_exchange_4
; RV64I-NEXT: lw a1, 12(sp)
; RV64I-NEXT: bnez a0, .LBB60_8
; RV64I-NEXT: .LBB60_3: # %atomicrmw.start
@@ -4989,7 +4989,7 @@ define signext i32 @atomicrmw_min_i32_monotonic_crossbb(ptr %a, i1 %c) nounwind
; RV32I-NEXT: mv a0, s0
; RV32I-NEXT: li a3, 0
; RV32I-NEXT: li a4, 0
-; RV32I-NEXT: call __atomic_compare_exchange_4@plt
+; RV32I-NEXT: call __atomic_compare_exchange_4
; RV32I-NEXT: lw a1, 0(sp)
; RV32I-NEXT: bnez a0, .LBB61_8
; RV32I-NEXT: .LBB61_3: # %atomicrmw.start
@@ -5055,7 +5055,7 @@ define signext i32 @atomicrmw_min_i32_monotonic_crossbb(ptr %a, i1 %c) nounwind
; RV64I-NEXT: mv a0, s0
; RV64I-NEXT: li a3, 0
; RV64I-NEXT: li a4, 0
-; RV64I-NEXT: call __atomic_compare_exchange_4@plt
+; RV64I-NEXT: call __atomic_compare_exchange_4
; RV64I-NEXT: lw a1, 4(sp)
; RV64I-NEXT: bnez a0, .LBB61_8
; RV64I-NEXT: .LBB61_3: # %atomicrmw.start
@@ -5138,7 +5138,7 @@ define signext i32 @atomicrmw_umax_i32_monotonic_crossbb(ptr %a, i1 %c) nounwind
; RV32I-NEXT: mv a0, s0
; RV32I-NEXT: li a3, 0
; RV32I-NEXT: li a4, 0
-; RV32I-NEXT: call __atomic_compare_exchange_4@plt
+; RV32I-NEXT: call __atomic_compare_exchange_4
; RV32I-NEXT: lw a1, 4(sp)
; RV32I-NEXT: beqz a0, .LBB62_2
; RV32I-NEXT: j .LBB62_4
@@ -5188,7 +5188,7 @@ define signext i32 @atomicrmw_umax_i32_monotonic_crossbb(ptr %a, i1 %c) nounwind
; RV64I-NEXT: mv a0, s0
; RV64I-NEXT: li a3, 0
; RV64I-NEXT: li a4, 0
-; RV64I-NEXT: call __atomic_compare_exchange_4@plt
+; RV64I-NEXT: call __atomic_compare_exchange_4
; RV64I-NEXT: lw a1, 12(sp)
; RV64I-NEXT: bnez a0, .LBB62_6
; RV64I-NEXT: .LBB62_3: # %atomicrmw.start
@@ -5266,7 +5266,7 @@ define signext i32 @atomicrmw_umin_i32_monotonic_crossbb(ptr %a, i1 %c) nounwind
; RV32I-NEXT: mv a0, s0
; RV32I-NEXT: li a3, 0
; RV32I-NEXT: li a4, 0
-; RV32I-NEXT: call __atomic_compare_exchange_4@plt
+; RV32I-NEXT: call __atomic_compare_exchange_4
; RV32I-NEXT: lw a1, 0(sp)
; RV32I-NEXT: bnez a0, .LBB63_8
; RV32I-NEXT: .LBB63_3: # %atomicrmw.start
@@ -5334,7 +5334,7 @@ define signext i32 @atomicrmw_umin_i32_monotonic_crossbb(ptr %a, i1 %c) nounwind
; RV64I-NEXT: mv a0, s0
; RV64I-NEXT: li a3, 0
; RV64I-NEXT: li a4, 0
-; RV64I-NEXT: call __atomic_compare_exchange_4@plt
+; RV64I-NEXT: call __atomic_compare_exchange_4
; RV64I-NEXT: lw a1, 4(sp)
; RV64I-NEXT: bnez a0, .LBB63_8
; RV64I-NEXT: .LBB63_3: # %atomicrmw.start
diff --git a/llvm/test/CodeGen/RISCV/atomicrmw-uinc-udec-wrap.ll b/llvm/test/CodeGen/RISCV/atomicrmw-uinc-udec-wrap.ll
index 5f15a9c06710..aa962d68fc52 100644
--- a/llvm/test/CodeGen/RISCV/atomicrmw-uinc-udec-wrap.ll
+++ b/llvm/test/CodeGen/RISCV/atomicrmw-uinc-udec-wrap.ll
@@ -39,7 +39,7 @@ define i8 @atomicrmw_uinc_wrap_i8(ptr %ptr, i8 %val) {
; RV32I-NEXT: li a3, 5
; RV32I-NEXT: li a4, 5
; RV32I-NEXT: mv a0, s0
-; RV32I-NEXT: call __atomic_compare_exchange_1@plt
+; RV32I-NEXT: call __atomic_compare_exchange_1
; RV32I-NEXT: lbu a3, 3(sp)
; RV32I-NEXT: beqz a0, .LBB0_1
; RV32I-NEXT: # %bb.2: # %atomicrmw.end
@@ -113,7 +113,7 @@ define i8 @atomicrmw_uinc_wrap_i8(ptr %ptr, i8 %val) {
; RV64I-NEXT: li a3, 5
; RV64I-NEXT: li a4, 5
; RV64I-NEXT: mv a0, s0
-; RV64I-NEXT: call __atomic_compare_exchange_1@plt
+; RV64I-NEXT: call __atomic_compare_exchange_1
; RV64I-NEXT: lbu a3, 7(sp)
; RV64I-NEXT: beqz a0, .LBB0_1
; RV64I-NEXT: # %bb.2: # %atomicrmw.end
@@ -195,7 +195,7 @@ define i16 @atomicrmw_uinc_wrap_i16(ptr %ptr, i16 %val) {
; RV32I-NEXT: li a3, 5
; RV32I-NEXT: li a4, 5
; RV32I-NEXT: mv a0, s0
-; RV32I-NEXT: call __atomic_compare_exchange_2@plt
+; RV32I-NEXT: call __atomic_compare_exchange_2
; RV32I-NEXT: lh a3, 14(sp)
; RV32I-NEXT: beqz a0, .LBB1_1
; RV32I-NEXT: # %bb.2: # %atomicrmw.end
@@ -275,7 +275,7 @@ define i16 @atomicrmw_uinc_wrap_i16(ptr %ptr, i16 %val) {
; RV64I-NEXT: li a3, 5
; RV64I-NEXT: li a4, 5
; RV64I-NEXT: mv a0, s0
-; RV64I-NEXT: call __atomic_compare_exchange_2@plt
+; RV64I-NEXT: call __atomic_compare_exchange_2
; RV64I-NEXT: lh a3, 14(sp)
; RV64I-NEXT: beqz a0, .LBB1_1
; RV64I-NEXT: # %bb.2: # %atomicrmw.end
@@ -354,7 +354,7 @@ define i32 @atomicrmw_uinc_wrap_i32(ptr %ptr, i32 %val) {
; RV32I-NEXT: li a3, 5
; RV32I-NEXT: li a4, 5
; RV32I-NEXT: mv a0, s0
-; RV32I-NEXT: call __atomic_compare_exchange_4@plt
+; RV32I-NEXT: call __atomic_compare_exchange_4
; RV32I-NEXT: lw a3, 0(sp)
; RV32I-NEXT: beqz a0, .LBB2_1
; RV32I-NEXT: # %bb.2: # %atomicrmw.end
@@ -414,7 +414,7 @@ define i32 @atomicrmw_uinc_wrap_i32(ptr %ptr, i32 %val) {
; RV64I-NEXT: li a3, 5
; RV64I-NEXT: li a4, 5
; RV64I-NEXT: mv a0, s0
-; RV64I-NEXT: call __atomic_compare_exchange_4@plt
+; RV64I-NEXT: call __atomic_compare_exchange_4
; RV64I-NEXT: lw a3, 4(sp)
; RV64I-NEXT: beqz a0, .LBB2_1
; RV64I-NEXT: # %bb.2: # %atomicrmw.end
@@ -490,7 +490,7 @@ define i64 @atomicrmw_uinc_wrap_i64(ptr %ptr, i64 %val) {
; RV32I-NEXT: li a4, 5
; RV32I-NEXT: li a5, 5
; RV32I-NEXT: mv a0, s0
-; RV32I-NEXT: call __atomic_compare_exchange_8@plt
+; RV32I-NEXT: call __atomic_compare_exchange_8
; RV32I-NEXT: lw a5, 12(sp)
; RV32I-NEXT: lw a4, 8(sp)
; RV32I-NEXT: bnez a0, .LBB3_5
@@ -545,7 +545,7 @@ define i64 @atomicrmw_uinc_wrap_i64(ptr %ptr, i64 %val) {
; RV32IA-NEXT: li a4, 5
; RV32IA-NEXT: li a5, 5
; RV32IA-NEXT: mv a0, s0
-; RV32IA-NEXT: call __atomic_compare_exchange_8@plt
+; RV32IA-NEXT: call __atomic_compare_exchange_8
; RV32IA-NEXT: lw a5, 12(sp)
; RV32IA-NEXT: lw a4, 8(sp)
; RV32IA-NEXT: bnez a0, .LBB3_5
@@ -589,7 +589,7 @@ define i64 @atomicrmw_uinc_wrap_i64(ptr %ptr, i64 %val) {
; RV64I-NEXT: li a3, 5
; RV64I-NEXT: li a4, 5
; RV64I-NEXT: mv a0, s0
-; RV64I-NEXT: call __atomic_compare_exchange_8@plt
+; RV64I-NEXT: call __atomic_compare_exchange_8
; RV64I-NEXT: ld a3, 0(sp)
; RV64I-NEXT: beqz a0, .LBB3_1
; RV64I-NEXT: # %bb.2: # %atomicrmw.end
@@ -653,7 +653,7 @@ define i8 @atomicrmw_udec_wrap_i8(ptr %ptr, i8 %val) {
; RV32I-NEXT: li a3, 5
; RV32I-NEXT: li a4, 5
; RV32I-NEXT: mv a0, s0
-; RV32I-NEXT: call __atomic_compare_exchange_1@plt
+; RV32I-NEXT: call __atomic_compare_exchange_1
; RV32I-NEXT: lbu a3, 15(sp)
; RV32I-NEXT: bnez a0, .LBB4_4
; RV32I-NEXT: .LBB4_2: # %atomicrmw.start
@@ -749,7 +749,7 @@ define i8 @atomicrmw_udec_wrap_i8(ptr %ptr, i8 %val) {
; RV64I-NEXT: li a3, 5
; RV64I-NEXT: li a4, 5
; RV64I-NEXT: mv a0, s0
-; RV64I-NEXT: call __atomic_compare_exchange_1@plt
+; RV64I-NEXT: call __atomic_compare_exchange_1
; RV64I-NEXT: lbu a3, 15(sp)
; RV64I-NEXT: bnez a0, .LBB4_4
; RV64I-NEXT: .LBB4_2: # %atomicrmw.start
@@ -853,7 +853,7 @@ define i16 @atomicrmw_udec_wrap_i16(ptr %ptr, i16 %val) {
; RV32I-NEXT: li a3, 5
; RV32I-NEXT: li a4, 5
; RV32I-NEXT: mv a0, s1
-; RV32I-NEXT: call __atomic_compare_exchange_2@plt
+; RV32I-NEXT: call __atomic_compare_exchange_2
; RV32I-NEXT: lh a1, 10(sp)
; RV32I-NEXT: bnez a0, .LBB5_4
; RV32I-NEXT: .LBB5_2: # %atomicrmw.start
@@ -955,7 +955,7 @@ define i16 @atomicrmw_udec_wrap_i16(ptr %ptr, i16 %val) {
; RV64I-NEXT: li a3, 5
; RV64I-NEXT: li a4, 5
; RV64I-NEXT: mv a0, s1
-; RV64I-NEXT: call __atomic_compare_exchange_2@plt
+; RV64I-NEXT: call __atomic_compare_exchange_2
; RV64I-NEXT: lh a1, 6(sp)
; RV64I-NEXT: bnez a0, .LBB5_4
; RV64I-NEXT: .LBB5_2: # %atomicrmw.start
@@ -1054,7 +1054,7 @@ define i32 @atomicrmw_udec_wrap_i32(ptr %ptr, i32 %val) {
; RV32I-NEXT: li a3, 5
; RV32I-NEXT: li a4, 5
; RV32I-NEXT: mv a0, s0
-; RV32I-NEXT: call __atomic_compare_exchange_4@plt
+; RV32I-NEXT: call __atomic_compare_exchange_4
; RV32I-NEXT: lw a3, 0(sp)
; RV32I-NEXT: bnez a0, .LBB6_4
; RV32I-NEXT: .LBB6_2: # %atomicrmw.start
@@ -1135,7 +1135,7 @@ define i32 @atomicrmw_udec_wrap_i32(ptr %ptr, i32 %val) {
; RV64I-NEXT: li a3, 5
; RV64I-NEXT: li a4, 5
; RV64I-NEXT: mv a0, s0
-; RV64I-NEXT: call __atomic_compare_exchange_4@plt
+; RV64I-NEXT: call __atomic_compare_exchange_4
; RV64I-NEXT: lw a3, 12(sp)
; RV64I-NEXT: bnez a0, .LBB6_4
; RV64I-NEXT: .LBB6_2: # %atomicrmw.start
@@ -1224,7 +1224,7 @@ define i64 @atomicrmw_udec_wrap_i64(ptr %ptr, i64 %val) {
; RV32I-NEXT: li a4, 5
; RV32I-NEXT: li a5, 5
; RV32I-NEXT: mv a0, s0
-; RV32I-NEXT: call __atomic_compare_exchange_8@plt
+; RV32I-NEXT: call __atomic_compare_exchange_8
; RV32I-NEXT: lw a5, 12(sp)
; RV32I-NEXT: lw a4, 8(sp)
; RV32I-NEXT: bnez a0, .LBB7_7
@@ -1287,7 +1287,7 @@ define i64 @atomicrmw_udec_wrap_i64(ptr %ptr, i64 %val) {
; RV32IA-NEXT: li a4, 5
; RV32IA-NEXT: li a5, 5
; RV32IA-NEXT: mv a0, s0
-; RV32IA-NEXT: call __atomic_compare_exchange_8@plt
+; RV32IA-NEXT: call __atomic_compare_exchange_8
; RV32IA-NEXT: lw a5, 12(sp)
; RV32IA-NEXT: lw a4, 8(sp)
; RV32IA-NEXT: bnez a0, .LBB7_7
@@ -1345,7 +1345,7 @@ define i64 @atomicrmw_udec_wrap_i64(ptr %ptr, i64 %val) {
; RV64I-NEXT: li a3, 5
; RV64I-NEXT: li a4, 5
; RV64I-NEXT: mv a0, s0
-; RV64I-NEXT: call __atomic_compare_exchange_8@plt
+; RV64I-NEXT: call __atomic_compare_exchange_8
; RV64I-NEXT: ld a3, 0(sp)
; RV64I-NEXT: bnez a0, .LBB7_4
; RV64I-NEXT: .LBB7_2: # %atomicrmw.start
diff --git a/llvm/test/CodeGen/RISCV/bf16-promote.ll b/llvm/test/CodeGen/RISCV/bf16-promote.ll
index c8fc84729da7..c17450a80de9 100644
--- a/llvm/test/CodeGen/RISCV/bf16-promote.ll
+++ b/llvm/test/CodeGen/RISCV/bf16-promote.ll
@@ -45,7 +45,7 @@ define void @test_fptrunc_float(float %f, ptr %p) nounwind {
; RV64-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64-NEXT: sd s0, 0(sp) # 8-byte Folded Spill
; RV64-NEXT: mv s0, a0
-; RV64-NEXT: call __truncsfbf2@plt
+; RV64-NEXT: call __truncsfbf2
; RV64-NEXT: fmv.x.w a0, fa0
; RV64-NEXT: sh a0, 0(s0)
; RV64-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
@@ -59,7 +59,7 @@ define void @test_fptrunc_float(float %f, ptr %p) nounwind {
; RV32-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32-NEXT: sw s0, 8(sp) # 4-byte Folded Spill
; RV32-NEXT: mv s0, a0
-; RV32-NEXT: call __truncsfbf2@plt
+; RV32-NEXT: call __truncsfbf2
; RV32-NEXT: fmv.x.w a0, fa0
; RV32-NEXT: sh a0, 0(s0)
; RV32-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
@@ -78,7 +78,7 @@ define void @test_fptrunc_double(double %d, ptr %p) nounwind {
; RV64-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64-NEXT: sd s0, 0(sp) # 8-byte Folded Spill
; RV64-NEXT: mv s0, a0
-; RV64-NEXT: call __truncdfbf2@plt
+; RV64-NEXT: call __truncdfbf2
; RV64-NEXT: fmv.x.w a0, fa0
; RV64-NEXT: sh a0, 0(s0)
; RV64-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
@@ -92,7 +92,7 @@ define void @test_fptrunc_double(double %d, ptr %p) nounwind {
; RV32-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32-NEXT: sw s0, 8(sp) # 4-byte Folded Spill
; RV32-NEXT: mv s0, a0
-; RV32-NEXT: call __truncdfbf2@plt
+; RV32-NEXT: call __truncdfbf2
; RV32-NEXT: fmv.x.w a0, fa0
; RV32-NEXT: sh a0, 0(s0)
; RV32-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
@@ -118,7 +118,7 @@ define void @test_fadd(ptr %p, ptr %q) nounwind {
; RV64-NEXT: slli a0, a0, 16
; RV64-NEXT: fmv.w.x fa4, a0
; RV64-NEXT: fadd.s fa0, fa4, fa5
-; RV64-NEXT: call __truncsfbf2@plt
+; RV64-NEXT: call __truncsfbf2
; RV64-NEXT: fmv.x.w a0, fa0
; RV64-NEXT: sh a0, 0(s0)
; RV64-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
@@ -139,7 +139,7 @@ define void @test_fadd(ptr %p, ptr %q) nounwind {
; RV32-NEXT: slli a0, a0, 16
; RV32-NEXT: fmv.w.x fa4, a0
; RV32-NEXT: fadd.s fa0, fa4, fa5
-; RV32-NEXT: call __truncsfbf2@plt
+; RV32-NEXT: call __truncsfbf2
; RV32-NEXT: fmv.x.w a0, fa0
; RV32-NEXT: sh a0, 0(s0)
; RV32-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
@@ -151,4 +151,4 @@ define void @test_fadd(ptr %p, ptr %q) nounwind {
%r = fadd bfloat %a, %b
store bfloat %r, ptr %p
ret void
-} \ No newline at end of file
+}
diff --git a/llvm/test/CodeGen/RISCV/bfloat-br-fcmp.ll b/llvm/test/CodeGen/RISCV/bfloat-br-fcmp.ll
index 24f26af97770..165aa5f484f7 100644
--- a/llvm/test/CodeGen/RISCV/bfloat-br-fcmp.ll
+++ b/llvm/test/CodeGen/RISCV/bfloat-br-fcmp.ll
@@ -18,7 +18,7 @@ define void @br_fcmp_false(bfloat %a, bfloat %b) nounwind {
; RV32IZFBFMIN-NEXT: .LBB0_2: # %if.else
; RV32IZFBFMIN-NEXT: addi sp, sp, -16
; RV32IZFBFMIN-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IZFBFMIN-NEXT: call abort@plt
+; RV32IZFBFMIN-NEXT: call abort
;
; RV64IZFBFMIN-LABEL: br_fcmp_false:
; RV64IZFBFMIN: # %bb.0:
@@ -29,7 +29,7 @@ define void @br_fcmp_false(bfloat %a, bfloat %b) nounwind {
; RV64IZFBFMIN-NEXT: .LBB0_2: # %if.else
; RV64IZFBFMIN-NEXT: addi sp, sp, -16
; RV64IZFBFMIN-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
-; RV64IZFBFMIN-NEXT: call abort@plt
+; RV64IZFBFMIN-NEXT: call abort
%1 = fcmp false bfloat %a, %b
br i1 %1, label %if.then, label %if.else
if.then:
@@ -51,7 +51,7 @@ define void @br_fcmp_oeq(bfloat %a, bfloat %b) nounwind {
; RV32IZFBFMIN-NEXT: .LBB1_2: # %if.then
; RV32IZFBFMIN-NEXT: addi sp, sp, -16
; RV32IZFBFMIN-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IZFBFMIN-NEXT: call abort@plt
+; RV32IZFBFMIN-NEXT: call abort
;
; RV64IZFBFMIN-LABEL: br_fcmp_oeq:
; RV64IZFBFMIN: # %bb.0:
@@ -64,7 +64,7 @@ define void @br_fcmp_oeq(bfloat %a, bfloat %b) nounwind {
; RV64IZFBFMIN-NEXT: .LBB1_2: # %if.then
; RV64IZFBFMIN-NEXT: addi sp, sp, -16
; RV64IZFBFMIN-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
-; RV64IZFBFMIN-NEXT: call abort@plt
+; RV64IZFBFMIN-NEXT: call abort
%1 = fcmp oeq bfloat %a, %b
br i1 %1, label %if.then, label %if.else
if.else:
@@ -86,7 +86,7 @@ define void @br_fcmp_oeq_alt(bfloat %a, bfloat %b) nounwind {
; RV32IZFBFMIN-NEXT: .LBB2_2: # %if.then
; RV32IZFBFMIN-NEXT: addi sp, sp, -16
; RV32IZFBFMIN-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IZFBFMIN-NEXT: call abort@plt
+; RV32IZFBFMIN-NEXT: call abort
;
; RV64IZFBFMIN-LABEL: br_fcmp_oeq_alt:
; RV64IZFBFMIN: # %bb.0:
@@ -99,7 +99,7 @@ define void @br_fcmp_oeq_alt(bfloat %a, bfloat %b) nounwind {
; RV64IZFBFMIN-NEXT: .LBB2_2: # %if.then
; RV64IZFBFMIN-NEXT: addi sp, sp, -16
; RV64IZFBFMIN-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
-; RV64IZFBFMIN-NEXT: call abort@plt
+; RV64IZFBFMIN-NEXT: call abort
%1 = fcmp oeq bfloat %a, %b
br i1 %1, label %if.then, label %if.else
if.then:
@@ -121,7 +121,7 @@ define void @br_fcmp_ogt(bfloat %a, bfloat %b) nounwind {
; RV32IZFBFMIN-NEXT: .LBB3_2: # %if.then
; RV32IZFBFMIN-NEXT: addi sp, sp, -16
; RV32IZFBFMIN-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IZFBFMIN-NEXT: call abort@plt
+; RV32IZFBFMIN-NEXT: call abort
;
; RV64IZFBFMIN-LABEL: br_fcmp_ogt:
; RV64IZFBFMIN: # %bb.0:
@@ -134,7 +134,7 @@ define void @br_fcmp_ogt(bfloat %a, bfloat %b) nounwind {
; RV64IZFBFMIN-NEXT: .LBB3_2: # %if.then
; RV64IZFBFMIN-NEXT: addi sp, sp, -16
; RV64IZFBFMIN-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
-; RV64IZFBFMIN-NEXT: call abort@plt
+; RV64IZFBFMIN-NEXT: call abort
%1 = fcmp ogt bfloat %a, %b
br i1 %1, label %if.then, label %if.else
if.else:
@@ -156,7 +156,7 @@ define void @br_fcmp_oge(bfloat %a, bfloat %b) nounwind {
; RV32IZFBFMIN-NEXT: .LBB4_2: # %if.then
; RV32IZFBFMIN-NEXT: addi sp, sp, -16
; RV32IZFBFMIN-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IZFBFMIN-NEXT: call abort@plt
+; RV32IZFBFMIN-NEXT: call abort
;
; RV64IZFBFMIN-LABEL: br_fcmp_oge:
; RV64IZFBFMIN: # %bb.0:
@@ -169,7 +169,7 @@ define void @br_fcmp_oge(bfloat %a, bfloat %b) nounwind {
; RV64IZFBFMIN-NEXT: .LBB4_2: # %if.then
; RV64IZFBFMIN-NEXT: addi sp, sp, -16
; RV64IZFBFMIN-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
-; RV64IZFBFMIN-NEXT: call abort@plt
+; RV64IZFBFMIN-NEXT: call abort
%1 = fcmp oge bfloat %a, %b
br i1 %1, label %if.then, label %if.else
if.else:
@@ -191,7 +191,7 @@ define void @br_fcmp_olt(bfloat %a, bfloat %b) nounwind {
; RV32IZFBFMIN-NEXT: .LBB5_2: # %if.then
; RV32IZFBFMIN-NEXT: addi sp, sp, -16
; RV32IZFBFMIN-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IZFBFMIN-NEXT: call abort@plt
+; RV32IZFBFMIN-NEXT: call abort
;
; RV64IZFBFMIN-LABEL: br_fcmp_olt:
; RV64IZFBFMIN: # %bb.0:
@@ -204,7 +204,7 @@ define void @br_fcmp_olt(bfloat %a, bfloat %b) nounwind {
; RV64IZFBFMIN-NEXT: .LBB5_2: # %if.then
; RV64IZFBFMIN-NEXT: addi sp, sp, -16
; RV64IZFBFMIN-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
-; RV64IZFBFMIN-NEXT: call abort@plt
+; RV64IZFBFMIN-NEXT: call abort
%1 = fcmp olt bfloat %a, %b
br i1 %1, label %if.then, label %if.else
if.else:
@@ -226,7 +226,7 @@ define void @br_fcmp_ole(bfloat %a, bfloat %b) nounwind {
; RV32IZFBFMIN-NEXT: .LBB6_2: # %if.then
; RV32IZFBFMIN-NEXT: addi sp, sp, -16
; RV32IZFBFMIN-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IZFBFMIN-NEXT: call abort@plt
+; RV32IZFBFMIN-NEXT: call abort
;
; RV64IZFBFMIN-LABEL: br_fcmp_ole:
; RV64IZFBFMIN: # %bb.0:
@@ -239,7 +239,7 @@ define void @br_fcmp_ole(bfloat %a, bfloat %b) nounwind {
; RV64IZFBFMIN-NEXT: .LBB6_2: # %if.then
; RV64IZFBFMIN-NEXT: addi sp, sp, -16
; RV64IZFBFMIN-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
-; RV64IZFBFMIN-NEXT: call abort@plt
+; RV64IZFBFMIN-NEXT: call abort
%1 = fcmp ole bfloat %a, %b
br i1 %1, label %if.then, label %if.else
if.else:
@@ -263,7 +263,7 @@ define void @br_fcmp_one(bfloat %a, bfloat %b) nounwind {
; RV32IZFBFMIN-NEXT: .LBB7_2: # %if.then
; RV32IZFBFMIN-NEXT: addi sp, sp, -16
; RV32IZFBFMIN-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IZFBFMIN-NEXT: call abort@plt
+; RV32IZFBFMIN-NEXT: call abort
;
; RV64IZFBFMIN-LABEL: br_fcmp_one:
; RV64IZFBFMIN: # %bb.0:
@@ -278,7 +278,7 @@ define void @br_fcmp_one(bfloat %a, bfloat %b) nounwind {
; RV64IZFBFMIN-NEXT: .LBB7_2: # %if.then
; RV64IZFBFMIN-NEXT: addi sp, sp, -16
; RV64IZFBFMIN-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
-; RV64IZFBFMIN-NEXT: call abort@plt
+; RV64IZFBFMIN-NEXT: call abort
%1 = fcmp one bfloat %a, %b
br i1 %1, label %if.then, label %if.else
if.else:
@@ -302,7 +302,7 @@ define void @br_fcmp_ord(bfloat %a, bfloat %b) nounwind {
; RV32IZFBFMIN-NEXT: .LBB8_2: # %if.then
; RV32IZFBFMIN-NEXT: addi sp, sp, -16
; RV32IZFBFMIN-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IZFBFMIN-NEXT: call abort@plt
+; RV32IZFBFMIN-NEXT: call abort
;
; RV64IZFBFMIN-LABEL: br_fcmp_ord:
; RV64IZFBFMIN: # %bb.0:
@@ -317,7 +317,7 @@ define void @br_fcmp_ord(bfloat %a, bfloat %b) nounwind {
; RV64IZFBFMIN-NEXT: .LBB8_2: # %if.then
; RV64IZFBFMIN-NEXT: addi sp, sp, -16
; RV64IZFBFMIN-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
-; RV64IZFBFMIN-NEXT: call abort@plt
+; RV64IZFBFMIN-NEXT: call abort
%1 = fcmp ord bfloat %a, %b
br i1 %1, label %if.then, label %if.else
if.else:
@@ -341,7 +341,7 @@ define void @br_fcmp_ueq(bfloat %a, bfloat %b) nounwind {
; RV32IZFBFMIN-NEXT: .LBB9_2: # %if.then
; RV32IZFBFMIN-NEXT: addi sp, sp, -16
; RV32IZFBFMIN-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IZFBFMIN-NEXT: call abort@plt
+; RV32IZFBFMIN-NEXT: call abort
;
; RV64IZFBFMIN-LABEL: br_fcmp_ueq:
; RV64IZFBFMIN: # %bb.0:
@@ -356,7 +356,7 @@ define void @br_fcmp_ueq(bfloat %a, bfloat %b) nounwind {
; RV64IZFBFMIN-NEXT: .LBB9_2: # %if.then
; RV64IZFBFMIN-NEXT: addi sp, sp, -16
; RV64IZFBFMIN-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
-; RV64IZFBFMIN-NEXT: call abort@plt
+; RV64IZFBFMIN-NEXT: call abort
%1 = fcmp ueq bfloat %a, %b
br i1 %1, label %if.then, label %if.else
if.else:
@@ -378,7 +378,7 @@ define void @br_fcmp_ugt(bfloat %a, bfloat %b) nounwind {
; RV32IZFBFMIN-NEXT: .LBB10_2: # %if.then
; RV32IZFBFMIN-NEXT: addi sp, sp, -16
; RV32IZFBFMIN-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IZFBFMIN-NEXT: call abort@plt
+; RV32IZFBFMIN-NEXT: call abort
;
; RV64IZFBFMIN-LABEL: br_fcmp_ugt:
; RV64IZFBFMIN: # %bb.0:
@@ -391,7 +391,7 @@ define void @br_fcmp_ugt(bfloat %a, bfloat %b) nounwind {
; RV64IZFBFMIN-NEXT: .LBB10_2: # %if.then
; RV64IZFBFMIN-NEXT: addi sp, sp, -16
; RV64IZFBFMIN-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
-; RV64IZFBFMIN-NEXT: call abort@plt
+; RV64IZFBFMIN-NEXT: call abort
%1 = fcmp ugt bfloat %a, %b
br i1 %1, label %if.then, label %if.else
if.else:
@@ -413,7 +413,7 @@ define void @br_fcmp_uge(bfloat %a, bfloat %b) nounwind {
; RV32IZFBFMIN-NEXT: .LBB11_2: # %if.then
; RV32IZFBFMIN-NEXT: addi sp, sp, -16
; RV32IZFBFMIN-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IZFBFMIN-NEXT: call abort@plt
+; RV32IZFBFMIN-NEXT: call abort
;
; RV64IZFBFMIN-LABEL: br_fcmp_uge:
; RV64IZFBFMIN: # %bb.0:
@@ -426,7 +426,7 @@ define void @br_fcmp_uge(bfloat %a, bfloat %b) nounwind {
; RV64IZFBFMIN-NEXT: .LBB11_2: # %if.then
; RV64IZFBFMIN-NEXT: addi sp, sp, -16
; RV64IZFBFMIN-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
-; RV64IZFBFMIN-NEXT: call abort@plt
+; RV64IZFBFMIN-NEXT: call abort
%1 = fcmp uge bfloat %a, %b
br i1 %1, label %if.then, label %if.else
if.else:
@@ -448,7 +448,7 @@ define void @br_fcmp_ult(bfloat %a, bfloat %b) nounwind {
; RV32IZFBFMIN-NEXT: .LBB12_2: # %if.then
; RV32IZFBFMIN-NEXT: addi sp, sp, -16
; RV32IZFBFMIN-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IZFBFMIN-NEXT: call abort@plt
+; RV32IZFBFMIN-NEXT: call abort
;
; RV64IZFBFMIN-LABEL: br_fcmp_ult:
; RV64IZFBFMIN: # %bb.0:
@@ -461,7 +461,7 @@ define void @br_fcmp_ult(bfloat %a, bfloat %b) nounwind {
; RV64IZFBFMIN-NEXT: .LBB12_2: # %if.then
; RV64IZFBFMIN-NEXT: addi sp, sp, -16
; RV64IZFBFMIN-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
-; RV64IZFBFMIN-NEXT: call abort@plt
+; RV64IZFBFMIN-NEXT: call abort
%1 = fcmp ult bfloat %a, %b
br i1 %1, label %if.then, label %if.else
if.else:
@@ -483,7 +483,7 @@ define void @br_fcmp_ule(bfloat %a, bfloat %b) nounwind {
; RV32IZFBFMIN-NEXT: .LBB13_2: # %if.then
; RV32IZFBFMIN-NEXT: addi sp, sp, -16
; RV32IZFBFMIN-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IZFBFMIN-NEXT: call abort@plt
+; RV32IZFBFMIN-NEXT: call abort
;
; RV64IZFBFMIN-LABEL: br_fcmp_ule:
; RV64IZFBFMIN: # %bb.0:
@@ -496,7 +496,7 @@ define void @br_fcmp_ule(bfloat %a, bfloat %b) nounwind {
; RV64IZFBFMIN-NEXT: .LBB13_2: # %if.then
; RV64IZFBFMIN-NEXT: addi sp, sp, -16
; RV64IZFBFMIN-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
-; RV64IZFBFMIN-NEXT: call abort@plt
+; RV64IZFBFMIN-NEXT: call abort
%1 = fcmp ule bfloat %a, %b
br i1 %1, label %if.then, label %if.else
if.else:
@@ -518,7 +518,7 @@ define void @br_fcmp_une(bfloat %a, bfloat %b) nounwind {
; RV32IZFBFMIN-NEXT: .LBB14_2: # %if.then
; RV32IZFBFMIN-NEXT: addi sp, sp, -16
; RV32IZFBFMIN-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IZFBFMIN-NEXT: call abort@plt
+; RV32IZFBFMIN-NEXT: call abort
;
; RV64IZFBFMIN-LABEL: br_fcmp_une:
; RV64IZFBFMIN: # %bb.0:
@@ -531,7 +531,7 @@ define void @br_fcmp_une(bfloat %a, bfloat %b) nounwind {
; RV64IZFBFMIN-NEXT: .LBB14_2: # %if.then
; RV64IZFBFMIN-NEXT: addi sp, sp, -16
; RV64IZFBFMIN-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
-; RV64IZFBFMIN-NEXT: call abort@plt
+; RV64IZFBFMIN-NEXT: call abort
%1 = fcmp une bfloat %a, %b
br i1 %1, label %if.then, label %if.else
if.else:
@@ -555,7 +555,7 @@ define void @br_fcmp_uno(bfloat %a, bfloat %b) nounwind {
; RV32IZFBFMIN-NEXT: .LBB15_2: # %if.then
; RV32IZFBFMIN-NEXT: addi sp, sp, -16
; RV32IZFBFMIN-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IZFBFMIN-NEXT: call abort@plt
+; RV32IZFBFMIN-NEXT: call abort
;
; RV64IZFBFMIN-LABEL: br_fcmp_uno:
; RV64IZFBFMIN: # %bb.0:
@@ -570,7 +570,7 @@ define void @br_fcmp_uno(bfloat %a, bfloat %b) nounwind {
; RV64IZFBFMIN-NEXT: .LBB15_2: # %if.then
; RV64IZFBFMIN-NEXT: addi sp, sp, -16
; RV64IZFBFMIN-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
-; RV64IZFBFMIN-NEXT: call abort@plt
+; RV64IZFBFMIN-NEXT: call abort
%1 = fcmp uno bfloat %a, %b
br i1 %1, label %if.then, label %if.else
if.else:
@@ -590,7 +590,7 @@ define void @br_fcmp_true(bfloat %a, bfloat %b) nounwind {
; RV32IZFBFMIN-NEXT: .LBB16_2: # %if.then
; RV32IZFBFMIN-NEXT: addi sp, sp, -16
; RV32IZFBFMIN-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IZFBFMIN-NEXT: call abort@plt
+; RV32IZFBFMIN-NEXT: call abort
;
; RV64IZFBFMIN-LABEL: br_fcmp_true:
; RV64IZFBFMIN: # %bb.0:
@@ -601,7 +601,7 @@ define void @br_fcmp_true(bfloat %a, bfloat %b) nounwind {
; RV64IZFBFMIN-NEXT: .LBB16_2: # %if.then
; RV64IZFBFMIN-NEXT: addi sp, sp, -16
; RV64IZFBFMIN-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
-; RV64IZFBFMIN-NEXT: call abort@plt
+; RV64IZFBFMIN-NEXT: call abort
%1 = fcmp true bfloat %a, %b
br i1 %1, label %if.then, label %if.else
if.else:
diff --git a/llvm/test/CodeGen/RISCV/bfloat-convert.ll b/llvm/test/CodeGen/RISCV/bfloat-convert.ll
index bfa2c3bb4a8b..d533607ad54e 100644
--- a/llvm/test/CodeGen/RISCV/bfloat-convert.ll
+++ b/llvm/test/CodeGen/RISCV/bfloat-convert.ll
@@ -419,7 +419,7 @@ define i64 @fcvt_l_bf16(bfloat %a) nounwind {
; CHECK32ZFBFMIN-NEXT: addi sp, sp, -16
; CHECK32ZFBFMIN-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; CHECK32ZFBFMIN-NEXT: fcvt.s.bf16 fa0, fa0
-; CHECK32ZFBFMIN-NEXT: call __fixsfdi@plt
+; CHECK32ZFBFMIN-NEXT: call __fixsfdi
; CHECK32ZFBFMIN-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; CHECK32ZFBFMIN-NEXT: addi sp, sp, 16
; CHECK32ZFBFMIN-NEXT: ret
@@ -431,7 +431,7 @@ define i64 @fcvt_l_bf16(bfloat %a) nounwind {
; RV32ID-NEXT: fmv.x.w a0, fa0
; RV32ID-NEXT: slli a0, a0, 16
; RV32ID-NEXT: fmv.w.x fa0, a0
-; RV32ID-NEXT: call __fixsfdi@plt
+; RV32ID-NEXT: call __fixsfdi
; RV32ID-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32ID-NEXT: addi sp, sp, 16
; RV32ID-NEXT: ret
@@ -465,7 +465,7 @@ define i64 @fcvt_l_bf16_sat(bfloat %a) nounwind {
; RV32IZFBFMIN-NEXT: fmv.w.x fa5, a0
; RV32IZFBFMIN-NEXT: fle.s s0, fa5, fs0
; RV32IZFBFMIN-NEXT: fmv.s fa0, fs0
-; RV32IZFBFMIN-NEXT: call __fixsfdi@plt
+; RV32IZFBFMIN-NEXT: call __fixsfdi
; RV32IZFBFMIN-NEXT: lui a4, 524288
; RV32IZFBFMIN-NEXT: lui a2, 524288
; RV32IZFBFMIN-NEXT: beqz s0, .LBB10_2
@@ -504,7 +504,7 @@ define i64 @fcvt_l_bf16_sat(bfloat %a) nounwind {
; R32IDZFBFMIN-NEXT: fmv.w.x fa5, a0
; R32IDZFBFMIN-NEXT: fle.s s0, fa5, fs0
; R32IDZFBFMIN-NEXT: fmv.s fa0, fs0
-; R32IDZFBFMIN-NEXT: call __fixsfdi@plt
+; R32IDZFBFMIN-NEXT: call __fixsfdi
; R32IDZFBFMIN-NEXT: lui a4, 524288
; R32IDZFBFMIN-NEXT: lui a2, 524288
; R32IDZFBFMIN-NEXT: beqz s0, .LBB10_2
@@ -545,7 +545,7 @@ define i64 @fcvt_l_bf16_sat(bfloat %a) nounwind {
; RV32ID-NEXT: fmv.w.x fa5, a0
; RV32ID-NEXT: fle.s s0, fa5, fs0
; RV32ID-NEXT: fmv.s fa0, fs0
-; RV32ID-NEXT: call __fixsfdi@plt
+; RV32ID-NEXT: call __fixsfdi
; RV32ID-NEXT: lui a4, 524288
; RV32ID-NEXT: lui a2, 524288
; RV32ID-NEXT: beqz s0, .LBB10_2
@@ -606,7 +606,7 @@ define i64 @fcvt_lu_bf16(bfloat %a) nounwind {
; CHECK32ZFBFMIN-NEXT: addi sp, sp, -16
; CHECK32ZFBFMIN-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; CHECK32ZFBFMIN-NEXT: fcvt.s.bf16 fa0, fa0
-; CHECK32ZFBFMIN-NEXT: call __fixunssfdi@plt
+; CHECK32ZFBFMIN-NEXT: call __fixunssfdi
; CHECK32ZFBFMIN-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; CHECK32ZFBFMIN-NEXT: addi sp, sp, 16
; CHECK32ZFBFMIN-NEXT: ret
@@ -618,7 +618,7 @@ define i64 @fcvt_lu_bf16(bfloat %a) nounwind {
; RV32ID-NEXT: fmv.x.w a0, fa0
; RV32ID-NEXT: slli a0, a0, 16
; RV32ID-NEXT: fmv.w.x fa0, a0
-; RV32ID-NEXT: call __fixunssfdi@plt
+; RV32ID-NEXT: call __fixunssfdi
; RV32ID-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32ID-NEXT: addi sp, sp, 16
; RV32ID-NEXT: ret
@@ -655,7 +655,7 @@ define i64 @fcvt_lu_bf16_sat(bfloat %a) nounwind {
; CHECK32ZFBFMIN-NEXT: fmv.w.x fa5, zero
; CHECK32ZFBFMIN-NEXT: fle.s a0, fa5, fa0
; CHECK32ZFBFMIN-NEXT: neg s1, a0
-; CHECK32ZFBFMIN-NEXT: call __fixunssfdi@plt
+; CHECK32ZFBFMIN-NEXT: call __fixunssfdi
; CHECK32ZFBFMIN-NEXT: and a0, s1, a0
; CHECK32ZFBFMIN-NEXT: or a0, s0, a0
; CHECK32ZFBFMIN-NEXT: and a1, s1, a1
@@ -682,7 +682,7 @@ define i64 @fcvt_lu_bf16_sat(bfloat %a) nounwind {
; RV32ID-NEXT: fmv.w.x fa5, zero
; RV32ID-NEXT: fle.s a0, fa5, fa0
; RV32ID-NEXT: neg s1, a0
-; RV32ID-NEXT: call __fixunssfdi@plt
+; RV32ID-NEXT: call __fixunssfdi
; RV32ID-NEXT: and a0, s1, a0
; RV32ID-NEXT: or a0, s0, a0
; RV32ID-NEXT: and a1, s1, a1
@@ -736,7 +736,7 @@ define bfloat @fcvt_bf16_si(i16 %a) nounwind {
; RV32ID-NEXT: slli a0, a0, 16
; RV32ID-NEXT: srai a0, a0, 16
; RV32ID-NEXT: fcvt.s.w fa0, a0
-; RV32ID-NEXT: call __truncsfbf2@plt
+; RV32ID-NEXT: call __truncsfbf2
; RV32ID-NEXT: fmv.x.w a0, fa0
; RV32ID-NEXT: lui a1, 1048560
; RV32ID-NEXT: or a0, a0, a1
@@ -760,7 +760,7 @@ define bfloat @fcvt_bf16_si(i16 %a) nounwind {
; RV64ID-NEXT: slli a0, a0, 48
; RV64ID-NEXT: srai a0, a0, 48
; RV64ID-NEXT: fcvt.s.w fa0, a0
-; RV64ID-NEXT: call __truncsfbf2@plt
+; RV64ID-NEXT: call __truncsfbf2
; RV64ID-NEXT: fmv.x.w a0, fa0
; RV64ID-NEXT: lui a1, 1048560
; RV64ID-NEXT: or a0, a0, a1
@@ -784,7 +784,7 @@ define bfloat @fcvt_bf16_si_signext(i16 signext %a) nounwind {
; RV32ID-NEXT: addi sp, sp, -16
; RV32ID-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32ID-NEXT: fcvt.s.w fa0, a0
-; RV32ID-NEXT: call __truncsfbf2@plt
+; RV32ID-NEXT: call __truncsfbf2
; RV32ID-NEXT: fmv.x.w a0, fa0
; RV32ID-NEXT: lui a1, 1048560
; RV32ID-NEXT: or a0, a0, a1
@@ -804,7 +804,7 @@ define bfloat @fcvt_bf16_si_signext(i16 signext %a) nounwind {
; RV64ID-NEXT: addi sp, sp, -16
; RV64ID-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64ID-NEXT: fcvt.s.w fa0, a0
-; RV64ID-NEXT: call __truncsfbf2@plt
+; RV64ID-NEXT: call __truncsfbf2
; RV64ID-NEXT: fmv.x.w a0, fa0
; RV64ID-NEXT: lui a1, 1048560
; RV64ID-NEXT: or a0, a0, a1
@@ -832,7 +832,7 @@ define bfloat @fcvt_bf16_ui(i16 %a) nounwind {
; RV32ID-NEXT: slli a0, a0, 16
; RV32ID-NEXT: srli a0, a0, 16
; RV32ID-NEXT: fcvt.s.wu fa0, a0
-; RV32ID-NEXT: call __truncsfbf2@plt
+; RV32ID-NEXT: call __truncsfbf2
; RV32ID-NEXT: fmv.x.w a0, fa0
; RV32ID-NEXT: lui a1, 1048560
; RV32ID-NEXT: or a0, a0, a1
@@ -856,7 +856,7 @@ define bfloat @fcvt_bf16_ui(i16 %a) nounwind {
; RV64ID-NEXT: slli a0, a0, 48
; RV64ID-NEXT: srli a0, a0, 48
; RV64ID-NEXT: fcvt.s.wu fa0, a0
-; RV64ID-NEXT: call __truncsfbf2@plt
+; RV64ID-NEXT: call __truncsfbf2
; RV64ID-NEXT: fmv.x.w a0, fa0
; RV64ID-NEXT: lui a1, 1048560
; RV64ID-NEXT: or a0, a0, a1
@@ -880,7 +880,7 @@ define bfloat @fcvt_bf16_ui_zeroext(i16 zeroext %a) nounwind {
; RV32ID-NEXT: addi sp, sp, -16
; RV32ID-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32ID-NEXT: fcvt.s.wu fa0, a0
-; RV32ID-NEXT: call __truncsfbf2@plt
+; RV32ID-NEXT: call __truncsfbf2
; RV32ID-NEXT: fmv.x.w a0, fa0
; RV32ID-NEXT: lui a1, 1048560
; RV32ID-NEXT: or a0, a0, a1
@@ -900,7 +900,7 @@ define bfloat @fcvt_bf16_ui_zeroext(i16 zeroext %a) nounwind {
; RV64ID-NEXT: addi sp, sp, -16
; RV64ID-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64ID-NEXT: fcvt.s.wu fa0, a0
-; RV64ID-NEXT: call __truncsfbf2@plt
+; RV64ID-NEXT: call __truncsfbf2
; RV64ID-NEXT: fmv.x.w a0, fa0
; RV64ID-NEXT: lui a1, 1048560
; RV64ID-NEXT: or a0, a0, a1
@@ -924,7 +924,7 @@ define bfloat @fcvt_bf16_w(i32 %a) nounwind {
; RV32ID-NEXT: addi sp, sp, -16
; RV32ID-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32ID-NEXT: fcvt.s.w fa0, a0
-; RV32ID-NEXT: call __truncsfbf2@plt
+; RV32ID-NEXT: call __truncsfbf2
; RV32ID-NEXT: fmv.x.w a0, fa0
; RV32ID-NEXT: lui a1, 1048560
; RV32ID-NEXT: or a0, a0, a1
@@ -945,7 +945,7 @@ define bfloat @fcvt_bf16_w(i32 %a) nounwind {
; RV64ID-NEXT: addi sp, sp, -16
; RV64ID-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64ID-NEXT: fcvt.s.w fa0, a0
-; RV64ID-NEXT: call __truncsfbf2@plt
+; RV64ID-NEXT: call __truncsfbf2
; RV64ID-NEXT: fmv.x.w a0, fa0
; RV64ID-NEXT: lui a1, 1048560
; RV64ID-NEXT: or a0, a0, a1
@@ -971,7 +971,7 @@ define bfloat @fcvt_bf16_w_load(ptr %p) nounwind {
; RV32ID-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32ID-NEXT: lw a0, 0(a0)
; RV32ID-NEXT: fcvt.s.w fa0, a0
-; RV32ID-NEXT: call __truncsfbf2@plt
+; RV32ID-NEXT: call __truncsfbf2
; RV32ID-NEXT: fmv.x.w a0, fa0
; RV32ID-NEXT: lui a1, 1048560
; RV32ID-NEXT: or a0, a0, a1
@@ -993,7 +993,7 @@ define bfloat @fcvt_bf16_w_load(ptr %p) nounwind {
; RV64ID-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64ID-NEXT: lw a0, 0(a0)
; RV64ID-NEXT: fcvt.s.w fa0, a0
-; RV64ID-NEXT: call __truncsfbf2@plt
+; RV64ID-NEXT: call __truncsfbf2
; RV64ID-NEXT: fmv.x.w a0, fa0
; RV64ID-NEXT: lui a1, 1048560
; RV64ID-NEXT: or a0, a0, a1
@@ -1018,7 +1018,7 @@ define bfloat @fcvt_bf16_wu(i32 %a) nounwind {
; RV32ID-NEXT: addi sp, sp, -16
; RV32ID-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32ID-NEXT: fcvt.s.wu fa0, a0
-; RV32ID-NEXT: call __truncsfbf2@plt
+; RV32ID-NEXT: call __truncsfbf2
; RV32ID-NEXT: fmv.x.w a0, fa0
; RV32ID-NEXT: lui a1, 1048560
; RV32ID-NEXT: or a0, a0, a1
@@ -1040,7 +1040,7 @@ define bfloat @fcvt_bf16_wu(i32 %a) nounwind {
; RV64ID-NEXT: addi sp, sp, -16
; RV64ID-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64ID-NEXT: fcvt.s.wu fa0, a0
-; RV64ID-NEXT: call __truncsfbf2@plt
+; RV64ID-NEXT: call __truncsfbf2
; RV64ID-NEXT: fmv.x.w a0, fa0
; RV64ID-NEXT: lui a1, 1048560
; RV64ID-NEXT: or a0, a0, a1
@@ -1066,7 +1066,7 @@ define bfloat @fcvt_bf16_wu_load(ptr %p) nounwind {
; RV32ID-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32ID-NEXT: lw a0, 0(a0)
; RV32ID-NEXT: fcvt.s.wu fa0, a0
-; RV32ID-NEXT: call __truncsfbf2@plt
+; RV32ID-NEXT: call __truncsfbf2
; RV32ID-NEXT: fmv.x.w a0, fa0
; RV32ID-NEXT: lui a1, 1048560
; RV32ID-NEXT: or a0, a0, a1
@@ -1088,7 +1088,7 @@ define bfloat @fcvt_bf16_wu_load(ptr %p) nounwind {
; RV64ID-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64ID-NEXT: lwu a0, 0(a0)
; RV64ID-NEXT: fcvt.s.wu fa0, a0
-; RV64ID-NEXT: call __truncsfbf2@plt
+; RV64ID-NEXT: call __truncsfbf2
; RV64ID-NEXT: fmv.x.w a0, fa0
; RV64ID-NEXT: lui a1, 1048560
; RV64ID-NEXT: or a0, a0, a1
@@ -1123,7 +1123,7 @@ define bfloat @fcvt_bf16_s(float %a) nounwind {
; RV32ID: # %bb.0:
; RV32ID-NEXT: addi sp, sp, -16
; RV32ID-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32ID-NEXT: call __truncsfbf2@plt
+; RV32ID-NEXT: call __truncsfbf2
; RV32ID-NEXT: fmv.x.w a0, fa0
; RV32ID-NEXT: lui a1, 1048560
; RV32ID-NEXT: or a0, a0, a1
@@ -1141,7 +1141,7 @@ define bfloat @fcvt_bf16_s(float %a) nounwind {
; RV64ID: # %bb.0:
; RV64ID-NEXT: addi sp, sp, -16
; RV64ID-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
-; RV64ID-NEXT: call __truncsfbf2@plt
+; RV64ID-NEXT: call __truncsfbf2
; RV64ID-NEXT: fmv.x.w a0, fa0
; RV64ID-NEXT: lui a1, 1048560
; RV64ID-NEXT: or a0, a0, a1
@@ -1186,7 +1186,7 @@ define bfloat @fcvt_bf16_d(double %a) nounwind {
; RV32IZFBFMIN: # %bb.0:
; RV32IZFBFMIN-NEXT: addi sp, sp, -16
; RV32IZFBFMIN-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IZFBFMIN-NEXT: call __truncdfbf2@plt
+; RV32IZFBFMIN-NEXT: call __truncdfbf2
; RV32IZFBFMIN-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32IZFBFMIN-NEXT: addi sp, sp, 16
; RV32IZFBFMIN-NEXT: ret
@@ -1201,7 +1201,7 @@ define bfloat @fcvt_bf16_d(double %a) nounwind {
; RV32ID: # %bb.0:
; RV32ID-NEXT: addi sp, sp, -16
; RV32ID-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32ID-NEXT: call __truncdfbf2@plt
+; RV32ID-NEXT: call __truncdfbf2
; RV32ID-NEXT: fmv.x.w a0, fa0
; RV32ID-NEXT: lui a1, 1048560
; RV32ID-NEXT: or a0, a0, a1
@@ -1214,7 +1214,7 @@ define bfloat @fcvt_bf16_d(double %a) nounwind {
; RV64IZFBFMIN: # %bb.0:
; RV64IZFBFMIN-NEXT: addi sp, sp, -16
; RV64IZFBFMIN-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
-; RV64IZFBFMIN-NEXT: call __truncdfbf2@plt
+; RV64IZFBFMIN-NEXT: call __truncdfbf2
; RV64IZFBFMIN-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64IZFBFMIN-NEXT: addi sp, sp, 16
; RV64IZFBFMIN-NEXT: ret
@@ -1229,7 +1229,7 @@ define bfloat @fcvt_bf16_d(double %a) nounwind {
; RV64ID: # %bb.0:
; RV64ID-NEXT: addi sp, sp, -16
; RV64ID-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
-; RV64ID-NEXT: call __truncdfbf2@plt
+; RV64ID-NEXT: call __truncdfbf2
; RV64ID-NEXT: fmv.x.w a0, fa0
; RV64ID-NEXT: lui a1, 1048560
; RV64ID-NEXT: or a0, a0, a1
@@ -1247,7 +1247,7 @@ define double @fcvt_d_bf16(bfloat %a) nounwind {
; RV32IZFBFMIN-NEXT: addi sp, sp, -16
; RV32IZFBFMIN-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32IZFBFMIN-NEXT: fcvt.s.bf16 fa0, fa0
-; RV32IZFBFMIN-NEXT: call __extendsfdf2@plt
+; RV32IZFBFMIN-NEXT: call __extendsfdf2
; RV32IZFBFMIN-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32IZFBFMIN-NEXT: addi sp, sp, 16
; RV32IZFBFMIN-NEXT: ret
@@ -1271,7 +1271,7 @@ define double @fcvt_d_bf16(bfloat %a) nounwind {
; RV64IZFBFMIN-NEXT: addi sp, sp, -16
; RV64IZFBFMIN-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64IZFBFMIN-NEXT: fcvt.s.bf16 fa0, fa0
-; RV64IZFBFMIN-NEXT: call __extendsfdf2@plt
+; RV64IZFBFMIN-NEXT: call __extendsfdf2
; RV64IZFBFMIN-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64IZFBFMIN-NEXT: addi sp, sp, 16
; RV64IZFBFMIN-NEXT: ret
@@ -1363,7 +1363,7 @@ define signext i32 @fcvt_bf16_w_demanded_bits(i32 signext %0, ptr %1) nounwind {
; RV32ID-NEXT: mv s0, a1
; RV32ID-NEXT: addi s1, a0, 1
; RV32ID-NEXT: fcvt.s.w fa0, s1
-; RV32ID-NEXT: call __truncsfbf2@plt
+; RV32ID-NEXT: call __truncsfbf2
; RV32ID-NEXT: fmv.x.w a0, fa0
; RV32ID-NEXT: sh a0, 0(s0)
; RV32ID-NEXT: mv a0, s1
@@ -1390,7 +1390,7 @@ define signext i32 @fcvt_bf16_w_demanded_bits(i32 signext %0, ptr %1) nounwind {
; RV64ID-NEXT: mv s0, a1
; RV64ID-NEXT: addiw s1, a0, 1
; RV64ID-NEXT: fcvt.s.w fa0, s1
-; RV64ID-NEXT: call __truncsfbf2@plt
+; RV64ID-NEXT: call __truncsfbf2
; RV64ID-NEXT: fmv.x.w a0, fa0
; RV64ID-NEXT: sh a0, 0(s0)
; RV64ID-NEXT: mv a0, s1
@@ -1423,7 +1423,7 @@ define signext i32 @fcvt_bf16_wu_demanded_bits(i32 signext %0, ptr %1) nounwind
; RV32ID-NEXT: mv s0, a1
; RV32ID-NEXT: addi s1, a0, 1
; RV32ID-NEXT: fcvt.s.wu fa0, s1
-; RV32ID-NEXT: call __truncsfbf2@plt
+; RV32ID-NEXT: call __truncsfbf2
; RV32ID-NEXT: fmv.x.w a0, fa0
; RV32ID-NEXT: sh a0, 0(s0)
; RV32ID-NEXT: mv a0, s1
@@ -1452,7 +1452,7 @@ define signext i32 @fcvt_bf16_wu_demanded_bits(i32 signext %0, ptr %1) nounwind
; RV64ID-NEXT: mv s0, a1
; RV64ID-NEXT: addiw s1, a0, 1
; RV64ID-NEXT: fcvt.s.wu fa0, s1
-; RV64ID-NEXT: call __truncsfbf2@plt
+; RV64ID-NEXT: call __truncsfbf2
; RV64ID-NEXT: fmv.x.w a0, fa0
; RV64ID-NEXT: sh a0, 0(s0)
; RV64ID-NEXT: mv a0, s1
diff --git a/llvm/test/CodeGen/RISCV/bfloat-frem.ll b/llvm/test/CodeGen/RISCV/bfloat-frem.ll
index fd6db9dfc3f7..ac8b99d1ce6d 100644
--- a/llvm/test/CodeGen/RISCV/bfloat-frem.ll
+++ b/llvm/test/CodeGen/RISCV/bfloat-frem.ll
@@ -11,7 +11,7 @@ define bfloat @frem_bf16(bfloat %a, bfloat %b) nounwind {
; RV32IZFBFMIN-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32IZFBFMIN-NEXT: fcvt.s.bf16 fa0, fa0
; RV32IZFBFMIN-NEXT: fcvt.s.bf16 fa1, fa1
-; RV32IZFBFMIN-NEXT: call fmodf@plt
+; RV32IZFBFMIN-NEXT: call fmodf
; RV32IZFBFMIN-NEXT: fcvt.bf16.s fa0, fa0
; RV32IZFBFMIN-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32IZFBFMIN-NEXT: addi sp, sp, 16
@@ -23,7 +23,7 @@ define bfloat @frem_bf16(bfloat %a, bfloat %b) nounwind {
; RV64IZFBFMIN-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64IZFBFMIN-NEXT: fcvt.s.bf16 fa0, fa0
; RV64IZFBFMIN-NEXT: fcvt.s.bf16 fa1, fa1
-; RV64IZFBFMIN-NEXT: call fmodf@plt
+; RV64IZFBFMIN-NEXT: call fmodf
; RV64IZFBFMIN-NEXT: fcvt.bf16.s fa0, fa0
; RV64IZFBFMIN-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64IZFBFMIN-NEXT: addi sp, sp, 16
diff --git a/llvm/test/CodeGen/RISCV/bfloat-mem.ll b/llvm/test/CodeGen/RISCV/bfloat-mem.ll
index 1119611b8212..4b6c0c29d660 100644
--- a/llvm/test/CodeGen/RISCV/bfloat-mem.ll
+++ b/llvm/test/CodeGen/RISCV/bfloat-mem.ll
@@ -109,7 +109,7 @@ define bfloat @flh_stack(bfloat %a) nounwind {
; RV32IZFBFMIN-NEXT: fsw fs0, 8(sp) # 4-byte Folded Spill
; RV32IZFBFMIN-NEXT: fmv.s fs0, fa0
; RV32IZFBFMIN-NEXT: addi a0, sp, 4
-; RV32IZFBFMIN-NEXT: call notdead@plt
+; RV32IZFBFMIN-NEXT: call notdead
; RV32IZFBFMIN-NEXT: flh fa5, 4(sp)
; RV32IZFBFMIN-NEXT: fcvt.s.bf16 fa4, fs0
; RV32IZFBFMIN-NEXT: fcvt.s.bf16 fa5, fa5
@@ -127,7 +127,7 @@ define bfloat @flh_stack(bfloat %a) nounwind {
; RV64IZFBFMIN-NEXT: fsw fs0, 4(sp) # 4-byte Folded Spill
; RV64IZFBFMIN-NEXT: fmv.s fs0, fa0
; RV64IZFBFMIN-NEXT: mv a0, sp
-; RV64IZFBFMIN-NEXT: call notdead@plt
+; RV64IZFBFMIN-NEXT: call notdead
; RV64IZFBFMIN-NEXT: flh fa5, 0(sp)
; RV64IZFBFMIN-NEXT: fcvt.s.bf16 fa4, fs0
; RV64IZFBFMIN-NEXT: fcvt.s.bf16 fa5, fa5
@@ -155,7 +155,7 @@ define dso_local void @fsh_stack(bfloat %a, bfloat %b) nounwind {
; RV32IZFBFMIN-NEXT: fcvt.bf16.s fa5, fa5
; RV32IZFBFMIN-NEXT: fsh fa5, 8(sp)
; RV32IZFBFMIN-NEXT: addi a0, sp, 8
-; RV32IZFBFMIN-NEXT: call notdead@plt
+; RV32IZFBFMIN-NEXT: call notdead
; RV32IZFBFMIN-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32IZFBFMIN-NEXT: addi sp, sp, 16
; RV32IZFBFMIN-NEXT: ret
@@ -170,7 +170,7 @@ define dso_local void @fsh_stack(bfloat %a, bfloat %b) nounwind {
; RV64IZFBFMIN-NEXT: fcvt.bf16.s fa5, fa5
; RV64IZFBFMIN-NEXT: fsh fa5, 4(sp)
; RV64IZFBFMIN-NEXT: addi a0, sp, 4
-; RV64IZFBFMIN-NEXT: call notdead@plt
+; RV64IZFBFMIN-NEXT: call notdead
; RV64IZFBFMIN-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64IZFBFMIN-NEXT: addi sp, sp, 16
; RV64IZFBFMIN-NEXT: ret
diff --git a/llvm/test/CodeGen/RISCV/bfloat.ll b/llvm/test/CodeGen/RISCV/bfloat.ll
index d62f35388123..9dc8ce6be1ea 100644
--- a/llvm/test/CodeGen/RISCV/bfloat.ll
+++ b/llvm/test/CodeGen/RISCV/bfloat.ll
@@ -11,7 +11,7 @@ define bfloat @float_to_bfloat(float %a) nounwind {
; RV32I-ILP32: # %bb.0:
; RV32I-ILP32-NEXT: addi sp, sp, -16
; RV32I-ILP32-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-ILP32-NEXT: call __truncsfbf2@plt
+; RV32I-ILP32-NEXT: call __truncsfbf2
; RV32I-ILP32-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-ILP32-NEXT: addi sp, sp, 16
; RV32I-ILP32-NEXT: ret
@@ -20,7 +20,7 @@ define bfloat @float_to_bfloat(float %a) nounwind {
; RV64I-LP64: # %bb.0:
; RV64I-LP64-NEXT: addi sp, sp, -16
; RV64I-LP64-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-LP64-NEXT: call __truncsfbf2@plt
+; RV64I-LP64-NEXT: call __truncsfbf2
; RV64I-LP64-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-LP64-NEXT: addi sp, sp, 16
; RV64I-LP64-NEXT: ret
@@ -29,7 +29,7 @@ define bfloat @float_to_bfloat(float %a) nounwind {
; RV32ID-ILP32: # %bb.0:
; RV32ID-ILP32-NEXT: addi sp, sp, -16
; RV32ID-ILP32-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32ID-ILP32-NEXT: call __truncsfbf2@plt
+; RV32ID-ILP32-NEXT: call __truncsfbf2
; RV32ID-ILP32-NEXT: lui a1, 1048560
; RV32ID-ILP32-NEXT: or a0, a0, a1
; RV32ID-ILP32-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
@@ -40,7 +40,7 @@ define bfloat @float_to_bfloat(float %a) nounwind {
; RV64ID-LP64: # %bb.0:
; RV64ID-LP64-NEXT: addi sp, sp, -16
; RV64ID-LP64-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
-; RV64ID-LP64-NEXT: call __truncsfbf2@plt
+; RV64ID-LP64-NEXT: call __truncsfbf2
; RV64ID-LP64-NEXT: lui a1, 1048560
; RV64ID-LP64-NEXT: or a0, a0, a1
; RV64ID-LP64-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
@@ -51,7 +51,7 @@ define bfloat @float_to_bfloat(float %a) nounwind {
; RV32ID-ILP32D: # %bb.0:
; RV32ID-ILP32D-NEXT: addi sp, sp, -16
; RV32ID-ILP32D-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32ID-ILP32D-NEXT: call __truncsfbf2@plt
+; RV32ID-ILP32D-NEXT: call __truncsfbf2
; RV32ID-ILP32D-NEXT: fmv.x.w a0, fa0
; RV32ID-ILP32D-NEXT: lui a1, 1048560
; RV32ID-ILP32D-NEXT: or a0, a0, a1
@@ -64,7 +64,7 @@ define bfloat @float_to_bfloat(float %a) nounwind {
; RV64ID-LP64D: # %bb.0:
; RV64ID-LP64D-NEXT: addi sp, sp, -16
; RV64ID-LP64D-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
-; RV64ID-LP64D-NEXT: call __truncsfbf2@plt
+; RV64ID-LP64D-NEXT: call __truncsfbf2
; RV64ID-LP64D-NEXT: fmv.x.w a0, fa0
; RV64ID-LP64D-NEXT: lui a1, 1048560
; RV64ID-LP64D-NEXT: or a0, a0, a1
@@ -81,7 +81,7 @@ define bfloat @double_to_bfloat(double %a) nounwind {
; RV32I-ILP32: # %bb.0:
; RV32I-ILP32-NEXT: addi sp, sp, -16
; RV32I-ILP32-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-ILP32-NEXT: call __truncdfbf2@plt
+; RV32I-ILP32-NEXT: call __truncdfbf2
; RV32I-ILP32-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-ILP32-NEXT: addi sp, sp, 16
; RV32I-ILP32-NEXT: ret
@@ -90,7 +90,7 @@ define bfloat @double_to_bfloat(double %a) nounwind {
; RV64I-LP64: # %bb.0:
; RV64I-LP64-NEXT: addi sp, sp, -16
; RV64I-LP64-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-LP64-NEXT: call __truncdfbf2@plt
+; RV64I-LP64-NEXT: call __truncdfbf2
; RV64I-LP64-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-LP64-NEXT: addi sp, sp, 16
; RV64I-LP64-NEXT: ret
@@ -99,7 +99,7 @@ define bfloat @double_to_bfloat(double %a) nounwind {
; RV32ID-ILP32: # %bb.0:
; RV32ID-ILP32-NEXT: addi sp, sp, -16
; RV32ID-ILP32-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32ID-ILP32-NEXT: call __truncdfbf2@plt
+; RV32ID-ILP32-NEXT: call __truncdfbf2
; RV32ID-ILP32-NEXT: lui a1, 1048560
; RV32ID-ILP32-NEXT: or a0, a0, a1
; RV32ID-ILP32-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
@@ -110,7 +110,7 @@ define bfloat @double_to_bfloat(double %a) nounwind {
; RV64ID-LP64: # %bb.0:
; RV64ID-LP64-NEXT: addi sp, sp, -16
; RV64ID-LP64-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
-; RV64ID-LP64-NEXT: call __truncdfbf2@plt
+; RV64ID-LP64-NEXT: call __truncdfbf2
; RV64ID-LP64-NEXT: lui a1, 1048560
; RV64ID-LP64-NEXT: or a0, a0, a1
; RV64ID-LP64-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
@@ -121,7 +121,7 @@ define bfloat @double_to_bfloat(double %a) nounwind {
; RV32ID-ILP32D: # %bb.0:
; RV32ID-ILP32D-NEXT: addi sp, sp, -16
; RV32ID-ILP32D-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32ID-ILP32D-NEXT: call __truncdfbf2@plt
+; RV32ID-ILP32D-NEXT: call __truncdfbf2
; RV32ID-ILP32D-NEXT: fmv.x.w a0, fa0
; RV32ID-ILP32D-NEXT: lui a1, 1048560
; RV32ID-ILP32D-NEXT: or a0, a0, a1
@@ -134,7 +134,7 @@ define bfloat @double_to_bfloat(double %a) nounwind {
; RV64ID-LP64D: # %bb.0:
; RV64ID-LP64D-NEXT: addi sp, sp, -16
; RV64ID-LP64D-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
-; RV64ID-LP64D-NEXT: call __truncdfbf2@plt
+; RV64ID-LP64D-NEXT: call __truncdfbf2
; RV64ID-LP64D-NEXT: fmv.x.w a0, fa0
; RV64ID-LP64D-NEXT: lui a1, 1048560
; RV64ID-LP64D-NEXT: or a0, a0, a1
@@ -190,7 +190,7 @@ define double @bfloat_to_double(bfloat %a) nounwind {
; RV32I-ILP32-NEXT: addi sp, sp, -16
; RV32I-ILP32-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-ILP32-NEXT: slli a0, a0, 16
-; RV32I-ILP32-NEXT: call __extendsfdf2@plt
+; RV32I-ILP32-NEXT: call __extendsfdf2
; RV32I-ILP32-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-ILP32-NEXT: addi sp, sp, 16
; RV32I-ILP32-NEXT: ret
@@ -200,7 +200,7 @@ define double @bfloat_to_double(bfloat %a) nounwind {
; RV64I-LP64-NEXT: addi sp, sp, -16
; RV64I-LP64-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-LP64-NEXT: slliw a0, a0, 16
-; RV64I-LP64-NEXT: call __extendsfdf2@plt
+; RV64I-LP64-NEXT: call __extendsfdf2
; RV64I-LP64-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-LP64-NEXT: addi sp, sp, 16
; RV64I-LP64-NEXT: ret
@@ -319,8 +319,8 @@ define bfloat @bfloat_add(bfloat %a, bfloat %b) nounwind {
; RV32I-ILP32-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-ILP32-NEXT: slli a0, a0, 16
; RV32I-ILP32-NEXT: slli a1, a1, 16
-; RV32I-ILP32-NEXT: call __addsf3@plt
-; RV32I-ILP32-NEXT: call __truncsfbf2@plt
+; RV32I-ILP32-NEXT: call __addsf3
+; RV32I-ILP32-NEXT: call __truncsfbf2
; RV32I-ILP32-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-ILP32-NEXT: addi sp, sp, 16
; RV32I-ILP32-NEXT: ret
@@ -331,8 +331,8 @@ define bfloat @bfloat_add(bfloat %a, bfloat %b) nounwind {
; RV64I-LP64-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-LP64-NEXT: slliw a0, a0, 16
; RV64I-LP64-NEXT: slliw a1, a1, 16
-; RV64I-LP64-NEXT: call __addsf3@plt
-; RV64I-LP64-NEXT: call __truncsfbf2@plt
+; RV64I-LP64-NEXT: call __addsf3
+; RV64I-LP64-NEXT: call __truncsfbf2
; RV64I-LP64-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-LP64-NEXT: addi sp, sp, 16
; RV64I-LP64-NEXT: ret
@@ -347,7 +347,7 @@ define bfloat @bfloat_add(bfloat %a, bfloat %b) nounwind {
; RV32ID-ILP32-NEXT: fmv.w.x fa4, a0
; RV32ID-ILP32-NEXT: fadd.s fa5, fa4, fa5
; RV32ID-ILP32-NEXT: fmv.x.w a0, fa5
-; RV32ID-ILP32-NEXT: call __truncsfbf2@plt
+; RV32ID-ILP32-NEXT: call __truncsfbf2
; RV32ID-ILP32-NEXT: lui a1, 1048560
; RV32ID-ILP32-NEXT: or a0, a0, a1
; RV32ID-ILP32-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
@@ -364,7 +364,7 @@ define bfloat @bfloat_add(bfloat %a, bfloat %b) nounwind {
; RV64ID-LP64-NEXT: fmv.w.x fa4, a0
; RV64ID-LP64-NEXT: fadd.s fa5, fa4, fa5
; RV64ID-LP64-NEXT: fmv.x.w a0, fa5
-; RV64ID-LP64-NEXT: call __truncsfbf2@plt
+; RV64ID-LP64-NEXT: call __truncsfbf2
; RV64ID-LP64-NEXT: lui a1, 1048560
; RV64ID-LP64-NEXT: or a0, a0, a1
; RV64ID-LP64-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
@@ -382,7 +382,7 @@ define bfloat @bfloat_add(bfloat %a, bfloat %b) nounwind {
; RV32ID-ILP32D-NEXT: slli a0, a0, 16
; RV32ID-ILP32D-NEXT: fmv.w.x fa4, a0
; RV32ID-ILP32D-NEXT: fadd.s fa0, fa4, fa5
-; RV32ID-ILP32D-NEXT: call __truncsfbf2@plt
+; RV32ID-ILP32D-NEXT: call __truncsfbf2
; RV32ID-ILP32D-NEXT: fmv.x.w a0, fa0
; RV32ID-ILP32D-NEXT: lui a1, 1048560
; RV32ID-ILP32D-NEXT: or a0, a0, a1
@@ -402,7 +402,7 @@ define bfloat @bfloat_add(bfloat %a, bfloat %b) nounwind {
; RV64ID-LP64D-NEXT: slli a0, a0, 16
; RV64ID-LP64D-NEXT: fmv.w.x fa4, a0
; RV64ID-LP64D-NEXT: fadd.s fa0, fa4, fa5
-; RV64ID-LP64D-NEXT: call __truncsfbf2@plt
+; RV64ID-LP64D-NEXT: call __truncsfbf2
; RV64ID-LP64D-NEXT: fmv.x.w a0, fa0
; RV64ID-LP64D-NEXT: lui a1, 1048560
; RV64ID-LP64D-NEXT: or a0, a0, a1
@@ -423,8 +423,8 @@ define bfloat @bfloat_load(ptr %a) nounwind {
; RV32I-ILP32-NEXT: lh a2, 6(a0)
; RV32I-ILP32-NEXT: slli a0, a1, 16
; RV32I-ILP32-NEXT: slli a1, a2, 16
-; RV32I-ILP32-NEXT: call __addsf3@plt
-; RV32I-ILP32-NEXT: call __truncsfbf2@plt
+; RV32I-ILP32-NEXT: call __addsf3
+; RV32I-ILP32-NEXT: call __truncsfbf2
; RV32I-ILP32-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-ILP32-NEXT: addi sp, sp, 16
; RV32I-ILP32-NEXT: ret
@@ -437,8 +437,8 @@ define bfloat @bfloat_load(ptr %a) nounwind {
; RV64I-LP64-NEXT: lh a2, 6(a0)
; RV64I-LP64-NEXT: slliw a0, a1, 16
; RV64I-LP64-NEXT: slliw a1, a2, 16
-; RV64I-LP64-NEXT: call __addsf3@plt
-; RV64I-LP64-NEXT: call __truncsfbf2@plt
+; RV64I-LP64-NEXT: call __addsf3
+; RV64I-LP64-NEXT: call __truncsfbf2
; RV64I-LP64-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-LP64-NEXT: addi sp, sp, 16
; RV64I-LP64-NEXT: ret
@@ -455,7 +455,7 @@ define bfloat @bfloat_load(ptr %a) nounwind {
; RV32ID-ILP32-NEXT: fmv.w.x fa4, a0
; RV32ID-ILP32-NEXT: fadd.s fa5, fa4, fa5
; RV32ID-ILP32-NEXT: fmv.x.w a0, fa5
-; RV32ID-ILP32-NEXT: call __truncsfbf2@plt
+; RV32ID-ILP32-NEXT: call __truncsfbf2
; RV32ID-ILP32-NEXT: lui a1, 1048560
; RV32ID-ILP32-NEXT: or a0, a0, a1
; RV32ID-ILP32-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
@@ -474,7 +474,7 @@ define bfloat @bfloat_load(ptr %a) nounwind {
; RV64ID-LP64-NEXT: fmv.w.x fa4, a0
; RV64ID-LP64-NEXT: fadd.s fa5, fa4, fa5
; RV64ID-LP64-NEXT: fmv.x.w a0, fa5
-; RV64ID-LP64-NEXT: call __truncsfbf2@plt
+; RV64ID-LP64-NEXT: call __truncsfbf2
; RV64ID-LP64-NEXT: lui a1, 1048560
; RV64ID-LP64-NEXT: or a0, a0, a1
; RV64ID-LP64-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
@@ -492,7 +492,7 @@ define bfloat @bfloat_load(ptr %a) nounwind {
; RV32ID-ILP32D-NEXT: slli a0, a0, 16
; RV32ID-ILP32D-NEXT: fmv.w.x fa4, a0
; RV32ID-ILP32D-NEXT: fadd.s fa0, fa4, fa5
-; RV32ID-ILP32D-NEXT: call __truncsfbf2@plt
+; RV32ID-ILP32D-NEXT: call __truncsfbf2
; RV32ID-ILP32D-NEXT: fmv.x.w a0, fa0
; RV32ID-ILP32D-NEXT: lui a1, 1048560
; RV32ID-ILP32D-NEXT: or a0, a0, a1
@@ -512,7 +512,7 @@ define bfloat @bfloat_load(ptr %a) nounwind {
; RV64ID-LP64D-NEXT: slli a0, a0, 16
; RV64ID-LP64D-NEXT: fmv.w.x fa4, a0
; RV64ID-LP64D-NEXT: fadd.s fa0, fa4, fa5
-; RV64ID-LP64D-NEXT: call __truncsfbf2@plt
+; RV64ID-LP64D-NEXT: call __truncsfbf2
; RV64ID-LP64D-NEXT: fmv.x.w a0, fa0
; RV64ID-LP64D-NEXT: lui a1, 1048560
; RV64ID-LP64D-NEXT: or a0, a0, a1
@@ -536,8 +536,8 @@ define void @bfloat_store(ptr %a, bfloat %b, bfloat %c) nounwind {
; RV32I-ILP32-NEXT: mv s0, a0
; RV32I-ILP32-NEXT: slli a0, a1, 16
; RV32I-ILP32-NEXT: slli a1, a2, 16
-; RV32I-ILP32-NEXT: call __addsf3@plt
-; RV32I-ILP32-NEXT: call __truncsfbf2@plt
+; RV32I-ILP32-NEXT: call __addsf3
+; RV32I-ILP32-NEXT: call __truncsfbf2
; RV32I-ILP32-NEXT: sh a0, 0(s0)
; RV32I-ILP32-NEXT: sh a0, 16(s0)
; RV32I-ILP32-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
@@ -553,8 +553,8 @@ define void @bfloat_store(ptr %a, bfloat %b, bfloat %c) nounwind {
; RV64I-LP64-NEXT: mv s0, a0
; RV64I-LP64-NEXT: slliw a0, a1, 16
; RV64I-LP64-NEXT: slliw a1, a2, 16
-; RV64I-LP64-NEXT: call __addsf3@plt
-; RV64I-LP64-NEXT: call __truncsfbf2@plt
+; RV64I-LP64-NEXT: call __addsf3
+; RV64I-LP64-NEXT: call __truncsfbf2
; RV64I-LP64-NEXT: sh a0, 0(s0)
; RV64I-LP64-NEXT: sh a0, 16(s0)
; RV64I-LP64-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
@@ -574,7 +574,7 @@ define void @bfloat_store(ptr %a, bfloat %b, bfloat %c) nounwind {
; RV32ID-ILP32-NEXT: fmv.w.x fa4, a1
; RV32ID-ILP32-NEXT: fadd.s fa5, fa4, fa5
; RV32ID-ILP32-NEXT: fmv.x.w a0, fa5
-; RV32ID-ILP32-NEXT: call __truncsfbf2@plt
+; RV32ID-ILP32-NEXT: call __truncsfbf2
; RV32ID-ILP32-NEXT: sh a0, 0(s0)
; RV32ID-ILP32-NEXT: sh a0, 16(s0)
; RV32ID-ILP32-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
@@ -594,7 +594,7 @@ define void @bfloat_store(ptr %a, bfloat %b, bfloat %c) nounwind {
; RV64ID-LP64-NEXT: fmv.w.x fa4, a1
; RV64ID-LP64-NEXT: fadd.s fa5, fa4, fa5
; RV64ID-LP64-NEXT: fmv.x.w a0, fa5
-; RV64ID-LP64-NEXT: call __truncsfbf2@plt
+; RV64ID-LP64-NEXT: call __truncsfbf2
; RV64ID-LP64-NEXT: sh a0, 0(s0)
; RV64ID-LP64-NEXT: sh a0, 16(s0)
; RV64ID-LP64-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
@@ -615,7 +615,7 @@ define void @bfloat_store(ptr %a, bfloat %b, bfloat %c) nounwind {
; RV32ID-ILP32D-NEXT: slli a0, a0, 16
; RV32ID-ILP32D-NEXT: fmv.w.x fa4, a0
; RV32ID-ILP32D-NEXT: fadd.s fa0, fa4, fa5
-; RV32ID-ILP32D-NEXT: call __truncsfbf2@plt
+; RV32ID-ILP32D-NEXT: call __truncsfbf2
; RV32ID-ILP32D-NEXT: fmv.x.w a0, fa0
; RV32ID-ILP32D-NEXT: sh a0, 0(s0)
; RV32ID-ILP32D-NEXT: sh a0, 16(s0)
@@ -637,7 +637,7 @@ define void @bfloat_store(ptr %a, bfloat %b, bfloat %c) nounwind {
; RV64ID-LP64D-NEXT: slli a0, a0, 16
; RV64ID-LP64D-NEXT: fmv.w.x fa4, a0
; RV64ID-LP64D-NEXT: fadd.s fa0, fa4, fa5
-; RV64ID-LP64D-NEXT: call __truncsfbf2@plt
+; RV64ID-LP64D-NEXT: call __truncsfbf2
; RV64ID-LP64D-NEXT: fmv.x.w a0, fa0
; RV64ID-LP64D-NEXT: sh a0, 0(s0)
; RV64ID-LP64D-NEXT: sh a0, 16(s0)
diff --git a/llvm/test/CodeGen/RISCV/bittest.ll b/llvm/test/CodeGen/RISCV/bittest.ll
index a05c518bbf3a..d280e5ee46b7 100644
--- a/llvm/test/CodeGen/RISCV/bittest.ll
+++ b/llvm/test/CodeGen/RISCV/bittest.ll
@@ -452,7 +452,7 @@ define void @bittest_switch(i32 signext %0) {
; RV32I-NEXT: andi a0, a0, 1
; RV32I-NEXT: beqz a0, .LBB14_3
; RV32I-NEXT: # %bb.2:
-; RV32I-NEXT: tail bar@plt
+; RV32I-NEXT: tail bar
; RV32I-NEXT: .LBB14_3:
; RV32I-NEXT: ret
;
@@ -468,7 +468,7 @@ define void @bittest_switch(i32 signext %0) {
; RV64I-NEXT: andi a0, a0, 1
; RV64I-NEXT: beqz a0, .LBB14_3
; RV64I-NEXT: # %bb.2:
-; RV64I-NEXT: tail bar@plt
+; RV64I-NEXT: tail bar
; RV64I-NEXT: .LBB14_3:
; RV64I-NEXT: ret
;
@@ -482,7 +482,7 @@ define void @bittest_switch(i32 signext %0) {
; RV32ZBS-NEXT: bext a0, a1, a0
; RV32ZBS-NEXT: beqz a0, .LBB14_3
; RV32ZBS-NEXT: # %bb.2:
-; RV32ZBS-NEXT: tail bar@plt
+; RV32ZBS-NEXT: tail bar
; RV32ZBS-NEXT: .LBB14_3:
; RV32ZBS-NEXT: ret
;
@@ -497,7 +497,7 @@ define void @bittest_switch(i32 signext %0) {
; RV64ZBS-NEXT: bext a0, a1, a0
; RV64ZBS-NEXT: beqz a0, .LBB14_3
; RV64ZBS-NEXT: # %bb.2:
-; RV64ZBS-NEXT: tail bar@plt
+; RV64ZBS-NEXT: tail bar
; RV64ZBS-NEXT: .LBB14_3:
; RV64ZBS-NEXT: ret
;
@@ -512,7 +512,7 @@ define void @bittest_switch(i32 signext %0) {
; RV32XTHEADBS-NEXT: andi a0, a0, 1
; RV32XTHEADBS-NEXT: beqz a0, .LBB14_3
; RV32XTHEADBS-NEXT: # %bb.2:
-; RV32XTHEADBS-NEXT: tail bar@plt
+; RV32XTHEADBS-NEXT: tail bar
; RV32XTHEADBS-NEXT: .LBB14_3:
; RV32XTHEADBS-NEXT: ret
;
@@ -528,7 +528,7 @@ define void @bittest_switch(i32 signext %0) {
; RV64XTHEADBS-NEXT: andi a0, a0, 1
; RV64XTHEADBS-NEXT: beqz a0, .LBB14_3
; RV64XTHEADBS-NEXT: # %bb.2:
-; RV64XTHEADBS-NEXT: tail bar@plt
+; RV64XTHEADBS-NEXT: tail bar
; RV64XTHEADBS-NEXT: .LBB14_3:
; RV64XTHEADBS-NEXT: ret
switch i32 %0, label %3 [
@@ -1243,7 +1243,7 @@ define void @bit_10_z_branch_i32(i32 signext %0) {
; CHECK-NEXT: andi a0, a0, 1024
; CHECK-NEXT: bnez a0, .LBB37_2
; CHECK-NEXT: # %bb.1:
-; CHECK-NEXT: tail bar@plt
+; CHECK-NEXT: tail bar
; CHECK-NEXT: .LBB37_2:
; CHECK-NEXT: ret
%2 = and i32 %0, 1024
@@ -1264,7 +1264,7 @@ define void @bit_10_nz_branch_i32(i32 signext %0) {
; CHECK-NEXT: andi a0, a0, 1024
; CHECK-NEXT: beqz a0, .LBB38_2
; CHECK-NEXT: # %bb.1:
-; CHECK-NEXT: tail bar@plt
+; CHECK-NEXT: tail bar
; CHECK-NEXT: .LBB38_2:
; CHECK-NEXT: ret
%2 = and i32 %0, 1024
@@ -1285,7 +1285,7 @@ define void @bit_11_z_branch_i32(i32 signext %0) {
; RV32-NEXT: slli a0, a0, 20
; RV32-NEXT: bltz a0, .LBB39_2
; RV32-NEXT: # %bb.1:
-; RV32-NEXT: tail bar@plt
+; RV32-NEXT: tail bar
; RV32-NEXT: .LBB39_2:
; RV32-NEXT: ret
;
@@ -1294,7 +1294,7 @@ define void @bit_11_z_branch_i32(i32 signext %0) {
; RV64-NEXT: slli a0, a0, 52
; RV64-NEXT: bltz a0, .LBB39_2
; RV64-NEXT: # %bb.1:
-; RV64-NEXT: tail bar@plt
+; RV64-NEXT: tail bar
; RV64-NEXT: .LBB39_2:
; RV64-NEXT: ret
%2 = and i32 %0, 2048
@@ -1315,7 +1315,7 @@ define void @bit_11_nz_branch_i32(i32 signext %0) {
; RV32-NEXT: slli a0, a0, 20
; RV32-NEXT: bgez a0, .LBB40_2
; RV32-NEXT: # %bb.1:
-; RV32-NEXT: tail bar@plt
+; RV32-NEXT: tail bar
; RV32-NEXT: .LBB40_2:
; RV32-NEXT: ret
;
@@ -1324,7 +1324,7 @@ define void @bit_11_nz_branch_i32(i32 signext %0) {
; RV64-NEXT: slli a0, a0, 52
; RV64-NEXT: bgez a0, .LBB40_2
; RV64-NEXT: # %bb.1:
-; RV64-NEXT: tail bar@plt
+; RV64-NEXT: tail bar
; RV64-NEXT: .LBB40_2:
; RV64-NEXT: ret
%2 = and i32 %0, 2048
@@ -1345,7 +1345,7 @@ define void @bit_24_z_branch_i32(i32 signext %0) {
; RV32-NEXT: slli a0, a0, 7
; RV32-NEXT: bltz a0, .LBB41_2
; RV32-NEXT: # %bb.1:
-; RV32-NEXT: tail bar@plt
+; RV32-NEXT: tail bar
; RV32-NEXT: .LBB41_2:
; RV32-NEXT: ret
;
@@ -1354,7 +1354,7 @@ define void @bit_24_z_branch_i32(i32 signext %0) {
; RV64-NEXT: slli a0, a0, 39
; RV64-NEXT: bltz a0, .LBB41_2
; RV64-NEXT: # %bb.1:
-; RV64-NEXT: tail bar@plt
+; RV64-NEXT: tail bar
; RV64-NEXT: .LBB41_2:
; RV64-NEXT: ret
%2 = and i32 %0, 16777216
@@ -1375,7 +1375,7 @@ define void @bit_24_nz_branch_i32(i32 signext %0) {
; RV32-NEXT: slli a0, a0, 7
; RV32-NEXT: bgez a0, .LBB42_2
; RV32-NEXT: # %bb.1:
-; RV32-NEXT: tail bar@plt
+; RV32-NEXT: tail bar
; RV32-NEXT: .LBB42_2:
; RV32-NEXT: ret
;
@@ -1384,7 +1384,7 @@ define void @bit_24_nz_branch_i32(i32 signext %0) {
; RV64-NEXT: slli a0, a0, 39
; RV64-NEXT: bgez a0, .LBB42_2
; RV64-NEXT: # %bb.1:
-; RV64-NEXT: tail bar@plt
+; RV64-NEXT: tail bar
; RV64-NEXT: .LBB42_2:
; RV64-NEXT: ret
%2 = and i32 %0, 16777216
@@ -1404,7 +1404,7 @@ define void @bit_31_z_branch_i32(i32 signext %0) {
; RV32: # %bb.0:
; RV32-NEXT: bltz a0, .LBB43_2
; RV32-NEXT: # %bb.1:
-; RV32-NEXT: tail bar@plt
+; RV32-NEXT: tail bar
; RV32-NEXT: .LBB43_2:
; RV32-NEXT: ret
;
@@ -1414,7 +1414,7 @@ define void @bit_31_z_branch_i32(i32 signext %0) {
; RV64-NEXT: and a0, a0, a1
; RV64-NEXT: bnez a0, .LBB43_2
; RV64-NEXT: # %bb.1:
-; RV64-NEXT: tail bar@plt
+; RV64-NEXT: tail bar
; RV64-NEXT: .LBB43_2:
; RV64-NEXT: ret
%2 = and i32 %0, 2147483648
@@ -1434,7 +1434,7 @@ define void @bit_31_nz_branch_i32(i32 signext %0) {
; RV32: # %bb.0:
; RV32-NEXT: bgez a0, .LBB44_2
; RV32-NEXT: # %bb.1:
-; RV32-NEXT: tail bar@plt
+; RV32-NEXT: tail bar
; RV32-NEXT: .LBB44_2:
; RV32-NEXT: ret
;
@@ -1444,7 +1444,7 @@ define void @bit_31_nz_branch_i32(i32 signext %0) {
; RV64-NEXT: and a0, a0, a1
; RV64-NEXT: beqz a0, .LBB44_2
; RV64-NEXT: # %bb.1:
-; RV64-NEXT: tail bar@plt
+; RV64-NEXT: tail bar
; RV64-NEXT: .LBB44_2:
; RV64-NEXT: ret
%2 = and i32 %0, 2147483648
@@ -1465,7 +1465,7 @@ define void @bit_10_z_branch_i64(i64 %0) {
; CHECK-NEXT: andi a0, a0, 1024
; CHECK-NEXT: bnez a0, .LBB45_2
; CHECK-NEXT: # %bb.1:
-; CHECK-NEXT: tail bar@plt
+; CHECK-NEXT: tail bar
; CHECK-NEXT: .LBB45_2:
; CHECK-NEXT: ret
%2 = and i64 %0, 1024
@@ -1486,7 +1486,7 @@ define void @bit_10_nz_branch_i64(i64 %0) {
; CHECK-NEXT: andi a0, a0, 1024
; CHECK-NEXT: beqz a0, .LBB46_2
; CHECK-NEXT: # %bb.1:
-; CHECK-NEXT: tail bar@plt
+; CHECK-NEXT: tail bar
; CHECK-NEXT: .LBB46_2:
; CHECK-NEXT: ret
%2 = and i64 %0, 1024
@@ -1507,7 +1507,7 @@ define void @bit_11_z_branch_i64(i64 %0) {
; RV32-NEXT: slli a0, a0, 20
; RV32-NEXT: bltz a0, .LBB47_2
; RV32-NEXT: # %bb.1:
-; RV32-NEXT: tail bar@plt
+; RV32-NEXT: tail bar
; RV32-NEXT: .LBB47_2:
; RV32-NEXT: ret
;
@@ -1516,7 +1516,7 @@ define void @bit_11_z_branch_i64(i64 %0) {
; RV64-NEXT: slli a0, a0, 52
; RV64-NEXT: bltz a0, .LBB47_2
; RV64-NEXT: # %bb.1:
-; RV64-NEXT: tail bar@plt
+; RV64-NEXT: tail bar
; RV64-NEXT: .LBB47_2:
; RV64-NEXT: ret
%2 = and i64 %0, 2048
@@ -1537,7 +1537,7 @@ define void @bit_11_nz_branch_i64(i64 %0) {
; RV32-NEXT: slli a0, a0, 20
; RV32-NEXT: bgez a0, .LBB48_2
; RV32-NEXT: # %bb.1:
-; RV32-NEXT: tail bar@plt
+; RV32-NEXT: tail bar
; RV32-NEXT: .LBB48_2:
; RV32-NEXT: ret
;
@@ -1546,7 +1546,7 @@ define void @bit_11_nz_branch_i64(i64 %0) {
; RV64-NEXT: slli a0, a0, 52
; RV64-NEXT: bgez a0, .LBB48_2
; RV64-NEXT: # %bb.1:
-; RV64-NEXT: tail bar@plt
+; RV64-NEXT: tail bar
; RV64-NEXT: .LBB48_2:
; RV64-NEXT: ret
%2 = and i64 %0, 2048
@@ -1567,7 +1567,7 @@ define void @bit_24_z_branch_i64(i64 %0) {
; RV32-NEXT: slli a0, a0, 7
; RV32-NEXT: bltz a0, .LBB49_2
; RV32-NEXT: # %bb.1:
-; RV32-NEXT: tail bar@plt
+; RV32-NEXT: tail bar
; RV32-NEXT: .LBB49_2:
; RV32-NEXT: ret
;
@@ -1576,7 +1576,7 @@ define void @bit_24_z_branch_i64(i64 %0) {
; RV64-NEXT: slli a0, a0, 39
; RV64-NEXT: bltz a0, .LBB49_2
; RV64-NEXT: # %bb.1:
-; RV64-NEXT: tail bar@plt
+; RV64-NEXT: tail bar
; RV64-NEXT: .LBB49_2:
; RV64-NEXT: ret
%2 = and i64 %0, 16777216
@@ -1597,7 +1597,7 @@ define void @bit_24_nz_branch_i64(i64 %0) {
; RV32-NEXT: slli a0, a0, 7
; RV32-NEXT: bgez a0, .LBB50_2
; RV32-NEXT: # %bb.1:
-; RV32-NEXT: tail bar@plt
+; RV32-NEXT: tail bar
; RV32-NEXT: .LBB50_2:
; RV32-NEXT: ret
;
@@ -1606,7 +1606,7 @@ define void @bit_24_nz_branch_i64(i64 %0) {
; RV64-NEXT: slli a0, a0, 39
; RV64-NEXT: bgez a0, .LBB50_2
; RV64-NEXT: # %bb.1:
-; RV64-NEXT: tail bar@plt
+; RV64-NEXT: tail bar
; RV64-NEXT: .LBB50_2:
; RV64-NEXT: ret
%2 = and i64 %0, 16777216
@@ -1626,7 +1626,7 @@ define void @bit_31_z_branch_i64(i64 %0) {
; RV32: # %bb.0:
; RV32-NEXT: bltz a0, .LBB51_2
; RV32-NEXT: # %bb.1:
-; RV32-NEXT: tail bar@plt
+; RV32-NEXT: tail bar
; RV32-NEXT: .LBB51_2:
; RV32-NEXT: ret
;
@@ -1635,7 +1635,7 @@ define void @bit_31_z_branch_i64(i64 %0) {
; RV64-NEXT: slli a0, a0, 32
; RV64-NEXT: bltz a0, .LBB51_2
; RV64-NEXT: # %bb.1:
-; RV64-NEXT: tail bar@plt
+; RV64-NEXT: tail bar
; RV64-NEXT: .LBB51_2:
; RV64-NEXT: ret
%2 = and i64 %0, 2147483648
@@ -1655,7 +1655,7 @@ define void @bit_31_nz_branch_i64(i64 %0) {
; RV32: # %bb.0:
; RV32-NEXT: bgez a0, .LBB52_2
; RV32-NEXT: # %bb.1:
-; RV32-NEXT: tail bar@plt
+; RV32-NEXT: tail bar
; RV32-NEXT: .LBB52_2:
; RV32-NEXT: ret
;
@@ -1664,7 +1664,7 @@ define void @bit_31_nz_branch_i64(i64 %0) {
; RV64-NEXT: slli a0, a0, 32
; RV64-NEXT: bgez a0, .LBB52_2
; RV64-NEXT: # %bb.1:
-; RV64-NEXT: tail bar@plt
+; RV64-NEXT: tail bar
; RV64-NEXT: .LBB52_2:
; RV64-NEXT: ret
%2 = and i64 %0, 2147483648
@@ -1685,7 +1685,7 @@ define void @bit_32_z_branch_i64(i64 %0) {
; RV32-NEXT: andi a1, a1, 1
; RV32-NEXT: bnez a1, .LBB53_2
; RV32-NEXT: # %bb.1:
-; RV32-NEXT: tail bar@plt
+; RV32-NEXT: tail bar
; RV32-NEXT: .LBB53_2:
; RV32-NEXT: ret
;
@@ -1694,7 +1694,7 @@ define void @bit_32_z_branch_i64(i64 %0) {
; RV64-NEXT: slli a0, a0, 31
; RV64-NEXT: bltz a0, .LBB53_2
; RV64-NEXT: # %bb.1:
-; RV64-NEXT: tail bar@plt
+; RV64-NEXT: tail bar
; RV64-NEXT: .LBB53_2:
; RV64-NEXT: ret
%2 = and i64 %0, 4294967296
@@ -1715,7 +1715,7 @@ define void @bit_32_nz_branch_i64(i64 %0) {
; RV32-NEXT: andi a1, a1, 1
; RV32-NEXT: beqz a1, .LBB54_2
; RV32-NEXT: # %bb.1:
-; RV32-NEXT: tail bar@plt
+; RV32-NEXT: tail bar
; RV32-NEXT: .LBB54_2:
; RV32-NEXT: ret
;
@@ -1724,7 +1724,7 @@ define void @bit_32_nz_branch_i64(i64 %0) {
; RV64-NEXT: slli a0, a0, 31
; RV64-NEXT: bgez a0, .LBB54_2
; RV64-NEXT: # %bb.1:
-; RV64-NEXT: tail bar@plt
+; RV64-NEXT: tail bar
; RV64-NEXT: .LBB54_2:
; RV64-NEXT: ret
%2 = and i64 %0, 4294967296
@@ -1745,7 +1745,7 @@ define void @bit_62_z_branch_i64(i64 %0) {
; RV32-NEXT: slli a1, a1, 1
; RV32-NEXT: bltz a1, .LBB55_2
; RV32-NEXT: # %bb.1:
-; RV32-NEXT: tail bar@plt
+; RV32-NEXT: tail bar
; RV32-NEXT: .LBB55_2:
; RV32-NEXT: ret
;
@@ -1754,7 +1754,7 @@ define void @bit_62_z_branch_i64(i64 %0) {
; RV64-NEXT: slli a0, a0, 1
; RV64-NEXT: bltz a0, .LBB55_2
; RV64-NEXT: # %bb.1:
-; RV64-NEXT: tail bar@plt
+; RV64-NEXT: tail bar
; RV64-NEXT: .LBB55_2:
; RV64-NEXT: ret
%2 = and i64 %0, 4611686018427387904
@@ -1775,7 +1775,7 @@ define void @bit_62_nz_branch_i64(i64 %0) {
; RV32-NEXT: slli a1, a1, 1
; RV32-NEXT: bgez a1, .LBB56_2
; RV32-NEXT: # %bb.1:
-; RV32-NEXT: tail bar@plt
+; RV32-NEXT: tail bar
; RV32-NEXT: .LBB56_2:
; RV32-NEXT: ret
;
@@ -1784,7 +1784,7 @@ define void @bit_62_nz_branch_i64(i64 %0) {
; RV64-NEXT: slli a0, a0, 1
; RV64-NEXT: bgez a0, .LBB56_2
; RV64-NEXT: # %bb.1:
-; RV64-NEXT: tail bar@plt
+; RV64-NEXT: tail bar
; RV64-NEXT: .LBB56_2:
; RV64-NEXT: ret
%2 = and i64 %0, 4611686018427387904
@@ -1804,7 +1804,7 @@ define void @bit_63_z_branch_i64(i64 %0) {
; RV32: # %bb.0:
; RV32-NEXT: bltz a1, .LBB57_2
; RV32-NEXT: # %bb.1:
-; RV32-NEXT: tail bar@plt
+; RV32-NEXT: tail bar
; RV32-NEXT: .LBB57_2:
; RV32-NEXT: ret
;
@@ -1812,7 +1812,7 @@ define void @bit_63_z_branch_i64(i64 %0) {
; RV64: # %bb.0:
; RV64-NEXT: bltz a0, .LBB57_2
; RV64-NEXT: # %bb.1:
-; RV64-NEXT: tail bar@plt
+; RV64-NEXT: tail bar
; RV64-NEXT: .LBB57_2:
; RV64-NEXT: ret
%2 = and i64 %0, 9223372036854775808
@@ -1832,7 +1832,7 @@ define void @bit_63_nz_branch_i64(i64 %0) {
; RV32: # %bb.0:
; RV32-NEXT: bgez a1, .LBB58_2
; RV32-NEXT: # %bb.1:
-; RV32-NEXT: tail bar@plt
+; RV32-NEXT: tail bar
; RV32-NEXT: .LBB58_2:
; RV32-NEXT: ret
;
@@ -1840,7 +1840,7 @@ define void @bit_63_nz_branch_i64(i64 %0) {
; RV64: # %bb.0:
; RV64-NEXT: bgez a0, .LBB58_2
; RV64-NEXT: # %bb.1:
-; RV64-NEXT: tail bar@plt
+; RV64-NEXT: tail bar
; RV64-NEXT: .LBB58_2:
; RV64-NEXT: ret
%2 = and i64 %0, 9223372036854775808
@@ -2675,7 +2675,7 @@ define void @bit_10_1_z_branch_i32(i32 signext %0) {
; CHECK-NEXT: # %bb.1:
; CHECK-NEXT: ret
; CHECK-NEXT: .LBB89_2:
-; CHECK-NEXT: tail bar@plt
+; CHECK-NEXT: tail bar
%2 = and i32 %0, 1023
%3 = icmp eq i32 %2, 0
br i1 %3, label %4, label %5
@@ -2694,7 +2694,7 @@ define void @bit_10_1_nz_branch_i32(i32 signext %0) {
; CHECK-NEXT: andi a0, a0, 1023
; CHECK-NEXT: beqz a0, .LBB90_2
; CHECK-NEXT: # %bb.1:
-; CHECK-NEXT: tail bar@plt
+; CHECK-NEXT: tail bar
; CHECK-NEXT: .LBB90_2:
; CHECK-NEXT: ret
%2 = and i32 %0, 1023
@@ -2717,7 +2717,7 @@ define void @bit_11_1_z_branch_i32(i32 signext %0) {
; CHECK-NEXT: # %bb.1:
; CHECK-NEXT: ret
; CHECK-NEXT: .LBB91_2:
-; CHECK-NEXT: tail bar@plt
+; CHECK-NEXT: tail bar
%2 = and i32 %0, 2047
%3 = icmp eq i32 %2, 0
br i1 %3, label %4, label %5
@@ -2736,7 +2736,7 @@ define void @bit_11_1_nz_branch_i32(i32 signext %0) {
; CHECK-NEXT: andi a0, a0, 2047
; CHECK-NEXT: beqz a0, .LBB92_2
; CHECK-NEXT: # %bb.1:
-; CHECK-NEXT: tail bar@plt
+; CHECK-NEXT: tail bar
; CHECK-NEXT: .LBB92_2:
; CHECK-NEXT: ret
%2 = and i32 %0, 2047
@@ -2759,7 +2759,7 @@ define void @bit_16_1_z_branch_i32(i32 signext %0) {
; RV32-NEXT: # %bb.1:
; RV32-NEXT: ret
; RV32-NEXT: .LBB93_2:
-; RV32-NEXT: tail bar@plt
+; RV32-NEXT: tail bar
;
; RV64-LABEL: bit_16_1_z_branch_i32:
; RV64: # %bb.0:
@@ -2768,7 +2768,7 @@ define void @bit_16_1_z_branch_i32(i32 signext %0) {
; RV64-NEXT: # %bb.1:
; RV64-NEXT: ret
; RV64-NEXT: .LBB93_2:
-; RV64-NEXT: tail bar@plt
+; RV64-NEXT: tail bar
%2 = and i32 %0, 65535
%3 = icmp eq i32 %2, 0
br i1 %3, label %4, label %5
@@ -2787,7 +2787,7 @@ define void @bit_16_1_nz_branch_i32(i32 signext %0) {
; RV32-NEXT: slli a0, a0, 16
; RV32-NEXT: beqz a0, .LBB94_2
; RV32-NEXT: # %bb.1:
-; RV32-NEXT: tail bar@plt
+; RV32-NEXT: tail bar
; RV32-NEXT: .LBB94_2:
; RV32-NEXT: ret
;
@@ -2796,7 +2796,7 @@ define void @bit_16_1_nz_branch_i32(i32 signext %0) {
; RV64-NEXT: slli a0, a0, 48
; RV64-NEXT: beqz a0, .LBB94_2
; RV64-NEXT: # %bb.1:
-; RV64-NEXT: tail bar@plt
+; RV64-NEXT: tail bar
; RV64-NEXT: .LBB94_2:
; RV64-NEXT: ret
%2 = and i32 %0, 65535
@@ -2819,7 +2819,7 @@ define void @bit_24_1_z_branch_i32(i32 signext %0) {
; RV32-NEXT: # %bb.1:
; RV32-NEXT: ret
; RV32-NEXT: .LBB95_2:
-; RV32-NEXT: tail bar@plt
+; RV32-NEXT: tail bar
;
; RV64-LABEL: bit_24_1_z_branch_i32:
; RV64: # %bb.0:
@@ -2828,7 +2828,7 @@ define void @bit_24_1_z_branch_i32(i32 signext %0) {
; RV64-NEXT: # %bb.1:
; RV64-NEXT: ret
; RV64-NEXT: .LBB95_2:
-; RV64-NEXT: tail bar@plt
+; RV64-NEXT: tail bar
%2 = and i32 %0, 16777215
%3 = icmp eq i32 %2, 0
br i1 %3, label %4, label %5
@@ -2847,7 +2847,7 @@ define void @bit_24_1_nz_branch_i32(i32 signext %0) {
; RV32-NEXT: slli a0, a0, 8
; RV32-NEXT: beqz a0, .LBB96_2
; RV32-NEXT: # %bb.1:
-; RV32-NEXT: tail bar@plt
+; RV32-NEXT: tail bar
; RV32-NEXT: .LBB96_2:
; RV32-NEXT: ret
;
@@ -2856,7 +2856,7 @@ define void @bit_24_1_nz_branch_i32(i32 signext %0) {
; RV64-NEXT: slli a0, a0, 40
; RV64-NEXT: beqz a0, .LBB96_2
; RV64-NEXT: # %bb.1:
-; RV64-NEXT: tail bar@plt
+; RV64-NEXT: tail bar
; RV64-NEXT: .LBB96_2:
; RV64-NEXT: ret
%2 = and i32 %0, 16777215
@@ -2879,7 +2879,7 @@ define void @bit_31_1_z_branch_i32(i32 signext %0) {
; RV32-NEXT: # %bb.1:
; RV32-NEXT: ret
; RV32-NEXT: .LBB97_2:
-; RV32-NEXT: tail bar@plt
+; RV32-NEXT: tail bar
;
; RV64-LABEL: bit_31_1_z_branch_i32:
; RV64: # %bb.0:
@@ -2888,7 +2888,7 @@ define void @bit_31_1_z_branch_i32(i32 signext %0) {
; RV64-NEXT: # %bb.1:
; RV64-NEXT: ret
; RV64-NEXT: .LBB97_2:
-; RV64-NEXT: tail bar@plt
+; RV64-NEXT: tail bar
%2 = and i32 %0, 2147483647
%3 = icmp eq i32 %2, 0
br i1 %3, label %4, label %5
@@ -2907,7 +2907,7 @@ define void @bit_31_1_nz_branch_i32(i32 signext %0) {
; RV32-NEXT: slli a0, a0, 1
; RV32-NEXT: beqz a0, .LBB98_2
; RV32-NEXT: # %bb.1:
-; RV32-NEXT: tail bar@plt
+; RV32-NEXT: tail bar
; RV32-NEXT: .LBB98_2:
; RV32-NEXT: ret
;
@@ -2916,7 +2916,7 @@ define void @bit_31_1_nz_branch_i32(i32 signext %0) {
; RV64-NEXT: slli a0, a0, 33
; RV64-NEXT: beqz a0, .LBB98_2
; RV64-NEXT: # %bb.1:
-; RV64-NEXT: tail bar@plt
+; RV64-NEXT: tail bar
; RV64-NEXT: .LBB98_2:
; RV64-NEXT: ret
%2 = and i32 %0, 2147483647
@@ -2938,7 +2938,7 @@ define void @bit_32_1_z_branch_i32(i32 signext %0) {
; CHECK-NEXT: # %bb.1:
; CHECK-NEXT: ret
; CHECK-NEXT: .LBB99_2:
-; CHECK-NEXT: tail bar@plt
+; CHECK-NEXT: tail bar
%2 = and i32 %0, 4294967295
%3 = icmp eq i32 %2, 0
br i1 %3, label %4, label %5
@@ -2956,7 +2956,7 @@ define void @bit_32_1_nz_branch_i32(i32 signext %0) {
; CHECK: # %bb.0:
; CHECK-NEXT: beqz a0, .LBB100_2
; CHECK-NEXT: # %bb.1:
-; CHECK-NEXT: tail bar@plt
+; CHECK-NEXT: tail bar
; CHECK-NEXT: .LBB100_2:
; CHECK-NEXT: ret
%2 = and i32 %0, 4294967295
@@ -2980,7 +2980,7 @@ define void @bit_10_1_z_branch_i64(i64 %0) {
; CHECK-NEXT: # %bb.1:
; CHECK-NEXT: ret
; CHECK-NEXT: .LBB101_2:
-; CHECK-NEXT: tail bar@plt
+; CHECK-NEXT: tail bar
%2 = and i64 %0, 1023
%3 = icmp eq i64 %2, 0
br i1 %3, label %4, label %5
@@ -2999,7 +2999,7 @@ define void @bit_10_1_nz_branch_i64(i64 %0) {
; CHECK-NEXT: andi a0, a0, 1023
; CHECK-NEXT: beqz a0, .LBB102_2
; CHECK-NEXT: # %bb.1:
-; CHECK-NEXT: tail bar@plt
+; CHECK-NEXT: tail bar
; CHECK-NEXT: .LBB102_2:
; CHECK-NEXT: ret
%2 = and i64 %0, 1023
@@ -3022,7 +3022,7 @@ define void @bit_11_1_z_branch_i64(i64 %0) {
; CHECK-NEXT: # %bb.1:
; CHECK-NEXT: ret
; CHECK-NEXT: .LBB103_2:
-; CHECK-NEXT: tail bar@plt
+; CHECK-NEXT: tail bar
%2 = and i64 %0, 2047
%3 = icmp eq i64 %2, 0
br i1 %3, label %4, label %5
@@ -3041,7 +3041,7 @@ define void @bit_11_1_nz_branch_i64(i64 %0) {
; CHECK-NEXT: andi a0, a0, 2047
; CHECK-NEXT: beqz a0, .LBB104_2
; CHECK-NEXT: # %bb.1:
-; CHECK-NEXT: tail bar@plt
+; CHECK-NEXT: tail bar
; CHECK-NEXT: .LBB104_2:
; CHECK-NEXT: ret
%2 = and i64 %0, 2047
@@ -3064,7 +3064,7 @@ define void @bit_16_1_z_branch_i64(i64 %0) {
; RV32-NEXT: # %bb.1:
; RV32-NEXT: ret
; RV32-NEXT: .LBB105_2:
-; RV32-NEXT: tail bar@plt
+; RV32-NEXT: tail bar
;
; RV64-LABEL: bit_16_1_z_branch_i64:
; RV64: # %bb.0:
@@ -3073,7 +3073,7 @@ define void @bit_16_1_z_branch_i64(i64 %0) {
; RV64-NEXT: # %bb.1:
; RV64-NEXT: ret
; RV64-NEXT: .LBB105_2:
-; RV64-NEXT: tail bar@plt
+; RV64-NEXT: tail bar
%2 = and i64 %0, 65535
%3 = icmp eq i64 %2, 0
br i1 %3, label %4, label %5
@@ -3092,7 +3092,7 @@ define void @bit_16_1_nz_branch_i64(i64 %0) {
; RV32-NEXT: slli a0, a0, 16
; RV32-NEXT: beqz a0, .LBB106_2
; RV32-NEXT: # %bb.1:
-; RV32-NEXT: tail bar@plt
+; RV32-NEXT: tail bar
; RV32-NEXT: .LBB106_2:
; RV32-NEXT: ret
;
@@ -3101,7 +3101,7 @@ define void @bit_16_1_nz_branch_i64(i64 %0) {
; RV64-NEXT: slli a0, a0, 48
; RV64-NEXT: beqz a0, .LBB106_2
; RV64-NEXT: # %bb.1:
-; RV64-NEXT: tail bar@plt
+; RV64-NEXT: tail bar
; RV64-NEXT: .LBB106_2:
; RV64-NEXT: ret
%2 = and i64 %0, 65535
@@ -3124,7 +3124,7 @@ define void @bit_24_1_z_branch_i64(i64 %0) {
; RV32-NEXT: # %bb.1:
; RV32-NEXT: ret
; RV32-NEXT: .LBB107_2:
-; RV32-NEXT: tail bar@plt
+; RV32-NEXT: tail bar
;
; RV64-LABEL: bit_24_1_z_branch_i64:
; RV64: # %bb.0:
@@ -3133,7 +3133,7 @@ define void @bit_24_1_z_branch_i64(i64 %0) {
; RV64-NEXT: # %bb.1:
; RV64-NEXT: ret
; RV64-NEXT: .LBB107_2:
-; RV64-NEXT: tail bar@plt
+; RV64-NEXT: tail bar
%2 = and i64 %0, 16777215
%3 = icmp eq i64 %2, 0
br i1 %3, label %4, label %5
@@ -3152,7 +3152,7 @@ define void @bit_24_1_nz_branch_i64(i64 %0) {
; RV32-NEXT: slli a0, a0, 8
; RV32-NEXT: beqz a0, .LBB108_2
; RV32-NEXT: # %bb.1:
-; RV32-NEXT: tail bar@plt
+; RV32-NEXT: tail bar
; RV32-NEXT: .LBB108_2:
; RV32-NEXT: ret
;
@@ -3161,7 +3161,7 @@ define void @bit_24_1_nz_branch_i64(i64 %0) {
; RV64-NEXT: slli a0, a0, 40
; RV64-NEXT: beqz a0, .LBB108_2
; RV64-NEXT: # %bb.1:
-; RV64-NEXT: tail bar@plt
+; RV64-NEXT: tail bar
; RV64-NEXT: .LBB108_2:
; RV64-NEXT: ret
%2 = and i64 %0, 16777215
@@ -3184,7 +3184,7 @@ define void @bit_31_1_z_branch_i64(i64 %0) {
; RV32-NEXT: # %bb.1:
; RV32-NEXT: ret
; RV32-NEXT: .LBB109_2:
-; RV32-NEXT: tail bar@plt
+; RV32-NEXT: tail bar
;
; RV64-LABEL: bit_31_1_z_branch_i64:
; RV64: # %bb.0:
@@ -3193,7 +3193,7 @@ define void @bit_31_1_z_branch_i64(i64 %0) {
; RV64-NEXT: # %bb.1:
; RV64-NEXT: ret
; RV64-NEXT: .LBB109_2:
-; RV64-NEXT: tail bar@plt
+; RV64-NEXT: tail bar
%2 = and i64 %0, 2147483647
%3 = icmp eq i64 %2, 0
br i1 %3, label %4, label %5
@@ -3212,7 +3212,7 @@ define void @bit_31_1_nz_branch_i64(i64 %0) {
; RV32-NEXT: slli a0, a0, 1
; RV32-NEXT: beqz a0, .LBB110_2
; RV32-NEXT: # %bb.1:
-; RV32-NEXT: tail bar@plt
+; RV32-NEXT: tail bar
; RV32-NEXT: .LBB110_2:
; RV32-NEXT: ret
;
@@ -3221,7 +3221,7 @@ define void @bit_31_1_nz_branch_i64(i64 %0) {
; RV64-NEXT: slli a0, a0, 33
; RV64-NEXT: beqz a0, .LBB110_2
; RV64-NEXT: # %bb.1:
-; RV64-NEXT: tail bar@plt
+; RV64-NEXT: tail bar
; RV64-NEXT: .LBB110_2:
; RV64-NEXT: ret
%2 = and i64 %0, 2147483647
@@ -3243,7 +3243,7 @@ define void @bit_32_1_z_branch_i64(i64 %0) {
; RV32-NEXT: # %bb.1:
; RV32-NEXT: ret
; RV32-NEXT: .LBB111_2:
-; RV32-NEXT: tail bar@plt
+; RV32-NEXT: tail bar
;
; RV64-LABEL: bit_32_1_z_branch_i64:
; RV64: # %bb.0:
@@ -3252,7 +3252,7 @@ define void @bit_32_1_z_branch_i64(i64 %0) {
; RV64-NEXT: # %bb.1:
; RV64-NEXT: ret
; RV64-NEXT: .LBB111_2:
-; RV64-NEXT: tail bar@plt
+; RV64-NEXT: tail bar
%2 = and i64 %0, 4294967295
%3 = icmp eq i64 %2, 0
br i1 %3, label %4, label %5
@@ -3270,7 +3270,7 @@ define void @bit_32_1_nz_branch_i64(i64 %0) {
; RV32: # %bb.0:
; RV32-NEXT: beqz a0, .LBB112_2
; RV32-NEXT: # %bb.1:
-; RV32-NEXT: tail bar@plt
+; RV32-NEXT: tail bar
; RV32-NEXT: .LBB112_2:
; RV32-NEXT: ret
;
@@ -3279,7 +3279,7 @@ define void @bit_32_1_nz_branch_i64(i64 %0) {
; RV64-NEXT: sext.w a0, a0
; RV64-NEXT: beqz a0, .LBB112_2
; RV64-NEXT: # %bb.1:
-; RV64-NEXT: tail bar@plt
+; RV64-NEXT: tail bar
; RV64-NEXT: .LBB112_2:
; RV64-NEXT: ret
%2 = and i64 %0, 4294967295
@@ -3304,7 +3304,7 @@ define void @bit_62_1_z_branch_i64(i64 %0) {
; RV32-NEXT: # %bb.1:
; RV32-NEXT: ret
; RV32-NEXT: .LBB113_2:
-; RV32-NEXT: tail bar@plt
+; RV32-NEXT: tail bar
;
; RV64-LABEL: bit_62_1_z_branch_i64:
; RV64: # %bb.0:
@@ -3313,7 +3313,7 @@ define void @bit_62_1_z_branch_i64(i64 %0) {
; RV64-NEXT: # %bb.1:
; RV64-NEXT: ret
; RV64-NEXT: .LBB113_2:
-; RV64-NEXT: tail bar@plt
+; RV64-NEXT: tail bar
%2 = and i64 %0, 4611686018427387903
%3 = icmp eq i64 %2, 0
br i1 %3, label %4, label %5
@@ -3334,7 +3334,7 @@ define void @bit_62_1_nz_branch_i64(i64 %0) {
; RV32-NEXT: or a0, a0, a1
; RV32-NEXT: beqz a0, .LBB114_2
; RV32-NEXT: # %bb.1:
-; RV32-NEXT: tail bar@plt
+; RV32-NEXT: tail bar
; RV32-NEXT: .LBB114_2:
; RV32-NEXT: ret
;
@@ -3343,7 +3343,7 @@ define void @bit_62_1_nz_branch_i64(i64 %0) {
; RV64-NEXT: slli a0, a0, 2
; RV64-NEXT: beqz a0, .LBB114_2
; RV64-NEXT: # %bb.1:
-; RV64-NEXT: tail bar@plt
+; RV64-NEXT: tail bar
; RV64-NEXT: .LBB114_2:
; RV64-NEXT: ret
%2 = and i64 %0, 4611686018427387903
@@ -3368,7 +3368,7 @@ define void @bit_63_1_z_branch_i64(i64 %0) {
; RV32I-NEXT: # %bb.1:
; RV32I-NEXT: ret
; RV32I-NEXT: .LBB115_2:
-; RV32I-NEXT: tail bar@plt
+; RV32I-NEXT: tail bar
;
; RV64-LABEL: bit_63_1_z_branch_i64:
; RV64: # %bb.0:
@@ -3377,7 +3377,7 @@ define void @bit_63_1_z_branch_i64(i64 %0) {
; RV64-NEXT: # %bb.1:
; RV64-NEXT: ret
; RV64-NEXT: .LBB115_2:
-; RV64-NEXT: tail bar@plt
+; RV64-NEXT: tail bar
;
; RV32ZBS-LABEL: bit_63_1_z_branch_i64:
; RV32ZBS: # %bb.0:
@@ -3387,7 +3387,7 @@ define void @bit_63_1_z_branch_i64(i64 %0) {
; RV32ZBS-NEXT: # %bb.1:
; RV32ZBS-NEXT: ret
; RV32ZBS-NEXT: .LBB115_2:
-; RV32ZBS-NEXT: tail bar@plt
+; RV32ZBS-NEXT: tail bar
;
; RV32XTHEADBS-LABEL: bit_63_1_z_branch_i64:
; RV32XTHEADBS: # %bb.0:
@@ -3398,7 +3398,7 @@ define void @bit_63_1_z_branch_i64(i64 %0) {
; RV32XTHEADBS-NEXT: # %bb.1:
; RV32XTHEADBS-NEXT: ret
; RV32XTHEADBS-NEXT: .LBB115_2:
-; RV32XTHEADBS-NEXT: tail bar@plt
+; RV32XTHEADBS-NEXT: tail bar
%2 = and i64 %0, 9223372036854775807
%3 = icmp eq i64 %2, 0
br i1 %3, label %4, label %5
@@ -3419,7 +3419,7 @@ define void @bit_63_1_nz_branch_i64(i64 %0) {
; RV32I-NEXT: or a0, a0, a1
; RV32I-NEXT: beqz a0, .LBB116_2
; RV32I-NEXT: # %bb.1:
-; RV32I-NEXT: tail bar@plt
+; RV32I-NEXT: tail bar
; RV32I-NEXT: .LBB116_2:
; RV32I-NEXT: ret
;
@@ -3428,7 +3428,7 @@ define void @bit_63_1_nz_branch_i64(i64 %0) {
; RV64-NEXT: slli a0, a0, 1
; RV64-NEXT: beqz a0, .LBB116_2
; RV64-NEXT: # %bb.1:
-; RV64-NEXT: tail bar@plt
+; RV64-NEXT: tail bar
; RV64-NEXT: .LBB116_2:
; RV64-NEXT: ret
;
@@ -3438,7 +3438,7 @@ define void @bit_63_1_nz_branch_i64(i64 %0) {
; RV32ZBS-NEXT: or a0, a0, a1
; RV32ZBS-NEXT: beqz a0, .LBB116_2
; RV32ZBS-NEXT: # %bb.1:
-; RV32ZBS-NEXT: tail bar@plt
+; RV32ZBS-NEXT: tail bar
; RV32ZBS-NEXT: .LBB116_2:
; RV32ZBS-NEXT: ret
;
@@ -3449,7 +3449,7 @@ define void @bit_63_1_nz_branch_i64(i64 %0) {
; RV32XTHEADBS-NEXT: or a0, a0, a1
; RV32XTHEADBS-NEXT: beqz a0, .LBB116_2
; RV32XTHEADBS-NEXT: # %bb.1:
-; RV32XTHEADBS-NEXT: tail bar@plt
+; RV32XTHEADBS-NEXT: tail bar
; RV32XTHEADBS-NEXT: .LBB116_2:
; RV32XTHEADBS-NEXT: ret
%2 = and i64 %0, 9223372036854775807
@@ -3472,7 +3472,7 @@ define void @bit_64_1_z_branch_i64(i64 %0) {
; RV32-NEXT: # %bb.1:
; RV32-NEXT: ret
; RV32-NEXT: .LBB117_2:
-; RV32-NEXT: tail bar@plt
+; RV32-NEXT: tail bar
;
; RV64-LABEL: bit_64_1_z_branch_i64:
; RV64: # %bb.0:
@@ -3480,7 +3480,7 @@ define void @bit_64_1_z_branch_i64(i64 %0) {
; RV64-NEXT: # %bb.1:
; RV64-NEXT: ret
; RV64-NEXT: .LBB117_2:
-; RV64-NEXT: tail bar@plt
+; RV64-NEXT: tail bar
%2 = and i64 %0, 18446744073709551615
%3 = icmp eq i64 %2, 0
br i1 %3, label %4, label %5
@@ -3499,7 +3499,7 @@ define void @bit_64_1_nz_branch_i64(i64 %0) {
; RV32-NEXT: or a0, a0, a1
; RV32-NEXT: beqz a0, .LBB118_2
; RV32-NEXT: # %bb.1:
-; RV32-NEXT: tail bar@plt
+; RV32-NEXT: tail bar
; RV32-NEXT: .LBB118_2:
; RV32-NEXT: ret
;
@@ -3507,7 +3507,7 @@ define void @bit_64_1_nz_branch_i64(i64 %0) {
; RV64: # %bb.0:
; RV64-NEXT: beqz a0, .LBB118_2
; RV64-NEXT: # %bb.1:
-; RV64-NEXT: tail bar@plt
+; RV64-NEXT: tail bar
; RV64-NEXT: .LBB118_2:
; RV64-NEXT: ret
%2 = and i64 %0, 18446744073709551615
diff --git a/llvm/test/CodeGen/RISCV/byval.ll b/llvm/test/CodeGen/RISCV/byval.ll
index d300542e0807..9151f3b03e7c 100644
--- a/llvm/test/CodeGen/RISCV/byval.ll
+++ b/llvm/test/CodeGen/RISCV/byval.ll
@@ -32,7 +32,7 @@ define void @caller() nounwind {
; RV32I-NEXT: lw a0, 4(a0)
; RV32I-NEXT: sw a0, 16(sp)
; RV32I-NEXT: addi a0, sp, 12
-; RV32I-NEXT: call callee@plt
+; RV32I-NEXT: call callee
; RV32I-NEXT: lw ra, 28(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 32
; RV32I-NEXT: ret
diff --git a/llvm/test/CodeGen/RISCV/callee-saved-fpr32s.ll b/llvm/test/CodeGen/RISCV/callee-saved-fpr32s.ll
index 1aa7783fedd2..79e80dac8241 100644
--- a/llvm/test/CodeGen/RISCV/callee-saved-fpr32s.ll
+++ b/llvm/test/CodeGen/RISCV/callee-saved-fpr32s.ll
@@ -629,7 +629,7 @@ define void @caller() nounwind {
; ILP32-NEXT: fsw fa5, 8(sp) # 4-byte Folded Spill
; ILP32-NEXT: flw fa5, 124(s1)
; ILP32-NEXT: fsw fa5, 4(sp) # 4-byte Folded Spill
-; ILP32-NEXT: call callee@plt
+; ILP32-NEXT: call callee
; ILP32-NEXT: flw fa5, 4(sp) # 4-byte Folded Reload
; ILP32-NEXT: fsw fa5, 124(s1)
; ILP32-NEXT: flw fa5, 8(sp) # 4-byte Folded Reload
@@ -772,7 +772,7 @@ define void @caller() nounwind {
; LP64-NEXT: fsw fa5, 12(sp) # 4-byte Folded Spill
; LP64-NEXT: flw fa5, 124(s1)
; LP64-NEXT: fsw fa5, 8(sp) # 4-byte Folded Spill
-; LP64-NEXT: call callee@plt
+; LP64-NEXT: call callee
; LP64-NEXT: flw fa5, 8(sp) # 4-byte Folded Reload
; LP64-NEXT: fsw fa5, 124(s1)
; LP64-NEXT: flw fa5, 12(sp) # 4-byte Folded Reload
@@ -915,7 +915,7 @@ define void @caller() nounwind {
; ILP32F-NEXT: flw fs5, 116(s1)
; ILP32F-NEXT: flw fs6, 120(s1)
; ILP32F-NEXT: flw fs7, 124(s1)
-; ILP32F-NEXT: call callee@plt
+; ILP32F-NEXT: call callee
; ILP32F-NEXT: fsw fs7, 124(s1)
; ILP32F-NEXT: fsw fs6, 120(s1)
; ILP32F-NEXT: fsw fs5, 116(s1)
@@ -1058,7 +1058,7 @@ define void @caller() nounwind {
; LP64F-NEXT: flw fs5, 116(s1)
; LP64F-NEXT: flw fs6, 120(s1)
; LP64F-NEXT: flw fs7, 124(s1)
-; LP64F-NEXT: call callee@plt
+; LP64F-NEXT: call callee
; LP64F-NEXT: fsw fs7, 124(s1)
; LP64F-NEXT: fsw fs6, 120(s1)
; LP64F-NEXT: fsw fs5, 116(s1)
@@ -1201,7 +1201,7 @@ define void @caller() nounwind {
; ILP32D-NEXT: flw fs5, 116(s1)
; ILP32D-NEXT: flw fs6, 120(s1)
; ILP32D-NEXT: flw fs7, 124(s1)
-; ILP32D-NEXT: call callee@plt
+; ILP32D-NEXT: call callee
; ILP32D-NEXT: fsw fs7, 124(s1)
; ILP32D-NEXT: fsw fs6, 120(s1)
; ILP32D-NEXT: fsw fs5, 116(s1)
@@ -1344,7 +1344,7 @@ define void @caller() nounwind {
; LP64D-NEXT: flw fs5, 116(s1)
; LP64D-NEXT: flw fs6, 120(s1)
; LP64D-NEXT: flw fs7, 124(s1)
-; LP64D-NEXT: call callee@plt
+; LP64D-NEXT: call callee
; LP64D-NEXT: fsw fs7, 124(s1)
; LP64D-NEXT: fsw fs6, 120(s1)
; LP64D-NEXT: fsw fs5, 116(s1)
diff --git a/llvm/test/CodeGen/RISCV/callee-saved-fpr64s.ll b/llvm/test/CodeGen/RISCV/callee-saved-fpr64s.ll
index 40076316bca8..abfa26e8a4f2 100644
--- a/llvm/test/CodeGen/RISCV/callee-saved-fpr64s.ll
+++ b/llvm/test/CodeGen/RISCV/callee-saved-fpr64s.ll
@@ -433,7 +433,7 @@ define void @caller() nounwind {
; ILP32-NEXT: fsd fa5, 8(sp) # 8-byte Folded Spill
; ILP32-NEXT: fld fa5, 248(s1)
; ILP32-NEXT: fsd fa5, 0(sp) # 8-byte Folded Spill
-; ILP32-NEXT: call callee@plt
+; ILP32-NEXT: call callee
; ILP32-NEXT: fld fa5, 0(sp) # 8-byte Folded Reload
; ILP32-NEXT: fsd fa5, 248(s1)
; ILP32-NEXT: fld fa5, 8(sp) # 8-byte Folded Reload
@@ -576,7 +576,7 @@ define void @caller() nounwind {
; LP64-NEXT: fsd fa5, 16(sp) # 8-byte Folded Spill
; LP64-NEXT: fld fa5, 248(s1)
; LP64-NEXT: fsd fa5, 8(sp) # 8-byte Folded Spill
-; LP64-NEXT: call callee@plt
+; LP64-NEXT: call callee
; LP64-NEXT: fld fa5, 8(sp) # 8-byte Folded Reload
; LP64-NEXT: fsd fa5, 248(s1)
; LP64-NEXT: fld fa5, 16(sp) # 8-byte Folded Reload
@@ -719,7 +719,7 @@ define void @caller() nounwind {
; ILP32D-NEXT: fld fs5, 232(s1)
; ILP32D-NEXT: fld fs6, 240(s1)
; ILP32D-NEXT: fld fs7, 248(s1)
-; ILP32D-NEXT: call callee@plt
+; ILP32D-NEXT: call callee
; ILP32D-NEXT: fsd fs7, 248(s1)
; ILP32D-NEXT: fsd fs6, 240(s1)
; ILP32D-NEXT: fsd fs5, 232(s1)
@@ -862,7 +862,7 @@ define void @caller() nounwind {
; LP64D-NEXT: fld fs5, 232(s1)
; LP64D-NEXT: fld fs6, 240(s1)
; LP64D-NEXT: fld fs7, 248(s1)
-; LP64D-NEXT: call callee@plt
+; LP64D-NEXT: call callee
; LP64D-NEXT: fsd fs7, 248(s1)
; LP64D-NEXT: fsd fs6, 240(s1)
; LP64D-NEXT: fsd fs5, 232(s1)
diff --git a/llvm/test/CodeGen/RISCV/callee-saved-gprs.ll b/llvm/test/CodeGen/RISCV/callee-saved-gprs.ll
index 09ecbbc7e8fe..6303a1245677 100644
--- a/llvm/test/CodeGen/RISCV/callee-saved-gprs.ll
+++ b/llvm/test/CodeGen/RISCV/callee-saved-gprs.ll
@@ -952,7 +952,7 @@ define void @caller() nounwind {
; RV32I-NEXT: lw s11, 116(s5)
; RV32I-NEXT: lw s1, 120(s5)
; RV32I-NEXT: lw s2, 124(s5)
-; RV32I-NEXT: call callee@plt
+; RV32I-NEXT: call callee
; RV32I-NEXT: sw s2, 124(s5)
; RV32I-NEXT: sw s1, 120(s5)
; RV32I-NEXT: sw s11, 116(s5)
@@ -1097,7 +1097,7 @@ define void @caller() nounwind {
; RV32I-WITH-FP-NEXT: lw s4, 116(s6)
; RV32I-WITH-FP-NEXT: lw s5, 120(s6)
; RV32I-WITH-FP-NEXT: lw s7, 124(s6)
-; RV32I-WITH-FP-NEXT: call callee@plt
+; RV32I-WITH-FP-NEXT: call callee
; RV32I-WITH-FP-NEXT: sw s7, 124(s6)
; RV32I-WITH-FP-NEXT: sw s5, 120(s6)
; RV32I-WITH-FP-NEXT: sw s4, 116(s6)
@@ -1229,7 +1229,7 @@ define void @caller() nounwind {
; RV32IZCMP-NEXT: lw s11, 116(s1)
; RV32IZCMP-NEXT: lw s2, 120(s1)
; RV32IZCMP-NEXT: lw s3, 124(s1)
-; RV32IZCMP-NEXT: call callee@plt
+; RV32IZCMP-NEXT: call callee
; RV32IZCMP-NEXT: sw s3, 124(s1)
; RV32IZCMP-NEXT: sw s2, 120(s1)
; RV32IZCMP-NEXT: sw s11, 116(s1)
@@ -1361,7 +1361,7 @@ define void @caller() nounwind {
; RV32IZCMP-WITH-FP-NEXT: lw s4, 116(s1)
; RV32IZCMP-WITH-FP-NEXT: lw s5, 120(s1)
; RV32IZCMP-WITH-FP-NEXT: lw s7, 124(s1)
-; RV32IZCMP-WITH-FP-NEXT: call callee@plt
+; RV32IZCMP-WITH-FP-NEXT: call callee
; RV32IZCMP-WITH-FP-NEXT: sw s7, 124(s1)
; RV32IZCMP-WITH-FP-NEXT: sw s5, 120(s1)
; RV32IZCMP-WITH-FP-NEXT: sw s4, 116(s1)
@@ -1505,7 +1505,7 @@ define void @caller() nounwind {
; RV64I-NEXT: lw s11, 116(s5)
; RV64I-NEXT: lw s1, 120(s5)
; RV64I-NEXT: lw s2, 124(s5)
-; RV64I-NEXT: call callee@plt
+; RV64I-NEXT: call callee
; RV64I-NEXT: sw s2, 124(s5)
; RV64I-NEXT: sw s1, 120(s5)
; RV64I-NEXT: sw s11, 116(s5)
@@ -1650,7 +1650,7 @@ define void @caller() nounwind {
; RV64I-WITH-FP-NEXT: lw s4, 116(s6)
; RV64I-WITH-FP-NEXT: lw s5, 120(s6)
; RV64I-WITH-FP-NEXT: lw s7, 124(s6)
-; RV64I-WITH-FP-NEXT: call callee@plt
+; RV64I-WITH-FP-NEXT: call callee
; RV64I-WITH-FP-NEXT: sw s7, 124(s6)
; RV64I-WITH-FP-NEXT: sw s5, 120(s6)
; RV64I-WITH-FP-NEXT: sw s4, 116(s6)
@@ -1782,7 +1782,7 @@ define void @caller() nounwind {
; RV64IZCMP-NEXT: lw s11, 116(s1)
; RV64IZCMP-NEXT: lw s2, 120(s1)
; RV64IZCMP-NEXT: lw s3, 124(s1)
-; RV64IZCMP-NEXT: call callee@plt
+; RV64IZCMP-NEXT: call callee
; RV64IZCMP-NEXT: sw s3, 124(s1)
; RV64IZCMP-NEXT: sw s2, 120(s1)
; RV64IZCMP-NEXT: sw s11, 116(s1)
@@ -1914,7 +1914,7 @@ define void @caller() nounwind {
; RV64IZCMP-WITH-FP-NEXT: lw s4, 116(s1)
; RV64IZCMP-WITH-FP-NEXT: lw s5, 120(s1)
; RV64IZCMP-WITH-FP-NEXT: lw s7, 124(s1)
-; RV64IZCMP-WITH-FP-NEXT: call callee@plt
+; RV64IZCMP-WITH-FP-NEXT: call callee
; RV64IZCMP-WITH-FP-NEXT: sw s7, 124(s1)
; RV64IZCMP-WITH-FP-NEXT: sw s5, 120(s1)
; RV64IZCMP-WITH-FP-NEXT: sw s4, 116(s1)
@@ -2279,7 +2279,7 @@ define void @varargs(...) {
; RV32I-NEXT: sw a2, 24(sp)
; RV32I-NEXT: sw a1, 20(sp)
; RV32I-NEXT: sw a0, 16(sp)
-; RV32I-NEXT: call callee@plt
+; RV32I-NEXT: call callee
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 48
; RV32I-NEXT: ret
@@ -2302,7 +2302,7 @@ define void @varargs(...) {
; RV32I-WITH-FP-NEXT: sw a2, 8(s0)
; RV32I-WITH-FP-NEXT: sw a1, 4(s0)
; RV32I-WITH-FP-NEXT: sw a0, 0(s0)
-; RV32I-WITH-FP-NEXT: call callee@plt
+; RV32I-WITH-FP-NEXT: call callee
; RV32I-WITH-FP-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-WITH-FP-NEXT: lw s0, 8(sp) # 4-byte Folded Reload
; RV32I-WITH-FP-NEXT: addi sp, sp, 48
@@ -2322,7 +2322,7 @@ define void @varargs(...) {
; RV32IZCMP-NEXT: sw a2, 24(sp)
; RV32IZCMP-NEXT: sw a1, 20(sp)
; RV32IZCMP-NEXT: sw a0, 16(sp)
-; RV32IZCMP-NEXT: call callee@plt
+; RV32IZCMP-NEXT: call callee
; RV32IZCMP-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32IZCMP-NEXT: addi sp, sp, 48
; RV32IZCMP-NEXT: ret
@@ -2345,7 +2345,7 @@ define void @varargs(...) {
; RV32IZCMP-WITH-FP-NEXT: sw a2, 8(s0)
; RV32IZCMP-WITH-FP-NEXT: sw a1, 4(s0)
; RV32IZCMP-WITH-FP-NEXT: sw a0, 0(s0)
-; RV32IZCMP-WITH-FP-NEXT: call callee@plt
+; RV32IZCMP-WITH-FP-NEXT: call callee
; RV32IZCMP-WITH-FP-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32IZCMP-WITH-FP-NEXT: lw s0, 8(sp) # 4-byte Folded Reload
; RV32IZCMP-WITH-FP-NEXT: addi sp, sp, 48
@@ -2365,7 +2365,7 @@ define void @varargs(...) {
; RV64I-NEXT: sd a2, 32(sp)
; RV64I-NEXT: sd a1, 24(sp)
; RV64I-NEXT: sd a0, 16(sp)
-; RV64I-NEXT: call callee@plt
+; RV64I-NEXT: call callee
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 80
; RV64I-NEXT: ret
@@ -2388,7 +2388,7 @@ define void @varargs(...) {
; RV64I-WITH-FP-NEXT: sd a2, 16(s0)
; RV64I-WITH-FP-NEXT: sd a1, 8(s0)
; RV64I-WITH-FP-NEXT: sd a0, 0(s0)
-; RV64I-WITH-FP-NEXT: call callee@plt
+; RV64I-WITH-FP-NEXT: call callee
; RV64I-WITH-FP-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-WITH-FP-NEXT: ld s0, 0(sp) # 8-byte Folded Reload
; RV64I-WITH-FP-NEXT: addi sp, sp, 80
@@ -2408,7 +2408,7 @@ define void @varargs(...) {
; RV64IZCMP-NEXT: sd a2, 32(sp)
; RV64IZCMP-NEXT: sd a1, 24(sp)
; RV64IZCMP-NEXT: sd a0, 16(sp)
-; RV64IZCMP-NEXT: call callee@plt
+; RV64IZCMP-NEXT: call callee
; RV64IZCMP-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64IZCMP-NEXT: addi sp, sp, 80
; RV64IZCMP-NEXT: ret
@@ -2431,7 +2431,7 @@ define void @varargs(...) {
; RV64IZCMP-WITH-FP-NEXT: sd a2, 16(s0)
; RV64IZCMP-WITH-FP-NEXT: sd a1, 8(s0)
; RV64IZCMP-WITH-FP-NEXT: sd a0, 0(s0)
-; RV64IZCMP-WITH-FP-NEXT: call callee@plt
+; RV64IZCMP-WITH-FP-NEXT: call callee
; RV64IZCMP-WITH-FP-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64IZCMP-WITH-FP-NEXT: ld s0, 0(sp) # 8-byte Folded Reload
; RV64IZCMP-WITH-FP-NEXT: addi sp, sp, 80
diff --git a/llvm/test/CodeGen/RISCV/calling-conv-half.ll b/llvm/test/CodeGen/RISCV/calling-conv-half.ll
index ad4578bda344..c88b2bf596ca 100644
--- a/llvm/test/CodeGen/RISCV/calling-conv-half.ll
+++ b/llvm/test/CodeGen/RISCV/calling-conv-half.ll
@@ -21,8 +21,8 @@ define i32 @callee_half_in_regs(i32 %a, half %b) nounwind {
; RV32I-NEXT: mv s0, a0
; RV32I-NEXT: slli a0, a1, 16
; RV32I-NEXT: srli a0, a0, 16
-; RV32I-NEXT: call __extendhfsf2@plt
-; RV32I-NEXT: call __fixsfsi@plt
+; RV32I-NEXT: call __extendhfsf2
+; RV32I-NEXT: call __fixsfsi
; RV32I-NEXT: add a0, s0, a0
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: lw s0, 8(sp) # 4-byte Folded Reload
@@ -37,8 +37,8 @@ define i32 @callee_half_in_regs(i32 %a, half %b) nounwind {
; RV64I-NEXT: mv s0, a0
; RV64I-NEXT: slli a0, a1, 48
; RV64I-NEXT: srli a0, a0, 48
-; RV64I-NEXT: call __extendhfsf2@plt
-; RV64I-NEXT: call __fixsfdi@plt
+; RV64I-NEXT: call __extendhfsf2
+; RV64I-NEXT: call __fixsfdi
; RV64I-NEXT: addw a0, s0, a0
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: ld s0, 0(sp) # 8-byte Folded Reload
@@ -52,7 +52,7 @@ define i32 @callee_half_in_regs(i32 %a, half %b) nounwind {
; RV32IF-NEXT: sw s0, 8(sp) # 4-byte Folded Spill
; RV32IF-NEXT: mv s0, a0
; RV32IF-NEXT: mv a0, a1
-; RV32IF-NEXT: call __extendhfsf2@plt
+; RV32IF-NEXT: call __extendhfsf2
; RV32IF-NEXT: fmv.w.x fa5, a0
; RV32IF-NEXT: fcvt.w.s a0, fa5, rtz
; RV32IF-NEXT: add a0, s0, a0
@@ -68,7 +68,7 @@ define i32 @callee_half_in_regs(i32 %a, half %b) nounwind {
; RV64IF-NEXT: sd s0, 0(sp) # 8-byte Folded Spill
; RV64IF-NEXT: mv s0, a0
; RV64IF-NEXT: mv a0, a1
-; RV64IF-NEXT: call __extendhfsf2@plt
+; RV64IF-NEXT: call __extendhfsf2
; RV64IF-NEXT: fmv.w.x fa5, a0
; RV64IF-NEXT: fcvt.l.s a0, fa5, rtz
; RV64IF-NEXT: addw a0, s0, a0
@@ -83,7 +83,7 @@ define i32 @callee_half_in_regs(i32 %a, half %b) nounwind {
; RV32-ILP32F-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32-ILP32F-NEXT: sw s0, 8(sp) # 4-byte Folded Spill
; RV32-ILP32F-NEXT: mv s0, a0
-; RV32-ILP32F-NEXT: call __extendhfsf2@plt
+; RV32-ILP32F-NEXT: call __extendhfsf2
; RV32-ILP32F-NEXT: fcvt.w.s a0, fa0, rtz
; RV32-ILP32F-NEXT: add a0, s0, a0
; RV32-ILP32F-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
@@ -97,7 +97,7 @@ define i32 @callee_half_in_regs(i32 %a, half %b) nounwind {
; RV64-LP64F-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64-LP64F-NEXT: sd s0, 0(sp) # 8-byte Folded Spill
; RV64-LP64F-NEXT: mv s0, a0
-; RV64-LP64F-NEXT: call __extendhfsf2@plt
+; RV64-LP64F-NEXT: call __extendhfsf2
; RV64-LP64F-NEXT: fcvt.l.s a0, fa0, rtz
; RV64-LP64F-NEXT: addw a0, s0, a0
; RV64-LP64F-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
@@ -130,7 +130,7 @@ define i32 @caller_half_in_regs() nounwind {
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT: li a0, 1
; RV32I-NEXT: lui a1, 4
-; RV32I-NEXT: call callee_half_in_regs@plt
+; RV32I-NEXT: call callee_half_in_regs
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
@@ -141,7 +141,7 @@ define i32 @caller_half_in_regs() nounwind {
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT: li a0, 1
; RV64I-NEXT: lui a1, 4
-; RV64I-NEXT: call callee_half_in_regs@plt
+; RV64I-NEXT: call callee_half_in_regs
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
@@ -152,7 +152,7 @@ define i32 @caller_half_in_regs() nounwind {
; RV32IF-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32IF-NEXT: li a0, 1
; RV32IF-NEXT: lui a1, 1048564
-; RV32IF-NEXT: call callee_half_in_regs@plt
+; RV32IF-NEXT: call callee_half_in_regs
; RV32IF-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32IF-NEXT: addi sp, sp, 16
; RV32IF-NEXT: ret
@@ -165,7 +165,7 @@ define i32 @caller_half_in_regs() nounwind {
; RV64IF-NEXT: fmv.w.x fa5, a0
; RV64IF-NEXT: fmv.x.w a1, fa5
; RV64IF-NEXT: li a0, 1
-; RV64IF-NEXT: call callee_half_in_regs@plt
+; RV64IF-NEXT: call callee_half_in_regs
; RV64IF-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64IF-NEXT: addi sp, sp, 16
; RV64IF-NEXT: ret
@@ -177,7 +177,7 @@ define i32 @caller_half_in_regs() nounwind {
; RV32-ILP32F-NEXT: lui a0, 1048564
; RV32-ILP32F-NEXT: fmv.w.x fa0, a0
; RV32-ILP32F-NEXT: li a0, 1
-; RV32-ILP32F-NEXT: call callee_half_in_regs@plt
+; RV32-ILP32F-NEXT: call callee_half_in_regs
; RV32-ILP32F-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32-ILP32F-NEXT: addi sp, sp, 16
; RV32-ILP32F-NEXT: ret
@@ -189,7 +189,7 @@ define i32 @caller_half_in_regs() nounwind {
; RV64-LP64F-NEXT: lui a0, 1048564
; RV64-LP64F-NEXT: fmv.w.x fa0, a0
; RV64-LP64F-NEXT: li a0, 1
-; RV64-LP64F-NEXT: call callee_half_in_regs@plt
+; RV64-LP64F-NEXT: call callee_half_in_regs
; RV64-LP64F-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64-LP64F-NEXT: addi sp, sp, 16
; RV64-LP64F-NEXT: ret
@@ -201,7 +201,7 @@ define i32 @caller_half_in_regs() nounwind {
; RV32-ILP32ZFHMIN-NEXT: lui a0, 4
; RV32-ILP32ZFHMIN-NEXT: fmv.h.x fa0, a0
; RV32-ILP32ZFHMIN-NEXT: li a0, 1
-; RV32-ILP32ZFHMIN-NEXT: call callee_half_in_regs@plt
+; RV32-ILP32ZFHMIN-NEXT: call callee_half_in_regs
; RV32-ILP32ZFHMIN-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32-ILP32ZFHMIN-NEXT: addi sp, sp, 16
; RV32-ILP32ZFHMIN-NEXT: ret
@@ -213,7 +213,7 @@ define i32 @caller_half_in_regs() nounwind {
; RV64-LP64ZFHMIN-NEXT: lui a0, 4
; RV64-LP64ZFHMIN-NEXT: fmv.h.x fa0, a0
; RV64-LP64ZFHMIN-NEXT: li a0, 1
-; RV64-LP64ZFHMIN-NEXT: call callee_half_in_regs@plt
+; RV64-LP64ZFHMIN-NEXT: call callee_half_in_regs
; RV64-LP64ZFHMIN-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64-LP64ZFHMIN-NEXT: addi sp, sp, 16
; RV64-LP64ZFHMIN-NEXT: ret
@@ -229,8 +229,8 @@ define i32 @callee_half_on_stack(i32 %a, i32 %b, i32 %c, i32 %d, i32 %e, i32 %f,
; RV32I-NEXT: sw s0, 8(sp) # 4-byte Folded Spill
; RV32I-NEXT: lhu a0, 16(sp)
; RV32I-NEXT: mv s0, a7
-; RV32I-NEXT: call __extendhfsf2@plt
-; RV32I-NEXT: call __fixsfsi@plt
+; RV32I-NEXT: call __extendhfsf2
+; RV32I-NEXT: call __fixsfsi
; RV32I-NEXT: add a0, s0, a0
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: lw s0, 8(sp) # 4-byte Folded Reload
@@ -244,8 +244,8 @@ define i32 @callee_half_on_stack(i32 %a, i32 %b, i32 %c, i32 %d, i32 %e, i32 %f,
; RV64I-NEXT: sd s0, 0(sp) # 8-byte Folded Spill
; RV64I-NEXT: lhu a0, 16(sp)
; RV64I-NEXT: mv s0, a7
-; RV64I-NEXT: call __extendhfsf2@plt
-; RV64I-NEXT: call __fixsfdi@plt
+; RV64I-NEXT: call __extendhfsf2
+; RV64I-NEXT: call __fixsfdi
; RV64I-NEXT: addw a0, s0, a0
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: ld s0, 0(sp) # 8-byte Folded Reload
@@ -259,7 +259,7 @@ define i32 @callee_half_on_stack(i32 %a, i32 %b, i32 %c, i32 %d, i32 %e, i32 %f,
; RV32IF-NEXT: sw s0, 8(sp) # 4-byte Folded Spill
; RV32IF-NEXT: lhu a0, 16(sp)
; RV32IF-NEXT: mv s0, a7
-; RV32IF-NEXT: call __extendhfsf2@plt
+; RV32IF-NEXT: call __extendhfsf2
; RV32IF-NEXT: fmv.w.x fa5, a0
; RV32IF-NEXT: fcvt.w.s a0, fa5, rtz
; RV32IF-NEXT: add a0, s0, a0
@@ -275,7 +275,7 @@ define i32 @callee_half_on_stack(i32 %a, i32 %b, i32 %c, i32 %d, i32 %e, i32 %f,
; RV64IF-NEXT: sd s0, 0(sp) # 8-byte Folded Spill
; RV64IF-NEXT: lhu a0, 16(sp)
; RV64IF-NEXT: mv s0, a7
-; RV64IF-NEXT: call __extendhfsf2@plt
+; RV64IF-NEXT: call __extendhfsf2
; RV64IF-NEXT: fmv.w.x fa5, a0
; RV64IF-NEXT: fcvt.l.s a0, fa5, rtz
; RV64IF-NEXT: addw a0, s0, a0
@@ -290,7 +290,7 @@ define i32 @callee_half_on_stack(i32 %a, i32 %b, i32 %c, i32 %d, i32 %e, i32 %f,
; RV32-ILP32F-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32-ILP32F-NEXT: sw s0, 8(sp) # 4-byte Folded Spill
; RV32-ILP32F-NEXT: mv s0, a7
-; RV32-ILP32F-NEXT: call __extendhfsf2@plt
+; RV32-ILP32F-NEXT: call __extendhfsf2
; RV32-ILP32F-NEXT: fcvt.w.s a0, fa0, rtz
; RV32-ILP32F-NEXT: add a0, s0, a0
; RV32-ILP32F-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
@@ -304,7 +304,7 @@ define i32 @callee_half_on_stack(i32 %a, i32 %b, i32 %c, i32 %d, i32 %e, i32 %f,
; RV64-LP64F-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64-LP64F-NEXT: sd s0, 0(sp) # 8-byte Folded Spill
; RV64-LP64F-NEXT: mv s0, a7
-; RV64-LP64F-NEXT: call __extendhfsf2@plt
+; RV64-LP64F-NEXT: call __extendhfsf2
; RV64-LP64F-NEXT: fcvt.l.s a0, fa0, rtz
; RV64-LP64F-NEXT: addw a0, s0, a0
; RV64-LP64F-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
@@ -346,7 +346,7 @@ define i32 @caller_half_on_stack() nounwind {
; RV32I-NEXT: li a6, 7
; RV32I-NEXT: li a7, 8
; RV32I-NEXT: sw t0, 0(sp)
-; RV32I-NEXT: call callee_half_on_stack@plt
+; RV32I-NEXT: call callee_half_on_stack
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
@@ -366,7 +366,7 @@ define i32 @caller_half_on_stack() nounwind {
; RV64I-NEXT: li a6, 7
; RV64I-NEXT: li a7, 8
; RV64I-NEXT: sd t0, 0(sp)
-; RV64I-NEXT: call callee_half_on_stack@plt
+; RV64I-NEXT: call callee_half_on_stack
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
@@ -386,7 +386,7 @@ define i32 @caller_half_on_stack() nounwind {
; RV32IF-NEXT: li a6, 7
; RV32IF-NEXT: li a7, 8
; RV32IF-NEXT: sw t0, 0(sp)
-; RV32IF-NEXT: call callee_half_on_stack@plt
+; RV32IF-NEXT: call callee_half_on_stack
; RV32IF-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32IF-NEXT: addi sp, sp, 16
; RV32IF-NEXT: ret
@@ -406,7 +406,7 @@ define i32 @caller_half_on_stack() nounwind {
; RV64IF-NEXT: li a6, 7
; RV64IF-NEXT: li a7, 8
; RV64IF-NEXT: sw t0, 0(sp)
-; RV64IF-NEXT: call callee_half_on_stack@plt
+; RV64IF-NEXT: call callee_half_on_stack
; RV64IF-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64IF-NEXT: addi sp, sp, 16
; RV64IF-NEXT: ret
@@ -425,7 +425,7 @@ define i32 @caller_half_on_stack() nounwind {
; RV32-ILP32F-NEXT: li a5, 6
; RV32-ILP32F-NEXT: li a6, 7
; RV32-ILP32F-NEXT: li a7, 8
-; RV32-ILP32F-NEXT: call callee_half_on_stack@plt
+; RV32-ILP32F-NEXT: call callee_half_on_stack
; RV32-ILP32F-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32-ILP32F-NEXT: addi sp, sp, 16
; RV32-ILP32F-NEXT: ret
@@ -444,7 +444,7 @@ define i32 @caller_half_on_stack() nounwind {
; RV64-LP64F-NEXT: li a5, 6
; RV64-LP64F-NEXT: li a6, 7
; RV64-LP64F-NEXT: li a7, 8
-; RV64-LP64F-NEXT: call callee_half_on_stack@plt
+; RV64-LP64F-NEXT: call callee_half_on_stack
; RV64-LP64F-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64-LP64F-NEXT: addi sp, sp, 16
; RV64-LP64F-NEXT: ret
@@ -463,7 +463,7 @@ define i32 @caller_half_on_stack() nounwind {
; RV32-ILP32ZFHMIN-NEXT: li a5, 6
; RV32-ILP32ZFHMIN-NEXT: li a6, 7
; RV32-ILP32ZFHMIN-NEXT: li a7, 8
-; RV32-ILP32ZFHMIN-NEXT: call callee_half_on_stack@plt
+; RV32-ILP32ZFHMIN-NEXT: call callee_half_on_stack
; RV32-ILP32ZFHMIN-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32-ILP32ZFHMIN-NEXT: addi sp, sp, 16
; RV32-ILP32ZFHMIN-NEXT: ret
@@ -482,7 +482,7 @@ define i32 @caller_half_on_stack() nounwind {
; RV64-LP64ZFHMIN-NEXT: li a5, 6
; RV64-LP64ZFHMIN-NEXT: li a6, 7
; RV64-LP64ZFHMIN-NEXT: li a7, 8
-; RV64-LP64ZFHMIN-NEXT: call callee_half_on_stack@plt
+; RV64-LP64ZFHMIN-NEXT: call callee_half_on_stack
; RV64-LP64ZFHMIN-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64-LP64ZFHMIN-NEXT: addi sp, sp, 16
; RV64-LP64ZFHMIN-NEXT: ret
@@ -547,11 +547,11 @@ define i32 @caller_half_ret() nounwind {
; RV32I: # %bb.0:
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT: call callee_half_ret@plt
+; RV32I-NEXT: call callee_half_ret
; RV32I-NEXT: slli a0, a0, 16
; RV32I-NEXT: srli a0, a0, 16
-; RV32I-NEXT: call __extendhfsf2@plt
-; RV32I-NEXT: call __fixsfsi@plt
+; RV32I-NEXT: call __extendhfsf2
+; RV32I-NEXT: call __fixsfsi
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
@@ -560,11 +560,11 @@ define i32 @caller_half_ret() nounwind {
; RV64I: # %bb.0:
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT: call callee_half_ret@plt
+; RV64I-NEXT: call callee_half_ret
; RV64I-NEXT: slli a0, a0, 48
; RV64I-NEXT: srli a0, a0, 48
-; RV64I-NEXT: call __extendhfsf2@plt
-; RV64I-NEXT: call __fixsfdi@plt
+; RV64I-NEXT: call __extendhfsf2
+; RV64I-NEXT: call __fixsfdi
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
@@ -573,8 +573,8 @@ define i32 @caller_half_ret() nounwind {
; RV32IF: # %bb.0:
; RV32IF-NEXT: addi sp, sp, -16
; RV32IF-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IF-NEXT: call callee_half_ret@plt
-; RV32IF-NEXT: call __extendhfsf2@plt
+; RV32IF-NEXT: call callee_half_ret
+; RV32IF-NEXT: call __extendhfsf2
; RV32IF-NEXT: fmv.w.x fa5, a0
; RV32IF-NEXT: fcvt.w.s a0, fa5, rtz
; RV32IF-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
@@ -585,8 +585,8 @@ define i32 @caller_half_ret() nounwind {
; RV64IF: # %bb.0:
; RV64IF-NEXT: addi sp, sp, -16
; RV64IF-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
-; RV64IF-NEXT: call callee_half_ret@plt
-; RV64IF-NEXT: call __extendhfsf2@plt
+; RV64IF-NEXT: call callee_half_ret
+; RV64IF-NEXT: call __extendhfsf2
; RV64IF-NEXT: fmv.w.x fa5, a0
; RV64IF-NEXT: fcvt.l.s a0, fa5, rtz
; RV64IF-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
@@ -597,8 +597,8 @@ define i32 @caller_half_ret() nounwind {
; RV32-ILP32F: # %bb.0:
; RV32-ILP32F-NEXT: addi sp, sp, -16
; RV32-ILP32F-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32-ILP32F-NEXT: call callee_half_ret@plt
-; RV32-ILP32F-NEXT: call __extendhfsf2@plt
+; RV32-ILP32F-NEXT: call callee_half_ret
+; RV32-ILP32F-NEXT: call __extendhfsf2
; RV32-ILP32F-NEXT: fcvt.w.s a0, fa0, rtz
; RV32-ILP32F-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32-ILP32F-NEXT: addi sp, sp, 16
@@ -608,8 +608,8 @@ define i32 @caller_half_ret() nounwind {
; RV64-LP64F: # %bb.0:
; RV64-LP64F-NEXT: addi sp, sp, -16
; RV64-LP64F-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
-; RV64-LP64F-NEXT: call callee_half_ret@plt
-; RV64-LP64F-NEXT: call __extendhfsf2@plt
+; RV64-LP64F-NEXT: call callee_half_ret
+; RV64-LP64F-NEXT: call __extendhfsf2
; RV64-LP64F-NEXT: fcvt.l.s a0, fa0, rtz
; RV64-LP64F-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64-LP64F-NEXT: addi sp, sp, 16
@@ -619,7 +619,7 @@ define i32 @caller_half_ret() nounwind {
; RV32-ILP32ZFHMIN: # %bb.0:
; RV32-ILP32ZFHMIN-NEXT: addi sp, sp, -16
; RV32-ILP32ZFHMIN-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32-ILP32ZFHMIN-NEXT: call callee_half_ret@plt
+; RV32-ILP32ZFHMIN-NEXT: call callee_half_ret
; RV32-ILP32ZFHMIN-NEXT: fcvt.s.h fa5, fa0
; RV32-ILP32ZFHMIN-NEXT: fcvt.w.s a0, fa5, rtz
; RV32-ILP32ZFHMIN-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
@@ -630,7 +630,7 @@ define i32 @caller_half_ret() nounwind {
; RV64-LP64ZFHMIN: # %bb.0:
; RV64-LP64ZFHMIN-NEXT: addi sp, sp, -16
; RV64-LP64ZFHMIN-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
-; RV64-LP64ZFHMIN-NEXT: call callee_half_ret@plt
+; RV64-LP64ZFHMIN-NEXT: call callee_half_ret
; RV64-LP64ZFHMIN-NEXT: fcvt.s.h fa5, fa0
; RV64-LP64ZFHMIN-NEXT: fcvt.w.s a0, fa5, rtz
; RV64-LP64ZFHMIN-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
diff --git a/llvm/test/CodeGen/RISCV/calling-conv-ilp32-ilp32f-common.ll b/llvm/test/CodeGen/RISCV/calling-conv-ilp32-ilp32f-common.ll
index 24e2d31707e9..278187f62cd7 100644
--- a/llvm/test/CodeGen/RISCV/calling-conv-ilp32-ilp32f-common.ll
+++ b/llvm/test/CodeGen/RISCV/calling-conv-ilp32-ilp32f-common.ll
@@ -26,7 +26,7 @@ define i32 @callee_double_in_regs(i32 %a, double %b) nounwind {
; RV32I-FPELIM-NEXT: mv s0, a0
; RV32I-FPELIM-NEXT: mv a0, a1
; RV32I-FPELIM-NEXT: mv a1, a2
-; RV32I-FPELIM-NEXT: call __fixdfsi@plt
+; RV32I-FPELIM-NEXT: call __fixdfsi
; RV32I-FPELIM-NEXT: add a0, s0, a0
; RV32I-FPELIM-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-FPELIM-NEXT: lw s0, 8(sp) # 4-byte Folded Reload
@@ -43,7 +43,7 @@ define i32 @callee_double_in_regs(i32 %a, double %b) nounwind {
; RV32I-WITHFP-NEXT: mv s1, a0
; RV32I-WITHFP-NEXT: mv a0, a1
; RV32I-WITHFP-NEXT: mv a1, a2
-; RV32I-WITHFP-NEXT: call __fixdfsi@plt
+; RV32I-WITHFP-NEXT: call __fixdfsi
; RV32I-WITHFP-NEXT: add a0, s1, a0
; RV32I-WITHFP-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-WITHFP-NEXT: lw s0, 8(sp) # 4-byte Folded Reload
@@ -63,7 +63,7 @@ define i32 @caller_double_in_regs() nounwind {
; RV32I-FPELIM-NEXT: li a0, 1
; RV32I-FPELIM-NEXT: lui a2, 262144
; RV32I-FPELIM-NEXT: li a1, 0
-; RV32I-FPELIM-NEXT: call callee_double_in_regs@plt
+; RV32I-FPELIM-NEXT: call callee_double_in_regs
; RV32I-FPELIM-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-FPELIM-NEXT: addi sp, sp, 16
; RV32I-FPELIM-NEXT: ret
@@ -77,7 +77,7 @@ define i32 @caller_double_in_regs() nounwind {
; RV32I-WITHFP-NEXT: li a0, 1
; RV32I-WITHFP-NEXT: lui a2, 262144
; RV32I-WITHFP-NEXT: li a1, 0
-; RV32I-WITHFP-NEXT: call callee_double_in_regs@plt
+; RV32I-WITHFP-NEXT: call callee_double_in_regs
; RV32I-WITHFP-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-WITHFP-NEXT: lw s0, 8(sp) # 4-byte Folded Reload
; RV32I-WITHFP-NEXT: addi sp, sp, 16
@@ -180,7 +180,7 @@ define void @caller_aligned_stack() nounwind {
; RV32I-FPELIM-NEXT: li a6, 4
; RV32I-FPELIM-NEXT: li a7, 14
; RV32I-FPELIM-NEXT: sw t0, 32(sp)
-; RV32I-FPELIM-NEXT: call callee_aligned_stack@plt
+; RV32I-FPELIM-NEXT: call callee_aligned_stack
; RV32I-FPELIM-NEXT: lw ra, 60(sp) # 4-byte Folded Reload
; RV32I-FPELIM-NEXT: addi sp, sp, 64
; RV32I-FPELIM-NEXT: ret
@@ -226,7 +226,7 @@ define void @caller_aligned_stack() nounwind {
; RV32I-WITHFP-NEXT: li a6, 4
; RV32I-WITHFP-NEXT: li a7, 14
; RV32I-WITHFP-NEXT: sw t0, -32(s0)
-; RV32I-WITHFP-NEXT: call callee_aligned_stack@plt
+; RV32I-WITHFP-NEXT: call callee_aligned_stack
; RV32I-WITHFP-NEXT: lw ra, 60(sp) # 4-byte Folded Reload
; RV32I-WITHFP-NEXT: lw s0, 56(sp) # 4-byte Folded Reload
; RV32I-WITHFP-NEXT: addi sp, sp, 64
@@ -265,7 +265,7 @@ define i64 @caller_small_scalar_ret() nounwind {
; RV32I-FPELIM: # %bb.0:
; RV32I-FPELIM-NEXT: addi sp, sp, -16
; RV32I-FPELIM-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-FPELIM-NEXT: call callee_small_scalar_ret@plt
+; RV32I-FPELIM-NEXT: call callee_small_scalar_ret
; RV32I-FPELIM-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-FPELIM-NEXT: addi sp, sp, 16
; RV32I-FPELIM-NEXT: ret
@@ -276,7 +276,7 @@ define i64 @caller_small_scalar_ret() nounwind {
; RV32I-WITHFP-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-WITHFP-NEXT: sw s0, 8(sp) # 4-byte Folded Spill
; RV32I-WITHFP-NEXT: addi s0, sp, 16
-; RV32I-WITHFP-NEXT: call callee_small_scalar_ret@plt
+; RV32I-WITHFP-NEXT: call callee_small_scalar_ret
; RV32I-WITHFP-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-WITHFP-NEXT: lw s0, 8(sp) # 4-byte Folded Reload
; RV32I-WITHFP-NEXT: addi sp, sp, 16
diff --git a/llvm/test/CodeGen/RISCV/calling-conv-ilp32-ilp32f-ilp32d-common.ll b/llvm/test/CodeGen/RISCV/calling-conv-ilp32-ilp32f-ilp32d-common.ll
index 649234efaad9..231ed159ab20 100644
--- a/llvm/test/CodeGen/RISCV/calling-conv-ilp32-ilp32f-ilp32d-common.ll
+++ b/llvm/test/CodeGen/RISCV/calling-conv-ilp32-ilp32f-ilp32d-common.ll
@@ -54,7 +54,7 @@ define i32 @caller_i64_in_regs() nounwind {
; RV32I-FPELIM-NEXT: li a0, 1
; RV32I-FPELIM-NEXT: li a1, 2
; RV32I-FPELIM-NEXT: li a2, 0
-; RV32I-FPELIM-NEXT: call callee_i64_in_regs@plt
+; RV32I-FPELIM-NEXT: call callee_i64_in_regs
; RV32I-FPELIM-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-FPELIM-NEXT: addi sp, sp, 16
; RV32I-FPELIM-NEXT: ret
@@ -68,7 +68,7 @@ define i32 @caller_i64_in_regs() nounwind {
; RV32I-WITHFP-NEXT: li a0, 1
; RV32I-WITHFP-NEXT: li a1, 2
; RV32I-WITHFP-NEXT: li a2, 0
-; RV32I-WITHFP-NEXT: call callee_i64_in_regs@plt
+; RV32I-WITHFP-NEXT: call callee_i64_in_regs
; RV32I-WITHFP-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-WITHFP-NEXT: lw s0, 8(sp) # 4-byte Folded Reload
; RV32I-WITHFP-NEXT: addi sp, sp, 16
@@ -153,7 +153,7 @@ define i32 @caller_many_scalars() nounwind {
; RV32I-FPELIM-NEXT: li a7, 7
; RV32I-FPELIM-NEXT: sw zero, 0(sp)
; RV32I-FPELIM-NEXT: li a4, 0
-; RV32I-FPELIM-NEXT: call callee_many_scalars@plt
+; RV32I-FPELIM-NEXT: call callee_many_scalars
; RV32I-FPELIM-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-FPELIM-NEXT: addi sp, sp, 16
; RV32I-FPELIM-NEXT: ret
@@ -175,7 +175,7 @@ define i32 @caller_many_scalars() nounwind {
; RV32I-WITHFP-NEXT: li a7, 7
; RV32I-WITHFP-NEXT: sw zero, 0(sp)
; RV32I-WITHFP-NEXT: li a4, 0
-; RV32I-WITHFP-NEXT: call callee_many_scalars@plt
+; RV32I-WITHFP-NEXT: call callee_many_scalars
; RV32I-WITHFP-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-WITHFP-NEXT: lw s0, 8(sp) # 4-byte Folded Reload
; RV32I-WITHFP-NEXT: addi sp, sp, 16
@@ -257,7 +257,7 @@ define i32 @caller_large_scalars() nounwind {
; RV32I-FPELIM-NEXT: addi a0, sp, 24
; RV32I-FPELIM-NEXT: mv a1, sp
; RV32I-FPELIM-NEXT: sw a2, 24(sp)
-; RV32I-FPELIM-NEXT: call callee_large_scalars@plt
+; RV32I-FPELIM-NEXT: call callee_large_scalars
; RV32I-FPELIM-NEXT: lw ra, 44(sp) # 4-byte Folded Reload
; RV32I-FPELIM-NEXT: addi sp, sp, 48
; RV32I-FPELIM-NEXT: ret
@@ -280,7 +280,7 @@ define i32 @caller_large_scalars() nounwind {
; RV32I-WITHFP-NEXT: addi a0, s0, -24
; RV32I-WITHFP-NEXT: addi a1, s0, -48
; RV32I-WITHFP-NEXT: sw a2, -24(s0)
-; RV32I-WITHFP-NEXT: call callee_large_scalars@plt
+; RV32I-WITHFP-NEXT: call callee_large_scalars
; RV32I-WITHFP-NEXT: lw ra, 44(sp) # 4-byte Folded Reload
; RV32I-WITHFP-NEXT: lw s0, 40(sp) # 4-byte Folded Reload
; RV32I-WITHFP-NEXT: addi sp, sp, 48
@@ -375,7 +375,7 @@ define i32 @caller_large_scalars_exhausted_regs() nounwind {
; RV32I-FPELIM-NEXT: li a6, 7
; RV32I-FPELIM-NEXT: addi a7, sp, 40
; RV32I-FPELIM-NEXT: sw zero, 44(sp)
-; RV32I-FPELIM-NEXT: call callee_large_scalars_exhausted_regs@plt
+; RV32I-FPELIM-NEXT: call callee_large_scalars_exhausted_regs
; RV32I-FPELIM-NEXT: lw ra, 60(sp) # 4-byte Folded Reload
; RV32I-FPELIM-NEXT: addi sp, sp, 64
; RV32I-FPELIM-NEXT: ret
@@ -408,7 +408,7 @@ define i32 @caller_large_scalars_exhausted_regs() nounwind {
; RV32I-WITHFP-NEXT: li a6, 7
; RV32I-WITHFP-NEXT: addi a7, s0, -24
; RV32I-WITHFP-NEXT: sw zero, -20(s0)
-; RV32I-WITHFP-NEXT: call callee_large_scalars_exhausted_regs@plt
+; RV32I-WITHFP-NEXT: call callee_large_scalars_exhausted_regs
; RV32I-WITHFP-NEXT: lw ra, 60(sp) # 4-byte Folded Reload
; RV32I-WITHFP-NEXT: lw s0, 56(sp) # 4-byte Folded Reload
; RV32I-WITHFP-NEXT: addi sp, sp, 64
@@ -429,7 +429,7 @@ define i32 @caller_mixed_scalar_libcalls(i64 %a) nounwind {
; RV32I-FPELIM-NEXT: mv a2, a1
; RV32I-FPELIM-NEXT: mv a1, a0
; RV32I-FPELIM-NEXT: addi a0, sp, 8
-; RV32I-FPELIM-NEXT: call __floatditf@plt
+; RV32I-FPELIM-NEXT: call __floatditf
; RV32I-FPELIM-NEXT: lw a0, 8(sp)
; RV32I-FPELIM-NEXT: lw ra, 28(sp) # 4-byte Folded Reload
; RV32I-FPELIM-NEXT: addi sp, sp, 32
@@ -444,7 +444,7 @@ define i32 @caller_mixed_scalar_libcalls(i64 %a) nounwind {
; RV32I-WITHFP-NEXT: mv a2, a1
; RV32I-WITHFP-NEXT: mv a1, a0
; RV32I-WITHFP-NEXT: addi a0, s0, -24
-; RV32I-WITHFP-NEXT: call __floatditf@plt
+; RV32I-WITHFP-NEXT: call __floatditf
; RV32I-WITHFP-NEXT: lw a0, -24(s0)
; RV32I-WITHFP-NEXT: lw ra, 28(sp) # 4-byte Folded Reload
; RV32I-WITHFP-NEXT: lw s0, 24(sp) # 4-byte Folded Reload
@@ -493,7 +493,7 @@ define i32 @caller_small_coerced_struct() nounwind {
; RV32I-FPELIM-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-FPELIM-NEXT: li a0, 1
; RV32I-FPELIM-NEXT: li a1, 2
-; RV32I-FPELIM-NEXT: call callee_small_coerced_struct@plt
+; RV32I-FPELIM-NEXT: call callee_small_coerced_struct
; RV32I-FPELIM-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-FPELIM-NEXT: addi sp, sp, 16
; RV32I-FPELIM-NEXT: ret
@@ -506,7 +506,7 @@ define i32 @caller_small_coerced_struct() nounwind {
; RV32I-WITHFP-NEXT: addi s0, sp, 16
; RV32I-WITHFP-NEXT: li a0, 1
; RV32I-WITHFP-NEXT: li a1, 2
-; RV32I-WITHFP-NEXT: call callee_small_coerced_struct@plt
+; RV32I-WITHFP-NEXT: call callee_small_coerced_struct
; RV32I-WITHFP-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-WITHFP-NEXT: lw s0, 8(sp) # 4-byte Folded Reload
; RV32I-WITHFP-NEXT: addi sp, sp, 16
@@ -565,7 +565,7 @@ define i32 @caller_large_struct() nounwind {
; RV32I-FPELIM-NEXT: sw a2, 16(sp)
; RV32I-FPELIM-NEXT: sw a3, 20(sp)
; RV32I-FPELIM-NEXT: addi a0, sp, 8
-; RV32I-FPELIM-NEXT: call callee_large_struct@plt
+; RV32I-FPELIM-NEXT: call callee_large_struct
; RV32I-FPELIM-NEXT: lw ra, 44(sp) # 4-byte Folded Reload
; RV32I-FPELIM-NEXT: addi sp, sp, 48
; RV32I-FPELIM-NEXT: ret
@@ -589,7 +589,7 @@ define i32 @caller_large_struct() nounwind {
; RV32I-WITHFP-NEXT: sw a2, -32(s0)
; RV32I-WITHFP-NEXT: sw a3, -28(s0)
; RV32I-WITHFP-NEXT: addi a0, s0, -40
-; RV32I-WITHFP-NEXT: call callee_large_struct@plt
+; RV32I-WITHFP-NEXT: call callee_large_struct
; RV32I-WITHFP-NEXT: lw ra, 44(sp) # 4-byte Folded Reload
; RV32I-WITHFP-NEXT: lw s0, 40(sp) # 4-byte Folded Reload
; RV32I-WITHFP-NEXT: addi sp, sp, 48
@@ -696,7 +696,7 @@ define void @caller_aligned_stack() nounwind {
; RV32I-FPELIM-NEXT: li a6, 4
; RV32I-FPELIM-NEXT: li a7, 14
; RV32I-FPELIM-NEXT: sw t0, 32(sp)
-; RV32I-FPELIM-NEXT: call callee_aligned_stack@plt
+; RV32I-FPELIM-NEXT: call callee_aligned_stack
; RV32I-FPELIM-NEXT: lw ra, 60(sp) # 4-byte Folded Reload
; RV32I-FPELIM-NEXT: addi sp, sp, 64
; RV32I-FPELIM-NEXT: ret
@@ -739,7 +739,7 @@ define void @caller_aligned_stack() nounwind {
; RV32I-WITHFP-NEXT: li a6, 4
; RV32I-WITHFP-NEXT: li a7, 14
; RV32I-WITHFP-NEXT: sw t0, -32(s0)
-; RV32I-WITHFP-NEXT: call callee_aligned_stack@plt
+; RV32I-WITHFP-NEXT: call callee_aligned_stack
; RV32I-WITHFP-NEXT: lw ra, 60(sp) # 4-byte Folded Reload
; RV32I-WITHFP-NEXT: lw s0, 56(sp) # 4-byte Folded Reload
; RV32I-WITHFP-NEXT: addi sp, sp, 64
@@ -782,7 +782,7 @@ define i32 @caller_small_scalar_ret() nounwind {
; RV32I-FPELIM: # %bb.0:
; RV32I-FPELIM-NEXT: addi sp, sp, -16
; RV32I-FPELIM-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-FPELIM-NEXT: call callee_small_scalar_ret@plt
+; RV32I-FPELIM-NEXT: call callee_small_scalar_ret
; RV32I-FPELIM-NEXT: lui a2, 56
; RV32I-FPELIM-NEXT: addi a2, a2, 580
; RV32I-FPELIM-NEXT: xor a1, a1, a2
@@ -801,7 +801,7 @@ define i32 @caller_small_scalar_ret() nounwind {
; RV32I-WITHFP-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-WITHFP-NEXT: sw s0, 8(sp) # 4-byte Folded Spill
; RV32I-WITHFP-NEXT: addi s0, sp, 16
-; RV32I-WITHFP-NEXT: call callee_small_scalar_ret@plt
+; RV32I-WITHFP-NEXT: call callee_small_scalar_ret
; RV32I-WITHFP-NEXT: lui a2, 56
; RV32I-WITHFP-NEXT: addi a2, a2, 580
; RV32I-WITHFP-NEXT: xor a1, a1, a2
@@ -849,7 +849,7 @@ define i32 @caller_small_struct_ret() nounwind {
; RV32I-FPELIM: # %bb.0:
; RV32I-FPELIM-NEXT: addi sp, sp, -16
; RV32I-FPELIM-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-FPELIM-NEXT: call callee_small_struct_ret@plt
+; RV32I-FPELIM-NEXT: call callee_small_struct_ret
; RV32I-FPELIM-NEXT: add a0, a0, a1
; RV32I-FPELIM-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-FPELIM-NEXT: addi sp, sp, 16
@@ -861,7 +861,7 @@ define i32 @caller_small_struct_ret() nounwind {
; RV32I-WITHFP-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-WITHFP-NEXT: sw s0, 8(sp) # 4-byte Folded Spill
; RV32I-WITHFP-NEXT: addi s0, sp, 16
-; RV32I-WITHFP-NEXT: call callee_small_struct_ret@plt
+; RV32I-WITHFP-NEXT: call callee_small_struct_ret
; RV32I-WITHFP-NEXT: add a0, a0, a1
; RV32I-WITHFP-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-WITHFP-NEXT: lw s0, 8(sp) # 4-byte Folded Reload
@@ -911,7 +911,7 @@ define void @caller_large_scalar_ret() nounwind {
; RV32I-FPELIM-NEXT: addi sp, sp, -32
; RV32I-FPELIM-NEXT: sw ra, 28(sp) # 4-byte Folded Spill
; RV32I-FPELIM-NEXT: mv a0, sp
-; RV32I-FPELIM-NEXT: call callee_large_scalar_ret@plt
+; RV32I-FPELIM-NEXT: call callee_large_scalar_ret
; RV32I-FPELIM-NEXT: lw ra, 28(sp) # 4-byte Folded Reload
; RV32I-FPELIM-NEXT: addi sp, sp, 32
; RV32I-FPELIM-NEXT: ret
@@ -923,7 +923,7 @@ define void @caller_large_scalar_ret() nounwind {
; RV32I-WITHFP-NEXT: sw s0, 24(sp) # 4-byte Folded Spill
; RV32I-WITHFP-NEXT: addi s0, sp, 32
; RV32I-WITHFP-NEXT: addi a0, s0, -32
-; RV32I-WITHFP-NEXT: call callee_large_scalar_ret@plt
+; RV32I-WITHFP-NEXT: call callee_large_scalar_ret
; RV32I-WITHFP-NEXT: lw ra, 28(sp) # 4-byte Folded Reload
; RV32I-WITHFP-NEXT: lw s0, 24(sp) # 4-byte Folded Reload
; RV32I-WITHFP-NEXT: addi sp, sp, 32
@@ -981,7 +981,7 @@ define i32 @caller_large_struct_ret() nounwind {
; RV32I-FPELIM-NEXT: addi sp, sp, -32
; RV32I-FPELIM-NEXT: sw ra, 28(sp) # 4-byte Folded Spill
; RV32I-FPELIM-NEXT: addi a0, sp, 8
-; RV32I-FPELIM-NEXT: call callee_large_struct_ret@plt
+; RV32I-FPELIM-NEXT: call callee_large_struct_ret
; RV32I-FPELIM-NEXT: lw a0, 8(sp)
; RV32I-FPELIM-NEXT: lw a1, 20(sp)
; RV32I-FPELIM-NEXT: add a0, a0, a1
@@ -996,7 +996,7 @@ define i32 @caller_large_struct_ret() nounwind {
; RV32I-WITHFP-NEXT: sw s0, 24(sp) # 4-byte Folded Spill
; RV32I-WITHFP-NEXT: addi s0, sp, 32
; RV32I-WITHFP-NEXT: addi a0, s0, -24
-; RV32I-WITHFP-NEXT: call callee_large_struct_ret@plt
+; RV32I-WITHFP-NEXT: call callee_large_struct_ret
; RV32I-WITHFP-NEXT: lw a0, -24(s0)
; RV32I-WITHFP-NEXT: lw a1, -12(s0)
; RV32I-WITHFP-NEXT: add a0, a0, a1
diff --git a/llvm/test/CodeGen/RISCV/calling-conv-ilp32.ll b/llvm/test/CodeGen/RISCV/calling-conv-ilp32.ll
index 07acb9fd80a9..1dac139503ba 100644
--- a/llvm/test/CodeGen/RISCV/calling-conv-ilp32.ll
+++ b/llvm/test/CodeGen/RISCV/calling-conv-ilp32.ll
@@ -20,7 +20,7 @@ define i32 @callee_float_in_regs(i32 %a, float %b) nounwind {
; RV32I-FPELIM-NEXT: sw s0, 8(sp) # 4-byte Folded Spill
; RV32I-FPELIM-NEXT: mv s0, a0
; RV32I-FPELIM-NEXT: mv a0, a1
-; RV32I-FPELIM-NEXT: call __fixsfsi@plt
+; RV32I-FPELIM-NEXT: call __fixsfsi
; RV32I-FPELIM-NEXT: add a0, s0, a0
; RV32I-FPELIM-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-FPELIM-NEXT: lw s0, 8(sp) # 4-byte Folded Reload
@@ -36,7 +36,7 @@ define i32 @callee_float_in_regs(i32 %a, float %b) nounwind {
; RV32I-WITHFP-NEXT: addi s0, sp, 16
; RV32I-WITHFP-NEXT: mv s1, a0
; RV32I-WITHFP-NEXT: mv a0, a1
-; RV32I-WITHFP-NEXT: call __fixsfsi@plt
+; RV32I-WITHFP-NEXT: call __fixsfsi
; RV32I-WITHFP-NEXT: add a0, s1, a0
; RV32I-WITHFP-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-WITHFP-NEXT: lw s0, 8(sp) # 4-byte Folded Reload
@@ -55,7 +55,7 @@ define i32 @caller_float_in_regs() nounwind {
; RV32I-FPELIM-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-FPELIM-NEXT: li a0, 1
; RV32I-FPELIM-NEXT: lui a1, 262144
-; RV32I-FPELIM-NEXT: call callee_float_in_regs@plt
+; RV32I-FPELIM-NEXT: call callee_float_in_regs
; RV32I-FPELIM-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-FPELIM-NEXT: addi sp, sp, 16
; RV32I-FPELIM-NEXT: ret
@@ -68,7 +68,7 @@ define i32 @caller_float_in_regs() nounwind {
; RV32I-WITHFP-NEXT: addi s0, sp, 16
; RV32I-WITHFP-NEXT: li a0, 1
; RV32I-WITHFP-NEXT: lui a1, 262144
-; RV32I-WITHFP-NEXT: call callee_float_in_regs@plt
+; RV32I-WITHFP-NEXT: call callee_float_in_regs
; RV32I-WITHFP-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-WITHFP-NEXT: lw s0, 8(sp) # 4-byte Folded Reload
; RV32I-WITHFP-NEXT: addi sp, sp, 16
@@ -117,7 +117,7 @@ define i32 @caller_float_on_stack() nounwind {
; RV32I-FPELIM-NEXT: li a3, 0
; RV32I-FPELIM-NEXT: li a5, 0
; RV32I-FPELIM-NEXT: li a7, 0
-; RV32I-FPELIM-NEXT: call callee_float_on_stack@plt
+; RV32I-FPELIM-NEXT: call callee_float_on_stack
; RV32I-FPELIM-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-FPELIM-NEXT: addi sp, sp, 16
; RV32I-FPELIM-NEXT: ret
@@ -138,7 +138,7 @@ define i32 @caller_float_on_stack() nounwind {
; RV32I-WITHFP-NEXT: li a3, 0
; RV32I-WITHFP-NEXT: li a5, 0
; RV32I-WITHFP-NEXT: li a7, 0
-; RV32I-WITHFP-NEXT: call callee_float_on_stack@plt
+; RV32I-WITHFP-NEXT: call callee_float_on_stack
; RV32I-WITHFP-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-WITHFP-NEXT: lw s0, 8(sp) # 4-byte Folded Reload
; RV32I-WITHFP-NEXT: addi sp, sp, 16
@@ -172,7 +172,7 @@ define i32 @caller_tiny_scalar_ret() nounwind {
; RV32I-FPELIM: # %bb.0:
; RV32I-FPELIM-NEXT: addi sp, sp, -16
; RV32I-FPELIM-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-FPELIM-NEXT: call callee_tiny_scalar_ret@plt
+; RV32I-FPELIM-NEXT: call callee_tiny_scalar_ret
; RV32I-FPELIM-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-FPELIM-NEXT: addi sp, sp, 16
; RV32I-FPELIM-NEXT: ret
@@ -183,7 +183,7 @@ define i32 @caller_tiny_scalar_ret() nounwind {
; RV32I-WITHFP-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-WITHFP-NEXT: sw s0, 8(sp) # 4-byte Folded Spill
; RV32I-WITHFP-NEXT: addi s0, sp, 16
-; RV32I-WITHFP-NEXT: call callee_tiny_scalar_ret@plt
+; RV32I-WITHFP-NEXT: call callee_tiny_scalar_ret
; RV32I-WITHFP-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-WITHFP-NEXT: lw s0, 8(sp) # 4-byte Folded Reload
; RV32I-WITHFP-NEXT: addi sp, sp, 16
diff --git a/llvm/test/CodeGen/RISCV/calling-conv-ilp32d.ll b/llvm/test/CodeGen/RISCV/calling-conv-ilp32d.ll
index 4897170a82f2..bcceea7ac35b 100644
--- a/llvm/test/CodeGen/RISCV/calling-conv-ilp32d.ll
+++ b/llvm/test/CodeGen/RISCV/calling-conv-ilp32d.ll
@@ -25,7 +25,7 @@ define i32 @caller_double_in_fpr() nounwind {
; RV32-ILP32D-NEXT: lui a0, %hi(.LCPI1_0)
; RV32-ILP32D-NEXT: fld fa0, %lo(.LCPI1_0)(a0)
; RV32-ILP32D-NEXT: li a0, 1
-; RV32-ILP32D-NEXT: call callee_double_in_fpr@plt
+; RV32-ILP32D-NEXT: call callee_double_in_fpr
; RV32-ILP32D-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32-ILP32D-NEXT: addi sp, sp, 16
; RV32-ILP32D-NEXT: ret
@@ -63,7 +63,7 @@ define i32 @caller_double_in_fpr_exhausted_gprs() nounwind {
; RV32-ILP32D-NEXT: li a3, 0
; RV32-ILP32D-NEXT: li a5, 0
; RV32-ILP32D-NEXT: li a7, 0
-; RV32-ILP32D-NEXT: call callee_double_in_fpr_exhausted_gprs@plt
+; RV32-ILP32D-NEXT: call callee_double_in_fpr_exhausted_gprs
; RV32-ILP32D-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32-ILP32D-NEXT: addi sp, sp, 16
; RV32-ILP32D-NEXT: ret
@@ -114,7 +114,7 @@ define i32 @caller_double_in_gpr_exhausted_fprs() nounwind {
; RV32-ILP32D-NEXT: fld fa7, %lo(.LCPI5_7)(a0)
; RV32-ILP32D-NEXT: lui a1, 262688
; RV32-ILP32D-NEXT: li a0, 0
-; RV32-ILP32D-NEXT: call callee_double_in_gpr_exhausted_fprs@plt
+; RV32-ILP32D-NEXT: call callee_double_in_gpr_exhausted_fprs
; RV32-ILP32D-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32-ILP32D-NEXT: addi sp, sp, 16
; RV32-ILP32D-NEXT: ret
@@ -173,7 +173,7 @@ define i32 @caller_double_in_gpr_and_stack_almost_exhausted_gprs_fprs() nounwind
; RV32-ILP32D-NEXT: li a3, 0
; RV32-ILP32D-NEXT: li a5, 0
; RV32-ILP32D-NEXT: li a7, 0
-; RV32-ILP32D-NEXT: call callee_double_in_gpr_and_stack_almost_exhausted_gprs_fprs@plt
+; RV32-ILP32D-NEXT: call callee_double_in_gpr_and_stack_almost_exhausted_gprs_fprs
; RV32-ILP32D-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32-ILP32D-NEXT: addi sp, sp, 16
; RV32-ILP32D-NEXT: ret
@@ -230,7 +230,7 @@ define i32 @caller_double_on_stack_exhausted_gprs_fprs() nounwind {
; RV32-ILP32D-NEXT: li a3, 0
; RV32-ILP32D-NEXT: li a5, 0
; RV32-ILP32D-NEXT: li a7, 0
-; RV32-ILP32D-NEXT: call callee_double_on_stack_exhausted_gprs_fprs@plt
+; RV32-ILP32D-NEXT: call callee_double_on_stack_exhausted_gprs_fprs
; RV32-ILP32D-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32-ILP32D-NEXT: addi sp, sp, 16
; RV32-ILP32D-NEXT: ret
@@ -254,7 +254,7 @@ define i32 @caller_double_ret() nounwind {
; RV32-ILP32D: # %bb.0:
; RV32-ILP32D-NEXT: addi sp, sp, -16
; RV32-ILP32D-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32-ILP32D-NEXT: call callee_double_ret@plt
+; RV32-ILP32D-NEXT: call callee_double_ret
; RV32-ILP32D-NEXT: fsd fa0, 0(sp)
; RV32-ILP32D-NEXT: lw a0, 0(sp)
; RV32-ILP32D-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
diff --git a/llvm/test/CodeGen/RISCV/calling-conv-ilp32f-ilp32d-common.ll b/llvm/test/CodeGen/RISCV/calling-conv-ilp32f-ilp32d-common.ll
index bb51f71358ad..b0d60a7aaa23 100644
--- a/llvm/test/CodeGen/RISCV/calling-conv-ilp32f-ilp32d-common.ll
+++ b/llvm/test/CodeGen/RISCV/calling-conv-ilp32f-ilp32d-common.ll
@@ -28,7 +28,7 @@ define i32 @caller_float_in_fpr() nounwind {
; RV32-ILP32FD-NEXT: lui a0, 262144
; RV32-ILP32FD-NEXT: fmv.w.x fa0, a0
; RV32-ILP32FD-NEXT: li a0, 1
-; RV32-ILP32FD-NEXT: call callee_float_in_fpr@plt
+; RV32-ILP32FD-NEXT: call callee_float_in_fpr
; RV32-ILP32FD-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32-ILP32FD-NEXT: addi sp, sp, 16
; RV32-ILP32FD-NEXT: ret
@@ -66,7 +66,7 @@ define i32 @caller_float_in_fpr_exhausted_gprs() nounwind {
; RV32-ILP32FD-NEXT: li a3, 0
; RV32-ILP32FD-NEXT: li a5, 0
; RV32-ILP32FD-NEXT: li a7, 0
-; RV32-ILP32FD-NEXT: call callee_float_in_fpr_exhausted_gprs@plt
+; RV32-ILP32FD-NEXT: call callee_float_in_fpr_exhausted_gprs
; RV32-ILP32FD-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32-ILP32FD-NEXT: addi sp, sp, 16
; RV32-ILP32FD-NEXT: ret
@@ -112,7 +112,7 @@ define i32 @caller_float_in_gpr_exhausted_fprs() nounwind {
; RV32-ILP32FD-NEXT: lui a0, 266240
; RV32-ILP32FD-NEXT: fmv.w.x fa7, a0
; RV32-ILP32FD-NEXT: lui a0, 266496
-; RV32-ILP32FD-NEXT: call callee_float_in_gpr_exhausted_fprs@plt
+; RV32-ILP32FD-NEXT: call callee_float_in_gpr_exhausted_fprs
; RV32-ILP32FD-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32-ILP32FD-NEXT: addi sp, sp, 16
; RV32-ILP32FD-NEXT: ret
@@ -167,7 +167,7 @@ define i32 @caller_float_on_stack_exhausted_gprs_fprs() nounwind {
; RV32-ILP32FD-NEXT: li a3, 0
; RV32-ILP32FD-NEXT: li a5, 0
; RV32-ILP32FD-NEXT: li a7, 0
-; RV32-ILP32FD-NEXT: call callee_float_on_stack_exhausted_gprs_fprs@plt
+; RV32-ILP32FD-NEXT: call callee_float_on_stack_exhausted_gprs_fprs
; RV32-ILP32FD-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32-ILP32FD-NEXT: addi sp, sp, 16
; RV32-ILP32FD-NEXT: ret
@@ -191,7 +191,7 @@ define i32 @caller_float_ret() nounwind {
; RV32-ILP32FD: # %bb.0:
; RV32-ILP32FD-NEXT: addi sp, sp, -16
; RV32-ILP32FD-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32-ILP32FD-NEXT: call callee_float_ret@plt
+; RV32-ILP32FD-NEXT: call callee_float_ret
; RV32-ILP32FD-NEXT: fmv.x.w a0, fa0
; RV32-ILP32FD-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32-ILP32FD-NEXT: addi sp, sp, 16
diff --git a/llvm/test/CodeGen/RISCV/calling-conv-lp64-lp64f-common.ll b/llvm/test/CodeGen/RISCV/calling-conv-lp64-lp64f-common.ll
index f424e77182a9..a1d8ea66980d 100644
--- a/llvm/test/CodeGen/RISCV/calling-conv-lp64-lp64f-common.ll
+++ b/llvm/test/CodeGen/RISCV/calling-conv-lp64-lp64f-common.ll
@@ -17,7 +17,7 @@ define i64 @callee_double_in_regs(i64 %a, double %b) nounwind {
; RV64I-NEXT: sd s0, 0(sp) # 8-byte Folded Spill
; RV64I-NEXT: mv s0, a0
; RV64I-NEXT: mv a0, a1
-; RV64I-NEXT: call __fixdfdi@plt
+; RV64I-NEXT: call __fixdfdi
; RV64I-NEXT: add a0, s0, a0
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: ld s0, 0(sp) # 8-byte Folded Reload
@@ -36,7 +36,7 @@ define i64 @caller_double_in_regs() nounwind {
; RV64I-NEXT: li a1, 1
; RV64I-NEXT: slli a1, a1, 62
; RV64I-NEXT: li a0, 1
-; RV64I-NEXT: call callee_double_in_regs@plt
+; RV64I-NEXT: call callee_double_in_regs
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
@@ -58,7 +58,7 @@ define i64 @caller_double_ret() nounwind {
; RV64I: # %bb.0:
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT: call callee_double_ret@plt
+; RV64I-NEXT: call callee_double_ret
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
diff --git a/llvm/test/CodeGen/RISCV/calling-conv-lp64-lp64f-lp64d-common.ll b/llvm/test/CodeGen/RISCV/calling-conv-lp64-lp64f-lp64d-common.ll
index c2690d15665e..d84711294330 100644
--- a/llvm/test/CodeGen/RISCV/calling-conv-lp64-lp64f-lp64d-common.ll
+++ b/llvm/test/CodeGen/RISCV/calling-conv-lp64-lp64f-lp64d-common.ll
@@ -35,7 +35,7 @@ define i64 @caller_i128_in_regs() nounwind {
; RV64I-NEXT: li a0, 1
; RV64I-NEXT: li a1, 2
; RV64I-NEXT: li a2, 0
-; RV64I-NEXT: call callee_i128_in_regs@plt
+; RV64I-NEXT: call callee_i128_in_regs
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
@@ -93,7 +93,7 @@ define i32 @caller_many_scalars() nounwind {
; RV64I-NEXT: li a7, 7
; RV64I-NEXT: sd zero, 0(sp)
; RV64I-NEXT: li a4, 0
-; RV64I-NEXT: call callee_many_scalars@plt
+; RV64I-NEXT: call callee_many_scalars
; RV64I-NEXT: ld ra, 24(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 32
; RV64I-NEXT: ret
@@ -145,7 +145,7 @@ define i64 @caller_large_scalars() nounwind {
; RV64I-NEXT: addi a0, sp, 32
; RV64I-NEXT: mv a1, sp
; RV64I-NEXT: sd zero, 40(sp)
-; RV64I-NEXT: call callee_large_scalars@plt
+; RV64I-NEXT: call callee_large_scalars
; RV64I-NEXT: ld ra, 72(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 80
; RV64I-NEXT: ret
@@ -210,7 +210,7 @@ define i64 @caller_large_scalars_exhausted_regs() nounwind {
; RV64I-NEXT: li a6, 7
; RV64I-NEXT: addi a7, sp, 48
; RV64I-NEXT: sd zero, 56(sp)
-; RV64I-NEXT: call callee_large_scalars_exhausted_regs@plt
+; RV64I-NEXT: call callee_large_scalars_exhausted_regs
; RV64I-NEXT: ld ra, 88(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 96
; RV64I-NEXT: ret
@@ -227,7 +227,7 @@ define i64 @caller_mixed_scalar_libcalls(i64 %a) nounwind {
; RV64I: # %bb.0:
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT: call __floatditf@plt
+; RV64I-NEXT: call __floatditf
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
@@ -261,7 +261,7 @@ define i64 @caller_small_coerced_struct() nounwind {
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT: li a0, 1
; RV64I-NEXT: li a1, 2
-; RV64I-NEXT: call callee_small_coerced_struct@plt
+; RV64I-NEXT: call callee_small_coerced_struct
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
@@ -305,7 +305,7 @@ define i64 @caller_large_struct() nounwind {
; RV64I-NEXT: sd a2, 24(sp)
; RV64I-NEXT: sd a3, 32(sp)
; RV64I-NEXT: addi a0, sp, 8
-; RV64I-NEXT: call callee_large_struct@plt
+; RV64I-NEXT: call callee_large_struct
; RV64I-NEXT: ld ra, 72(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 80
; RV64I-NEXT: ret
@@ -375,7 +375,7 @@ define void @caller_aligned_stack() nounwind {
; RV64I-NEXT: li a7, 7
; RV64I-NEXT: sd a6, 0(sp)
; RV64I-NEXT: li a6, 0
-; RV64I-NEXT: call callee_aligned_stack@plt
+; RV64I-NEXT: call callee_aligned_stack
; RV64I-NEXT: ld ra, 56(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 64
; RV64I-NEXT: ret
@@ -400,7 +400,7 @@ define i64 @caller_small_scalar_ret() nounwind {
; RV64I: # %bb.0:
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT: call callee_small_scalar_ret@plt
+; RV64I-NEXT: call callee_small_scalar_ret
; RV64I-NEXT: not a1, a1
; RV64I-NEXT: xori a0, a0, -2
; RV64I-NEXT: or a0, a0, a1
@@ -430,7 +430,7 @@ define i64 @caller_small_struct_ret() nounwind {
; RV64I: # %bb.0:
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT: call callee_small_struct_ret@plt
+; RV64I-NEXT: call callee_small_struct_ret
; RV64I-NEXT: add a0, a0, a1
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
@@ -465,7 +465,7 @@ define void @caller_large_scalar_ret() nounwind {
; RV64I-NEXT: addi sp, sp, -48
; RV64I-NEXT: sd ra, 40(sp) # 8-byte Folded Spill
; RV64I-NEXT: mv a0, sp
-; RV64I-NEXT: call callee_large_scalar_ret@plt
+; RV64I-NEXT: call callee_large_scalar_ret
; RV64I-NEXT: ld ra, 40(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 48
; RV64I-NEXT: ret
@@ -507,7 +507,7 @@ define i64 @caller_large_struct_ret() nounwind {
; RV64I-NEXT: addi sp, sp, -48
; RV64I-NEXT: sd ra, 40(sp) # 8-byte Folded Spill
; RV64I-NEXT: addi a0, sp, 8
-; RV64I-NEXT: call callee_large_struct_ret@plt
+; RV64I-NEXT: call callee_large_struct_ret
; RV64I-NEXT: ld a0, 8(sp)
; RV64I-NEXT: ld a1, 32(sp)
; RV64I-NEXT: add a0, a0, a1
diff --git a/llvm/test/CodeGen/RISCV/calling-conv-lp64.ll b/llvm/test/CodeGen/RISCV/calling-conv-lp64.ll
index bf98412bc315..c2db8fe5248f 100644
--- a/llvm/test/CodeGen/RISCV/calling-conv-lp64.ll
+++ b/llvm/test/CodeGen/RISCV/calling-conv-lp64.ll
@@ -22,7 +22,7 @@ define i64 @callee_float_in_regs(i64 %a, float %b) nounwind {
; RV64I-FPELIM-NEXT: sd s0, 0(sp) # 8-byte Folded Spill
; RV64I-FPELIM-NEXT: mv s0, a0
; RV64I-FPELIM-NEXT: mv a0, a1
-; RV64I-FPELIM-NEXT: call __fixsfdi@plt
+; RV64I-FPELIM-NEXT: call __fixsfdi
; RV64I-FPELIM-NEXT: add a0, s0, a0
; RV64I-FPELIM-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-FPELIM-NEXT: ld s0, 0(sp) # 8-byte Folded Reload
@@ -38,7 +38,7 @@ define i64 @callee_float_in_regs(i64 %a, float %b) nounwind {
; RV64I-WITHFP-NEXT: addi s0, sp, 32
; RV64I-WITHFP-NEXT: mv s1, a0
; RV64I-WITHFP-NEXT: mv a0, a1
-; RV64I-WITHFP-NEXT: call __fixsfdi@plt
+; RV64I-WITHFP-NEXT: call __fixsfdi
; RV64I-WITHFP-NEXT: add a0, s1, a0
; RV64I-WITHFP-NEXT: ld ra, 24(sp) # 8-byte Folded Reload
; RV64I-WITHFP-NEXT: ld s0, 16(sp) # 8-byte Folded Reload
@@ -57,7 +57,7 @@ define i64 @caller_float_in_regs() nounwind {
; RV64I-FPELIM-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-FPELIM-NEXT: li a0, 1
; RV64I-FPELIM-NEXT: lui a1, 262144
-; RV64I-FPELIM-NEXT: call callee_float_in_regs@plt
+; RV64I-FPELIM-NEXT: call callee_float_in_regs
; RV64I-FPELIM-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-FPELIM-NEXT: addi sp, sp, 16
; RV64I-FPELIM-NEXT: ret
@@ -70,7 +70,7 @@ define i64 @caller_float_in_regs() nounwind {
; RV64I-WITHFP-NEXT: addi s0, sp, 16
; RV64I-WITHFP-NEXT: li a0, 1
; RV64I-WITHFP-NEXT: lui a1, 262144
-; RV64I-WITHFP-NEXT: call callee_float_in_regs@plt
+; RV64I-WITHFP-NEXT: call callee_float_in_regs
; RV64I-WITHFP-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-WITHFP-NEXT: ld s0, 0(sp) # 8-byte Folded Reload
; RV64I-WITHFP-NEXT: addi sp, sp, 16
@@ -118,7 +118,7 @@ define i64 @caller_float_on_stack() nounwind {
; RV64I-FPELIM-NEXT: li a3, 0
; RV64I-FPELIM-NEXT: li a5, 0
; RV64I-FPELIM-NEXT: li a7, 0
-; RV64I-FPELIM-NEXT: call callee_float_on_stack@plt
+; RV64I-FPELIM-NEXT: call callee_float_on_stack
; RV64I-FPELIM-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-FPELIM-NEXT: addi sp, sp, 16
; RV64I-FPELIM-NEXT: ret
@@ -139,7 +139,7 @@ define i64 @caller_float_on_stack() nounwind {
; RV64I-WITHFP-NEXT: li a3, 0
; RV64I-WITHFP-NEXT: li a5, 0
; RV64I-WITHFP-NEXT: li a7, 0
-; RV64I-WITHFP-NEXT: call callee_float_on_stack@plt
+; RV64I-WITHFP-NEXT: call callee_float_on_stack
; RV64I-WITHFP-NEXT: ld ra, 24(sp) # 8-byte Folded Reload
; RV64I-WITHFP-NEXT: ld s0, 16(sp) # 8-byte Folded Reload
; RV64I-WITHFP-NEXT: addi sp, sp, 32
@@ -176,7 +176,7 @@ define i64 @caller_tiny_scalar_ret() nounwind {
; RV64I-FPELIM: # %bb.0:
; RV64I-FPELIM-NEXT: addi sp, sp, -16
; RV64I-FPELIM-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-FPELIM-NEXT: call callee_tiny_scalar_ret@plt
+; RV64I-FPELIM-NEXT: call callee_tiny_scalar_ret
; RV64I-FPELIM-NEXT: sext.w a0, a0
; RV64I-FPELIM-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-FPELIM-NEXT: addi sp, sp, 16
@@ -188,7 +188,7 @@ define i64 @caller_tiny_scalar_ret() nounwind {
; RV64I-WITHFP-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-WITHFP-NEXT: sd s0, 0(sp) # 8-byte Folded Spill
; RV64I-WITHFP-NEXT: addi s0, sp, 16
-; RV64I-WITHFP-NEXT: call callee_tiny_scalar_ret@plt
+; RV64I-WITHFP-NEXT: call callee_tiny_scalar_ret
; RV64I-WITHFP-NEXT: sext.w a0, a0
; RV64I-WITHFP-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-WITHFP-NEXT: ld s0, 0(sp) # 8-byte Folded Reload
diff --git a/llvm/test/CodeGen/RISCV/calling-conv-rv32f-ilp32.ll b/llvm/test/CodeGen/RISCV/calling-conv-rv32f-ilp32.ll
index a74f7bbe2331..37d9eb6990b0 100644
--- a/llvm/test/CodeGen/RISCV/calling-conv-rv32f-ilp32.ll
+++ b/llvm/test/CodeGen/RISCV/calling-conv-rv32f-ilp32.ll
@@ -43,7 +43,7 @@ define float @caller_onstack_f32_noop(float %a) nounwind {
; RV32IF-NEXT: li a3, 0
; RV32IF-NEXT: li a5, 0
; RV32IF-NEXT: li a7, 0
-; RV32IF-NEXT: call onstack_f32_noop@plt
+; RV32IF-NEXT: call onstack_f32_noop
; RV32IF-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32IF-NEXT: addi sp, sp, 16
; RV32IF-NEXT: ret
@@ -70,7 +70,7 @@ define float @caller_onstack_f32_fadd(float %a, float %b) nounwind {
; RV32IF-NEXT: li a3, 0
; RV32IF-NEXT: li a5, 0
; RV32IF-NEXT: li a7, 0
-; RV32IF-NEXT: call onstack_f32_noop@plt
+; RV32IF-NEXT: call onstack_f32_noop
; RV32IF-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32IF-NEXT: addi sp, sp, 16
; RV32IF-NEXT: ret
diff --git a/llvm/test/CodeGen/RISCV/calling-conv-sext-zext.ll b/llvm/test/CodeGen/RISCV/calling-conv-sext-zext.ll
index ac060f9469ac..5bae6b1d7f54 100644
--- a/llvm/test/CodeGen/RISCV/calling-conv-sext-zext.ll
+++ b/llvm/test/CodeGen/RISCV/calling-conv-sext-zext.ll
@@ -16,7 +16,7 @@ define void @pass_uint8_as_uint8(i8 zeroext %a) nounwind {
; RV32I: # %bb.0:
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT: call receive_uint8@plt
+; RV32I-NEXT: call receive_uint8
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
@@ -31,7 +31,7 @@ define zeroext i8 @ret_callresult_uint8_as_uint8() nounwind {
; RV32I: # %bb.0:
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT: call return_uint8@plt
+; RV32I-NEXT: call return_uint8
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
@@ -57,7 +57,7 @@ define void @pass_uint8_as_sint8(i8 zeroext %a) nounwind {
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT: slli a0, a0, 24
; RV32I-NEXT: srai a0, a0, 24
-; RV32I-NEXT: call receive_sint8@plt
+; RV32I-NEXT: call receive_sint8
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
@@ -71,7 +71,7 @@ define signext i8 @ret_callresult_uint8_as_sint8() nounwind {
; RV32I: # %bb.0:
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT: call return_uint8@plt
+; RV32I-NEXT: call return_uint8
; RV32I-NEXT: slli a0, a0, 24
; RV32I-NEXT: srai a0, a0, 24
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
@@ -96,7 +96,7 @@ define void @pass_uint8_as_anyint32(i8 zeroext %a) nounwind {
; RV32I: # %bb.0:
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT: call receive_anyint32@plt
+; RV32I-NEXT: call receive_anyint32
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
@@ -110,7 +110,7 @@ define signext i32 @ret_callresult_uint8_as_anyint32() nounwind {
; RV32I: # %bb.0:
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT: call return_uint8@plt
+; RV32I-NEXT: call return_uint8
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
@@ -133,7 +133,7 @@ define void @pass_sint8_as_uint8(i8 signext %a) nounwind {
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT: andi a0, a0, 255
-; RV32I-NEXT: call receive_uint8@plt
+; RV32I-NEXT: call receive_uint8
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
@@ -148,7 +148,7 @@ define zeroext i8 @ret_callresult_sint8_as_uint8() nounwind {
; RV32I: # %bb.0:
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT: call return_sint8@plt
+; RV32I-NEXT: call return_sint8
; RV32I-NEXT: andi a0, a0, 255
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
@@ -169,7 +169,7 @@ define void @pass_sint8_as_sint8(i8 signext %a) nounwind {
; RV32I: # %bb.0:
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT: call receive_sint8@plt
+; RV32I-NEXT: call receive_sint8
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
@@ -182,7 +182,7 @@ define signext i8 @ret_callresult_sint8_as_sint8() nounwind {
; RV32I: # %bb.0:
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT: call return_sint8@plt
+; RV32I-NEXT: call return_sint8
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
@@ -203,7 +203,7 @@ define void @pass_sint8_as_anyint32(i8 signext %a) nounwind {
; RV32I: # %bb.0:
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT: call receive_anyint32@plt
+; RV32I-NEXT: call receive_anyint32
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
@@ -217,7 +217,7 @@ define signext i32 @ret_callresult_sint8_as_anyint32() nounwind {
; RV32I: # %bb.0:
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT: call return_sint8@plt
+; RV32I-NEXT: call return_sint8
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
@@ -241,7 +241,7 @@ define void @pass_anyint32_as_uint8(i32 signext %a) nounwind {
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT: andi a0, a0, 255
-; RV32I-NEXT: call receive_uint8@plt
+; RV32I-NEXT: call receive_uint8
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
@@ -257,7 +257,7 @@ define zeroext i8 @ret_callresult_anyint32_as_uint8() nounwind {
; RV32I: # %bb.0:
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT: call return_anyint32@plt
+; RV32I-NEXT: call return_anyint32
; RV32I-NEXT: andi a0, a0, 255
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
@@ -284,7 +284,7 @@ define void @pass_anyint32_as_sint8(i32 signext %a) nounwind {
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT: slli a0, a0, 24
; RV32I-NEXT: srai a0, a0, 24
-; RV32I-NEXT: call receive_sint8@plt
+; RV32I-NEXT: call receive_sint8
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
@@ -298,7 +298,7 @@ define signext i8 @ret_callresult_anyint32_as_sint8() nounwind {
; RV32I: # %bb.0:
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT: call return_anyint32@plt
+; RV32I-NEXT: call return_anyint32
; RV32I-NEXT: slli a0, a0, 24
; RV32I-NEXT: srai a0, a0, 24
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
@@ -321,7 +321,7 @@ define void @pass_anyint32_as_anyint32(i32 signext %a) nounwind {
; RV32I: # %bb.0:
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT: call receive_anyint32@plt
+; RV32I-NEXT: call receive_anyint32
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
@@ -334,7 +334,7 @@ define signext i32 @ret_callresult_anyint32_as_anyint32() nounwind {
; RV32I: # %bb.0:
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT: call return_anyint32@plt
+; RV32I-NEXT: call return_anyint32
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
diff --git a/llvm/test/CodeGen/RISCV/calling-conv-vector-on-stack.ll b/llvm/test/CodeGen/RISCV/calling-conv-vector-on-stack.ll
index 3e2af1136529..70cdb6cec244 100644
--- a/llvm/test/CodeGen/RISCV/calling-conv-vector-on-stack.ll
+++ b/llvm/test/CodeGen/RISCV/calling-conv-vector-on-stack.ll
@@ -31,7 +31,7 @@ define void @bar() nounwind {
; CHECK-NEXT: li a6, 0
; CHECK-NEXT: li a7, 0
; CHECK-NEXT: vmv.v.i v16, 0
-; CHECK-NEXT: call foo@plt
+; CHECK-NEXT: call foo
; CHECK-NEXT: addi sp, sp, 16
; CHECK-NEXT: addi sp, s0, -96
; CHECK-NEXT: ld ra, 88(sp) # 8-byte Folded Reload
diff --git a/llvm/test/CodeGen/RISCV/calls.ll b/llvm/test/CodeGen/RISCV/calls.ll
index e3459875362d..365f255dd824 100644
--- a/llvm/test/CodeGen/RISCV/calls.ll
+++ b/llvm/test/CodeGen/RISCV/calls.ll
@@ -11,7 +11,7 @@ define i32 @test_call_external(i32 %a) nounwind {
; RV32I: # %bb.0:
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT: call external_function@plt
+; RV32I-NEXT: call external_function
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
@@ -20,7 +20,7 @@ define i32 @test_call_external(i32 %a) nounwind {
; RV32I-PIC: # %bb.0:
; RV32I-PIC-NEXT: addi sp, sp, -16
; RV32I-PIC-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-PIC-NEXT: call external_function@plt
+; RV32I-PIC-NEXT: call external_function
; RV32I-PIC-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-PIC-NEXT: addi sp, sp, 16
; RV32I-PIC-NEXT: ret
@@ -71,7 +71,7 @@ define i32 @test_call_defined(i32 %a) nounwind {
; RV32I: # %bb.0:
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT: call defined_function@plt
+; RV32I-NEXT: call defined_function
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
@@ -80,7 +80,7 @@ define i32 @test_call_defined(i32 %a) nounwind {
; RV32I-PIC: # %bb.0:
; RV32I-PIC-NEXT: addi sp, sp, -16
; RV32I-PIC-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-PIC-NEXT: call defined_function@plt
+; RV32I-PIC-NEXT: call defined_function
; RV32I-PIC-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-PIC-NEXT: addi sp, sp, 16
; RV32I-PIC-NEXT: ret
@@ -178,7 +178,7 @@ define i32 @test_call_fastcc(i32 %a, i32 %b) nounwind {
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT: sw s0, 8(sp) # 4-byte Folded Spill
; RV32I-NEXT: mv s0, a0
-; RV32I-NEXT: call fastcc_function@plt
+; RV32I-NEXT: call fastcc_function
; RV32I-NEXT: mv a0, s0
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: lw s0, 8(sp) # 4-byte Folded Reload
@@ -191,7 +191,7 @@ define i32 @test_call_fastcc(i32 %a, i32 %b) nounwind {
; RV32I-PIC-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-PIC-NEXT: sw s0, 8(sp) # 4-byte Folded Spill
; RV32I-PIC-NEXT: mv s0, a0
-; RV32I-PIC-NEXT: call fastcc_function@plt
+; RV32I-PIC-NEXT: call fastcc_function
; RV32I-PIC-NEXT: mv a0, s0
; RV32I-PIC-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-PIC-NEXT: lw s0, 8(sp) # 4-byte Folded Reload
@@ -219,7 +219,7 @@ define i32 @test_call_external_many_args(i32 %a) nounwind {
; RV32I-NEXT: mv a5, a0
; RV32I-NEXT: mv a6, a0
; RV32I-NEXT: mv a7, a0
-; RV32I-NEXT: call external_many_args@plt
+; RV32I-NEXT: call external_many_args
; RV32I-NEXT: mv a0, s0
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: lw s0, 8(sp) # 4-byte Folded Reload
@@ -241,7 +241,7 @@ define i32 @test_call_external_many_args(i32 %a) nounwind {
; RV32I-PIC-NEXT: mv a5, a0
; RV32I-PIC-NEXT: mv a6, a0
; RV32I-PIC-NEXT: mv a7, a0
-; RV32I-PIC-NEXT: call external_many_args@plt
+; RV32I-PIC-NEXT: call external_many_args
; RV32I-PIC-NEXT: mv a0, s0
; RV32I-PIC-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-PIC-NEXT: lw s0, 8(sp) # 4-byte Folded Reload
@@ -282,7 +282,7 @@ define i32 @test_call_defined_many_args(i32 %a) nounwind {
; RV32I-NEXT: mv a5, a0
; RV32I-NEXT: mv a6, a0
; RV32I-NEXT: mv a7, a0
-; RV32I-NEXT: call defined_many_args@plt
+; RV32I-NEXT: call defined_many_args
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
@@ -300,7 +300,7 @@ define i32 @test_call_defined_many_args(i32 %a) nounwind {
; RV32I-PIC-NEXT: mv a5, a0
; RV32I-PIC-NEXT: mv a6, a0
; RV32I-PIC-NEXT: mv a7, a0
-; RV32I-PIC-NEXT: call defined_many_args@plt
+; RV32I-PIC-NEXT: call defined_many_args
; RV32I-PIC-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-PIC-NEXT: addi sp, sp, 16
; RV32I-PIC-NEXT: ret
diff --git a/llvm/test/CodeGen/RISCV/cm_mvas_mvsa.ll b/llvm/test/CodeGen/RISCV/cm_mvas_mvsa.ll
index 7992c2cda28d..2103c3e60b59 100644
--- a/llvm/test/CodeGen/RISCV/cm_mvas_mvsa.ll
+++ b/llvm/test/CodeGen/RISCV/cm_mvas_mvsa.ll
@@ -21,11 +21,11 @@ define i32 @zcmp_mv(i32 %num, i32 %f) nounwind {
; CHECK32I-NEXT: sw s2, 0(sp) # 4-byte Folded Spill
; CHECK32I-NEXT: mv s0, a1
; CHECK32I-NEXT: mv s1, a0
-; CHECK32I-NEXT: call func@plt
+; CHECK32I-NEXT: call func
; CHECK32I-NEXT: mv s2, a0
; CHECK32I-NEXT: mv a0, s1
; CHECK32I-NEXT: mv a1, s0
-; CHECK32I-NEXT: call func@plt
+; CHECK32I-NEXT: call func
; CHECK32I-NEXT: add a0, s2, s0
; CHECK32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; CHECK32I-NEXT: lw s0, 8(sp) # 4-byte Folded Reload
@@ -38,10 +38,10 @@ define i32 @zcmp_mv(i32 %num, i32 %f) nounwind {
; CHECK32ZCMP: # %bb.0:
; CHECK32ZCMP-NEXT: cm.push {ra, s0-s2}, -16
; CHECK32ZCMP-NEXT: cm.mvsa01 s1, s0
-; CHECK32ZCMP-NEXT: call func@plt
+; CHECK32ZCMP-NEXT: call func
; CHECK32ZCMP-NEXT: mv s2, a0
; CHECK32ZCMP-NEXT: cm.mva01s s1, s0
-; CHECK32ZCMP-NEXT: call func@plt
+; CHECK32ZCMP-NEXT: call func
; CHECK32ZCMP-NEXT: add a0, s2, s0
; CHECK32ZCMP-NEXT: cm.popret {ra, s0-s2}, 16
;
@@ -54,11 +54,11 @@ define i32 @zcmp_mv(i32 %num, i32 %f) nounwind {
; CHECK64I-NEXT: sd s2, 0(sp) # 8-byte Folded Spill
; CHECK64I-NEXT: mv s0, a1
; CHECK64I-NEXT: mv s1, a0
-; CHECK64I-NEXT: call func@plt
+; CHECK64I-NEXT: call func
; CHECK64I-NEXT: mv s2, a0
; CHECK64I-NEXT: mv a0, s1
; CHECK64I-NEXT: mv a1, s0
-; CHECK64I-NEXT: call func@plt
+; CHECK64I-NEXT: call func
; CHECK64I-NEXT: addw a0, s2, s0
; CHECK64I-NEXT: ld ra, 24(sp) # 8-byte Folded Reload
; CHECK64I-NEXT: ld s0, 16(sp) # 8-byte Folded Reload
@@ -71,10 +71,10 @@ define i32 @zcmp_mv(i32 %num, i32 %f) nounwind {
; CHECK64ZCMP: # %bb.0:
; CHECK64ZCMP-NEXT: cm.push {ra, s0-s2}, -32
; CHECK64ZCMP-NEXT: cm.mvsa01 s1, s0
-; CHECK64ZCMP-NEXT: call func@plt
+; CHECK64ZCMP-NEXT: call func
; CHECK64ZCMP-NEXT: mv s2, a0
; CHECK64ZCMP-NEXT: cm.mva01s s1, s0
-; CHECK64ZCMP-NEXT: call func@plt
+; CHECK64ZCMP-NEXT: call func
; CHECK64ZCMP-NEXT: addw a0, s2, s0
; CHECK64ZCMP-NEXT: cm.popret {ra, s0-s2}, 32
%call = call i32 @func(i32 %num, i32 %f)
@@ -91,15 +91,15 @@ define i32 @not_zcmp_mv(i32 %num, i32 %f) nounwind {
; CHECK32I-NEXT: sw s0, 8(sp) # 4-byte Folded Spill
; CHECK32I-NEXT: sw s1, 4(sp) # 4-byte Folded Spill
; CHECK32I-NEXT: mv s0, a1
-; CHECK32I-NEXT: call foo@plt
+; CHECK32I-NEXT: call foo
; CHECK32I-NEXT: mv s1, a0
; CHECK32I-NEXT: mv a0, s0
-; CHECK32I-NEXT: call foo@plt
+; CHECK32I-NEXT: call foo
; CHECK32I-NEXT: mv a0, s1
-; CHECK32I-NEXT: call foo@plt
+; CHECK32I-NEXT: call foo
; CHECK32I-NEXT: li a0, 1
; CHECK32I-NEXT: mv a1, s0
-; CHECK32I-NEXT: call func@plt
+; CHECK32I-NEXT: call func
; CHECK32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; CHECK32I-NEXT: lw s0, 8(sp) # 4-byte Folded Reload
; CHECK32I-NEXT: lw s1, 4(sp) # 4-byte Folded Reload
@@ -110,15 +110,15 @@ define i32 @not_zcmp_mv(i32 %num, i32 %f) nounwind {
; CHECK32ZCMP: # %bb.0:
; CHECK32ZCMP-NEXT: cm.push {ra, s0-s1}, -16
; CHECK32ZCMP-NEXT: mv s0, a1
-; CHECK32ZCMP-NEXT: call foo@plt
+; CHECK32ZCMP-NEXT: call foo
; CHECK32ZCMP-NEXT: mv s1, a0
; CHECK32ZCMP-NEXT: mv a0, s0
-; CHECK32ZCMP-NEXT: call foo@plt
+; CHECK32ZCMP-NEXT: call foo
; CHECK32ZCMP-NEXT: mv a0, s1
-; CHECK32ZCMP-NEXT: call foo@plt
+; CHECK32ZCMP-NEXT: call foo
; CHECK32ZCMP-NEXT: li a0, 1
; CHECK32ZCMP-NEXT: mv a1, s0
-; CHECK32ZCMP-NEXT: call func@plt
+; CHECK32ZCMP-NEXT: call func
; CHECK32ZCMP-NEXT: cm.popret {ra, s0-s1}, 16
;
; CHECK64I-LABEL: not_zcmp_mv:
@@ -128,15 +128,15 @@ define i32 @not_zcmp_mv(i32 %num, i32 %f) nounwind {
; CHECK64I-NEXT: sd s0, 16(sp) # 8-byte Folded Spill
; CHECK64I-NEXT: sd s1, 8(sp) # 8-byte Folded Spill
; CHECK64I-NEXT: mv s0, a1
-; CHECK64I-NEXT: call foo@plt
+; CHECK64I-NEXT: call foo
; CHECK64I-NEXT: mv s1, a0
; CHECK64I-NEXT: mv a0, s0
-; CHECK64I-NEXT: call foo@plt
+; CHECK64I-NEXT: call foo
; CHECK64I-NEXT: mv a0, s1
-; CHECK64I-NEXT: call foo@plt
+; CHECK64I-NEXT: call foo
; CHECK64I-NEXT: li a0, 1
; CHECK64I-NEXT: mv a1, s0
-; CHECK64I-NEXT: call func@plt
+; CHECK64I-NEXT: call func
; CHECK64I-NEXT: ld ra, 24(sp) # 8-byte Folded Reload
; CHECK64I-NEXT: ld s0, 16(sp) # 8-byte Folded Reload
; CHECK64I-NEXT: ld s1, 8(sp) # 8-byte Folded Reload
@@ -147,15 +147,15 @@ define i32 @not_zcmp_mv(i32 %num, i32 %f) nounwind {
; CHECK64ZCMP: # %bb.0:
; CHECK64ZCMP-NEXT: cm.push {ra, s0-s1}, -32
; CHECK64ZCMP-NEXT: mv s0, a1
-; CHECK64ZCMP-NEXT: call foo@plt
+; CHECK64ZCMP-NEXT: call foo
; CHECK64ZCMP-NEXT: mv s1, a0
; CHECK64ZCMP-NEXT: mv a0, s0
-; CHECK64ZCMP-NEXT: call foo@plt
+; CHECK64ZCMP-NEXT: call foo
; CHECK64ZCMP-NEXT: mv a0, s1
-; CHECK64ZCMP-NEXT: call foo@plt
+; CHECK64ZCMP-NEXT: call foo
; CHECK64ZCMP-NEXT: li a0, 1
; CHECK64ZCMP-NEXT: mv a1, s0
-; CHECK64ZCMP-NEXT: call func@plt
+; CHECK64ZCMP-NEXT: call func
; CHECK64ZCMP-NEXT: cm.popret {ra, s0-s1}, 32
%call = call i32 @foo(i32 %num)
%call1 = call i32 @foo(i32 %f)
diff --git a/llvm/test/CodeGen/RISCV/condops.ll b/llvm/test/CodeGen/RISCV/condops.ll
index bce6707781c0..23f219c2487e 100644
--- a/llvm/test/CodeGen/RISCV/condops.ll
+++ b/llvm/test/CodeGen/RISCV/condops.ll
@@ -3092,7 +3092,7 @@ define void @sextw_removal_maskc(i1 %c, i32 signext %arg, i32 signext %arg1) nou
; RV32I-NEXT: .LBB56_1: # %bb2
; RV32I-NEXT: # =>This Inner Loop Header: Depth=1
; RV32I-NEXT: mv a0, s1
-; RV32I-NEXT: call bar@plt
+; RV32I-NEXT: call bar
; RV32I-NEXT: sll s1, s1, s0
; RV32I-NEXT: bnez a0, .LBB56_1
; RV32I-NEXT: # %bb.2: # %bb7
@@ -3115,7 +3115,7 @@ define void @sextw_removal_maskc(i1 %c, i32 signext %arg, i32 signext %arg1) nou
; RV64I-NEXT: .LBB56_1: # %bb2
; RV64I-NEXT: # =>This Inner Loop Header: Depth=1
; RV64I-NEXT: mv a0, s1
-; RV64I-NEXT: call bar@plt
+; RV64I-NEXT: call bar
; RV64I-NEXT: sllw s1, s1, s0
; RV64I-NEXT: bnez a0, .LBB56_1
; RV64I-NEXT: # %bb.2: # %bb7
@@ -3137,7 +3137,7 @@ define void @sextw_removal_maskc(i1 %c, i32 signext %arg, i32 signext %arg1) nou
; RV64XVENTANACONDOPS-NEXT: .LBB56_1: # %bb2
; RV64XVENTANACONDOPS-NEXT: # =>This Inner Loop Header: Depth=1
; RV64XVENTANACONDOPS-NEXT: mv a0, s1
-; RV64XVENTANACONDOPS-NEXT: call bar@plt
+; RV64XVENTANACONDOPS-NEXT: call bar
; RV64XVENTANACONDOPS-NEXT: sllw s1, s1, s0
; RV64XVENTANACONDOPS-NEXT: bnez a0, .LBB56_1
; RV64XVENTANACONDOPS-NEXT: # %bb.2: # %bb7
@@ -3160,7 +3160,7 @@ define void @sextw_removal_maskc(i1 %c, i32 signext %arg, i32 signext %arg1) nou
; RV64XTHEADCONDMOV-NEXT: .LBB56_1: # %bb2
; RV64XTHEADCONDMOV-NEXT: # =>This Inner Loop Header: Depth=1
; RV64XTHEADCONDMOV-NEXT: sext.w a0, s1
-; RV64XTHEADCONDMOV-NEXT: call bar@plt
+; RV64XTHEADCONDMOV-NEXT: call bar
; RV64XTHEADCONDMOV-NEXT: sllw s1, s1, s0
; RV64XTHEADCONDMOV-NEXT: bnez a0, .LBB56_1
; RV64XTHEADCONDMOV-NEXT: # %bb.2: # %bb7
@@ -3182,7 +3182,7 @@ define void @sextw_removal_maskc(i1 %c, i32 signext %arg, i32 signext %arg1) nou
; RV32ZICOND-NEXT: .LBB56_1: # %bb2
; RV32ZICOND-NEXT: # =>This Inner Loop Header: Depth=1
; RV32ZICOND-NEXT: mv a0, s1
-; RV32ZICOND-NEXT: call bar@plt
+; RV32ZICOND-NEXT: call bar
; RV32ZICOND-NEXT: sll s1, s1, s0
; RV32ZICOND-NEXT: bnez a0, .LBB56_1
; RV32ZICOND-NEXT: # %bb.2: # %bb7
@@ -3204,7 +3204,7 @@ define void @sextw_removal_maskc(i1 %c, i32 signext %arg, i32 signext %arg1) nou
; RV64ZICOND-NEXT: .LBB56_1: # %bb2
; RV64ZICOND-NEXT: # =>This Inner Loop Header: Depth=1
; RV64ZICOND-NEXT: mv a0, s1
-; RV64ZICOND-NEXT: call bar@plt
+; RV64ZICOND-NEXT: call bar
; RV64ZICOND-NEXT: sllw s1, s1, s0
; RV64ZICOND-NEXT: bnez a0, .LBB56_1
; RV64ZICOND-NEXT: # %bb.2: # %bb7
@@ -3243,7 +3243,7 @@ define void @sextw_removal_maskcn(i1 %c, i32 signext %arg, i32 signext %arg1) no
; RV32I-NEXT: .LBB57_1: # %bb2
; RV32I-NEXT: # =>This Inner Loop Header: Depth=1
; RV32I-NEXT: mv a0, s1
-; RV32I-NEXT: call bar@plt
+; RV32I-NEXT: call bar
; RV32I-NEXT: sll s1, s1, s0
; RV32I-NEXT: bnez a0, .LBB57_1
; RV32I-NEXT: # %bb.2: # %bb7
@@ -3266,7 +3266,7 @@ define void @sextw_removal_maskcn(i1 %c, i32 signext %arg, i32 signext %arg1) no
; RV64I-NEXT: .LBB57_1: # %bb2
; RV64I-NEXT: # =>This Inner Loop Header: Depth=1
; RV64I-NEXT: mv a0, s1
-; RV64I-NEXT: call bar@plt
+; RV64I-NEXT: call bar
; RV64I-NEXT: sllw s1, s1, s0
; RV64I-NEXT: bnez a0, .LBB57_1
; RV64I-NEXT: # %bb.2: # %bb7
@@ -3288,7 +3288,7 @@ define void @sextw_removal_maskcn(i1 %c, i32 signext %arg, i32 signext %arg1) no
; RV64XVENTANACONDOPS-NEXT: .LBB57_1: # %bb2
; RV64XVENTANACONDOPS-NEXT: # =>This Inner Loop Header: Depth=1
; RV64XVENTANACONDOPS-NEXT: mv a0, s1
-; RV64XVENTANACONDOPS-NEXT: call bar@plt
+; RV64XVENTANACONDOPS-NEXT: call bar
; RV64XVENTANACONDOPS-NEXT: sllw s1, s1, s0
; RV64XVENTANACONDOPS-NEXT: bnez a0, .LBB57_1
; RV64XVENTANACONDOPS-NEXT: # %bb.2: # %bb7
@@ -3311,7 +3311,7 @@ define void @sextw_removal_maskcn(i1 %c, i32 signext %arg, i32 signext %arg1) no
; RV64XTHEADCONDMOV-NEXT: .LBB57_1: # %bb2
; RV64XTHEADCONDMOV-NEXT: # =>This Inner Loop Header: Depth=1
; RV64XTHEADCONDMOV-NEXT: sext.w a0, s1
-; RV64XTHEADCONDMOV-NEXT: call bar@plt
+; RV64XTHEADCONDMOV-NEXT: call bar
; RV64XTHEADCONDMOV-NEXT: sllw s1, s1, s0
; RV64XTHEADCONDMOV-NEXT: bnez a0, .LBB57_1
; RV64XTHEADCONDMOV-NEXT: # %bb.2: # %bb7
@@ -3333,7 +3333,7 @@ define void @sextw_removal_maskcn(i1 %c, i32 signext %arg, i32 signext %arg1) no
; RV32ZICOND-NEXT: .LBB57_1: # %bb2
; RV32ZICOND-NEXT: # =>This Inner Loop Header: Depth=1
; RV32ZICOND-NEXT: mv a0, s1
-; RV32ZICOND-NEXT: call bar@plt
+; RV32ZICOND-NEXT: call bar
; RV32ZICOND-NEXT: sll s1, s1, s0
; RV32ZICOND-NEXT: bnez a0, .LBB57_1
; RV32ZICOND-NEXT: # %bb.2: # %bb7
@@ -3355,7 +3355,7 @@ define void @sextw_removal_maskcn(i1 %c, i32 signext %arg, i32 signext %arg1) no
; RV64ZICOND-NEXT: .LBB57_1: # %bb2
; RV64ZICOND-NEXT: # =>This Inner Loop Header: Depth=1
; RV64ZICOND-NEXT: mv a0, s1
-; RV64ZICOND-NEXT: call bar@plt
+; RV64ZICOND-NEXT: call bar
; RV64ZICOND-NEXT: sllw s1, s1, s0
; RV64ZICOND-NEXT: bnez a0, .LBB57_1
; RV64ZICOND-NEXT: # %bb.2: # %bb7
@@ -3505,7 +3505,7 @@ define signext i16 @numsignbits(i16 signext %0, i16 signext %1, i16 signext %2,
; RV32I-NEXT: beqz a1, .LBB60_4
; RV32I-NEXT: # %bb.3:
; RV32I-NEXT: mv a0, s0
-; RV32I-NEXT: call bat@plt
+; RV32I-NEXT: call bat
; RV32I-NEXT: .LBB60_4:
; RV32I-NEXT: mv a0, s0
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
@@ -3526,7 +3526,7 @@ define signext i16 @numsignbits(i16 signext %0, i16 signext %1, i16 signext %2,
; RV64I-NEXT: beqz a1, .LBB60_4
; RV64I-NEXT: # %bb.3:
; RV64I-NEXT: mv a0, s0
-; RV64I-NEXT: call bat@plt
+; RV64I-NEXT: call bat
; RV64I-NEXT: .LBB60_4:
; RV64I-NEXT: mv a0, s0
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
@@ -3545,7 +3545,7 @@ define signext i16 @numsignbits(i16 signext %0, i16 signext %1, i16 signext %2,
; RV64XVENTANACONDOPS-NEXT: beqz a1, .LBB60_2
; RV64XVENTANACONDOPS-NEXT: # %bb.1:
; RV64XVENTANACONDOPS-NEXT: mv a0, s0
-; RV64XVENTANACONDOPS-NEXT: call bat@plt
+; RV64XVENTANACONDOPS-NEXT: call bat
; RV64XVENTANACONDOPS-NEXT: .LBB60_2:
; RV64XVENTANACONDOPS-NEXT: mv a0, s0
; RV64XVENTANACONDOPS-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
@@ -3563,7 +3563,7 @@ define signext i16 @numsignbits(i16 signext %0, i16 signext %1, i16 signext %2,
; RV64XTHEADCONDMOV-NEXT: beqz a1, .LBB60_2
; RV64XTHEADCONDMOV-NEXT: # %bb.1:
; RV64XTHEADCONDMOV-NEXT: mv a0, s0
-; RV64XTHEADCONDMOV-NEXT: call bat@plt
+; RV64XTHEADCONDMOV-NEXT: call bat
; RV64XTHEADCONDMOV-NEXT: .LBB60_2:
; RV64XTHEADCONDMOV-NEXT: mv a0, s0
; RV64XTHEADCONDMOV-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
@@ -3582,7 +3582,7 @@ define signext i16 @numsignbits(i16 signext %0, i16 signext %1, i16 signext %2,
; RV32ZICOND-NEXT: beqz a1, .LBB60_2
; RV32ZICOND-NEXT: # %bb.1:
; RV32ZICOND-NEXT: mv a0, s0
-; RV32ZICOND-NEXT: call bat@plt
+; RV32ZICOND-NEXT: call bat
; RV32ZICOND-NEXT: .LBB60_2:
; RV32ZICOND-NEXT: mv a0, s0
; RV32ZICOND-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
@@ -3601,7 +3601,7 @@ define signext i16 @numsignbits(i16 signext %0, i16 signext %1, i16 signext %2,
; RV64ZICOND-NEXT: beqz a1, .LBB60_2
; RV64ZICOND-NEXT: # %bb.1:
; RV64ZICOND-NEXT: mv a0, s0
-; RV64ZICOND-NEXT: call bat@plt
+; RV64ZICOND-NEXT: call bat
; RV64ZICOND-NEXT: .LBB60_2:
; RV64ZICOND-NEXT: mv a0, s0
; RV64ZICOND-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
diff --git a/llvm/test/CodeGen/RISCV/copysign-casts.ll b/llvm/test/CodeGen/RISCV/copysign-casts.ll
index 931db00d06f8..accd52369fa1 100644
--- a/llvm/test/CodeGen/RISCV/copysign-casts.ll
+++ b/llvm/test/CodeGen/RISCV/copysign-casts.ll
@@ -164,7 +164,7 @@ define double @fold_promote_d_h(double %a, half %b) nounwind {
; RV32IFD-NEXT: fsd fs0, 0(sp) # 8-byte Folded Spill
; RV32IFD-NEXT: fmv.d fs0, fa0
; RV32IFD-NEXT: fmv.s fa0, fa1
-; RV32IFD-NEXT: call __extendhfsf2@plt
+; RV32IFD-NEXT: call __extendhfsf2
; RV32IFD-NEXT: fcvt.d.s fa5, fa0
; RV32IFD-NEXT: fsgnj.d fa0, fs0, fa5
; RV32IFD-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
@@ -179,7 +179,7 @@ define double @fold_promote_d_h(double %a, half %b) nounwind {
; RV64IFD-NEXT: fsd fs0, 0(sp) # 8-byte Folded Spill
; RV64IFD-NEXT: fmv.d fs0, fa0
; RV64IFD-NEXT: fmv.s fa0, fa1
-; RV64IFD-NEXT: call __extendhfsf2@plt
+; RV64IFD-NEXT: call __extendhfsf2
; RV64IFD-NEXT: fcvt.d.s fa5, fa0
; RV64IFD-NEXT: fsgnj.d fa0, fs0, fa5
; RV64IFD-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
@@ -265,7 +265,7 @@ define float @fold_promote_f_h(float %a, half %b) nounwind {
; RV32IF-NEXT: fsw fs0, 8(sp) # 4-byte Folded Spill
; RV32IF-NEXT: fmv.s fs0, fa0
; RV32IF-NEXT: fmv.s fa0, fa1
-; RV32IF-NEXT: call __extendhfsf2@plt
+; RV32IF-NEXT: call __extendhfsf2
; RV32IF-NEXT: fsgnj.s fa0, fs0, fa0
; RV32IF-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32IF-NEXT: flw fs0, 8(sp) # 4-byte Folded Reload
@@ -279,7 +279,7 @@ define float @fold_promote_f_h(float %a, half %b) nounwind {
; RV32IFD-NEXT: fsd fs0, 0(sp) # 8-byte Folded Spill
; RV32IFD-NEXT: fmv.s fs0, fa0
; RV32IFD-NEXT: fmv.s fa0, fa1
-; RV32IFD-NEXT: call __extendhfsf2@plt
+; RV32IFD-NEXT: call __extendhfsf2
; RV32IFD-NEXT: fsgnj.s fa0, fs0, fa0
; RV32IFD-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32IFD-NEXT: fld fs0, 0(sp) # 8-byte Folded Reload
@@ -293,7 +293,7 @@ define float @fold_promote_f_h(float %a, half %b) nounwind {
; RV64IFD-NEXT: fsd fs0, 0(sp) # 8-byte Folded Spill
; RV64IFD-NEXT: fmv.s fs0, fa0
; RV64IFD-NEXT: fmv.s fa0, fa1
-; RV64IFD-NEXT: call __extendhfsf2@plt
+; RV64IFD-NEXT: call __extendhfsf2
; RV64IFD-NEXT: fsgnj.s fa0, fs0, fa0
; RV64IFD-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64IFD-NEXT: fld fs0, 0(sp) # 8-byte Folded Reload
diff --git a/llvm/test/CodeGen/RISCV/ctlz-cttz-ctpop.ll b/llvm/test/CodeGen/RISCV/ctlz-cttz-ctpop.ll
index da67176e3f0c..455e6e54c9b3 100644
--- a/llvm/test/CodeGen/RISCV/ctlz-cttz-ctpop.ll
+++ b/llvm/test/CodeGen/RISCV/ctlz-cttz-ctpop.ll
@@ -244,7 +244,7 @@ define i32 @test_cttz_i32(i32 %a) nounwind {
; RV32I-NEXT: and a0, a0, a1
; RV32I-NEXT: lui a1, 30667
; RV32I-NEXT: addi a1, a1, 1329
-; RV32I-NEXT: call __mulsi3@plt
+; RV32I-NEXT: call __mulsi3
; RV32I-NEXT: srli a0, a0, 27
; RV32I-NEXT: lui a1, %hi(.LCPI2_0)
; RV32I-NEXT: addi a1, a1, %lo(.LCPI2_0)
@@ -268,7 +268,7 @@ define i32 @test_cttz_i32(i32 %a) nounwind {
; RV64I-NEXT: and a0, a0, a1
; RV64I-NEXT: lui a1, 30667
; RV64I-NEXT: addiw a1, a1, 1329
-; RV64I-NEXT: call __muldi3@plt
+; RV64I-NEXT: call __muldi3
; RV64I-NEXT: srliw a0, a0, 27
; RV64I-NEXT: lui a1, %hi(.LCPI2_0)
; RV64I-NEXT: addi a1, a1, %lo(.LCPI2_0)
@@ -381,14 +381,14 @@ define i64 @test_cttz_i64(i64 %a) nounwind {
; RV32I-NEXT: lui a1, 30667
; RV32I-NEXT: addi s3, a1, 1329
; RV32I-NEXT: mv a1, s3
-; RV32I-NEXT: call __mulsi3@plt
+; RV32I-NEXT: call __mulsi3
; RV32I-NEXT: mv s1, a0
; RV32I-NEXT: lui a0, %hi(.LCPI3_0)
; RV32I-NEXT: addi s4, a0, %lo(.LCPI3_0)
; RV32I-NEXT: neg a0, s2
; RV32I-NEXT: and a0, s2, a0
; RV32I-NEXT: mv a1, s3
-; RV32I-NEXT: call __mulsi3@plt
+; RV32I-NEXT: call __mulsi3
; RV32I-NEXT: bnez s2, .LBB3_3
; RV32I-NEXT: # %bb.1:
; RV32I-NEXT: li a0, 32
@@ -426,7 +426,7 @@ define i64 @test_cttz_i64(i64 %a) nounwind {
; RV64I-NEXT: and a0, a0, a1
; RV64I-NEXT: lui a1, %hi(.LCPI3_0)
; RV64I-NEXT: ld a1, %lo(.LCPI3_0)(a1)
-; RV64I-NEXT: call __muldi3@plt
+; RV64I-NEXT: call __muldi3
; RV64I-NEXT: srli a0, a0, 58
; RV64I-NEXT: lui a1, %hi(.LCPI3_1)
; RV64I-NEXT: addi a1, a1, %lo(.LCPI3_1)
@@ -706,7 +706,7 @@ define i32 @test_cttz_i32_zero_undef(i32 %a) nounwind {
; RV32I-NEXT: and a0, a0, a1
; RV32I-NEXT: lui a1, 30667
; RV32I-NEXT: addi a1, a1, 1329
-; RV32I-NEXT: call __mulsi3@plt
+; RV32I-NEXT: call __mulsi3
; RV32I-NEXT: srli a0, a0, 27
; RV32I-NEXT: lui a1, %hi(.LCPI6_0)
; RV32I-NEXT: addi a1, a1, %lo(.LCPI6_0)
@@ -724,7 +724,7 @@ define i32 @test_cttz_i32_zero_undef(i32 %a) nounwind {
; RV64I-NEXT: and a0, a0, a1
; RV64I-NEXT: lui a1, 30667
; RV64I-NEXT: addiw a1, a1, 1329
-; RV64I-NEXT: call __muldi3@plt
+; RV64I-NEXT: call __muldi3
; RV64I-NEXT: srliw a0, a0, 27
; RV64I-NEXT: lui a1, %hi(.LCPI6_0)
; RV64I-NEXT: addi a1, a1, %lo(.LCPI6_0)
@@ -812,14 +812,14 @@ define i64 @test_cttz_i64_zero_undef(i64 %a) nounwind {
; RV32I-NEXT: lui a1, 30667
; RV32I-NEXT: addi s3, a1, 1329
; RV32I-NEXT: mv a1, s3
-; RV32I-NEXT: call __mulsi3@plt
+; RV32I-NEXT: call __mulsi3
; RV32I-NEXT: mv s0, a0
; RV32I-NEXT: lui a0, %hi(.LCPI7_0)
; RV32I-NEXT: addi s4, a0, %lo(.LCPI7_0)
; RV32I-NEXT: neg a0, s1
; RV32I-NEXT: and a0, s1, a0
; RV32I-NEXT: mv a1, s3
-; RV32I-NEXT: call __mulsi3@plt
+; RV32I-NEXT: call __mulsi3
; RV32I-NEXT: bnez s2, .LBB7_2
; RV32I-NEXT: # %bb.1:
; RV32I-NEXT: srli a0, a0, 27
@@ -850,7 +850,7 @@ define i64 @test_cttz_i64_zero_undef(i64 %a) nounwind {
; RV64I-NEXT: and a0, a0, a1
; RV64I-NEXT: lui a1, %hi(.LCPI7_0)
; RV64I-NEXT: ld a1, %lo(.LCPI7_0)(a1)
-; RV64I-NEXT: call __muldi3@plt
+; RV64I-NEXT: call __muldi3
; RV64I-NEXT: srli a0, a0, 58
; RV64I-NEXT: lui a1, %hi(.LCPI7_1)
; RV64I-NEXT: addi a1, a1, %lo(.LCPI7_1)
@@ -1191,7 +1191,7 @@ define i32 @test_ctlz_i32(i32 %a) nounwind {
; RV32I-NEXT: and a0, a0, a1
; RV32I-NEXT: lui a1, 4112
; RV32I-NEXT: addi a1, a1, 257
-; RV32I-NEXT: call __mulsi3@plt
+; RV32I-NEXT: call __mulsi3
; RV32I-NEXT: srli a0, a0, 24
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
@@ -1236,7 +1236,7 @@ define i32 @test_ctlz_i32(i32 %a) nounwind {
; RV64I-NEXT: and a0, a0, a1
; RV64I-NEXT: lui a1, 4112
; RV64I-NEXT: addiw a1, a1, 257
-; RV64I-NEXT: call __muldi3@plt
+; RV64I-NEXT: call __muldi3
; RV64I-NEXT: srliw a0, a0, 24
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
@@ -1395,7 +1395,7 @@ define i64 @test_ctlz_i64(i64 %a) nounwind {
; RV32I-NEXT: lui a1, 4112
; RV32I-NEXT: addi s3, a1, 257
; RV32I-NEXT: mv a1, s3
-; RV32I-NEXT: call __mulsi3@plt
+; RV32I-NEXT: call __mulsi3
; RV32I-NEXT: mv s1, a0
; RV32I-NEXT: srli a0, s2, 1
; RV32I-NEXT: or a0, s2, a0
@@ -1419,7 +1419,7 @@ define i64 @test_ctlz_i64(i64 %a) nounwind {
; RV32I-NEXT: add a0, a0, a1
; RV32I-NEXT: and a0, a0, s6
; RV32I-NEXT: mv a1, s3
-; RV32I-NEXT: call __mulsi3@plt
+; RV32I-NEXT: call __mulsi3
; RV32I-NEXT: bnez s0, .LBB11_2
; RV32I-NEXT: # %bb.1:
; RV32I-NEXT: srli a0, a0, 24
@@ -1485,7 +1485,7 @@ define i64 @test_ctlz_i64(i64 %a) nounwind {
; RV64I-NEXT: addiw a1, a1, 257
; RV64I-NEXT: slli a2, a1, 32
; RV64I-NEXT: add a1, a1, a2
-; RV64I-NEXT: call __muldi3@plt
+; RV64I-NEXT: call __muldi3
; RV64I-NEXT: srli a0, a0, 56
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
@@ -1862,7 +1862,7 @@ define i32 @test_ctlz_i32_zero_undef(i32 %a) nounwind {
; RV32I-NEXT: and a0, a0, a1
; RV32I-NEXT: lui a1, 4112
; RV32I-NEXT: addi a1, a1, 257
-; RV32I-NEXT: call __mulsi3@plt
+; RV32I-NEXT: call __mulsi3
; RV32I-NEXT: srli a0, a0, 24
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
@@ -1901,7 +1901,7 @@ define i32 @test_ctlz_i32_zero_undef(i32 %a) nounwind {
; RV64I-NEXT: and a0, a0, a1
; RV64I-NEXT: lui a1, 4112
; RV64I-NEXT: addiw a1, a1, 257
-; RV64I-NEXT: call __muldi3@plt
+; RV64I-NEXT: call __muldi3
; RV64I-NEXT: srliw a0, a0, 24
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
@@ -2046,7 +2046,7 @@ define i64 @test_ctlz_i64_zero_undef(i64 %a) nounwind {
; RV32I-NEXT: lui a1, 4112
; RV32I-NEXT: addi s3, a1, 257
; RV32I-NEXT: mv a1, s3
-; RV32I-NEXT: call __mulsi3@plt
+; RV32I-NEXT: call __mulsi3
; RV32I-NEXT: mv s1, a0
; RV32I-NEXT: srli a0, s2, 1
; RV32I-NEXT: or a0, s2, a0
@@ -2070,7 +2070,7 @@ define i64 @test_ctlz_i64_zero_undef(i64 %a) nounwind {
; RV32I-NEXT: add a0, a0, a1
; RV32I-NEXT: and a0, a0, s6
; RV32I-NEXT: mv a1, s3
-; RV32I-NEXT: call __mulsi3@plt
+; RV32I-NEXT: call __mulsi3
; RV32I-NEXT: bnez s0, .LBB15_2
; RV32I-NEXT: # %bb.1:
; RV32I-NEXT: srli a0, a0, 24
@@ -2134,7 +2134,7 @@ define i64 @test_ctlz_i64_zero_undef(i64 %a) nounwind {
; RV64I-NEXT: addiw a1, a1, 257
; RV64I-NEXT: slli a2, a1, 32
; RV64I-NEXT: add a1, a1, a2
-; RV64I-NEXT: call __muldi3@plt
+; RV64I-NEXT: call __muldi3
; RV64I-NEXT: srli a0, a0, 56
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
@@ -2484,7 +2484,7 @@ define i32 @test_ctpop_i32(i32 %a) nounwind {
; RV32I-NEXT: and a0, a0, a1
; RV32I-NEXT: lui a1, 4112
; RV32I-NEXT: addi a1, a1, 257
-; RV32I-NEXT: call __mulsi3@plt
+; RV32I-NEXT: call __mulsi3
; RV32I-NEXT: srli a0, a0, 24
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
@@ -2512,7 +2512,7 @@ define i32 @test_ctpop_i32(i32 %a) nounwind {
; RV64I-NEXT: and a0, a0, a1
; RV64I-NEXT: lui a1, 4112
; RV64I-NEXT: addiw a1, a1, 257
-; RV64I-NEXT: call __muldi3@plt
+; RV64I-NEXT: call __muldi3
; RV64I-NEXT: srliw a0, a0, 24
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
@@ -2598,7 +2598,7 @@ define i32 @test_ctpop_i32(i32 %a) nounwind {
; RV32XTHEADBB-NEXT: and a0, a0, a1
; RV32XTHEADBB-NEXT: lui a1, 4112
; RV32XTHEADBB-NEXT: addi a1, a1, 257
-; RV32XTHEADBB-NEXT: call __mulsi3@plt
+; RV32XTHEADBB-NEXT: call __mulsi3
; RV32XTHEADBB-NEXT: srli a0, a0, 24
; RV32XTHEADBB-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32XTHEADBB-NEXT: addi sp, sp, 16
@@ -2626,7 +2626,7 @@ define i32 @test_ctpop_i32(i32 %a) nounwind {
; RV64XTHEADBB-NEXT: and a0, a0, a1
; RV64XTHEADBB-NEXT: lui a1, 4112
; RV64XTHEADBB-NEXT: addiw a1, a1, 257
-; RV64XTHEADBB-NEXT: call __muldi3@plt
+; RV64XTHEADBB-NEXT: call __muldi3
; RV64XTHEADBB-NEXT: srliw a0, a0, 24
; RV64XTHEADBB-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64XTHEADBB-NEXT: addi sp, sp, 16
@@ -2666,7 +2666,7 @@ define i64 @test_ctpop_i64(i64 %a) nounwind {
; RV32I-NEXT: lui a1, 4112
; RV32I-NEXT: addi s1, a1, 257
; RV32I-NEXT: mv a1, s1
-; RV32I-NEXT: call __mulsi3@plt
+; RV32I-NEXT: call __mulsi3
; RV32I-NEXT: srli s5, a0, 24
; RV32I-NEXT: srli a0, s0, 1
; RV32I-NEXT: and a0, a0, s2
@@ -2679,7 +2679,7 @@ define i64 @test_ctpop_i64(i64 %a) nounwind {
; RV32I-NEXT: add a0, a0, a1
; RV32I-NEXT: and a0, a0, s4
; RV32I-NEXT: mv a1, s1
-; RV32I-NEXT: call __mulsi3@plt
+; RV32I-NEXT: call __mulsi3
; RV32I-NEXT: srli a0, a0, 24
; RV32I-NEXT: add a0, a0, s5
; RV32I-NEXT: li a1, 0
@@ -2723,7 +2723,7 @@ define i64 @test_ctpop_i64(i64 %a) nounwind {
; RV64I-NEXT: addiw a1, a1, 257
; RV64I-NEXT: slli a2, a1, 32
; RV64I-NEXT: add a1, a1, a2
-; RV64I-NEXT: call __muldi3@plt
+; RV64I-NEXT: call __muldi3
; RV64I-NEXT: srli a0, a0, 56
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
@@ -2842,7 +2842,7 @@ define i64 @test_ctpop_i64(i64 %a) nounwind {
; RV32XTHEADBB-NEXT: lui a1, 4112
; RV32XTHEADBB-NEXT: addi s1, a1, 257
; RV32XTHEADBB-NEXT: mv a1, s1
-; RV32XTHEADBB-NEXT: call __mulsi3@plt
+; RV32XTHEADBB-NEXT: call __mulsi3
; RV32XTHEADBB-NEXT: srli s5, a0, 24
; RV32XTHEADBB-NEXT: srli a0, s0, 1
; RV32XTHEADBB-NEXT: and a0, a0, s2
@@ -2855,7 +2855,7 @@ define i64 @test_ctpop_i64(i64 %a) nounwind {
; RV32XTHEADBB-NEXT: add a0, a0, a1
; RV32XTHEADBB-NEXT: and a0, a0, s4
; RV32XTHEADBB-NEXT: mv a1, s1
-; RV32XTHEADBB-NEXT: call __mulsi3@plt
+; RV32XTHEADBB-NEXT: call __mulsi3
; RV32XTHEADBB-NEXT: srli a0, a0, 24
; RV32XTHEADBB-NEXT: add a0, a0, s5
; RV32XTHEADBB-NEXT: li a1, 0
@@ -2899,7 +2899,7 @@ define i64 @test_ctpop_i64(i64 %a) nounwind {
; RV64XTHEADBB-NEXT: addiw a1, a1, 257
; RV64XTHEADBB-NEXT: slli a2, a1, 32
; RV64XTHEADBB-NEXT: add a1, a1, a2
-; RV64XTHEADBB-NEXT: call __muldi3@plt
+; RV64XTHEADBB-NEXT: call __muldi3
; RV64XTHEADBB-NEXT: srli a0, a0, 56
; RV64XTHEADBB-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64XTHEADBB-NEXT: addi sp, sp, 16
diff --git a/llvm/test/CodeGen/RISCV/ctz_zero_return_test.ll b/llvm/test/CodeGen/RISCV/ctz_zero_return_test.ll
index 9bfd30daf7d4..a60fd26f4959 100644
--- a/llvm/test/CodeGen/RISCV/ctz_zero_return_test.ll
+++ b/llvm/test/CodeGen/RISCV/ctz_zero_return_test.ll
@@ -46,14 +46,14 @@ define signext i32 @ctz_dereferencing_pointer(i64* %b) nounwind {
; RV32I-NEXT: lui a1, 30667
; RV32I-NEXT: addi s1, a1, 1329
; RV32I-NEXT: mv a1, s1
-; RV32I-NEXT: call __mulsi3@plt
+; RV32I-NEXT: call __mulsi3
; RV32I-NEXT: mv s0, a0
; RV32I-NEXT: lui a0, %hi(.LCPI0_0)
; RV32I-NEXT: addi s3, a0, %lo(.LCPI0_0)
; RV32I-NEXT: neg a0, s4
; RV32I-NEXT: and a0, s4, a0
; RV32I-NEXT: mv a1, s1
-; RV32I-NEXT: call __mulsi3@plt
+; RV32I-NEXT: call __mulsi3
; RV32I-NEXT: bnez s4, .LBB0_3
; RV32I-NEXT: # %bb.1: # %entry
; RV32I-NEXT: li a0, 32
@@ -91,7 +91,7 @@ define signext i32 @ctz_dereferencing_pointer(i64* %b) nounwind {
; RV64I-NEXT: and a0, s0, a0
; RV64I-NEXT: lui a1, %hi(.LCPI0_0)
; RV64I-NEXT: ld a1, %lo(.LCPI0_0)(a1)
-; RV64I-NEXT: call __muldi3@plt
+; RV64I-NEXT: call __muldi3
; RV64I-NEXT: srli a0, a0, 58
; RV64I-NEXT: lui a1, %hi(.LCPI0_1)
; RV64I-NEXT: addi a1, a1, %lo(.LCPI0_1)
@@ -144,7 +144,7 @@ define i64 @ctz_dereferencing_pointer_zext(i32* %b) nounwind {
; RV32I-NEXT: and a0, s0, a0
; RV32I-NEXT: lui a1, 30667
; RV32I-NEXT: addi a1, a1, 1329
-; RV32I-NEXT: call __mulsi3@plt
+; RV32I-NEXT: call __mulsi3
; RV32I-NEXT: srli a0, a0, 27
; RV32I-NEXT: lui a1, %hi(.LCPI1_0)
; RV32I-NEXT: addi a1, a1, %lo(.LCPI1_0)
@@ -170,7 +170,7 @@ define i64 @ctz_dereferencing_pointer_zext(i32* %b) nounwind {
; RV64I-NEXT: and a0, s0, a0
; RV64I-NEXT: lui a1, 30667
; RV64I-NEXT: addiw a1, a1, 1329
-; RV64I-NEXT: call __muldi3@plt
+; RV64I-NEXT: call __muldi3
; RV64I-NEXT: srliw a0, a0, 27
; RV64I-NEXT: lui a1, %hi(.LCPI1_0)
; RV64I-NEXT: addi a1, a1, %lo(.LCPI1_0)
@@ -220,7 +220,7 @@ define signext i32 @ctz1(i32 signext %x) nounwind {
; RV32I-NEXT: and a0, s0, a0
; RV32I-NEXT: lui a1, 30667
; RV32I-NEXT: addi a1, a1, 1329
-; RV32I-NEXT: call __mulsi3@plt
+; RV32I-NEXT: call __mulsi3
; RV32I-NEXT: srli a0, a0, 27
; RV32I-NEXT: lui a1, %hi(.LCPI2_0)
; RV32I-NEXT: addi a1, a1, %lo(.LCPI2_0)
@@ -245,7 +245,7 @@ define signext i32 @ctz1(i32 signext %x) nounwind {
; RV64I-NEXT: and a0, s0, a0
; RV64I-NEXT: lui a1, 30667
; RV64I-NEXT: addiw a1, a1, 1329
-; RV64I-NEXT: call __muldi3@plt
+; RV64I-NEXT: call __muldi3
; RV64I-NEXT: srliw a0, a0, 27
; RV64I-NEXT: lui a1, %hi(.LCPI2_0)
; RV64I-NEXT: addi a1, a1, %lo(.LCPI2_0)
@@ -293,7 +293,7 @@ define signext i32 @ctz1_flipped(i32 signext %x) nounwind {
; RV32I-NEXT: and a0, s0, a0
; RV32I-NEXT: lui a1, 30667
; RV32I-NEXT: addi a1, a1, 1329
-; RV32I-NEXT: call __mulsi3@plt
+; RV32I-NEXT: call __mulsi3
; RV32I-NEXT: srli a0, a0, 27
; RV32I-NEXT: lui a1, %hi(.LCPI3_0)
; RV32I-NEXT: addi a1, a1, %lo(.LCPI3_0)
@@ -318,7 +318,7 @@ define signext i32 @ctz1_flipped(i32 signext %x) nounwind {
; RV64I-NEXT: and a0, s0, a0
; RV64I-NEXT: lui a1, 30667
; RV64I-NEXT: addiw a1, a1, 1329
-; RV64I-NEXT: call __muldi3@plt
+; RV64I-NEXT: call __muldi3
; RV64I-NEXT: srliw a0, a0, 27
; RV64I-NEXT: lui a1, %hi(.LCPI3_0)
; RV64I-NEXT: addi a1, a1, %lo(.LCPI3_0)
@@ -364,7 +364,7 @@ define signext i32 @ctz2(i32 signext %x) nounwind {
; RV32I-NEXT: and a0, a0, a1
; RV32I-NEXT: lui a1, 30667
; RV32I-NEXT: addi a1, a1, 1329
-; RV32I-NEXT: call __mulsi3@plt
+; RV32I-NEXT: call __mulsi3
; RV32I-NEXT: srli a0, a0, 27
; RV32I-NEXT: lui a1, %hi(.LCPI4_0)
; RV32I-NEXT: addi a1, a1, %lo(.LCPI4_0)
@@ -387,7 +387,7 @@ define signext i32 @ctz2(i32 signext %x) nounwind {
; RV64I-NEXT: and a0, a0, a1
; RV64I-NEXT: lui a1, 30667
; RV64I-NEXT: addiw a1, a1, 1329
-; RV64I-NEXT: call __muldi3@plt
+; RV64I-NEXT: call __muldi3
; RV64I-NEXT: srliw a0, a0, 27
; RV64I-NEXT: lui a1, %hi(.LCPI4_0)
; RV64I-NEXT: addi a1, a1, %lo(.LCPI4_0)
@@ -429,7 +429,7 @@ define signext i32 @ctz3(i32 signext %x) nounwind {
; RV32I-NEXT: and a0, a0, a1
; RV32I-NEXT: lui a1, 30667
; RV32I-NEXT: addi a1, a1, 1329
-; RV32I-NEXT: call __mulsi3@plt
+; RV32I-NEXT: call __mulsi3
; RV32I-NEXT: srli a0, a0, 27
; RV32I-NEXT: lui a1, %hi(.LCPI5_0)
; RV32I-NEXT: addi a1, a1, %lo(.LCPI5_0)
@@ -452,7 +452,7 @@ define signext i32 @ctz3(i32 signext %x) nounwind {
; RV64I-NEXT: and a0, a0, a1
; RV64I-NEXT: lui a1, 30667
; RV64I-NEXT: addiw a1, a1, 1329
-; RV64I-NEXT: call __muldi3@plt
+; RV64I-NEXT: call __muldi3
; RV64I-NEXT: srliw a0, a0, 27
; RV64I-NEXT: lui a1, %hi(.LCPI5_0)
; RV64I-NEXT: addi a1, a1, %lo(.LCPI5_0)
@@ -509,14 +509,14 @@ define signext i32 @ctz4(i64 %b) nounwind {
; RV32I-NEXT: lui a1, 30667
; RV32I-NEXT: addi s3, a1, 1329
; RV32I-NEXT: mv a1, s3
-; RV32I-NEXT: call __mulsi3@plt
+; RV32I-NEXT: call __mulsi3
; RV32I-NEXT: mv s1, a0
; RV32I-NEXT: lui a0, %hi(.LCPI6_0)
; RV32I-NEXT: addi s4, a0, %lo(.LCPI6_0)
; RV32I-NEXT: neg a0, s2
; RV32I-NEXT: and a0, s2, a0
; RV32I-NEXT: mv a1, s3
-; RV32I-NEXT: call __mulsi3@plt
+; RV32I-NEXT: call __mulsi3
; RV32I-NEXT: bnez s2, .LBB6_3
; RV32I-NEXT: # %bb.1: # %entry
; RV32I-NEXT: li a0, 32
@@ -554,7 +554,7 @@ define signext i32 @ctz4(i64 %b) nounwind {
; RV64I-NEXT: and a0, s0, a0
; RV64I-NEXT: lui a1, %hi(.LCPI6_0)
; RV64I-NEXT: ld a1, %lo(.LCPI6_0)(a1)
-; RV64I-NEXT: call __muldi3@plt
+; RV64I-NEXT: call __muldi3
; RV64I-NEXT: srli a0, a0, 58
; RV64I-NEXT: lui a1, %hi(.LCPI6_1)
; RV64I-NEXT: addi a1, a1, %lo(.LCPI6_1)
@@ -643,7 +643,7 @@ define signext i32 @ctlz(i64 %b) nounwind {
; RV32I-NEXT: lui a1, 4112
; RV32I-NEXT: addi s3, a1, 257
; RV32I-NEXT: mv a1, s3
-; RV32I-NEXT: call __mulsi3@plt
+; RV32I-NEXT: call __mulsi3
; RV32I-NEXT: mv s1, a0
; RV32I-NEXT: srli a0, s2, 1
; RV32I-NEXT: or a0, s2, a0
@@ -667,7 +667,7 @@ define signext i32 @ctlz(i64 %b) nounwind {
; RV32I-NEXT: add a0, a0, a1
; RV32I-NEXT: and a0, a0, s6
; RV32I-NEXT: mv a1, s3
-; RV32I-NEXT: call __mulsi3@plt
+; RV32I-NEXT: call __mulsi3
; RV32I-NEXT: bnez s0, .LBB7_2
; RV32I-NEXT: # %bb.1: # %entry
; RV32I-NEXT: srli a0, a0, 24
@@ -731,7 +731,7 @@ define signext i32 @ctlz(i64 %b) nounwind {
; RV64I-NEXT: addiw a1, a1, 257
; RV64I-NEXT: slli a2, a1, 32
; RV64I-NEXT: add a1, a1, a2
-; RV64I-NEXT: call __muldi3@plt
+; RV64I-NEXT: call __muldi3
; RV64I-NEXT: slli a0, a0, 2
; RV64I-NEXT: srli a0, a0, 58
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
@@ -772,7 +772,7 @@ define signext i32 @ctz5(i32 signext %x) nounwind {
; RV32I-NEXT: and a0, s0, a0
; RV32I-NEXT: lui a1, 30667
; RV32I-NEXT: addi a1, a1, 1329
-; RV32I-NEXT: call __mulsi3@plt
+; RV32I-NEXT: call __mulsi3
; RV32I-NEXT: srli a0, a0, 27
; RV32I-NEXT: lui a1, %hi(.LCPI8_0)
; RV32I-NEXT: addi a1, a1, %lo(.LCPI8_0)
@@ -797,7 +797,7 @@ define signext i32 @ctz5(i32 signext %x) nounwind {
; RV64I-NEXT: and a0, s0, a0
; RV64I-NEXT: lui a1, 30667
; RV64I-NEXT: addiw a1, a1, 1329
-; RV64I-NEXT: call __muldi3@plt
+; RV64I-NEXT: call __muldi3
; RV64I-NEXT: srliw a0, a0, 27
; RV64I-NEXT: lui a1, %hi(.LCPI8_0)
; RV64I-NEXT: addi a1, a1, %lo(.LCPI8_0)
@@ -845,7 +845,7 @@ define signext i32 @ctz6(i32 signext %x) nounwind {
; RV32I-NEXT: and a0, s0, a0
; RV32I-NEXT: lui a1, 30667
; RV32I-NEXT: addi a1, a1, 1329
-; RV32I-NEXT: call __mulsi3@plt
+; RV32I-NEXT: call __mulsi3
; RV32I-NEXT: srli a0, a0, 27
; RV32I-NEXT: lui a1, %hi(.LCPI9_0)
; RV32I-NEXT: addi a1, a1, %lo(.LCPI9_0)
@@ -870,7 +870,7 @@ define signext i32 @ctz6(i32 signext %x) nounwind {
; RV64I-NEXT: and a0, s0, a0
; RV64I-NEXT: lui a1, 30667
; RV64I-NEXT: addiw a1, a1, 1329
-; RV64I-NEXT: call __muldi3@plt
+; RV64I-NEXT: call __muldi3
; RV64I-NEXT: srliw a0, a0, 27
; RV64I-NEXT: lui a1, %hi(.LCPI9_0)
; RV64I-NEXT: addi a1, a1, %lo(.LCPI9_0)
@@ -923,7 +923,7 @@ define signext i32 @globalVar() nounwind {
; RV32I-NEXT: and a0, s0, a0
; RV32I-NEXT: lui a1, 30667
; RV32I-NEXT: addi a1, a1, 1329
-; RV32I-NEXT: call __mulsi3@plt
+; RV32I-NEXT: call __mulsi3
; RV32I-NEXT: srli a0, a0, 27
; RV32I-NEXT: lui a1, %hi(.LCPI10_0)
; RV32I-NEXT: addi a1, a1, %lo(.LCPI10_0)
@@ -949,7 +949,7 @@ define signext i32 @globalVar() nounwind {
; RV64I-NEXT: and a0, s0, a0
; RV64I-NEXT: lui a1, 30667
; RV64I-NEXT: addiw a1, a1, 1329
-; RV64I-NEXT: call __muldi3@plt
+; RV64I-NEXT: call __muldi3
; RV64I-NEXT: srliw a0, a0, 27
; RV64I-NEXT: lui a1, %hi(.LCPI10_0)
; RV64I-NEXT: addi a1, a1, %lo(.LCPI10_0)
diff --git a/llvm/test/CodeGen/RISCV/div-by-constant.ll b/llvm/test/CodeGen/RISCV/div-by-constant.ll
index bf19bbd8b131..91ac7c5ddae3 100644
--- a/llvm/test/CodeGen/RISCV/div-by-constant.ll
+++ b/llvm/test/CodeGen/RISCV/div-by-constant.ll
@@ -121,7 +121,7 @@ define i64 @udiv64_constant_add(i64 %a) nounwind {
; RV32-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32-NEXT: li a2, 7
; RV32-NEXT: li a3, 0
-; RV32-NEXT: call __udivdi3@plt
+; RV32-NEXT: call __udivdi3
; RV32-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32-NEXT: addi sp, sp, 16
; RV32-NEXT: ret
@@ -383,7 +383,7 @@ define i64 @sdiv64_constant_no_srai(i64 %a) nounwind {
; RV32-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32-NEXT: li a2, 3
; RV32-NEXT: li a3, 0
-; RV32-NEXT: call __divdi3@plt
+; RV32-NEXT: call __divdi3
; RV32-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32-NEXT: addi sp, sp, 16
; RV32-NEXT: ret
@@ -407,7 +407,7 @@ define i64 @sdiv64_constant_srai(i64 %a) nounwind {
; RV32-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32-NEXT: li a2, 5
; RV32-NEXT: li a3, 0
-; RV32-NEXT: call __divdi3@plt
+; RV32-NEXT: call __divdi3
; RV32-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32-NEXT: addi sp, sp, 16
; RV32-NEXT: ret
@@ -432,7 +432,7 @@ define i64 @sdiv64_constant_add_srai(i64 %a) nounwind {
; RV32-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32-NEXT: li a2, 15
; RV32-NEXT: li a3, 0
-; RV32-NEXT: call __divdi3@plt
+; RV32-NEXT: call __divdi3
; RV32-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32-NEXT: addi sp, sp, 16
; RV32-NEXT: ret
@@ -460,7 +460,7 @@ define i64 @sdiv64_constant_sub_srai(i64 %a) nounwind {
; RV32-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32-NEXT: li a2, -3
; RV32-NEXT: li a3, -1
-; RV32-NEXT: call __divdi3@plt
+; RV32-NEXT: call __divdi3
; RV32-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32-NEXT: addi sp, sp, 16
; RV32-NEXT: ret
diff --git a/llvm/test/CodeGen/RISCV/div.ll b/llvm/test/CodeGen/RISCV/div.ll
index c455b439f0b1..99c83b99497d 100644
--- a/llvm/test/CodeGen/RISCV/div.ll
+++ b/llvm/test/CodeGen/RISCV/div.ll
@@ -11,7 +11,7 @@
define i32 @udiv(i32 %a, i32 %b) nounwind {
; RV32I-LABEL: udiv:
; RV32I: # %bb.0:
-; RV32I-NEXT: tail __udivsi3@plt
+; RV32I-NEXT: tail __udivsi3
;
; RV32IM-LABEL: udiv:
; RV32IM: # %bb.0:
@@ -26,7 +26,7 @@ define i32 @udiv(i32 %a, i32 %b) nounwind {
; RV64I-NEXT: srli a0, a0, 32
; RV64I-NEXT: slli a1, a1, 32
; RV64I-NEXT: srli a1, a1, 32
-; RV64I-NEXT: call __udivdi3@plt
+; RV64I-NEXT: call __udivdi3
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
@@ -43,7 +43,7 @@ define i32 @udiv_constant(i32 %a) nounwind {
; RV32I-LABEL: udiv_constant:
; RV32I: # %bb.0:
; RV32I-NEXT: li a1, 5
-; RV32I-NEXT: tail __udivsi3@plt
+; RV32I-NEXT: tail __udivsi3
;
; RV32IM-LABEL: udiv_constant:
; RV32IM: # %bb.0:
@@ -60,7 +60,7 @@ define i32 @udiv_constant(i32 %a) nounwind {
; RV64I-NEXT: slli a0, a0, 32
; RV64I-NEXT: srli a0, a0, 32
; RV64I-NEXT: li a1, 5
-; RV64I-NEXT: call __udivdi3@plt
+; RV64I-NEXT: call __udivdi3
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
@@ -107,7 +107,7 @@ define i32 @udiv_constant_lhs(i32 %a) nounwind {
; RV32I: # %bb.0:
; RV32I-NEXT: mv a1, a0
; RV32I-NEXT: li a0, 10
-; RV32I-NEXT: tail __udivsi3@plt
+; RV32I-NEXT: tail __udivsi3
;
; RV32IM-LABEL: udiv_constant_lhs:
; RV32IM: # %bb.0:
@@ -122,7 +122,7 @@ define i32 @udiv_constant_lhs(i32 %a) nounwind {
; RV64I-NEXT: slli a0, a0, 32
; RV64I-NEXT: srli a1, a0, 32
; RV64I-NEXT: li a0, 10
-; RV64I-NEXT: call __udivdi3@plt
+; RV64I-NEXT: call __udivdi3
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
@@ -141,7 +141,7 @@ define i64 @udiv64(i64 %a, i64 %b) nounwind {
; RV32I: # %bb.0:
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT: call __udivdi3@plt
+; RV32I-NEXT: call __udivdi3
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
@@ -150,14 +150,14 @@ define i64 @udiv64(i64 %a, i64 %b) nounwind {
; RV32IM: # %bb.0:
; RV32IM-NEXT: addi sp, sp, -16
; RV32IM-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IM-NEXT: call __udivdi3@plt
+; RV32IM-NEXT: call __udivdi3
; RV32IM-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32IM-NEXT: addi sp, sp, 16
; RV32IM-NEXT: ret
;
; RV64I-LABEL: udiv64:
; RV64I: # %bb.0:
-; RV64I-NEXT: tail __udivdi3@plt
+; RV64I-NEXT: tail __udivdi3
;
; RV64IM-LABEL: udiv64:
; RV64IM: # %bb.0:
@@ -174,7 +174,7 @@ define i64 @udiv64_constant(i64 %a) nounwind {
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT: li a2, 5
; RV32I-NEXT: li a3, 0
-; RV32I-NEXT: call __udivdi3@plt
+; RV32I-NEXT: call __udivdi3
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
@@ -206,7 +206,7 @@ define i64 @udiv64_constant(i64 %a) nounwind {
; RV64I-LABEL: udiv64_constant:
; RV64I: # %bb.0:
; RV64I-NEXT: li a1, 5
-; RV64I-NEXT: tail __udivdi3@plt
+; RV64I-NEXT: tail __udivdi3
;
; RV64IM-LABEL: udiv64_constant:
; RV64IM: # %bb.0:
@@ -230,7 +230,7 @@ define i64 @udiv64_constant_lhs(i64 %a) nounwind {
; RV32I-NEXT: mv a2, a0
; RV32I-NEXT: li a0, 10
; RV32I-NEXT: li a1, 0
-; RV32I-NEXT: call __udivdi3@plt
+; RV32I-NEXT: call __udivdi3
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
@@ -243,7 +243,7 @@ define i64 @udiv64_constant_lhs(i64 %a) nounwind {
; RV32IM-NEXT: mv a2, a0
; RV32IM-NEXT: li a0, 10
; RV32IM-NEXT: li a1, 0
-; RV32IM-NEXT: call __udivdi3@plt
+; RV32IM-NEXT: call __udivdi3
; RV32IM-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32IM-NEXT: addi sp, sp, 16
; RV32IM-NEXT: ret
@@ -252,7 +252,7 @@ define i64 @udiv64_constant_lhs(i64 %a) nounwind {
; RV64I: # %bb.0:
; RV64I-NEXT: mv a1, a0
; RV64I-NEXT: li a0, 10
-; RV64I-NEXT: tail __udivdi3@plt
+; RV64I-NEXT: tail __udivdi3
;
; RV64IM-LABEL: udiv64_constant_lhs:
; RV64IM: # %bb.0:
@@ -270,7 +270,7 @@ define i8 @udiv8(i8 %a, i8 %b) nounwind {
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT: andi a0, a0, 255
; RV32I-NEXT: andi a1, a1, 255
-; RV32I-NEXT: call __udivsi3@plt
+; RV32I-NEXT: call __udivsi3
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
@@ -288,7 +288,7 @@ define i8 @udiv8(i8 %a, i8 %b) nounwind {
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT: andi a0, a0, 255
; RV64I-NEXT: andi a1, a1, 255
-; RV64I-NEXT: call __udivdi3@plt
+; RV64I-NEXT: call __udivdi3
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
@@ -310,7 +310,7 @@ define i8 @udiv8_constant(i8 %a) nounwind {
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT: andi a0, a0, 255
; RV32I-NEXT: li a1, 5
-; RV32I-NEXT: call __udivsi3@plt
+; RV32I-NEXT: call __udivsi3
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
@@ -329,7 +329,7 @@ define i8 @udiv8_constant(i8 %a) nounwind {
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT: andi a0, a0, 255
; RV64I-NEXT: li a1, 5
-; RV64I-NEXT: call __udivdi3@plt
+; RV64I-NEXT: call __udivdi3
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
@@ -380,7 +380,7 @@ define i8 @udiv8_constant_lhs(i8 %a) nounwind {
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT: andi a1, a0, 255
; RV32I-NEXT: li a0, 10
-; RV32I-NEXT: call __udivsi3@plt
+; RV32I-NEXT: call __udivsi3
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
@@ -398,7 +398,7 @@ define i8 @udiv8_constant_lhs(i8 %a) nounwind {
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT: andi a1, a0, 255
; RV64I-NEXT: li a0, 10
-; RV64I-NEXT: call __udivdi3@plt
+; RV64I-NEXT: call __udivdi3
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
@@ -422,7 +422,7 @@ define i16 @udiv16(i16 %a, i16 %b) nounwind {
; RV32I-NEXT: addi a2, a2, -1
; RV32I-NEXT: and a0, a0, a2
; RV32I-NEXT: and a1, a1, a2
-; RV32I-NEXT: call __udivsi3@plt
+; RV32I-NEXT: call __udivsi3
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
@@ -444,7 +444,7 @@ define i16 @udiv16(i16 %a, i16 %b) nounwind {
; RV64I-NEXT: addiw a2, a2, -1
; RV64I-NEXT: and a0, a0, a2
; RV64I-NEXT: and a1, a1, a2
-; RV64I-NEXT: call __udivdi3@plt
+; RV64I-NEXT: call __udivdi3
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
@@ -469,7 +469,7 @@ define i16 @udiv16_constant(i16 %a) nounwind {
; RV32I-NEXT: slli a0, a0, 16
; RV32I-NEXT: srli a0, a0, 16
; RV32I-NEXT: li a1, 5
-; RV32I-NEXT: call __udivsi3@plt
+; RV32I-NEXT: call __udivsi3
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
@@ -489,7 +489,7 @@ define i16 @udiv16_constant(i16 %a) nounwind {
; RV64I-NEXT: slli a0, a0, 48
; RV64I-NEXT: srli a0, a0, 48
; RV64I-NEXT: li a1, 5
-; RV64I-NEXT: call __udivdi3@plt
+; RV64I-NEXT: call __udivdi3
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
@@ -542,7 +542,7 @@ define i16 @udiv16_constant_lhs(i16 %a) nounwind {
; RV32I-NEXT: slli a0, a0, 16
; RV32I-NEXT: srli a1, a0, 16
; RV32I-NEXT: li a0, 10
-; RV32I-NEXT: call __udivsi3@plt
+; RV32I-NEXT: call __udivsi3
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
@@ -562,7 +562,7 @@ define i16 @udiv16_constant_lhs(i16 %a) nounwind {
; RV64I-NEXT: slli a0, a0, 48
; RV64I-NEXT: srli a1, a0, 48
; RV64I-NEXT: li a0, 10
-; RV64I-NEXT: call __udivdi3@plt
+; RV64I-NEXT: call __udivdi3
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
@@ -581,7 +581,7 @@ define i16 @udiv16_constant_lhs(i16 %a) nounwind {
define i32 @sdiv(i32 %a, i32 %b) nounwind {
; RV32I-LABEL: sdiv:
; RV32I: # %bb.0:
-; RV32I-NEXT: tail __divsi3@plt
+; RV32I-NEXT: tail __divsi3
;
; RV32IM-LABEL: sdiv:
; RV32IM: # %bb.0:
@@ -594,7 +594,7 @@ define i32 @sdiv(i32 %a, i32 %b) nounwind {
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT: sext.w a0, a0
; RV64I-NEXT: sext.w a1, a1
-; RV64I-NEXT: call __divdi3@plt
+; RV64I-NEXT: call __divdi3
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
@@ -611,7 +611,7 @@ define i32 @sdiv_constant(i32 %a) nounwind {
; RV32I-LABEL: sdiv_constant:
; RV32I: # %bb.0:
; RV32I-NEXT: li a1, 5
-; RV32I-NEXT: tail __divsi3@plt
+; RV32I-NEXT: tail __divsi3
;
; RV32IM-LABEL: sdiv_constant:
; RV32IM: # %bb.0:
@@ -629,7 +629,7 @@ define i32 @sdiv_constant(i32 %a) nounwind {
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT: sext.w a0, a0
; RV64I-NEXT: li a1, 5
-; RV64I-NEXT: call __divdi3@plt
+; RV64I-NEXT: call __divdi3
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
@@ -725,7 +725,7 @@ define i32 @sdiv_constant_lhs(i32 %a) nounwind {
; RV32I: # %bb.0:
; RV32I-NEXT: mv a1, a0
; RV32I-NEXT: li a0, -10
-; RV32I-NEXT: tail __divsi3@plt
+; RV32I-NEXT: tail __divsi3
;
; RV32IM-LABEL: sdiv_constant_lhs:
; RV32IM: # %bb.0:
@@ -739,7 +739,7 @@ define i32 @sdiv_constant_lhs(i32 %a) nounwind {
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT: sext.w a1, a0
; RV64I-NEXT: li a0, -10
-; RV64I-NEXT: call __divdi3@plt
+; RV64I-NEXT: call __divdi3
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
@@ -758,7 +758,7 @@ define i64 @sdiv64(i64 %a, i64 %b) nounwind {
; RV32I: # %bb.0:
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT: call __divdi3@plt
+; RV32I-NEXT: call __divdi3
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
@@ -767,14 +767,14 @@ define i64 @sdiv64(i64 %a, i64 %b) nounwind {
; RV32IM: # %bb.0:
; RV32IM-NEXT: addi sp, sp, -16
; RV32IM-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IM-NEXT: call __divdi3@plt
+; RV32IM-NEXT: call __divdi3
; RV32IM-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32IM-NEXT: addi sp, sp, 16
; RV32IM-NEXT: ret
;
; RV64I-LABEL: sdiv64:
; RV64I: # %bb.0:
-; RV64I-NEXT: tail __divdi3@plt
+; RV64I-NEXT: tail __divdi3
;
; RV64IM-LABEL: sdiv64:
; RV64IM: # %bb.0:
@@ -791,7 +791,7 @@ define i64 @sdiv64_constant(i64 %a) nounwind {
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT: li a2, 5
; RV32I-NEXT: li a3, 0
-; RV32I-NEXT: call __divdi3@plt
+; RV32I-NEXT: call __divdi3
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
@@ -802,7 +802,7 @@ define i64 @sdiv64_constant(i64 %a) nounwind {
; RV32IM-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32IM-NEXT: li a2, 5
; RV32IM-NEXT: li a3, 0
-; RV32IM-NEXT: call __divdi3@plt
+; RV32IM-NEXT: call __divdi3
; RV32IM-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32IM-NEXT: addi sp, sp, 16
; RV32IM-NEXT: ret
@@ -810,7 +810,7 @@ define i64 @sdiv64_constant(i64 %a) nounwind {
; RV64I-LABEL: sdiv64_constant:
; RV64I: # %bb.0:
; RV64I-NEXT: li a1, 5
-; RV64I-NEXT: tail __divdi3@plt
+; RV64I-NEXT: tail __divdi3
;
; RV64IM-LABEL: sdiv64_constant:
; RV64IM: # %bb.0:
@@ -834,7 +834,7 @@ define i64 @sdiv64_constant_lhs(i64 %a) nounwind {
; RV32I-NEXT: mv a2, a0
; RV32I-NEXT: li a0, 10
; RV32I-NEXT: li a1, 0
-; RV32I-NEXT: call __divdi3@plt
+; RV32I-NEXT: call __divdi3
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
@@ -847,7 +847,7 @@ define i64 @sdiv64_constant_lhs(i64 %a) nounwind {
; RV32IM-NEXT: mv a2, a0
; RV32IM-NEXT: li a0, 10
; RV32IM-NEXT: li a1, 0
-; RV32IM-NEXT: call __divdi3@plt
+; RV32IM-NEXT: call __divdi3
; RV32IM-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32IM-NEXT: addi sp, sp, 16
; RV32IM-NEXT: ret
@@ -856,7 +856,7 @@ define i64 @sdiv64_constant_lhs(i64 %a) nounwind {
; RV64I: # %bb.0:
; RV64I-NEXT: mv a1, a0
; RV64I-NEXT: li a0, 10
-; RV64I-NEXT: tail __divdi3@plt
+; RV64I-NEXT: tail __divdi3
;
; RV64IM-LABEL: sdiv64_constant_lhs:
; RV64IM: # %bb.0:
@@ -878,7 +878,7 @@ define i64 @sdiv64_sext_operands(i32 %a, i32 %b) nounwind {
; RV32I-NEXT: mv a2, a1
; RV32I-NEXT: srai a1, a0, 31
; RV32I-NEXT: srai a3, a2, 31
-; RV32I-NEXT: call __divdi3@plt
+; RV32I-NEXT: call __divdi3
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
@@ -890,7 +890,7 @@ define i64 @sdiv64_sext_operands(i32 %a, i32 %b) nounwind {
; RV32IM-NEXT: mv a2, a1
; RV32IM-NEXT: srai a1, a0, 31
; RV32IM-NEXT: srai a3, a2, 31
-; RV32IM-NEXT: call __divdi3@plt
+; RV32IM-NEXT: call __divdi3
; RV32IM-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32IM-NEXT: addi sp, sp, 16
; RV32IM-NEXT: ret
@@ -899,7 +899,7 @@ define i64 @sdiv64_sext_operands(i32 %a, i32 %b) nounwind {
; RV64I: # %bb.0:
; RV64I-NEXT: sext.w a0, a0
; RV64I-NEXT: sext.w a1, a1
-; RV64I-NEXT: tail __divdi3@plt
+; RV64I-NEXT: tail __divdi3
;
; RV64IM-LABEL: sdiv64_sext_operands:
; RV64IM: # %bb.0:
@@ -922,7 +922,7 @@ define i8 @sdiv8(i8 %a, i8 %b) nounwind {
; RV32I-NEXT: srai a0, a0, 24
; RV32I-NEXT: slli a1, a1, 24
; RV32I-NEXT: srai a1, a1, 24
-; RV32I-NEXT: call __divsi3@plt
+; RV32I-NEXT: call __divsi3
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
@@ -944,7 +944,7 @@ define i8 @sdiv8(i8 %a, i8 %b) nounwind {
; RV64I-NEXT: srai a0, a0, 56
; RV64I-NEXT: slli a1, a1, 56
; RV64I-NEXT: srai a1, a1, 56
-; RV64I-NEXT: call __divdi3@plt
+; RV64I-NEXT: call __divdi3
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
@@ -969,7 +969,7 @@ define i8 @sdiv8_constant(i8 %a) nounwind {
; RV32I-NEXT: slli a0, a0, 24
; RV32I-NEXT: srai a0, a0, 24
; RV32I-NEXT: li a1, 5
-; RV32I-NEXT: call __divsi3@plt
+; RV32I-NEXT: call __divsi3
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
@@ -993,7 +993,7 @@ define i8 @sdiv8_constant(i8 %a) nounwind {
; RV64I-NEXT: slli a0, a0, 56
; RV64I-NEXT: srai a0, a0, 56
; RV64I-NEXT: li a1, 5
-; RV64I-NEXT: call __divdi3@plt
+; RV64I-NEXT: call __divdi3
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
@@ -1069,7 +1069,7 @@ define i8 @sdiv8_constant_lhs(i8 %a) nounwind {
; RV32I-NEXT: slli a0, a0, 24
; RV32I-NEXT: srai a1, a0, 24
; RV32I-NEXT: li a0, -10
-; RV32I-NEXT: call __divsi3@plt
+; RV32I-NEXT: call __divsi3
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
@@ -1089,7 +1089,7 @@ define i8 @sdiv8_constant_lhs(i8 %a) nounwind {
; RV64I-NEXT: slli a0, a0, 56
; RV64I-NEXT: srai a1, a0, 56
; RV64I-NEXT: li a0, -10
-; RV64I-NEXT: call __divdi3@plt
+; RV64I-NEXT: call __divdi3
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
@@ -1114,7 +1114,7 @@ define i16 @sdiv16(i16 %a, i16 %b) nounwind {
; RV32I-NEXT: srai a0, a0, 16
; RV32I-NEXT: slli a1, a1, 16
; RV32I-NEXT: srai a1, a1, 16
-; RV32I-NEXT: call __divsi3@plt
+; RV32I-NEXT: call __divsi3
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
@@ -1136,7 +1136,7 @@ define i16 @sdiv16(i16 %a, i16 %b) nounwind {
; RV64I-NEXT: srai a0, a0, 48
; RV64I-NEXT: slli a1, a1, 48
; RV64I-NEXT: srai a1, a1, 48
-; RV64I-NEXT: call __divdi3@plt
+; RV64I-NEXT: call __divdi3
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
@@ -1161,7 +1161,7 @@ define i16 @sdiv16_constant(i16 %a) nounwind {
; RV32I-NEXT: slli a0, a0, 16
; RV32I-NEXT: srai a0, a0, 16
; RV32I-NEXT: li a1, 5
-; RV32I-NEXT: call __divsi3@plt
+; RV32I-NEXT: call __divsi3
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
@@ -1185,7 +1185,7 @@ define i16 @sdiv16_constant(i16 %a) nounwind {
; RV64I-NEXT: slli a0, a0, 48
; RV64I-NEXT: srai a0, a0, 48
; RV64I-NEXT: li a1, 5
-; RV64I-NEXT: call __divdi3@plt
+; RV64I-NEXT: call __divdi3
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
@@ -1261,7 +1261,7 @@ define i16 @sdiv16_constant_lhs(i16 %a) nounwind {
; RV32I-NEXT: slli a0, a0, 16
; RV32I-NEXT: srai a1, a0, 16
; RV32I-NEXT: li a0, -10
-; RV32I-NEXT: call __divsi3@plt
+; RV32I-NEXT: call __divsi3
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
@@ -1281,7 +1281,7 @@ define i16 @sdiv16_constant_lhs(i16 %a) nounwind {
; RV64I-NEXT: slli a0, a0, 48
; RV64I-NEXT: srai a1, a0, 48
; RV64I-NEXT: li a0, -10
-; RV64I-NEXT: call __divdi3@plt
+; RV64I-NEXT: call __divdi3
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
diff --git a/llvm/test/CodeGen/RISCV/double-arith-strict.ll b/llvm/test/CodeGen/RISCV/double-arith-strict.ll
index c324cc88f84b..186175537772 100644
--- a/llvm/test/CodeGen/RISCV/double-arith-strict.ll
+++ b/llvm/test/CodeGen/RISCV/double-arith-strict.ll
@@ -50,7 +50,7 @@ define double @fadd_d(double %a, double %b) nounwind strictfp {
; RV32I: # %bb.0:
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT: call __adddf3@plt
+; RV32I-NEXT: call __adddf3
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
@@ -59,7 +59,7 @@ define double @fadd_d(double %a, double %b) nounwind strictfp {
; RV64I: # %bb.0:
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT: call __adddf3@plt
+; RV64I-NEXT: call __adddf3
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
@@ -102,7 +102,7 @@ define double @fsub_d(double %a, double %b) nounwind strictfp {
; RV32I: # %bb.0:
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT: call __subdf3@plt
+; RV32I-NEXT: call __subdf3
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
@@ -111,7 +111,7 @@ define double @fsub_d(double %a, double %b) nounwind strictfp {
; RV64I: # %bb.0:
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT: call __subdf3@plt
+; RV64I-NEXT: call __subdf3
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
@@ -154,7 +154,7 @@ define double @fmul_d(double %a, double %b) nounwind strictfp {
; RV32I: # %bb.0:
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT: call __muldf3@plt
+; RV32I-NEXT: call __muldf3
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
@@ -163,7 +163,7 @@ define double @fmul_d(double %a, double %b) nounwind strictfp {
; RV64I: # %bb.0:
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT: call __muldf3@plt
+; RV64I-NEXT: call __muldf3
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
@@ -206,7 +206,7 @@ define double @fdiv_d(double %a, double %b) nounwind strictfp {
; RV32I: # %bb.0:
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT: call __divdf3@plt
+; RV32I-NEXT: call __divdf3
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
@@ -215,7 +215,7 @@ define double @fdiv_d(double %a, double %b) nounwind strictfp {
; RV64I: # %bb.0:
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT: call __divdf3@plt
+; RV64I-NEXT: call __divdf3
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
@@ -254,7 +254,7 @@ define double @fsqrt_d(double %a) nounwind strictfp {
; RV32I: # %bb.0:
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT: call sqrt@plt
+; RV32I-NEXT: call sqrt
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
@@ -263,7 +263,7 @@ define double @fsqrt_d(double %a) nounwind strictfp {
; RV64I: # %bb.0:
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT: call sqrt@plt
+; RV64I-NEXT: call sqrt
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
@@ -277,7 +277,7 @@ define double @fmin_d(double %a, double %b) nounwind strictfp {
; RV32IFD: # %bb.0:
; RV32IFD-NEXT: addi sp, sp, -16
; RV32IFD-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IFD-NEXT: call fmin@plt
+; RV32IFD-NEXT: call fmin
; RV32IFD-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32IFD-NEXT: addi sp, sp, 16
; RV32IFD-NEXT: ret
@@ -286,7 +286,7 @@ define double @fmin_d(double %a, double %b) nounwind strictfp {
; RV64IFD: # %bb.0:
; RV64IFD-NEXT: addi sp, sp, -16
; RV64IFD-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
-; RV64IFD-NEXT: call fmin@plt
+; RV64IFD-NEXT: call fmin
; RV64IFD-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64IFD-NEXT: addi sp, sp, 16
; RV64IFD-NEXT: ret
@@ -295,7 +295,7 @@ define double @fmin_d(double %a, double %b) nounwind strictfp {
; RV32IZFINXZDINX: # %bb.0:
; RV32IZFINXZDINX-NEXT: addi sp, sp, -16
; RV32IZFINXZDINX-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IZFINXZDINX-NEXT: call fmin@plt
+; RV32IZFINXZDINX-NEXT: call fmin
; RV32IZFINXZDINX-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32IZFINXZDINX-NEXT: addi sp, sp, 16
; RV32IZFINXZDINX-NEXT: ret
@@ -304,7 +304,7 @@ define double @fmin_d(double %a, double %b) nounwind strictfp {
; RV64IZFINXZDINX: # %bb.0:
; RV64IZFINXZDINX-NEXT: addi sp, sp, -16
; RV64IZFINXZDINX-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
-; RV64IZFINXZDINX-NEXT: call fmin@plt
+; RV64IZFINXZDINX-NEXT: call fmin
; RV64IZFINXZDINX-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64IZFINXZDINX-NEXT: addi sp, sp, 16
; RV64IZFINXZDINX-NEXT: ret
@@ -313,7 +313,7 @@ define double @fmin_d(double %a, double %b) nounwind strictfp {
; RV32I: # %bb.0:
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT: call fmin@plt
+; RV32I-NEXT: call fmin
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
@@ -322,7 +322,7 @@ define double @fmin_d(double %a, double %b) nounwind strictfp {
; RV64I: # %bb.0:
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT: call fmin@plt
+; RV64I-NEXT: call fmin
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
@@ -336,7 +336,7 @@ define double @fmax_d(double %a, double %b) nounwind strictfp {
; RV32IFD: # %bb.0:
; RV32IFD-NEXT: addi sp, sp, -16
; RV32IFD-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IFD-NEXT: call fmax@plt
+; RV32IFD-NEXT: call fmax
; RV32IFD-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32IFD-NEXT: addi sp, sp, 16
; RV32IFD-NEXT: ret
@@ -345,7 +345,7 @@ define double @fmax_d(double %a, double %b) nounwind strictfp {
; RV64IFD: # %bb.0:
; RV64IFD-NEXT: addi sp, sp, -16
; RV64IFD-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
-; RV64IFD-NEXT: call fmax@plt
+; RV64IFD-NEXT: call fmax
; RV64IFD-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64IFD-NEXT: addi sp, sp, 16
; RV64IFD-NEXT: ret
@@ -354,7 +354,7 @@ define double @fmax_d(double %a, double %b) nounwind strictfp {
; RV32IZFINXZDINX: # %bb.0:
; RV32IZFINXZDINX-NEXT: addi sp, sp, -16
; RV32IZFINXZDINX-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IZFINXZDINX-NEXT: call fmax@plt
+; RV32IZFINXZDINX-NEXT: call fmax
; RV32IZFINXZDINX-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32IZFINXZDINX-NEXT: addi sp, sp, 16
; RV32IZFINXZDINX-NEXT: ret
@@ -363,7 +363,7 @@ define double @fmax_d(double %a, double %b) nounwind strictfp {
; RV64IZFINXZDINX: # %bb.0:
; RV64IZFINXZDINX-NEXT: addi sp, sp, -16
; RV64IZFINXZDINX-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
-; RV64IZFINXZDINX-NEXT: call fmax@plt
+; RV64IZFINXZDINX-NEXT: call fmax
; RV64IZFINXZDINX-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64IZFINXZDINX-NEXT: addi sp, sp, 16
; RV64IZFINXZDINX-NEXT: ret
@@ -372,7 +372,7 @@ define double @fmax_d(double %a, double %b) nounwind strictfp {
; RV32I: # %bb.0:
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT: call fmax@plt
+; RV32I-NEXT: call fmax
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
@@ -381,7 +381,7 @@ define double @fmax_d(double %a, double %b) nounwind strictfp {
; RV64I: # %bb.0:
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT: call fmax@plt
+; RV64I-NEXT: call fmax
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
@@ -428,7 +428,7 @@ define double @fmadd_d(double %a, double %b, double %c) nounwind strictfp {
; RV32I: # %bb.0:
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT: call fma@plt
+; RV32I-NEXT: call fma
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
@@ -437,7 +437,7 @@ define double @fmadd_d(double %a, double %b, double %c) nounwind strictfp {
; RV64I: # %bb.0:
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT: call fma@plt
+; RV64I-NEXT: call fma
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
@@ -508,7 +508,7 @@ define double @fmsub_d(double %a, double %b, double %c) nounwind strictfp {
; RV32I-NEXT: mv a1, a5
; RV32I-NEXT: li a2, 0
; RV32I-NEXT: li a3, 0
-; RV32I-NEXT: call __adddf3@plt
+; RV32I-NEXT: call __adddf3
; RV32I-NEXT: mv a4, a0
; RV32I-NEXT: lui a5, 524288
; RV32I-NEXT: xor a5, a1, a5
@@ -516,7 +516,7 @@ define double @fmsub_d(double %a, double %b, double %c) nounwind strictfp {
; RV32I-NEXT: mv a1, s2
; RV32I-NEXT: mv a2, s1
; RV32I-NEXT: mv a3, s0
-; RV32I-NEXT: call fma@plt
+; RV32I-NEXT: call fma
; RV32I-NEXT: lw ra, 28(sp) # 4-byte Folded Reload
; RV32I-NEXT: lw s0, 24(sp) # 4-byte Folded Reload
; RV32I-NEXT: lw s1, 20(sp) # 4-byte Folded Reload
@@ -535,13 +535,13 @@ define double @fmsub_d(double %a, double %b, double %c) nounwind strictfp {
; RV64I-NEXT: mv s1, a0
; RV64I-NEXT: mv a0, a2
; RV64I-NEXT: li a1, 0
-; RV64I-NEXT: call __adddf3@plt
+; RV64I-NEXT: call __adddf3
; RV64I-NEXT: li a1, -1
; RV64I-NEXT: slli a1, a1, 63
; RV64I-NEXT: xor a2, a0, a1
; RV64I-NEXT: mv a0, s1
; RV64I-NEXT: mv a1, s0
-; RV64I-NEXT: call fma@plt
+; RV64I-NEXT: call fma
; RV64I-NEXT: ld ra, 24(sp) # 8-byte Folded Reload
; RV64I-NEXT: ld s0, 16(sp) # 8-byte Folded Reload
; RV64I-NEXT: ld s1, 8(sp) # 8-byte Folded Reload
@@ -619,14 +619,14 @@ define double @fnmadd_d(double %a, double %b, double %c) nounwind strictfp {
; RV32I-NEXT: mv s3, a2
; RV32I-NEXT: li a2, 0
; RV32I-NEXT: li a3, 0
-; RV32I-NEXT: call __adddf3@plt
+; RV32I-NEXT: call __adddf3
; RV32I-NEXT: mv s4, a0
; RV32I-NEXT: mv s5, a1
; RV32I-NEXT: mv a0, s1
; RV32I-NEXT: mv a1, s0
; RV32I-NEXT: li a2, 0
; RV32I-NEXT: li a3, 0
-; RV32I-NEXT: call __adddf3@plt
+; RV32I-NEXT: call __adddf3
; RV32I-NEXT: mv a4, a0
; RV32I-NEXT: lui a5, 524288
; RV32I-NEXT: xor a2, s5, a5
@@ -635,7 +635,7 @@ define double @fnmadd_d(double %a, double %b, double %c) nounwind strictfp {
; RV32I-NEXT: mv a1, a2
; RV32I-NEXT: mv a2, s3
; RV32I-NEXT: mv a3, s2
-; RV32I-NEXT: call fma@plt
+; RV32I-NEXT: call fma
; RV32I-NEXT: lw ra, 28(sp) # 4-byte Folded Reload
; RV32I-NEXT: lw s0, 24(sp) # 4-byte Folded Reload
; RV32I-NEXT: lw s1, 20(sp) # 4-byte Folded Reload
@@ -656,18 +656,18 @@ define double @fnmadd_d(double %a, double %b, double %c) nounwind strictfp {
; RV64I-NEXT: mv s0, a2
; RV64I-NEXT: mv s1, a1
; RV64I-NEXT: li a1, 0
-; RV64I-NEXT: call __adddf3@plt
+; RV64I-NEXT: call __adddf3
; RV64I-NEXT: mv s2, a0
; RV64I-NEXT: mv a0, s0
; RV64I-NEXT: li a1, 0
-; RV64I-NEXT: call __adddf3@plt
+; RV64I-NEXT: call __adddf3
; RV64I-NEXT: li a1, -1
; RV64I-NEXT: slli a2, a1, 63
; RV64I-NEXT: xor a1, s2, a2
; RV64I-NEXT: xor a2, a0, a2
; RV64I-NEXT: mv a0, a1
; RV64I-NEXT: mv a1, s1
-; RV64I-NEXT: call fma@plt
+; RV64I-NEXT: call fma
; RV64I-NEXT: ld ra, 24(sp) # 8-byte Folded Reload
; RV64I-NEXT: ld s0, 16(sp) # 8-byte Folded Reload
; RV64I-NEXT: ld s1, 8(sp) # 8-byte Folded Reload
@@ -750,14 +750,14 @@ define double @fnmadd_d_2(double %a, double %b, double %c) nounwind strictfp {
; RV32I-NEXT: mv a1, a3
; RV32I-NEXT: li a2, 0
; RV32I-NEXT: li a3, 0
-; RV32I-NEXT: call __adddf3@plt
+; RV32I-NEXT: call __adddf3
; RV32I-NEXT: mv s4, a0
; RV32I-NEXT: mv s5, a1
; RV32I-NEXT: mv a0, s1
; RV32I-NEXT: mv a1, s0
; RV32I-NEXT: li a2, 0
; RV32I-NEXT: li a3, 0
-; RV32I-NEXT: call __adddf3@plt
+; RV32I-NEXT: call __adddf3
; RV32I-NEXT: mv a4, a0
; RV32I-NEXT: lui a5, 524288
; RV32I-NEXT: xor a3, s5, a5
@@ -765,7 +765,7 @@ define double @fnmadd_d_2(double %a, double %b, double %c) nounwind strictfp {
; RV32I-NEXT: mv a0, s3
; RV32I-NEXT: mv a1, s2
; RV32I-NEXT: mv a2, s4
-; RV32I-NEXT: call fma@plt
+; RV32I-NEXT: call fma
; RV32I-NEXT: lw ra, 28(sp) # 4-byte Folded Reload
; RV32I-NEXT: lw s0, 24(sp) # 4-byte Folded Reload
; RV32I-NEXT: lw s1, 20(sp) # 4-byte Folded Reload
@@ -787,17 +787,17 @@ define double @fnmadd_d_2(double %a, double %b, double %c) nounwind strictfp {
; RV64I-NEXT: mv s1, a0
; RV64I-NEXT: mv a0, a1
; RV64I-NEXT: li a1, 0
-; RV64I-NEXT: call __adddf3@plt
+; RV64I-NEXT: call __adddf3
; RV64I-NEXT: mv s2, a0
; RV64I-NEXT: mv a0, s0
; RV64I-NEXT: li a1, 0
-; RV64I-NEXT: call __adddf3@plt
+; RV64I-NEXT: call __adddf3
; RV64I-NEXT: li a1, -1
; RV64I-NEXT: slli a2, a1, 63
; RV64I-NEXT: xor a1, s2, a2
; RV64I-NEXT: xor a2, a0, a2
; RV64I-NEXT: mv a0, s1
-; RV64I-NEXT: call fma@plt
+; RV64I-NEXT: call fma
; RV64I-NEXT: ld ra, 24(sp) # 8-byte Folded Reload
; RV64I-NEXT: ld s0, 16(sp) # 8-byte Folded Reload
; RV64I-NEXT: ld s1, 8(sp) # 8-byte Folded Reload
@@ -872,14 +872,14 @@ define double @fnmsub_d(double %a, double %b, double %c) nounwind strictfp {
; RV32I-NEXT: mv s3, a2
; RV32I-NEXT: li a2, 0
; RV32I-NEXT: li a3, 0
-; RV32I-NEXT: call __adddf3@plt
+; RV32I-NEXT: call __adddf3
; RV32I-NEXT: lui a2, 524288
; RV32I-NEXT: xor a1, a1, a2
; RV32I-NEXT: mv a2, s3
; RV32I-NEXT: mv a3, s2
; RV32I-NEXT: mv a4, s1
; RV32I-NEXT: mv a5, s0
-; RV32I-NEXT: call fma@plt
+; RV32I-NEXT: call fma
; RV32I-NEXT: lw ra, 28(sp) # 4-byte Folded Reload
; RV32I-NEXT: lw s0, 24(sp) # 4-byte Folded Reload
; RV32I-NEXT: lw s1, 20(sp) # 4-byte Folded Reload
@@ -897,13 +897,13 @@ define double @fnmsub_d(double %a, double %b, double %c) nounwind strictfp {
; RV64I-NEXT: mv s0, a2
; RV64I-NEXT: mv s1, a1
; RV64I-NEXT: li a1, 0
-; RV64I-NEXT: call __adddf3@plt
+; RV64I-NEXT: call __adddf3
; RV64I-NEXT: li a1, -1
; RV64I-NEXT: slli a1, a1, 63
; RV64I-NEXT: xor a0, a0, a1
; RV64I-NEXT: mv a1, s1
; RV64I-NEXT: mv a2, s0
-; RV64I-NEXT: call fma@plt
+; RV64I-NEXT: call fma
; RV64I-NEXT: ld ra, 24(sp) # 8-byte Folded Reload
; RV64I-NEXT: ld s0, 16(sp) # 8-byte Folded Reload
; RV64I-NEXT: ld s1, 8(sp) # 8-byte Folded Reload
@@ -977,7 +977,7 @@ define double @fnmsub_d_2(double %a, double %b, double %c) nounwind strictfp {
; RV32I-NEXT: mv a1, a3
; RV32I-NEXT: li a2, 0
; RV32I-NEXT: li a3, 0
-; RV32I-NEXT: call __adddf3@plt
+; RV32I-NEXT: call __adddf3
; RV32I-NEXT: mv a2, a0
; RV32I-NEXT: lui a3, 524288
; RV32I-NEXT: xor a3, a1, a3
@@ -985,7 +985,7 @@ define double @fnmsub_d_2(double %a, double %b, double %c) nounwind strictfp {
; RV32I-NEXT: mv a1, s2
; RV32I-NEXT: mv a4, s1
; RV32I-NEXT: mv a5, s0
-; RV32I-NEXT: call fma@plt
+; RV32I-NEXT: call fma
; RV32I-NEXT: lw ra, 28(sp) # 4-byte Folded Reload
; RV32I-NEXT: lw s0, 24(sp) # 4-byte Folded Reload
; RV32I-NEXT: lw s1, 20(sp) # 4-byte Folded Reload
@@ -1004,13 +1004,13 @@ define double @fnmsub_d_2(double %a, double %b, double %c) nounwind strictfp {
; RV64I-NEXT: mv s1, a0
; RV64I-NEXT: mv a0, a1
; RV64I-NEXT: li a1, 0
-; RV64I-NEXT: call __adddf3@plt
+; RV64I-NEXT: call __adddf3
; RV64I-NEXT: li a1, -1
; RV64I-NEXT: slli a1, a1, 63
; RV64I-NEXT: xor a1, a0, a1
; RV64I-NEXT: mv a0, s1
; RV64I-NEXT: mv a2, s0
-; RV64I-NEXT: call fma@plt
+; RV64I-NEXT: call fma
; RV64I-NEXT: ld ra, 24(sp) # 8-byte Folded Reload
; RV64I-NEXT: ld s0, 16(sp) # 8-byte Folded Reload
; RV64I-NEXT: ld s1, 8(sp) # 8-byte Folded Reload
diff --git a/llvm/test/CodeGen/RISCV/double-arith.ll b/llvm/test/CodeGen/RISCV/double-arith.ll
index 7e2964ef68b1..82ddf06187d3 100644
--- a/llvm/test/CodeGen/RISCV/double-arith.ll
+++ b/llvm/test/CodeGen/RISCV/double-arith.ll
@@ -51,7 +51,7 @@ define double @fadd_d(double %a, double %b) nounwind {
; RV32I: # %bb.0:
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT: call __adddf3@plt
+; RV32I-NEXT: call __adddf3
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
@@ -60,7 +60,7 @@ define double @fadd_d(double %a, double %b) nounwind {
; RV64I: # %bb.0:
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT: call __adddf3@plt
+; RV64I-NEXT: call __adddf3
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
@@ -102,7 +102,7 @@ define double @fsub_d(double %a, double %b) nounwind {
; RV32I: # %bb.0:
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT: call __subdf3@plt
+; RV32I-NEXT: call __subdf3
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
@@ -111,7 +111,7 @@ define double @fsub_d(double %a, double %b) nounwind {
; RV64I: # %bb.0:
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT: call __subdf3@plt
+; RV64I-NEXT: call __subdf3
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
@@ -153,7 +153,7 @@ define double @fmul_d(double %a, double %b) nounwind {
; RV32I: # %bb.0:
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT: call __muldf3@plt
+; RV32I-NEXT: call __muldf3
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
@@ -162,7 +162,7 @@ define double @fmul_d(double %a, double %b) nounwind {
; RV64I: # %bb.0:
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT: call __muldf3@plt
+; RV64I-NEXT: call __muldf3
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
@@ -204,7 +204,7 @@ define double @fdiv_d(double %a, double %b) nounwind {
; RV32I: # %bb.0:
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT: call __divdf3@plt
+; RV32I-NEXT: call __divdf3
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
@@ -213,7 +213,7 @@ define double @fdiv_d(double %a, double %b) nounwind {
; RV64I: # %bb.0:
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT: call __divdf3@plt
+; RV64I-NEXT: call __divdf3
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
@@ -253,7 +253,7 @@ define double @fsqrt_d(double %a) nounwind {
; RV32I: # %bb.0:
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT: call sqrt@plt
+; RV32I-NEXT: call sqrt
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
@@ -262,7 +262,7 @@ define double @fsqrt_d(double %a) nounwind {
; RV64I: # %bb.0:
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT: call sqrt@plt
+; RV64I-NEXT: call sqrt
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
@@ -359,11 +359,11 @@ define i32 @fneg_d(double %a, double %b) nounwind {
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT: mv a2, a0
; RV32I-NEXT: mv a3, a1
-; RV32I-NEXT: call __adddf3@plt
+; RV32I-NEXT: call __adddf3
; RV32I-NEXT: lui a3, 524288
; RV32I-NEXT: xor a3, a1, a3
; RV32I-NEXT: mv a2, a0
-; RV32I-NEXT: call __eqdf2@plt
+; RV32I-NEXT: call __eqdf2
; RV32I-NEXT: seqz a0, a0
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
@@ -374,11 +374,11 @@ define i32 @fneg_d(double %a, double %b) nounwind {
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT: mv a1, a0
-; RV64I-NEXT: call __adddf3@plt
+; RV64I-NEXT: call __adddf3
; RV64I-NEXT: li a1, -1
; RV64I-NEXT: slli a1, a1, 63
; RV64I-NEXT: xor a1, a0, a1
-; RV64I-NEXT: call __eqdf2@plt
+; RV64I-NEXT: call __eqdf2
; RV64I-NEXT: seqz a0, a0
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
@@ -494,12 +494,12 @@ define double @fabs_d(double %a, double %b) nounwind {
; RV32I: # %bb.0:
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT: call __adddf3@plt
+; RV32I-NEXT: call __adddf3
; RV32I-NEXT: mv a3, a1
; RV32I-NEXT: slli a1, a1, 1
; RV32I-NEXT: srli a1, a1, 1
; RV32I-NEXT: mv a2, a0
-; RV32I-NEXT: call __adddf3@plt
+; RV32I-NEXT: call __adddf3
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
@@ -508,11 +508,11 @@ define double @fabs_d(double %a, double %b) nounwind {
; RV64I: # %bb.0:
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT: call __adddf3@plt
+; RV64I-NEXT: call __adddf3
; RV64I-NEXT: mv a1, a0
; RV64I-NEXT: slli a0, a0, 1
; RV64I-NEXT: srli a0, a0, 1
-; RV64I-NEXT: call __adddf3@plt
+; RV64I-NEXT: call __adddf3
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
@@ -558,7 +558,7 @@ define double @fmin_d(double %a, double %b) nounwind {
; RV32I: # %bb.0:
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT: call fmin@plt
+; RV32I-NEXT: call fmin
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
@@ -567,7 +567,7 @@ define double @fmin_d(double %a, double %b) nounwind {
; RV64I: # %bb.0:
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT: call fmin@plt
+; RV64I-NEXT: call fmin
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
@@ -611,7 +611,7 @@ define double @fmax_d(double %a, double %b) nounwind {
; RV32I: # %bb.0:
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT: call fmax@plt
+; RV32I-NEXT: call fmax
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
@@ -620,7 +620,7 @@ define double @fmax_d(double %a, double %b) nounwind {
; RV64I: # %bb.0:
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT: call fmax@plt
+; RV64I-NEXT: call fmax
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
@@ -668,7 +668,7 @@ define double @fmadd_d(double %a, double %b, double %c) nounwind {
; RV32I: # %bb.0:
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT: call fma@plt
+; RV32I-NEXT: call fma
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
@@ -677,7 +677,7 @@ define double @fmadd_d(double %a, double %b, double %c) nounwind {
; RV64I: # %bb.0:
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT: call fma@plt
+; RV64I-NEXT: call fma
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
@@ -747,7 +747,7 @@ define double @fmsub_d(double %a, double %b, double %c) nounwind {
; RV32I-NEXT: mv a1, a5
; RV32I-NEXT: li a2, 0
; RV32I-NEXT: li a3, 0
-; RV32I-NEXT: call __adddf3@plt
+; RV32I-NEXT: call __adddf3
; RV32I-NEXT: mv a4, a0
; RV32I-NEXT: lui a5, 524288
; RV32I-NEXT: xor a5, a1, a5
@@ -755,7 +755,7 @@ define double @fmsub_d(double %a, double %b, double %c) nounwind {
; RV32I-NEXT: mv a1, s2
; RV32I-NEXT: mv a2, s1
; RV32I-NEXT: mv a3, s0
-; RV32I-NEXT: call fma@plt
+; RV32I-NEXT: call fma
; RV32I-NEXT: lw ra, 28(sp) # 4-byte Folded Reload
; RV32I-NEXT: lw s0, 24(sp) # 4-byte Folded Reload
; RV32I-NEXT: lw s1, 20(sp) # 4-byte Folded Reload
@@ -774,13 +774,13 @@ define double @fmsub_d(double %a, double %b, double %c) nounwind {
; RV64I-NEXT: mv s1, a0
; RV64I-NEXT: mv a0, a2
; RV64I-NEXT: li a1, 0
-; RV64I-NEXT: call __adddf3@plt
+; RV64I-NEXT: call __adddf3
; RV64I-NEXT: li a1, -1
; RV64I-NEXT: slli a1, a1, 63
; RV64I-NEXT: xor a2, a0, a1
; RV64I-NEXT: mv a0, s1
; RV64I-NEXT: mv a1, s0
-; RV64I-NEXT: call fma@plt
+; RV64I-NEXT: call fma
; RV64I-NEXT: ld ra, 24(sp) # 8-byte Folded Reload
; RV64I-NEXT: ld s0, 16(sp) # 8-byte Folded Reload
; RV64I-NEXT: ld s1, 8(sp) # 8-byte Folded Reload
@@ -858,14 +858,14 @@ define double @fnmadd_d(double %a, double %b, double %c) nounwind {
; RV32I-NEXT: mv s3, a2
; RV32I-NEXT: li a2, 0
; RV32I-NEXT: li a3, 0
-; RV32I-NEXT: call __adddf3@plt
+; RV32I-NEXT: call __adddf3
; RV32I-NEXT: mv s4, a0
; RV32I-NEXT: mv s5, a1
; RV32I-NEXT: mv a0, s1
; RV32I-NEXT: mv a1, s0
; RV32I-NEXT: li a2, 0
; RV32I-NEXT: li a3, 0
-; RV32I-NEXT: call __adddf3@plt
+; RV32I-NEXT: call __adddf3
; RV32I-NEXT: mv a4, a0
; RV32I-NEXT: lui a5, 524288
; RV32I-NEXT: xor a2, s5, a5
@@ -874,7 +874,7 @@ define double @fnmadd_d(double %a, double %b, double %c) nounwind {
; RV32I-NEXT: mv a1, a2
; RV32I-NEXT: mv a2, s3
; RV32I-NEXT: mv a3, s2
-; RV32I-NEXT: call fma@plt
+; RV32I-NEXT: call fma
; RV32I-NEXT: lw ra, 28(sp) # 4-byte Folded Reload
; RV32I-NEXT: lw s0, 24(sp) # 4-byte Folded Reload
; RV32I-NEXT: lw s1, 20(sp) # 4-byte Folded Reload
@@ -895,18 +895,18 @@ define double @fnmadd_d(double %a, double %b, double %c) nounwind {
; RV64I-NEXT: mv s0, a2
; RV64I-NEXT: mv s1, a1
; RV64I-NEXT: li a1, 0
-; RV64I-NEXT: call __adddf3@plt
+; RV64I-NEXT: call __adddf3
; RV64I-NEXT: mv s2, a0
; RV64I-NEXT: mv a0, s0
; RV64I-NEXT: li a1, 0
-; RV64I-NEXT: call __adddf3@plt
+; RV64I-NEXT: call __adddf3
; RV64I-NEXT: li a1, -1
; RV64I-NEXT: slli a2, a1, 63
; RV64I-NEXT: xor a1, s2, a2
; RV64I-NEXT: xor a2, a0, a2
; RV64I-NEXT: mv a0, a1
; RV64I-NEXT: mv a1, s1
-; RV64I-NEXT: call fma@plt
+; RV64I-NEXT: call fma
; RV64I-NEXT: ld ra, 24(sp) # 8-byte Folded Reload
; RV64I-NEXT: ld s0, 16(sp) # 8-byte Folded Reload
; RV64I-NEXT: ld s1, 8(sp) # 8-byte Folded Reload
@@ -989,14 +989,14 @@ define double @fnmadd_d_2(double %a, double %b, double %c) nounwind {
; RV32I-NEXT: mv a1, a3
; RV32I-NEXT: li a2, 0
; RV32I-NEXT: li a3, 0
-; RV32I-NEXT: call __adddf3@plt
+; RV32I-NEXT: call __adddf3
; RV32I-NEXT: mv s4, a0
; RV32I-NEXT: mv s5, a1
; RV32I-NEXT: mv a0, s1
; RV32I-NEXT: mv a1, s0
; RV32I-NEXT: li a2, 0
; RV32I-NEXT: li a3, 0
-; RV32I-NEXT: call __adddf3@plt
+; RV32I-NEXT: call __adddf3
; RV32I-NEXT: mv a4, a0
; RV32I-NEXT: lui a5, 524288
; RV32I-NEXT: xor a3, s5, a5
@@ -1004,7 +1004,7 @@ define double @fnmadd_d_2(double %a, double %b, double %c) nounwind {
; RV32I-NEXT: mv a0, s3
; RV32I-NEXT: mv a1, s2
; RV32I-NEXT: mv a2, s4
-; RV32I-NEXT: call fma@plt
+; RV32I-NEXT: call fma
; RV32I-NEXT: lw ra, 28(sp) # 4-byte Folded Reload
; RV32I-NEXT: lw s0, 24(sp) # 4-byte Folded Reload
; RV32I-NEXT: lw s1, 20(sp) # 4-byte Folded Reload
@@ -1026,17 +1026,17 @@ define double @fnmadd_d_2(double %a, double %b, double %c) nounwind {
; RV64I-NEXT: mv s1, a0
; RV64I-NEXT: mv a0, a1
; RV64I-NEXT: li a1, 0
-; RV64I-NEXT: call __adddf3@plt
+; RV64I-NEXT: call __adddf3
; RV64I-NEXT: mv s2, a0
; RV64I-NEXT: mv a0, s0
; RV64I-NEXT: li a1, 0
-; RV64I-NEXT: call __adddf3@plt
+; RV64I-NEXT: call __adddf3
; RV64I-NEXT: li a1, -1
; RV64I-NEXT: slli a2, a1, 63
; RV64I-NEXT: xor a1, s2, a2
; RV64I-NEXT: xor a2, a0, a2
; RV64I-NEXT: mv a0, s1
-; RV64I-NEXT: call fma@plt
+; RV64I-NEXT: call fma
; RV64I-NEXT: ld ra, 24(sp) # 8-byte Folded Reload
; RV64I-NEXT: ld s0, 16(sp) # 8-byte Folded Reload
; RV64I-NEXT: ld s1, 8(sp) # 8-byte Folded Reload
@@ -1095,7 +1095,7 @@ define double @fnmadd_d_3(double %a, double %b, double %c) nounwind {
; RV32I: # %bb.0:
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT: call fma@plt
+; RV32I-NEXT: call fma
; RV32I-NEXT: lui a2, 524288
; RV32I-NEXT: xor a1, a1, a2
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
@@ -1106,7 +1106,7 @@ define double @fnmadd_d_3(double %a, double %b, double %c) nounwind {
; RV64I: # %bb.0:
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT: call fma@plt
+; RV64I-NEXT: call fma
; RV64I-NEXT: li a1, -1
; RV64I-NEXT: slli a1, a1, 63
; RV64I-NEXT: xor a0, a0, a1
@@ -1162,7 +1162,7 @@ define double @fnmadd_nsz(double %a, double %b, double %c) nounwind {
; RV32I: # %bb.0:
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT: call fma@plt
+; RV32I-NEXT: call fma
; RV32I-NEXT: lui a2, 524288
; RV32I-NEXT: xor a1, a1, a2
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
@@ -1173,7 +1173,7 @@ define double @fnmadd_nsz(double %a, double %b, double %c) nounwind {
; RV64I: # %bb.0:
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT: call fma@plt
+; RV64I-NEXT: call fma
; RV64I-NEXT: li a1, -1
; RV64I-NEXT: slli a1, a1, 63
; RV64I-NEXT: xor a0, a0, a1
@@ -1245,14 +1245,14 @@ define double @fnmsub_d(double %a, double %b, double %c) nounwind {
; RV32I-NEXT: mv s3, a2
; RV32I-NEXT: li a2, 0
; RV32I-NEXT: li a3, 0
-; RV32I-NEXT: call __adddf3@plt
+; RV32I-NEXT: call __adddf3
; RV32I-NEXT: lui a2, 524288
; RV32I-NEXT: xor a1, a1, a2
; RV32I-NEXT: mv a2, s3
; RV32I-NEXT: mv a3, s2
; RV32I-NEXT: mv a4, s1
; RV32I-NEXT: mv a5, s0
-; RV32I-NEXT: call fma@plt
+; RV32I-NEXT: call fma
; RV32I-NEXT: lw ra, 28(sp) # 4-byte Folded Reload
; RV32I-NEXT: lw s0, 24(sp) # 4-byte Folded Reload
; RV32I-NEXT: lw s1, 20(sp) # 4-byte Folded Reload
@@ -1270,13 +1270,13 @@ define double @fnmsub_d(double %a, double %b, double %c) nounwind {
; RV64I-NEXT: mv s0, a2
; RV64I-NEXT: mv s1, a1
; RV64I-NEXT: li a1, 0
-; RV64I-NEXT: call __adddf3@plt
+; RV64I-NEXT: call __adddf3
; RV64I-NEXT: li a1, -1
; RV64I-NEXT: slli a1, a1, 63
; RV64I-NEXT: xor a0, a0, a1
; RV64I-NEXT: mv a1, s1
; RV64I-NEXT: mv a2, s0
-; RV64I-NEXT: call fma@plt
+; RV64I-NEXT: call fma
; RV64I-NEXT: ld ra, 24(sp) # 8-byte Folded Reload
; RV64I-NEXT: ld s0, 16(sp) # 8-byte Folded Reload
; RV64I-NEXT: ld s1, 8(sp) # 8-byte Folded Reload
@@ -1350,7 +1350,7 @@ define double @fnmsub_d_2(double %a, double %b, double %c) nounwind {
; RV32I-NEXT: mv a1, a3
; RV32I-NEXT: li a2, 0
; RV32I-NEXT: li a3, 0
-; RV32I-NEXT: call __adddf3@plt
+; RV32I-NEXT: call __adddf3
; RV32I-NEXT: mv a2, a0
; RV32I-NEXT: lui a3, 524288
; RV32I-NEXT: xor a3, a1, a3
@@ -1358,7 +1358,7 @@ define double @fnmsub_d_2(double %a, double %b, double %c) nounwind {
; RV32I-NEXT: mv a1, s2
; RV32I-NEXT: mv a4, s1
; RV32I-NEXT: mv a5, s0
-; RV32I-NEXT: call fma@plt
+; RV32I-NEXT: call fma
; RV32I-NEXT: lw ra, 28(sp) # 4-byte Folded Reload
; RV32I-NEXT: lw s0, 24(sp) # 4-byte Folded Reload
; RV32I-NEXT: lw s1, 20(sp) # 4-byte Folded Reload
@@ -1377,13 +1377,13 @@ define double @fnmsub_d_2(double %a, double %b, double %c) nounwind {
; RV64I-NEXT: mv s1, a0
; RV64I-NEXT: mv a0, a1
; RV64I-NEXT: li a1, 0
-; RV64I-NEXT: call __adddf3@plt
+; RV64I-NEXT: call __adddf3
; RV64I-NEXT: li a1, -1
; RV64I-NEXT: slli a1, a1, 63
; RV64I-NEXT: xor a1, a0, a1
; RV64I-NEXT: mv a0, s1
; RV64I-NEXT: mv a2, s0
-; RV64I-NEXT: call fma@plt
+; RV64I-NEXT: call fma
; RV64I-NEXT: ld ra, 24(sp) # 8-byte Folded Reload
; RV64I-NEXT: ld s0, 16(sp) # 8-byte Folded Reload
; RV64I-NEXT: ld s1, 8(sp) # 8-byte Folded Reload
@@ -1437,10 +1437,10 @@ define double @fmadd_d_contract(double %a, double %b, double %c) nounwind {
; RV32I-NEXT: sw s1, 4(sp) # 4-byte Folded Spill
; RV32I-NEXT: mv s0, a5
; RV32I-NEXT: mv s1, a4
-; RV32I-NEXT: call __muldf3@plt
+; RV32I-NEXT: call __muldf3
; RV32I-NEXT: mv a2, s1
; RV32I-NEXT: mv a3, s0
-; RV32I-NEXT: call __adddf3@plt
+; RV32I-NEXT: call __adddf3
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: lw s0, 8(sp) # 4-byte Folded Reload
; RV32I-NEXT: lw s1, 4(sp) # 4-byte Folded Reload
@@ -1453,9 +1453,9 @@ define double @fmadd_d_contract(double %a, double %b, double %c) nounwind {
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT: sd s0, 0(sp) # 8-byte Folded Spill
; RV64I-NEXT: mv s0, a2
-; RV64I-NEXT: call __muldf3@plt
+; RV64I-NEXT: call __muldf3
; RV64I-NEXT: mv a1, s0
-; RV64I-NEXT: call __adddf3@plt
+; RV64I-NEXT: call __adddf3
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: ld s0, 0(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
@@ -1529,17 +1529,17 @@ define double @fmsub_d_contract(double %a, double %b, double %c) nounwind {
; RV32I-NEXT: mv a1, a5
; RV32I-NEXT: li a2, 0
; RV32I-NEXT: li a3, 0
-; RV32I-NEXT: call __adddf3@plt
+; RV32I-NEXT: call __adddf3
; RV32I-NEXT: mv s4, a0
; RV32I-NEXT: mv s5, a1
; RV32I-NEXT: mv a0, s3
; RV32I-NEXT: mv a1, s2
; RV32I-NEXT: mv a2, s1
; RV32I-NEXT: mv a3, s0
-; RV32I-NEXT: call __muldf3@plt
+; RV32I-NEXT: call __muldf3
; RV32I-NEXT: mv a2, s4
; RV32I-NEXT: mv a3, s5
-; RV32I-NEXT: call __subdf3@plt
+; RV32I-NEXT: call __subdf3
; RV32I-NEXT: lw ra, 28(sp) # 4-byte Folded Reload
; RV32I-NEXT: lw s0, 24(sp) # 4-byte Folded Reload
; RV32I-NEXT: lw s1, 20(sp) # 4-byte Folded Reload
@@ -1561,13 +1561,13 @@ define double @fmsub_d_contract(double %a, double %b, double %c) nounwind {
; RV64I-NEXT: mv s1, a0
; RV64I-NEXT: mv a0, a2
; RV64I-NEXT: li a1, 0
-; RV64I-NEXT: call __adddf3@plt
+; RV64I-NEXT: call __adddf3
; RV64I-NEXT: mv s2, a0
; RV64I-NEXT: mv a0, s1
; RV64I-NEXT: mv a1, s0
-; RV64I-NEXT: call __muldf3@plt
+; RV64I-NEXT: call __muldf3
; RV64I-NEXT: mv a1, s2
-; RV64I-NEXT: call __subdf3@plt
+; RV64I-NEXT: call __subdf3
; RV64I-NEXT: ld ra, 24(sp) # 8-byte Folded Reload
; RV64I-NEXT: ld s0, 16(sp) # 8-byte Folded Reload
; RV64I-NEXT: ld s1, 8(sp) # 8-byte Folded Reload
@@ -1650,33 +1650,33 @@ define double @fnmadd_d_contract(double %a, double %b, double %c) nounwind {
; RV32I-NEXT: mv s3, a2
; RV32I-NEXT: li a2, 0
; RV32I-NEXT: li a3, 0
-; RV32I-NEXT: call __adddf3@plt
+; RV32I-NEXT: call __adddf3
; RV32I-NEXT: mv s4, a0
; RV32I-NEXT: mv s5, a1
; RV32I-NEXT: mv a0, s3
; RV32I-NEXT: mv a1, s2
; RV32I-NEXT: li a2, 0
; RV32I-NEXT: li a3, 0
-; RV32I-NEXT: call __adddf3@plt
+; RV32I-NEXT: call __adddf3
; RV32I-NEXT: mv s2, a0
; RV32I-NEXT: mv s3, a1
; RV32I-NEXT: mv a0, s1
; RV32I-NEXT: mv a1, s0
; RV32I-NEXT: li a2, 0
; RV32I-NEXT: li a3, 0
-; RV32I-NEXT: call __adddf3@plt
+; RV32I-NEXT: call __adddf3
; RV32I-NEXT: mv s0, a0
; RV32I-NEXT: mv s1, a1
; RV32I-NEXT: mv a0, s4
; RV32I-NEXT: mv a1, s5
; RV32I-NEXT: mv a2, s2
; RV32I-NEXT: mv a3, s3
-; RV32I-NEXT: call __muldf3@plt
+; RV32I-NEXT: call __muldf3
; RV32I-NEXT: lui a2, 524288
; RV32I-NEXT: xor a1, a1, a2
; RV32I-NEXT: mv a2, s0
; RV32I-NEXT: mv a3, s1
-; RV32I-NEXT: call __subdf3@plt
+; RV32I-NEXT: call __subdf3
; RV32I-NEXT: lw ra, 28(sp) # 4-byte Folded Reload
; RV32I-NEXT: lw s0, 24(sp) # 4-byte Folded Reload
; RV32I-NEXT: lw s1, 20(sp) # 4-byte Folded Reload
@@ -1697,24 +1697,24 @@ define double @fnmadd_d_contract(double %a, double %b, double %c) nounwind {
; RV64I-NEXT: mv s0, a2
; RV64I-NEXT: mv s1, a1
; RV64I-NEXT: li a1, 0
-; RV64I-NEXT: call __adddf3@plt
+; RV64I-NEXT: call __adddf3
; RV64I-NEXT: mv s2, a0
; RV64I-NEXT: mv a0, s1
; RV64I-NEXT: li a1, 0
-; RV64I-NEXT: call __adddf3@plt
+; RV64I-NEXT: call __adddf3
; RV64I-NEXT: mv s1, a0
; RV64I-NEXT: mv a0, s0
; RV64I-NEXT: li a1, 0
-; RV64I-NEXT: call __adddf3@plt
+; RV64I-NEXT: call __adddf3
; RV64I-NEXT: mv s0, a0
; RV64I-NEXT: mv a0, s2
; RV64I-NEXT: mv a1, s1
-; RV64I-NEXT: call __muldf3@plt
+; RV64I-NEXT: call __muldf3
; RV64I-NEXT: li a1, -1
; RV64I-NEXT: slli a1, a1, 63
; RV64I-NEXT: xor a0, a0, a1
; RV64I-NEXT: mv a1, s0
-; RV64I-NEXT: call __subdf3@plt
+; RV64I-NEXT: call __subdf3
; RV64I-NEXT: ld ra, 24(sp) # 8-byte Folded Reload
; RV64I-NEXT: ld s0, 16(sp) # 8-byte Folded Reload
; RV64I-NEXT: ld s1, 8(sp) # 8-byte Folded Reload
@@ -1796,24 +1796,24 @@ define double @fnmsub_d_contract(double %a, double %b, double %c) nounwind {
; RV32I-NEXT: mv s3, a2
; RV32I-NEXT: li a2, 0
; RV32I-NEXT: li a3, 0
-; RV32I-NEXT: call __adddf3@plt
+; RV32I-NEXT: call __adddf3
; RV32I-NEXT: mv s4, a0
; RV32I-NEXT: mv s5, a1
; RV32I-NEXT: mv a0, s3
; RV32I-NEXT: mv a1, s2
; RV32I-NEXT: li a2, 0
; RV32I-NEXT: li a3, 0
-; RV32I-NEXT: call __adddf3@plt
+; RV32I-NEXT: call __adddf3
; RV32I-NEXT: mv a2, a0
; RV32I-NEXT: mv a3, a1
; RV32I-NEXT: mv a0, s4
; RV32I-NEXT: mv a1, s5
-; RV32I-NEXT: call __muldf3@plt
+; RV32I-NEXT: call __muldf3
; RV32I-NEXT: mv a2, a0
; RV32I-NEXT: mv a3, a1
; RV32I-NEXT: mv a0, s1
; RV32I-NEXT: mv a1, s0
-; RV32I-NEXT: call __subdf3@plt
+; RV32I-NEXT: call __subdf3
; RV32I-NEXT: lw ra, 28(sp) # 4-byte Folded Reload
; RV32I-NEXT: lw s0, 24(sp) # 4-byte Folded Reload
; RV32I-NEXT: lw s1, 20(sp) # 4-byte Folded Reload
@@ -1834,17 +1834,17 @@ define double @fnmsub_d_contract(double %a, double %b, double %c) nounwind {
; RV64I-NEXT: mv s0, a2
; RV64I-NEXT: mv s1, a1
; RV64I-NEXT: li a1, 0
-; RV64I-NEXT: call __adddf3@plt
+; RV64I-NEXT: call __adddf3
; RV64I-NEXT: mv s2, a0
; RV64I-NEXT: mv a0, s1
; RV64I-NEXT: li a1, 0
-; RV64I-NEXT: call __adddf3@plt
+; RV64I-NEXT: call __adddf3
; RV64I-NEXT: mv a1, a0
; RV64I-NEXT: mv a0, s2
-; RV64I-NEXT: call __muldf3@plt
+; RV64I-NEXT: call __muldf3
; RV64I-NEXT: mv a1, a0
; RV64I-NEXT: mv a0, s0
-; RV64I-NEXT: call __subdf3@plt
+; RV64I-NEXT: call __subdf3
; RV64I-NEXT: ld ra, 24(sp) # 8-byte Folded Reload
; RV64I-NEXT: ld s0, 16(sp) # 8-byte Folded Reload
; RV64I-NEXT: ld s1, 8(sp) # 8-byte Folded Reload
diff --git a/llvm/test/CodeGen/RISCV/double-br-fcmp.ll b/llvm/test/CodeGen/RISCV/double-br-fcmp.ll
index f2206d5397f0..2c5505edb1fa 100644
--- a/llvm/test/CodeGen/RISCV/double-br-fcmp.ll
+++ b/llvm/test/CodeGen/RISCV/double-br-fcmp.ll
@@ -21,7 +21,7 @@ define void @br_fcmp_false(double %a, double %b) nounwind {
; RV32IFD-NEXT: .LBB0_2: # %if.else
; RV32IFD-NEXT: addi sp, sp, -16
; RV32IFD-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IFD-NEXT: call abort@plt
+; RV32IFD-NEXT: call abort
;
; RV64IFD-LABEL: br_fcmp_false:
; RV64IFD: # %bb.0:
@@ -32,7 +32,7 @@ define void @br_fcmp_false(double %a, double %b) nounwind {
; RV64IFD-NEXT: .LBB0_2: # %if.else
; RV64IFD-NEXT: addi sp, sp, -16
; RV64IFD-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
-; RV64IFD-NEXT: call abort@plt
+; RV64IFD-NEXT: call abort
;
; RV32IZFINXZDINX-LABEL: br_fcmp_false:
; RV32IZFINXZDINX: # %bb.0:
@@ -43,7 +43,7 @@ define void @br_fcmp_false(double %a, double %b) nounwind {
; RV32IZFINXZDINX-NEXT: .LBB0_2: # %if.else
; RV32IZFINXZDINX-NEXT: addi sp, sp, -16
; RV32IZFINXZDINX-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IZFINXZDINX-NEXT: call abort@plt
+; RV32IZFINXZDINX-NEXT: call abort
;
; RV64IZFINXZDINX-LABEL: br_fcmp_false:
; RV64IZFINXZDINX: # %bb.0:
@@ -54,7 +54,7 @@ define void @br_fcmp_false(double %a, double %b) nounwind {
; RV64IZFINXZDINX-NEXT: .LBB0_2: # %if.else
; RV64IZFINXZDINX-NEXT: addi sp, sp, -16
; RV64IZFINXZDINX-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
-; RV64IZFINXZDINX-NEXT: call abort@plt
+; RV64IZFINXZDINX-NEXT: call abort
%1 = fcmp false double %a, %b
br i1 %1, label %if.then, label %if.else
if.then:
@@ -74,7 +74,7 @@ define void @br_fcmp_oeq(double %a, double %b) nounwind {
; RV32IFD-NEXT: .LBB1_2: # %if.then
; RV32IFD-NEXT: addi sp, sp, -16
; RV32IFD-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IFD-NEXT: call abort@plt
+; RV32IFD-NEXT: call abort
;
; RV64IFD-LABEL: br_fcmp_oeq:
; RV64IFD: # %bb.0:
@@ -85,7 +85,7 @@ define void @br_fcmp_oeq(double %a, double %b) nounwind {
; RV64IFD-NEXT: .LBB1_2: # %if.then
; RV64IFD-NEXT: addi sp, sp, -16
; RV64IFD-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
-; RV64IFD-NEXT: call abort@plt
+; RV64IFD-NEXT: call abort
;
; RV32IZFINXZDINX-LABEL: br_fcmp_oeq:
; RV32IZFINXZDINX: # %bb.0:
@@ -106,7 +106,7 @@ define void @br_fcmp_oeq(double %a, double %b) nounwind {
; RV32IZFINXZDINX-NEXT: addi sp, sp, 16
; RV32IZFINXZDINX-NEXT: ret
; RV32IZFINXZDINX-NEXT: .LBB1_2: # %if.then
-; RV32IZFINXZDINX-NEXT: call abort@plt
+; RV32IZFINXZDINX-NEXT: call abort
;
; RV64IZFINXZDINX-LABEL: br_fcmp_oeq:
; RV64IZFINXZDINX: # %bb.0:
@@ -117,7 +117,7 @@ define void @br_fcmp_oeq(double %a, double %b) nounwind {
; RV64IZFINXZDINX-NEXT: .LBB1_2: # %if.then
; RV64IZFINXZDINX-NEXT: addi sp, sp, -16
; RV64IZFINXZDINX-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
-; RV64IZFINXZDINX-NEXT: call abort@plt
+; RV64IZFINXZDINX-NEXT: call abort
%1 = fcmp oeq double %a, %b
br i1 %1, label %if.then, label %if.else
if.else:
@@ -140,7 +140,7 @@ define void @br_fcmp_oeq_alt(double %a, double %b) nounwind {
; RV32IFD-NEXT: .LBB2_2: # %if.then
; RV32IFD-NEXT: addi sp, sp, -16
; RV32IFD-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IFD-NEXT: call abort@plt
+; RV32IFD-NEXT: call abort
;
; RV64IFD-LABEL: br_fcmp_oeq_alt:
; RV64IFD: # %bb.0:
@@ -151,7 +151,7 @@ define void @br_fcmp_oeq_alt(double %a, double %b) nounwind {
; RV64IFD-NEXT: .LBB2_2: # %if.then
; RV64IFD-NEXT: addi sp, sp, -16
; RV64IFD-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
-; RV64IFD-NEXT: call abort@plt
+; RV64IFD-NEXT: call abort
;
; RV32IZFINXZDINX-LABEL: br_fcmp_oeq_alt:
; RV32IZFINXZDINX: # %bb.0:
@@ -172,7 +172,7 @@ define void @br_fcmp_oeq_alt(double %a, double %b) nounwind {
; RV32IZFINXZDINX-NEXT: addi sp, sp, 16
; RV32IZFINXZDINX-NEXT: ret
; RV32IZFINXZDINX-NEXT: .LBB2_2: # %if.then
-; RV32IZFINXZDINX-NEXT: call abort@plt
+; RV32IZFINXZDINX-NEXT: call abort
;
; RV64IZFINXZDINX-LABEL: br_fcmp_oeq_alt:
; RV64IZFINXZDINX: # %bb.0:
@@ -183,7 +183,7 @@ define void @br_fcmp_oeq_alt(double %a, double %b) nounwind {
; RV64IZFINXZDINX-NEXT: .LBB2_2: # %if.then
; RV64IZFINXZDINX-NEXT: addi sp, sp, -16
; RV64IZFINXZDINX-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
-; RV64IZFINXZDINX-NEXT: call abort@plt
+; RV64IZFINXZDINX-NEXT: call abort
%1 = fcmp oeq double %a, %b
br i1 %1, label %if.then, label %if.else
if.then:
@@ -203,7 +203,7 @@ define void @br_fcmp_ogt(double %a, double %b) nounwind {
; RV32IFD-NEXT: .LBB3_2: # %if.then
; RV32IFD-NEXT: addi sp, sp, -16
; RV32IFD-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IFD-NEXT: call abort@plt
+; RV32IFD-NEXT: call abort
;
; RV64IFD-LABEL: br_fcmp_ogt:
; RV64IFD: # %bb.0:
@@ -214,7 +214,7 @@ define void @br_fcmp_ogt(double %a, double %b) nounwind {
; RV64IFD-NEXT: .LBB3_2: # %if.then
; RV64IFD-NEXT: addi sp, sp, -16
; RV64IFD-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
-; RV64IFD-NEXT: call abort@plt
+; RV64IFD-NEXT: call abort
;
; RV32IZFINXZDINX-LABEL: br_fcmp_ogt:
; RV32IZFINXZDINX: # %bb.0:
@@ -235,7 +235,7 @@ define void @br_fcmp_ogt(double %a, double %b) nounwind {
; RV32IZFINXZDINX-NEXT: addi sp, sp, 16
; RV32IZFINXZDINX-NEXT: ret
; RV32IZFINXZDINX-NEXT: .LBB3_2: # %if.then
-; RV32IZFINXZDINX-NEXT: call abort@plt
+; RV32IZFINXZDINX-NEXT: call abort
;
; RV64IZFINXZDINX-LABEL: br_fcmp_ogt:
; RV64IZFINXZDINX: # %bb.0:
@@ -246,7 +246,7 @@ define void @br_fcmp_ogt(double %a, double %b) nounwind {
; RV64IZFINXZDINX-NEXT: .LBB3_2: # %if.then
; RV64IZFINXZDINX-NEXT: addi sp, sp, -16
; RV64IZFINXZDINX-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
-; RV64IZFINXZDINX-NEXT: call abort@plt
+; RV64IZFINXZDINX-NEXT: call abort
%1 = fcmp ogt double %a, %b
br i1 %1, label %if.then, label %if.else
if.else:
@@ -266,7 +266,7 @@ define void @br_fcmp_oge(double %a, double %b) nounwind {
; RV32IFD-NEXT: .LBB4_2: # %if.then
; RV32IFD-NEXT: addi sp, sp, -16
; RV32IFD-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IFD-NEXT: call abort@plt
+; RV32IFD-NEXT: call abort
;
; RV64IFD-LABEL: br_fcmp_oge:
; RV64IFD: # %bb.0:
@@ -277,7 +277,7 @@ define void @br_fcmp_oge(double %a, double %b) nounwind {
; RV64IFD-NEXT: .LBB4_2: # %if.then
; RV64IFD-NEXT: addi sp, sp, -16
; RV64IFD-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
-; RV64IFD-NEXT: call abort@plt
+; RV64IFD-NEXT: call abort
;
; RV32IZFINXZDINX-LABEL: br_fcmp_oge:
; RV32IZFINXZDINX: # %bb.0:
@@ -298,7 +298,7 @@ define void @br_fcmp_oge(double %a, double %b) nounwind {
; RV32IZFINXZDINX-NEXT: addi sp, sp, 16
; RV32IZFINXZDINX-NEXT: ret
; RV32IZFINXZDINX-NEXT: .LBB4_2: # %if.then
-; RV32IZFINXZDINX-NEXT: call abort@plt
+; RV32IZFINXZDINX-NEXT: call abort
;
; RV64IZFINXZDINX-LABEL: br_fcmp_oge:
; RV64IZFINXZDINX: # %bb.0:
@@ -309,7 +309,7 @@ define void @br_fcmp_oge(double %a, double %b) nounwind {
; RV64IZFINXZDINX-NEXT: .LBB4_2: # %if.then
; RV64IZFINXZDINX-NEXT: addi sp, sp, -16
; RV64IZFINXZDINX-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
-; RV64IZFINXZDINX-NEXT: call abort@plt
+; RV64IZFINXZDINX-NEXT: call abort
%1 = fcmp oge double %a, %b
br i1 %1, label %if.then, label %if.else
if.else:
@@ -329,7 +329,7 @@ define void @br_fcmp_olt(double %a, double %b) nounwind {
; RV32IFD-NEXT: .LBB5_2: # %if.then
; RV32IFD-NEXT: addi sp, sp, -16
; RV32IFD-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IFD-NEXT: call abort@plt
+; RV32IFD-NEXT: call abort
;
; RV64IFD-LABEL: br_fcmp_olt:
; RV64IFD: # %bb.0:
@@ -340,7 +340,7 @@ define void @br_fcmp_olt(double %a, double %b) nounwind {
; RV64IFD-NEXT: .LBB5_2: # %if.then
; RV64IFD-NEXT: addi sp, sp, -16
; RV64IFD-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
-; RV64IFD-NEXT: call abort@plt
+; RV64IFD-NEXT: call abort
;
; RV32IZFINXZDINX-LABEL: br_fcmp_olt:
; RV32IZFINXZDINX: # %bb.0:
@@ -361,7 +361,7 @@ define void @br_fcmp_olt(double %a, double %b) nounwind {
; RV32IZFINXZDINX-NEXT: addi sp, sp, 16
; RV32IZFINXZDINX-NEXT: ret
; RV32IZFINXZDINX-NEXT: .LBB5_2: # %if.then
-; RV32IZFINXZDINX-NEXT: call abort@plt
+; RV32IZFINXZDINX-NEXT: call abort
;
; RV64IZFINXZDINX-LABEL: br_fcmp_olt:
; RV64IZFINXZDINX: # %bb.0:
@@ -372,7 +372,7 @@ define void @br_fcmp_olt(double %a, double %b) nounwind {
; RV64IZFINXZDINX-NEXT: .LBB5_2: # %if.then
; RV64IZFINXZDINX-NEXT: addi sp, sp, -16
; RV64IZFINXZDINX-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
-; RV64IZFINXZDINX-NEXT: call abort@plt
+; RV64IZFINXZDINX-NEXT: call abort
%1 = fcmp olt double %a, %b
br i1 %1, label %if.then, label %if.else
if.else:
@@ -392,7 +392,7 @@ define void @br_fcmp_ole(double %a, double %b) nounwind {
; RV32IFD-NEXT: .LBB6_2: # %if.then
; RV32IFD-NEXT: addi sp, sp, -16
; RV32IFD-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IFD-NEXT: call abort@plt
+; RV32IFD-NEXT: call abort
;
; RV64IFD-LABEL: br_fcmp_ole:
; RV64IFD: # %bb.0:
@@ -403,7 +403,7 @@ define void @br_fcmp_ole(double %a, double %b) nounwind {
; RV64IFD-NEXT: .LBB6_2: # %if.then
; RV64IFD-NEXT: addi sp, sp, -16
; RV64IFD-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
-; RV64IFD-NEXT: call abort@plt
+; RV64IFD-NEXT: call abort
;
; RV32IZFINXZDINX-LABEL: br_fcmp_ole:
; RV32IZFINXZDINX: # %bb.0:
@@ -424,7 +424,7 @@ define void @br_fcmp_ole(double %a, double %b) nounwind {
; RV32IZFINXZDINX-NEXT: addi sp, sp, 16
; RV32IZFINXZDINX-NEXT: ret
; RV32IZFINXZDINX-NEXT: .LBB6_2: # %if.then
-; RV32IZFINXZDINX-NEXT: call abort@plt
+; RV32IZFINXZDINX-NEXT: call abort
;
; RV64IZFINXZDINX-LABEL: br_fcmp_ole:
; RV64IZFINXZDINX: # %bb.0:
@@ -435,7 +435,7 @@ define void @br_fcmp_ole(double %a, double %b) nounwind {
; RV64IZFINXZDINX-NEXT: .LBB6_2: # %if.then
; RV64IZFINXZDINX-NEXT: addi sp, sp, -16
; RV64IZFINXZDINX-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
-; RV64IZFINXZDINX-NEXT: call abort@plt
+; RV64IZFINXZDINX-NEXT: call abort
%1 = fcmp ole double %a, %b
br i1 %1, label %if.then, label %if.else
if.else:
@@ -457,7 +457,7 @@ define void @br_fcmp_one(double %a, double %b) nounwind {
; RV32IFD-NEXT: .LBB7_2: # %if.then
; RV32IFD-NEXT: addi sp, sp, -16
; RV32IFD-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IFD-NEXT: call abort@plt
+; RV32IFD-NEXT: call abort
;
; RV64IFD-LABEL: br_fcmp_one:
; RV64IFD: # %bb.0:
@@ -470,7 +470,7 @@ define void @br_fcmp_one(double %a, double %b) nounwind {
; RV64IFD-NEXT: .LBB7_2: # %if.then
; RV64IFD-NEXT: addi sp, sp, -16
; RV64IFD-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
-; RV64IFD-NEXT: call abort@plt
+; RV64IFD-NEXT: call abort
;
; RV32IZFINXZDINX-LABEL: br_fcmp_one:
; RV32IZFINXZDINX: # %bb.0:
@@ -493,7 +493,7 @@ define void @br_fcmp_one(double %a, double %b) nounwind {
; RV32IZFINXZDINX-NEXT: addi sp, sp, 16
; RV32IZFINXZDINX-NEXT: ret
; RV32IZFINXZDINX-NEXT: .LBB7_2: # %if.then
-; RV32IZFINXZDINX-NEXT: call abort@plt
+; RV32IZFINXZDINX-NEXT: call abort
;
; RV64IZFINXZDINX-LABEL: br_fcmp_one:
; RV64IZFINXZDINX: # %bb.0:
@@ -506,7 +506,7 @@ define void @br_fcmp_one(double %a, double %b) nounwind {
; RV64IZFINXZDINX-NEXT: .LBB7_2: # %if.then
; RV64IZFINXZDINX-NEXT: addi sp, sp, -16
; RV64IZFINXZDINX-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
-; RV64IZFINXZDINX-NEXT: call abort@plt
+; RV64IZFINXZDINX-NEXT: call abort
%1 = fcmp one double %a, %b
br i1 %1, label %if.then, label %if.else
if.else:
@@ -528,7 +528,7 @@ define void @br_fcmp_ord(double %a, double %b) nounwind {
; RV32IFD-NEXT: .LBB8_2: # %if.then
; RV32IFD-NEXT: addi sp, sp, -16
; RV32IFD-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IFD-NEXT: call abort@plt
+; RV32IFD-NEXT: call abort
;
; RV64IFD-LABEL: br_fcmp_ord:
; RV64IFD: # %bb.0:
@@ -541,7 +541,7 @@ define void @br_fcmp_ord(double %a, double %b) nounwind {
; RV64IFD-NEXT: .LBB8_2: # %if.then
; RV64IFD-NEXT: addi sp, sp, -16
; RV64IFD-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
-; RV64IFD-NEXT: call abort@plt
+; RV64IFD-NEXT: call abort
;
; RV32IZFINXZDINX-LABEL: br_fcmp_ord:
; RV32IZFINXZDINX: # %bb.0:
@@ -564,7 +564,7 @@ define void @br_fcmp_ord(double %a, double %b) nounwind {
; RV32IZFINXZDINX-NEXT: addi sp, sp, 16
; RV32IZFINXZDINX-NEXT: ret
; RV32IZFINXZDINX-NEXT: .LBB8_2: # %if.then
-; RV32IZFINXZDINX-NEXT: call abort@plt
+; RV32IZFINXZDINX-NEXT: call abort
;
; RV64IZFINXZDINX-LABEL: br_fcmp_ord:
; RV64IZFINXZDINX: # %bb.0:
@@ -577,7 +577,7 @@ define void @br_fcmp_ord(double %a, double %b) nounwind {
; RV64IZFINXZDINX-NEXT: .LBB8_2: # %if.then
; RV64IZFINXZDINX-NEXT: addi sp, sp, -16
; RV64IZFINXZDINX-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
-; RV64IZFINXZDINX-NEXT: call abort@plt
+; RV64IZFINXZDINX-NEXT: call abort
%1 = fcmp ord double %a, %b
br i1 %1, label %if.then, label %if.else
if.else:
@@ -599,7 +599,7 @@ define void @br_fcmp_ueq(double %a, double %b) nounwind {
; RV32IFD-NEXT: .LBB9_2: # %if.then
; RV32IFD-NEXT: addi sp, sp, -16
; RV32IFD-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IFD-NEXT: call abort@plt
+; RV32IFD-NEXT: call abort
;
; RV64IFD-LABEL: br_fcmp_ueq:
; RV64IFD: # %bb.0:
@@ -612,7 +612,7 @@ define void @br_fcmp_ueq(double %a, double %b) nounwind {
; RV64IFD-NEXT: .LBB9_2: # %if.then
; RV64IFD-NEXT: addi sp, sp, -16
; RV64IFD-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
-; RV64IFD-NEXT: call abort@plt
+; RV64IFD-NEXT: call abort
;
; RV32IZFINXZDINX-LABEL: br_fcmp_ueq:
; RV32IZFINXZDINX: # %bb.0:
@@ -635,7 +635,7 @@ define void @br_fcmp_ueq(double %a, double %b) nounwind {
; RV32IZFINXZDINX-NEXT: addi sp, sp, 16
; RV32IZFINXZDINX-NEXT: ret
; RV32IZFINXZDINX-NEXT: .LBB9_2: # %if.then
-; RV32IZFINXZDINX-NEXT: call abort@plt
+; RV32IZFINXZDINX-NEXT: call abort
;
; RV64IZFINXZDINX-LABEL: br_fcmp_ueq:
; RV64IZFINXZDINX: # %bb.0:
@@ -648,7 +648,7 @@ define void @br_fcmp_ueq(double %a, double %b) nounwind {
; RV64IZFINXZDINX-NEXT: .LBB9_2: # %if.then
; RV64IZFINXZDINX-NEXT: addi sp, sp, -16
; RV64IZFINXZDINX-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
-; RV64IZFINXZDINX-NEXT: call abort@plt
+; RV64IZFINXZDINX-NEXT: call abort
%1 = fcmp ueq double %a, %b
br i1 %1, label %if.then, label %if.else
if.else:
@@ -668,7 +668,7 @@ define void @br_fcmp_ugt(double %a, double %b) nounwind {
; RV32IFD-NEXT: .LBB10_2: # %if.then
; RV32IFD-NEXT: addi sp, sp, -16
; RV32IFD-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IFD-NEXT: call abort@plt
+; RV32IFD-NEXT: call abort
;
; RV64IFD-LABEL: br_fcmp_ugt:
; RV64IFD: # %bb.0:
@@ -679,7 +679,7 @@ define void @br_fcmp_ugt(double %a, double %b) nounwind {
; RV64IFD-NEXT: .LBB10_2: # %if.then
; RV64IFD-NEXT: addi sp, sp, -16
; RV64IFD-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
-; RV64IFD-NEXT: call abort@plt
+; RV64IFD-NEXT: call abort
;
; RV32IZFINXZDINX-LABEL: br_fcmp_ugt:
; RV32IZFINXZDINX: # %bb.0:
@@ -700,7 +700,7 @@ define void @br_fcmp_ugt(double %a, double %b) nounwind {
; RV32IZFINXZDINX-NEXT: addi sp, sp, 16
; RV32IZFINXZDINX-NEXT: ret
; RV32IZFINXZDINX-NEXT: .LBB10_2: # %if.then
-; RV32IZFINXZDINX-NEXT: call abort@plt
+; RV32IZFINXZDINX-NEXT: call abort
;
; RV64IZFINXZDINX-LABEL: br_fcmp_ugt:
; RV64IZFINXZDINX: # %bb.0:
@@ -711,7 +711,7 @@ define void @br_fcmp_ugt(double %a, double %b) nounwind {
; RV64IZFINXZDINX-NEXT: .LBB10_2: # %if.then
; RV64IZFINXZDINX-NEXT: addi sp, sp, -16
; RV64IZFINXZDINX-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
-; RV64IZFINXZDINX-NEXT: call abort@plt
+; RV64IZFINXZDINX-NEXT: call abort
%1 = fcmp ugt double %a, %b
br i1 %1, label %if.then, label %if.else
if.else:
@@ -731,7 +731,7 @@ define void @br_fcmp_uge(double %a, double %b) nounwind {
; RV32IFD-NEXT: .LBB11_2: # %if.then
; RV32IFD-NEXT: addi sp, sp, -16
; RV32IFD-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IFD-NEXT: call abort@plt
+; RV32IFD-NEXT: call abort
;
; RV64IFD-LABEL: br_fcmp_uge:
; RV64IFD: # %bb.0:
@@ -742,7 +742,7 @@ define void @br_fcmp_uge(double %a, double %b) nounwind {
; RV64IFD-NEXT: .LBB11_2: # %if.then
; RV64IFD-NEXT: addi sp, sp, -16
; RV64IFD-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
-; RV64IFD-NEXT: call abort@plt
+; RV64IFD-NEXT: call abort
;
; RV32IZFINXZDINX-LABEL: br_fcmp_uge:
; RV32IZFINXZDINX: # %bb.0:
@@ -763,7 +763,7 @@ define void @br_fcmp_uge(double %a, double %b) nounwind {
; RV32IZFINXZDINX-NEXT: addi sp, sp, 16
; RV32IZFINXZDINX-NEXT: ret
; RV32IZFINXZDINX-NEXT: .LBB11_2: # %if.then
-; RV32IZFINXZDINX-NEXT: call abort@plt
+; RV32IZFINXZDINX-NEXT: call abort
;
; RV64IZFINXZDINX-LABEL: br_fcmp_uge:
; RV64IZFINXZDINX: # %bb.0:
@@ -774,7 +774,7 @@ define void @br_fcmp_uge(double %a, double %b) nounwind {
; RV64IZFINXZDINX-NEXT: .LBB11_2: # %if.then
; RV64IZFINXZDINX-NEXT: addi sp, sp, -16
; RV64IZFINXZDINX-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
-; RV64IZFINXZDINX-NEXT: call abort@plt
+; RV64IZFINXZDINX-NEXT: call abort
%1 = fcmp uge double %a, %b
br i1 %1, label %if.then, label %if.else
if.else:
@@ -794,7 +794,7 @@ define void @br_fcmp_ult(double %a, double %b) nounwind {
; RV32IFD-NEXT: .LBB12_2: # %if.then
; RV32IFD-NEXT: addi sp, sp, -16
; RV32IFD-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IFD-NEXT: call abort@plt
+; RV32IFD-NEXT: call abort
;
; RV64IFD-LABEL: br_fcmp_ult:
; RV64IFD: # %bb.0:
@@ -805,7 +805,7 @@ define void @br_fcmp_ult(double %a, double %b) nounwind {
; RV64IFD-NEXT: .LBB12_2: # %if.then
; RV64IFD-NEXT: addi sp, sp, -16
; RV64IFD-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
-; RV64IFD-NEXT: call abort@plt
+; RV64IFD-NEXT: call abort
;
; RV32IZFINXZDINX-LABEL: br_fcmp_ult:
; RV32IZFINXZDINX: # %bb.0:
@@ -826,7 +826,7 @@ define void @br_fcmp_ult(double %a, double %b) nounwind {
; RV32IZFINXZDINX-NEXT: addi sp, sp, 16
; RV32IZFINXZDINX-NEXT: ret
; RV32IZFINXZDINX-NEXT: .LBB12_2: # %if.then
-; RV32IZFINXZDINX-NEXT: call abort@plt
+; RV32IZFINXZDINX-NEXT: call abort
;
; RV64IZFINXZDINX-LABEL: br_fcmp_ult:
; RV64IZFINXZDINX: # %bb.0:
@@ -837,7 +837,7 @@ define void @br_fcmp_ult(double %a, double %b) nounwind {
; RV64IZFINXZDINX-NEXT: .LBB12_2: # %if.then
; RV64IZFINXZDINX-NEXT: addi sp, sp, -16
; RV64IZFINXZDINX-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
-; RV64IZFINXZDINX-NEXT: call abort@plt
+; RV64IZFINXZDINX-NEXT: call abort
%1 = fcmp ult double %a, %b
br i1 %1, label %if.then, label %if.else
if.else:
@@ -857,7 +857,7 @@ define void @br_fcmp_ule(double %a, double %b) nounwind {
; RV32IFD-NEXT: .LBB13_2: # %if.then
; RV32IFD-NEXT: addi sp, sp, -16
; RV32IFD-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IFD-NEXT: call abort@plt
+; RV32IFD-NEXT: call abort
;
; RV64IFD-LABEL: br_fcmp_ule:
; RV64IFD: # %bb.0:
@@ -868,7 +868,7 @@ define void @br_fcmp_ule(double %a, double %b) nounwind {
; RV64IFD-NEXT: .LBB13_2: # %if.then
; RV64IFD-NEXT: addi sp, sp, -16
; RV64IFD-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
-; RV64IFD-NEXT: call abort@plt
+; RV64IFD-NEXT: call abort
;
; RV32IZFINXZDINX-LABEL: br_fcmp_ule:
; RV32IZFINXZDINX: # %bb.0:
@@ -889,7 +889,7 @@ define void @br_fcmp_ule(double %a, double %b) nounwind {
; RV32IZFINXZDINX-NEXT: addi sp, sp, 16
; RV32IZFINXZDINX-NEXT: ret
; RV32IZFINXZDINX-NEXT: .LBB13_2: # %if.then
-; RV32IZFINXZDINX-NEXT: call abort@plt
+; RV32IZFINXZDINX-NEXT: call abort
;
; RV64IZFINXZDINX-LABEL: br_fcmp_ule:
; RV64IZFINXZDINX: # %bb.0:
@@ -900,7 +900,7 @@ define void @br_fcmp_ule(double %a, double %b) nounwind {
; RV64IZFINXZDINX-NEXT: .LBB13_2: # %if.then
; RV64IZFINXZDINX-NEXT: addi sp, sp, -16
; RV64IZFINXZDINX-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
-; RV64IZFINXZDINX-NEXT: call abort@plt
+; RV64IZFINXZDINX-NEXT: call abort
%1 = fcmp ule double %a, %b
br i1 %1, label %if.then, label %if.else
if.else:
@@ -920,7 +920,7 @@ define void @br_fcmp_une(double %a, double %b) nounwind {
; RV32IFD-NEXT: .LBB14_2: # %if.then
; RV32IFD-NEXT: addi sp, sp, -16
; RV32IFD-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IFD-NEXT: call abort@plt
+; RV32IFD-NEXT: call abort
;
; RV64IFD-LABEL: br_fcmp_une:
; RV64IFD: # %bb.0:
@@ -931,7 +931,7 @@ define void @br_fcmp_une(double %a, double %b) nounwind {
; RV64IFD-NEXT: .LBB14_2: # %if.then
; RV64IFD-NEXT: addi sp, sp, -16
; RV64IFD-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
-; RV64IFD-NEXT: call abort@plt
+; RV64IFD-NEXT: call abort
;
; RV32IZFINXZDINX-LABEL: br_fcmp_une:
; RV32IZFINXZDINX: # %bb.0:
@@ -952,7 +952,7 @@ define void @br_fcmp_une(double %a, double %b) nounwind {
; RV32IZFINXZDINX-NEXT: addi sp, sp, 16
; RV32IZFINXZDINX-NEXT: ret
; RV32IZFINXZDINX-NEXT: .LBB14_2: # %if.then
-; RV32IZFINXZDINX-NEXT: call abort@plt
+; RV32IZFINXZDINX-NEXT: call abort
;
; RV64IZFINXZDINX-LABEL: br_fcmp_une:
; RV64IZFINXZDINX: # %bb.0:
@@ -963,7 +963,7 @@ define void @br_fcmp_une(double %a, double %b) nounwind {
; RV64IZFINXZDINX-NEXT: .LBB14_2: # %if.then
; RV64IZFINXZDINX-NEXT: addi sp, sp, -16
; RV64IZFINXZDINX-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
-; RV64IZFINXZDINX-NEXT: call abort@plt
+; RV64IZFINXZDINX-NEXT: call abort
%1 = fcmp une double %a, %b
br i1 %1, label %if.then, label %if.else
if.else:
@@ -985,7 +985,7 @@ define void @br_fcmp_uno(double %a, double %b) nounwind {
; RV32IFD-NEXT: .LBB15_2: # %if.then
; RV32IFD-NEXT: addi sp, sp, -16
; RV32IFD-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IFD-NEXT: call abort@plt
+; RV32IFD-NEXT: call abort
;
; RV64IFD-LABEL: br_fcmp_uno:
; RV64IFD: # %bb.0:
@@ -998,7 +998,7 @@ define void @br_fcmp_uno(double %a, double %b) nounwind {
; RV64IFD-NEXT: .LBB15_2: # %if.then
; RV64IFD-NEXT: addi sp, sp, -16
; RV64IFD-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
-; RV64IFD-NEXT: call abort@plt
+; RV64IFD-NEXT: call abort
;
; RV32IZFINXZDINX-LABEL: br_fcmp_uno:
; RV32IZFINXZDINX: # %bb.0:
@@ -1021,7 +1021,7 @@ define void @br_fcmp_uno(double %a, double %b) nounwind {
; RV32IZFINXZDINX-NEXT: addi sp, sp, 16
; RV32IZFINXZDINX-NEXT: ret
; RV32IZFINXZDINX-NEXT: .LBB15_2: # %if.then
-; RV32IZFINXZDINX-NEXT: call abort@plt
+; RV32IZFINXZDINX-NEXT: call abort
;
; RV64IZFINXZDINX-LABEL: br_fcmp_uno:
; RV64IZFINXZDINX: # %bb.0:
@@ -1034,7 +1034,7 @@ define void @br_fcmp_uno(double %a, double %b) nounwind {
; RV64IZFINXZDINX-NEXT: .LBB15_2: # %if.then
; RV64IZFINXZDINX-NEXT: addi sp, sp, -16
; RV64IZFINXZDINX-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
-; RV64IZFINXZDINX-NEXT: call abort@plt
+; RV64IZFINXZDINX-NEXT: call abort
%1 = fcmp uno double %a, %b
br i1 %1, label %if.then, label %if.else
if.else:
@@ -1054,7 +1054,7 @@ define void @br_fcmp_true(double %a, double %b) nounwind {
; RV32IFD-NEXT: .LBB16_2: # %if.then
; RV32IFD-NEXT: addi sp, sp, -16
; RV32IFD-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IFD-NEXT: call abort@plt
+; RV32IFD-NEXT: call abort
;
; RV64IFD-LABEL: br_fcmp_true:
; RV64IFD: # %bb.0:
@@ -1065,7 +1065,7 @@ define void @br_fcmp_true(double %a, double %b) nounwind {
; RV64IFD-NEXT: .LBB16_2: # %if.then
; RV64IFD-NEXT: addi sp, sp, -16
; RV64IFD-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
-; RV64IFD-NEXT: call abort@plt
+; RV64IFD-NEXT: call abort
;
; RV32IZFINXZDINX-LABEL: br_fcmp_true:
; RV32IZFINXZDINX: # %bb.0:
@@ -1076,7 +1076,7 @@ define void @br_fcmp_true(double %a, double %b) nounwind {
; RV32IZFINXZDINX-NEXT: .LBB16_2: # %if.then
; RV32IZFINXZDINX-NEXT: addi sp, sp, -16
; RV32IZFINXZDINX-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IZFINXZDINX-NEXT: call abort@plt
+; RV32IZFINXZDINX-NEXT: call abort
;
; RV64IZFINXZDINX-LABEL: br_fcmp_true:
; RV64IZFINXZDINX: # %bb.0:
@@ -1087,7 +1087,7 @@ define void @br_fcmp_true(double %a, double %b) nounwind {
; RV64IZFINXZDINX-NEXT: .LBB16_2: # %if.then
; RV64IZFINXZDINX-NEXT: addi sp, sp, -16
; RV64IZFINXZDINX-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
-; RV64IZFINXZDINX-NEXT: call abort@plt
+; RV64IZFINXZDINX-NEXT: call abort
%1 = fcmp true double %a, %b
br i1 %1, label %if.then, label %if.else
if.else:
diff --git a/llvm/test/CodeGen/RISCV/double-calling-conv.ll b/llvm/test/CodeGen/RISCV/double-calling-conv.ll
index ab511e8e6248..d46256b12052 100644
--- a/llvm/test/CodeGen/RISCV/double-calling-conv.ll
+++ b/llvm/test/CodeGen/RISCV/double-calling-conv.ll
@@ -62,7 +62,7 @@ define double @caller_double_inreg() nounwind {
; RV32IFD-NEXT: lui a2, 262364
; RV32IFD-NEXT: addi a3, a2, 655
; RV32IFD-NEXT: mv a2, a0
-; RV32IFD-NEXT: call callee_double_inreg@plt
+; RV32IFD-NEXT: call callee_double_inreg
; RV32IFD-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32IFD-NEXT: addi sp, sp, 16
; RV32IFD-NEXT: ret
@@ -78,7 +78,7 @@ define double @caller_double_inreg() nounwind {
; RV32IZFINXZDINX-NEXT: lui a2, 262364
; RV32IZFINXZDINX-NEXT: addi a3, a2, 655
; RV32IZFINXZDINX-NEXT: mv a2, a0
-; RV32IZFINXZDINX-NEXT: call callee_double_inreg@plt
+; RV32IZFINXZDINX-NEXT: call callee_double_inreg
; RV32IZFINXZDINX-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32IZFINXZDINX-NEXT: addi sp, sp, 16
; RV32IZFINXZDINX-NEXT: ret
@@ -145,7 +145,7 @@ define double @caller_double_split_reg_stack() nounwind {
; RV32IFD-NEXT: li a2, 0
; RV32IFD-NEXT: li a4, 0
; RV32IFD-NEXT: mv a7, a5
-; RV32IFD-NEXT: call callee_double_split_reg_stack@plt
+; RV32IFD-NEXT: call callee_double_split_reg_stack
; RV32IFD-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32IFD-NEXT: addi sp, sp, 16
; RV32IFD-NEXT: ret
@@ -167,7 +167,7 @@ define double @caller_double_split_reg_stack() nounwind {
; RV32IZFINXZDINX-NEXT: li a2, 0
; RV32IZFINXZDINX-NEXT: li a4, 0
; RV32IZFINXZDINX-NEXT: mv a7, a5
-; RV32IZFINXZDINX-NEXT: call callee_double_split_reg_stack@plt
+; RV32IZFINXZDINX-NEXT: call callee_double_split_reg_stack
; RV32IZFINXZDINX-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32IZFINXZDINX-NEXT: addi sp, sp, 16
; RV32IZFINXZDINX-NEXT: ret
@@ -229,7 +229,7 @@ define double @caller_double_stack() nounwind {
; RV32IFD-NEXT: li a3, 0
; RV32IFD-NEXT: li a5, 0
; RV32IFD-NEXT: li a7, 0
-; RV32IFD-NEXT: call callee_double_stack@plt
+; RV32IFD-NEXT: call callee_double_stack
; RV32IFD-NEXT: lw ra, 28(sp) # 4-byte Folded Reload
; RV32IFD-NEXT: addi sp, sp, 32
; RV32IFD-NEXT: ret
@@ -256,7 +256,7 @@ define double @caller_double_stack() nounwind {
; RV32IZFINXZDINX-NEXT: li a3, 0
; RV32IZFINXZDINX-NEXT: li a5, 0
; RV32IZFINXZDINX-NEXT: li a7, 0
-; RV32IZFINXZDINX-NEXT: call callee_double_stack@plt
+; RV32IZFINXZDINX-NEXT: call callee_double_stack
; RV32IZFINXZDINX-NEXT: lw ra, 28(sp) # 4-byte Folded Reload
; RV32IZFINXZDINX-NEXT: addi sp, sp, 32
; RV32IZFINXZDINX-NEXT: ret
diff --git a/llvm/test/CodeGen/RISCV/double-convert-strict.ll b/llvm/test/CodeGen/RISCV/double-convert-strict.ll
index adbe2c9e9754..967b119581af 100644
--- a/llvm/test/CodeGen/RISCV/double-convert-strict.ll
+++ b/llvm/test/CodeGen/RISCV/double-convert-strict.ll
@@ -46,7 +46,7 @@ define float @fcvt_s_d(double %a) nounwind strictfp {
; RV32I: # %bb.0:
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT: call __truncdfsf2@plt
+; RV32I-NEXT: call __truncdfsf2
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
@@ -55,7 +55,7 @@ define float @fcvt_s_d(double %a) nounwind strictfp {
; RV64I: # %bb.0:
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT: call __truncdfsf2@plt
+; RV64I-NEXT: call __truncdfsf2
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
@@ -90,7 +90,7 @@ define double @fcvt_d_s(float %a) nounwind strictfp {
; RV32I: # %bb.0:
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT: call __extendsfdf2@plt
+; RV32I-NEXT: call __extendsfdf2
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
@@ -99,7 +99,7 @@ define double @fcvt_d_s(float %a) nounwind strictfp {
; RV64I: # %bb.0:
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT: call __extendsfdf2@plt
+; RV64I-NEXT: call __extendsfdf2
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
@@ -134,7 +134,7 @@ define i32 @fcvt_w_d(double %a) nounwind strictfp {
; RV32I: # %bb.0:
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT: call __fixdfsi@plt
+; RV32I-NEXT: call __fixdfsi
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
@@ -143,7 +143,7 @@ define i32 @fcvt_w_d(double %a) nounwind strictfp {
; RV64I: # %bb.0:
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT: call __fixdfsi@plt
+; RV64I-NEXT: call __fixdfsi
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
@@ -180,7 +180,7 @@ define i32 @fcvt_wu_d(double %a) nounwind strictfp {
; RV32I: # %bb.0:
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT: call __fixunsdfsi@plt
+; RV32I-NEXT: call __fixunsdfsi
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
@@ -189,7 +189,7 @@ define i32 @fcvt_wu_d(double %a) nounwind strictfp {
; RV64I: # %bb.0:
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT: call __fixunsdfsi@plt
+; RV64I-NEXT: call __fixunsdfsi
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
@@ -232,7 +232,7 @@ define i32 @fcvt_wu_d_multiple_use(double %x, ptr %y) nounwind strictfp {
; RV32I: # %bb.0:
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT: call __fixunsdfsi@plt
+; RV32I-NEXT: call __fixunsdfsi
; RV32I-NEXT: seqz a1, a0
; RV32I-NEXT: add a0, a0, a1
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
@@ -243,7 +243,7 @@ define i32 @fcvt_wu_d_multiple_use(double %x, ptr %y) nounwind strictfp {
; RV64I: # %bb.0:
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT: call __fixunsdfsi@plt
+; RV64I-NEXT: call __fixunsdfsi
; RV64I-NEXT: seqz a1, a0
; RV64I-NEXT: add a0, a0, a1
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
@@ -281,7 +281,7 @@ define double @fcvt_d_w(i32 %a) nounwind strictfp {
; RV32I: # %bb.0:
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT: call __floatsidf@plt
+; RV32I-NEXT: call __floatsidf
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
@@ -291,7 +291,7 @@ define double @fcvt_d_w(i32 %a) nounwind strictfp {
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT: sext.w a0, a0
-; RV64I-NEXT: call __floatsidf@plt
+; RV64I-NEXT: call __floatsidf
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
@@ -330,7 +330,7 @@ define double @fcvt_d_w_load(ptr %p) nounwind strictfp {
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT: lw a0, 0(a0)
-; RV32I-NEXT: call __floatsidf@plt
+; RV32I-NEXT: call __floatsidf
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
@@ -340,7 +340,7 @@ define double @fcvt_d_w_load(ptr %p) nounwind strictfp {
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT: lw a0, 0(a0)
-; RV64I-NEXT: call __floatsidf@plt
+; RV64I-NEXT: call __floatsidf
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
@@ -375,7 +375,7 @@ define double @fcvt_d_wu(i32 %a) nounwind strictfp {
; RV32I: # %bb.0:
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT: call __floatunsidf@plt
+; RV32I-NEXT: call __floatunsidf
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
@@ -385,7 +385,7 @@ define double @fcvt_d_wu(i32 %a) nounwind strictfp {
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT: sext.w a0, a0
-; RV64I-NEXT: call __floatunsidf@plt
+; RV64I-NEXT: call __floatunsidf
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
@@ -430,7 +430,7 @@ define double @fcvt_d_wu_load(ptr %p) nounwind strictfp {
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT: lw a0, 0(a0)
-; RV32I-NEXT: call __floatunsidf@plt
+; RV32I-NEXT: call __floatunsidf
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
@@ -440,7 +440,7 @@ define double @fcvt_d_wu_load(ptr %p) nounwind strictfp {
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT: lw a0, 0(a0)
-; RV64I-NEXT: call __floatunsidf@plt
+; RV64I-NEXT: call __floatunsidf
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
@@ -454,7 +454,7 @@ define i64 @fcvt_l_d(double %a) nounwind strictfp {
; RV32IFD: # %bb.0:
; RV32IFD-NEXT: addi sp, sp, -16
; RV32IFD-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IFD-NEXT: call __fixdfdi@plt
+; RV32IFD-NEXT: call __fixdfdi
; RV32IFD-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32IFD-NEXT: addi sp, sp, 16
; RV32IFD-NEXT: ret
@@ -468,7 +468,7 @@ define i64 @fcvt_l_d(double %a) nounwind strictfp {
; RV32IZFINXZDINX: # %bb.0:
; RV32IZFINXZDINX-NEXT: addi sp, sp, -16
; RV32IZFINXZDINX-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IZFINXZDINX-NEXT: call __fixdfdi@plt
+; RV32IZFINXZDINX-NEXT: call __fixdfdi
; RV32IZFINXZDINX-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32IZFINXZDINX-NEXT: addi sp, sp, 16
; RV32IZFINXZDINX-NEXT: ret
@@ -482,7 +482,7 @@ define i64 @fcvt_l_d(double %a) nounwind strictfp {
; RV32I: # %bb.0:
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT: call __fixdfdi@plt
+; RV32I-NEXT: call __fixdfdi
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
@@ -491,7 +491,7 @@ define i64 @fcvt_l_d(double %a) nounwind strictfp {
; RV64I: # %bb.0:
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT: call __fixdfdi@plt
+; RV64I-NEXT: call __fixdfdi
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
@@ -505,7 +505,7 @@ define i64 @fcvt_lu_d(double %a) nounwind strictfp {
; RV32IFD: # %bb.0:
; RV32IFD-NEXT: addi sp, sp, -16
; RV32IFD-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IFD-NEXT: call __fixunsdfdi@plt
+; RV32IFD-NEXT: call __fixunsdfdi
; RV32IFD-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32IFD-NEXT: addi sp, sp, 16
; RV32IFD-NEXT: ret
@@ -519,7 +519,7 @@ define i64 @fcvt_lu_d(double %a) nounwind strictfp {
; RV32IZFINXZDINX: # %bb.0:
; RV32IZFINXZDINX-NEXT: addi sp, sp, -16
; RV32IZFINXZDINX-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IZFINXZDINX-NEXT: call __fixunsdfdi@plt
+; RV32IZFINXZDINX-NEXT: call __fixunsdfdi
; RV32IZFINXZDINX-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32IZFINXZDINX-NEXT: addi sp, sp, 16
; RV32IZFINXZDINX-NEXT: ret
@@ -533,7 +533,7 @@ define i64 @fcvt_lu_d(double %a) nounwind strictfp {
; RV32I: # %bb.0:
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT: call __fixunsdfdi@plt
+; RV32I-NEXT: call __fixunsdfdi
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
@@ -542,7 +542,7 @@ define i64 @fcvt_lu_d(double %a) nounwind strictfp {
; RV64I: # %bb.0:
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT: call __fixunsdfdi@plt
+; RV64I-NEXT: call __fixunsdfdi
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
@@ -556,7 +556,7 @@ define double @fcvt_d_l(i64 %a) nounwind strictfp {
; RV32IFD: # %bb.0:
; RV32IFD-NEXT: addi sp, sp, -16
; RV32IFD-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IFD-NEXT: call __floatdidf@plt
+; RV32IFD-NEXT: call __floatdidf
; RV32IFD-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32IFD-NEXT: addi sp, sp, 16
; RV32IFD-NEXT: ret
@@ -570,7 +570,7 @@ define double @fcvt_d_l(i64 %a) nounwind strictfp {
; RV32IZFINXZDINX: # %bb.0:
; RV32IZFINXZDINX-NEXT: addi sp, sp, -16
; RV32IZFINXZDINX-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IZFINXZDINX-NEXT: call __floatdidf@plt
+; RV32IZFINXZDINX-NEXT: call __floatdidf
; RV32IZFINXZDINX-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32IZFINXZDINX-NEXT: addi sp, sp, 16
; RV32IZFINXZDINX-NEXT: ret
@@ -584,7 +584,7 @@ define double @fcvt_d_l(i64 %a) nounwind strictfp {
; RV32I: # %bb.0:
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT: call __floatdidf@plt
+; RV32I-NEXT: call __floatdidf
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
@@ -593,7 +593,7 @@ define double @fcvt_d_l(i64 %a) nounwind strictfp {
; RV64I: # %bb.0:
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT: call __floatdidf@plt
+; RV64I-NEXT: call __floatdidf
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
@@ -607,7 +607,7 @@ define double @fcvt_d_lu(i64 %a) nounwind strictfp {
; RV32IFD: # %bb.0:
; RV32IFD-NEXT: addi sp, sp, -16
; RV32IFD-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IFD-NEXT: call __floatundidf@plt
+; RV32IFD-NEXT: call __floatundidf
; RV32IFD-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32IFD-NEXT: addi sp, sp, 16
; RV32IFD-NEXT: ret
@@ -621,7 +621,7 @@ define double @fcvt_d_lu(i64 %a) nounwind strictfp {
; RV32IZFINXZDINX: # %bb.0:
; RV32IZFINXZDINX-NEXT: addi sp, sp, -16
; RV32IZFINXZDINX-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IZFINXZDINX-NEXT: call __floatundidf@plt
+; RV32IZFINXZDINX-NEXT: call __floatundidf
; RV32IZFINXZDINX-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32IZFINXZDINX-NEXT: addi sp, sp, 16
; RV32IZFINXZDINX-NEXT: ret
@@ -635,7 +635,7 @@ define double @fcvt_d_lu(i64 %a) nounwind strictfp {
; RV32I: # %bb.0:
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT: call __floatundidf@plt
+; RV32I-NEXT: call __floatundidf
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
@@ -644,7 +644,7 @@ define double @fcvt_d_lu(i64 %a) nounwind strictfp {
; RV64I: # %bb.0:
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT: call __floatundidf@plt
+; RV64I-NEXT: call __floatundidf
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
@@ -679,7 +679,7 @@ define double @fcvt_d_w_i8(i8 signext %a) nounwind strictfp {
; RV32I: # %bb.0:
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT: call __floatsidf@plt
+; RV32I-NEXT: call __floatsidf
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
@@ -688,7 +688,7 @@ define double @fcvt_d_w_i8(i8 signext %a) nounwind strictfp {
; RV64I: # %bb.0:
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT: call __floatsidf@plt
+; RV64I-NEXT: call __floatsidf
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
@@ -723,7 +723,7 @@ define double @fcvt_d_wu_i8(i8 zeroext %a) nounwind strictfp {
; RV32I: # %bb.0:
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT: call __floatunsidf@plt
+; RV32I-NEXT: call __floatunsidf
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
@@ -732,7 +732,7 @@ define double @fcvt_d_wu_i8(i8 zeroext %a) nounwind strictfp {
; RV64I: # %bb.0:
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT: call __floatunsidf@plt
+; RV64I-NEXT: call __floatunsidf
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
@@ -767,7 +767,7 @@ define double @fcvt_d_w_i16(i16 signext %a) nounwind strictfp {
; RV32I: # %bb.0:
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT: call __floatsidf@plt
+; RV32I-NEXT: call __floatsidf
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
@@ -776,7 +776,7 @@ define double @fcvt_d_w_i16(i16 signext %a) nounwind strictfp {
; RV64I: # %bb.0:
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT: call __floatsidf@plt
+; RV64I-NEXT: call __floatsidf
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
@@ -811,7 +811,7 @@ define double @fcvt_d_wu_i16(i16 zeroext %a) nounwind strictfp {
; RV32I: # %bb.0:
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT: call __floatunsidf@plt
+; RV32I-NEXT: call __floatunsidf
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
@@ -820,7 +820,7 @@ define double @fcvt_d_wu_i16(i16 zeroext %a) nounwind strictfp {
; RV64I: # %bb.0:
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT: call __floatunsidf@plt
+; RV64I-NEXT: call __floatunsidf
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
@@ -871,7 +871,7 @@ define signext i32 @fcvt_d_w_demanded_bits(i32 signext %0, ptr %1) nounwind stri
; RV32I-NEXT: mv s0, a1
; RV32I-NEXT: addi s1, a0, 1
; RV32I-NEXT: mv a0, s1
-; RV32I-NEXT: call __floatsidf@plt
+; RV32I-NEXT: call __floatsidf
; RV32I-NEXT: sw a1, 4(s0)
; RV32I-NEXT: sw a0, 0(s0)
; RV32I-NEXT: mv a0, s1
@@ -890,7 +890,7 @@ define signext i32 @fcvt_d_w_demanded_bits(i32 signext %0, ptr %1) nounwind stri
; RV64I-NEXT: mv s0, a1
; RV64I-NEXT: addiw s1, a0, 1
; RV64I-NEXT: mv a0, s1
-; RV64I-NEXT: call __floatsidf@plt
+; RV64I-NEXT: call __floatsidf
; RV64I-NEXT: sd a0, 0(s0)
; RV64I-NEXT: mv a0, s1
; RV64I-NEXT: ld ra, 24(sp) # 8-byte Folded Reload
@@ -944,7 +944,7 @@ define signext i32 @fcvt_d_wu_demanded_bits(i32 signext %0, ptr %1) nounwind str
; RV32I-NEXT: mv s0, a1
; RV32I-NEXT: addi s1, a0, 1
; RV32I-NEXT: mv a0, s1
-; RV32I-NEXT: call __floatunsidf@plt
+; RV32I-NEXT: call __floatunsidf
; RV32I-NEXT: sw a1, 4(s0)
; RV32I-NEXT: sw a0, 0(s0)
; RV32I-NEXT: mv a0, s1
@@ -963,7 +963,7 @@ define signext i32 @fcvt_d_wu_demanded_bits(i32 signext %0, ptr %1) nounwind str
; RV64I-NEXT: mv s0, a1
; RV64I-NEXT: addiw s1, a0, 1
; RV64I-NEXT: mv a0, s1
-; RV64I-NEXT: call __floatunsidf@plt
+; RV64I-NEXT: call __floatunsidf
; RV64I-NEXT: sd a0, 0(s0)
; RV64I-NEXT: mv a0, s1
; RV64I-NEXT: ld ra, 24(sp) # 8-byte Folded Reload
diff --git a/llvm/test/CodeGen/RISCV/double-convert.ll b/llvm/test/CodeGen/RISCV/double-convert.ll
index 39ac963051b5..eb8ffe75ef76 100644
--- a/llvm/test/CodeGen/RISCV/double-convert.ll
+++ b/llvm/test/CodeGen/RISCV/double-convert.ll
@@ -38,7 +38,7 @@ define float @fcvt_s_d(double %a) nounwind {
; RV32I: # %bb.0:
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT: call __truncdfsf2@plt
+; RV32I-NEXT: call __truncdfsf2
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
@@ -47,7 +47,7 @@ define float @fcvt_s_d(double %a) nounwind {
; RV64I: # %bb.0:
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT: call __truncdfsf2@plt
+; RV64I-NEXT: call __truncdfsf2
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
@@ -81,7 +81,7 @@ define double @fcvt_d_s(float %a) nounwind {
; RV32I: # %bb.0:
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT: call __extendsfdf2@plt
+; RV32I-NEXT: call __extendsfdf2
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
@@ -90,7 +90,7 @@ define double @fcvt_d_s(float %a) nounwind {
; RV64I: # %bb.0:
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT: call __extendsfdf2@plt
+; RV64I-NEXT: call __extendsfdf2
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
@@ -124,7 +124,7 @@ define i32 @fcvt_w_d(double %a) nounwind {
; RV32I: # %bb.0:
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT: call __fixdfsi@plt
+; RV32I-NEXT: call __fixdfsi
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
@@ -133,7 +133,7 @@ define i32 @fcvt_w_d(double %a) nounwind {
; RV64I: # %bb.0:
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT: call __fixdfsi@plt
+; RV64I-NEXT: call __fixdfsi
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
@@ -189,17 +189,17 @@ define i32 @fcvt_w_d_sat(double %a) nounwind {
; RV32I-NEXT: lui a3, 269824
; RV32I-NEXT: addi a3, a3, -1
; RV32I-NEXT: lui a2, 1047552
-; RV32I-NEXT: call __gtdf2@plt
+; RV32I-NEXT: call __gtdf2
; RV32I-NEXT: mv s2, a0
; RV32I-NEXT: lui a3, 794112
; RV32I-NEXT: mv a0, s1
; RV32I-NEXT: mv a1, s0
; RV32I-NEXT: li a2, 0
-; RV32I-NEXT: call __gedf2@plt
+; RV32I-NEXT: call __gedf2
; RV32I-NEXT: mv s4, a0
; RV32I-NEXT: mv a0, s1
; RV32I-NEXT: mv a1, s0
-; RV32I-NEXT: call __fixdfsi@plt
+; RV32I-NEXT: call __fixdfsi
; RV32I-NEXT: mv s3, a0
; RV32I-NEXT: lui a0, 524288
; RV32I-NEXT: bgez s4, .LBB3_2
@@ -214,7 +214,7 @@ define i32 @fcvt_w_d_sat(double %a) nounwind {
; RV32I-NEXT: mv a1, s0
; RV32I-NEXT: mv a2, s1
; RV32I-NEXT: mv a3, s0
-; RV32I-NEXT: call __unorddf2@plt
+; RV32I-NEXT: call __unorddf2
; RV32I-NEXT: snez a0, a0
; RV32I-NEXT: addi a0, a0, -1
; RV32I-NEXT: and a0, a0, s3
@@ -238,10 +238,10 @@ define i32 @fcvt_w_d_sat(double %a) nounwind {
; RV64I-NEXT: mv s0, a0
; RV64I-NEXT: li a1, -497
; RV64I-NEXT: slli a1, a1, 53
-; RV64I-NEXT: call __gedf2@plt
+; RV64I-NEXT: call __gedf2
; RV64I-NEXT: mv s2, a0
; RV64I-NEXT: mv a0, s0
-; RV64I-NEXT: call __fixdfdi@plt
+; RV64I-NEXT: call __fixdfdi
; RV64I-NEXT: mv s1, a0
; RV64I-NEXT: lui s3, 524288
; RV64I-NEXT: bgez s2, .LBB3_2
@@ -253,14 +253,14 @@ define i32 @fcvt_w_d_sat(double %a) nounwind {
; RV64I-NEXT: addi a0, a0, -1
; RV64I-NEXT: slli a1, a0, 22
; RV64I-NEXT: mv a0, s0
-; RV64I-NEXT: call __gtdf2@plt
+; RV64I-NEXT: call __gtdf2
; RV64I-NEXT: blez a0, .LBB3_4
; RV64I-NEXT: # %bb.3: # %start
; RV64I-NEXT: addiw s1, s3, -1
; RV64I-NEXT: .LBB3_4: # %start
; RV64I-NEXT: mv a0, s0
; RV64I-NEXT: mv a1, s0
-; RV64I-NEXT: call __unorddf2@plt
+; RV64I-NEXT: call __unorddf2
; RV64I-NEXT: snez a0, a0
; RV64I-NEXT: addi a0, a0, -1
; RV64I-NEXT: and a0, a0, s1
@@ -305,7 +305,7 @@ define i32 @fcvt_wu_d(double %a) nounwind {
; RV32I: # %bb.0:
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT: call __fixunsdfsi@plt
+; RV32I-NEXT: call __fixunsdfsi
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
@@ -314,7 +314,7 @@ define i32 @fcvt_wu_d(double %a) nounwind {
; RV64I: # %bb.0:
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT: call __fixunsdfsi@plt
+; RV64I-NEXT: call __fixunsdfsi
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
@@ -356,7 +356,7 @@ define i32 @fcvt_wu_d_multiple_use(double %x, ptr %y) nounwind {
; RV32I: # %bb.0:
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT: call __fixunsdfsi@plt
+; RV32I-NEXT: call __fixunsdfsi
; RV32I-NEXT: seqz a1, a0
; RV32I-NEXT: add a0, a0, a1
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
@@ -367,7 +367,7 @@ define i32 @fcvt_wu_d_multiple_use(double %x, ptr %y) nounwind {
; RV64I: # %bb.0:
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT: call __fixunsdfsi@plt
+; RV64I-NEXT: call __fixunsdfsi
; RV64I-NEXT: seqz a1, a0
; RV64I-NEXT: add a0, a0, a1
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
@@ -439,19 +439,19 @@ define i32 @fcvt_wu_d_sat(double %a) nounwind {
; RV32I-NEXT: lui a3, 270080
; RV32I-NEXT: addi a3, a3, -1
; RV32I-NEXT: lui a2, 1048064
-; RV32I-NEXT: call __gtdf2@plt
+; RV32I-NEXT: call __gtdf2
; RV32I-NEXT: sgtz a0, a0
; RV32I-NEXT: neg s2, a0
; RV32I-NEXT: mv a0, s1
; RV32I-NEXT: mv a1, s0
; RV32I-NEXT: li a2, 0
; RV32I-NEXT: li a3, 0
-; RV32I-NEXT: call __gedf2@plt
+; RV32I-NEXT: call __gedf2
; RV32I-NEXT: slti a0, a0, 0
; RV32I-NEXT: addi s3, a0, -1
; RV32I-NEXT: mv a0, s1
; RV32I-NEXT: mv a1, s0
-; RV32I-NEXT: call __fixunsdfsi@plt
+; RV32I-NEXT: call __fixunsdfsi
; RV32I-NEXT: and a0, s3, a0
; RV32I-NEXT: or a0, s2, a0
; RV32I-NEXT: lw ra, 28(sp) # 4-byte Folded Reload
@@ -471,17 +471,17 @@ define i32 @fcvt_wu_d_sat(double %a) nounwind {
; RV64I-NEXT: sd s2, 0(sp) # 8-byte Folded Spill
; RV64I-NEXT: mv s2, a0
; RV64I-NEXT: li a1, 0
-; RV64I-NEXT: call __gedf2@plt
+; RV64I-NEXT: call __gedf2
; RV64I-NEXT: mv s0, a0
; RV64I-NEXT: mv a0, s2
-; RV64I-NEXT: call __fixunsdfdi@plt
+; RV64I-NEXT: call __fixunsdfdi
; RV64I-NEXT: mv s1, a0
; RV64I-NEXT: li a0, 1055
; RV64I-NEXT: slli a0, a0, 31
; RV64I-NEXT: addi a0, a0, -1
; RV64I-NEXT: slli a1, a0, 21
; RV64I-NEXT: mv a0, s2
-; RV64I-NEXT: call __gtdf2@plt
+; RV64I-NEXT: call __gtdf2
; RV64I-NEXT: blez a0, .LBB6_2
; RV64I-NEXT: # %bb.1: # %start
; RV64I-NEXT: li a0, -1
@@ -530,7 +530,7 @@ define double @fcvt_d_w(i32 %a) nounwind {
; RV32I: # %bb.0:
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT: call __floatsidf@plt
+; RV32I-NEXT: call __floatsidf
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
@@ -540,7 +540,7 @@ define double @fcvt_d_w(i32 %a) nounwind {
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT: sext.w a0, a0
-; RV64I-NEXT: call __floatsidf@plt
+; RV64I-NEXT: call __floatsidf
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
@@ -578,7 +578,7 @@ define double @fcvt_d_w_load(ptr %p) nounwind {
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT: lw a0, 0(a0)
-; RV32I-NEXT: call __floatsidf@plt
+; RV32I-NEXT: call __floatsidf
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
@@ -588,7 +588,7 @@ define double @fcvt_d_w_load(ptr %p) nounwind {
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT: lw a0, 0(a0)
-; RV64I-NEXT: call __floatsidf@plt
+; RV64I-NEXT: call __floatsidf
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
@@ -623,7 +623,7 @@ define double @fcvt_d_wu(i32 %a) nounwind {
; RV32I: # %bb.0:
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT: call __floatunsidf@plt
+; RV32I-NEXT: call __floatunsidf
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
@@ -633,7 +633,7 @@ define double @fcvt_d_wu(i32 %a) nounwind {
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT: sext.w a0, a0
-; RV64I-NEXT: call __floatunsidf@plt
+; RV64I-NEXT: call __floatunsidf
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
@@ -677,7 +677,7 @@ define double @fcvt_d_wu_load(ptr %p) nounwind {
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT: lw a0, 0(a0)
-; RV32I-NEXT: call __floatunsidf@plt
+; RV32I-NEXT: call __floatunsidf
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
@@ -687,7 +687,7 @@ define double @fcvt_d_wu_load(ptr %p) nounwind {
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT: lw a0, 0(a0)
-; RV64I-NEXT: call __floatunsidf@plt
+; RV64I-NEXT: call __floatunsidf
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
@@ -701,7 +701,7 @@ define i64 @fcvt_l_d(double %a) nounwind {
; RV32IFD: # %bb.0:
; RV32IFD-NEXT: addi sp, sp, -16
; RV32IFD-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IFD-NEXT: call __fixdfdi@plt
+; RV32IFD-NEXT: call __fixdfdi
; RV32IFD-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32IFD-NEXT: addi sp, sp, 16
; RV32IFD-NEXT: ret
@@ -715,7 +715,7 @@ define i64 @fcvt_l_d(double %a) nounwind {
; RV32IZFINXZDINX: # %bb.0:
; RV32IZFINXZDINX-NEXT: addi sp, sp, -16
; RV32IZFINXZDINX-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IZFINXZDINX-NEXT: call __fixdfdi@plt
+; RV32IZFINXZDINX-NEXT: call __fixdfdi
; RV32IZFINXZDINX-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32IZFINXZDINX-NEXT: addi sp, sp, 16
; RV32IZFINXZDINX-NEXT: ret
@@ -729,7 +729,7 @@ define i64 @fcvt_l_d(double %a) nounwind {
; RV32I: # %bb.0:
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT: call __fixdfdi@plt
+; RV32I-NEXT: call __fixdfdi
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
@@ -738,7 +738,7 @@ define i64 @fcvt_l_d(double %a) nounwind {
; RV64I: # %bb.0:
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT: call __fixdfdi@plt
+; RV64I-NEXT: call __fixdfdi
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
@@ -757,7 +757,7 @@ define i64 @fcvt_l_d_sat(double %a) nounwind {
; RV32IFD-NEXT: fld fa5, %lo(.LCPI12_0)(a0)
; RV32IFD-NEXT: fmv.d fs0, fa0
; RV32IFD-NEXT: fle.d s0, fa5, fa0
-; RV32IFD-NEXT: call __fixdfdi@plt
+; RV32IFD-NEXT: call __fixdfdi
; RV32IFD-NEXT: lui a4, 524288
; RV32IFD-NEXT: lui a2, 524288
; RV32IFD-NEXT: beqz s0, .LBB12_2
@@ -804,7 +804,7 @@ define i64 @fcvt_l_d_sat(double %a) nounwind {
; RV32IZFINXZDINX-NEXT: sw a1, 12(sp)
; RV32IZFINXZDINX-NEXT: lw s0, 8(sp)
; RV32IZFINXZDINX-NEXT: lw s1, 12(sp)
-; RV32IZFINXZDINX-NEXT: call __fixdfdi@plt
+; RV32IZFINXZDINX-NEXT: call __fixdfdi
; RV32IZFINXZDINX-NEXT: lui a2, %hi(.LCPI12_0)
; RV32IZFINXZDINX-NEXT: lw a3, %lo(.LCPI12_0+4)(a2)
; RV32IZFINXZDINX-NEXT: lw a2, %lo(.LCPI12_0)(a2)
@@ -861,17 +861,17 @@ define i64 @fcvt_l_d_sat(double %a) nounwind {
; RV32I-NEXT: lui a3, 278016
; RV32I-NEXT: addi a3, a3, -1
; RV32I-NEXT: li a2, -1
-; RV32I-NEXT: call __gtdf2@plt
+; RV32I-NEXT: call __gtdf2
; RV32I-NEXT: mv s2, a0
; RV32I-NEXT: lui a3, 802304
; RV32I-NEXT: mv a0, s1
; RV32I-NEXT: mv a1, s0
; RV32I-NEXT: li a2, 0
-; RV32I-NEXT: call __gedf2@plt
+; RV32I-NEXT: call __gedf2
; RV32I-NEXT: mv s3, a0
; RV32I-NEXT: mv a0, s1
; RV32I-NEXT: mv a1, s0
-; RV32I-NEXT: call __fixdfdi@plt
+; RV32I-NEXT: call __fixdfdi
; RV32I-NEXT: mv s4, a0
; RV32I-NEXT: mv s5, a1
; RV32I-NEXT: lui a0, 524288
@@ -887,7 +887,7 @@ define i64 @fcvt_l_d_sat(double %a) nounwind {
; RV32I-NEXT: mv a1, s0
; RV32I-NEXT: mv a2, s1
; RV32I-NEXT: mv a3, s0
-; RV32I-NEXT: call __unorddf2@plt
+; RV32I-NEXT: call __unorddf2
; RV32I-NEXT: snez a0, a0
; RV32I-NEXT: addi a0, a0, -1
; RV32I-NEXT: and a1, a0, s5
@@ -919,10 +919,10 @@ define i64 @fcvt_l_d_sat(double %a) nounwind {
; RV64I-NEXT: mv s0, a0
; RV64I-NEXT: li a1, -481
; RV64I-NEXT: slli a1, a1, 53
-; RV64I-NEXT: call __gedf2@plt
+; RV64I-NEXT: call __gedf2
; RV64I-NEXT: mv s2, a0
; RV64I-NEXT: mv a0, s0
-; RV64I-NEXT: call __fixdfdi@plt
+; RV64I-NEXT: call __fixdfdi
; RV64I-NEXT: mv s1, a0
; RV64I-NEXT: li s3, -1
; RV64I-NEXT: bgez s2, .LBB12_2
@@ -933,14 +933,14 @@ define i64 @fcvt_l_d_sat(double %a) nounwind {
; RV64I-NEXT: slli a0, a0, 53
; RV64I-NEXT: addi a1, a0, -1
; RV64I-NEXT: mv a0, s0
-; RV64I-NEXT: call __gtdf2@plt
+; RV64I-NEXT: call __gtdf2
; RV64I-NEXT: blez a0, .LBB12_4
; RV64I-NEXT: # %bb.3: # %start
; RV64I-NEXT: srli s1, s3, 1
; RV64I-NEXT: .LBB12_4: # %start
; RV64I-NEXT: mv a0, s0
; RV64I-NEXT: mv a1, s0
-; RV64I-NEXT: call __unorddf2@plt
+; RV64I-NEXT: call __unorddf2
; RV64I-NEXT: snez a0, a0
; RV64I-NEXT: addi a0, a0, -1
; RV64I-NEXT: and a0, a0, s1
@@ -962,7 +962,7 @@ define i64 @fcvt_lu_d(double %a) nounwind {
; RV32IFD: # %bb.0:
; RV32IFD-NEXT: addi sp, sp, -16
; RV32IFD-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IFD-NEXT: call __fixunsdfdi@plt
+; RV32IFD-NEXT: call __fixunsdfdi
; RV32IFD-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32IFD-NEXT: addi sp, sp, 16
; RV32IFD-NEXT: ret
@@ -976,7 +976,7 @@ define i64 @fcvt_lu_d(double %a) nounwind {
; RV32IZFINXZDINX: # %bb.0:
; RV32IZFINXZDINX-NEXT: addi sp, sp, -16
; RV32IZFINXZDINX-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IZFINXZDINX-NEXT: call __fixunsdfdi@plt
+; RV32IZFINXZDINX-NEXT: call __fixunsdfdi
; RV32IZFINXZDINX-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32IZFINXZDINX-NEXT: addi sp, sp, 16
; RV32IZFINXZDINX-NEXT: ret
@@ -990,7 +990,7 @@ define i64 @fcvt_lu_d(double %a) nounwind {
; RV32I: # %bb.0:
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT: call __fixunsdfdi@plt
+; RV32I-NEXT: call __fixunsdfdi
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
@@ -999,7 +999,7 @@ define i64 @fcvt_lu_d(double %a) nounwind {
; RV64I: # %bb.0:
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT: call __fixunsdfdi@plt
+; RV64I-NEXT: call __fixunsdfdi
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
@@ -1018,7 +1018,7 @@ define i64 @fcvt_lu_d_sat(double %a) nounwind {
; RV32IFD-NEXT: fcvt.d.w fa5, zero
; RV32IFD-NEXT: fle.d a0, fa5, fa0
; RV32IFD-NEXT: neg s0, a0
-; RV32IFD-NEXT: call __fixunsdfdi@plt
+; RV32IFD-NEXT: call __fixunsdfdi
; RV32IFD-NEXT: lui a2, %hi(.LCPI14_0)
; RV32IFD-NEXT: fld fa5, %lo(.LCPI14_0)(a2)
; RV32IFD-NEXT: and a0, s0, a0
@@ -1052,7 +1052,7 @@ define i64 @fcvt_lu_d_sat(double %a) nounwind {
; RV32IZFINXZDINX-NEXT: sw a1, 12(sp)
; RV32IZFINXZDINX-NEXT: lw s0, 8(sp)
; RV32IZFINXZDINX-NEXT: lw s1, 12(sp)
-; RV32IZFINXZDINX-NEXT: call __fixunsdfdi@plt
+; RV32IZFINXZDINX-NEXT: call __fixunsdfdi
; RV32IZFINXZDINX-NEXT: fcvt.d.w a2, zero
; RV32IZFINXZDINX-NEXT: lui a4, %hi(.LCPI14_0)
; RV32IZFINXZDINX-NEXT: lw a5, %lo(.LCPI14_0+4)(a4)
@@ -1093,19 +1093,19 @@ define i64 @fcvt_lu_d_sat(double %a) nounwind {
; RV32I-NEXT: lui a3, 278272
; RV32I-NEXT: addi a3, a3, -1
; RV32I-NEXT: li a2, -1
-; RV32I-NEXT: call __gtdf2@plt
+; RV32I-NEXT: call __gtdf2
; RV32I-NEXT: sgtz a0, a0
; RV32I-NEXT: neg s2, a0
; RV32I-NEXT: mv a0, s1
; RV32I-NEXT: mv a1, s0
; RV32I-NEXT: li a2, 0
; RV32I-NEXT: li a3, 0
-; RV32I-NEXT: call __gedf2@plt
+; RV32I-NEXT: call __gedf2
; RV32I-NEXT: slti a0, a0, 0
; RV32I-NEXT: addi s3, a0, -1
; RV32I-NEXT: mv a0, s1
; RV32I-NEXT: mv a1, s0
-; RV32I-NEXT: call __fixunsdfdi@plt
+; RV32I-NEXT: call __fixunsdfdi
; RV32I-NEXT: and a0, s3, a0
; RV32I-NEXT: or a0, s2, a0
; RV32I-NEXT: and a1, s3, a1
@@ -1126,17 +1126,17 @@ define i64 @fcvt_lu_d_sat(double %a) nounwind {
; RV64I-NEXT: sd s1, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT: mv s0, a0
; RV64I-NEXT: li a1, 0
-; RV64I-NEXT: call __gedf2@plt
+; RV64I-NEXT: call __gedf2
; RV64I-NEXT: slti a0, a0, 0
; RV64I-NEXT: addi s1, a0, -1
; RV64I-NEXT: mv a0, s0
-; RV64I-NEXT: call __fixunsdfdi@plt
+; RV64I-NEXT: call __fixunsdfdi
; RV64I-NEXT: and s1, s1, a0
; RV64I-NEXT: li a0, 1087
; RV64I-NEXT: slli a0, a0, 52
; RV64I-NEXT: addi a1, a0, -1
; RV64I-NEXT: mv a0, s0
-; RV64I-NEXT: call __gtdf2@plt
+; RV64I-NEXT: call __gtdf2
; RV64I-NEXT: sgtz a0, a0
; RV64I-NEXT: neg a0, a0
; RV64I-NEXT: or a0, a0, s1
@@ -1196,7 +1196,7 @@ define i64 @fmv_x_d(double %a, double %b) nounwind {
; RV32I: # %bb.0:
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT: call __adddf3@plt
+; RV32I-NEXT: call __adddf3
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
@@ -1205,7 +1205,7 @@ define i64 @fmv_x_d(double %a, double %b) nounwind {
; RV64I: # %bb.0:
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT: call __adddf3@plt
+; RV64I-NEXT: call __adddf3
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
@@ -1219,7 +1219,7 @@ define double @fcvt_d_l(i64 %a) nounwind {
; RV32IFD: # %bb.0:
; RV32IFD-NEXT: addi sp, sp, -16
; RV32IFD-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IFD-NEXT: call __floatdidf@plt
+; RV32IFD-NEXT: call __floatdidf
; RV32IFD-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32IFD-NEXT: addi sp, sp, 16
; RV32IFD-NEXT: ret
@@ -1233,7 +1233,7 @@ define double @fcvt_d_l(i64 %a) nounwind {
; RV32IZFINXZDINX: # %bb.0:
; RV32IZFINXZDINX-NEXT: addi sp, sp, -16
; RV32IZFINXZDINX-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IZFINXZDINX-NEXT: call __floatdidf@plt
+; RV32IZFINXZDINX-NEXT: call __floatdidf
; RV32IZFINXZDINX-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32IZFINXZDINX-NEXT: addi sp, sp, 16
; RV32IZFINXZDINX-NEXT: ret
@@ -1247,7 +1247,7 @@ define double @fcvt_d_l(i64 %a) nounwind {
; RV32I: # %bb.0:
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT: call __floatdidf@plt
+; RV32I-NEXT: call __floatdidf
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
@@ -1256,7 +1256,7 @@ define double @fcvt_d_l(i64 %a) nounwind {
; RV64I: # %bb.0:
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT: call __floatdidf@plt
+; RV64I-NEXT: call __floatdidf
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
@@ -1269,7 +1269,7 @@ define double @fcvt_d_lu(i64 %a) nounwind {
; RV32IFD: # %bb.0:
; RV32IFD-NEXT: addi sp, sp, -16
; RV32IFD-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IFD-NEXT: call __floatundidf@plt
+; RV32IFD-NEXT: call __floatundidf
; RV32IFD-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32IFD-NEXT: addi sp, sp, 16
; RV32IFD-NEXT: ret
@@ -1283,7 +1283,7 @@ define double @fcvt_d_lu(i64 %a) nounwind {
; RV32IZFINXZDINX: # %bb.0:
; RV32IZFINXZDINX-NEXT: addi sp, sp, -16
; RV32IZFINXZDINX-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IZFINXZDINX-NEXT: call __floatundidf@plt
+; RV32IZFINXZDINX-NEXT: call __floatundidf
; RV32IZFINXZDINX-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32IZFINXZDINX-NEXT: addi sp, sp, 16
; RV32IZFINXZDINX-NEXT: ret
@@ -1297,7 +1297,7 @@ define double @fcvt_d_lu(i64 %a) nounwind {
; RV32I: # %bb.0:
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT: call __floatundidf@plt
+; RV32I-NEXT: call __floatundidf
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
@@ -1306,7 +1306,7 @@ define double @fcvt_d_lu(i64 %a) nounwind {
; RV64I: # %bb.0:
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT: call __floatundidf@plt
+; RV64I-NEXT: call __floatundidf
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
@@ -1364,7 +1364,7 @@ define double @fmv_d_x(i64 %a, i64 %b) nounwind {
; RV32I: # %bb.0:
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT: call __adddf3@plt
+; RV32I-NEXT: call __adddf3
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
@@ -1373,7 +1373,7 @@ define double @fmv_d_x(i64 %a, i64 %b) nounwind {
; RV64I: # %bb.0:
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT: call __adddf3@plt
+; RV64I-NEXT: call __adddf3
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
@@ -1409,7 +1409,7 @@ define double @fcvt_d_w_i8(i8 signext %a) nounwind {
; RV32I: # %bb.0:
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT: call __floatsidf@plt
+; RV32I-NEXT: call __floatsidf
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
@@ -1418,7 +1418,7 @@ define double @fcvt_d_w_i8(i8 signext %a) nounwind {
; RV64I: # %bb.0:
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT: call __floatsidf@plt
+; RV64I-NEXT: call __floatsidf
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
@@ -1452,7 +1452,7 @@ define double @fcvt_d_wu_i8(i8 zeroext %a) nounwind {
; RV32I: # %bb.0:
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT: call __floatunsidf@plt
+; RV32I-NEXT: call __floatunsidf
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
@@ -1461,7 +1461,7 @@ define double @fcvt_d_wu_i8(i8 zeroext %a) nounwind {
; RV64I: # %bb.0:
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT: call __floatunsidf@plt
+; RV64I-NEXT: call __floatunsidf
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
@@ -1495,7 +1495,7 @@ define double @fcvt_d_w_i16(i16 signext %a) nounwind {
; RV32I: # %bb.0:
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT: call __floatsidf@plt
+; RV32I-NEXT: call __floatsidf
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
@@ -1504,7 +1504,7 @@ define double @fcvt_d_w_i16(i16 signext %a) nounwind {
; RV64I: # %bb.0:
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT: call __floatsidf@plt
+; RV64I-NEXT: call __floatsidf
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
@@ -1538,7 +1538,7 @@ define double @fcvt_d_wu_i16(i16 zeroext %a) nounwind {
; RV32I: # %bb.0:
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT: call __floatunsidf@plt
+; RV32I-NEXT: call __floatunsidf
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
@@ -1547,7 +1547,7 @@ define double @fcvt_d_wu_i16(i16 zeroext %a) nounwind {
; RV64I: # %bb.0:
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT: call __floatunsidf@plt
+; RV64I-NEXT: call __floatunsidf
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
@@ -1597,7 +1597,7 @@ define signext i32 @fcvt_d_w_demanded_bits(i32 signext %0, ptr %1) nounwind {
; RV32I-NEXT: mv s0, a1
; RV32I-NEXT: addi s1, a0, 1
; RV32I-NEXT: mv a0, s1
-; RV32I-NEXT: call __floatsidf@plt
+; RV32I-NEXT: call __floatsidf
; RV32I-NEXT: sw a1, 4(s0)
; RV32I-NEXT: sw a0, 0(s0)
; RV32I-NEXT: mv a0, s1
@@ -1616,7 +1616,7 @@ define signext i32 @fcvt_d_w_demanded_bits(i32 signext %0, ptr %1) nounwind {
; RV64I-NEXT: mv s0, a1
; RV64I-NEXT: addiw s1, a0, 1
; RV64I-NEXT: mv a0, s1
-; RV64I-NEXT: call __floatsidf@plt
+; RV64I-NEXT: call __floatsidf
; RV64I-NEXT: sd a0, 0(s0)
; RV64I-NEXT: mv a0, s1
; RV64I-NEXT: ld ra, 24(sp) # 8-byte Folded Reload
@@ -1670,7 +1670,7 @@ define signext i32 @fcvt_d_wu_demanded_bits(i32 signext %0, ptr %1) nounwind {
; RV32I-NEXT: mv s0, a1
; RV32I-NEXT: addi s1, a0, 1
; RV32I-NEXT: mv a0, s1
-; RV32I-NEXT: call __floatunsidf@plt
+; RV32I-NEXT: call __floatunsidf
; RV32I-NEXT: sw a1, 4(s0)
; RV32I-NEXT: sw a0, 0(s0)
; RV32I-NEXT: mv a0, s1
@@ -1689,7 +1689,7 @@ define signext i32 @fcvt_d_wu_demanded_bits(i32 signext %0, ptr %1) nounwind {
; RV64I-NEXT: mv s0, a1
; RV64I-NEXT: addiw s1, a0, 1
; RV64I-NEXT: mv a0, s1
-; RV64I-NEXT: call __floatunsidf@plt
+; RV64I-NEXT: call __floatunsidf
; RV64I-NEXT: sd a0, 0(s0)
; RV64I-NEXT: mv a0, s1
; RV64I-NEXT: ld ra, 24(sp) # 8-byte Folded Reload
@@ -1734,7 +1734,7 @@ define signext i16 @fcvt_w_s_i16(double %a) nounwind {
; RV32I: # %bb.0:
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT: call __fixdfsi@plt
+; RV32I-NEXT: call __fixdfsi
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
@@ -1743,7 +1743,7 @@ define signext i16 @fcvt_w_s_i16(double %a) nounwind {
; RV64I: # %bb.0:
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT: call __fixdfdi@plt
+; RV64I-NEXT: call __fixdfdi
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
@@ -1831,17 +1831,17 @@ define signext i16 @fcvt_w_s_sat_i16(double %a) nounwind {
; RV32I-NEXT: addi a3, a0, -64
; RV32I-NEXT: mv a0, s1
; RV32I-NEXT: li a2, 0
-; RV32I-NEXT: call __gtdf2@plt
+; RV32I-NEXT: call __gtdf2
; RV32I-NEXT: mv s2, a0
; RV32I-NEXT: lui a3, 790016
; RV32I-NEXT: mv a0, s1
; RV32I-NEXT: mv a1, s0
; RV32I-NEXT: li a2, 0
-; RV32I-NEXT: call __gedf2@plt
+; RV32I-NEXT: call __gedf2
; RV32I-NEXT: mv s4, a0
; RV32I-NEXT: mv a0, s1
; RV32I-NEXT: mv a1, s0
-; RV32I-NEXT: call __fixdfsi@plt
+; RV32I-NEXT: call __fixdfsi
; RV32I-NEXT: mv s3, a0
; RV32I-NEXT: bgez s4, .LBB26_2
; RV32I-NEXT: # %bb.1: # %start
@@ -1856,7 +1856,7 @@ define signext i16 @fcvt_w_s_sat_i16(double %a) nounwind {
; RV32I-NEXT: mv a1, s0
; RV32I-NEXT: mv a2, s1
; RV32I-NEXT: mv a3, s0
-; RV32I-NEXT: call __unorddf2@plt
+; RV32I-NEXT: call __unorddf2
; RV32I-NEXT: snez a0, a0
; RV32I-NEXT: addi a0, a0, -1
; RV32I-NEXT: and a0, a0, s3
@@ -1881,10 +1881,10 @@ define signext i16 @fcvt_w_s_sat_i16(double %a) nounwind {
; RV64I-NEXT: mv s0, a0
; RV64I-NEXT: li a1, -505
; RV64I-NEXT: slli a1, a1, 53
-; RV64I-NEXT: call __gedf2@plt
+; RV64I-NEXT: call __gedf2
; RV64I-NEXT: mv s2, a0
; RV64I-NEXT: mv a0, s0
-; RV64I-NEXT: call __fixdfdi@plt
+; RV64I-NEXT: call __fixdfdi
; RV64I-NEXT: mv s1, a0
; RV64I-NEXT: bgez s2, .LBB26_2
; RV64I-NEXT: # %bb.1: # %start
@@ -1894,7 +1894,7 @@ define signext i16 @fcvt_w_s_sat_i16(double %a) nounwind {
; RV64I-NEXT: addi a0, a0, -1
; RV64I-NEXT: slli a1, a0, 38
; RV64I-NEXT: mv a0, s0
-; RV64I-NEXT: call __gtdf2@plt
+; RV64I-NEXT: call __gtdf2
; RV64I-NEXT: blez a0, .LBB26_4
; RV64I-NEXT: # %bb.3: # %start
; RV64I-NEXT: lui s1, 8
@@ -1902,7 +1902,7 @@ define signext i16 @fcvt_w_s_sat_i16(double %a) nounwind {
; RV64I-NEXT: .LBB26_4: # %start
; RV64I-NEXT: mv a0, s0
; RV64I-NEXT: mv a1, s0
-; RV64I-NEXT: call __unorddf2@plt
+; RV64I-NEXT: call __unorddf2
; RV64I-NEXT: snez a0, a0
; RV64I-NEXT: addi a0, a0, -1
; RV64I-NEXT: and a0, a0, s1
@@ -1951,7 +1951,7 @@ define zeroext i16 @fcvt_wu_s_i16(double %a) nounwind {
; RV32I: # %bb.0:
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT: call __fixunsdfsi@plt
+; RV32I-NEXT: call __fixunsdfsi
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
@@ -1960,7 +1960,7 @@ define zeroext i16 @fcvt_wu_s_i16(double %a) nounwind {
; RV64I: # %bb.0:
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT: call __fixunsdfdi@plt
+; RV64I-NEXT: call __fixunsdfdi
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
@@ -2028,17 +2028,17 @@ define zeroext i16 @fcvt_wu_s_sat_i16(double %a) nounwind {
; RV32I-NEXT: lui a3, 265984
; RV32I-NEXT: addi a3, a3, -32
; RV32I-NEXT: li a2, 0
-; RV32I-NEXT: call __gtdf2@plt
+; RV32I-NEXT: call __gtdf2
; RV32I-NEXT: mv s3, a0
; RV32I-NEXT: mv a0, s2
; RV32I-NEXT: mv a1, s1
; RV32I-NEXT: li a2, 0
; RV32I-NEXT: li a3, 0
-; RV32I-NEXT: call __gedf2@plt
+; RV32I-NEXT: call __gedf2
; RV32I-NEXT: mv s0, a0
; RV32I-NEXT: mv a0, s2
; RV32I-NEXT: mv a1, s1
-; RV32I-NEXT: call __fixunsdfsi@plt
+; RV32I-NEXT: call __fixunsdfsi
; RV32I-NEXT: lui a1, 16
; RV32I-NEXT: addi a1, a1, -1
; RV32I-NEXT: blez s3, .LBB28_2
@@ -2068,16 +2068,16 @@ define zeroext i16 @fcvt_wu_s_sat_i16(double %a) nounwind {
; RV64I-NEXT: sd s2, 0(sp) # 8-byte Folded Spill
; RV64I-NEXT: mv s2, a0
; RV64I-NEXT: li a1, 0
-; RV64I-NEXT: call __gedf2@plt
+; RV64I-NEXT: call __gedf2
; RV64I-NEXT: mv s0, a0
; RV64I-NEXT: mv a0, s2
-; RV64I-NEXT: call __fixunsdfdi@plt
+; RV64I-NEXT: call __fixunsdfdi
; RV64I-NEXT: mv s1, a0
; RV64I-NEXT: lui a0, 8312
; RV64I-NEXT: addi a0, a0, -1
; RV64I-NEXT: slli a1, a0, 37
; RV64I-NEXT: mv a0, s2
-; RV64I-NEXT: call __gtdf2@plt
+; RV64I-NEXT: call __gtdf2
; RV64I-NEXT: lui a1, 16
; RV64I-NEXT: addiw a1, a1, -1
; RV64I-NEXT: blez a0, .LBB28_2
@@ -2133,7 +2133,7 @@ define signext i8 @fcvt_w_s_i8(double %a) nounwind {
; RV32I: # %bb.0:
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT: call __fixdfsi@plt
+; RV32I-NEXT: call __fixdfsi
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
@@ -2142,7 +2142,7 @@ define signext i8 @fcvt_w_s_i8(double %a) nounwind {
; RV64I: # %bb.0:
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT: call __fixdfdi@plt
+; RV64I-NEXT: call __fixdfdi
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
@@ -2228,17 +2228,17 @@ define signext i8 @fcvt_w_s_sat_i8(double %a) nounwind {
; RV32I-NEXT: mv s1, a0
; RV32I-NEXT: lui a3, 263676
; RV32I-NEXT: li a2, 0
-; RV32I-NEXT: call __gtdf2@plt
+; RV32I-NEXT: call __gtdf2
; RV32I-NEXT: mv s2, a0
; RV32I-NEXT: lui a3, 787968
; RV32I-NEXT: mv a0, s1
; RV32I-NEXT: mv a1, s0
; RV32I-NEXT: li a2, 0
-; RV32I-NEXT: call __gedf2@plt
+; RV32I-NEXT: call __gedf2
; RV32I-NEXT: mv s4, a0
; RV32I-NEXT: mv a0, s1
; RV32I-NEXT: mv a1, s0
-; RV32I-NEXT: call __fixdfsi@plt
+; RV32I-NEXT: call __fixdfsi
; RV32I-NEXT: mv s3, a0
; RV32I-NEXT: bgez s4, .LBB30_2
; RV32I-NEXT: # %bb.1: # %start
@@ -2252,7 +2252,7 @@ define signext i8 @fcvt_w_s_sat_i8(double %a) nounwind {
; RV32I-NEXT: mv a1, s0
; RV32I-NEXT: mv a2, s1
; RV32I-NEXT: mv a3, s0
-; RV32I-NEXT: call __unorddf2@plt
+; RV32I-NEXT: call __unorddf2
; RV32I-NEXT: snez a0, a0
; RV32I-NEXT: addi a0, a0, -1
; RV32I-NEXT: and a0, a0, s3
@@ -2277,10 +2277,10 @@ define signext i8 @fcvt_w_s_sat_i8(double %a) nounwind {
; RV64I-NEXT: mv s0, a0
; RV64I-NEXT: li a1, -509
; RV64I-NEXT: slli a1, a1, 53
-; RV64I-NEXT: call __gedf2@plt
+; RV64I-NEXT: call __gedf2
; RV64I-NEXT: mv s2, a0
; RV64I-NEXT: mv a0, s0
-; RV64I-NEXT: call __fixdfdi@plt
+; RV64I-NEXT: call __fixdfdi
; RV64I-NEXT: mv s1, a0
; RV64I-NEXT: bgez s2, .LBB30_2
; RV64I-NEXT: # %bb.1: # %start
@@ -2289,14 +2289,14 @@ define signext i8 @fcvt_w_s_sat_i8(double %a) nounwind {
; RV64I-NEXT: lui a1, 65919
; RV64I-NEXT: slli a1, a1, 34
; RV64I-NEXT: mv a0, s0
-; RV64I-NEXT: call __gtdf2@plt
+; RV64I-NEXT: call __gtdf2
; RV64I-NEXT: blez a0, .LBB30_4
; RV64I-NEXT: # %bb.3: # %start
; RV64I-NEXT: li s1, 127
; RV64I-NEXT: .LBB30_4: # %start
; RV64I-NEXT: mv a0, s0
; RV64I-NEXT: mv a1, s0
-; RV64I-NEXT: call __unorddf2@plt
+; RV64I-NEXT: call __unorddf2
; RV64I-NEXT: snez a0, a0
; RV64I-NEXT: addi a0, a0, -1
; RV64I-NEXT: and a0, a0, s1
@@ -2347,7 +2347,7 @@ define zeroext i8 @fcvt_wu_s_i8(double %a) nounwind {
; RV32I: # %bb.0:
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT: call __fixunsdfsi@plt
+; RV32I-NEXT: call __fixunsdfsi
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
@@ -2356,7 +2356,7 @@ define zeroext i8 @fcvt_wu_s_i8(double %a) nounwind {
; RV64I: # %bb.0:
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT: call __fixunsdfdi@plt
+; RV64I-NEXT: call __fixunsdfdi
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
@@ -2425,17 +2425,17 @@ define zeroext i8 @fcvt_wu_s_sat_i8(double %a) nounwind {
; RV32I-NEXT: mv s2, a0
; RV32I-NEXT: lui a3, 263934
; RV32I-NEXT: li a2, 0
-; RV32I-NEXT: call __gtdf2@plt
+; RV32I-NEXT: call __gtdf2
; RV32I-NEXT: mv s3, a0
; RV32I-NEXT: mv a0, s2
; RV32I-NEXT: mv a1, s1
; RV32I-NEXT: li a2, 0
; RV32I-NEXT: li a3, 0
-; RV32I-NEXT: call __gedf2@plt
+; RV32I-NEXT: call __gedf2
; RV32I-NEXT: mv s0, a0
; RV32I-NEXT: mv a0, s2
; RV32I-NEXT: mv a1, s1
-; RV32I-NEXT: call __fixunsdfsi@plt
+; RV32I-NEXT: call __fixunsdfsi
; RV32I-NEXT: blez s3, .LBB32_2
; RV32I-NEXT: # %bb.1: # %start
; RV32I-NEXT: li a0, 255
@@ -2463,15 +2463,15 @@ define zeroext i8 @fcvt_wu_s_sat_i8(double %a) nounwind {
; RV64I-NEXT: sd s2, 0(sp) # 8-byte Folded Spill
; RV64I-NEXT: mv s2, a0
; RV64I-NEXT: li a1, 0
-; RV64I-NEXT: call __gedf2@plt
+; RV64I-NEXT: call __gedf2
; RV64I-NEXT: mv s0, a0
; RV64I-NEXT: mv a0, s2
-; RV64I-NEXT: call __fixunsdfdi@plt
+; RV64I-NEXT: call __fixunsdfdi
; RV64I-NEXT: mv s1, a0
; RV64I-NEXT: lui a1, 131967
; RV64I-NEXT: slli a1, a1, 33
; RV64I-NEXT: mv a0, s2
-; RV64I-NEXT: call __gtdf2@plt
+; RV64I-NEXT: call __gtdf2
; RV64I-NEXT: blez a0, .LBB32_2
; RV64I-NEXT: # %bb.1: # %start
; RV64I-NEXT: li a0, 255
@@ -2554,19 +2554,19 @@ define zeroext i32 @fcvt_wu_d_sat_zext(double %a) nounwind {
; RV32I-NEXT: lui a3, 270080
; RV32I-NEXT: addi a3, a3, -1
; RV32I-NEXT: lui a2, 1048064
-; RV32I-NEXT: call __gtdf2@plt
+; RV32I-NEXT: call __gtdf2
; RV32I-NEXT: sgtz a0, a0
; RV32I-NEXT: neg s2, a0
; RV32I-NEXT: mv a0, s1
; RV32I-NEXT: mv a1, s0
; RV32I-NEXT: li a2, 0
; RV32I-NEXT: li a3, 0
-; RV32I-NEXT: call __gedf2@plt
+; RV32I-NEXT: call __gedf2
; RV32I-NEXT: slti a0, a0, 0
; RV32I-NEXT: addi s3, a0, -1
; RV32I-NEXT: mv a0, s1
; RV32I-NEXT: mv a1, s0
-; RV32I-NEXT: call __fixunsdfsi@plt
+; RV32I-NEXT: call __fixunsdfsi
; RV32I-NEXT: and a0, s3, a0
; RV32I-NEXT: or a0, s2, a0
; RV32I-NEXT: lw ra, 28(sp) # 4-byte Folded Reload
@@ -2586,17 +2586,17 @@ define zeroext i32 @fcvt_wu_d_sat_zext(double %a) nounwind {
; RV64I-NEXT: sd s2, 0(sp) # 8-byte Folded Spill
; RV64I-NEXT: mv s2, a0
; RV64I-NEXT: li a1, 0
-; RV64I-NEXT: call __gedf2@plt
+; RV64I-NEXT: call __gedf2
; RV64I-NEXT: mv s0, a0
; RV64I-NEXT: mv a0, s2
-; RV64I-NEXT: call __fixunsdfdi@plt
+; RV64I-NEXT: call __fixunsdfdi
; RV64I-NEXT: mv s1, a0
; RV64I-NEXT: li a0, 1055
; RV64I-NEXT: slli a0, a0, 31
; RV64I-NEXT: addi a0, a0, -1
; RV64I-NEXT: slli a1, a0, 21
; RV64I-NEXT: mv a0, s2
-; RV64I-NEXT: call __gtdf2@plt
+; RV64I-NEXT: call __gtdf2
; RV64I-NEXT: blez a0, .LBB33_2
; RV64I-NEXT: # %bb.1: # %start
; RV64I-NEXT: li a0, -1
@@ -2668,17 +2668,17 @@ define signext i32 @fcvt_w_d_sat_sext(double %a) nounwind {
; RV32I-NEXT: lui a3, 269824
; RV32I-NEXT: addi a3, a3, -1
; RV32I-NEXT: lui a2, 1047552
-; RV32I-NEXT: call __gtdf2@plt
+; RV32I-NEXT: call __gtdf2
; RV32I-NEXT: mv s2, a0
; RV32I-NEXT: lui a3, 794112
; RV32I-NEXT: mv a0, s1
; RV32I-NEXT: mv a1, s0
; RV32I-NEXT: li a2, 0
-; RV32I-NEXT: call __gedf2@plt
+; RV32I-NEXT: call __gedf2
; RV32I-NEXT: mv s4, a0
; RV32I-NEXT: mv a0, s1
; RV32I-NEXT: mv a1, s0
-; RV32I-NEXT: call __fixdfsi@plt
+; RV32I-NEXT: call __fixdfsi
; RV32I-NEXT: mv s3, a0
; RV32I-NEXT: lui a0, 524288
; RV32I-NEXT: bgez s4, .LBB34_2
@@ -2693,7 +2693,7 @@ define signext i32 @fcvt_w_d_sat_sext(double %a) nounwind {
; RV32I-NEXT: mv a1, s0
; RV32I-NEXT: mv a2, s1
; RV32I-NEXT: mv a3, s0
-; RV32I-NEXT: call __unorddf2@plt
+; RV32I-NEXT: call __unorddf2
; RV32I-NEXT: snez a0, a0
; RV32I-NEXT: addi a0, a0, -1
; RV32I-NEXT: and a0, a0, s3
@@ -2717,10 +2717,10 @@ define signext i32 @fcvt_w_d_sat_sext(double %a) nounwind {
; RV64I-NEXT: mv s0, a0
; RV64I-NEXT: li a1, -497
; RV64I-NEXT: slli a1, a1, 53
-; RV64I-NEXT: call __gedf2@plt
+; RV64I-NEXT: call __gedf2
; RV64I-NEXT: mv s2, a0
; RV64I-NEXT: mv a0, s0
-; RV64I-NEXT: call __fixdfdi@plt
+; RV64I-NEXT: call __fixdfdi
; RV64I-NEXT: mv s1, a0
; RV64I-NEXT: lui s3, 524288
; RV64I-NEXT: bgez s2, .LBB34_2
@@ -2732,14 +2732,14 @@ define signext i32 @fcvt_w_d_sat_sext(double %a) nounwind {
; RV64I-NEXT: addi a0, a0, -1
; RV64I-NEXT: slli a1, a0, 22
; RV64I-NEXT: mv a0, s0
-; RV64I-NEXT: call __gtdf2@plt
+; RV64I-NEXT: call __gtdf2
; RV64I-NEXT: blez a0, .LBB34_4
; RV64I-NEXT: # %bb.3: # %start
; RV64I-NEXT: addi s1, s3, -1
; RV64I-NEXT: .LBB34_4: # %start
; RV64I-NEXT: mv a0, s0
; RV64I-NEXT: mv a1, s0
-; RV64I-NEXT: call __unorddf2@plt
+; RV64I-NEXT: call __unorddf2
; RV64I-NEXT: snez a0, a0
; RV64I-NEXT: addi a0, a0, -1
; RV64I-NEXT: and a0, a0, s1
diff --git a/llvm/test/CodeGen/RISCV/double-fcmp-strict.ll b/llvm/test/CodeGen/RISCV/double-fcmp-strict.ll
index 428f63196bfe..3ae2e997019c 100644
--- a/llvm/test/CodeGen/RISCV/double-fcmp-strict.ll
+++ b/llvm/test/CodeGen/RISCV/double-fcmp-strict.ll
@@ -46,7 +46,7 @@ define i32 @fcmp_oeq(double %a, double %b) nounwind strictfp {
; RV32I: # %bb.0:
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT: call __eqdf2@plt
+; RV32I-NEXT: call __eqdf2
; RV32I-NEXT: seqz a0, a0
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
@@ -56,7 +56,7 @@ define i32 @fcmp_oeq(double %a, double %b) nounwind strictfp {
; RV64I: # %bb.0:
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT: call __eqdf2@plt
+; RV64I-NEXT: call __eqdf2
; RV64I-NEXT: seqz a0, a0
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
@@ -107,7 +107,7 @@ define i32 @fcmp_ogt(double %a, double %b) nounwind strictfp {
; RV32I: # %bb.0:
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT: call __gtdf2@plt
+; RV32I-NEXT: call __gtdf2
; RV32I-NEXT: sgtz a0, a0
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
@@ -117,7 +117,7 @@ define i32 @fcmp_ogt(double %a, double %b) nounwind strictfp {
; RV64I: # %bb.0:
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT: call __gtdf2@plt
+; RV64I-NEXT: call __gtdf2
; RV64I-NEXT: sgtz a0, a0
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
@@ -167,7 +167,7 @@ define i32 @fcmp_oge(double %a, double %b) nounwind strictfp {
; RV32I: # %bb.0:
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT: call __gedf2@plt
+; RV32I-NEXT: call __gedf2
; RV32I-NEXT: slti a0, a0, 0
; RV32I-NEXT: xori a0, a0, 1
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
@@ -178,7 +178,7 @@ define i32 @fcmp_oge(double %a, double %b) nounwind strictfp {
; RV64I: # %bb.0:
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT: call __gedf2@plt
+; RV64I-NEXT: call __gedf2
; RV64I-NEXT: slti a0, a0, 0
; RV64I-NEXT: xori a0, a0, 1
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
@@ -229,7 +229,7 @@ define i32 @fcmp_olt(double %a, double %b) nounwind strictfp {
; RV32I: # %bb.0:
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT: call __ltdf2@plt
+; RV32I-NEXT: call __ltdf2
; RV32I-NEXT: slti a0, a0, 0
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
@@ -239,7 +239,7 @@ define i32 @fcmp_olt(double %a, double %b) nounwind strictfp {
; RV64I: # %bb.0:
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT: call __ltdf2@plt
+; RV64I-NEXT: call __ltdf2
; RV64I-NEXT: slti a0, a0, 0
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
@@ -289,7 +289,7 @@ define i32 @fcmp_ole(double %a, double %b) nounwind strictfp {
; RV32I: # %bb.0:
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT: call __ledf2@plt
+; RV32I-NEXT: call __ledf2
; RV32I-NEXT: slti a0, a0, 1
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
@@ -299,7 +299,7 @@ define i32 @fcmp_ole(double %a, double %b) nounwind strictfp {
; RV64I: # %bb.0:
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT: call __ledf2@plt
+; RV64I-NEXT: call __ledf2
; RV64I-NEXT: slti a0, a0, 1
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
@@ -375,13 +375,13 @@ define i32 @fcmp_one(double %a, double %b) nounwind strictfp {
; RV32I-NEXT: mv s1, a2
; RV32I-NEXT: mv s2, a1
; RV32I-NEXT: mv s3, a0
-; RV32I-NEXT: call __eqdf2@plt
+; RV32I-NEXT: call __eqdf2
; RV32I-NEXT: snez s4, a0
; RV32I-NEXT: mv a0, s3
; RV32I-NEXT: mv a1, s2
; RV32I-NEXT: mv a2, s1
; RV32I-NEXT: mv a3, s0
-; RV32I-NEXT: call __unorddf2@plt
+; RV32I-NEXT: call __unorddf2
; RV32I-NEXT: seqz a0, a0
; RV32I-NEXT: and a0, a0, s4
; RV32I-NEXT: lw ra, 28(sp) # 4-byte Folded Reload
@@ -402,11 +402,11 @@ define i32 @fcmp_one(double %a, double %b) nounwind strictfp {
; RV64I-NEXT: sd s2, 0(sp) # 8-byte Folded Spill
; RV64I-NEXT: mv s0, a1
; RV64I-NEXT: mv s1, a0
-; RV64I-NEXT: call __eqdf2@plt
+; RV64I-NEXT: call __eqdf2
; RV64I-NEXT: snez s2, a0
; RV64I-NEXT: mv a0, s1
; RV64I-NEXT: mv a1, s0
-; RV64I-NEXT: call __unorddf2@plt
+; RV64I-NEXT: call __unorddf2
; RV64I-NEXT: seqz a0, a0
; RV64I-NEXT: and a0, a0, s2
; RV64I-NEXT: ld ra, 24(sp) # 8-byte Folded Reload
@@ -456,7 +456,7 @@ define i32 @fcmp_ord(double %a, double %b) nounwind strictfp {
; RV32I: # %bb.0:
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT: call __unorddf2@plt
+; RV32I-NEXT: call __unorddf2
; RV32I-NEXT: seqz a0, a0
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
@@ -466,7 +466,7 @@ define i32 @fcmp_ord(double %a, double %b) nounwind strictfp {
; RV64I: # %bb.0:
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT: call __unorddf2@plt
+; RV64I-NEXT: call __unorddf2
; RV64I-NEXT: seqz a0, a0
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
@@ -545,13 +545,13 @@ define i32 @fcmp_ueq(double %a, double %b) nounwind strictfp {
; RV32I-NEXT: mv s1, a2
; RV32I-NEXT: mv s2, a1
; RV32I-NEXT: mv s3, a0
-; RV32I-NEXT: call __eqdf2@plt
+; RV32I-NEXT: call __eqdf2
; RV32I-NEXT: seqz s4, a0
; RV32I-NEXT: mv a0, s3
; RV32I-NEXT: mv a1, s2
; RV32I-NEXT: mv a2, s1
; RV32I-NEXT: mv a3, s0
-; RV32I-NEXT: call __unorddf2@plt
+; RV32I-NEXT: call __unorddf2
; RV32I-NEXT: snez a0, a0
; RV32I-NEXT: or a0, a0, s4
; RV32I-NEXT: lw ra, 28(sp) # 4-byte Folded Reload
@@ -572,11 +572,11 @@ define i32 @fcmp_ueq(double %a, double %b) nounwind strictfp {
; RV64I-NEXT: sd s2, 0(sp) # 8-byte Folded Spill
; RV64I-NEXT: mv s0, a1
; RV64I-NEXT: mv s1, a0
-; RV64I-NEXT: call __eqdf2@plt
+; RV64I-NEXT: call __eqdf2
; RV64I-NEXT: seqz s2, a0
; RV64I-NEXT: mv a0, s1
; RV64I-NEXT: mv a1, s0
-; RV64I-NEXT: call __unorddf2@plt
+; RV64I-NEXT: call __unorddf2
; RV64I-NEXT: snez a0, a0
; RV64I-NEXT: or a0, a0, s2
; RV64I-NEXT: ld ra, 24(sp) # 8-byte Folded Reload
@@ -633,7 +633,7 @@ define i32 @fcmp_ugt(double %a, double %b) nounwind strictfp {
; RV32I: # %bb.0:
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT: call __ledf2@plt
+; RV32I-NEXT: call __ledf2
; RV32I-NEXT: sgtz a0, a0
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
@@ -643,7 +643,7 @@ define i32 @fcmp_ugt(double %a, double %b) nounwind strictfp {
; RV64I: # %bb.0:
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT: call __ledf2@plt
+; RV64I-NEXT: call __ledf2
; RV64I-NEXT: sgtz a0, a0
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
@@ -696,7 +696,7 @@ define i32 @fcmp_uge(double %a, double %b) nounwind strictfp {
; RV32I: # %bb.0:
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT: call __ltdf2@plt
+; RV32I-NEXT: call __ltdf2
; RV32I-NEXT: slti a0, a0, 0
; RV32I-NEXT: xori a0, a0, 1
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
@@ -707,7 +707,7 @@ define i32 @fcmp_uge(double %a, double %b) nounwind strictfp {
; RV64I: # %bb.0:
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT: call __ltdf2@plt
+; RV64I-NEXT: call __ltdf2
; RV64I-NEXT: slti a0, a0, 0
; RV64I-NEXT: xori a0, a0, 1
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
@@ -761,7 +761,7 @@ define i32 @fcmp_ult(double %a, double %b) nounwind strictfp {
; RV32I: # %bb.0:
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT: call __gedf2@plt
+; RV32I-NEXT: call __gedf2
; RV32I-NEXT: slti a0, a0, 0
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
@@ -771,7 +771,7 @@ define i32 @fcmp_ult(double %a, double %b) nounwind strictfp {
; RV64I: # %bb.0:
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT: call __gedf2@plt
+; RV64I-NEXT: call __gedf2
; RV64I-NEXT: slti a0, a0, 0
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
@@ -824,7 +824,7 @@ define i32 @fcmp_ule(double %a, double %b) nounwind strictfp {
; RV32I: # %bb.0:
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT: call __gtdf2@plt
+; RV32I-NEXT: call __gtdf2
; RV32I-NEXT: slti a0, a0, 1
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
@@ -834,7 +834,7 @@ define i32 @fcmp_ule(double %a, double %b) nounwind strictfp {
; RV64I: # %bb.0:
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT: call __gtdf2@plt
+; RV64I-NEXT: call __gtdf2
; RV64I-NEXT: slti a0, a0, 1
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
@@ -877,7 +877,7 @@ define i32 @fcmp_une(double %a, double %b) nounwind strictfp {
; RV32I: # %bb.0:
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT: call __nedf2@plt
+; RV32I-NEXT: call __nedf2
; RV32I-NEXT: snez a0, a0
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
@@ -887,7 +887,7 @@ define i32 @fcmp_une(double %a, double %b) nounwind strictfp {
; RV64I: # %bb.0:
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT: call __nedf2@plt
+; RV64I-NEXT: call __nedf2
; RV64I-NEXT: snez a0, a0
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
@@ -936,7 +936,7 @@ define i32 @fcmp_uno(double %a, double %b) nounwind strictfp {
; RV32I: # %bb.0:
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT: call __unorddf2@plt
+; RV32I-NEXT: call __unorddf2
; RV32I-NEXT: snez a0, a0
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
@@ -946,7 +946,7 @@ define i32 @fcmp_uno(double %a, double %b) nounwind strictfp {
; RV64I: # %bb.0:
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT: call __unorddf2@plt
+; RV64I-NEXT: call __unorddf2
; RV64I-NEXT: snez a0, a0
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
@@ -992,7 +992,7 @@ define i32 @fcmps_oeq(double %a, double %b) nounwind strictfp {
; RV32I: # %bb.0:
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT: call __eqdf2@plt
+; RV32I-NEXT: call __eqdf2
; RV32I-NEXT: seqz a0, a0
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
@@ -1002,7 +1002,7 @@ define i32 @fcmps_oeq(double %a, double %b) nounwind strictfp {
; RV64I: # %bb.0:
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT: call __eqdf2@plt
+; RV64I-NEXT: call __eqdf2
; RV64I-NEXT: seqz a0, a0
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
@@ -1043,7 +1043,7 @@ define i32 @fcmps_ogt(double %a, double %b) nounwind strictfp {
; RV32I: # %bb.0:
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT: call __gtdf2@plt
+; RV32I-NEXT: call __gtdf2
; RV32I-NEXT: sgtz a0, a0
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
@@ -1053,7 +1053,7 @@ define i32 @fcmps_ogt(double %a, double %b) nounwind strictfp {
; RV64I: # %bb.0:
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT: call __gtdf2@plt
+; RV64I-NEXT: call __gtdf2
; RV64I-NEXT: sgtz a0, a0
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
@@ -1093,7 +1093,7 @@ define i32 @fcmps_oge(double %a, double %b) nounwind strictfp {
; RV32I: # %bb.0:
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT: call __gedf2@plt
+; RV32I-NEXT: call __gedf2
; RV32I-NEXT: slti a0, a0, 0
; RV32I-NEXT: xori a0, a0, 1
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
@@ -1104,7 +1104,7 @@ define i32 @fcmps_oge(double %a, double %b) nounwind strictfp {
; RV64I: # %bb.0:
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT: call __gedf2@plt
+; RV64I-NEXT: call __gedf2
; RV64I-NEXT: slti a0, a0, 0
; RV64I-NEXT: xori a0, a0, 1
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
@@ -1145,7 +1145,7 @@ define i32 @fcmps_olt(double %a, double %b) nounwind strictfp {
; RV32I: # %bb.0:
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT: call __ltdf2@plt
+; RV32I-NEXT: call __ltdf2
; RV32I-NEXT: slti a0, a0, 0
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
@@ -1155,7 +1155,7 @@ define i32 @fcmps_olt(double %a, double %b) nounwind strictfp {
; RV64I: # %bb.0:
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT: call __ltdf2@plt
+; RV64I-NEXT: call __ltdf2
; RV64I-NEXT: slti a0, a0, 0
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
@@ -1195,7 +1195,7 @@ define i32 @fcmps_ole(double %a, double %b) nounwind strictfp {
; RV32I: # %bb.0:
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT: call __ledf2@plt
+; RV32I-NEXT: call __ledf2
; RV32I-NEXT: slti a0, a0, 1
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
@@ -1205,7 +1205,7 @@ define i32 @fcmps_ole(double %a, double %b) nounwind strictfp {
; RV64I: # %bb.0:
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT: call __ledf2@plt
+; RV64I-NEXT: call __ledf2
; RV64I-NEXT: slti a0, a0, 1
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
@@ -1260,13 +1260,13 @@ define i32 @fcmps_one(double %a, double %b) nounwind strictfp {
; RV32I-NEXT: mv s1, a2
; RV32I-NEXT: mv s2, a1
; RV32I-NEXT: mv s3, a0
-; RV32I-NEXT: call __eqdf2@plt
+; RV32I-NEXT: call __eqdf2
; RV32I-NEXT: snez s4, a0
; RV32I-NEXT: mv a0, s3
; RV32I-NEXT: mv a1, s2
; RV32I-NEXT: mv a2, s1
; RV32I-NEXT: mv a3, s0
-; RV32I-NEXT: call __unorddf2@plt
+; RV32I-NEXT: call __unorddf2
; RV32I-NEXT: seqz a0, a0
; RV32I-NEXT: and a0, a0, s4
; RV32I-NEXT: lw ra, 28(sp) # 4-byte Folded Reload
@@ -1287,11 +1287,11 @@ define i32 @fcmps_one(double %a, double %b) nounwind strictfp {
; RV64I-NEXT: sd s2, 0(sp) # 8-byte Folded Spill
; RV64I-NEXT: mv s0, a1
; RV64I-NEXT: mv s1, a0
-; RV64I-NEXT: call __eqdf2@plt
+; RV64I-NEXT: call __eqdf2
; RV64I-NEXT: snez s2, a0
; RV64I-NEXT: mv a0, s1
; RV64I-NEXT: mv a1, s0
-; RV64I-NEXT: call __unorddf2@plt
+; RV64I-NEXT: call __unorddf2
; RV64I-NEXT: seqz a0, a0
; RV64I-NEXT: and a0, a0, s2
; RV64I-NEXT: ld ra, 24(sp) # 8-byte Folded Reload
@@ -1341,7 +1341,7 @@ define i32 @fcmps_ord(double %a, double %b) nounwind strictfp {
; RV32I: # %bb.0:
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT: call __unorddf2@plt
+; RV32I-NEXT: call __unorddf2
; RV32I-NEXT: seqz a0, a0
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
@@ -1351,7 +1351,7 @@ define i32 @fcmps_ord(double %a, double %b) nounwind strictfp {
; RV64I: # %bb.0:
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT: call __unorddf2@plt
+; RV64I-NEXT: call __unorddf2
; RV64I-NEXT: seqz a0, a0
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
@@ -1409,13 +1409,13 @@ define i32 @fcmps_ueq(double %a, double %b) nounwind strictfp {
; RV32I-NEXT: mv s1, a2
; RV32I-NEXT: mv s2, a1
; RV32I-NEXT: mv s3, a0
-; RV32I-NEXT: call __eqdf2@plt
+; RV32I-NEXT: call __eqdf2
; RV32I-NEXT: seqz s4, a0
; RV32I-NEXT: mv a0, s3
; RV32I-NEXT: mv a1, s2
; RV32I-NEXT: mv a2, s1
; RV32I-NEXT: mv a3, s0
-; RV32I-NEXT: call __unorddf2@plt
+; RV32I-NEXT: call __unorddf2
; RV32I-NEXT: snez a0, a0
; RV32I-NEXT: or a0, a0, s4
; RV32I-NEXT: lw ra, 28(sp) # 4-byte Folded Reload
@@ -1436,11 +1436,11 @@ define i32 @fcmps_ueq(double %a, double %b) nounwind strictfp {
; RV64I-NEXT: sd s2, 0(sp) # 8-byte Folded Spill
; RV64I-NEXT: mv s0, a1
; RV64I-NEXT: mv s1, a0
-; RV64I-NEXT: call __eqdf2@plt
+; RV64I-NEXT: call __eqdf2
; RV64I-NEXT: seqz s2, a0
; RV64I-NEXT: mv a0, s1
; RV64I-NEXT: mv a1, s0
-; RV64I-NEXT: call __unorddf2@plt
+; RV64I-NEXT: call __unorddf2
; RV64I-NEXT: snez a0, a0
; RV64I-NEXT: or a0, a0, s2
; RV64I-NEXT: ld ra, 24(sp) # 8-byte Folded Reload
@@ -1487,7 +1487,7 @@ define i32 @fcmps_ugt(double %a, double %b) nounwind strictfp {
; RV32I: # %bb.0:
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT: call __ledf2@plt
+; RV32I-NEXT: call __ledf2
; RV32I-NEXT: sgtz a0, a0
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
@@ -1497,7 +1497,7 @@ define i32 @fcmps_ugt(double %a, double %b) nounwind strictfp {
; RV64I: # %bb.0:
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT: call __ledf2@plt
+; RV64I-NEXT: call __ledf2
; RV64I-NEXT: sgtz a0, a0
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
@@ -1540,7 +1540,7 @@ define i32 @fcmps_uge(double %a, double %b) nounwind strictfp {
; RV32I: # %bb.0:
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT: call __ltdf2@plt
+; RV32I-NEXT: call __ltdf2
; RV32I-NEXT: slti a0, a0, 0
; RV32I-NEXT: xori a0, a0, 1
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
@@ -1551,7 +1551,7 @@ define i32 @fcmps_uge(double %a, double %b) nounwind strictfp {
; RV64I: # %bb.0:
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT: call __ltdf2@plt
+; RV64I-NEXT: call __ltdf2
; RV64I-NEXT: slti a0, a0, 0
; RV64I-NEXT: xori a0, a0, 1
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
@@ -1595,7 +1595,7 @@ define i32 @fcmps_ult(double %a, double %b) nounwind strictfp {
; RV32I: # %bb.0:
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT: call __gedf2@plt
+; RV32I-NEXT: call __gedf2
; RV32I-NEXT: slti a0, a0, 0
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
@@ -1605,7 +1605,7 @@ define i32 @fcmps_ult(double %a, double %b) nounwind strictfp {
; RV64I: # %bb.0:
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT: call __gedf2@plt
+; RV64I-NEXT: call __gedf2
; RV64I-NEXT: slti a0, a0, 0
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
@@ -1648,7 +1648,7 @@ define i32 @fcmps_ule(double %a, double %b) nounwind strictfp {
; RV32I: # %bb.0:
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT: call __gtdf2@plt
+; RV32I-NEXT: call __gtdf2
; RV32I-NEXT: slti a0, a0, 1
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
@@ -1658,7 +1658,7 @@ define i32 @fcmps_ule(double %a, double %b) nounwind strictfp {
; RV64I: # %bb.0:
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT: call __gtdf2@plt
+; RV64I-NEXT: call __gtdf2
; RV64I-NEXT: slti a0, a0, 1
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
@@ -1707,7 +1707,7 @@ define i32 @fcmps_une(double %a, double %b) nounwind strictfp {
; RV32I: # %bb.0:
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT: call __nedf2@plt
+; RV32I-NEXT: call __nedf2
; RV32I-NEXT: snez a0, a0
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
@@ -1717,7 +1717,7 @@ define i32 @fcmps_une(double %a, double %b) nounwind strictfp {
; RV64I: # %bb.0:
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT: call __nedf2@plt
+; RV64I-NEXT: call __nedf2
; RV64I-NEXT: snez a0, a0
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
@@ -1766,7 +1766,7 @@ define i32 @fcmps_uno(double %a, double %b) nounwind strictfp {
; RV32I: # %bb.0:
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT: call __unorddf2@plt
+; RV32I-NEXT: call __unorddf2
; RV32I-NEXT: snez a0, a0
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
@@ -1776,7 +1776,7 @@ define i32 @fcmps_uno(double %a, double %b) nounwind strictfp {
; RV64I: # %bb.0:
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT: call __unorddf2@plt
+; RV64I-NEXT: call __unorddf2
; RV64I-NEXT: snez a0, a0
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
diff --git a/llvm/test/CodeGen/RISCV/double-fcmp.ll b/llvm/test/CodeGen/RISCV/double-fcmp.ll
index b1b3c46c4bf5..64a154f450f1 100644
--- a/llvm/test/CodeGen/RISCV/double-fcmp.ll
+++ b/llvm/test/CodeGen/RISCV/double-fcmp.ll
@@ -67,7 +67,7 @@ define i32 @fcmp_oeq(double %a, double %b) nounwind {
; RV32I: # %bb.0:
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT: call __eqdf2@plt
+; RV32I-NEXT: call __eqdf2
; RV32I-NEXT: seqz a0, a0
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
@@ -77,7 +77,7 @@ define i32 @fcmp_oeq(double %a, double %b) nounwind {
; RV64I: # %bb.0:
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT: call __eqdf2@plt
+; RV64I-NEXT: call __eqdf2
; RV64I-NEXT: seqz a0, a0
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
@@ -117,7 +117,7 @@ define i32 @fcmp_ogt(double %a, double %b) nounwind {
; RV32I: # %bb.0:
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT: call __gtdf2@plt
+; RV32I-NEXT: call __gtdf2
; RV32I-NEXT: sgtz a0, a0
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
@@ -127,7 +127,7 @@ define i32 @fcmp_ogt(double %a, double %b) nounwind {
; RV64I: # %bb.0:
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT: call __gtdf2@plt
+; RV64I-NEXT: call __gtdf2
; RV64I-NEXT: sgtz a0, a0
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
@@ -167,7 +167,7 @@ define i32 @fcmp_oge(double %a, double %b) nounwind {
; RV32I: # %bb.0:
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT: call __gedf2@plt
+; RV32I-NEXT: call __gedf2
; RV32I-NEXT: slti a0, a0, 0
; RV32I-NEXT: xori a0, a0, 1
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
@@ -178,7 +178,7 @@ define i32 @fcmp_oge(double %a, double %b) nounwind {
; RV64I: # %bb.0:
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT: call __gedf2@plt
+; RV64I-NEXT: call __gedf2
; RV64I-NEXT: slti a0, a0, 0
; RV64I-NEXT: xori a0, a0, 1
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
@@ -219,7 +219,7 @@ define i32 @fcmp_olt(double %a, double %b) nounwind {
; RV32I: # %bb.0:
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT: call __ltdf2@plt
+; RV32I-NEXT: call __ltdf2
; RV32I-NEXT: slti a0, a0, 0
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
@@ -229,7 +229,7 @@ define i32 @fcmp_olt(double %a, double %b) nounwind {
; RV64I: # %bb.0:
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT: call __ltdf2@plt
+; RV64I-NEXT: call __ltdf2
; RV64I-NEXT: slti a0, a0, 0
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
@@ -269,7 +269,7 @@ define i32 @fcmp_ole(double %a, double %b) nounwind {
; RV32I: # %bb.0:
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT: call __ledf2@plt
+; RV32I-NEXT: call __ledf2
; RV32I-NEXT: slti a0, a0, 1
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
@@ -279,7 +279,7 @@ define i32 @fcmp_ole(double %a, double %b) nounwind {
; RV64I: # %bb.0:
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT: call __ledf2@plt
+; RV64I-NEXT: call __ledf2
; RV64I-NEXT: slti a0, a0, 1
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
@@ -334,13 +334,13 @@ define i32 @fcmp_one(double %a, double %b) nounwind {
; RV32I-NEXT: mv s1, a2
; RV32I-NEXT: mv s2, a1
; RV32I-NEXT: mv s3, a0
-; RV32I-NEXT: call __eqdf2@plt
+; RV32I-NEXT: call __eqdf2
; RV32I-NEXT: snez s4, a0
; RV32I-NEXT: mv a0, s3
; RV32I-NEXT: mv a1, s2
; RV32I-NEXT: mv a2, s1
; RV32I-NEXT: mv a3, s0
-; RV32I-NEXT: call __unorddf2@plt
+; RV32I-NEXT: call __unorddf2
; RV32I-NEXT: seqz a0, a0
; RV32I-NEXT: and a0, a0, s4
; RV32I-NEXT: lw ra, 28(sp) # 4-byte Folded Reload
@@ -361,11 +361,11 @@ define i32 @fcmp_one(double %a, double %b) nounwind {
; RV64I-NEXT: sd s2, 0(sp) # 8-byte Folded Spill
; RV64I-NEXT: mv s0, a1
; RV64I-NEXT: mv s1, a0
-; RV64I-NEXT: call __eqdf2@plt
+; RV64I-NEXT: call __eqdf2
; RV64I-NEXT: snez s2, a0
; RV64I-NEXT: mv a0, s1
; RV64I-NEXT: mv a1, s0
-; RV64I-NEXT: call __unorddf2@plt
+; RV64I-NEXT: call __unorddf2
; RV64I-NEXT: seqz a0, a0
; RV64I-NEXT: and a0, a0, s2
; RV64I-NEXT: ld ra, 24(sp) # 8-byte Folded Reload
@@ -415,7 +415,7 @@ define i32 @fcmp_ord(double %a, double %b) nounwind {
; RV32I: # %bb.0:
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT: call __unorddf2@plt
+; RV32I-NEXT: call __unorddf2
; RV32I-NEXT: seqz a0, a0
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
@@ -425,7 +425,7 @@ define i32 @fcmp_ord(double %a, double %b) nounwind {
; RV64I: # %bb.0:
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT: call __unorddf2@plt
+; RV64I-NEXT: call __unorddf2
; RV64I-NEXT: seqz a0, a0
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
@@ -483,13 +483,13 @@ define i32 @fcmp_ueq(double %a, double %b) nounwind {
; RV32I-NEXT: mv s1, a2
; RV32I-NEXT: mv s2, a1
; RV32I-NEXT: mv s3, a0
-; RV32I-NEXT: call __eqdf2@plt
+; RV32I-NEXT: call __eqdf2
; RV32I-NEXT: seqz s4, a0
; RV32I-NEXT: mv a0, s3
; RV32I-NEXT: mv a1, s2
; RV32I-NEXT: mv a2, s1
; RV32I-NEXT: mv a3, s0
-; RV32I-NEXT: call __unorddf2@plt
+; RV32I-NEXT: call __unorddf2
; RV32I-NEXT: snez a0, a0
; RV32I-NEXT: or a0, a0, s4
; RV32I-NEXT: lw ra, 28(sp) # 4-byte Folded Reload
@@ -510,11 +510,11 @@ define i32 @fcmp_ueq(double %a, double %b) nounwind {
; RV64I-NEXT: sd s2, 0(sp) # 8-byte Folded Spill
; RV64I-NEXT: mv s0, a1
; RV64I-NEXT: mv s1, a0
-; RV64I-NEXT: call __eqdf2@plt
+; RV64I-NEXT: call __eqdf2
; RV64I-NEXT: seqz s2, a0
; RV64I-NEXT: mv a0, s1
; RV64I-NEXT: mv a1, s0
-; RV64I-NEXT: call __unorddf2@plt
+; RV64I-NEXT: call __unorddf2
; RV64I-NEXT: snez a0, a0
; RV64I-NEXT: or a0, a0, s2
; RV64I-NEXT: ld ra, 24(sp) # 8-byte Folded Reload
@@ -561,7 +561,7 @@ define i32 @fcmp_ugt(double %a, double %b) nounwind {
; RV32I: # %bb.0:
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT: call __ledf2@plt
+; RV32I-NEXT: call __ledf2
; RV32I-NEXT: sgtz a0, a0
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
@@ -571,7 +571,7 @@ define i32 @fcmp_ugt(double %a, double %b) nounwind {
; RV64I: # %bb.0:
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT: call __ledf2@plt
+; RV64I-NEXT: call __ledf2
; RV64I-NEXT: sgtz a0, a0
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
@@ -614,7 +614,7 @@ define i32 @fcmp_uge(double %a, double %b) nounwind {
; RV32I: # %bb.0:
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT: call __ltdf2@plt
+; RV32I-NEXT: call __ltdf2
; RV32I-NEXT: slti a0, a0, 0
; RV32I-NEXT: xori a0, a0, 1
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
@@ -625,7 +625,7 @@ define i32 @fcmp_uge(double %a, double %b) nounwind {
; RV64I: # %bb.0:
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT: call __ltdf2@plt
+; RV64I-NEXT: call __ltdf2
; RV64I-NEXT: slti a0, a0, 0
; RV64I-NEXT: xori a0, a0, 1
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
@@ -669,7 +669,7 @@ define i32 @fcmp_ult(double %a, double %b) nounwind {
; RV32I: # %bb.0:
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT: call __gedf2@plt
+; RV32I-NEXT: call __gedf2
; RV32I-NEXT: slti a0, a0, 0
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
@@ -679,7 +679,7 @@ define i32 @fcmp_ult(double %a, double %b) nounwind {
; RV64I: # %bb.0:
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT: call __gedf2@plt
+; RV64I-NEXT: call __gedf2
; RV64I-NEXT: slti a0, a0, 0
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
@@ -722,7 +722,7 @@ define i32 @fcmp_ule(double %a, double %b) nounwind {
; RV32I: # %bb.0:
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT: call __gtdf2@plt
+; RV32I-NEXT: call __gtdf2
; RV32I-NEXT: slti a0, a0, 1
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
@@ -732,7 +732,7 @@ define i32 @fcmp_ule(double %a, double %b) nounwind {
; RV64I: # %bb.0:
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT: call __gtdf2@plt
+; RV64I-NEXT: call __gtdf2
; RV64I-NEXT: slti a0, a0, 1
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
@@ -775,7 +775,7 @@ define i32 @fcmp_une(double %a, double %b) nounwind {
; RV32I: # %bb.0:
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT: call __nedf2@plt
+; RV32I-NEXT: call __nedf2
; RV32I-NEXT: snez a0, a0
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
@@ -785,7 +785,7 @@ define i32 @fcmp_une(double %a, double %b) nounwind {
; RV64I: # %bb.0:
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT: call __nedf2@plt
+; RV64I-NEXT: call __nedf2
; RV64I-NEXT: snez a0, a0
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
@@ -834,7 +834,7 @@ define i32 @fcmp_uno(double %a, double %b) nounwind {
; RV32I: # %bb.0:
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT: call __unorddf2@plt
+; RV32I-NEXT: call __unorddf2
; RV32I-NEXT: snez a0, a0
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
@@ -844,7 +844,7 @@ define i32 @fcmp_uno(double %a, double %b) nounwind {
; RV64I: # %bb.0:
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT: call __unorddf2@plt
+; RV64I-NEXT: call __unorddf2
; RV64I-NEXT: snez a0, a0
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
diff --git a/llvm/test/CodeGen/RISCV/double-frem.ll b/llvm/test/CodeGen/RISCV/double-frem.ll
index 118ded45c0fc..5303e84e5ded 100644
--- a/llvm/test/CodeGen/RISCV/double-frem.ll
+++ b/llvm/test/CodeGen/RISCV/double-frem.ll
@@ -11,24 +11,24 @@
define double @frem_f64(double %a, double %b) nounwind {
; RV32IFD-LABEL: frem_f64:
; RV32IFD: # %bb.0:
-; RV32IFD-NEXT: tail fmod@plt
+; RV32IFD-NEXT: tail fmod
;
; RV64IFD-LABEL: frem_f64:
; RV64IFD: # %bb.0:
-; RV64IFD-NEXT: tail fmod@plt
+; RV64IFD-NEXT: tail fmod
;
; RV32IZFINXZDINX-LABEL: frem_f64:
; RV32IZFINXZDINX: # %bb.0:
; RV32IZFINXZDINX-NEXT: addi sp, sp, -16
; RV32IZFINXZDINX-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IZFINXZDINX-NEXT: call fmod@plt
+; RV32IZFINXZDINX-NEXT: call fmod
; RV32IZFINXZDINX-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32IZFINXZDINX-NEXT: addi sp, sp, 16
; RV32IZFINXZDINX-NEXT: ret
;
; RV64IZFINXZDINX-LABEL: frem_f64:
; RV64IZFINXZDINX: # %bb.0:
-; RV64IZFINXZDINX-NEXT: tail fmod@plt
+; RV64IZFINXZDINX-NEXT: tail fmod
%1 = frem double %a, %b
ret double %1
}
diff --git a/llvm/test/CodeGen/RISCV/double-intrinsics-strict.ll b/llvm/test/CodeGen/RISCV/double-intrinsics-strict.ll
index da24e4b7b718..c574f64150a2 100644
--- a/llvm/test/CodeGen/RISCV/double-intrinsics-strict.ll
+++ b/llvm/test/CodeGen/RISCV/double-intrinsics-strict.ll
@@ -50,7 +50,7 @@ define double @sqrt_f64(double %a) nounwind strictfp {
; RV32I: # %bb.0:
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT: call sqrt@plt
+; RV32I-NEXT: call sqrt
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
@@ -59,7 +59,7 @@ define double @sqrt_f64(double %a) nounwind strictfp {
; RV64I: # %bb.0:
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT: call sqrt@plt
+; RV64I-NEXT: call sqrt
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
@@ -74,7 +74,7 @@ define double @powi_f64(double %a, i32 %b) nounwind strictfp {
; RV32IFD: # %bb.0:
; RV32IFD-NEXT: addi sp, sp, -16
; RV32IFD-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IFD-NEXT: call __powidf2@plt
+; RV32IFD-NEXT: call __powidf2
; RV32IFD-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32IFD-NEXT: addi sp, sp, 16
; RV32IFD-NEXT: ret
@@ -84,7 +84,7 @@ define double @powi_f64(double %a, i32 %b) nounwind strictfp {
; RV64IFD-NEXT: addi sp, sp, -16
; RV64IFD-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64IFD-NEXT: sext.w a0, a0
-; RV64IFD-NEXT: call __powidf2@plt
+; RV64IFD-NEXT: call __powidf2
; RV64IFD-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64IFD-NEXT: addi sp, sp, 16
; RV64IFD-NEXT: ret
@@ -93,7 +93,7 @@ define double @powi_f64(double %a, i32 %b) nounwind strictfp {
; RV32IZFINXZDINX: # %bb.0:
; RV32IZFINXZDINX-NEXT: addi sp, sp, -16
; RV32IZFINXZDINX-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IZFINXZDINX-NEXT: call __powidf2@plt
+; RV32IZFINXZDINX-NEXT: call __powidf2
; RV32IZFINXZDINX-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32IZFINXZDINX-NEXT: addi sp, sp, 16
; RV32IZFINXZDINX-NEXT: ret
@@ -103,7 +103,7 @@ define double @powi_f64(double %a, i32 %b) nounwind strictfp {
; RV64IZFINXZDINX-NEXT: addi sp, sp, -16
; RV64IZFINXZDINX-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64IZFINXZDINX-NEXT: sext.w a1, a1
-; RV64IZFINXZDINX-NEXT: call __powidf2@plt
+; RV64IZFINXZDINX-NEXT: call __powidf2
; RV64IZFINXZDINX-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64IZFINXZDINX-NEXT: addi sp, sp, 16
; RV64IZFINXZDINX-NEXT: ret
@@ -112,7 +112,7 @@ define double @powi_f64(double %a, i32 %b) nounwind strictfp {
; RV32I: # %bb.0:
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT: call __powidf2@plt
+; RV32I-NEXT: call __powidf2
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
@@ -122,7 +122,7 @@ define double @powi_f64(double %a, i32 %b) nounwind strictfp {
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT: sext.w a1, a1
-; RV64I-NEXT: call __powidf2@plt
+; RV64I-NEXT: call __powidf2
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
@@ -137,7 +137,7 @@ define double @sin_f64(double %a) nounwind strictfp {
; RV32IFD: # %bb.0:
; RV32IFD-NEXT: addi sp, sp, -16
; RV32IFD-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IFD-NEXT: call sin@plt
+; RV32IFD-NEXT: call sin
; RV32IFD-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32IFD-NEXT: addi sp, sp, 16
; RV32IFD-NEXT: ret
@@ -146,7 +146,7 @@ define double @sin_f64(double %a) nounwind strictfp {
; RV64IFD: # %bb.0:
; RV64IFD-NEXT: addi sp, sp, -16
; RV64IFD-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
-; RV64IFD-NEXT: call sin@plt
+; RV64IFD-NEXT: call sin
; RV64IFD-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64IFD-NEXT: addi sp, sp, 16
; RV64IFD-NEXT: ret
@@ -155,7 +155,7 @@ define double @sin_f64(double %a) nounwind strictfp {
; RV32IZFINXZDINX: # %bb.0:
; RV32IZFINXZDINX-NEXT: addi sp, sp, -16
; RV32IZFINXZDINX-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IZFINXZDINX-NEXT: call sin@plt
+; RV32IZFINXZDINX-NEXT: call sin
; RV32IZFINXZDINX-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32IZFINXZDINX-NEXT: addi sp, sp, 16
; RV32IZFINXZDINX-NEXT: ret
@@ -164,7 +164,7 @@ define double @sin_f64(double %a) nounwind strictfp {
; RV64IZFINXZDINX: # %bb.0:
; RV64IZFINXZDINX-NEXT: addi sp, sp, -16
; RV64IZFINXZDINX-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
-; RV64IZFINXZDINX-NEXT: call sin@plt
+; RV64IZFINXZDINX-NEXT: call sin
; RV64IZFINXZDINX-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64IZFINXZDINX-NEXT: addi sp, sp, 16
; RV64IZFINXZDINX-NEXT: ret
@@ -173,7 +173,7 @@ define double @sin_f64(double %a) nounwind strictfp {
; RV32I: # %bb.0:
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT: call sin@plt
+; RV32I-NEXT: call sin
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
@@ -182,7 +182,7 @@ define double @sin_f64(double %a) nounwind strictfp {
; RV64I: # %bb.0:
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT: call sin@plt
+; RV64I-NEXT: call sin
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
@@ -197,7 +197,7 @@ define double @cos_f64(double %a) nounwind strictfp {
; RV32IFD: # %bb.0:
; RV32IFD-NEXT: addi sp, sp, -16
; RV32IFD-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IFD-NEXT: call cos@plt
+; RV32IFD-NEXT: call cos
; RV32IFD-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32IFD-NEXT: addi sp, sp, 16
; RV32IFD-NEXT: ret
@@ -206,7 +206,7 @@ define double @cos_f64(double %a) nounwind strictfp {
; RV64IFD: # %bb.0:
; RV64IFD-NEXT: addi sp, sp, -16
; RV64IFD-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
-; RV64IFD-NEXT: call cos@plt
+; RV64IFD-NEXT: call cos
; RV64IFD-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64IFD-NEXT: addi sp, sp, 16
; RV64IFD-NEXT: ret
@@ -215,7 +215,7 @@ define double @cos_f64(double %a) nounwind strictfp {
; RV32IZFINXZDINX: # %bb.0:
; RV32IZFINXZDINX-NEXT: addi sp, sp, -16
; RV32IZFINXZDINX-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IZFINXZDINX-NEXT: call cos@plt
+; RV32IZFINXZDINX-NEXT: call cos
; RV32IZFINXZDINX-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32IZFINXZDINX-NEXT: addi sp, sp, 16
; RV32IZFINXZDINX-NEXT: ret
@@ -224,7 +224,7 @@ define double @cos_f64(double %a) nounwind strictfp {
; RV64IZFINXZDINX: # %bb.0:
; RV64IZFINXZDINX-NEXT: addi sp, sp, -16
; RV64IZFINXZDINX-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
-; RV64IZFINXZDINX-NEXT: call cos@plt
+; RV64IZFINXZDINX-NEXT: call cos
; RV64IZFINXZDINX-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64IZFINXZDINX-NEXT: addi sp, sp, 16
; RV64IZFINXZDINX-NEXT: ret
@@ -233,7 +233,7 @@ define double @cos_f64(double %a) nounwind strictfp {
; RV32I: # %bb.0:
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT: call cos@plt
+; RV32I-NEXT: call cos
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
@@ -242,7 +242,7 @@ define double @cos_f64(double %a) nounwind strictfp {
; RV64I: # %bb.0:
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT: call cos@plt
+; RV64I-NEXT: call cos
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
@@ -259,10 +259,10 @@ define double @sincos_f64(double %a) nounwind strictfp {
; RV32IFD-NEXT: fsd fs0, 16(sp) # 8-byte Folded Spill
; RV32IFD-NEXT: fsd fs1, 8(sp) # 8-byte Folded Spill
; RV32IFD-NEXT: fmv.d fs0, fa0
-; RV32IFD-NEXT: call sin@plt
+; RV32IFD-NEXT: call sin
; RV32IFD-NEXT: fmv.d fs1, fa0
; RV32IFD-NEXT: fmv.d fa0, fs0
-; RV32IFD-NEXT: call cos@plt
+; RV32IFD-NEXT: call cos
; RV32IFD-NEXT: fadd.d fa0, fs1, fa0
; RV32IFD-NEXT: lw ra, 28(sp) # 4-byte Folded Reload
; RV32IFD-NEXT: fld fs0, 16(sp) # 8-byte Folded Reload
@@ -277,10 +277,10 @@ define double @sincos_f64(double %a) nounwind strictfp {
; RV64IFD-NEXT: fsd fs0, 16(sp) # 8-byte Folded Spill
; RV64IFD-NEXT: fsd fs1, 8(sp) # 8-byte Folded Spill
; RV64IFD-NEXT: fmv.d fs0, fa0
-; RV64IFD-NEXT: call sin@plt
+; RV64IFD-NEXT: call sin
; RV64IFD-NEXT: fmv.d fs1, fa0
; RV64IFD-NEXT: fmv.d fa0, fs0
-; RV64IFD-NEXT: call cos@plt
+; RV64IFD-NEXT: call cos
; RV64IFD-NEXT: fadd.d fa0, fs1, fa0
; RV64IFD-NEXT: ld ra, 24(sp) # 8-byte Folded Reload
; RV64IFD-NEXT: fld fs0, 16(sp) # 8-byte Folded Reload
@@ -298,14 +298,14 @@ define double @sincos_f64(double %a) nounwind strictfp {
; RV32IZFINXZDINX-NEXT: sw s3, 12(sp) # 4-byte Folded Spill
; RV32IZFINXZDINX-NEXT: mv s0, a1
; RV32IZFINXZDINX-NEXT: mv s1, a0
-; RV32IZFINXZDINX-NEXT: call sin@plt
+; RV32IZFINXZDINX-NEXT: call sin
; RV32IZFINXZDINX-NEXT: sw a0, 0(sp)
; RV32IZFINXZDINX-NEXT: sw a1, 4(sp)
; RV32IZFINXZDINX-NEXT: lw s2, 0(sp)
; RV32IZFINXZDINX-NEXT: lw s3, 4(sp)
; RV32IZFINXZDINX-NEXT: mv a0, s1
; RV32IZFINXZDINX-NEXT: mv a1, s0
-; RV32IZFINXZDINX-NEXT: call cos@plt
+; RV32IZFINXZDINX-NEXT: call cos
; RV32IZFINXZDINX-NEXT: sw a0, 0(sp)
; RV32IZFINXZDINX-NEXT: sw a1, 4(sp)
; RV32IZFINXZDINX-NEXT: lw a0, 0(sp)
@@ -330,10 +330,10 @@ define double @sincos_f64(double %a) nounwind strictfp {
; RV64IZFINXZDINX-NEXT: sd s0, 16(sp) # 8-byte Folded Spill
; RV64IZFINXZDINX-NEXT: sd s1, 8(sp) # 8-byte Folded Spill
; RV64IZFINXZDINX-NEXT: mv s0, a0
-; RV64IZFINXZDINX-NEXT: call sin@plt
+; RV64IZFINXZDINX-NEXT: call sin
; RV64IZFINXZDINX-NEXT: mv s1, a0
; RV64IZFINXZDINX-NEXT: mv a0, s0
-; RV64IZFINXZDINX-NEXT: call cos@plt
+; RV64IZFINXZDINX-NEXT: call cos
; RV64IZFINXZDINX-NEXT: fadd.d a0, s1, a0
; RV64IZFINXZDINX-NEXT: ld ra, 24(sp) # 8-byte Folded Reload
; RV64IZFINXZDINX-NEXT: ld s0, 16(sp) # 8-byte Folded Reload
@@ -351,17 +351,17 @@ define double @sincos_f64(double %a) nounwind strictfp {
; RV32I-NEXT: sw s3, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT: mv s0, a1
; RV32I-NEXT: mv s1, a0
-; RV32I-NEXT: call sin@plt
+; RV32I-NEXT: call sin
; RV32I-NEXT: mv s2, a0
; RV32I-NEXT: mv s3, a1
; RV32I-NEXT: mv a0, s1
; RV32I-NEXT: mv a1, s0
-; RV32I-NEXT: call cos@plt
+; RV32I-NEXT: call cos
; RV32I-NEXT: mv a2, a0
; RV32I-NEXT: mv a3, a1
; RV32I-NEXT: mv a0, s2
; RV32I-NEXT: mv a1, s3
-; RV32I-NEXT: call __adddf3@plt
+; RV32I-NEXT: call __adddf3
; RV32I-NEXT: lw ra, 28(sp) # 4-byte Folded Reload
; RV32I-NEXT: lw s0, 24(sp) # 4-byte Folded Reload
; RV32I-NEXT: lw s1, 20(sp) # 4-byte Folded Reload
@@ -377,13 +377,13 @@ define double @sincos_f64(double %a) nounwind strictfp {
; RV64I-NEXT: sd s0, 16(sp) # 8-byte Folded Spill
; RV64I-NEXT: sd s1, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT: mv s0, a0
-; RV64I-NEXT: call sin@plt
+; RV64I-NEXT: call sin
; RV64I-NEXT: mv s1, a0
; RV64I-NEXT: mv a0, s0
-; RV64I-NEXT: call cos@plt
+; RV64I-NEXT: call cos
; RV64I-NEXT: mv a1, a0
; RV64I-NEXT: mv a0, s1
-; RV64I-NEXT: call __adddf3@plt
+; RV64I-NEXT: call __adddf3
; RV64I-NEXT: ld ra, 24(sp) # 8-byte Folded Reload
; RV64I-NEXT: ld s0, 16(sp) # 8-byte Folded Reload
; RV64I-NEXT: ld s1, 8(sp) # 8-byte Folded Reload
@@ -402,7 +402,7 @@ define double @pow_f64(double %a, double %b) nounwind strictfp {
; RV32IFD: # %bb.0:
; RV32IFD-NEXT: addi sp, sp, -16
; RV32IFD-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IFD-NEXT: call pow@plt
+; RV32IFD-NEXT: call pow
; RV32IFD-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32IFD-NEXT: addi sp, sp, 16
; RV32IFD-NEXT: ret
@@ -411,7 +411,7 @@ define double @pow_f64(double %a, double %b) nounwind strictfp {
; RV64IFD: # %bb.0:
; RV64IFD-NEXT: addi sp, sp, -16
; RV64IFD-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
-; RV64IFD-NEXT: call pow@plt
+; RV64IFD-NEXT: call pow
; RV64IFD-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64IFD-NEXT: addi sp, sp, 16
; RV64IFD-NEXT: ret
@@ -420,7 +420,7 @@ define double @pow_f64(double %a, double %b) nounwind strictfp {
; RV32IZFINXZDINX: # %bb.0:
; RV32IZFINXZDINX-NEXT: addi sp, sp, -16
; RV32IZFINXZDINX-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IZFINXZDINX-NEXT: call pow@plt
+; RV32IZFINXZDINX-NEXT: call pow
; RV32IZFINXZDINX-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32IZFINXZDINX-NEXT: addi sp, sp, 16
; RV32IZFINXZDINX-NEXT: ret
@@ -429,7 +429,7 @@ define double @pow_f64(double %a, double %b) nounwind strictfp {
; RV64IZFINXZDINX: # %bb.0:
; RV64IZFINXZDINX-NEXT: addi sp, sp, -16
; RV64IZFINXZDINX-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
-; RV64IZFINXZDINX-NEXT: call pow@plt
+; RV64IZFINXZDINX-NEXT: call pow
; RV64IZFINXZDINX-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64IZFINXZDINX-NEXT: addi sp, sp, 16
; RV64IZFINXZDINX-NEXT: ret
@@ -438,7 +438,7 @@ define double @pow_f64(double %a, double %b) nounwind strictfp {
; RV32I: # %bb.0:
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT: call pow@plt
+; RV32I-NEXT: call pow
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
@@ -447,7 +447,7 @@ define double @pow_f64(double %a, double %b) nounwind strictfp {
; RV64I: # %bb.0:
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT: call pow@plt
+; RV64I-NEXT: call pow
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
@@ -462,7 +462,7 @@ define double @exp_f64(double %a) nounwind strictfp {
; RV32IFD: # %bb.0:
; RV32IFD-NEXT: addi sp, sp, -16
; RV32IFD-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IFD-NEXT: call exp@plt
+; RV32IFD-NEXT: call exp
; RV32IFD-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32IFD-NEXT: addi sp, sp, 16
; RV32IFD-NEXT: ret
@@ -471,7 +471,7 @@ define double @exp_f64(double %a) nounwind strictfp {
; RV64IFD: # %bb.0:
; RV64IFD-NEXT: addi sp, sp, -16
; RV64IFD-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
-; RV64IFD-NEXT: call exp@plt
+; RV64IFD-NEXT: call exp
; RV64IFD-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64IFD-NEXT: addi sp, sp, 16
; RV64IFD-NEXT: ret
@@ -480,7 +480,7 @@ define double @exp_f64(double %a) nounwind strictfp {
; RV32IZFINXZDINX: # %bb.0:
; RV32IZFINXZDINX-NEXT: addi sp, sp, -16
; RV32IZFINXZDINX-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IZFINXZDINX-NEXT: call exp@plt
+; RV32IZFINXZDINX-NEXT: call exp
; RV32IZFINXZDINX-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32IZFINXZDINX-NEXT: addi sp, sp, 16
; RV32IZFINXZDINX-NEXT: ret
@@ -489,7 +489,7 @@ define double @exp_f64(double %a) nounwind strictfp {
; RV64IZFINXZDINX: # %bb.0:
; RV64IZFINXZDINX-NEXT: addi sp, sp, -16
; RV64IZFINXZDINX-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
-; RV64IZFINXZDINX-NEXT: call exp@plt
+; RV64IZFINXZDINX-NEXT: call exp
; RV64IZFINXZDINX-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64IZFINXZDINX-NEXT: addi sp, sp, 16
; RV64IZFINXZDINX-NEXT: ret
@@ -498,7 +498,7 @@ define double @exp_f64(double %a) nounwind strictfp {
; RV32I: # %bb.0:
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT: call exp@plt
+; RV32I-NEXT: call exp
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
@@ -507,7 +507,7 @@ define double @exp_f64(double %a) nounwind strictfp {
; RV64I: # %bb.0:
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT: call exp@plt
+; RV64I-NEXT: call exp
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
@@ -522,7 +522,7 @@ define double @exp2_f64(double %a) nounwind strictfp {
; RV32IFD: # %bb.0:
; RV32IFD-NEXT: addi sp, sp, -16
; RV32IFD-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IFD-NEXT: call exp2@plt
+; RV32IFD-NEXT: call exp2
; RV32IFD-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32IFD-NEXT: addi sp, sp, 16
; RV32IFD-NEXT: ret
@@ -531,7 +531,7 @@ define double @exp2_f64(double %a) nounwind strictfp {
; RV64IFD: # %bb.0:
; RV64IFD-NEXT: addi sp, sp, -16
; RV64IFD-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
-; RV64IFD-NEXT: call exp2@plt
+; RV64IFD-NEXT: call exp2
; RV64IFD-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64IFD-NEXT: addi sp, sp, 16
; RV64IFD-NEXT: ret
@@ -540,7 +540,7 @@ define double @exp2_f64(double %a) nounwind strictfp {
; RV32IZFINXZDINX: # %bb.0:
; RV32IZFINXZDINX-NEXT: addi sp, sp, -16
; RV32IZFINXZDINX-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IZFINXZDINX-NEXT: call exp2@plt
+; RV32IZFINXZDINX-NEXT: call exp2
; RV32IZFINXZDINX-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32IZFINXZDINX-NEXT: addi sp, sp, 16
; RV32IZFINXZDINX-NEXT: ret
@@ -549,7 +549,7 @@ define double @exp2_f64(double %a) nounwind strictfp {
; RV64IZFINXZDINX: # %bb.0:
; RV64IZFINXZDINX-NEXT: addi sp, sp, -16
; RV64IZFINXZDINX-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
-; RV64IZFINXZDINX-NEXT: call exp2@plt
+; RV64IZFINXZDINX-NEXT: call exp2
; RV64IZFINXZDINX-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64IZFINXZDINX-NEXT: addi sp, sp, 16
; RV64IZFINXZDINX-NEXT: ret
@@ -558,7 +558,7 @@ define double @exp2_f64(double %a) nounwind strictfp {
; RV32I: # %bb.0:
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT: call exp2@plt
+; RV32I-NEXT: call exp2
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
@@ -567,7 +567,7 @@ define double @exp2_f64(double %a) nounwind strictfp {
; RV64I: # %bb.0:
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT: call exp2@plt
+; RV64I-NEXT: call exp2
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
@@ -582,7 +582,7 @@ define double @log_f64(double %a) nounwind strictfp {
; RV32IFD: # %bb.0:
; RV32IFD-NEXT: addi sp, sp, -16
; RV32IFD-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IFD-NEXT: call log@plt
+; RV32IFD-NEXT: call log
; RV32IFD-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32IFD-NEXT: addi sp, sp, 16
; RV32IFD-NEXT: ret
@@ -591,7 +591,7 @@ define double @log_f64(double %a) nounwind strictfp {
; RV64IFD: # %bb.0:
; RV64IFD-NEXT: addi sp, sp, -16
; RV64IFD-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
-; RV64IFD-NEXT: call log@plt
+; RV64IFD-NEXT: call log
; RV64IFD-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64IFD-NEXT: addi sp, sp, 16
; RV64IFD-NEXT: ret
@@ -600,7 +600,7 @@ define double @log_f64(double %a) nounwind strictfp {
; RV32IZFINXZDINX: # %bb.0:
; RV32IZFINXZDINX-NEXT: addi sp, sp, -16
; RV32IZFINXZDINX-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IZFINXZDINX-NEXT: call log@plt
+; RV32IZFINXZDINX-NEXT: call log
; RV32IZFINXZDINX-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32IZFINXZDINX-NEXT: addi sp, sp, 16
; RV32IZFINXZDINX-NEXT: ret
@@ -609,7 +609,7 @@ define double @log_f64(double %a) nounwind strictfp {
; RV64IZFINXZDINX: # %bb.0:
; RV64IZFINXZDINX-NEXT: addi sp, sp, -16
; RV64IZFINXZDINX-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
-; RV64IZFINXZDINX-NEXT: call log@plt
+; RV64IZFINXZDINX-NEXT: call log
; RV64IZFINXZDINX-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64IZFINXZDINX-NEXT: addi sp, sp, 16
; RV64IZFINXZDINX-NEXT: ret
@@ -618,7 +618,7 @@ define double @log_f64(double %a) nounwind strictfp {
; RV32I: # %bb.0:
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT: call log@plt
+; RV32I-NEXT: call log
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
@@ -627,7 +627,7 @@ define double @log_f64(double %a) nounwind strictfp {
; RV64I: # %bb.0:
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT: call log@plt
+; RV64I-NEXT: call log
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
@@ -642,7 +642,7 @@ define double @log10_f64(double %a) nounwind strictfp {
; RV32IFD: # %bb.0:
; RV32IFD-NEXT: addi sp, sp, -16
; RV32IFD-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IFD-NEXT: call log10@plt
+; RV32IFD-NEXT: call log10
; RV32IFD-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32IFD-NEXT: addi sp, sp, 16
; RV32IFD-NEXT: ret
@@ -651,7 +651,7 @@ define double @log10_f64(double %a) nounwind strictfp {
; RV64IFD: # %bb.0:
; RV64IFD-NEXT: addi sp, sp, -16
; RV64IFD-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
-; RV64IFD-NEXT: call log10@plt
+; RV64IFD-NEXT: call log10
; RV64IFD-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64IFD-NEXT: addi sp, sp, 16
; RV64IFD-NEXT: ret
@@ -660,7 +660,7 @@ define double @log10_f64(double %a) nounwind strictfp {
; RV32IZFINXZDINX: # %bb.0:
; RV32IZFINXZDINX-NEXT: addi sp, sp, -16
; RV32IZFINXZDINX-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IZFINXZDINX-NEXT: call log10@plt
+; RV32IZFINXZDINX-NEXT: call log10
; RV32IZFINXZDINX-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32IZFINXZDINX-NEXT: addi sp, sp, 16
; RV32IZFINXZDINX-NEXT: ret
@@ -669,7 +669,7 @@ define double @log10_f64(double %a) nounwind strictfp {
; RV64IZFINXZDINX: # %bb.0:
; RV64IZFINXZDINX-NEXT: addi sp, sp, -16
; RV64IZFINXZDINX-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
-; RV64IZFINXZDINX-NEXT: call log10@plt
+; RV64IZFINXZDINX-NEXT: call log10
; RV64IZFINXZDINX-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64IZFINXZDINX-NEXT: addi sp, sp, 16
; RV64IZFINXZDINX-NEXT: ret
@@ -678,7 +678,7 @@ define double @log10_f64(double %a) nounwind strictfp {
; RV32I: # %bb.0:
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT: call log10@plt
+; RV32I-NEXT: call log10
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
@@ -687,7 +687,7 @@ define double @log10_f64(double %a) nounwind strictfp {
; RV64I: # %bb.0:
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT: call log10@plt
+; RV64I-NEXT: call log10
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
@@ -702,7 +702,7 @@ define double @log2_f64(double %a) nounwind strictfp {
; RV32IFD: # %bb.0:
; RV32IFD-NEXT: addi sp, sp, -16
; RV32IFD-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IFD-NEXT: call log2@plt
+; RV32IFD-NEXT: call log2
; RV32IFD-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32IFD-NEXT: addi sp, sp, 16
; RV32IFD-NEXT: ret
@@ -711,7 +711,7 @@ define double @log2_f64(double %a) nounwind strictfp {
; RV64IFD: # %bb.0:
; RV64IFD-NEXT: addi sp, sp, -16
; RV64IFD-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
-; RV64IFD-NEXT: call log2@plt
+; RV64IFD-NEXT: call log2
; RV64IFD-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64IFD-NEXT: addi sp, sp, 16
; RV64IFD-NEXT: ret
@@ -720,7 +720,7 @@ define double @log2_f64(double %a) nounwind strictfp {
; RV32IZFINXZDINX: # %bb.0:
; RV32IZFINXZDINX-NEXT: addi sp, sp, -16
; RV32IZFINXZDINX-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IZFINXZDINX-NEXT: call log2@plt
+; RV32IZFINXZDINX-NEXT: call log2
; RV32IZFINXZDINX-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32IZFINXZDINX-NEXT: addi sp, sp, 16
; RV32IZFINXZDINX-NEXT: ret
@@ -729,7 +729,7 @@ define double @log2_f64(double %a) nounwind strictfp {
; RV64IZFINXZDINX: # %bb.0:
; RV64IZFINXZDINX-NEXT: addi sp, sp, -16
; RV64IZFINXZDINX-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
-; RV64IZFINXZDINX-NEXT: call log2@plt
+; RV64IZFINXZDINX-NEXT: call log2
; RV64IZFINXZDINX-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64IZFINXZDINX-NEXT: addi sp, sp, 16
; RV64IZFINXZDINX-NEXT: ret
@@ -738,7 +738,7 @@ define double @log2_f64(double %a) nounwind strictfp {
; RV32I: # %bb.0:
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT: call log2@plt
+; RV32I-NEXT: call log2
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
@@ -747,7 +747,7 @@ define double @log2_f64(double %a) nounwind strictfp {
; RV64I: # %bb.0:
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT: call log2@plt
+; RV64I-NEXT: call log2
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
@@ -795,7 +795,7 @@ define double @fma_f64(double %a, double %b, double %c) nounwind strictfp {
; RV32I: # %bb.0:
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT: call fma@plt
+; RV32I-NEXT: call fma
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
@@ -804,7 +804,7 @@ define double @fma_f64(double %a, double %b, double %c) nounwind strictfp {
; RV64I: # %bb.0:
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT: call fma@plt
+; RV64I-NEXT: call fma
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
@@ -856,10 +856,10 @@ define double @fmuladd_f64(double %a, double %b, double %c) nounwind strictfp {
; RV32I-NEXT: sw s1, 4(sp) # 4-byte Folded Spill
; RV32I-NEXT: mv s0, a5
; RV32I-NEXT: mv s1, a4
-; RV32I-NEXT: call __muldf3@plt
+; RV32I-NEXT: call __muldf3
; RV32I-NEXT: mv a2, s1
; RV32I-NEXT: mv a3, s0
-; RV32I-NEXT: call __adddf3@plt
+; RV32I-NEXT: call __adddf3
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: lw s0, 8(sp) # 4-byte Folded Reload
; RV32I-NEXT: lw s1, 4(sp) # 4-byte Folded Reload
@@ -872,9 +872,9 @@ define double @fmuladd_f64(double %a, double %b, double %c) nounwind strictfp {
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT: sd s0, 0(sp) # 8-byte Folded Spill
; RV64I-NEXT: mv s0, a2
-; RV64I-NEXT: call __muldf3@plt
+; RV64I-NEXT: call __muldf3
; RV64I-NEXT: mv a1, s0
-; RV64I-NEXT: call __adddf3@plt
+; RV64I-NEXT: call __adddf3
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: ld s0, 0(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
@@ -890,7 +890,7 @@ define double @minnum_f64(double %a, double %b) nounwind strictfp {
; RV32IFD: # %bb.0:
; RV32IFD-NEXT: addi sp, sp, -16
; RV32IFD-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IFD-NEXT: call fmin@plt
+; RV32IFD-NEXT: call fmin
; RV32IFD-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32IFD-NEXT: addi sp, sp, 16
; RV32IFD-NEXT: ret
@@ -899,7 +899,7 @@ define double @minnum_f64(double %a, double %b) nounwind strictfp {
; RV64IFD: # %bb.0:
; RV64IFD-NEXT: addi sp, sp, -16
; RV64IFD-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
-; RV64IFD-NEXT: call fmin@plt
+; RV64IFD-NEXT: call fmin
; RV64IFD-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64IFD-NEXT: addi sp, sp, 16
; RV64IFD-NEXT: ret
@@ -908,7 +908,7 @@ define double @minnum_f64(double %a, double %b) nounwind strictfp {
; RV32IZFINXZDINX: # %bb.0:
; RV32IZFINXZDINX-NEXT: addi sp, sp, -16
; RV32IZFINXZDINX-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IZFINXZDINX-NEXT: call fmin@plt
+; RV32IZFINXZDINX-NEXT: call fmin
; RV32IZFINXZDINX-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32IZFINXZDINX-NEXT: addi sp, sp, 16
; RV32IZFINXZDINX-NEXT: ret
@@ -917,7 +917,7 @@ define double @minnum_f64(double %a, double %b) nounwind strictfp {
; RV64IZFINXZDINX: # %bb.0:
; RV64IZFINXZDINX-NEXT: addi sp, sp, -16
; RV64IZFINXZDINX-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
-; RV64IZFINXZDINX-NEXT: call fmin@plt
+; RV64IZFINXZDINX-NEXT: call fmin
; RV64IZFINXZDINX-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64IZFINXZDINX-NEXT: addi sp, sp, 16
; RV64IZFINXZDINX-NEXT: ret
@@ -926,7 +926,7 @@ define double @minnum_f64(double %a, double %b) nounwind strictfp {
; RV32I: # %bb.0:
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT: call fmin@plt
+; RV32I-NEXT: call fmin
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
@@ -935,7 +935,7 @@ define double @minnum_f64(double %a, double %b) nounwind strictfp {
; RV64I: # %bb.0:
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT: call fmin@plt
+; RV64I-NEXT: call fmin
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
@@ -950,7 +950,7 @@ define double @maxnum_f64(double %a, double %b) nounwind strictfp {
; RV32IFD: # %bb.0:
; RV32IFD-NEXT: addi sp, sp, -16
; RV32IFD-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IFD-NEXT: call fmax@plt
+; RV32IFD-NEXT: call fmax
; RV32IFD-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32IFD-NEXT: addi sp, sp, 16
; RV32IFD-NEXT: ret
@@ -959,7 +959,7 @@ define double @maxnum_f64(double %a, double %b) nounwind strictfp {
; RV64IFD: # %bb.0:
; RV64IFD-NEXT: addi sp, sp, -16
; RV64IFD-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
-; RV64IFD-NEXT: call fmax@plt
+; RV64IFD-NEXT: call fmax
; RV64IFD-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64IFD-NEXT: addi sp, sp, 16
; RV64IFD-NEXT: ret
@@ -968,7 +968,7 @@ define double @maxnum_f64(double %a, double %b) nounwind strictfp {
; RV32IZFINXZDINX: # %bb.0:
; RV32IZFINXZDINX-NEXT: addi sp, sp, -16
; RV32IZFINXZDINX-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IZFINXZDINX-NEXT: call fmax@plt
+; RV32IZFINXZDINX-NEXT: call fmax
; RV32IZFINXZDINX-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32IZFINXZDINX-NEXT: addi sp, sp, 16
; RV32IZFINXZDINX-NEXT: ret
@@ -977,7 +977,7 @@ define double @maxnum_f64(double %a, double %b) nounwind strictfp {
; RV64IZFINXZDINX: # %bb.0:
; RV64IZFINXZDINX-NEXT: addi sp, sp, -16
; RV64IZFINXZDINX-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
-; RV64IZFINXZDINX-NEXT: call fmax@plt
+; RV64IZFINXZDINX-NEXT: call fmax
; RV64IZFINXZDINX-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64IZFINXZDINX-NEXT: addi sp, sp, 16
; RV64IZFINXZDINX-NEXT: ret
@@ -986,7 +986,7 @@ define double @maxnum_f64(double %a, double %b) nounwind strictfp {
; RV32I: # %bb.0:
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT: call fmax@plt
+; RV32I-NEXT: call fmax
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
@@ -995,7 +995,7 @@ define double @maxnum_f64(double %a, double %b) nounwind strictfp {
; RV64I: # %bb.0:
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT: call fmax@plt
+; RV64I-NEXT: call fmax
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
@@ -1027,7 +1027,7 @@ define double @floor_f64(double %a) nounwind strictfp {
; RV32IFD: # %bb.0:
; RV32IFD-NEXT: addi sp, sp, -16
; RV32IFD-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IFD-NEXT: call floor@plt
+; RV32IFD-NEXT: call floor
; RV32IFD-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32IFD-NEXT: addi sp, sp, 16
; RV32IFD-NEXT: ret
@@ -1036,7 +1036,7 @@ define double @floor_f64(double %a) nounwind strictfp {
; RV64IFD: # %bb.0:
; RV64IFD-NEXT: addi sp, sp, -16
; RV64IFD-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
-; RV64IFD-NEXT: call floor@plt
+; RV64IFD-NEXT: call floor
; RV64IFD-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64IFD-NEXT: addi sp, sp, 16
; RV64IFD-NEXT: ret
@@ -1045,7 +1045,7 @@ define double @floor_f64(double %a) nounwind strictfp {
; RV32IZFINXZDINX: # %bb.0:
; RV32IZFINXZDINX-NEXT: addi sp, sp, -16
; RV32IZFINXZDINX-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IZFINXZDINX-NEXT: call floor@plt
+; RV32IZFINXZDINX-NEXT: call floor
; RV32IZFINXZDINX-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32IZFINXZDINX-NEXT: addi sp, sp, 16
; RV32IZFINXZDINX-NEXT: ret
@@ -1054,7 +1054,7 @@ define double @floor_f64(double %a) nounwind strictfp {
; RV64IZFINXZDINX: # %bb.0:
; RV64IZFINXZDINX-NEXT: addi sp, sp, -16
; RV64IZFINXZDINX-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
-; RV64IZFINXZDINX-NEXT: call floor@plt
+; RV64IZFINXZDINX-NEXT: call floor
; RV64IZFINXZDINX-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64IZFINXZDINX-NEXT: addi sp, sp, 16
; RV64IZFINXZDINX-NEXT: ret
@@ -1063,7 +1063,7 @@ define double @floor_f64(double %a) nounwind strictfp {
; RV32I: # %bb.0:
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT: call floor@plt
+; RV32I-NEXT: call floor
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
@@ -1072,7 +1072,7 @@ define double @floor_f64(double %a) nounwind strictfp {
; RV64I: # %bb.0:
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT: call floor@plt
+; RV64I-NEXT: call floor
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
@@ -1087,7 +1087,7 @@ define double @ceil_f64(double %a) nounwind strictfp {
; RV32IFD: # %bb.0:
; RV32IFD-NEXT: addi sp, sp, -16
; RV32IFD-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IFD-NEXT: call ceil@plt
+; RV32IFD-NEXT: call ceil
; RV32IFD-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32IFD-NEXT: addi sp, sp, 16
; RV32IFD-NEXT: ret
@@ -1096,7 +1096,7 @@ define double @ceil_f64(double %a) nounwind strictfp {
; RV64IFD: # %bb.0:
; RV64IFD-NEXT: addi sp, sp, -16
; RV64IFD-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
-; RV64IFD-NEXT: call ceil@plt
+; RV64IFD-NEXT: call ceil
; RV64IFD-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64IFD-NEXT: addi sp, sp, 16
; RV64IFD-NEXT: ret
@@ -1105,7 +1105,7 @@ define double @ceil_f64(double %a) nounwind strictfp {
; RV32IZFINXZDINX: # %bb.0:
; RV32IZFINXZDINX-NEXT: addi sp, sp, -16
; RV32IZFINXZDINX-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IZFINXZDINX-NEXT: call ceil@plt
+; RV32IZFINXZDINX-NEXT: call ceil
; RV32IZFINXZDINX-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32IZFINXZDINX-NEXT: addi sp, sp, 16
; RV32IZFINXZDINX-NEXT: ret
@@ -1114,7 +1114,7 @@ define double @ceil_f64(double %a) nounwind strictfp {
; RV64IZFINXZDINX: # %bb.0:
; RV64IZFINXZDINX-NEXT: addi sp, sp, -16
; RV64IZFINXZDINX-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
-; RV64IZFINXZDINX-NEXT: call ceil@plt
+; RV64IZFINXZDINX-NEXT: call ceil
; RV64IZFINXZDINX-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64IZFINXZDINX-NEXT: addi sp, sp, 16
; RV64IZFINXZDINX-NEXT: ret
@@ -1123,7 +1123,7 @@ define double @ceil_f64(double %a) nounwind strictfp {
; RV32I: # %bb.0:
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT: call ceil@plt
+; RV32I-NEXT: call ceil
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
@@ -1132,7 +1132,7 @@ define double @ceil_f64(double %a) nounwind strictfp {
; RV64I: # %bb.0:
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT: call ceil@plt
+; RV64I-NEXT: call ceil
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
@@ -1147,7 +1147,7 @@ define double @trunc_f64(double %a) nounwind strictfp {
; RV32IFD: # %bb.0:
; RV32IFD-NEXT: addi sp, sp, -16
; RV32IFD-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IFD-NEXT: call trunc@plt
+; RV32IFD-NEXT: call trunc
; RV32IFD-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32IFD-NEXT: addi sp, sp, 16
; RV32IFD-NEXT: ret
@@ -1156,7 +1156,7 @@ define double @trunc_f64(double %a) nounwind strictfp {
; RV64IFD: # %bb.0:
; RV64IFD-NEXT: addi sp, sp, -16
; RV64IFD-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
-; RV64IFD-NEXT: call trunc@plt
+; RV64IFD-NEXT: call trunc
; RV64IFD-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64IFD-NEXT: addi sp, sp, 16
; RV64IFD-NEXT: ret
@@ -1165,7 +1165,7 @@ define double @trunc_f64(double %a) nounwind strictfp {
; RV32IZFINXZDINX: # %bb.0:
; RV32IZFINXZDINX-NEXT: addi sp, sp, -16
; RV32IZFINXZDINX-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IZFINXZDINX-NEXT: call trunc@plt
+; RV32IZFINXZDINX-NEXT: call trunc
; RV32IZFINXZDINX-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32IZFINXZDINX-NEXT: addi sp, sp, 16
; RV32IZFINXZDINX-NEXT: ret
@@ -1174,7 +1174,7 @@ define double @trunc_f64(double %a) nounwind strictfp {
; RV64IZFINXZDINX: # %bb.0:
; RV64IZFINXZDINX-NEXT: addi sp, sp, -16
; RV64IZFINXZDINX-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
-; RV64IZFINXZDINX-NEXT: call trunc@plt
+; RV64IZFINXZDINX-NEXT: call trunc
; RV64IZFINXZDINX-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64IZFINXZDINX-NEXT: addi sp, sp, 16
; RV64IZFINXZDINX-NEXT: ret
@@ -1183,7 +1183,7 @@ define double @trunc_f64(double %a) nounwind strictfp {
; RV32I: # %bb.0:
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT: call trunc@plt
+; RV32I-NEXT: call trunc
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
@@ -1192,7 +1192,7 @@ define double @trunc_f64(double %a) nounwind strictfp {
; RV64I: # %bb.0:
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT: call trunc@plt
+; RV64I-NEXT: call trunc
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
@@ -1207,7 +1207,7 @@ define double @rint_f64(double %a) nounwind strictfp {
; RV32IFD: # %bb.0:
; RV32IFD-NEXT: addi sp, sp, -16
; RV32IFD-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IFD-NEXT: call rint@plt
+; RV32IFD-NEXT: call rint
; RV32IFD-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32IFD-NEXT: addi sp, sp, 16
; RV32IFD-NEXT: ret
@@ -1216,7 +1216,7 @@ define double @rint_f64(double %a) nounwind strictfp {
; RV64IFD: # %bb.0:
; RV64IFD-NEXT: addi sp, sp, -16
; RV64IFD-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
-; RV64IFD-NEXT: call rint@plt
+; RV64IFD-NEXT: call rint
; RV64IFD-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64IFD-NEXT: addi sp, sp, 16
; RV64IFD-NEXT: ret
@@ -1225,7 +1225,7 @@ define double @rint_f64(double %a) nounwind strictfp {
; RV32IZFINXZDINX: # %bb.0:
; RV32IZFINXZDINX-NEXT: addi sp, sp, -16
; RV32IZFINXZDINX-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IZFINXZDINX-NEXT: call rint@plt
+; RV32IZFINXZDINX-NEXT: call rint
; RV32IZFINXZDINX-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32IZFINXZDINX-NEXT: addi sp, sp, 16
; RV32IZFINXZDINX-NEXT: ret
@@ -1234,7 +1234,7 @@ define double @rint_f64(double %a) nounwind strictfp {
; RV64IZFINXZDINX: # %bb.0:
; RV64IZFINXZDINX-NEXT: addi sp, sp, -16
; RV64IZFINXZDINX-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
-; RV64IZFINXZDINX-NEXT: call rint@plt
+; RV64IZFINXZDINX-NEXT: call rint
; RV64IZFINXZDINX-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64IZFINXZDINX-NEXT: addi sp, sp, 16
; RV64IZFINXZDINX-NEXT: ret
@@ -1243,7 +1243,7 @@ define double @rint_f64(double %a) nounwind strictfp {
; RV32I: # %bb.0:
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT: call rint@plt
+; RV32I-NEXT: call rint
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
@@ -1252,7 +1252,7 @@ define double @rint_f64(double %a) nounwind strictfp {
; RV64I: # %bb.0:
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT: call rint@plt
+; RV64I-NEXT: call rint
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
@@ -1267,7 +1267,7 @@ define double @nearbyint_f64(double %a) nounwind strictfp {
; RV32IFD: # %bb.0:
; RV32IFD-NEXT: addi sp, sp, -16
; RV32IFD-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IFD-NEXT: call nearbyint@plt
+; RV32IFD-NEXT: call nearbyint
; RV32IFD-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32IFD-NEXT: addi sp, sp, 16
; RV32IFD-NEXT: ret
@@ -1276,7 +1276,7 @@ define double @nearbyint_f64(double %a) nounwind strictfp {
; RV64IFD: # %bb.0:
; RV64IFD-NEXT: addi sp, sp, -16
; RV64IFD-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
-; RV64IFD-NEXT: call nearbyint@plt
+; RV64IFD-NEXT: call nearbyint
; RV64IFD-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64IFD-NEXT: addi sp, sp, 16
; RV64IFD-NEXT: ret
@@ -1285,7 +1285,7 @@ define double @nearbyint_f64(double %a) nounwind strictfp {
; RV32IZFINXZDINX: # %bb.0:
; RV32IZFINXZDINX-NEXT: addi sp, sp, -16
; RV32IZFINXZDINX-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IZFINXZDINX-NEXT: call nearbyint@plt
+; RV32IZFINXZDINX-NEXT: call nearbyint
; RV32IZFINXZDINX-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32IZFINXZDINX-NEXT: addi sp, sp, 16
; RV32IZFINXZDINX-NEXT: ret
@@ -1294,7 +1294,7 @@ define double @nearbyint_f64(double %a) nounwind strictfp {
; RV64IZFINXZDINX: # %bb.0:
; RV64IZFINXZDINX-NEXT: addi sp, sp, -16
; RV64IZFINXZDINX-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
-; RV64IZFINXZDINX-NEXT: call nearbyint@plt
+; RV64IZFINXZDINX-NEXT: call nearbyint
; RV64IZFINXZDINX-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64IZFINXZDINX-NEXT: addi sp, sp, 16
; RV64IZFINXZDINX-NEXT: ret
@@ -1303,7 +1303,7 @@ define double @nearbyint_f64(double %a) nounwind strictfp {
; RV32I: # %bb.0:
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT: call nearbyint@plt
+; RV32I-NEXT: call nearbyint
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
@@ -1312,7 +1312,7 @@ define double @nearbyint_f64(double %a) nounwind strictfp {
; RV64I: # %bb.0:
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT: call nearbyint@plt
+; RV64I-NEXT: call nearbyint
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
@@ -1327,7 +1327,7 @@ define double @round_f64(double %a) nounwind strictfp {
; RV32IFD: # %bb.0:
; RV32IFD-NEXT: addi sp, sp, -16
; RV32IFD-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IFD-NEXT: call round@plt
+; RV32IFD-NEXT: call round
; RV32IFD-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32IFD-NEXT: addi sp, sp, 16
; RV32IFD-NEXT: ret
@@ -1336,7 +1336,7 @@ define double @round_f64(double %a) nounwind strictfp {
; RV64IFD: # %bb.0:
; RV64IFD-NEXT: addi sp, sp, -16
; RV64IFD-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
-; RV64IFD-NEXT: call round@plt
+; RV64IFD-NEXT: call round
; RV64IFD-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64IFD-NEXT: addi sp, sp, 16
; RV64IFD-NEXT: ret
@@ -1345,7 +1345,7 @@ define double @round_f64(double %a) nounwind strictfp {
; RV32IZFINXZDINX: # %bb.0:
; RV32IZFINXZDINX-NEXT: addi sp, sp, -16
; RV32IZFINXZDINX-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IZFINXZDINX-NEXT: call round@plt
+; RV32IZFINXZDINX-NEXT: call round
; RV32IZFINXZDINX-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32IZFINXZDINX-NEXT: addi sp, sp, 16
; RV32IZFINXZDINX-NEXT: ret
@@ -1354,7 +1354,7 @@ define double @round_f64(double %a) nounwind strictfp {
; RV64IZFINXZDINX: # %bb.0:
; RV64IZFINXZDINX-NEXT: addi sp, sp, -16
; RV64IZFINXZDINX-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
-; RV64IZFINXZDINX-NEXT: call round@plt
+; RV64IZFINXZDINX-NEXT: call round
; RV64IZFINXZDINX-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64IZFINXZDINX-NEXT: addi sp, sp, 16
; RV64IZFINXZDINX-NEXT: ret
@@ -1363,7 +1363,7 @@ define double @round_f64(double %a) nounwind strictfp {
; RV32I: # %bb.0:
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT: call round@plt
+; RV32I-NEXT: call round
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
@@ -1372,7 +1372,7 @@ define double @round_f64(double %a) nounwind strictfp {
; RV64I: # %bb.0:
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT: call round@plt
+; RV64I-NEXT: call round
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
@@ -1387,7 +1387,7 @@ define double @roundeven_f64(double %a) nounwind strictfp {
; RV32IFD: # %bb.0:
; RV32IFD-NEXT: addi sp, sp, -16
; RV32IFD-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IFD-NEXT: call roundeven@plt
+; RV32IFD-NEXT: call roundeven
; RV32IFD-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32IFD-NEXT: addi sp, sp, 16
; RV32IFD-NEXT: ret
@@ -1396,7 +1396,7 @@ define double @roundeven_f64(double %a) nounwind strictfp {
; RV64IFD: # %bb.0:
; RV64IFD-NEXT: addi sp, sp, -16
; RV64IFD-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
-; RV64IFD-NEXT: call roundeven@plt
+; RV64IFD-NEXT: call roundeven
; RV64IFD-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64IFD-NEXT: addi sp, sp, 16
; RV64IFD-NEXT: ret
@@ -1405,7 +1405,7 @@ define double @roundeven_f64(double %a) nounwind strictfp {
; RV32IZFINXZDINX: # %bb.0:
; RV32IZFINXZDINX-NEXT: addi sp, sp, -16
; RV32IZFINXZDINX-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IZFINXZDINX-NEXT: call roundeven@plt
+; RV32IZFINXZDINX-NEXT: call roundeven
; RV32IZFINXZDINX-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32IZFINXZDINX-NEXT: addi sp, sp, 16
; RV32IZFINXZDINX-NEXT: ret
@@ -1414,7 +1414,7 @@ define double @roundeven_f64(double %a) nounwind strictfp {
; RV64IZFINXZDINX: # %bb.0:
; RV64IZFINXZDINX-NEXT: addi sp, sp, -16
; RV64IZFINXZDINX-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
-; RV64IZFINXZDINX-NEXT: call roundeven@plt
+; RV64IZFINXZDINX-NEXT: call roundeven
; RV64IZFINXZDINX-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64IZFINXZDINX-NEXT: addi sp, sp, 16
; RV64IZFINXZDINX-NEXT: ret
@@ -1423,7 +1423,7 @@ define double @roundeven_f64(double %a) nounwind strictfp {
; RV32I: # %bb.0:
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT: call roundeven@plt
+; RV32I-NEXT: call roundeven
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
@@ -1432,7 +1432,7 @@ define double @roundeven_f64(double %a) nounwind strictfp {
; RV64I: # %bb.0:
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT: call roundeven@plt
+; RV64I-NEXT: call roundeven
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
@@ -1473,7 +1473,7 @@ define iXLen @lrint_f64(double %a) nounwind strictfp {
; RV32I: # %bb.0:
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT: call lrint@plt
+; RV32I-NEXT: call lrint
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
@@ -1482,7 +1482,7 @@ define iXLen @lrint_f64(double %a) nounwind strictfp {
; RV64I: # %bb.0:
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT: call lrint@plt
+; RV64I-NEXT: call lrint
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
@@ -1523,7 +1523,7 @@ define iXLen @lround_f64(double %a) nounwind strictfp {
; RV32I: # %bb.0:
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT: call lround@plt
+; RV32I-NEXT: call lround
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
@@ -1532,7 +1532,7 @@ define iXLen @lround_f64(double %a) nounwind strictfp {
; RV64I: # %bb.0:
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT: call lround@plt
+; RV64I-NEXT: call lround
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
@@ -1547,7 +1547,7 @@ define i64 @llrint_f64(double %a) nounwind strictfp {
; RV32IFD: # %bb.0:
; RV32IFD-NEXT: addi sp, sp, -16
; RV32IFD-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IFD-NEXT: call llrint@plt
+; RV32IFD-NEXT: call llrint
; RV32IFD-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32IFD-NEXT: addi sp, sp, 16
; RV32IFD-NEXT: ret
@@ -1561,7 +1561,7 @@ define i64 @llrint_f64(double %a) nounwind strictfp {
; RV32IZFINXZDINX: # %bb.0:
; RV32IZFINXZDINX-NEXT: addi sp, sp, -16
; RV32IZFINXZDINX-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IZFINXZDINX-NEXT: call llrint@plt
+; RV32IZFINXZDINX-NEXT: call llrint
; RV32IZFINXZDINX-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32IZFINXZDINX-NEXT: addi sp, sp, 16
; RV32IZFINXZDINX-NEXT: ret
@@ -1575,7 +1575,7 @@ define i64 @llrint_f64(double %a) nounwind strictfp {
; RV32I: # %bb.0:
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT: call llrint@plt
+; RV32I-NEXT: call llrint
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
@@ -1584,7 +1584,7 @@ define i64 @llrint_f64(double %a) nounwind strictfp {
; RV64I: # %bb.0:
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT: call llrint@plt
+; RV64I-NEXT: call llrint
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
@@ -1599,7 +1599,7 @@ define i64 @llround_f64(double %a) nounwind strictfp {
; RV32IFD: # %bb.0:
; RV32IFD-NEXT: addi sp, sp, -16
; RV32IFD-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IFD-NEXT: call llround@plt
+; RV32IFD-NEXT: call llround
; RV32IFD-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32IFD-NEXT: addi sp, sp, 16
; RV32IFD-NEXT: ret
@@ -1613,7 +1613,7 @@ define i64 @llround_f64(double %a) nounwind strictfp {
; RV32IZFINXZDINX: # %bb.0:
; RV32IZFINXZDINX-NEXT: addi sp, sp, -16
; RV32IZFINXZDINX-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IZFINXZDINX-NEXT: call llround@plt
+; RV32IZFINXZDINX-NEXT: call llround
; RV32IZFINXZDINX-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32IZFINXZDINX-NEXT: addi sp, sp, 16
; RV32IZFINXZDINX-NEXT: ret
@@ -1627,7 +1627,7 @@ define i64 @llround_f64(double %a) nounwind strictfp {
; RV32I: # %bb.0:
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT: call llround@plt
+; RV32I-NEXT: call llround
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
@@ -1636,7 +1636,7 @@ define i64 @llround_f64(double %a) nounwind strictfp {
; RV64I: # %bb.0:
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT: call llround@plt
+; RV64I-NEXT: call llround
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
diff --git a/llvm/test/CodeGen/RISCV/double-intrinsics.ll b/llvm/test/CodeGen/RISCV/double-intrinsics.ll
index 36268accc8fd..f290cf0f7736 100644
--- a/llvm/test/CodeGen/RISCV/double-intrinsics.ll
+++ b/llvm/test/CodeGen/RISCV/double-intrinsics.ll
@@ -48,7 +48,7 @@ define double @sqrt_f64(double %a) nounwind {
; RV32I: # %bb.0:
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT: call sqrt@plt
+; RV32I-NEXT: call sqrt
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
@@ -57,7 +57,7 @@ define double @sqrt_f64(double %a) nounwind {
; RV64I: # %bb.0:
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT: call sqrt@plt
+; RV64I-NEXT: call sqrt
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
@@ -70,14 +70,14 @@ declare double @llvm.powi.f64.i32(double, i32)
define double @powi_f64(double %a, i32 %b) nounwind {
; RV32IFD-LABEL: powi_f64:
; RV32IFD: # %bb.0:
-; RV32IFD-NEXT: tail __powidf2@plt
+; RV32IFD-NEXT: tail __powidf2
;
; RV64IFD-LABEL: powi_f64:
; RV64IFD: # %bb.0:
; RV64IFD-NEXT: addi sp, sp, -16
; RV64IFD-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64IFD-NEXT: sext.w a0, a0
-; RV64IFD-NEXT: call __powidf2@plt
+; RV64IFD-NEXT: call __powidf2
; RV64IFD-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64IFD-NEXT: addi sp, sp, 16
; RV64IFD-NEXT: ret
@@ -86,7 +86,7 @@ define double @powi_f64(double %a, i32 %b) nounwind {
; RV32IZFINXZDINX: # %bb.0:
; RV32IZFINXZDINX-NEXT: addi sp, sp, -16
; RV32IZFINXZDINX-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IZFINXZDINX-NEXT: call __powidf2@plt
+; RV32IZFINXZDINX-NEXT: call __powidf2
; RV32IZFINXZDINX-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32IZFINXZDINX-NEXT: addi sp, sp, 16
; RV32IZFINXZDINX-NEXT: ret
@@ -96,7 +96,7 @@ define double @powi_f64(double %a, i32 %b) nounwind {
; RV64IZFINXZDINX-NEXT: addi sp, sp, -16
; RV64IZFINXZDINX-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64IZFINXZDINX-NEXT: sext.w a1, a1
-; RV64IZFINXZDINX-NEXT: call __powidf2@plt
+; RV64IZFINXZDINX-NEXT: call __powidf2
; RV64IZFINXZDINX-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64IZFINXZDINX-NEXT: addi sp, sp, 16
; RV64IZFINXZDINX-NEXT: ret
@@ -105,7 +105,7 @@ define double @powi_f64(double %a, i32 %b) nounwind {
; RV32I: # %bb.0:
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT: call __powidf2@plt
+; RV32I-NEXT: call __powidf2
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
@@ -115,7 +115,7 @@ define double @powi_f64(double %a, i32 %b) nounwind {
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT: sext.w a1, a1
-; RV64I-NEXT: call __powidf2@plt
+; RV64I-NEXT: call __powidf2
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
@@ -128,26 +128,26 @@ declare double @llvm.sin.f64(double)
define double @sin_f64(double %a) nounwind {
; CHECKIFD-LABEL: sin_f64:
; CHECKIFD: # %bb.0:
-; CHECKIFD-NEXT: tail sin@plt
+; CHECKIFD-NEXT: tail sin
;
; RV32IZFINXZDINX-LABEL: sin_f64:
; RV32IZFINXZDINX: # %bb.0:
; RV32IZFINXZDINX-NEXT: addi sp, sp, -16
; RV32IZFINXZDINX-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IZFINXZDINX-NEXT: call sin@plt
+; RV32IZFINXZDINX-NEXT: call sin
; RV32IZFINXZDINX-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32IZFINXZDINX-NEXT: addi sp, sp, 16
; RV32IZFINXZDINX-NEXT: ret
;
; RV64IZFINXZDINX-LABEL: sin_f64:
; RV64IZFINXZDINX: # %bb.0:
-; RV64IZFINXZDINX-NEXT: tail sin@plt
+; RV64IZFINXZDINX-NEXT: tail sin
;
; RV32I-LABEL: sin_f64:
; RV32I: # %bb.0:
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT: call sin@plt
+; RV32I-NEXT: call sin
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
@@ -156,7 +156,7 @@ define double @sin_f64(double %a) nounwind {
; RV64I: # %bb.0:
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT: call sin@plt
+; RV64I-NEXT: call sin
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
@@ -169,26 +169,26 @@ declare double @llvm.cos.f64(double)
define double @cos_f64(double %a) nounwind {
; CHECKIFD-LABEL: cos_f64:
; CHECKIFD: # %bb.0:
-; CHECKIFD-NEXT: tail cos@plt
+; CHECKIFD-NEXT: tail cos
;
; RV32IZFINXZDINX-LABEL: cos_f64:
; RV32IZFINXZDINX: # %bb.0:
; RV32IZFINXZDINX-NEXT: addi sp, sp, -16
; RV32IZFINXZDINX-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IZFINXZDINX-NEXT: call cos@plt
+; RV32IZFINXZDINX-NEXT: call cos
; RV32IZFINXZDINX-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32IZFINXZDINX-NEXT: addi sp, sp, 16
; RV32IZFINXZDINX-NEXT: ret
;
; RV64IZFINXZDINX-LABEL: cos_f64:
; RV64IZFINXZDINX: # %bb.0:
-; RV64IZFINXZDINX-NEXT: tail cos@plt
+; RV64IZFINXZDINX-NEXT: tail cos
;
; RV32I-LABEL: cos_f64:
; RV32I: # %bb.0:
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT: call cos@plt
+; RV32I-NEXT: call cos
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
@@ -197,7 +197,7 @@ define double @cos_f64(double %a) nounwind {
; RV64I: # %bb.0:
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT: call cos@plt
+; RV64I-NEXT: call cos
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
@@ -214,10 +214,10 @@ define double @sincos_f64(double %a) nounwind {
; RV32IFD-NEXT: fsd fs0, 16(sp) # 8-byte Folded Spill
; RV32IFD-NEXT: fsd fs1, 8(sp) # 8-byte Folded Spill
; RV32IFD-NEXT: fmv.d fs0, fa0
-; RV32IFD-NEXT: call sin@plt
+; RV32IFD-NEXT: call sin
; RV32IFD-NEXT: fmv.d fs1, fa0
; RV32IFD-NEXT: fmv.d fa0, fs0
-; RV32IFD-NEXT: call cos@plt
+; RV32IFD-NEXT: call cos
; RV32IFD-NEXT: fadd.d fa0, fs1, fa0
; RV32IFD-NEXT: lw ra, 28(sp) # 4-byte Folded Reload
; RV32IFD-NEXT: fld fs0, 16(sp) # 8-byte Folded Reload
@@ -232,10 +232,10 @@ define double @sincos_f64(double %a) nounwind {
; RV64IFD-NEXT: fsd fs0, 16(sp) # 8-byte Folded Spill
; RV64IFD-NEXT: fsd fs1, 8(sp) # 8-byte Folded Spill
; RV64IFD-NEXT: fmv.d fs0, fa0
-; RV64IFD-NEXT: call sin@plt
+; RV64IFD-NEXT: call sin
; RV64IFD-NEXT: fmv.d fs1, fa0
; RV64IFD-NEXT: fmv.d fa0, fs0
-; RV64IFD-NEXT: call cos@plt
+; RV64IFD-NEXT: call cos
; RV64IFD-NEXT: fadd.d fa0, fs1, fa0
; RV64IFD-NEXT: ld ra, 24(sp) # 8-byte Folded Reload
; RV64IFD-NEXT: fld fs0, 16(sp) # 8-byte Folded Reload
@@ -253,14 +253,14 @@ define double @sincos_f64(double %a) nounwind {
; RV32IZFINXZDINX-NEXT: sw s3, 12(sp) # 4-byte Folded Spill
; RV32IZFINXZDINX-NEXT: mv s0, a1
; RV32IZFINXZDINX-NEXT: mv s1, a0
-; RV32IZFINXZDINX-NEXT: call sin@plt
+; RV32IZFINXZDINX-NEXT: call sin
; RV32IZFINXZDINX-NEXT: sw a0, 0(sp)
; RV32IZFINXZDINX-NEXT: sw a1, 4(sp)
; RV32IZFINXZDINX-NEXT: lw s2, 0(sp)
; RV32IZFINXZDINX-NEXT: lw s3, 4(sp)
; RV32IZFINXZDINX-NEXT: mv a0, s1
; RV32IZFINXZDINX-NEXT: mv a1, s0
-; RV32IZFINXZDINX-NEXT: call cos@plt
+; RV32IZFINXZDINX-NEXT: call cos
; RV32IZFINXZDINX-NEXT: sw a0, 0(sp)
; RV32IZFINXZDINX-NEXT: sw a1, 4(sp)
; RV32IZFINXZDINX-NEXT: lw a0, 0(sp)
@@ -285,10 +285,10 @@ define double @sincos_f64(double %a) nounwind {
; RV64IZFINXZDINX-NEXT: sd s0, 16(sp) # 8-byte Folded Spill
; RV64IZFINXZDINX-NEXT: sd s1, 8(sp) # 8-byte Folded Spill
; RV64IZFINXZDINX-NEXT: mv s0, a0
-; RV64IZFINXZDINX-NEXT: call sin@plt
+; RV64IZFINXZDINX-NEXT: call sin
; RV64IZFINXZDINX-NEXT: mv s1, a0
; RV64IZFINXZDINX-NEXT: mv a0, s0
-; RV64IZFINXZDINX-NEXT: call cos@plt
+; RV64IZFINXZDINX-NEXT: call cos
; RV64IZFINXZDINX-NEXT: fadd.d a0, s1, a0
; RV64IZFINXZDINX-NEXT: ld ra, 24(sp) # 8-byte Folded Reload
; RV64IZFINXZDINX-NEXT: ld s0, 16(sp) # 8-byte Folded Reload
@@ -306,17 +306,17 @@ define double @sincos_f64(double %a) nounwind {
; RV32I-NEXT: sw s3, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT: mv s0, a1
; RV32I-NEXT: mv s1, a0
-; RV32I-NEXT: call sin@plt
+; RV32I-NEXT: call sin
; RV32I-NEXT: mv s2, a0
; RV32I-NEXT: mv s3, a1
; RV32I-NEXT: mv a0, s1
; RV32I-NEXT: mv a1, s0
-; RV32I-NEXT: call cos@plt
+; RV32I-NEXT: call cos
; RV32I-NEXT: mv a2, a0
; RV32I-NEXT: mv a3, a1
; RV32I-NEXT: mv a0, s2
; RV32I-NEXT: mv a1, s3
-; RV32I-NEXT: call __adddf3@plt
+; RV32I-NEXT: call __adddf3
; RV32I-NEXT: lw ra, 28(sp) # 4-byte Folded Reload
; RV32I-NEXT: lw s0, 24(sp) # 4-byte Folded Reload
; RV32I-NEXT: lw s1, 20(sp) # 4-byte Folded Reload
@@ -332,13 +332,13 @@ define double @sincos_f64(double %a) nounwind {
; RV64I-NEXT: sd s0, 16(sp) # 8-byte Folded Spill
; RV64I-NEXT: sd s1, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT: mv s0, a0
-; RV64I-NEXT: call sin@plt
+; RV64I-NEXT: call sin
; RV64I-NEXT: mv s1, a0
; RV64I-NEXT: mv a0, s0
-; RV64I-NEXT: call cos@plt
+; RV64I-NEXT: call cos
; RV64I-NEXT: mv a1, a0
; RV64I-NEXT: mv a0, s1
-; RV64I-NEXT: call __adddf3@plt
+; RV64I-NEXT: call __adddf3
; RV64I-NEXT: ld ra, 24(sp) # 8-byte Folded Reload
; RV64I-NEXT: ld s0, 16(sp) # 8-byte Folded Reload
; RV64I-NEXT: ld s1, 8(sp) # 8-byte Folded Reload
@@ -355,26 +355,26 @@ declare double @llvm.pow.f64(double, double)
define double @pow_f64(double %a, double %b) nounwind {
; CHECKIFD-LABEL: pow_f64:
; CHECKIFD: # %bb.0:
-; CHECKIFD-NEXT: tail pow@plt
+; CHECKIFD-NEXT: tail pow
;
; RV32IZFINXZDINX-LABEL: pow_f64:
; RV32IZFINXZDINX: # %bb.0:
; RV32IZFINXZDINX-NEXT: addi sp, sp, -16
; RV32IZFINXZDINX-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IZFINXZDINX-NEXT: call pow@plt
+; RV32IZFINXZDINX-NEXT: call pow
; RV32IZFINXZDINX-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32IZFINXZDINX-NEXT: addi sp, sp, 16
; RV32IZFINXZDINX-NEXT: ret
;
; RV64IZFINXZDINX-LABEL: pow_f64:
; RV64IZFINXZDINX: # %bb.0:
-; RV64IZFINXZDINX-NEXT: tail pow@plt
+; RV64IZFINXZDINX-NEXT: tail pow
;
; RV32I-LABEL: pow_f64:
; RV32I: # %bb.0:
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT: call pow@plt
+; RV32I-NEXT: call pow
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
@@ -383,7 +383,7 @@ define double @pow_f64(double %a, double %b) nounwind {
; RV64I: # %bb.0:
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT: call pow@plt
+; RV64I-NEXT: call pow
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
@@ -396,26 +396,26 @@ declare double @llvm.exp.f64(double)
define double @exp_f64(double %a) nounwind {
; CHECKIFD-LABEL: exp_f64:
; CHECKIFD: # %bb.0:
-; CHECKIFD-NEXT: tail exp@plt
+; CHECKIFD-NEXT: tail exp
;
; RV32IZFINXZDINX-LABEL: exp_f64:
; RV32IZFINXZDINX: # %bb.0:
; RV32IZFINXZDINX-NEXT: addi sp, sp, -16
; RV32IZFINXZDINX-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IZFINXZDINX-NEXT: call exp@plt
+; RV32IZFINXZDINX-NEXT: call exp
; RV32IZFINXZDINX-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32IZFINXZDINX-NEXT: addi sp, sp, 16
; RV32IZFINXZDINX-NEXT: ret
;
; RV64IZFINXZDINX-LABEL: exp_f64:
; RV64IZFINXZDINX: # %bb.0:
-; RV64IZFINXZDINX-NEXT: tail exp@plt
+; RV64IZFINXZDINX-NEXT: tail exp
;
; RV32I-LABEL: exp_f64:
; RV32I: # %bb.0:
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT: call exp@plt
+; RV32I-NEXT: call exp
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
@@ -424,7 +424,7 @@ define double @exp_f64(double %a) nounwind {
; RV64I: # %bb.0:
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT: call exp@plt
+; RV64I-NEXT: call exp
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
@@ -437,26 +437,26 @@ declare double @llvm.exp2.f64(double)
define double @exp2_f64(double %a) nounwind {
; CHECKIFD-LABEL: exp2_f64:
; CHECKIFD: # %bb.0:
-; CHECKIFD-NEXT: tail exp2@plt
+; CHECKIFD-NEXT: tail exp2
;
; RV32IZFINXZDINX-LABEL: exp2_f64:
; RV32IZFINXZDINX: # %bb.0:
; RV32IZFINXZDINX-NEXT: addi sp, sp, -16
; RV32IZFINXZDINX-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IZFINXZDINX-NEXT: call exp2@plt
+; RV32IZFINXZDINX-NEXT: call exp2
; RV32IZFINXZDINX-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32IZFINXZDINX-NEXT: addi sp, sp, 16
; RV32IZFINXZDINX-NEXT: ret
;
; RV64IZFINXZDINX-LABEL: exp2_f64:
; RV64IZFINXZDINX: # %bb.0:
-; RV64IZFINXZDINX-NEXT: tail exp2@plt
+; RV64IZFINXZDINX-NEXT: tail exp2
;
; RV32I-LABEL: exp2_f64:
; RV32I: # %bb.0:
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT: call exp2@plt
+; RV32I-NEXT: call exp2
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
@@ -465,7 +465,7 @@ define double @exp2_f64(double %a) nounwind {
; RV64I: # %bb.0:
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT: call exp2@plt
+; RV64I-NEXT: call exp2
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
@@ -478,26 +478,26 @@ declare double @llvm.log.f64(double)
define double @log_f64(double %a) nounwind {
; CHECKIFD-LABEL: log_f64:
; CHECKIFD: # %bb.0:
-; CHECKIFD-NEXT: tail log@plt
+; CHECKIFD-NEXT: tail log
;
; RV32IZFINXZDINX-LABEL: log_f64:
; RV32IZFINXZDINX: # %bb.0:
; RV32IZFINXZDINX-NEXT: addi sp, sp, -16
; RV32IZFINXZDINX-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IZFINXZDINX-NEXT: call log@plt
+; RV32IZFINXZDINX-NEXT: call log
; RV32IZFINXZDINX-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32IZFINXZDINX-NEXT: addi sp, sp, 16
; RV32IZFINXZDINX-NEXT: ret
;
; RV64IZFINXZDINX-LABEL: log_f64:
; RV64IZFINXZDINX: # %bb.0:
-; RV64IZFINXZDINX-NEXT: tail log@plt
+; RV64IZFINXZDINX-NEXT: tail log
;
; RV32I-LABEL: log_f64:
; RV32I: # %bb.0:
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT: call log@plt
+; RV32I-NEXT: call log
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
@@ -506,7 +506,7 @@ define double @log_f64(double %a) nounwind {
; RV64I: # %bb.0:
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT: call log@plt
+; RV64I-NEXT: call log
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
@@ -519,26 +519,26 @@ declare double @llvm.log10.f64(double)
define double @log10_f64(double %a) nounwind {
; CHECKIFD-LABEL: log10_f64:
; CHECKIFD: # %bb.0:
-; CHECKIFD-NEXT: tail log10@plt
+; CHECKIFD-NEXT: tail log10
;
; RV32IZFINXZDINX-LABEL: log10_f64:
; RV32IZFINXZDINX: # %bb.0:
; RV32IZFINXZDINX-NEXT: addi sp, sp, -16
; RV32IZFINXZDINX-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IZFINXZDINX-NEXT: call log10@plt
+; RV32IZFINXZDINX-NEXT: call log10
; RV32IZFINXZDINX-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32IZFINXZDINX-NEXT: addi sp, sp, 16
; RV32IZFINXZDINX-NEXT: ret
;
; RV64IZFINXZDINX-LABEL: log10_f64:
; RV64IZFINXZDINX: # %bb.0:
-; RV64IZFINXZDINX-NEXT: tail log10@plt
+; RV64IZFINXZDINX-NEXT: tail log10
;
; RV32I-LABEL: log10_f64:
; RV32I: # %bb.0:
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT: call log10@plt
+; RV32I-NEXT: call log10
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
@@ -547,7 +547,7 @@ define double @log10_f64(double %a) nounwind {
; RV64I: # %bb.0:
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT: call log10@plt
+; RV64I-NEXT: call log10
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
@@ -560,26 +560,26 @@ declare double @llvm.log2.f64(double)
define double @log2_f64(double %a) nounwind {
; CHECKIFD-LABEL: log2_f64:
; CHECKIFD: # %bb.0:
-; CHECKIFD-NEXT: tail log2@plt
+; CHECKIFD-NEXT: tail log2
;
; RV32IZFINXZDINX-LABEL: log2_f64:
; RV32IZFINXZDINX: # %bb.0:
; RV32IZFINXZDINX-NEXT: addi sp, sp, -16
; RV32IZFINXZDINX-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IZFINXZDINX-NEXT: call log2@plt
+; RV32IZFINXZDINX-NEXT: call log2
; RV32IZFINXZDINX-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32IZFINXZDINX-NEXT: addi sp, sp, 16
; RV32IZFINXZDINX-NEXT: ret
;
; RV64IZFINXZDINX-LABEL: log2_f64:
; RV64IZFINXZDINX: # %bb.0:
-; RV64IZFINXZDINX-NEXT: tail log2@plt
+; RV64IZFINXZDINX-NEXT: tail log2
;
; RV32I-LABEL: log2_f64:
; RV32I: # %bb.0:
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT: call log2@plt
+; RV32I-NEXT: call log2
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
@@ -588,7 +588,7 @@ define double @log2_f64(double %a) nounwind {
; RV64I: # %bb.0:
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT: call log2@plt
+; RV64I-NEXT: call log2
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
@@ -636,7 +636,7 @@ define double @fma_f64(double %a, double %b, double %c) nounwind {
; RV32I: # %bb.0:
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT: call fma@plt
+; RV32I-NEXT: call fma
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
@@ -645,7 +645,7 @@ define double @fma_f64(double %a, double %b, double %c) nounwind {
; RV64I: # %bb.0:
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT: call fma@plt
+; RV64I-NEXT: call fma
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
@@ -697,10 +697,10 @@ define double @fmuladd_f64(double %a, double %b, double %c) nounwind {
; RV32I-NEXT: sw s1, 4(sp) # 4-byte Folded Spill
; RV32I-NEXT: mv s0, a5
; RV32I-NEXT: mv s1, a4
-; RV32I-NEXT: call __muldf3@plt
+; RV32I-NEXT: call __muldf3
; RV32I-NEXT: mv a2, s1
; RV32I-NEXT: mv a3, s0
-; RV32I-NEXT: call __adddf3@plt
+; RV32I-NEXT: call __adddf3
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: lw s0, 8(sp) # 4-byte Folded Reload
; RV32I-NEXT: lw s1, 4(sp) # 4-byte Folded Reload
@@ -713,9 +713,9 @@ define double @fmuladd_f64(double %a, double %b, double %c) nounwind {
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT: sd s0, 0(sp) # 8-byte Folded Spill
; RV64I-NEXT: mv s0, a2
-; RV64I-NEXT: call __muldf3@plt
+; RV64I-NEXT: call __muldf3
; RV64I-NEXT: mv a1, s0
-; RV64I-NEXT: call __adddf3@plt
+; RV64I-NEXT: call __adddf3
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: ld s0, 0(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
@@ -795,7 +795,7 @@ define double @minnum_f64(double %a, double %b) nounwind {
; RV32I: # %bb.0:
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT: call fmin@plt
+; RV32I-NEXT: call fmin
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
@@ -804,7 +804,7 @@ define double @minnum_f64(double %a, double %b) nounwind {
; RV64I: # %bb.0:
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT: call fmin@plt
+; RV64I-NEXT: call fmin
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
@@ -848,7 +848,7 @@ define double @maxnum_f64(double %a, double %b) nounwind {
; RV32I: # %bb.0:
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT: call fmax@plt
+; RV32I-NEXT: call fmax
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
@@ -857,7 +857,7 @@ define double @maxnum_f64(double %a, double %b) nounwind {
; RV64I: # %bb.0:
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT: call fmax@plt
+; RV64I-NEXT: call fmax
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
@@ -940,7 +940,7 @@ declare double @llvm.floor.f64(double)
define double @floor_f64(double %a) nounwind {
; RV32IFD-LABEL: floor_f64:
; RV32IFD: # %bb.0:
-; RV32IFD-NEXT: tail floor@plt
+; RV32IFD-NEXT: tail floor
;
; RV64IFD-LABEL: floor_f64:
; RV64IFD: # %bb.0:
@@ -960,7 +960,7 @@ define double @floor_f64(double %a) nounwind {
; RV32IZFINXZDINX: # %bb.0:
; RV32IZFINXZDINX-NEXT: addi sp, sp, -16
; RV32IZFINXZDINX-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IZFINXZDINX-NEXT: call floor@plt
+; RV32IZFINXZDINX-NEXT: call floor
; RV32IZFINXZDINX-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32IZFINXZDINX-NEXT: addi sp, sp, 16
; RV32IZFINXZDINX-NEXT: ret
@@ -983,7 +983,7 @@ define double @floor_f64(double %a) nounwind {
; RV32I: # %bb.0:
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT: call floor@plt
+; RV32I-NEXT: call floor
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
@@ -992,7 +992,7 @@ define double @floor_f64(double %a) nounwind {
; RV64I: # %bb.0:
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT: call floor@plt
+; RV64I-NEXT: call floor
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
@@ -1005,7 +1005,7 @@ declare double @llvm.ceil.f64(double)
define double @ceil_f64(double %a) nounwind {
; RV32IFD-LABEL: ceil_f64:
; RV32IFD: # %bb.0:
-; RV32IFD-NEXT: tail ceil@plt
+; RV32IFD-NEXT: tail ceil
;
; RV64IFD-LABEL: ceil_f64:
; RV64IFD: # %bb.0:
@@ -1025,7 +1025,7 @@ define double @ceil_f64(double %a) nounwind {
; RV32IZFINXZDINX: # %bb.0:
; RV32IZFINXZDINX-NEXT: addi sp, sp, -16
; RV32IZFINXZDINX-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IZFINXZDINX-NEXT: call ceil@plt
+; RV32IZFINXZDINX-NEXT: call ceil
; RV32IZFINXZDINX-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32IZFINXZDINX-NEXT: addi sp, sp, 16
; RV32IZFINXZDINX-NEXT: ret
@@ -1048,7 +1048,7 @@ define double @ceil_f64(double %a) nounwind {
; RV32I: # %bb.0:
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT: call ceil@plt
+; RV32I-NEXT: call ceil
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
@@ -1057,7 +1057,7 @@ define double @ceil_f64(double %a) nounwind {
; RV64I: # %bb.0:
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT: call ceil@plt
+; RV64I-NEXT: call ceil
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
@@ -1070,7 +1070,7 @@ declare double @llvm.trunc.f64(double)
define double @trunc_f64(double %a) nounwind {
; RV32IFD-LABEL: trunc_f64:
; RV32IFD: # %bb.0:
-; RV32IFD-NEXT: tail trunc@plt
+; RV32IFD-NEXT: tail trunc
;
; RV64IFD-LABEL: trunc_f64:
; RV64IFD: # %bb.0:
@@ -1090,7 +1090,7 @@ define double @trunc_f64(double %a) nounwind {
; RV32IZFINXZDINX: # %bb.0:
; RV32IZFINXZDINX-NEXT: addi sp, sp, -16
; RV32IZFINXZDINX-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IZFINXZDINX-NEXT: call trunc@plt
+; RV32IZFINXZDINX-NEXT: call trunc
; RV32IZFINXZDINX-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32IZFINXZDINX-NEXT: addi sp, sp, 16
; RV32IZFINXZDINX-NEXT: ret
@@ -1113,7 +1113,7 @@ define double @trunc_f64(double %a) nounwind {
; RV32I: # %bb.0:
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT: call trunc@plt
+; RV32I-NEXT: call trunc
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
@@ -1122,7 +1122,7 @@ define double @trunc_f64(double %a) nounwind {
; RV64I: # %bb.0:
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT: call trunc@plt
+; RV64I-NEXT: call trunc
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
@@ -1135,7 +1135,7 @@ declare double @llvm.rint.f64(double)
define double @rint_f64(double %a) nounwind {
; RV32IFD-LABEL: rint_f64:
; RV32IFD: # %bb.0:
-; RV32IFD-NEXT: tail rint@plt
+; RV32IFD-NEXT: tail rint
;
; RV64IFD-LABEL: rint_f64:
; RV64IFD: # %bb.0:
@@ -1155,7 +1155,7 @@ define double @rint_f64(double %a) nounwind {
; RV32IZFINXZDINX: # %bb.0:
; RV32IZFINXZDINX-NEXT: addi sp, sp, -16
; RV32IZFINXZDINX-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IZFINXZDINX-NEXT: call rint@plt
+; RV32IZFINXZDINX-NEXT: call rint
; RV32IZFINXZDINX-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32IZFINXZDINX-NEXT: addi sp, sp, 16
; RV32IZFINXZDINX-NEXT: ret
@@ -1178,7 +1178,7 @@ define double @rint_f64(double %a) nounwind {
; RV32I: # %bb.0:
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT: call rint@plt
+; RV32I-NEXT: call rint
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
@@ -1187,7 +1187,7 @@ define double @rint_f64(double %a) nounwind {
; RV64I: # %bb.0:
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT: call rint@plt
+; RV64I-NEXT: call rint
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
@@ -1200,26 +1200,26 @@ declare double @llvm.nearbyint.f64(double)
define double @nearbyint_f64(double %a) nounwind {
; CHECKIFD-LABEL: nearbyint_f64:
; CHECKIFD: # %bb.0:
-; CHECKIFD-NEXT: tail nearbyint@plt
+; CHECKIFD-NEXT: tail nearbyint
;
; RV32IZFINXZDINX-LABEL: nearbyint_f64:
; RV32IZFINXZDINX: # %bb.0:
; RV32IZFINXZDINX-NEXT: addi sp, sp, -16
; RV32IZFINXZDINX-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IZFINXZDINX-NEXT: call nearbyint@plt
+; RV32IZFINXZDINX-NEXT: call nearbyint
; RV32IZFINXZDINX-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32IZFINXZDINX-NEXT: addi sp, sp, 16
; RV32IZFINXZDINX-NEXT: ret
;
; RV64IZFINXZDINX-LABEL: nearbyint_f64:
; RV64IZFINXZDINX: # %bb.0:
-; RV64IZFINXZDINX-NEXT: tail nearbyint@plt
+; RV64IZFINXZDINX-NEXT: tail nearbyint
;
; RV32I-LABEL: nearbyint_f64:
; RV32I: # %bb.0:
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT: call nearbyint@plt
+; RV32I-NEXT: call nearbyint
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
@@ -1228,7 +1228,7 @@ define double @nearbyint_f64(double %a) nounwind {
; RV64I: # %bb.0:
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT: call nearbyint@plt
+; RV64I-NEXT: call nearbyint
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
@@ -1241,7 +1241,7 @@ declare double @llvm.round.f64(double)
define double @round_f64(double %a) nounwind {
; RV32IFD-LABEL: round_f64:
; RV32IFD: # %bb.0:
-; RV32IFD-NEXT: tail round@plt
+; RV32IFD-NEXT: tail round
;
; RV64IFD-LABEL: round_f64:
; RV64IFD: # %bb.0:
@@ -1261,7 +1261,7 @@ define double @round_f64(double %a) nounwind {
; RV32IZFINXZDINX: # %bb.0:
; RV32IZFINXZDINX-NEXT: addi sp, sp, -16
; RV32IZFINXZDINX-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IZFINXZDINX-NEXT: call round@plt
+; RV32IZFINXZDINX-NEXT: call round
; RV32IZFINXZDINX-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32IZFINXZDINX-NEXT: addi sp, sp, 16
; RV32IZFINXZDINX-NEXT: ret
@@ -1284,7 +1284,7 @@ define double @round_f64(double %a) nounwind {
; RV32I: # %bb.0:
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT: call round@plt
+; RV32I-NEXT: call round
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
@@ -1293,7 +1293,7 @@ define double @round_f64(double %a) nounwind {
; RV64I: # %bb.0:
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT: call round@plt
+; RV64I-NEXT: call round
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
@@ -1306,7 +1306,7 @@ declare double @llvm.roundeven.f64(double)
define double @roundeven_f64(double %a) nounwind {
; RV32IFD-LABEL: roundeven_f64:
; RV32IFD: # %bb.0:
-; RV32IFD-NEXT: tail roundeven@plt
+; RV32IFD-NEXT: tail roundeven
;
; RV64IFD-LABEL: roundeven_f64:
; RV64IFD: # %bb.0:
@@ -1326,7 +1326,7 @@ define double @roundeven_f64(double %a) nounwind {
; RV32IZFINXZDINX: # %bb.0:
; RV32IZFINXZDINX-NEXT: addi sp, sp, -16
; RV32IZFINXZDINX-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IZFINXZDINX-NEXT: call roundeven@plt
+; RV32IZFINXZDINX-NEXT: call roundeven
; RV32IZFINXZDINX-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32IZFINXZDINX-NEXT: addi sp, sp, 16
; RV32IZFINXZDINX-NEXT: ret
@@ -1349,7 +1349,7 @@ define double @roundeven_f64(double %a) nounwind {
; RV32I: # %bb.0:
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT: call roundeven@plt
+; RV32I-NEXT: call roundeven
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
@@ -1358,7 +1358,7 @@ define double @roundeven_f64(double %a) nounwind {
; RV64I: # %bb.0:
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT: call roundeven@plt
+; RV64I-NEXT: call roundeven
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
@@ -1399,7 +1399,7 @@ define iXLen @lrint_f64(double %a) nounwind {
; RV32I: # %bb.0:
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT: call lrint@plt
+; RV32I-NEXT: call lrint
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
@@ -1408,7 +1408,7 @@ define iXLen @lrint_f64(double %a) nounwind {
; RV64I: # %bb.0:
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT: call lrint@plt
+; RV64I-NEXT: call lrint
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
@@ -1450,7 +1450,7 @@ define iXLen @lround_f64(double %a) nounwind {
; RV32I: # %bb.0:
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT: call lround@plt
+; RV32I-NEXT: call lround
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
@@ -1459,7 +1459,7 @@ define iXLen @lround_f64(double %a) nounwind {
; RV64I: # %bb.0:
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT: call lround@plt
+; RV64I-NEXT: call lround
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
@@ -1493,7 +1493,7 @@ define i32 @lround_i32_f64(double %a) nounwind {
; RV32I: # %bb.0:
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT: call lround@plt
+; RV32I-NEXT: call lround
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
@@ -1502,7 +1502,7 @@ define i32 @lround_i32_f64(double %a) nounwind {
; RV64I: # %bb.0:
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT: call lround@plt
+; RV64I-NEXT: call lround
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
@@ -1517,7 +1517,7 @@ define i64 @llrint_f64(double %a) nounwind {
; RV32IFD: # %bb.0:
; RV32IFD-NEXT: addi sp, sp, -16
; RV32IFD-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IFD-NEXT: call llrint@plt
+; RV32IFD-NEXT: call llrint
; RV32IFD-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32IFD-NEXT: addi sp, sp, 16
; RV32IFD-NEXT: ret
@@ -1531,7 +1531,7 @@ define i64 @llrint_f64(double %a) nounwind {
; RV32IZFINXZDINX: # %bb.0:
; RV32IZFINXZDINX-NEXT: addi sp, sp, -16
; RV32IZFINXZDINX-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IZFINXZDINX-NEXT: call llrint@plt
+; RV32IZFINXZDINX-NEXT: call llrint
; RV32IZFINXZDINX-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32IZFINXZDINX-NEXT: addi sp, sp, 16
; RV32IZFINXZDINX-NEXT: ret
@@ -1545,7 +1545,7 @@ define i64 @llrint_f64(double %a) nounwind {
; RV32I: # %bb.0:
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT: call llrint@plt
+; RV32I-NEXT: call llrint
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
@@ -1554,7 +1554,7 @@ define i64 @llrint_f64(double %a) nounwind {
; RV64I: # %bb.0:
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT: call llrint@plt
+; RV64I-NEXT: call llrint
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
@@ -1569,7 +1569,7 @@ define i64 @llround_f64(double %a) nounwind {
; RV32IFD: # %bb.0:
; RV32IFD-NEXT: addi sp, sp, -16
; RV32IFD-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IFD-NEXT: call llround@plt
+; RV32IFD-NEXT: call llround
; RV32IFD-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32IFD-NEXT: addi sp, sp, 16
; RV32IFD-NEXT: ret
@@ -1583,7 +1583,7 @@ define i64 @llround_f64(double %a) nounwind {
; RV32IZFINXZDINX: # %bb.0:
; RV32IZFINXZDINX-NEXT: addi sp, sp, -16
; RV32IZFINXZDINX-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IZFINXZDINX-NEXT: call llround@plt
+; RV32IZFINXZDINX-NEXT: call llround
; RV32IZFINXZDINX-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32IZFINXZDINX-NEXT: addi sp, sp, 16
; RV32IZFINXZDINX-NEXT: ret
@@ -1597,7 +1597,7 @@ define i64 @llround_f64(double %a) nounwind {
; RV32I: # %bb.0:
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT: call llround@plt
+; RV32I-NEXT: call llround
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
@@ -1606,7 +1606,7 @@ define i64 @llround_f64(double %a) nounwind {
; RV64I: # %bb.0:
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT: call llround@plt
+; RV64I-NEXT: call llround
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
diff --git a/llvm/test/CodeGen/RISCV/double-mem.ll b/llvm/test/CodeGen/RISCV/double-mem.ll
index fb043f44beb3..6c6f70d6e2ed 100644
--- a/llvm/test/CodeGen/RISCV/double-mem.ll
+++ b/llvm/test/CodeGen/RISCV/double-mem.ll
@@ -217,7 +217,7 @@ define dso_local double @fld_stack(double %a) nounwind {
; RV32IFD-NEXT: fsd fs0, 16(sp) # 8-byte Folded Spill
; RV32IFD-NEXT: fmv.d fs0, fa0
; RV32IFD-NEXT: addi a0, sp, 8
-; RV32IFD-NEXT: call notdead@plt
+; RV32IFD-NEXT: call notdead
; RV32IFD-NEXT: fld fa5, 8(sp)
; RV32IFD-NEXT: fadd.d fa0, fa5, fs0
; RV32IFD-NEXT: lw ra, 28(sp) # 4-byte Folded Reload
@@ -232,7 +232,7 @@ define dso_local double @fld_stack(double %a) nounwind {
; RV64IFD-NEXT: fsd fs0, 16(sp) # 8-byte Folded Spill
; RV64IFD-NEXT: fmv.d fs0, fa0
; RV64IFD-NEXT: addi a0, sp, 8
-; RV64IFD-NEXT: call notdead@plt
+; RV64IFD-NEXT: call notdead
; RV64IFD-NEXT: fld fa5, 8(sp)
; RV64IFD-NEXT: fadd.d fa0, fa5, fs0
; RV64IFD-NEXT: ld ra, 24(sp) # 8-byte Folded Reload
@@ -251,7 +251,7 @@ define dso_local double @fld_stack(double %a) nounwind {
; RV32IZFINXZDINX-NEXT: lw s0, 0(sp)
; RV32IZFINXZDINX-NEXT: lw s1, 4(sp)
; RV32IZFINXZDINX-NEXT: addi a0, sp, 8
-; RV32IZFINXZDINX-NEXT: call notdead@plt
+; RV32IZFINXZDINX-NEXT: call notdead
; RV32IZFINXZDINX-NEXT: lw a0, 8(sp)
; RV32IZFINXZDINX-NEXT: lw a1, 12(sp)
; RV32IZFINXZDINX-NEXT: fadd.d a0, a0, s0
@@ -272,7 +272,7 @@ define dso_local double @fld_stack(double %a) nounwind {
; RV64IZFINXZDINX-NEXT: sd s0, 16(sp) # 8-byte Folded Spill
; RV64IZFINXZDINX-NEXT: mv s0, a0
; RV64IZFINXZDINX-NEXT: addi a0, sp, 8
-; RV64IZFINXZDINX-NEXT: call notdead@plt
+; RV64IZFINXZDINX-NEXT: call notdead
; RV64IZFINXZDINX-NEXT: ld a0, 8(sp)
; RV64IZFINXZDINX-NEXT: fadd.d a0, a0, s0
; RV64IZFINXZDINX-NEXT: ld ra, 24(sp) # 8-byte Folded Reload
@@ -294,7 +294,7 @@ define dso_local void @fsd_stack(double %a, double %b) nounwind {
; RV32IFD-NEXT: fadd.d fa5, fa0, fa1
; RV32IFD-NEXT: fsd fa5, 0(sp)
; RV32IFD-NEXT: mv a0, sp
-; RV32IFD-NEXT: call notdead@plt
+; RV32IFD-NEXT: call notdead
; RV32IFD-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32IFD-NEXT: addi sp, sp, 16
; RV32IFD-NEXT: ret
@@ -306,7 +306,7 @@ define dso_local void @fsd_stack(double %a, double %b) nounwind {
; RV64IFD-NEXT: fadd.d fa5, fa0, fa1
; RV64IFD-NEXT: fsd fa5, 0(sp)
; RV64IFD-NEXT: mv a0, sp
-; RV64IFD-NEXT: call notdead@plt
+; RV64IFD-NEXT: call notdead
; RV64IFD-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64IFD-NEXT: addi sp, sp, 16
; RV64IFD-NEXT: ret
@@ -327,7 +327,7 @@ define dso_local void @fsd_stack(double %a, double %b) nounwind {
; RV32IZFINXZDINX-NEXT: sw a0, 16(sp)
; RV32IZFINXZDINX-NEXT: sw a1, 20(sp)
; RV32IZFINXZDINX-NEXT: addi a0, sp, 16
-; RV32IZFINXZDINX-NEXT: call notdead@plt
+; RV32IZFINXZDINX-NEXT: call notdead
; RV32IZFINXZDINX-NEXT: lw ra, 28(sp) # 4-byte Folded Reload
; RV32IZFINXZDINX-NEXT: addi sp, sp, 32
; RV32IZFINXZDINX-NEXT: ret
@@ -339,7 +339,7 @@ define dso_local void @fsd_stack(double %a, double %b) nounwind {
; RV64IZFINXZDINX-NEXT: fadd.d a0, a0, a1
; RV64IZFINXZDINX-NEXT: sd a0, 0(sp)
; RV64IZFINXZDINX-NEXT: mv a0, sp
-; RV64IZFINXZDINX-NEXT: call notdead@plt
+; RV64IZFINXZDINX-NEXT: call notdead
; RV64IZFINXZDINX-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64IZFINXZDINX-NEXT: addi sp, sp, 16
; RV64IZFINXZDINX-NEXT: ret
diff --git a/llvm/test/CodeGen/RISCV/double-previous-failure.ll b/llvm/test/CodeGen/RISCV/double-previous-failure.ll
index aec27b58e1fc..8b8f538886ed 100644
--- a/llvm/test/CodeGen/RISCV/double-previous-failure.ll
+++ b/llvm/test/CodeGen/RISCV/double-previous-failure.ll
@@ -25,7 +25,7 @@ define i32 @main() nounwind {
; RV32IFD-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32IFD-NEXT: lui a1, 262144
; RV32IFD-NEXT: li a0, 0
-; RV32IFD-NEXT: call test@plt
+; RV32IFD-NEXT: call test
; RV32IFD-NEXT: sw a0, 0(sp)
; RV32IFD-NEXT: sw a1, 4(sp)
; RV32IFD-NEXT: fld fa5, 0(sp)
@@ -39,9 +39,9 @@ define i32 @main() nounwind {
; RV32IFD-NEXT: flt.d a0, fa4, fa5
; RV32IFD-NEXT: bnez a0, .LBB1_3
; RV32IFD-NEXT: # %bb.2: # %if.end
-; RV32IFD-NEXT: call exit@plt
+; RV32IFD-NEXT: call exit
; RV32IFD-NEXT: .LBB1_3: # %if.then
-; RV32IFD-NEXT: call abort@plt
+; RV32IFD-NEXT: call abort
;
; RV32IZFINXZDINX-LABEL: main:
; RV32IZFINXZDINX: # %bb.0: # %entry
@@ -49,7 +49,7 @@ define i32 @main() nounwind {
; RV32IZFINXZDINX-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32IZFINXZDINX-NEXT: lui a1, 262144
; RV32IZFINXZDINX-NEXT: li a0, 0
-; RV32IZFINXZDINX-NEXT: call test@plt
+; RV32IZFINXZDINX-NEXT: call test
; RV32IZFINXZDINX-NEXT: sw a0, 0(sp)
; RV32IZFINXZDINX-NEXT: sw a1, 4(sp)
; RV32IZFINXZDINX-NEXT: lw a0, 0(sp)
@@ -66,9 +66,9 @@ define i32 @main() nounwind {
; RV32IZFINXZDINX-NEXT: flt.d a0, a2, a0
; RV32IZFINXZDINX-NEXT: bnez a0, .LBB1_3
; RV32IZFINXZDINX-NEXT: # %bb.2: # %if.end
-; RV32IZFINXZDINX-NEXT: call exit@plt
+; RV32IZFINXZDINX-NEXT: call exit
; RV32IZFINXZDINX-NEXT: .LBB1_3: # %if.then
-; RV32IZFINXZDINX-NEXT: call abort@plt
+; RV32IZFINXZDINX-NEXT: call abort
entry:
%call = call double @test(double 2.000000e+00)
%cmp = fcmp olt double %call, 2.400000e-01
diff --git a/llvm/test/CodeGen/RISCV/double-round-conv-sat.ll b/llvm/test/CodeGen/RISCV/double-round-conv-sat.ll
index 5c5b4bb723b6..b8c6e8450240 100644
--- a/llvm/test/CodeGen/RISCV/double-round-conv-sat.ll
+++ b/llvm/test/CodeGen/RISCV/double-round-conv-sat.ll
@@ -54,12 +54,12 @@ define i64 @test_floor_si64(double %x) nounwind {
; RV32IFD-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32IFD-NEXT: sw s0, 8(sp) # 4-byte Folded Spill
; RV32IFD-NEXT: fsd fs0, 0(sp) # 8-byte Folded Spill
-; RV32IFD-NEXT: call floor@plt
+; RV32IFD-NEXT: call floor
; RV32IFD-NEXT: lui a0, %hi(.LCPI1_0)
; RV32IFD-NEXT: fld fa5, %lo(.LCPI1_0)(a0)
; RV32IFD-NEXT: fmv.d fs0, fa0
; RV32IFD-NEXT: fle.d s0, fa5, fa0
-; RV32IFD-NEXT: call __fixdfdi@plt
+; RV32IFD-NEXT: call __fixdfdi
; RV32IFD-NEXT: lui a4, 524288
; RV32IFD-NEXT: lui a2, 524288
; RV32IFD-NEXT: beqz s0, .LBB1_2
@@ -103,7 +103,7 @@ define i64 @test_floor_si64(double %x) nounwind {
; RV32IZFINXZDINX-NEXT: sw s0, 24(sp) # 4-byte Folded Spill
; RV32IZFINXZDINX-NEXT: sw s2, 20(sp) # 4-byte Folded Spill
; RV32IZFINXZDINX-NEXT: sw s3, 16(sp) # 4-byte Folded Spill
-; RV32IZFINXZDINX-NEXT: call floor@plt
+; RV32IZFINXZDINX-NEXT: call floor
; RV32IZFINXZDINX-NEXT: sw a0, 8(sp)
; RV32IZFINXZDINX-NEXT: sw a1, 12(sp)
; RV32IZFINXZDINX-NEXT: lw s2, 8(sp)
@@ -112,7 +112,7 @@ define i64 @test_floor_si64(double %x) nounwind {
; RV32IZFINXZDINX-NEXT: lw a3, %lo(.LCPI1_0+4)(a2)
; RV32IZFINXZDINX-NEXT: lw a2, %lo(.LCPI1_0)(a2)
; RV32IZFINXZDINX-NEXT: fle.d s0, a2, s2
-; RV32IZFINXZDINX-NEXT: call __fixdfdi@plt
+; RV32IZFINXZDINX-NEXT: call __fixdfdi
; RV32IZFINXZDINX-NEXT: lui a4, 524288
; RV32IZFINXZDINX-NEXT: lui a2, 524288
; RV32IZFINXZDINX-NEXT: beqz s0, .LBB1_2
@@ -201,7 +201,7 @@ define i64 @test_floor_ui64(double %x) nounwind {
; RV32IFD-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32IFD-NEXT: sw s0, 8(sp) # 4-byte Folded Spill
; RV32IFD-NEXT: sw s1, 4(sp) # 4-byte Folded Spill
-; RV32IFD-NEXT: call floor@plt
+; RV32IFD-NEXT: call floor
; RV32IFD-NEXT: lui a0, %hi(.LCPI3_0)
; RV32IFD-NEXT: fld fa5, %lo(.LCPI3_0)(a0)
; RV32IFD-NEXT: flt.d a0, fa5, fa0
@@ -209,7 +209,7 @@ define i64 @test_floor_ui64(double %x) nounwind {
; RV32IFD-NEXT: fcvt.d.w fa5, zero
; RV32IFD-NEXT: fle.d a0, fa5, fa0
; RV32IFD-NEXT: neg s1, a0
-; RV32IFD-NEXT: call __fixunsdfdi@plt
+; RV32IFD-NEXT: call __fixunsdfdi
; RV32IFD-NEXT: and a0, s1, a0
; RV32IFD-NEXT: or a0, s0, a0
; RV32IFD-NEXT: and a1, s1, a1
@@ -236,7 +236,7 @@ define i64 @test_floor_ui64(double %x) nounwind {
; RV32IZFINXZDINX-NEXT: sw s0, 24(sp) # 4-byte Folded Spill
; RV32IZFINXZDINX-NEXT: sw s1, 20(sp) # 4-byte Folded Spill
; RV32IZFINXZDINX-NEXT: sw s2, 16(sp) # 4-byte Folded Spill
-; RV32IZFINXZDINX-NEXT: call floor@plt
+; RV32IZFINXZDINX-NEXT: call floor
; RV32IZFINXZDINX-NEXT: sw a0, 8(sp)
; RV32IZFINXZDINX-NEXT: sw a1, 12(sp)
; RV32IZFINXZDINX-NEXT: lw s0, 8(sp)
@@ -244,7 +244,7 @@ define i64 @test_floor_ui64(double %x) nounwind {
; RV32IZFINXZDINX-NEXT: fcvt.d.w a2, zero
; RV32IZFINXZDINX-NEXT: fle.d a2, a2, s0
; RV32IZFINXZDINX-NEXT: neg s2, a2
-; RV32IZFINXZDINX-NEXT: call __fixunsdfdi@plt
+; RV32IZFINXZDINX-NEXT: call __fixunsdfdi
; RV32IZFINXZDINX-NEXT: lui a2, %hi(.LCPI3_0)
; RV32IZFINXZDINX-NEXT: lw a3, %lo(.LCPI3_0+4)(a2)
; RV32IZFINXZDINX-NEXT: lw a2, %lo(.LCPI3_0)(a2)
@@ -320,12 +320,12 @@ define i64 @test_ceil_si64(double %x) nounwind {
; RV32IFD-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32IFD-NEXT: sw s0, 8(sp) # 4-byte Folded Spill
; RV32IFD-NEXT: fsd fs0, 0(sp) # 8-byte Folded Spill
-; RV32IFD-NEXT: call ceil@plt
+; RV32IFD-NEXT: call ceil
; RV32IFD-NEXT: lui a0, %hi(.LCPI5_0)
; RV32IFD-NEXT: fld fa5, %lo(.LCPI5_0)(a0)
; RV32IFD-NEXT: fmv.d fs0, fa0
; RV32IFD-NEXT: fle.d s0, fa5, fa0
-; RV32IFD-NEXT: call __fixdfdi@plt
+; RV32IFD-NEXT: call __fixdfdi
; RV32IFD-NEXT: lui a4, 524288
; RV32IFD-NEXT: lui a2, 524288
; RV32IFD-NEXT: beqz s0, .LBB5_2
@@ -369,7 +369,7 @@ define i64 @test_ceil_si64(double %x) nounwind {
; RV32IZFINXZDINX-NEXT: sw s0, 24(sp) # 4-byte Folded Spill
; RV32IZFINXZDINX-NEXT: sw s2, 20(sp) # 4-byte Folded Spill
; RV32IZFINXZDINX-NEXT: sw s3, 16(sp) # 4-byte Folded Spill
-; RV32IZFINXZDINX-NEXT: call ceil@plt
+; RV32IZFINXZDINX-NEXT: call ceil
; RV32IZFINXZDINX-NEXT: sw a0, 8(sp)
; RV32IZFINXZDINX-NEXT: sw a1, 12(sp)
; RV32IZFINXZDINX-NEXT: lw s2, 8(sp)
@@ -378,7 +378,7 @@ define i64 @test_ceil_si64(double %x) nounwind {
; RV32IZFINXZDINX-NEXT: lw a3, %lo(.LCPI5_0+4)(a2)
; RV32IZFINXZDINX-NEXT: lw a2, %lo(.LCPI5_0)(a2)
; RV32IZFINXZDINX-NEXT: fle.d s0, a2, s2
-; RV32IZFINXZDINX-NEXT: call __fixdfdi@plt
+; RV32IZFINXZDINX-NEXT: call __fixdfdi
; RV32IZFINXZDINX-NEXT: lui a4, 524288
; RV32IZFINXZDINX-NEXT: lui a2, 524288
; RV32IZFINXZDINX-NEXT: beqz s0, .LBB5_2
@@ -467,7 +467,7 @@ define i64 @test_ceil_ui64(double %x) nounwind {
; RV32IFD-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32IFD-NEXT: sw s0, 8(sp) # 4-byte Folded Spill
; RV32IFD-NEXT: sw s1, 4(sp) # 4-byte Folded Spill
-; RV32IFD-NEXT: call ceil@plt
+; RV32IFD-NEXT: call ceil
; RV32IFD-NEXT: lui a0, %hi(.LCPI7_0)
; RV32IFD-NEXT: fld fa5, %lo(.LCPI7_0)(a0)
; RV32IFD-NEXT: flt.d a0, fa5, fa0
@@ -475,7 +475,7 @@ define i64 @test_ceil_ui64(double %x) nounwind {
; RV32IFD-NEXT: fcvt.d.w fa5, zero
; RV32IFD-NEXT: fle.d a0, fa5, fa0
; RV32IFD-NEXT: neg s1, a0
-; RV32IFD-NEXT: call __fixunsdfdi@plt
+; RV32IFD-NEXT: call __fixunsdfdi
; RV32IFD-NEXT: and a0, s1, a0
; RV32IFD-NEXT: or a0, s0, a0
; RV32IFD-NEXT: and a1, s1, a1
@@ -502,7 +502,7 @@ define i64 @test_ceil_ui64(double %x) nounwind {
; RV32IZFINXZDINX-NEXT: sw s0, 24(sp) # 4-byte Folded Spill
; RV32IZFINXZDINX-NEXT: sw s1, 20(sp) # 4-byte Folded Spill
; RV32IZFINXZDINX-NEXT: sw s2, 16(sp) # 4-byte Folded Spill
-; RV32IZFINXZDINX-NEXT: call ceil@plt
+; RV32IZFINXZDINX-NEXT: call ceil
; RV32IZFINXZDINX-NEXT: sw a0, 8(sp)
; RV32IZFINXZDINX-NEXT: sw a1, 12(sp)
; RV32IZFINXZDINX-NEXT: lw s0, 8(sp)
@@ -510,7 +510,7 @@ define i64 @test_ceil_ui64(double %x) nounwind {
; RV32IZFINXZDINX-NEXT: fcvt.d.w a2, zero
; RV32IZFINXZDINX-NEXT: fle.d a2, a2, s0
; RV32IZFINXZDINX-NEXT: neg s2, a2
-; RV32IZFINXZDINX-NEXT: call __fixunsdfdi@plt
+; RV32IZFINXZDINX-NEXT: call __fixunsdfdi
; RV32IZFINXZDINX-NEXT: lui a2, %hi(.LCPI7_0)
; RV32IZFINXZDINX-NEXT: lw a3, %lo(.LCPI7_0+4)(a2)
; RV32IZFINXZDINX-NEXT: lw a2, %lo(.LCPI7_0)(a2)
@@ -586,12 +586,12 @@ define i64 @test_trunc_si64(double %x) nounwind {
; RV32IFD-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32IFD-NEXT: sw s0, 8(sp) # 4-byte Folded Spill
; RV32IFD-NEXT: fsd fs0, 0(sp) # 8-byte Folded Spill
-; RV32IFD-NEXT: call trunc@plt
+; RV32IFD-NEXT: call trunc
; RV32IFD-NEXT: lui a0, %hi(.LCPI9_0)
; RV32IFD-NEXT: fld fa5, %lo(.LCPI9_0)(a0)
; RV32IFD-NEXT: fmv.d fs0, fa0
; RV32IFD-NEXT: fle.d s0, fa5, fa0
-; RV32IFD-NEXT: call __fixdfdi@plt
+; RV32IFD-NEXT: call __fixdfdi
; RV32IFD-NEXT: lui a4, 524288
; RV32IFD-NEXT: lui a2, 524288
; RV32IFD-NEXT: beqz s0, .LBB9_2
@@ -635,7 +635,7 @@ define i64 @test_trunc_si64(double %x) nounwind {
; RV32IZFINXZDINX-NEXT: sw s0, 24(sp) # 4-byte Folded Spill
; RV32IZFINXZDINX-NEXT: sw s2, 20(sp) # 4-byte Folded Spill
; RV32IZFINXZDINX-NEXT: sw s3, 16(sp) # 4-byte Folded Spill
-; RV32IZFINXZDINX-NEXT: call trunc@plt
+; RV32IZFINXZDINX-NEXT: call trunc
; RV32IZFINXZDINX-NEXT: sw a0, 8(sp)
; RV32IZFINXZDINX-NEXT: sw a1, 12(sp)
; RV32IZFINXZDINX-NEXT: lw s2, 8(sp)
@@ -644,7 +644,7 @@ define i64 @test_trunc_si64(double %x) nounwind {
; RV32IZFINXZDINX-NEXT: lw a3, %lo(.LCPI9_0+4)(a2)
; RV32IZFINXZDINX-NEXT: lw a2, %lo(.LCPI9_0)(a2)
; RV32IZFINXZDINX-NEXT: fle.d s0, a2, s2
-; RV32IZFINXZDINX-NEXT: call __fixdfdi@plt
+; RV32IZFINXZDINX-NEXT: call __fixdfdi
; RV32IZFINXZDINX-NEXT: lui a4, 524288
; RV32IZFINXZDINX-NEXT: lui a2, 524288
; RV32IZFINXZDINX-NEXT: beqz s0, .LBB9_2
@@ -733,7 +733,7 @@ define i64 @test_trunc_ui64(double %x) nounwind {
; RV32IFD-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32IFD-NEXT: sw s0, 8(sp) # 4-byte Folded Spill
; RV32IFD-NEXT: sw s1, 4(sp) # 4-byte Folded Spill
-; RV32IFD-NEXT: call trunc@plt
+; RV32IFD-NEXT: call trunc
; RV32IFD-NEXT: lui a0, %hi(.LCPI11_0)
; RV32IFD-NEXT: fld fa5, %lo(.LCPI11_0)(a0)
; RV32IFD-NEXT: flt.d a0, fa5, fa0
@@ -741,7 +741,7 @@ define i64 @test_trunc_ui64(double %x) nounwind {
; RV32IFD-NEXT: fcvt.d.w fa5, zero
; RV32IFD-NEXT: fle.d a0, fa5, fa0
; RV32IFD-NEXT: neg s1, a0
-; RV32IFD-NEXT: call __fixunsdfdi@plt
+; RV32IFD-NEXT: call __fixunsdfdi
; RV32IFD-NEXT: and a0, s1, a0
; RV32IFD-NEXT: or a0, s0, a0
; RV32IFD-NEXT: and a1, s1, a1
@@ -768,7 +768,7 @@ define i64 @test_trunc_ui64(double %x) nounwind {
; RV32IZFINXZDINX-NEXT: sw s0, 24(sp) # 4-byte Folded Spill
; RV32IZFINXZDINX-NEXT: sw s1, 20(sp) # 4-byte Folded Spill
; RV32IZFINXZDINX-NEXT: sw s2, 16(sp) # 4-byte Folded Spill
-; RV32IZFINXZDINX-NEXT: call trunc@plt
+; RV32IZFINXZDINX-NEXT: call trunc
; RV32IZFINXZDINX-NEXT: sw a0, 8(sp)
; RV32IZFINXZDINX-NEXT: sw a1, 12(sp)
; RV32IZFINXZDINX-NEXT: lw s0, 8(sp)
@@ -776,7 +776,7 @@ define i64 @test_trunc_ui64(double %x) nounwind {
; RV32IZFINXZDINX-NEXT: fcvt.d.w a2, zero
; RV32IZFINXZDINX-NEXT: fle.d a2, a2, s0
; RV32IZFINXZDINX-NEXT: neg s2, a2
-; RV32IZFINXZDINX-NEXT: call __fixunsdfdi@plt
+; RV32IZFINXZDINX-NEXT: call __fixunsdfdi
; RV32IZFINXZDINX-NEXT: lui a2, %hi(.LCPI11_0)
; RV32IZFINXZDINX-NEXT: lw a3, %lo(.LCPI11_0+4)(a2)
; RV32IZFINXZDINX-NEXT: lw a2, %lo(.LCPI11_0)(a2)
@@ -852,12 +852,12 @@ define i64 @test_round_si64(double %x) nounwind {
; RV32IFD-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32IFD-NEXT: sw s0, 8(sp) # 4-byte Folded Spill
; RV32IFD-NEXT: fsd fs0, 0(sp) # 8-byte Folded Spill
-; RV32IFD-NEXT: call round@plt
+; RV32IFD-NEXT: call round
; RV32IFD-NEXT: lui a0, %hi(.LCPI13_0)
; RV32IFD-NEXT: fld fa5, %lo(.LCPI13_0)(a0)
; RV32IFD-NEXT: fmv.d fs0, fa0
; RV32IFD-NEXT: fle.d s0, fa5, fa0
-; RV32IFD-NEXT: call __fixdfdi@plt
+; RV32IFD-NEXT: call __fixdfdi
; RV32IFD-NEXT: lui a4, 524288
; RV32IFD-NEXT: lui a2, 524288
; RV32IFD-NEXT: beqz s0, .LBB13_2
@@ -901,7 +901,7 @@ define i64 @test_round_si64(double %x) nounwind {
; RV32IZFINXZDINX-NEXT: sw s0, 24(sp) # 4-byte Folded Spill
; RV32IZFINXZDINX-NEXT: sw s2, 20(sp) # 4-byte Folded Spill
; RV32IZFINXZDINX-NEXT: sw s3, 16(sp) # 4-byte Folded Spill
-; RV32IZFINXZDINX-NEXT: call round@plt
+; RV32IZFINXZDINX-NEXT: call round
; RV32IZFINXZDINX-NEXT: sw a0, 8(sp)
; RV32IZFINXZDINX-NEXT: sw a1, 12(sp)
; RV32IZFINXZDINX-NEXT: lw s2, 8(sp)
@@ -910,7 +910,7 @@ define i64 @test_round_si64(double %x) nounwind {
; RV32IZFINXZDINX-NEXT: lw a3, %lo(.LCPI13_0+4)(a2)
; RV32IZFINXZDINX-NEXT: lw a2, %lo(.LCPI13_0)(a2)
; RV32IZFINXZDINX-NEXT: fle.d s0, a2, s2
-; RV32IZFINXZDINX-NEXT: call __fixdfdi@plt
+; RV32IZFINXZDINX-NEXT: call __fixdfdi
; RV32IZFINXZDINX-NEXT: lui a4, 524288
; RV32IZFINXZDINX-NEXT: lui a2, 524288
; RV32IZFINXZDINX-NEXT: beqz s0, .LBB13_2
@@ -999,7 +999,7 @@ define i64 @test_round_ui64(double %x) nounwind {
; RV32IFD-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32IFD-NEXT: sw s0, 8(sp) # 4-byte Folded Spill
; RV32IFD-NEXT: sw s1, 4(sp) # 4-byte Folded Spill
-; RV32IFD-NEXT: call round@plt
+; RV32IFD-NEXT: call round
; RV32IFD-NEXT: lui a0, %hi(.LCPI15_0)
; RV32IFD-NEXT: fld fa5, %lo(.LCPI15_0)(a0)
; RV32IFD-NEXT: flt.d a0, fa5, fa0
@@ -1007,7 +1007,7 @@ define i64 @test_round_ui64(double %x) nounwind {
; RV32IFD-NEXT: fcvt.d.w fa5, zero
; RV32IFD-NEXT: fle.d a0, fa5, fa0
; RV32IFD-NEXT: neg s1, a0
-; RV32IFD-NEXT: call __fixunsdfdi@plt
+; RV32IFD-NEXT: call __fixunsdfdi
; RV32IFD-NEXT: and a0, s1, a0
; RV32IFD-NEXT: or a0, s0, a0
; RV32IFD-NEXT: and a1, s1, a1
@@ -1034,7 +1034,7 @@ define i64 @test_round_ui64(double %x) nounwind {
; RV32IZFINXZDINX-NEXT: sw s0, 24(sp) # 4-byte Folded Spill
; RV32IZFINXZDINX-NEXT: sw s1, 20(sp) # 4-byte Folded Spill
; RV32IZFINXZDINX-NEXT: sw s2, 16(sp) # 4-byte Folded Spill
-; RV32IZFINXZDINX-NEXT: call round@plt
+; RV32IZFINXZDINX-NEXT: call round
; RV32IZFINXZDINX-NEXT: sw a0, 8(sp)
; RV32IZFINXZDINX-NEXT: sw a1, 12(sp)
; RV32IZFINXZDINX-NEXT: lw s0, 8(sp)
@@ -1042,7 +1042,7 @@ define i64 @test_round_ui64(double %x) nounwind {
; RV32IZFINXZDINX-NEXT: fcvt.d.w a2, zero
; RV32IZFINXZDINX-NEXT: fle.d a2, a2, s0
; RV32IZFINXZDINX-NEXT: neg s2, a2
-; RV32IZFINXZDINX-NEXT: call __fixunsdfdi@plt
+; RV32IZFINXZDINX-NEXT: call __fixunsdfdi
; RV32IZFINXZDINX-NEXT: lui a2, %hi(.LCPI15_0)
; RV32IZFINXZDINX-NEXT: lw a3, %lo(.LCPI15_0+4)(a2)
; RV32IZFINXZDINX-NEXT: lw a2, %lo(.LCPI15_0)(a2)
@@ -1118,12 +1118,12 @@ define i64 @test_roundeven_si64(double %x) nounwind {
; RV32IFD-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32IFD-NEXT: sw s0, 8(sp) # 4-byte Folded Spill
; RV32IFD-NEXT: fsd fs0, 0(sp) # 8-byte Folded Spill
-; RV32IFD-NEXT: call roundeven@plt
+; RV32IFD-NEXT: call roundeven
; RV32IFD-NEXT: lui a0, %hi(.LCPI17_0)
; RV32IFD-NEXT: fld fa5, %lo(.LCPI17_0)(a0)
; RV32IFD-NEXT: fmv.d fs0, fa0
; RV32IFD-NEXT: fle.d s0, fa5, fa0
-; RV32IFD-NEXT: call __fixdfdi@plt
+; RV32IFD-NEXT: call __fixdfdi
; RV32IFD-NEXT: lui a4, 524288
; RV32IFD-NEXT: lui a2, 524288
; RV32IFD-NEXT: beqz s0, .LBB17_2
@@ -1167,7 +1167,7 @@ define i64 @test_roundeven_si64(double %x) nounwind {
; RV32IZFINXZDINX-NEXT: sw s0, 24(sp) # 4-byte Folded Spill
; RV32IZFINXZDINX-NEXT: sw s2, 20(sp) # 4-byte Folded Spill
; RV32IZFINXZDINX-NEXT: sw s3, 16(sp) # 4-byte Folded Spill
-; RV32IZFINXZDINX-NEXT: call roundeven@plt
+; RV32IZFINXZDINX-NEXT: call roundeven
; RV32IZFINXZDINX-NEXT: sw a0, 8(sp)
; RV32IZFINXZDINX-NEXT: sw a1, 12(sp)
; RV32IZFINXZDINX-NEXT: lw s2, 8(sp)
@@ -1176,7 +1176,7 @@ define i64 @test_roundeven_si64(double %x) nounwind {
; RV32IZFINXZDINX-NEXT: lw a3, %lo(.LCPI17_0+4)(a2)
; RV32IZFINXZDINX-NEXT: lw a2, %lo(.LCPI17_0)(a2)
; RV32IZFINXZDINX-NEXT: fle.d s0, a2, s2
-; RV32IZFINXZDINX-NEXT: call __fixdfdi@plt
+; RV32IZFINXZDINX-NEXT: call __fixdfdi
; RV32IZFINXZDINX-NEXT: lui a4, 524288
; RV32IZFINXZDINX-NEXT: lui a2, 524288
; RV32IZFINXZDINX-NEXT: beqz s0, .LBB17_2
@@ -1265,7 +1265,7 @@ define i64 @test_roundeven_ui64(double %x) nounwind {
; RV32IFD-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32IFD-NEXT: sw s0, 8(sp) # 4-byte Folded Spill
; RV32IFD-NEXT: sw s1, 4(sp) # 4-byte Folded Spill
-; RV32IFD-NEXT: call roundeven@plt
+; RV32IFD-NEXT: call roundeven
; RV32IFD-NEXT: lui a0, %hi(.LCPI19_0)
; RV32IFD-NEXT: fld fa5, %lo(.LCPI19_0)(a0)
; RV32IFD-NEXT: flt.d a0, fa5, fa0
@@ -1273,7 +1273,7 @@ define i64 @test_roundeven_ui64(double %x) nounwind {
; RV32IFD-NEXT: fcvt.d.w fa5, zero
; RV32IFD-NEXT: fle.d a0, fa5, fa0
; RV32IFD-NEXT: neg s1, a0
-; RV32IFD-NEXT: call __fixunsdfdi@plt
+; RV32IFD-NEXT: call __fixunsdfdi
; RV32IFD-NEXT: and a0, s1, a0
; RV32IFD-NEXT: or a0, s0, a0
; RV32IFD-NEXT: and a1, s1, a1
@@ -1300,7 +1300,7 @@ define i64 @test_roundeven_ui64(double %x) nounwind {
; RV32IZFINXZDINX-NEXT: sw s0, 24(sp) # 4-byte Folded Spill
; RV32IZFINXZDINX-NEXT: sw s1, 20(sp) # 4-byte Folded Spill
; RV32IZFINXZDINX-NEXT: sw s2, 16(sp) # 4-byte Folded Spill
-; RV32IZFINXZDINX-NEXT: call roundeven@plt
+; RV32IZFINXZDINX-NEXT: call roundeven
; RV32IZFINXZDINX-NEXT: sw a0, 8(sp)
; RV32IZFINXZDINX-NEXT: sw a1, 12(sp)
; RV32IZFINXZDINX-NEXT: lw s0, 8(sp)
@@ -1308,7 +1308,7 @@ define i64 @test_roundeven_ui64(double %x) nounwind {
; RV32IZFINXZDINX-NEXT: fcvt.d.w a2, zero
; RV32IZFINXZDINX-NEXT: fle.d a2, a2, s0
; RV32IZFINXZDINX-NEXT: neg s2, a2
-; RV32IZFINXZDINX-NEXT: call __fixunsdfdi@plt
+; RV32IZFINXZDINX-NEXT: call __fixunsdfdi
; RV32IZFINXZDINX-NEXT: lui a2, %hi(.LCPI19_0)
; RV32IZFINXZDINX-NEXT: lw a3, %lo(.LCPI19_0+4)(a2)
; RV32IZFINXZDINX-NEXT: lw a2, %lo(.LCPI19_0)(a2)
@@ -1384,12 +1384,12 @@ define i64 @test_rint_si64(double %x) nounwind {
; RV32IFD-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32IFD-NEXT: sw s0, 8(sp) # 4-byte Folded Spill
; RV32IFD-NEXT: fsd fs0, 0(sp) # 8-byte Folded Spill
-; RV32IFD-NEXT: call rint@plt
+; RV32IFD-NEXT: call rint
; RV32IFD-NEXT: lui a0, %hi(.LCPI21_0)
; RV32IFD-NEXT: fld fa5, %lo(.LCPI21_0)(a0)
; RV32IFD-NEXT: fmv.d fs0, fa0
; RV32IFD-NEXT: fle.d s0, fa5, fa0
-; RV32IFD-NEXT: call __fixdfdi@plt
+; RV32IFD-NEXT: call __fixdfdi
; RV32IFD-NEXT: lui a4, 524288
; RV32IFD-NEXT: lui a2, 524288
; RV32IFD-NEXT: beqz s0, .LBB21_2
@@ -1433,7 +1433,7 @@ define i64 @test_rint_si64(double %x) nounwind {
; RV32IZFINXZDINX-NEXT: sw s0, 24(sp) # 4-byte Folded Spill
; RV32IZFINXZDINX-NEXT: sw s2, 20(sp) # 4-byte Folded Spill
; RV32IZFINXZDINX-NEXT: sw s3, 16(sp) # 4-byte Folded Spill
-; RV32IZFINXZDINX-NEXT: call rint@plt
+; RV32IZFINXZDINX-NEXT: call rint
; RV32IZFINXZDINX-NEXT: sw a0, 8(sp)
; RV32IZFINXZDINX-NEXT: sw a1, 12(sp)
; RV32IZFINXZDINX-NEXT: lw s2, 8(sp)
@@ -1442,7 +1442,7 @@ define i64 @test_rint_si64(double %x) nounwind {
; RV32IZFINXZDINX-NEXT: lw a3, %lo(.LCPI21_0+4)(a2)
; RV32IZFINXZDINX-NEXT: lw a2, %lo(.LCPI21_0)(a2)
; RV32IZFINXZDINX-NEXT: fle.d s0, a2, s2
-; RV32IZFINXZDINX-NEXT: call __fixdfdi@plt
+; RV32IZFINXZDINX-NEXT: call __fixdfdi
; RV32IZFINXZDINX-NEXT: lui a4, 524288
; RV32IZFINXZDINX-NEXT: lui a2, 524288
; RV32IZFINXZDINX-NEXT: beqz s0, .LBB21_2
@@ -1531,7 +1531,7 @@ define i64 @test_rint_ui64(double %x) nounwind {
; RV32IFD-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32IFD-NEXT: sw s0, 8(sp) # 4-byte Folded Spill
; RV32IFD-NEXT: sw s1, 4(sp) # 4-byte Folded Spill
-; RV32IFD-NEXT: call rint@plt
+; RV32IFD-NEXT: call rint
; RV32IFD-NEXT: lui a0, %hi(.LCPI23_0)
; RV32IFD-NEXT: fld fa5, %lo(.LCPI23_0)(a0)
; RV32IFD-NEXT: flt.d a0, fa5, fa0
@@ -1539,7 +1539,7 @@ define i64 @test_rint_ui64(double %x) nounwind {
; RV32IFD-NEXT: fcvt.d.w fa5, zero
; RV32IFD-NEXT: fle.d a0, fa5, fa0
; RV32IFD-NEXT: neg s1, a0
-; RV32IFD-NEXT: call __fixunsdfdi@plt
+; RV32IFD-NEXT: call __fixunsdfdi
; RV32IFD-NEXT: and a0, s1, a0
; RV32IFD-NEXT: or a0, s0, a0
; RV32IFD-NEXT: and a1, s1, a1
@@ -1566,7 +1566,7 @@ define i64 @test_rint_ui64(double %x) nounwind {
; RV32IZFINXZDINX-NEXT: sw s0, 24(sp) # 4-byte Folded Spill
; RV32IZFINXZDINX-NEXT: sw s1, 20(sp) # 4-byte Folded Spill
; RV32IZFINXZDINX-NEXT: sw s2, 16(sp) # 4-byte Folded Spill
-; RV32IZFINXZDINX-NEXT: call rint@plt
+; RV32IZFINXZDINX-NEXT: call rint
; RV32IZFINXZDINX-NEXT: sw a0, 8(sp)
; RV32IZFINXZDINX-NEXT: sw a1, 12(sp)
; RV32IZFINXZDINX-NEXT: lw s0, 8(sp)
@@ -1574,7 +1574,7 @@ define i64 @test_rint_ui64(double %x) nounwind {
; RV32IZFINXZDINX-NEXT: fcvt.d.w a2, zero
; RV32IZFINXZDINX-NEXT: fle.d a2, a2, s0
; RV32IZFINXZDINX-NEXT: neg s2, a2
-; RV32IZFINXZDINX-NEXT: call __fixunsdfdi@plt
+; RV32IZFINXZDINX-NEXT: call __fixunsdfdi
; RV32IZFINXZDINX-NEXT: lui a2, %hi(.LCPI23_0)
; RV32IZFINXZDINX-NEXT: lw a3, %lo(.LCPI23_0+4)(a2)
; RV32IZFINXZDINX-NEXT: lw a2, %lo(.LCPI23_0)(a2)
diff --git a/llvm/test/CodeGen/RISCV/double-round-conv.ll b/llvm/test/CodeGen/RISCV/double-round-conv.ll
index 6327afd881a5..094a4105de71 100644
--- a/llvm/test/CodeGen/RISCV/double-round-conv.ll
+++ b/llvm/test/CodeGen/RISCV/double-round-conv.ll
@@ -106,8 +106,8 @@ define i64 @test_floor_si64(double %x) {
; RV32IFD-NEXT: .cfi_def_cfa_offset 16
; RV32IFD-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32IFD-NEXT: .cfi_offset ra, -4
-; RV32IFD-NEXT: call floor@plt
-; RV32IFD-NEXT: call __fixdfdi@plt
+; RV32IFD-NEXT: call floor
+; RV32IFD-NEXT: call __fixdfdi
; RV32IFD-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32IFD-NEXT: addi sp, sp, 16
; RV32IFD-NEXT: ret
@@ -123,8 +123,8 @@ define i64 @test_floor_si64(double %x) {
; RV32IZFINXZDINX-NEXT: .cfi_def_cfa_offset 16
; RV32IZFINXZDINX-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32IZFINXZDINX-NEXT: .cfi_offset ra, -4
-; RV32IZFINXZDINX-NEXT: call floor@plt
-; RV32IZFINXZDINX-NEXT: call __fixdfdi@plt
+; RV32IZFINXZDINX-NEXT: call floor
+; RV32IZFINXZDINX-NEXT: call __fixdfdi
; RV32IZFINXZDINX-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32IZFINXZDINX-NEXT: addi sp, sp, 16
; RV32IZFINXZDINX-NEXT: ret
@@ -236,8 +236,8 @@ define i64 @test_floor_ui64(double %x) {
; RV32IFD-NEXT: .cfi_def_cfa_offset 16
; RV32IFD-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32IFD-NEXT: .cfi_offset ra, -4
-; RV32IFD-NEXT: call floor@plt
-; RV32IFD-NEXT: call __fixunsdfdi@plt
+; RV32IFD-NEXT: call floor
+; RV32IFD-NEXT: call __fixunsdfdi
; RV32IFD-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32IFD-NEXT: addi sp, sp, 16
; RV32IFD-NEXT: ret
@@ -253,8 +253,8 @@ define i64 @test_floor_ui64(double %x) {
; RV32IZFINXZDINX-NEXT: .cfi_def_cfa_offset 16
; RV32IZFINXZDINX-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32IZFINXZDINX-NEXT: .cfi_offset ra, -4
-; RV32IZFINXZDINX-NEXT: call floor@plt
-; RV32IZFINXZDINX-NEXT: call __fixunsdfdi@plt
+; RV32IZFINXZDINX-NEXT: call floor
+; RV32IZFINXZDINX-NEXT: call __fixunsdfdi
; RV32IZFINXZDINX-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32IZFINXZDINX-NEXT: addi sp, sp, 16
; RV32IZFINXZDINX-NEXT: ret
@@ -366,8 +366,8 @@ define i64 @test_ceil_si64(double %x) {
; RV32IFD-NEXT: .cfi_def_cfa_offset 16
; RV32IFD-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32IFD-NEXT: .cfi_offset ra, -4
-; RV32IFD-NEXT: call ceil@plt
-; RV32IFD-NEXT: call __fixdfdi@plt
+; RV32IFD-NEXT: call ceil
+; RV32IFD-NEXT: call __fixdfdi
; RV32IFD-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32IFD-NEXT: addi sp, sp, 16
; RV32IFD-NEXT: ret
@@ -383,8 +383,8 @@ define i64 @test_ceil_si64(double %x) {
; RV32IZFINXZDINX-NEXT: .cfi_def_cfa_offset 16
; RV32IZFINXZDINX-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32IZFINXZDINX-NEXT: .cfi_offset ra, -4
-; RV32IZFINXZDINX-NEXT: call ceil@plt
-; RV32IZFINXZDINX-NEXT: call __fixdfdi@plt
+; RV32IZFINXZDINX-NEXT: call ceil
+; RV32IZFINXZDINX-NEXT: call __fixdfdi
; RV32IZFINXZDINX-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32IZFINXZDINX-NEXT: addi sp, sp, 16
; RV32IZFINXZDINX-NEXT: ret
@@ -496,8 +496,8 @@ define i64 @test_ceil_ui64(double %x) {
; RV32IFD-NEXT: .cfi_def_cfa_offset 16
; RV32IFD-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32IFD-NEXT: .cfi_offset ra, -4
-; RV32IFD-NEXT: call ceil@plt
-; RV32IFD-NEXT: call __fixunsdfdi@plt
+; RV32IFD-NEXT: call ceil
+; RV32IFD-NEXT: call __fixunsdfdi
; RV32IFD-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32IFD-NEXT: addi sp, sp, 16
; RV32IFD-NEXT: ret
@@ -513,8 +513,8 @@ define i64 @test_ceil_ui64(double %x) {
; RV32IZFINXZDINX-NEXT: .cfi_def_cfa_offset 16
; RV32IZFINXZDINX-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32IZFINXZDINX-NEXT: .cfi_offset ra, -4
-; RV32IZFINXZDINX-NEXT: call ceil@plt
-; RV32IZFINXZDINX-NEXT: call __fixunsdfdi@plt
+; RV32IZFINXZDINX-NEXT: call ceil
+; RV32IZFINXZDINX-NEXT: call __fixunsdfdi
; RV32IZFINXZDINX-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32IZFINXZDINX-NEXT: addi sp, sp, 16
; RV32IZFINXZDINX-NEXT: ret
@@ -626,8 +626,8 @@ define i64 @test_trunc_si64(double %x) {
; RV32IFD-NEXT: .cfi_def_cfa_offset 16
; RV32IFD-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32IFD-NEXT: .cfi_offset ra, -4
-; RV32IFD-NEXT: call trunc@plt
-; RV32IFD-NEXT: call __fixdfdi@plt
+; RV32IFD-NEXT: call trunc
+; RV32IFD-NEXT: call __fixdfdi
; RV32IFD-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32IFD-NEXT: addi sp, sp, 16
; RV32IFD-NEXT: ret
@@ -643,8 +643,8 @@ define i64 @test_trunc_si64(double %x) {
; RV32IZFINXZDINX-NEXT: .cfi_def_cfa_offset 16
; RV32IZFINXZDINX-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32IZFINXZDINX-NEXT: .cfi_offset ra, -4
-; RV32IZFINXZDINX-NEXT: call trunc@plt
-; RV32IZFINXZDINX-NEXT: call __fixdfdi@plt
+; RV32IZFINXZDINX-NEXT: call trunc
+; RV32IZFINXZDINX-NEXT: call __fixdfdi
; RV32IZFINXZDINX-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32IZFINXZDINX-NEXT: addi sp, sp, 16
; RV32IZFINXZDINX-NEXT: ret
@@ -756,8 +756,8 @@ define i64 @test_trunc_ui64(double %x) {
; RV32IFD-NEXT: .cfi_def_cfa_offset 16
; RV32IFD-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32IFD-NEXT: .cfi_offset ra, -4
-; RV32IFD-NEXT: call trunc@plt
-; RV32IFD-NEXT: call __fixunsdfdi@plt
+; RV32IFD-NEXT: call trunc
+; RV32IFD-NEXT: call __fixunsdfdi
; RV32IFD-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32IFD-NEXT: addi sp, sp, 16
; RV32IFD-NEXT: ret
@@ -773,8 +773,8 @@ define i64 @test_trunc_ui64(double %x) {
; RV32IZFINXZDINX-NEXT: .cfi_def_cfa_offset 16
; RV32IZFINXZDINX-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32IZFINXZDINX-NEXT: .cfi_offset ra, -4
-; RV32IZFINXZDINX-NEXT: call trunc@plt
-; RV32IZFINXZDINX-NEXT: call __fixunsdfdi@plt
+; RV32IZFINXZDINX-NEXT: call trunc
+; RV32IZFINXZDINX-NEXT: call __fixunsdfdi
; RV32IZFINXZDINX-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32IZFINXZDINX-NEXT: addi sp, sp, 16
; RV32IZFINXZDINX-NEXT: ret
@@ -886,8 +886,8 @@ define i64 @test_round_si64(double %x) {
; RV32IFD-NEXT: .cfi_def_cfa_offset 16
; RV32IFD-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32IFD-NEXT: .cfi_offset ra, -4
-; RV32IFD-NEXT: call round@plt
-; RV32IFD-NEXT: call __fixdfdi@plt
+; RV32IFD-NEXT: call round
+; RV32IFD-NEXT: call __fixdfdi
; RV32IFD-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32IFD-NEXT: addi sp, sp, 16
; RV32IFD-NEXT: ret
@@ -903,8 +903,8 @@ define i64 @test_round_si64(double %x) {
; RV32IZFINXZDINX-NEXT: .cfi_def_cfa_offset 16
; RV32IZFINXZDINX-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32IZFINXZDINX-NEXT: .cfi_offset ra, -4
-; RV32IZFINXZDINX-NEXT: call round@plt
-; RV32IZFINXZDINX-NEXT: call __fixdfdi@plt
+; RV32IZFINXZDINX-NEXT: call round
+; RV32IZFINXZDINX-NEXT: call __fixdfdi
; RV32IZFINXZDINX-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32IZFINXZDINX-NEXT: addi sp, sp, 16
; RV32IZFINXZDINX-NEXT: ret
@@ -1016,8 +1016,8 @@ define i64 @test_round_ui64(double %x) {
; RV32IFD-NEXT: .cfi_def_cfa_offset 16
; RV32IFD-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32IFD-NEXT: .cfi_offset ra, -4
-; RV32IFD-NEXT: call round@plt
-; RV32IFD-NEXT: call __fixunsdfdi@plt
+; RV32IFD-NEXT: call round
+; RV32IFD-NEXT: call __fixunsdfdi
; RV32IFD-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32IFD-NEXT: addi sp, sp, 16
; RV32IFD-NEXT: ret
@@ -1033,8 +1033,8 @@ define i64 @test_round_ui64(double %x) {
; RV32IZFINXZDINX-NEXT: .cfi_def_cfa_offset 16
; RV32IZFINXZDINX-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32IZFINXZDINX-NEXT: .cfi_offset ra, -4
-; RV32IZFINXZDINX-NEXT: call round@plt
-; RV32IZFINXZDINX-NEXT: call __fixunsdfdi@plt
+; RV32IZFINXZDINX-NEXT: call round
+; RV32IZFINXZDINX-NEXT: call __fixunsdfdi
; RV32IZFINXZDINX-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32IZFINXZDINX-NEXT: addi sp, sp, 16
; RV32IZFINXZDINX-NEXT: ret
@@ -1146,8 +1146,8 @@ define i64 @test_roundeven_si64(double %x) {
; RV32IFD-NEXT: .cfi_def_cfa_offset 16
; RV32IFD-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32IFD-NEXT: .cfi_offset ra, -4
-; RV32IFD-NEXT: call roundeven@plt
-; RV32IFD-NEXT: call __fixdfdi@plt
+; RV32IFD-NEXT: call roundeven
+; RV32IFD-NEXT: call __fixdfdi
; RV32IFD-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32IFD-NEXT: addi sp, sp, 16
; RV32IFD-NEXT: ret
@@ -1163,8 +1163,8 @@ define i64 @test_roundeven_si64(double %x) {
; RV32IZFINXZDINX-NEXT: .cfi_def_cfa_offset 16
; RV32IZFINXZDINX-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32IZFINXZDINX-NEXT: .cfi_offset ra, -4
-; RV32IZFINXZDINX-NEXT: call roundeven@plt
-; RV32IZFINXZDINX-NEXT: call __fixdfdi@plt
+; RV32IZFINXZDINX-NEXT: call roundeven
+; RV32IZFINXZDINX-NEXT: call __fixdfdi
; RV32IZFINXZDINX-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32IZFINXZDINX-NEXT: addi sp, sp, 16
; RV32IZFINXZDINX-NEXT: ret
@@ -1276,8 +1276,8 @@ define i64 @test_roundeven_ui64(double %x) {
; RV32IFD-NEXT: .cfi_def_cfa_offset 16
; RV32IFD-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32IFD-NEXT: .cfi_offset ra, -4
-; RV32IFD-NEXT: call roundeven@plt
-; RV32IFD-NEXT: call __fixunsdfdi@plt
+; RV32IFD-NEXT: call roundeven
+; RV32IFD-NEXT: call __fixunsdfdi
; RV32IFD-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32IFD-NEXT: addi sp, sp, 16
; RV32IFD-NEXT: ret
@@ -1293,8 +1293,8 @@ define i64 @test_roundeven_ui64(double %x) {
; RV32IZFINXZDINX-NEXT: .cfi_def_cfa_offset 16
; RV32IZFINXZDINX-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32IZFINXZDINX-NEXT: .cfi_offset ra, -4
-; RV32IZFINXZDINX-NEXT: call roundeven@plt
-; RV32IZFINXZDINX-NEXT: call __fixunsdfdi@plt
+; RV32IZFINXZDINX-NEXT: call roundeven
+; RV32IZFINXZDINX-NEXT: call __fixunsdfdi
; RV32IZFINXZDINX-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32IZFINXZDINX-NEXT: addi sp, sp, 16
; RV32IZFINXZDINX-NEXT: ret
@@ -1311,7 +1311,7 @@ define i64 @test_roundeven_ui64(double %x) {
define double @test_floor_double(double %x) {
; RV32IFD-LABEL: test_floor_double:
; RV32IFD: # %bb.0:
-; RV32IFD-NEXT: tail floor@plt
+; RV32IFD-NEXT: tail floor
;
; RV64IFD-LABEL: test_floor_double:
; RV64IFD: # %bb.0:
@@ -1333,7 +1333,7 @@ define double @test_floor_double(double %x) {
; RV32IZFINXZDINX-NEXT: .cfi_def_cfa_offset 16
; RV32IZFINXZDINX-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32IZFINXZDINX-NEXT: .cfi_offset ra, -4
-; RV32IZFINXZDINX-NEXT: call floor@plt
+; RV32IZFINXZDINX-NEXT: call floor
; RV32IZFINXZDINX-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32IZFINXZDINX-NEXT: addi sp, sp, 16
; RV32IZFINXZDINX-NEXT: ret
@@ -1358,7 +1358,7 @@ define double @test_floor_double(double %x) {
define double @test_ceil_double(double %x) {
; RV32IFD-LABEL: test_ceil_double:
; RV32IFD: # %bb.0:
-; RV32IFD-NEXT: tail ceil@plt
+; RV32IFD-NEXT: tail ceil
;
; RV64IFD-LABEL: test_ceil_double:
; RV64IFD: # %bb.0:
@@ -1380,7 +1380,7 @@ define double @test_ceil_double(double %x) {
; RV32IZFINXZDINX-NEXT: .cfi_def_cfa_offset 16
; RV32IZFINXZDINX-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32IZFINXZDINX-NEXT: .cfi_offset ra, -4
-; RV32IZFINXZDINX-NEXT: call ceil@plt
+; RV32IZFINXZDINX-NEXT: call ceil
; RV32IZFINXZDINX-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32IZFINXZDINX-NEXT: addi sp, sp, 16
; RV32IZFINXZDINX-NEXT: ret
@@ -1405,7 +1405,7 @@ define double @test_ceil_double(double %x) {
define double @test_trunc_double(double %x) {
; RV32IFD-LABEL: test_trunc_double:
; RV32IFD: # %bb.0:
-; RV32IFD-NEXT: tail trunc@plt
+; RV32IFD-NEXT: tail trunc
;
; RV64IFD-LABEL: test_trunc_double:
; RV64IFD: # %bb.0:
@@ -1427,7 +1427,7 @@ define double @test_trunc_double(double %x) {
; RV32IZFINXZDINX-NEXT: .cfi_def_cfa_offset 16
; RV32IZFINXZDINX-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32IZFINXZDINX-NEXT: .cfi_offset ra, -4
-; RV32IZFINXZDINX-NEXT: call trunc@plt
+; RV32IZFINXZDINX-NEXT: call trunc
; RV32IZFINXZDINX-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32IZFINXZDINX-NEXT: addi sp, sp, 16
; RV32IZFINXZDINX-NEXT: ret
@@ -1452,7 +1452,7 @@ define double @test_trunc_double(double %x) {
define double @test_round_double(double %x) {
; RV32IFD-LABEL: test_round_double:
; RV32IFD: # %bb.0:
-; RV32IFD-NEXT: tail round@plt
+; RV32IFD-NEXT: tail round
;
; RV64IFD-LABEL: test_round_double:
; RV64IFD: # %bb.0:
@@ -1474,7 +1474,7 @@ define double @test_round_double(double %x) {
; RV32IZFINXZDINX-NEXT: .cfi_def_cfa_offset 16
; RV32IZFINXZDINX-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32IZFINXZDINX-NEXT: .cfi_offset ra, -4
-; RV32IZFINXZDINX-NEXT: call round@plt
+; RV32IZFINXZDINX-NEXT: call round
; RV32IZFINXZDINX-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32IZFINXZDINX-NEXT: addi sp, sp, 16
; RV32IZFINXZDINX-NEXT: ret
@@ -1499,7 +1499,7 @@ define double @test_round_double(double %x) {
define double @test_roundeven_double(double %x) {
; RV32IFD-LABEL: test_roundeven_double:
; RV32IFD: # %bb.0:
-; RV32IFD-NEXT: tail roundeven@plt
+; RV32IFD-NEXT: tail roundeven
;
; RV64IFD-LABEL: test_roundeven_double:
; RV64IFD: # %bb.0:
@@ -1521,7 +1521,7 @@ define double @test_roundeven_double(double %x) {
; RV32IZFINXZDINX-NEXT: .cfi_def_cfa_offset 16
; RV32IZFINXZDINX-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32IZFINXZDINX-NEXT: .cfi_offset ra, -4
-; RV32IZFINXZDINX-NEXT: call roundeven@plt
+; RV32IZFINXZDINX-NEXT: call roundeven
; RV32IZFINXZDINX-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32IZFINXZDINX-NEXT: addi sp, sp, 16
; RV32IZFINXZDINX-NEXT: ret
diff --git a/llvm/test/CodeGen/RISCV/double-stack-spill-restore.ll b/llvm/test/CodeGen/RISCV/double-stack-spill-restore.ll
index 2fec986f0ba5..aa88a365431a 100644
--- a/llvm/test/CodeGen/RISCV/double-stack-spill-restore.ll
+++ b/llvm/test/CodeGen/RISCV/double-stack-spill-restore.ll
@@ -23,7 +23,7 @@ define double @func(double %d, i32 %n) nounwind {
; RV32IFD-NEXT: lw a0, 16(sp)
; RV32IFD-NEXT: lw a1, 20(sp)
; RV32IFD-NEXT: fsd fa5, 8(sp) # 8-byte Folded Spill
-; RV32IFD-NEXT: call func@plt
+; RV32IFD-NEXT: call func
; RV32IFD-NEXT: sw a0, 16(sp)
; RV32IFD-NEXT: sw a1, 20(sp)
; RV32IFD-NEXT: fld fa5, 16(sp)
@@ -48,7 +48,7 @@ define double @func(double %d, i32 %n) nounwind {
; RV64IFD-NEXT: addiw a1, a1, -1
; RV64IFD-NEXT: fmv.x.d a0, fa5
; RV64IFD-NEXT: fsd fa5, 0(sp) # 8-byte Folded Spill
-; RV64IFD-NEXT: call func@plt
+; RV64IFD-NEXT: call func
; RV64IFD-NEXT: fmv.d.x fa5, a0
; RV64IFD-NEXT: fld fa4, 0(sp) # 8-byte Folded Reload
; RV64IFD-NEXT: fadd.d fa5, fa5, fa4
@@ -77,7 +77,7 @@ define double @func(double %d, i32 %n) nounwind {
; RV32IZFINXZDINX-NEXT: sw s1, 12(sp)
; RV32IZFINXZDINX-NEXT: lw a0, 8(sp)
; RV32IZFINXZDINX-NEXT: lw a1, 12(sp)
-; RV32IZFINXZDINX-NEXT: call func@plt
+; RV32IZFINXZDINX-NEXT: call func
; RV32IZFINXZDINX-NEXT: sw a0, 8(sp)
; RV32IZFINXZDINX-NEXT: sw a1, 12(sp)
; RV32IZFINXZDINX-NEXT: lw a0, 8(sp)
@@ -108,7 +108,7 @@ define double @func(double %d, i32 %n) nounwind {
; RV64IZFINXZDINX-NEXT: sd s0, 0(sp) # 8-byte Folded Spill
; RV64IZFINXZDINX-NEXT: addiw a1, a1, -1
; RV64IZFINXZDINX-NEXT: mv s0, a0
-; RV64IZFINXZDINX-NEXT: call func@plt
+; RV64IZFINXZDINX-NEXT: call func
; RV64IZFINXZDINX-NEXT: fadd.d a0, a0, s0
; RV64IZFINXZDINX-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64IZFINXZDINX-NEXT: ld s0, 0(sp) # 8-byte Folded Reload
diff --git a/llvm/test/CodeGen/RISCV/eh-dwarf-cfa.ll b/llvm/test/CodeGen/RISCV/eh-dwarf-cfa.ll
index 12606aad215c..c4d932acbcc8 100644
--- a/llvm/test/CodeGen/RISCV/eh-dwarf-cfa.ll
+++ b/llvm/test/CodeGen/RISCV/eh-dwarf-cfa.ll
@@ -10,7 +10,7 @@ define void @dwarf() {
; RV32-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32-NEXT: .cfi_offset ra, -4
; RV32-NEXT: addi a0, sp, 16
-; RV32-NEXT: call foo@plt
+; RV32-NEXT: call foo
; RV32-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32-NEXT: addi sp, sp, 16
; RV32-NEXT: ret
@@ -22,7 +22,7 @@ define void @dwarf() {
; RV64-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64-NEXT: .cfi_offset ra, -8
; RV64-NEXT: addi a0, sp, 16
-; RV64-NEXT: call foo@plt
+; RV64-NEXT: call foo
; RV64-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64-NEXT: addi sp, sp, 16
; RV64-NEXT: ret
diff --git a/llvm/test/CodeGen/RISCV/emutls.ll b/llvm/test/CodeGen/RISCV/emutls.ll
index 9ce985ba4242..4f6e9935b9aa 100644
--- a/llvm/test/CodeGen/RISCV/emutls.ll
+++ b/llvm/test/CodeGen/RISCV/emutls.ll
@@ -16,7 +16,7 @@ define ptr @get_external_x() nounwind {
; RV32-NEXT: .Lpcrel_hi0:
; RV32-NEXT: auipc a0, %got_pcrel_hi(__emutls_v.external_x)
; RV32-NEXT: lw a0, %pcrel_lo(.Lpcrel_hi0)(a0)
-; RV32-NEXT: call __emutls_get_address@plt
+; RV32-NEXT: call __emutls_get_address
; RV32-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32-NEXT: addi sp, sp, 16
; RV32-NEXT: ret
@@ -28,7 +28,7 @@ define ptr @get_external_x() nounwind {
; RV64-NEXT: .Lpcrel_hi0:
; RV64-NEXT: auipc a0, %got_pcrel_hi(__emutls_v.external_x)
; RV64-NEXT: ld a0, %pcrel_lo(.Lpcrel_hi0)(a0)
-; RV64-NEXT: call __emutls_get_address@plt
+; RV64-NEXT: call __emutls_get_address
; RV64-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64-NEXT: addi sp, sp, 16
; RV64-NEXT: ret
@@ -44,7 +44,7 @@ define ptr @get_y() nounwind {
; RV32-NEXT: .Lpcrel_hi1:
; RV32-NEXT: auipc a0, %got_pcrel_hi(__emutls_v.y)
; RV32-NEXT: lw a0, %pcrel_lo(.Lpcrel_hi1)(a0)
-; RV32-NEXT: call __emutls_get_address@plt
+; RV32-NEXT: call __emutls_get_address
; RV32-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32-NEXT: addi sp, sp, 16
; RV32-NEXT: ret
@@ -56,7 +56,7 @@ define ptr @get_y() nounwind {
; RV64-NEXT: .Lpcrel_hi1:
; RV64-NEXT: auipc a0, %got_pcrel_hi(__emutls_v.y)
; RV64-NEXT: ld a0, %pcrel_lo(.Lpcrel_hi1)(a0)
-; RV64-NEXT: call __emutls_get_address@plt
+; RV64-NEXT: call __emutls_get_address
; RV64-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64-NEXT: addi sp, sp, 16
; RV64-NEXT: ret
@@ -72,7 +72,7 @@ define ptr @get_internal_z() nounwind {
; RV32-NEXT: .Lpcrel_hi2:
; RV32-NEXT: auipc a0, %pcrel_hi(__emutls_v.internal_z)
; RV32-NEXT: addi a0, a0, %pcrel_lo(.Lpcrel_hi2)
-; RV32-NEXT: call __emutls_get_address@plt
+; RV32-NEXT: call __emutls_get_address
; RV32-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32-NEXT: addi sp, sp, 16
; RV32-NEXT: ret
@@ -84,7 +84,7 @@ define ptr @get_internal_z() nounwind {
; RV64-NEXT: .Lpcrel_hi2:
; RV64-NEXT: auipc a0, %pcrel_hi(__emutls_v.internal_z)
; RV64-NEXT: addi a0, a0, %pcrel_lo(.Lpcrel_hi2)
-; RV64-NEXT: call __emutls_get_address@plt
+; RV64-NEXT: call __emutls_get_address
; RV64-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64-NEXT: addi sp, sp, 16
; RV64-NEXT: ret
diff --git a/llvm/test/CodeGen/RISCV/exception-pointer-register.ll b/llvm/test/CodeGen/RISCV/exception-pointer-register.ll
index 6c98525d9765..067690333fdb 100644
--- a/llvm/test/CodeGen/RISCV/exception-pointer-register.ll
+++ b/llvm/test/CodeGen/RISCV/exception-pointer-register.ll
@@ -28,13 +28,13 @@ define void @caller(ptr %p) personality ptr @__gxx_personality_v0 {
; RV32I-NEXT: # %bb.1: # %bb2
; RV32I-NEXT: .Ltmp0:
; RV32I-NEXT: mv a0, s0
-; RV32I-NEXT: call bar@plt
+; RV32I-NEXT: call bar
; RV32I-NEXT: .Ltmp1:
; RV32I-NEXT: j .LBB0_3
; RV32I-NEXT: .LBB0_2: # %bb1
; RV32I-NEXT: .Ltmp2:
; RV32I-NEXT: mv a0, s0
-; RV32I-NEXT: call foo@plt
+; RV32I-NEXT: call foo
; RV32I-NEXT: .Ltmp3:
; RV32I-NEXT: .LBB0_3: # %end2
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
@@ -48,7 +48,7 @@ define void @caller(ptr %p) personality ptr @__gxx_personality_v0 {
; RV32I-NEXT: mv a0, s0
; RV32I-NEXT: call callee
; RV32I-NEXT: mv a0, s1
-; RV32I-NEXT: call _Unwind_Resume@plt
+; RV32I-NEXT: call _Unwind_Resume
;
; RV64I-LABEL: caller:
; RV64I: # %bb.0: # %entry
@@ -65,13 +65,13 @@ define void @caller(ptr %p) personality ptr @__gxx_personality_v0 {
; RV64I-NEXT: # %bb.1: # %bb2
; RV64I-NEXT: .Ltmp0:
; RV64I-NEXT: mv a0, s0
-; RV64I-NEXT: call bar@plt
+; RV64I-NEXT: call bar
; RV64I-NEXT: .Ltmp1:
; RV64I-NEXT: j .LBB0_3
; RV64I-NEXT: .LBB0_2: # %bb1
; RV64I-NEXT: .Ltmp2:
; RV64I-NEXT: mv a0, s0
-; RV64I-NEXT: call foo@plt
+; RV64I-NEXT: call foo
; RV64I-NEXT: .Ltmp3:
; RV64I-NEXT: .LBB0_3: # %end2
; RV64I-NEXT: ld ra, 24(sp) # 8-byte Folded Reload
@@ -85,7 +85,7 @@ define void @caller(ptr %p) personality ptr @__gxx_personality_v0 {
; RV64I-NEXT: mv a0, s0
; RV64I-NEXT: call callee
; RV64I-NEXT: mv a0, s1
-; RV64I-NEXT: call _Unwind_Resume@plt
+; RV64I-NEXT: call _Unwind_Resume
entry:
%0 = icmp eq ptr %p, null
br i1 %0, label %bb1, label %bb2
diff --git a/llvm/test/CodeGen/RISCV/fastcc-float.ll b/llvm/test/CodeGen/RISCV/fastcc-float.ll
index c5daa612c671..488c97d5a450 100644
--- a/llvm/test/CodeGen/RISCV/fastcc-float.ll
+++ b/llvm/test/CodeGen/RISCV/fastcc-float.ll
@@ -62,7 +62,7 @@ define float @caller(<32 x float> %A) nounwind {
; CHECK-NEXT: fsw fs2, 8(sp)
; CHECK-NEXT: fsw fs1, 4(sp)
; CHECK-NEXT: fsw fs0, 0(sp)
-; CHECK-NEXT: call callee@plt
+; CHECK-NEXT: call callee
; CHECK-NEXT: lw ra, 60(sp) # 4-byte Folded Reload
; CHECK-NEXT: addi sp, sp, 64
; CHECK-NEXT: ret
diff --git a/llvm/test/CodeGen/RISCV/fastcc-int.ll b/llvm/test/CodeGen/RISCV/fastcc-int.ll
index 0df0ce183c01..e4c41a1aa890 100644
--- a/llvm/test/CodeGen/RISCV/fastcc-int.ll
+++ b/llvm/test/CodeGen/RISCV/fastcc-int.ll
@@ -44,7 +44,7 @@ define i32 @caller(<16 x i32> %A) nounwind {
; RV32-NEXT: sw s0, 4(sp)
; RV32-NEXT: sw t1, 0(sp)
; RV32-NEXT: mv a0, t0
-; RV32-NEXT: call callee@plt
+; RV32-NEXT: call callee
; RV32-NEXT: lw ra, 28(sp) # 4-byte Folded Reload
; RV32-NEXT: lw s0, 24(sp) # 4-byte Folded Reload
; RV32-NEXT: addi sp, sp, 32
@@ -75,7 +75,7 @@ define i32 @caller(<16 x i32> %A) nounwind {
; RV64-NEXT: sd s0, 8(sp)
; RV64-NEXT: sd t1, 0(sp)
; RV64-NEXT: mv a0, t0
-; RV64-NEXT: call callee@plt
+; RV64-NEXT: call callee
; RV64-NEXT: ld ra, 40(sp) # 8-byte Folded Reload
; RV64-NEXT: ld s0, 32(sp) # 8-byte Folded Reload
; RV64-NEXT: addi sp, sp, 48
diff --git a/llvm/test/CodeGen/RISCV/fastcc-without-f-reg.ll b/llvm/test/CodeGen/RISCV/fastcc-without-f-reg.ll
index e667325db3aa..fb0b34cf796b 100644
--- a/llvm/test/CodeGen/RISCV/fastcc-without-f-reg.ll
+++ b/llvm/test/CodeGen/RISCV/fastcc-without-f-reg.ll
@@ -321,7 +321,7 @@ define half @caller_half_32(<32 x half> %A) nounwind {
; ZHINX32-NEXT: lw t3, 52(sp) # 4-byte Folded Reload
; ZHINX32-NEXT: lw t4, 48(sp) # 4-byte Folded Reload
; ZHINX32-NEXT: lw t5, 44(sp) # 4-byte Folded Reload
-; ZHINX32-NEXT: call callee_half_32@plt
+; ZHINX32-NEXT: call callee_half_32
; ZHINX32-NEXT: lw ra, 108(sp) # 4-byte Folded Reload
; ZHINX32-NEXT: lw s0, 104(sp) # 4-byte Folded Reload
; ZHINX32-NEXT: lw s1, 100(sp) # 4-byte Folded Reload
@@ -405,7 +405,7 @@ define half @caller_half_32(<32 x half> %A) nounwind {
; ZHINX64-NEXT: ld t3, 56(sp) # 8-byte Folded Reload
; ZHINX64-NEXT: ld t4, 48(sp) # 8-byte Folded Reload
; ZHINX64-NEXT: ld t5, 40(sp) # 8-byte Folded Reload
-; ZHINX64-NEXT: call callee_half_32@plt
+; ZHINX64-NEXT: call callee_half_32
; ZHINX64-NEXT: ld ra, 168(sp) # 8-byte Folded Reload
; ZHINX64-NEXT: ld s0, 160(sp) # 8-byte Folded Reload
; ZHINX64-NEXT: ld s1, 152(sp) # 8-byte Folded Reload
@@ -498,7 +498,7 @@ define half @caller_half_32(<32 x half> %A) nounwind {
; ZFINX32-NEXT: lw a2, 84(sp) # 4-byte Folded Reload
; ZFINX32-NEXT: lw a3, 80(sp) # 4-byte Folded Reload
; ZFINX32-NEXT: lw a4, 76(sp) # 4-byte Folded Reload
-; ZFINX32-NEXT: call callee_half_32@plt
+; ZFINX32-NEXT: call callee_half_32
; ZFINX32-NEXT: lui a1, 1048560
; ZFINX32-NEXT: or a0, a0, a1
; ZFINX32-NEXT: lw ra, 140(sp) # 4-byte Folded Reload
@@ -593,7 +593,7 @@ define half @caller_half_32(<32 x half> %A) nounwind {
; ZFINX64-NEXT: ld a2, 168(sp) # 8-byte Folded Reload
; ZFINX64-NEXT: ld a3, 160(sp) # 8-byte Folded Reload
; ZFINX64-NEXT: ld a4, 152(sp) # 8-byte Folded Reload
-; ZFINX64-NEXT: call callee_half_32@plt
+; ZFINX64-NEXT: call callee_half_32
; ZFINX64-NEXT: lui a1, 1048560
; ZFINX64-NEXT: or a0, a0, a1
; ZFINX64-NEXT: ld ra, 280(sp) # 8-byte Folded Reload
@@ -688,7 +688,7 @@ define half @caller_half_32(<32 x half> %A) nounwind {
; ZDINX32-NEXT: lw a2, 84(sp) # 4-byte Folded Reload
; ZDINX32-NEXT: lw a3, 80(sp) # 4-byte Folded Reload
; ZDINX32-NEXT: lw a4, 76(sp) # 4-byte Folded Reload
-; ZDINX32-NEXT: call callee_half_32@plt
+; ZDINX32-NEXT: call callee_half_32
; ZDINX32-NEXT: lui a1, 1048560
; ZDINX32-NEXT: or a0, a0, a1
; ZDINX32-NEXT: lw ra, 140(sp) # 4-byte Folded Reload
@@ -783,7 +783,7 @@ define half @caller_half_32(<32 x half> %A) nounwind {
; ZDINX64-NEXT: ld a2, 168(sp) # 8-byte Folded Reload
; ZDINX64-NEXT: ld a3, 160(sp) # 8-byte Folded Reload
; ZDINX64-NEXT: ld a4, 152(sp) # 8-byte Folded Reload
-; ZDINX64-NEXT: call callee_half_32@plt
+; ZDINX64-NEXT: call callee_half_32
; ZDINX64-NEXT: lui a1, 1048560
; ZDINX64-NEXT: or a0, a0, a1
; ZDINX64-NEXT: ld ra, 280(sp) # 8-byte Folded Reload
@@ -901,7 +901,7 @@ define float @caller_float_32(<32 x float> %A) nounwind {
; ZHINX32-NEXT: lw t3, 84(sp) # 4-byte Folded Reload
; ZHINX32-NEXT: lw t4, 80(sp) # 4-byte Folded Reload
; ZHINX32-NEXT: lw t5, 76(sp) # 4-byte Folded Reload
-; ZHINX32-NEXT: call callee_float_32@plt
+; ZHINX32-NEXT: call callee_float_32
; ZHINX32-NEXT: lw ra, 140(sp) # 4-byte Folded Reload
; ZHINX32-NEXT: lw s0, 136(sp) # 4-byte Folded Reload
; ZHINX32-NEXT: lw s1, 132(sp) # 4-byte Folded Reload
@@ -985,7 +985,7 @@ define float @caller_float_32(<32 x float> %A) nounwind {
; ZHINX64-NEXT: ld t3, 104(sp) # 8-byte Folded Reload
; ZHINX64-NEXT: ld t4, 96(sp) # 8-byte Folded Reload
; ZHINX64-NEXT: ld t5, 88(sp) # 8-byte Folded Reload
-; ZHINX64-NEXT: call callee_float_32@plt
+; ZHINX64-NEXT: call callee_float_32
; ZHINX64-NEXT: ld ra, 216(sp) # 8-byte Folded Reload
; ZHINX64-NEXT: ld s0, 208(sp) # 8-byte Folded Reload
; ZHINX64-NEXT: ld s1, 200(sp) # 8-byte Folded Reload
@@ -1069,7 +1069,7 @@ define float @caller_float_32(<32 x float> %A) nounwind {
; ZFINX32-NEXT: lw t3, 84(sp) # 4-byte Folded Reload
; ZFINX32-NEXT: lw t4, 80(sp) # 4-byte Folded Reload
; ZFINX32-NEXT: lw t5, 76(sp) # 4-byte Folded Reload
-; ZFINX32-NEXT: call callee_float_32@plt
+; ZFINX32-NEXT: call callee_float_32
; ZFINX32-NEXT: lw ra, 140(sp) # 4-byte Folded Reload
; ZFINX32-NEXT: lw s0, 136(sp) # 4-byte Folded Reload
; ZFINX32-NEXT: lw s1, 132(sp) # 4-byte Folded Reload
@@ -1153,7 +1153,7 @@ define float @caller_float_32(<32 x float> %A) nounwind {
; ZFINX64-NEXT: ld t3, 104(sp) # 8-byte Folded Reload
; ZFINX64-NEXT: ld t4, 96(sp) # 8-byte Folded Reload
; ZFINX64-NEXT: ld t5, 88(sp) # 8-byte Folded Reload
-; ZFINX64-NEXT: call callee_float_32@plt
+; ZFINX64-NEXT: call callee_float_32
; ZFINX64-NEXT: ld ra, 216(sp) # 8-byte Folded Reload
; ZFINX64-NEXT: ld s0, 208(sp) # 8-byte Folded Reload
; ZFINX64-NEXT: ld s1, 200(sp) # 8-byte Folded Reload
@@ -1237,7 +1237,7 @@ define float @caller_float_32(<32 x float> %A) nounwind {
; ZDINX32-NEXT: lw t3, 84(sp) # 4-byte Folded Reload
; ZDINX32-NEXT: lw t4, 80(sp) # 4-byte Folded Reload
; ZDINX32-NEXT: lw t5, 76(sp) # 4-byte Folded Reload
-; ZDINX32-NEXT: call callee_float_32@plt
+; ZDINX32-NEXT: call callee_float_32
; ZDINX32-NEXT: lw ra, 140(sp) # 4-byte Folded Reload
; ZDINX32-NEXT: lw s0, 136(sp) # 4-byte Folded Reload
; ZDINX32-NEXT: lw s1, 132(sp) # 4-byte Folded Reload
@@ -1321,7 +1321,7 @@ define float @caller_float_32(<32 x float> %A) nounwind {
; ZDINX64-NEXT: ld t3, 104(sp) # 8-byte Folded Reload
; ZDINX64-NEXT: ld t4, 96(sp) # 8-byte Folded Reload
; ZDINX64-NEXT: ld t5, 88(sp) # 8-byte Folded Reload
-; ZDINX64-NEXT: call callee_float_32@plt
+; ZDINX64-NEXT: call callee_float_32
; ZDINX64-NEXT: ld ra, 216(sp) # 8-byte Folded Reload
; ZDINX64-NEXT: ld s0, 208(sp) # 8-byte Folded Reload
; ZDINX64-NEXT: ld s1, 200(sp) # 8-byte Folded Reload
diff --git a/llvm/test/CodeGen/RISCV/fli-licm.ll b/llvm/test/CodeGen/RISCV/fli-licm.ll
index 4962a146362d..ba6b33c4f0a4 100644
--- a/llvm/test/CodeGen/RISCV/fli-licm.ll
+++ b/llvm/test/CodeGen/RISCV/fli-licm.ll
@@ -22,7 +22,7 @@ define void @process_nodes(ptr %0) nounwind {
; RV32-NEXT: # =>This Inner Loop Header: Depth=1
; RV32-NEXT: fli.s fa0, 1.0
; RV32-NEXT: mv a0, s0
-; RV32-NEXT: call do_it@plt
+; RV32-NEXT: call do_it
; RV32-NEXT: lw s0, 0(s0)
; RV32-NEXT: bnez s0, .LBB0_2
; RV32-NEXT: # %bb.3:
@@ -44,7 +44,7 @@ define void @process_nodes(ptr %0) nounwind {
; RV64-NEXT: # =>This Inner Loop Header: Depth=1
; RV64-NEXT: fli.s fa0, 1.0
; RV64-NEXT: mv a0, s0
-; RV64-NEXT: call do_it@plt
+; RV64-NEXT: call do_it
; RV64-NEXT: ld s0, 0(s0)
; RV64-NEXT: bnez s0, .LBB0_2
; RV64-NEXT: # %bb.3:
diff --git a/llvm/test/CodeGen/RISCV/float-arith-strict.ll b/llvm/test/CodeGen/RISCV/float-arith-strict.ll
index 0252c8ca0f72..90ce034eafd3 100644
--- a/llvm/test/CodeGen/RISCV/float-arith-strict.ll
+++ b/llvm/test/CodeGen/RISCV/float-arith-strict.ll
@@ -26,7 +26,7 @@ define float @fadd_s(float %a, float %b) nounwind strictfp {
; RV32I: # %bb.0:
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT: call __addsf3@plt
+; RV32I-NEXT: call __addsf3
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
@@ -35,7 +35,7 @@ define float @fadd_s(float %a, float %b) nounwind strictfp {
; RV64I: # %bb.0:
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT: call __addsf3@plt
+; RV64I-NEXT: call __addsf3
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
@@ -59,7 +59,7 @@ define float @fsub_s(float %a, float %b) nounwind strictfp {
; RV32I: # %bb.0:
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT: call __subsf3@plt
+; RV32I-NEXT: call __subsf3
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
@@ -68,7 +68,7 @@ define float @fsub_s(float %a, float %b) nounwind strictfp {
; RV64I: # %bb.0:
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT: call __subsf3@plt
+; RV64I-NEXT: call __subsf3
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
@@ -92,7 +92,7 @@ define float @fmul_s(float %a, float %b) nounwind strictfp {
; RV32I: # %bb.0:
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT: call __mulsf3@plt
+; RV32I-NEXT: call __mulsf3
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
@@ -101,7 +101,7 @@ define float @fmul_s(float %a, float %b) nounwind strictfp {
; RV64I: # %bb.0:
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT: call __mulsf3@plt
+; RV64I-NEXT: call __mulsf3
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
@@ -125,7 +125,7 @@ define float @fdiv_s(float %a, float %b) nounwind strictfp {
; RV32I: # %bb.0:
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT: call __divsf3@plt
+; RV32I-NEXT: call __divsf3
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
@@ -134,7 +134,7 @@ define float @fdiv_s(float %a, float %b) nounwind strictfp {
; RV64I: # %bb.0:
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT: call __divsf3@plt
+; RV64I-NEXT: call __divsf3
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
@@ -158,7 +158,7 @@ define float @fsqrt_s(float %a) nounwind strictfp {
; RV32I: # %bb.0:
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT: call sqrtf@plt
+; RV32I-NEXT: call sqrtf
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
@@ -167,7 +167,7 @@ define float @fsqrt_s(float %a) nounwind strictfp {
; RV64I: # %bb.0:
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT: call sqrtf@plt
+; RV64I-NEXT: call sqrtf
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
@@ -186,7 +186,7 @@ define float @fmin_s(float %a, float %b) nounwind strictfp {
; RV32IF: # %bb.0:
; RV32IF-NEXT: addi sp, sp, -16
; RV32IF-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IF-NEXT: call fminf@plt
+; RV32IF-NEXT: call fminf
; RV32IF-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32IF-NEXT: addi sp, sp, 16
; RV32IF-NEXT: ret
@@ -195,7 +195,7 @@ define float @fmin_s(float %a, float %b) nounwind strictfp {
; RV64IF: # %bb.0:
; RV64IF-NEXT: addi sp, sp, -16
; RV64IF-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
-; RV64IF-NEXT: call fminf@plt
+; RV64IF-NEXT: call fminf
; RV64IF-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64IF-NEXT: addi sp, sp, 16
; RV64IF-NEXT: ret
@@ -204,7 +204,7 @@ define float @fmin_s(float %a, float %b) nounwind strictfp {
; RV32I: # %bb.0:
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT: call fminf@plt
+; RV32I-NEXT: call fminf
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
@@ -213,7 +213,7 @@ define float @fmin_s(float %a, float %b) nounwind strictfp {
; RV64I: # %bb.0:
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT: call fminf@plt
+; RV64I-NEXT: call fminf
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
@@ -222,7 +222,7 @@ define float @fmin_s(float %a, float %b) nounwind strictfp {
; RV32IZFINX: # %bb.0:
; RV32IZFINX-NEXT: addi sp, sp, -16
; RV32IZFINX-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IZFINX-NEXT: call fminf@plt
+; RV32IZFINX-NEXT: call fminf
; RV32IZFINX-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32IZFINX-NEXT: addi sp, sp, 16
; RV32IZFINX-NEXT: ret
@@ -231,7 +231,7 @@ define float @fmin_s(float %a, float %b) nounwind strictfp {
; RV64IZFINX: # %bb.0:
; RV64IZFINX-NEXT: addi sp, sp, -16
; RV64IZFINX-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
-; RV64IZFINX-NEXT: call fminf@plt
+; RV64IZFINX-NEXT: call fminf
; RV64IZFINX-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64IZFINX-NEXT: addi sp, sp, 16
; RV64IZFINX-NEXT: ret
@@ -245,7 +245,7 @@ define float @fmax_s(float %a, float %b) nounwind strictfp {
; RV32IF: # %bb.0:
; RV32IF-NEXT: addi sp, sp, -16
; RV32IF-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IF-NEXT: call fmaxf@plt
+; RV32IF-NEXT: call fmaxf
; RV32IF-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32IF-NEXT: addi sp, sp, 16
; RV32IF-NEXT: ret
@@ -254,7 +254,7 @@ define float @fmax_s(float %a, float %b) nounwind strictfp {
; RV64IF: # %bb.0:
; RV64IF-NEXT: addi sp, sp, -16
; RV64IF-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
-; RV64IF-NEXT: call fmaxf@plt
+; RV64IF-NEXT: call fmaxf
; RV64IF-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64IF-NEXT: addi sp, sp, 16
; RV64IF-NEXT: ret
@@ -263,7 +263,7 @@ define float @fmax_s(float %a, float %b) nounwind strictfp {
; RV32I: # %bb.0:
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT: call fmaxf@plt
+; RV32I-NEXT: call fmaxf
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
@@ -272,7 +272,7 @@ define float @fmax_s(float %a, float %b) nounwind strictfp {
; RV64I: # %bb.0:
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT: call fmaxf@plt
+; RV64I-NEXT: call fmaxf
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
@@ -281,7 +281,7 @@ define float @fmax_s(float %a, float %b) nounwind strictfp {
; RV32IZFINX: # %bb.0:
; RV32IZFINX-NEXT: addi sp, sp, -16
; RV32IZFINX-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IZFINX-NEXT: call fmaxf@plt
+; RV32IZFINX-NEXT: call fmaxf
; RV32IZFINX-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32IZFINX-NEXT: addi sp, sp, 16
; RV32IZFINX-NEXT: ret
@@ -290,7 +290,7 @@ define float @fmax_s(float %a, float %b) nounwind strictfp {
; RV64IZFINX: # %bb.0:
; RV64IZFINX-NEXT: addi sp, sp, -16
; RV64IZFINX-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
-; RV64IZFINX-NEXT: call fmaxf@plt
+; RV64IZFINX-NEXT: call fmaxf
; RV64IZFINX-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64IZFINX-NEXT: addi sp, sp, 16
; RV64IZFINX-NEXT: ret
@@ -309,7 +309,7 @@ define float @fmadd_s(float %a, float %b, float %c) nounwind strictfp {
; RV32I: # %bb.0:
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT: call fmaf@plt
+; RV32I-NEXT: call fmaf
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
@@ -318,7 +318,7 @@ define float @fmadd_s(float %a, float %b, float %c) nounwind strictfp {
; RV64I: # %bb.0:
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT: call fmaf@plt
+; RV64I-NEXT: call fmaf
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
@@ -350,12 +350,12 @@ define float @fmsub_s(float %a, float %b, float %c) nounwind strictfp {
; RV32I-NEXT: mv s1, a0
; RV32I-NEXT: mv a0, a2
; RV32I-NEXT: li a1, 0
-; RV32I-NEXT: call __addsf3@plt
+; RV32I-NEXT: call __addsf3
; RV32I-NEXT: lui a2, 524288
; RV32I-NEXT: xor a2, a0, a2
; RV32I-NEXT: mv a0, s1
; RV32I-NEXT: mv a1, s0
-; RV32I-NEXT: call fmaf@plt
+; RV32I-NEXT: call fmaf
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: lw s0, 8(sp) # 4-byte Folded Reload
; RV32I-NEXT: lw s1, 4(sp) # 4-byte Folded Reload
@@ -372,12 +372,12 @@ define float @fmsub_s(float %a, float %b, float %c) nounwind strictfp {
; RV64I-NEXT: mv s1, a0
; RV64I-NEXT: mv a0, a2
; RV64I-NEXT: li a1, 0
-; RV64I-NEXT: call __addsf3@plt
+; RV64I-NEXT: call __addsf3
; RV64I-NEXT: lui a2, 524288
; RV64I-NEXT: xor a2, a0, a2
; RV64I-NEXT: mv a0, s1
; RV64I-NEXT: mv a1, s0
-; RV64I-NEXT: call fmaf@plt
+; RV64I-NEXT: call fmaf
; RV64I-NEXT: ld ra, 24(sp) # 8-byte Folded Reload
; RV64I-NEXT: ld s0, 16(sp) # 8-byte Folded Reload
; RV64I-NEXT: ld s1, 8(sp) # 8-byte Folded Reload
@@ -414,17 +414,17 @@ define float @fnmadd_s(float %a, float %b, float %c) nounwind strictfp {
; RV32I-NEXT: mv s0, a2
; RV32I-NEXT: mv s1, a1
; RV32I-NEXT: li a1, 0
-; RV32I-NEXT: call __addsf3@plt
+; RV32I-NEXT: call __addsf3
; RV32I-NEXT: mv s2, a0
; RV32I-NEXT: mv a0, s0
; RV32I-NEXT: li a1, 0
-; RV32I-NEXT: call __addsf3@plt
+; RV32I-NEXT: call __addsf3
; RV32I-NEXT: lui a2, 524288
; RV32I-NEXT: xor a1, s2, a2
; RV32I-NEXT: xor a2, a0, a2
; RV32I-NEXT: mv a0, a1
; RV32I-NEXT: mv a1, s1
-; RV32I-NEXT: call fmaf@plt
+; RV32I-NEXT: call fmaf
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: lw s0, 8(sp) # 4-byte Folded Reload
; RV32I-NEXT: lw s1, 4(sp) # 4-byte Folded Reload
@@ -442,17 +442,17 @@ define float @fnmadd_s(float %a, float %b, float %c) nounwind strictfp {
; RV64I-NEXT: mv s0, a2
; RV64I-NEXT: mv s1, a1
; RV64I-NEXT: li a1, 0
-; RV64I-NEXT: call __addsf3@plt
+; RV64I-NEXT: call __addsf3
; RV64I-NEXT: mv s2, a0
; RV64I-NEXT: mv a0, s0
; RV64I-NEXT: li a1, 0
-; RV64I-NEXT: call __addsf3@plt
+; RV64I-NEXT: call __addsf3
; RV64I-NEXT: lui a2, 524288
; RV64I-NEXT: xor a1, s2, a2
; RV64I-NEXT: xor a2, a0, a2
; RV64I-NEXT: mv a0, a1
; RV64I-NEXT: mv a1, s1
-; RV64I-NEXT: call fmaf@plt
+; RV64I-NEXT: call fmaf
; RV64I-NEXT: ld ra, 24(sp) # 8-byte Folded Reload
; RV64I-NEXT: ld s0, 16(sp) # 8-byte Folded Reload
; RV64I-NEXT: ld s1, 8(sp) # 8-byte Folded Reload
@@ -494,16 +494,16 @@ define float @fnmadd_s_2(float %a, float %b, float %c) nounwind strictfp {
; RV32I-NEXT: mv s1, a0
; RV32I-NEXT: mv a0, a1
; RV32I-NEXT: li a1, 0
-; RV32I-NEXT: call __addsf3@plt
+; RV32I-NEXT: call __addsf3
; RV32I-NEXT: mv s2, a0
; RV32I-NEXT: mv a0, s0
; RV32I-NEXT: li a1, 0
-; RV32I-NEXT: call __addsf3@plt
+; RV32I-NEXT: call __addsf3
; RV32I-NEXT: lui a2, 524288
; RV32I-NEXT: xor a1, s2, a2
; RV32I-NEXT: xor a2, a0, a2
; RV32I-NEXT: mv a0, s1
-; RV32I-NEXT: call fmaf@plt
+; RV32I-NEXT: call fmaf
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: lw s0, 8(sp) # 4-byte Folded Reload
; RV32I-NEXT: lw s1, 4(sp) # 4-byte Folded Reload
@@ -522,16 +522,16 @@ define float @fnmadd_s_2(float %a, float %b, float %c) nounwind strictfp {
; RV64I-NEXT: mv s1, a0
; RV64I-NEXT: mv a0, a1
; RV64I-NEXT: li a1, 0
-; RV64I-NEXT: call __addsf3@plt
+; RV64I-NEXT: call __addsf3
; RV64I-NEXT: mv s2, a0
; RV64I-NEXT: mv a0, s0
; RV64I-NEXT: li a1, 0
-; RV64I-NEXT: call __addsf3@plt
+; RV64I-NEXT: call __addsf3
; RV64I-NEXT: lui a2, 524288
; RV64I-NEXT: xor a1, s2, a2
; RV64I-NEXT: xor a2, a0, a2
; RV64I-NEXT: mv a0, s1
-; RV64I-NEXT: call fmaf@plt
+; RV64I-NEXT: call fmaf
; RV64I-NEXT: ld ra, 24(sp) # 8-byte Folded Reload
; RV64I-NEXT: ld s0, 16(sp) # 8-byte Folded Reload
; RV64I-NEXT: ld s1, 8(sp) # 8-byte Folded Reload
@@ -570,12 +570,12 @@ define float @fnmsub_s(float %a, float %b, float %c) nounwind strictfp {
; RV32I-NEXT: mv s0, a2
; RV32I-NEXT: mv s1, a1
; RV32I-NEXT: li a1, 0
-; RV32I-NEXT: call __addsf3@plt
+; RV32I-NEXT: call __addsf3
; RV32I-NEXT: lui a1, 524288
; RV32I-NEXT: xor a0, a0, a1
; RV32I-NEXT: mv a1, s1
; RV32I-NEXT: mv a2, s0
-; RV32I-NEXT: call fmaf@plt
+; RV32I-NEXT: call fmaf
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: lw s0, 8(sp) # 4-byte Folded Reload
; RV32I-NEXT: lw s1, 4(sp) # 4-byte Folded Reload
@@ -591,12 +591,12 @@ define float @fnmsub_s(float %a, float %b, float %c) nounwind strictfp {
; RV64I-NEXT: mv s0, a2
; RV64I-NEXT: mv s1, a1
; RV64I-NEXT: li a1, 0
-; RV64I-NEXT: call __addsf3@plt
+; RV64I-NEXT: call __addsf3
; RV64I-NEXT: lui a1, 524288
; RV64I-NEXT: xor a0, a0, a1
; RV64I-NEXT: mv a1, s1
; RV64I-NEXT: mv a2, s0
-; RV64I-NEXT: call fmaf@plt
+; RV64I-NEXT: call fmaf
; RV64I-NEXT: ld ra, 24(sp) # 8-byte Folded Reload
; RV64I-NEXT: ld s0, 16(sp) # 8-byte Folded Reload
; RV64I-NEXT: ld s1, 8(sp) # 8-byte Folded Reload
@@ -632,12 +632,12 @@ define float @fnmsub_s_2(float %a, float %b, float %c) nounwind strictfp {
; RV32I-NEXT: mv s1, a0
; RV32I-NEXT: mv a0, a1
; RV32I-NEXT: li a1, 0
-; RV32I-NEXT: call __addsf3@plt
+; RV32I-NEXT: call __addsf3
; RV32I-NEXT: lui a1, 524288
; RV32I-NEXT: xor a1, a0, a1
; RV32I-NEXT: mv a0, s1
; RV32I-NEXT: mv a2, s0
-; RV32I-NEXT: call fmaf@plt
+; RV32I-NEXT: call fmaf
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: lw s0, 8(sp) # 4-byte Folded Reload
; RV32I-NEXT: lw s1, 4(sp) # 4-byte Folded Reload
@@ -654,12 +654,12 @@ define float @fnmsub_s_2(float %a, float %b, float %c) nounwind strictfp {
; RV64I-NEXT: mv s1, a0
; RV64I-NEXT: mv a0, a1
; RV64I-NEXT: li a1, 0
-; RV64I-NEXT: call __addsf3@plt
+; RV64I-NEXT: call __addsf3
; RV64I-NEXT: lui a1, 524288
; RV64I-NEXT: xor a1, a0, a1
; RV64I-NEXT: mv a0, s1
; RV64I-NEXT: mv a2, s0
-; RV64I-NEXT: call fmaf@plt
+; RV64I-NEXT: call fmaf
; RV64I-NEXT: ld ra, 24(sp) # 8-byte Folded Reload
; RV64I-NEXT: ld s0, 16(sp) # 8-byte Folded Reload
; RV64I-NEXT: ld s1, 8(sp) # 8-byte Folded Reload
diff --git a/llvm/test/CodeGen/RISCV/float-arith.ll b/llvm/test/CodeGen/RISCV/float-arith.ll
index 5497827a3f2f..7a7ebe651c08 100644
--- a/llvm/test/CodeGen/RISCV/float-arith.ll
+++ b/llvm/test/CodeGen/RISCV/float-arith.ll
@@ -32,7 +32,7 @@ define float @fadd_s(float %a, float %b) nounwind {
; RV32I: # %bb.0:
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT: call __addsf3@plt
+; RV32I-NEXT: call __addsf3
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
@@ -41,7 +41,7 @@ define float @fadd_s(float %a, float %b) nounwind {
; RV64I: # %bb.0:
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT: call __addsf3@plt
+; RV64I-NEXT: call __addsf3
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
@@ -64,7 +64,7 @@ define float @fsub_s(float %a, float %b) nounwind {
; RV32I: # %bb.0:
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT: call __subsf3@plt
+; RV32I-NEXT: call __subsf3
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
@@ -73,7 +73,7 @@ define float @fsub_s(float %a, float %b) nounwind {
; RV64I: # %bb.0:
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT: call __subsf3@plt
+; RV64I-NEXT: call __subsf3
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
@@ -96,7 +96,7 @@ define float @fmul_s(float %a, float %b) nounwind {
; RV32I: # %bb.0:
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT: call __mulsf3@plt
+; RV32I-NEXT: call __mulsf3
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
@@ -105,7 +105,7 @@ define float @fmul_s(float %a, float %b) nounwind {
; RV64I: # %bb.0:
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT: call __mulsf3@plt
+; RV64I-NEXT: call __mulsf3
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
@@ -128,7 +128,7 @@ define float @fdiv_s(float %a, float %b) nounwind {
; RV32I: # %bb.0:
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT: call __divsf3@plt
+; RV32I-NEXT: call __divsf3
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
@@ -137,7 +137,7 @@ define float @fdiv_s(float %a, float %b) nounwind {
; RV64I: # %bb.0:
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT: call __divsf3@plt
+; RV64I-NEXT: call __divsf3
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
@@ -162,7 +162,7 @@ define float @fsqrt_s(float %a) nounwind {
; RV32I: # %bb.0:
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT: call sqrtf@plt
+; RV32I-NEXT: call sqrtf
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
@@ -171,7 +171,7 @@ define float @fsqrt_s(float %a) nounwind {
; RV64I: # %bb.0:
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT: call sqrtf@plt
+; RV64I-NEXT: call sqrtf
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
@@ -233,10 +233,10 @@ define i32 @fneg_s(float %a, float %b) nounwind {
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT: mv a1, a0
-; RV32I-NEXT: call __addsf3@plt
+; RV32I-NEXT: call __addsf3
; RV32I-NEXT: lui a1, 524288
; RV32I-NEXT: xor a1, a0, a1
-; RV32I-NEXT: call __eqsf2@plt
+; RV32I-NEXT: call __eqsf2
; RV32I-NEXT: seqz a0, a0
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
@@ -247,10 +247,10 @@ define i32 @fneg_s(float %a, float %b) nounwind {
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT: mv a1, a0
-; RV64I-NEXT: call __addsf3@plt
+; RV64I-NEXT: call __addsf3
; RV64I-NEXT: lui a1, 524288
; RV64I-NEXT: xor a1, a0, a1
-; RV64I-NEXT: call __eqsf2@plt
+; RV64I-NEXT: call __eqsf2
; RV64I-NEXT: seqz a0, a0
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
@@ -281,7 +281,7 @@ define float @fsgnjn_s(float %a, float %b) nounwind {
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT: sw s0, 8(sp) # 4-byte Folded Spill
; RV32I-NEXT: mv s0, a0
-; RV32I-NEXT: call __addsf3@plt
+; RV32I-NEXT: call __addsf3
; RV32I-NEXT: not a0, a0
; RV32I-NEXT: lui a1, 524288
; RV32I-NEXT: and a0, a0, a1
@@ -299,7 +299,7 @@ define float @fsgnjn_s(float %a, float %b) nounwind {
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT: sd s0, 0(sp) # 8-byte Folded Spill
; RV64I-NEXT: mv s0, a0
-; RV64I-NEXT: call __addsf3@plt
+; RV64I-NEXT: call __addsf3
; RV64I-NEXT: not a0, a0
; RV64I-NEXT: lui a1, 524288
; RV64I-NEXT: and a0, a0, a1
@@ -337,11 +337,11 @@ define float @fabs_s(float %a, float %b) nounwind {
; RV32I: # %bb.0:
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT: call __addsf3@plt
+; RV32I-NEXT: call __addsf3
; RV32I-NEXT: mv a1, a0
; RV32I-NEXT: slli a0, a0, 1
; RV32I-NEXT: srli a0, a0, 1
-; RV32I-NEXT: call __addsf3@plt
+; RV32I-NEXT: call __addsf3
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
@@ -350,11 +350,11 @@ define float @fabs_s(float %a, float %b) nounwind {
; RV64I: # %bb.0:
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT: call __addsf3@plt
+; RV64I-NEXT: call __addsf3
; RV64I-NEXT: mv a1, a0
; RV64I-NEXT: slli a0, a0, 33
; RV64I-NEXT: srli a0, a0, 33
-; RV64I-NEXT: call __addsf3@plt
+; RV64I-NEXT: call __addsf3
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
@@ -381,7 +381,7 @@ define float @fmin_s(float %a, float %b) nounwind {
; RV32I: # %bb.0:
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT: call fminf@plt
+; RV32I-NEXT: call fminf
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
@@ -390,7 +390,7 @@ define float @fmin_s(float %a, float %b) nounwind {
; RV64I: # %bb.0:
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT: call fminf@plt
+; RV64I-NEXT: call fminf
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
@@ -415,7 +415,7 @@ define float @fmax_s(float %a, float %b) nounwind {
; RV32I: # %bb.0:
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT: call fmaxf@plt
+; RV32I-NEXT: call fmaxf
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
@@ -424,7 +424,7 @@ define float @fmax_s(float %a, float %b) nounwind {
; RV64I: # %bb.0:
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT: call fmaxf@plt
+; RV64I-NEXT: call fmaxf
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
@@ -449,7 +449,7 @@ define float @fmadd_s(float %a, float %b, float %c) nounwind {
; RV32I: # %bb.0:
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT: call fmaf@plt
+; RV32I-NEXT: call fmaf
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
@@ -458,7 +458,7 @@ define float @fmadd_s(float %a, float %b, float %c) nounwind {
; RV64I: # %bb.0:
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT: call fmaf@plt
+; RV64I-NEXT: call fmaf
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
@@ -490,12 +490,12 @@ define float @fmsub_s(float %a, float %b, float %c) nounwind {
; RV32I-NEXT: mv s1, a0
; RV32I-NEXT: mv a0, a2
; RV32I-NEXT: li a1, 0
-; RV32I-NEXT: call __addsf3@plt
+; RV32I-NEXT: call __addsf3
; RV32I-NEXT: lui a2, 524288
; RV32I-NEXT: xor a2, a0, a2
; RV32I-NEXT: mv a0, s1
; RV32I-NEXT: mv a1, s0
-; RV32I-NEXT: call fmaf@plt
+; RV32I-NEXT: call fmaf
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: lw s0, 8(sp) # 4-byte Folded Reload
; RV32I-NEXT: lw s1, 4(sp) # 4-byte Folded Reload
@@ -512,12 +512,12 @@ define float @fmsub_s(float %a, float %b, float %c) nounwind {
; RV64I-NEXT: mv s1, a0
; RV64I-NEXT: mv a0, a2
; RV64I-NEXT: li a1, 0
-; RV64I-NEXT: call __addsf3@plt
+; RV64I-NEXT: call __addsf3
; RV64I-NEXT: lui a2, 524288
; RV64I-NEXT: xor a2, a0, a2
; RV64I-NEXT: mv a0, s1
; RV64I-NEXT: mv a1, s0
-; RV64I-NEXT: call fmaf@plt
+; RV64I-NEXT: call fmaf
; RV64I-NEXT: ld ra, 24(sp) # 8-byte Folded Reload
; RV64I-NEXT: ld s0, 16(sp) # 8-byte Folded Reload
; RV64I-NEXT: ld s1, 8(sp) # 8-byte Folded Reload
@@ -555,17 +555,17 @@ define float @fnmadd_s(float %a, float %b, float %c) nounwind {
; RV32I-NEXT: mv s0, a2
; RV32I-NEXT: mv s1, a1
; RV32I-NEXT: li a1, 0
-; RV32I-NEXT: call __addsf3@plt
+; RV32I-NEXT: call __addsf3
; RV32I-NEXT: mv s2, a0
; RV32I-NEXT: mv a0, s0
; RV32I-NEXT: li a1, 0
-; RV32I-NEXT: call __addsf3@plt
+; RV32I-NEXT: call __addsf3
; RV32I-NEXT: lui a2, 524288
; RV32I-NEXT: xor a1, s2, a2
; RV32I-NEXT: xor a2, a0, a2
; RV32I-NEXT: mv a0, a1
; RV32I-NEXT: mv a1, s1
-; RV32I-NEXT: call fmaf@plt
+; RV32I-NEXT: call fmaf
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: lw s0, 8(sp) # 4-byte Folded Reload
; RV32I-NEXT: lw s1, 4(sp) # 4-byte Folded Reload
@@ -583,17 +583,17 @@ define float @fnmadd_s(float %a, float %b, float %c) nounwind {
; RV64I-NEXT: mv s0, a2
; RV64I-NEXT: mv s1, a1
; RV64I-NEXT: li a1, 0
-; RV64I-NEXT: call __addsf3@plt
+; RV64I-NEXT: call __addsf3
; RV64I-NEXT: mv s2, a0
; RV64I-NEXT: mv a0, s0
; RV64I-NEXT: li a1, 0
-; RV64I-NEXT: call __addsf3@plt
+; RV64I-NEXT: call __addsf3
; RV64I-NEXT: lui a2, 524288
; RV64I-NEXT: xor a1, s2, a2
; RV64I-NEXT: xor a2, a0, a2
; RV64I-NEXT: mv a0, a1
; RV64I-NEXT: mv a1, s1
-; RV64I-NEXT: call fmaf@plt
+; RV64I-NEXT: call fmaf
; RV64I-NEXT: ld ra, 24(sp) # 8-byte Folded Reload
; RV64I-NEXT: ld s0, 16(sp) # 8-byte Folded Reload
; RV64I-NEXT: ld s1, 8(sp) # 8-byte Folded Reload
@@ -635,16 +635,16 @@ define float @fnmadd_s_2(float %a, float %b, float %c) nounwind {
; RV32I-NEXT: mv s1, a0
; RV32I-NEXT: mv a0, a1
; RV32I-NEXT: li a1, 0
-; RV32I-NEXT: call __addsf3@plt
+; RV32I-NEXT: call __addsf3
; RV32I-NEXT: mv s2, a0
; RV32I-NEXT: mv a0, s0
; RV32I-NEXT: li a1, 0
-; RV32I-NEXT: call __addsf3@plt
+; RV32I-NEXT: call __addsf3
; RV32I-NEXT: lui a2, 524288
; RV32I-NEXT: xor a1, s2, a2
; RV32I-NEXT: xor a2, a0, a2
; RV32I-NEXT: mv a0, s1
-; RV32I-NEXT: call fmaf@plt
+; RV32I-NEXT: call fmaf
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: lw s0, 8(sp) # 4-byte Folded Reload
; RV32I-NEXT: lw s1, 4(sp) # 4-byte Folded Reload
@@ -663,16 +663,16 @@ define float @fnmadd_s_2(float %a, float %b, float %c) nounwind {
; RV64I-NEXT: mv s1, a0
; RV64I-NEXT: mv a0, a1
; RV64I-NEXT: li a1, 0
-; RV64I-NEXT: call __addsf3@plt
+; RV64I-NEXT: call __addsf3
; RV64I-NEXT: mv s2, a0
; RV64I-NEXT: mv a0, s0
; RV64I-NEXT: li a1, 0
-; RV64I-NEXT: call __addsf3@plt
+; RV64I-NEXT: call __addsf3
; RV64I-NEXT: lui a2, 524288
; RV64I-NEXT: xor a1, s2, a2
; RV64I-NEXT: xor a2, a0, a2
; RV64I-NEXT: mv a0, s1
-; RV64I-NEXT: call fmaf@plt
+; RV64I-NEXT: call fmaf
; RV64I-NEXT: ld ra, 24(sp) # 8-byte Folded Reload
; RV64I-NEXT: ld s0, 16(sp) # 8-byte Folded Reload
; RV64I-NEXT: ld s1, 8(sp) # 8-byte Folded Reload
@@ -717,7 +717,7 @@ define float @fnmadd_s_3(float %a, float %b, float %c) nounwind {
; RV32I: # %bb.0:
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT: call fmaf@plt
+; RV32I-NEXT: call fmaf
; RV32I-NEXT: lui a1, 524288
; RV32I-NEXT: xor a0, a0, a1
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
@@ -728,7 +728,7 @@ define float @fnmadd_s_3(float %a, float %b, float %c) nounwind {
; RV64I: # %bb.0:
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT: call fmaf@plt
+; RV64I-NEXT: call fmaf
; RV64I-NEXT: lui a1, 524288
; RV64I-NEXT: xor a0, a0, a1
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
@@ -766,7 +766,7 @@ define float @fnmadd_nsz(float %a, float %b, float %c) nounwind {
; RV32I: # %bb.0:
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT: call fmaf@plt
+; RV32I-NEXT: call fmaf
; RV32I-NEXT: lui a1, 524288
; RV32I-NEXT: xor a0, a0, a1
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
@@ -777,7 +777,7 @@ define float @fnmadd_nsz(float %a, float %b, float %c) nounwind {
; RV64I: # %bb.0:
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT: call fmaf@plt
+; RV64I-NEXT: call fmaf
; RV64I-NEXT: lui a1, 524288
; RV64I-NEXT: xor a0, a0, a1
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
@@ -811,12 +811,12 @@ define float @fnmsub_s(float %a, float %b, float %c) nounwind {
; RV32I-NEXT: mv s0, a2
; RV32I-NEXT: mv s1, a1
; RV32I-NEXT: li a1, 0
-; RV32I-NEXT: call __addsf3@plt
+; RV32I-NEXT: call __addsf3
; RV32I-NEXT: lui a1, 524288
; RV32I-NEXT: xor a0, a0, a1
; RV32I-NEXT: mv a1, s1
; RV32I-NEXT: mv a2, s0
-; RV32I-NEXT: call fmaf@plt
+; RV32I-NEXT: call fmaf
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: lw s0, 8(sp) # 4-byte Folded Reload
; RV32I-NEXT: lw s1, 4(sp) # 4-byte Folded Reload
@@ -832,12 +832,12 @@ define float @fnmsub_s(float %a, float %b, float %c) nounwind {
; RV64I-NEXT: mv s0, a2
; RV64I-NEXT: mv s1, a1
; RV64I-NEXT: li a1, 0
-; RV64I-NEXT: call __addsf3@plt
+; RV64I-NEXT: call __addsf3
; RV64I-NEXT: lui a1, 524288
; RV64I-NEXT: xor a0, a0, a1
; RV64I-NEXT: mv a1, s1
; RV64I-NEXT: mv a2, s0
-; RV64I-NEXT: call fmaf@plt
+; RV64I-NEXT: call fmaf
; RV64I-NEXT: ld ra, 24(sp) # 8-byte Folded Reload
; RV64I-NEXT: ld s0, 16(sp) # 8-byte Folded Reload
; RV64I-NEXT: ld s1, 8(sp) # 8-byte Folded Reload
@@ -873,12 +873,12 @@ define float @fnmsub_s_2(float %a, float %b, float %c) nounwind {
; RV32I-NEXT: mv s1, a0
; RV32I-NEXT: mv a0, a1
; RV32I-NEXT: li a1, 0
-; RV32I-NEXT: call __addsf3@plt
+; RV32I-NEXT: call __addsf3
; RV32I-NEXT: lui a1, 524288
; RV32I-NEXT: xor a1, a0, a1
; RV32I-NEXT: mv a0, s1
; RV32I-NEXT: mv a2, s0
-; RV32I-NEXT: call fmaf@plt
+; RV32I-NEXT: call fmaf
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: lw s0, 8(sp) # 4-byte Folded Reload
; RV32I-NEXT: lw s1, 4(sp) # 4-byte Folded Reload
@@ -895,12 +895,12 @@ define float @fnmsub_s_2(float %a, float %b, float %c) nounwind {
; RV64I-NEXT: mv s1, a0
; RV64I-NEXT: mv a0, a1
; RV64I-NEXT: li a1, 0
-; RV64I-NEXT: call __addsf3@plt
+; RV64I-NEXT: call __addsf3
; RV64I-NEXT: lui a1, 524288
; RV64I-NEXT: xor a1, a0, a1
; RV64I-NEXT: mv a0, s1
; RV64I-NEXT: mv a2, s0
-; RV64I-NEXT: call fmaf@plt
+; RV64I-NEXT: call fmaf
; RV64I-NEXT: ld ra, 24(sp) # 8-byte Folded Reload
; RV64I-NEXT: ld s0, 16(sp) # 8-byte Folded Reload
; RV64I-NEXT: ld s1, 8(sp) # 8-byte Folded Reload
@@ -929,9 +929,9 @@ define float @fmadd_s_contract(float %a, float %b, float %c) nounwind {
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT: sw s0, 8(sp) # 4-byte Folded Spill
; RV32I-NEXT: mv s0, a2
-; RV32I-NEXT: call __mulsf3@plt
+; RV32I-NEXT: call __mulsf3
; RV32I-NEXT: mv a1, s0
-; RV32I-NEXT: call __addsf3@plt
+; RV32I-NEXT: call __addsf3
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: lw s0, 8(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
@@ -943,9 +943,9 @@ define float @fmadd_s_contract(float %a, float %b, float %c) nounwind {
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT: sd s0, 0(sp) # 8-byte Folded Spill
; RV64I-NEXT: mv s0, a2
-; RV64I-NEXT: call __mulsf3@plt
+; RV64I-NEXT: call __mulsf3
; RV64I-NEXT: mv a1, s0
-; RV64I-NEXT: call __addsf3@plt
+; RV64I-NEXT: call __addsf3
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: ld s0, 0(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
@@ -980,13 +980,13 @@ define float @fmsub_s_contract(float %a, float %b, float %c) nounwind {
; RV32I-NEXT: mv s1, a0
; RV32I-NEXT: mv a0, a2
; RV32I-NEXT: li a1, 0
-; RV32I-NEXT: call __addsf3@plt
+; RV32I-NEXT: call __addsf3
; RV32I-NEXT: mv s2, a0
; RV32I-NEXT: mv a0, s1
; RV32I-NEXT: mv a1, s0
-; RV32I-NEXT: call __mulsf3@plt
+; RV32I-NEXT: call __mulsf3
; RV32I-NEXT: mv a1, s2
-; RV32I-NEXT: call __subsf3@plt
+; RV32I-NEXT: call __subsf3
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: lw s0, 8(sp) # 4-byte Folded Reload
; RV32I-NEXT: lw s1, 4(sp) # 4-byte Folded Reload
@@ -1005,13 +1005,13 @@ define float @fmsub_s_contract(float %a, float %b, float %c) nounwind {
; RV64I-NEXT: mv s1, a0
; RV64I-NEXT: mv a0, a2
; RV64I-NEXT: li a1, 0
-; RV64I-NEXT: call __addsf3@plt
+; RV64I-NEXT: call __addsf3
; RV64I-NEXT: mv s2, a0
; RV64I-NEXT: mv a0, s1
; RV64I-NEXT: mv a1, s0
-; RV64I-NEXT: call __mulsf3@plt
+; RV64I-NEXT: call __mulsf3
; RV64I-NEXT: mv a1, s2
-; RV64I-NEXT: call __subsf3@plt
+; RV64I-NEXT: call __subsf3
; RV64I-NEXT: ld ra, 24(sp) # 8-byte Folded Reload
; RV64I-NEXT: ld s0, 16(sp) # 8-byte Folded Reload
; RV64I-NEXT: ld s1, 8(sp) # 8-byte Folded Reload
@@ -1052,23 +1052,23 @@ define float @fnmadd_s_contract(float %a, float %b, float %c) nounwind {
; RV32I-NEXT: mv s0, a2
; RV32I-NEXT: mv s1, a1
; RV32I-NEXT: li a1, 0
-; RV32I-NEXT: call __addsf3@plt
+; RV32I-NEXT: call __addsf3
; RV32I-NEXT: mv s2, a0
; RV32I-NEXT: mv a0, s1
; RV32I-NEXT: li a1, 0
-; RV32I-NEXT: call __addsf3@plt
+; RV32I-NEXT: call __addsf3
; RV32I-NEXT: mv s1, a0
; RV32I-NEXT: mv a0, s0
; RV32I-NEXT: li a1, 0
-; RV32I-NEXT: call __addsf3@plt
+; RV32I-NEXT: call __addsf3
; RV32I-NEXT: mv s0, a0
; RV32I-NEXT: mv a0, s2
; RV32I-NEXT: mv a1, s1
-; RV32I-NEXT: call __mulsf3@plt
+; RV32I-NEXT: call __mulsf3
; RV32I-NEXT: lui a1, 524288
; RV32I-NEXT: xor a0, a0, a1
; RV32I-NEXT: mv a1, s0
-; RV32I-NEXT: call __subsf3@plt
+; RV32I-NEXT: call __subsf3
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: lw s0, 8(sp) # 4-byte Folded Reload
; RV32I-NEXT: lw s1, 4(sp) # 4-byte Folded Reload
@@ -1086,23 +1086,23 @@ define float @fnmadd_s_contract(float %a, float %b, float %c) nounwind {
; RV64I-NEXT: mv s0, a2
; RV64I-NEXT: mv s1, a1
; RV64I-NEXT: li a1, 0
-; RV64I-NEXT: call __addsf3@plt
+; RV64I-NEXT: call __addsf3
; RV64I-NEXT: mv s2, a0
; RV64I-NEXT: mv a0, s1
; RV64I-NEXT: li a1, 0
-; RV64I-NEXT: call __addsf3@plt
+; RV64I-NEXT: call __addsf3
; RV64I-NEXT: mv s1, a0
; RV64I-NEXT: mv a0, s0
; RV64I-NEXT: li a1, 0
-; RV64I-NEXT: call __addsf3@plt
+; RV64I-NEXT: call __addsf3
; RV64I-NEXT: mv s0, a0
; RV64I-NEXT: mv a0, s2
; RV64I-NEXT: mv a1, s1
-; RV64I-NEXT: call __mulsf3@plt
+; RV64I-NEXT: call __mulsf3
; RV64I-NEXT: lui a1, 524288
; RV64I-NEXT: xor a0, a0, a1
; RV64I-NEXT: mv a1, s0
-; RV64I-NEXT: call __subsf3@plt
+; RV64I-NEXT: call __subsf3
; RV64I-NEXT: ld ra, 24(sp) # 8-byte Folded Reload
; RV64I-NEXT: ld s0, 16(sp) # 8-byte Folded Reload
; RV64I-NEXT: ld s1, 8(sp) # 8-byte Folded Reload
@@ -1144,17 +1144,17 @@ define float @fnmsub_s_contract(float %a, float %b, float %c) nounwind {
; RV32I-NEXT: mv s0, a2
; RV32I-NEXT: mv s1, a1
; RV32I-NEXT: li a1, 0
-; RV32I-NEXT: call __addsf3@plt
+; RV32I-NEXT: call __addsf3
; RV32I-NEXT: mv s2, a0
; RV32I-NEXT: mv a0, s1
; RV32I-NEXT: li a1, 0
-; RV32I-NEXT: call __addsf3@plt
+; RV32I-NEXT: call __addsf3
; RV32I-NEXT: mv a1, a0
; RV32I-NEXT: mv a0, s2
-; RV32I-NEXT: call __mulsf3@plt
+; RV32I-NEXT: call __mulsf3
; RV32I-NEXT: mv a1, a0
; RV32I-NEXT: mv a0, s0
-; RV32I-NEXT: call __subsf3@plt
+; RV32I-NEXT: call __subsf3
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: lw s0, 8(sp) # 4-byte Folded Reload
; RV32I-NEXT: lw s1, 4(sp) # 4-byte Folded Reload
@@ -1172,17 +1172,17 @@ define float @fnmsub_s_contract(float %a, float %b, float %c) nounwind {
; RV64I-NEXT: mv s0, a2
; RV64I-NEXT: mv s1, a1
; RV64I-NEXT: li a1, 0
-; RV64I-NEXT: call __addsf3@plt
+; RV64I-NEXT: call __addsf3
; RV64I-NEXT: mv s2, a0
; RV64I-NEXT: mv a0, s1
; RV64I-NEXT: li a1, 0
-; RV64I-NEXT: call __addsf3@plt
+; RV64I-NEXT: call __addsf3
; RV64I-NEXT: mv a1, a0
; RV64I-NEXT: mv a0, s2
-; RV64I-NEXT: call __mulsf3@plt
+; RV64I-NEXT: call __mulsf3
; RV64I-NEXT: mv a1, a0
; RV64I-NEXT: mv a0, s0
-; RV64I-NEXT: call __subsf3@plt
+; RV64I-NEXT: call __subsf3
; RV64I-NEXT: ld ra, 24(sp) # 8-byte Folded Reload
; RV64I-NEXT: ld s0, 16(sp) # 8-byte Folded Reload
; RV64I-NEXT: ld s1, 8(sp) # 8-byte Folded Reload
diff --git a/llvm/test/CodeGen/RISCV/float-bit-preserving-dagcombines.ll b/llvm/test/CodeGen/RISCV/float-bit-preserving-dagcombines.ll
index 5c50381ad170..6aa6dedba548 100644
--- a/llvm/test/CodeGen/RISCV/float-bit-preserving-dagcombines.ll
+++ b/llvm/test/CodeGen/RISCV/float-bit-preserving-dagcombines.ll
@@ -89,13 +89,13 @@ define double @bitcast_double_and(double %a1, double %a2) nounwind {
; RV32F-NEXT: sw s1, 4(sp) # 4-byte Folded Spill
; RV32F-NEXT: mv s0, a1
; RV32F-NEXT: mv s1, a0
-; RV32F-NEXT: call __adddf3@plt
+; RV32F-NEXT: call __adddf3
; RV32F-NEXT: mv a2, a0
; RV32F-NEXT: slli a1, a1, 1
; RV32F-NEXT: srli a3, a1, 1
; RV32F-NEXT: mv a0, s1
; RV32F-NEXT: mv a1, s0
-; RV32F-NEXT: call __adddf3@plt
+; RV32F-NEXT: call __adddf3
; RV32F-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32F-NEXT: lw s0, 8(sp) # 4-byte Folded Reload
; RV32F-NEXT: lw s1, 4(sp) # 4-byte Folded Reload
@@ -110,13 +110,13 @@ define double @bitcast_double_and(double %a1, double %a2) nounwind {
; RV32ZFINX-NEXT: sw s1, 4(sp) # 4-byte Folded Spill
; RV32ZFINX-NEXT: mv s0, a1
; RV32ZFINX-NEXT: mv s1, a0
-; RV32ZFINX-NEXT: call __adddf3@plt
+; RV32ZFINX-NEXT: call __adddf3
; RV32ZFINX-NEXT: mv a2, a0
; RV32ZFINX-NEXT: slli a1, a1, 1
; RV32ZFINX-NEXT: srli a3, a1, 1
; RV32ZFINX-NEXT: mv a0, s1
; RV32ZFINX-NEXT: mv a1, s0
-; RV32ZFINX-NEXT: call __adddf3@plt
+; RV32ZFINX-NEXT: call __adddf3
; RV32ZFINX-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32ZFINX-NEXT: lw s0, 8(sp) # 4-byte Folded Reload
; RV32ZFINX-NEXT: lw s1, 4(sp) # 4-byte Folded Reload
@@ -147,11 +147,11 @@ define double @bitcast_double_and(double %a1, double %a2) nounwind {
; RV64F-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64F-NEXT: sd s0, 0(sp) # 8-byte Folded Spill
; RV64F-NEXT: mv s0, a0
-; RV64F-NEXT: call __adddf3@plt
+; RV64F-NEXT: call __adddf3
; RV64F-NEXT: slli a0, a0, 1
; RV64F-NEXT: srli a1, a0, 1
; RV64F-NEXT: mv a0, s0
-; RV64F-NEXT: call __adddf3@plt
+; RV64F-NEXT: call __adddf3
; RV64F-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64F-NEXT: ld s0, 0(sp) # 8-byte Folded Reload
; RV64F-NEXT: addi sp, sp, 16
@@ -163,11 +163,11 @@ define double @bitcast_double_and(double %a1, double %a2) nounwind {
; RV64ZFINX-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64ZFINX-NEXT: sd s0, 0(sp) # 8-byte Folded Spill
; RV64ZFINX-NEXT: mv s0, a0
-; RV64ZFINX-NEXT: call __adddf3@plt
+; RV64ZFINX-NEXT: call __adddf3
; RV64ZFINX-NEXT: slli a0, a0, 1
; RV64ZFINX-NEXT: srli a1, a0, 1
; RV64ZFINX-NEXT: mv a0, s0
-; RV64ZFINX-NEXT: call __adddf3@plt
+; RV64ZFINX-NEXT: call __adddf3
; RV64ZFINX-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64ZFINX-NEXT: ld s0, 0(sp) # 8-byte Folded Reload
; RV64ZFINX-NEXT: addi sp, sp, 16
@@ -262,13 +262,13 @@ define double @bitcast_double_xor(double %a1, double %a2) nounwind {
; RV32F-NEXT: sw s1, 4(sp) # 4-byte Folded Spill
; RV32F-NEXT: mv s0, a1
; RV32F-NEXT: mv s1, a0
-; RV32F-NEXT: call __muldf3@plt
+; RV32F-NEXT: call __muldf3
; RV32F-NEXT: mv a2, a0
; RV32F-NEXT: lui a3, 524288
; RV32F-NEXT: xor a3, a1, a3
; RV32F-NEXT: mv a0, s1
; RV32F-NEXT: mv a1, s0
-; RV32F-NEXT: call __muldf3@plt
+; RV32F-NEXT: call __muldf3
; RV32F-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32F-NEXT: lw s0, 8(sp) # 4-byte Folded Reload
; RV32F-NEXT: lw s1, 4(sp) # 4-byte Folded Reload
@@ -283,13 +283,13 @@ define double @bitcast_double_xor(double %a1, double %a2) nounwind {
; RV32ZFINX-NEXT: sw s1, 4(sp) # 4-byte Folded Spill
; RV32ZFINX-NEXT: mv s0, a1
; RV32ZFINX-NEXT: mv s1, a0
-; RV32ZFINX-NEXT: call __muldf3@plt
+; RV32ZFINX-NEXT: call __muldf3
; RV32ZFINX-NEXT: mv a2, a0
; RV32ZFINX-NEXT: lui a3, 524288
; RV32ZFINX-NEXT: xor a3, a1, a3
; RV32ZFINX-NEXT: mv a0, s1
; RV32ZFINX-NEXT: mv a1, s0
-; RV32ZFINX-NEXT: call __muldf3@plt
+; RV32ZFINX-NEXT: call __muldf3
; RV32ZFINX-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32ZFINX-NEXT: lw s0, 8(sp) # 4-byte Folded Reload
; RV32ZFINX-NEXT: lw s1, 4(sp) # 4-byte Folded Reload
@@ -320,12 +320,12 @@ define double @bitcast_double_xor(double %a1, double %a2) nounwind {
; RV64F-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64F-NEXT: sd s0, 0(sp) # 8-byte Folded Spill
; RV64F-NEXT: mv s0, a0
-; RV64F-NEXT: call __muldf3@plt
+; RV64F-NEXT: call __muldf3
; RV64F-NEXT: li a1, -1
; RV64F-NEXT: slli a1, a1, 63
; RV64F-NEXT: xor a1, a0, a1
; RV64F-NEXT: mv a0, s0
-; RV64F-NEXT: call __muldf3@plt
+; RV64F-NEXT: call __muldf3
; RV64F-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64F-NEXT: ld s0, 0(sp) # 8-byte Folded Reload
; RV64F-NEXT: addi sp, sp, 16
@@ -337,12 +337,12 @@ define double @bitcast_double_xor(double %a1, double %a2) nounwind {
; RV64ZFINX-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64ZFINX-NEXT: sd s0, 0(sp) # 8-byte Folded Spill
; RV64ZFINX-NEXT: mv s0, a0
-; RV64ZFINX-NEXT: call __muldf3@plt
+; RV64ZFINX-NEXT: call __muldf3
; RV64ZFINX-NEXT: li a1, -1
; RV64ZFINX-NEXT: slli a1, a1, 63
; RV64ZFINX-NEXT: xor a1, a0, a1
; RV64ZFINX-NEXT: mv a0, s0
-; RV64ZFINX-NEXT: call __muldf3@plt
+; RV64ZFINX-NEXT: call __muldf3
; RV64ZFINX-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64ZFINX-NEXT: ld s0, 0(sp) # 8-byte Folded Reload
; RV64ZFINX-NEXT: addi sp, sp, 16
@@ -442,13 +442,13 @@ define double @bitcast_double_or(double %a1, double %a2) nounwind {
; RV32F-NEXT: sw s1, 4(sp) # 4-byte Folded Spill
; RV32F-NEXT: mv s0, a1
; RV32F-NEXT: mv s1, a0
-; RV32F-NEXT: call __muldf3@plt
+; RV32F-NEXT: call __muldf3
; RV32F-NEXT: mv a2, a0
; RV32F-NEXT: lui a3, 524288
; RV32F-NEXT: or a3, a1, a3
; RV32F-NEXT: mv a0, s1
; RV32F-NEXT: mv a1, s0
-; RV32F-NEXT: call __muldf3@plt
+; RV32F-NEXT: call __muldf3
; RV32F-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32F-NEXT: lw s0, 8(sp) # 4-byte Folded Reload
; RV32F-NEXT: lw s1, 4(sp) # 4-byte Folded Reload
@@ -463,13 +463,13 @@ define double @bitcast_double_or(double %a1, double %a2) nounwind {
; RV32ZFINX-NEXT: sw s1, 4(sp) # 4-byte Folded Spill
; RV32ZFINX-NEXT: mv s0, a1
; RV32ZFINX-NEXT: mv s1, a0
-; RV32ZFINX-NEXT: call __muldf3@plt
+; RV32ZFINX-NEXT: call __muldf3
; RV32ZFINX-NEXT: mv a2, a0
; RV32ZFINX-NEXT: lui a3, 524288
; RV32ZFINX-NEXT: or a3, a1, a3
; RV32ZFINX-NEXT: mv a0, s1
; RV32ZFINX-NEXT: mv a1, s0
-; RV32ZFINX-NEXT: call __muldf3@plt
+; RV32ZFINX-NEXT: call __muldf3
; RV32ZFINX-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32ZFINX-NEXT: lw s0, 8(sp) # 4-byte Folded Reload
; RV32ZFINX-NEXT: lw s1, 4(sp) # 4-byte Folded Reload
@@ -501,12 +501,12 @@ define double @bitcast_double_or(double %a1, double %a2) nounwind {
; RV64F-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64F-NEXT: sd s0, 0(sp) # 8-byte Folded Spill
; RV64F-NEXT: mv s0, a0
-; RV64F-NEXT: call __muldf3@plt
+; RV64F-NEXT: call __muldf3
; RV64F-NEXT: li a1, -1
; RV64F-NEXT: slli a1, a1, 63
; RV64F-NEXT: or a1, a0, a1
; RV64F-NEXT: mv a0, s0
-; RV64F-NEXT: call __muldf3@plt
+; RV64F-NEXT: call __muldf3
; RV64F-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64F-NEXT: ld s0, 0(sp) # 8-byte Folded Reload
; RV64F-NEXT: addi sp, sp, 16
@@ -518,12 +518,12 @@ define double @bitcast_double_or(double %a1, double %a2) nounwind {
; RV64ZFINX-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64ZFINX-NEXT: sd s0, 0(sp) # 8-byte Folded Spill
; RV64ZFINX-NEXT: mv s0, a0
-; RV64ZFINX-NEXT: call __muldf3@plt
+; RV64ZFINX-NEXT: call __muldf3
; RV64ZFINX-NEXT: li a1, -1
; RV64ZFINX-NEXT: slli a1, a1, 63
; RV64ZFINX-NEXT: or a1, a0, a1
; RV64ZFINX-NEXT: mv a0, s0
-; RV64ZFINX-NEXT: call __muldf3@plt
+; RV64ZFINX-NEXT: call __muldf3
; RV64ZFINX-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64ZFINX-NEXT: ld s0, 0(sp) # 8-byte Folded Reload
; RV64ZFINX-NEXT: addi sp, sp, 16
diff --git a/llvm/test/CodeGen/RISCV/float-br-fcmp.ll b/llvm/test/CodeGen/RISCV/float-br-fcmp.ll
index 71b0f77015b5..35caa627b57b 100644
--- a/llvm/test/CodeGen/RISCV/float-br-fcmp.ll
+++ b/llvm/test/CodeGen/RISCV/float-br-fcmp.ll
@@ -22,7 +22,7 @@ define void @br_fcmp_false(float %a, float %b) nounwind {
; RV32IF-NEXT: .LBB0_2: # %if.else
; RV32IF-NEXT: addi sp, sp, -16
; RV32IF-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IF-NEXT: call abort@plt
+; RV32IF-NEXT: call abort
;
; RV64IF-LABEL: br_fcmp_false:
; RV64IF: # %bb.0:
@@ -33,7 +33,7 @@ define void @br_fcmp_false(float %a, float %b) nounwind {
; RV64IF-NEXT: .LBB0_2: # %if.else
; RV64IF-NEXT: addi sp, sp, -16
; RV64IF-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
-; RV64IF-NEXT: call abort@plt
+; RV64IF-NEXT: call abort
;
; RV32IZFINX-LABEL: br_fcmp_false:
; RV32IZFINX: # %bb.0:
@@ -44,7 +44,7 @@ define void @br_fcmp_false(float %a, float %b) nounwind {
; RV32IZFINX-NEXT: .LBB0_2: # %if.else
; RV32IZFINX-NEXT: addi sp, sp, -16
; RV32IZFINX-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IZFINX-NEXT: call abort@plt
+; RV32IZFINX-NEXT: call abort
;
; RV64IZFINX-LABEL: br_fcmp_false:
; RV64IZFINX: # %bb.0:
@@ -55,7 +55,7 @@ define void @br_fcmp_false(float %a, float %b) nounwind {
; RV64IZFINX-NEXT: .LBB0_2: # %if.else
; RV64IZFINX-NEXT: addi sp, sp, -16
; RV64IZFINX-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
-; RV64IZFINX-NEXT: call abort@plt
+; RV64IZFINX-NEXT: call abort
%1 = fcmp false float %a, %b
br i1 %1, label %if.then, label %if.else
if.then:
@@ -75,7 +75,7 @@ define void @br_fcmp_oeq(float %a, float %b) nounwind {
; RV32IF-NEXT: .LBB1_2: # %if.then
; RV32IF-NEXT: addi sp, sp, -16
; RV32IF-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IF-NEXT: call abort@plt
+; RV32IF-NEXT: call abort
;
; RV64IF-LABEL: br_fcmp_oeq:
; RV64IF: # %bb.0:
@@ -86,7 +86,7 @@ define void @br_fcmp_oeq(float %a, float %b) nounwind {
; RV64IF-NEXT: .LBB1_2: # %if.then
; RV64IF-NEXT: addi sp, sp, -16
; RV64IF-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
-; RV64IF-NEXT: call abort@plt
+; RV64IF-NEXT: call abort
;
; RV32IZFINX-LABEL: br_fcmp_oeq:
; RV32IZFINX: # %bb.0:
@@ -97,7 +97,7 @@ define void @br_fcmp_oeq(float %a, float %b) nounwind {
; RV32IZFINX-NEXT: .LBB1_2: # %if.then
; RV32IZFINX-NEXT: addi sp, sp, -16
; RV32IZFINX-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IZFINX-NEXT: call abort@plt
+; RV32IZFINX-NEXT: call abort
;
; RV64IZFINX-LABEL: br_fcmp_oeq:
; RV64IZFINX: # %bb.0:
@@ -108,7 +108,7 @@ define void @br_fcmp_oeq(float %a, float %b) nounwind {
; RV64IZFINX-NEXT: .LBB1_2: # %if.then
; RV64IZFINX-NEXT: addi sp, sp, -16
; RV64IZFINX-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
-; RV64IZFINX-NEXT: call abort@plt
+; RV64IZFINX-NEXT: call abort
%1 = fcmp oeq float %a, %b
br i1 %1, label %if.then, label %if.else
if.else:
@@ -131,7 +131,7 @@ define void @br_fcmp_oeq_alt(float %a, float %b) nounwind {
; RV32IF-NEXT: .LBB2_2: # %if.then
; RV32IF-NEXT: addi sp, sp, -16
; RV32IF-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IF-NEXT: call abort@plt
+; RV32IF-NEXT: call abort
;
; RV64IF-LABEL: br_fcmp_oeq_alt:
; RV64IF: # %bb.0:
@@ -142,7 +142,7 @@ define void @br_fcmp_oeq_alt(float %a, float %b) nounwind {
; RV64IF-NEXT: .LBB2_2: # %if.then
; RV64IF-NEXT: addi sp, sp, -16
; RV64IF-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
-; RV64IF-NEXT: call abort@plt
+; RV64IF-NEXT: call abort
;
; RV32IZFINX-LABEL: br_fcmp_oeq_alt:
; RV32IZFINX: # %bb.0:
@@ -153,7 +153,7 @@ define void @br_fcmp_oeq_alt(float %a, float %b) nounwind {
; RV32IZFINX-NEXT: .LBB2_2: # %if.then
; RV32IZFINX-NEXT: addi sp, sp, -16
; RV32IZFINX-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IZFINX-NEXT: call abort@plt
+; RV32IZFINX-NEXT: call abort
;
; RV64IZFINX-LABEL: br_fcmp_oeq_alt:
; RV64IZFINX: # %bb.0:
@@ -164,7 +164,7 @@ define void @br_fcmp_oeq_alt(float %a, float %b) nounwind {
; RV64IZFINX-NEXT: .LBB2_2: # %if.then
; RV64IZFINX-NEXT: addi sp, sp, -16
; RV64IZFINX-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
-; RV64IZFINX-NEXT: call abort@plt
+; RV64IZFINX-NEXT: call abort
%1 = fcmp oeq float %a, %b
br i1 %1, label %if.then, label %if.else
if.then:
@@ -184,7 +184,7 @@ define void @br_fcmp_ogt(float %a, float %b) nounwind {
; RV32IF-NEXT: .LBB3_2: # %if.then
; RV32IF-NEXT: addi sp, sp, -16
; RV32IF-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IF-NEXT: call abort@plt
+; RV32IF-NEXT: call abort
;
; RV64IF-LABEL: br_fcmp_ogt:
; RV64IF: # %bb.0:
@@ -195,7 +195,7 @@ define void @br_fcmp_ogt(float %a, float %b) nounwind {
; RV64IF-NEXT: .LBB3_2: # %if.then
; RV64IF-NEXT: addi sp, sp, -16
; RV64IF-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
-; RV64IF-NEXT: call abort@plt
+; RV64IF-NEXT: call abort
;
; RV32IZFINX-LABEL: br_fcmp_ogt:
; RV32IZFINX: # %bb.0:
@@ -206,7 +206,7 @@ define void @br_fcmp_ogt(float %a, float %b) nounwind {
; RV32IZFINX-NEXT: .LBB3_2: # %if.then
; RV32IZFINX-NEXT: addi sp, sp, -16
; RV32IZFINX-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IZFINX-NEXT: call abort@plt
+; RV32IZFINX-NEXT: call abort
;
; RV64IZFINX-LABEL: br_fcmp_ogt:
; RV64IZFINX: # %bb.0:
@@ -217,7 +217,7 @@ define void @br_fcmp_ogt(float %a, float %b) nounwind {
; RV64IZFINX-NEXT: .LBB3_2: # %if.then
; RV64IZFINX-NEXT: addi sp, sp, -16
; RV64IZFINX-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
-; RV64IZFINX-NEXT: call abort@plt
+; RV64IZFINX-NEXT: call abort
%1 = fcmp ogt float %a, %b
br i1 %1, label %if.then, label %if.else
if.else:
@@ -237,7 +237,7 @@ define void @br_fcmp_oge(float %a, float %b) nounwind {
; RV32IF-NEXT: .LBB4_2: # %if.then
; RV32IF-NEXT: addi sp, sp, -16
; RV32IF-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IF-NEXT: call abort@plt
+; RV32IF-NEXT: call abort
;
; RV64IF-LABEL: br_fcmp_oge:
; RV64IF: # %bb.0:
@@ -248,7 +248,7 @@ define void @br_fcmp_oge(float %a, float %b) nounwind {
; RV64IF-NEXT: .LBB4_2: # %if.then
; RV64IF-NEXT: addi sp, sp, -16
; RV64IF-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
-; RV64IF-NEXT: call abort@plt
+; RV64IF-NEXT: call abort
;
; RV32IZFINX-LABEL: br_fcmp_oge:
; RV32IZFINX: # %bb.0:
@@ -259,7 +259,7 @@ define void @br_fcmp_oge(float %a, float %b) nounwind {
; RV32IZFINX-NEXT: .LBB4_2: # %if.then
; RV32IZFINX-NEXT: addi sp, sp, -16
; RV32IZFINX-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IZFINX-NEXT: call abort@plt
+; RV32IZFINX-NEXT: call abort
;
; RV64IZFINX-LABEL: br_fcmp_oge:
; RV64IZFINX: # %bb.0:
@@ -270,7 +270,7 @@ define void @br_fcmp_oge(float %a, float %b) nounwind {
; RV64IZFINX-NEXT: .LBB4_2: # %if.then
; RV64IZFINX-NEXT: addi sp, sp, -16
; RV64IZFINX-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
-; RV64IZFINX-NEXT: call abort@plt
+; RV64IZFINX-NEXT: call abort
%1 = fcmp oge float %a, %b
br i1 %1, label %if.then, label %if.else
if.else:
@@ -290,7 +290,7 @@ define void @br_fcmp_olt(float %a, float %b) nounwind {
; RV32IF-NEXT: .LBB5_2: # %if.then
; RV32IF-NEXT: addi sp, sp, -16
; RV32IF-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IF-NEXT: call abort@plt
+; RV32IF-NEXT: call abort
;
; RV64IF-LABEL: br_fcmp_olt:
; RV64IF: # %bb.0:
@@ -301,7 +301,7 @@ define void @br_fcmp_olt(float %a, float %b) nounwind {
; RV64IF-NEXT: .LBB5_2: # %if.then
; RV64IF-NEXT: addi sp, sp, -16
; RV64IF-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
-; RV64IF-NEXT: call abort@plt
+; RV64IF-NEXT: call abort
;
; RV32IZFINX-LABEL: br_fcmp_olt:
; RV32IZFINX: # %bb.0:
@@ -312,7 +312,7 @@ define void @br_fcmp_olt(float %a, float %b) nounwind {
; RV32IZFINX-NEXT: .LBB5_2: # %if.then
; RV32IZFINX-NEXT: addi sp, sp, -16
; RV32IZFINX-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IZFINX-NEXT: call abort@plt
+; RV32IZFINX-NEXT: call abort
;
; RV64IZFINX-LABEL: br_fcmp_olt:
; RV64IZFINX: # %bb.0:
@@ -323,7 +323,7 @@ define void @br_fcmp_olt(float %a, float %b) nounwind {
; RV64IZFINX-NEXT: .LBB5_2: # %if.then
; RV64IZFINX-NEXT: addi sp, sp, -16
; RV64IZFINX-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
-; RV64IZFINX-NEXT: call abort@plt
+; RV64IZFINX-NEXT: call abort
%1 = fcmp olt float %a, %b
br i1 %1, label %if.then, label %if.else
if.else:
@@ -343,7 +343,7 @@ define void @br_fcmp_ole(float %a, float %b) nounwind {
; RV32IF-NEXT: .LBB6_2: # %if.then
; RV32IF-NEXT: addi sp, sp, -16
; RV32IF-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IF-NEXT: call abort@plt
+; RV32IF-NEXT: call abort
;
; RV64IF-LABEL: br_fcmp_ole:
; RV64IF: # %bb.0:
@@ -354,7 +354,7 @@ define void @br_fcmp_ole(float %a, float %b) nounwind {
; RV64IF-NEXT: .LBB6_2: # %if.then
; RV64IF-NEXT: addi sp, sp, -16
; RV64IF-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
-; RV64IF-NEXT: call abort@plt
+; RV64IF-NEXT: call abort
;
; RV32IZFINX-LABEL: br_fcmp_ole:
; RV32IZFINX: # %bb.0:
@@ -365,7 +365,7 @@ define void @br_fcmp_ole(float %a, float %b) nounwind {
; RV32IZFINX-NEXT: .LBB6_2: # %if.then
; RV32IZFINX-NEXT: addi sp, sp, -16
; RV32IZFINX-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IZFINX-NEXT: call abort@plt
+; RV32IZFINX-NEXT: call abort
;
; RV64IZFINX-LABEL: br_fcmp_ole:
; RV64IZFINX: # %bb.0:
@@ -376,7 +376,7 @@ define void @br_fcmp_ole(float %a, float %b) nounwind {
; RV64IZFINX-NEXT: .LBB6_2: # %if.then
; RV64IZFINX-NEXT: addi sp, sp, -16
; RV64IZFINX-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
-; RV64IZFINX-NEXT: call abort@plt
+; RV64IZFINX-NEXT: call abort
%1 = fcmp ole float %a, %b
br i1 %1, label %if.then, label %if.else
if.else:
@@ -398,7 +398,7 @@ define void @br_fcmp_one(float %a, float %b) nounwind {
; RV32IF-NEXT: .LBB7_2: # %if.then
; RV32IF-NEXT: addi sp, sp, -16
; RV32IF-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IF-NEXT: call abort@plt
+; RV32IF-NEXT: call abort
;
; RV64IF-LABEL: br_fcmp_one:
; RV64IF: # %bb.0:
@@ -411,7 +411,7 @@ define void @br_fcmp_one(float %a, float %b) nounwind {
; RV64IF-NEXT: .LBB7_2: # %if.then
; RV64IF-NEXT: addi sp, sp, -16
; RV64IF-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
-; RV64IF-NEXT: call abort@plt
+; RV64IF-NEXT: call abort
;
; RV32IZFINX-LABEL: br_fcmp_one:
; RV32IZFINX: # %bb.0:
@@ -424,7 +424,7 @@ define void @br_fcmp_one(float %a, float %b) nounwind {
; RV32IZFINX-NEXT: .LBB7_2: # %if.then
; RV32IZFINX-NEXT: addi sp, sp, -16
; RV32IZFINX-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IZFINX-NEXT: call abort@plt
+; RV32IZFINX-NEXT: call abort
;
; RV64IZFINX-LABEL: br_fcmp_one:
; RV64IZFINX: # %bb.0:
@@ -437,7 +437,7 @@ define void @br_fcmp_one(float %a, float %b) nounwind {
; RV64IZFINX-NEXT: .LBB7_2: # %if.then
; RV64IZFINX-NEXT: addi sp, sp, -16
; RV64IZFINX-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
-; RV64IZFINX-NEXT: call abort@plt
+; RV64IZFINX-NEXT: call abort
%1 = fcmp one float %a, %b
br i1 %1, label %if.then, label %if.else
if.else:
@@ -459,7 +459,7 @@ define void @br_fcmp_ord(float %a, float %b) nounwind {
; RV32IF-NEXT: .LBB8_2: # %if.then
; RV32IF-NEXT: addi sp, sp, -16
; RV32IF-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IF-NEXT: call abort@plt
+; RV32IF-NEXT: call abort
;
; RV64IF-LABEL: br_fcmp_ord:
; RV64IF: # %bb.0:
@@ -472,7 +472,7 @@ define void @br_fcmp_ord(float %a, float %b) nounwind {
; RV64IF-NEXT: .LBB8_2: # %if.then
; RV64IF-NEXT: addi sp, sp, -16
; RV64IF-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
-; RV64IF-NEXT: call abort@plt
+; RV64IF-NEXT: call abort
;
; RV32IZFINX-LABEL: br_fcmp_ord:
; RV32IZFINX: # %bb.0:
@@ -485,7 +485,7 @@ define void @br_fcmp_ord(float %a, float %b) nounwind {
; RV32IZFINX-NEXT: .LBB8_2: # %if.then
; RV32IZFINX-NEXT: addi sp, sp, -16
; RV32IZFINX-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IZFINX-NEXT: call abort@plt
+; RV32IZFINX-NEXT: call abort
;
; RV64IZFINX-LABEL: br_fcmp_ord:
; RV64IZFINX: # %bb.0:
@@ -498,7 +498,7 @@ define void @br_fcmp_ord(float %a, float %b) nounwind {
; RV64IZFINX-NEXT: .LBB8_2: # %if.then
; RV64IZFINX-NEXT: addi sp, sp, -16
; RV64IZFINX-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
-; RV64IZFINX-NEXT: call abort@plt
+; RV64IZFINX-NEXT: call abort
%1 = fcmp ord float %a, %b
br i1 %1, label %if.then, label %if.else
if.else:
@@ -520,7 +520,7 @@ define void @br_fcmp_ueq(float %a, float %b) nounwind {
; RV32IF-NEXT: .LBB9_2: # %if.then
; RV32IF-NEXT: addi sp, sp, -16
; RV32IF-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IF-NEXT: call abort@plt
+; RV32IF-NEXT: call abort
;
; RV64IF-LABEL: br_fcmp_ueq:
; RV64IF: # %bb.0:
@@ -533,7 +533,7 @@ define void @br_fcmp_ueq(float %a, float %b) nounwind {
; RV64IF-NEXT: .LBB9_2: # %if.then
; RV64IF-NEXT: addi sp, sp, -16
; RV64IF-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
-; RV64IF-NEXT: call abort@plt
+; RV64IF-NEXT: call abort
;
; RV32IZFINX-LABEL: br_fcmp_ueq:
; RV32IZFINX: # %bb.0:
@@ -546,7 +546,7 @@ define void @br_fcmp_ueq(float %a, float %b) nounwind {
; RV32IZFINX-NEXT: .LBB9_2: # %if.then
; RV32IZFINX-NEXT: addi sp, sp, -16
; RV32IZFINX-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IZFINX-NEXT: call abort@plt
+; RV32IZFINX-NEXT: call abort
;
; RV64IZFINX-LABEL: br_fcmp_ueq:
; RV64IZFINX: # %bb.0:
@@ -559,7 +559,7 @@ define void @br_fcmp_ueq(float %a, float %b) nounwind {
; RV64IZFINX-NEXT: .LBB9_2: # %if.then
; RV64IZFINX-NEXT: addi sp, sp, -16
; RV64IZFINX-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
-; RV64IZFINX-NEXT: call abort@plt
+; RV64IZFINX-NEXT: call abort
%1 = fcmp ueq float %a, %b
br i1 %1, label %if.then, label %if.else
if.else:
@@ -579,7 +579,7 @@ define void @br_fcmp_ugt(float %a, float %b) nounwind {
; RV32IF-NEXT: .LBB10_2: # %if.then
; RV32IF-NEXT: addi sp, sp, -16
; RV32IF-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IF-NEXT: call abort@plt
+; RV32IF-NEXT: call abort
;
; RV64IF-LABEL: br_fcmp_ugt:
; RV64IF: # %bb.0:
@@ -590,7 +590,7 @@ define void @br_fcmp_ugt(float %a, float %b) nounwind {
; RV64IF-NEXT: .LBB10_2: # %if.then
; RV64IF-NEXT: addi sp, sp, -16
; RV64IF-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
-; RV64IF-NEXT: call abort@plt
+; RV64IF-NEXT: call abort
;
; RV32IZFINX-LABEL: br_fcmp_ugt:
; RV32IZFINX: # %bb.0:
@@ -601,7 +601,7 @@ define void @br_fcmp_ugt(float %a, float %b) nounwind {
; RV32IZFINX-NEXT: .LBB10_2: # %if.then
; RV32IZFINX-NEXT: addi sp, sp, -16
; RV32IZFINX-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IZFINX-NEXT: call abort@plt
+; RV32IZFINX-NEXT: call abort
;
; RV64IZFINX-LABEL: br_fcmp_ugt:
; RV64IZFINX: # %bb.0:
@@ -612,7 +612,7 @@ define void @br_fcmp_ugt(float %a, float %b) nounwind {
; RV64IZFINX-NEXT: .LBB10_2: # %if.then
; RV64IZFINX-NEXT: addi sp, sp, -16
; RV64IZFINX-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
-; RV64IZFINX-NEXT: call abort@plt
+; RV64IZFINX-NEXT: call abort
%1 = fcmp ugt float %a, %b
br i1 %1, label %if.then, label %if.else
if.else:
@@ -632,7 +632,7 @@ define void @br_fcmp_uge(float %a, float %b) nounwind {
; RV32IF-NEXT: .LBB11_2: # %if.then
; RV32IF-NEXT: addi sp, sp, -16
; RV32IF-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IF-NEXT: call abort@plt
+; RV32IF-NEXT: call abort
;
; RV64IF-LABEL: br_fcmp_uge:
; RV64IF: # %bb.0:
@@ -643,7 +643,7 @@ define void @br_fcmp_uge(float %a, float %b) nounwind {
; RV64IF-NEXT: .LBB11_2: # %if.then
; RV64IF-NEXT: addi sp, sp, -16
; RV64IF-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
-; RV64IF-NEXT: call abort@plt
+; RV64IF-NEXT: call abort
;
; RV32IZFINX-LABEL: br_fcmp_uge:
; RV32IZFINX: # %bb.0:
@@ -654,7 +654,7 @@ define void @br_fcmp_uge(float %a, float %b) nounwind {
; RV32IZFINX-NEXT: .LBB11_2: # %if.then
; RV32IZFINX-NEXT: addi sp, sp, -16
; RV32IZFINX-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IZFINX-NEXT: call abort@plt
+; RV32IZFINX-NEXT: call abort
;
; RV64IZFINX-LABEL: br_fcmp_uge:
; RV64IZFINX: # %bb.0:
@@ -665,7 +665,7 @@ define void @br_fcmp_uge(float %a, float %b) nounwind {
; RV64IZFINX-NEXT: .LBB11_2: # %if.then
; RV64IZFINX-NEXT: addi sp, sp, -16
; RV64IZFINX-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
-; RV64IZFINX-NEXT: call abort@plt
+; RV64IZFINX-NEXT: call abort
%1 = fcmp uge float %a, %b
br i1 %1, label %if.then, label %if.else
if.else:
@@ -685,7 +685,7 @@ define void @br_fcmp_ult(float %a, float %b) nounwind {
; RV32IF-NEXT: .LBB12_2: # %if.then
; RV32IF-NEXT: addi sp, sp, -16
; RV32IF-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IF-NEXT: call abort@plt
+; RV32IF-NEXT: call abort
;
; RV64IF-LABEL: br_fcmp_ult:
; RV64IF: # %bb.0:
@@ -696,7 +696,7 @@ define void @br_fcmp_ult(float %a, float %b) nounwind {
; RV64IF-NEXT: .LBB12_2: # %if.then
; RV64IF-NEXT: addi sp, sp, -16
; RV64IF-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
-; RV64IF-NEXT: call abort@plt
+; RV64IF-NEXT: call abort
;
; RV32IZFINX-LABEL: br_fcmp_ult:
; RV32IZFINX: # %bb.0:
@@ -707,7 +707,7 @@ define void @br_fcmp_ult(float %a, float %b) nounwind {
; RV32IZFINX-NEXT: .LBB12_2: # %if.then
; RV32IZFINX-NEXT: addi sp, sp, -16
; RV32IZFINX-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IZFINX-NEXT: call abort@plt
+; RV32IZFINX-NEXT: call abort
;
; RV64IZFINX-LABEL: br_fcmp_ult:
; RV64IZFINX: # %bb.0:
@@ -718,7 +718,7 @@ define void @br_fcmp_ult(float %a, float %b) nounwind {
; RV64IZFINX-NEXT: .LBB12_2: # %if.then
; RV64IZFINX-NEXT: addi sp, sp, -16
; RV64IZFINX-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
-; RV64IZFINX-NEXT: call abort@plt
+; RV64IZFINX-NEXT: call abort
%1 = fcmp ult float %a, %b
br i1 %1, label %if.then, label %if.else
if.else:
@@ -738,7 +738,7 @@ define void @br_fcmp_ule(float %a, float %b) nounwind {
; RV32IF-NEXT: .LBB13_2: # %if.then
; RV32IF-NEXT: addi sp, sp, -16
; RV32IF-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IF-NEXT: call abort@plt
+; RV32IF-NEXT: call abort
;
; RV64IF-LABEL: br_fcmp_ule:
; RV64IF: # %bb.0:
@@ -749,7 +749,7 @@ define void @br_fcmp_ule(float %a, float %b) nounwind {
; RV64IF-NEXT: .LBB13_2: # %if.then
; RV64IF-NEXT: addi sp, sp, -16
; RV64IF-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
-; RV64IF-NEXT: call abort@plt
+; RV64IF-NEXT: call abort
;
; RV32IZFINX-LABEL: br_fcmp_ule:
; RV32IZFINX: # %bb.0:
@@ -760,7 +760,7 @@ define void @br_fcmp_ule(float %a, float %b) nounwind {
; RV32IZFINX-NEXT: .LBB13_2: # %if.then
; RV32IZFINX-NEXT: addi sp, sp, -16
; RV32IZFINX-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IZFINX-NEXT: call abort@plt
+; RV32IZFINX-NEXT: call abort
;
; RV64IZFINX-LABEL: br_fcmp_ule:
; RV64IZFINX: # %bb.0:
@@ -771,7 +771,7 @@ define void @br_fcmp_ule(float %a, float %b) nounwind {
; RV64IZFINX-NEXT: .LBB13_2: # %if.then
; RV64IZFINX-NEXT: addi sp, sp, -16
; RV64IZFINX-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
-; RV64IZFINX-NEXT: call abort@plt
+; RV64IZFINX-NEXT: call abort
%1 = fcmp ule float %a, %b
br i1 %1, label %if.then, label %if.else
if.else:
@@ -791,7 +791,7 @@ define void @br_fcmp_une(float %a, float %b) nounwind {
; RV32IF-NEXT: .LBB14_2: # %if.then
; RV32IF-NEXT: addi sp, sp, -16
; RV32IF-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IF-NEXT: call abort@plt
+; RV32IF-NEXT: call abort
;
; RV64IF-LABEL: br_fcmp_une:
; RV64IF: # %bb.0:
@@ -802,7 +802,7 @@ define void @br_fcmp_une(float %a, float %b) nounwind {
; RV64IF-NEXT: .LBB14_2: # %if.then
; RV64IF-NEXT: addi sp, sp, -16
; RV64IF-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
-; RV64IF-NEXT: call abort@plt
+; RV64IF-NEXT: call abort
;
; RV32IZFINX-LABEL: br_fcmp_une:
; RV32IZFINX: # %bb.0:
@@ -813,7 +813,7 @@ define void @br_fcmp_une(float %a, float %b) nounwind {
; RV32IZFINX-NEXT: .LBB14_2: # %if.then
; RV32IZFINX-NEXT: addi sp, sp, -16
; RV32IZFINX-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IZFINX-NEXT: call abort@plt
+; RV32IZFINX-NEXT: call abort
;
; RV64IZFINX-LABEL: br_fcmp_une:
; RV64IZFINX: # %bb.0:
@@ -824,7 +824,7 @@ define void @br_fcmp_une(float %a, float %b) nounwind {
; RV64IZFINX-NEXT: .LBB14_2: # %if.then
; RV64IZFINX-NEXT: addi sp, sp, -16
; RV64IZFINX-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
-; RV64IZFINX-NEXT: call abort@plt
+; RV64IZFINX-NEXT: call abort
%1 = fcmp une float %a, %b
br i1 %1, label %if.then, label %if.else
if.else:
@@ -846,7 +846,7 @@ define void @br_fcmp_uno(float %a, float %b) nounwind {
; RV32IF-NEXT: .LBB15_2: # %if.then
; RV32IF-NEXT: addi sp, sp, -16
; RV32IF-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IF-NEXT: call abort@plt
+; RV32IF-NEXT: call abort
;
; RV64IF-LABEL: br_fcmp_uno:
; RV64IF: # %bb.0:
@@ -859,7 +859,7 @@ define void @br_fcmp_uno(float %a, float %b) nounwind {
; RV64IF-NEXT: .LBB15_2: # %if.then
; RV64IF-NEXT: addi sp, sp, -16
; RV64IF-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
-; RV64IF-NEXT: call abort@plt
+; RV64IF-NEXT: call abort
;
; RV32IZFINX-LABEL: br_fcmp_uno:
; RV32IZFINX: # %bb.0:
@@ -872,7 +872,7 @@ define void @br_fcmp_uno(float %a, float %b) nounwind {
; RV32IZFINX-NEXT: .LBB15_2: # %if.then
; RV32IZFINX-NEXT: addi sp, sp, -16
; RV32IZFINX-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IZFINX-NEXT: call abort@plt
+; RV32IZFINX-NEXT: call abort
;
; RV64IZFINX-LABEL: br_fcmp_uno:
; RV64IZFINX: # %bb.0:
@@ -885,7 +885,7 @@ define void @br_fcmp_uno(float %a, float %b) nounwind {
; RV64IZFINX-NEXT: .LBB15_2: # %if.then
; RV64IZFINX-NEXT: addi sp, sp, -16
; RV64IZFINX-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
-; RV64IZFINX-NEXT: call abort@plt
+; RV64IZFINX-NEXT: call abort
%1 = fcmp uno float %a, %b
br i1 %1, label %if.then, label %if.else
if.else:
@@ -905,7 +905,7 @@ define void @br_fcmp_true(float %a, float %b) nounwind {
; RV32IF-NEXT: .LBB16_2: # %if.then
; RV32IF-NEXT: addi sp, sp, -16
; RV32IF-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IF-NEXT: call abort@plt
+; RV32IF-NEXT: call abort
;
; RV64IF-LABEL: br_fcmp_true:
; RV64IF: # %bb.0:
@@ -916,7 +916,7 @@ define void @br_fcmp_true(float %a, float %b) nounwind {
; RV64IF-NEXT: .LBB16_2: # %if.then
; RV64IF-NEXT: addi sp, sp, -16
; RV64IF-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
-; RV64IF-NEXT: call abort@plt
+; RV64IF-NEXT: call abort
;
; RV32IZFINX-LABEL: br_fcmp_true:
; RV32IZFINX: # %bb.0:
@@ -927,7 +927,7 @@ define void @br_fcmp_true(float %a, float %b) nounwind {
; RV32IZFINX-NEXT: .LBB16_2: # %if.then
; RV32IZFINX-NEXT: addi sp, sp, -16
; RV32IZFINX-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IZFINX-NEXT: call abort@plt
+; RV32IZFINX-NEXT: call abort
;
; RV64IZFINX-LABEL: br_fcmp_true:
; RV64IZFINX: # %bb.0:
@@ -938,7 +938,7 @@ define void @br_fcmp_true(float %a, float %b) nounwind {
; RV64IZFINX-NEXT: .LBB16_2: # %if.then
; RV64IZFINX-NEXT: addi sp, sp, -16
; RV64IZFINX-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
-; RV64IZFINX-NEXT: call abort@plt
+; RV64IZFINX-NEXT: call abort
%1 = fcmp true float %a, %b
br i1 %1, label %if.then, label %if.else
if.else:
@@ -958,12 +958,12 @@ define i32 @br_fcmp_store_load_stack_slot(float %a, float %b) nounwind {
; RV32IF-NEXT: fsw fs0, 8(sp) # 4-byte Folded Spill
; RV32IF-NEXT: fmv.w.x fs0, zero
; RV32IF-NEXT: fmv.s fa0, fs0
-; RV32IF-NEXT: call dummy@plt
+; RV32IF-NEXT: call dummy
; RV32IF-NEXT: feq.s a0, fa0, fs0
; RV32IF-NEXT: beqz a0, .LBB17_3
; RV32IF-NEXT: # %bb.1: # %if.end
; RV32IF-NEXT: fmv.s fa0, fs0
-; RV32IF-NEXT: call dummy@plt
+; RV32IF-NEXT: call dummy
; RV32IF-NEXT: feq.s a0, fa0, fs0
; RV32IF-NEXT: beqz a0, .LBB17_3
; RV32IF-NEXT: # %bb.2: # %if.end4
@@ -973,7 +973,7 @@ define i32 @br_fcmp_store_load_stack_slot(float %a, float %b) nounwind {
; RV32IF-NEXT: addi sp, sp, 16
; RV32IF-NEXT: ret
; RV32IF-NEXT: .LBB17_3: # %if.then
-; RV32IF-NEXT: call abort@plt
+; RV32IF-NEXT: call abort
;
; RV64IF-LABEL: br_fcmp_store_load_stack_slot:
; RV64IF: # %bb.0: # %entry
@@ -982,12 +982,12 @@ define i32 @br_fcmp_store_load_stack_slot(float %a, float %b) nounwind {
; RV64IF-NEXT: fsw fs0, 4(sp) # 4-byte Folded Spill
; RV64IF-NEXT: fmv.w.x fs0, zero
; RV64IF-NEXT: fmv.s fa0, fs0
-; RV64IF-NEXT: call dummy@plt
+; RV64IF-NEXT: call dummy
; RV64IF-NEXT: feq.s a0, fa0, fs0
; RV64IF-NEXT: beqz a0, .LBB17_3
; RV64IF-NEXT: # %bb.1: # %if.end
; RV64IF-NEXT: fmv.s fa0, fs0
-; RV64IF-NEXT: call dummy@plt
+; RV64IF-NEXT: call dummy
; RV64IF-NEXT: feq.s a0, fa0, fs0
; RV64IF-NEXT: beqz a0, .LBB17_3
; RV64IF-NEXT: # %bb.2: # %if.end4
@@ -997,19 +997,19 @@ define i32 @br_fcmp_store_load_stack_slot(float %a, float %b) nounwind {
; RV64IF-NEXT: addi sp, sp, 16
; RV64IF-NEXT: ret
; RV64IF-NEXT: .LBB17_3: # %if.then
-; RV64IF-NEXT: call abort@plt
+; RV64IF-NEXT: call abort
;
; RV32IZFINX-LABEL: br_fcmp_store_load_stack_slot:
; RV32IZFINX: # %bb.0: # %entry
; RV32IZFINX-NEXT: addi sp, sp, -16
; RV32IZFINX-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32IZFINX-NEXT: li a0, 0
-; RV32IZFINX-NEXT: call dummy@plt
+; RV32IZFINX-NEXT: call dummy
; RV32IZFINX-NEXT: feq.s a0, a0, zero
; RV32IZFINX-NEXT: beqz a0, .LBB17_3
; RV32IZFINX-NEXT: # %bb.1: # %if.end
; RV32IZFINX-NEXT: li a0, 0
-; RV32IZFINX-NEXT: call dummy@plt
+; RV32IZFINX-NEXT: call dummy
; RV32IZFINX-NEXT: feq.s a0, a0, zero
; RV32IZFINX-NEXT: beqz a0, .LBB17_3
; RV32IZFINX-NEXT: # %bb.2: # %if.end4
@@ -1018,19 +1018,19 @@ define i32 @br_fcmp_store_load_stack_slot(float %a, float %b) nounwind {
; RV32IZFINX-NEXT: addi sp, sp, 16
; RV32IZFINX-NEXT: ret
; RV32IZFINX-NEXT: .LBB17_3: # %if.then
-; RV32IZFINX-NEXT: call abort@plt
+; RV32IZFINX-NEXT: call abort
;
; RV64IZFINX-LABEL: br_fcmp_store_load_stack_slot:
; RV64IZFINX: # %bb.0: # %entry
; RV64IZFINX-NEXT: addi sp, sp, -16
; RV64IZFINX-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64IZFINX-NEXT: li a0, 0
-; RV64IZFINX-NEXT: call dummy@plt
+; RV64IZFINX-NEXT: call dummy
; RV64IZFINX-NEXT: feq.s a0, a0, zero
; RV64IZFINX-NEXT: beqz a0, .LBB17_3
; RV64IZFINX-NEXT: # %bb.1: # %if.end
; RV64IZFINX-NEXT: li a0, 0
-; RV64IZFINX-NEXT: call dummy@plt
+; RV64IZFINX-NEXT: call dummy
; RV64IZFINX-NEXT: feq.s a0, a0, zero
; RV64IZFINX-NEXT: beqz a0, .LBB17_3
; RV64IZFINX-NEXT: # %bb.2: # %if.end4
@@ -1039,7 +1039,7 @@ define i32 @br_fcmp_store_load_stack_slot(float %a, float %b) nounwind {
; RV64IZFINX-NEXT: addi sp, sp, 16
; RV64IZFINX-NEXT: ret
; RV64IZFINX-NEXT: .LBB17_3: # %if.then
-; RV64IZFINX-NEXT: call abort@plt
+; RV64IZFINX-NEXT: call abort
entry:
%call = call float @dummy(float 0.000000e+00)
%cmp = fcmp une float %call, 0.000000e+00
diff --git a/llvm/test/CodeGen/RISCV/float-convert-strict.ll b/llvm/test/CodeGen/RISCV/float-convert-strict.ll
index 6168ade0839f..402d6f0362e6 100644
--- a/llvm/test/CodeGen/RISCV/float-convert-strict.ll
+++ b/llvm/test/CodeGen/RISCV/float-convert-strict.ll
@@ -35,7 +35,7 @@ define i32 @fcvt_w_s(float %a) nounwind strictfp {
; RV32I: # %bb.0:
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT: call __fixsfsi@plt
+; RV32I-NEXT: call __fixsfsi
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
@@ -44,7 +44,7 @@ define i32 @fcvt_w_s(float %a) nounwind strictfp {
; RV64I: # %bb.0:
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT: call __fixsfsi@plt
+; RV64I-NEXT: call __fixsfsi
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
@@ -68,7 +68,7 @@ define i32 @fcvt_wu_s(float %a) nounwind strictfp {
; RV32I: # %bb.0:
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT: call __fixunssfsi@plt
+; RV32I-NEXT: call __fixunssfsi
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
@@ -77,7 +77,7 @@ define i32 @fcvt_wu_s(float %a) nounwind strictfp {
; RV64I: # %bb.0:
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT: call __fixunssfsi@plt
+; RV64I-NEXT: call __fixunssfsi
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
@@ -107,7 +107,7 @@ define i32 @fcvt_wu_s_multiple_use(float %x, ptr %y) nounwind strictfp {
; RV32I: # %bb.0:
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT: call __fixunssfsi@plt
+; RV32I-NEXT: call __fixunssfsi
; RV32I-NEXT: seqz a1, a0
; RV32I-NEXT: add a0, a0, a1
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
@@ -118,7 +118,7 @@ define i32 @fcvt_wu_s_multiple_use(float %x, ptr %y) nounwind strictfp {
; RV64I: # %bb.0:
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT: call __fixunssfsi@plt
+; RV64I-NEXT: call __fixunssfsi
; RV64I-NEXT: seqz a1, a0
; RV64I-NEXT: add a0, a0, a1
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
@@ -145,7 +145,7 @@ define float @fcvt_s_w(i32 %a) nounwind strictfp {
; RV32I: # %bb.0:
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT: call __floatsisf@plt
+; RV32I-NEXT: call __floatsisf
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
@@ -155,7 +155,7 @@ define float @fcvt_s_w(i32 %a) nounwind strictfp {
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT: sext.w a0, a0
-; RV64I-NEXT: call __floatsisf@plt
+; RV64I-NEXT: call __floatsisf
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
@@ -182,7 +182,7 @@ define float @fcvt_s_w_load(ptr %p) nounwind strictfp {
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT: lw a0, 0(a0)
-; RV32I-NEXT: call __floatsisf@plt
+; RV32I-NEXT: call __floatsisf
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
@@ -192,7 +192,7 @@ define float @fcvt_s_w_load(ptr %p) nounwind strictfp {
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT: lw a0, 0(a0)
-; RV64I-NEXT: call __floatsisf@plt
+; RV64I-NEXT: call __floatsisf
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
@@ -216,7 +216,7 @@ define float @fcvt_s_wu(i32 %a) nounwind strictfp {
; RV32I: # %bb.0:
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT: call __floatunsisf@plt
+; RV32I-NEXT: call __floatunsisf
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
@@ -226,7 +226,7 @@ define float @fcvt_s_wu(i32 %a) nounwind strictfp {
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT: sext.w a0, a0
-; RV64I-NEXT: call __floatunsisf@plt
+; RV64I-NEXT: call __floatunsisf
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
@@ -265,7 +265,7 @@ define float @fcvt_s_wu_load(ptr %p) nounwind strictfp {
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT: lw a0, 0(a0)
-; RV32I-NEXT: call __floatunsisf@plt
+; RV32I-NEXT: call __floatunsisf
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
@@ -275,7 +275,7 @@ define float @fcvt_s_wu_load(ptr %p) nounwind strictfp {
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT: lw a0, 0(a0)
-; RV64I-NEXT: call __floatunsisf@plt
+; RV64I-NEXT: call __floatunsisf
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
@@ -289,7 +289,7 @@ define i64 @fcvt_l_s(float %a) nounwind strictfp {
; RV32IF: # %bb.0:
; RV32IF-NEXT: addi sp, sp, -16
; RV32IF-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IF-NEXT: call __fixsfdi@plt
+; RV32IF-NEXT: call __fixsfdi
; RV32IF-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32IF-NEXT: addi sp, sp, 16
; RV32IF-NEXT: ret
@@ -303,7 +303,7 @@ define i64 @fcvt_l_s(float %a) nounwind strictfp {
; RV32IZFINX: # %bb.0:
; RV32IZFINX-NEXT: addi sp, sp, -16
; RV32IZFINX-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IZFINX-NEXT: call __fixsfdi@plt
+; RV32IZFINX-NEXT: call __fixsfdi
; RV32IZFINX-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32IZFINX-NEXT: addi sp, sp, 16
; RV32IZFINX-NEXT: ret
@@ -317,7 +317,7 @@ define i64 @fcvt_l_s(float %a) nounwind strictfp {
; RV32I: # %bb.0:
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT: call __fixsfdi@plt
+; RV32I-NEXT: call __fixsfdi
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
@@ -326,7 +326,7 @@ define i64 @fcvt_l_s(float %a) nounwind strictfp {
; RV64I: # %bb.0:
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT: call __fixsfdi@plt
+; RV64I-NEXT: call __fixsfdi
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
@@ -340,7 +340,7 @@ define i64 @fcvt_lu_s(float %a) nounwind strictfp {
; RV32IF: # %bb.0:
; RV32IF-NEXT: addi sp, sp, -16
; RV32IF-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IF-NEXT: call __fixunssfdi@plt
+; RV32IF-NEXT: call __fixunssfdi
; RV32IF-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32IF-NEXT: addi sp, sp, 16
; RV32IF-NEXT: ret
@@ -354,7 +354,7 @@ define i64 @fcvt_lu_s(float %a) nounwind strictfp {
; RV32IZFINX: # %bb.0:
; RV32IZFINX-NEXT: addi sp, sp, -16
; RV32IZFINX-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IZFINX-NEXT: call __fixunssfdi@plt
+; RV32IZFINX-NEXT: call __fixunssfdi
; RV32IZFINX-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32IZFINX-NEXT: addi sp, sp, 16
; RV32IZFINX-NEXT: ret
@@ -368,7 +368,7 @@ define i64 @fcvt_lu_s(float %a) nounwind strictfp {
; RV32I: # %bb.0:
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT: call __fixunssfdi@plt
+; RV32I-NEXT: call __fixunssfdi
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
@@ -377,7 +377,7 @@ define i64 @fcvt_lu_s(float %a) nounwind strictfp {
; RV64I: # %bb.0:
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT: call __fixunssfdi@plt
+; RV64I-NEXT: call __fixunssfdi
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
@@ -391,7 +391,7 @@ define float @fcvt_s_l(i64 %a) nounwind strictfp {
; RV32IF: # %bb.0:
; RV32IF-NEXT: addi sp, sp, -16
; RV32IF-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IF-NEXT: call __floatdisf@plt
+; RV32IF-NEXT: call __floatdisf
; RV32IF-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32IF-NEXT: addi sp, sp, 16
; RV32IF-NEXT: ret
@@ -405,7 +405,7 @@ define float @fcvt_s_l(i64 %a) nounwind strictfp {
; RV32IZFINX: # %bb.0:
; RV32IZFINX-NEXT: addi sp, sp, -16
; RV32IZFINX-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IZFINX-NEXT: call __floatdisf@plt
+; RV32IZFINX-NEXT: call __floatdisf
; RV32IZFINX-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32IZFINX-NEXT: addi sp, sp, 16
; RV32IZFINX-NEXT: ret
@@ -419,7 +419,7 @@ define float @fcvt_s_l(i64 %a) nounwind strictfp {
; RV32I: # %bb.0:
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT: call __floatdisf@plt
+; RV32I-NEXT: call __floatdisf
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
@@ -428,7 +428,7 @@ define float @fcvt_s_l(i64 %a) nounwind strictfp {
; RV64I: # %bb.0:
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT: call __floatdisf@plt
+; RV64I-NEXT: call __floatdisf
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
@@ -442,7 +442,7 @@ define float @fcvt_s_lu(i64 %a) nounwind strictfp {
; RV32IF: # %bb.0:
; RV32IF-NEXT: addi sp, sp, -16
; RV32IF-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IF-NEXT: call __floatundisf@plt
+; RV32IF-NEXT: call __floatundisf
; RV32IF-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32IF-NEXT: addi sp, sp, 16
; RV32IF-NEXT: ret
@@ -456,7 +456,7 @@ define float @fcvt_s_lu(i64 %a) nounwind strictfp {
; RV32IZFINX: # %bb.0:
; RV32IZFINX-NEXT: addi sp, sp, -16
; RV32IZFINX-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IZFINX-NEXT: call __floatundisf@plt
+; RV32IZFINX-NEXT: call __floatundisf
; RV32IZFINX-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32IZFINX-NEXT: addi sp, sp, 16
; RV32IZFINX-NEXT: ret
@@ -470,7 +470,7 @@ define float @fcvt_s_lu(i64 %a) nounwind strictfp {
; RV32I: # %bb.0:
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT: call __floatundisf@plt
+; RV32I-NEXT: call __floatundisf
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
@@ -479,7 +479,7 @@ define float @fcvt_s_lu(i64 %a) nounwind strictfp {
; RV64I: # %bb.0:
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT: call __floatundisf@plt
+; RV64I-NEXT: call __floatundisf
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
@@ -503,7 +503,7 @@ define float @fcvt_s_w_i8(i8 signext %a) nounwind strictfp {
; RV32I: # %bb.0:
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT: call __floatsisf@plt
+; RV32I-NEXT: call __floatsisf
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
@@ -512,7 +512,7 @@ define float @fcvt_s_w_i8(i8 signext %a) nounwind strictfp {
; RV64I: # %bb.0:
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT: call __floatsisf@plt
+; RV64I-NEXT: call __floatsisf
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
@@ -536,7 +536,7 @@ define float @fcvt_s_wu_i8(i8 zeroext %a) nounwind strictfp {
; RV32I: # %bb.0:
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT: call __floatunsisf@plt
+; RV32I-NEXT: call __floatunsisf
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
@@ -545,7 +545,7 @@ define float @fcvt_s_wu_i8(i8 zeroext %a) nounwind strictfp {
; RV64I: # %bb.0:
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT: call __floatunsisf@plt
+; RV64I-NEXT: call __floatunsisf
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
@@ -569,7 +569,7 @@ define float @fcvt_s_w_i16(i16 signext %a) nounwind strictfp {
; RV32I: # %bb.0:
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT: call __floatsisf@plt
+; RV32I-NEXT: call __floatsisf
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
@@ -578,7 +578,7 @@ define float @fcvt_s_w_i16(i16 signext %a) nounwind strictfp {
; RV64I: # %bb.0:
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT: call __floatsisf@plt
+; RV64I-NEXT: call __floatsisf
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
@@ -602,7 +602,7 @@ define float @fcvt_s_wu_i16(i16 zeroext %a) nounwind strictfp {
; RV32I: # %bb.0:
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT: call __floatunsisf@plt
+; RV32I-NEXT: call __floatunsisf
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
@@ -611,7 +611,7 @@ define float @fcvt_s_wu_i16(i16 zeroext %a) nounwind strictfp {
; RV64I: # %bb.0:
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT: call __floatunsisf@plt
+; RV64I-NEXT: call __floatunsisf
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
@@ -661,7 +661,7 @@ define signext i32 @fcvt_s_w_demanded_bits(i32 signext %0, ptr %1) nounwind stri
; RV32I-NEXT: mv s0, a1
; RV32I-NEXT: addi s1, a0, 1
; RV32I-NEXT: mv a0, s1
-; RV32I-NEXT: call __floatsisf@plt
+; RV32I-NEXT: call __floatsisf
; RV32I-NEXT: sw a0, 0(s0)
; RV32I-NEXT: mv a0, s1
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
@@ -679,7 +679,7 @@ define signext i32 @fcvt_s_w_demanded_bits(i32 signext %0, ptr %1) nounwind stri
; RV64I-NEXT: mv s0, a1
; RV64I-NEXT: addiw s1, a0, 1
; RV64I-NEXT: mv a0, s1
-; RV64I-NEXT: call __floatsisf@plt
+; RV64I-NEXT: call __floatsisf
; RV64I-NEXT: sw a0, 0(s0)
; RV64I-NEXT: mv a0, s1
; RV64I-NEXT: ld ra, 24(sp) # 8-byte Folded Reload
@@ -732,7 +732,7 @@ define signext i32 @fcvt_s_wu_demanded_bits(i32 signext %0, ptr %1) nounwind str
; RV32I-NEXT: mv s0, a1
; RV32I-NEXT: addi s1, a0, 1
; RV32I-NEXT: mv a0, s1
-; RV32I-NEXT: call __floatunsisf@plt
+; RV32I-NEXT: call __floatunsisf
; RV32I-NEXT: sw a0, 0(s0)
; RV32I-NEXT: mv a0, s1
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
@@ -750,7 +750,7 @@ define signext i32 @fcvt_s_wu_demanded_bits(i32 signext %0, ptr %1) nounwind str
; RV64I-NEXT: mv s0, a1
; RV64I-NEXT: addiw s1, a0, 1
; RV64I-NEXT: mv a0, s1
-; RV64I-NEXT: call __floatunsisf@plt
+; RV64I-NEXT: call __floatunsisf
; RV64I-NEXT: sw a0, 0(s0)
; RV64I-NEXT: mv a0, s1
; RV64I-NEXT: ld ra, 24(sp) # 8-byte Folded Reload
diff --git a/llvm/test/CodeGen/RISCV/float-convert.ll b/llvm/test/CodeGen/RISCV/float-convert.ll
index 235979b12221..f1e444b5b624 100644
--- a/llvm/test/CodeGen/RISCV/float-convert.ll
+++ b/llvm/test/CodeGen/RISCV/float-convert.ll
@@ -27,7 +27,7 @@ define i32 @fcvt_w_s(float %a) nounwind {
; RV32I: # %bb.0:
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT: call __fixsfsi@plt
+; RV32I-NEXT: call __fixsfsi
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
@@ -36,7 +36,7 @@ define i32 @fcvt_w_s(float %a) nounwind {
; RV64I: # %bb.0:
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT: call __fixsfsi@plt
+; RV64I-NEXT: call __fixsfsi
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
@@ -73,10 +73,10 @@ define i32 @fcvt_w_s_sat(float %a) nounwind {
; RV32I-NEXT: sw s3, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT: mv s0, a0
; RV32I-NEXT: lui a1, 847872
-; RV32I-NEXT: call __gesf2@plt
+; RV32I-NEXT: call __gesf2
; RV32I-NEXT: mv s2, a0
; RV32I-NEXT: mv a0, s0
-; RV32I-NEXT: call __fixsfsi@plt
+; RV32I-NEXT: call __fixsfsi
; RV32I-NEXT: mv s1, a0
; RV32I-NEXT: lui s3, 524288
; RV32I-NEXT: bgez s2, .LBB1_2
@@ -86,14 +86,14 @@ define i32 @fcvt_w_s_sat(float %a) nounwind {
; RV32I-NEXT: lui a1, 323584
; RV32I-NEXT: addi a1, a1, -1
; RV32I-NEXT: mv a0, s0
-; RV32I-NEXT: call __gtsf2@plt
+; RV32I-NEXT: call __gtsf2
; RV32I-NEXT: blez a0, .LBB1_4
; RV32I-NEXT: # %bb.3: # %start
; RV32I-NEXT: addi s1, s3, -1
; RV32I-NEXT: .LBB1_4: # %start
; RV32I-NEXT: mv a0, s0
; RV32I-NEXT: mv a1, s0
-; RV32I-NEXT: call __unordsf2@plt
+; RV32I-NEXT: call __unordsf2
; RV32I-NEXT: snez a0, a0
; RV32I-NEXT: addi a0, a0, -1
; RV32I-NEXT: and a0, a0, s1
@@ -115,10 +115,10 @@ define i32 @fcvt_w_s_sat(float %a) nounwind {
; RV64I-NEXT: sd s3, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT: mv s0, a0
; RV64I-NEXT: lui a1, 847872
-; RV64I-NEXT: call __gesf2@plt
+; RV64I-NEXT: call __gesf2
; RV64I-NEXT: mv s2, a0
; RV64I-NEXT: mv a0, s0
-; RV64I-NEXT: call __fixsfdi@plt
+; RV64I-NEXT: call __fixsfdi
; RV64I-NEXT: mv s1, a0
; RV64I-NEXT: lui s3, 524288
; RV64I-NEXT: bgez s2, .LBB1_2
@@ -128,14 +128,14 @@ define i32 @fcvt_w_s_sat(float %a) nounwind {
; RV64I-NEXT: lui a1, 323584
; RV64I-NEXT: addiw a1, a1, -1
; RV64I-NEXT: mv a0, s0
-; RV64I-NEXT: call __gtsf2@plt
+; RV64I-NEXT: call __gtsf2
; RV64I-NEXT: blez a0, .LBB1_4
; RV64I-NEXT: # %bb.3: # %start
; RV64I-NEXT: addiw s1, s3, -1
; RV64I-NEXT: .LBB1_4: # %start
; RV64I-NEXT: mv a0, s0
; RV64I-NEXT: mv a1, s0
-; RV64I-NEXT: call __unordsf2@plt
+; RV64I-NEXT: call __unordsf2
; RV64I-NEXT: snez a0, a0
; RV64I-NEXT: addi a0, a0, -1
; RV64I-NEXT: and a0, a0, s1
@@ -167,7 +167,7 @@ define i32 @fcvt_wu_s(float %a) nounwind {
; RV32I: # %bb.0:
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT: call __fixunssfsi@plt
+; RV32I-NEXT: call __fixunssfsi
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
@@ -176,7 +176,7 @@ define i32 @fcvt_wu_s(float %a) nounwind {
; RV64I: # %bb.0:
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT: call __fixunssfsi@plt
+; RV64I-NEXT: call __fixunssfsi
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
@@ -205,7 +205,7 @@ define i32 @fcvt_wu_s_multiple_use(float %x, ptr %y) nounwind {
; RV32I: # %bb.0:
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT: call __fixunssfsi@plt
+; RV32I-NEXT: call __fixunssfsi
; RV32I-NEXT: seqz a1, a0
; RV32I-NEXT: add a0, a0, a1
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
@@ -216,7 +216,7 @@ define i32 @fcvt_wu_s_multiple_use(float %x, ptr %y) nounwind {
; RV64I: # %bb.0:
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT: call __fixunssfsi@plt
+; RV64I-NEXT: call __fixunssfsi
; RV64I-NEXT: seqz a1, a0
; RV64I-NEXT: add a0, a0, a1
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
@@ -277,16 +277,16 @@ define i32 @fcvt_wu_s_sat(float %a) nounwind {
; RV32I-NEXT: sw s1, 4(sp) # 4-byte Folded Spill
; RV32I-NEXT: mv s0, a0
; RV32I-NEXT: li a1, 0
-; RV32I-NEXT: call __gesf2@plt
+; RV32I-NEXT: call __gesf2
; RV32I-NEXT: slti a0, a0, 0
; RV32I-NEXT: addi s1, a0, -1
; RV32I-NEXT: mv a0, s0
-; RV32I-NEXT: call __fixunssfsi@plt
+; RV32I-NEXT: call __fixunssfsi
; RV32I-NEXT: and s1, s1, a0
; RV32I-NEXT: lui a1, 325632
; RV32I-NEXT: addi a1, a1, -1
; RV32I-NEXT: mv a0, s0
-; RV32I-NEXT: call __gtsf2@plt
+; RV32I-NEXT: call __gtsf2
; RV32I-NEXT: sgtz a0, a0
; RV32I-NEXT: neg a0, a0
; RV32I-NEXT: or a0, a0, s1
@@ -305,15 +305,15 @@ define i32 @fcvt_wu_s_sat(float %a) nounwind {
; RV64I-NEXT: sd s2, 0(sp) # 8-byte Folded Spill
; RV64I-NEXT: mv s2, a0
; RV64I-NEXT: li a1, 0
-; RV64I-NEXT: call __gesf2@plt
+; RV64I-NEXT: call __gesf2
; RV64I-NEXT: mv s0, a0
; RV64I-NEXT: mv a0, s2
-; RV64I-NEXT: call __fixunssfdi@plt
+; RV64I-NEXT: call __fixunssfdi
; RV64I-NEXT: mv s1, a0
; RV64I-NEXT: lui a1, 325632
; RV64I-NEXT: addiw a1, a1, -1
; RV64I-NEXT: mv a0, s2
-; RV64I-NEXT: call __gtsf2@plt
+; RV64I-NEXT: call __gtsf2
; RV64I-NEXT: blez a0, .LBB4_2
; RV64I-NEXT: # %bb.1: # %start
; RV64I-NEXT: li a0, -1
@@ -352,7 +352,7 @@ define i32 @fmv_x_w(float %a, float %b) nounwind {
; RV32I: # %bb.0:
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT: call __addsf3@plt
+; RV32I-NEXT: call __addsf3
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
@@ -361,7 +361,7 @@ define i32 @fmv_x_w(float %a, float %b) nounwind {
; RV64I: # %bb.0:
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT: call __addsf3@plt
+; RV64I-NEXT: call __addsf3
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
@@ -386,7 +386,7 @@ define float @fcvt_s_w(i32 %a) nounwind {
; RV32I: # %bb.0:
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT: call __floatsisf@plt
+; RV32I-NEXT: call __floatsisf
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
@@ -396,7 +396,7 @@ define float @fcvt_s_w(i32 %a) nounwind {
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT: sext.w a0, a0
-; RV64I-NEXT: call __floatsisf@plt
+; RV64I-NEXT: call __floatsisf
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
@@ -422,7 +422,7 @@ define float @fcvt_s_w_load(ptr %p) nounwind {
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT: lw a0, 0(a0)
-; RV32I-NEXT: call __floatsisf@plt
+; RV32I-NEXT: call __floatsisf
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
@@ -432,7 +432,7 @@ define float @fcvt_s_w_load(ptr %p) nounwind {
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT: lw a0, 0(a0)
-; RV64I-NEXT: call __floatsisf@plt
+; RV64I-NEXT: call __floatsisf
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
@@ -456,7 +456,7 @@ define float @fcvt_s_wu(i32 %a) nounwind {
; RV32I: # %bb.0:
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT: call __floatunsisf@plt
+; RV32I-NEXT: call __floatunsisf
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
@@ -466,7 +466,7 @@ define float @fcvt_s_wu(i32 %a) nounwind {
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT: sext.w a0, a0
-; RV64I-NEXT: call __floatunsisf@plt
+; RV64I-NEXT: call __floatunsisf
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
@@ -504,7 +504,7 @@ define float @fcvt_s_wu_load(ptr %p) nounwind {
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT: lw a0, 0(a0)
-; RV32I-NEXT: call __floatunsisf@plt
+; RV32I-NEXT: call __floatunsisf
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
@@ -514,7 +514,7 @@ define float @fcvt_s_wu_load(ptr %p) nounwind {
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT: lw a0, 0(a0)
-; RV64I-NEXT: call __floatunsisf@plt
+; RV64I-NEXT: call __floatunsisf
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
@@ -540,7 +540,7 @@ define float @fmv_w_x(i32 %a, i32 %b) nounwind {
; RV32I: # %bb.0:
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT: call __addsf3@plt
+; RV32I-NEXT: call __addsf3
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
@@ -549,7 +549,7 @@ define float @fmv_w_x(i32 %a, i32 %b) nounwind {
; RV64I: # %bb.0:
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT: call __addsf3@plt
+; RV64I-NEXT: call __addsf3
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
@@ -565,7 +565,7 @@ define i64 @fcvt_l_s(float %a) nounwind {
; RV32IF: # %bb.0:
; RV32IF-NEXT: addi sp, sp, -16
; RV32IF-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IF-NEXT: call __fixsfdi@plt
+; RV32IF-NEXT: call __fixsfdi
; RV32IF-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32IF-NEXT: addi sp, sp, 16
; RV32IF-NEXT: ret
@@ -579,7 +579,7 @@ define i64 @fcvt_l_s(float %a) nounwind {
; RV32IZFINX: # %bb.0:
; RV32IZFINX-NEXT: addi sp, sp, -16
; RV32IZFINX-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IZFINX-NEXT: call __fixsfdi@plt
+; RV32IZFINX-NEXT: call __fixsfdi
; RV32IZFINX-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32IZFINX-NEXT: addi sp, sp, 16
; RV32IZFINX-NEXT: ret
@@ -593,7 +593,7 @@ define i64 @fcvt_l_s(float %a) nounwind {
; RV32I: # %bb.0:
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT: call __fixsfdi@plt
+; RV32I-NEXT: call __fixsfdi
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
@@ -602,7 +602,7 @@ define i64 @fcvt_l_s(float %a) nounwind {
; RV64I: # %bb.0:
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT: call __fixsfdi@plt
+; RV64I-NEXT: call __fixsfdi
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
@@ -621,7 +621,7 @@ define i64 @fcvt_l_s_sat(float %a) nounwind {
; RV32IF-NEXT: lui a0, 913408
; RV32IF-NEXT: fmv.w.x fa5, a0
; RV32IF-NEXT: fle.s s0, fa5, fa0
-; RV32IF-NEXT: call __fixsfdi@plt
+; RV32IF-NEXT: call __fixsfdi
; RV32IF-NEXT: lui a4, 524288
; RV32IF-NEXT: lui a2, 524288
; RV32IF-NEXT: beqz s0, .LBB12_2
@@ -668,7 +668,7 @@ define i64 @fcvt_l_s_sat(float %a) nounwind {
; RV32IZFINX-NEXT: lui a0, 913408
; RV32IZFINX-NEXT: fle.s s1, a0, s0
; RV32IZFINX-NEXT: mv a0, s0
-; RV32IZFINX-NEXT: call __fixsfdi@plt
+; RV32IZFINX-NEXT: call __fixsfdi
; RV32IZFINX-NEXT: lui a4, 524288
; RV32IZFINX-NEXT: lui a2, 524288
; RV32IZFINX-NEXT: beqz s1, .LBB12_2
@@ -717,10 +717,10 @@ define i64 @fcvt_l_s_sat(float %a) nounwind {
; RV32I-NEXT: sw s5, 4(sp) # 4-byte Folded Spill
; RV32I-NEXT: mv s0, a0
; RV32I-NEXT: lui a1, 913408
-; RV32I-NEXT: call __gesf2@plt
+; RV32I-NEXT: call __gesf2
; RV32I-NEXT: mv s1, a0
; RV32I-NEXT: mv a0, s0
-; RV32I-NEXT: call __fixsfdi@plt
+; RV32I-NEXT: call __fixsfdi
; RV32I-NEXT: mv s2, a0
; RV32I-NEXT: mv s3, a1
; RV32I-NEXT: lui s5, 524288
@@ -731,7 +731,7 @@ define i64 @fcvt_l_s_sat(float %a) nounwind {
; RV32I-NEXT: lui a1, 389120
; RV32I-NEXT: addi a1, a1, -1
; RV32I-NEXT: mv a0, s0
-; RV32I-NEXT: call __gtsf2@plt
+; RV32I-NEXT: call __gtsf2
; RV32I-NEXT: mv s4, a0
; RV32I-NEXT: blez a0, .LBB12_4
; RV32I-NEXT: # %bb.3: # %start
@@ -739,7 +739,7 @@ define i64 @fcvt_l_s_sat(float %a) nounwind {
; RV32I-NEXT: .LBB12_4: # %start
; RV32I-NEXT: mv a0, s0
; RV32I-NEXT: mv a1, s0
-; RV32I-NEXT: call __unordsf2@plt
+; RV32I-NEXT: call __unordsf2
; RV32I-NEXT: snez a0, a0
; RV32I-NEXT: addi a0, a0, -1
; RV32I-NEXT: and a1, a0, s3
@@ -770,10 +770,10 @@ define i64 @fcvt_l_s_sat(float %a) nounwind {
; RV64I-NEXT: sd s3, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT: mv s0, a0
; RV64I-NEXT: lui a1, 913408
-; RV64I-NEXT: call __gesf2@plt
+; RV64I-NEXT: call __gesf2
; RV64I-NEXT: mv s2, a0
; RV64I-NEXT: mv a0, s0
-; RV64I-NEXT: call __fixsfdi@plt
+; RV64I-NEXT: call __fixsfdi
; RV64I-NEXT: mv s1, a0
; RV64I-NEXT: li s3, -1
; RV64I-NEXT: bgez s2, .LBB12_2
@@ -783,14 +783,14 @@ define i64 @fcvt_l_s_sat(float %a) nounwind {
; RV64I-NEXT: lui a1, 389120
; RV64I-NEXT: addiw a1, a1, -1
; RV64I-NEXT: mv a0, s0
-; RV64I-NEXT: call __gtsf2@plt
+; RV64I-NEXT: call __gtsf2
; RV64I-NEXT: blez a0, .LBB12_4
; RV64I-NEXT: # %bb.3: # %start
; RV64I-NEXT: srli s1, s3, 1
; RV64I-NEXT: .LBB12_4: # %start
; RV64I-NEXT: mv a0, s0
; RV64I-NEXT: mv a1, s0
-; RV64I-NEXT: call __unordsf2@plt
+; RV64I-NEXT: call __unordsf2
; RV64I-NEXT: snez a0, a0
; RV64I-NEXT: addi a0, a0, -1
; RV64I-NEXT: and a0, a0, s1
@@ -812,7 +812,7 @@ define i64 @fcvt_lu_s(float %a) nounwind {
; RV32IF: # %bb.0:
; RV32IF-NEXT: addi sp, sp, -16
; RV32IF-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IF-NEXT: call __fixunssfdi@plt
+; RV32IF-NEXT: call __fixunssfdi
; RV32IF-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32IF-NEXT: addi sp, sp, 16
; RV32IF-NEXT: ret
@@ -826,7 +826,7 @@ define i64 @fcvt_lu_s(float %a) nounwind {
; RV32IZFINX: # %bb.0:
; RV32IZFINX-NEXT: addi sp, sp, -16
; RV32IZFINX-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IZFINX-NEXT: call __fixunssfdi@plt
+; RV32IZFINX-NEXT: call __fixunssfdi
; RV32IZFINX-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32IZFINX-NEXT: addi sp, sp, 16
; RV32IZFINX-NEXT: ret
@@ -840,7 +840,7 @@ define i64 @fcvt_lu_s(float %a) nounwind {
; RV32I: # %bb.0:
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT: call __fixunssfdi@plt
+; RV32I-NEXT: call __fixunssfdi
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
@@ -849,7 +849,7 @@ define i64 @fcvt_lu_s(float %a) nounwind {
; RV64I: # %bb.0:
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT: call __fixunssfdi@plt
+; RV64I-NEXT: call __fixunssfdi
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
@@ -868,7 +868,7 @@ define i64 @fcvt_lu_s_sat(float %a) nounwind {
; RV32IF-NEXT: fmv.w.x fa5, zero
; RV32IF-NEXT: fle.s a0, fa5, fa0
; RV32IF-NEXT: neg s0, a0
-; RV32IF-NEXT: call __fixunssfdi@plt
+; RV32IF-NEXT: call __fixunssfdi
; RV32IF-NEXT: lui a2, %hi(.LCPI14_0)
; RV32IF-NEXT: flw fa5, %lo(.LCPI14_0)(a2)
; RV32IF-NEXT: and a0, s0, a0
@@ -902,7 +902,7 @@ define i64 @fcvt_lu_s_sat(float %a) nounwind {
; RV32IZFINX-NEXT: fle.s a0, zero, a0
; RV32IZFINX-NEXT: neg s1, a0
; RV32IZFINX-NEXT: mv a0, s0
-; RV32IZFINX-NEXT: call __fixunssfdi@plt
+; RV32IZFINX-NEXT: call __fixunssfdi
; RV32IZFINX-NEXT: lui a2, %hi(.LCPI14_0)
; RV32IZFINX-NEXT: lw a2, %lo(.LCPI14_0)(a2)
; RV32IZFINX-NEXT: and a0, s1, a0
@@ -936,17 +936,17 @@ define i64 @fcvt_lu_s_sat(float %a) nounwind {
; RV32I-NEXT: sw s3, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT: mv s0, a0
; RV32I-NEXT: li a1, 0
-; RV32I-NEXT: call __gesf2@plt
+; RV32I-NEXT: call __gesf2
; RV32I-NEXT: slti a0, a0, 0
; RV32I-NEXT: addi s2, a0, -1
; RV32I-NEXT: mv a0, s0
-; RV32I-NEXT: call __fixunssfdi@plt
+; RV32I-NEXT: call __fixunssfdi
; RV32I-NEXT: mv s1, a1
; RV32I-NEXT: and s3, s2, a0
; RV32I-NEXT: lui a1, 391168
; RV32I-NEXT: addi a1, a1, -1
; RV32I-NEXT: mv a0, s0
-; RV32I-NEXT: call __gtsf2@plt
+; RV32I-NEXT: call __gtsf2
; RV32I-NEXT: sgtz a0, a0
; RV32I-NEXT: neg a1, a0
; RV32I-NEXT: or a0, a1, s3
@@ -968,16 +968,16 @@ define i64 @fcvt_lu_s_sat(float %a) nounwind {
; RV64I-NEXT: sd s1, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT: mv s0, a0
; RV64I-NEXT: li a1, 0
-; RV64I-NEXT: call __gesf2@plt
+; RV64I-NEXT: call __gesf2
; RV64I-NEXT: slti a0, a0, 0
; RV64I-NEXT: addi s1, a0, -1
; RV64I-NEXT: mv a0, s0
-; RV64I-NEXT: call __fixunssfdi@plt
+; RV64I-NEXT: call __fixunssfdi
; RV64I-NEXT: and s1, s1, a0
; RV64I-NEXT: lui a1, 391168
; RV64I-NEXT: addiw a1, a1, -1
; RV64I-NEXT: mv a0, s0
-; RV64I-NEXT: call __gtsf2@plt
+; RV64I-NEXT: call __gtsf2
; RV64I-NEXT: sgtz a0, a0
; RV64I-NEXT: neg a0, a0
; RV64I-NEXT: or a0, a0, s1
@@ -997,7 +997,7 @@ define float @fcvt_s_l(i64 %a) nounwind {
; RV32IF: # %bb.0:
; RV32IF-NEXT: addi sp, sp, -16
; RV32IF-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IF-NEXT: call __floatdisf@plt
+; RV32IF-NEXT: call __floatdisf
; RV32IF-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32IF-NEXT: addi sp, sp, 16
; RV32IF-NEXT: ret
@@ -1011,7 +1011,7 @@ define float @fcvt_s_l(i64 %a) nounwind {
; RV32IZFINX: # %bb.0:
; RV32IZFINX-NEXT: addi sp, sp, -16
; RV32IZFINX-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IZFINX-NEXT: call __floatdisf@plt
+; RV32IZFINX-NEXT: call __floatdisf
; RV32IZFINX-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32IZFINX-NEXT: addi sp, sp, 16
; RV32IZFINX-NEXT: ret
@@ -1025,7 +1025,7 @@ define float @fcvt_s_l(i64 %a) nounwind {
; RV32I: # %bb.0:
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT: call __floatdisf@plt
+; RV32I-NEXT: call __floatdisf
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
@@ -1034,7 +1034,7 @@ define float @fcvt_s_l(i64 %a) nounwind {
; RV64I: # %bb.0:
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT: call __floatdisf@plt
+; RV64I-NEXT: call __floatdisf
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
@@ -1047,7 +1047,7 @@ define float @fcvt_s_lu(i64 %a) nounwind {
; RV32IF: # %bb.0:
; RV32IF-NEXT: addi sp, sp, -16
; RV32IF-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IF-NEXT: call __floatundisf@plt
+; RV32IF-NEXT: call __floatundisf
; RV32IF-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32IF-NEXT: addi sp, sp, 16
; RV32IF-NEXT: ret
@@ -1061,7 +1061,7 @@ define float @fcvt_s_lu(i64 %a) nounwind {
; RV32IZFINX: # %bb.0:
; RV32IZFINX-NEXT: addi sp, sp, -16
; RV32IZFINX-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IZFINX-NEXT: call __floatundisf@plt
+; RV32IZFINX-NEXT: call __floatundisf
; RV32IZFINX-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32IZFINX-NEXT: addi sp, sp, 16
; RV32IZFINX-NEXT: ret
@@ -1075,7 +1075,7 @@ define float @fcvt_s_lu(i64 %a) nounwind {
; RV32I: # %bb.0:
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT: call __floatundisf@plt
+; RV32I-NEXT: call __floatundisf
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
@@ -1084,7 +1084,7 @@ define float @fcvt_s_lu(i64 %a) nounwind {
; RV64I: # %bb.0:
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT: call __floatundisf@plt
+; RV64I-NEXT: call __floatundisf
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
@@ -1107,7 +1107,7 @@ define float @fcvt_s_w_i8(i8 signext %a) nounwind {
; RV32I: # %bb.0:
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT: call __floatsisf@plt
+; RV32I-NEXT: call __floatsisf
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
@@ -1116,7 +1116,7 @@ define float @fcvt_s_w_i8(i8 signext %a) nounwind {
; RV64I: # %bb.0:
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT: call __floatsisf@plt
+; RV64I-NEXT: call __floatsisf
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
@@ -1139,7 +1139,7 @@ define float @fcvt_s_wu_i8(i8 zeroext %a) nounwind {
; RV32I: # %bb.0:
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT: call __floatunsisf@plt
+; RV32I-NEXT: call __floatunsisf
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
@@ -1148,7 +1148,7 @@ define float @fcvt_s_wu_i8(i8 zeroext %a) nounwind {
; RV64I: # %bb.0:
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT: call __floatunsisf@plt
+; RV64I-NEXT: call __floatunsisf
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
@@ -1171,7 +1171,7 @@ define float @fcvt_s_w_i16(i16 signext %a) nounwind {
; RV32I: # %bb.0:
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT: call __floatsisf@plt
+; RV32I-NEXT: call __floatsisf
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
@@ -1180,7 +1180,7 @@ define float @fcvt_s_w_i16(i16 signext %a) nounwind {
; RV64I: # %bb.0:
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT: call __floatsisf@plt
+; RV64I-NEXT: call __floatsisf
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
@@ -1203,7 +1203,7 @@ define float @fcvt_s_wu_i16(i16 zeroext %a) nounwind {
; RV32I: # %bb.0:
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT: call __floatunsisf@plt
+; RV32I-NEXT: call __floatunsisf
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
@@ -1212,7 +1212,7 @@ define float @fcvt_s_wu_i16(i16 zeroext %a) nounwind {
; RV64I: # %bb.0:
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT: call __floatunsisf@plt
+; RV64I-NEXT: call __floatunsisf
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
@@ -1261,7 +1261,7 @@ define signext i32 @fcvt_s_w_demanded_bits(i32 signext %0, ptr %1) nounwind {
; RV32I-NEXT: mv s0, a1
; RV32I-NEXT: addi s1, a0, 1
; RV32I-NEXT: mv a0, s1
-; RV32I-NEXT: call __floatsisf@plt
+; RV32I-NEXT: call __floatsisf
; RV32I-NEXT: sw a0, 0(s0)
; RV32I-NEXT: mv a0, s1
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
@@ -1279,7 +1279,7 @@ define signext i32 @fcvt_s_w_demanded_bits(i32 signext %0, ptr %1) nounwind {
; RV64I-NEXT: mv s0, a1
; RV64I-NEXT: addiw s1, a0, 1
; RV64I-NEXT: mv a0, s1
-; RV64I-NEXT: call __floatsisf@plt
+; RV64I-NEXT: call __floatsisf
; RV64I-NEXT: sw a0, 0(s0)
; RV64I-NEXT: mv a0, s1
; RV64I-NEXT: ld ra, 24(sp) # 8-byte Folded Reload
@@ -1332,7 +1332,7 @@ define signext i32 @fcvt_s_wu_demanded_bits(i32 signext %0, ptr %1) nounwind {
; RV32I-NEXT: mv s0, a1
; RV32I-NEXT: addi s1, a0, 1
; RV32I-NEXT: mv a0, s1
-; RV32I-NEXT: call __floatunsisf@plt
+; RV32I-NEXT: call __floatunsisf
; RV32I-NEXT: sw a0, 0(s0)
; RV32I-NEXT: mv a0, s1
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
@@ -1350,7 +1350,7 @@ define signext i32 @fcvt_s_wu_demanded_bits(i32 signext %0, ptr %1) nounwind {
; RV64I-NEXT: mv s0, a1
; RV64I-NEXT: addiw s1, a0, 1
; RV64I-NEXT: mv a0, s1
-; RV64I-NEXT: call __floatunsisf@plt
+; RV64I-NEXT: call __floatunsisf
; RV64I-NEXT: sw a0, 0(s0)
; RV64I-NEXT: mv a0, s1
; RV64I-NEXT: ld ra, 24(sp) # 8-byte Folded Reload
@@ -1389,7 +1389,7 @@ define signext i16 @fcvt_w_s_i16(float %a) nounwind {
; RV32I: # %bb.0:
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT: call __fixsfsi@plt
+; RV32I-NEXT: call __fixsfsi
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
@@ -1398,7 +1398,7 @@ define signext i16 @fcvt_w_s_i16(float %a) nounwind {
; RV64I: # %bb.0:
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT: call __fixsfdi@plt
+; RV64I-NEXT: call __fixsfdi
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
@@ -1470,10 +1470,10 @@ define signext i16 @fcvt_w_s_sat_i16(float %a) nounwind {
; RV32I-NEXT: sw s2, 0(sp) # 4-byte Folded Spill
; RV32I-NEXT: mv s0, a0
; RV32I-NEXT: lui a1, 815104
-; RV32I-NEXT: call __gesf2@plt
+; RV32I-NEXT: call __gesf2
; RV32I-NEXT: mv s2, a0
; RV32I-NEXT: mv a0, s0
-; RV32I-NEXT: call __fixsfsi@plt
+; RV32I-NEXT: call __fixsfsi
; RV32I-NEXT: mv s1, a0
; RV32I-NEXT: bgez s2, .LBB24_2
; RV32I-NEXT: # %bb.1: # %start
@@ -1482,7 +1482,7 @@ define signext i16 @fcvt_w_s_sat_i16(float %a) nounwind {
; RV32I-NEXT: lui a0, 290816
; RV32I-NEXT: addi a1, a0, -512
; RV32I-NEXT: mv a0, s0
-; RV32I-NEXT: call __gtsf2@plt
+; RV32I-NEXT: call __gtsf2
; RV32I-NEXT: blez a0, .LBB24_4
; RV32I-NEXT: # %bb.3: # %start
; RV32I-NEXT: lui s1, 8
@@ -1490,7 +1490,7 @@ define signext i16 @fcvt_w_s_sat_i16(float %a) nounwind {
; RV32I-NEXT: .LBB24_4: # %start
; RV32I-NEXT: mv a0, s0
; RV32I-NEXT: mv a1, s0
-; RV32I-NEXT: call __unordsf2@plt
+; RV32I-NEXT: call __unordsf2
; RV32I-NEXT: snez a0, a0
; RV32I-NEXT: addi a0, a0, -1
; RV32I-NEXT: and a0, a0, s1
@@ -1512,10 +1512,10 @@ define signext i16 @fcvt_w_s_sat_i16(float %a) nounwind {
; RV64I-NEXT: sd s2, 0(sp) # 8-byte Folded Spill
; RV64I-NEXT: mv s0, a0
; RV64I-NEXT: lui a1, 815104
-; RV64I-NEXT: call __gesf2@plt
+; RV64I-NEXT: call __gesf2
; RV64I-NEXT: mv s2, a0
; RV64I-NEXT: mv a0, s0
-; RV64I-NEXT: call __fixsfdi@plt
+; RV64I-NEXT: call __fixsfdi
; RV64I-NEXT: mv s1, a0
; RV64I-NEXT: bgez s2, .LBB24_2
; RV64I-NEXT: # %bb.1: # %start
@@ -1524,7 +1524,7 @@ define signext i16 @fcvt_w_s_sat_i16(float %a) nounwind {
; RV64I-NEXT: lui a0, 290816
; RV64I-NEXT: addiw a1, a0, -512
; RV64I-NEXT: mv a0, s0
-; RV64I-NEXT: call __gtsf2@plt
+; RV64I-NEXT: call __gtsf2
; RV64I-NEXT: blez a0, .LBB24_4
; RV64I-NEXT: # %bb.3: # %start
; RV64I-NEXT: lui s1, 8
@@ -1532,7 +1532,7 @@ define signext i16 @fcvt_w_s_sat_i16(float %a) nounwind {
; RV64I-NEXT: .LBB24_4: # %start
; RV64I-NEXT: mv a0, s0
; RV64I-NEXT: mv a1, s0
-; RV64I-NEXT: call __unordsf2@plt
+; RV64I-NEXT: call __unordsf2
; RV64I-NEXT: snez a0, a0
; RV64I-NEXT: addi a0, a0, -1
; RV64I-NEXT: and a0, a0, s1
@@ -1575,7 +1575,7 @@ define zeroext i16 @fcvt_wu_s_i16(float %a) nounwind {
; RV32I: # %bb.0:
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT: call __fixunssfsi@plt
+; RV32I-NEXT: call __fixunssfsi
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
@@ -1584,7 +1584,7 @@ define zeroext i16 @fcvt_wu_s_i16(float %a) nounwind {
; RV64I: # %bb.0:
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT: call __fixunssfdi@plt
+; RV64I-NEXT: call __fixunssfdi
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
@@ -1640,15 +1640,15 @@ define zeroext i16 @fcvt_wu_s_sat_i16(float %a) nounwind {
; RV32I-NEXT: sw s2, 0(sp) # 4-byte Folded Spill
; RV32I-NEXT: mv s2, a0
; RV32I-NEXT: li a1, 0
-; RV32I-NEXT: call __gesf2@plt
+; RV32I-NEXT: call __gesf2
; RV32I-NEXT: mv s0, a0
; RV32I-NEXT: mv a0, s2
-; RV32I-NEXT: call __fixunssfsi@plt
+; RV32I-NEXT: call __fixunssfsi
; RV32I-NEXT: mv s1, a0
; RV32I-NEXT: lui a0, 292864
; RV32I-NEXT: addi a1, a0, -256
; RV32I-NEXT: mv a0, s2
-; RV32I-NEXT: call __gtsf2@plt
+; RV32I-NEXT: call __gtsf2
; RV32I-NEXT: lui a1, 16
; RV32I-NEXT: addi a1, a1, -1
; RV32I-NEXT: blez a0, .LBB26_2
@@ -1677,15 +1677,15 @@ define zeroext i16 @fcvt_wu_s_sat_i16(float %a) nounwind {
; RV64I-NEXT: sd s2, 0(sp) # 8-byte Folded Spill
; RV64I-NEXT: mv s2, a0
; RV64I-NEXT: li a1, 0
-; RV64I-NEXT: call __gesf2@plt
+; RV64I-NEXT: call __gesf2
; RV64I-NEXT: mv s0, a0
; RV64I-NEXT: mv a0, s2
-; RV64I-NEXT: call __fixunssfdi@plt
+; RV64I-NEXT: call __fixunssfdi
; RV64I-NEXT: mv s1, a0
; RV64I-NEXT: lui a0, 292864
; RV64I-NEXT: addiw a1, a0, -256
; RV64I-NEXT: mv a0, s2
-; RV64I-NEXT: call __gtsf2@plt
+; RV64I-NEXT: call __gtsf2
; RV64I-NEXT: lui a1, 16
; RV64I-NEXT: addiw a1, a1, -1
; RV64I-NEXT: blez a0, .LBB26_2
@@ -1735,7 +1735,7 @@ define signext i8 @fcvt_w_s_i8(float %a) nounwind {
; RV32I: # %bb.0:
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT: call __fixsfsi@plt
+; RV32I-NEXT: call __fixsfsi
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
@@ -1744,7 +1744,7 @@ define signext i8 @fcvt_w_s_i8(float %a) nounwind {
; RV64I: # %bb.0:
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT: call __fixsfdi@plt
+; RV64I-NEXT: call __fixsfdi
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
@@ -1814,10 +1814,10 @@ define signext i8 @fcvt_w_s_sat_i8(float %a) nounwind {
; RV32I-NEXT: sw s2, 0(sp) # 4-byte Folded Spill
; RV32I-NEXT: mv s0, a0
; RV32I-NEXT: lui a1, 798720
-; RV32I-NEXT: call __gesf2@plt
+; RV32I-NEXT: call __gesf2
; RV32I-NEXT: mv s2, a0
; RV32I-NEXT: mv a0, s0
-; RV32I-NEXT: call __fixsfsi@plt
+; RV32I-NEXT: call __fixsfsi
; RV32I-NEXT: mv s1, a0
; RV32I-NEXT: bgez s2, .LBB28_2
; RV32I-NEXT: # %bb.1: # %start
@@ -1825,14 +1825,14 @@ define signext i8 @fcvt_w_s_sat_i8(float %a) nounwind {
; RV32I-NEXT: .LBB28_2: # %start
; RV32I-NEXT: lui a1, 274400
; RV32I-NEXT: mv a0, s0
-; RV32I-NEXT: call __gtsf2@plt
+; RV32I-NEXT: call __gtsf2
; RV32I-NEXT: blez a0, .LBB28_4
; RV32I-NEXT: # %bb.3: # %start
; RV32I-NEXT: li s1, 127
; RV32I-NEXT: .LBB28_4: # %start
; RV32I-NEXT: mv a0, s0
; RV32I-NEXT: mv a1, s0
-; RV32I-NEXT: call __unordsf2@plt
+; RV32I-NEXT: call __unordsf2
; RV32I-NEXT: snez a0, a0
; RV32I-NEXT: addi a0, a0, -1
; RV32I-NEXT: and a0, a0, s1
@@ -1854,10 +1854,10 @@ define signext i8 @fcvt_w_s_sat_i8(float %a) nounwind {
; RV64I-NEXT: sd s2, 0(sp) # 8-byte Folded Spill
; RV64I-NEXT: mv s0, a0
; RV64I-NEXT: lui a1, 798720
-; RV64I-NEXT: call __gesf2@plt
+; RV64I-NEXT: call __gesf2
; RV64I-NEXT: mv s2, a0
; RV64I-NEXT: mv a0, s0
-; RV64I-NEXT: call __fixsfdi@plt
+; RV64I-NEXT: call __fixsfdi
; RV64I-NEXT: mv s1, a0
; RV64I-NEXT: bgez s2, .LBB28_2
; RV64I-NEXT: # %bb.1: # %start
@@ -1865,14 +1865,14 @@ define signext i8 @fcvt_w_s_sat_i8(float %a) nounwind {
; RV64I-NEXT: .LBB28_2: # %start
; RV64I-NEXT: lui a1, 274400
; RV64I-NEXT: mv a0, s0
-; RV64I-NEXT: call __gtsf2@plt
+; RV64I-NEXT: call __gtsf2
; RV64I-NEXT: blez a0, .LBB28_4
; RV64I-NEXT: # %bb.3: # %start
; RV64I-NEXT: li s1, 127
; RV64I-NEXT: .LBB28_4: # %start
; RV64I-NEXT: mv a0, s0
; RV64I-NEXT: mv a1, s0
-; RV64I-NEXT: call __unordsf2@plt
+; RV64I-NEXT: call __unordsf2
; RV64I-NEXT: snez a0, a0
; RV64I-NEXT: addi a0, a0, -1
; RV64I-NEXT: and a0, a0, s1
@@ -1915,7 +1915,7 @@ define zeroext i8 @fcvt_wu_s_i8(float %a) nounwind {
; RV32I: # %bb.0:
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT: call __fixunssfsi@plt
+; RV32I-NEXT: call __fixunssfsi
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
@@ -1924,7 +1924,7 @@ define zeroext i8 @fcvt_wu_s_i8(float %a) nounwind {
; RV64I: # %bb.0:
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT: call __fixunssfdi@plt
+; RV64I-NEXT: call __fixunssfdi
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
@@ -1978,14 +1978,14 @@ define zeroext i8 @fcvt_wu_s_sat_i8(float %a) nounwind {
; RV32I-NEXT: sw s2, 0(sp) # 4-byte Folded Spill
; RV32I-NEXT: mv s2, a0
; RV32I-NEXT: li a1, 0
-; RV32I-NEXT: call __gesf2@plt
+; RV32I-NEXT: call __gesf2
; RV32I-NEXT: mv s0, a0
; RV32I-NEXT: mv a0, s2
-; RV32I-NEXT: call __fixunssfsi@plt
+; RV32I-NEXT: call __fixunssfsi
; RV32I-NEXT: mv s1, a0
; RV32I-NEXT: lui a1, 276464
; RV32I-NEXT: mv a0, s2
-; RV32I-NEXT: call __gtsf2@plt
+; RV32I-NEXT: call __gtsf2
; RV32I-NEXT: blez a0, .LBB30_2
; RV32I-NEXT: # %bb.1: # %start
; RV32I-NEXT: li a0, 255
@@ -2012,14 +2012,14 @@ define zeroext i8 @fcvt_wu_s_sat_i8(float %a) nounwind {
; RV64I-NEXT: sd s2, 0(sp) # 8-byte Folded Spill
; RV64I-NEXT: mv s2, a0
; RV64I-NEXT: li a1, 0
-; RV64I-NEXT: call __gesf2@plt
+; RV64I-NEXT: call __gesf2
; RV64I-NEXT: mv s0, a0
; RV64I-NEXT: mv a0, s2
-; RV64I-NEXT: call __fixunssfdi@plt
+; RV64I-NEXT: call __fixunssfdi
; RV64I-NEXT: mv s1, a0
; RV64I-NEXT: lui a1, 276464
; RV64I-NEXT: mv a0, s2
-; RV64I-NEXT: call __gtsf2@plt
+; RV64I-NEXT: call __gtsf2
; RV64I-NEXT: blez a0, .LBB30_2
; RV64I-NEXT: # %bb.1: # %start
; RV64I-NEXT: li a0, 255
@@ -2091,16 +2091,16 @@ define zeroext i32 @fcvt_wu_s_sat_zext(float %a) nounwind {
; RV32I-NEXT: sw s1, 4(sp) # 4-byte Folded Spill
; RV32I-NEXT: mv s0, a0
; RV32I-NEXT: li a1, 0
-; RV32I-NEXT: call __gesf2@plt
+; RV32I-NEXT: call __gesf2
; RV32I-NEXT: slti a0, a0, 0
; RV32I-NEXT: addi s1, a0, -1
; RV32I-NEXT: mv a0, s0
-; RV32I-NEXT: call __fixunssfsi@plt
+; RV32I-NEXT: call __fixunssfsi
; RV32I-NEXT: and s1, s1, a0
; RV32I-NEXT: lui a1, 325632
; RV32I-NEXT: addi a1, a1, -1
; RV32I-NEXT: mv a0, s0
-; RV32I-NEXT: call __gtsf2@plt
+; RV32I-NEXT: call __gtsf2
; RV32I-NEXT: sgtz a0, a0
; RV32I-NEXT: neg a0, a0
; RV32I-NEXT: or a0, a0, s1
@@ -2119,15 +2119,15 @@ define zeroext i32 @fcvt_wu_s_sat_zext(float %a) nounwind {
; RV64I-NEXT: sd s2, 0(sp) # 8-byte Folded Spill
; RV64I-NEXT: mv s2, a0
; RV64I-NEXT: li a1, 0
-; RV64I-NEXT: call __gesf2@plt
+; RV64I-NEXT: call __gesf2
; RV64I-NEXT: mv s0, a0
; RV64I-NEXT: mv a0, s2
-; RV64I-NEXT: call __fixunssfdi@plt
+; RV64I-NEXT: call __fixunssfdi
; RV64I-NEXT: mv s1, a0
; RV64I-NEXT: lui a1, 325632
; RV64I-NEXT: addiw a1, a1, -1
; RV64I-NEXT: mv a0, s2
-; RV64I-NEXT: call __gtsf2@plt
+; RV64I-NEXT: call __gtsf2
; RV64I-NEXT: blez a0, .LBB31_2
; RV64I-NEXT: # %bb.1: # %start
; RV64I-NEXT: li a0, -1
@@ -2180,10 +2180,10 @@ define signext i32 @fcvt_w_s_sat_sext(float %a) nounwind {
; RV32I-NEXT: sw s3, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT: mv s0, a0
; RV32I-NEXT: lui a1, 847872
-; RV32I-NEXT: call __gesf2@plt
+; RV32I-NEXT: call __gesf2
; RV32I-NEXT: mv s2, a0
; RV32I-NEXT: mv a0, s0
-; RV32I-NEXT: call __fixsfsi@plt
+; RV32I-NEXT: call __fixsfsi
; RV32I-NEXT: mv s1, a0
; RV32I-NEXT: lui s3, 524288
; RV32I-NEXT: bgez s2, .LBB32_2
@@ -2193,14 +2193,14 @@ define signext i32 @fcvt_w_s_sat_sext(float %a) nounwind {
; RV32I-NEXT: lui a1, 323584
; RV32I-NEXT: addi a1, a1, -1
; RV32I-NEXT: mv a0, s0
-; RV32I-NEXT: call __gtsf2@plt
+; RV32I-NEXT: call __gtsf2
; RV32I-NEXT: blez a0, .LBB32_4
; RV32I-NEXT: # %bb.3: # %start
; RV32I-NEXT: addi s1, s3, -1
; RV32I-NEXT: .LBB32_4: # %start
; RV32I-NEXT: mv a0, s0
; RV32I-NEXT: mv a1, s0
-; RV32I-NEXT: call __unordsf2@plt
+; RV32I-NEXT: call __unordsf2
; RV32I-NEXT: snez a0, a0
; RV32I-NEXT: addi a0, a0, -1
; RV32I-NEXT: and a0, a0, s1
@@ -2222,10 +2222,10 @@ define signext i32 @fcvt_w_s_sat_sext(float %a) nounwind {
; RV64I-NEXT: sd s3, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT: mv s0, a0
; RV64I-NEXT: lui a1, 847872
-; RV64I-NEXT: call __gesf2@plt
+; RV64I-NEXT: call __gesf2
; RV64I-NEXT: mv s2, a0
; RV64I-NEXT: mv a0, s0
-; RV64I-NEXT: call __fixsfdi@plt
+; RV64I-NEXT: call __fixsfdi
; RV64I-NEXT: mv s1, a0
; RV64I-NEXT: lui s3, 524288
; RV64I-NEXT: bgez s2, .LBB32_2
@@ -2235,14 +2235,14 @@ define signext i32 @fcvt_w_s_sat_sext(float %a) nounwind {
; RV64I-NEXT: lui a1, 323584
; RV64I-NEXT: addiw a1, a1, -1
; RV64I-NEXT: mv a0, s0
-; RV64I-NEXT: call __gtsf2@plt
+; RV64I-NEXT: call __gtsf2
; RV64I-NEXT: blez a0, .LBB32_4
; RV64I-NEXT: # %bb.3: # %start
; RV64I-NEXT: addi s1, s3, -1
; RV64I-NEXT: .LBB32_4: # %start
; RV64I-NEXT: mv a0, s0
; RV64I-NEXT: mv a1, s0
-; RV64I-NEXT: call __unordsf2@plt
+; RV64I-NEXT: call __unordsf2
; RV64I-NEXT: snez a0, a0
; RV64I-NEXT: addi a0, a0, -1
; RV64I-NEXT: and a0, a0, s1
diff --git a/llvm/test/CodeGen/RISCV/float-fcmp-strict.ll b/llvm/test/CodeGen/RISCV/float-fcmp-strict.ll
index 36eb58fe7454..dae9f3e089cf 100644
--- a/llvm/test/CodeGen/RISCV/float-fcmp-strict.ll
+++ b/llvm/test/CodeGen/RISCV/float-fcmp-strict.ll
@@ -31,7 +31,7 @@ define i32 @fcmp_oeq(float %a, float %b) nounwind strictfp {
; RV32I: # %bb.0:
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT: call __eqsf2@plt
+; RV32I-NEXT: call __eqsf2
; RV32I-NEXT: seqz a0, a0
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
@@ -41,7 +41,7 @@ define i32 @fcmp_oeq(float %a, float %b) nounwind strictfp {
; RV64I: # %bb.0:
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT: call __eqsf2@plt
+; RV64I-NEXT: call __eqsf2
; RV64I-NEXT: seqz a0, a0
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
@@ -74,7 +74,7 @@ define i32 @fcmp_ogt(float %a, float %b) nounwind strictfp {
; RV32I: # %bb.0:
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT: call __gtsf2@plt
+; RV32I-NEXT: call __gtsf2
; RV32I-NEXT: sgtz a0, a0
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
@@ -84,7 +84,7 @@ define i32 @fcmp_ogt(float %a, float %b) nounwind strictfp {
; RV64I: # %bb.0:
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT: call __gtsf2@plt
+; RV64I-NEXT: call __gtsf2
; RV64I-NEXT: sgtz a0, a0
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
@@ -116,7 +116,7 @@ define i32 @fcmp_oge(float %a, float %b) nounwind strictfp {
; RV32I: # %bb.0:
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT: call __gesf2@plt
+; RV32I-NEXT: call __gesf2
; RV32I-NEXT: slti a0, a0, 0
; RV32I-NEXT: xori a0, a0, 1
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
@@ -127,7 +127,7 @@ define i32 @fcmp_oge(float %a, float %b) nounwind strictfp {
; RV64I: # %bb.0:
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT: call __gesf2@plt
+; RV64I-NEXT: call __gesf2
; RV64I-NEXT: slti a0, a0, 0
; RV64I-NEXT: xori a0, a0, 1
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
@@ -160,7 +160,7 @@ define i32 @fcmp_olt(float %a, float %b) nounwind strictfp {
; RV32I: # %bb.0:
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT: call __ltsf2@plt
+; RV32I-NEXT: call __ltsf2
; RV32I-NEXT: slti a0, a0, 0
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
@@ -170,7 +170,7 @@ define i32 @fcmp_olt(float %a, float %b) nounwind strictfp {
; RV64I: # %bb.0:
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT: call __ltsf2@plt
+; RV64I-NEXT: call __ltsf2
; RV64I-NEXT: slti a0, a0, 0
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
@@ -202,7 +202,7 @@ define i32 @fcmp_ole(float %a, float %b) nounwind strictfp {
; RV32I: # %bb.0:
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT: call __lesf2@plt
+; RV32I-NEXT: call __lesf2
; RV32I-NEXT: slti a0, a0, 1
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
@@ -212,7 +212,7 @@ define i32 @fcmp_ole(float %a, float %b) nounwind strictfp {
; RV64I: # %bb.0:
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT: call __lesf2@plt
+; RV64I-NEXT: call __lesf2
; RV64I-NEXT: slti a0, a0, 1
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
@@ -261,11 +261,11 @@ define i32 @fcmp_one(float %a, float %b) nounwind strictfp {
; RV32I-NEXT: sw s2, 0(sp) # 4-byte Folded Spill
; RV32I-NEXT: mv s0, a1
; RV32I-NEXT: mv s1, a0
-; RV32I-NEXT: call __eqsf2@plt
+; RV32I-NEXT: call __eqsf2
; RV32I-NEXT: snez s2, a0
; RV32I-NEXT: mv a0, s1
; RV32I-NEXT: mv a1, s0
-; RV32I-NEXT: call __unordsf2@plt
+; RV32I-NEXT: call __unordsf2
; RV32I-NEXT: seqz a0, a0
; RV32I-NEXT: and a0, a0, s2
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
@@ -284,11 +284,11 @@ define i32 @fcmp_one(float %a, float %b) nounwind strictfp {
; RV64I-NEXT: sd s2, 0(sp) # 8-byte Folded Spill
; RV64I-NEXT: mv s0, a1
; RV64I-NEXT: mv s1, a0
-; RV64I-NEXT: call __eqsf2@plt
+; RV64I-NEXT: call __eqsf2
; RV64I-NEXT: snez s2, a0
; RV64I-NEXT: mv a0, s1
; RV64I-NEXT: mv a1, s0
-; RV64I-NEXT: call __unordsf2@plt
+; RV64I-NEXT: call __unordsf2
; RV64I-NEXT: seqz a0, a0
; RV64I-NEXT: and a0, a0, s2
; RV64I-NEXT: ld ra, 24(sp) # 8-byte Folded Reload
@@ -321,7 +321,7 @@ define i32 @fcmp_ord(float %a, float %b) nounwind strictfp {
; RV32I: # %bb.0:
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT: call __unordsf2@plt
+; RV32I-NEXT: call __unordsf2
; RV32I-NEXT: seqz a0, a0
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
@@ -331,7 +331,7 @@ define i32 @fcmp_ord(float %a, float %b) nounwind strictfp {
; RV64I: # %bb.0:
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT: call __unordsf2@plt
+; RV64I-NEXT: call __unordsf2
; RV64I-NEXT: seqz a0, a0
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
@@ -382,11 +382,11 @@ define i32 @fcmp_ueq(float %a, float %b) nounwind strictfp {
; RV32I-NEXT: sw s2, 0(sp) # 4-byte Folded Spill
; RV32I-NEXT: mv s0, a1
; RV32I-NEXT: mv s1, a0
-; RV32I-NEXT: call __eqsf2@plt
+; RV32I-NEXT: call __eqsf2
; RV32I-NEXT: seqz s2, a0
; RV32I-NEXT: mv a0, s1
; RV32I-NEXT: mv a1, s0
-; RV32I-NEXT: call __unordsf2@plt
+; RV32I-NEXT: call __unordsf2
; RV32I-NEXT: snez a0, a0
; RV32I-NEXT: or a0, a0, s2
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
@@ -405,11 +405,11 @@ define i32 @fcmp_ueq(float %a, float %b) nounwind strictfp {
; RV64I-NEXT: sd s2, 0(sp) # 8-byte Folded Spill
; RV64I-NEXT: mv s0, a1
; RV64I-NEXT: mv s1, a0
-; RV64I-NEXT: call __eqsf2@plt
+; RV64I-NEXT: call __eqsf2
; RV64I-NEXT: seqz s2, a0
; RV64I-NEXT: mv a0, s1
; RV64I-NEXT: mv a1, s0
-; RV64I-NEXT: call __unordsf2@plt
+; RV64I-NEXT: call __unordsf2
; RV64I-NEXT: snez a0, a0
; RV64I-NEXT: or a0, a0, s2
; RV64I-NEXT: ld ra, 24(sp) # 8-byte Folded Reload
@@ -447,7 +447,7 @@ define i32 @fcmp_ugt(float %a, float %b) nounwind strictfp {
; RV32I: # %bb.0:
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT: call __lesf2@plt
+; RV32I-NEXT: call __lesf2
; RV32I-NEXT: sgtz a0, a0
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
@@ -457,7 +457,7 @@ define i32 @fcmp_ugt(float %a, float %b) nounwind strictfp {
; RV64I: # %bb.0:
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT: call __lesf2@plt
+; RV64I-NEXT: call __lesf2
; RV64I-NEXT: sgtz a0, a0
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
@@ -491,7 +491,7 @@ define i32 @fcmp_uge(float %a, float %b) nounwind strictfp {
; RV32I: # %bb.0:
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT: call __ltsf2@plt
+; RV32I-NEXT: call __ltsf2
; RV32I-NEXT: slti a0, a0, 0
; RV32I-NEXT: xori a0, a0, 1
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
@@ -502,7 +502,7 @@ define i32 @fcmp_uge(float %a, float %b) nounwind strictfp {
; RV64I: # %bb.0:
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT: call __ltsf2@plt
+; RV64I-NEXT: call __ltsf2
; RV64I-NEXT: slti a0, a0, 0
; RV64I-NEXT: xori a0, a0, 1
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
@@ -537,7 +537,7 @@ define i32 @fcmp_ult(float %a, float %b) nounwind strictfp {
; RV32I: # %bb.0:
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT: call __gesf2@plt
+; RV32I-NEXT: call __gesf2
; RV32I-NEXT: slti a0, a0, 0
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
@@ -547,7 +547,7 @@ define i32 @fcmp_ult(float %a, float %b) nounwind strictfp {
; RV64I: # %bb.0:
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT: call __gesf2@plt
+; RV64I-NEXT: call __gesf2
; RV64I-NEXT: slti a0, a0, 0
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
@@ -581,7 +581,7 @@ define i32 @fcmp_ule(float %a, float %b) nounwind strictfp {
; RV32I: # %bb.0:
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT: call __gtsf2@plt
+; RV32I-NEXT: call __gtsf2
; RV32I-NEXT: slti a0, a0, 1
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
@@ -591,7 +591,7 @@ define i32 @fcmp_ule(float %a, float %b) nounwind strictfp {
; RV64I: # %bb.0:
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT: call __gtsf2@plt
+; RV64I-NEXT: call __gtsf2
; RV64I-NEXT: slti a0, a0, 1
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
@@ -618,7 +618,7 @@ define i32 @fcmp_une(float %a, float %b) nounwind strictfp {
; RV32I: # %bb.0:
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT: call __nesf2@plt
+; RV32I-NEXT: call __nesf2
; RV32I-NEXT: snez a0, a0
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
@@ -628,7 +628,7 @@ define i32 @fcmp_une(float %a, float %b) nounwind strictfp {
; RV64I: # %bb.0:
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT: call __nesf2@plt
+; RV64I-NEXT: call __nesf2
; RV64I-NEXT: snez a0, a0
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
@@ -659,7 +659,7 @@ define i32 @fcmp_uno(float %a, float %b) nounwind strictfp {
; RV32I: # %bb.0:
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT: call __unordsf2@plt
+; RV32I-NEXT: call __unordsf2
; RV32I-NEXT: snez a0, a0
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
@@ -669,7 +669,7 @@ define i32 @fcmp_uno(float %a, float %b) nounwind strictfp {
; RV64I: # %bb.0:
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT: call __unordsf2@plt
+; RV64I-NEXT: call __unordsf2
; RV64I-NEXT: snez a0, a0
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
@@ -698,7 +698,7 @@ define i32 @fcmps_oeq(float %a, float %b) nounwind strictfp {
; RV32I: # %bb.0:
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT: call __eqsf2@plt
+; RV32I-NEXT: call __eqsf2
; RV32I-NEXT: seqz a0, a0
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
@@ -708,7 +708,7 @@ define i32 @fcmps_oeq(float %a, float %b) nounwind strictfp {
; RV64I: # %bb.0:
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT: call __eqsf2@plt
+; RV64I-NEXT: call __eqsf2
; RV64I-NEXT: seqz a0, a0
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
@@ -734,7 +734,7 @@ define i32 @fcmps_ogt(float %a, float %b) nounwind strictfp {
; RV32I: # %bb.0:
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT: call __gtsf2@plt
+; RV32I-NEXT: call __gtsf2
; RV32I-NEXT: sgtz a0, a0
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
@@ -744,7 +744,7 @@ define i32 @fcmps_ogt(float %a, float %b) nounwind strictfp {
; RV64I: # %bb.0:
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT: call __gtsf2@plt
+; RV64I-NEXT: call __gtsf2
; RV64I-NEXT: sgtz a0, a0
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
@@ -769,7 +769,7 @@ define i32 @fcmps_oge(float %a, float %b) nounwind strictfp {
; RV32I: # %bb.0:
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT: call __gesf2@plt
+; RV32I-NEXT: call __gesf2
; RV32I-NEXT: slti a0, a0, 0
; RV32I-NEXT: xori a0, a0, 1
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
@@ -780,7 +780,7 @@ define i32 @fcmps_oge(float %a, float %b) nounwind strictfp {
; RV64I: # %bb.0:
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT: call __gesf2@plt
+; RV64I-NEXT: call __gesf2
; RV64I-NEXT: slti a0, a0, 0
; RV64I-NEXT: xori a0, a0, 1
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
@@ -806,7 +806,7 @@ define i32 @fcmps_olt(float %a, float %b) nounwind strictfp {
; RV32I: # %bb.0:
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT: call __ltsf2@plt
+; RV32I-NEXT: call __ltsf2
; RV32I-NEXT: slti a0, a0, 0
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
@@ -816,7 +816,7 @@ define i32 @fcmps_olt(float %a, float %b) nounwind strictfp {
; RV64I: # %bb.0:
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT: call __ltsf2@plt
+; RV64I-NEXT: call __ltsf2
; RV64I-NEXT: slti a0, a0, 0
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
@@ -841,7 +841,7 @@ define i32 @fcmps_ole(float %a, float %b) nounwind strictfp {
; RV32I: # %bb.0:
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT: call __lesf2@plt
+; RV32I-NEXT: call __lesf2
; RV32I-NEXT: slti a0, a0, 1
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
@@ -851,7 +851,7 @@ define i32 @fcmps_ole(float %a, float %b) nounwind strictfp {
; RV64I: # %bb.0:
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT: call __lesf2@plt
+; RV64I-NEXT: call __lesf2
; RV64I-NEXT: slti a0, a0, 1
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
@@ -885,11 +885,11 @@ define i32 @fcmps_one(float %a, float %b) nounwind strictfp {
; RV32I-NEXT: sw s2, 0(sp) # 4-byte Folded Spill
; RV32I-NEXT: mv s0, a1
; RV32I-NEXT: mv s1, a0
-; RV32I-NEXT: call __eqsf2@plt
+; RV32I-NEXT: call __eqsf2
; RV32I-NEXT: snez s2, a0
; RV32I-NEXT: mv a0, s1
; RV32I-NEXT: mv a1, s0
-; RV32I-NEXT: call __unordsf2@plt
+; RV32I-NEXT: call __unordsf2
; RV32I-NEXT: seqz a0, a0
; RV32I-NEXT: and a0, a0, s2
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
@@ -908,11 +908,11 @@ define i32 @fcmps_one(float %a, float %b) nounwind strictfp {
; RV64I-NEXT: sd s2, 0(sp) # 8-byte Folded Spill
; RV64I-NEXT: mv s0, a1
; RV64I-NEXT: mv s1, a0
-; RV64I-NEXT: call __eqsf2@plt
+; RV64I-NEXT: call __eqsf2
; RV64I-NEXT: snez s2, a0
; RV64I-NEXT: mv a0, s1
; RV64I-NEXT: mv a1, s0
-; RV64I-NEXT: call __unordsf2@plt
+; RV64I-NEXT: call __unordsf2
; RV64I-NEXT: seqz a0, a0
; RV64I-NEXT: and a0, a0, s2
; RV64I-NEXT: ld ra, 24(sp) # 8-byte Folded Reload
@@ -945,7 +945,7 @@ define i32 @fcmps_ord(float %a, float %b) nounwind strictfp {
; RV32I: # %bb.0:
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT: call __unordsf2@plt
+; RV32I-NEXT: call __unordsf2
; RV32I-NEXT: seqz a0, a0
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
@@ -955,7 +955,7 @@ define i32 @fcmps_ord(float %a, float %b) nounwind strictfp {
; RV64I: # %bb.0:
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT: call __unordsf2@plt
+; RV64I-NEXT: call __unordsf2
; RV64I-NEXT: seqz a0, a0
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
@@ -991,11 +991,11 @@ define i32 @fcmps_ueq(float %a, float %b) nounwind strictfp {
; RV32I-NEXT: sw s2, 0(sp) # 4-byte Folded Spill
; RV32I-NEXT: mv s0, a1
; RV32I-NEXT: mv s1, a0
-; RV32I-NEXT: call __eqsf2@plt
+; RV32I-NEXT: call __eqsf2
; RV32I-NEXT: seqz s2, a0
; RV32I-NEXT: mv a0, s1
; RV32I-NEXT: mv a1, s0
-; RV32I-NEXT: call __unordsf2@plt
+; RV32I-NEXT: call __unordsf2
; RV32I-NEXT: snez a0, a0
; RV32I-NEXT: or a0, a0, s2
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
@@ -1014,11 +1014,11 @@ define i32 @fcmps_ueq(float %a, float %b) nounwind strictfp {
; RV64I-NEXT: sd s2, 0(sp) # 8-byte Folded Spill
; RV64I-NEXT: mv s0, a1
; RV64I-NEXT: mv s1, a0
-; RV64I-NEXT: call __eqsf2@plt
+; RV64I-NEXT: call __eqsf2
; RV64I-NEXT: seqz s2, a0
; RV64I-NEXT: mv a0, s1
; RV64I-NEXT: mv a1, s0
-; RV64I-NEXT: call __unordsf2@plt
+; RV64I-NEXT: call __unordsf2
; RV64I-NEXT: snez a0, a0
; RV64I-NEXT: or a0, a0, s2
; RV64I-NEXT: ld ra, 24(sp) # 8-byte Folded Reload
@@ -1049,7 +1049,7 @@ define i32 @fcmps_ugt(float %a, float %b) nounwind strictfp {
; RV32I: # %bb.0:
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT: call __lesf2@plt
+; RV32I-NEXT: call __lesf2
; RV32I-NEXT: sgtz a0, a0
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
@@ -1059,7 +1059,7 @@ define i32 @fcmps_ugt(float %a, float %b) nounwind strictfp {
; RV64I: # %bb.0:
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT: call __lesf2@plt
+; RV64I-NEXT: call __lesf2
; RV64I-NEXT: sgtz a0, a0
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
@@ -1086,7 +1086,7 @@ define i32 @fcmps_uge(float %a, float %b) nounwind strictfp {
; RV32I: # %bb.0:
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT: call __ltsf2@plt
+; RV32I-NEXT: call __ltsf2
; RV32I-NEXT: slti a0, a0, 0
; RV32I-NEXT: xori a0, a0, 1
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
@@ -1097,7 +1097,7 @@ define i32 @fcmps_uge(float %a, float %b) nounwind strictfp {
; RV64I: # %bb.0:
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT: call __ltsf2@plt
+; RV64I-NEXT: call __ltsf2
; RV64I-NEXT: slti a0, a0, 0
; RV64I-NEXT: xori a0, a0, 1
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
@@ -1125,7 +1125,7 @@ define i32 @fcmps_ult(float %a, float %b) nounwind strictfp {
; RV32I: # %bb.0:
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT: call __gesf2@plt
+; RV32I-NEXT: call __gesf2
; RV32I-NEXT: slti a0, a0, 0
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
@@ -1135,7 +1135,7 @@ define i32 @fcmps_ult(float %a, float %b) nounwind strictfp {
; RV64I: # %bb.0:
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT: call __gesf2@plt
+; RV64I-NEXT: call __gesf2
; RV64I-NEXT: slti a0, a0, 0
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
@@ -1162,7 +1162,7 @@ define i32 @fcmps_ule(float %a, float %b) nounwind strictfp {
; RV32I: # %bb.0:
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT: call __gtsf2@plt
+; RV32I-NEXT: call __gtsf2
; RV32I-NEXT: slti a0, a0, 1
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
@@ -1172,7 +1172,7 @@ define i32 @fcmps_ule(float %a, float %b) nounwind strictfp {
; RV64I: # %bb.0:
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT: call __gtsf2@plt
+; RV64I-NEXT: call __gtsf2
; RV64I-NEXT: slti a0, a0, 1
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
@@ -1203,7 +1203,7 @@ define i32 @fcmps_une(float %a, float %b) nounwind strictfp {
; RV32I: # %bb.0:
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT: call __nesf2@plt
+; RV32I-NEXT: call __nesf2
; RV32I-NEXT: snez a0, a0
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
@@ -1213,7 +1213,7 @@ define i32 @fcmps_une(float %a, float %b) nounwind strictfp {
; RV64I: # %bb.0:
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT: call __nesf2@plt
+; RV64I-NEXT: call __nesf2
; RV64I-NEXT: snez a0, a0
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
@@ -1244,7 +1244,7 @@ define i32 @fcmps_uno(float %a, float %b) nounwind strictfp {
; RV32I: # %bb.0:
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT: call __unordsf2@plt
+; RV32I-NEXT: call __unordsf2
; RV32I-NEXT: snez a0, a0
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
@@ -1254,7 +1254,7 @@ define i32 @fcmps_uno(float %a, float %b) nounwind strictfp {
; RV64I: # %bb.0:
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT: call __unordsf2@plt
+; RV64I-NEXT: call __unordsf2
; RV64I-NEXT: snez a0, a0
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
diff --git a/llvm/test/CodeGen/RISCV/float-fcmp.ll b/llvm/test/CodeGen/RISCV/float-fcmp.ll
index b4fbed1321e2..265d553a3e5d 100644
--- a/llvm/test/CodeGen/RISCV/float-fcmp.ll
+++ b/llvm/test/CodeGen/RISCV/float-fcmp.ll
@@ -52,7 +52,7 @@ define i32 @fcmp_oeq(float %a, float %b) nounwind {
; RV32I: # %bb.0:
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT: call __eqsf2@plt
+; RV32I-NEXT: call __eqsf2
; RV32I-NEXT: seqz a0, a0
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
@@ -62,7 +62,7 @@ define i32 @fcmp_oeq(float %a, float %b) nounwind {
; RV64I: # %bb.0:
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT: call __eqsf2@plt
+; RV64I-NEXT: call __eqsf2
; RV64I-NEXT: seqz a0, a0
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
@@ -87,7 +87,7 @@ define i32 @fcmp_ogt(float %a, float %b) nounwind {
; RV32I: # %bb.0:
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT: call __gtsf2@plt
+; RV32I-NEXT: call __gtsf2
; RV32I-NEXT: sgtz a0, a0
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
@@ -97,7 +97,7 @@ define i32 @fcmp_ogt(float %a, float %b) nounwind {
; RV64I: # %bb.0:
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT: call __gtsf2@plt
+; RV64I-NEXT: call __gtsf2
; RV64I-NEXT: sgtz a0, a0
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
@@ -122,7 +122,7 @@ define i32 @fcmp_oge(float %a, float %b) nounwind {
; RV32I: # %bb.0:
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT: call __gesf2@plt
+; RV32I-NEXT: call __gesf2
; RV32I-NEXT: slti a0, a0, 0
; RV32I-NEXT: xori a0, a0, 1
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
@@ -133,7 +133,7 @@ define i32 @fcmp_oge(float %a, float %b) nounwind {
; RV64I: # %bb.0:
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT: call __gesf2@plt
+; RV64I-NEXT: call __gesf2
; RV64I-NEXT: slti a0, a0, 0
; RV64I-NEXT: xori a0, a0, 1
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
@@ -159,7 +159,7 @@ define i32 @fcmp_olt(float %a, float %b) nounwind {
; RV32I: # %bb.0:
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT: call __ltsf2@plt
+; RV32I-NEXT: call __ltsf2
; RV32I-NEXT: slti a0, a0, 0
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
@@ -169,7 +169,7 @@ define i32 @fcmp_olt(float %a, float %b) nounwind {
; RV64I: # %bb.0:
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT: call __ltsf2@plt
+; RV64I-NEXT: call __ltsf2
; RV64I-NEXT: slti a0, a0, 0
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
@@ -194,7 +194,7 @@ define i32 @fcmp_ole(float %a, float %b) nounwind {
; RV32I: # %bb.0:
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT: call __lesf2@plt
+; RV32I-NEXT: call __lesf2
; RV32I-NEXT: slti a0, a0, 1
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
@@ -204,7 +204,7 @@ define i32 @fcmp_ole(float %a, float %b) nounwind {
; RV64I: # %bb.0:
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT: call __lesf2@plt
+; RV64I-NEXT: call __lesf2
; RV64I-NEXT: slti a0, a0, 1
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
@@ -238,11 +238,11 @@ define i32 @fcmp_one(float %a, float %b) nounwind {
; RV32I-NEXT: sw s2, 0(sp) # 4-byte Folded Spill
; RV32I-NEXT: mv s0, a1
; RV32I-NEXT: mv s1, a0
-; RV32I-NEXT: call __eqsf2@plt
+; RV32I-NEXT: call __eqsf2
; RV32I-NEXT: snez s2, a0
; RV32I-NEXT: mv a0, s1
; RV32I-NEXT: mv a1, s0
-; RV32I-NEXT: call __unordsf2@plt
+; RV32I-NEXT: call __unordsf2
; RV32I-NEXT: seqz a0, a0
; RV32I-NEXT: and a0, a0, s2
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
@@ -261,11 +261,11 @@ define i32 @fcmp_one(float %a, float %b) nounwind {
; RV64I-NEXT: sd s2, 0(sp) # 8-byte Folded Spill
; RV64I-NEXT: mv s0, a1
; RV64I-NEXT: mv s1, a0
-; RV64I-NEXT: call __eqsf2@plt
+; RV64I-NEXT: call __eqsf2
; RV64I-NEXT: snez s2, a0
; RV64I-NEXT: mv a0, s1
; RV64I-NEXT: mv a1, s0
-; RV64I-NEXT: call __unordsf2@plt
+; RV64I-NEXT: call __unordsf2
; RV64I-NEXT: seqz a0, a0
; RV64I-NEXT: and a0, a0, s2
; RV64I-NEXT: ld ra, 24(sp) # 8-byte Folded Reload
@@ -298,7 +298,7 @@ define i32 @fcmp_ord(float %a, float %b) nounwind {
; RV32I: # %bb.0:
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT: call __unordsf2@plt
+; RV32I-NEXT: call __unordsf2
; RV32I-NEXT: seqz a0, a0
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
@@ -308,7 +308,7 @@ define i32 @fcmp_ord(float %a, float %b) nounwind {
; RV64I: # %bb.0:
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT: call __unordsf2@plt
+; RV64I-NEXT: call __unordsf2
; RV64I-NEXT: seqz a0, a0
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
@@ -344,11 +344,11 @@ define i32 @fcmp_ueq(float %a, float %b) nounwind {
; RV32I-NEXT: sw s2, 0(sp) # 4-byte Folded Spill
; RV32I-NEXT: mv s0, a1
; RV32I-NEXT: mv s1, a0
-; RV32I-NEXT: call __eqsf2@plt
+; RV32I-NEXT: call __eqsf2
; RV32I-NEXT: seqz s2, a0
; RV32I-NEXT: mv a0, s1
; RV32I-NEXT: mv a1, s0
-; RV32I-NEXT: call __unordsf2@plt
+; RV32I-NEXT: call __unordsf2
; RV32I-NEXT: snez a0, a0
; RV32I-NEXT: or a0, a0, s2
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
@@ -367,11 +367,11 @@ define i32 @fcmp_ueq(float %a, float %b) nounwind {
; RV64I-NEXT: sd s2, 0(sp) # 8-byte Folded Spill
; RV64I-NEXT: mv s0, a1
; RV64I-NEXT: mv s1, a0
-; RV64I-NEXT: call __eqsf2@plt
+; RV64I-NEXT: call __eqsf2
; RV64I-NEXT: seqz s2, a0
; RV64I-NEXT: mv a0, s1
; RV64I-NEXT: mv a1, s0
-; RV64I-NEXT: call __unordsf2@plt
+; RV64I-NEXT: call __unordsf2
; RV64I-NEXT: snez a0, a0
; RV64I-NEXT: or a0, a0, s2
; RV64I-NEXT: ld ra, 24(sp) # 8-byte Folded Reload
@@ -402,7 +402,7 @@ define i32 @fcmp_ugt(float %a, float %b) nounwind {
; RV32I: # %bb.0:
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT: call __lesf2@plt
+; RV32I-NEXT: call __lesf2
; RV32I-NEXT: sgtz a0, a0
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
@@ -412,7 +412,7 @@ define i32 @fcmp_ugt(float %a, float %b) nounwind {
; RV64I: # %bb.0:
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT: call __lesf2@plt
+; RV64I-NEXT: call __lesf2
; RV64I-NEXT: sgtz a0, a0
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
@@ -439,7 +439,7 @@ define i32 @fcmp_uge(float %a, float %b) nounwind {
; RV32I: # %bb.0:
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT: call __ltsf2@plt
+; RV32I-NEXT: call __ltsf2
; RV32I-NEXT: slti a0, a0, 0
; RV32I-NEXT: xori a0, a0, 1
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
@@ -450,7 +450,7 @@ define i32 @fcmp_uge(float %a, float %b) nounwind {
; RV64I: # %bb.0:
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT: call __ltsf2@plt
+; RV64I-NEXT: call __ltsf2
; RV64I-NEXT: slti a0, a0, 0
; RV64I-NEXT: xori a0, a0, 1
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
@@ -478,7 +478,7 @@ define i32 @fcmp_ult(float %a, float %b) nounwind {
; RV32I: # %bb.0:
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT: call __gesf2@plt
+; RV32I-NEXT: call __gesf2
; RV32I-NEXT: slti a0, a0, 0
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
@@ -488,7 +488,7 @@ define i32 @fcmp_ult(float %a, float %b) nounwind {
; RV64I: # %bb.0:
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT: call __gesf2@plt
+; RV64I-NEXT: call __gesf2
; RV64I-NEXT: slti a0, a0, 0
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
@@ -515,7 +515,7 @@ define i32 @fcmp_ule(float %a, float %b) nounwind {
; RV32I: # %bb.0:
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT: call __gtsf2@plt
+; RV32I-NEXT: call __gtsf2
; RV32I-NEXT: slti a0, a0, 1
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
@@ -525,7 +525,7 @@ define i32 @fcmp_ule(float %a, float %b) nounwind {
; RV64I: # %bb.0:
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT: call __gtsf2@plt
+; RV64I-NEXT: call __gtsf2
; RV64I-NEXT: slti a0, a0, 1
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
@@ -552,7 +552,7 @@ define i32 @fcmp_une(float %a, float %b) nounwind {
; RV32I: # %bb.0:
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT: call __nesf2@plt
+; RV32I-NEXT: call __nesf2
; RV32I-NEXT: snez a0, a0
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
@@ -562,7 +562,7 @@ define i32 @fcmp_une(float %a, float %b) nounwind {
; RV64I: # %bb.0:
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT: call __nesf2@plt
+; RV64I-NEXT: call __nesf2
; RV64I-NEXT: snez a0, a0
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
@@ -593,7 +593,7 @@ define i32 @fcmp_uno(float %a, float %b) nounwind {
; RV32I: # %bb.0:
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT: call __unordsf2@plt
+; RV32I-NEXT: call __unordsf2
; RV32I-NEXT: snez a0, a0
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
@@ -603,7 +603,7 @@ define i32 @fcmp_uno(float %a, float %b) nounwind {
; RV64I: # %bb.0:
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT: call __unordsf2@plt
+; RV64I-NEXT: call __unordsf2
; RV64I-NEXT: snez a0, a0
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
diff --git a/llvm/test/CodeGen/RISCV/float-frem.ll b/llvm/test/CodeGen/RISCV/float-frem.ll
index 6c15da0cca7b..651b1b116adc 100644
--- a/llvm/test/CodeGen/RISCV/float-frem.ll
+++ b/llvm/test/CodeGen/RISCV/float-frem.ll
@@ -15,21 +15,21 @@
define float @frem_f32(float %a, float %b) nounwind {
; RV32IF-LABEL: frem_f32:
; RV32IF: # %bb.0:
-; RV32IF-NEXT: tail fmodf@plt
+; RV32IF-NEXT: tail fmodf
;
; RV64IF-LABEL: frem_f32:
; RV64IF: # %bb.0:
-; RV64IF-NEXT: tail fmodf@plt
+; RV64IF-NEXT: tail fmodf
;
; RV32IZFINX-LABEL: frem_f32:
; RV32IZFINX: # %bb.0:
-; RV32IZFINX-NEXT: tail fmodf@plt
+; RV32IZFINX-NEXT: tail fmodf
;
; RV64IZFINX-LABEL: frem_f32:
; RV64IZFINX: # %bb.0:
; RV64IZFINX-NEXT: addi sp, sp, -16
; RV64IZFINX-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
-; RV64IZFINX-NEXT: call fmodf@plt
+; RV64IZFINX-NEXT: call fmodf
; RV64IZFINX-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64IZFINX-NEXT: addi sp, sp, 16
; RV64IZFINX-NEXT: ret
@@ -38,7 +38,7 @@ define float @frem_f32(float %a, float %b) nounwind {
; RV32I: # %bb.0:
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT: call fmodf@plt
+; RV32I-NEXT: call fmodf
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
@@ -47,7 +47,7 @@ define float @frem_f32(float %a, float %b) nounwind {
; RV64I: # %bb.0:
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT: call fmodf@plt
+; RV64I-NEXT: call fmodf
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
diff --git a/llvm/test/CodeGen/RISCV/float-intrinsics-strict.ll b/llvm/test/CodeGen/RISCV/float-intrinsics-strict.ll
index d149b35f61c8..626db1985bfc 100644
--- a/llvm/test/CodeGen/RISCV/float-intrinsics-strict.ll
+++ b/llvm/test/CodeGen/RISCV/float-intrinsics-strict.ll
@@ -35,7 +35,7 @@ define float @sqrt_f32(float %a) nounwind strictfp {
; RV32I: # %bb.0:
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT: call sqrtf@plt
+; RV32I-NEXT: call sqrtf
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
@@ -44,7 +44,7 @@ define float @sqrt_f32(float %a) nounwind strictfp {
; RV64I: # %bb.0:
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT: call sqrtf@plt
+; RV64I-NEXT: call sqrtf
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
@@ -59,7 +59,7 @@ define float @powi_f32(float %a, i32 %b) nounwind strictfp {
; RV32IF: # %bb.0:
; RV32IF-NEXT: addi sp, sp, -16
; RV32IF-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IF-NEXT: call __powisf2@plt
+; RV32IF-NEXT: call __powisf2
; RV32IF-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32IF-NEXT: addi sp, sp, 16
; RV32IF-NEXT: ret
@@ -69,7 +69,7 @@ define float @powi_f32(float %a, i32 %b) nounwind strictfp {
; RV64IF-NEXT: addi sp, sp, -16
; RV64IF-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64IF-NEXT: sext.w a0, a0
-; RV64IF-NEXT: call __powisf2@plt
+; RV64IF-NEXT: call __powisf2
; RV64IF-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64IF-NEXT: addi sp, sp, 16
; RV64IF-NEXT: ret
@@ -78,7 +78,7 @@ define float @powi_f32(float %a, i32 %b) nounwind strictfp {
; RV32IZFINX: # %bb.0:
; RV32IZFINX-NEXT: addi sp, sp, -16
; RV32IZFINX-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IZFINX-NEXT: call __powisf2@plt
+; RV32IZFINX-NEXT: call __powisf2
; RV32IZFINX-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32IZFINX-NEXT: addi sp, sp, 16
; RV32IZFINX-NEXT: ret
@@ -88,7 +88,7 @@ define float @powi_f32(float %a, i32 %b) nounwind strictfp {
; RV64IZFINX-NEXT: addi sp, sp, -16
; RV64IZFINX-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64IZFINX-NEXT: sext.w a1, a1
-; RV64IZFINX-NEXT: call __powisf2@plt
+; RV64IZFINX-NEXT: call __powisf2
; RV64IZFINX-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64IZFINX-NEXT: addi sp, sp, 16
; RV64IZFINX-NEXT: ret
@@ -97,7 +97,7 @@ define float @powi_f32(float %a, i32 %b) nounwind strictfp {
; RV32I: # %bb.0:
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT: call __powisf2@plt
+; RV32I-NEXT: call __powisf2
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
@@ -107,7 +107,7 @@ define float @powi_f32(float %a, i32 %b) nounwind strictfp {
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT: sext.w a1, a1
-; RV64I-NEXT: call __powisf2@plt
+; RV64I-NEXT: call __powisf2
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
@@ -122,7 +122,7 @@ define float @sin_f32(float %a) nounwind strictfp {
; RV32IF: # %bb.0:
; RV32IF-NEXT: addi sp, sp, -16
; RV32IF-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IF-NEXT: call sinf@plt
+; RV32IF-NEXT: call sinf
; RV32IF-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32IF-NEXT: addi sp, sp, 16
; RV32IF-NEXT: ret
@@ -131,7 +131,7 @@ define float @sin_f32(float %a) nounwind strictfp {
; RV64IF: # %bb.0:
; RV64IF-NEXT: addi sp, sp, -16
; RV64IF-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
-; RV64IF-NEXT: call sinf@plt
+; RV64IF-NEXT: call sinf
; RV64IF-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64IF-NEXT: addi sp, sp, 16
; RV64IF-NEXT: ret
@@ -140,7 +140,7 @@ define float @sin_f32(float %a) nounwind strictfp {
; RV32IZFINX: # %bb.0:
; RV32IZFINX-NEXT: addi sp, sp, -16
; RV32IZFINX-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IZFINX-NEXT: call sinf@plt
+; RV32IZFINX-NEXT: call sinf
; RV32IZFINX-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32IZFINX-NEXT: addi sp, sp, 16
; RV32IZFINX-NEXT: ret
@@ -149,7 +149,7 @@ define float @sin_f32(float %a) nounwind strictfp {
; RV64IZFINX: # %bb.0:
; RV64IZFINX-NEXT: addi sp, sp, -16
; RV64IZFINX-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
-; RV64IZFINX-NEXT: call sinf@plt
+; RV64IZFINX-NEXT: call sinf
; RV64IZFINX-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64IZFINX-NEXT: addi sp, sp, 16
; RV64IZFINX-NEXT: ret
@@ -158,7 +158,7 @@ define float @sin_f32(float %a) nounwind strictfp {
; RV32I: # %bb.0:
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT: call sinf@plt
+; RV32I-NEXT: call sinf
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
@@ -167,7 +167,7 @@ define float @sin_f32(float %a) nounwind strictfp {
; RV64I: # %bb.0:
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT: call sinf@plt
+; RV64I-NEXT: call sinf
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
@@ -182,7 +182,7 @@ define float @cos_f32(float %a) nounwind strictfp {
; RV32IF: # %bb.0:
; RV32IF-NEXT: addi sp, sp, -16
; RV32IF-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IF-NEXT: call cosf@plt
+; RV32IF-NEXT: call cosf
; RV32IF-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32IF-NEXT: addi sp, sp, 16
; RV32IF-NEXT: ret
@@ -191,7 +191,7 @@ define float @cos_f32(float %a) nounwind strictfp {
; RV64IF: # %bb.0:
; RV64IF-NEXT: addi sp, sp, -16
; RV64IF-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
-; RV64IF-NEXT: call cosf@plt
+; RV64IF-NEXT: call cosf
; RV64IF-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64IF-NEXT: addi sp, sp, 16
; RV64IF-NEXT: ret
@@ -200,7 +200,7 @@ define float @cos_f32(float %a) nounwind strictfp {
; RV32IZFINX: # %bb.0:
; RV32IZFINX-NEXT: addi sp, sp, -16
; RV32IZFINX-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IZFINX-NEXT: call cosf@plt
+; RV32IZFINX-NEXT: call cosf
; RV32IZFINX-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32IZFINX-NEXT: addi sp, sp, 16
; RV32IZFINX-NEXT: ret
@@ -209,7 +209,7 @@ define float @cos_f32(float %a) nounwind strictfp {
; RV64IZFINX: # %bb.0:
; RV64IZFINX-NEXT: addi sp, sp, -16
; RV64IZFINX-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
-; RV64IZFINX-NEXT: call cosf@plt
+; RV64IZFINX-NEXT: call cosf
; RV64IZFINX-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64IZFINX-NEXT: addi sp, sp, 16
; RV64IZFINX-NEXT: ret
@@ -218,7 +218,7 @@ define float @cos_f32(float %a) nounwind strictfp {
; RV32I: # %bb.0:
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT: call cosf@plt
+; RV32I-NEXT: call cosf
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
@@ -227,7 +227,7 @@ define float @cos_f32(float %a) nounwind strictfp {
; RV64I: # %bb.0:
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT: call cosf@plt
+; RV64I-NEXT: call cosf
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
@@ -244,10 +244,10 @@ define float @sincos_f32(float %a) nounwind strictfp {
; RV32IF-NEXT: fsw fs0, 8(sp) # 4-byte Folded Spill
; RV32IF-NEXT: fsw fs1, 4(sp) # 4-byte Folded Spill
; RV32IF-NEXT: fmv.s fs0, fa0
-; RV32IF-NEXT: call sinf@plt
+; RV32IF-NEXT: call sinf
; RV32IF-NEXT: fmv.s fs1, fa0
; RV32IF-NEXT: fmv.s fa0, fs0
-; RV32IF-NEXT: call cosf@plt
+; RV32IF-NEXT: call cosf
; RV32IF-NEXT: fadd.s fa0, fs1, fa0
; RV32IF-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32IF-NEXT: flw fs0, 8(sp) # 4-byte Folded Reload
@@ -262,10 +262,10 @@ define float @sincos_f32(float %a) nounwind strictfp {
; RV64IF-NEXT: fsw fs0, 4(sp) # 4-byte Folded Spill
; RV64IF-NEXT: fsw fs1, 0(sp) # 4-byte Folded Spill
; RV64IF-NEXT: fmv.s fs0, fa0
-; RV64IF-NEXT: call sinf@plt
+; RV64IF-NEXT: call sinf
; RV64IF-NEXT: fmv.s fs1, fa0
; RV64IF-NEXT: fmv.s fa0, fs0
-; RV64IF-NEXT: call cosf@plt
+; RV64IF-NEXT: call cosf
; RV64IF-NEXT: fadd.s fa0, fs1, fa0
; RV64IF-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64IF-NEXT: flw fs0, 4(sp) # 4-byte Folded Reload
@@ -280,10 +280,10 @@ define float @sincos_f32(float %a) nounwind strictfp {
; RV32IZFINX-NEXT: sw s0, 8(sp) # 4-byte Folded Spill
; RV32IZFINX-NEXT: sw s1, 4(sp) # 4-byte Folded Spill
; RV32IZFINX-NEXT: mv s0, a0
-; RV32IZFINX-NEXT: call sinf@plt
+; RV32IZFINX-NEXT: call sinf
; RV32IZFINX-NEXT: mv s1, a0
; RV32IZFINX-NEXT: mv a0, s0
-; RV32IZFINX-NEXT: call cosf@plt
+; RV32IZFINX-NEXT: call cosf
; RV32IZFINX-NEXT: fadd.s a0, s1, a0
; RV32IZFINX-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32IZFINX-NEXT: lw s0, 8(sp) # 4-byte Folded Reload
@@ -298,10 +298,10 @@ define float @sincos_f32(float %a) nounwind strictfp {
; RV64IZFINX-NEXT: sd s0, 16(sp) # 8-byte Folded Spill
; RV64IZFINX-NEXT: sd s1, 8(sp) # 8-byte Folded Spill
; RV64IZFINX-NEXT: mv s0, a0
-; RV64IZFINX-NEXT: call sinf@plt
+; RV64IZFINX-NEXT: call sinf
; RV64IZFINX-NEXT: mv s1, a0
; RV64IZFINX-NEXT: mv a0, s0
-; RV64IZFINX-NEXT: call cosf@plt
+; RV64IZFINX-NEXT: call cosf
; RV64IZFINX-NEXT: fadd.s a0, s1, a0
; RV64IZFINX-NEXT: ld ra, 24(sp) # 8-byte Folded Reload
; RV64IZFINX-NEXT: ld s0, 16(sp) # 8-byte Folded Reload
@@ -316,13 +316,13 @@ define float @sincos_f32(float %a) nounwind strictfp {
; RV32I-NEXT: sw s0, 8(sp) # 4-byte Folded Spill
; RV32I-NEXT: sw s1, 4(sp) # 4-byte Folded Spill
; RV32I-NEXT: mv s0, a0
-; RV32I-NEXT: call sinf@plt
+; RV32I-NEXT: call sinf
; RV32I-NEXT: mv s1, a0
; RV32I-NEXT: mv a0, s0
-; RV32I-NEXT: call cosf@plt
+; RV32I-NEXT: call cosf
; RV32I-NEXT: mv a1, a0
; RV32I-NEXT: mv a0, s1
-; RV32I-NEXT: call __addsf3@plt
+; RV32I-NEXT: call __addsf3
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: lw s0, 8(sp) # 4-byte Folded Reload
; RV32I-NEXT: lw s1, 4(sp) # 4-byte Folded Reload
@@ -336,13 +336,13 @@ define float @sincos_f32(float %a) nounwind strictfp {
; RV64I-NEXT: sd s0, 16(sp) # 8-byte Folded Spill
; RV64I-NEXT: sd s1, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT: mv s0, a0
-; RV64I-NEXT: call sinf@plt
+; RV64I-NEXT: call sinf
; RV64I-NEXT: mv s1, a0
; RV64I-NEXT: mv a0, s0
-; RV64I-NEXT: call cosf@plt
+; RV64I-NEXT: call cosf
; RV64I-NEXT: mv a1, a0
; RV64I-NEXT: mv a0, s1
-; RV64I-NEXT: call __addsf3@plt
+; RV64I-NEXT: call __addsf3
; RV64I-NEXT: ld ra, 24(sp) # 8-byte Folded Reload
; RV64I-NEXT: ld s0, 16(sp) # 8-byte Folded Reload
; RV64I-NEXT: ld s1, 8(sp) # 8-byte Folded Reload
@@ -361,7 +361,7 @@ define float @pow_f32(float %a, float %b) nounwind strictfp {
; RV32IF: # %bb.0:
; RV32IF-NEXT: addi sp, sp, -16
; RV32IF-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IF-NEXT: call powf@plt
+; RV32IF-NEXT: call powf
; RV32IF-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32IF-NEXT: addi sp, sp, 16
; RV32IF-NEXT: ret
@@ -370,7 +370,7 @@ define float @pow_f32(float %a, float %b) nounwind strictfp {
; RV64IF: # %bb.0:
; RV64IF-NEXT: addi sp, sp, -16
; RV64IF-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
-; RV64IF-NEXT: call powf@plt
+; RV64IF-NEXT: call powf
; RV64IF-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64IF-NEXT: addi sp, sp, 16
; RV64IF-NEXT: ret
@@ -379,7 +379,7 @@ define float @pow_f32(float %a, float %b) nounwind strictfp {
; RV32IZFINX: # %bb.0:
; RV32IZFINX-NEXT: addi sp, sp, -16
; RV32IZFINX-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IZFINX-NEXT: call powf@plt
+; RV32IZFINX-NEXT: call powf
; RV32IZFINX-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32IZFINX-NEXT: addi sp, sp, 16
; RV32IZFINX-NEXT: ret
@@ -388,7 +388,7 @@ define float @pow_f32(float %a, float %b) nounwind strictfp {
; RV64IZFINX: # %bb.0:
; RV64IZFINX-NEXT: addi sp, sp, -16
; RV64IZFINX-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
-; RV64IZFINX-NEXT: call powf@plt
+; RV64IZFINX-NEXT: call powf
; RV64IZFINX-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64IZFINX-NEXT: addi sp, sp, 16
; RV64IZFINX-NEXT: ret
@@ -397,7 +397,7 @@ define float @pow_f32(float %a, float %b) nounwind strictfp {
; RV32I: # %bb.0:
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT: call powf@plt
+; RV32I-NEXT: call powf
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
@@ -406,7 +406,7 @@ define float @pow_f32(float %a, float %b) nounwind strictfp {
; RV64I: # %bb.0:
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT: call powf@plt
+; RV64I-NEXT: call powf
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
@@ -421,7 +421,7 @@ define float @exp_f32(float %a) nounwind strictfp {
; RV32IF: # %bb.0:
; RV32IF-NEXT: addi sp, sp, -16
; RV32IF-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IF-NEXT: call expf@plt
+; RV32IF-NEXT: call expf
; RV32IF-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32IF-NEXT: addi sp, sp, 16
; RV32IF-NEXT: ret
@@ -430,7 +430,7 @@ define float @exp_f32(float %a) nounwind strictfp {
; RV64IF: # %bb.0:
; RV64IF-NEXT: addi sp, sp, -16
; RV64IF-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
-; RV64IF-NEXT: call expf@plt
+; RV64IF-NEXT: call expf
; RV64IF-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64IF-NEXT: addi sp, sp, 16
; RV64IF-NEXT: ret
@@ -439,7 +439,7 @@ define float @exp_f32(float %a) nounwind strictfp {
; RV32IZFINX: # %bb.0:
; RV32IZFINX-NEXT: addi sp, sp, -16
; RV32IZFINX-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IZFINX-NEXT: call expf@plt
+; RV32IZFINX-NEXT: call expf
; RV32IZFINX-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32IZFINX-NEXT: addi sp, sp, 16
; RV32IZFINX-NEXT: ret
@@ -448,7 +448,7 @@ define float @exp_f32(float %a) nounwind strictfp {
; RV64IZFINX: # %bb.0:
; RV64IZFINX-NEXT: addi sp, sp, -16
; RV64IZFINX-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
-; RV64IZFINX-NEXT: call expf@plt
+; RV64IZFINX-NEXT: call expf
; RV64IZFINX-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64IZFINX-NEXT: addi sp, sp, 16
; RV64IZFINX-NEXT: ret
@@ -457,7 +457,7 @@ define float @exp_f32(float %a) nounwind strictfp {
; RV32I: # %bb.0:
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT: call expf@plt
+; RV32I-NEXT: call expf
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
@@ -466,7 +466,7 @@ define float @exp_f32(float %a) nounwind strictfp {
; RV64I: # %bb.0:
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT: call expf@plt
+; RV64I-NEXT: call expf
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
@@ -481,7 +481,7 @@ define float @exp2_f32(float %a) nounwind strictfp {
; RV32IF: # %bb.0:
; RV32IF-NEXT: addi sp, sp, -16
; RV32IF-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IF-NEXT: call exp2f@plt
+; RV32IF-NEXT: call exp2f
; RV32IF-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32IF-NEXT: addi sp, sp, 16
; RV32IF-NEXT: ret
@@ -490,7 +490,7 @@ define float @exp2_f32(float %a) nounwind strictfp {
; RV64IF: # %bb.0:
; RV64IF-NEXT: addi sp, sp, -16
; RV64IF-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
-; RV64IF-NEXT: call exp2f@plt
+; RV64IF-NEXT: call exp2f
; RV64IF-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64IF-NEXT: addi sp, sp, 16
; RV64IF-NEXT: ret
@@ -499,7 +499,7 @@ define float @exp2_f32(float %a) nounwind strictfp {
; RV32IZFINX: # %bb.0:
; RV32IZFINX-NEXT: addi sp, sp, -16
; RV32IZFINX-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IZFINX-NEXT: call exp2f@plt
+; RV32IZFINX-NEXT: call exp2f
; RV32IZFINX-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32IZFINX-NEXT: addi sp, sp, 16
; RV32IZFINX-NEXT: ret
@@ -508,7 +508,7 @@ define float @exp2_f32(float %a) nounwind strictfp {
; RV64IZFINX: # %bb.0:
; RV64IZFINX-NEXT: addi sp, sp, -16
; RV64IZFINX-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
-; RV64IZFINX-NEXT: call exp2f@plt
+; RV64IZFINX-NEXT: call exp2f
; RV64IZFINX-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64IZFINX-NEXT: addi sp, sp, 16
; RV64IZFINX-NEXT: ret
@@ -517,7 +517,7 @@ define float @exp2_f32(float %a) nounwind strictfp {
; RV32I: # %bb.0:
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT: call exp2f@plt
+; RV32I-NEXT: call exp2f
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
@@ -526,7 +526,7 @@ define float @exp2_f32(float %a) nounwind strictfp {
; RV64I: # %bb.0:
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT: call exp2f@plt
+; RV64I-NEXT: call exp2f
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
@@ -541,7 +541,7 @@ define float @log_f32(float %a) nounwind strictfp {
; RV32IF: # %bb.0:
; RV32IF-NEXT: addi sp, sp, -16
; RV32IF-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IF-NEXT: call logf@plt
+; RV32IF-NEXT: call logf
; RV32IF-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32IF-NEXT: addi sp, sp, 16
; RV32IF-NEXT: ret
@@ -550,7 +550,7 @@ define float @log_f32(float %a) nounwind strictfp {
; RV64IF: # %bb.0:
; RV64IF-NEXT: addi sp, sp, -16
; RV64IF-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
-; RV64IF-NEXT: call logf@plt
+; RV64IF-NEXT: call logf
; RV64IF-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64IF-NEXT: addi sp, sp, 16
; RV64IF-NEXT: ret
@@ -559,7 +559,7 @@ define float @log_f32(float %a) nounwind strictfp {
; RV32IZFINX: # %bb.0:
; RV32IZFINX-NEXT: addi sp, sp, -16
; RV32IZFINX-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IZFINX-NEXT: call logf@plt
+; RV32IZFINX-NEXT: call logf
; RV32IZFINX-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32IZFINX-NEXT: addi sp, sp, 16
; RV32IZFINX-NEXT: ret
@@ -568,7 +568,7 @@ define float @log_f32(float %a) nounwind strictfp {
; RV64IZFINX: # %bb.0:
; RV64IZFINX-NEXT: addi sp, sp, -16
; RV64IZFINX-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
-; RV64IZFINX-NEXT: call logf@plt
+; RV64IZFINX-NEXT: call logf
; RV64IZFINX-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64IZFINX-NEXT: addi sp, sp, 16
; RV64IZFINX-NEXT: ret
@@ -577,7 +577,7 @@ define float @log_f32(float %a) nounwind strictfp {
; RV32I: # %bb.0:
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT: call logf@plt
+; RV32I-NEXT: call logf
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
@@ -586,7 +586,7 @@ define float @log_f32(float %a) nounwind strictfp {
; RV64I: # %bb.0:
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT: call logf@plt
+; RV64I-NEXT: call logf
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
@@ -601,7 +601,7 @@ define float @log10_f32(float %a) nounwind strictfp {
; RV32IF: # %bb.0:
; RV32IF-NEXT: addi sp, sp, -16
; RV32IF-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IF-NEXT: call log10f@plt
+; RV32IF-NEXT: call log10f
; RV32IF-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32IF-NEXT: addi sp, sp, 16
; RV32IF-NEXT: ret
@@ -610,7 +610,7 @@ define float @log10_f32(float %a) nounwind strictfp {
; RV64IF: # %bb.0:
; RV64IF-NEXT: addi sp, sp, -16
; RV64IF-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
-; RV64IF-NEXT: call log10f@plt
+; RV64IF-NEXT: call log10f
; RV64IF-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64IF-NEXT: addi sp, sp, 16
; RV64IF-NEXT: ret
@@ -619,7 +619,7 @@ define float @log10_f32(float %a) nounwind strictfp {
; RV32IZFINX: # %bb.0:
; RV32IZFINX-NEXT: addi sp, sp, -16
; RV32IZFINX-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IZFINX-NEXT: call log10f@plt
+; RV32IZFINX-NEXT: call log10f
; RV32IZFINX-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32IZFINX-NEXT: addi sp, sp, 16
; RV32IZFINX-NEXT: ret
@@ -628,7 +628,7 @@ define float @log10_f32(float %a) nounwind strictfp {
; RV64IZFINX: # %bb.0:
; RV64IZFINX-NEXT: addi sp, sp, -16
; RV64IZFINX-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
-; RV64IZFINX-NEXT: call log10f@plt
+; RV64IZFINX-NEXT: call log10f
; RV64IZFINX-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64IZFINX-NEXT: addi sp, sp, 16
; RV64IZFINX-NEXT: ret
@@ -637,7 +637,7 @@ define float @log10_f32(float %a) nounwind strictfp {
; RV32I: # %bb.0:
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT: call log10f@plt
+; RV32I-NEXT: call log10f
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
@@ -646,7 +646,7 @@ define float @log10_f32(float %a) nounwind strictfp {
; RV64I: # %bb.0:
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT: call log10f@plt
+; RV64I-NEXT: call log10f
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
@@ -661,7 +661,7 @@ define float @log2_f32(float %a) nounwind strictfp {
; RV32IF: # %bb.0:
; RV32IF-NEXT: addi sp, sp, -16
; RV32IF-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IF-NEXT: call log2f@plt
+; RV32IF-NEXT: call log2f
; RV32IF-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32IF-NEXT: addi sp, sp, 16
; RV32IF-NEXT: ret
@@ -670,7 +670,7 @@ define float @log2_f32(float %a) nounwind strictfp {
; RV64IF: # %bb.0:
; RV64IF-NEXT: addi sp, sp, -16
; RV64IF-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
-; RV64IF-NEXT: call log2f@plt
+; RV64IF-NEXT: call log2f
; RV64IF-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64IF-NEXT: addi sp, sp, 16
; RV64IF-NEXT: ret
@@ -679,7 +679,7 @@ define float @log2_f32(float %a) nounwind strictfp {
; RV32IZFINX: # %bb.0:
; RV32IZFINX-NEXT: addi sp, sp, -16
; RV32IZFINX-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IZFINX-NEXT: call log2f@plt
+; RV32IZFINX-NEXT: call log2f
; RV32IZFINX-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32IZFINX-NEXT: addi sp, sp, 16
; RV32IZFINX-NEXT: ret
@@ -688,7 +688,7 @@ define float @log2_f32(float %a) nounwind strictfp {
; RV64IZFINX: # %bb.0:
; RV64IZFINX-NEXT: addi sp, sp, -16
; RV64IZFINX-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
-; RV64IZFINX-NEXT: call log2f@plt
+; RV64IZFINX-NEXT: call log2f
; RV64IZFINX-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64IZFINX-NEXT: addi sp, sp, 16
; RV64IZFINX-NEXT: ret
@@ -697,7 +697,7 @@ define float @log2_f32(float %a) nounwind strictfp {
; RV32I: # %bb.0:
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT: call log2f@plt
+; RV32I-NEXT: call log2f
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
@@ -706,7 +706,7 @@ define float @log2_f32(float %a) nounwind strictfp {
; RV64I: # %bb.0:
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT: call log2f@plt
+; RV64I-NEXT: call log2f
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
@@ -731,7 +731,7 @@ define float @fma_f32(float %a, float %b, float %c) nounwind strictfp {
; RV32I: # %bb.0:
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT: call fmaf@plt
+; RV32I-NEXT: call fmaf
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
@@ -740,7 +740,7 @@ define float @fma_f32(float %a, float %b, float %c) nounwind strictfp {
; RV64I: # %bb.0:
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT: call fmaf@plt
+; RV64I-NEXT: call fmaf
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
@@ -767,9 +767,9 @@ define float @fmuladd_f32(float %a, float %b, float %c) nounwind strictfp {
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT: sw s0, 8(sp) # 4-byte Folded Spill
; RV32I-NEXT: mv s0, a2
-; RV32I-NEXT: call __mulsf3@plt
+; RV32I-NEXT: call __mulsf3
; RV32I-NEXT: mv a1, s0
-; RV32I-NEXT: call __addsf3@plt
+; RV32I-NEXT: call __addsf3
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: lw s0, 8(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
@@ -781,9 +781,9 @@ define float @fmuladd_f32(float %a, float %b, float %c) nounwind strictfp {
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT: sd s0, 0(sp) # 8-byte Folded Spill
; RV64I-NEXT: mv s0, a2
-; RV64I-NEXT: call __mulsf3@plt
+; RV64I-NEXT: call __mulsf3
; RV64I-NEXT: mv a1, s0
-; RV64I-NEXT: call __addsf3@plt
+; RV64I-NEXT: call __addsf3
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: ld s0, 0(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
@@ -799,7 +799,7 @@ define float @minnum_f32(float %a, float %b) nounwind strictfp {
; RV32IF: # %bb.0:
; RV32IF-NEXT: addi sp, sp, -16
; RV32IF-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IF-NEXT: call fminf@plt
+; RV32IF-NEXT: call fminf
; RV32IF-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32IF-NEXT: addi sp, sp, 16
; RV32IF-NEXT: ret
@@ -808,7 +808,7 @@ define float @minnum_f32(float %a, float %b) nounwind strictfp {
; RV64IF: # %bb.0:
; RV64IF-NEXT: addi sp, sp, -16
; RV64IF-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
-; RV64IF-NEXT: call fminf@plt
+; RV64IF-NEXT: call fminf
; RV64IF-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64IF-NEXT: addi sp, sp, 16
; RV64IF-NEXT: ret
@@ -817,7 +817,7 @@ define float @minnum_f32(float %a, float %b) nounwind strictfp {
; RV32IZFINX: # %bb.0:
; RV32IZFINX-NEXT: addi sp, sp, -16
; RV32IZFINX-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IZFINX-NEXT: call fminf@plt
+; RV32IZFINX-NEXT: call fminf
; RV32IZFINX-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32IZFINX-NEXT: addi sp, sp, 16
; RV32IZFINX-NEXT: ret
@@ -826,7 +826,7 @@ define float @minnum_f32(float %a, float %b) nounwind strictfp {
; RV64IZFINX: # %bb.0:
; RV64IZFINX-NEXT: addi sp, sp, -16
; RV64IZFINX-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
-; RV64IZFINX-NEXT: call fminf@plt
+; RV64IZFINX-NEXT: call fminf
; RV64IZFINX-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64IZFINX-NEXT: addi sp, sp, 16
; RV64IZFINX-NEXT: ret
@@ -835,7 +835,7 @@ define float @minnum_f32(float %a, float %b) nounwind strictfp {
; RV32I: # %bb.0:
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT: call fminf@plt
+; RV32I-NEXT: call fminf
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
@@ -844,7 +844,7 @@ define float @minnum_f32(float %a, float %b) nounwind strictfp {
; RV64I: # %bb.0:
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT: call fminf@plt
+; RV64I-NEXT: call fminf
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
@@ -859,7 +859,7 @@ define float @maxnum_f32(float %a, float %b) nounwind strictfp {
; RV32IF: # %bb.0:
; RV32IF-NEXT: addi sp, sp, -16
; RV32IF-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IF-NEXT: call fmaxf@plt
+; RV32IF-NEXT: call fmaxf
; RV32IF-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32IF-NEXT: addi sp, sp, 16
; RV32IF-NEXT: ret
@@ -868,7 +868,7 @@ define float @maxnum_f32(float %a, float %b) nounwind strictfp {
; RV64IF: # %bb.0:
; RV64IF-NEXT: addi sp, sp, -16
; RV64IF-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
-; RV64IF-NEXT: call fmaxf@plt
+; RV64IF-NEXT: call fmaxf
; RV64IF-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64IF-NEXT: addi sp, sp, 16
; RV64IF-NEXT: ret
@@ -877,7 +877,7 @@ define float @maxnum_f32(float %a, float %b) nounwind strictfp {
; RV32IZFINX: # %bb.0:
; RV32IZFINX-NEXT: addi sp, sp, -16
; RV32IZFINX-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IZFINX-NEXT: call fmaxf@plt
+; RV32IZFINX-NEXT: call fmaxf
; RV32IZFINX-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32IZFINX-NEXT: addi sp, sp, 16
; RV32IZFINX-NEXT: ret
@@ -886,7 +886,7 @@ define float @maxnum_f32(float %a, float %b) nounwind strictfp {
; RV64IZFINX: # %bb.0:
; RV64IZFINX-NEXT: addi sp, sp, -16
; RV64IZFINX-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
-; RV64IZFINX-NEXT: call fmaxf@plt
+; RV64IZFINX-NEXT: call fmaxf
; RV64IZFINX-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64IZFINX-NEXT: addi sp, sp, 16
; RV64IZFINX-NEXT: ret
@@ -895,7 +895,7 @@ define float @maxnum_f32(float %a, float %b) nounwind strictfp {
; RV32I: # %bb.0:
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT: call fmaxf@plt
+; RV32I-NEXT: call fmaxf
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
@@ -904,7 +904,7 @@ define float @maxnum_f32(float %a, float %b) nounwind strictfp {
; RV64I: # %bb.0:
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT: call fmaxf@plt
+; RV64I-NEXT: call fmaxf
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
@@ -936,7 +936,7 @@ define float @floor_f32(float %a) nounwind strictfp {
; RV32IF: # %bb.0:
; RV32IF-NEXT: addi sp, sp, -16
; RV32IF-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IF-NEXT: call floorf@plt
+; RV32IF-NEXT: call floorf
; RV32IF-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32IF-NEXT: addi sp, sp, 16
; RV32IF-NEXT: ret
@@ -945,7 +945,7 @@ define float @floor_f32(float %a) nounwind strictfp {
; RV64IF: # %bb.0:
; RV64IF-NEXT: addi sp, sp, -16
; RV64IF-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
-; RV64IF-NEXT: call floorf@plt
+; RV64IF-NEXT: call floorf
; RV64IF-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64IF-NEXT: addi sp, sp, 16
; RV64IF-NEXT: ret
@@ -954,7 +954,7 @@ define float @floor_f32(float %a) nounwind strictfp {
; RV32IZFINX: # %bb.0:
; RV32IZFINX-NEXT: addi sp, sp, -16
; RV32IZFINX-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IZFINX-NEXT: call floorf@plt
+; RV32IZFINX-NEXT: call floorf
; RV32IZFINX-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32IZFINX-NEXT: addi sp, sp, 16
; RV32IZFINX-NEXT: ret
@@ -963,7 +963,7 @@ define float @floor_f32(float %a) nounwind strictfp {
; RV64IZFINX: # %bb.0:
; RV64IZFINX-NEXT: addi sp, sp, -16
; RV64IZFINX-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
-; RV64IZFINX-NEXT: call floorf@plt
+; RV64IZFINX-NEXT: call floorf
; RV64IZFINX-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64IZFINX-NEXT: addi sp, sp, 16
; RV64IZFINX-NEXT: ret
@@ -972,7 +972,7 @@ define float @floor_f32(float %a) nounwind strictfp {
; RV32I: # %bb.0:
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT: call floorf@plt
+; RV32I-NEXT: call floorf
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
@@ -981,7 +981,7 @@ define float @floor_f32(float %a) nounwind strictfp {
; RV64I: # %bb.0:
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT: call floorf@plt
+; RV64I-NEXT: call floorf
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
@@ -996,7 +996,7 @@ define float @ceil_f32(float %a) nounwind strictfp {
; RV32IF: # %bb.0:
; RV32IF-NEXT: addi sp, sp, -16
; RV32IF-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IF-NEXT: call ceilf@plt
+; RV32IF-NEXT: call ceilf
; RV32IF-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32IF-NEXT: addi sp, sp, 16
; RV32IF-NEXT: ret
@@ -1005,7 +1005,7 @@ define float @ceil_f32(float %a) nounwind strictfp {
; RV64IF: # %bb.0:
; RV64IF-NEXT: addi sp, sp, -16
; RV64IF-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
-; RV64IF-NEXT: call ceilf@plt
+; RV64IF-NEXT: call ceilf
; RV64IF-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64IF-NEXT: addi sp, sp, 16
; RV64IF-NEXT: ret
@@ -1014,7 +1014,7 @@ define float @ceil_f32(float %a) nounwind strictfp {
; RV32IZFINX: # %bb.0:
; RV32IZFINX-NEXT: addi sp, sp, -16
; RV32IZFINX-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IZFINX-NEXT: call ceilf@plt
+; RV32IZFINX-NEXT: call ceilf
; RV32IZFINX-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32IZFINX-NEXT: addi sp, sp, 16
; RV32IZFINX-NEXT: ret
@@ -1023,7 +1023,7 @@ define float @ceil_f32(float %a) nounwind strictfp {
; RV64IZFINX: # %bb.0:
; RV64IZFINX-NEXT: addi sp, sp, -16
; RV64IZFINX-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
-; RV64IZFINX-NEXT: call ceilf@plt
+; RV64IZFINX-NEXT: call ceilf
; RV64IZFINX-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64IZFINX-NEXT: addi sp, sp, 16
; RV64IZFINX-NEXT: ret
@@ -1032,7 +1032,7 @@ define float @ceil_f32(float %a) nounwind strictfp {
; RV32I: # %bb.0:
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT: call ceilf@plt
+; RV32I-NEXT: call ceilf
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
@@ -1041,7 +1041,7 @@ define float @ceil_f32(float %a) nounwind strictfp {
; RV64I: # %bb.0:
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT: call ceilf@plt
+; RV64I-NEXT: call ceilf
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
@@ -1056,7 +1056,7 @@ define float @trunc_f32(float %a) nounwind strictfp {
; RV32IF: # %bb.0:
; RV32IF-NEXT: addi sp, sp, -16
; RV32IF-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IF-NEXT: call truncf@plt
+; RV32IF-NEXT: call truncf
; RV32IF-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32IF-NEXT: addi sp, sp, 16
; RV32IF-NEXT: ret
@@ -1065,7 +1065,7 @@ define float @trunc_f32(float %a) nounwind strictfp {
; RV64IF: # %bb.0:
; RV64IF-NEXT: addi sp, sp, -16
; RV64IF-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
-; RV64IF-NEXT: call truncf@plt
+; RV64IF-NEXT: call truncf
; RV64IF-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64IF-NEXT: addi sp, sp, 16
; RV64IF-NEXT: ret
@@ -1074,7 +1074,7 @@ define float @trunc_f32(float %a) nounwind strictfp {
; RV32IZFINX: # %bb.0:
; RV32IZFINX-NEXT: addi sp, sp, -16
; RV32IZFINX-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IZFINX-NEXT: call truncf@plt
+; RV32IZFINX-NEXT: call truncf
; RV32IZFINX-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32IZFINX-NEXT: addi sp, sp, 16
; RV32IZFINX-NEXT: ret
@@ -1083,7 +1083,7 @@ define float @trunc_f32(float %a) nounwind strictfp {
; RV64IZFINX: # %bb.0:
; RV64IZFINX-NEXT: addi sp, sp, -16
; RV64IZFINX-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
-; RV64IZFINX-NEXT: call truncf@plt
+; RV64IZFINX-NEXT: call truncf
; RV64IZFINX-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64IZFINX-NEXT: addi sp, sp, 16
; RV64IZFINX-NEXT: ret
@@ -1092,7 +1092,7 @@ define float @trunc_f32(float %a) nounwind strictfp {
; RV32I: # %bb.0:
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT: call truncf@plt
+; RV32I-NEXT: call truncf
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
@@ -1101,7 +1101,7 @@ define float @trunc_f32(float %a) nounwind strictfp {
; RV64I: # %bb.0:
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT: call truncf@plt
+; RV64I-NEXT: call truncf
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
@@ -1116,7 +1116,7 @@ define float @rint_f32(float %a) nounwind strictfp {
; RV32IF: # %bb.0:
; RV32IF-NEXT: addi sp, sp, -16
; RV32IF-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IF-NEXT: call rintf@plt
+; RV32IF-NEXT: call rintf
; RV32IF-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32IF-NEXT: addi sp, sp, 16
; RV32IF-NEXT: ret
@@ -1125,7 +1125,7 @@ define float @rint_f32(float %a) nounwind strictfp {
; RV64IF: # %bb.0:
; RV64IF-NEXT: addi sp, sp, -16
; RV64IF-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
-; RV64IF-NEXT: call rintf@plt
+; RV64IF-NEXT: call rintf
; RV64IF-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64IF-NEXT: addi sp, sp, 16
; RV64IF-NEXT: ret
@@ -1134,7 +1134,7 @@ define float @rint_f32(float %a) nounwind strictfp {
; RV32IZFINX: # %bb.0:
; RV32IZFINX-NEXT: addi sp, sp, -16
; RV32IZFINX-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IZFINX-NEXT: call rintf@plt
+; RV32IZFINX-NEXT: call rintf
; RV32IZFINX-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32IZFINX-NEXT: addi sp, sp, 16
; RV32IZFINX-NEXT: ret
@@ -1143,7 +1143,7 @@ define float @rint_f32(float %a) nounwind strictfp {
; RV64IZFINX: # %bb.0:
; RV64IZFINX-NEXT: addi sp, sp, -16
; RV64IZFINX-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
-; RV64IZFINX-NEXT: call rintf@plt
+; RV64IZFINX-NEXT: call rintf
; RV64IZFINX-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64IZFINX-NEXT: addi sp, sp, 16
; RV64IZFINX-NEXT: ret
@@ -1152,7 +1152,7 @@ define float @rint_f32(float %a) nounwind strictfp {
; RV32I: # %bb.0:
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT: call rintf@plt
+; RV32I-NEXT: call rintf
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
@@ -1161,7 +1161,7 @@ define float @rint_f32(float %a) nounwind strictfp {
; RV64I: # %bb.0:
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT: call rintf@plt
+; RV64I-NEXT: call rintf
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
@@ -1176,7 +1176,7 @@ define float @nearbyint_f32(float %a) nounwind strictfp {
; RV32IF: # %bb.0:
; RV32IF-NEXT: addi sp, sp, -16
; RV32IF-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IF-NEXT: call nearbyintf@plt
+; RV32IF-NEXT: call nearbyintf
; RV32IF-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32IF-NEXT: addi sp, sp, 16
; RV32IF-NEXT: ret
@@ -1185,7 +1185,7 @@ define float @nearbyint_f32(float %a) nounwind strictfp {
; RV64IF: # %bb.0:
; RV64IF-NEXT: addi sp, sp, -16
; RV64IF-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
-; RV64IF-NEXT: call nearbyintf@plt
+; RV64IF-NEXT: call nearbyintf
; RV64IF-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64IF-NEXT: addi sp, sp, 16
; RV64IF-NEXT: ret
@@ -1194,7 +1194,7 @@ define float @nearbyint_f32(float %a) nounwind strictfp {
; RV32IZFINX: # %bb.0:
; RV32IZFINX-NEXT: addi sp, sp, -16
; RV32IZFINX-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IZFINX-NEXT: call nearbyintf@plt
+; RV32IZFINX-NEXT: call nearbyintf
; RV32IZFINX-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32IZFINX-NEXT: addi sp, sp, 16
; RV32IZFINX-NEXT: ret
@@ -1203,7 +1203,7 @@ define float @nearbyint_f32(float %a) nounwind strictfp {
; RV64IZFINX: # %bb.0:
; RV64IZFINX-NEXT: addi sp, sp, -16
; RV64IZFINX-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
-; RV64IZFINX-NEXT: call nearbyintf@plt
+; RV64IZFINX-NEXT: call nearbyintf
; RV64IZFINX-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64IZFINX-NEXT: addi sp, sp, 16
; RV64IZFINX-NEXT: ret
@@ -1212,7 +1212,7 @@ define float @nearbyint_f32(float %a) nounwind strictfp {
; RV32I: # %bb.0:
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT: call nearbyintf@plt
+; RV32I-NEXT: call nearbyintf
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
@@ -1221,7 +1221,7 @@ define float @nearbyint_f32(float %a) nounwind strictfp {
; RV64I: # %bb.0:
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT: call nearbyintf@plt
+; RV64I-NEXT: call nearbyintf
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
@@ -1236,7 +1236,7 @@ define float @round_f32(float %a) nounwind strictfp {
; RV32IF: # %bb.0:
; RV32IF-NEXT: addi sp, sp, -16
; RV32IF-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IF-NEXT: call roundf@plt
+; RV32IF-NEXT: call roundf
; RV32IF-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32IF-NEXT: addi sp, sp, 16
; RV32IF-NEXT: ret
@@ -1245,7 +1245,7 @@ define float @round_f32(float %a) nounwind strictfp {
; RV64IF: # %bb.0:
; RV64IF-NEXT: addi sp, sp, -16
; RV64IF-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
-; RV64IF-NEXT: call roundf@plt
+; RV64IF-NEXT: call roundf
; RV64IF-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64IF-NEXT: addi sp, sp, 16
; RV64IF-NEXT: ret
@@ -1254,7 +1254,7 @@ define float @round_f32(float %a) nounwind strictfp {
; RV32IZFINX: # %bb.0:
; RV32IZFINX-NEXT: addi sp, sp, -16
; RV32IZFINX-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IZFINX-NEXT: call roundf@plt
+; RV32IZFINX-NEXT: call roundf
; RV32IZFINX-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32IZFINX-NEXT: addi sp, sp, 16
; RV32IZFINX-NEXT: ret
@@ -1263,7 +1263,7 @@ define float @round_f32(float %a) nounwind strictfp {
; RV64IZFINX: # %bb.0:
; RV64IZFINX-NEXT: addi sp, sp, -16
; RV64IZFINX-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
-; RV64IZFINX-NEXT: call roundf@plt
+; RV64IZFINX-NEXT: call roundf
; RV64IZFINX-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64IZFINX-NEXT: addi sp, sp, 16
; RV64IZFINX-NEXT: ret
@@ -1272,7 +1272,7 @@ define float @round_f32(float %a) nounwind strictfp {
; RV32I: # %bb.0:
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT: call roundf@plt
+; RV32I-NEXT: call roundf
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
@@ -1281,7 +1281,7 @@ define float @round_f32(float %a) nounwind strictfp {
; RV64I: # %bb.0:
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT: call roundf@plt
+; RV64I-NEXT: call roundf
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
@@ -1296,7 +1296,7 @@ define float @roundeven_f32(float %a) nounwind strictfp {
; RV32IF: # %bb.0:
; RV32IF-NEXT: addi sp, sp, -16
; RV32IF-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IF-NEXT: call roundevenf@plt
+; RV32IF-NEXT: call roundevenf
; RV32IF-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32IF-NEXT: addi sp, sp, 16
; RV32IF-NEXT: ret
@@ -1305,7 +1305,7 @@ define float @roundeven_f32(float %a) nounwind strictfp {
; RV64IF: # %bb.0:
; RV64IF-NEXT: addi sp, sp, -16
; RV64IF-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
-; RV64IF-NEXT: call roundevenf@plt
+; RV64IF-NEXT: call roundevenf
; RV64IF-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64IF-NEXT: addi sp, sp, 16
; RV64IF-NEXT: ret
@@ -1314,7 +1314,7 @@ define float @roundeven_f32(float %a) nounwind strictfp {
; RV32IZFINX: # %bb.0:
; RV32IZFINX-NEXT: addi sp, sp, -16
; RV32IZFINX-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IZFINX-NEXT: call roundevenf@plt
+; RV32IZFINX-NEXT: call roundevenf
; RV32IZFINX-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32IZFINX-NEXT: addi sp, sp, 16
; RV32IZFINX-NEXT: ret
@@ -1323,7 +1323,7 @@ define float @roundeven_f32(float %a) nounwind strictfp {
; RV64IZFINX: # %bb.0:
; RV64IZFINX-NEXT: addi sp, sp, -16
; RV64IZFINX-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
-; RV64IZFINX-NEXT: call roundevenf@plt
+; RV64IZFINX-NEXT: call roundevenf
; RV64IZFINX-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64IZFINX-NEXT: addi sp, sp, 16
; RV64IZFINX-NEXT: ret
@@ -1332,7 +1332,7 @@ define float @roundeven_f32(float %a) nounwind strictfp {
; RV32I: # %bb.0:
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT: call roundevenf@plt
+; RV32I-NEXT: call roundevenf
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
@@ -1341,7 +1341,7 @@ define float @roundeven_f32(float %a) nounwind strictfp {
; RV64I: # %bb.0:
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT: call roundevenf@plt
+; RV64I-NEXT: call roundevenf
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
@@ -1376,7 +1376,7 @@ define iXLen @lrint_f32(float %a) nounwind strictfp {
; RV32I: # %bb.0:
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT: call lrintf@plt
+; RV32I-NEXT: call lrintf
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
@@ -1385,7 +1385,7 @@ define iXLen @lrint_f32(float %a) nounwind strictfp {
; RV64I: # %bb.0:
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT: call lrintf@plt
+; RV64I-NEXT: call lrintf
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
@@ -1420,7 +1420,7 @@ define iXLen @lround_f32(float %a) nounwind strictfp {
; RV32I: # %bb.0:
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT: call lroundf@plt
+; RV32I-NEXT: call lroundf
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
@@ -1429,7 +1429,7 @@ define iXLen @lround_f32(float %a) nounwind strictfp {
; RV64I: # %bb.0:
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT: call lroundf@plt
+; RV64I-NEXT: call lroundf
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
@@ -1444,7 +1444,7 @@ define i64 @llrint_f32(float %a) nounwind strictfp {
; RV32IF: # %bb.0:
; RV32IF-NEXT: addi sp, sp, -16
; RV32IF-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IF-NEXT: call llrintf@plt
+; RV32IF-NEXT: call llrintf
; RV32IF-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32IF-NEXT: addi sp, sp, 16
; RV32IF-NEXT: ret
@@ -1458,7 +1458,7 @@ define i64 @llrint_f32(float %a) nounwind strictfp {
; RV32IZFINX: # %bb.0:
; RV32IZFINX-NEXT: addi sp, sp, -16
; RV32IZFINX-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IZFINX-NEXT: call llrintf@plt
+; RV32IZFINX-NEXT: call llrintf
; RV32IZFINX-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32IZFINX-NEXT: addi sp, sp, 16
; RV32IZFINX-NEXT: ret
@@ -1472,7 +1472,7 @@ define i64 @llrint_f32(float %a) nounwind strictfp {
; RV32I: # %bb.0:
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT: call llrintf@plt
+; RV32I-NEXT: call llrintf
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
@@ -1481,7 +1481,7 @@ define i64 @llrint_f32(float %a) nounwind strictfp {
; RV64I: # %bb.0:
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT: call llrintf@plt
+; RV64I-NEXT: call llrintf
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
@@ -1496,7 +1496,7 @@ define i64 @llround_f32(float %a) nounwind strictfp {
; RV32IF: # %bb.0:
; RV32IF-NEXT: addi sp, sp, -16
; RV32IF-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IF-NEXT: call llroundf@plt
+; RV32IF-NEXT: call llroundf
; RV32IF-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32IF-NEXT: addi sp, sp, 16
; RV32IF-NEXT: ret
@@ -1510,7 +1510,7 @@ define i64 @llround_f32(float %a) nounwind strictfp {
; RV32IZFINX: # %bb.0:
; RV32IZFINX-NEXT: addi sp, sp, -16
; RV32IZFINX-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IZFINX-NEXT: call llroundf@plt
+; RV32IZFINX-NEXT: call llroundf
; RV32IZFINX-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32IZFINX-NEXT: addi sp, sp, 16
; RV32IZFINX-NEXT: ret
@@ -1524,7 +1524,7 @@ define i64 @llround_f32(float %a) nounwind strictfp {
; RV32I: # %bb.0:
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT: call llroundf@plt
+; RV32I-NEXT: call llroundf
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
@@ -1533,7 +1533,7 @@ define i64 @llround_f32(float %a) nounwind strictfp {
; RV64I: # %bb.0:
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT: call llroundf@plt
+; RV64I-NEXT: call llroundf
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
diff --git a/llvm/test/CodeGen/RISCV/float-intrinsics.ll b/llvm/test/CodeGen/RISCV/float-intrinsics.ll
index e7ec2fdaf93f..a00d82942cab 100644
--- a/llvm/test/CodeGen/RISCV/float-intrinsics.ll
+++ b/llvm/test/CodeGen/RISCV/float-intrinsics.ll
@@ -49,7 +49,7 @@ define float @sqrt_f32(float %a) nounwind {
; RV32I: # %bb.0:
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT: call sqrtf@plt
+; RV32I-NEXT: call sqrtf
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
@@ -58,7 +58,7 @@ define float @sqrt_f32(float %a) nounwind {
; RV64I: # %bb.0:
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT: call sqrtf@plt
+; RV64I-NEXT: call sqrtf
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
@@ -71,18 +71,18 @@ declare float @llvm.powi.f32.i32(float, i32)
define float @powi_f32(float %a, i32 %b) nounwind {
; RV32IF-LABEL: powi_f32:
; RV32IF: # %bb.0:
-; RV32IF-NEXT: tail __powisf2@plt
+; RV32IF-NEXT: tail __powisf2
;
; RV32IZFINX-LABEL: powi_f32:
; RV32IZFINX: # %bb.0:
-; RV32IZFINX-NEXT: tail __powisf2@plt
+; RV32IZFINX-NEXT: tail __powisf2
;
; RV64IF-LABEL: powi_f32:
; RV64IF: # %bb.0:
; RV64IF-NEXT: addi sp, sp, -16
; RV64IF-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64IF-NEXT: sext.w a0, a0
-; RV64IF-NEXT: call __powisf2@plt
+; RV64IF-NEXT: call __powisf2
; RV64IF-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64IF-NEXT: addi sp, sp, 16
; RV64IF-NEXT: ret
@@ -92,7 +92,7 @@ define float @powi_f32(float %a, i32 %b) nounwind {
; RV64IZFINX-NEXT: addi sp, sp, -16
; RV64IZFINX-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64IZFINX-NEXT: sext.w a1, a1
-; RV64IZFINX-NEXT: call __powisf2@plt
+; RV64IZFINX-NEXT: call __powisf2
; RV64IZFINX-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64IZFINX-NEXT: addi sp, sp, 16
; RV64IZFINX-NEXT: ret
@@ -101,7 +101,7 @@ define float @powi_f32(float %a, i32 %b) nounwind {
; RV32I: # %bb.0:
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT: call __powisf2@plt
+; RV32I-NEXT: call __powisf2
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
@@ -111,7 +111,7 @@ define float @powi_f32(float %a, i32 %b) nounwind {
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT: sext.w a1, a1
-; RV64I-NEXT: call __powisf2@plt
+; RV64I-NEXT: call __powisf2
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
@@ -124,21 +124,21 @@ declare float @llvm.sin.f32(float)
define float @sin_f32(float %a) nounwind {
; RV32IF-LABEL: sin_f32:
; RV32IF: # %bb.0:
-; RV32IF-NEXT: tail sinf@plt
+; RV32IF-NEXT: tail sinf
;
; RV32IZFINX-LABEL: sin_f32:
; RV32IZFINX: # %bb.0:
-; RV32IZFINX-NEXT: tail sinf@plt
+; RV32IZFINX-NEXT: tail sinf
;
; RV64IF-LABEL: sin_f32:
; RV64IF: # %bb.0:
-; RV64IF-NEXT: tail sinf@plt
+; RV64IF-NEXT: tail sinf
;
; RV64IZFINX-LABEL: sin_f32:
; RV64IZFINX: # %bb.0:
; RV64IZFINX-NEXT: addi sp, sp, -16
; RV64IZFINX-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
-; RV64IZFINX-NEXT: call sinf@plt
+; RV64IZFINX-NEXT: call sinf
; RV64IZFINX-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64IZFINX-NEXT: addi sp, sp, 16
; RV64IZFINX-NEXT: ret
@@ -147,7 +147,7 @@ define float @sin_f32(float %a) nounwind {
; RV32I: # %bb.0:
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT: call sinf@plt
+; RV32I-NEXT: call sinf
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
@@ -156,7 +156,7 @@ define float @sin_f32(float %a) nounwind {
; RV64I: # %bb.0:
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT: call sinf@plt
+; RV64I-NEXT: call sinf
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
@@ -169,21 +169,21 @@ declare float @llvm.cos.f32(float)
define float @cos_f32(float %a) nounwind {
; RV32IF-LABEL: cos_f32:
; RV32IF: # %bb.0:
-; RV32IF-NEXT: tail cosf@plt
+; RV32IF-NEXT: tail cosf
;
; RV32IZFINX-LABEL: cos_f32:
; RV32IZFINX: # %bb.0:
-; RV32IZFINX-NEXT: tail cosf@plt
+; RV32IZFINX-NEXT: tail cosf
;
; RV64IF-LABEL: cos_f32:
; RV64IF: # %bb.0:
-; RV64IF-NEXT: tail cosf@plt
+; RV64IF-NEXT: tail cosf
;
; RV64IZFINX-LABEL: cos_f32:
; RV64IZFINX: # %bb.0:
; RV64IZFINX-NEXT: addi sp, sp, -16
; RV64IZFINX-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
-; RV64IZFINX-NEXT: call cosf@plt
+; RV64IZFINX-NEXT: call cosf
; RV64IZFINX-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64IZFINX-NEXT: addi sp, sp, 16
; RV64IZFINX-NEXT: ret
@@ -192,7 +192,7 @@ define float @cos_f32(float %a) nounwind {
; RV32I: # %bb.0:
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT: call cosf@plt
+; RV32I-NEXT: call cosf
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
@@ -201,7 +201,7 @@ define float @cos_f32(float %a) nounwind {
; RV64I: # %bb.0:
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT: call cosf@plt
+; RV64I-NEXT: call cosf
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
@@ -218,10 +218,10 @@ define float @sincos_f32(float %a) nounwind {
; RV32IF-NEXT: fsw fs0, 8(sp) # 4-byte Folded Spill
; RV32IF-NEXT: fsw fs1, 4(sp) # 4-byte Folded Spill
; RV32IF-NEXT: fmv.s fs0, fa0
-; RV32IF-NEXT: call sinf@plt
+; RV32IF-NEXT: call sinf
; RV32IF-NEXT: fmv.s fs1, fa0
; RV32IF-NEXT: fmv.s fa0, fs0
-; RV32IF-NEXT: call cosf@plt
+; RV32IF-NEXT: call cosf
; RV32IF-NEXT: fadd.s fa0, fs1, fa0
; RV32IF-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32IF-NEXT: flw fs0, 8(sp) # 4-byte Folded Reload
@@ -236,10 +236,10 @@ define float @sincos_f32(float %a) nounwind {
; RV32IZFINX-NEXT: sw s0, 8(sp) # 4-byte Folded Spill
; RV32IZFINX-NEXT: sw s1, 4(sp) # 4-byte Folded Spill
; RV32IZFINX-NEXT: mv s0, a0
-; RV32IZFINX-NEXT: call sinf@plt
+; RV32IZFINX-NEXT: call sinf
; RV32IZFINX-NEXT: mv s1, a0
; RV32IZFINX-NEXT: mv a0, s0
-; RV32IZFINX-NEXT: call cosf@plt
+; RV32IZFINX-NEXT: call cosf
; RV32IZFINX-NEXT: fadd.s a0, s1, a0
; RV32IZFINX-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32IZFINX-NEXT: lw s0, 8(sp) # 4-byte Folded Reload
@@ -254,10 +254,10 @@ define float @sincos_f32(float %a) nounwind {
; RV64IZFINX-NEXT: sd s0, 16(sp) # 8-byte Folded Spill
; RV64IZFINX-NEXT: sd s1, 8(sp) # 8-byte Folded Spill
; RV64IZFINX-NEXT: mv s0, a0
-; RV64IZFINX-NEXT: call sinf@plt
+; RV64IZFINX-NEXT: call sinf
; RV64IZFINX-NEXT: mv s1, a0
; RV64IZFINX-NEXT: mv a0, s0
-; RV64IZFINX-NEXT: call cosf@plt
+; RV64IZFINX-NEXT: call cosf
; RV64IZFINX-NEXT: fadd.s a0, s1, a0
; RV64IZFINX-NEXT: ld ra, 24(sp) # 8-byte Folded Reload
; RV64IZFINX-NEXT: ld s0, 16(sp) # 8-byte Folded Reload
@@ -272,13 +272,13 @@ define float @sincos_f32(float %a) nounwind {
; RV32I-NEXT: sw s0, 8(sp) # 4-byte Folded Spill
; RV32I-NEXT: sw s1, 4(sp) # 4-byte Folded Spill
; RV32I-NEXT: mv s0, a0
-; RV32I-NEXT: call sinf@plt
+; RV32I-NEXT: call sinf
; RV32I-NEXT: mv s1, a0
; RV32I-NEXT: mv a0, s0
-; RV32I-NEXT: call cosf@plt
+; RV32I-NEXT: call cosf
; RV32I-NEXT: mv a1, a0
; RV32I-NEXT: mv a0, s1
-; RV32I-NEXT: call __addsf3@plt
+; RV32I-NEXT: call __addsf3
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: lw s0, 8(sp) # 4-byte Folded Reload
; RV32I-NEXT: lw s1, 4(sp) # 4-byte Folded Reload
@@ -292,13 +292,13 @@ define float @sincos_f32(float %a) nounwind {
; RV64I-NEXT: sd s0, 16(sp) # 8-byte Folded Spill
; RV64I-NEXT: sd s1, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT: mv s0, a0
-; RV64I-NEXT: call sinf@plt
+; RV64I-NEXT: call sinf
; RV64I-NEXT: mv s1, a0
; RV64I-NEXT: mv a0, s0
-; RV64I-NEXT: call cosf@plt
+; RV64I-NEXT: call cosf
; RV64I-NEXT: mv a1, a0
; RV64I-NEXT: mv a0, s1
-; RV64I-NEXT: call __addsf3@plt
+; RV64I-NEXT: call __addsf3
; RV64I-NEXT: ld ra, 24(sp) # 8-byte Folded Reload
; RV64I-NEXT: ld s0, 16(sp) # 8-byte Folded Reload
; RV64I-NEXT: ld s1, 8(sp) # 8-byte Folded Reload
@@ -315,21 +315,21 @@ declare float @llvm.pow.f32(float, float)
define float @pow_f32(float %a, float %b) nounwind {
; RV32IF-LABEL: pow_f32:
; RV32IF: # %bb.0:
-; RV32IF-NEXT: tail powf@plt
+; RV32IF-NEXT: tail powf
;
; RV32IZFINX-LABEL: pow_f32:
; RV32IZFINX: # %bb.0:
-; RV32IZFINX-NEXT: tail powf@plt
+; RV32IZFINX-NEXT: tail powf
;
; RV64IF-LABEL: pow_f32:
; RV64IF: # %bb.0:
-; RV64IF-NEXT: tail powf@plt
+; RV64IF-NEXT: tail powf
;
; RV64IZFINX-LABEL: pow_f32:
; RV64IZFINX: # %bb.0:
; RV64IZFINX-NEXT: addi sp, sp, -16
; RV64IZFINX-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
-; RV64IZFINX-NEXT: call powf@plt
+; RV64IZFINX-NEXT: call powf
; RV64IZFINX-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64IZFINX-NEXT: addi sp, sp, 16
; RV64IZFINX-NEXT: ret
@@ -338,7 +338,7 @@ define float @pow_f32(float %a, float %b) nounwind {
; RV32I: # %bb.0:
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT: call powf@plt
+; RV32I-NEXT: call powf
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
@@ -347,7 +347,7 @@ define float @pow_f32(float %a, float %b) nounwind {
; RV64I: # %bb.0:
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT: call powf@plt
+; RV64I-NEXT: call powf
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
@@ -360,21 +360,21 @@ declare float @llvm.exp.f32(float)
define float @exp_f32(float %a) nounwind {
; RV32IF-LABEL: exp_f32:
; RV32IF: # %bb.0:
-; RV32IF-NEXT: tail expf@plt
+; RV32IF-NEXT: tail expf
;
; RV32IZFINX-LABEL: exp_f32:
; RV32IZFINX: # %bb.0:
-; RV32IZFINX-NEXT: tail expf@plt
+; RV32IZFINX-NEXT: tail expf
;
; RV64IF-LABEL: exp_f32:
; RV64IF: # %bb.0:
-; RV64IF-NEXT: tail expf@plt
+; RV64IF-NEXT: tail expf
;
; RV64IZFINX-LABEL: exp_f32:
; RV64IZFINX: # %bb.0:
; RV64IZFINX-NEXT: addi sp, sp, -16
; RV64IZFINX-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
-; RV64IZFINX-NEXT: call expf@plt
+; RV64IZFINX-NEXT: call expf
; RV64IZFINX-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64IZFINX-NEXT: addi sp, sp, 16
; RV64IZFINX-NEXT: ret
@@ -383,7 +383,7 @@ define float @exp_f32(float %a) nounwind {
; RV32I: # %bb.0:
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT: call expf@plt
+; RV32I-NEXT: call expf
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
@@ -392,7 +392,7 @@ define float @exp_f32(float %a) nounwind {
; RV64I: # %bb.0:
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT: call expf@plt
+; RV64I-NEXT: call expf
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
@@ -405,21 +405,21 @@ declare float @llvm.exp2.f32(float)
define float @exp2_f32(float %a) nounwind {
; RV32IF-LABEL: exp2_f32:
; RV32IF: # %bb.0:
-; RV32IF-NEXT: tail exp2f@plt
+; RV32IF-NEXT: tail exp2f
;
; RV32IZFINX-LABEL: exp2_f32:
; RV32IZFINX: # %bb.0:
-; RV32IZFINX-NEXT: tail exp2f@plt
+; RV32IZFINX-NEXT: tail exp2f
;
; RV64IF-LABEL: exp2_f32:
; RV64IF: # %bb.0:
-; RV64IF-NEXT: tail exp2f@plt
+; RV64IF-NEXT: tail exp2f
;
; RV64IZFINX-LABEL: exp2_f32:
; RV64IZFINX: # %bb.0:
; RV64IZFINX-NEXT: addi sp, sp, -16
; RV64IZFINX-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
-; RV64IZFINX-NEXT: call exp2f@plt
+; RV64IZFINX-NEXT: call exp2f
; RV64IZFINX-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64IZFINX-NEXT: addi sp, sp, 16
; RV64IZFINX-NEXT: ret
@@ -428,7 +428,7 @@ define float @exp2_f32(float %a) nounwind {
; RV32I: # %bb.0:
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT: call exp2f@plt
+; RV32I-NEXT: call exp2f
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
@@ -437,7 +437,7 @@ define float @exp2_f32(float %a) nounwind {
; RV64I: # %bb.0:
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT: call exp2f@plt
+; RV64I-NEXT: call exp2f
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
@@ -450,21 +450,21 @@ declare float @llvm.log.f32(float)
define float @log_f32(float %a) nounwind {
; RV32IF-LABEL: log_f32:
; RV32IF: # %bb.0:
-; RV32IF-NEXT: tail logf@plt
+; RV32IF-NEXT: tail logf
;
; RV32IZFINX-LABEL: log_f32:
; RV32IZFINX: # %bb.0:
-; RV32IZFINX-NEXT: tail logf@plt
+; RV32IZFINX-NEXT: tail logf
;
; RV64IF-LABEL: log_f32:
; RV64IF: # %bb.0:
-; RV64IF-NEXT: tail logf@plt
+; RV64IF-NEXT: tail logf
;
; RV64IZFINX-LABEL: log_f32:
; RV64IZFINX: # %bb.0:
; RV64IZFINX-NEXT: addi sp, sp, -16
; RV64IZFINX-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
-; RV64IZFINX-NEXT: call logf@plt
+; RV64IZFINX-NEXT: call logf
; RV64IZFINX-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64IZFINX-NEXT: addi sp, sp, 16
; RV64IZFINX-NEXT: ret
@@ -473,7 +473,7 @@ define float @log_f32(float %a) nounwind {
; RV32I: # %bb.0:
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT: call logf@plt
+; RV32I-NEXT: call logf
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
@@ -482,7 +482,7 @@ define float @log_f32(float %a) nounwind {
; RV64I: # %bb.0:
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT: call logf@plt
+; RV64I-NEXT: call logf
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
@@ -495,21 +495,21 @@ declare float @llvm.log10.f32(float)
define float @log10_f32(float %a) nounwind {
; RV32IF-LABEL: log10_f32:
; RV32IF: # %bb.0:
-; RV32IF-NEXT: tail log10f@plt
+; RV32IF-NEXT: tail log10f
;
; RV32IZFINX-LABEL: log10_f32:
; RV32IZFINX: # %bb.0:
-; RV32IZFINX-NEXT: tail log10f@plt
+; RV32IZFINX-NEXT: tail log10f
;
; RV64IF-LABEL: log10_f32:
; RV64IF: # %bb.0:
-; RV64IF-NEXT: tail log10f@plt
+; RV64IF-NEXT: tail log10f
;
; RV64IZFINX-LABEL: log10_f32:
; RV64IZFINX: # %bb.0:
; RV64IZFINX-NEXT: addi sp, sp, -16
; RV64IZFINX-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
-; RV64IZFINX-NEXT: call log10f@plt
+; RV64IZFINX-NEXT: call log10f
; RV64IZFINX-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64IZFINX-NEXT: addi sp, sp, 16
; RV64IZFINX-NEXT: ret
@@ -518,7 +518,7 @@ define float @log10_f32(float %a) nounwind {
; RV32I: # %bb.0:
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT: call log10f@plt
+; RV32I-NEXT: call log10f
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
@@ -527,7 +527,7 @@ define float @log10_f32(float %a) nounwind {
; RV64I: # %bb.0:
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT: call log10f@plt
+; RV64I-NEXT: call log10f
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
@@ -540,21 +540,21 @@ declare float @llvm.log2.f32(float)
define float @log2_f32(float %a) nounwind {
; RV32IF-LABEL: log2_f32:
; RV32IF: # %bb.0:
-; RV32IF-NEXT: tail log2f@plt
+; RV32IF-NEXT: tail log2f
;
; RV32IZFINX-LABEL: log2_f32:
; RV32IZFINX: # %bb.0:
-; RV32IZFINX-NEXT: tail log2f@plt
+; RV32IZFINX-NEXT: tail log2f
;
; RV64IF-LABEL: log2_f32:
; RV64IF: # %bb.0:
-; RV64IF-NEXT: tail log2f@plt
+; RV64IF-NEXT: tail log2f
;
; RV64IZFINX-LABEL: log2_f32:
; RV64IZFINX: # %bb.0:
; RV64IZFINX-NEXT: addi sp, sp, -16
; RV64IZFINX-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
-; RV64IZFINX-NEXT: call log2f@plt
+; RV64IZFINX-NEXT: call log2f
; RV64IZFINX-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64IZFINX-NEXT: addi sp, sp, 16
; RV64IZFINX-NEXT: ret
@@ -563,7 +563,7 @@ define float @log2_f32(float %a) nounwind {
; RV32I: # %bb.0:
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT: call log2f@plt
+; RV32I-NEXT: call log2f
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
@@ -572,7 +572,7 @@ define float @log2_f32(float %a) nounwind {
; RV64I: # %bb.0:
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT: call log2f@plt
+; RV64I-NEXT: call log2f
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
@@ -607,7 +607,7 @@ define float @fma_f32(float %a, float %b, float %c) nounwind {
; RV32I: # %bb.0:
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT: call fmaf@plt
+; RV32I-NEXT: call fmaf
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
@@ -616,7 +616,7 @@ define float @fma_f32(float %a, float %b, float %c) nounwind {
; RV64I: # %bb.0:
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT: call fmaf@plt
+; RV64I-NEXT: call fmaf
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
@@ -653,9 +653,9 @@ define float @fmuladd_f32(float %a, float %b, float %c) nounwind {
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT: sw s0, 8(sp) # 4-byte Folded Spill
; RV32I-NEXT: mv s0, a2
-; RV32I-NEXT: call __mulsf3@plt
+; RV32I-NEXT: call __mulsf3
; RV32I-NEXT: mv a1, s0
-; RV32I-NEXT: call __addsf3@plt
+; RV32I-NEXT: call __addsf3
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: lw s0, 8(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
@@ -667,9 +667,9 @@ define float @fmuladd_f32(float %a, float %b, float %c) nounwind {
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT: sd s0, 0(sp) # 8-byte Folded Spill
; RV64I-NEXT: mv s0, a2
-; RV64I-NEXT: call __mulsf3@plt
+; RV64I-NEXT: call __mulsf3
; RV64I-NEXT: mv a1, s0
-; RV64I-NEXT: call __addsf3@plt
+; RV64I-NEXT: call __addsf3
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: ld s0, 0(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
@@ -745,7 +745,7 @@ define float @minnum_f32(float %a, float %b) nounwind {
; RV32I: # %bb.0:
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT: call fminf@plt
+; RV32I-NEXT: call fminf
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
@@ -754,7 +754,7 @@ define float @minnum_f32(float %a, float %b) nounwind {
; RV64I: # %bb.0:
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT: call fminf@plt
+; RV64I-NEXT: call fminf
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
@@ -789,7 +789,7 @@ define float @maxnum_f32(float %a, float %b) nounwind {
; RV32I: # %bb.0:
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT: call fmaxf@plt
+; RV32I-NEXT: call fmaxf
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
@@ -798,7 +798,7 @@ define float @maxnum_f32(float %a, float %b) nounwind {
; RV64I: # %bb.0:
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT: call fmaxf@plt
+; RV64I-NEXT: call fmaxf
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
@@ -928,7 +928,7 @@ define float @floor_f32(float %a) nounwind {
; RV32I: # %bb.0:
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT: call floorf@plt
+; RV32I-NEXT: call floorf
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
@@ -937,7 +937,7 @@ define float @floor_f32(float %a) nounwind {
; RV64I: # %bb.0:
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT: call floorf@plt
+; RV64I-NEXT: call floorf
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
@@ -1006,7 +1006,7 @@ define float @ceil_f32(float %a) nounwind {
; RV32I: # %bb.0:
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT: call ceilf@plt
+; RV32I-NEXT: call ceilf
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
@@ -1015,7 +1015,7 @@ define float @ceil_f32(float %a) nounwind {
; RV64I: # %bb.0:
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT: call ceilf@plt
+; RV64I-NEXT: call ceilf
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
@@ -1084,7 +1084,7 @@ define float @trunc_f32(float %a) nounwind {
; RV32I: # %bb.0:
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT: call truncf@plt
+; RV32I-NEXT: call truncf
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
@@ -1093,7 +1093,7 @@ define float @trunc_f32(float %a) nounwind {
; RV64I: # %bb.0:
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT: call truncf@plt
+; RV64I-NEXT: call truncf
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
@@ -1162,7 +1162,7 @@ define float @rint_f32(float %a) nounwind {
; RV32I: # %bb.0:
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT: call rintf@plt
+; RV32I-NEXT: call rintf
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
@@ -1171,7 +1171,7 @@ define float @rint_f32(float %a) nounwind {
; RV64I: # %bb.0:
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT: call rintf@plt
+; RV64I-NEXT: call rintf
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
@@ -1184,21 +1184,21 @@ declare float @llvm.nearbyint.f32(float)
define float @nearbyint_f32(float %a) nounwind {
; RV32IF-LABEL: nearbyint_f32:
; RV32IF: # %bb.0:
-; RV32IF-NEXT: tail nearbyintf@plt
+; RV32IF-NEXT: tail nearbyintf
;
; RV32IZFINX-LABEL: nearbyint_f32:
; RV32IZFINX: # %bb.0:
-; RV32IZFINX-NEXT: tail nearbyintf@plt
+; RV32IZFINX-NEXT: tail nearbyintf
;
; RV64IF-LABEL: nearbyint_f32:
; RV64IF: # %bb.0:
-; RV64IF-NEXT: tail nearbyintf@plt
+; RV64IF-NEXT: tail nearbyintf
;
; RV64IZFINX-LABEL: nearbyint_f32:
; RV64IZFINX: # %bb.0:
; RV64IZFINX-NEXT: addi sp, sp, -16
; RV64IZFINX-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
-; RV64IZFINX-NEXT: call nearbyintf@plt
+; RV64IZFINX-NEXT: call nearbyintf
; RV64IZFINX-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64IZFINX-NEXT: addi sp, sp, 16
; RV64IZFINX-NEXT: ret
@@ -1207,7 +1207,7 @@ define float @nearbyint_f32(float %a) nounwind {
; RV32I: # %bb.0:
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT: call nearbyintf@plt
+; RV32I-NEXT: call nearbyintf
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
@@ -1216,7 +1216,7 @@ define float @nearbyint_f32(float %a) nounwind {
; RV64I: # %bb.0:
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT: call nearbyintf@plt
+; RV64I-NEXT: call nearbyintf
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
@@ -1285,7 +1285,7 @@ define float @round_f32(float %a) nounwind {
; RV32I: # %bb.0:
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT: call roundf@plt
+; RV32I-NEXT: call roundf
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
@@ -1294,7 +1294,7 @@ define float @round_f32(float %a) nounwind {
; RV64I: # %bb.0:
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT: call roundf@plt
+; RV64I-NEXT: call roundf
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
@@ -1363,7 +1363,7 @@ define float @roundeven_f32(float %a) nounwind {
; RV32I: # %bb.0:
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT: call roundevenf@plt
+; RV32I-NEXT: call roundevenf
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
@@ -1372,7 +1372,7 @@ define float @roundeven_f32(float %a) nounwind {
; RV64I: # %bb.0:
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT: call roundevenf@plt
+; RV64I-NEXT: call roundevenf
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
@@ -1407,7 +1407,7 @@ define iXLen @lrint_f32(float %a) nounwind {
; RV32I: # %bb.0:
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT: call lrintf@plt
+; RV32I-NEXT: call lrintf
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
@@ -1416,7 +1416,7 @@ define iXLen @lrint_f32(float %a) nounwind {
; RV64I: # %bb.0:
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT: call lrintf@plt
+; RV64I-NEXT: call lrintf
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
@@ -1452,7 +1452,7 @@ define iXLen @lround_f32(float %a) nounwind {
; RV32I: # %bb.0:
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT: call lroundf@plt
+; RV32I-NEXT: call lroundf
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
@@ -1461,7 +1461,7 @@ define iXLen @lround_f32(float %a) nounwind {
; RV64I: # %bb.0:
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT: call lroundf@plt
+; RV64I-NEXT: call lroundf
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
@@ -1496,7 +1496,7 @@ define i32 @lround_i32_f32(float %a) nounwind {
; RV32I: # %bb.0:
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT: call lroundf@plt
+; RV32I-NEXT: call lroundf
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
@@ -1505,7 +1505,7 @@ define i32 @lround_i32_f32(float %a) nounwind {
; RV64I: # %bb.0:
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT: call lroundf@plt
+; RV64I-NEXT: call lroundf
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
@@ -1520,7 +1520,7 @@ define i64 @llrint_f32(float %a) nounwind {
; RV32IF: # %bb.0:
; RV32IF-NEXT: addi sp, sp, -16
; RV32IF-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IF-NEXT: call llrintf@plt
+; RV32IF-NEXT: call llrintf
; RV32IF-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32IF-NEXT: addi sp, sp, 16
; RV32IF-NEXT: ret
@@ -1529,7 +1529,7 @@ define i64 @llrint_f32(float %a) nounwind {
; RV32IZFINX: # %bb.0:
; RV32IZFINX-NEXT: addi sp, sp, -16
; RV32IZFINX-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IZFINX-NEXT: call llrintf@plt
+; RV32IZFINX-NEXT: call llrintf
; RV32IZFINX-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32IZFINX-NEXT: addi sp, sp, 16
; RV32IZFINX-NEXT: ret
@@ -1548,7 +1548,7 @@ define i64 @llrint_f32(float %a) nounwind {
; RV32I: # %bb.0:
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT: call llrintf@plt
+; RV32I-NEXT: call llrintf
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
@@ -1557,7 +1557,7 @@ define i64 @llrint_f32(float %a) nounwind {
; RV64I: # %bb.0:
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT: call llrintf@plt
+; RV64I-NEXT: call llrintf
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
@@ -1572,7 +1572,7 @@ define i64 @llround_f32(float %a) nounwind {
; RV32IF: # %bb.0:
; RV32IF-NEXT: addi sp, sp, -16
; RV32IF-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IF-NEXT: call llroundf@plt
+; RV32IF-NEXT: call llroundf
; RV32IF-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32IF-NEXT: addi sp, sp, 16
; RV32IF-NEXT: ret
@@ -1581,7 +1581,7 @@ define i64 @llround_f32(float %a) nounwind {
; RV32IZFINX: # %bb.0:
; RV32IZFINX-NEXT: addi sp, sp, -16
; RV32IZFINX-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IZFINX-NEXT: call llroundf@plt
+; RV32IZFINX-NEXT: call llroundf
; RV32IZFINX-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32IZFINX-NEXT: addi sp, sp, 16
; RV32IZFINX-NEXT: ret
@@ -1600,7 +1600,7 @@ define i64 @llround_f32(float %a) nounwind {
; RV32I: # %bb.0:
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT: call llroundf@plt
+; RV32I-NEXT: call llroundf
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
@@ -1609,7 +1609,7 @@ define i64 @llround_f32(float %a) nounwind {
; RV64I: # %bb.0:
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT: call llroundf@plt
+; RV64I-NEXT: call llroundf
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
diff --git a/llvm/test/CodeGen/RISCV/float-mem.ll b/llvm/test/CodeGen/RISCV/float-mem.ll
index b5d5f8e7c7e6..3779d39a753e 100644
--- a/llvm/test/CodeGen/RISCV/float-mem.ll
+++ b/llvm/test/CodeGen/RISCV/float-mem.ll
@@ -142,7 +142,7 @@ define dso_local float @flw_stack(float %a) nounwind {
; RV32IF-NEXT: fsw fs0, 8(sp) # 4-byte Folded Spill
; RV32IF-NEXT: fmv.s fs0, fa0
; RV32IF-NEXT: addi a0, sp, 4
-; RV32IF-NEXT: call notdead@plt
+; RV32IF-NEXT: call notdead
; RV32IF-NEXT: flw fa5, 4(sp)
; RV32IF-NEXT: fadd.s fa0, fa5, fs0
; RV32IF-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
@@ -157,7 +157,7 @@ define dso_local float @flw_stack(float %a) nounwind {
; RV64IF-NEXT: fsw fs0, 4(sp) # 4-byte Folded Spill
; RV64IF-NEXT: fmv.s fs0, fa0
; RV64IF-NEXT: mv a0, sp
-; RV64IF-NEXT: call notdead@plt
+; RV64IF-NEXT: call notdead
; RV64IF-NEXT: flw fa5, 0(sp)
; RV64IF-NEXT: fadd.s fa0, fa5, fs0
; RV64IF-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
@@ -172,7 +172,7 @@ define dso_local float @flw_stack(float %a) nounwind {
; RV32IZFINX-NEXT: sw s0, 8(sp) # 4-byte Folded Spill
; RV32IZFINX-NEXT: mv s0, a0
; RV32IZFINX-NEXT: addi a0, sp, 4
-; RV32IZFINX-NEXT: call notdead@plt
+; RV32IZFINX-NEXT: call notdead
; RV32IZFINX-NEXT: lw a0, 4(sp)
; RV32IZFINX-NEXT: fadd.s a0, a0, s0
; RV32IZFINX-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
@@ -187,7 +187,7 @@ define dso_local float @flw_stack(float %a) nounwind {
; RV64IZFINX-NEXT: sd s0, 16(sp) # 8-byte Folded Spill
; RV64IZFINX-NEXT: mv s0, a0
; RV64IZFINX-NEXT: addi a0, sp, 12
-; RV64IZFINX-NEXT: call notdead@plt
+; RV64IZFINX-NEXT: call notdead
; RV64IZFINX-NEXT: lw a0, 12(sp)
; RV64IZFINX-NEXT: fadd.s a0, a0, s0
; RV64IZFINX-NEXT: ld ra, 24(sp) # 8-byte Folded Reload
@@ -209,7 +209,7 @@ define dso_local void @fsw_stack(float %a, float %b) nounwind {
; RV32IF-NEXT: fadd.s fa5, fa0, fa1
; RV32IF-NEXT: fsw fa5, 8(sp)
; RV32IF-NEXT: addi a0, sp, 8
-; RV32IF-NEXT: call notdead@plt
+; RV32IF-NEXT: call notdead
; RV32IF-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32IF-NEXT: addi sp, sp, 16
; RV32IF-NEXT: ret
@@ -221,7 +221,7 @@ define dso_local void @fsw_stack(float %a, float %b) nounwind {
; RV64IF-NEXT: fadd.s fa5, fa0, fa1
; RV64IF-NEXT: fsw fa5, 4(sp)
; RV64IF-NEXT: addi a0, sp, 4
-; RV64IF-NEXT: call notdead@plt
+; RV64IF-NEXT: call notdead
; RV64IF-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64IF-NEXT: addi sp, sp, 16
; RV64IF-NEXT: ret
@@ -233,7 +233,7 @@ define dso_local void @fsw_stack(float %a, float %b) nounwind {
; RV32IZFINX-NEXT: fadd.s a0, a0, a1
; RV32IZFINX-NEXT: sw a0, 8(sp)
; RV32IZFINX-NEXT: addi a0, sp, 8
-; RV32IZFINX-NEXT: call notdead@plt
+; RV32IZFINX-NEXT: call notdead
; RV32IZFINX-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32IZFINX-NEXT: addi sp, sp, 16
; RV32IZFINX-NEXT: ret
@@ -245,7 +245,7 @@ define dso_local void @fsw_stack(float %a, float %b) nounwind {
; RV64IZFINX-NEXT: fadd.s a0, a0, a1
; RV64IZFINX-NEXT: sw a0, 4(sp)
; RV64IZFINX-NEXT: addi a0, sp, 4
-; RV64IZFINX-NEXT: call notdead@plt
+; RV64IZFINX-NEXT: call notdead
; RV64IZFINX-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64IZFINX-NEXT: addi sp, sp, 16
; RV64IZFINX-NEXT: ret
diff --git a/llvm/test/CodeGen/RISCV/float-round-conv-sat.ll b/llvm/test/CodeGen/RISCV/float-round-conv-sat.ll
index d947d0f25cdd..5e99c7eb9056 100644
--- a/llvm/test/CodeGen/RISCV/float-round-conv-sat.ll
+++ b/llvm/test/CodeGen/RISCV/float-round-conv-sat.ll
@@ -53,7 +53,7 @@ define i64 @test_floor_si64(float %x) nounwind {
; RV32IF-NEXT: fmv.w.x fa5, a0
; RV32IF-NEXT: fle.s s0, fa5, fs0
; RV32IF-NEXT: fmv.s fa0, fs0
-; RV32IF-NEXT: call __fixsfdi@plt
+; RV32IF-NEXT: call __fixsfdi
; RV32IF-NEXT: lui a4, 524288
; RV32IF-NEXT: lui a2, 524288
; RV32IF-NEXT: beqz s0, .LBB1_4
@@ -111,7 +111,7 @@ define i64 @test_floor_si64(float %x) nounwind {
; RV32IZFINX-NEXT: fle.s s1, a0, s0
; RV32IZFINX-NEXT: neg s2, s1
; RV32IZFINX-NEXT: mv a0, s0
-; RV32IZFINX-NEXT: call __fixsfdi@plt
+; RV32IZFINX-NEXT: call __fixsfdi
; RV32IZFINX-NEXT: lui a2, %hi(.LCPI1_0)
; RV32IZFINX-NEXT: lw a2, %lo(.LCPI1_0)(a2)
; RV32IZFINX-NEXT: and a0, s2, a0
@@ -197,7 +197,7 @@ define i64 @test_floor_ui64(float %x) nounwind {
; RV32IF-NEXT: fle.s a0, fa5, fs0
; RV32IF-NEXT: neg s0, a0
; RV32IF-NEXT: fmv.s fa0, fs0
-; RV32IF-NEXT: call __fixunssfdi@plt
+; RV32IF-NEXT: call __fixunssfdi
; RV32IF-NEXT: lui a2, %hi(.LCPI3_0)
; RV32IF-NEXT: flw fa5, %lo(.LCPI3_0)(a2)
; RV32IF-NEXT: and a0, s0, a0
@@ -240,7 +240,7 @@ define i64 @test_floor_ui64(float %x) nounwind {
; RV32IZFINX-NEXT: fle.s a0, zero, s0
; RV32IZFINX-NEXT: neg s1, a0
; RV32IZFINX-NEXT: mv a0, s0
-; RV32IZFINX-NEXT: call __fixunssfdi@plt
+; RV32IZFINX-NEXT: call __fixunssfdi
; RV32IZFINX-NEXT: lui a2, %hi(.LCPI3_0)
; RV32IZFINX-NEXT: lw a2, %lo(.LCPI3_0)(a2)
; RV32IZFINX-NEXT: and a0, s1, a0
@@ -313,7 +313,7 @@ define i64 @test_ceil_si64(float %x) nounwind {
; RV32IF-NEXT: fmv.w.x fa5, a0
; RV32IF-NEXT: fle.s s0, fa5, fs0
; RV32IF-NEXT: fmv.s fa0, fs0
-; RV32IF-NEXT: call __fixsfdi@plt
+; RV32IF-NEXT: call __fixsfdi
; RV32IF-NEXT: lui a4, 524288
; RV32IF-NEXT: lui a2, 524288
; RV32IF-NEXT: beqz s0, .LBB5_4
@@ -371,7 +371,7 @@ define i64 @test_ceil_si64(float %x) nounwind {
; RV32IZFINX-NEXT: fle.s s1, a0, s0
; RV32IZFINX-NEXT: neg s2, s1
; RV32IZFINX-NEXT: mv a0, s0
-; RV32IZFINX-NEXT: call __fixsfdi@plt
+; RV32IZFINX-NEXT: call __fixsfdi
; RV32IZFINX-NEXT: lui a2, %hi(.LCPI5_0)
; RV32IZFINX-NEXT: lw a2, %lo(.LCPI5_0)(a2)
; RV32IZFINX-NEXT: and a0, s2, a0
@@ -457,7 +457,7 @@ define i64 @test_ceil_ui64(float %x) nounwind {
; RV32IF-NEXT: fle.s a0, fa5, fs0
; RV32IF-NEXT: neg s0, a0
; RV32IF-NEXT: fmv.s fa0, fs0
-; RV32IF-NEXT: call __fixunssfdi@plt
+; RV32IF-NEXT: call __fixunssfdi
; RV32IF-NEXT: lui a2, %hi(.LCPI7_0)
; RV32IF-NEXT: flw fa5, %lo(.LCPI7_0)(a2)
; RV32IF-NEXT: and a0, s0, a0
@@ -500,7 +500,7 @@ define i64 @test_ceil_ui64(float %x) nounwind {
; RV32IZFINX-NEXT: fle.s a0, zero, s0
; RV32IZFINX-NEXT: neg s1, a0
; RV32IZFINX-NEXT: mv a0, s0
-; RV32IZFINX-NEXT: call __fixunssfdi@plt
+; RV32IZFINX-NEXT: call __fixunssfdi
; RV32IZFINX-NEXT: lui a2, %hi(.LCPI7_0)
; RV32IZFINX-NEXT: lw a2, %lo(.LCPI7_0)(a2)
; RV32IZFINX-NEXT: and a0, s1, a0
@@ -573,7 +573,7 @@ define i64 @test_trunc_si64(float %x) nounwind {
; RV32IF-NEXT: fmv.w.x fa5, a0
; RV32IF-NEXT: fle.s s0, fa5, fs0
; RV32IF-NEXT: fmv.s fa0, fs0
-; RV32IF-NEXT: call __fixsfdi@plt
+; RV32IF-NEXT: call __fixsfdi
; RV32IF-NEXT: lui a4, 524288
; RV32IF-NEXT: lui a2, 524288
; RV32IF-NEXT: beqz s0, .LBB9_4
@@ -631,7 +631,7 @@ define i64 @test_trunc_si64(float %x) nounwind {
; RV32IZFINX-NEXT: fle.s s1, a0, s0
; RV32IZFINX-NEXT: neg s2, s1
; RV32IZFINX-NEXT: mv a0, s0
-; RV32IZFINX-NEXT: call __fixsfdi@plt
+; RV32IZFINX-NEXT: call __fixsfdi
; RV32IZFINX-NEXT: lui a2, %hi(.LCPI9_0)
; RV32IZFINX-NEXT: lw a2, %lo(.LCPI9_0)(a2)
; RV32IZFINX-NEXT: and a0, s2, a0
@@ -717,7 +717,7 @@ define i64 @test_trunc_ui64(float %x) nounwind {
; RV32IF-NEXT: fle.s a0, fa5, fs0
; RV32IF-NEXT: neg s0, a0
; RV32IF-NEXT: fmv.s fa0, fs0
-; RV32IF-NEXT: call __fixunssfdi@plt
+; RV32IF-NEXT: call __fixunssfdi
; RV32IF-NEXT: lui a2, %hi(.LCPI11_0)
; RV32IF-NEXT: flw fa5, %lo(.LCPI11_0)(a2)
; RV32IF-NEXT: and a0, s0, a0
@@ -760,7 +760,7 @@ define i64 @test_trunc_ui64(float %x) nounwind {
; RV32IZFINX-NEXT: fle.s a0, zero, s0
; RV32IZFINX-NEXT: neg s1, a0
; RV32IZFINX-NEXT: mv a0, s0
-; RV32IZFINX-NEXT: call __fixunssfdi@plt
+; RV32IZFINX-NEXT: call __fixunssfdi
; RV32IZFINX-NEXT: lui a2, %hi(.LCPI11_0)
; RV32IZFINX-NEXT: lw a2, %lo(.LCPI11_0)(a2)
; RV32IZFINX-NEXT: and a0, s1, a0
@@ -833,7 +833,7 @@ define i64 @test_round_si64(float %x) nounwind {
; RV32IF-NEXT: fmv.w.x fa5, a0
; RV32IF-NEXT: fle.s s0, fa5, fs0
; RV32IF-NEXT: fmv.s fa0, fs0
-; RV32IF-NEXT: call __fixsfdi@plt
+; RV32IF-NEXT: call __fixsfdi
; RV32IF-NEXT: lui a4, 524288
; RV32IF-NEXT: lui a2, 524288
; RV32IF-NEXT: beqz s0, .LBB13_4
@@ -891,7 +891,7 @@ define i64 @test_round_si64(float %x) nounwind {
; RV32IZFINX-NEXT: fle.s s1, a0, s0
; RV32IZFINX-NEXT: neg s2, s1
; RV32IZFINX-NEXT: mv a0, s0
-; RV32IZFINX-NEXT: call __fixsfdi@plt
+; RV32IZFINX-NEXT: call __fixsfdi
; RV32IZFINX-NEXT: lui a2, %hi(.LCPI13_0)
; RV32IZFINX-NEXT: lw a2, %lo(.LCPI13_0)(a2)
; RV32IZFINX-NEXT: and a0, s2, a0
@@ -977,7 +977,7 @@ define i64 @test_round_ui64(float %x) nounwind {
; RV32IF-NEXT: fle.s a0, fa5, fs0
; RV32IF-NEXT: neg s0, a0
; RV32IF-NEXT: fmv.s fa0, fs0
-; RV32IF-NEXT: call __fixunssfdi@plt
+; RV32IF-NEXT: call __fixunssfdi
; RV32IF-NEXT: lui a2, %hi(.LCPI15_0)
; RV32IF-NEXT: flw fa5, %lo(.LCPI15_0)(a2)
; RV32IF-NEXT: and a0, s0, a0
@@ -1020,7 +1020,7 @@ define i64 @test_round_ui64(float %x) nounwind {
; RV32IZFINX-NEXT: fle.s a0, zero, s0
; RV32IZFINX-NEXT: neg s1, a0
; RV32IZFINX-NEXT: mv a0, s0
-; RV32IZFINX-NEXT: call __fixunssfdi@plt
+; RV32IZFINX-NEXT: call __fixunssfdi
; RV32IZFINX-NEXT: lui a2, %hi(.LCPI15_0)
; RV32IZFINX-NEXT: lw a2, %lo(.LCPI15_0)(a2)
; RV32IZFINX-NEXT: and a0, s1, a0
@@ -1093,7 +1093,7 @@ define i64 @test_roundeven_si64(float %x) nounwind {
; RV32IF-NEXT: fmv.w.x fa5, a0
; RV32IF-NEXT: fle.s s0, fa5, fs0
; RV32IF-NEXT: fmv.s fa0, fs0
-; RV32IF-NEXT: call __fixsfdi@plt
+; RV32IF-NEXT: call __fixsfdi
; RV32IF-NEXT: lui a4, 524288
; RV32IF-NEXT: lui a2, 524288
; RV32IF-NEXT: beqz s0, .LBB17_4
@@ -1151,7 +1151,7 @@ define i64 @test_roundeven_si64(float %x) nounwind {
; RV32IZFINX-NEXT: fle.s s1, a0, s0
; RV32IZFINX-NEXT: neg s2, s1
; RV32IZFINX-NEXT: mv a0, s0
-; RV32IZFINX-NEXT: call __fixsfdi@plt
+; RV32IZFINX-NEXT: call __fixsfdi
; RV32IZFINX-NEXT: lui a2, %hi(.LCPI17_0)
; RV32IZFINX-NEXT: lw a2, %lo(.LCPI17_0)(a2)
; RV32IZFINX-NEXT: and a0, s2, a0
@@ -1237,7 +1237,7 @@ define i64 @test_roundeven_ui64(float %x) nounwind {
; RV32IF-NEXT: fle.s a0, fa5, fs0
; RV32IF-NEXT: neg s0, a0
; RV32IF-NEXT: fmv.s fa0, fs0
-; RV32IF-NEXT: call __fixunssfdi@plt
+; RV32IF-NEXT: call __fixunssfdi
; RV32IF-NEXT: lui a2, %hi(.LCPI19_0)
; RV32IF-NEXT: flw fa5, %lo(.LCPI19_0)(a2)
; RV32IF-NEXT: and a0, s0, a0
@@ -1280,7 +1280,7 @@ define i64 @test_roundeven_ui64(float %x) nounwind {
; RV32IZFINX-NEXT: fle.s a0, zero, s0
; RV32IZFINX-NEXT: neg s1, a0
; RV32IZFINX-NEXT: mv a0, s0
-; RV32IZFINX-NEXT: call __fixunssfdi@plt
+; RV32IZFINX-NEXT: call __fixunssfdi
; RV32IZFINX-NEXT: lui a2, %hi(.LCPI19_0)
; RV32IZFINX-NEXT: lw a2, %lo(.LCPI19_0)(a2)
; RV32IZFINX-NEXT: and a0, s1, a0
@@ -1353,7 +1353,7 @@ define i64 @test_rint_si64(float %x) nounwind {
; RV32IF-NEXT: fmv.w.x fa5, a0
; RV32IF-NEXT: fle.s s0, fa5, fs0
; RV32IF-NEXT: fmv.s fa0, fs0
-; RV32IF-NEXT: call __fixsfdi@plt
+; RV32IF-NEXT: call __fixsfdi
; RV32IF-NEXT: lui a4, 524288
; RV32IF-NEXT: lui a2, 524288
; RV32IF-NEXT: beqz s0, .LBB21_4
@@ -1411,7 +1411,7 @@ define i64 @test_rint_si64(float %x) nounwind {
; RV32IZFINX-NEXT: fle.s s1, a0, s0
; RV32IZFINX-NEXT: neg s2, s1
; RV32IZFINX-NEXT: mv a0, s0
-; RV32IZFINX-NEXT: call __fixsfdi@plt
+; RV32IZFINX-NEXT: call __fixsfdi
; RV32IZFINX-NEXT: lui a2, %hi(.LCPI21_0)
; RV32IZFINX-NEXT: lw a2, %lo(.LCPI21_0)(a2)
; RV32IZFINX-NEXT: and a0, s2, a0
@@ -1497,7 +1497,7 @@ define i64 @test_rint_ui64(float %x) nounwind {
; RV32IF-NEXT: fle.s a0, fa5, fs0
; RV32IF-NEXT: neg s0, a0
; RV32IF-NEXT: fmv.s fa0, fs0
-; RV32IF-NEXT: call __fixunssfdi@plt
+; RV32IF-NEXT: call __fixunssfdi
; RV32IF-NEXT: lui a2, %hi(.LCPI23_0)
; RV32IF-NEXT: flw fa5, %lo(.LCPI23_0)(a2)
; RV32IF-NEXT: and a0, s0, a0
@@ -1540,7 +1540,7 @@ define i64 @test_rint_ui64(float %x) nounwind {
; RV32IZFINX-NEXT: fle.s a0, zero, s0
; RV32IZFINX-NEXT: neg s1, a0
; RV32IZFINX-NEXT: mv a0, s0
-; RV32IZFINX-NEXT: call __fixunssfdi@plt
+; RV32IZFINX-NEXT: call __fixunssfdi
; RV32IZFINX-NEXT: lui a2, %hi(.LCPI23_0)
; RV32IZFINX-NEXT: lw a2, %lo(.LCPI23_0)(a2)
; RV32IZFINX-NEXT: and a0, s1, a0
diff --git a/llvm/test/CodeGen/RISCV/float-round-conv.ll b/llvm/test/CodeGen/RISCV/float-round-conv.ll
index ed50f867cdb8..1b1344843975 100644
--- a/llvm/test/CodeGen/RISCV/float-round-conv.ll
+++ b/llvm/test/CodeGen/RISCV/float-round-conv.ll
@@ -100,7 +100,7 @@ define i64 @test_floor_si64(float %x) {
; RV32IF-NEXT: .cfi_def_cfa_offset 16
; RV32IF-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32IF-NEXT: .cfi_offset ra, -4
-; RV32IF-NEXT: call __fixsfdi@plt
+; RV32IF-NEXT: call __fixsfdi
; RV32IF-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32IF-NEXT: addi sp, sp, 16
; RV32IF-NEXT: ret
@@ -125,7 +125,7 @@ define i64 @test_floor_si64(float %x) {
; RV32IZFINX-NEXT: .cfi_def_cfa_offset 16
; RV32IZFINX-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32IZFINX-NEXT: .cfi_offset ra, -4
-; RV32IZFINX-NEXT: call __fixsfdi@plt
+; RV32IZFINX-NEXT: call __fixsfdi
; RV32IZFINX-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32IZFINX-NEXT: addi sp, sp, 16
; RV32IZFINX-NEXT: ret
@@ -231,7 +231,7 @@ define i64 @test_floor_ui64(float %x) {
; RV32IF-NEXT: .cfi_def_cfa_offset 16
; RV32IF-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32IF-NEXT: .cfi_offset ra, -4
-; RV32IF-NEXT: call __fixunssfdi@plt
+; RV32IF-NEXT: call __fixunssfdi
; RV32IF-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32IF-NEXT: addi sp, sp, 16
; RV32IF-NEXT: ret
@@ -256,7 +256,7 @@ define i64 @test_floor_ui64(float %x) {
; RV32IZFINX-NEXT: .cfi_def_cfa_offset 16
; RV32IZFINX-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32IZFINX-NEXT: .cfi_offset ra, -4
-; RV32IZFINX-NEXT: call __fixunssfdi@plt
+; RV32IZFINX-NEXT: call __fixunssfdi
; RV32IZFINX-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32IZFINX-NEXT: addi sp, sp, 16
; RV32IZFINX-NEXT: ret
@@ -362,7 +362,7 @@ define i64 @test_ceil_si64(float %x) {
; RV32IF-NEXT: .cfi_def_cfa_offset 16
; RV32IF-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32IF-NEXT: .cfi_offset ra, -4
-; RV32IF-NEXT: call __fixsfdi@plt
+; RV32IF-NEXT: call __fixsfdi
; RV32IF-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32IF-NEXT: addi sp, sp, 16
; RV32IF-NEXT: ret
@@ -387,7 +387,7 @@ define i64 @test_ceil_si64(float %x) {
; RV32IZFINX-NEXT: .cfi_def_cfa_offset 16
; RV32IZFINX-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32IZFINX-NEXT: .cfi_offset ra, -4
-; RV32IZFINX-NEXT: call __fixsfdi@plt
+; RV32IZFINX-NEXT: call __fixsfdi
; RV32IZFINX-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32IZFINX-NEXT: addi sp, sp, 16
; RV32IZFINX-NEXT: ret
@@ -493,7 +493,7 @@ define i64 @test_ceil_ui64(float %x) {
; RV32IF-NEXT: .cfi_def_cfa_offset 16
; RV32IF-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32IF-NEXT: .cfi_offset ra, -4
-; RV32IF-NEXT: call __fixunssfdi@plt
+; RV32IF-NEXT: call __fixunssfdi
; RV32IF-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32IF-NEXT: addi sp, sp, 16
; RV32IF-NEXT: ret
@@ -518,7 +518,7 @@ define i64 @test_ceil_ui64(float %x) {
; RV32IZFINX-NEXT: .cfi_def_cfa_offset 16
; RV32IZFINX-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32IZFINX-NEXT: .cfi_offset ra, -4
-; RV32IZFINX-NEXT: call __fixunssfdi@plt
+; RV32IZFINX-NEXT: call __fixunssfdi
; RV32IZFINX-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32IZFINX-NEXT: addi sp, sp, 16
; RV32IZFINX-NEXT: ret
@@ -624,7 +624,7 @@ define i64 @test_trunc_si64(float %x) {
; RV32IF-NEXT: .cfi_def_cfa_offset 16
; RV32IF-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32IF-NEXT: .cfi_offset ra, -4
-; RV32IF-NEXT: call __fixsfdi@plt
+; RV32IF-NEXT: call __fixsfdi
; RV32IF-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32IF-NEXT: addi sp, sp, 16
; RV32IF-NEXT: ret
@@ -649,7 +649,7 @@ define i64 @test_trunc_si64(float %x) {
; RV32IZFINX-NEXT: .cfi_def_cfa_offset 16
; RV32IZFINX-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32IZFINX-NEXT: .cfi_offset ra, -4
-; RV32IZFINX-NEXT: call __fixsfdi@plt
+; RV32IZFINX-NEXT: call __fixsfdi
; RV32IZFINX-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32IZFINX-NEXT: addi sp, sp, 16
; RV32IZFINX-NEXT: ret
@@ -755,7 +755,7 @@ define i64 @test_trunc_ui64(float %x) {
; RV32IF-NEXT: .cfi_def_cfa_offset 16
; RV32IF-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32IF-NEXT: .cfi_offset ra, -4
-; RV32IF-NEXT: call __fixunssfdi@plt
+; RV32IF-NEXT: call __fixunssfdi
; RV32IF-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32IF-NEXT: addi sp, sp, 16
; RV32IF-NEXT: ret
@@ -780,7 +780,7 @@ define i64 @test_trunc_ui64(float %x) {
; RV32IZFINX-NEXT: .cfi_def_cfa_offset 16
; RV32IZFINX-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32IZFINX-NEXT: .cfi_offset ra, -4
-; RV32IZFINX-NEXT: call __fixunssfdi@plt
+; RV32IZFINX-NEXT: call __fixunssfdi
; RV32IZFINX-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32IZFINX-NEXT: addi sp, sp, 16
; RV32IZFINX-NEXT: ret
@@ -886,7 +886,7 @@ define i64 @test_round_si64(float %x) {
; RV32IF-NEXT: .cfi_def_cfa_offset 16
; RV32IF-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32IF-NEXT: .cfi_offset ra, -4
-; RV32IF-NEXT: call __fixsfdi@plt
+; RV32IF-NEXT: call __fixsfdi
; RV32IF-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32IF-NEXT: addi sp, sp, 16
; RV32IF-NEXT: ret
@@ -911,7 +911,7 @@ define i64 @test_round_si64(float %x) {
; RV32IZFINX-NEXT: .cfi_def_cfa_offset 16
; RV32IZFINX-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32IZFINX-NEXT: .cfi_offset ra, -4
-; RV32IZFINX-NEXT: call __fixsfdi@plt
+; RV32IZFINX-NEXT: call __fixsfdi
; RV32IZFINX-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32IZFINX-NEXT: addi sp, sp, 16
; RV32IZFINX-NEXT: ret
@@ -1017,7 +1017,7 @@ define i64 @test_round_ui64(float %x) {
; RV32IF-NEXT: .cfi_def_cfa_offset 16
; RV32IF-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32IF-NEXT: .cfi_offset ra, -4
-; RV32IF-NEXT: call __fixunssfdi@plt
+; RV32IF-NEXT: call __fixunssfdi
; RV32IF-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32IF-NEXT: addi sp, sp, 16
; RV32IF-NEXT: ret
@@ -1042,7 +1042,7 @@ define i64 @test_round_ui64(float %x) {
; RV32IZFINX-NEXT: .cfi_def_cfa_offset 16
; RV32IZFINX-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32IZFINX-NEXT: .cfi_offset ra, -4
-; RV32IZFINX-NEXT: call __fixunssfdi@plt
+; RV32IZFINX-NEXT: call __fixunssfdi
; RV32IZFINX-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32IZFINX-NEXT: addi sp, sp, 16
; RV32IZFINX-NEXT: ret
@@ -1148,7 +1148,7 @@ define i64 @test_roundeven_si64(float %x) {
; RV32IF-NEXT: .cfi_def_cfa_offset 16
; RV32IF-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32IF-NEXT: .cfi_offset ra, -4
-; RV32IF-NEXT: call __fixsfdi@plt
+; RV32IF-NEXT: call __fixsfdi
; RV32IF-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32IF-NEXT: addi sp, sp, 16
; RV32IF-NEXT: ret
@@ -1173,7 +1173,7 @@ define i64 @test_roundeven_si64(float %x) {
; RV32IZFINX-NEXT: .cfi_def_cfa_offset 16
; RV32IZFINX-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32IZFINX-NEXT: .cfi_offset ra, -4
-; RV32IZFINX-NEXT: call __fixsfdi@plt
+; RV32IZFINX-NEXT: call __fixsfdi
; RV32IZFINX-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32IZFINX-NEXT: addi sp, sp, 16
; RV32IZFINX-NEXT: ret
@@ -1279,7 +1279,7 @@ define i64 @test_roundeven_ui64(float %x) {
; RV32IF-NEXT: .cfi_def_cfa_offset 16
; RV32IF-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32IF-NEXT: .cfi_offset ra, -4
-; RV32IF-NEXT: call __fixunssfdi@plt
+; RV32IF-NEXT: call __fixunssfdi
; RV32IF-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32IF-NEXT: addi sp, sp, 16
; RV32IF-NEXT: ret
@@ -1304,7 +1304,7 @@ define i64 @test_roundeven_ui64(float %x) {
; RV32IZFINX-NEXT: .cfi_def_cfa_offset 16
; RV32IZFINX-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32IZFINX-NEXT: .cfi_offset ra, -4
-; RV32IZFINX-NEXT: call __fixunssfdi@plt
+; RV32IZFINX-NEXT: call __fixunssfdi
; RV32IZFINX-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32IZFINX-NEXT: addi sp, sp, 16
; RV32IZFINX-NEXT: ret
diff --git a/llvm/test/CodeGen/RISCV/float-select-verify.ll b/llvm/test/CodeGen/RISCV/float-select-verify.ll
index b38560fa4136..cf1a2a89229d 100644
--- a/llvm/test/CodeGen/RISCV/float-select-verify.ll
+++ b/llvm/test/CodeGen/RISCV/float-select-verify.ll
@@ -67,11 +67,11 @@ define dso_local void @buz(i1 %pred, float %a, float %b) {
; CHECK-NEXT: [[FMV_X_W:%[0-9]+]]:gpr = FMV_X_W killed [[PHI1]]
; CHECK-NEXT: ADJCALLSTACKDOWN 0, 0, implicit-def dead $x2, implicit $x2
; CHECK-NEXT: $x10 = COPY [[FMV_X_W]]
- ; CHECK-NEXT: PseudoCALL target-flags(riscv-plt) @bar, csr_ilp32_lp64, implicit-def dead $x1, implicit $x10, implicit-def $x2
+ ; CHECK-NEXT: PseudoCALL target-flags(riscv-call) @bar, csr_ilp32_lp64, implicit-def dead $x1, implicit $x10, implicit-def $x2
; CHECK-NEXT: ADJCALLSTACKUP 0, 0, implicit-def dead $x2, implicit $x2
; CHECK-NEXT: ADJCALLSTACKDOWN 0, 0, implicit-def dead $x2, implicit $x2
; CHECK-NEXT: $x10 = COPY [[FCVT_L_S]]
- ; CHECK-NEXT: PseudoCALL target-flags(riscv-plt) @foo, csr_ilp32_lp64, implicit-def dead $x1, implicit $x10, implicit-def $x2
+ ; CHECK-NEXT: PseudoCALL target-flags(riscv-call) @foo, csr_ilp32_lp64, implicit-def dead $x1, implicit $x10, implicit-def $x2
; CHECK-NEXT: ADJCALLSTACKUP 0, 0, implicit-def dead $x2, implicit $x2
; CHECK-NEXT: PseudoRET
entry:
diff --git a/llvm/test/CodeGen/RISCV/float-zfa.ll b/llvm/test/CodeGen/RISCV/float-zfa.ll
index 52c9ac7333fc..e5196ead1f88 100644
--- a/llvm/test/CodeGen/RISCV/float-zfa.ll
+++ b/llvm/test/CodeGen/RISCV/float-zfa.ll
@@ -265,7 +265,7 @@ define void @fli_remat() {
; CHECK: # %bb.0:
; CHECK-NEXT: fli.s fa0, 1.0
; CHECK-NEXT: fli.s fa1, 1.0
-; CHECK-NEXT: tail foo@plt
+; CHECK-NEXT: tail foo
tail call void @foo(float 1.000000e+00, float 1.000000e+00)
ret void
}
diff --git a/llvm/test/CodeGen/RISCV/fmax-fmin.ll b/llvm/test/CodeGen/RISCV/fmax-fmin.ll
index b67093d72439..9d5729802a0f 100644
--- a/llvm/test/CodeGen/RISCV/fmax-fmin.ll
+++ b/llvm/test/CodeGen/RISCV/fmax-fmin.ll
@@ -7,7 +7,7 @@ define float @maxnum_f32(float %x, float %y) nounwind {
; R32: # %bb.0:
; R32-NEXT: addi sp, sp, -16
; R32-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; R32-NEXT: call fmaxf@plt
+; R32-NEXT: call fmaxf
; R32-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; R32-NEXT: addi sp, sp, 16
; R32-NEXT: ret
@@ -16,7 +16,7 @@ define float @maxnum_f32(float %x, float %y) nounwind {
; R64: # %bb.0:
; R64-NEXT: addi sp, sp, -16
; R64-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
-; R64-NEXT: call fmaxf@plt
+; R64-NEXT: call fmaxf
; R64-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; R64-NEXT: addi sp, sp, 16
; R64-NEXT: ret
@@ -33,7 +33,7 @@ define float @maxnum_f32_fast(float %x, float %y) nounwind {
; R32-NEXT: sw s1, 4(sp) # 4-byte Folded Spill
; R32-NEXT: mv s1, a1
; R32-NEXT: mv s0, a0
-; R32-NEXT: call __gtsf2@plt
+; R32-NEXT: call __gtsf2
; R32-NEXT: bgtz a0, .LBB1_2
; R32-NEXT: # %bb.1:
; R32-NEXT: mv s0, s1
@@ -53,7 +53,7 @@ define float @maxnum_f32_fast(float %x, float %y) nounwind {
; R64-NEXT: sd s1, 8(sp) # 8-byte Folded Spill
; R64-NEXT: mv s1, a1
; R64-NEXT: mv s0, a0
-; R64-NEXT: call __gtsf2@plt
+; R64-NEXT: call __gtsf2
; R64-NEXT: bgtz a0, .LBB1_2
; R64-NEXT: # %bb.1:
; R64-NEXT: mv s0, s1
@@ -73,7 +73,7 @@ define double @maxnum_f64(double %x, double %y) nounwind {
; R32: # %bb.0:
; R32-NEXT: addi sp, sp, -16
; R32-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; R32-NEXT: call fmax@plt
+; R32-NEXT: call fmax
; R32-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; R32-NEXT: addi sp, sp, 16
; R32-NEXT: ret
@@ -82,7 +82,7 @@ define double @maxnum_f64(double %x, double %y) nounwind {
; R64: # %bb.0:
; R64-NEXT: addi sp, sp, -16
; R64-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
-; R64-NEXT: call fmax@plt
+; R64-NEXT: call fmax
; R64-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; R64-NEXT: addi sp, sp, 16
; R64-NEXT: ret
@@ -103,7 +103,7 @@ define double @maxnum_f64_nnan(double %x, double %y) nounwind {
; R32-NEXT: mv s2, a2
; R32-NEXT: mv s0, a1
; R32-NEXT: mv s3, a0
-; R32-NEXT: call __gtdf2@plt
+; R32-NEXT: call __gtdf2
; R32-NEXT: mv a1, a0
; R32-NEXT: mv a0, s3
; R32-NEXT: bgtz a1, .LBB3_2
@@ -113,7 +113,7 @@ define double @maxnum_f64_nnan(double %x, double %y) nounwind {
; R32-NEXT: mv a1, s0
; R32-NEXT: mv a2, s2
; R32-NEXT: mv a3, s1
-; R32-NEXT: call __gtdf2@plt
+; R32-NEXT: call __gtdf2
; R32-NEXT: bgtz a0, .LBB3_4
; R32-NEXT: # %bb.3:
; R32-NEXT: mv s0, s1
@@ -136,7 +136,7 @@ define double @maxnum_f64_nnan(double %x, double %y) nounwind {
; R64-NEXT: sd s1, 8(sp) # 8-byte Folded Spill
; R64-NEXT: mv s1, a1
; R64-NEXT: mv s0, a0
-; R64-NEXT: call __gtdf2@plt
+; R64-NEXT: call __gtdf2
; R64-NEXT: bgtz a0, .LBB3_2
; R64-NEXT: # %bb.1:
; R64-NEXT: mv s0, s1
@@ -156,7 +156,7 @@ define float @minnum_f32(float %x, float %y) nounwind {
; R32: # %bb.0:
; R32-NEXT: addi sp, sp, -16
; R32-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; R32-NEXT: call fminf@plt
+; R32-NEXT: call fminf
; R32-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; R32-NEXT: addi sp, sp, 16
; R32-NEXT: ret
@@ -165,7 +165,7 @@ define float @minnum_f32(float %x, float %y) nounwind {
; R64: # %bb.0:
; R64-NEXT: addi sp, sp, -16
; R64-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
-; R64-NEXT: call fminf@plt
+; R64-NEXT: call fminf
; R64-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; R64-NEXT: addi sp, sp, 16
; R64-NEXT: ret
@@ -182,7 +182,7 @@ define float @minnum_f32_nnan(float %x, float %y) nounwind {
; R32-NEXT: sw s1, 4(sp) # 4-byte Folded Spill
; R32-NEXT: mv s1, a1
; R32-NEXT: mv s0, a0
-; R32-NEXT: call __ltsf2@plt
+; R32-NEXT: call __ltsf2
; R32-NEXT: bltz a0, .LBB5_2
; R32-NEXT: # %bb.1:
; R32-NEXT: mv s0, s1
@@ -202,7 +202,7 @@ define float @minnum_f32_nnan(float %x, float %y) nounwind {
; R64-NEXT: sd s1, 8(sp) # 8-byte Folded Spill
; R64-NEXT: mv s1, a1
; R64-NEXT: mv s0, a0
-; R64-NEXT: call __ltsf2@plt
+; R64-NEXT: call __ltsf2
; R64-NEXT: bltz a0, .LBB5_2
; R64-NEXT: # %bb.1:
; R64-NEXT: mv s0, s1
@@ -222,7 +222,7 @@ define double @minnum_f64(double %x, double %y) nounwind {
; R32: # %bb.0:
; R32-NEXT: addi sp, sp, -16
; R32-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; R32-NEXT: call fmin@plt
+; R32-NEXT: call fmin
; R32-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; R32-NEXT: addi sp, sp, 16
; R32-NEXT: ret
@@ -231,7 +231,7 @@ define double @minnum_f64(double %x, double %y) nounwind {
; R64: # %bb.0:
; R64-NEXT: addi sp, sp, -16
; R64-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
-; R64-NEXT: call fmin@plt
+; R64-NEXT: call fmin
; R64-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; R64-NEXT: addi sp, sp, 16
; R64-NEXT: ret
@@ -252,7 +252,7 @@ define double @minnum_f64_fast(double %x, double %y) nounwind {
; R32-NEXT: mv s2, a2
; R32-NEXT: mv s0, a1
; R32-NEXT: mv s3, a0
-; R32-NEXT: call __ltdf2@plt
+; R32-NEXT: call __ltdf2
; R32-NEXT: mv a1, a0
; R32-NEXT: mv a0, s3
; R32-NEXT: bltz a1, .LBB7_2
@@ -262,7 +262,7 @@ define double @minnum_f64_fast(double %x, double %y) nounwind {
; R32-NEXT: mv a1, s0
; R32-NEXT: mv a2, s2
; R32-NEXT: mv a3, s1
-; R32-NEXT: call __ltdf2@plt
+; R32-NEXT: call __ltdf2
; R32-NEXT: bltz a0, .LBB7_4
; R32-NEXT: # %bb.3:
; R32-NEXT: mv s0, s1
@@ -285,7 +285,7 @@ define double @minnum_f64_fast(double %x, double %y) nounwind {
; R64-NEXT: sd s1, 8(sp) # 8-byte Folded Spill
; R64-NEXT: mv s1, a1
; R64-NEXT: mv s0, a0
-; R64-NEXT: call __ltdf2@plt
+; R64-NEXT: call __ltdf2
; R64-NEXT: bltz a0, .LBB7_2
; R64-NEXT: # %bb.1:
; R64-NEXT: mv s0, s1
diff --git a/llvm/test/CodeGen/RISCV/fold-addi-loadstore.ll b/llvm/test/CodeGen/RISCV/fold-addi-loadstore.ll
index 321857b2104e..7c2f775bca14 100644
--- a/llvm/test/CodeGen/RISCV/fold-addi-loadstore.ll
+++ b/llvm/test/CodeGen/RISCV/fold-addi-loadstore.ll
@@ -773,7 +773,7 @@ define i64 @fold_addi_from_different_bb(i64 %k, i64 %n, ptr %a) nounwind {
; RV32I-NEXT: .LBB20_5: # %for.body
; RV32I-NEXT: # =>This Inner Loop Header: Depth=1
; RV32I-NEXT: mv a0, s0
-; RV32I-NEXT: call f@plt
+; RV32I-NEXT: call f
; RV32I-NEXT: lw a0, 12(s7)
; RV32I-NEXT: lw a1, 8(s7)
; RV32I-NEXT: add a0, a0, s4
@@ -838,7 +838,7 @@ define i64 @fold_addi_from_different_bb(i64 %k, i64 %n, ptr %a) nounwind {
; RV32I-MEDIUM-NEXT: .LBB20_5: # %for.body
; RV32I-MEDIUM-NEXT: # =>This Inner Loop Header: Depth=1
; RV32I-MEDIUM-NEXT: mv a0, s0
-; RV32I-MEDIUM-NEXT: call f@plt
+; RV32I-MEDIUM-NEXT: call f
; RV32I-MEDIUM-NEXT: lw a0, 12(s7)
; RV32I-MEDIUM-NEXT: lw a1, 8(s7)
; RV32I-MEDIUM-NEXT: add a0, a0, s4
@@ -885,7 +885,7 @@ define i64 @fold_addi_from_different_bb(i64 %k, i64 %n, ptr %a) nounwind {
; RV64I-NEXT: .LBB20_2: # %for.body
; RV64I-NEXT: # =>This Inner Loop Header: Depth=1
; RV64I-NEXT: mv a0, s0
-; RV64I-NEXT: call f@plt
+; RV64I-NEXT: call f
; RV64I-NEXT: ld a0, 8(s3)
; RV64I-NEXT: addi s1, s1, -1
; RV64I-NEXT: add s2, a0, s2
@@ -921,7 +921,7 @@ define i64 @fold_addi_from_different_bb(i64 %k, i64 %n, ptr %a) nounwind {
; RV64I-MEDIUM-NEXT: .LBB20_2: # %for.body
; RV64I-MEDIUM-NEXT: # =>This Inner Loop Header: Depth=1
; RV64I-MEDIUM-NEXT: mv a0, s0
-; RV64I-MEDIUM-NEXT: call f@plt
+; RV64I-MEDIUM-NEXT: call f
; RV64I-MEDIUM-NEXT: ld a0, 8(s3)
; RV64I-MEDIUM-NEXT: addi s1, s1, -1
; RV64I-MEDIUM-NEXT: add s2, a0, s2
diff --git a/llvm/test/CodeGen/RISCV/forced-atomics.ll b/llvm/test/CodeGen/RISCV/forced-atomics.ll
index f2079e314d51..f6a53a9d76dd 100644
--- a/llvm/test/CodeGen/RISCV/forced-atomics.ll
+++ b/llvm/test/CodeGen/RISCV/forced-atomics.ll
@@ -14,7 +14,7 @@ define i8 @load8(ptr %p) nounwind {
; RV32-NO-ATOMIC-NEXT: addi sp, sp, -16
; RV32-NO-ATOMIC-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32-NO-ATOMIC-NEXT: li a1, 5
-; RV32-NO-ATOMIC-NEXT: call __atomic_load_1@plt
+; RV32-NO-ATOMIC-NEXT: call __atomic_load_1
; RV32-NO-ATOMIC-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32-NO-ATOMIC-NEXT: addi sp, sp, 16
; RV32-NO-ATOMIC-NEXT: ret
@@ -38,7 +38,7 @@ define i8 @load8(ptr %p) nounwind {
; RV64-NO-ATOMIC-NEXT: addi sp, sp, -16
; RV64-NO-ATOMIC-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64-NO-ATOMIC-NEXT: li a1, 5
-; RV64-NO-ATOMIC-NEXT: call __atomic_load_1@plt
+; RV64-NO-ATOMIC-NEXT: call __atomic_load_1
; RV64-NO-ATOMIC-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64-NO-ATOMIC-NEXT: addi sp, sp, 16
; RV64-NO-ATOMIC-NEXT: ret
@@ -67,7 +67,7 @@ define void @store8(ptr %p) nounwind {
; RV32-NO-ATOMIC-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32-NO-ATOMIC-NEXT: li a2, 5
; RV32-NO-ATOMIC-NEXT: li a1, 0
-; RV32-NO-ATOMIC-NEXT: call __atomic_store_1@plt
+; RV32-NO-ATOMIC-NEXT: call __atomic_store_1
; RV32-NO-ATOMIC-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32-NO-ATOMIC-NEXT: addi sp, sp, 16
; RV32-NO-ATOMIC-NEXT: ret
@@ -91,7 +91,7 @@ define void @store8(ptr %p) nounwind {
; RV64-NO-ATOMIC-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64-NO-ATOMIC-NEXT: li a2, 5
; RV64-NO-ATOMIC-NEXT: li a1, 0
-; RV64-NO-ATOMIC-NEXT: call __atomic_store_1@plt
+; RV64-NO-ATOMIC-NEXT: call __atomic_store_1
; RV64-NO-ATOMIC-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64-NO-ATOMIC-NEXT: addi sp, sp, 16
; RV64-NO-ATOMIC-NEXT: ret
@@ -119,7 +119,7 @@ define i8 @rmw8(ptr %p) nounwind {
; RV32-NO-ATOMIC-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32-NO-ATOMIC-NEXT: li a1, 1
; RV32-NO-ATOMIC-NEXT: li a2, 5
-; RV32-NO-ATOMIC-NEXT: call __atomic_fetch_add_1@plt
+; RV32-NO-ATOMIC-NEXT: call __atomic_fetch_add_1
; RV32-NO-ATOMIC-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32-NO-ATOMIC-NEXT: addi sp, sp, 16
; RV32-NO-ATOMIC-NEXT: ret
@@ -129,7 +129,7 @@ define i8 @rmw8(ptr %p) nounwind {
; RV32-ATOMIC-NEXT: addi sp, sp, -16
; RV32-ATOMIC-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32-ATOMIC-NEXT: li a1, 1
-; RV32-ATOMIC-NEXT: call __sync_fetch_and_add_1@plt
+; RV32-ATOMIC-NEXT: call __sync_fetch_and_add_1
; RV32-ATOMIC-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32-ATOMIC-NEXT: addi sp, sp, 16
; RV32-ATOMIC-NEXT: ret
@@ -139,7 +139,7 @@ define i8 @rmw8(ptr %p) nounwind {
; RV32-ATOMIC-TRAILING-NEXT: addi sp, sp, -16
; RV32-ATOMIC-TRAILING-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32-ATOMIC-TRAILING-NEXT: li a1, 1
-; RV32-ATOMIC-TRAILING-NEXT: call __sync_fetch_and_add_1@plt
+; RV32-ATOMIC-TRAILING-NEXT: call __sync_fetch_and_add_1
; RV32-ATOMIC-TRAILING-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32-ATOMIC-TRAILING-NEXT: addi sp, sp, 16
; RV32-ATOMIC-TRAILING-NEXT: ret
@@ -150,7 +150,7 @@ define i8 @rmw8(ptr %p) nounwind {
; RV64-NO-ATOMIC-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64-NO-ATOMIC-NEXT: li a1, 1
; RV64-NO-ATOMIC-NEXT: li a2, 5
-; RV64-NO-ATOMIC-NEXT: call __atomic_fetch_add_1@plt
+; RV64-NO-ATOMIC-NEXT: call __atomic_fetch_add_1
; RV64-NO-ATOMIC-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64-NO-ATOMIC-NEXT: addi sp, sp, 16
; RV64-NO-ATOMIC-NEXT: ret
@@ -160,7 +160,7 @@ define i8 @rmw8(ptr %p) nounwind {
; RV64-ATOMIC-NEXT: addi sp, sp, -16
; RV64-ATOMIC-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64-ATOMIC-NEXT: li a1, 1
-; RV64-ATOMIC-NEXT: call __sync_fetch_and_add_1@plt
+; RV64-ATOMIC-NEXT: call __sync_fetch_and_add_1
; RV64-ATOMIC-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64-ATOMIC-NEXT: addi sp, sp, 16
; RV64-ATOMIC-NEXT: ret
@@ -170,7 +170,7 @@ define i8 @rmw8(ptr %p) nounwind {
; RV64-ATOMIC-TRAILING-NEXT: addi sp, sp, -16
; RV64-ATOMIC-TRAILING-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64-ATOMIC-TRAILING-NEXT: li a1, 1
-; RV64-ATOMIC-TRAILING-NEXT: call __sync_fetch_and_add_1@plt
+; RV64-ATOMIC-TRAILING-NEXT: call __sync_fetch_and_add_1
; RV64-ATOMIC-TRAILING-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64-ATOMIC-TRAILING-NEXT: addi sp, sp, 16
; RV64-ATOMIC-TRAILING-NEXT: ret
@@ -188,7 +188,7 @@ define i8 @cmpxchg8(ptr %p) nounwind {
; RV32-NO-ATOMIC-NEXT: li a2, 1
; RV32-NO-ATOMIC-NEXT: li a3, 5
; RV32-NO-ATOMIC-NEXT: li a4, 5
-; RV32-NO-ATOMIC-NEXT: call __atomic_compare_exchange_1@plt
+; RV32-NO-ATOMIC-NEXT: call __atomic_compare_exchange_1
; RV32-NO-ATOMIC-NEXT: lbu a0, 11(sp)
; RV32-NO-ATOMIC-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32-NO-ATOMIC-NEXT: addi sp, sp, 16
@@ -200,7 +200,7 @@ define i8 @cmpxchg8(ptr %p) nounwind {
; RV32-ATOMIC-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32-ATOMIC-NEXT: li a2, 1
; RV32-ATOMIC-NEXT: li a1, 0
-; RV32-ATOMIC-NEXT: call __sync_val_compare_and_swap_1@plt
+; RV32-ATOMIC-NEXT: call __sync_val_compare_and_swap_1
; RV32-ATOMIC-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32-ATOMIC-NEXT: addi sp, sp, 16
; RV32-ATOMIC-NEXT: ret
@@ -211,7 +211,7 @@ define i8 @cmpxchg8(ptr %p) nounwind {
; RV32-ATOMIC-TRAILING-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32-ATOMIC-TRAILING-NEXT: li a2, 1
; RV32-ATOMIC-TRAILING-NEXT: li a1, 0
-; RV32-ATOMIC-TRAILING-NEXT: call __sync_val_compare_and_swap_1@plt
+; RV32-ATOMIC-TRAILING-NEXT: call __sync_val_compare_and_swap_1
; RV32-ATOMIC-TRAILING-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32-ATOMIC-TRAILING-NEXT: addi sp, sp, 16
; RV32-ATOMIC-TRAILING-NEXT: ret
@@ -225,7 +225,7 @@ define i8 @cmpxchg8(ptr %p) nounwind {
; RV64-NO-ATOMIC-NEXT: li a2, 1
; RV64-NO-ATOMIC-NEXT: li a3, 5
; RV64-NO-ATOMIC-NEXT: li a4, 5
-; RV64-NO-ATOMIC-NEXT: call __atomic_compare_exchange_1@plt
+; RV64-NO-ATOMIC-NEXT: call __atomic_compare_exchange_1
; RV64-NO-ATOMIC-NEXT: lbu a0, 7(sp)
; RV64-NO-ATOMIC-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64-NO-ATOMIC-NEXT: addi sp, sp, 16
@@ -237,7 +237,7 @@ define i8 @cmpxchg8(ptr %p) nounwind {
; RV64-ATOMIC-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64-ATOMIC-NEXT: li a2, 1
; RV64-ATOMIC-NEXT: li a1, 0
-; RV64-ATOMIC-NEXT: call __sync_val_compare_and_swap_1@plt
+; RV64-ATOMIC-NEXT: call __sync_val_compare_and_swap_1
; RV64-ATOMIC-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64-ATOMIC-NEXT: addi sp, sp, 16
; RV64-ATOMIC-NEXT: ret
@@ -248,7 +248,7 @@ define i8 @cmpxchg8(ptr %p) nounwind {
; RV64-ATOMIC-TRAILING-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64-ATOMIC-TRAILING-NEXT: li a2, 1
; RV64-ATOMIC-TRAILING-NEXT: li a1, 0
-; RV64-ATOMIC-TRAILING-NEXT: call __sync_val_compare_and_swap_1@plt
+; RV64-ATOMIC-TRAILING-NEXT: call __sync_val_compare_and_swap_1
; RV64-ATOMIC-TRAILING-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64-ATOMIC-TRAILING-NEXT: addi sp, sp, 16
; RV64-ATOMIC-TRAILING-NEXT: ret
@@ -263,7 +263,7 @@ define i16 @load16(ptr %p) nounwind {
; RV32-NO-ATOMIC-NEXT: addi sp, sp, -16
; RV32-NO-ATOMIC-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32-NO-ATOMIC-NEXT: li a1, 5
-; RV32-NO-ATOMIC-NEXT: call __atomic_load_2@plt
+; RV32-NO-ATOMIC-NEXT: call __atomic_load_2
; RV32-NO-ATOMIC-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32-NO-ATOMIC-NEXT: addi sp, sp, 16
; RV32-NO-ATOMIC-NEXT: ret
@@ -287,7 +287,7 @@ define i16 @load16(ptr %p) nounwind {
; RV64-NO-ATOMIC-NEXT: addi sp, sp, -16
; RV64-NO-ATOMIC-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64-NO-ATOMIC-NEXT: li a1, 5
-; RV64-NO-ATOMIC-NEXT: call __atomic_load_2@plt
+; RV64-NO-ATOMIC-NEXT: call __atomic_load_2
; RV64-NO-ATOMIC-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64-NO-ATOMIC-NEXT: addi sp, sp, 16
; RV64-NO-ATOMIC-NEXT: ret
@@ -316,7 +316,7 @@ define void @store16(ptr %p) nounwind {
; RV32-NO-ATOMIC-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32-NO-ATOMIC-NEXT: li a2, 5
; RV32-NO-ATOMIC-NEXT: li a1, 0
-; RV32-NO-ATOMIC-NEXT: call __atomic_store_2@plt
+; RV32-NO-ATOMIC-NEXT: call __atomic_store_2
; RV32-NO-ATOMIC-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32-NO-ATOMIC-NEXT: addi sp, sp, 16
; RV32-NO-ATOMIC-NEXT: ret
@@ -340,7 +340,7 @@ define void @store16(ptr %p) nounwind {
; RV64-NO-ATOMIC-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64-NO-ATOMIC-NEXT: li a2, 5
; RV64-NO-ATOMIC-NEXT: li a1, 0
-; RV64-NO-ATOMIC-NEXT: call __atomic_store_2@plt
+; RV64-NO-ATOMIC-NEXT: call __atomic_store_2
; RV64-NO-ATOMIC-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64-NO-ATOMIC-NEXT: addi sp, sp, 16
; RV64-NO-ATOMIC-NEXT: ret
@@ -368,7 +368,7 @@ define i16 @rmw16(ptr %p) nounwind {
; RV32-NO-ATOMIC-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32-NO-ATOMIC-NEXT: li a1, 1
; RV32-NO-ATOMIC-NEXT: li a2, 5
-; RV32-NO-ATOMIC-NEXT: call __atomic_fetch_add_2@plt
+; RV32-NO-ATOMIC-NEXT: call __atomic_fetch_add_2
; RV32-NO-ATOMIC-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32-NO-ATOMIC-NEXT: addi sp, sp, 16
; RV32-NO-ATOMIC-NEXT: ret
@@ -378,7 +378,7 @@ define i16 @rmw16(ptr %p) nounwind {
; RV32-ATOMIC-NEXT: addi sp, sp, -16
; RV32-ATOMIC-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32-ATOMIC-NEXT: li a1, 1
-; RV32-ATOMIC-NEXT: call __sync_fetch_and_add_2@plt
+; RV32-ATOMIC-NEXT: call __sync_fetch_and_add_2
; RV32-ATOMIC-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32-ATOMIC-NEXT: addi sp, sp, 16
; RV32-ATOMIC-NEXT: ret
@@ -388,7 +388,7 @@ define i16 @rmw16(ptr %p) nounwind {
; RV32-ATOMIC-TRAILING-NEXT: addi sp, sp, -16
; RV32-ATOMIC-TRAILING-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32-ATOMIC-TRAILING-NEXT: li a1, 1
-; RV32-ATOMIC-TRAILING-NEXT: call __sync_fetch_and_add_2@plt
+; RV32-ATOMIC-TRAILING-NEXT: call __sync_fetch_and_add_2
; RV32-ATOMIC-TRAILING-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32-ATOMIC-TRAILING-NEXT: addi sp, sp, 16
; RV32-ATOMIC-TRAILING-NEXT: ret
@@ -399,7 +399,7 @@ define i16 @rmw16(ptr %p) nounwind {
; RV64-NO-ATOMIC-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64-NO-ATOMIC-NEXT: li a1, 1
; RV64-NO-ATOMIC-NEXT: li a2, 5
-; RV64-NO-ATOMIC-NEXT: call __atomic_fetch_add_2@plt
+; RV64-NO-ATOMIC-NEXT: call __atomic_fetch_add_2
; RV64-NO-ATOMIC-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64-NO-ATOMIC-NEXT: addi sp, sp, 16
; RV64-NO-ATOMIC-NEXT: ret
@@ -409,7 +409,7 @@ define i16 @rmw16(ptr %p) nounwind {
; RV64-ATOMIC-NEXT: addi sp, sp, -16
; RV64-ATOMIC-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64-ATOMIC-NEXT: li a1, 1
-; RV64-ATOMIC-NEXT: call __sync_fetch_and_add_2@plt
+; RV64-ATOMIC-NEXT: call __sync_fetch_and_add_2
; RV64-ATOMIC-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64-ATOMIC-NEXT: addi sp, sp, 16
; RV64-ATOMIC-NEXT: ret
@@ -419,7 +419,7 @@ define i16 @rmw16(ptr %p) nounwind {
; RV64-ATOMIC-TRAILING-NEXT: addi sp, sp, -16
; RV64-ATOMIC-TRAILING-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64-ATOMIC-TRAILING-NEXT: li a1, 1
-; RV64-ATOMIC-TRAILING-NEXT: call __sync_fetch_and_add_2@plt
+; RV64-ATOMIC-TRAILING-NEXT: call __sync_fetch_and_add_2
; RV64-ATOMIC-TRAILING-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64-ATOMIC-TRAILING-NEXT: addi sp, sp, 16
; RV64-ATOMIC-TRAILING-NEXT: ret
@@ -437,7 +437,7 @@ define i16 @cmpxchg16(ptr %p) nounwind {
; RV32-NO-ATOMIC-NEXT: li a2, 1
; RV32-NO-ATOMIC-NEXT: li a3, 5
; RV32-NO-ATOMIC-NEXT: li a4, 5
-; RV32-NO-ATOMIC-NEXT: call __atomic_compare_exchange_2@plt
+; RV32-NO-ATOMIC-NEXT: call __atomic_compare_exchange_2
; RV32-NO-ATOMIC-NEXT: lh a0, 10(sp)
; RV32-NO-ATOMIC-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32-NO-ATOMIC-NEXT: addi sp, sp, 16
@@ -449,7 +449,7 @@ define i16 @cmpxchg16(ptr %p) nounwind {
; RV32-ATOMIC-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32-ATOMIC-NEXT: li a2, 1
; RV32-ATOMIC-NEXT: li a1, 0
-; RV32-ATOMIC-NEXT: call __sync_val_compare_and_swap_2@plt
+; RV32-ATOMIC-NEXT: call __sync_val_compare_and_swap_2
; RV32-ATOMIC-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32-ATOMIC-NEXT: addi sp, sp, 16
; RV32-ATOMIC-NEXT: ret
@@ -460,7 +460,7 @@ define i16 @cmpxchg16(ptr %p) nounwind {
; RV32-ATOMIC-TRAILING-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32-ATOMIC-TRAILING-NEXT: li a2, 1
; RV32-ATOMIC-TRAILING-NEXT: li a1, 0
-; RV32-ATOMIC-TRAILING-NEXT: call __sync_val_compare_and_swap_2@plt
+; RV32-ATOMIC-TRAILING-NEXT: call __sync_val_compare_and_swap_2
; RV32-ATOMIC-TRAILING-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32-ATOMIC-TRAILING-NEXT: addi sp, sp, 16
; RV32-ATOMIC-TRAILING-NEXT: ret
@@ -474,7 +474,7 @@ define i16 @cmpxchg16(ptr %p) nounwind {
; RV64-NO-ATOMIC-NEXT: li a2, 1
; RV64-NO-ATOMIC-NEXT: li a3, 5
; RV64-NO-ATOMIC-NEXT: li a4, 5
-; RV64-NO-ATOMIC-NEXT: call __atomic_compare_exchange_2@plt
+; RV64-NO-ATOMIC-NEXT: call __atomic_compare_exchange_2
; RV64-NO-ATOMIC-NEXT: lh a0, 6(sp)
; RV64-NO-ATOMIC-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64-NO-ATOMIC-NEXT: addi sp, sp, 16
@@ -486,7 +486,7 @@ define i16 @cmpxchg16(ptr %p) nounwind {
; RV64-ATOMIC-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64-ATOMIC-NEXT: li a2, 1
; RV64-ATOMIC-NEXT: li a1, 0
-; RV64-ATOMIC-NEXT: call __sync_val_compare_and_swap_2@plt
+; RV64-ATOMIC-NEXT: call __sync_val_compare_and_swap_2
; RV64-ATOMIC-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64-ATOMIC-NEXT: addi sp, sp, 16
; RV64-ATOMIC-NEXT: ret
@@ -497,7 +497,7 @@ define i16 @cmpxchg16(ptr %p) nounwind {
; RV64-ATOMIC-TRAILING-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64-ATOMIC-TRAILING-NEXT: li a2, 1
; RV64-ATOMIC-TRAILING-NEXT: li a1, 0
-; RV64-ATOMIC-TRAILING-NEXT: call __sync_val_compare_and_swap_2@plt
+; RV64-ATOMIC-TRAILING-NEXT: call __sync_val_compare_and_swap_2
; RV64-ATOMIC-TRAILING-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64-ATOMIC-TRAILING-NEXT: addi sp, sp, 16
; RV64-ATOMIC-TRAILING-NEXT: ret
@@ -512,7 +512,7 @@ define i32 @load32_unordered(ptr %p) nounwind {
; RV32-NO-ATOMIC-NEXT: addi sp, sp, -16
; RV32-NO-ATOMIC-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32-NO-ATOMIC-NEXT: li a1, 0
-; RV32-NO-ATOMIC-NEXT: call __atomic_load_4@plt
+; RV32-NO-ATOMIC-NEXT: call __atomic_load_4
; RV32-NO-ATOMIC-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32-NO-ATOMIC-NEXT: addi sp, sp, 16
; RV32-NO-ATOMIC-NEXT: ret
@@ -532,7 +532,7 @@ define i32 @load32_unordered(ptr %p) nounwind {
; RV64-NO-ATOMIC-NEXT: addi sp, sp, -16
; RV64-NO-ATOMIC-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64-NO-ATOMIC-NEXT: li a1, 0
-; RV64-NO-ATOMIC-NEXT: call __atomic_load_4@plt
+; RV64-NO-ATOMIC-NEXT: call __atomic_load_4
; RV64-NO-ATOMIC-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64-NO-ATOMIC-NEXT: addi sp, sp, 16
; RV64-NO-ATOMIC-NEXT: ret
@@ -556,7 +556,7 @@ define i32 @load32_monotonic(ptr %p) nounwind {
; RV32-NO-ATOMIC-NEXT: addi sp, sp, -16
; RV32-NO-ATOMIC-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32-NO-ATOMIC-NEXT: li a1, 0
-; RV32-NO-ATOMIC-NEXT: call __atomic_load_4@plt
+; RV32-NO-ATOMIC-NEXT: call __atomic_load_4
; RV32-NO-ATOMIC-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32-NO-ATOMIC-NEXT: addi sp, sp, 16
; RV32-NO-ATOMIC-NEXT: ret
@@ -576,7 +576,7 @@ define i32 @load32_monotonic(ptr %p) nounwind {
; RV64-NO-ATOMIC-NEXT: addi sp, sp, -16
; RV64-NO-ATOMIC-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64-NO-ATOMIC-NEXT: li a1, 0
-; RV64-NO-ATOMIC-NEXT: call __atomic_load_4@plt
+; RV64-NO-ATOMIC-NEXT: call __atomic_load_4
; RV64-NO-ATOMIC-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64-NO-ATOMIC-NEXT: addi sp, sp, 16
; RV64-NO-ATOMIC-NEXT: ret
@@ -600,7 +600,7 @@ define i32 @load32_acquire(ptr %p) nounwind {
; RV32-NO-ATOMIC-NEXT: addi sp, sp, -16
; RV32-NO-ATOMIC-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32-NO-ATOMIC-NEXT: li a1, 2
-; RV32-NO-ATOMIC-NEXT: call __atomic_load_4@plt
+; RV32-NO-ATOMIC-NEXT: call __atomic_load_4
; RV32-NO-ATOMIC-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32-NO-ATOMIC-NEXT: addi sp, sp, 16
; RV32-NO-ATOMIC-NEXT: ret
@@ -622,7 +622,7 @@ define i32 @load32_acquire(ptr %p) nounwind {
; RV64-NO-ATOMIC-NEXT: addi sp, sp, -16
; RV64-NO-ATOMIC-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64-NO-ATOMIC-NEXT: li a1, 2
-; RV64-NO-ATOMIC-NEXT: call __atomic_load_4@plt
+; RV64-NO-ATOMIC-NEXT: call __atomic_load_4
; RV64-NO-ATOMIC-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64-NO-ATOMIC-NEXT: addi sp, sp, 16
; RV64-NO-ATOMIC-NEXT: ret
@@ -648,7 +648,7 @@ define i32 @load32_seq_cst(ptr %p) nounwind {
; RV32-NO-ATOMIC-NEXT: addi sp, sp, -16
; RV32-NO-ATOMIC-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32-NO-ATOMIC-NEXT: li a1, 5
-; RV32-NO-ATOMIC-NEXT: call __atomic_load_4@plt
+; RV32-NO-ATOMIC-NEXT: call __atomic_load_4
; RV32-NO-ATOMIC-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32-NO-ATOMIC-NEXT: addi sp, sp, 16
; RV32-NO-ATOMIC-NEXT: ret
@@ -672,7 +672,7 @@ define i32 @load32_seq_cst(ptr %p) nounwind {
; RV64-NO-ATOMIC-NEXT: addi sp, sp, -16
; RV64-NO-ATOMIC-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64-NO-ATOMIC-NEXT: li a1, 5
-; RV64-NO-ATOMIC-NEXT: call __atomic_load_4@plt
+; RV64-NO-ATOMIC-NEXT: call __atomic_load_4
; RV64-NO-ATOMIC-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64-NO-ATOMIC-NEXT: addi sp, sp, 16
; RV64-NO-ATOMIC-NEXT: ret
@@ -701,7 +701,7 @@ define void @store32_unordered(ptr %p) nounwind {
; RV32-NO-ATOMIC-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32-NO-ATOMIC-NEXT: li a1, 0
; RV32-NO-ATOMIC-NEXT: li a2, 0
-; RV32-NO-ATOMIC-NEXT: call __atomic_store_4@plt
+; RV32-NO-ATOMIC-NEXT: call __atomic_store_4
; RV32-NO-ATOMIC-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32-NO-ATOMIC-NEXT: addi sp, sp, 16
; RV32-NO-ATOMIC-NEXT: ret
@@ -722,7 +722,7 @@ define void @store32_unordered(ptr %p) nounwind {
; RV64-NO-ATOMIC-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64-NO-ATOMIC-NEXT: li a1, 0
; RV64-NO-ATOMIC-NEXT: li a2, 0
-; RV64-NO-ATOMIC-NEXT: call __atomic_store_4@plt
+; RV64-NO-ATOMIC-NEXT: call __atomic_store_4
; RV64-NO-ATOMIC-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64-NO-ATOMIC-NEXT: addi sp, sp, 16
; RV64-NO-ATOMIC-NEXT: ret
@@ -747,7 +747,7 @@ define void @store32_monotonic(ptr %p) nounwind {
; RV32-NO-ATOMIC-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32-NO-ATOMIC-NEXT: li a1, 0
; RV32-NO-ATOMIC-NEXT: li a2, 0
-; RV32-NO-ATOMIC-NEXT: call __atomic_store_4@plt
+; RV32-NO-ATOMIC-NEXT: call __atomic_store_4
; RV32-NO-ATOMIC-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32-NO-ATOMIC-NEXT: addi sp, sp, 16
; RV32-NO-ATOMIC-NEXT: ret
@@ -768,7 +768,7 @@ define void @store32_monotonic(ptr %p) nounwind {
; RV64-NO-ATOMIC-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64-NO-ATOMIC-NEXT: li a1, 0
; RV64-NO-ATOMIC-NEXT: li a2, 0
-; RV64-NO-ATOMIC-NEXT: call __atomic_store_4@plt
+; RV64-NO-ATOMIC-NEXT: call __atomic_store_4
; RV64-NO-ATOMIC-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64-NO-ATOMIC-NEXT: addi sp, sp, 16
; RV64-NO-ATOMIC-NEXT: ret
@@ -793,7 +793,7 @@ define void @store32_release(ptr %p) nounwind {
; RV32-NO-ATOMIC-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32-NO-ATOMIC-NEXT: li a2, 3
; RV32-NO-ATOMIC-NEXT: li a1, 0
-; RV32-NO-ATOMIC-NEXT: call __atomic_store_4@plt
+; RV32-NO-ATOMIC-NEXT: call __atomic_store_4
; RV32-NO-ATOMIC-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32-NO-ATOMIC-NEXT: addi sp, sp, 16
; RV32-NO-ATOMIC-NEXT: ret
@@ -816,7 +816,7 @@ define void @store32_release(ptr %p) nounwind {
; RV64-NO-ATOMIC-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64-NO-ATOMIC-NEXT: li a2, 3
; RV64-NO-ATOMIC-NEXT: li a1, 0
-; RV64-NO-ATOMIC-NEXT: call __atomic_store_4@plt
+; RV64-NO-ATOMIC-NEXT: call __atomic_store_4
; RV64-NO-ATOMIC-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64-NO-ATOMIC-NEXT: addi sp, sp, 16
; RV64-NO-ATOMIC-NEXT: ret
@@ -843,7 +843,7 @@ define void @store32_seq_cst(ptr %p) nounwind {
; RV32-NO-ATOMIC-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32-NO-ATOMIC-NEXT: li a2, 5
; RV32-NO-ATOMIC-NEXT: li a1, 0
-; RV32-NO-ATOMIC-NEXT: call __atomic_store_4@plt
+; RV32-NO-ATOMIC-NEXT: call __atomic_store_4
; RV32-NO-ATOMIC-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32-NO-ATOMIC-NEXT: addi sp, sp, 16
; RV32-NO-ATOMIC-NEXT: ret
@@ -867,7 +867,7 @@ define void @store32_seq_cst(ptr %p) nounwind {
; RV64-NO-ATOMIC-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64-NO-ATOMIC-NEXT: li a2, 5
; RV64-NO-ATOMIC-NEXT: li a1, 0
-; RV64-NO-ATOMIC-NEXT: call __atomic_store_4@plt
+; RV64-NO-ATOMIC-NEXT: call __atomic_store_4
; RV64-NO-ATOMIC-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64-NO-ATOMIC-NEXT: addi sp, sp, 16
; RV64-NO-ATOMIC-NEXT: ret
@@ -895,7 +895,7 @@ define i32 @rmw32_add_monotonic(ptr %p) nounwind {
; RV32-NO-ATOMIC-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32-NO-ATOMIC-NEXT: li a1, 1
; RV32-NO-ATOMIC-NEXT: li a2, 0
-; RV32-NO-ATOMIC-NEXT: call __atomic_fetch_add_4@plt
+; RV32-NO-ATOMIC-NEXT: call __atomic_fetch_add_4
; RV32-NO-ATOMIC-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32-NO-ATOMIC-NEXT: addi sp, sp, 16
; RV32-NO-ATOMIC-NEXT: ret
@@ -905,7 +905,7 @@ define i32 @rmw32_add_monotonic(ptr %p) nounwind {
; RV32-ATOMIC-NEXT: addi sp, sp, -16
; RV32-ATOMIC-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32-ATOMIC-NEXT: li a1, 1
-; RV32-ATOMIC-NEXT: call __sync_fetch_and_add_4@plt
+; RV32-ATOMIC-NEXT: call __sync_fetch_and_add_4
; RV32-ATOMIC-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32-ATOMIC-NEXT: addi sp, sp, 16
; RV32-ATOMIC-NEXT: ret
@@ -915,7 +915,7 @@ define i32 @rmw32_add_monotonic(ptr %p) nounwind {
; RV32-ATOMIC-TRAILING-NEXT: addi sp, sp, -16
; RV32-ATOMIC-TRAILING-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32-ATOMIC-TRAILING-NEXT: li a1, 1
-; RV32-ATOMIC-TRAILING-NEXT: call __sync_fetch_and_add_4@plt
+; RV32-ATOMIC-TRAILING-NEXT: call __sync_fetch_and_add_4
; RV32-ATOMIC-TRAILING-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32-ATOMIC-TRAILING-NEXT: addi sp, sp, 16
; RV32-ATOMIC-TRAILING-NEXT: ret
@@ -926,7 +926,7 @@ define i32 @rmw32_add_monotonic(ptr %p) nounwind {
; RV64-NO-ATOMIC-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64-NO-ATOMIC-NEXT: li a1, 1
; RV64-NO-ATOMIC-NEXT: li a2, 0
-; RV64-NO-ATOMIC-NEXT: call __atomic_fetch_add_4@plt
+; RV64-NO-ATOMIC-NEXT: call __atomic_fetch_add_4
; RV64-NO-ATOMIC-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64-NO-ATOMIC-NEXT: addi sp, sp, 16
; RV64-NO-ATOMIC-NEXT: ret
@@ -936,7 +936,7 @@ define i32 @rmw32_add_monotonic(ptr %p) nounwind {
; RV64-ATOMIC-NEXT: addi sp, sp, -16
; RV64-ATOMIC-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64-ATOMIC-NEXT: li a1, 1
-; RV64-ATOMIC-NEXT: call __sync_fetch_and_add_4@plt
+; RV64-ATOMIC-NEXT: call __sync_fetch_and_add_4
; RV64-ATOMIC-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64-ATOMIC-NEXT: addi sp, sp, 16
; RV64-ATOMIC-NEXT: ret
@@ -946,7 +946,7 @@ define i32 @rmw32_add_monotonic(ptr %p) nounwind {
; RV64-ATOMIC-TRAILING-NEXT: addi sp, sp, -16
; RV64-ATOMIC-TRAILING-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64-ATOMIC-TRAILING-NEXT: li a1, 1
-; RV64-ATOMIC-TRAILING-NEXT: call __sync_fetch_and_add_4@plt
+; RV64-ATOMIC-TRAILING-NEXT: call __sync_fetch_and_add_4
; RV64-ATOMIC-TRAILING-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64-ATOMIC-TRAILING-NEXT: addi sp, sp, 16
; RV64-ATOMIC-TRAILING-NEXT: ret
@@ -961,7 +961,7 @@ define i32 @rmw32_add_seq_cst(ptr %p) nounwind {
; RV32-NO-ATOMIC-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32-NO-ATOMIC-NEXT: li a1, 1
; RV32-NO-ATOMIC-NEXT: li a2, 5
-; RV32-NO-ATOMIC-NEXT: call __atomic_fetch_add_4@plt
+; RV32-NO-ATOMIC-NEXT: call __atomic_fetch_add_4
; RV32-NO-ATOMIC-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32-NO-ATOMIC-NEXT: addi sp, sp, 16
; RV32-NO-ATOMIC-NEXT: ret
@@ -971,7 +971,7 @@ define i32 @rmw32_add_seq_cst(ptr %p) nounwind {
; RV32-ATOMIC-NEXT: addi sp, sp, -16
; RV32-ATOMIC-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32-ATOMIC-NEXT: li a1, 1
-; RV32-ATOMIC-NEXT: call __sync_fetch_and_add_4@plt
+; RV32-ATOMIC-NEXT: call __sync_fetch_and_add_4
; RV32-ATOMIC-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32-ATOMIC-NEXT: addi sp, sp, 16
; RV32-ATOMIC-NEXT: ret
@@ -981,7 +981,7 @@ define i32 @rmw32_add_seq_cst(ptr %p) nounwind {
; RV32-ATOMIC-TRAILING-NEXT: addi sp, sp, -16
; RV32-ATOMIC-TRAILING-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32-ATOMIC-TRAILING-NEXT: li a1, 1
-; RV32-ATOMIC-TRAILING-NEXT: call __sync_fetch_and_add_4@plt
+; RV32-ATOMIC-TRAILING-NEXT: call __sync_fetch_and_add_4
; RV32-ATOMIC-TRAILING-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32-ATOMIC-TRAILING-NEXT: addi sp, sp, 16
; RV32-ATOMIC-TRAILING-NEXT: ret
@@ -992,7 +992,7 @@ define i32 @rmw32_add_seq_cst(ptr %p) nounwind {
; RV64-NO-ATOMIC-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64-NO-ATOMIC-NEXT: li a1, 1
; RV64-NO-ATOMIC-NEXT: li a2, 5
-; RV64-NO-ATOMIC-NEXT: call __atomic_fetch_add_4@plt
+; RV64-NO-ATOMIC-NEXT: call __atomic_fetch_add_4
; RV64-NO-ATOMIC-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64-NO-ATOMIC-NEXT: addi sp, sp, 16
; RV64-NO-ATOMIC-NEXT: ret
@@ -1002,7 +1002,7 @@ define i32 @rmw32_add_seq_cst(ptr %p) nounwind {
; RV64-ATOMIC-NEXT: addi sp, sp, -16
; RV64-ATOMIC-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64-ATOMIC-NEXT: li a1, 1
-; RV64-ATOMIC-NEXT: call __sync_fetch_and_add_4@plt
+; RV64-ATOMIC-NEXT: call __sync_fetch_and_add_4
; RV64-ATOMIC-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64-ATOMIC-NEXT: addi sp, sp, 16
; RV64-ATOMIC-NEXT: ret
@@ -1012,7 +1012,7 @@ define i32 @rmw32_add_seq_cst(ptr %p) nounwind {
; RV64-ATOMIC-TRAILING-NEXT: addi sp, sp, -16
; RV64-ATOMIC-TRAILING-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64-ATOMIC-TRAILING-NEXT: li a1, 1
-; RV64-ATOMIC-TRAILING-NEXT: call __sync_fetch_and_add_4@plt
+; RV64-ATOMIC-TRAILING-NEXT: call __sync_fetch_and_add_4
; RV64-ATOMIC-TRAILING-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64-ATOMIC-TRAILING-NEXT: addi sp, sp, 16
; RV64-ATOMIC-TRAILING-NEXT: ret
@@ -1027,7 +1027,7 @@ define i32 @rmw32_sub_seq_cst(ptr %p) nounwind {
; RV32-NO-ATOMIC-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32-NO-ATOMIC-NEXT: li a1, 1
; RV32-NO-ATOMIC-NEXT: li a2, 5
-; RV32-NO-ATOMIC-NEXT: call __atomic_fetch_sub_4@plt
+; RV32-NO-ATOMIC-NEXT: call __atomic_fetch_sub_4
; RV32-NO-ATOMIC-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32-NO-ATOMIC-NEXT: addi sp, sp, 16
; RV32-NO-ATOMIC-NEXT: ret
@@ -1037,7 +1037,7 @@ define i32 @rmw32_sub_seq_cst(ptr %p) nounwind {
; RV32-ATOMIC-NEXT: addi sp, sp, -16
; RV32-ATOMIC-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32-ATOMIC-NEXT: li a1, 1
-; RV32-ATOMIC-NEXT: call __sync_fetch_and_sub_4@plt
+; RV32-ATOMIC-NEXT: call __sync_fetch_and_sub_4
; RV32-ATOMIC-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32-ATOMIC-NEXT: addi sp, sp, 16
; RV32-ATOMIC-NEXT: ret
@@ -1047,7 +1047,7 @@ define i32 @rmw32_sub_seq_cst(ptr %p) nounwind {
; RV32-ATOMIC-TRAILING-NEXT: addi sp, sp, -16
; RV32-ATOMIC-TRAILING-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32-ATOMIC-TRAILING-NEXT: li a1, 1
-; RV32-ATOMIC-TRAILING-NEXT: call __sync_fetch_and_sub_4@plt
+; RV32-ATOMIC-TRAILING-NEXT: call __sync_fetch_and_sub_4
; RV32-ATOMIC-TRAILING-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32-ATOMIC-TRAILING-NEXT: addi sp, sp, 16
; RV32-ATOMIC-TRAILING-NEXT: ret
@@ -1058,7 +1058,7 @@ define i32 @rmw32_sub_seq_cst(ptr %p) nounwind {
; RV64-NO-ATOMIC-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64-NO-ATOMIC-NEXT: li a1, 1
; RV64-NO-ATOMIC-NEXT: li a2, 5
-; RV64-NO-ATOMIC-NEXT: call __atomic_fetch_sub_4@plt
+; RV64-NO-ATOMIC-NEXT: call __atomic_fetch_sub_4
; RV64-NO-ATOMIC-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64-NO-ATOMIC-NEXT: addi sp, sp, 16
; RV64-NO-ATOMIC-NEXT: ret
@@ -1068,7 +1068,7 @@ define i32 @rmw32_sub_seq_cst(ptr %p) nounwind {
; RV64-ATOMIC-NEXT: addi sp, sp, -16
; RV64-ATOMIC-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64-ATOMIC-NEXT: li a1, 1
-; RV64-ATOMIC-NEXT: call __sync_fetch_and_sub_4@plt
+; RV64-ATOMIC-NEXT: call __sync_fetch_and_sub_4
; RV64-ATOMIC-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64-ATOMIC-NEXT: addi sp, sp, 16
; RV64-ATOMIC-NEXT: ret
@@ -1078,7 +1078,7 @@ define i32 @rmw32_sub_seq_cst(ptr %p) nounwind {
; RV64-ATOMIC-TRAILING-NEXT: addi sp, sp, -16
; RV64-ATOMIC-TRAILING-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64-ATOMIC-TRAILING-NEXT: li a1, 1
-; RV64-ATOMIC-TRAILING-NEXT: call __sync_fetch_and_sub_4@plt
+; RV64-ATOMIC-TRAILING-NEXT: call __sync_fetch_and_sub_4
; RV64-ATOMIC-TRAILING-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64-ATOMIC-TRAILING-NEXT: addi sp, sp, 16
; RV64-ATOMIC-TRAILING-NEXT: ret
@@ -1093,7 +1093,7 @@ define i32 @rmw32_and_seq_cst(ptr %p) nounwind {
; RV32-NO-ATOMIC-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32-NO-ATOMIC-NEXT: li a1, 1
; RV32-NO-ATOMIC-NEXT: li a2, 5
-; RV32-NO-ATOMIC-NEXT: call __atomic_fetch_and_4@plt
+; RV32-NO-ATOMIC-NEXT: call __atomic_fetch_and_4
; RV32-NO-ATOMIC-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32-NO-ATOMIC-NEXT: addi sp, sp, 16
; RV32-NO-ATOMIC-NEXT: ret
@@ -1103,7 +1103,7 @@ define i32 @rmw32_and_seq_cst(ptr %p) nounwind {
; RV32-ATOMIC-NEXT: addi sp, sp, -16
; RV32-ATOMIC-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32-ATOMIC-NEXT: li a1, 1
-; RV32-ATOMIC-NEXT: call __sync_fetch_and_and_4@plt
+; RV32-ATOMIC-NEXT: call __sync_fetch_and_and_4
; RV32-ATOMIC-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32-ATOMIC-NEXT: addi sp, sp, 16
; RV32-ATOMIC-NEXT: ret
@@ -1113,7 +1113,7 @@ define i32 @rmw32_and_seq_cst(ptr %p) nounwind {
; RV32-ATOMIC-TRAILING-NEXT: addi sp, sp, -16
; RV32-ATOMIC-TRAILING-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32-ATOMIC-TRAILING-NEXT: li a1, 1
-; RV32-ATOMIC-TRAILING-NEXT: call __sync_fetch_and_and_4@plt
+; RV32-ATOMIC-TRAILING-NEXT: call __sync_fetch_and_and_4
; RV32-ATOMIC-TRAILING-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32-ATOMIC-TRAILING-NEXT: addi sp, sp, 16
; RV32-ATOMIC-TRAILING-NEXT: ret
@@ -1124,7 +1124,7 @@ define i32 @rmw32_and_seq_cst(ptr %p) nounwind {
; RV64-NO-ATOMIC-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64-NO-ATOMIC-NEXT: li a1, 1
; RV64-NO-ATOMIC-NEXT: li a2, 5
-; RV64-NO-ATOMIC-NEXT: call __atomic_fetch_and_4@plt
+; RV64-NO-ATOMIC-NEXT: call __atomic_fetch_and_4
; RV64-NO-ATOMIC-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64-NO-ATOMIC-NEXT: addi sp, sp, 16
; RV64-NO-ATOMIC-NEXT: ret
@@ -1134,7 +1134,7 @@ define i32 @rmw32_and_seq_cst(ptr %p) nounwind {
; RV64-ATOMIC-NEXT: addi sp, sp, -16
; RV64-ATOMIC-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64-ATOMIC-NEXT: li a1, 1
-; RV64-ATOMIC-NEXT: call __sync_fetch_and_and_4@plt
+; RV64-ATOMIC-NEXT: call __sync_fetch_and_and_4
; RV64-ATOMIC-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64-ATOMIC-NEXT: addi sp, sp, 16
; RV64-ATOMIC-NEXT: ret
@@ -1144,7 +1144,7 @@ define i32 @rmw32_and_seq_cst(ptr %p) nounwind {
; RV64-ATOMIC-TRAILING-NEXT: addi sp, sp, -16
; RV64-ATOMIC-TRAILING-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64-ATOMIC-TRAILING-NEXT: li a1, 1
-; RV64-ATOMIC-TRAILING-NEXT: call __sync_fetch_and_and_4@plt
+; RV64-ATOMIC-TRAILING-NEXT: call __sync_fetch_and_and_4
; RV64-ATOMIC-TRAILING-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64-ATOMIC-TRAILING-NEXT: addi sp, sp, 16
; RV64-ATOMIC-TRAILING-NEXT: ret
@@ -1159,7 +1159,7 @@ define i32 @rmw32_nand_seq_cst(ptr %p) nounwind {
; RV32-NO-ATOMIC-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32-NO-ATOMIC-NEXT: li a1, 1
; RV32-NO-ATOMIC-NEXT: li a2, 5
-; RV32-NO-ATOMIC-NEXT: call __atomic_fetch_nand_4@plt
+; RV32-NO-ATOMIC-NEXT: call __atomic_fetch_nand_4
; RV32-NO-ATOMIC-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32-NO-ATOMIC-NEXT: addi sp, sp, 16
; RV32-NO-ATOMIC-NEXT: ret
@@ -1169,7 +1169,7 @@ define i32 @rmw32_nand_seq_cst(ptr %p) nounwind {
; RV32-ATOMIC-NEXT: addi sp, sp, -16
; RV32-ATOMIC-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32-ATOMIC-NEXT: li a1, 1
-; RV32-ATOMIC-NEXT: call __sync_fetch_and_nand_4@plt
+; RV32-ATOMIC-NEXT: call __sync_fetch_and_nand_4
; RV32-ATOMIC-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32-ATOMIC-NEXT: addi sp, sp, 16
; RV32-ATOMIC-NEXT: ret
@@ -1179,7 +1179,7 @@ define i32 @rmw32_nand_seq_cst(ptr %p) nounwind {
; RV32-ATOMIC-TRAILING-NEXT: addi sp, sp, -16
; RV32-ATOMIC-TRAILING-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32-ATOMIC-TRAILING-NEXT: li a1, 1
-; RV32-ATOMIC-TRAILING-NEXT: call __sync_fetch_and_nand_4@plt
+; RV32-ATOMIC-TRAILING-NEXT: call __sync_fetch_and_nand_4
; RV32-ATOMIC-TRAILING-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32-ATOMIC-TRAILING-NEXT: addi sp, sp, 16
; RV32-ATOMIC-TRAILING-NEXT: ret
@@ -1190,7 +1190,7 @@ define i32 @rmw32_nand_seq_cst(ptr %p) nounwind {
; RV64-NO-ATOMIC-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64-NO-ATOMIC-NEXT: li a1, 1
; RV64-NO-ATOMIC-NEXT: li a2, 5
-; RV64-NO-ATOMIC-NEXT: call __atomic_fetch_nand_4@plt
+; RV64-NO-ATOMIC-NEXT: call __atomic_fetch_nand_4
; RV64-NO-ATOMIC-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64-NO-ATOMIC-NEXT: addi sp, sp, 16
; RV64-NO-ATOMIC-NEXT: ret
@@ -1200,7 +1200,7 @@ define i32 @rmw32_nand_seq_cst(ptr %p) nounwind {
; RV64-ATOMIC-NEXT: addi sp, sp, -16
; RV64-ATOMIC-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64-ATOMIC-NEXT: li a1, 1
-; RV64-ATOMIC-NEXT: call __sync_fetch_and_nand_4@plt
+; RV64-ATOMIC-NEXT: call __sync_fetch_and_nand_4
; RV64-ATOMIC-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64-ATOMIC-NEXT: addi sp, sp, 16
; RV64-ATOMIC-NEXT: ret
@@ -1210,7 +1210,7 @@ define i32 @rmw32_nand_seq_cst(ptr %p) nounwind {
; RV64-ATOMIC-TRAILING-NEXT: addi sp, sp, -16
; RV64-ATOMIC-TRAILING-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64-ATOMIC-TRAILING-NEXT: li a1, 1
-; RV64-ATOMIC-TRAILING-NEXT: call __sync_fetch_and_nand_4@plt
+; RV64-ATOMIC-TRAILING-NEXT: call __sync_fetch_and_nand_4
; RV64-ATOMIC-TRAILING-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64-ATOMIC-TRAILING-NEXT: addi sp, sp, 16
; RV64-ATOMIC-TRAILING-NEXT: ret
@@ -1225,7 +1225,7 @@ define i32 @rmw32_or_seq_cst(ptr %p) nounwind {
; RV32-NO-ATOMIC-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32-NO-ATOMIC-NEXT: li a1, 1
; RV32-NO-ATOMIC-NEXT: li a2, 5
-; RV32-NO-ATOMIC-NEXT: call __atomic_fetch_or_4@plt
+; RV32-NO-ATOMIC-NEXT: call __atomic_fetch_or_4
; RV32-NO-ATOMIC-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32-NO-ATOMIC-NEXT: addi sp, sp, 16
; RV32-NO-ATOMIC-NEXT: ret
@@ -1235,7 +1235,7 @@ define i32 @rmw32_or_seq_cst(ptr %p) nounwind {
; RV32-ATOMIC-NEXT: addi sp, sp, -16
; RV32-ATOMIC-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32-ATOMIC-NEXT: li a1, 1
-; RV32-ATOMIC-NEXT: call __sync_fetch_and_or_4@plt
+; RV32-ATOMIC-NEXT: call __sync_fetch_and_or_4
; RV32-ATOMIC-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32-ATOMIC-NEXT: addi sp, sp, 16
; RV32-ATOMIC-NEXT: ret
@@ -1245,7 +1245,7 @@ define i32 @rmw32_or_seq_cst(ptr %p) nounwind {
; RV32-ATOMIC-TRAILING-NEXT: addi sp, sp, -16
; RV32-ATOMIC-TRAILING-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32-ATOMIC-TRAILING-NEXT: li a1, 1
-; RV32-ATOMIC-TRAILING-NEXT: call __sync_fetch_and_or_4@plt
+; RV32-ATOMIC-TRAILING-NEXT: call __sync_fetch_and_or_4
; RV32-ATOMIC-TRAILING-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32-ATOMIC-TRAILING-NEXT: addi sp, sp, 16
; RV32-ATOMIC-TRAILING-NEXT: ret
@@ -1256,7 +1256,7 @@ define i32 @rmw32_or_seq_cst(ptr %p) nounwind {
; RV64-NO-ATOMIC-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64-NO-ATOMIC-NEXT: li a1, 1
; RV64-NO-ATOMIC-NEXT: li a2, 5
-; RV64-NO-ATOMIC-NEXT: call __atomic_fetch_or_4@plt
+; RV64-NO-ATOMIC-NEXT: call __atomic_fetch_or_4
; RV64-NO-ATOMIC-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64-NO-ATOMIC-NEXT: addi sp, sp, 16
; RV64-NO-ATOMIC-NEXT: ret
@@ -1266,7 +1266,7 @@ define i32 @rmw32_or_seq_cst(ptr %p) nounwind {
; RV64-ATOMIC-NEXT: addi sp, sp, -16
; RV64-ATOMIC-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64-ATOMIC-NEXT: li a1, 1
-; RV64-ATOMIC-NEXT: call __sync_fetch_and_or_4@plt
+; RV64-ATOMIC-NEXT: call __sync_fetch_and_or_4
; RV64-ATOMIC-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64-ATOMIC-NEXT: addi sp, sp, 16
; RV64-ATOMIC-NEXT: ret
@@ -1276,7 +1276,7 @@ define i32 @rmw32_or_seq_cst(ptr %p) nounwind {
; RV64-ATOMIC-TRAILING-NEXT: addi sp, sp, -16
; RV64-ATOMIC-TRAILING-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64-ATOMIC-TRAILING-NEXT: li a1, 1
-; RV64-ATOMIC-TRAILING-NEXT: call __sync_fetch_and_or_4@plt
+; RV64-ATOMIC-TRAILING-NEXT: call __sync_fetch_and_or_4
; RV64-ATOMIC-TRAILING-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64-ATOMIC-TRAILING-NEXT: addi sp, sp, 16
; RV64-ATOMIC-TRAILING-NEXT: ret
@@ -1291,7 +1291,7 @@ define i32 @rmw32_xor_seq_cst(ptr %p) nounwind {
; RV32-NO-ATOMIC-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32-NO-ATOMIC-NEXT: li a1, 1
; RV32-NO-ATOMIC-NEXT: li a2, 5
-; RV32-NO-ATOMIC-NEXT: call __atomic_fetch_xor_4@plt
+; RV32-NO-ATOMIC-NEXT: call __atomic_fetch_xor_4
; RV32-NO-ATOMIC-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32-NO-ATOMIC-NEXT: addi sp, sp, 16
; RV32-NO-ATOMIC-NEXT: ret
@@ -1301,7 +1301,7 @@ define i32 @rmw32_xor_seq_cst(ptr %p) nounwind {
; RV32-ATOMIC-NEXT: addi sp, sp, -16
; RV32-ATOMIC-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32-ATOMIC-NEXT: li a1, 1
-; RV32-ATOMIC-NEXT: call __sync_fetch_and_xor_4@plt
+; RV32-ATOMIC-NEXT: call __sync_fetch_and_xor_4
; RV32-ATOMIC-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32-ATOMIC-NEXT: addi sp, sp, 16
; RV32-ATOMIC-NEXT: ret
@@ -1311,7 +1311,7 @@ define i32 @rmw32_xor_seq_cst(ptr %p) nounwind {
; RV32-ATOMIC-TRAILING-NEXT: addi sp, sp, -16
; RV32-ATOMIC-TRAILING-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32-ATOMIC-TRAILING-NEXT: li a1, 1
-; RV32-ATOMIC-TRAILING-NEXT: call __sync_fetch_and_xor_4@plt
+; RV32-ATOMIC-TRAILING-NEXT: call __sync_fetch_and_xor_4
; RV32-ATOMIC-TRAILING-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32-ATOMIC-TRAILING-NEXT: addi sp, sp, 16
; RV32-ATOMIC-TRAILING-NEXT: ret
@@ -1322,7 +1322,7 @@ define i32 @rmw32_xor_seq_cst(ptr %p) nounwind {
; RV64-NO-ATOMIC-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64-NO-ATOMIC-NEXT: li a1, 1
; RV64-NO-ATOMIC-NEXT: li a2, 5
-; RV64-NO-ATOMIC-NEXT: call __atomic_fetch_xor_4@plt
+; RV64-NO-ATOMIC-NEXT: call __atomic_fetch_xor_4
; RV64-NO-ATOMIC-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64-NO-ATOMIC-NEXT: addi sp, sp, 16
; RV64-NO-ATOMIC-NEXT: ret
@@ -1332,7 +1332,7 @@ define i32 @rmw32_xor_seq_cst(ptr %p) nounwind {
; RV64-ATOMIC-NEXT: addi sp, sp, -16
; RV64-ATOMIC-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64-ATOMIC-NEXT: li a1, 1
-; RV64-ATOMIC-NEXT: call __sync_fetch_and_xor_4@plt
+; RV64-ATOMIC-NEXT: call __sync_fetch_and_xor_4
; RV64-ATOMIC-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64-ATOMIC-NEXT: addi sp, sp, 16
; RV64-ATOMIC-NEXT: ret
@@ -1342,7 +1342,7 @@ define i32 @rmw32_xor_seq_cst(ptr %p) nounwind {
; RV64-ATOMIC-TRAILING-NEXT: addi sp, sp, -16
; RV64-ATOMIC-TRAILING-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64-ATOMIC-TRAILING-NEXT: li a1, 1
-; RV64-ATOMIC-TRAILING-NEXT: call __sync_fetch_and_xor_4@plt
+; RV64-ATOMIC-TRAILING-NEXT: call __sync_fetch_and_xor_4
; RV64-ATOMIC-TRAILING-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64-ATOMIC-TRAILING-NEXT: addi sp, sp, 16
; RV64-ATOMIC-TRAILING-NEXT: ret
@@ -1366,7 +1366,7 @@ define i32 @rmw32_max_seq_cst(ptr %p) nounwind {
; RV32-NO-ATOMIC-NEXT: li a3, 5
; RV32-NO-ATOMIC-NEXT: li a4, 5
; RV32-NO-ATOMIC-NEXT: mv a0, s0
-; RV32-NO-ATOMIC-NEXT: call __atomic_compare_exchange_4@plt
+; RV32-NO-ATOMIC-NEXT: call __atomic_compare_exchange_4
; RV32-NO-ATOMIC-NEXT: lw a1, 4(sp)
; RV32-NO-ATOMIC-NEXT: bnez a0, .LBB23_4
; RV32-NO-ATOMIC-NEXT: .LBB23_2: # %atomicrmw.start
@@ -1389,7 +1389,7 @@ define i32 @rmw32_max_seq_cst(ptr %p) nounwind {
; RV32-ATOMIC-NEXT: addi sp, sp, -16
; RV32-ATOMIC-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32-ATOMIC-NEXT: li a1, 1
-; RV32-ATOMIC-NEXT: call __sync_fetch_and_max_4@plt
+; RV32-ATOMIC-NEXT: call __sync_fetch_and_max_4
; RV32-ATOMIC-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32-ATOMIC-NEXT: addi sp, sp, 16
; RV32-ATOMIC-NEXT: ret
@@ -1399,7 +1399,7 @@ define i32 @rmw32_max_seq_cst(ptr %p) nounwind {
; RV32-ATOMIC-TRAILING-NEXT: addi sp, sp, -16
; RV32-ATOMIC-TRAILING-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32-ATOMIC-TRAILING-NEXT: li a1, 1
-; RV32-ATOMIC-TRAILING-NEXT: call __sync_fetch_and_max_4@plt
+; RV32-ATOMIC-TRAILING-NEXT: call __sync_fetch_and_max_4
; RV32-ATOMIC-TRAILING-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32-ATOMIC-TRAILING-NEXT: addi sp, sp, 16
; RV32-ATOMIC-TRAILING-NEXT: ret
@@ -1419,7 +1419,7 @@ define i32 @rmw32_max_seq_cst(ptr %p) nounwind {
; RV64-NO-ATOMIC-NEXT: li a3, 5
; RV64-NO-ATOMIC-NEXT: li a4, 5
; RV64-NO-ATOMIC-NEXT: mv a0, s0
-; RV64-NO-ATOMIC-NEXT: call __atomic_compare_exchange_4@plt
+; RV64-NO-ATOMIC-NEXT: call __atomic_compare_exchange_4
; RV64-NO-ATOMIC-NEXT: lw a1, 12(sp)
; RV64-NO-ATOMIC-NEXT: bnez a0, .LBB23_4
; RV64-NO-ATOMIC-NEXT: .LBB23_2: # %atomicrmw.start
@@ -1443,7 +1443,7 @@ define i32 @rmw32_max_seq_cst(ptr %p) nounwind {
; RV64-ATOMIC-NEXT: addi sp, sp, -16
; RV64-ATOMIC-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64-ATOMIC-NEXT: li a1, 1
-; RV64-ATOMIC-NEXT: call __sync_fetch_and_max_4@plt
+; RV64-ATOMIC-NEXT: call __sync_fetch_and_max_4
; RV64-ATOMIC-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64-ATOMIC-NEXT: addi sp, sp, 16
; RV64-ATOMIC-NEXT: ret
@@ -1453,7 +1453,7 @@ define i32 @rmw32_max_seq_cst(ptr %p) nounwind {
; RV64-ATOMIC-TRAILING-NEXT: addi sp, sp, -16
; RV64-ATOMIC-TRAILING-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64-ATOMIC-TRAILING-NEXT: li a1, 1
-; RV64-ATOMIC-TRAILING-NEXT: call __sync_fetch_and_max_4@plt
+; RV64-ATOMIC-TRAILING-NEXT: call __sync_fetch_and_max_4
; RV64-ATOMIC-TRAILING-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64-ATOMIC-TRAILING-NEXT: addi sp, sp, 16
; RV64-ATOMIC-TRAILING-NEXT: ret
@@ -1479,7 +1479,7 @@ define i32 @rmw32_min_seq_cst(ptr %p) nounwind {
; RV32-NO-ATOMIC-NEXT: li a3, 5
; RV32-NO-ATOMIC-NEXT: li a4, 5
; RV32-NO-ATOMIC-NEXT: mv a0, s0
-; RV32-NO-ATOMIC-NEXT: call __atomic_compare_exchange_4@plt
+; RV32-NO-ATOMIC-NEXT: call __atomic_compare_exchange_4
; RV32-NO-ATOMIC-NEXT: lw a1, 0(sp)
; RV32-NO-ATOMIC-NEXT: bnez a0, .LBB24_4
; RV32-NO-ATOMIC-NEXT: .LBB24_2: # %atomicrmw.start
@@ -1503,7 +1503,7 @@ define i32 @rmw32_min_seq_cst(ptr %p) nounwind {
; RV32-ATOMIC-NEXT: addi sp, sp, -16
; RV32-ATOMIC-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32-ATOMIC-NEXT: li a1, 1
-; RV32-ATOMIC-NEXT: call __sync_fetch_and_min_4@plt
+; RV32-ATOMIC-NEXT: call __sync_fetch_and_min_4
; RV32-ATOMIC-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32-ATOMIC-NEXT: addi sp, sp, 16
; RV32-ATOMIC-NEXT: ret
@@ -1513,7 +1513,7 @@ define i32 @rmw32_min_seq_cst(ptr %p) nounwind {
; RV32-ATOMIC-TRAILING-NEXT: addi sp, sp, -16
; RV32-ATOMIC-TRAILING-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32-ATOMIC-TRAILING-NEXT: li a1, 1
-; RV32-ATOMIC-TRAILING-NEXT: call __sync_fetch_and_min_4@plt
+; RV32-ATOMIC-TRAILING-NEXT: call __sync_fetch_and_min_4
; RV32-ATOMIC-TRAILING-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32-ATOMIC-TRAILING-NEXT: addi sp, sp, 16
; RV32-ATOMIC-TRAILING-NEXT: ret
@@ -1535,7 +1535,7 @@ define i32 @rmw32_min_seq_cst(ptr %p) nounwind {
; RV64-NO-ATOMIC-NEXT: li a3, 5
; RV64-NO-ATOMIC-NEXT: li a4, 5
; RV64-NO-ATOMIC-NEXT: mv a0, s0
-; RV64-NO-ATOMIC-NEXT: call __atomic_compare_exchange_4@plt
+; RV64-NO-ATOMIC-NEXT: call __atomic_compare_exchange_4
; RV64-NO-ATOMIC-NEXT: lw a1, 4(sp)
; RV64-NO-ATOMIC-NEXT: bnez a0, .LBB24_4
; RV64-NO-ATOMIC-NEXT: .LBB24_2: # %atomicrmw.start
@@ -1559,7 +1559,7 @@ define i32 @rmw32_min_seq_cst(ptr %p) nounwind {
; RV64-ATOMIC-NEXT: addi sp, sp, -16
; RV64-ATOMIC-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64-ATOMIC-NEXT: li a1, 1
-; RV64-ATOMIC-NEXT: call __sync_fetch_and_min_4@plt
+; RV64-ATOMIC-NEXT: call __sync_fetch_and_min_4
; RV64-ATOMIC-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64-ATOMIC-NEXT: addi sp, sp, 16
; RV64-ATOMIC-NEXT: ret
@@ -1569,7 +1569,7 @@ define i32 @rmw32_min_seq_cst(ptr %p) nounwind {
; RV64-ATOMIC-TRAILING-NEXT: addi sp, sp, -16
; RV64-ATOMIC-TRAILING-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64-ATOMIC-TRAILING-NEXT: li a1, 1
-; RV64-ATOMIC-TRAILING-NEXT: call __sync_fetch_and_min_4@plt
+; RV64-ATOMIC-TRAILING-NEXT: call __sync_fetch_and_min_4
; RV64-ATOMIC-TRAILING-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64-ATOMIC-TRAILING-NEXT: addi sp, sp, 16
; RV64-ATOMIC-TRAILING-NEXT: ret
@@ -1594,7 +1594,7 @@ define i32 @rmw32_umax_seq_cst(ptr %p) nounwind {
; RV32-NO-ATOMIC-NEXT: li a3, 5
; RV32-NO-ATOMIC-NEXT: li a4, 5
; RV32-NO-ATOMIC-NEXT: mv a0, s0
-; RV32-NO-ATOMIC-NEXT: call __atomic_compare_exchange_4@plt
+; RV32-NO-ATOMIC-NEXT: call __atomic_compare_exchange_4
; RV32-NO-ATOMIC-NEXT: lw a1, 4(sp)
; RV32-NO-ATOMIC-NEXT: beqz a0, .LBB25_1
; RV32-NO-ATOMIC-NEXT: # %bb.2: # %atomicrmw.end
@@ -1609,7 +1609,7 @@ define i32 @rmw32_umax_seq_cst(ptr %p) nounwind {
; RV32-ATOMIC-NEXT: addi sp, sp, -16
; RV32-ATOMIC-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32-ATOMIC-NEXT: li a1, 1
-; RV32-ATOMIC-NEXT: call __sync_fetch_and_umax_4@plt
+; RV32-ATOMIC-NEXT: call __sync_fetch_and_umax_4
; RV32-ATOMIC-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32-ATOMIC-NEXT: addi sp, sp, 16
; RV32-ATOMIC-NEXT: ret
@@ -1619,7 +1619,7 @@ define i32 @rmw32_umax_seq_cst(ptr %p) nounwind {
; RV32-ATOMIC-TRAILING-NEXT: addi sp, sp, -16
; RV32-ATOMIC-TRAILING-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32-ATOMIC-TRAILING-NEXT: li a1, 1
-; RV32-ATOMIC-TRAILING-NEXT: call __sync_fetch_and_umax_4@plt
+; RV32-ATOMIC-TRAILING-NEXT: call __sync_fetch_and_umax_4
; RV32-ATOMIC-TRAILING-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32-ATOMIC-TRAILING-NEXT: addi sp, sp, 16
; RV32-ATOMIC-TRAILING-NEXT: ret
@@ -1639,7 +1639,7 @@ define i32 @rmw32_umax_seq_cst(ptr %p) nounwind {
; RV64-NO-ATOMIC-NEXT: li a3, 5
; RV64-NO-ATOMIC-NEXT: li a4, 5
; RV64-NO-ATOMIC-NEXT: mv a0, s0
-; RV64-NO-ATOMIC-NEXT: call __atomic_compare_exchange_4@plt
+; RV64-NO-ATOMIC-NEXT: call __atomic_compare_exchange_4
; RV64-NO-ATOMIC-NEXT: lw a1, 12(sp)
; RV64-NO-ATOMIC-NEXT: bnez a0, .LBB25_4
; RV64-NO-ATOMIC-NEXT: .LBB25_2: # %atomicrmw.start
@@ -1663,7 +1663,7 @@ define i32 @rmw32_umax_seq_cst(ptr %p) nounwind {
; RV64-ATOMIC-NEXT: addi sp, sp, -16
; RV64-ATOMIC-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64-ATOMIC-NEXT: li a1, 1
-; RV64-ATOMIC-NEXT: call __sync_fetch_and_umax_4@plt
+; RV64-ATOMIC-NEXT: call __sync_fetch_and_umax_4
; RV64-ATOMIC-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64-ATOMIC-NEXT: addi sp, sp, 16
; RV64-ATOMIC-NEXT: ret
@@ -1673,7 +1673,7 @@ define i32 @rmw32_umax_seq_cst(ptr %p) nounwind {
; RV64-ATOMIC-TRAILING-NEXT: addi sp, sp, -16
; RV64-ATOMIC-TRAILING-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64-ATOMIC-TRAILING-NEXT: li a1, 1
-; RV64-ATOMIC-TRAILING-NEXT: call __sync_fetch_and_umax_4@plt
+; RV64-ATOMIC-TRAILING-NEXT: call __sync_fetch_and_umax_4
; RV64-ATOMIC-TRAILING-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64-ATOMIC-TRAILING-NEXT: addi sp, sp, 16
; RV64-ATOMIC-TRAILING-NEXT: ret
@@ -1699,7 +1699,7 @@ define i32 @rmw32_umin_seq_cst(ptr %p) nounwind {
; RV32-NO-ATOMIC-NEXT: li a3, 5
; RV32-NO-ATOMIC-NEXT: li a4, 5
; RV32-NO-ATOMIC-NEXT: mv a0, s0
-; RV32-NO-ATOMIC-NEXT: call __atomic_compare_exchange_4@plt
+; RV32-NO-ATOMIC-NEXT: call __atomic_compare_exchange_4
; RV32-NO-ATOMIC-NEXT: lw a1, 0(sp)
; RV32-NO-ATOMIC-NEXT: bnez a0, .LBB26_4
; RV32-NO-ATOMIC-NEXT: .LBB26_2: # %atomicrmw.start
@@ -1723,7 +1723,7 @@ define i32 @rmw32_umin_seq_cst(ptr %p) nounwind {
; RV32-ATOMIC-NEXT: addi sp, sp, -16
; RV32-ATOMIC-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32-ATOMIC-NEXT: li a1, 1
-; RV32-ATOMIC-NEXT: call __sync_fetch_and_umin_4@plt
+; RV32-ATOMIC-NEXT: call __sync_fetch_and_umin_4
; RV32-ATOMIC-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32-ATOMIC-NEXT: addi sp, sp, 16
; RV32-ATOMIC-NEXT: ret
@@ -1733,7 +1733,7 @@ define i32 @rmw32_umin_seq_cst(ptr %p) nounwind {
; RV32-ATOMIC-TRAILING-NEXT: addi sp, sp, -16
; RV32-ATOMIC-TRAILING-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32-ATOMIC-TRAILING-NEXT: li a1, 1
-; RV32-ATOMIC-TRAILING-NEXT: call __sync_fetch_and_umin_4@plt
+; RV32-ATOMIC-TRAILING-NEXT: call __sync_fetch_and_umin_4
; RV32-ATOMIC-TRAILING-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32-ATOMIC-TRAILING-NEXT: addi sp, sp, 16
; RV32-ATOMIC-TRAILING-NEXT: ret
@@ -1755,7 +1755,7 @@ define i32 @rmw32_umin_seq_cst(ptr %p) nounwind {
; RV64-NO-ATOMIC-NEXT: li a3, 5
; RV64-NO-ATOMIC-NEXT: li a4, 5
; RV64-NO-ATOMIC-NEXT: mv a0, s0
-; RV64-NO-ATOMIC-NEXT: call __atomic_compare_exchange_4@plt
+; RV64-NO-ATOMIC-NEXT: call __atomic_compare_exchange_4
; RV64-NO-ATOMIC-NEXT: lw a1, 4(sp)
; RV64-NO-ATOMIC-NEXT: bnez a0, .LBB26_4
; RV64-NO-ATOMIC-NEXT: .LBB26_2: # %atomicrmw.start
@@ -1779,7 +1779,7 @@ define i32 @rmw32_umin_seq_cst(ptr %p) nounwind {
; RV64-ATOMIC-NEXT: addi sp, sp, -16
; RV64-ATOMIC-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64-ATOMIC-NEXT: li a1, 1
-; RV64-ATOMIC-NEXT: call __sync_fetch_and_umin_4@plt
+; RV64-ATOMIC-NEXT: call __sync_fetch_and_umin_4
; RV64-ATOMIC-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64-ATOMIC-NEXT: addi sp, sp, 16
; RV64-ATOMIC-NEXT: ret
@@ -1789,7 +1789,7 @@ define i32 @rmw32_umin_seq_cst(ptr %p) nounwind {
; RV64-ATOMIC-TRAILING-NEXT: addi sp, sp, -16
; RV64-ATOMIC-TRAILING-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64-ATOMIC-TRAILING-NEXT: li a1, 1
-; RV64-ATOMIC-TRAILING-NEXT: call __sync_fetch_and_umin_4@plt
+; RV64-ATOMIC-TRAILING-NEXT: call __sync_fetch_and_umin_4
; RV64-ATOMIC-TRAILING-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64-ATOMIC-TRAILING-NEXT: addi sp, sp, 16
; RV64-ATOMIC-TRAILING-NEXT: ret
@@ -1804,7 +1804,7 @@ define i32 @rmw32_xchg_seq_cst(ptr %p) nounwind {
; RV32-NO-ATOMIC-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32-NO-ATOMIC-NEXT: li a1, 1
; RV32-NO-ATOMIC-NEXT: li a2, 5
-; RV32-NO-ATOMIC-NEXT: call __atomic_exchange_4@plt
+; RV32-NO-ATOMIC-NEXT: call __atomic_exchange_4
; RV32-NO-ATOMIC-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32-NO-ATOMIC-NEXT: addi sp, sp, 16
; RV32-NO-ATOMIC-NEXT: ret
@@ -1814,7 +1814,7 @@ define i32 @rmw32_xchg_seq_cst(ptr %p) nounwind {
; RV32-ATOMIC-NEXT: addi sp, sp, -16
; RV32-ATOMIC-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32-ATOMIC-NEXT: li a1, 1
-; RV32-ATOMIC-NEXT: call __sync_lock_test_and_set_4@plt
+; RV32-ATOMIC-NEXT: call __sync_lock_test_and_set_4
; RV32-ATOMIC-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32-ATOMIC-NEXT: addi sp, sp, 16
; RV32-ATOMIC-NEXT: ret
@@ -1824,7 +1824,7 @@ define i32 @rmw32_xchg_seq_cst(ptr %p) nounwind {
; RV32-ATOMIC-TRAILING-NEXT: addi sp, sp, -16
; RV32-ATOMIC-TRAILING-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32-ATOMIC-TRAILING-NEXT: li a1, 1
-; RV32-ATOMIC-TRAILING-NEXT: call __sync_lock_test_and_set_4@plt
+; RV32-ATOMIC-TRAILING-NEXT: call __sync_lock_test_and_set_4
; RV32-ATOMIC-TRAILING-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32-ATOMIC-TRAILING-NEXT: addi sp, sp, 16
; RV32-ATOMIC-TRAILING-NEXT: ret
@@ -1835,7 +1835,7 @@ define i32 @rmw32_xchg_seq_cst(ptr %p) nounwind {
; RV64-NO-ATOMIC-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64-NO-ATOMIC-NEXT: li a1, 1
; RV64-NO-ATOMIC-NEXT: li a2, 5
-; RV64-NO-ATOMIC-NEXT: call __atomic_exchange_4@plt
+; RV64-NO-ATOMIC-NEXT: call __atomic_exchange_4
; RV64-NO-ATOMIC-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64-NO-ATOMIC-NEXT: addi sp, sp, 16
; RV64-NO-ATOMIC-NEXT: ret
@@ -1845,7 +1845,7 @@ define i32 @rmw32_xchg_seq_cst(ptr %p) nounwind {
; RV64-ATOMIC-NEXT: addi sp, sp, -16
; RV64-ATOMIC-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64-ATOMIC-NEXT: li a1, 1
-; RV64-ATOMIC-NEXT: call __sync_lock_test_and_set_4@plt
+; RV64-ATOMIC-NEXT: call __sync_lock_test_and_set_4
; RV64-ATOMIC-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64-ATOMIC-NEXT: addi sp, sp, 16
; RV64-ATOMIC-NEXT: ret
@@ -1855,7 +1855,7 @@ define i32 @rmw32_xchg_seq_cst(ptr %p) nounwind {
; RV64-ATOMIC-TRAILING-NEXT: addi sp, sp, -16
; RV64-ATOMIC-TRAILING-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64-ATOMIC-TRAILING-NEXT: li a1, 1
-; RV64-ATOMIC-TRAILING-NEXT: call __sync_lock_test_and_set_4@plt
+; RV64-ATOMIC-TRAILING-NEXT: call __sync_lock_test_and_set_4
; RV64-ATOMIC-TRAILING-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64-ATOMIC-TRAILING-NEXT: addi sp, sp, 16
; RV64-ATOMIC-TRAILING-NEXT: ret
@@ -1876,14 +1876,14 @@ define float @rmw32_fadd_seq_cst(ptr %p) nounwind {
; RV32-NO-ATOMIC-NEXT: # =>This Inner Loop Header: Depth=1
; RV32-NO-ATOMIC-NEXT: lui a1, 260096
; RV32-NO-ATOMIC-NEXT: mv a0, s1
-; RV32-NO-ATOMIC-NEXT: call __addsf3@plt
+; RV32-NO-ATOMIC-NEXT: call __addsf3
; RV32-NO-ATOMIC-NEXT: mv a2, a0
; RV32-NO-ATOMIC-NEXT: sw s1, 0(sp)
; RV32-NO-ATOMIC-NEXT: mv a1, sp
; RV32-NO-ATOMIC-NEXT: li a3, 5
; RV32-NO-ATOMIC-NEXT: li a4, 5
; RV32-NO-ATOMIC-NEXT: mv a0, s0
-; RV32-NO-ATOMIC-NEXT: call __atomic_compare_exchange_4@plt
+; RV32-NO-ATOMIC-NEXT: call __atomic_compare_exchange_4
; RV32-NO-ATOMIC-NEXT: lw s1, 0(sp)
; RV32-NO-ATOMIC-NEXT: beqz a0, .LBB28_1
; RV32-NO-ATOMIC-NEXT: # %bb.2: # %atomicrmw.end
@@ -1906,11 +1906,11 @@ define float @rmw32_fadd_seq_cst(ptr %p) nounwind {
; RV32-ATOMIC-NEXT: # =>This Inner Loop Header: Depth=1
; RV32-ATOMIC-NEXT: mv s1, a0
; RV32-ATOMIC-NEXT: lui a1, 260096
-; RV32-ATOMIC-NEXT: call __addsf3@plt
+; RV32-ATOMIC-NEXT: call __addsf3
; RV32-ATOMIC-NEXT: mv a2, a0
; RV32-ATOMIC-NEXT: mv a0, s0
; RV32-ATOMIC-NEXT: mv a1, s1
-; RV32-ATOMIC-NEXT: call __sync_val_compare_and_swap_4@plt
+; RV32-ATOMIC-NEXT: call __sync_val_compare_and_swap_4
; RV32-ATOMIC-NEXT: bne a0, s1, .LBB28_1
; RV32-ATOMIC-NEXT: # %bb.2: # %atomicrmw.end
; RV32-ATOMIC-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
@@ -1931,11 +1931,11 @@ define float @rmw32_fadd_seq_cst(ptr %p) nounwind {
; RV32-ATOMIC-TRAILING-NEXT: # =>This Inner Loop Header: Depth=1
; RV32-ATOMIC-TRAILING-NEXT: mv s1, a0
; RV32-ATOMIC-TRAILING-NEXT: lui a1, 260096
-; RV32-ATOMIC-TRAILING-NEXT: call __addsf3@plt
+; RV32-ATOMIC-TRAILING-NEXT: call __addsf3
; RV32-ATOMIC-TRAILING-NEXT: mv a2, a0
; RV32-ATOMIC-TRAILING-NEXT: mv a0, s0
; RV32-ATOMIC-TRAILING-NEXT: mv a1, s1
-; RV32-ATOMIC-TRAILING-NEXT: call __sync_val_compare_and_swap_4@plt
+; RV32-ATOMIC-TRAILING-NEXT: call __sync_val_compare_and_swap_4
; RV32-ATOMIC-TRAILING-NEXT: bne a0, s1, .LBB28_1
; RV32-ATOMIC-TRAILING-NEXT: # %bb.2: # %atomicrmw.end
; RV32-ATOMIC-TRAILING-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
@@ -1956,14 +1956,14 @@ define float @rmw32_fadd_seq_cst(ptr %p) nounwind {
; RV64-NO-ATOMIC-NEXT: # =>This Inner Loop Header: Depth=1
; RV64-NO-ATOMIC-NEXT: lui a1, 260096
; RV64-NO-ATOMIC-NEXT: mv a0, s1
-; RV64-NO-ATOMIC-NEXT: call __addsf3@plt
+; RV64-NO-ATOMIC-NEXT: call __addsf3
; RV64-NO-ATOMIC-NEXT: mv a2, a0
; RV64-NO-ATOMIC-NEXT: sw s1, 4(sp)
; RV64-NO-ATOMIC-NEXT: addi a1, sp, 4
; RV64-NO-ATOMIC-NEXT: li a3, 5
; RV64-NO-ATOMIC-NEXT: li a4, 5
; RV64-NO-ATOMIC-NEXT: mv a0, s0
-; RV64-NO-ATOMIC-NEXT: call __atomic_compare_exchange_4@plt
+; RV64-NO-ATOMIC-NEXT: call __atomic_compare_exchange_4
; RV64-NO-ATOMIC-NEXT: lw s1, 4(sp)
; RV64-NO-ATOMIC-NEXT: beqz a0, .LBB28_1
; RV64-NO-ATOMIC-NEXT: # %bb.2: # %atomicrmw.end
@@ -1987,12 +1987,12 @@ define float @rmw32_fadd_seq_cst(ptr %p) nounwind {
; RV64-ATOMIC-NEXT: # =>This Inner Loop Header: Depth=1
; RV64-ATOMIC-NEXT: lui a1, 260096
; RV64-ATOMIC-NEXT: mv a0, s1
-; RV64-ATOMIC-NEXT: call __addsf3@plt
+; RV64-ATOMIC-NEXT: call __addsf3
; RV64-ATOMIC-NEXT: mv a2, a0
; RV64-ATOMIC-NEXT: sext.w s2, s1
; RV64-ATOMIC-NEXT: mv a0, s0
; RV64-ATOMIC-NEXT: mv a1, s2
-; RV64-ATOMIC-NEXT: call __sync_val_compare_and_swap_4@plt
+; RV64-ATOMIC-NEXT: call __sync_val_compare_and_swap_4
; RV64-ATOMIC-NEXT: mv s1, a0
; RV64-ATOMIC-NEXT: bne a0, s2, .LBB28_1
; RV64-ATOMIC-NEXT: # %bb.2: # %atomicrmw.end
@@ -2017,12 +2017,12 @@ define float @rmw32_fadd_seq_cst(ptr %p) nounwind {
; RV64-ATOMIC-TRAILING-NEXT: # =>This Inner Loop Header: Depth=1
; RV64-ATOMIC-TRAILING-NEXT: lui a1, 260096
; RV64-ATOMIC-TRAILING-NEXT: mv a0, s1
-; RV64-ATOMIC-TRAILING-NEXT: call __addsf3@plt
+; RV64-ATOMIC-TRAILING-NEXT: call __addsf3
; RV64-ATOMIC-TRAILING-NEXT: mv a2, a0
; RV64-ATOMIC-TRAILING-NEXT: sext.w s2, s1
; RV64-ATOMIC-TRAILING-NEXT: mv a0, s0
; RV64-ATOMIC-TRAILING-NEXT: mv a1, s2
-; RV64-ATOMIC-TRAILING-NEXT: call __sync_val_compare_and_swap_4@plt
+; RV64-ATOMIC-TRAILING-NEXT: call __sync_val_compare_and_swap_4
; RV64-ATOMIC-TRAILING-NEXT: mv s1, a0
; RV64-ATOMIC-TRAILING-NEXT: bne a0, s2, .LBB28_1
; RV64-ATOMIC-TRAILING-NEXT: # %bb.2: # %atomicrmw.end
@@ -2050,14 +2050,14 @@ define float @rmw32_fsub_seq_cst(ptr %p) nounwind {
; RV32-NO-ATOMIC-NEXT: # =>This Inner Loop Header: Depth=1
; RV32-NO-ATOMIC-NEXT: lui a1, 784384
; RV32-NO-ATOMIC-NEXT: mv a0, s1
-; RV32-NO-ATOMIC-NEXT: call __addsf3@plt
+; RV32-NO-ATOMIC-NEXT: call __addsf3
; RV32-NO-ATOMIC-NEXT: mv a2, a0
; RV32-NO-ATOMIC-NEXT: sw s1, 0(sp)
; RV32-NO-ATOMIC-NEXT: mv a1, sp
; RV32-NO-ATOMIC-NEXT: li a3, 5
; RV32-NO-ATOMIC-NEXT: li a4, 5
; RV32-NO-ATOMIC-NEXT: mv a0, s0
-; RV32-NO-ATOMIC-NEXT: call __atomic_compare_exchange_4@plt
+; RV32-NO-ATOMIC-NEXT: call __atomic_compare_exchange_4
; RV32-NO-ATOMIC-NEXT: lw s1, 0(sp)
; RV32-NO-ATOMIC-NEXT: beqz a0, .LBB29_1
; RV32-NO-ATOMIC-NEXT: # %bb.2: # %atomicrmw.end
@@ -2080,11 +2080,11 @@ define float @rmw32_fsub_seq_cst(ptr %p) nounwind {
; RV32-ATOMIC-NEXT: # =>This Inner Loop Header: Depth=1
; RV32-ATOMIC-NEXT: mv s1, a0
; RV32-ATOMIC-NEXT: lui a1, 784384
-; RV32-ATOMIC-NEXT: call __addsf3@plt
+; RV32-ATOMIC-NEXT: call __addsf3
; RV32-ATOMIC-NEXT: mv a2, a0
; RV32-ATOMIC-NEXT: mv a0, s0
; RV32-ATOMIC-NEXT: mv a1, s1
-; RV32-ATOMIC-NEXT: call __sync_val_compare_and_swap_4@plt
+; RV32-ATOMIC-NEXT: call __sync_val_compare_and_swap_4
; RV32-ATOMIC-NEXT: bne a0, s1, .LBB29_1
; RV32-ATOMIC-NEXT: # %bb.2: # %atomicrmw.end
; RV32-ATOMIC-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
@@ -2105,11 +2105,11 @@ define float @rmw32_fsub_seq_cst(ptr %p) nounwind {
; RV32-ATOMIC-TRAILING-NEXT: # =>This Inner Loop Header: Depth=1
; RV32-ATOMIC-TRAILING-NEXT: mv s1, a0
; RV32-ATOMIC-TRAILING-NEXT: lui a1, 784384
-; RV32-ATOMIC-TRAILING-NEXT: call __addsf3@plt
+; RV32-ATOMIC-TRAILING-NEXT: call __addsf3
; RV32-ATOMIC-TRAILING-NEXT: mv a2, a0
; RV32-ATOMIC-TRAILING-NEXT: mv a0, s0
; RV32-ATOMIC-TRAILING-NEXT: mv a1, s1
-; RV32-ATOMIC-TRAILING-NEXT: call __sync_val_compare_and_swap_4@plt
+; RV32-ATOMIC-TRAILING-NEXT: call __sync_val_compare_and_swap_4
; RV32-ATOMIC-TRAILING-NEXT: bne a0, s1, .LBB29_1
; RV32-ATOMIC-TRAILING-NEXT: # %bb.2: # %atomicrmw.end
; RV32-ATOMIC-TRAILING-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
@@ -2130,14 +2130,14 @@ define float @rmw32_fsub_seq_cst(ptr %p) nounwind {
; RV64-NO-ATOMIC-NEXT: # =>This Inner Loop Header: Depth=1
; RV64-NO-ATOMIC-NEXT: lui a1, 784384
; RV64-NO-ATOMIC-NEXT: mv a0, s1
-; RV64-NO-ATOMIC-NEXT: call __addsf3@plt
+; RV64-NO-ATOMIC-NEXT: call __addsf3
; RV64-NO-ATOMIC-NEXT: mv a2, a0
; RV64-NO-ATOMIC-NEXT: sw s1, 4(sp)
; RV64-NO-ATOMIC-NEXT: addi a1, sp, 4
; RV64-NO-ATOMIC-NEXT: li a3, 5
; RV64-NO-ATOMIC-NEXT: li a4, 5
; RV64-NO-ATOMIC-NEXT: mv a0, s0
-; RV64-NO-ATOMIC-NEXT: call __atomic_compare_exchange_4@plt
+; RV64-NO-ATOMIC-NEXT: call __atomic_compare_exchange_4
; RV64-NO-ATOMIC-NEXT: lw s1, 4(sp)
; RV64-NO-ATOMIC-NEXT: beqz a0, .LBB29_1
; RV64-NO-ATOMIC-NEXT: # %bb.2: # %atomicrmw.end
@@ -2161,12 +2161,12 @@ define float @rmw32_fsub_seq_cst(ptr %p) nounwind {
; RV64-ATOMIC-NEXT: # =>This Inner Loop Header: Depth=1
; RV64-ATOMIC-NEXT: lui a1, 784384
; RV64-ATOMIC-NEXT: mv a0, s1
-; RV64-ATOMIC-NEXT: call __addsf3@plt
+; RV64-ATOMIC-NEXT: call __addsf3
; RV64-ATOMIC-NEXT: mv a2, a0
; RV64-ATOMIC-NEXT: sext.w s2, s1
; RV64-ATOMIC-NEXT: mv a0, s0
; RV64-ATOMIC-NEXT: mv a1, s2
-; RV64-ATOMIC-NEXT: call __sync_val_compare_and_swap_4@plt
+; RV64-ATOMIC-NEXT: call __sync_val_compare_and_swap_4
; RV64-ATOMIC-NEXT: mv s1, a0
; RV64-ATOMIC-NEXT: bne a0, s2, .LBB29_1
; RV64-ATOMIC-NEXT: # %bb.2: # %atomicrmw.end
@@ -2191,12 +2191,12 @@ define float @rmw32_fsub_seq_cst(ptr %p) nounwind {
; RV64-ATOMIC-TRAILING-NEXT: # =>This Inner Loop Header: Depth=1
; RV64-ATOMIC-TRAILING-NEXT: lui a1, 784384
; RV64-ATOMIC-TRAILING-NEXT: mv a0, s1
-; RV64-ATOMIC-TRAILING-NEXT: call __addsf3@plt
+; RV64-ATOMIC-TRAILING-NEXT: call __addsf3
; RV64-ATOMIC-TRAILING-NEXT: mv a2, a0
; RV64-ATOMIC-TRAILING-NEXT: sext.w s2, s1
; RV64-ATOMIC-TRAILING-NEXT: mv a0, s0
; RV64-ATOMIC-TRAILING-NEXT: mv a1, s2
-; RV64-ATOMIC-TRAILING-NEXT: call __sync_val_compare_and_swap_4@plt
+; RV64-ATOMIC-TRAILING-NEXT: call __sync_val_compare_and_swap_4
; RV64-ATOMIC-TRAILING-NEXT: mv s1, a0
; RV64-ATOMIC-TRAILING-NEXT: bne a0, s2, .LBB29_1
; RV64-ATOMIC-TRAILING-NEXT: # %bb.2: # %atomicrmw.end
@@ -2224,14 +2224,14 @@ define float @rmw32_fmin_seq_cst(ptr %p) nounwind {
; RV32-NO-ATOMIC-NEXT: # =>This Inner Loop Header: Depth=1
; RV32-NO-ATOMIC-NEXT: lui a1, 260096
; RV32-NO-ATOMIC-NEXT: mv a0, s1
-; RV32-NO-ATOMIC-NEXT: call fminf@plt
+; RV32-NO-ATOMIC-NEXT: call fminf
; RV32-NO-ATOMIC-NEXT: mv a2, a0
; RV32-NO-ATOMIC-NEXT: sw s1, 0(sp)
; RV32-NO-ATOMIC-NEXT: mv a1, sp
; RV32-NO-ATOMIC-NEXT: li a3, 5
; RV32-NO-ATOMIC-NEXT: li a4, 5
; RV32-NO-ATOMIC-NEXT: mv a0, s0
-; RV32-NO-ATOMIC-NEXT: call __atomic_compare_exchange_4@plt
+; RV32-NO-ATOMIC-NEXT: call __atomic_compare_exchange_4
; RV32-NO-ATOMIC-NEXT: lw s1, 0(sp)
; RV32-NO-ATOMIC-NEXT: beqz a0, .LBB30_1
; RV32-NO-ATOMIC-NEXT: # %bb.2: # %atomicrmw.end
@@ -2254,11 +2254,11 @@ define float @rmw32_fmin_seq_cst(ptr %p) nounwind {
; RV32-ATOMIC-NEXT: # =>This Inner Loop Header: Depth=1
; RV32-ATOMIC-NEXT: mv s1, a0
; RV32-ATOMIC-NEXT: lui a1, 260096
-; RV32-ATOMIC-NEXT: call fminf@plt
+; RV32-ATOMIC-NEXT: call fminf
; RV32-ATOMIC-NEXT: mv a2, a0
; RV32-ATOMIC-NEXT: mv a0, s0
; RV32-ATOMIC-NEXT: mv a1, s1
-; RV32-ATOMIC-NEXT: call __sync_val_compare_and_swap_4@plt
+; RV32-ATOMIC-NEXT: call __sync_val_compare_and_swap_4
; RV32-ATOMIC-NEXT: bne a0, s1, .LBB30_1
; RV32-ATOMIC-NEXT: # %bb.2: # %atomicrmw.end
; RV32-ATOMIC-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
@@ -2279,11 +2279,11 @@ define float @rmw32_fmin_seq_cst(ptr %p) nounwind {
; RV32-ATOMIC-TRAILING-NEXT: # =>This Inner Loop Header: Depth=1
; RV32-ATOMIC-TRAILING-NEXT: mv s1, a0
; RV32-ATOMIC-TRAILING-NEXT: lui a1, 260096
-; RV32-ATOMIC-TRAILING-NEXT: call fminf@plt
+; RV32-ATOMIC-TRAILING-NEXT: call fminf
; RV32-ATOMIC-TRAILING-NEXT: mv a2, a0
; RV32-ATOMIC-TRAILING-NEXT: mv a0, s0
; RV32-ATOMIC-TRAILING-NEXT: mv a1, s1
-; RV32-ATOMIC-TRAILING-NEXT: call __sync_val_compare_and_swap_4@plt
+; RV32-ATOMIC-TRAILING-NEXT: call __sync_val_compare_and_swap_4
; RV32-ATOMIC-TRAILING-NEXT: bne a0, s1, .LBB30_1
; RV32-ATOMIC-TRAILING-NEXT: # %bb.2: # %atomicrmw.end
; RV32-ATOMIC-TRAILING-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
@@ -2304,14 +2304,14 @@ define float @rmw32_fmin_seq_cst(ptr %p) nounwind {
; RV64-NO-ATOMIC-NEXT: # =>This Inner Loop Header: Depth=1
; RV64-NO-ATOMIC-NEXT: lui a1, 260096
; RV64-NO-ATOMIC-NEXT: mv a0, s1
-; RV64-NO-ATOMIC-NEXT: call fminf@plt
+; RV64-NO-ATOMIC-NEXT: call fminf
; RV64-NO-ATOMIC-NEXT: mv a2, a0
; RV64-NO-ATOMIC-NEXT: sw s1, 4(sp)
; RV64-NO-ATOMIC-NEXT: addi a1, sp, 4
; RV64-NO-ATOMIC-NEXT: li a3, 5
; RV64-NO-ATOMIC-NEXT: li a4, 5
; RV64-NO-ATOMIC-NEXT: mv a0, s0
-; RV64-NO-ATOMIC-NEXT: call __atomic_compare_exchange_4@plt
+; RV64-NO-ATOMIC-NEXT: call __atomic_compare_exchange_4
; RV64-NO-ATOMIC-NEXT: lw s1, 4(sp)
; RV64-NO-ATOMIC-NEXT: beqz a0, .LBB30_1
; RV64-NO-ATOMIC-NEXT: # %bb.2: # %atomicrmw.end
@@ -2335,12 +2335,12 @@ define float @rmw32_fmin_seq_cst(ptr %p) nounwind {
; RV64-ATOMIC-NEXT: # =>This Inner Loop Header: Depth=1
; RV64-ATOMIC-NEXT: lui a1, 260096
; RV64-ATOMIC-NEXT: mv a0, s1
-; RV64-ATOMIC-NEXT: call fminf@plt
+; RV64-ATOMIC-NEXT: call fminf
; RV64-ATOMIC-NEXT: mv a2, a0
; RV64-ATOMIC-NEXT: sext.w s2, s1
; RV64-ATOMIC-NEXT: mv a0, s0
; RV64-ATOMIC-NEXT: mv a1, s2
-; RV64-ATOMIC-NEXT: call __sync_val_compare_and_swap_4@plt
+; RV64-ATOMIC-NEXT: call __sync_val_compare_and_swap_4
; RV64-ATOMIC-NEXT: mv s1, a0
; RV64-ATOMIC-NEXT: bne a0, s2, .LBB30_1
; RV64-ATOMIC-NEXT: # %bb.2: # %atomicrmw.end
@@ -2365,12 +2365,12 @@ define float @rmw32_fmin_seq_cst(ptr %p) nounwind {
; RV64-ATOMIC-TRAILING-NEXT: # =>This Inner Loop Header: Depth=1
; RV64-ATOMIC-TRAILING-NEXT: lui a1, 260096
; RV64-ATOMIC-TRAILING-NEXT: mv a0, s1
-; RV64-ATOMIC-TRAILING-NEXT: call fminf@plt
+; RV64-ATOMIC-TRAILING-NEXT: call fminf
; RV64-ATOMIC-TRAILING-NEXT: mv a2, a0
; RV64-ATOMIC-TRAILING-NEXT: sext.w s2, s1
; RV64-ATOMIC-TRAILING-NEXT: mv a0, s0
; RV64-ATOMIC-TRAILING-NEXT: mv a1, s2
-; RV64-ATOMIC-TRAILING-NEXT: call __sync_val_compare_and_swap_4@plt
+; RV64-ATOMIC-TRAILING-NEXT: call __sync_val_compare_and_swap_4
; RV64-ATOMIC-TRAILING-NEXT: mv s1, a0
; RV64-ATOMIC-TRAILING-NEXT: bne a0, s2, .LBB30_1
; RV64-ATOMIC-TRAILING-NEXT: # %bb.2: # %atomicrmw.end
@@ -2398,14 +2398,14 @@ define float @rmw32_fmax_seq_cst(ptr %p) nounwind {
; RV32-NO-ATOMIC-NEXT: # =>This Inner Loop Header: Depth=1
; RV32-NO-ATOMIC-NEXT: lui a1, 260096
; RV32-NO-ATOMIC-NEXT: mv a0, s1
-; RV32-NO-ATOMIC-NEXT: call fmaxf@plt
+; RV32-NO-ATOMIC-NEXT: call fmaxf
; RV32-NO-ATOMIC-NEXT: mv a2, a0
; RV32-NO-ATOMIC-NEXT: sw s1, 0(sp)
; RV32-NO-ATOMIC-NEXT: mv a1, sp
; RV32-NO-ATOMIC-NEXT: li a3, 5
; RV32-NO-ATOMIC-NEXT: li a4, 5
; RV32-NO-ATOMIC-NEXT: mv a0, s0
-; RV32-NO-ATOMIC-NEXT: call __atomic_compare_exchange_4@plt
+; RV32-NO-ATOMIC-NEXT: call __atomic_compare_exchange_4
; RV32-NO-ATOMIC-NEXT: lw s1, 0(sp)
; RV32-NO-ATOMIC-NEXT: beqz a0, .LBB31_1
; RV32-NO-ATOMIC-NEXT: # %bb.2: # %atomicrmw.end
@@ -2428,11 +2428,11 @@ define float @rmw32_fmax_seq_cst(ptr %p) nounwind {
; RV32-ATOMIC-NEXT: # =>This Inner Loop Header: Depth=1
; RV32-ATOMIC-NEXT: mv s1, a0
; RV32-ATOMIC-NEXT: lui a1, 260096
-; RV32-ATOMIC-NEXT: call fmaxf@plt
+; RV32-ATOMIC-NEXT: call fmaxf
; RV32-ATOMIC-NEXT: mv a2, a0
; RV32-ATOMIC-NEXT: mv a0, s0
; RV32-ATOMIC-NEXT: mv a1, s1
-; RV32-ATOMIC-NEXT: call __sync_val_compare_and_swap_4@plt
+; RV32-ATOMIC-NEXT: call __sync_val_compare_and_swap_4
; RV32-ATOMIC-NEXT: bne a0, s1, .LBB31_1
; RV32-ATOMIC-NEXT: # %bb.2: # %atomicrmw.end
; RV32-ATOMIC-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
@@ -2453,11 +2453,11 @@ define float @rmw32_fmax_seq_cst(ptr %p) nounwind {
; RV32-ATOMIC-TRAILING-NEXT: # =>This Inner Loop Header: Depth=1
; RV32-ATOMIC-TRAILING-NEXT: mv s1, a0
; RV32-ATOMIC-TRAILING-NEXT: lui a1, 260096
-; RV32-ATOMIC-TRAILING-NEXT: call fmaxf@plt
+; RV32-ATOMIC-TRAILING-NEXT: call fmaxf
; RV32-ATOMIC-TRAILING-NEXT: mv a2, a0
; RV32-ATOMIC-TRAILING-NEXT: mv a0, s0
; RV32-ATOMIC-TRAILING-NEXT: mv a1, s1
-; RV32-ATOMIC-TRAILING-NEXT: call __sync_val_compare_and_swap_4@plt
+; RV32-ATOMIC-TRAILING-NEXT: call __sync_val_compare_and_swap_4
; RV32-ATOMIC-TRAILING-NEXT: bne a0, s1, .LBB31_1
; RV32-ATOMIC-TRAILING-NEXT: # %bb.2: # %atomicrmw.end
; RV32-ATOMIC-TRAILING-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
@@ -2478,14 +2478,14 @@ define float @rmw32_fmax_seq_cst(ptr %p) nounwind {
; RV64-NO-ATOMIC-NEXT: # =>This Inner Loop Header: Depth=1
; RV64-NO-ATOMIC-NEXT: lui a1, 260096
; RV64-NO-ATOMIC-NEXT: mv a0, s1
-; RV64-NO-ATOMIC-NEXT: call fmaxf@plt
+; RV64-NO-ATOMIC-NEXT: call fmaxf
; RV64-NO-ATOMIC-NEXT: mv a2, a0
; RV64-NO-ATOMIC-NEXT: sw s1, 4(sp)
; RV64-NO-ATOMIC-NEXT: addi a1, sp, 4
; RV64-NO-ATOMIC-NEXT: li a3, 5
; RV64-NO-ATOMIC-NEXT: li a4, 5
; RV64-NO-ATOMIC-NEXT: mv a0, s0
-; RV64-NO-ATOMIC-NEXT: call __atomic_compare_exchange_4@plt
+; RV64-NO-ATOMIC-NEXT: call __atomic_compare_exchange_4
; RV64-NO-ATOMIC-NEXT: lw s1, 4(sp)
; RV64-NO-ATOMIC-NEXT: beqz a0, .LBB31_1
; RV64-NO-ATOMIC-NEXT: # %bb.2: # %atomicrmw.end
@@ -2509,12 +2509,12 @@ define float @rmw32_fmax_seq_cst(ptr %p) nounwind {
; RV64-ATOMIC-NEXT: # =>This Inner Loop Header: Depth=1
; RV64-ATOMIC-NEXT: lui a1, 260096
; RV64-ATOMIC-NEXT: mv a0, s1
-; RV64-ATOMIC-NEXT: call fmaxf@plt
+; RV64-ATOMIC-NEXT: call fmaxf
; RV64-ATOMIC-NEXT: mv a2, a0
; RV64-ATOMIC-NEXT: sext.w s2, s1
; RV64-ATOMIC-NEXT: mv a0, s0
; RV64-ATOMIC-NEXT: mv a1, s2
-; RV64-ATOMIC-NEXT: call __sync_val_compare_and_swap_4@plt
+; RV64-ATOMIC-NEXT: call __sync_val_compare_and_swap_4
; RV64-ATOMIC-NEXT: mv s1, a0
; RV64-ATOMIC-NEXT: bne a0, s2, .LBB31_1
; RV64-ATOMIC-NEXT: # %bb.2: # %atomicrmw.end
@@ -2539,12 +2539,12 @@ define float @rmw32_fmax_seq_cst(ptr %p) nounwind {
; RV64-ATOMIC-TRAILING-NEXT: # =>This Inner Loop Header: Depth=1
; RV64-ATOMIC-TRAILING-NEXT: lui a1, 260096
; RV64-ATOMIC-TRAILING-NEXT: mv a0, s1
-; RV64-ATOMIC-TRAILING-NEXT: call fmaxf@plt
+; RV64-ATOMIC-TRAILING-NEXT: call fmaxf
; RV64-ATOMIC-TRAILING-NEXT: mv a2, a0
; RV64-ATOMIC-TRAILING-NEXT: sext.w s2, s1
; RV64-ATOMIC-TRAILING-NEXT: mv a0, s0
; RV64-ATOMIC-TRAILING-NEXT: mv a1, s2
-; RV64-ATOMIC-TRAILING-NEXT: call __sync_val_compare_and_swap_4@plt
+; RV64-ATOMIC-TRAILING-NEXT: call __sync_val_compare_and_swap_4
; RV64-ATOMIC-TRAILING-NEXT: mv s1, a0
; RV64-ATOMIC-TRAILING-NEXT: bne a0, s2, .LBB31_1
; RV64-ATOMIC-TRAILING-NEXT: # %bb.2: # %atomicrmw.end
@@ -2569,7 +2569,7 @@ define i32 @cmpxchg32_monotonic(ptr %p) nounwind {
; RV32-NO-ATOMIC-NEXT: li a2, 1
; RV32-NO-ATOMIC-NEXT: li a3, 0
; RV32-NO-ATOMIC-NEXT: li a4, 0
-; RV32-NO-ATOMIC-NEXT: call __atomic_compare_exchange_4@plt
+; RV32-NO-ATOMIC-NEXT: call __atomic_compare_exchange_4
; RV32-NO-ATOMIC-NEXT: lw a0, 8(sp)
; RV32-NO-ATOMIC-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32-NO-ATOMIC-NEXT: addi sp, sp, 16
@@ -2581,7 +2581,7 @@ define i32 @cmpxchg32_monotonic(ptr %p) nounwind {
; RV32-ATOMIC-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32-ATOMIC-NEXT: li a2, 1
; RV32-ATOMIC-NEXT: li a1, 0
-; RV32-ATOMIC-NEXT: call __sync_val_compare_and_swap_4@plt
+; RV32-ATOMIC-NEXT: call __sync_val_compare_and_swap_4
; RV32-ATOMIC-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32-ATOMIC-NEXT: addi sp, sp, 16
; RV32-ATOMIC-NEXT: ret
@@ -2592,7 +2592,7 @@ define i32 @cmpxchg32_monotonic(ptr %p) nounwind {
; RV32-ATOMIC-TRAILING-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32-ATOMIC-TRAILING-NEXT: li a2, 1
; RV32-ATOMIC-TRAILING-NEXT: li a1, 0
-; RV32-ATOMIC-TRAILING-NEXT: call __sync_val_compare_and_swap_4@plt
+; RV32-ATOMIC-TRAILING-NEXT: call __sync_val_compare_and_swap_4
; RV32-ATOMIC-TRAILING-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32-ATOMIC-TRAILING-NEXT: addi sp, sp, 16
; RV32-ATOMIC-TRAILING-NEXT: ret
@@ -2606,7 +2606,7 @@ define i32 @cmpxchg32_monotonic(ptr %p) nounwind {
; RV64-NO-ATOMIC-NEXT: li a2, 1
; RV64-NO-ATOMIC-NEXT: li a3, 0
; RV64-NO-ATOMIC-NEXT: li a4, 0
-; RV64-NO-ATOMIC-NEXT: call __atomic_compare_exchange_4@plt
+; RV64-NO-ATOMIC-NEXT: call __atomic_compare_exchange_4
; RV64-NO-ATOMIC-NEXT: lw a0, 4(sp)
; RV64-NO-ATOMIC-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64-NO-ATOMIC-NEXT: addi sp, sp, 16
@@ -2618,7 +2618,7 @@ define i32 @cmpxchg32_monotonic(ptr %p) nounwind {
; RV64-ATOMIC-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64-ATOMIC-NEXT: li a2, 1
; RV64-ATOMIC-NEXT: li a1, 0
-; RV64-ATOMIC-NEXT: call __sync_val_compare_and_swap_4@plt
+; RV64-ATOMIC-NEXT: call __sync_val_compare_and_swap_4
; RV64-ATOMIC-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64-ATOMIC-NEXT: addi sp, sp, 16
; RV64-ATOMIC-NEXT: ret
@@ -2629,7 +2629,7 @@ define i32 @cmpxchg32_monotonic(ptr %p) nounwind {
; RV64-ATOMIC-TRAILING-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64-ATOMIC-TRAILING-NEXT: li a2, 1
; RV64-ATOMIC-TRAILING-NEXT: li a1, 0
-; RV64-ATOMIC-TRAILING-NEXT: call __sync_val_compare_and_swap_4@plt
+; RV64-ATOMIC-TRAILING-NEXT: call __sync_val_compare_and_swap_4
; RV64-ATOMIC-TRAILING-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64-ATOMIC-TRAILING-NEXT: addi sp, sp, 16
; RV64-ATOMIC-TRAILING-NEXT: ret
@@ -2648,7 +2648,7 @@ define i32 @cmpxchg32_seq_cst(ptr %p) nounwind {
; RV32-NO-ATOMIC-NEXT: li a2, 1
; RV32-NO-ATOMIC-NEXT: li a3, 5
; RV32-NO-ATOMIC-NEXT: li a4, 5
-; RV32-NO-ATOMIC-NEXT: call __atomic_compare_exchange_4@plt
+; RV32-NO-ATOMIC-NEXT: call __atomic_compare_exchange_4
; RV32-NO-ATOMIC-NEXT: lw a0, 8(sp)
; RV32-NO-ATOMIC-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32-NO-ATOMIC-NEXT: addi sp, sp, 16
@@ -2660,7 +2660,7 @@ define i32 @cmpxchg32_seq_cst(ptr %p) nounwind {
; RV32-ATOMIC-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32-ATOMIC-NEXT: li a2, 1
; RV32-ATOMIC-NEXT: li a1, 0
-; RV32-ATOMIC-NEXT: call __sync_val_compare_and_swap_4@plt
+; RV32-ATOMIC-NEXT: call __sync_val_compare_and_swap_4
; RV32-ATOMIC-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32-ATOMIC-NEXT: addi sp, sp, 16
; RV32-ATOMIC-NEXT: ret
@@ -2671,7 +2671,7 @@ define i32 @cmpxchg32_seq_cst(ptr %p) nounwind {
; RV32-ATOMIC-TRAILING-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32-ATOMIC-TRAILING-NEXT: li a2, 1
; RV32-ATOMIC-TRAILING-NEXT: li a1, 0
-; RV32-ATOMIC-TRAILING-NEXT: call __sync_val_compare_and_swap_4@plt
+; RV32-ATOMIC-TRAILING-NEXT: call __sync_val_compare_and_swap_4
; RV32-ATOMIC-TRAILING-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32-ATOMIC-TRAILING-NEXT: addi sp, sp, 16
; RV32-ATOMIC-TRAILING-NEXT: ret
@@ -2685,7 +2685,7 @@ define i32 @cmpxchg32_seq_cst(ptr %p) nounwind {
; RV64-NO-ATOMIC-NEXT: li a2, 1
; RV64-NO-ATOMIC-NEXT: li a3, 5
; RV64-NO-ATOMIC-NEXT: li a4, 5
-; RV64-NO-ATOMIC-NEXT: call __atomic_compare_exchange_4@plt
+; RV64-NO-ATOMIC-NEXT: call __atomic_compare_exchange_4
; RV64-NO-ATOMIC-NEXT: lw a0, 4(sp)
; RV64-NO-ATOMIC-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64-NO-ATOMIC-NEXT: addi sp, sp, 16
@@ -2697,7 +2697,7 @@ define i32 @cmpxchg32_seq_cst(ptr %p) nounwind {
; RV64-ATOMIC-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64-ATOMIC-NEXT: li a2, 1
; RV64-ATOMIC-NEXT: li a1, 0
-; RV64-ATOMIC-NEXT: call __sync_val_compare_and_swap_4@plt
+; RV64-ATOMIC-NEXT: call __sync_val_compare_and_swap_4
; RV64-ATOMIC-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64-ATOMIC-NEXT: addi sp, sp, 16
; RV64-ATOMIC-NEXT: ret
@@ -2708,7 +2708,7 @@ define i32 @cmpxchg32_seq_cst(ptr %p) nounwind {
; RV64-ATOMIC-TRAILING-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64-ATOMIC-TRAILING-NEXT: li a2, 1
; RV64-ATOMIC-TRAILING-NEXT: li a1, 0
-; RV64-ATOMIC-TRAILING-NEXT: call __sync_val_compare_and_swap_4@plt
+; RV64-ATOMIC-TRAILING-NEXT: call __sync_val_compare_and_swap_4
; RV64-ATOMIC-TRAILING-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64-ATOMIC-TRAILING-NEXT: addi sp, sp, 16
; RV64-ATOMIC-TRAILING-NEXT: ret
@@ -2723,7 +2723,7 @@ define i64 @load64_unordered(ptr %p) nounwind {
; RV32-NEXT: addi sp, sp, -16
; RV32-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32-NEXT: li a1, 0
-; RV32-NEXT: call __atomic_load_8@plt
+; RV32-NEXT: call __atomic_load_8
; RV32-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32-NEXT: addi sp, sp, 16
; RV32-NEXT: ret
@@ -2733,7 +2733,7 @@ define i64 @load64_unordered(ptr %p) nounwind {
; RV64-NO-ATOMIC-NEXT: addi sp, sp, -16
; RV64-NO-ATOMIC-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64-NO-ATOMIC-NEXT: li a1, 0
-; RV64-NO-ATOMIC-NEXT: call __atomic_load_8@plt
+; RV64-NO-ATOMIC-NEXT: call __atomic_load_8
; RV64-NO-ATOMIC-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64-NO-ATOMIC-NEXT: addi sp, sp, 16
; RV64-NO-ATOMIC-NEXT: ret
@@ -2757,7 +2757,7 @@ define i64 @load64_monotonic(ptr %p) nounwind {
; RV32-NEXT: addi sp, sp, -16
; RV32-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32-NEXT: li a1, 0
-; RV32-NEXT: call __atomic_load_8@plt
+; RV32-NEXT: call __atomic_load_8
; RV32-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32-NEXT: addi sp, sp, 16
; RV32-NEXT: ret
@@ -2767,7 +2767,7 @@ define i64 @load64_monotonic(ptr %p) nounwind {
; RV64-NO-ATOMIC-NEXT: addi sp, sp, -16
; RV64-NO-ATOMIC-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64-NO-ATOMIC-NEXT: li a1, 0
-; RV64-NO-ATOMIC-NEXT: call __atomic_load_8@plt
+; RV64-NO-ATOMIC-NEXT: call __atomic_load_8
; RV64-NO-ATOMIC-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64-NO-ATOMIC-NEXT: addi sp, sp, 16
; RV64-NO-ATOMIC-NEXT: ret
@@ -2791,7 +2791,7 @@ define i64 @load64_acquire(ptr %p) nounwind {
; RV32-NEXT: addi sp, sp, -16
; RV32-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32-NEXT: li a1, 2
-; RV32-NEXT: call __atomic_load_8@plt
+; RV32-NEXT: call __atomic_load_8
; RV32-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32-NEXT: addi sp, sp, 16
; RV32-NEXT: ret
@@ -2801,7 +2801,7 @@ define i64 @load64_acquire(ptr %p) nounwind {
; RV64-NO-ATOMIC-NEXT: addi sp, sp, -16
; RV64-NO-ATOMIC-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64-NO-ATOMIC-NEXT: li a1, 2
-; RV64-NO-ATOMIC-NEXT: call __atomic_load_8@plt
+; RV64-NO-ATOMIC-NEXT: call __atomic_load_8
; RV64-NO-ATOMIC-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64-NO-ATOMIC-NEXT: addi sp, sp, 16
; RV64-NO-ATOMIC-NEXT: ret
@@ -2827,7 +2827,7 @@ define i64 @load64_seq_cst(ptr %p) nounwind {
; RV32-NEXT: addi sp, sp, -16
; RV32-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32-NEXT: li a1, 5
-; RV32-NEXT: call __atomic_load_8@plt
+; RV32-NEXT: call __atomic_load_8
; RV32-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32-NEXT: addi sp, sp, 16
; RV32-NEXT: ret
@@ -2837,7 +2837,7 @@ define i64 @load64_seq_cst(ptr %p) nounwind {
; RV64-NO-ATOMIC-NEXT: addi sp, sp, -16
; RV64-NO-ATOMIC-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64-NO-ATOMIC-NEXT: li a1, 5
-; RV64-NO-ATOMIC-NEXT: call __atomic_load_8@plt
+; RV64-NO-ATOMIC-NEXT: call __atomic_load_8
; RV64-NO-ATOMIC-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64-NO-ATOMIC-NEXT: addi sp, sp, 16
; RV64-NO-ATOMIC-NEXT: ret
@@ -2867,7 +2867,7 @@ define void @store64_unordered(ptr %p) nounwind {
; RV32-NEXT: li a1, 0
; RV32-NEXT: li a2, 0
; RV32-NEXT: li a3, 0
-; RV32-NEXT: call __atomic_store_8@plt
+; RV32-NEXT: call __atomic_store_8
; RV32-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32-NEXT: addi sp, sp, 16
; RV32-NEXT: ret
@@ -2878,7 +2878,7 @@ define void @store64_unordered(ptr %p) nounwind {
; RV64-NO-ATOMIC-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64-NO-ATOMIC-NEXT: li a1, 0
; RV64-NO-ATOMIC-NEXT: li a2, 0
-; RV64-NO-ATOMIC-NEXT: call __atomic_store_8@plt
+; RV64-NO-ATOMIC-NEXT: call __atomic_store_8
; RV64-NO-ATOMIC-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64-NO-ATOMIC-NEXT: addi sp, sp, 16
; RV64-NO-ATOMIC-NEXT: ret
@@ -2904,7 +2904,7 @@ define void @store64_monotonic(ptr %p) nounwind {
; RV32-NEXT: li a1, 0
; RV32-NEXT: li a2, 0
; RV32-NEXT: li a3, 0
-; RV32-NEXT: call __atomic_store_8@plt
+; RV32-NEXT: call __atomic_store_8
; RV32-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32-NEXT: addi sp, sp, 16
; RV32-NEXT: ret
@@ -2915,7 +2915,7 @@ define void @store64_monotonic(ptr %p) nounwind {
; RV64-NO-ATOMIC-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64-NO-ATOMIC-NEXT: li a1, 0
; RV64-NO-ATOMIC-NEXT: li a2, 0
-; RV64-NO-ATOMIC-NEXT: call __atomic_store_8@plt
+; RV64-NO-ATOMIC-NEXT: call __atomic_store_8
; RV64-NO-ATOMIC-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64-NO-ATOMIC-NEXT: addi sp, sp, 16
; RV64-NO-ATOMIC-NEXT: ret
@@ -2941,7 +2941,7 @@ define void @store64_release(ptr %p) nounwind {
; RV32-NEXT: li a3, 3
; RV32-NEXT: li a1, 0
; RV32-NEXT: li a2, 0
-; RV32-NEXT: call __atomic_store_8@plt
+; RV32-NEXT: call __atomic_store_8
; RV32-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32-NEXT: addi sp, sp, 16
; RV32-NEXT: ret
@@ -2952,7 +2952,7 @@ define void @store64_release(ptr %p) nounwind {
; RV64-NO-ATOMIC-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64-NO-ATOMIC-NEXT: li a2, 3
; RV64-NO-ATOMIC-NEXT: li a1, 0
-; RV64-NO-ATOMIC-NEXT: call __atomic_store_8@plt
+; RV64-NO-ATOMIC-NEXT: call __atomic_store_8
; RV64-NO-ATOMIC-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64-NO-ATOMIC-NEXT: addi sp, sp, 16
; RV64-NO-ATOMIC-NEXT: ret
@@ -2980,7 +2980,7 @@ define void @store64(ptr %p) nounwind {
; RV32-NEXT: li a3, 5
; RV32-NEXT: li a1, 0
; RV32-NEXT: li a2, 0
-; RV32-NEXT: call __atomic_store_8@plt
+; RV32-NEXT: call __atomic_store_8
; RV32-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32-NEXT: addi sp, sp, 16
; RV32-NEXT: ret
@@ -2991,7 +2991,7 @@ define void @store64(ptr %p) nounwind {
; RV64-NO-ATOMIC-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64-NO-ATOMIC-NEXT: li a2, 5
; RV64-NO-ATOMIC-NEXT: li a1, 0
-; RV64-NO-ATOMIC-NEXT: call __atomic_store_8@plt
+; RV64-NO-ATOMIC-NEXT: call __atomic_store_8
; RV64-NO-ATOMIC-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64-NO-ATOMIC-NEXT: addi sp, sp, 16
; RV64-NO-ATOMIC-NEXT: ret
@@ -3020,7 +3020,7 @@ define i64 @rmw64_monotonic(ptr %p) nounwind {
; RV32-NEXT: li a1, 1
; RV32-NEXT: li a2, 0
; RV32-NEXT: li a3, 0
-; RV32-NEXT: call __atomic_fetch_add_8@plt
+; RV32-NEXT: call __atomic_fetch_add_8
; RV32-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32-NEXT: addi sp, sp, 16
; RV32-NEXT: ret
@@ -3031,7 +3031,7 @@ define i64 @rmw64_monotonic(ptr %p) nounwind {
; RV64-NO-ATOMIC-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64-NO-ATOMIC-NEXT: li a1, 1
; RV64-NO-ATOMIC-NEXT: li a2, 0
-; RV64-NO-ATOMIC-NEXT: call __atomic_fetch_add_8@plt
+; RV64-NO-ATOMIC-NEXT: call __atomic_fetch_add_8
; RV64-NO-ATOMIC-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64-NO-ATOMIC-NEXT: addi sp, sp, 16
; RV64-NO-ATOMIC-NEXT: ret
@@ -3041,7 +3041,7 @@ define i64 @rmw64_monotonic(ptr %p) nounwind {
; RV64-ATOMIC-NEXT: addi sp, sp, -16
; RV64-ATOMIC-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64-ATOMIC-NEXT: li a1, 1
-; RV64-ATOMIC-NEXT: call __sync_fetch_and_add_8@plt
+; RV64-ATOMIC-NEXT: call __sync_fetch_and_add_8
; RV64-ATOMIC-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64-ATOMIC-NEXT: addi sp, sp, 16
; RV64-ATOMIC-NEXT: ret
@@ -3051,7 +3051,7 @@ define i64 @rmw64_monotonic(ptr %p) nounwind {
; RV64-ATOMIC-TRAILING-NEXT: addi sp, sp, -16
; RV64-ATOMIC-TRAILING-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64-ATOMIC-TRAILING-NEXT: li a1, 1
-; RV64-ATOMIC-TRAILING-NEXT: call __sync_fetch_and_add_8@plt
+; RV64-ATOMIC-TRAILING-NEXT: call __sync_fetch_and_add_8
; RV64-ATOMIC-TRAILING-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64-ATOMIC-TRAILING-NEXT: addi sp, sp, 16
; RV64-ATOMIC-TRAILING-NEXT: ret
@@ -3067,7 +3067,7 @@ define i64 @rmw64_add_seq_cst(ptr %p) nounwind {
; RV32-NEXT: li a1, 1
; RV32-NEXT: li a3, 5
; RV32-NEXT: li a2, 0
-; RV32-NEXT: call __atomic_fetch_add_8@plt
+; RV32-NEXT: call __atomic_fetch_add_8
; RV32-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32-NEXT: addi sp, sp, 16
; RV32-NEXT: ret
@@ -3078,7 +3078,7 @@ define i64 @rmw64_add_seq_cst(ptr %p) nounwind {
; RV64-NO-ATOMIC-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64-NO-ATOMIC-NEXT: li a1, 1
; RV64-NO-ATOMIC-NEXT: li a2, 5
-; RV64-NO-ATOMIC-NEXT: call __atomic_fetch_add_8@plt
+; RV64-NO-ATOMIC-NEXT: call __atomic_fetch_add_8
; RV64-NO-ATOMIC-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64-NO-ATOMIC-NEXT: addi sp, sp, 16
; RV64-NO-ATOMIC-NEXT: ret
@@ -3088,7 +3088,7 @@ define i64 @rmw64_add_seq_cst(ptr %p) nounwind {
; RV64-ATOMIC-NEXT: addi sp, sp, -16
; RV64-ATOMIC-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64-ATOMIC-NEXT: li a1, 1
-; RV64-ATOMIC-NEXT: call __sync_fetch_and_add_8@plt
+; RV64-ATOMIC-NEXT: call __sync_fetch_and_add_8
; RV64-ATOMIC-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64-ATOMIC-NEXT: addi sp, sp, 16
; RV64-ATOMIC-NEXT: ret
@@ -3098,7 +3098,7 @@ define i64 @rmw64_add_seq_cst(ptr %p) nounwind {
; RV64-ATOMIC-TRAILING-NEXT: addi sp, sp, -16
; RV64-ATOMIC-TRAILING-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64-ATOMIC-TRAILING-NEXT: li a1, 1
-; RV64-ATOMIC-TRAILING-NEXT: call __sync_fetch_and_add_8@plt
+; RV64-ATOMIC-TRAILING-NEXT: call __sync_fetch_and_add_8
; RV64-ATOMIC-TRAILING-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64-ATOMIC-TRAILING-NEXT: addi sp, sp, 16
; RV64-ATOMIC-TRAILING-NEXT: ret
@@ -3114,7 +3114,7 @@ define i64 @rmw64_sub_seq_cst(ptr %p) nounwind {
; RV32-NEXT: li a1, 1
; RV32-NEXT: li a3, 5
; RV32-NEXT: li a2, 0
-; RV32-NEXT: call __atomic_fetch_sub_8@plt
+; RV32-NEXT: call __atomic_fetch_sub_8
; RV32-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32-NEXT: addi sp, sp, 16
; RV32-NEXT: ret
@@ -3125,7 +3125,7 @@ define i64 @rmw64_sub_seq_cst(ptr %p) nounwind {
; RV64-NO-ATOMIC-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64-NO-ATOMIC-NEXT: li a1, 1
; RV64-NO-ATOMIC-NEXT: li a2, 5
-; RV64-NO-ATOMIC-NEXT: call __atomic_fetch_sub_8@plt
+; RV64-NO-ATOMIC-NEXT: call __atomic_fetch_sub_8
; RV64-NO-ATOMIC-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64-NO-ATOMIC-NEXT: addi sp, sp, 16
; RV64-NO-ATOMIC-NEXT: ret
@@ -3135,7 +3135,7 @@ define i64 @rmw64_sub_seq_cst(ptr %p) nounwind {
; RV64-ATOMIC-NEXT: addi sp, sp, -16
; RV64-ATOMIC-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64-ATOMIC-NEXT: li a1, 1
-; RV64-ATOMIC-NEXT: call __sync_fetch_and_sub_8@plt
+; RV64-ATOMIC-NEXT: call __sync_fetch_and_sub_8
; RV64-ATOMIC-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64-ATOMIC-NEXT: addi sp, sp, 16
; RV64-ATOMIC-NEXT: ret
@@ -3145,7 +3145,7 @@ define i64 @rmw64_sub_seq_cst(ptr %p) nounwind {
; RV64-ATOMIC-TRAILING-NEXT: addi sp, sp, -16
; RV64-ATOMIC-TRAILING-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64-ATOMIC-TRAILING-NEXT: li a1, 1
-; RV64-ATOMIC-TRAILING-NEXT: call __sync_fetch_and_sub_8@plt
+; RV64-ATOMIC-TRAILING-NEXT: call __sync_fetch_and_sub_8
; RV64-ATOMIC-TRAILING-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64-ATOMIC-TRAILING-NEXT: addi sp, sp, 16
; RV64-ATOMIC-TRAILING-NEXT: ret
@@ -3161,7 +3161,7 @@ define i64 @rmw64_and_seq_cst(ptr %p) nounwind {
; RV32-NEXT: li a1, 1
; RV32-NEXT: li a3, 5
; RV32-NEXT: li a2, 0
-; RV32-NEXT: call __atomic_fetch_and_8@plt
+; RV32-NEXT: call __atomic_fetch_and_8
; RV32-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32-NEXT: addi sp, sp, 16
; RV32-NEXT: ret
@@ -3172,7 +3172,7 @@ define i64 @rmw64_and_seq_cst(ptr %p) nounwind {
; RV64-NO-ATOMIC-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64-NO-ATOMIC-NEXT: li a1, 1
; RV64-NO-ATOMIC-NEXT: li a2, 5
-; RV64-NO-ATOMIC-NEXT: call __atomic_fetch_and_8@plt
+; RV64-NO-ATOMIC-NEXT: call __atomic_fetch_and_8
; RV64-NO-ATOMIC-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64-NO-ATOMIC-NEXT: addi sp, sp, 16
; RV64-NO-ATOMIC-NEXT: ret
@@ -3182,7 +3182,7 @@ define i64 @rmw64_and_seq_cst(ptr %p) nounwind {
; RV64-ATOMIC-NEXT: addi sp, sp, -16
; RV64-ATOMIC-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64-ATOMIC-NEXT: li a1, 1
-; RV64-ATOMIC-NEXT: call __sync_fetch_and_and_8@plt
+; RV64-ATOMIC-NEXT: call __sync_fetch_and_and_8
; RV64-ATOMIC-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64-ATOMIC-NEXT: addi sp, sp, 16
; RV64-ATOMIC-NEXT: ret
@@ -3192,7 +3192,7 @@ define i64 @rmw64_and_seq_cst(ptr %p) nounwind {
; RV64-ATOMIC-TRAILING-NEXT: addi sp, sp, -16
; RV64-ATOMIC-TRAILING-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64-ATOMIC-TRAILING-NEXT: li a1, 1
-; RV64-ATOMIC-TRAILING-NEXT: call __sync_fetch_and_and_8@plt
+; RV64-ATOMIC-TRAILING-NEXT: call __sync_fetch_and_and_8
; RV64-ATOMIC-TRAILING-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64-ATOMIC-TRAILING-NEXT: addi sp, sp, 16
; RV64-ATOMIC-TRAILING-NEXT: ret
@@ -3208,7 +3208,7 @@ define i64 @rmw64_nand_seq_cst(ptr %p) nounwind {
; RV32-NEXT: li a1, 1
; RV32-NEXT: li a3, 5
; RV32-NEXT: li a2, 0
-; RV32-NEXT: call __atomic_fetch_nand_8@plt
+; RV32-NEXT: call __atomic_fetch_nand_8
; RV32-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32-NEXT: addi sp, sp, 16
; RV32-NEXT: ret
@@ -3219,7 +3219,7 @@ define i64 @rmw64_nand_seq_cst(ptr %p) nounwind {
; RV64-NO-ATOMIC-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64-NO-ATOMIC-NEXT: li a1, 1
; RV64-NO-ATOMIC-NEXT: li a2, 5
-; RV64-NO-ATOMIC-NEXT: call __atomic_fetch_nand_8@plt
+; RV64-NO-ATOMIC-NEXT: call __atomic_fetch_nand_8
; RV64-NO-ATOMIC-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64-NO-ATOMIC-NEXT: addi sp, sp, 16
; RV64-NO-ATOMIC-NEXT: ret
@@ -3229,7 +3229,7 @@ define i64 @rmw64_nand_seq_cst(ptr %p) nounwind {
; RV64-ATOMIC-NEXT: addi sp, sp, -16
; RV64-ATOMIC-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64-ATOMIC-NEXT: li a1, 1
-; RV64-ATOMIC-NEXT: call __sync_fetch_and_nand_8@plt
+; RV64-ATOMIC-NEXT: call __sync_fetch_and_nand_8
; RV64-ATOMIC-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64-ATOMIC-NEXT: addi sp, sp, 16
; RV64-ATOMIC-NEXT: ret
@@ -3239,7 +3239,7 @@ define i64 @rmw64_nand_seq_cst(ptr %p) nounwind {
; RV64-ATOMIC-TRAILING-NEXT: addi sp, sp, -16
; RV64-ATOMIC-TRAILING-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64-ATOMIC-TRAILING-NEXT: li a1, 1
-; RV64-ATOMIC-TRAILING-NEXT: call __sync_fetch_and_nand_8@plt
+; RV64-ATOMIC-TRAILING-NEXT: call __sync_fetch_and_nand_8
; RV64-ATOMIC-TRAILING-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64-ATOMIC-TRAILING-NEXT: addi sp, sp, 16
; RV64-ATOMIC-TRAILING-NEXT: ret
@@ -3255,7 +3255,7 @@ define i64 @rmw64_or_seq_cst(ptr %p) nounwind {
; RV32-NEXT: li a1, 1
; RV32-NEXT: li a3, 5
; RV32-NEXT: li a2, 0
-; RV32-NEXT: call __atomic_fetch_or_8@plt
+; RV32-NEXT: call __atomic_fetch_or_8
; RV32-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32-NEXT: addi sp, sp, 16
; RV32-NEXT: ret
@@ -3266,7 +3266,7 @@ define i64 @rmw64_or_seq_cst(ptr %p) nounwind {
; RV64-NO-ATOMIC-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64-NO-ATOMIC-NEXT: li a1, 1
; RV64-NO-ATOMIC-NEXT: li a2, 5
-; RV64-NO-ATOMIC-NEXT: call __atomic_fetch_or_8@plt
+; RV64-NO-ATOMIC-NEXT: call __atomic_fetch_or_8
; RV64-NO-ATOMIC-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64-NO-ATOMIC-NEXT: addi sp, sp, 16
; RV64-NO-ATOMIC-NEXT: ret
@@ -3276,7 +3276,7 @@ define i64 @rmw64_or_seq_cst(ptr %p) nounwind {
; RV64-ATOMIC-NEXT: addi sp, sp, -16
; RV64-ATOMIC-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64-ATOMIC-NEXT: li a1, 1
-; RV64-ATOMIC-NEXT: call __sync_fetch_and_or_8@plt
+; RV64-ATOMIC-NEXT: call __sync_fetch_and_or_8
; RV64-ATOMIC-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64-ATOMIC-NEXT: addi sp, sp, 16
; RV64-ATOMIC-NEXT: ret
@@ -3286,7 +3286,7 @@ define i64 @rmw64_or_seq_cst(ptr %p) nounwind {
; RV64-ATOMIC-TRAILING-NEXT: addi sp, sp, -16
; RV64-ATOMIC-TRAILING-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64-ATOMIC-TRAILING-NEXT: li a1, 1
-; RV64-ATOMIC-TRAILING-NEXT: call __sync_fetch_and_or_8@plt
+; RV64-ATOMIC-TRAILING-NEXT: call __sync_fetch_and_or_8
; RV64-ATOMIC-TRAILING-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64-ATOMIC-TRAILING-NEXT: addi sp, sp, 16
; RV64-ATOMIC-TRAILING-NEXT: ret
@@ -3302,7 +3302,7 @@ define i64 @rmw64_xor_seq_cst(ptr %p) nounwind {
; RV32-NEXT: li a1, 1
; RV32-NEXT: li a3, 5
; RV32-NEXT: li a2, 0
-; RV32-NEXT: call __atomic_fetch_xor_8@plt
+; RV32-NEXT: call __atomic_fetch_xor_8
; RV32-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32-NEXT: addi sp, sp, 16
; RV32-NEXT: ret
@@ -3313,7 +3313,7 @@ define i64 @rmw64_xor_seq_cst(ptr %p) nounwind {
; RV64-NO-ATOMIC-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64-NO-ATOMIC-NEXT: li a1, 1
; RV64-NO-ATOMIC-NEXT: li a2, 5
-; RV64-NO-ATOMIC-NEXT: call __atomic_fetch_xor_8@plt
+; RV64-NO-ATOMIC-NEXT: call __atomic_fetch_xor_8
; RV64-NO-ATOMIC-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64-NO-ATOMIC-NEXT: addi sp, sp, 16
; RV64-NO-ATOMIC-NEXT: ret
@@ -3323,7 +3323,7 @@ define i64 @rmw64_xor_seq_cst(ptr %p) nounwind {
; RV64-ATOMIC-NEXT: addi sp, sp, -16
; RV64-ATOMIC-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64-ATOMIC-NEXT: li a1, 1
-; RV64-ATOMIC-NEXT: call __sync_fetch_and_xor_8@plt
+; RV64-ATOMIC-NEXT: call __sync_fetch_and_xor_8
; RV64-ATOMIC-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64-ATOMIC-NEXT: addi sp, sp, 16
; RV64-ATOMIC-NEXT: ret
@@ -3333,7 +3333,7 @@ define i64 @rmw64_xor_seq_cst(ptr %p) nounwind {
; RV64-ATOMIC-TRAILING-NEXT: addi sp, sp, -16
; RV64-ATOMIC-TRAILING-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64-ATOMIC-TRAILING-NEXT: li a1, 1
-; RV64-ATOMIC-TRAILING-NEXT: call __sync_fetch_and_xor_8@plt
+; RV64-ATOMIC-TRAILING-NEXT: call __sync_fetch_and_xor_8
; RV64-ATOMIC-TRAILING-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64-ATOMIC-TRAILING-NEXT: addi sp, sp, 16
; RV64-ATOMIC-TRAILING-NEXT: ret
@@ -3361,7 +3361,7 @@ define i64 @rmw64_max_seq_cst(ptr %p) nounwind {
; RV32-NEXT: li a4, 5
; RV32-NEXT: li a5, 5
; RV32-NEXT: mv a0, s0
-; RV32-NEXT: call __atomic_compare_exchange_8@plt
+; RV32-NEXT: call __atomic_compare_exchange_8
; RV32-NEXT: lw a1, 4(sp)
; RV32-NEXT: lw a4, 0(sp)
; RV32-NEXT: bnez a0, .LBB49_6
@@ -3405,7 +3405,7 @@ define i64 @rmw64_max_seq_cst(ptr %p) nounwind {
; RV64-NO-ATOMIC-NEXT: li a3, 5
; RV64-NO-ATOMIC-NEXT: li a4, 5
; RV64-NO-ATOMIC-NEXT: mv a0, s0
-; RV64-NO-ATOMIC-NEXT: call __atomic_compare_exchange_8@plt
+; RV64-NO-ATOMIC-NEXT: call __atomic_compare_exchange_8
; RV64-NO-ATOMIC-NEXT: ld a1, 8(sp)
; RV64-NO-ATOMIC-NEXT: bnez a0, .LBB49_4
; RV64-NO-ATOMIC-NEXT: .LBB49_2: # %atomicrmw.start
@@ -3428,7 +3428,7 @@ define i64 @rmw64_max_seq_cst(ptr %p) nounwind {
; RV64-ATOMIC-NEXT: addi sp, sp, -16
; RV64-ATOMIC-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64-ATOMIC-NEXT: li a1, 1
-; RV64-ATOMIC-NEXT: call __sync_fetch_and_max_8@plt
+; RV64-ATOMIC-NEXT: call __sync_fetch_and_max_8
; RV64-ATOMIC-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64-ATOMIC-NEXT: addi sp, sp, 16
; RV64-ATOMIC-NEXT: ret
@@ -3438,7 +3438,7 @@ define i64 @rmw64_max_seq_cst(ptr %p) nounwind {
; RV64-ATOMIC-TRAILING-NEXT: addi sp, sp, -16
; RV64-ATOMIC-TRAILING-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64-ATOMIC-TRAILING-NEXT: li a1, 1
-; RV64-ATOMIC-TRAILING-NEXT: call __sync_fetch_and_max_8@plt
+; RV64-ATOMIC-TRAILING-NEXT: call __sync_fetch_and_max_8
; RV64-ATOMIC-TRAILING-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64-ATOMIC-TRAILING-NEXT: addi sp, sp, 16
; RV64-ATOMIC-TRAILING-NEXT: ret
@@ -3466,7 +3466,7 @@ define i64 @rmw64_min_seq_cst(ptr %p) nounwind {
; RV32-NEXT: li a4, 5
; RV32-NEXT: li a5, 5
; RV32-NEXT: mv a0, s0
-; RV32-NEXT: call __atomic_compare_exchange_8@plt
+; RV32-NEXT: call __atomic_compare_exchange_8
; RV32-NEXT: lw a1, 4(sp)
; RV32-NEXT: lw a4, 0(sp)
; RV32-NEXT: bnez a0, .LBB50_6
@@ -3511,7 +3511,7 @@ define i64 @rmw64_min_seq_cst(ptr %p) nounwind {
; RV64-NO-ATOMIC-NEXT: li a3, 5
; RV64-NO-ATOMIC-NEXT: li a4, 5
; RV64-NO-ATOMIC-NEXT: mv a0, s0
-; RV64-NO-ATOMIC-NEXT: call __atomic_compare_exchange_8@plt
+; RV64-NO-ATOMIC-NEXT: call __atomic_compare_exchange_8
; RV64-NO-ATOMIC-NEXT: ld a1, 0(sp)
; RV64-NO-ATOMIC-NEXT: bnez a0, .LBB50_4
; RV64-NO-ATOMIC-NEXT: .LBB50_2: # %atomicrmw.start
@@ -3535,7 +3535,7 @@ define i64 @rmw64_min_seq_cst(ptr %p) nounwind {
; RV64-ATOMIC-NEXT: addi sp, sp, -16
; RV64-ATOMIC-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64-ATOMIC-NEXT: li a1, 1
-; RV64-ATOMIC-NEXT: call __sync_fetch_and_min_8@plt
+; RV64-ATOMIC-NEXT: call __sync_fetch_and_min_8
; RV64-ATOMIC-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64-ATOMIC-NEXT: addi sp, sp, 16
; RV64-ATOMIC-NEXT: ret
@@ -3545,7 +3545,7 @@ define i64 @rmw64_min_seq_cst(ptr %p) nounwind {
; RV64-ATOMIC-TRAILING-NEXT: addi sp, sp, -16
; RV64-ATOMIC-TRAILING-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64-ATOMIC-TRAILING-NEXT: li a1, 1
-; RV64-ATOMIC-TRAILING-NEXT: call __sync_fetch_and_min_8@plt
+; RV64-ATOMIC-TRAILING-NEXT: call __sync_fetch_and_min_8
; RV64-ATOMIC-TRAILING-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64-ATOMIC-TRAILING-NEXT: addi sp, sp, 16
; RV64-ATOMIC-TRAILING-NEXT: ret
@@ -3573,7 +3573,7 @@ define i64 @rmw64_umax_seq_cst(ptr %p) nounwind {
; RV32-NEXT: li a4, 5
; RV32-NEXT: li a5, 5
; RV32-NEXT: mv a0, s0
-; RV32-NEXT: call __atomic_compare_exchange_8@plt
+; RV32-NEXT: call __atomic_compare_exchange_8
; RV32-NEXT: lw a1, 4(sp)
; RV32-NEXT: lw a4, 0(sp)
; RV32-NEXT: bnez a0, .LBB51_4
@@ -3612,7 +3612,7 @@ define i64 @rmw64_umax_seq_cst(ptr %p) nounwind {
; RV64-NO-ATOMIC-NEXT: li a3, 5
; RV64-NO-ATOMIC-NEXT: li a4, 5
; RV64-NO-ATOMIC-NEXT: mv a0, s0
-; RV64-NO-ATOMIC-NEXT: call __atomic_compare_exchange_8@plt
+; RV64-NO-ATOMIC-NEXT: call __atomic_compare_exchange_8
; RV64-NO-ATOMIC-NEXT: ld a1, 8(sp)
; RV64-NO-ATOMIC-NEXT: beqz a0, .LBB51_1
; RV64-NO-ATOMIC-NEXT: # %bb.2: # %atomicrmw.end
@@ -3627,7 +3627,7 @@ define i64 @rmw64_umax_seq_cst(ptr %p) nounwind {
; RV64-ATOMIC-NEXT: addi sp, sp, -16
; RV64-ATOMIC-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64-ATOMIC-NEXT: li a1, 1
-; RV64-ATOMIC-NEXT: call __sync_fetch_and_umax_8@plt
+; RV64-ATOMIC-NEXT: call __sync_fetch_and_umax_8
; RV64-ATOMIC-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64-ATOMIC-NEXT: addi sp, sp, 16
; RV64-ATOMIC-NEXT: ret
@@ -3637,7 +3637,7 @@ define i64 @rmw64_umax_seq_cst(ptr %p) nounwind {
; RV64-ATOMIC-TRAILING-NEXT: addi sp, sp, -16
; RV64-ATOMIC-TRAILING-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64-ATOMIC-TRAILING-NEXT: li a1, 1
-; RV64-ATOMIC-TRAILING-NEXT: call __sync_fetch_and_umax_8@plt
+; RV64-ATOMIC-TRAILING-NEXT: call __sync_fetch_and_umax_8
; RV64-ATOMIC-TRAILING-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64-ATOMIC-TRAILING-NEXT: addi sp, sp, 16
; RV64-ATOMIC-TRAILING-NEXT: ret
@@ -3665,7 +3665,7 @@ define i64 @rmw64_umin_seq_cst(ptr %p) nounwind {
; RV32-NEXT: li a4, 5
; RV32-NEXT: li a5, 5
; RV32-NEXT: mv a0, s0
-; RV32-NEXT: call __atomic_compare_exchange_8@plt
+; RV32-NEXT: call __atomic_compare_exchange_8
; RV32-NEXT: lw a1, 4(sp)
; RV32-NEXT: lw a4, 0(sp)
; RV32-NEXT: bnez a0, .LBB52_4
@@ -3704,7 +3704,7 @@ define i64 @rmw64_umin_seq_cst(ptr %p) nounwind {
; RV64-NO-ATOMIC-NEXT: li a3, 5
; RV64-NO-ATOMIC-NEXT: li a4, 5
; RV64-NO-ATOMIC-NEXT: mv a0, s0
-; RV64-NO-ATOMIC-NEXT: call __atomic_compare_exchange_8@plt
+; RV64-NO-ATOMIC-NEXT: call __atomic_compare_exchange_8
; RV64-NO-ATOMIC-NEXT: ld a1, 0(sp)
; RV64-NO-ATOMIC-NEXT: bnez a0, .LBB52_4
; RV64-NO-ATOMIC-NEXT: .LBB52_2: # %atomicrmw.start
@@ -3728,7 +3728,7 @@ define i64 @rmw64_umin_seq_cst(ptr %p) nounwind {
; RV64-ATOMIC-NEXT: addi sp, sp, -16
; RV64-ATOMIC-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64-ATOMIC-NEXT: li a1, 1
-; RV64-ATOMIC-NEXT: call __sync_fetch_and_umin_8@plt
+; RV64-ATOMIC-NEXT: call __sync_fetch_and_umin_8
; RV64-ATOMIC-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64-ATOMIC-NEXT: addi sp, sp, 16
; RV64-ATOMIC-NEXT: ret
@@ -3738,7 +3738,7 @@ define i64 @rmw64_umin_seq_cst(ptr %p) nounwind {
; RV64-ATOMIC-TRAILING-NEXT: addi sp, sp, -16
; RV64-ATOMIC-TRAILING-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64-ATOMIC-TRAILING-NEXT: li a1, 1
-; RV64-ATOMIC-TRAILING-NEXT: call __sync_fetch_and_umin_8@plt
+; RV64-ATOMIC-TRAILING-NEXT: call __sync_fetch_and_umin_8
; RV64-ATOMIC-TRAILING-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64-ATOMIC-TRAILING-NEXT: addi sp, sp, 16
; RV64-ATOMIC-TRAILING-NEXT: ret
@@ -3754,7 +3754,7 @@ define i64 @rmw64_xchg_seq_cst(ptr %p) nounwind {
; RV32-NEXT: li a1, 1
; RV32-NEXT: li a3, 5
; RV32-NEXT: li a2, 0
-; RV32-NEXT: call __atomic_exchange_8@plt
+; RV32-NEXT: call __atomic_exchange_8
; RV32-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32-NEXT: addi sp, sp, 16
; RV32-NEXT: ret
@@ -3765,7 +3765,7 @@ define i64 @rmw64_xchg_seq_cst(ptr %p) nounwind {
; RV64-NO-ATOMIC-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64-NO-ATOMIC-NEXT: li a1, 1
; RV64-NO-ATOMIC-NEXT: li a2, 5
-; RV64-NO-ATOMIC-NEXT: call __atomic_exchange_8@plt
+; RV64-NO-ATOMIC-NEXT: call __atomic_exchange_8
; RV64-NO-ATOMIC-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64-NO-ATOMIC-NEXT: addi sp, sp, 16
; RV64-NO-ATOMIC-NEXT: ret
@@ -3775,7 +3775,7 @@ define i64 @rmw64_xchg_seq_cst(ptr %p) nounwind {
; RV64-ATOMIC-NEXT: addi sp, sp, -16
; RV64-ATOMIC-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64-ATOMIC-NEXT: li a1, 1
-; RV64-ATOMIC-NEXT: call __sync_lock_test_and_set_8@plt
+; RV64-ATOMIC-NEXT: call __sync_lock_test_and_set_8
; RV64-ATOMIC-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64-ATOMIC-NEXT: addi sp, sp, 16
; RV64-ATOMIC-NEXT: ret
@@ -3785,7 +3785,7 @@ define i64 @rmw64_xchg_seq_cst(ptr %p) nounwind {
; RV64-ATOMIC-TRAILING-NEXT: addi sp, sp, -16
; RV64-ATOMIC-TRAILING-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64-ATOMIC-TRAILING-NEXT: li a1, 1
-; RV64-ATOMIC-TRAILING-NEXT: call __sync_lock_test_and_set_8@plt
+; RV64-ATOMIC-TRAILING-NEXT: call __sync_lock_test_and_set_8
; RV64-ATOMIC-TRAILING-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64-ATOMIC-TRAILING-NEXT: addi sp, sp, 16
; RV64-ATOMIC-TRAILING-NEXT: ret
@@ -3810,7 +3810,7 @@ define double @rmw64_fadd_seq_cst(ptr %p) nounwind {
; RV32-NEXT: mv a0, s2
; RV32-NEXT: mv a1, s1
; RV32-NEXT: li a2, 0
-; RV32-NEXT: call __adddf3@plt
+; RV32-NEXT: call __adddf3
; RV32-NEXT: mv a2, a0
; RV32-NEXT: mv a3, a1
; RV32-NEXT: sw s2, 8(sp)
@@ -3819,7 +3819,7 @@ define double @rmw64_fadd_seq_cst(ptr %p) nounwind {
; RV32-NEXT: li a4, 5
; RV32-NEXT: li a5, 5
; RV32-NEXT: mv a0, s0
-; RV32-NEXT: call __atomic_compare_exchange_8@plt
+; RV32-NEXT: call __atomic_compare_exchange_8
; RV32-NEXT: lw s1, 12(sp)
; RV32-NEXT: lw s2, 8(sp)
; RV32-NEXT: beqz a0, .LBB54_1
@@ -3848,14 +3848,14 @@ define double @rmw64_fadd_seq_cst(ptr %p) nounwind {
; RV64-NO-ATOMIC-NEXT: # =>This Inner Loop Header: Depth=1
; RV64-NO-ATOMIC-NEXT: mv a0, s2
; RV64-NO-ATOMIC-NEXT: mv a1, s1
-; RV64-NO-ATOMIC-NEXT: call __adddf3@plt
+; RV64-NO-ATOMIC-NEXT: call __adddf3
; RV64-NO-ATOMIC-NEXT: mv a2, a0
; RV64-NO-ATOMIC-NEXT: sd s2, 8(sp)
; RV64-NO-ATOMIC-NEXT: addi a1, sp, 8
; RV64-NO-ATOMIC-NEXT: li a3, 5
; RV64-NO-ATOMIC-NEXT: li a4, 5
; RV64-NO-ATOMIC-NEXT: mv a0, s0
-; RV64-NO-ATOMIC-NEXT: call __atomic_compare_exchange_8@plt
+; RV64-NO-ATOMIC-NEXT: call __atomic_compare_exchange_8
; RV64-NO-ATOMIC-NEXT: ld s2, 8(sp)
; RV64-NO-ATOMIC-NEXT: beqz a0, .LBB54_1
; RV64-NO-ATOMIC-NEXT: # %bb.2: # %atomicrmw.end
@@ -3882,11 +3882,11 @@ define double @rmw64_fadd_seq_cst(ptr %p) nounwind {
; RV64-ATOMIC-NEXT: # =>This Inner Loop Header: Depth=1
; RV64-ATOMIC-NEXT: mv s2, a0
; RV64-ATOMIC-NEXT: mv a1, s1
-; RV64-ATOMIC-NEXT: call __adddf3@plt
+; RV64-ATOMIC-NEXT: call __adddf3
; RV64-ATOMIC-NEXT: mv a2, a0
; RV64-ATOMIC-NEXT: mv a0, s0
; RV64-ATOMIC-NEXT: mv a1, s2
-; RV64-ATOMIC-NEXT: call __sync_val_compare_and_swap_8@plt
+; RV64-ATOMIC-NEXT: call __sync_val_compare_and_swap_8
; RV64-ATOMIC-NEXT: bne a0, s2, .LBB54_1
; RV64-ATOMIC-NEXT: # %bb.2: # %atomicrmw.end
; RV64-ATOMIC-NEXT: ld ra, 24(sp) # 8-byte Folded Reload
@@ -3911,11 +3911,11 @@ define double @rmw64_fadd_seq_cst(ptr %p) nounwind {
; RV64-ATOMIC-TRAILING-NEXT: # =>This Inner Loop Header: Depth=1
; RV64-ATOMIC-TRAILING-NEXT: mv s2, a0
; RV64-ATOMIC-TRAILING-NEXT: mv a1, s1
-; RV64-ATOMIC-TRAILING-NEXT: call __adddf3@plt
+; RV64-ATOMIC-TRAILING-NEXT: call __adddf3
; RV64-ATOMIC-TRAILING-NEXT: mv a2, a0
; RV64-ATOMIC-TRAILING-NEXT: mv a0, s0
; RV64-ATOMIC-TRAILING-NEXT: mv a1, s2
-; RV64-ATOMIC-TRAILING-NEXT: call __sync_val_compare_and_swap_8@plt
+; RV64-ATOMIC-TRAILING-NEXT: call __sync_val_compare_and_swap_8
; RV64-ATOMIC-TRAILING-NEXT: bne a0, s2, .LBB54_1
; RV64-ATOMIC-TRAILING-NEXT: # %bb.2: # %atomicrmw.end
; RV64-ATOMIC-TRAILING-NEXT: ld ra, 24(sp) # 8-byte Folded Reload
@@ -3945,7 +3945,7 @@ define double @rmw64_fsub_seq_cst(ptr %p) nounwind {
; RV32-NEXT: mv a0, s2
; RV32-NEXT: mv a1, s1
; RV32-NEXT: li a2, 0
-; RV32-NEXT: call __adddf3@plt
+; RV32-NEXT: call __adddf3
; RV32-NEXT: mv a2, a0
; RV32-NEXT: mv a3, a1
; RV32-NEXT: sw s2, 8(sp)
@@ -3954,7 +3954,7 @@ define double @rmw64_fsub_seq_cst(ptr %p) nounwind {
; RV32-NEXT: li a4, 5
; RV32-NEXT: li a5, 5
; RV32-NEXT: mv a0, s0
-; RV32-NEXT: call __atomic_compare_exchange_8@plt
+; RV32-NEXT: call __atomic_compare_exchange_8
; RV32-NEXT: lw s1, 12(sp)
; RV32-NEXT: lw s2, 8(sp)
; RV32-NEXT: beqz a0, .LBB55_1
@@ -3983,14 +3983,14 @@ define double @rmw64_fsub_seq_cst(ptr %p) nounwind {
; RV64-NO-ATOMIC-NEXT: # =>This Inner Loop Header: Depth=1
; RV64-NO-ATOMIC-NEXT: mv a0, s2
; RV64-NO-ATOMIC-NEXT: mv a1, s1
-; RV64-NO-ATOMIC-NEXT: call __adddf3@plt
+; RV64-NO-ATOMIC-NEXT: call __adddf3
; RV64-NO-ATOMIC-NEXT: mv a2, a0
; RV64-NO-ATOMIC-NEXT: sd s2, 8(sp)
; RV64-NO-ATOMIC-NEXT: addi a1, sp, 8
; RV64-NO-ATOMIC-NEXT: li a3, 5
; RV64-NO-ATOMIC-NEXT: li a4, 5
; RV64-NO-ATOMIC-NEXT: mv a0, s0
-; RV64-NO-ATOMIC-NEXT: call __atomic_compare_exchange_8@plt
+; RV64-NO-ATOMIC-NEXT: call __atomic_compare_exchange_8
; RV64-NO-ATOMIC-NEXT: ld s2, 8(sp)
; RV64-NO-ATOMIC-NEXT: beqz a0, .LBB55_1
; RV64-NO-ATOMIC-NEXT: # %bb.2: # %atomicrmw.end
@@ -4017,11 +4017,11 @@ define double @rmw64_fsub_seq_cst(ptr %p) nounwind {
; RV64-ATOMIC-NEXT: # =>This Inner Loop Header: Depth=1
; RV64-ATOMIC-NEXT: mv s2, a0
; RV64-ATOMIC-NEXT: mv a1, s1
-; RV64-ATOMIC-NEXT: call __adddf3@plt
+; RV64-ATOMIC-NEXT: call __adddf3
; RV64-ATOMIC-NEXT: mv a2, a0
; RV64-ATOMIC-NEXT: mv a0, s0
; RV64-ATOMIC-NEXT: mv a1, s2
-; RV64-ATOMIC-NEXT: call __sync_val_compare_and_swap_8@plt
+; RV64-ATOMIC-NEXT: call __sync_val_compare_and_swap_8
; RV64-ATOMIC-NEXT: bne a0, s2, .LBB55_1
; RV64-ATOMIC-NEXT: # %bb.2: # %atomicrmw.end
; RV64-ATOMIC-NEXT: ld ra, 24(sp) # 8-byte Folded Reload
@@ -4046,11 +4046,11 @@ define double @rmw64_fsub_seq_cst(ptr %p) nounwind {
; RV64-ATOMIC-TRAILING-NEXT: # =>This Inner Loop Header: Depth=1
; RV64-ATOMIC-TRAILING-NEXT: mv s2, a0
; RV64-ATOMIC-TRAILING-NEXT: mv a1, s1
-; RV64-ATOMIC-TRAILING-NEXT: call __adddf3@plt
+; RV64-ATOMIC-TRAILING-NEXT: call __adddf3
; RV64-ATOMIC-TRAILING-NEXT: mv a2, a0
; RV64-ATOMIC-TRAILING-NEXT: mv a0, s0
; RV64-ATOMIC-TRAILING-NEXT: mv a1, s2
-; RV64-ATOMIC-TRAILING-NEXT: call __sync_val_compare_and_swap_8@plt
+; RV64-ATOMIC-TRAILING-NEXT: call __sync_val_compare_and_swap_8
; RV64-ATOMIC-TRAILING-NEXT: bne a0, s2, .LBB55_1
; RV64-ATOMIC-TRAILING-NEXT: # %bb.2: # %atomicrmw.end
; RV64-ATOMIC-TRAILING-NEXT: ld ra, 24(sp) # 8-byte Folded Reload
@@ -4080,7 +4080,7 @@ define double @rmw64_fmin_seq_cst(ptr %p) nounwind {
; RV32-NEXT: mv a0, s2
; RV32-NEXT: mv a1, s1
; RV32-NEXT: li a2, 0
-; RV32-NEXT: call fmin@plt
+; RV32-NEXT: call fmin
; RV32-NEXT: mv a2, a0
; RV32-NEXT: mv a3, a1
; RV32-NEXT: sw s2, 8(sp)
@@ -4089,7 +4089,7 @@ define double @rmw64_fmin_seq_cst(ptr %p) nounwind {
; RV32-NEXT: li a4, 5
; RV32-NEXT: li a5, 5
; RV32-NEXT: mv a0, s0
-; RV32-NEXT: call __atomic_compare_exchange_8@plt
+; RV32-NEXT: call __atomic_compare_exchange_8
; RV32-NEXT: lw s1, 12(sp)
; RV32-NEXT: lw s2, 8(sp)
; RV32-NEXT: beqz a0, .LBB56_1
@@ -4118,14 +4118,14 @@ define double @rmw64_fmin_seq_cst(ptr %p) nounwind {
; RV64-NO-ATOMIC-NEXT: # =>This Inner Loop Header: Depth=1
; RV64-NO-ATOMIC-NEXT: mv a0, s2
; RV64-NO-ATOMIC-NEXT: mv a1, s1
-; RV64-NO-ATOMIC-NEXT: call fmin@plt
+; RV64-NO-ATOMIC-NEXT: call fmin
; RV64-NO-ATOMIC-NEXT: mv a2, a0
; RV64-NO-ATOMIC-NEXT: sd s2, 8(sp)
; RV64-NO-ATOMIC-NEXT: addi a1, sp, 8
; RV64-NO-ATOMIC-NEXT: li a3, 5
; RV64-NO-ATOMIC-NEXT: li a4, 5
; RV64-NO-ATOMIC-NEXT: mv a0, s0
-; RV64-NO-ATOMIC-NEXT: call __atomic_compare_exchange_8@plt
+; RV64-NO-ATOMIC-NEXT: call __atomic_compare_exchange_8
; RV64-NO-ATOMIC-NEXT: ld s2, 8(sp)
; RV64-NO-ATOMIC-NEXT: beqz a0, .LBB56_1
; RV64-NO-ATOMIC-NEXT: # %bb.2: # %atomicrmw.end
@@ -4152,11 +4152,11 @@ define double @rmw64_fmin_seq_cst(ptr %p) nounwind {
; RV64-ATOMIC-NEXT: # =>This Inner Loop Header: Depth=1
; RV64-ATOMIC-NEXT: mv s2, a0
; RV64-ATOMIC-NEXT: mv a1, s1
-; RV64-ATOMIC-NEXT: call fmin@plt
+; RV64-ATOMIC-NEXT: call fmin
; RV64-ATOMIC-NEXT: mv a2, a0
; RV64-ATOMIC-NEXT: mv a0, s0
; RV64-ATOMIC-NEXT: mv a1, s2
-; RV64-ATOMIC-NEXT: call __sync_val_compare_and_swap_8@plt
+; RV64-ATOMIC-NEXT: call __sync_val_compare_and_swap_8
; RV64-ATOMIC-NEXT: bne a0, s2, .LBB56_1
; RV64-ATOMIC-NEXT: # %bb.2: # %atomicrmw.end
; RV64-ATOMIC-NEXT: ld ra, 24(sp) # 8-byte Folded Reload
@@ -4181,11 +4181,11 @@ define double @rmw64_fmin_seq_cst(ptr %p) nounwind {
; RV64-ATOMIC-TRAILING-NEXT: # =>This Inner Loop Header: Depth=1
; RV64-ATOMIC-TRAILING-NEXT: mv s2, a0
; RV64-ATOMIC-TRAILING-NEXT: mv a1, s1
-; RV64-ATOMIC-TRAILING-NEXT: call fmin@plt
+; RV64-ATOMIC-TRAILING-NEXT: call fmin
; RV64-ATOMIC-TRAILING-NEXT: mv a2, a0
; RV64-ATOMIC-TRAILING-NEXT: mv a0, s0
; RV64-ATOMIC-TRAILING-NEXT: mv a1, s2
-; RV64-ATOMIC-TRAILING-NEXT: call __sync_val_compare_and_swap_8@plt
+; RV64-ATOMIC-TRAILING-NEXT: call __sync_val_compare_and_swap_8
; RV64-ATOMIC-TRAILING-NEXT: bne a0, s2, .LBB56_1
; RV64-ATOMIC-TRAILING-NEXT: # %bb.2: # %atomicrmw.end
; RV64-ATOMIC-TRAILING-NEXT: ld ra, 24(sp) # 8-byte Folded Reload
@@ -4215,7 +4215,7 @@ define double @rmw64_fmax_seq_cst(ptr %p) nounwind {
; RV32-NEXT: mv a0, s2
; RV32-NEXT: mv a1, s1
; RV32-NEXT: li a2, 0
-; RV32-NEXT: call fmax@plt
+; RV32-NEXT: call fmax
; RV32-NEXT: mv a2, a0
; RV32-NEXT: mv a3, a1
; RV32-NEXT: sw s2, 8(sp)
@@ -4224,7 +4224,7 @@ define double @rmw64_fmax_seq_cst(ptr %p) nounwind {
; RV32-NEXT: li a4, 5
; RV32-NEXT: li a5, 5
; RV32-NEXT: mv a0, s0
-; RV32-NEXT: call __atomic_compare_exchange_8@plt
+; RV32-NEXT: call __atomic_compare_exchange_8
; RV32-NEXT: lw s1, 12(sp)
; RV32-NEXT: lw s2, 8(sp)
; RV32-NEXT: beqz a0, .LBB57_1
@@ -4253,14 +4253,14 @@ define double @rmw64_fmax_seq_cst(ptr %p) nounwind {
; RV64-NO-ATOMIC-NEXT: # =>This Inner Loop Header: Depth=1
; RV64-NO-ATOMIC-NEXT: mv a0, s2
; RV64-NO-ATOMIC-NEXT: mv a1, s1
-; RV64-NO-ATOMIC-NEXT: call fmax@plt
+; RV64-NO-ATOMIC-NEXT: call fmax
; RV64-NO-ATOMIC-NEXT: mv a2, a0
; RV64-NO-ATOMIC-NEXT: sd s2, 8(sp)
; RV64-NO-ATOMIC-NEXT: addi a1, sp, 8
; RV64-NO-ATOMIC-NEXT: li a3, 5
; RV64-NO-ATOMIC-NEXT: li a4, 5
; RV64-NO-ATOMIC-NEXT: mv a0, s0
-; RV64-NO-ATOMIC-NEXT: call __atomic_compare_exchange_8@plt
+; RV64-NO-ATOMIC-NEXT: call __atomic_compare_exchange_8
; RV64-NO-ATOMIC-NEXT: ld s2, 8(sp)
; RV64-NO-ATOMIC-NEXT: beqz a0, .LBB57_1
; RV64-NO-ATOMIC-NEXT: # %bb.2: # %atomicrmw.end
@@ -4287,11 +4287,11 @@ define double @rmw64_fmax_seq_cst(ptr %p) nounwind {
; RV64-ATOMIC-NEXT: # =>This Inner Loop Header: Depth=1
; RV64-ATOMIC-NEXT: mv s2, a0
; RV64-ATOMIC-NEXT: mv a1, s1
-; RV64-ATOMIC-NEXT: call fmax@plt
+; RV64-ATOMIC-NEXT: call fmax
; RV64-ATOMIC-NEXT: mv a2, a0
; RV64-ATOMIC-NEXT: mv a0, s0
; RV64-ATOMIC-NEXT: mv a1, s2
-; RV64-ATOMIC-NEXT: call __sync_val_compare_and_swap_8@plt
+; RV64-ATOMIC-NEXT: call __sync_val_compare_and_swap_8
; RV64-ATOMIC-NEXT: bne a0, s2, .LBB57_1
; RV64-ATOMIC-NEXT: # %bb.2: # %atomicrmw.end
; RV64-ATOMIC-NEXT: ld ra, 24(sp) # 8-byte Folded Reload
@@ -4316,11 +4316,11 @@ define double @rmw64_fmax_seq_cst(ptr %p) nounwind {
; RV64-ATOMIC-TRAILING-NEXT: # =>This Inner Loop Header: Depth=1
; RV64-ATOMIC-TRAILING-NEXT: mv s2, a0
; RV64-ATOMIC-TRAILING-NEXT: mv a1, s1
-; RV64-ATOMIC-TRAILING-NEXT: call fmax@plt
+; RV64-ATOMIC-TRAILING-NEXT: call fmax
; RV64-ATOMIC-TRAILING-NEXT: mv a2, a0
; RV64-ATOMIC-TRAILING-NEXT: mv a0, s0
; RV64-ATOMIC-TRAILING-NEXT: mv a1, s2
-; RV64-ATOMIC-TRAILING-NEXT: call __sync_val_compare_and_swap_8@plt
+; RV64-ATOMIC-TRAILING-NEXT: call __sync_val_compare_and_swap_8
; RV64-ATOMIC-TRAILING-NEXT: bne a0, s2, .LBB57_1
; RV64-ATOMIC-TRAILING-NEXT: # %bb.2: # %atomicrmw.end
; RV64-ATOMIC-TRAILING-NEXT: ld ra, 24(sp) # 8-byte Folded Reload
@@ -4345,7 +4345,7 @@ define i64 @cmpxchg64_monotonic(ptr %p) nounwind {
; RV32-NEXT: li a3, 0
; RV32-NEXT: li a4, 0
; RV32-NEXT: li a5, 0
-; RV32-NEXT: call __atomic_compare_exchange_8@plt
+; RV32-NEXT: call __atomic_compare_exchange_8
; RV32-NEXT: lw a1, 4(sp)
; RV32-NEXT: lw a0, 0(sp)
; RV32-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
@@ -4361,7 +4361,7 @@ define i64 @cmpxchg64_monotonic(ptr %p) nounwind {
; RV64-NO-ATOMIC-NEXT: li a2, 1
; RV64-NO-ATOMIC-NEXT: li a3, 0
; RV64-NO-ATOMIC-NEXT: li a4, 0
-; RV64-NO-ATOMIC-NEXT: call __atomic_compare_exchange_8@plt
+; RV64-NO-ATOMIC-NEXT: call __atomic_compare_exchange_8
; RV64-NO-ATOMIC-NEXT: ld a0, 0(sp)
; RV64-NO-ATOMIC-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64-NO-ATOMIC-NEXT: addi sp, sp, 16
@@ -4373,7 +4373,7 @@ define i64 @cmpxchg64_monotonic(ptr %p) nounwind {
; RV64-ATOMIC-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64-ATOMIC-NEXT: li a2, 1
; RV64-ATOMIC-NEXT: li a1, 0
-; RV64-ATOMIC-NEXT: call __sync_val_compare_and_swap_8@plt
+; RV64-ATOMIC-NEXT: call __sync_val_compare_and_swap_8
; RV64-ATOMIC-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64-ATOMIC-NEXT: addi sp, sp, 16
; RV64-ATOMIC-NEXT: ret
@@ -4384,7 +4384,7 @@ define i64 @cmpxchg64_monotonic(ptr %p) nounwind {
; RV64-ATOMIC-TRAILING-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64-ATOMIC-TRAILING-NEXT: li a2, 1
; RV64-ATOMIC-TRAILING-NEXT: li a1, 0
-; RV64-ATOMIC-TRAILING-NEXT: call __sync_val_compare_and_swap_8@plt
+; RV64-ATOMIC-TRAILING-NEXT: call __sync_val_compare_and_swap_8
; RV64-ATOMIC-TRAILING-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64-ATOMIC-TRAILING-NEXT: addi sp, sp, 16
; RV64-ATOMIC-TRAILING-NEXT: ret
@@ -4405,7 +4405,7 @@ define i64 @cmpxchg64_seq_cst(ptr %p) nounwind {
; RV32-NEXT: li a4, 5
; RV32-NEXT: li a5, 5
; RV32-NEXT: li a3, 0
-; RV32-NEXT: call __atomic_compare_exchange_8@plt
+; RV32-NEXT: call __atomic_compare_exchange_8
; RV32-NEXT: lw a1, 4(sp)
; RV32-NEXT: lw a0, 0(sp)
; RV32-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
@@ -4421,7 +4421,7 @@ define i64 @cmpxchg64_seq_cst(ptr %p) nounwind {
; RV64-NO-ATOMIC-NEXT: li a2, 1
; RV64-NO-ATOMIC-NEXT: li a3, 5
; RV64-NO-ATOMIC-NEXT: li a4, 5
-; RV64-NO-ATOMIC-NEXT: call __atomic_compare_exchange_8@plt
+; RV64-NO-ATOMIC-NEXT: call __atomic_compare_exchange_8
; RV64-NO-ATOMIC-NEXT: ld a0, 0(sp)
; RV64-NO-ATOMIC-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64-NO-ATOMIC-NEXT: addi sp, sp, 16
@@ -4433,7 +4433,7 @@ define i64 @cmpxchg64_seq_cst(ptr %p) nounwind {
; RV64-ATOMIC-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64-ATOMIC-NEXT: li a2, 1
; RV64-ATOMIC-NEXT: li a1, 0
-; RV64-ATOMIC-NEXT: call __sync_val_compare_and_swap_8@plt
+; RV64-ATOMIC-NEXT: call __sync_val_compare_and_swap_8
; RV64-ATOMIC-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64-ATOMIC-NEXT: addi sp, sp, 16
; RV64-ATOMIC-NEXT: ret
@@ -4444,7 +4444,7 @@ define i64 @cmpxchg64_seq_cst(ptr %p) nounwind {
; RV64-ATOMIC-TRAILING-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64-ATOMIC-TRAILING-NEXT: li a2, 1
; RV64-ATOMIC-TRAILING-NEXT: li a1, 0
-; RV64-ATOMIC-TRAILING-NEXT: call __sync_val_compare_and_swap_8@plt
+; RV64-ATOMIC-TRAILING-NEXT: call __sync_val_compare_and_swap_8
; RV64-ATOMIC-TRAILING-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64-ATOMIC-TRAILING-NEXT: addi sp, sp, 16
; RV64-ATOMIC-TRAILING-NEXT: ret
@@ -4463,7 +4463,7 @@ define i128 @load128(ptr %p) nounwind {
; RV32-NEXT: li a0, 16
; RV32-NEXT: addi a2, sp, 8
; RV32-NEXT: li a3, 5
-; RV32-NEXT: call __atomic_load@plt
+; RV32-NEXT: call __atomic_load
; RV32-NEXT: lw a0, 20(sp)
; RV32-NEXT: lw a1, 16(sp)
; RV32-NEXT: lw a2, 12(sp)
@@ -4482,7 +4482,7 @@ define i128 @load128(ptr %p) nounwind {
; RV64-NEXT: addi sp, sp, -16
; RV64-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64-NEXT: li a1, 5
-; RV64-NEXT: call __atomic_load_16@plt
+; RV64-NEXT: call __atomic_load_16
; RV64-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64-NEXT: addi sp, sp, 16
; RV64-NEXT: ret
@@ -4503,7 +4503,7 @@ define void @store128(ptr %p) nounwind {
; RV32-NEXT: li a0, 16
; RV32-NEXT: addi a2, sp, 8
; RV32-NEXT: li a3, 5
-; RV32-NEXT: call __atomic_store@plt
+; RV32-NEXT: call __atomic_store
; RV32-NEXT: lw ra, 28(sp) # 4-byte Folded Reload
; RV32-NEXT: addi sp, sp, 32
; RV32-NEXT: ret
@@ -4515,7 +4515,7 @@ define void @store128(ptr %p) nounwind {
; RV64-NEXT: li a3, 5
; RV64-NEXT: li a1, 0
; RV64-NEXT: li a2, 0
-; RV64-NEXT: call __atomic_store_16@plt
+; RV64-NEXT: call __atomic_store_16
; RV64-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64-NEXT: addi sp, sp, 16
; RV64-NEXT: ret
@@ -4560,7 +4560,7 @@ define i128 @rmw128(ptr %p) nounwind {
; RV32-NEXT: li a4, 5
; RV32-NEXT: li a5, 5
; RV32-NEXT: mv a1, s0
-; RV32-NEXT: call __atomic_compare_exchange@plt
+; RV32-NEXT: call __atomic_compare_exchange
; RV32-NEXT: lw a1, 28(sp)
; RV32-NEXT: lw a2, 24(sp)
; RV32-NEXT: lw a3, 20(sp)
@@ -4584,7 +4584,7 @@ define i128 @rmw128(ptr %p) nounwind {
; RV64-NEXT: li a1, 1
; RV64-NEXT: li a3, 5
; RV64-NEXT: li a2, 0
-; RV64-NEXT: call __atomic_fetch_add_16@plt
+; RV64-NEXT: call __atomic_fetch_add_16
; RV64-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64-NEXT: addi sp, sp, 16
; RV64-NEXT: ret
@@ -4613,7 +4613,7 @@ define i128 @cmpxchg128(ptr %p) nounwind {
; RV32-NEXT: addi a3, sp, 8
; RV32-NEXT: li a4, 5
; RV32-NEXT: li a5, 5
-; RV32-NEXT: call __atomic_compare_exchange@plt
+; RV32-NEXT: call __atomic_compare_exchange
; RV32-NEXT: lw a0, 36(sp)
; RV32-NEXT: lw a1, 32(sp)
; RV32-NEXT: lw a2, 28(sp)
@@ -4638,7 +4638,7 @@ define i128 @cmpxchg128(ptr %p) nounwind {
; RV64-NEXT: li a4, 5
; RV64-NEXT: li a5, 5
; RV64-NEXT: li a3, 0
-; RV64-NEXT: call __atomic_compare_exchange_16@plt
+; RV64-NEXT: call __atomic_compare_exchange_16
; RV64-NEXT: ld a1, 8(sp)
; RV64-NEXT: ld a0, 0(sp)
; RV64-NEXT: ld ra, 24(sp) # 8-byte Folded Reload
diff --git a/llvm/test/CodeGen/RISCV/fp128.ll b/llvm/test/CodeGen/RISCV/fp128.ll
index 77a7914cdca6..611a70c0ad8b 100644
--- a/llvm/test/CodeGen/RISCV/fp128.ll
+++ b/llvm/test/CodeGen/RISCV/fp128.ll
@@ -33,7 +33,7 @@ define i32 @test_load_and_cmp() nounwind {
; RV32I-NEXT: addi a0, sp, 24
; RV32I-NEXT: addi a1, sp, 8
; RV32I-NEXT: sw a2, 24(sp)
-; RV32I-NEXT: call __netf2@plt
+; RV32I-NEXT: call __netf2
; RV32I-NEXT: snez a0, a0
; RV32I-NEXT: lw ra, 44(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 48
@@ -71,7 +71,7 @@ define i32 @test_add_and_fptosi() nounwind {
; RV32I-NEXT: addi a1, sp, 40
; RV32I-NEXT: addi a2, sp, 24
; RV32I-NEXT: sw a3, 40(sp)
-; RV32I-NEXT: call __addtf3@plt
+; RV32I-NEXT: call __addtf3
; RV32I-NEXT: lw a1, 56(sp)
; RV32I-NEXT: lw a0, 60(sp)
; RV32I-NEXT: lw a2, 64(sp)
@@ -81,7 +81,7 @@ define i32 @test_add_and_fptosi() nounwind {
; RV32I-NEXT: sw a0, 12(sp)
; RV32I-NEXT: addi a0, sp, 8
; RV32I-NEXT: sw a1, 8(sp)
-; RV32I-NEXT: call __fixtfsi@plt
+; RV32I-NEXT: call __fixtfsi
; RV32I-NEXT: lw ra, 76(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 80
; RV32I-NEXT: ret
diff --git a/llvm/test/CodeGen/RISCV/fp16-promote.ll b/llvm/test/CodeGen/RISCV/fp16-promote.ll
index 2a03746b1f7e..31842316606c 100644
--- a/llvm/test/CodeGen/RISCV/fp16-promote.ll
+++ b/llvm/test/CodeGen/RISCV/fp16-promote.ll
@@ -19,7 +19,7 @@ define float @test_fpextend_float(ptr %p) nounwind {
; CHECK-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; CHECK-NEXT: lhu a0, 0(a0)
; CHECK-NEXT: fmv.w.x fa0, a0
-; CHECK-NEXT: call __extendhfsf2@plt
+; CHECK-NEXT: call __extendhfsf2
; CHECK-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; CHECK-NEXT: addi sp, sp, 16
; CHECK-NEXT: ret
@@ -35,7 +35,7 @@ define double @test_fpextend_double(ptr %p) nounwind {
; CHECK-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; CHECK-NEXT: lhu a0, 0(a0)
; CHECK-NEXT: fmv.w.x fa0, a0
-; CHECK-NEXT: call __extendhfsf2@plt
+; CHECK-NEXT: call __extendhfsf2
; CHECK-NEXT: fcvt.d.s fa0, fa0
; CHECK-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; CHECK-NEXT: addi sp, sp, 16
@@ -52,7 +52,7 @@ define void @test_fptrunc_float(float %f, ptr %p) nounwind {
; CHECK-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; CHECK-NEXT: sw s0, 8(sp) # 4-byte Folded Spill
; CHECK-NEXT: mv s0, a0
-; CHECK-NEXT: call __truncsfhf2@plt
+; CHECK-NEXT: call __truncsfhf2
; CHECK-NEXT: fmv.x.w a0, fa0
; CHECK-NEXT: sh a0, 0(s0)
; CHECK-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
@@ -71,7 +71,7 @@ define void @test_fptrunc_double(double %d, ptr %p) nounwind {
; CHECK-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; CHECK-NEXT: sw s0, 8(sp) # 4-byte Folded Spill
; CHECK-NEXT: mv s0, a0
-; CHECK-NEXT: call __truncdfhf2@plt
+; CHECK-NEXT: call __truncdfhf2
; CHECK-NEXT: fmv.x.w a0, fa0
; CHECK-NEXT: sh a0, 0(s0)
; CHECK-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
@@ -96,12 +96,12 @@ define void @test_fadd(ptr %p, ptr %q) nounwind {
; CHECK-NEXT: lhu a1, 0(a1)
; CHECK-NEXT: fmv.w.x fs0, a0
; CHECK-NEXT: fmv.w.x fa0, a1
-; CHECK-NEXT: call __extendhfsf2@plt
+; CHECK-NEXT: call __extendhfsf2
; CHECK-NEXT: fmv.s fs1, fa0
; CHECK-NEXT: fmv.s fa0, fs0
-; CHECK-NEXT: call __extendhfsf2@plt
+; CHECK-NEXT: call __extendhfsf2
; CHECK-NEXT: fadd.s fa0, fa0, fs1
-; CHECK-NEXT: call __truncsfhf2@plt
+; CHECK-NEXT: call __truncsfhf2
; CHECK-NEXT: fmv.x.w a0, fa0
; CHECK-NEXT: sh a0, 0(s0)
; CHECK-NEXT: lw ra, 28(sp) # 4-byte Folded Reload
@@ -130,12 +130,12 @@ define void @test_fmul(ptr %p, ptr %q) nounwind {
; CHECK-NEXT: lhu a1, 0(a1)
; CHECK-NEXT: fmv.w.x fs0, a0
; CHECK-NEXT: fmv.w.x fa0, a1
-; CHECK-NEXT: call __extendhfsf2@plt
+; CHECK-NEXT: call __extendhfsf2
; CHECK-NEXT: fmv.s fs1, fa0
; CHECK-NEXT: fmv.s fa0, fs0
-; CHECK-NEXT: call __extendhfsf2@plt
+; CHECK-NEXT: call __extendhfsf2
; CHECK-NEXT: fmul.s fa0, fa0, fs1
-; CHECK-NEXT: call __truncsfhf2@plt
+; CHECK-NEXT: call __truncsfhf2
; CHECK-NEXT: fmv.x.w a0, fa0
; CHECK-NEXT: sh a0, 0(s0)
; CHECK-NEXT: lw ra, 28(sp) # 4-byte Folded Reload
diff --git a/llvm/test/CodeGen/RISCV/fpclamptosat.ll b/llvm/test/CodeGen/RISCV/fpclamptosat.ll
index b091b0613c0f..3880ac9f4ec6 100644
--- a/llvm/test/CodeGen/RISCV/fpclamptosat.ll
+++ b/llvm/test/CodeGen/RISCV/fpclamptosat.ll
@@ -17,7 +17,7 @@ define i32 @stest_f64i32(double %x) {
; RV32IF-NEXT: .cfi_def_cfa_offset 16
; RV32IF-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32IF-NEXT: .cfi_offset ra, -4
-; RV32IF-NEXT: call __fixdfdi@plt
+; RV32IF-NEXT: call __fixdfdi
; RV32IF-NEXT: lui a2, 524288
; RV32IF-NEXT: addi a3, a2, -1
; RV32IF-NEXT: beqz a1, .LBB0_2
@@ -56,7 +56,7 @@ define i32 @stest_f64i32(double %x) {
; RV64IF-NEXT: .cfi_def_cfa_offset 16
; RV64IF-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64IF-NEXT: .cfi_offset ra, -8
-; RV64IF-NEXT: call __fixdfdi@plt
+; RV64IF-NEXT: call __fixdfdi
; RV64IF-NEXT: lui a1, 524288
; RV64IF-NEXT: addiw a2, a1, -1
; RV64IF-NEXT: blt a0, a2, .LBB0_2
@@ -113,7 +113,7 @@ define i32 @utest_f64i32(double %x) {
; RV32IF-NEXT: .cfi_def_cfa_offset 16
; RV32IF-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32IF-NEXT: .cfi_offset ra, -4
-; RV32IF-NEXT: call __fixunsdfdi@plt
+; RV32IF-NEXT: call __fixunsdfdi
; RV32IF-NEXT: sltiu a2, a0, -1
; RV32IF-NEXT: seqz a1, a1
; RV32IF-NEXT: and a1, a1, a2
@@ -129,7 +129,7 @@ define i32 @utest_f64i32(double %x) {
; RV64IF-NEXT: .cfi_def_cfa_offset 16
; RV64IF-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64IF-NEXT: .cfi_offset ra, -8
-; RV64IF-NEXT: call __fixunsdfdi@plt
+; RV64IF-NEXT: call __fixunsdfdi
; RV64IF-NEXT: li a1, -1
; RV64IF-NEXT: srli a1, a1, 32
; RV64IF-NEXT: bltu a0, a1, .LBB1_2
@@ -174,7 +174,7 @@ define i32 @ustest_f64i32(double %x) {
; RV32IF-NEXT: .cfi_def_cfa_offset 16
; RV32IF-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32IF-NEXT: .cfi_offset ra, -4
-; RV32IF-NEXT: call __fixdfdi@plt
+; RV32IF-NEXT: call __fixdfdi
; RV32IF-NEXT: beqz a1, .LBB2_2
; RV32IF-NEXT: # %bb.1: # %entry
; RV32IF-NEXT: slti a2, a1, 0
@@ -205,7 +205,7 @@ define i32 @ustest_f64i32(double %x) {
; RV64IF-NEXT: .cfi_def_cfa_offset 16
; RV64IF-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64IF-NEXT: .cfi_offset ra, -8
-; RV64IF-NEXT: call __fixdfdi@plt
+; RV64IF-NEXT: call __fixdfdi
; RV64IF-NEXT: li a1, -1
; RV64IF-NEXT: srli a1, a1, 32
; RV64IF-NEXT: blt a0, a1, .LBB2_2
@@ -355,8 +355,8 @@ define i32 @stest_f16i32(half %x) {
; RV32-NEXT: .cfi_def_cfa_offset 16
; RV32-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32-NEXT: .cfi_offset ra, -4
-; RV32-NEXT: call __extendhfsf2@plt
-; RV32-NEXT: call __fixsfdi@plt
+; RV32-NEXT: call __extendhfsf2
+; RV32-NEXT: call __fixsfdi
; RV32-NEXT: lui a2, 524288
; RV32-NEXT: addi a3, a2, -1
; RV32-NEXT: beqz a1, .LBB6_2
@@ -395,7 +395,7 @@ define i32 @stest_f16i32(half %x) {
; RV64-NEXT: .cfi_def_cfa_offset 16
; RV64-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64-NEXT: .cfi_offset ra, -8
-; RV64-NEXT: call __extendhfsf2@plt
+; RV64-NEXT: call __extendhfsf2
; RV64-NEXT: fcvt.l.s a0, fa0, rtz
; RV64-NEXT: lui a1, 524288
; RV64-NEXT: addiw a2, a1, -1
@@ -427,8 +427,8 @@ define i32 @utesth_f16i32(half %x) {
; RV32-NEXT: .cfi_def_cfa_offset 16
; RV32-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32-NEXT: .cfi_offset ra, -4
-; RV32-NEXT: call __extendhfsf2@plt
-; RV32-NEXT: call __fixunssfdi@plt
+; RV32-NEXT: call __extendhfsf2
+; RV32-NEXT: call __fixunssfdi
; RV32-NEXT: sltiu a2, a0, -1
; RV32-NEXT: seqz a1, a1
; RV32-NEXT: and a1, a1, a2
@@ -444,7 +444,7 @@ define i32 @utesth_f16i32(half %x) {
; RV64-NEXT: .cfi_def_cfa_offset 16
; RV64-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64-NEXT: .cfi_offset ra, -8
-; RV64-NEXT: call __extendhfsf2@plt
+; RV64-NEXT: call __extendhfsf2
; RV64-NEXT: fcvt.lu.s a0, fa0, rtz
; RV64-NEXT: li a1, -1
; RV64-NEXT: srli a1, a1, 32
@@ -470,8 +470,8 @@ define i32 @ustest_f16i32(half %x) {
; RV32-NEXT: .cfi_def_cfa_offset 16
; RV32-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32-NEXT: .cfi_offset ra, -4
-; RV32-NEXT: call __extendhfsf2@plt
-; RV32-NEXT: call __fixsfdi@plt
+; RV32-NEXT: call __extendhfsf2
+; RV32-NEXT: call __fixsfdi
; RV32-NEXT: beqz a1, .LBB8_2
; RV32-NEXT: # %bb.1: # %entry
; RV32-NEXT: slti a2, a1, 0
@@ -502,7 +502,7 @@ define i32 @ustest_f16i32(half %x) {
; RV64-NEXT: .cfi_def_cfa_offset 16
; RV64-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64-NEXT: .cfi_offset ra, -8
-; RV64-NEXT: call __extendhfsf2@plt
+; RV64-NEXT: call __extendhfsf2
; RV64-NEXT: fcvt.l.s a0, fa0, rtz
; RV64-NEXT: li a1, -1
; RV64-NEXT: srli a1, a1, 32
@@ -535,7 +535,7 @@ define i16 @stest_f64i16(double %x) {
; RV32IF-NEXT: .cfi_def_cfa_offset 16
; RV32IF-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32IF-NEXT: .cfi_offset ra, -4
-; RV32IF-NEXT: call __fixdfsi@plt
+; RV32IF-NEXT: call __fixdfsi
; RV32IF-NEXT: lui a1, 8
; RV32IF-NEXT: addi a1, a1, -1
; RV32IF-NEXT: blt a0, a1, .LBB9_2
@@ -557,7 +557,7 @@ define i16 @stest_f64i16(double %x) {
; RV64IF-NEXT: .cfi_def_cfa_offset 16
; RV64IF-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64IF-NEXT: .cfi_offset ra, -8
-; RV64IF-NEXT: call __fixdfsi@plt
+; RV64IF-NEXT: call __fixdfsi
; RV64IF-NEXT: lui a1, 8
; RV64IF-NEXT: addiw a1, a1, -1
; RV64IF-NEXT: blt a0, a1, .LBB9_2
@@ -627,7 +627,7 @@ define i16 @utest_f64i16(double %x) {
; RV32IF-NEXT: .cfi_def_cfa_offset 16
; RV32IF-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32IF-NEXT: .cfi_offset ra, -4
-; RV32IF-NEXT: call __fixunsdfsi@plt
+; RV32IF-NEXT: call __fixunsdfsi
; RV32IF-NEXT: lui a1, 16
; RV32IF-NEXT: addi a1, a1, -1
; RV32IF-NEXT: bltu a0, a1, .LBB10_2
@@ -644,7 +644,7 @@ define i16 @utest_f64i16(double %x) {
; RV64IF-NEXT: .cfi_def_cfa_offset 16
; RV64IF-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64IF-NEXT: .cfi_offset ra, -8
-; RV64IF-NEXT: call __fixunsdfsi@plt
+; RV64IF-NEXT: call __fixunsdfsi
; RV64IF-NEXT: lui a1, 16
; RV64IF-NEXT: addiw a1, a1, -1
; RV64IF-NEXT: bltu a0, a1, .LBB10_2
@@ -691,7 +691,7 @@ define i16 @ustest_f64i16(double %x) {
; RV32IF-NEXT: .cfi_def_cfa_offset 16
; RV32IF-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32IF-NEXT: .cfi_offset ra, -4
-; RV32IF-NEXT: call __fixdfsi@plt
+; RV32IF-NEXT: call __fixdfsi
; RV32IF-NEXT: lui a1, 16
; RV32IF-NEXT: addi a1, a1, -1
; RV32IF-NEXT: blt a0, a1, .LBB11_2
@@ -711,7 +711,7 @@ define i16 @ustest_f64i16(double %x) {
; RV64IF-NEXT: .cfi_def_cfa_offset 16
; RV64IF-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64IF-NEXT: .cfi_offset ra, -8
-; RV64IF-NEXT: call __fixdfsi@plt
+; RV64IF-NEXT: call __fixdfsi
; RV64IF-NEXT: lui a1, 16
; RV64IF-NEXT: addiw a1, a1, -1
; RV64IF-NEXT: blt a0, a1, .LBB11_2
@@ -885,7 +885,7 @@ define i16 @stest_f16i16(half %x) {
; RV32-NEXT: .cfi_def_cfa_offset 16
; RV32-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32-NEXT: .cfi_offset ra, -4
-; RV32-NEXT: call __extendhfsf2@plt
+; RV32-NEXT: call __extendhfsf2
; RV32-NEXT: fcvt.w.s a0, fa0, rtz
; RV32-NEXT: lui a1, 8
; RV32-NEXT: addi a1, a1, -1
@@ -908,7 +908,7 @@ define i16 @stest_f16i16(half %x) {
; RV64-NEXT: .cfi_def_cfa_offset 16
; RV64-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64-NEXT: .cfi_offset ra, -8
-; RV64-NEXT: call __extendhfsf2@plt
+; RV64-NEXT: call __extendhfsf2
; RV64-NEXT: fcvt.l.s a0, fa0, rtz
; RV64-NEXT: lui a1, 8
; RV64-NEXT: addiw a1, a1, -1
@@ -941,7 +941,7 @@ define i16 @utesth_f16i16(half %x) {
; RV32-NEXT: .cfi_def_cfa_offset 16
; RV32-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32-NEXT: .cfi_offset ra, -4
-; RV32-NEXT: call __extendhfsf2@plt
+; RV32-NEXT: call __extendhfsf2
; RV32-NEXT: fcvt.wu.s a0, fa0, rtz
; RV32-NEXT: lui a1, 16
; RV32-NEXT: addi a1, a1, -1
@@ -959,7 +959,7 @@ define i16 @utesth_f16i16(half %x) {
; RV64-NEXT: .cfi_def_cfa_offset 16
; RV64-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64-NEXT: .cfi_offset ra, -8
-; RV64-NEXT: call __extendhfsf2@plt
+; RV64-NEXT: call __extendhfsf2
; RV64-NEXT: fcvt.lu.s a0, fa0, rtz
; RV64-NEXT: lui a1, 16
; RV64-NEXT: addiw a1, a1, -1
@@ -985,7 +985,7 @@ define i16 @ustest_f16i16(half %x) {
; RV32-NEXT: .cfi_def_cfa_offset 16
; RV32-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32-NEXT: .cfi_offset ra, -4
-; RV32-NEXT: call __extendhfsf2@plt
+; RV32-NEXT: call __extendhfsf2
; RV32-NEXT: fcvt.w.s a0, fa0, rtz
; RV32-NEXT: lui a1, 16
; RV32-NEXT: addi a1, a1, -1
@@ -1006,7 +1006,7 @@ define i16 @ustest_f16i16(half %x) {
; RV64-NEXT: .cfi_def_cfa_offset 16
; RV64-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64-NEXT: .cfi_offset ra, -8
-; RV64-NEXT: call __extendhfsf2@plt
+; RV64-NEXT: call __extendhfsf2
; RV64-NEXT: fcvt.l.s a0, fa0, rtz
; RV64-NEXT: lui a1, 16
; RV64-NEXT: addiw a1, a1, -1
@@ -1042,7 +1042,7 @@ define i64 @stest_f64i64(double %x) {
; RV32IF-NEXT: mv a2, a1
; RV32IF-NEXT: mv a1, a0
; RV32IF-NEXT: addi a0, sp, 8
-; RV32IF-NEXT: call __fixdfti@plt
+; RV32IF-NEXT: call __fixdfti
; RV32IF-NEXT: lw a0, 20(sp)
; RV32IF-NEXT: lw a2, 16(sp)
; RV32IF-NEXT: lw a1, 12(sp)
@@ -1101,7 +1101,7 @@ define i64 @stest_f64i64(double %x) {
; RV64IF-NEXT: .cfi_def_cfa_offset 16
; RV64IF-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64IF-NEXT: .cfi_offset ra, -8
-; RV64IF-NEXT: call __fixdfti@plt
+; RV64IF-NEXT: call __fixdfti
; RV64IF-NEXT: li a2, -1
; RV64IF-NEXT: srli a3, a2, 1
; RV64IF-NEXT: beqz a1, .LBB18_2
@@ -1141,7 +1141,7 @@ define i64 @stest_f64i64(double %x) {
; RV32IFD-NEXT: sw ra, 28(sp) # 4-byte Folded Spill
; RV32IFD-NEXT: .cfi_offset ra, -4
; RV32IFD-NEXT: addi a0, sp, 8
-; RV32IFD-NEXT: call __fixdfti@plt
+; RV32IFD-NEXT: call __fixdfti
; RV32IFD-NEXT: lw a0, 20(sp)
; RV32IFD-NEXT: lw a2, 16(sp)
; RV32IFD-NEXT: lw a1, 12(sp)
@@ -1222,7 +1222,7 @@ define i64 @utest_f64i64(double %x) {
; RV32IF-NEXT: mv a2, a1
; RV32IF-NEXT: mv a1, a0
; RV32IF-NEXT: addi a0, sp, 8
-; RV32IF-NEXT: call __fixunsdfti@plt
+; RV32IF-NEXT: call __fixunsdfti
; RV32IF-NEXT: lw a0, 16(sp)
; RV32IF-NEXT: lw a1, 20(sp)
; RV32IF-NEXT: lw a2, 12(sp)
@@ -1247,7 +1247,7 @@ define i64 @utest_f64i64(double %x) {
; RV64-NEXT: .cfi_def_cfa_offset 16
; RV64-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64-NEXT: .cfi_offset ra, -8
-; RV64-NEXT: call __fixunsdfti@plt
+; RV64-NEXT: call __fixunsdfti
; RV64-NEXT: snez a1, a1
; RV64-NEXT: addi a1, a1, -1
; RV64-NEXT: and a0, a1, a0
@@ -1262,7 +1262,7 @@ define i64 @utest_f64i64(double %x) {
; RV32IFD-NEXT: sw ra, 28(sp) # 4-byte Folded Spill
; RV32IFD-NEXT: .cfi_offset ra, -4
; RV32IFD-NEXT: addi a0, sp, 8
-; RV32IFD-NEXT: call __fixunsdfti@plt
+; RV32IFD-NEXT: call __fixunsdfti
; RV32IFD-NEXT: lw a0, 16(sp)
; RV32IFD-NEXT: lw a1, 20(sp)
; RV32IFD-NEXT: lw a2, 12(sp)
@@ -1298,7 +1298,7 @@ define i64 @ustest_f64i64(double %x) {
; RV32IF-NEXT: mv a2, a1
; RV32IF-NEXT: mv a1, a0
; RV32IF-NEXT: addi a0, sp, 8
-; RV32IF-NEXT: call __fixdfti@plt
+; RV32IF-NEXT: call __fixdfti
; RV32IF-NEXT: lw a1, 20(sp)
; RV32IF-NEXT: lw a0, 16(sp)
; RV32IF-NEXT: beqz a1, .LBB20_2
@@ -1349,7 +1349,7 @@ define i64 @ustest_f64i64(double %x) {
; RV64-NEXT: .cfi_def_cfa_offset 16
; RV64-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64-NEXT: .cfi_offset ra, -8
-; RV64-NEXT: call __fixdfti@plt
+; RV64-NEXT: call __fixdfti
; RV64-NEXT: slti a2, a1, 1
; RV64-NEXT: blez a1, .LBB20_2
; RV64-NEXT: # %bb.1: # %entry
@@ -1377,7 +1377,7 @@ define i64 @ustest_f64i64(double %x) {
; RV32IFD-NEXT: sw ra, 28(sp) # 4-byte Folded Spill
; RV32IFD-NEXT: .cfi_offset ra, -4
; RV32IFD-NEXT: addi a0, sp, 8
-; RV32IFD-NEXT: call __fixdfti@plt
+; RV32IFD-NEXT: call __fixdfti
; RV32IFD-NEXT: lw a1, 20(sp)
; RV32IFD-NEXT: lw a0, 16(sp)
; RV32IFD-NEXT: beqz a1, .LBB20_2
@@ -1439,7 +1439,7 @@ define i64 @stest_f32i64(float %x) {
; RV32-NEXT: sw ra, 28(sp) # 4-byte Folded Spill
; RV32-NEXT: .cfi_offset ra, -4
; RV32-NEXT: addi a0, sp, 8
-; RV32-NEXT: call __fixsfti@plt
+; RV32-NEXT: call __fixsfti
; RV32-NEXT: lw a0, 20(sp)
; RV32-NEXT: lw a2, 16(sp)
; RV32-NEXT: lw a1, 12(sp)
@@ -1518,7 +1518,7 @@ define i64 @utest_f32i64(float %x) {
; RV32-NEXT: sw ra, 28(sp) # 4-byte Folded Spill
; RV32-NEXT: .cfi_offset ra, -4
; RV32-NEXT: addi a0, sp, 8
-; RV32-NEXT: call __fixunssfti@plt
+; RV32-NEXT: call __fixunssfti
; RV32-NEXT: lw a0, 16(sp)
; RV32-NEXT: lw a1, 20(sp)
; RV32-NEXT: lw a2, 12(sp)
@@ -1543,7 +1543,7 @@ define i64 @utest_f32i64(float %x) {
; RV64-NEXT: .cfi_def_cfa_offset 16
; RV64-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64-NEXT: .cfi_offset ra, -8
-; RV64-NEXT: call __fixunssfti@plt
+; RV64-NEXT: call __fixunssfti
; RV64-NEXT: snez a1, a1
; RV64-NEXT: addi a1, a1, -1
; RV64-NEXT: and a0, a1, a0
@@ -1566,7 +1566,7 @@ define i64 @ustest_f32i64(float %x) {
; RV32-NEXT: sw ra, 28(sp) # 4-byte Folded Spill
; RV32-NEXT: .cfi_offset ra, -4
; RV32-NEXT: addi a0, sp, 8
-; RV32-NEXT: call __fixsfti@plt
+; RV32-NEXT: call __fixsfti
; RV32-NEXT: lw a1, 20(sp)
; RV32-NEXT: lw a0, 16(sp)
; RV32-NEXT: beqz a1, .LBB23_2
@@ -1617,7 +1617,7 @@ define i64 @ustest_f32i64(float %x) {
; RV64-NEXT: .cfi_def_cfa_offset 16
; RV64-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64-NEXT: .cfi_offset ra, -8
-; RV64-NEXT: call __fixsfti@plt
+; RV64-NEXT: call __fixsfti
; RV64-NEXT: slti a2, a1, 1
; RV64-NEXT: blez a1, .LBB23_2
; RV64-NEXT: # %bb.1: # %entry
@@ -1654,9 +1654,9 @@ define i64 @stest_f16i64(half %x) {
; RV32-NEXT: .cfi_def_cfa_offset 32
; RV32-NEXT: sw ra, 28(sp) # 4-byte Folded Spill
; RV32-NEXT: .cfi_offset ra, -4
-; RV32-NEXT: call __extendhfsf2@plt
+; RV32-NEXT: call __extendhfsf2
; RV32-NEXT: addi a0, sp, 8
-; RV32-NEXT: call __fixsfti@plt
+; RV32-NEXT: call __fixsfti
; RV32-NEXT: lw a0, 20(sp)
; RV32-NEXT: lw a2, 16(sp)
; RV32-NEXT: lw a1, 12(sp)
@@ -1715,8 +1715,8 @@ define i64 @stest_f16i64(half %x) {
; RV64-NEXT: .cfi_def_cfa_offset 16
; RV64-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64-NEXT: .cfi_offset ra, -8
-; RV64-NEXT: call __extendhfsf2@plt
-; RV64-NEXT: call __fixsfti@plt
+; RV64-NEXT: call __extendhfsf2
+; RV64-NEXT: call __fixsfti
; RV64-NEXT: li a2, -1
; RV64-NEXT: srli a3, a2, 1
; RV64-NEXT: beqz a1, .LBB24_2
@@ -1765,9 +1765,9 @@ define i64 @utesth_f16i64(half %x) {
; RV32-NEXT: .cfi_def_cfa_offset 32
; RV32-NEXT: sw ra, 28(sp) # 4-byte Folded Spill
; RV32-NEXT: .cfi_offset ra, -4
-; RV32-NEXT: call __extendhfsf2@plt
+; RV32-NEXT: call __extendhfsf2
; RV32-NEXT: addi a0, sp, 8
-; RV32-NEXT: call __fixunssfti@plt
+; RV32-NEXT: call __fixunssfti
; RV32-NEXT: lw a0, 16(sp)
; RV32-NEXT: lw a1, 20(sp)
; RV32-NEXT: lw a2, 12(sp)
@@ -1792,8 +1792,8 @@ define i64 @utesth_f16i64(half %x) {
; RV64-NEXT: .cfi_def_cfa_offset 16
; RV64-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64-NEXT: .cfi_offset ra, -8
-; RV64-NEXT: call __extendhfsf2@plt
-; RV64-NEXT: call __fixunssfti@plt
+; RV64-NEXT: call __extendhfsf2
+; RV64-NEXT: call __fixunssfti
; RV64-NEXT: snez a1, a1
; RV64-NEXT: addi a1, a1, -1
; RV64-NEXT: and a0, a1, a0
@@ -1815,9 +1815,9 @@ define i64 @ustest_f16i64(half %x) {
; RV32-NEXT: .cfi_def_cfa_offset 32
; RV32-NEXT: sw ra, 28(sp) # 4-byte Folded Spill
; RV32-NEXT: .cfi_offset ra, -4
-; RV32-NEXT: call __extendhfsf2@plt
+; RV32-NEXT: call __extendhfsf2
; RV32-NEXT: addi a0, sp, 8
-; RV32-NEXT: call __fixsfti@plt
+; RV32-NEXT: call __fixsfti
; RV32-NEXT: lw a1, 20(sp)
; RV32-NEXT: lw a0, 16(sp)
; RV32-NEXT: beqz a1, .LBB26_2
@@ -1868,8 +1868,8 @@ define i64 @ustest_f16i64(half %x) {
; RV64-NEXT: .cfi_def_cfa_offset 16
; RV64-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64-NEXT: .cfi_offset ra, -8
-; RV64-NEXT: call __extendhfsf2@plt
-; RV64-NEXT: call __fixsfti@plt
+; RV64-NEXT: call __extendhfsf2
+; RV64-NEXT: call __fixsfti
; RV64-NEXT: slti a2, a1, 1
; RV64-NEXT: blez a1, .LBB26_2
; RV64-NEXT: # %bb.1: # %entry
@@ -1911,7 +1911,7 @@ define i32 @stest_f64i32_mm(double %x) {
; RV32IF-NEXT: .cfi_def_cfa_offset 16
; RV32IF-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32IF-NEXT: .cfi_offset ra, -4
-; RV32IF-NEXT: call __fixdfdi@plt
+; RV32IF-NEXT: call __fixdfdi
; RV32IF-NEXT: lui a2, 524288
; RV32IF-NEXT: addi a3, a2, -1
; RV32IF-NEXT: beqz a1, .LBB27_2
@@ -1950,7 +1950,7 @@ define i32 @stest_f64i32_mm(double %x) {
; RV64IF-NEXT: .cfi_def_cfa_offset 16
; RV64IF-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64IF-NEXT: .cfi_offset ra, -8
-; RV64IF-NEXT: call __fixdfdi@plt
+; RV64IF-NEXT: call __fixdfdi
; RV64IF-NEXT: lui a1, 524288
; RV64IF-NEXT: addiw a2, a1, -1
; RV64IF-NEXT: blt a0, a2, .LBB27_2
@@ -2005,7 +2005,7 @@ define i32 @utest_f64i32_mm(double %x) {
; RV32IF-NEXT: .cfi_def_cfa_offset 16
; RV32IF-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32IF-NEXT: .cfi_offset ra, -4
-; RV32IF-NEXT: call __fixunsdfdi@plt
+; RV32IF-NEXT: call __fixunsdfdi
; RV32IF-NEXT: seqz a1, a1
; RV32IF-NEXT: addi a1, a1, -1
; RV32IF-NEXT: or a0, a1, a0
@@ -2019,7 +2019,7 @@ define i32 @utest_f64i32_mm(double %x) {
; RV64IF-NEXT: .cfi_def_cfa_offset 16
; RV64IF-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64IF-NEXT: .cfi_offset ra, -8
-; RV64IF-NEXT: call __fixunsdfdi@plt
+; RV64IF-NEXT: call __fixunsdfdi
; RV64IF-NEXT: li a1, -1
; RV64IF-NEXT: srli a1, a1, 32
; RV64IF-NEXT: bltu a0, a1, .LBB28_2
@@ -2063,7 +2063,7 @@ define i32 @ustest_f64i32_mm(double %x) {
; RV32IF-NEXT: .cfi_def_cfa_offset 16
; RV32IF-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32IF-NEXT: .cfi_offset ra, -4
-; RV32IF-NEXT: call __fixdfdi@plt
+; RV32IF-NEXT: call __fixdfdi
; RV32IF-NEXT: bnez a1, .LBB29_2
; RV32IF-NEXT: # %bb.1: # %entry
; RV32IF-NEXT: li a2, 1
@@ -2088,7 +2088,7 @@ define i32 @ustest_f64i32_mm(double %x) {
; RV64IF-NEXT: .cfi_def_cfa_offset 16
; RV64IF-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64IF-NEXT: .cfi_offset ra, -8
-; RV64IF-NEXT: call __fixdfdi@plt
+; RV64IF-NEXT: call __fixdfdi
; RV64IF-NEXT: li a1, -1
; RV64IF-NEXT: srli a1, a1, 32
; RV64IF-NEXT: blt a0, a1, .LBB29_2
@@ -2231,8 +2231,8 @@ define i32 @stest_f16i32_mm(half %x) {
; RV32-NEXT: .cfi_def_cfa_offset 16
; RV32-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32-NEXT: .cfi_offset ra, -4
-; RV32-NEXT: call __extendhfsf2@plt
-; RV32-NEXT: call __fixsfdi@plt
+; RV32-NEXT: call __extendhfsf2
+; RV32-NEXT: call __fixsfdi
; RV32-NEXT: lui a2, 524288
; RV32-NEXT: addi a3, a2, -1
; RV32-NEXT: beqz a1, .LBB33_2
@@ -2271,7 +2271,7 @@ define i32 @stest_f16i32_mm(half %x) {
; RV64-NEXT: .cfi_def_cfa_offset 16
; RV64-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64-NEXT: .cfi_offset ra, -8
-; RV64-NEXT: call __extendhfsf2@plt
+; RV64-NEXT: call __extendhfsf2
; RV64-NEXT: fcvt.l.s a0, fa0, rtz
; RV64-NEXT: lui a1, 524288
; RV64-NEXT: addiw a2, a1, -1
@@ -2301,8 +2301,8 @@ define i32 @utesth_f16i32_mm(half %x) {
; RV32-NEXT: .cfi_def_cfa_offset 16
; RV32-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32-NEXT: .cfi_offset ra, -4
-; RV32-NEXT: call __extendhfsf2@plt
-; RV32-NEXT: call __fixunssfdi@plt
+; RV32-NEXT: call __extendhfsf2
+; RV32-NEXT: call __fixunssfdi
; RV32-NEXT: seqz a1, a1
; RV32-NEXT: addi a1, a1, -1
; RV32-NEXT: or a0, a1, a0
@@ -2316,7 +2316,7 @@ define i32 @utesth_f16i32_mm(half %x) {
; RV64-NEXT: .cfi_def_cfa_offset 16
; RV64-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64-NEXT: .cfi_offset ra, -8
-; RV64-NEXT: call __extendhfsf2@plt
+; RV64-NEXT: call __extendhfsf2
; RV64-NEXT: fcvt.lu.s a0, fa0, rtz
; RV64-NEXT: li a1, -1
; RV64-NEXT: srli a1, a1, 32
@@ -2341,8 +2341,8 @@ define i32 @ustest_f16i32_mm(half %x) {
; RV32-NEXT: .cfi_def_cfa_offset 16
; RV32-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32-NEXT: .cfi_offset ra, -4
-; RV32-NEXT: call __extendhfsf2@plt
-; RV32-NEXT: call __fixsfdi@plt
+; RV32-NEXT: call __extendhfsf2
+; RV32-NEXT: call __fixsfdi
; RV32-NEXT: bnez a1, .LBB35_2
; RV32-NEXT: # %bb.1: # %entry
; RV32-NEXT: li a2, 1
@@ -2367,7 +2367,7 @@ define i32 @ustest_f16i32_mm(half %x) {
; RV64-NEXT: .cfi_def_cfa_offset 16
; RV64-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64-NEXT: .cfi_offset ra, -8
-; RV64-NEXT: call __extendhfsf2@plt
+; RV64-NEXT: call __extendhfsf2
; RV64-NEXT: fcvt.l.s a0, fa0, rtz
; RV64-NEXT: li a1, -1
; RV64-NEXT: srli a1, a1, 32
@@ -2398,7 +2398,7 @@ define i16 @stest_f64i16_mm(double %x) {
; RV32IF-NEXT: .cfi_def_cfa_offset 16
; RV32IF-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32IF-NEXT: .cfi_offset ra, -4
-; RV32IF-NEXT: call __fixdfsi@plt
+; RV32IF-NEXT: call __fixdfsi
; RV32IF-NEXT: lui a1, 8
; RV32IF-NEXT: addi a1, a1, -1
; RV32IF-NEXT: blt a0, a1, .LBB36_2
@@ -2420,7 +2420,7 @@ define i16 @stest_f64i16_mm(double %x) {
; RV64IF-NEXT: .cfi_def_cfa_offset 16
; RV64IF-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64IF-NEXT: .cfi_offset ra, -8
-; RV64IF-NEXT: call __fixdfsi@plt
+; RV64IF-NEXT: call __fixdfsi
; RV64IF-NEXT: lui a1, 8
; RV64IF-NEXT: addiw a1, a1, -1
; RV64IF-NEXT: blt a0, a1, .LBB36_2
@@ -2488,7 +2488,7 @@ define i16 @utest_f64i16_mm(double %x) {
; RV32IF-NEXT: .cfi_def_cfa_offset 16
; RV32IF-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32IF-NEXT: .cfi_offset ra, -4
-; RV32IF-NEXT: call __fixunsdfsi@plt
+; RV32IF-NEXT: call __fixunsdfsi
; RV32IF-NEXT: lui a1, 16
; RV32IF-NEXT: addi a1, a1, -1
; RV32IF-NEXT: bltu a0, a1, .LBB37_2
@@ -2505,7 +2505,7 @@ define i16 @utest_f64i16_mm(double %x) {
; RV64IF-NEXT: .cfi_def_cfa_offset 16
; RV64IF-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64IF-NEXT: .cfi_offset ra, -8
-; RV64IF-NEXT: call __fixunsdfsi@plt
+; RV64IF-NEXT: call __fixunsdfsi
; RV64IF-NEXT: lui a1, 16
; RV64IF-NEXT: addiw a1, a1, -1
; RV64IF-NEXT: bltu a0, a1, .LBB37_2
@@ -2551,7 +2551,7 @@ define i16 @ustest_f64i16_mm(double %x) {
; RV32IF-NEXT: .cfi_def_cfa_offset 16
; RV32IF-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32IF-NEXT: .cfi_offset ra, -4
-; RV32IF-NEXT: call __fixdfsi@plt
+; RV32IF-NEXT: call __fixdfsi
; RV32IF-NEXT: lui a1, 16
; RV32IF-NEXT: addi a1, a1, -1
; RV32IF-NEXT: blt a0, a1, .LBB38_2
@@ -2571,7 +2571,7 @@ define i16 @ustest_f64i16_mm(double %x) {
; RV64IF-NEXT: .cfi_def_cfa_offset 16
; RV64IF-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64IF-NEXT: .cfi_offset ra, -8
-; RV64IF-NEXT: call __fixdfsi@plt
+; RV64IF-NEXT: call __fixdfsi
; RV64IF-NEXT: lui a1, 16
; RV64IF-NEXT: addiw a1, a1, -1
; RV64IF-NEXT: blt a0, a1, .LBB38_2
@@ -2738,7 +2738,7 @@ define i16 @stest_f16i16_mm(half %x) {
; RV32-NEXT: .cfi_def_cfa_offset 16
; RV32-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32-NEXT: .cfi_offset ra, -4
-; RV32-NEXT: call __extendhfsf2@plt
+; RV32-NEXT: call __extendhfsf2
; RV32-NEXT: fcvt.w.s a0, fa0, rtz
; RV32-NEXT: lui a1, 8
; RV32-NEXT: addi a1, a1, -1
@@ -2761,7 +2761,7 @@ define i16 @stest_f16i16_mm(half %x) {
; RV64-NEXT: .cfi_def_cfa_offset 16
; RV64-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64-NEXT: .cfi_offset ra, -8
-; RV64-NEXT: call __extendhfsf2@plt
+; RV64-NEXT: call __extendhfsf2
; RV64-NEXT: fcvt.l.s a0, fa0, rtz
; RV64-NEXT: lui a1, 8
; RV64-NEXT: addiw a1, a1, -1
@@ -2792,7 +2792,7 @@ define i16 @utesth_f16i16_mm(half %x) {
; RV32-NEXT: .cfi_def_cfa_offset 16
; RV32-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32-NEXT: .cfi_offset ra, -4
-; RV32-NEXT: call __extendhfsf2@plt
+; RV32-NEXT: call __extendhfsf2
; RV32-NEXT: fcvt.wu.s a0, fa0, rtz
; RV32-NEXT: lui a1, 16
; RV32-NEXT: addi a1, a1, -1
@@ -2810,7 +2810,7 @@ define i16 @utesth_f16i16_mm(half %x) {
; RV64-NEXT: .cfi_def_cfa_offset 16
; RV64-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64-NEXT: .cfi_offset ra, -8
-; RV64-NEXT: call __extendhfsf2@plt
+; RV64-NEXT: call __extendhfsf2
; RV64-NEXT: fcvt.lu.s a0, fa0, rtz
; RV64-NEXT: sext.w a0, a0
; RV64-NEXT: lui a1, 16
@@ -2836,7 +2836,7 @@ define i16 @ustest_f16i16_mm(half %x) {
; RV32-NEXT: .cfi_def_cfa_offset 16
; RV32-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32-NEXT: .cfi_offset ra, -4
-; RV32-NEXT: call __extendhfsf2@plt
+; RV32-NEXT: call __extendhfsf2
; RV32-NEXT: fcvt.w.s a0, fa0, rtz
; RV32-NEXT: lui a1, 16
; RV32-NEXT: addi a1, a1, -1
@@ -2857,7 +2857,7 @@ define i16 @ustest_f16i16_mm(half %x) {
; RV64-NEXT: .cfi_def_cfa_offset 16
; RV64-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64-NEXT: .cfi_offset ra, -8
-; RV64-NEXT: call __extendhfsf2@plt
+; RV64-NEXT: call __extendhfsf2
; RV64-NEXT: fcvt.l.s a0, fa0, rtz
; RV64-NEXT: lui a1, 16
; RV64-NEXT: addiw a1, a1, -1
@@ -2891,7 +2891,7 @@ define i64 @stest_f64i64_mm(double %x) {
; RV32IF-NEXT: mv a2, a1
; RV32IF-NEXT: mv a1, a0
; RV32IF-NEXT: addi a0, sp, 8
-; RV32IF-NEXT: call __fixdfti@plt
+; RV32IF-NEXT: call __fixdfti
; RV32IF-NEXT: lw a0, 20(sp)
; RV32IF-NEXT: lw a2, 16(sp)
; RV32IF-NEXT: lw a1, 12(sp)
@@ -2950,7 +2950,7 @@ define i64 @stest_f64i64_mm(double %x) {
; RV64IF-NEXT: .cfi_def_cfa_offset 16
; RV64IF-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64IF-NEXT: .cfi_offset ra, -8
-; RV64IF-NEXT: call __fixdfti@plt
+; RV64IF-NEXT: call __fixdfti
; RV64IF-NEXT: li a2, -1
; RV64IF-NEXT: srli a3, a2, 1
; RV64IF-NEXT: beqz a1, .LBB45_2
@@ -2990,7 +2990,7 @@ define i64 @stest_f64i64_mm(double %x) {
; RV32IFD-NEXT: sw ra, 28(sp) # 4-byte Folded Spill
; RV32IFD-NEXT: .cfi_offset ra, -4
; RV32IFD-NEXT: addi a0, sp, 8
-; RV32IFD-NEXT: call __fixdfti@plt
+; RV32IFD-NEXT: call __fixdfti
; RV32IFD-NEXT: lw a0, 20(sp)
; RV32IFD-NEXT: lw a2, 16(sp)
; RV32IFD-NEXT: lw a1, 12(sp)
@@ -3069,7 +3069,7 @@ define i64 @utest_f64i64_mm(double %x) {
; RV32IF-NEXT: mv a2, a1
; RV32IF-NEXT: mv a1, a0
; RV32IF-NEXT: addi a0, sp, 8
-; RV32IF-NEXT: call __fixunsdfti@plt
+; RV32IF-NEXT: call __fixunsdfti
; RV32IF-NEXT: lw a0, 16(sp)
; RV32IF-NEXT: lw a1, 20(sp)
; RV32IF-NEXT: lw a2, 12(sp)
@@ -3094,7 +3094,7 @@ define i64 @utest_f64i64_mm(double %x) {
; RV64-NEXT: .cfi_def_cfa_offset 16
; RV64-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64-NEXT: .cfi_offset ra, -8
-; RV64-NEXT: call __fixunsdfti@plt
+; RV64-NEXT: call __fixunsdfti
; RV64-NEXT: snez a1, a1
; RV64-NEXT: addi a1, a1, -1
; RV64-NEXT: and a0, a1, a0
@@ -3109,7 +3109,7 @@ define i64 @utest_f64i64_mm(double %x) {
; RV32IFD-NEXT: sw ra, 28(sp) # 4-byte Folded Spill
; RV32IFD-NEXT: .cfi_offset ra, -4
; RV32IFD-NEXT: addi a0, sp, 8
-; RV32IFD-NEXT: call __fixunsdfti@plt
+; RV32IFD-NEXT: call __fixunsdfti
; RV32IFD-NEXT: lw a0, 16(sp)
; RV32IFD-NEXT: lw a1, 20(sp)
; RV32IFD-NEXT: lw a2, 12(sp)
@@ -3144,7 +3144,7 @@ define i64 @ustest_f64i64_mm(double %x) {
; RV32IF-NEXT: mv a2, a1
; RV32IF-NEXT: mv a1, a0
; RV32IF-NEXT: addi a0, sp, 8
-; RV32IF-NEXT: call __fixdfti@plt
+; RV32IF-NEXT: call __fixdfti
; RV32IF-NEXT: lw a0, 8(sp)
; RV32IF-NEXT: lw a1, 12(sp)
; RV32IF-NEXT: lw a2, 20(sp)
@@ -3179,7 +3179,7 @@ define i64 @ustest_f64i64_mm(double %x) {
; RV64-NEXT: .cfi_def_cfa_offset 16
; RV64-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64-NEXT: .cfi_offset ra, -8
-; RV64-NEXT: call __fixdfti@plt
+; RV64-NEXT: call __fixdfti
; RV64-NEXT: mv a2, a1
; RV64-NEXT: blez a1, .LBB47_2
; RV64-NEXT: # %bb.1: # %entry
@@ -3202,7 +3202,7 @@ define i64 @ustest_f64i64_mm(double %x) {
; RV32IFD-NEXT: sw ra, 28(sp) # 4-byte Folded Spill
; RV32IFD-NEXT: .cfi_offset ra, -4
; RV32IFD-NEXT: addi a0, sp, 8
-; RV32IFD-NEXT: call __fixdfti@plt
+; RV32IFD-NEXT: call __fixdfti
; RV32IFD-NEXT: lw a0, 8(sp)
; RV32IFD-NEXT: lw a1, 12(sp)
; RV32IFD-NEXT: lw a2, 20(sp)
@@ -3246,7 +3246,7 @@ define i64 @stest_f32i64_mm(float %x) {
; RV32-NEXT: sw ra, 28(sp) # 4-byte Folded Spill
; RV32-NEXT: .cfi_offset ra, -4
; RV32-NEXT: addi a0, sp, 8
-; RV32-NEXT: call __fixsfti@plt
+; RV32-NEXT: call __fixsfti
; RV32-NEXT: lw a0, 20(sp)
; RV32-NEXT: lw a2, 16(sp)
; RV32-NEXT: lw a1, 12(sp)
@@ -3323,7 +3323,7 @@ define i64 @utest_f32i64_mm(float %x) {
; RV32-NEXT: sw ra, 28(sp) # 4-byte Folded Spill
; RV32-NEXT: .cfi_offset ra, -4
; RV32-NEXT: addi a0, sp, 8
-; RV32-NEXT: call __fixunssfti@plt
+; RV32-NEXT: call __fixunssfti
; RV32-NEXT: lw a0, 16(sp)
; RV32-NEXT: lw a1, 20(sp)
; RV32-NEXT: lw a2, 12(sp)
@@ -3348,7 +3348,7 @@ define i64 @utest_f32i64_mm(float %x) {
; RV64-NEXT: .cfi_def_cfa_offset 16
; RV64-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64-NEXT: .cfi_offset ra, -8
-; RV64-NEXT: call __fixunssfti@plt
+; RV64-NEXT: call __fixunssfti
; RV64-NEXT: snez a1, a1
; RV64-NEXT: addi a1, a1, -1
; RV64-NEXT: and a0, a1, a0
@@ -3370,7 +3370,7 @@ define i64 @ustest_f32i64_mm(float %x) {
; RV32-NEXT: sw ra, 28(sp) # 4-byte Folded Spill
; RV32-NEXT: .cfi_offset ra, -4
; RV32-NEXT: addi a0, sp, 8
-; RV32-NEXT: call __fixsfti@plt
+; RV32-NEXT: call __fixsfti
; RV32-NEXT: lw a0, 8(sp)
; RV32-NEXT: lw a1, 12(sp)
; RV32-NEXT: lw a2, 20(sp)
@@ -3405,7 +3405,7 @@ define i64 @ustest_f32i64_mm(float %x) {
; RV64-NEXT: .cfi_def_cfa_offset 16
; RV64-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64-NEXT: .cfi_offset ra, -8
-; RV64-NEXT: call __fixsfti@plt
+; RV64-NEXT: call __fixsfti
; RV64-NEXT: mv a2, a1
; RV64-NEXT: blez a1, .LBB50_2
; RV64-NEXT: # %bb.1: # %entry
@@ -3435,9 +3435,9 @@ define i64 @stest_f16i64_mm(half %x) {
; RV32-NEXT: .cfi_def_cfa_offset 32
; RV32-NEXT: sw ra, 28(sp) # 4-byte Folded Spill
; RV32-NEXT: .cfi_offset ra, -4
-; RV32-NEXT: call __extendhfsf2@plt
+; RV32-NEXT: call __extendhfsf2
; RV32-NEXT: addi a0, sp, 8
-; RV32-NEXT: call __fixsfti@plt
+; RV32-NEXT: call __fixsfti
; RV32-NEXT: lw a0, 20(sp)
; RV32-NEXT: lw a2, 16(sp)
; RV32-NEXT: lw a1, 12(sp)
@@ -3496,8 +3496,8 @@ define i64 @stest_f16i64_mm(half %x) {
; RV64-NEXT: .cfi_def_cfa_offset 16
; RV64-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64-NEXT: .cfi_offset ra, -8
-; RV64-NEXT: call __extendhfsf2@plt
-; RV64-NEXT: call __fixsfti@plt
+; RV64-NEXT: call __extendhfsf2
+; RV64-NEXT: call __fixsfti
; RV64-NEXT: li a2, -1
; RV64-NEXT: srli a3, a2, 1
; RV64-NEXT: beqz a1, .LBB51_2
@@ -3544,9 +3544,9 @@ define i64 @utesth_f16i64_mm(half %x) {
; RV32-NEXT: .cfi_def_cfa_offset 32
; RV32-NEXT: sw ra, 28(sp) # 4-byte Folded Spill
; RV32-NEXT: .cfi_offset ra, -4
-; RV32-NEXT: call __extendhfsf2@plt
+; RV32-NEXT: call __extendhfsf2
; RV32-NEXT: addi a0, sp, 8
-; RV32-NEXT: call __fixunssfti@plt
+; RV32-NEXT: call __fixunssfti
; RV32-NEXT: lw a0, 16(sp)
; RV32-NEXT: lw a1, 20(sp)
; RV32-NEXT: lw a2, 12(sp)
@@ -3571,8 +3571,8 @@ define i64 @utesth_f16i64_mm(half %x) {
; RV64-NEXT: .cfi_def_cfa_offset 16
; RV64-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64-NEXT: .cfi_offset ra, -8
-; RV64-NEXT: call __extendhfsf2@plt
-; RV64-NEXT: call __fixunssfti@plt
+; RV64-NEXT: call __extendhfsf2
+; RV64-NEXT: call __fixunssfti
; RV64-NEXT: snez a1, a1
; RV64-NEXT: addi a1, a1, -1
; RV64-NEXT: and a0, a1, a0
@@ -3593,9 +3593,9 @@ define i64 @ustest_f16i64_mm(half %x) {
; RV32-NEXT: .cfi_def_cfa_offset 32
; RV32-NEXT: sw ra, 28(sp) # 4-byte Folded Spill
; RV32-NEXT: .cfi_offset ra, -4
-; RV32-NEXT: call __extendhfsf2@plt
+; RV32-NEXT: call __extendhfsf2
; RV32-NEXT: addi a0, sp, 8
-; RV32-NEXT: call __fixsfti@plt
+; RV32-NEXT: call __fixsfti
; RV32-NEXT: lw a0, 8(sp)
; RV32-NEXT: lw a1, 12(sp)
; RV32-NEXT: lw a2, 20(sp)
@@ -3630,8 +3630,8 @@ define i64 @ustest_f16i64_mm(half %x) {
; RV64-NEXT: .cfi_def_cfa_offset 16
; RV64-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64-NEXT: .cfi_offset ra, -8
-; RV64-NEXT: call __extendhfsf2@plt
-; RV64-NEXT: call __fixsfti@plt
+; RV64-NEXT: call __extendhfsf2
+; RV64-NEXT: call __fixsfti
; RV64-NEXT: mv a2, a1
; RV64-NEXT: blez a1, .LBB53_2
; RV64-NEXT: # %bb.1: # %entry
diff --git a/llvm/test/CodeGen/RISCV/frame-info.ll b/llvm/test/CodeGen/RISCV/frame-info.ll
index 95c4798c32ec..bc4f89e91774 100644
--- a/llvm/test/CodeGen/RISCV/frame-info.ll
+++ b/llvm/test/CodeGen/RISCV/frame-info.ll
@@ -64,7 +64,7 @@ define void @stack_alloc(i32 signext %size) {
; RV32-NEXT: andi a0, a0, -16
; RV32-NEXT: sub a0, sp, a0
; RV32-NEXT: mv sp, a0
-; RV32-NEXT: call callee_with_args@plt
+; RV32-NEXT: call callee_with_args
; RV32-NEXT: addi sp, s0, -16
; RV32-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32-NEXT: lw s0, 8(sp) # 4-byte Folded Reload
@@ -87,7 +87,7 @@ define void @stack_alloc(i32 signext %size) {
; RV64-NEXT: andi a0, a0, -16
; RV64-NEXT: sub a0, sp, a0
; RV64-NEXT: mv sp, a0
-; RV64-NEXT: call callee_with_args@plt
+; RV64-NEXT: call callee_with_args
; RV64-NEXT: addi sp, s0, -16
; RV64-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64-NEXT: ld s0, 0(sp) # 8-byte Folded Reload
@@ -108,7 +108,7 @@ define void @stack_alloc(i32 signext %size) {
; RV32-WITHFP-NEXT: andi a0, a0, -16
; RV32-WITHFP-NEXT: sub a0, sp, a0
; RV32-WITHFP-NEXT: mv sp, a0
-; RV32-WITHFP-NEXT: call callee_with_args@plt
+; RV32-WITHFP-NEXT: call callee_with_args
; RV32-WITHFP-NEXT: addi sp, s0, -16
; RV32-WITHFP-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32-WITHFP-NEXT: lw s0, 8(sp) # 4-byte Folded Reload
@@ -131,7 +131,7 @@ define void @stack_alloc(i32 signext %size) {
; RV64-WITHFP-NEXT: andi a0, a0, -16
; RV64-WITHFP-NEXT: sub a0, sp, a0
; RV64-WITHFP-NEXT: mv sp, a0
-; RV64-WITHFP-NEXT: call callee_with_args@plt
+; RV64-WITHFP-NEXT: call callee_with_args
; RV64-WITHFP-NEXT: addi sp, s0, -16
; RV64-WITHFP-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64-WITHFP-NEXT: ld s0, 0(sp) # 8-byte Folded Reload
@@ -149,13 +149,13 @@ define void @branch_and_tail_call(i1 %a) {
; RV32-NEXT: andi a0, a0, 1
; RV32-NEXT: beqz a0, .LBB2_2
; RV32-NEXT: # %bb.1: # %blue_pill
-; RV32-NEXT: tail callee1@plt
+; RV32-NEXT: tail callee1
; RV32-NEXT: .LBB2_2: # %red_pill
; RV32-NEXT: addi sp, sp, -16
; RV32-NEXT: .cfi_def_cfa_offset 16
; RV32-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32-NEXT: .cfi_offset ra, -4
-; RV32-NEXT: call callee2@plt
+; RV32-NEXT: call callee2
; RV32-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32-NEXT: addi sp, sp, 16
; RV32-NEXT: ret
@@ -165,13 +165,13 @@ define void @branch_and_tail_call(i1 %a) {
; RV64-NEXT: andi a0, a0, 1
; RV64-NEXT: beqz a0, .LBB2_2
; RV64-NEXT: # %bb.1: # %blue_pill
-; RV64-NEXT: tail callee1@plt
+; RV64-NEXT: tail callee1
; RV64-NEXT: .LBB2_2: # %red_pill
; RV64-NEXT: addi sp, sp, -16
; RV64-NEXT: .cfi_def_cfa_offset 16
; RV64-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64-NEXT: .cfi_offset ra, -8
-; RV64-NEXT: call callee2@plt
+; RV64-NEXT: call callee2
; RV64-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64-NEXT: addi sp, sp, 16
; RV64-NEXT: ret
@@ -181,7 +181,7 @@ define void @branch_and_tail_call(i1 %a) {
; RV32-WITHFP-NEXT: andi a0, a0, 1
; RV32-WITHFP-NEXT: beqz a0, .LBB2_2
; RV32-WITHFP-NEXT: # %bb.1: # %blue_pill
-; RV32-WITHFP-NEXT: tail callee1@plt
+; RV32-WITHFP-NEXT: tail callee1
; RV32-WITHFP-NEXT: .LBB2_2: # %red_pill
; RV32-WITHFP-NEXT: addi sp, sp, -16
; RV32-WITHFP-NEXT: .cfi_def_cfa_offset 16
@@ -191,7 +191,7 @@ define void @branch_and_tail_call(i1 %a) {
; RV32-WITHFP-NEXT: .cfi_offset s0, -8
; RV32-WITHFP-NEXT: addi s0, sp, 16
; RV32-WITHFP-NEXT: .cfi_def_cfa s0, 0
-; RV32-WITHFP-NEXT: call callee2@plt
+; RV32-WITHFP-NEXT: call callee2
; RV32-WITHFP-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32-WITHFP-NEXT: lw s0, 8(sp) # 4-byte Folded Reload
; RV32-WITHFP-NEXT: addi sp, sp, 16
@@ -202,7 +202,7 @@ define void @branch_and_tail_call(i1 %a) {
; RV64-WITHFP-NEXT: andi a0, a0, 1
; RV64-WITHFP-NEXT: beqz a0, .LBB2_2
; RV64-WITHFP-NEXT: # %bb.1: # %blue_pill
-; RV64-WITHFP-NEXT: tail callee1@plt
+; RV64-WITHFP-NEXT: tail callee1
; RV64-WITHFP-NEXT: .LBB2_2: # %red_pill
; RV64-WITHFP-NEXT: addi sp, sp, -16
; RV64-WITHFP-NEXT: .cfi_def_cfa_offset 16
@@ -212,7 +212,7 @@ define void @branch_and_tail_call(i1 %a) {
; RV64-WITHFP-NEXT: .cfi_offset s0, -16
; RV64-WITHFP-NEXT: addi s0, sp, 16
; RV64-WITHFP-NEXT: .cfi_def_cfa s0, 0
-; RV64-WITHFP-NEXT: call callee2@plt
+; RV64-WITHFP-NEXT: call callee2
; RV64-WITHFP-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64-WITHFP-NEXT: ld s0, 0(sp) # 8-byte Folded Reload
; RV64-WITHFP-NEXT: addi sp, sp, 16
diff --git a/llvm/test/CodeGen/RISCV/frame.ll b/llvm/test/CodeGen/RISCV/frame.ll
index 183a0f47c68c..d50f1e55417a 100644
--- a/llvm/test/CodeGen/RISCV/frame.ll
+++ b/llvm/test/CodeGen/RISCV/frame.ll
@@ -17,7 +17,7 @@ define i32 @test() nounwind {
; RV32I-FPELIM-NEXT: sw zero, 12(sp)
; RV32I-FPELIM-NEXT: sw zero, 8(sp)
; RV32I-FPELIM-NEXT: addi a0, sp, 12
-; RV32I-FPELIM-NEXT: call test1@plt
+; RV32I-FPELIM-NEXT: call test1
; RV32I-FPELIM-NEXT: li a0, 0
; RV32I-FPELIM-NEXT: lw ra, 28(sp) # 4-byte Folded Reload
; RV32I-FPELIM-NEXT: addi sp, sp, 32
@@ -35,7 +35,7 @@ define i32 @test() nounwind {
; RV32I-WITHFP-NEXT: sw zero, -28(s0)
; RV32I-WITHFP-NEXT: sw zero, -32(s0)
; RV32I-WITHFP-NEXT: addi a0, s0, -28
-; RV32I-WITHFP-NEXT: call test1@plt
+; RV32I-WITHFP-NEXT: call test1
; RV32I-WITHFP-NEXT: li a0, 0
; RV32I-WITHFP-NEXT: lw ra, 28(sp) # 4-byte Folded Reload
; RV32I-WITHFP-NEXT: lw s0, 24(sp) # 4-byte Folded Reload
diff --git a/llvm/test/CodeGen/RISCV/frameaddr-returnaddr.ll b/llvm/test/CodeGen/RISCV/frameaddr-returnaddr.ll
index 23379c9430ac..478c8457997a 100644
--- a/llvm/test/CodeGen/RISCV/frameaddr-returnaddr.ll
+++ b/llvm/test/CodeGen/RISCV/frameaddr-returnaddr.ll
@@ -74,7 +74,7 @@ define ptr @test_frameaddress_3_alloca() nounwind {
; RV32I-NEXT: sw s0, 104(sp) # 4-byte Folded Spill
; RV32I-NEXT: addi s0, sp, 112
; RV32I-NEXT: addi a0, s0, -108
-; RV32I-NEXT: call notdead@plt
+; RV32I-NEXT: call notdead
; RV32I-NEXT: lw a0, -8(s0)
; RV32I-NEXT: lw a0, -8(a0)
; RV32I-NEXT: lw a0, -8(a0)
@@ -90,7 +90,7 @@ define ptr @test_frameaddress_3_alloca() nounwind {
; RV64I-NEXT: sd s0, 112(sp) # 8-byte Folded Spill
; RV64I-NEXT: addi s0, sp, 128
; RV64I-NEXT: addi a0, s0, -116
-; RV64I-NEXT: call notdead@plt
+; RV64I-NEXT: call notdead
; RV64I-NEXT: ld a0, -16(s0)
; RV64I-NEXT: ld a0, -16(a0)
; RV64I-NEXT: ld a0, -16(a0)
diff --git a/llvm/test/CodeGen/RISCV/ghccc-rv32.ll b/llvm/test/CodeGen/RISCV/ghccc-rv32.ll
index bf1f41195c5d..0f9511125adb 100644
--- a/llvm/test/CodeGen/RISCV/ghccc-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/ghccc-rv32.ll
@@ -78,7 +78,7 @@ define ghccc void @foo() nounwind {
; CHECK-NEXT: lw s2, %lo(sp)(a0)
; CHECK-NEXT: lui a0, %hi(base)
; CHECK-NEXT: lw s1, %lo(base)(a0)
-; CHECK-NEXT: tail bar@plt
+; CHECK-NEXT: tail bar
entry:
%0 = load double, ptr @d6
%1 = load double, ptr @d5
diff --git a/llvm/test/CodeGen/RISCV/ghccc-rv64.ll b/llvm/test/CodeGen/RISCV/ghccc-rv64.ll
index 9d2091df1f19..79afd4bc375d 100644
--- a/llvm/test/CodeGen/RISCV/ghccc-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/ghccc-rv64.ll
@@ -78,7 +78,7 @@ define ghccc void @foo() nounwind {
; CHECK-NEXT: ld s2, %lo(sp)(a0)
; CHECK-NEXT: lui a0, %hi(base)
; CHECK-NEXT: ld s1, %lo(base)(a0)
-; CHECK-NEXT: tail bar@plt
+; CHECK-NEXT: tail bar
entry:
%0 = load double, ptr @d6
%1 = load double, ptr @d5
diff --git a/llvm/test/CodeGen/RISCV/ghccc-without-f-reg.ll b/llvm/test/CodeGen/RISCV/ghccc-without-f-reg.ll
index 32df34fe2c0f..6437beae0901 100644
--- a/llvm/test/CodeGen/RISCV/ghccc-without-f-reg.ll
+++ b/llvm/test/CodeGen/RISCV/ghccc-without-f-reg.ll
@@ -25,7 +25,7 @@ define ghccc void @caller_float() nounwind {
; CHECK-NEXT: lw s2, %lo(f2)(a0)
; CHECK-NEXT: lui a0, %hi(f1)
; CHECK-NEXT: lw s1, %lo(f1)(a0)
-; CHECK-NEXT: tail callee_float@plt
+; CHECK-NEXT: tail callee_float
entry:
%0 = load float, ptr @f6
%1 = load float, ptr @f5
@@ -61,7 +61,7 @@ define ghccc void @caller_double() nounwind {
; CHECK-NEXT: ld s2, %lo(d2)(a0)
; CHECK-NEXT: lui a0, %hi(d1)
; CHECK-NEXT: ld s1, %lo(d1)(a0)
-; CHECK-NEXT: tail callee_double@plt
+; CHECK-NEXT: tail callee_double
entry:
%0 = load double, ptr @d6
%1 = load double, ptr @d5
diff --git a/llvm/test/CodeGen/RISCV/half-arith.ll b/llvm/test/CodeGen/RISCV/half-arith.ll
index 98c732122951..f54adaa24b1b 100644
--- a/llvm/test/CodeGen/RISCV/half-arith.ll
+++ b/llvm/test/CodeGen/RISCV/half-arith.ll
@@ -47,14 +47,14 @@ define half @fadd_s(half %a, half %b) nounwind {
; RV32I-NEXT: lui a1, 16
; RV32I-NEXT: addi s2, a1, -1
; RV32I-NEXT: and a0, a0, s2
-; RV32I-NEXT: call __extendhfsf2@plt
+; RV32I-NEXT: call __extendhfsf2
; RV32I-NEXT: mv s1, a0
; RV32I-NEXT: and a0, s0, s2
-; RV32I-NEXT: call __extendhfsf2@plt
+; RV32I-NEXT: call __extendhfsf2
; RV32I-NEXT: mv a1, a0
; RV32I-NEXT: mv a0, s1
-; RV32I-NEXT: call __addsf3@plt
-; RV32I-NEXT: call __truncsfhf2@plt
+; RV32I-NEXT: call __addsf3
+; RV32I-NEXT: call __truncsfhf2
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: lw s0, 8(sp) # 4-byte Folded Reload
; RV32I-NEXT: lw s1, 4(sp) # 4-byte Folded Reload
@@ -73,14 +73,14 @@ define half @fadd_s(half %a, half %b) nounwind {
; RV64I-NEXT: lui a1, 16
; RV64I-NEXT: addiw s2, a1, -1
; RV64I-NEXT: and a0, a0, s2
-; RV64I-NEXT: call __extendhfsf2@plt
+; RV64I-NEXT: call __extendhfsf2
; RV64I-NEXT: mv s1, a0
; RV64I-NEXT: and a0, s0, s2
-; RV64I-NEXT: call __extendhfsf2@plt
+; RV64I-NEXT: call __extendhfsf2
; RV64I-NEXT: mv a1, a0
; RV64I-NEXT: mv a0, s1
-; RV64I-NEXT: call __addsf3@plt
-; RV64I-NEXT: call __truncsfhf2@plt
+; RV64I-NEXT: call __addsf3
+; RV64I-NEXT: call __truncsfhf2
; RV64I-NEXT: ld ra, 24(sp) # 8-byte Folded Reload
; RV64I-NEXT: ld s0, 16(sp) # 8-byte Folded Reload
; RV64I-NEXT: ld s1, 8(sp) # 8-byte Folded Reload
@@ -136,14 +136,14 @@ define half @fsub_s(half %a, half %b) nounwind {
; RV32I-NEXT: lui a1, 16
; RV32I-NEXT: addi s2, a1, -1
; RV32I-NEXT: and a0, a0, s2
-; RV32I-NEXT: call __extendhfsf2@plt
+; RV32I-NEXT: call __extendhfsf2
; RV32I-NEXT: mv s1, a0
; RV32I-NEXT: and a0, s0, s2
-; RV32I-NEXT: call __extendhfsf2@plt
+; RV32I-NEXT: call __extendhfsf2
; RV32I-NEXT: mv a1, a0
; RV32I-NEXT: mv a0, s1
-; RV32I-NEXT: call __subsf3@plt
-; RV32I-NEXT: call __truncsfhf2@plt
+; RV32I-NEXT: call __subsf3
+; RV32I-NEXT: call __truncsfhf2
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: lw s0, 8(sp) # 4-byte Folded Reload
; RV32I-NEXT: lw s1, 4(sp) # 4-byte Folded Reload
@@ -162,14 +162,14 @@ define half @fsub_s(half %a, half %b) nounwind {
; RV64I-NEXT: lui a1, 16
; RV64I-NEXT: addiw s2, a1, -1
; RV64I-NEXT: and a0, a0, s2
-; RV64I-NEXT: call __extendhfsf2@plt
+; RV64I-NEXT: call __extendhfsf2
; RV64I-NEXT: mv s1, a0
; RV64I-NEXT: and a0, s0, s2
-; RV64I-NEXT: call __extendhfsf2@plt
+; RV64I-NEXT: call __extendhfsf2
; RV64I-NEXT: mv a1, a0
; RV64I-NEXT: mv a0, s1
-; RV64I-NEXT: call __subsf3@plt
-; RV64I-NEXT: call __truncsfhf2@plt
+; RV64I-NEXT: call __subsf3
+; RV64I-NEXT: call __truncsfhf2
; RV64I-NEXT: ld ra, 24(sp) # 8-byte Folded Reload
; RV64I-NEXT: ld s0, 16(sp) # 8-byte Folded Reload
; RV64I-NEXT: ld s1, 8(sp) # 8-byte Folded Reload
@@ -225,14 +225,14 @@ define half @fmul_s(half %a, half %b) nounwind {
; RV32I-NEXT: lui a1, 16
; RV32I-NEXT: addi s2, a1, -1
; RV32I-NEXT: and a0, a0, s2
-; RV32I-NEXT: call __extendhfsf2@plt
+; RV32I-NEXT: call __extendhfsf2
; RV32I-NEXT: mv s1, a0
; RV32I-NEXT: and a0, s0, s2
-; RV32I-NEXT: call __extendhfsf2@plt
+; RV32I-NEXT: call __extendhfsf2
; RV32I-NEXT: mv a1, a0
; RV32I-NEXT: mv a0, s1
-; RV32I-NEXT: call __mulsf3@plt
-; RV32I-NEXT: call __truncsfhf2@plt
+; RV32I-NEXT: call __mulsf3
+; RV32I-NEXT: call __truncsfhf2
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: lw s0, 8(sp) # 4-byte Folded Reload
; RV32I-NEXT: lw s1, 4(sp) # 4-byte Folded Reload
@@ -251,14 +251,14 @@ define half @fmul_s(half %a, half %b) nounwind {
; RV64I-NEXT: lui a1, 16
; RV64I-NEXT: addiw s2, a1, -1
; RV64I-NEXT: and a0, a0, s2
-; RV64I-NEXT: call __extendhfsf2@plt
+; RV64I-NEXT: call __extendhfsf2
; RV64I-NEXT: mv s1, a0
; RV64I-NEXT: and a0, s0, s2
-; RV64I-NEXT: call __extendhfsf2@plt
+; RV64I-NEXT: call __extendhfsf2
; RV64I-NEXT: mv a1, a0
; RV64I-NEXT: mv a0, s1
-; RV64I-NEXT: call __mulsf3@plt
-; RV64I-NEXT: call __truncsfhf2@plt
+; RV64I-NEXT: call __mulsf3
+; RV64I-NEXT: call __truncsfhf2
; RV64I-NEXT: ld ra, 24(sp) # 8-byte Folded Reload
; RV64I-NEXT: ld s0, 16(sp) # 8-byte Folded Reload
; RV64I-NEXT: ld s1, 8(sp) # 8-byte Folded Reload
@@ -314,14 +314,14 @@ define half @fdiv_s(half %a, half %b) nounwind {
; RV32I-NEXT: lui a1, 16
; RV32I-NEXT: addi s2, a1, -1
; RV32I-NEXT: and a0, a0, s2
-; RV32I-NEXT: call __extendhfsf2@plt
+; RV32I-NEXT: call __extendhfsf2
; RV32I-NEXT: mv s1, a0
; RV32I-NEXT: and a0, s0, s2
-; RV32I-NEXT: call __extendhfsf2@plt
+; RV32I-NEXT: call __extendhfsf2
; RV32I-NEXT: mv a1, a0
; RV32I-NEXT: mv a0, s1
-; RV32I-NEXT: call __divsf3@plt
-; RV32I-NEXT: call __truncsfhf2@plt
+; RV32I-NEXT: call __divsf3
+; RV32I-NEXT: call __truncsfhf2
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: lw s0, 8(sp) # 4-byte Folded Reload
; RV32I-NEXT: lw s1, 4(sp) # 4-byte Folded Reload
@@ -340,14 +340,14 @@ define half @fdiv_s(half %a, half %b) nounwind {
; RV64I-NEXT: lui a1, 16
; RV64I-NEXT: addiw s2, a1, -1
; RV64I-NEXT: and a0, a0, s2
-; RV64I-NEXT: call __extendhfsf2@plt
+; RV64I-NEXT: call __extendhfsf2
; RV64I-NEXT: mv s1, a0
; RV64I-NEXT: and a0, s0, s2
-; RV64I-NEXT: call __extendhfsf2@plt
+; RV64I-NEXT: call __extendhfsf2
; RV64I-NEXT: mv a1, a0
; RV64I-NEXT: mv a0, s1
-; RV64I-NEXT: call __divsf3@plt
-; RV64I-NEXT: call __truncsfhf2@plt
+; RV64I-NEXT: call __divsf3
+; RV64I-NEXT: call __truncsfhf2
; RV64I-NEXT: ld ra, 24(sp) # 8-byte Folded Reload
; RV64I-NEXT: ld s0, 16(sp) # 8-byte Folded Reload
; RV64I-NEXT: ld s1, 8(sp) # 8-byte Folded Reload
@@ -400,9 +400,9 @@ define half @fsqrt_s(half %a) nounwind {
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT: slli a0, a0, 16
; RV32I-NEXT: srli a0, a0, 16
-; RV32I-NEXT: call __extendhfsf2@plt
-; RV32I-NEXT: call sqrtf@plt
-; RV32I-NEXT: call __truncsfhf2@plt
+; RV32I-NEXT: call __extendhfsf2
+; RV32I-NEXT: call sqrtf
+; RV32I-NEXT: call __truncsfhf2
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
@@ -413,9 +413,9 @@ define half @fsqrt_s(half %a) nounwind {
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT: slli a0, a0, 48
; RV64I-NEXT: srli a0, a0, 48
-; RV64I-NEXT: call __extendhfsf2@plt
-; RV64I-NEXT: call sqrtf@plt
-; RV64I-NEXT: call __truncsfhf2@plt
+; RV64I-NEXT: call __extendhfsf2
+; RV64I-NEXT: call sqrtf
+; RV64I-NEXT: call __truncsfhf2
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
@@ -577,21 +577,21 @@ define i32 @fneg_s(half %a, half %b) nounwind {
; RV32I-NEXT: lui a1, 16
; RV32I-NEXT: addi s1, a1, -1
; RV32I-NEXT: and a0, a0, s1
-; RV32I-NEXT: call __extendhfsf2@plt
+; RV32I-NEXT: call __extendhfsf2
; RV32I-NEXT: mv a1, a0
-; RV32I-NEXT: call __addsf3@plt
-; RV32I-NEXT: call __truncsfhf2@plt
+; RV32I-NEXT: call __addsf3
+; RV32I-NEXT: call __truncsfhf2
; RV32I-NEXT: and a0, a0, s1
-; RV32I-NEXT: call __extendhfsf2@plt
+; RV32I-NEXT: call __extendhfsf2
; RV32I-NEXT: mv s0, a0
; RV32I-NEXT: lui a0, 524288
; RV32I-NEXT: xor a0, s0, a0
-; RV32I-NEXT: call __truncsfhf2@plt
+; RV32I-NEXT: call __truncsfhf2
; RV32I-NEXT: and a0, a0, s1
-; RV32I-NEXT: call __extendhfsf2@plt
+; RV32I-NEXT: call __extendhfsf2
; RV32I-NEXT: mv a1, a0
; RV32I-NEXT: mv a0, s0
-; RV32I-NEXT: call __eqsf2@plt
+; RV32I-NEXT: call __eqsf2
; RV32I-NEXT: seqz a0, a0
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: lw s0, 8(sp) # 4-byte Folded Reload
@@ -608,21 +608,21 @@ define i32 @fneg_s(half %a, half %b) nounwind {
; RV64I-NEXT: lui a1, 16
; RV64I-NEXT: addiw s1, a1, -1
; RV64I-NEXT: and a0, a0, s1
-; RV64I-NEXT: call __extendhfsf2@plt
+; RV64I-NEXT: call __extendhfsf2
; RV64I-NEXT: mv a1, a0
-; RV64I-NEXT: call __addsf3@plt
-; RV64I-NEXT: call __truncsfhf2@plt
+; RV64I-NEXT: call __addsf3
+; RV64I-NEXT: call __truncsfhf2
; RV64I-NEXT: and a0, a0, s1
-; RV64I-NEXT: call __extendhfsf2@plt
+; RV64I-NEXT: call __extendhfsf2
; RV64I-NEXT: mv s0, a0
; RV64I-NEXT: lui a0, 524288
; RV64I-NEXT: xor a0, s0, a0
-; RV64I-NEXT: call __truncsfhf2@plt
+; RV64I-NEXT: call __truncsfhf2
; RV64I-NEXT: and a0, a0, s1
-; RV64I-NEXT: call __extendhfsf2@plt
+; RV64I-NEXT: call __extendhfsf2
; RV64I-NEXT: mv a1, a0
; RV64I-NEXT: mv a0, s0
-; RV64I-NEXT: call __eqsf2@plt
+; RV64I-NEXT: call __eqsf2
; RV64I-NEXT: seqz a0, a0
; RV64I-NEXT: ld ra, 24(sp) # 8-byte Folded Reload
; RV64I-NEXT: ld s0, 16(sp) # 8-byte Folded Reload
@@ -699,19 +699,19 @@ define half @fsgnjn_s(half %a, half %b) nounwind {
; RV32I-NEXT: lui a0, 16
; RV32I-NEXT: addi s3, a0, -1
; RV32I-NEXT: and a0, s1, s3
-; RV32I-NEXT: call __extendhfsf2@plt
+; RV32I-NEXT: call __extendhfsf2
; RV32I-NEXT: mv s2, a0
; RV32I-NEXT: and a0, s0, s3
-; RV32I-NEXT: call __extendhfsf2@plt
+; RV32I-NEXT: call __extendhfsf2
; RV32I-NEXT: mv a1, a0
; RV32I-NEXT: mv a0, s2
-; RV32I-NEXT: call __addsf3@plt
-; RV32I-NEXT: call __truncsfhf2@plt
+; RV32I-NEXT: call __addsf3
+; RV32I-NEXT: call __truncsfhf2
; RV32I-NEXT: and a0, a0, s3
-; RV32I-NEXT: call __extendhfsf2@plt
+; RV32I-NEXT: call __extendhfsf2
; RV32I-NEXT: lui a1, 524288
; RV32I-NEXT: xor a0, a0, a1
-; RV32I-NEXT: call __truncsfhf2@plt
+; RV32I-NEXT: call __truncsfhf2
; RV32I-NEXT: lui a1, 1048568
; RV32I-NEXT: and a0, a0, a1
; RV32I-NEXT: slli s1, s1, 17
@@ -738,19 +738,19 @@ define half @fsgnjn_s(half %a, half %b) nounwind {
; RV64I-NEXT: lui a0, 16
; RV64I-NEXT: addiw s3, a0, -1
; RV64I-NEXT: and a0, s1, s3
-; RV64I-NEXT: call __extendhfsf2@plt
+; RV64I-NEXT: call __extendhfsf2
; RV64I-NEXT: mv s2, a0
; RV64I-NEXT: and a0, s0, s3
-; RV64I-NEXT: call __extendhfsf2@plt
+; RV64I-NEXT: call __extendhfsf2
; RV64I-NEXT: mv a1, a0
; RV64I-NEXT: mv a0, s2
-; RV64I-NEXT: call __addsf3@plt
-; RV64I-NEXT: call __truncsfhf2@plt
+; RV64I-NEXT: call __addsf3
+; RV64I-NEXT: call __truncsfhf2
; RV64I-NEXT: and a0, a0, s3
-; RV64I-NEXT: call __extendhfsf2@plt
+; RV64I-NEXT: call __extendhfsf2
; RV64I-NEXT: lui a1, 524288
; RV64I-NEXT: xor a0, a0, a1
-; RV64I-NEXT: call __truncsfhf2@plt
+; RV64I-NEXT: call __truncsfhf2
; RV64I-NEXT: lui a1, 1048568
; RV64I-NEXT: and a0, a0, a1
; RV64I-NEXT: slli s1, s1, 49
@@ -908,25 +908,25 @@ define half @fabs_s(half %a, half %b) nounwind {
; RV32I-NEXT: lui a1, 16
; RV32I-NEXT: addi s2, a1, -1
; RV32I-NEXT: and a0, a0, s2
-; RV32I-NEXT: call __extendhfsf2@plt
+; RV32I-NEXT: call __extendhfsf2
; RV32I-NEXT: mv s1, a0
; RV32I-NEXT: and a0, s0, s2
-; RV32I-NEXT: call __extendhfsf2@plt
+; RV32I-NEXT: call __extendhfsf2
; RV32I-NEXT: mv a1, a0
; RV32I-NEXT: mv a0, s1
-; RV32I-NEXT: call __addsf3@plt
-; RV32I-NEXT: call __truncsfhf2@plt
+; RV32I-NEXT: call __addsf3
+; RV32I-NEXT: call __truncsfhf2
; RV32I-NEXT: and a0, a0, s2
-; RV32I-NEXT: call __extendhfsf2@plt
+; RV32I-NEXT: call __extendhfsf2
; RV32I-NEXT: mv s0, a0
; RV32I-NEXT: slli a0, a0, 1
; RV32I-NEXT: srli a0, a0, 1
-; RV32I-NEXT: call __truncsfhf2@plt
+; RV32I-NEXT: call __truncsfhf2
; RV32I-NEXT: and a0, a0, s2
-; RV32I-NEXT: call __extendhfsf2@plt
+; RV32I-NEXT: call __extendhfsf2
; RV32I-NEXT: mv a1, s0
-; RV32I-NEXT: call __addsf3@plt
-; RV32I-NEXT: call __truncsfhf2@plt
+; RV32I-NEXT: call __addsf3
+; RV32I-NEXT: call __truncsfhf2
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: lw s0, 8(sp) # 4-byte Folded Reload
; RV32I-NEXT: lw s1, 4(sp) # 4-byte Folded Reload
@@ -945,25 +945,25 @@ define half @fabs_s(half %a, half %b) nounwind {
; RV64I-NEXT: lui a1, 16
; RV64I-NEXT: addiw s2, a1, -1
; RV64I-NEXT: and a0, a0, s2
-; RV64I-NEXT: call __extendhfsf2@plt
+; RV64I-NEXT: call __extendhfsf2
; RV64I-NEXT: mv s1, a0
; RV64I-NEXT: and a0, s0, s2
-; RV64I-NEXT: call __extendhfsf2@plt
+; RV64I-NEXT: call __extendhfsf2
; RV64I-NEXT: mv a1, a0
; RV64I-NEXT: mv a0, s1
-; RV64I-NEXT: call __addsf3@plt
-; RV64I-NEXT: call __truncsfhf2@plt
+; RV64I-NEXT: call __addsf3
+; RV64I-NEXT: call __truncsfhf2
; RV64I-NEXT: and a0, a0, s2
-; RV64I-NEXT: call __extendhfsf2@plt
+; RV64I-NEXT: call __extendhfsf2
; RV64I-NEXT: mv s0, a0
; RV64I-NEXT: slli a0, a0, 33
; RV64I-NEXT: srli a0, a0, 33
-; RV64I-NEXT: call __truncsfhf2@plt
+; RV64I-NEXT: call __truncsfhf2
; RV64I-NEXT: and a0, a0, s2
-; RV64I-NEXT: call __extendhfsf2@plt
+; RV64I-NEXT: call __extendhfsf2
; RV64I-NEXT: mv a1, s0
-; RV64I-NEXT: call __addsf3@plt
-; RV64I-NEXT: call __truncsfhf2@plt
+; RV64I-NEXT: call __addsf3
+; RV64I-NEXT: call __truncsfhf2
; RV64I-NEXT: ld ra, 24(sp) # 8-byte Folded Reload
; RV64I-NEXT: ld s0, 16(sp) # 8-byte Folded Reload
; RV64I-NEXT: ld s1, 8(sp) # 8-byte Folded Reload
@@ -1041,14 +1041,14 @@ define half @fmin_s(half %a, half %b) nounwind {
; RV32I-NEXT: lui a1, 16
; RV32I-NEXT: addi s2, a1, -1
; RV32I-NEXT: and a0, a0, s2
-; RV32I-NEXT: call __extendhfsf2@plt
+; RV32I-NEXT: call __extendhfsf2
; RV32I-NEXT: mv s1, a0
; RV32I-NEXT: and a0, s0, s2
-; RV32I-NEXT: call __extendhfsf2@plt
+; RV32I-NEXT: call __extendhfsf2
; RV32I-NEXT: mv a1, a0
; RV32I-NEXT: mv a0, s1
-; RV32I-NEXT: call fminf@plt
-; RV32I-NEXT: call __truncsfhf2@plt
+; RV32I-NEXT: call fminf
+; RV32I-NEXT: call __truncsfhf2
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: lw s0, 8(sp) # 4-byte Folded Reload
; RV32I-NEXT: lw s1, 4(sp) # 4-byte Folded Reload
@@ -1067,14 +1067,14 @@ define half @fmin_s(half %a, half %b) nounwind {
; RV64I-NEXT: lui a1, 16
; RV64I-NEXT: addiw s2, a1, -1
; RV64I-NEXT: and a0, a0, s2
-; RV64I-NEXT: call __extendhfsf2@plt
+; RV64I-NEXT: call __extendhfsf2
; RV64I-NEXT: mv s1, a0
; RV64I-NEXT: and a0, s0, s2
-; RV64I-NEXT: call __extendhfsf2@plt
+; RV64I-NEXT: call __extendhfsf2
; RV64I-NEXT: mv a1, a0
; RV64I-NEXT: mv a0, s1
-; RV64I-NEXT: call fminf@plt
-; RV64I-NEXT: call __truncsfhf2@plt
+; RV64I-NEXT: call fminf
+; RV64I-NEXT: call __truncsfhf2
; RV64I-NEXT: ld ra, 24(sp) # 8-byte Folded Reload
; RV64I-NEXT: ld s0, 16(sp) # 8-byte Folded Reload
; RV64I-NEXT: ld s1, 8(sp) # 8-byte Folded Reload
@@ -1132,14 +1132,14 @@ define half @fmax_s(half %a, half %b) nounwind {
; RV32I-NEXT: lui a1, 16
; RV32I-NEXT: addi s2, a1, -1
; RV32I-NEXT: and a0, a0, s2
-; RV32I-NEXT: call __extendhfsf2@plt
+; RV32I-NEXT: call __extendhfsf2
; RV32I-NEXT: mv s1, a0
; RV32I-NEXT: and a0, s0, s2
-; RV32I-NEXT: call __extendhfsf2@plt
+; RV32I-NEXT: call __extendhfsf2
; RV32I-NEXT: mv a1, a0
; RV32I-NEXT: mv a0, s1
-; RV32I-NEXT: call fmaxf@plt
-; RV32I-NEXT: call __truncsfhf2@plt
+; RV32I-NEXT: call fmaxf
+; RV32I-NEXT: call __truncsfhf2
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: lw s0, 8(sp) # 4-byte Folded Reload
; RV32I-NEXT: lw s1, 4(sp) # 4-byte Folded Reload
@@ -1158,14 +1158,14 @@ define half @fmax_s(half %a, half %b) nounwind {
; RV64I-NEXT: lui a1, 16
; RV64I-NEXT: addiw s2, a1, -1
; RV64I-NEXT: and a0, a0, s2
-; RV64I-NEXT: call __extendhfsf2@plt
+; RV64I-NEXT: call __extendhfsf2
; RV64I-NEXT: mv s1, a0
; RV64I-NEXT: and a0, s0, s2
-; RV64I-NEXT: call __extendhfsf2@plt
+; RV64I-NEXT: call __extendhfsf2
; RV64I-NEXT: mv a1, a0
; RV64I-NEXT: mv a0, s1
-; RV64I-NEXT: call fmaxf@plt
-; RV64I-NEXT: call __truncsfhf2@plt
+; RV64I-NEXT: call fmaxf
+; RV64I-NEXT: call __truncsfhf2
; RV64I-NEXT: ld ra, 24(sp) # 8-byte Folded Reload
; RV64I-NEXT: ld s0, 16(sp) # 8-byte Folded Reload
; RV64I-NEXT: ld s1, 8(sp) # 8-byte Folded Reload
@@ -1225,18 +1225,18 @@ define half @fmadd_s(half %a, half %b, half %c) nounwind {
; RV32I-NEXT: lui a1, 16
; RV32I-NEXT: addi s3, a1, -1
; RV32I-NEXT: and a0, a0, s3
-; RV32I-NEXT: call __extendhfsf2@plt
+; RV32I-NEXT: call __extendhfsf2
; RV32I-NEXT: mv s2, a0
; RV32I-NEXT: and a0, s1, s3
-; RV32I-NEXT: call __extendhfsf2@plt
+; RV32I-NEXT: call __extendhfsf2
; RV32I-NEXT: mv s1, a0
; RV32I-NEXT: and a0, s0, s3
-; RV32I-NEXT: call __extendhfsf2@plt
+; RV32I-NEXT: call __extendhfsf2
; RV32I-NEXT: mv a2, a0
; RV32I-NEXT: mv a0, s2
; RV32I-NEXT: mv a1, s1
-; RV32I-NEXT: call fmaf@plt
-; RV32I-NEXT: call __truncsfhf2@plt
+; RV32I-NEXT: call fmaf
+; RV32I-NEXT: call __truncsfhf2
; RV32I-NEXT: lw ra, 28(sp) # 4-byte Folded Reload
; RV32I-NEXT: lw s0, 24(sp) # 4-byte Folded Reload
; RV32I-NEXT: lw s1, 20(sp) # 4-byte Folded Reload
@@ -1258,18 +1258,18 @@ define half @fmadd_s(half %a, half %b, half %c) nounwind {
; RV64I-NEXT: lui a1, 16
; RV64I-NEXT: addiw s3, a1, -1
; RV64I-NEXT: and a0, a0, s3
-; RV64I-NEXT: call __extendhfsf2@plt
+; RV64I-NEXT: call __extendhfsf2
; RV64I-NEXT: mv s2, a0
; RV64I-NEXT: and a0, s1, s3
-; RV64I-NEXT: call __extendhfsf2@plt
+; RV64I-NEXT: call __extendhfsf2
; RV64I-NEXT: mv s1, a0
; RV64I-NEXT: and a0, s0, s3
-; RV64I-NEXT: call __extendhfsf2@plt
+; RV64I-NEXT: call __extendhfsf2
; RV64I-NEXT: mv a2, a0
; RV64I-NEXT: mv a0, s2
; RV64I-NEXT: mv a1, s1
-; RV64I-NEXT: call fmaf@plt
-; RV64I-NEXT: call __truncsfhf2@plt
+; RV64I-NEXT: call fmaf
+; RV64I-NEXT: call __truncsfhf2
; RV64I-NEXT: ld ra, 40(sp) # 8-byte Folded Reload
; RV64I-NEXT: ld s0, 32(sp) # 8-byte Folded Reload
; RV64I-NEXT: ld s1, 24(sp) # 8-byte Folded Reload
@@ -1334,29 +1334,29 @@ define half @fmsub_s(half %a, half %b, half %c) nounwind {
; RV32I-NEXT: lui a0, 16
; RV32I-NEXT: addi s3, a0, -1
; RV32I-NEXT: and a0, a2, s3
-; RV32I-NEXT: call __extendhfsf2@plt
+; RV32I-NEXT: call __extendhfsf2
; RV32I-NEXT: li a1, 0
-; RV32I-NEXT: call __addsf3@plt
-; RV32I-NEXT: call __truncsfhf2@plt
+; RV32I-NEXT: call __addsf3
+; RV32I-NEXT: call __truncsfhf2
; RV32I-NEXT: and a0, a0, s3
-; RV32I-NEXT: call __extendhfsf2@plt
+; RV32I-NEXT: call __extendhfsf2
; RV32I-NEXT: lui a1, 524288
; RV32I-NEXT: xor a0, a0, a1
-; RV32I-NEXT: call __truncsfhf2@plt
+; RV32I-NEXT: call __truncsfhf2
; RV32I-NEXT: mv s2, a0
; RV32I-NEXT: and a0, s1, s3
-; RV32I-NEXT: call __extendhfsf2@plt
+; RV32I-NEXT: call __extendhfsf2
; RV32I-NEXT: mv s1, a0
; RV32I-NEXT: and a0, s0, s3
-; RV32I-NEXT: call __extendhfsf2@plt
+; RV32I-NEXT: call __extendhfsf2
; RV32I-NEXT: mv s0, a0
; RV32I-NEXT: and a0, s2, s3
-; RV32I-NEXT: call __extendhfsf2@plt
+; RV32I-NEXT: call __extendhfsf2
; RV32I-NEXT: mv a2, a0
; RV32I-NEXT: mv a0, s1
; RV32I-NEXT: mv a1, s0
-; RV32I-NEXT: call fmaf@plt
-; RV32I-NEXT: call __truncsfhf2@plt
+; RV32I-NEXT: call fmaf
+; RV32I-NEXT: call __truncsfhf2
; RV32I-NEXT: lw ra, 28(sp) # 4-byte Folded Reload
; RV32I-NEXT: lw s0, 24(sp) # 4-byte Folded Reload
; RV32I-NEXT: lw s1, 20(sp) # 4-byte Folded Reload
@@ -1378,29 +1378,29 @@ define half @fmsub_s(half %a, half %b, half %c) nounwind {
; RV64I-NEXT: lui a0, 16
; RV64I-NEXT: addiw s3, a0, -1
; RV64I-NEXT: and a0, a2, s3
-; RV64I-NEXT: call __extendhfsf2@plt
+; RV64I-NEXT: call __extendhfsf2
; RV64I-NEXT: li a1, 0
-; RV64I-NEXT: call __addsf3@plt
-; RV64I-NEXT: call __truncsfhf2@plt
+; RV64I-NEXT: call __addsf3
+; RV64I-NEXT: call __truncsfhf2
; RV64I-NEXT: and a0, a0, s3
-; RV64I-NEXT: call __extendhfsf2@plt
+; RV64I-NEXT: call __extendhfsf2
; RV64I-NEXT: lui a1, 524288
; RV64I-NEXT: xor a0, a0, a1
-; RV64I-NEXT: call __truncsfhf2@plt
+; RV64I-NEXT: call __truncsfhf2
; RV64I-NEXT: mv s2, a0
; RV64I-NEXT: and a0, s1, s3
-; RV64I-NEXT: call __extendhfsf2@plt
+; RV64I-NEXT: call __extendhfsf2
; RV64I-NEXT: mv s1, a0
; RV64I-NEXT: and a0, s0, s3
-; RV64I-NEXT: call __extendhfsf2@plt
+; RV64I-NEXT: call __extendhfsf2
; RV64I-NEXT: mv s0, a0
; RV64I-NEXT: and a0, s2, s3
-; RV64I-NEXT: call __extendhfsf2@plt
+; RV64I-NEXT: call __extendhfsf2
; RV64I-NEXT: mv a2, a0
; RV64I-NEXT: mv a0, s1
; RV64I-NEXT: mv a1, s0
-; RV64I-NEXT: call fmaf@plt
-; RV64I-NEXT: call __truncsfhf2@plt
+; RV64I-NEXT: call fmaf
+; RV64I-NEXT: call __truncsfhf2
; RV64I-NEXT: ld ra, 40(sp) # 8-byte Folded Reload
; RV64I-NEXT: ld s0, 32(sp) # 8-byte Folded Reload
; RV64I-NEXT: ld s1, 24(sp) # 8-byte Folded Reload
@@ -1489,41 +1489,41 @@ define half @fnmadd_s(half %a, half %b, half %c) nounwind {
; RV32I-NEXT: lui s3, 16
; RV32I-NEXT: addi s3, s3, -1
; RV32I-NEXT: and a0, a0, s3
-; RV32I-NEXT: call __extendhfsf2@plt
+; RV32I-NEXT: call __extendhfsf2
; RV32I-NEXT: li a1, 0
-; RV32I-NEXT: call __addsf3@plt
-; RV32I-NEXT: call __truncsfhf2@plt
+; RV32I-NEXT: call __addsf3
+; RV32I-NEXT: call __truncsfhf2
; RV32I-NEXT: mv s2, a0
; RV32I-NEXT: and a0, s1, s3
-; RV32I-NEXT: call __extendhfsf2@plt
+; RV32I-NEXT: call __extendhfsf2
; RV32I-NEXT: li a1, 0
-; RV32I-NEXT: call __addsf3@plt
-; RV32I-NEXT: call __truncsfhf2@plt
+; RV32I-NEXT: call __addsf3
+; RV32I-NEXT: call __truncsfhf2
; RV32I-NEXT: mv s1, a0
; RV32I-NEXT: and a0, s2, s3
-; RV32I-NEXT: call __extendhfsf2@plt
+; RV32I-NEXT: call __extendhfsf2
; RV32I-NEXT: lui s4, 524288
; RV32I-NEXT: xor a0, a0, s4
-; RV32I-NEXT: call __truncsfhf2@plt
+; RV32I-NEXT: call __truncsfhf2
; RV32I-NEXT: mv s2, a0
; RV32I-NEXT: and a0, s1, s3
-; RV32I-NEXT: call __extendhfsf2@plt
+; RV32I-NEXT: call __extendhfsf2
; RV32I-NEXT: xor a0, a0, s4
-; RV32I-NEXT: call __truncsfhf2@plt
+; RV32I-NEXT: call __truncsfhf2
; RV32I-NEXT: mv s1, a0
; RV32I-NEXT: and a0, s0, s3
-; RV32I-NEXT: call __extendhfsf2@plt
+; RV32I-NEXT: call __extendhfsf2
; RV32I-NEXT: mv s0, a0
; RV32I-NEXT: and a0, s2, s3
-; RV32I-NEXT: call __extendhfsf2@plt
+; RV32I-NEXT: call __extendhfsf2
; RV32I-NEXT: mv s2, a0
; RV32I-NEXT: and a0, s1, s3
-; RV32I-NEXT: call __extendhfsf2@plt
+; RV32I-NEXT: call __extendhfsf2
; RV32I-NEXT: mv a2, a0
; RV32I-NEXT: mv a0, s2
; RV32I-NEXT: mv a1, s0
-; RV32I-NEXT: call fmaf@plt
-; RV32I-NEXT: call __truncsfhf2@plt
+; RV32I-NEXT: call fmaf
+; RV32I-NEXT: call __truncsfhf2
; RV32I-NEXT: lw ra, 28(sp) # 4-byte Folded Reload
; RV32I-NEXT: lw s0, 24(sp) # 4-byte Folded Reload
; RV32I-NEXT: lw s1, 20(sp) # 4-byte Folded Reload
@@ -1547,41 +1547,41 @@ define half @fnmadd_s(half %a, half %b, half %c) nounwind {
; RV64I-NEXT: lui s3, 16
; RV64I-NEXT: addiw s3, s3, -1
; RV64I-NEXT: and a0, a0, s3
-; RV64I-NEXT: call __extendhfsf2@plt
+; RV64I-NEXT: call __extendhfsf2
; RV64I-NEXT: li a1, 0
-; RV64I-NEXT: call __addsf3@plt
-; RV64I-NEXT: call __truncsfhf2@plt
+; RV64I-NEXT: call __addsf3
+; RV64I-NEXT: call __truncsfhf2
; RV64I-NEXT: mv s2, a0
; RV64I-NEXT: and a0, s1, s3
-; RV64I-NEXT: call __extendhfsf2@plt
+; RV64I-NEXT: call __extendhfsf2
; RV64I-NEXT: li a1, 0
-; RV64I-NEXT: call __addsf3@plt
-; RV64I-NEXT: call __truncsfhf2@plt
+; RV64I-NEXT: call __addsf3
+; RV64I-NEXT: call __truncsfhf2
; RV64I-NEXT: mv s1, a0
; RV64I-NEXT: and a0, s2, s3
-; RV64I-NEXT: call __extendhfsf2@plt
+; RV64I-NEXT: call __extendhfsf2
; RV64I-NEXT: lui s4, 524288
; RV64I-NEXT: xor a0, a0, s4
-; RV64I-NEXT: call __truncsfhf2@plt
+; RV64I-NEXT: call __truncsfhf2
; RV64I-NEXT: mv s2, a0
; RV64I-NEXT: and a0, s1, s3
-; RV64I-NEXT: call __extendhfsf2@plt
+; RV64I-NEXT: call __extendhfsf2
; RV64I-NEXT: xor a0, a0, s4
-; RV64I-NEXT: call __truncsfhf2@plt
+; RV64I-NEXT: call __truncsfhf2
; RV64I-NEXT: mv s1, a0
; RV64I-NEXT: and a0, s0, s3
-; RV64I-NEXT: call __extendhfsf2@plt
+; RV64I-NEXT: call __extendhfsf2
; RV64I-NEXT: mv s0, a0
; RV64I-NEXT: and a0, s2, s3
-; RV64I-NEXT: call __extendhfsf2@plt
+; RV64I-NEXT: call __extendhfsf2
; RV64I-NEXT: mv s2, a0
; RV64I-NEXT: and a0, s1, s3
-; RV64I-NEXT: call __extendhfsf2@plt
+; RV64I-NEXT: call __extendhfsf2
; RV64I-NEXT: mv a2, a0
; RV64I-NEXT: mv a0, s2
; RV64I-NEXT: mv a1, s0
-; RV64I-NEXT: call fmaf@plt
-; RV64I-NEXT: call __truncsfhf2@plt
+; RV64I-NEXT: call fmaf
+; RV64I-NEXT: call __truncsfhf2
; RV64I-NEXT: ld ra, 40(sp) # 8-byte Folded Reload
; RV64I-NEXT: ld s0, 32(sp) # 8-byte Folded Reload
; RV64I-NEXT: ld s1, 24(sp) # 8-byte Folded Reload
@@ -1691,41 +1691,41 @@ define half @fnmadd_s_2(half %a, half %b, half %c) nounwind {
; RV32I-NEXT: lui s3, 16
; RV32I-NEXT: addi s3, s3, -1
; RV32I-NEXT: and a0, a1, s3
-; RV32I-NEXT: call __extendhfsf2@plt
+; RV32I-NEXT: call __extendhfsf2
; RV32I-NEXT: li a1, 0
-; RV32I-NEXT: call __addsf3@plt
-; RV32I-NEXT: call __truncsfhf2@plt
+; RV32I-NEXT: call __addsf3
+; RV32I-NEXT: call __truncsfhf2
; RV32I-NEXT: mv s2, a0
; RV32I-NEXT: and a0, s1, s3
-; RV32I-NEXT: call __extendhfsf2@plt
+; RV32I-NEXT: call __extendhfsf2
; RV32I-NEXT: li a1, 0
-; RV32I-NEXT: call __addsf3@plt
-; RV32I-NEXT: call __truncsfhf2@plt
+; RV32I-NEXT: call __addsf3
+; RV32I-NEXT: call __truncsfhf2
; RV32I-NEXT: mv s1, a0
; RV32I-NEXT: and a0, s2, s3
-; RV32I-NEXT: call __extendhfsf2@plt
+; RV32I-NEXT: call __extendhfsf2
; RV32I-NEXT: lui s4, 524288
; RV32I-NEXT: xor a0, a0, s4
-; RV32I-NEXT: call __truncsfhf2@plt
+; RV32I-NEXT: call __truncsfhf2
; RV32I-NEXT: mv s2, a0
; RV32I-NEXT: and a0, s1, s3
-; RV32I-NEXT: call __extendhfsf2@plt
+; RV32I-NEXT: call __extendhfsf2
; RV32I-NEXT: xor a0, a0, s4
-; RV32I-NEXT: call __truncsfhf2@plt
+; RV32I-NEXT: call __truncsfhf2
; RV32I-NEXT: mv s1, a0
; RV32I-NEXT: and a0, s0, s3
-; RV32I-NEXT: call __extendhfsf2@plt
+; RV32I-NEXT: call __extendhfsf2
; RV32I-NEXT: mv s0, a0
; RV32I-NEXT: and a0, s2, s3
-; RV32I-NEXT: call __extendhfsf2@plt
+; RV32I-NEXT: call __extendhfsf2
; RV32I-NEXT: mv s2, a0
; RV32I-NEXT: and a0, s1, s3
-; RV32I-NEXT: call __extendhfsf2@plt
+; RV32I-NEXT: call __extendhfsf2
; RV32I-NEXT: mv a2, a0
; RV32I-NEXT: mv a0, s0
; RV32I-NEXT: mv a1, s2
-; RV32I-NEXT: call fmaf@plt
-; RV32I-NEXT: call __truncsfhf2@plt
+; RV32I-NEXT: call fmaf
+; RV32I-NEXT: call __truncsfhf2
; RV32I-NEXT: lw ra, 28(sp) # 4-byte Folded Reload
; RV32I-NEXT: lw s0, 24(sp) # 4-byte Folded Reload
; RV32I-NEXT: lw s1, 20(sp) # 4-byte Folded Reload
@@ -1749,41 +1749,41 @@ define half @fnmadd_s_2(half %a, half %b, half %c) nounwind {
; RV64I-NEXT: lui s3, 16
; RV64I-NEXT: addiw s3, s3, -1
; RV64I-NEXT: and a0, a1, s3
-; RV64I-NEXT: call __extendhfsf2@plt
+; RV64I-NEXT: call __extendhfsf2
; RV64I-NEXT: li a1, 0
-; RV64I-NEXT: call __addsf3@plt
-; RV64I-NEXT: call __truncsfhf2@plt
+; RV64I-NEXT: call __addsf3
+; RV64I-NEXT: call __truncsfhf2
; RV64I-NEXT: mv s2, a0
; RV64I-NEXT: and a0, s1, s3
-; RV64I-NEXT: call __extendhfsf2@plt
+; RV64I-NEXT: call __extendhfsf2
; RV64I-NEXT: li a1, 0
-; RV64I-NEXT: call __addsf3@plt
-; RV64I-NEXT: call __truncsfhf2@plt
+; RV64I-NEXT: call __addsf3
+; RV64I-NEXT: call __truncsfhf2
; RV64I-NEXT: mv s1, a0
; RV64I-NEXT: and a0, s2, s3
-; RV64I-NEXT: call __extendhfsf2@plt
+; RV64I-NEXT: call __extendhfsf2
; RV64I-NEXT: lui s4, 524288
; RV64I-NEXT: xor a0, a0, s4
-; RV64I-NEXT: call __truncsfhf2@plt
+; RV64I-NEXT: call __truncsfhf2
; RV64I-NEXT: mv s2, a0
; RV64I-NEXT: and a0, s1, s3
-; RV64I-NEXT: call __extendhfsf2@plt
+; RV64I-NEXT: call __extendhfsf2
; RV64I-NEXT: xor a0, a0, s4
-; RV64I-NEXT: call __truncsfhf2@plt
+; RV64I-NEXT: call __truncsfhf2
; RV64I-NEXT: mv s1, a0
; RV64I-NEXT: and a0, s0, s3
-; RV64I-NEXT: call __extendhfsf2@plt
+; RV64I-NEXT: call __extendhfsf2
; RV64I-NEXT: mv s0, a0
; RV64I-NEXT: and a0, s2, s3
-; RV64I-NEXT: call __extendhfsf2@plt
+; RV64I-NEXT: call __extendhfsf2
; RV64I-NEXT: mv s2, a0
; RV64I-NEXT: and a0, s1, s3
-; RV64I-NEXT: call __extendhfsf2@plt
+; RV64I-NEXT: call __extendhfsf2
; RV64I-NEXT: mv a2, a0
; RV64I-NEXT: mv a0, s0
; RV64I-NEXT: mv a1, s2
-; RV64I-NEXT: call fmaf@plt
-; RV64I-NEXT: call __truncsfhf2@plt
+; RV64I-NEXT: call fmaf
+; RV64I-NEXT: call __truncsfhf2
; RV64I-NEXT: ld ra, 40(sp) # 8-byte Folded Reload
; RV64I-NEXT: ld s0, 32(sp) # 8-byte Folded Reload
; RV64I-NEXT: ld s1, 24(sp) # 8-byte Folded Reload
@@ -1902,18 +1902,18 @@ define half @fnmadd_s_3(half %a, half %b, half %c) nounwind {
; RV32I-NEXT: lui a1, 16
; RV32I-NEXT: addi s3, a1, -1
; RV32I-NEXT: and a0, a0, s3
-; RV32I-NEXT: call __extendhfsf2@plt
+; RV32I-NEXT: call __extendhfsf2
; RV32I-NEXT: mv s2, a0
; RV32I-NEXT: and a0, s1, s3
-; RV32I-NEXT: call __extendhfsf2@plt
+; RV32I-NEXT: call __extendhfsf2
; RV32I-NEXT: mv s1, a0
; RV32I-NEXT: and a0, s0, s3
-; RV32I-NEXT: call __extendhfsf2@plt
+; RV32I-NEXT: call __extendhfsf2
; RV32I-NEXT: mv a2, a0
; RV32I-NEXT: mv a0, s2
; RV32I-NEXT: mv a1, s1
-; RV32I-NEXT: call fmaf@plt
-; RV32I-NEXT: call __truncsfhf2@plt
+; RV32I-NEXT: call fmaf
+; RV32I-NEXT: call __truncsfhf2
; RV32I-NEXT: lui a1, 1048568
; RV32I-NEXT: xor a0, a0, a1
; RV32I-NEXT: lw ra, 28(sp) # 4-byte Folded Reload
@@ -1937,18 +1937,18 @@ define half @fnmadd_s_3(half %a, half %b, half %c) nounwind {
; RV64I-NEXT: lui a1, 16
; RV64I-NEXT: addiw s3, a1, -1
; RV64I-NEXT: and a0, a0, s3
-; RV64I-NEXT: call __extendhfsf2@plt
+; RV64I-NEXT: call __extendhfsf2
; RV64I-NEXT: mv s2, a0
; RV64I-NEXT: and a0, s1, s3
-; RV64I-NEXT: call __extendhfsf2@plt
+; RV64I-NEXT: call __extendhfsf2
; RV64I-NEXT: mv s1, a0
; RV64I-NEXT: and a0, s0, s3
-; RV64I-NEXT: call __extendhfsf2@plt
+; RV64I-NEXT: call __extendhfsf2
; RV64I-NEXT: mv a2, a0
; RV64I-NEXT: mv a0, s2
; RV64I-NEXT: mv a1, s1
-; RV64I-NEXT: call fmaf@plt
-; RV64I-NEXT: call __truncsfhf2@plt
+; RV64I-NEXT: call fmaf
+; RV64I-NEXT: call __truncsfhf2
; RV64I-NEXT: lui a1, 1048568
; RV64I-NEXT: xor a0, a0, a1
; RV64I-NEXT: ld ra, 40(sp) # 8-byte Folded Reload
@@ -2033,18 +2033,18 @@ define half @fnmadd_nsz(half %a, half %b, half %c) nounwind {
; RV32I-NEXT: lui a1, 16
; RV32I-NEXT: addi s3, a1, -1
; RV32I-NEXT: and a0, a0, s3
-; RV32I-NEXT: call __extendhfsf2@plt
+; RV32I-NEXT: call __extendhfsf2
; RV32I-NEXT: mv s2, a0
; RV32I-NEXT: and a0, s1, s3
-; RV32I-NEXT: call __extendhfsf2@plt
+; RV32I-NEXT: call __extendhfsf2
; RV32I-NEXT: mv s1, a0
; RV32I-NEXT: and a0, s0, s3
-; RV32I-NEXT: call __extendhfsf2@plt
+; RV32I-NEXT: call __extendhfsf2
; RV32I-NEXT: mv a2, a0
; RV32I-NEXT: mv a0, s2
; RV32I-NEXT: mv a1, s1
-; RV32I-NEXT: call fmaf@plt
-; RV32I-NEXT: call __truncsfhf2@plt
+; RV32I-NEXT: call fmaf
+; RV32I-NEXT: call __truncsfhf2
; RV32I-NEXT: lui a1, 1048568
; RV32I-NEXT: xor a0, a0, a1
; RV32I-NEXT: lw ra, 28(sp) # 4-byte Folded Reload
@@ -2068,18 +2068,18 @@ define half @fnmadd_nsz(half %a, half %b, half %c) nounwind {
; RV64I-NEXT: lui a1, 16
; RV64I-NEXT: addiw s3, a1, -1
; RV64I-NEXT: and a0, a0, s3
-; RV64I-NEXT: call __extendhfsf2@plt
+; RV64I-NEXT: call __extendhfsf2
; RV64I-NEXT: mv s2, a0
; RV64I-NEXT: and a0, s1, s3
-; RV64I-NEXT: call __extendhfsf2@plt
+; RV64I-NEXT: call __extendhfsf2
; RV64I-NEXT: mv s1, a0
; RV64I-NEXT: and a0, s0, s3
-; RV64I-NEXT: call __extendhfsf2@plt
+; RV64I-NEXT: call __extendhfsf2
; RV64I-NEXT: mv a2, a0
; RV64I-NEXT: mv a0, s2
; RV64I-NEXT: mv a1, s1
-; RV64I-NEXT: call fmaf@plt
-; RV64I-NEXT: call __truncsfhf2@plt
+; RV64I-NEXT: call fmaf
+; RV64I-NEXT: call __truncsfhf2
; RV64I-NEXT: lui a1, 1048568
; RV64I-NEXT: xor a0, a0, a1
; RV64I-NEXT: ld ra, 40(sp) # 8-byte Folded Reload
@@ -2154,28 +2154,28 @@ define half @fnmsub_s(half %a, half %b, half %c) nounwind {
; RV32I-NEXT: lui a1, 16
; RV32I-NEXT: addi s3, a1, -1
; RV32I-NEXT: and a0, a0, s3
-; RV32I-NEXT: call __extendhfsf2@plt
+; RV32I-NEXT: call __extendhfsf2
; RV32I-NEXT: li a1, 0
-; RV32I-NEXT: call __addsf3@plt
-; RV32I-NEXT: call __truncsfhf2@plt
+; RV32I-NEXT: call __addsf3
+; RV32I-NEXT: call __truncsfhf2
; RV32I-NEXT: and a0, a0, s3
-; RV32I-NEXT: call __extendhfsf2@plt
+; RV32I-NEXT: call __extendhfsf2
; RV32I-NEXT: lui a1, 524288
; RV32I-NEXT: xor a0, a0, a1
-; RV32I-NEXT: call __truncsfhf2@plt
+; RV32I-NEXT: call __truncsfhf2
; RV32I-NEXT: mv s2, a0
; RV32I-NEXT: and a0, s1, s3
-; RV32I-NEXT: call __extendhfsf2@plt
+; RV32I-NEXT: call __extendhfsf2
; RV32I-NEXT: mv s1, a0
; RV32I-NEXT: and a0, s0, s3
-; RV32I-NEXT: call __extendhfsf2@plt
+; RV32I-NEXT: call __extendhfsf2
; RV32I-NEXT: mv s0, a0
; RV32I-NEXT: and a0, s2, s3
-; RV32I-NEXT: call __extendhfsf2@plt
+; RV32I-NEXT: call __extendhfsf2
; RV32I-NEXT: mv a1, s1
; RV32I-NEXT: mv a2, s0
-; RV32I-NEXT: call fmaf@plt
-; RV32I-NEXT: call __truncsfhf2@plt
+; RV32I-NEXT: call fmaf
+; RV32I-NEXT: call __truncsfhf2
; RV32I-NEXT: lw ra, 28(sp) # 4-byte Folded Reload
; RV32I-NEXT: lw s0, 24(sp) # 4-byte Folded Reload
; RV32I-NEXT: lw s1, 20(sp) # 4-byte Folded Reload
@@ -2197,28 +2197,28 @@ define half @fnmsub_s(half %a, half %b, half %c) nounwind {
; RV64I-NEXT: lui a1, 16
; RV64I-NEXT: addiw s3, a1, -1
; RV64I-NEXT: and a0, a0, s3
-; RV64I-NEXT: call __extendhfsf2@plt
+; RV64I-NEXT: call __extendhfsf2
; RV64I-NEXT: li a1, 0
-; RV64I-NEXT: call __addsf3@plt
-; RV64I-NEXT: call __truncsfhf2@plt
+; RV64I-NEXT: call __addsf3
+; RV64I-NEXT: call __truncsfhf2
; RV64I-NEXT: and a0, a0, s3
-; RV64I-NEXT: call __extendhfsf2@plt
+; RV64I-NEXT: call __extendhfsf2
; RV64I-NEXT: lui a1, 524288
; RV64I-NEXT: xor a0, a0, a1
-; RV64I-NEXT: call __truncsfhf2@plt
+; RV64I-NEXT: call __truncsfhf2
; RV64I-NEXT: mv s2, a0
; RV64I-NEXT: and a0, s1, s3
-; RV64I-NEXT: call __extendhfsf2@plt
+; RV64I-NEXT: call __extendhfsf2
; RV64I-NEXT: mv s1, a0
; RV64I-NEXT: and a0, s0, s3
-; RV64I-NEXT: call __extendhfsf2@plt
+; RV64I-NEXT: call __extendhfsf2
; RV64I-NEXT: mv s0, a0
; RV64I-NEXT: and a0, s2, s3
-; RV64I-NEXT: call __extendhfsf2@plt
+; RV64I-NEXT: call __extendhfsf2
; RV64I-NEXT: mv a1, s1
; RV64I-NEXT: mv a2, s0
-; RV64I-NEXT: call fmaf@plt
-; RV64I-NEXT: call __truncsfhf2@plt
+; RV64I-NEXT: call fmaf
+; RV64I-NEXT: call __truncsfhf2
; RV64I-NEXT: ld ra, 40(sp) # 8-byte Folded Reload
; RV64I-NEXT: ld s0, 32(sp) # 8-byte Folded Reload
; RV64I-NEXT: ld s1, 24(sp) # 8-byte Folded Reload
@@ -2304,29 +2304,29 @@ define half @fnmsub_s_2(half %a, half %b, half %c) nounwind {
; RV32I-NEXT: lui a0, 16
; RV32I-NEXT: addi s3, a0, -1
; RV32I-NEXT: and a0, a1, s3
-; RV32I-NEXT: call __extendhfsf2@plt
+; RV32I-NEXT: call __extendhfsf2
; RV32I-NEXT: li a1, 0
-; RV32I-NEXT: call __addsf3@plt
-; RV32I-NEXT: call __truncsfhf2@plt
+; RV32I-NEXT: call __addsf3
+; RV32I-NEXT: call __truncsfhf2
; RV32I-NEXT: and a0, a0, s3
-; RV32I-NEXT: call __extendhfsf2@plt
+; RV32I-NEXT: call __extendhfsf2
; RV32I-NEXT: lui a1, 524288
; RV32I-NEXT: xor a0, a0, a1
-; RV32I-NEXT: call __truncsfhf2@plt
+; RV32I-NEXT: call __truncsfhf2
; RV32I-NEXT: mv s2, a0
; RV32I-NEXT: and a0, s1, s3
-; RV32I-NEXT: call __extendhfsf2@plt
+; RV32I-NEXT: call __extendhfsf2
; RV32I-NEXT: mv s1, a0
; RV32I-NEXT: and a0, s0, s3
-; RV32I-NEXT: call __extendhfsf2@plt
+; RV32I-NEXT: call __extendhfsf2
; RV32I-NEXT: mv s0, a0
; RV32I-NEXT: and a0, s2, s3
-; RV32I-NEXT: call __extendhfsf2@plt
+; RV32I-NEXT: call __extendhfsf2
; RV32I-NEXT: mv a1, a0
; RV32I-NEXT: mv a0, s1
; RV32I-NEXT: mv a2, s0
-; RV32I-NEXT: call fmaf@plt
-; RV32I-NEXT: call __truncsfhf2@plt
+; RV32I-NEXT: call fmaf
+; RV32I-NEXT: call __truncsfhf2
; RV32I-NEXT: lw ra, 28(sp) # 4-byte Folded Reload
; RV32I-NEXT: lw s0, 24(sp) # 4-byte Folded Reload
; RV32I-NEXT: lw s1, 20(sp) # 4-byte Folded Reload
@@ -2348,29 +2348,29 @@ define half @fnmsub_s_2(half %a, half %b, half %c) nounwind {
; RV64I-NEXT: lui a0, 16
; RV64I-NEXT: addiw s3, a0, -1
; RV64I-NEXT: and a0, a1, s3
-; RV64I-NEXT: call __extendhfsf2@plt
+; RV64I-NEXT: call __extendhfsf2
; RV64I-NEXT: li a1, 0
-; RV64I-NEXT: call __addsf3@plt
-; RV64I-NEXT: call __truncsfhf2@plt
+; RV64I-NEXT: call __addsf3
+; RV64I-NEXT: call __truncsfhf2
; RV64I-NEXT: and a0, a0, s3
-; RV64I-NEXT: call __extendhfsf2@plt
+; RV64I-NEXT: call __extendhfsf2
; RV64I-NEXT: lui a1, 524288
; RV64I-NEXT: xor a0, a0, a1
-; RV64I-NEXT: call __truncsfhf2@plt
+; RV64I-NEXT: call __truncsfhf2
; RV64I-NEXT: mv s2, a0
; RV64I-NEXT: and a0, s1, s3
-; RV64I-NEXT: call __extendhfsf2@plt
+; RV64I-NEXT: call __extendhfsf2
; RV64I-NEXT: mv s1, a0
; RV64I-NEXT: and a0, s0, s3
-; RV64I-NEXT: call __extendhfsf2@plt
+; RV64I-NEXT: call __extendhfsf2
; RV64I-NEXT: mv s0, a0
; RV64I-NEXT: and a0, s2, s3
-; RV64I-NEXT: call __extendhfsf2@plt
+; RV64I-NEXT: call __extendhfsf2
; RV64I-NEXT: mv a1, a0
; RV64I-NEXT: mv a0, s1
; RV64I-NEXT: mv a2, s0
-; RV64I-NEXT: call fmaf@plt
-; RV64I-NEXT: call __truncsfhf2@plt
+; RV64I-NEXT: call fmaf
+; RV64I-NEXT: call __truncsfhf2
; RV64I-NEXT: ld ra, 40(sp) # 8-byte Folded Reload
; RV64I-NEXT: ld s0, 32(sp) # 8-byte Folded Reload
; RV64I-NEXT: ld s1, 24(sp) # 8-byte Folded Reload
@@ -2453,23 +2453,23 @@ define half @fmadd_s_contract(half %a, half %b, half %c) nounwind {
; RV32I-NEXT: lui a1, 16
; RV32I-NEXT: addi s3, a1, -1
; RV32I-NEXT: and a0, a0, s3
-; RV32I-NEXT: call __extendhfsf2@plt
+; RV32I-NEXT: call __extendhfsf2
; RV32I-NEXT: mv s2, a0
; RV32I-NEXT: and a0, s1, s3
-; RV32I-NEXT: call __extendhfsf2@plt
+; RV32I-NEXT: call __extendhfsf2
; RV32I-NEXT: mv a1, a0
; RV32I-NEXT: mv a0, s2
-; RV32I-NEXT: call __mulsf3@plt
-; RV32I-NEXT: call __truncsfhf2@plt
+; RV32I-NEXT: call __mulsf3
+; RV32I-NEXT: call __truncsfhf2
; RV32I-NEXT: mv s1, a0
; RV32I-NEXT: and a0, s0, s3
-; RV32I-NEXT: call __extendhfsf2@plt
+; RV32I-NEXT: call __extendhfsf2
; RV32I-NEXT: mv s0, a0
; RV32I-NEXT: and a0, s1, s3
-; RV32I-NEXT: call __extendhfsf2@plt
+; RV32I-NEXT: call __extendhfsf2
; RV32I-NEXT: mv a1, s0
-; RV32I-NEXT: call __addsf3@plt
-; RV32I-NEXT: call __truncsfhf2@plt
+; RV32I-NEXT: call __addsf3
+; RV32I-NEXT: call __truncsfhf2
; RV32I-NEXT: lw ra, 28(sp) # 4-byte Folded Reload
; RV32I-NEXT: lw s0, 24(sp) # 4-byte Folded Reload
; RV32I-NEXT: lw s1, 20(sp) # 4-byte Folded Reload
@@ -2491,23 +2491,23 @@ define half @fmadd_s_contract(half %a, half %b, half %c) nounwind {
; RV64I-NEXT: lui a1, 16
; RV64I-NEXT: addiw s3, a1, -1
; RV64I-NEXT: and a0, a0, s3
-; RV64I-NEXT: call __extendhfsf2@plt
+; RV64I-NEXT: call __extendhfsf2
; RV64I-NEXT: mv s2, a0
; RV64I-NEXT: and a0, s1, s3
-; RV64I-NEXT: call __extendhfsf2@plt
+; RV64I-NEXT: call __extendhfsf2
; RV64I-NEXT: mv a1, a0
; RV64I-NEXT: mv a0, s2
-; RV64I-NEXT: call __mulsf3@plt
-; RV64I-NEXT: call __truncsfhf2@plt
+; RV64I-NEXT: call __mulsf3
+; RV64I-NEXT: call __truncsfhf2
; RV64I-NEXT: mv s1, a0
; RV64I-NEXT: and a0, s0, s3
-; RV64I-NEXT: call __extendhfsf2@plt
+; RV64I-NEXT: call __extendhfsf2
; RV64I-NEXT: mv s0, a0
; RV64I-NEXT: and a0, s1, s3
-; RV64I-NEXT: call __extendhfsf2@plt
+; RV64I-NEXT: call __extendhfsf2
; RV64I-NEXT: mv a1, s0
-; RV64I-NEXT: call __addsf3@plt
-; RV64I-NEXT: call __truncsfhf2@plt
+; RV64I-NEXT: call __addsf3
+; RV64I-NEXT: call __truncsfhf2
; RV64I-NEXT: ld ra, 40(sp) # 8-byte Folded Reload
; RV64I-NEXT: ld s0, 32(sp) # 8-byte Folded Reload
; RV64I-NEXT: ld s1, 24(sp) # 8-byte Folded Reload
@@ -2582,29 +2582,29 @@ define half @fmsub_s_contract(half %a, half %b, half %c) nounwind {
; RV32I-NEXT: lui a0, 16
; RV32I-NEXT: addi s3, a0, -1
; RV32I-NEXT: and a0, a2, s3
-; RV32I-NEXT: call __extendhfsf2@plt
+; RV32I-NEXT: call __extendhfsf2
; RV32I-NEXT: li a1, 0
-; RV32I-NEXT: call __addsf3@plt
-; RV32I-NEXT: call __truncsfhf2@plt
+; RV32I-NEXT: call __addsf3
+; RV32I-NEXT: call __truncsfhf2
; RV32I-NEXT: mv s2, a0
; RV32I-NEXT: and a0, s1, s3
-; RV32I-NEXT: call __extendhfsf2@plt
+; RV32I-NEXT: call __extendhfsf2
; RV32I-NEXT: mv s1, a0
; RV32I-NEXT: and a0, s0, s3
-; RV32I-NEXT: call __extendhfsf2@plt
+; RV32I-NEXT: call __extendhfsf2
; RV32I-NEXT: mv a1, a0
; RV32I-NEXT: mv a0, s1
-; RV32I-NEXT: call __mulsf3@plt
-; RV32I-NEXT: call __truncsfhf2@plt
+; RV32I-NEXT: call __mulsf3
+; RV32I-NEXT: call __truncsfhf2
; RV32I-NEXT: and a0, a0, s3
-; RV32I-NEXT: call __extendhfsf2@plt
+; RV32I-NEXT: call __extendhfsf2
; RV32I-NEXT: mv s0, a0
; RV32I-NEXT: and a0, s2, s3
-; RV32I-NEXT: call __extendhfsf2@plt
+; RV32I-NEXT: call __extendhfsf2
; RV32I-NEXT: mv a1, a0
; RV32I-NEXT: mv a0, s0
-; RV32I-NEXT: call __subsf3@plt
-; RV32I-NEXT: call __truncsfhf2@plt
+; RV32I-NEXT: call __subsf3
+; RV32I-NEXT: call __truncsfhf2
; RV32I-NEXT: lw ra, 28(sp) # 4-byte Folded Reload
; RV32I-NEXT: lw s0, 24(sp) # 4-byte Folded Reload
; RV32I-NEXT: lw s1, 20(sp) # 4-byte Folded Reload
@@ -2626,29 +2626,29 @@ define half @fmsub_s_contract(half %a, half %b, half %c) nounwind {
; RV64I-NEXT: lui a0, 16
; RV64I-NEXT: addiw s3, a0, -1
; RV64I-NEXT: and a0, a2, s3
-; RV64I-NEXT: call __extendhfsf2@plt
+; RV64I-NEXT: call __extendhfsf2
; RV64I-NEXT: li a1, 0
-; RV64I-NEXT: call __addsf3@plt
-; RV64I-NEXT: call __truncsfhf2@plt
+; RV64I-NEXT: call __addsf3
+; RV64I-NEXT: call __truncsfhf2
; RV64I-NEXT: mv s2, a0
; RV64I-NEXT: and a0, s1, s3
-; RV64I-NEXT: call __extendhfsf2@plt
+; RV64I-NEXT: call __extendhfsf2
; RV64I-NEXT: mv s1, a0
; RV64I-NEXT: and a0, s0, s3
-; RV64I-NEXT: call __extendhfsf2@plt
+; RV64I-NEXT: call __extendhfsf2
; RV64I-NEXT: mv a1, a0
; RV64I-NEXT: mv a0, s1
-; RV64I-NEXT: call __mulsf3@plt
-; RV64I-NEXT: call __truncsfhf2@plt
+; RV64I-NEXT: call __mulsf3
+; RV64I-NEXT: call __truncsfhf2
; RV64I-NEXT: and a0, a0, s3
-; RV64I-NEXT: call __extendhfsf2@plt
+; RV64I-NEXT: call __extendhfsf2
; RV64I-NEXT: mv s0, a0
; RV64I-NEXT: and a0, s2, s3
-; RV64I-NEXT: call __extendhfsf2@plt
+; RV64I-NEXT: call __extendhfsf2
; RV64I-NEXT: mv a1, a0
; RV64I-NEXT: mv a0, s0
-; RV64I-NEXT: call __subsf3@plt
-; RV64I-NEXT: call __truncsfhf2@plt
+; RV64I-NEXT: call __subsf3
+; RV64I-NEXT: call __truncsfhf2
; RV64I-NEXT: ld ra, 40(sp) # 8-byte Folded Reload
; RV64I-NEXT: ld s0, 32(sp) # 8-byte Folded Reload
; RV64I-NEXT: ld s1, 24(sp) # 8-byte Folded Reload
@@ -2738,46 +2738,46 @@ define half @fnmadd_s_contract(half %a, half %b, half %c) nounwind {
; RV32I-NEXT: lui s3, 16
; RV32I-NEXT: addi s3, s3, -1
; RV32I-NEXT: and a0, a0, s3
-; RV32I-NEXT: call __extendhfsf2@plt
+; RV32I-NEXT: call __extendhfsf2
; RV32I-NEXT: li a1, 0
-; RV32I-NEXT: call __addsf3@plt
-; RV32I-NEXT: call __truncsfhf2@plt
+; RV32I-NEXT: call __addsf3
+; RV32I-NEXT: call __truncsfhf2
; RV32I-NEXT: mv s2, a0
; RV32I-NEXT: and a0, s1, s3
-; RV32I-NEXT: call __extendhfsf2@plt
+; RV32I-NEXT: call __extendhfsf2
; RV32I-NEXT: li a1, 0
-; RV32I-NEXT: call __addsf3@plt
-; RV32I-NEXT: call __truncsfhf2@plt
+; RV32I-NEXT: call __addsf3
+; RV32I-NEXT: call __truncsfhf2
; RV32I-NEXT: mv s1, a0
; RV32I-NEXT: and a0, s0, s3
-; RV32I-NEXT: call __extendhfsf2@plt
+; RV32I-NEXT: call __extendhfsf2
; RV32I-NEXT: li a1, 0
-; RV32I-NEXT: call __addsf3@plt
-; RV32I-NEXT: call __truncsfhf2@plt
+; RV32I-NEXT: call __addsf3
+; RV32I-NEXT: call __truncsfhf2
; RV32I-NEXT: mv s0, a0
; RV32I-NEXT: and a0, s2, s3
-; RV32I-NEXT: call __extendhfsf2@plt
+; RV32I-NEXT: call __extendhfsf2
; RV32I-NEXT: mv s2, a0
; RV32I-NEXT: and a0, s1, s3
-; RV32I-NEXT: call __extendhfsf2@plt
+; RV32I-NEXT: call __extendhfsf2
; RV32I-NEXT: mv a1, a0
; RV32I-NEXT: mv a0, s2
-; RV32I-NEXT: call __mulsf3@plt
-; RV32I-NEXT: call __truncsfhf2@plt
+; RV32I-NEXT: call __mulsf3
+; RV32I-NEXT: call __truncsfhf2
; RV32I-NEXT: and a0, a0, s3
-; RV32I-NEXT: call __extendhfsf2@plt
+; RV32I-NEXT: call __extendhfsf2
; RV32I-NEXT: lui a1, 524288
; RV32I-NEXT: xor a0, a0, a1
-; RV32I-NEXT: call __truncsfhf2@plt
+; RV32I-NEXT: call __truncsfhf2
; RV32I-NEXT: mv s1, a0
; RV32I-NEXT: and a0, s0, s3
-; RV32I-NEXT: call __extendhfsf2@plt
+; RV32I-NEXT: call __extendhfsf2
; RV32I-NEXT: mv s0, a0
; RV32I-NEXT: and a0, s1, s3
-; RV32I-NEXT: call __extendhfsf2@plt
+; RV32I-NEXT: call __extendhfsf2
; RV32I-NEXT: mv a1, s0
-; RV32I-NEXT: call __subsf3@plt
-; RV32I-NEXT: call __truncsfhf2@plt
+; RV32I-NEXT: call __subsf3
+; RV32I-NEXT: call __truncsfhf2
; RV32I-NEXT: lw ra, 28(sp) # 4-byte Folded Reload
; RV32I-NEXT: lw s0, 24(sp) # 4-byte Folded Reload
; RV32I-NEXT: lw s1, 20(sp) # 4-byte Folded Reload
@@ -2799,46 +2799,46 @@ define half @fnmadd_s_contract(half %a, half %b, half %c) nounwind {
; RV64I-NEXT: lui s3, 16
; RV64I-NEXT: addiw s3, s3, -1
; RV64I-NEXT: and a0, a0, s3
-; RV64I-NEXT: call __extendhfsf2@plt
+; RV64I-NEXT: call __extendhfsf2
; RV64I-NEXT: li a1, 0
-; RV64I-NEXT: call __addsf3@plt
-; RV64I-NEXT: call __truncsfhf2@plt
+; RV64I-NEXT: call __addsf3
+; RV64I-NEXT: call __truncsfhf2
; RV64I-NEXT: mv s2, a0
; RV64I-NEXT: and a0, s1, s3
-; RV64I-NEXT: call __extendhfsf2@plt
+; RV64I-NEXT: call __extendhfsf2
; RV64I-NEXT: li a1, 0
-; RV64I-NEXT: call __addsf3@plt
-; RV64I-NEXT: call __truncsfhf2@plt
+; RV64I-NEXT: call __addsf3
+; RV64I-NEXT: call __truncsfhf2
; RV64I-NEXT: mv s1, a0
; RV64I-NEXT: and a0, s0, s3
-; RV64I-NEXT: call __extendhfsf2@plt
+; RV64I-NEXT: call __extendhfsf2
; RV64I-NEXT: li a1, 0
-; RV64I-NEXT: call __addsf3@plt
-; RV64I-NEXT: call __truncsfhf2@plt
+; RV64I-NEXT: call __addsf3
+; RV64I-NEXT: call __truncsfhf2
; RV64I-NEXT: mv s0, a0
; RV64I-NEXT: and a0, s2, s3
-; RV64I-NEXT: call __extendhfsf2@plt
+; RV64I-NEXT: call __extendhfsf2
; RV64I-NEXT: mv s2, a0
; RV64I-NEXT: and a0, s1, s3
-; RV64I-NEXT: call __extendhfsf2@plt
+; RV64I-NEXT: call __extendhfsf2
; RV64I-NEXT: mv a1, a0
; RV64I-NEXT: mv a0, s2
-; RV64I-NEXT: call __mulsf3@plt
-; RV64I-NEXT: call __truncsfhf2@plt
+; RV64I-NEXT: call __mulsf3
+; RV64I-NEXT: call __truncsfhf2
; RV64I-NEXT: and a0, a0, s3
-; RV64I-NEXT: call __extendhfsf2@plt
+; RV64I-NEXT: call __extendhfsf2
; RV64I-NEXT: lui a1, 524288
; RV64I-NEXT: xor a0, a0, a1
-; RV64I-NEXT: call __truncsfhf2@plt
+; RV64I-NEXT: call __truncsfhf2
; RV64I-NEXT: mv s1, a0
; RV64I-NEXT: and a0, s0, s3
-; RV64I-NEXT: call __extendhfsf2@plt
+; RV64I-NEXT: call __extendhfsf2
; RV64I-NEXT: mv s0, a0
; RV64I-NEXT: and a0, s1, s3
-; RV64I-NEXT: call __extendhfsf2@plt
+; RV64I-NEXT: call __extendhfsf2
; RV64I-NEXT: mv a1, s0
-; RV64I-NEXT: call __subsf3@plt
-; RV64I-NEXT: call __truncsfhf2@plt
+; RV64I-NEXT: call __subsf3
+; RV64I-NEXT: call __truncsfhf2
; RV64I-NEXT: ld ra, 40(sp) # 8-byte Folded Reload
; RV64I-NEXT: ld s0, 32(sp) # 8-byte Folded Reload
; RV64I-NEXT: ld s1, 24(sp) # 8-byte Folded Reload
@@ -2956,36 +2956,36 @@ define half @fnmsub_s_contract(half %a, half %b, half %c) nounwind {
; RV32I-NEXT: lui s3, 16
; RV32I-NEXT: addi s3, s3, -1
; RV32I-NEXT: and a0, a0, s3
-; RV32I-NEXT: call __extendhfsf2@plt
+; RV32I-NEXT: call __extendhfsf2
; RV32I-NEXT: li a1, 0
-; RV32I-NEXT: call __addsf3@plt
-; RV32I-NEXT: call __truncsfhf2@plt
+; RV32I-NEXT: call __addsf3
+; RV32I-NEXT: call __truncsfhf2
; RV32I-NEXT: mv s2, a0
; RV32I-NEXT: and a0, s1, s3
-; RV32I-NEXT: call __extendhfsf2@plt
+; RV32I-NEXT: call __extendhfsf2
; RV32I-NEXT: li a1, 0
-; RV32I-NEXT: call __addsf3@plt
-; RV32I-NEXT: call __truncsfhf2@plt
+; RV32I-NEXT: call __addsf3
+; RV32I-NEXT: call __truncsfhf2
; RV32I-NEXT: mv s1, a0
; RV32I-NEXT: and a0, s2, s3
-; RV32I-NEXT: call __extendhfsf2@plt
+; RV32I-NEXT: call __extendhfsf2
; RV32I-NEXT: mv s2, a0
; RV32I-NEXT: and a0, s1, s3
-; RV32I-NEXT: call __extendhfsf2@plt
+; RV32I-NEXT: call __extendhfsf2
; RV32I-NEXT: mv a1, a0
; RV32I-NEXT: mv a0, s2
-; RV32I-NEXT: call __mulsf3@plt
-; RV32I-NEXT: call __truncsfhf2@plt
+; RV32I-NEXT: call __mulsf3
+; RV32I-NEXT: call __truncsfhf2
; RV32I-NEXT: mv s1, a0
; RV32I-NEXT: and a0, s0, s3
-; RV32I-NEXT: call __extendhfsf2@plt
+; RV32I-NEXT: call __extendhfsf2
; RV32I-NEXT: mv s0, a0
; RV32I-NEXT: and a0, s1, s3
-; RV32I-NEXT: call __extendhfsf2@plt
+; RV32I-NEXT: call __extendhfsf2
; RV32I-NEXT: mv a1, a0
; RV32I-NEXT: mv a0, s0
-; RV32I-NEXT: call __subsf3@plt
-; RV32I-NEXT: call __truncsfhf2@plt
+; RV32I-NEXT: call __subsf3
+; RV32I-NEXT: call __truncsfhf2
; RV32I-NEXT: lw ra, 28(sp) # 4-byte Folded Reload
; RV32I-NEXT: lw s0, 24(sp) # 4-byte Folded Reload
; RV32I-NEXT: lw s1, 20(sp) # 4-byte Folded Reload
@@ -3007,36 +3007,36 @@ define half @fnmsub_s_contract(half %a, half %b, half %c) nounwind {
; RV64I-NEXT: lui s3, 16
; RV64I-NEXT: addiw s3, s3, -1
; RV64I-NEXT: and a0, a0, s3
-; RV64I-NEXT: call __extendhfsf2@plt
+; RV64I-NEXT: call __extendhfsf2
; RV64I-NEXT: li a1, 0
-; RV64I-NEXT: call __addsf3@plt
-; RV64I-NEXT: call __truncsfhf2@plt
+; RV64I-NEXT: call __addsf3
+; RV64I-NEXT: call __truncsfhf2
; RV64I-NEXT: mv s2, a0
; RV64I-NEXT: and a0, s1, s3
-; RV64I-NEXT: call __extendhfsf2@plt
+; RV64I-NEXT: call __extendhfsf2
; RV64I-NEXT: li a1, 0
-; RV64I-NEXT: call __addsf3@plt
-; RV64I-NEXT: call __truncsfhf2@plt
+; RV64I-NEXT: call __addsf3
+; RV64I-NEXT: call __truncsfhf2
; RV64I-NEXT: mv s1, a0
; RV64I-NEXT: and a0, s2, s3
-; RV64I-NEXT: call __extendhfsf2@plt
+; RV64I-NEXT: call __extendhfsf2
; RV64I-NEXT: mv s2, a0
; RV64I-NEXT: and a0, s1, s3
-; RV64I-NEXT: call __extendhfsf2@plt
+; RV64I-NEXT: call __extendhfsf2
; RV64I-NEXT: mv a1, a0
; RV64I-NEXT: mv a0, s2
-; RV64I-NEXT: call __mulsf3@plt
-; RV64I-NEXT: call __truncsfhf2@plt
+; RV64I-NEXT: call __mulsf3
+; RV64I-NEXT: call __truncsfhf2
; RV64I-NEXT: mv s1, a0
; RV64I-NEXT: and a0, s0, s3
-; RV64I-NEXT: call __extendhfsf2@plt
+; RV64I-NEXT: call __extendhfsf2
; RV64I-NEXT: mv s0, a0
; RV64I-NEXT: and a0, s1, s3
-; RV64I-NEXT: call __extendhfsf2@plt
+; RV64I-NEXT: call __extendhfsf2
; RV64I-NEXT: mv a1, a0
; RV64I-NEXT: mv a0, s0
-; RV64I-NEXT: call __subsf3@plt
-; RV64I-NEXT: call __truncsfhf2@plt
+; RV64I-NEXT: call __subsf3
+; RV64I-NEXT: call __truncsfhf2
; RV64I-NEXT: ld ra, 40(sp) # 8-byte Folded Reload
; RV64I-NEXT: ld s0, 32(sp) # 8-byte Folded Reload
; RV64I-NEXT: ld s1, 24(sp) # 8-byte Folded Reload
diff --git a/llvm/test/CodeGen/RISCV/half-br-fcmp.ll b/llvm/test/CodeGen/RISCV/half-br-fcmp.ll
index f4d632449acd..6699ee947937 100644
--- a/llvm/test/CodeGen/RISCV/half-br-fcmp.ll
+++ b/llvm/test/CodeGen/RISCV/half-br-fcmp.ll
@@ -30,7 +30,7 @@ define void @br_fcmp_false(half %a, half %b) nounwind {
; RV32IZFH-NEXT: .LBB0_2: # %if.else
; RV32IZFH-NEXT: addi sp, sp, -16
; RV32IZFH-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IZFH-NEXT: call abort@plt
+; RV32IZFH-NEXT: call abort
;
; RV64IZFH-LABEL: br_fcmp_false:
; RV64IZFH: # %bb.0:
@@ -41,7 +41,7 @@ define void @br_fcmp_false(half %a, half %b) nounwind {
; RV64IZFH-NEXT: .LBB0_2: # %if.else
; RV64IZFH-NEXT: addi sp, sp, -16
; RV64IZFH-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
-; RV64IZFH-NEXT: call abort@plt
+; RV64IZFH-NEXT: call abort
;
; RV32IZHINX-LABEL: br_fcmp_false:
; RV32IZHINX: # %bb.0:
@@ -52,7 +52,7 @@ define void @br_fcmp_false(half %a, half %b) nounwind {
; RV32IZHINX-NEXT: .LBB0_2: # %if.else
; RV32IZHINX-NEXT: addi sp, sp, -16
; RV32IZHINX-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IZHINX-NEXT: call abort@plt
+; RV32IZHINX-NEXT: call abort
;
; RV64IZHINX-LABEL: br_fcmp_false:
; RV64IZHINX: # %bb.0:
@@ -63,7 +63,7 @@ define void @br_fcmp_false(half %a, half %b) nounwind {
; RV64IZHINX-NEXT: .LBB0_2: # %if.else
; RV64IZHINX-NEXT: addi sp, sp, -16
; RV64IZHINX-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
-; RV64IZHINX-NEXT: call abort@plt
+; RV64IZHINX-NEXT: call abort
;
; RV32IZFHMIN-LABEL: br_fcmp_false:
; RV32IZFHMIN: # %bb.0:
@@ -74,7 +74,7 @@ define void @br_fcmp_false(half %a, half %b) nounwind {
; RV32IZFHMIN-NEXT: .LBB0_2: # %if.else
; RV32IZFHMIN-NEXT: addi sp, sp, -16
; RV32IZFHMIN-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IZFHMIN-NEXT: call abort@plt
+; RV32IZFHMIN-NEXT: call abort
;
; RV64IZFHMIN-LABEL: br_fcmp_false:
; RV64IZFHMIN: # %bb.0:
@@ -85,7 +85,7 @@ define void @br_fcmp_false(half %a, half %b) nounwind {
; RV64IZFHMIN-NEXT: .LBB0_2: # %if.else
; RV64IZFHMIN-NEXT: addi sp, sp, -16
; RV64IZFHMIN-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
-; RV64IZFHMIN-NEXT: call abort@plt
+; RV64IZFHMIN-NEXT: call abort
;
; RV32IZHINXMIN-LABEL: br_fcmp_false:
; RV32IZHINXMIN: # %bb.0:
@@ -96,7 +96,7 @@ define void @br_fcmp_false(half %a, half %b) nounwind {
; RV32IZHINXMIN-NEXT: .LBB0_2: # %if.else
; RV32IZHINXMIN-NEXT: addi sp, sp, -16
; RV32IZHINXMIN-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IZHINXMIN-NEXT: call abort@plt
+; RV32IZHINXMIN-NEXT: call abort
;
; RV64IZHINXMIN-LABEL: br_fcmp_false:
; RV64IZHINXMIN: # %bb.0:
@@ -107,7 +107,7 @@ define void @br_fcmp_false(half %a, half %b) nounwind {
; RV64IZHINXMIN-NEXT: .LBB0_2: # %if.else
; RV64IZHINXMIN-NEXT: addi sp, sp, -16
; RV64IZHINXMIN-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
-; RV64IZHINXMIN-NEXT: call abort@plt
+; RV64IZHINXMIN-NEXT: call abort
%1 = fcmp false half %a, %b
br i1 %1, label %if.then, label %if.else
if.then:
@@ -127,7 +127,7 @@ define void @br_fcmp_oeq(half %a, half %b) nounwind {
; RV32IZFH-NEXT: .LBB1_2: # %if.then
; RV32IZFH-NEXT: addi sp, sp, -16
; RV32IZFH-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IZFH-NEXT: call abort@plt
+; RV32IZFH-NEXT: call abort
;
; RV64IZFH-LABEL: br_fcmp_oeq:
; RV64IZFH: # %bb.0:
@@ -138,7 +138,7 @@ define void @br_fcmp_oeq(half %a, half %b) nounwind {
; RV64IZFH-NEXT: .LBB1_2: # %if.then
; RV64IZFH-NEXT: addi sp, sp, -16
; RV64IZFH-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
-; RV64IZFH-NEXT: call abort@plt
+; RV64IZFH-NEXT: call abort
;
; RV32IZHINX-LABEL: br_fcmp_oeq:
; RV32IZHINX: # %bb.0:
@@ -149,7 +149,7 @@ define void @br_fcmp_oeq(half %a, half %b) nounwind {
; RV32IZHINX-NEXT: .LBB1_2: # %if.then
; RV32IZHINX-NEXT: addi sp, sp, -16
; RV32IZHINX-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IZHINX-NEXT: call abort@plt
+; RV32IZHINX-NEXT: call abort
;
; RV64IZHINX-LABEL: br_fcmp_oeq:
; RV64IZHINX: # %bb.0:
@@ -160,7 +160,7 @@ define void @br_fcmp_oeq(half %a, half %b) nounwind {
; RV64IZHINX-NEXT: .LBB1_2: # %if.then
; RV64IZHINX-NEXT: addi sp, sp, -16
; RV64IZHINX-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
-; RV64IZHINX-NEXT: call abort@plt
+; RV64IZHINX-NEXT: call abort
;
; RV32IZFHMIN-LABEL: br_fcmp_oeq:
; RV32IZFHMIN: # %bb.0:
@@ -173,7 +173,7 @@ define void @br_fcmp_oeq(half %a, half %b) nounwind {
; RV32IZFHMIN-NEXT: .LBB1_2: # %if.then
; RV32IZFHMIN-NEXT: addi sp, sp, -16
; RV32IZFHMIN-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IZFHMIN-NEXT: call abort@plt
+; RV32IZFHMIN-NEXT: call abort
;
; RV64IZFHMIN-LABEL: br_fcmp_oeq:
; RV64IZFHMIN: # %bb.0:
@@ -186,7 +186,7 @@ define void @br_fcmp_oeq(half %a, half %b) nounwind {
; RV64IZFHMIN-NEXT: .LBB1_2: # %if.then
; RV64IZFHMIN-NEXT: addi sp, sp, -16
; RV64IZFHMIN-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
-; RV64IZFHMIN-NEXT: call abort@plt
+; RV64IZFHMIN-NEXT: call abort
;
; RV32IZHINXMIN-LABEL: br_fcmp_oeq:
; RV32IZHINXMIN: # %bb.0:
@@ -199,7 +199,7 @@ define void @br_fcmp_oeq(half %a, half %b) nounwind {
; RV32IZHINXMIN-NEXT: .LBB1_2: # %if.then
; RV32IZHINXMIN-NEXT: addi sp, sp, -16
; RV32IZHINXMIN-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IZHINXMIN-NEXT: call abort@plt
+; RV32IZHINXMIN-NEXT: call abort
;
; RV64IZHINXMIN-LABEL: br_fcmp_oeq:
; RV64IZHINXMIN: # %bb.0:
@@ -212,7 +212,7 @@ define void @br_fcmp_oeq(half %a, half %b) nounwind {
; RV64IZHINXMIN-NEXT: .LBB1_2: # %if.then
; RV64IZHINXMIN-NEXT: addi sp, sp, -16
; RV64IZHINXMIN-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
-; RV64IZHINXMIN-NEXT: call abort@plt
+; RV64IZHINXMIN-NEXT: call abort
%1 = fcmp oeq half %a, %b
br i1 %1, label %if.then, label %if.else
if.else:
@@ -235,7 +235,7 @@ define void @br_fcmp_oeq_alt(half %a, half %b) nounwind {
; RV32IZFH-NEXT: .LBB2_2: # %if.then
; RV32IZFH-NEXT: addi sp, sp, -16
; RV32IZFH-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IZFH-NEXT: call abort@plt
+; RV32IZFH-NEXT: call abort
;
; RV64IZFH-LABEL: br_fcmp_oeq_alt:
; RV64IZFH: # %bb.0:
@@ -246,7 +246,7 @@ define void @br_fcmp_oeq_alt(half %a, half %b) nounwind {
; RV64IZFH-NEXT: .LBB2_2: # %if.then
; RV64IZFH-NEXT: addi sp, sp, -16
; RV64IZFH-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
-; RV64IZFH-NEXT: call abort@plt
+; RV64IZFH-NEXT: call abort
;
; RV32IZHINX-LABEL: br_fcmp_oeq_alt:
; RV32IZHINX: # %bb.0:
@@ -257,7 +257,7 @@ define void @br_fcmp_oeq_alt(half %a, half %b) nounwind {
; RV32IZHINX-NEXT: .LBB2_2: # %if.then
; RV32IZHINX-NEXT: addi sp, sp, -16
; RV32IZHINX-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IZHINX-NEXT: call abort@plt
+; RV32IZHINX-NEXT: call abort
;
; RV64IZHINX-LABEL: br_fcmp_oeq_alt:
; RV64IZHINX: # %bb.0:
@@ -268,7 +268,7 @@ define void @br_fcmp_oeq_alt(half %a, half %b) nounwind {
; RV64IZHINX-NEXT: .LBB2_2: # %if.then
; RV64IZHINX-NEXT: addi sp, sp, -16
; RV64IZHINX-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
-; RV64IZHINX-NEXT: call abort@plt
+; RV64IZHINX-NEXT: call abort
;
; RV32IZFHMIN-LABEL: br_fcmp_oeq_alt:
; RV32IZFHMIN: # %bb.0:
@@ -281,7 +281,7 @@ define void @br_fcmp_oeq_alt(half %a, half %b) nounwind {
; RV32IZFHMIN-NEXT: .LBB2_2: # %if.then
; RV32IZFHMIN-NEXT: addi sp, sp, -16
; RV32IZFHMIN-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IZFHMIN-NEXT: call abort@plt
+; RV32IZFHMIN-NEXT: call abort
;
; RV64IZFHMIN-LABEL: br_fcmp_oeq_alt:
; RV64IZFHMIN: # %bb.0:
@@ -294,7 +294,7 @@ define void @br_fcmp_oeq_alt(half %a, half %b) nounwind {
; RV64IZFHMIN-NEXT: .LBB2_2: # %if.then
; RV64IZFHMIN-NEXT: addi sp, sp, -16
; RV64IZFHMIN-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
-; RV64IZFHMIN-NEXT: call abort@plt
+; RV64IZFHMIN-NEXT: call abort
;
; RV32IZHINXMIN-LABEL: br_fcmp_oeq_alt:
; RV32IZHINXMIN: # %bb.0:
@@ -307,7 +307,7 @@ define void @br_fcmp_oeq_alt(half %a, half %b) nounwind {
; RV32IZHINXMIN-NEXT: .LBB2_2: # %if.then
; RV32IZHINXMIN-NEXT: addi sp, sp, -16
; RV32IZHINXMIN-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IZHINXMIN-NEXT: call abort@plt
+; RV32IZHINXMIN-NEXT: call abort
;
; RV64IZHINXMIN-LABEL: br_fcmp_oeq_alt:
; RV64IZHINXMIN: # %bb.0:
@@ -320,7 +320,7 @@ define void @br_fcmp_oeq_alt(half %a, half %b) nounwind {
; RV64IZHINXMIN-NEXT: .LBB2_2: # %if.then
; RV64IZHINXMIN-NEXT: addi sp, sp, -16
; RV64IZHINXMIN-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
-; RV64IZHINXMIN-NEXT: call abort@plt
+; RV64IZHINXMIN-NEXT: call abort
%1 = fcmp oeq half %a, %b
br i1 %1, label %if.then, label %if.else
if.then:
@@ -340,7 +340,7 @@ define void @br_fcmp_ogt(half %a, half %b) nounwind {
; RV32IZFH-NEXT: .LBB3_2: # %if.then
; RV32IZFH-NEXT: addi sp, sp, -16
; RV32IZFH-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IZFH-NEXT: call abort@plt
+; RV32IZFH-NEXT: call abort
;
; RV64IZFH-LABEL: br_fcmp_ogt:
; RV64IZFH: # %bb.0:
@@ -351,7 +351,7 @@ define void @br_fcmp_ogt(half %a, half %b) nounwind {
; RV64IZFH-NEXT: .LBB3_2: # %if.then
; RV64IZFH-NEXT: addi sp, sp, -16
; RV64IZFH-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
-; RV64IZFH-NEXT: call abort@plt
+; RV64IZFH-NEXT: call abort
;
; RV32IZHINX-LABEL: br_fcmp_ogt:
; RV32IZHINX: # %bb.0:
@@ -362,7 +362,7 @@ define void @br_fcmp_ogt(half %a, half %b) nounwind {
; RV32IZHINX-NEXT: .LBB3_2: # %if.then
; RV32IZHINX-NEXT: addi sp, sp, -16
; RV32IZHINX-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IZHINX-NEXT: call abort@plt
+; RV32IZHINX-NEXT: call abort
;
; RV64IZHINX-LABEL: br_fcmp_ogt:
; RV64IZHINX: # %bb.0:
@@ -373,7 +373,7 @@ define void @br_fcmp_ogt(half %a, half %b) nounwind {
; RV64IZHINX-NEXT: .LBB3_2: # %if.then
; RV64IZHINX-NEXT: addi sp, sp, -16
; RV64IZHINX-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
-; RV64IZHINX-NEXT: call abort@plt
+; RV64IZHINX-NEXT: call abort
;
; RV32IZFHMIN-LABEL: br_fcmp_ogt:
; RV32IZFHMIN: # %bb.0:
@@ -386,7 +386,7 @@ define void @br_fcmp_ogt(half %a, half %b) nounwind {
; RV32IZFHMIN-NEXT: .LBB3_2: # %if.then
; RV32IZFHMIN-NEXT: addi sp, sp, -16
; RV32IZFHMIN-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IZFHMIN-NEXT: call abort@plt
+; RV32IZFHMIN-NEXT: call abort
;
; RV64IZFHMIN-LABEL: br_fcmp_ogt:
; RV64IZFHMIN: # %bb.0:
@@ -399,7 +399,7 @@ define void @br_fcmp_ogt(half %a, half %b) nounwind {
; RV64IZFHMIN-NEXT: .LBB3_2: # %if.then
; RV64IZFHMIN-NEXT: addi sp, sp, -16
; RV64IZFHMIN-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
-; RV64IZFHMIN-NEXT: call abort@plt
+; RV64IZFHMIN-NEXT: call abort
;
; RV32IZHINXMIN-LABEL: br_fcmp_ogt:
; RV32IZHINXMIN: # %bb.0:
@@ -412,7 +412,7 @@ define void @br_fcmp_ogt(half %a, half %b) nounwind {
; RV32IZHINXMIN-NEXT: .LBB3_2: # %if.then
; RV32IZHINXMIN-NEXT: addi sp, sp, -16
; RV32IZHINXMIN-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IZHINXMIN-NEXT: call abort@plt
+; RV32IZHINXMIN-NEXT: call abort
;
; RV64IZHINXMIN-LABEL: br_fcmp_ogt:
; RV64IZHINXMIN: # %bb.0:
@@ -425,7 +425,7 @@ define void @br_fcmp_ogt(half %a, half %b) nounwind {
; RV64IZHINXMIN-NEXT: .LBB3_2: # %if.then
; RV64IZHINXMIN-NEXT: addi sp, sp, -16
; RV64IZHINXMIN-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
-; RV64IZHINXMIN-NEXT: call abort@plt
+; RV64IZHINXMIN-NEXT: call abort
%1 = fcmp ogt half %a, %b
br i1 %1, label %if.then, label %if.else
if.else:
@@ -445,7 +445,7 @@ define void @br_fcmp_oge(half %a, half %b) nounwind {
; RV32IZFH-NEXT: .LBB4_2: # %if.then
; RV32IZFH-NEXT: addi sp, sp, -16
; RV32IZFH-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IZFH-NEXT: call abort@plt
+; RV32IZFH-NEXT: call abort
;
; RV64IZFH-LABEL: br_fcmp_oge:
; RV64IZFH: # %bb.0:
@@ -456,7 +456,7 @@ define void @br_fcmp_oge(half %a, half %b) nounwind {
; RV64IZFH-NEXT: .LBB4_2: # %if.then
; RV64IZFH-NEXT: addi sp, sp, -16
; RV64IZFH-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
-; RV64IZFH-NEXT: call abort@plt
+; RV64IZFH-NEXT: call abort
;
; RV32IZHINX-LABEL: br_fcmp_oge:
; RV32IZHINX: # %bb.0:
@@ -467,7 +467,7 @@ define void @br_fcmp_oge(half %a, half %b) nounwind {
; RV32IZHINX-NEXT: .LBB4_2: # %if.then
; RV32IZHINX-NEXT: addi sp, sp, -16
; RV32IZHINX-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IZHINX-NEXT: call abort@plt
+; RV32IZHINX-NEXT: call abort
;
; RV64IZHINX-LABEL: br_fcmp_oge:
; RV64IZHINX: # %bb.0:
@@ -478,7 +478,7 @@ define void @br_fcmp_oge(half %a, half %b) nounwind {
; RV64IZHINX-NEXT: .LBB4_2: # %if.then
; RV64IZHINX-NEXT: addi sp, sp, -16
; RV64IZHINX-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
-; RV64IZHINX-NEXT: call abort@plt
+; RV64IZHINX-NEXT: call abort
;
; RV32IZFHMIN-LABEL: br_fcmp_oge:
; RV32IZFHMIN: # %bb.0:
@@ -491,7 +491,7 @@ define void @br_fcmp_oge(half %a, half %b) nounwind {
; RV32IZFHMIN-NEXT: .LBB4_2: # %if.then
; RV32IZFHMIN-NEXT: addi sp, sp, -16
; RV32IZFHMIN-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IZFHMIN-NEXT: call abort@plt
+; RV32IZFHMIN-NEXT: call abort
;
; RV64IZFHMIN-LABEL: br_fcmp_oge:
; RV64IZFHMIN: # %bb.0:
@@ -504,7 +504,7 @@ define void @br_fcmp_oge(half %a, half %b) nounwind {
; RV64IZFHMIN-NEXT: .LBB4_2: # %if.then
; RV64IZFHMIN-NEXT: addi sp, sp, -16
; RV64IZFHMIN-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
-; RV64IZFHMIN-NEXT: call abort@plt
+; RV64IZFHMIN-NEXT: call abort
;
; RV32IZHINXMIN-LABEL: br_fcmp_oge:
; RV32IZHINXMIN: # %bb.0:
@@ -517,7 +517,7 @@ define void @br_fcmp_oge(half %a, half %b) nounwind {
; RV32IZHINXMIN-NEXT: .LBB4_2: # %if.then
; RV32IZHINXMIN-NEXT: addi sp, sp, -16
; RV32IZHINXMIN-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IZHINXMIN-NEXT: call abort@plt
+; RV32IZHINXMIN-NEXT: call abort
;
; RV64IZHINXMIN-LABEL: br_fcmp_oge:
; RV64IZHINXMIN: # %bb.0:
@@ -530,7 +530,7 @@ define void @br_fcmp_oge(half %a, half %b) nounwind {
; RV64IZHINXMIN-NEXT: .LBB4_2: # %if.then
; RV64IZHINXMIN-NEXT: addi sp, sp, -16
; RV64IZHINXMIN-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
-; RV64IZHINXMIN-NEXT: call abort@plt
+; RV64IZHINXMIN-NEXT: call abort
%1 = fcmp oge half %a, %b
br i1 %1, label %if.then, label %if.else
if.else:
@@ -550,7 +550,7 @@ define void @br_fcmp_olt(half %a, half %b) nounwind {
; RV32IZFH-NEXT: .LBB5_2: # %if.then
; RV32IZFH-NEXT: addi sp, sp, -16
; RV32IZFH-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IZFH-NEXT: call abort@plt
+; RV32IZFH-NEXT: call abort
;
; RV64IZFH-LABEL: br_fcmp_olt:
; RV64IZFH: # %bb.0:
@@ -561,7 +561,7 @@ define void @br_fcmp_olt(half %a, half %b) nounwind {
; RV64IZFH-NEXT: .LBB5_2: # %if.then
; RV64IZFH-NEXT: addi sp, sp, -16
; RV64IZFH-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
-; RV64IZFH-NEXT: call abort@plt
+; RV64IZFH-NEXT: call abort
;
; RV32IZHINX-LABEL: br_fcmp_olt:
; RV32IZHINX: # %bb.0:
@@ -572,7 +572,7 @@ define void @br_fcmp_olt(half %a, half %b) nounwind {
; RV32IZHINX-NEXT: .LBB5_2: # %if.then
; RV32IZHINX-NEXT: addi sp, sp, -16
; RV32IZHINX-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IZHINX-NEXT: call abort@plt
+; RV32IZHINX-NEXT: call abort
;
; RV64IZHINX-LABEL: br_fcmp_olt:
; RV64IZHINX: # %bb.0:
@@ -583,7 +583,7 @@ define void @br_fcmp_olt(half %a, half %b) nounwind {
; RV64IZHINX-NEXT: .LBB5_2: # %if.then
; RV64IZHINX-NEXT: addi sp, sp, -16
; RV64IZHINX-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
-; RV64IZHINX-NEXT: call abort@plt
+; RV64IZHINX-NEXT: call abort
;
; RV32IZFHMIN-LABEL: br_fcmp_olt:
; RV32IZFHMIN: # %bb.0:
@@ -596,7 +596,7 @@ define void @br_fcmp_olt(half %a, half %b) nounwind {
; RV32IZFHMIN-NEXT: .LBB5_2: # %if.then
; RV32IZFHMIN-NEXT: addi sp, sp, -16
; RV32IZFHMIN-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IZFHMIN-NEXT: call abort@plt
+; RV32IZFHMIN-NEXT: call abort
;
; RV64IZFHMIN-LABEL: br_fcmp_olt:
; RV64IZFHMIN: # %bb.0:
@@ -609,7 +609,7 @@ define void @br_fcmp_olt(half %a, half %b) nounwind {
; RV64IZFHMIN-NEXT: .LBB5_2: # %if.then
; RV64IZFHMIN-NEXT: addi sp, sp, -16
; RV64IZFHMIN-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
-; RV64IZFHMIN-NEXT: call abort@plt
+; RV64IZFHMIN-NEXT: call abort
;
; RV32IZHINXMIN-LABEL: br_fcmp_olt:
; RV32IZHINXMIN: # %bb.0:
@@ -622,7 +622,7 @@ define void @br_fcmp_olt(half %a, half %b) nounwind {
; RV32IZHINXMIN-NEXT: .LBB5_2: # %if.then
; RV32IZHINXMIN-NEXT: addi sp, sp, -16
; RV32IZHINXMIN-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IZHINXMIN-NEXT: call abort@plt
+; RV32IZHINXMIN-NEXT: call abort
;
; RV64IZHINXMIN-LABEL: br_fcmp_olt:
; RV64IZHINXMIN: # %bb.0:
@@ -635,7 +635,7 @@ define void @br_fcmp_olt(half %a, half %b) nounwind {
; RV64IZHINXMIN-NEXT: .LBB5_2: # %if.then
; RV64IZHINXMIN-NEXT: addi sp, sp, -16
; RV64IZHINXMIN-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
-; RV64IZHINXMIN-NEXT: call abort@plt
+; RV64IZHINXMIN-NEXT: call abort
%1 = fcmp olt half %a, %b
br i1 %1, label %if.then, label %if.else
if.else:
@@ -655,7 +655,7 @@ define void @br_fcmp_ole(half %a, half %b) nounwind {
; RV32IZFH-NEXT: .LBB6_2: # %if.then
; RV32IZFH-NEXT: addi sp, sp, -16
; RV32IZFH-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IZFH-NEXT: call abort@plt
+; RV32IZFH-NEXT: call abort
;
; RV64IZFH-LABEL: br_fcmp_ole:
; RV64IZFH: # %bb.0:
@@ -666,7 +666,7 @@ define void @br_fcmp_ole(half %a, half %b) nounwind {
; RV64IZFH-NEXT: .LBB6_2: # %if.then
; RV64IZFH-NEXT: addi sp, sp, -16
; RV64IZFH-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
-; RV64IZFH-NEXT: call abort@plt
+; RV64IZFH-NEXT: call abort
;
; RV32IZHINX-LABEL: br_fcmp_ole:
; RV32IZHINX: # %bb.0:
@@ -677,7 +677,7 @@ define void @br_fcmp_ole(half %a, half %b) nounwind {
; RV32IZHINX-NEXT: .LBB6_2: # %if.then
; RV32IZHINX-NEXT: addi sp, sp, -16
; RV32IZHINX-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IZHINX-NEXT: call abort@plt
+; RV32IZHINX-NEXT: call abort
;
; RV64IZHINX-LABEL: br_fcmp_ole:
; RV64IZHINX: # %bb.0:
@@ -688,7 +688,7 @@ define void @br_fcmp_ole(half %a, half %b) nounwind {
; RV64IZHINX-NEXT: .LBB6_2: # %if.then
; RV64IZHINX-NEXT: addi sp, sp, -16
; RV64IZHINX-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
-; RV64IZHINX-NEXT: call abort@plt
+; RV64IZHINX-NEXT: call abort
;
; RV32IZFHMIN-LABEL: br_fcmp_ole:
; RV32IZFHMIN: # %bb.0:
@@ -701,7 +701,7 @@ define void @br_fcmp_ole(half %a, half %b) nounwind {
; RV32IZFHMIN-NEXT: .LBB6_2: # %if.then
; RV32IZFHMIN-NEXT: addi sp, sp, -16
; RV32IZFHMIN-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IZFHMIN-NEXT: call abort@plt
+; RV32IZFHMIN-NEXT: call abort
;
; RV64IZFHMIN-LABEL: br_fcmp_ole:
; RV64IZFHMIN: # %bb.0:
@@ -714,7 +714,7 @@ define void @br_fcmp_ole(half %a, half %b) nounwind {
; RV64IZFHMIN-NEXT: .LBB6_2: # %if.then
; RV64IZFHMIN-NEXT: addi sp, sp, -16
; RV64IZFHMIN-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
-; RV64IZFHMIN-NEXT: call abort@plt
+; RV64IZFHMIN-NEXT: call abort
;
; RV32IZHINXMIN-LABEL: br_fcmp_ole:
; RV32IZHINXMIN: # %bb.0:
@@ -727,7 +727,7 @@ define void @br_fcmp_ole(half %a, half %b) nounwind {
; RV32IZHINXMIN-NEXT: .LBB6_2: # %if.then
; RV32IZHINXMIN-NEXT: addi sp, sp, -16
; RV32IZHINXMIN-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IZHINXMIN-NEXT: call abort@plt
+; RV32IZHINXMIN-NEXT: call abort
;
; RV64IZHINXMIN-LABEL: br_fcmp_ole:
; RV64IZHINXMIN: # %bb.0:
@@ -740,7 +740,7 @@ define void @br_fcmp_ole(half %a, half %b) nounwind {
; RV64IZHINXMIN-NEXT: .LBB6_2: # %if.then
; RV64IZHINXMIN-NEXT: addi sp, sp, -16
; RV64IZHINXMIN-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
-; RV64IZHINXMIN-NEXT: call abort@plt
+; RV64IZHINXMIN-NEXT: call abort
%1 = fcmp ole half %a, %b
br i1 %1, label %if.then, label %if.else
if.else:
@@ -762,7 +762,7 @@ define void @br_fcmp_one(half %a, half %b) nounwind {
; RV32IZFH-NEXT: .LBB7_2: # %if.then
; RV32IZFH-NEXT: addi sp, sp, -16
; RV32IZFH-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IZFH-NEXT: call abort@plt
+; RV32IZFH-NEXT: call abort
;
; RV64IZFH-LABEL: br_fcmp_one:
; RV64IZFH: # %bb.0:
@@ -775,7 +775,7 @@ define void @br_fcmp_one(half %a, half %b) nounwind {
; RV64IZFH-NEXT: .LBB7_2: # %if.then
; RV64IZFH-NEXT: addi sp, sp, -16
; RV64IZFH-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
-; RV64IZFH-NEXT: call abort@plt
+; RV64IZFH-NEXT: call abort
;
; RV32IZHINX-LABEL: br_fcmp_one:
; RV32IZHINX: # %bb.0:
@@ -788,7 +788,7 @@ define void @br_fcmp_one(half %a, half %b) nounwind {
; RV32IZHINX-NEXT: .LBB7_2: # %if.then
; RV32IZHINX-NEXT: addi sp, sp, -16
; RV32IZHINX-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IZHINX-NEXT: call abort@plt
+; RV32IZHINX-NEXT: call abort
;
; RV64IZHINX-LABEL: br_fcmp_one:
; RV64IZHINX: # %bb.0:
@@ -801,7 +801,7 @@ define void @br_fcmp_one(half %a, half %b) nounwind {
; RV64IZHINX-NEXT: .LBB7_2: # %if.then
; RV64IZHINX-NEXT: addi sp, sp, -16
; RV64IZHINX-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
-; RV64IZHINX-NEXT: call abort@plt
+; RV64IZHINX-NEXT: call abort
;
; RV32IZFHMIN-LABEL: br_fcmp_one:
; RV32IZFHMIN: # %bb.0:
@@ -816,7 +816,7 @@ define void @br_fcmp_one(half %a, half %b) nounwind {
; RV32IZFHMIN-NEXT: .LBB7_2: # %if.then
; RV32IZFHMIN-NEXT: addi sp, sp, -16
; RV32IZFHMIN-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IZFHMIN-NEXT: call abort@plt
+; RV32IZFHMIN-NEXT: call abort
;
; RV64IZFHMIN-LABEL: br_fcmp_one:
; RV64IZFHMIN: # %bb.0:
@@ -831,7 +831,7 @@ define void @br_fcmp_one(half %a, half %b) nounwind {
; RV64IZFHMIN-NEXT: .LBB7_2: # %if.then
; RV64IZFHMIN-NEXT: addi sp, sp, -16
; RV64IZFHMIN-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
-; RV64IZFHMIN-NEXT: call abort@plt
+; RV64IZFHMIN-NEXT: call abort
;
; RV32IZHINXMIN-LABEL: br_fcmp_one:
; RV32IZHINXMIN: # %bb.0:
@@ -846,7 +846,7 @@ define void @br_fcmp_one(half %a, half %b) nounwind {
; RV32IZHINXMIN-NEXT: .LBB7_2: # %if.then
; RV32IZHINXMIN-NEXT: addi sp, sp, -16
; RV32IZHINXMIN-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IZHINXMIN-NEXT: call abort@plt
+; RV32IZHINXMIN-NEXT: call abort
;
; RV64IZHINXMIN-LABEL: br_fcmp_one:
; RV64IZHINXMIN: # %bb.0:
@@ -861,7 +861,7 @@ define void @br_fcmp_one(half %a, half %b) nounwind {
; RV64IZHINXMIN-NEXT: .LBB7_2: # %if.then
; RV64IZHINXMIN-NEXT: addi sp, sp, -16
; RV64IZHINXMIN-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
-; RV64IZHINXMIN-NEXT: call abort@plt
+; RV64IZHINXMIN-NEXT: call abort
%1 = fcmp one half %a, %b
br i1 %1, label %if.then, label %if.else
if.else:
@@ -883,7 +883,7 @@ define void @br_fcmp_ord(half %a, half %b) nounwind {
; RV32IZFH-NEXT: .LBB8_2: # %if.then
; RV32IZFH-NEXT: addi sp, sp, -16
; RV32IZFH-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IZFH-NEXT: call abort@plt
+; RV32IZFH-NEXT: call abort
;
; RV64IZFH-LABEL: br_fcmp_ord:
; RV64IZFH: # %bb.0:
@@ -896,7 +896,7 @@ define void @br_fcmp_ord(half %a, half %b) nounwind {
; RV64IZFH-NEXT: .LBB8_2: # %if.then
; RV64IZFH-NEXT: addi sp, sp, -16
; RV64IZFH-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
-; RV64IZFH-NEXT: call abort@plt
+; RV64IZFH-NEXT: call abort
;
; RV32IZHINX-LABEL: br_fcmp_ord:
; RV32IZHINX: # %bb.0:
@@ -909,7 +909,7 @@ define void @br_fcmp_ord(half %a, half %b) nounwind {
; RV32IZHINX-NEXT: .LBB8_2: # %if.then
; RV32IZHINX-NEXT: addi sp, sp, -16
; RV32IZHINX-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IZHINX-NEXT: call abort@plt
+; RV32IZHINX-NEXT: call abort
;
; RV64IZHINX-LABEL: br_fcmp_ord:
; RV64IZHINX: # %bb.0:
@@ -922,7 +922,7 @@ define void @br_fcmp_ord(half %a, half %b) nounwind {
; RV64IZHINX-NEXT: .LBB8_2: # %if.then
; RV64IZHINX-NEXT: addi sp, sp, -16
; RV64IZHINX-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
-; RV64IZHINX-NEXT: call abort@plt
+; RV64IZHINX-NEXT: call abort
;
; RV32IZFHMIN-LABEL: br_fcmp_ord:
; RV32IZFHMIN: # %bb.0:
@@ -937,7 +937,7 @@ define void @br_fcmp_ord(half %a, half %b) nounwind {
; RV32IZFHMIN-NEXT: .LBB8_2: # %if.then
; RV32IZFHMIN-NEXT: addi sp, sp, -16
; RV32IZFHMIN-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IZFHMIN-NEXT: call abort@plt
+; RV32IZFHMIN-NEXT: call abort
;
; RV64IZFHMIN-LABEL: br_fcmp_ord:
; RV64IZFHMIN: # %bb.0:
@@ -952,7 +952,7 @@ define void @br_fcmp_ord(half %a, half %b) nounwind {
; RV64IZFHMIN-NEXT: .LBB8_2: # %if.then
; RV64IZFHMIN-NEXT: addi sp, sp, -16
; RV64IZFHMIN-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
-; RV64IZFHMIN-NEXT: call abort@plt
+; RV64IZFHMIN-NEXT: call abort
;
; RV32IZHINXMIN-LABEL: br_fcmp_ord:
; RV32IZHINXMIN: # %bb.0:
@@ -967,7 +967,7 @@ define void @br_fcmp_ord(half %a, half %b) nounwind {
; RV32IZHINXMIN-NEXT: .LBB8_2: # %if.then
; RV32IZHINXMIN-NEXT: addi sp, sp, -16
; RV32IZHINXMIN-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IZHINXMIN-NEXT: call abort@plt
+; RV32IZHINXMIN-NEXT: call abort
;
; RV64IZHINXMIN-LABEL: br_fcmp_ord:
; RV64IZHINXMIN: # %bb.0:
@@ -982,7 +982,7 @@ define void @br_fcmp_ord(half %a, half %b) nounwind {
; RV64IZHINXMIN-NEXT: .LBB8_2: # %if.then
; RV64IZHINXMIN-NEXT: addi sp, sp, -16
; RV64IZHINXMIN-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
-; RV64IZHINXMIN-NEXT: call abort@plt
+; RV64IZHINXMIN-NEXT: call abort
%1 = fcmp ord half %a, %b
br i1 %1, label %if.then, label %if.else
if.else:
@@ -1004,7 +1004,7 @@ define void @br_fcmp_ueq(half %a, half %b) nounwind {
; RV32IZFH-NEXT: .LBB9_2: # %if.then
; RV32IZFH-NEXT: addi sp, sp, -16
; RV32IZFH-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IZFH-NEXT: call abort@plt
+; RV32IZFH-NEXT: call abort
;
; RV64IZFH-LABEL: br_fcmp_ueq:
; RV64IZFH: # %bb.0:
@@ -1017,7 +1017,7 @@ define void @br_fcmp_ueq(half %a, half %b) nounwind {
; RV64IZFH-NEXT: .LBB9_2: # %if.then
; RV64IZFH-NEXT: addi sp, sp, -16
; RV64IZFH-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
-; RV64IZFH-NEXT: call abort@plt
+; RV64IZFH-NEXT: call abort
;
; RV32IZHINX-LABEL: br_fcmp_ueq:
; RV32IZHINX: # %bb.0:
@@ -1030,7 +1030,7 @@ define void @br_fcmp_ueq(half %a, half %b) nounwind {
; RV32IZHINX-NEXT: .LBB9_2: # %if.then
; RV32IZHINX-NEXT: addi sp, sp, -16
; RV32IZHINX-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IZHINX-NEXT: call abort@plt
+; RV32IZHINX-NEXT: call abort
;
; RV64IZHINX-LABEL: br_fcmp_ueq:
; RV64IZHINX: # %bb.0:
@@ -1043,7 +1043,7 @@ define void @br_fcmp_ueq(half %a, half %b) nounwind {
; RV64IZHINX-NEXT: .LBB9_2: # %if.then
; RV64IZHINX-NEXT: addi sp, sp, -16
; RV64IZHINX-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
-; RV64IZHINX-NEXT: call abort@plt
+; RV64IZHINX-NEXT: call abort
;
; RV32IZFHMIN-LABEL: br_fcmp_ueq:
; RV32IZFHMIN: # %bb.0:
@@ -1058,7 +1058,7 @@ define void @br_fcmp_ueq(half %a, half %b) nounwind {
; RV32IZFHMIN-NEXT: .LBB9_2: # %if.then
; RV32IZFHMIN-NEXT: addi sp, sp, -16
; RV32IZFHMIN-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IZFHMIN-NEXT: call abort@plt
+; RV32IZFHMIN-NEXT: call abort
;
; RV64IZFHMIN-LABEL: br_fcmp_ueq:
; RV64IZFHMIN: # %bb.0:
@@ -1073,7 +1073,7 @@ define void @br_fcmp_ueq(half %a, half %b) nounwind {
; RV64IZFHMIN-NEXT: .LBB9_2: # %if.then
; RV64IZFHMIN-NEXT: addi sp, sp, -16
; RV64IZFHMIN-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
-; RV64IZFHMIN-NEXT: call abort@plt
+; RV64IZFHMIN-NEXT: call abort
;
; RV32IZHINXMIN-LABEL: br_fcmp_ueq:
; RV32IZHINXMIN: # %bb.0:
@@ -1088,7 +1088,7 @@ define void @br_fcmp_ueq(half %a, half %b) nounwind {
; RV32IZHINXMIN-NEXT: .LBB9_2: # %if.then
; RV32IZHINXMIN-NEXT: addi sp, sp, -16
; RV32IZHINXMIN-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IZHINXMIN-NEXT: call abort@plt
+; RV32IZHINXMIN-NEXT: call abort
;
; RV64IZHINXMIN-LABEL: br_fcmp_ueq:
; RV64IZHINXMIN: # %bb.0:
@@ -1103,7 +1103,7 @@ define void @br_fcmp_ueq(half %a, half %b) nounwind {
; RV64IZHINXMIN-NEXT: .LBB9_2: # %if.then
; RV64IZHINXMIN-NEXT: addi sp, sp, -16
; RV64IZHINXMIN-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
-; RV64IZHINXMIN-NEXT: call abort@plt
+; RV64IZHINXMIN-NEXT: call abort
%1 = fcmp ueq half %a, %b
br i1 %1, label %if.then, label %if.else
if.else:
@@ -1123,7 +1123,7 @@ define void @br_fcmp_ugt(half %a, half %b) nounwind {
; RV32IZFH-NEXT: .LBB10_2: # %if.then
; RV32IZFH-NEXT: addi sp, sp, -16
; RV32IZFH-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IZFH-NEXT: call abort@plt
+; RV32IZFH-NEXT: call abort
;
; RV64IZFH-LABEL: br_fcmp_ugt:
; RV64IZFH: # %bb.0:
@@ -1134,7 +1134,7 @@ define void @br_fcmp_ugt(half %a, half %b) nounwind {
; RV64IZFH-NEXT: .LBB10_2: # %if.then
; RV64IZFH-NEXT: addi sp, sp, -16
; RV64IZFH-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
-; RV64IZFH-NEXT: call abort@plt
+; RV64IZFH-NEXT: call abort
;
; RV32IZHINX-LABEL: br_fcmp_ugt:
; RV32IZHINX: # %bb.0:
@@ -1145,7 +1145,7 @@ define void @br_fcmp_ugt(half %a, half %b) nounwind {
; RV32IZHINX-NEXT: .LBB10_2: # %if.then
; RV32IZHINX-NEXT: addi sp, sp, -16
; RV32IZHINX-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IZHINX-NEXT: call abort@plt
+; RV32IZHINX-NEXT: call abort
;
; RV64IZHINX-LABEL: br_fcmp_ugt:
; RV64IZHINX: # %bb.0:
@@ -1156,7 +1156,7 @@ define void @br_fcmp_ugt(half %a, half %b) nounwind {
; RV64IZHINX-NEXT: .LBB10_2: # %if.then
; RV64IZHINX-NEXT: addi sp, sp, -16
; RV64IZHINX-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
-; RV64IZHINX-NEXT: call abort@plt
+; RV64IZHINX-NEXT: call abort
;
; RV32IZFHMIN-LABEL: br_fcmp_ugt:
; RV32IZFHMIN: # %bb.0:
@@ -1169,7 +1169,7 @@ define void @br_fcmp_ugt(half %a, half %b) nounwind {
; RV32IZFHMIN-NEXT: .LBB10_2: # %if.then
; RV32IZFHMIN-NEXT: addi sp, sp, -16
; RV32IZFHMIN-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IZFHMIN-NEXT: call abort@plt
+; RV32IZFHMIN-NEXT: call abort
;
; RV64IZFHMIN-LABEL: br_fcmp_ugt:
; RV64IZFHMIN: # %bb.0:
@@ -1182,7 +1182,7 @@ define void @br_fcmp_ugt(half %a, half %b) nounwind {
; RV64IZFHMIN-NEXT: .LBB10_2: # %if.then
; RV64IZFHMIN-NEXT: addi sp, sp, -16
; RV64IZFHMIN-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
-; RV64IZFHMIN-NEXT: call abort@plt
+; RV64IZFHMIN-NEXT: call abort
;
; RV32IZHINXMIN-LABEL: br_fcmp_ugt:
; RV32IZHINXMIN: # %bb.0:
@@ -1195,7 +1195,7 @@ define void @br_fcmp_ugt(half %a, half %b) nounwind {
; RV32IZHINXMIN-NEXT: .LBB10_2: # %if.then
; RV32IZHINXMIN-NEXT: addi sp, sp, -16
; RV32IZHINXMIN-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IZHINXMIN-NEXT: call abort@plt
+; RV32IZHINXMIN-NEXT: call abort
;
; RV64IZHINXMIN-LABEL: br_fcmp_ugt:
; RV64IZHINXMIN: # %bb.0:
@@ -1208,7 +1208,7 @@ define void @br_fcmp_ugt(half %a, half %b) nounwind {
; RV64IZHINXMIN-NEXT: .LBB10_2: # %if.then
; RV64IZHINXMIN-NEXT: addi sp, sp, -16
; RV64IZHINXMIN-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
-; RV64IZHINXMIN-NEXT: call abort@plt
+; RV64IZHINXMIN-NEXT: call abort
%1 = fcmp ugt half %a, %b
br i1 %1, label %if.then, label %if.else
if.else:
@@ -1228,7 +1228,7 @@ define void @br_fcmp_uge(half %a, half %b) nounwind {
; RV32IZFH-NEXT: .LBB11_2: # %if.then
; RV32IZFH-NEXT: addi sp, sp, -16
; RV32IZFH-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IZFH-NEXT: call abort@plt
+; RV32IZFH-NEXT: call abort
;
; RV64IZFH-LABEL: br_fcmp_uge:
; RV64IZFH: # %bb.0:
@@ -1239,7 +1239,7 @@ define void @br_fcmp_uge(half %a, half %b) nounwind {
; RV64IZFH-NEXT: .LBB11_2: # %if.then
; RV64IZFH-NEXT: addi sp, sp, -16
; RV64IZFH-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
-; RV64IZFH-NEXT: call abort@plt
+; RV64IZFH-NEXT: call abort
;
; RV32IZHINX-LABEL: br_fcmp_uge:
; RV32IZHINX: # %bb.0:
@@ -1250,7 +1250,7 @@ define void @br_fcmp_uge(half %a, half %b) nounwind {
; RV32IZHINX-NEXT: .LBB11_2: # %if.then
; RV32IZHINX-NEXT: addi sp, sp, -16
; RV32IZHINX-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IZHINX-NEXT: call abort@plt
+; RV32IZHINX-NEXT: call abort
;
; RV64IZHINX-LABEL: br_fcmp_uge:
; RV64IZHINX: # %bb.0:
@@ -1261,7 +1261,7 @@ define void @br_fcmp_uge(half %a, half %b) nounwind {
; RV64IZHINX-NEXT: .LBB11_2: # %if.then
; RV64IZHINX-NEXT: addi sp, sp, -16
; RV64IZHINX-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
-; RV64IZHINX-NEXT: call abort@plt
+; RV64IZHINX-NEXT: call abort
;
; RV32IZFHMIN-LABEL: br_fcmp_uge:
; RV32IZFHMIN: # %bb.0:
@@ -1274,7 +1274,7 @@ define void @br_fcmp_uge(half %a, half %b) nounwind {
; RV32IZFHMIN-NEXT: .LBB11_2: # %if.then
; RV32IZFHMIN-NEXT: addi sp, sp, -16
; RV32IZFHMIN-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IZFHMIN-NEXT: call abort@plt
+; RV32IZFHMIN-NEXT: call abort
;
; RV64IZFHMIN-LABEL: br_fcmp_uge:
; RV64IZFHMIN: # %bb.0:
@@ -1287,7 +1287,7 @@ define void @br_fcmp_uge(half %a, half %b) nounwind {
; RV64IZFHMIN-NEXT: .LBB11_2: # %if.then
; RV64IZFHMIN-NEXT: addi sp, sp, -16
; RV64IZFHMIN-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
-; RV64IZFHMIN-NEXT: call abort@plt
+; RV64IZFHMIN-NEXT: call abort
;
; RV32IZHINXMIN-LABEL: br_fcmp_uge:
; RV32IZHINXMIN: # %bb.0:
@@ -1300,7 +1300,7 @@ define void @br_fcmp_uge(half %a, half %b) nounwind {
; RV32IZHINXMIN-NEXT: .LBB11_2: # %if.then
; RV32IZHINXMIN-NEXT: addi sp, sp, -16
; RV32IZHINXMIN-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IZHINXMIN-NEXT: call abort@plt
+; RV32IZHINXMIN-NEXT: call abort
;
; RV64IZHINXMIN-LABEL: br_fcmp_uge:
; RV64IZHINXMIN: # %bb.0:
@@ -1313,7 +1313,7 @@ define void @br_fcmp_uge(half %a, half %b) nounwind {
; RV64IZHINXMIN-NEXT: .LBB11_2: # %if.then
; RV64IZHINXMIN-NEXT: addi sp, sp, -16
; RV64IZHINXMIN-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
-; RV64IZHINXMIN-NEXT: call abort@plt
+; RV64IZHINXMIN-NEXT: call abort
%1 = fcmp uge half %a, %b
br i1 %1, label %if.then, label %if.else
if.else:
@@ -1333,7 +1333,7 @@ define void @br_fcmp_ult(half %a, half %b) nounwind {
; RV32IZFH-NEXT: .LBB12_2: # %if.then
; RV32IZFH-NEXT: addi sp, sp, -16
; RV32IZFH-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IZFH-NEXT: call abort@plt
+; RV32IZFH-NEXT: call abort
;
; RV64IZFH-LABEL: br_fcmp_ult:
; RV64IZFH: # %bb.0:
@@ -1344,7 +1344,7 @@ define void @br_fcmp_ult(half %a, half %b) nounwind {
; RV64IZFH-NEXT: .LBB12_2: # %if.then
; RV64IZFH-NEXT: addi sp, sp, -16
; RV64IZFH-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
-; RV64IZFH-NEXT: call abort@plt
+; RV64IZFH-NEXT: call abort
;
; RV32IZHINX-LABEL: br_fcmp_ult:
; RV32IZHINX: # %bb.0:
@@ -1355,7 +1355,7 @@ define void @br_fcmp_ult(half %a, half %b) nounwind {
; RV32IZHINX-NEXT: .LBB12_2: # %if.then
; RV32IZHINX-NEXT: addi sp, sp, -16
; RV32IZHINX-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IZHINX-NEXT: call abort@plt
+; RV32IZHINX-NEXT: call abort
;
; RV64IZHINX-LABEL: br_fcmp_ult:
; RV64IZHINX: # %bb.0:
@@ -1366,7 +1366,7 @@ define void @br_fcmp_ult(half %a, half %b) nounwind {
; RV64IZHINX-NEXT: .LBB12_2: # %if.then
; RV64IZHINX-NEXT: addi sp, sp, -16
; RV64IZHINX-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
-; RV64IZHINX-NEXT: call abort@plt
+; RV64IZHINX-NEXT: call abort
;
; RV32IZFHMIN-LABEL: br_fcmp_ult:
; RV32IZFHMIN: # %bb.0:
@@ -1379,7 +1379,7 @@ define void @br_fcmp_ult(half %a, half %b) nounwind {
; RV32IZFHMIN-NEXT: .LBB12_2: # %if.then
; RV32IZFHMIN-NEXT: addi sp, sp, -16
; RV32IZFHMIN-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IZFHMIN-NEXT: call abort@plt
+; RV32IZFHMIN-NEXT: call abort
;
; RV64IZFHMIN-LABEL: br_fcmp_ult:
; RV64IZFHMIN: # %bb.0:
@@ -1392,7 +1392,7 @@ define void @br_fcmp_ult(half %a, half %b) nounwind {
; RV64IZFHMIN-NEXT: .LBB12_2: # %if.then
; RV64IZFHMIN-NEXT: addi sp, sp, -16
; RV64IZFHMIN-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
-; RV64IZFHMIN-NEXT: call abort@plt
+; RV64IZFHMIN-NEXT: call abort
;
; RV32IZHINXMIN-LABEL: br_fcmp_ult:
; RV32IZHINXMIN: # %bb.0:
@@ -1405,7 +1405,7 @@ define void @br_fcmp_ult(half %a, half %b) nounwind {
; RV32IZHINXMIN-NEXT: .LBB12_2: # %if.then
; RV32IZHINXMIN-NEXT: addi sp, sp, -16
; RV32IZHINXMIN-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IZHINXMIN-NEXT: call abort@plt
+; RV32IZHINXMIN-NEXT: call abort
;
; RV64IZHINXMIN-LABEL: br_fcmp_ult:
; RV64IZHINXMIN: # %bb.0:
@@ -1418,7 +1418,7 @@ define void @br_fcmp_ult(half %a, half %b) nounwind {
; RV64IZHINXMIN-NEXT: .LBB12_2: # %if.then
; RV64IZHINXMIN-NEXT: addi sp, sp, -16
; RV64IZHINXMIN-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
-; RV64IZHINXMIN-NEXT: call abort@plt
+; RV64IZHINXMIN-NEXT: call abort
%1 = fcmp ult half %a, %b
br i1 %1, label %if.then, label %if.else
if.else:
@@ -1438,7 +1438,7 @@ define void @br_fcmp_ule(half %a, half %b) nounwind {
; RV32IZFH-NEXT: .LBB13_2: # %if.then
; RV32IZFH-NEXT: addi sp, sp, -16
; RV32IZFH-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IZFH-NEXT: call abort@plt
+; RV32IZFH-NEXT: call abort
;
; RV64IZFH-LABEL: br_fcmp_ule:
; RV64IZFH: # %bb.0:
@@ -1449,7 +1449,7 @@ define void @br_fcmp_ule(half %a, half %b) nounwind {
; RV64IZFH-NEXT: .LBB13_2: # %if.then
; RV64IZFH-NEXT: addi sp, sp, -16
; RV64IZFH-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
-; RV64IZFH-NEXT: call abort@plt
+; RV64IZFH-NEXT: call abort
;
; RV32IZHINX-LABEL: br_fcmp_ule:
; RV32IZHINX: # %bb.0:
@@ -1460,7 +1460,7 @@ define void @br_fcmp_ule(half %a, half %b) nounwind {
; RV32IZHINX-NEXT: .LBB13_2: # %if.then
; RV32IZHINX-NEXT: addi sp, sp, -16
; RV32IZHINX-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IZHINX-NEXT: call abort@plt
+; RV32IZHINX-NEXT: call abort
;
; RV64IZHINX-LABEL: br_fcmp_ule:
; RV64IZHINX: # %bb.0:
@@ -1471,7 +1471,7 @@ define void @br_fcmp_ule(half %a, half %b) nounwind {
; RV64IZHINX-NEXT: .LBB13_2: # %if.then
; RV64IZHINX-NEXT: addi sp, sp, -16
; RV64IZHINX-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
-; RV64IZHINX-NEXT: call abort@plt
+; RV64IZHINX-NEXT: call abort
;
; RV32IZFHMIN-LABEL: br_fcmp_ule:
; RV32IZFHMIN: # %bb.0:
@@ -1484,7 +1484,7 @@ define void @br_fcmp_ule(half %a, half %b) nounwind {
; RV32IZFHMIN-NEXT: .LBB13_2: # %if.then
; RV32IZFHMIN-NEXT: addi sp, sp, -16
; RV32IZFHMIN-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IZFHMIN-NEXT: call abort@plt
+; RV32IZFHMIN-NEXT: call abort
;
; RV64IZFHMIN-LABEL: br_fcmp_ule:
; RV64IZFHMIN: # %bb.0:
@@ -1497,7 +1497,7 @@ define void @br_fcmp_ule(half %a, half %b) nounwind {
; RV64IZFHMIN-NEXT: .LBB13_2: # %if.then
; RV64IZFHMIN-NEXT: addi sp, sp, -16
; RV64IZFHMIN-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
-; RV64IZFHMIN-NEXT: call abort@plt
+; RV64IZFHMIN-NEXT: call abort
;
; RV32IZHINXMIN-LABEL: br_fcmp_ule:
; RV32IZHINXMIN: # %bb.0:
@@ -1510,7 +1510,7 @@ define void @br_fcmp_ule(half %a, half %b) nounwind {
; RV32IZHINXMIN-NEXT: .LBB13_2: # %if.then
; RV32IZHINXMIN-NEXT: addi sp, sp, -16
; RV32IZHINXMIN-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IZHINXMIN-NEXT: call abort@plt
+; RV32IZHINXMIN-NEXT: call abort
;
; RV64IZHINXMIN-LABEL: br_fcmp_ule:
; RV64IZHINXMIN: # %bb.0:
@@ -1523,7 +1523,7 @@ define void @br_fcmp_ule(half %a, half %b) nounwind {
; RV64IZHINXMIN-NEXT: .LBB13_2: # %if.then
; RV64IZHINXMIN-NEXT: addi sp, sp, -16
; RV64IZHINXMIN-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
-; RV64IZHINXMIN-NEXT: call abort@plt
+; RV64IZHINXMIN-NEXT: call abort
%1 = fcmp ule half %a, %b
br i1 %1, label %if.then, label %if.else
if.else:
@@ -1543,7 +1543,7 @@ define void @br_fcmp_une(half %a, half %b) nounwind {
; RV32IZFH-NEXT: .LBB14_2: # %if.then
; RV32IZFH-NEXT: addi sp, sp, -16
; RV32IZFH-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IZFH-NEXT: call abort@plt
+; RV32IZFH-NEXT: call abort
;
; RV64IZFH-LABEL: br_fcmp_une:
; RV64IZFH: # %bb.0:
@@ -1554,7 +1554,7 @@ define void @br_fcmp_une(half %a, half %b) nounwind {
; RV64IZFH-NEXT: .LBB14_2: # %if.then
; RV64IZFH-NEXT: addi sp, sp, -16
; RV64IZFH-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
-; RV64IZFH-NEXT: call abort@plt
+; RV64IZFH-NEXT: call abort
;
; RV32IZHINX-LABEL: br_fcmp_une:
; RV32IZHINX: # %bb.0:
@@ -1565,7 +1565,7 @@ define void @br_fcmp_une(half %a, half %b) nounwind {
; RV32IZHINX-NEXT: .LBB14_2: # %if.then
; RV32IZHINX-NEXT: addi sp, sp, -16
; RV32IZHINX-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IZHINX-NEXT: call abort@plt
+; RV32IZHINX-NEXT: call abort
;
; RV64IZHINX-LABEL: br_fcmp_une:
; RV64IZHINX: # %bb.0:
@@ -1576,7 +1576,7 @@ define void @br_fcmp_une(half %a, half %b) nounwind {
; RV64IZHINX-NEXT: .LBB14_2: # %if.then
; RV64IZHINX-NEXT: addi sp, sp, -16
; RV64IZHINX-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
-; RV64IZHINX-NEXT: call abort@plt
+; RV64IZHINX-NEXT: call abort
;
; RV32IZFHMIN-LABEL: br_fcmp_une:
; RV32IZFHMIN: # %bb.0:
@@ -1589,7 +1589,7 @@ define void @br_fcmp_une(half %a, half %b) nounwind {
; RV32IZFHMIN-NEXT: .LBB14_2: # %if.then
; RV32IZFHMIN-NEXT: addi sp, sp, -16
; RV32IZFHMIN-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IZFHMIN-NEXT: call abort@plt
+; RV32IZFHMIN-NEXT: call abort
;
; RV64IZFHMIN-LABEL: br_fcmp_une:
; RV64IZFHMIN: # %bb.0:
@@ -1602,7 +1602,7 @@ define void @br_fcmp_une(half %a, half %b) nounwind {
; RV64IZFHMIN-NEXT: .LBB14_2: # %if.then
; RV64IZFHMIN-NEXT: addi sp, sp, -16
; RV64IZFHMIN-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
-; RV64IZFHMIN-NEXT: call abort@plt
+; RV64IZFHMIN-NEXT: call abort
;
; RV32IZHINXMIN-LABEL: br_fcmp_une:
; RV32IZHINXMIN: # %bb.0:
@@ -1615,7 +1615,7 @@ define void @br_fcmp_une(half %a, half %b) nounwind {
; RV32IZHINXMIN-NEXT: .LBB14_2: # %if.then
; RV32IZHINXMIN-NEXT: addi sp, sp, -16
; RV32IZHINXMIN-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IZHINXMIN-NEXT: call abort@plt
+; RV32IZHINXMIN-NEXT: call abort
;
; RV64IZHINXMIN-LABEL: br_fcmp_une:
; RV64IZHINXMIN: # %bb.0:
@@ -1628,7 +1628,7 @@ define void @br_fcmp_une(half %a, half %b) nounwind {
; RV64IZHINXMIN-NEXT: .LBB14_2: # %if.then
; RV64IZHINXMIN-NEXT: addi sp, sp, -16
; RV64IZHINXMIN-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
-; RV64IZHINXMIN-NEXT: call abort@plt
+; RV64IZHINXMIN-NEXT: call abort
%1 = fcmp une half %a, %b
br i1 %1, label %if.then, label %if.else
if.else:
@@ -1650,7 +1650,7 @@ define void @br_fcmp_uno(half %a, half %b) nounwind {
; RV32IZFH-NEXT: .LBB15_2: # %if.then
; RV32IZFH-NEXT: addi sp, sp, -16
; RV32IZFH-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IZFH-NEXT: call abort@plt
+; RV32IZFH-NEXT: call abort
;
; RV64IZFH-LABEL: br_fcmp_uno:
; RV64IZFH: # %bb.0:
@@ -1663,7 +1663,7 @@ define void @br_fcmp_uno(half %a, half %b) nounwind {
; RV64IZFH-NEXT: .LBB15_2: # %if.then
; RV64IZFH-NEXT: addi sp, sp, -16
; RV64IZFH-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
-; RV64IZFH-NEXT: call abort@plt
+; RV64IZFH-NEXT: call abort
;
; RV32IZHINX-LABEL: br_fcmp_uno:
; RV32IZHINX: # %bb.0:
@@ -1676,7 +1676,7 @@ define void @br_fcmp_uno(half %a, half %b) nounwind {
; RV32IZHINX-NEXT: .LBB15_2: # %if.then
; RV32IZHINX-NEXT: addi sp, sp, -16
; RV32IZHINX-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IZHINX-NEXT: call abort@plt
+; RV32IZHINX-NEXT: call abort
;
; RV64IZHINX-LABEL: br_fcmp_uno:
; RV64IZHINX: # %bb.0:
@@ -1689,7 +1689,7 @@ define void @br_fcmp_uno(half %a, half %b) nounwind {
; RV64IZHINX-NEXT: .LBB15_2: # %if.then
; RV64IZHINX-NEXT: addi sp, sp, -16
; RV64IZHINX-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
-; RV64IZHINX-NEXT: call abort@plt
+; RV64IZHINX-NEXT: call abort
;
; RV32IZFHMIN-LABEL: br_fcmp_uno:
; RV32IZFHMIN: # %bb.0:
@@ -1704,7 +1704,7 @@ define void @br_fcmp_uno(half %a, half %b) nounwind {
; RV32IZFHMIN-NEXT: .LBB15_2: # %if.then
; RV32IZFHMIN-NEXT: addi sp, sp, -16
; RV32IZFHMIN-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IZFHMIN-NEXT: call abort@plt
+; RV32IZFHMIN-NEXT: call abort
;
; RV64IZFHMIN-LABEL: br_fcmp_uno:
; RV64IZFHMIN: # %bb.0:
@@ -1719,7 +1719,7 @@ define void @br_fcmp_uno(half %a, half %b) nounwind {
; RV64IZFHMIN-NEXT: .LBB15_2: # %if.then
; RV64IZFHMIN-NEXT: addi sp, sp, -16
; RV64IZFHMIN-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
-; RV64IZFHMIN-NEXT: call abort@plt
+; RV64IZFHMIN-NEXT: call abort
;
; RV32IZHINXMIN-LABEL: br_fcmp_uno:
; RV32IZHINXMIN: # %bb.0:
@@ -1734,7 +1734,7 @@ define void @br_fcmp_uno(half %a, half %b) nounwind {
; RV32IZHINXMIN-NEXT: .LBB15_2: # %if.then
; RV32IZHINXMIN-NEXT: addi sp, sp, -16
; RV32IZHINXMIN-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IZHINXMIN-NEXT: call abort@plt
+; RV32IZHINXMIN-NEXT: call abort
;
; RV64IZHINXMIN-LABEL: br_fcmp_uno:
; RV64IZHINXMIN: # %bb.0:
@@ -1749,7 +1749,7 @@ define void @br_fcmp_uno(half %a, half %b) nounwind {
; RV64IZHINXMIN-NEXT: .LBB15_2: # %if.then
; RV64IZHINXMIN-NEXT: addi sp, sp, -16
; RV64IZHINXMIN-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
-; RV64IZHINXMIN-NEXT: call abort@plt
+; RV64IZHINXMIN-NEXT: call abort
%1 = fcmp uno half %a, %b
br i1 %1, label %if.then, label %if.else
if.else:
@@ -1769,7 +1769,7 @@ define void @br_fcmp_true(half %a, half %b) nounwind {
; RV32IZFH-NEXT: .LBB16_2: # %if.then
; RV32IZFH-NEXT: addi sp, sp, -16
; RV32IZFH-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IZFH-NEXT: call abort@plt
+; RV32IZFH-NEXT: call abort
;
; RV64IZFH-LABEL: br_fcmp_true:
; RV64IZFH: # %bb.0:
@@ -1780,7 +1780,7 @@ define void @br_fcmp_true(half %a, half %b) nounwind {
; RV64IZFH-NEXT: .LBB16_2: # %if.then
; RV64IZFH-NEXT: addi sp, sp, -16
; RV64IZFH-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
-; RV64IZFH-NEXT: call abort@plt
+; RV64IZFH-NEXT: call abort
;
; RV32IZHINX-LABEL: br_fcmp_true:
; RV32IZHINX: # %bb.0:
@@ -1791,7 +1791,7 @@ define void @br_fcmp_true(half %a, half %b) nounwind {
; RV32IZHINX-NEXT: .LBB16_2: # %if.then
; RV32IZHINX-NEXT: addi sp, sp, -16
; RV32IZHINX-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IZHINX-NEXT: call abort@plt
+; RV32IZHINX-NEXT: call abort
;
; RV64IZHINX-LABEL: br_fcmp_true:
; RV64IZHINX: # %bb.0:
@@ -1802,7 +1802,7 @@ define void @br_fcmp_true(half %a, half %b) nounwind {
; RV64IZHINX-NEXT: .LBB16_2: # %if.then
; RV64IZHINX-NEXT: addi sp, sp, -16
; RV64IZHINX-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
-; RV64IZHINX-NEXT: call abort@plt
+; RV64IZHINX-NEXT: call abort
;
; RV32IZFHMIN-LABEL: br_fcmp_true:
; RV32IZFHMIN: # %bb.0:
@@ -1813,7 +1813,7 @@ define void @br_fcmp_true(half %a, half %b) nounwind {
; RV32IZFHMIN-NEXT: .LBB16_2: # %if.then
; RV32IZFHMIN-NEXT: addi sp, sp, -16
; RV32IZFHMIN-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IZFHMIN-NEXT: call abort@plt
+; RV32IZFHMIN-NEXT: call abort
;
; RV64IZFHMIN-LABEL: br_fcmp_true:
; RV64IZFHMIN: # %bb.0:
@@ -1824,7 +1824,7 @@ define void @br_fcmp_true(half %a, half %b) nounwind {
; RV64IZFHMIN-NEXT: .LBB16_2: # %if.then
; RV64IZFHMIN-NEXT: addi sp, sp, -16
; RV64IZFHMIN-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
-; RV64IZFHMIN-NEXT: call abort@plt
+; RV64IZFHMIN-NEXT: call abort
;
; RV32IZHINXMIN-LABEL: br_fcmp_true:
; RV32IZHINXMIN: # %bb.0:
@@ -1835,7 +1835,7 @@ define void @br_fcmp_true(half %a, half %b) nounwind {
; RV32IZHINXMIN-NEXT: .LBB16_2: # %if.then
; RV32IZHINXMIN-NEXT: addi sp, sp, -16
; RV32IZHINXMIN-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IZHINXMIN-NEXT: call abort@plt
+; RV32IZHINXMIN-NEXT: call abort
;
; RV64IZHINXMIN-LABEL: br_fcmp_true:
; RV64IZHINXMIN: # %bb.0:
@@ -1846,7 +1846,7 @@ define void @br_fcmp_true(half %a, half %b) nounwind {
; RV64IZHINXMIN-NEXT: .LBB16_2: # %if.then
; RV64IZHINXMIN-NEXT: addi sp, sp, -16
; RV64IZHINXMIN-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
-; RV64IZHINXMIN-NEXT: call abort@plt
+; RV64IZHINXMIN-NEXT: call abort
%1 = fcmp true half %a, %b
br i1 %1, label %if.then, label %if.else
if.else:
diff --git a/llvm/test/CodeGen/RISCV/half-convert-strict.ll b/llvm/test/CodeGen/RISCV/half-convert-strict.ll
index f6f85d3afeb0..f03a020762bb 100644
--- a/llvm/test/CodeGen/RISCV/half-convert-strict.ll
+++ b/llvm/test/CodeGen/RISCV/half-convert-strict.ll
@@ -460,7 +460,7 @@ define i64 @fcvt_l_h(half %a) nounwind strictfp {
; RV32IZFH: # %bb.0:
; RV32IZFH-NEXT: addi sp, sp, -16
; RV32IZFH-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IZFH-NEXT: call __fixhfdi@plt
+; RV32IZFH-NEXT: call __fixhfdi
; RV32IZFH-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32IZFH-NEXT: addi sp, sp, 16
; RV32IZFH-NEXT: ret
@@ -474,7 +474,7 @@ define i64 @fcvt_l_h(half %a) nounwind strictfp {
; RV32IZHINX: # %bb.0:
; RV32IZHINX-NEXT: addi sp, sp, -16
; RV32IZHINX-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IZHINX-NEXT: call __fixhfdi@plt
+; RV32IZHINX-NEXT: call __fixhfdi
; RV32IZHINX-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32IZHINX-NEXT: addi sp, sp, 16
; RV32IZHINX-NEXT: ret
@@ -488,7 +488,7 @@ define i64 @fcvt_l_h(half %a) nounwind strictfp {
; RV32IDZFH: # %bb.0:
; RV32IDZFH-NEXT: addi sp, sp, -16
; RV32IDZFH-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IDZFH-NEXT: call __fixhfdi@plt
+; RV32IDZFH-NEXT: call __fixhfdi
; RV32IDZFH-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32IDZFH-NEXT: addi sp, sp, 16
; RV32IDZFH-NEXT: ret
@@ -502,7 +502,7 @@ define i64 @fcvt_l_h(half %a) nounwind strictfp {
; RV32IZDINXZHINX: # %bb.0:
; RV32IZDINXZHINX-NEXT: addi sp, sp, -16
; RV32IZDINXZHINX-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IZDINXZHINX-NEXT: call __fixhfdi@plt
+; RV32IZDINXZHINX-NEXT: call __fixhfdi
; RV32IZDINXZHINX-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32IZDINXZHINX-NEXT: addi sp, sp, 16
; RV32IZDINXZHINX-NEXT: ret
@@ -516,7 +516,7 @@ define i64 @fcvt_l_h(half %a) nounwind strictfp {
; CHECK32-IZFHMIN: # %bb.0:
; CHECK32-IZFHMIN-NEXT: addi sp, sp, -16
; CHECK32-IZFHMIN-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; CHECK32-IZFHMIN-NEXT: call __fixhfdi@plt
+; CHECK32-IZFHMIN-NEXT: call __fixhfdi
; CHECK32-IZFHMIN-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; CHECK32-IZFHMIN-NEXT: addi sp, sp, 16
; CHECK32-IZFHMIN-NEXT: ret
@@ -531,7 +531,7 @@ define i64 @fcvt_l_h(half %a) nounwind strictfp {
; CHECK32-IZHINXMIN: # %bb.0:
; CHECK32-IZHINXMIN-NEXT: addi sp, sp, -16
; CHECK32-IZHINXMIN-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; CHECK32-IZHINXMIN-NEXT: call __fixhfdi@plt
+; CHECK32-IZHINXMIN-NEXT: call __fixhfdi
; CHECK32-IZHINXMIN-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; CHECK32-IZHINXMIN-NEXT: addi sp, sp, 16
; CHECK32-IZHINXMIN-NEXT: ret
@@ -546,7 +546,7 @@ define i64 @fcvt_l_h(half %a) nounwind strictfp {
; CHECK32-IZDINXZHINXMIN: # %bb.0:
; CHECK32-IZDINXZHINXMIN-NEXT: addi sp, sp, -16
; CHECK32-IZDINXZHINXMIN-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; CHECK32-IZDINXZHINXMIN-NEXT: call __fixhfdi@plt
+; CHECK32-IZDINXZHINXMIN-NEXT: call __fixhfdi
; CHECK32-IZDINXZHINXMIN-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; CHECK32-IZDINXZHINXMIN-NEXT: addi sp, sp, 16
; CHECK32-IZDINXZHINXMIN-NEXT: ret
@@ -566,7 +566,7 @@ define i64 @fcvt_lu_h(half %a) nounwind strictfp {
; RV32IZFH: # %bb.0:
; RV32IZFH-NEXT: addi sp, sp, -16
; RV32IZFH-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IZFH-NEXT: call __fixunshfdi@plt
+; RV32IZFH-NEXT: call __fixunshfdi
; RV32IZFH-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32IZFH-NEXT: addi sp, sp, 16
; RV32IZFH-NEXT: ret
@@ -580,7 +580,7 @@ define i64 @fcvt_lu_h(half %a) nounwind strictfp {
; RV32IZHINX: # %bb.0:
; RV32IZHINX-NEXT: addi sp, sp, -16
; RV32IZHINX-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IZHINX-NEXT: call __fixunshfdi@plt
+; RV32IZHINX-NEXT: call __fixunshfdi
; RV32IZHINX-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32IZHINX-NEXT: addi sp, sp, 16
; RV32IZHINX-NEXT: ret
@@ -594,7 +594,7 @@ define i64 @fcvt_lu_h(half %a) nounwind strictfp {
; RV32IDZFH: # %bb.0:
; RV32IDZFH-NEXT: addi sp, sp, -16
; RV32IDZFH-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IDZFH-NEXT: call __fixunshfdi@plt
+; RV32IDZFH-NEXT: call __fixunshfdi
; RV32IDZFH-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32IDZFH-NEXT: addi sp, sp, 16
; RV32IDZFH-NEXT: ret
@@ -608,7 +608,7 @@ define i64 @fcvt_lu_h(half %a) nounwind strictfp {
; RV32IZDINXZHINX: # %bb.0:
; RV32IZDINXZHINX-NEXT: addi sp, sp, -16
; RV32IZDINXZHINX-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IZDINXZHINX-NEXT: call __fixunshfdi@plt
+; RV32IZDINXZHINX-NEXT: call __fixunshfdi
; RV32IZDINXZHINX-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32IZDINXZHINX-NEXT: addi sp, sp, 16
; RV32IZDINXZHINX-NEXT: ret
@@ -622,7 +622,7 @@ define i64 @fcvt_lu_h(half %a) nounwind strictfp {
; CHECK32-IZFHMIN: # %bb.0:
; CHECK32-IZFHMIN-NEXT: addi sp, sp, -16
; CHECK32-IZFHMIN-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; CHECK32-IZFHMIN-NEXT: call __fixunshfdi@plt
+; CHECK32-IZFHMIN-NEXT: call __fixunshfdi
; CHECK32-IZFHMIN-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; CHECK32-IZFHMIN-NEXT: addi sp, sp, 16
; CHECK32-IZFHMIN-NEXT: ret
@@ -637,7 +637,7 @@ define i64 @fcvt_lu_h(half %a) nounwind strictfp {
; CHECK32-IZHINXMIN: # %bb.0:
; CHECK32-IZHINXMIN-NEXT: addi sp, sp, -16
; CHECK32-IZHINXMIN-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; CHECK32-IZHINXMIN-NEXT: call __fixunshfdi@plt
+; CHECK32-IZHINXMIN-NEXT: call __fixunshfdi
; CHECK32-IZHINXMIN-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; CHECK32-IZHINXMIN-NEXT: addi sp, sp, 16
; CHECK32-IZHINXMIN-NEXT: ret
@@ -652,7 +652,7 @@ define i64 @fcvt_lu_h(half %a) nounwind strictfp {
; CHECK32-IZDINXZHINXMIN: # %bb.0:
; CHECK32-IZDINXZHINXMIN-NEXT: addi sp, sp, -16
; CHECK32-IZDINXZHINXMIN-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; CHECK32-IZDINXZHINXMIN-NEXT: call __fixunshfdi@plt
+; CHECK32-IZDINXZHINXMIN-NEXT: call __fixunshfdi
; CHECK32-IZDINXZHINXMIN-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; CHECK32-IZDINXZHINXMIN-NEXT: addi sp, sp, 16
; CHECK32-IZDINXZHINXMIN-NEXT: ret
@@ -1359,7 +1359,7 @@ define half @fcvt_h_l(i64 %a) nounwind strictfp {
; RV32IZFH: # %bb.0:
; RV32IZFH-NEXT: addi sp, sp, -16
; RV32IZFH-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IZFH-NEXT: call __floatdihf@plt
+; RV32IZFH-NEXT: call __floatdihf
; RV32IZFH-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32IZFH-NEXT: addi sp, sp, 16
; RV32IZFH-NEXT: ret
@@ -1373,7 +1373,7 @@ define half @fcvt_h_l(i64 %a) nounwind strictfp {
; RV32IZHINX: # %bb.0:
; RV32IZHINX-NEXT: addi sp, sp, -16
; RV32IZHINX-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IZHINX-NEXT: call __floatdihf@plt
+; RV32IZHINX-NEXT: call __floatdihf
; RV32IZHINX-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32IZHINX-NEXT: addi sp, sp, 16
; RV32IZHINX-NEXT: ret
@@ -1387,7 +1387,7 @@ define half @fcvt_h_l(i64 %a) nounwind strictfp {
; RV32IDZFH: # %bb.0:
; RV32IDZFH-NEXT: addi sp, sp, -16
; RV32IDZFH-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IDZFH-NEXT: call __floatdihf@plt
+; RV32IDZFH-NEXT: call __floatdihf
; RV32IDZFH-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32IDZFH-NEXT: addi sp, sp, 16
; RV32IDZFH-NEXT: ret
@@ -1401,7 +1401,7 @@ define half @fcvt_h_l(i64 %a) nounwind strictfp {
; RV32IZDINXZHINX: # %bb.0:
; RV32IZDINXZHINX-NEXT: addi sp, sp, -16
; RV32IZDINXZHINX-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IZDINXZHINX-NEXT: call __floatdihf@plt
+; RV32IZDINXZHINX-NEXT: call __floatdihf
; RV32IZDINXZHINX-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32IZDINXZHINX-NEXT: addi sp, sp, 16
; RV32IZDINXZHINX-NEXT: ret
@@ -1415,7 +1415,7 @@ define half @fcvt_h_l(i64 %a) nounwind strictfp {
; CHECK32-IZFHMIN: # %bb.0:
; CHECK32-IZFHMIN-NEXT: addi sp, sp, -16
; CHECK32-IZFHMIN-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; CHECK32-IZFHMIN-NEXT: call __floatdihf@plt
+; CHECK32-IZFHMIN-NEXT: call __floatdihf
; CHECK32-IZFHMIN-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; CHECK32-IZFHMIN-NEXT: addi sp, sp, 16
; CHECK32-IZFHMIN-NEXT: ret
@@ -1430,7 +1430,7 @@ define half @fcvt_h_l(i64 %a) nounwind strictfp {
; CHECK32-IZHINXMIN: # %bb.0:
; CHECK32-IZHINXMIN-NEXT: addi sp, sp, -16
; CHECK32-IZHINXMIN-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; CHECK32-IZHINXMIN-NEXT: call __floatdihf@plt
+; CHECK32-IZHINXMIN-NEXT: call __floatdihf
; CHECK32-IZHINXMIN-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; CHECK32-IZHINXMIN-NEXT: addi sp, sp, 16
; CHECK32-IZHINXMIN-NEXT: ret
@@ -1445,7 +1445,7 @@ define half @fcvt_h_l(i64 %a) nounwind strictfp {
; CHECK32-IZDINXZHINXMIN: # %bb.0:
; CHECK32-IZDINXZHINXMIN-NEXT: addi sp, sp, -16
; CHECK32-IZDINXZHINXMIN-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; CHECK32-IZDINXZHINXMIN-NEXT: call __floatdihf@plt
+; CHECK32-IZDINXZHINXMIN-NEXT: call __floatdihf
; CHECK32-IZDINXZHINXMIN-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; CHECK32-IZDINXZHINXMIN-NEXT: addi sp, sp, 16
; CHECK32-IZDINXZHINXMIN-NEXT: ret
@@ -1465,7 +1465,7 @@ define half @fcvt_h_lu(i64 %a) nounwind strictfp {
; RV32IZFH: # %bb.0:
; RV32IZFH-NEXT: addi sp, sp, -16
; RV32IZFH-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IZFH-NEXT: call __floatundihf@plt
+; RV32IZFH-NEXT: call __floatundihf
; RV32IZFH-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32IZFH-NEXT: addi sp, sp, 16
; RV32IZFH-NEXT: ret
@@ -1479,7 +1479,7 @@ define half @fcvt_h_lu(i64 %a) nounwind strictfp {
; RV32IZHINX: # %bb.0:
; RV32IZHINX-NEXT: addi sp, sp, -16
; RV32IZHINX-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IZHINX-NEXT: call __floatundihf@plt
+; RV32IZHINX-NEXT: call __floatundihf
; RV32IZHINX-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32IZHINX-NEXT: addi sp, sp, 16
; RV32IZHINX-NEXT: ret
@@ -1493,7 +1493,7 @@ define half @fcvt_h_lu(i64 %a) nounwind strictfp {
; RV32IDZFH: # %bb.0:
; RV32IDZFH-NEXT: addi sp, sp, -16
; RV32IDZFH-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IDZFH-NEXT: call __floatundihf@plt
+; RV32IDZFH-NEXT: call __floatundihf
; RV32IDZFH-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32IDZFH-NEXT: addi sp, sp, 16
; RV32IDZFH-NEXT: ret
@@ -1507,7 +1507,7 @@ define half @fcvt_h_lu(i64 %a) nounwind strictfp {
; RV32IZDINXZHINX: # %bb.0:
; RV32IZDINXZHINX-NEXT: addi sp, sp, -16
; RV32IZDINXZHINX-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IZDINXZHINX-NEXT: call __floatundihf@plt
+; RV32IZDINXZHINX-NEXT: call __floatundihf
; RV32IZDINXZHINX-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32IZDINXZHINX-NEXT: addi sp, sp, 16
; RV32IZDINXZHINX-NEXT: ret
@@ -1521,7 +1521,7 @@ define half @fcvt_h_lu(i64 %a) nounwind strictfp {
; CHECK32-IZFHMIN: # %bb.0:
; CHECK32-IZFHMIN-NEXT: addi sp, sp, -16
; CHECK32-IZFHMIN-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; CHECK32-IZFHMIN-NEXT: call __floatundihf@plt
+; CHECK32-IZFHMIN-NEXT: call __floatundihf
; CHECK32-IZFHMIN-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; CHECK32-IZFHMIN-NEXT: addi sp, sp, 16
; CHECK32-IZFHMIN-NEXT: ret
@@ -1536,7 +1536,7 @@ define half @fcvt_h_lu(i64 %a) nounwind strictfp {
; CHECK32-IZHINXMIN: # %bb.0:
; CHECK32-IZHINXMIN-NEXT: addi sp, sp, -16
; CHECK32-IZHINXMIN-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; CHECK32-IZHINXMIN-NEXT: call __floatundihf@plt
+; CHECK32-IZHINXMIN-NEXT: call __floatundihf
; CHECK32-IZHINXMIN-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; CHECK32-IZHINXMIN-NEXT: addi sp, sp, 16
; CHECK32-IZHINXMIN-NEXT: ret
@@ -1551,7 +1551,7 @@ define half @fcvt_h_lu(i64 %a) nounwind strictfp {
; CHECK32-IZDINXZHINXMIN: # %bb.0:
; CHECK32-IZDINXZHINXMIN-NEXT: addi sp, sp, -16
; CHECK32-IZDINXZHINXMIN-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; CHECK32-IZDINXZHINXMIN-NEXT: call __floatundihf@plt
+; CHECK32-IZDINXZHINXMIN-NEXT: call __floatundihf
; CHECK32-IZDINXZHINXMIN-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; CHECK32-IZDINXZHINXMIN-NEXT: addi sp, sp, 16
; CHECK32-IZDINXZHINXMIN-NEXT: ret
@@ -1701,7 +1701,7 @@ define half @fcvt_h_d(double %a) nounwind strictfp {
; RV32IZFH: # %bb.0:
; RV32IZFH-NEXT: addi sp, sp, -16
; RV32IZFH-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IZFH-NEXT: call __truncdfhf2@plt
+; RV32IZFH-NEXT: call __truncdfhf2
; RV32IZFH-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32IZFH-NEXT: addi sp, sp, 16
; RV32IZFH-NEXT: ret
@@ -1710,7 +1710,7 @@ define half @fcvt_h_d(double %a) nounwind strictfp {
; RV64IZFH: # %bb.0:
; RV64IZFH-NEXT: addi sp, sp, -16
; RV64IZFH-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
-; RV64IZFH-NEXT: call __truncdfhf2@plt
+; RV64IZFH-NEXT: call __truncdfhf2
; RV64IZFH-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64IZFH-NEXT: addi sp, sp, 16
; RV64IZFH-NEXT: ret
@@ -1719,7 +1719,7 @@ define half @fcvt_h_d(double %a) nounwind strictfp {
; RV32IZHINX: # %bb.0:
; RV32IZHINX-NEXT: addi sp, sp, -16
; RV32IZHINX-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IZHINX-NEXT: call __truncdfhf2@plt
+; RV32IZHINX-NEXT: call __truncdfhf2
; RV32IZHINX-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32IZHINX-NEXT: addi sp, sp, 16
; RV32IZHINX-NEXT: ret
@@ -1728,7 +1728,7 @@ define half @fcvt_h_d(double %a) nounwind strictfp {
; RV64IZHINX: # %bb.0:
; RV64IZHINX-NEXT: addi sp, sp, -16
; RV64IZHINX-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
-; RV64IZHINX-NEXT: call __truncdfhf2@plt
+; RV64IZHINX-NEXT: call __truncdfhf2
; RV64IZHINX-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64IZHINX-NEXT: addi sp, sp, 16
; RV64IZHINX-NEXT: ret
@@ -1763,7 +1763,7 @@ define half @fcvt_h_d(double %a) nounwind strictfp {
; RV32IFZFHMIN: # %bb.0:
; RV32IFZFHMIN-NEXT: addi sp, sp, -16
; RV32IFZFHMIN-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IFZFHMIN-NEXT: call __truncdfhf2@plt
+; RV32IFZFHMIN-NEXT: call __truncdfhf2
; RV32IFZFHMIN-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32IFZFHMIN-NEXT: addi sp, sp, 16
; RV32IFZFHMIN-NEXT: ret
@@ -1772,7 +1772,7 @@ define half @fcvt_h_d(double %a) nounwind strictfp {
; RV64IFZFHMIN: # %bb.0:
; RV64IFZFHMIN-NEXT: addi sp, sp, -16
; RV64IFZFHMIN-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
-; RV64IFZFHMIN-NEXT: call __truncdfhf2@plt
+; RV64IFZFHMIN-NEXT: call __truncdfhf2
; RV64IFZFHMIN-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64IFZFHMIN-NEXT: addi sp, sp, 16
; RV64IFZFHMIN-NEXT: ret
@@ -1781,7 +1781,7 @@ define half @fcvt_h_d(double %a) nounwind strictfp {
; CHECK32-IZHINXMIN: # %bb.0:
; CHECK32-IZHINXMIN-NEXT: addi sp, sp, -16
; CHECK32-IZHINXMIN-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; CHECK32-IZHINXMIN-NEXT: call __truncdfhf2@plt
+; CHECK32-IZHINXMIN-NEXT: call __truncdfhf2
; CHECK32-IZHINXMIN-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; CHECK32-IZHINXMIN-NEXT: addi sp, sp, 16
; CHECK32-IZHINXMIN-NEXT: ret
@@ -1790,7 +1790,7 @@ define half @fcvt_h_d(double %a) nounwind strictfp {
; CHECK64-IZHINXMIN: # %bb.0:
; CHECK64-IZHINXMIN-NEXT: addi sp, sp, -16
; CHECK64-IZHINXMIN-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
-; CHECK64-IZHINXMIN-NEXT: call __truncdfhf2@plt
+; CHECK64-IZHINXMIN-NEXT: call __truncdfhf2
; CHECK64-IZHINXMIN-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; CHECK64-IZHINXMIN-NEXT: addi sp, sp, 16
; CHECK64-IZHINXMIN-NEXT: ret
@@ -1831,7 +1831,7 @@ define double @fcvt_d_h(half %a) nounwind strictfp {
; RV32IZFH-NEXT: addi sp, sp, -16
; RV32IZFH-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32IZFH-NEXT: fcvt.s.h fa0, fa0
-; RV32IZFH-NEXT: call __extendsfdf2@plt
+; RV32IZFH-NEXT: call __extendsfdf2
; RV32IZFH-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32IZFH-NEXT: addi sp, sp, 16
; RV32IZFH-NEXT: ret
@@ -1841,7 +1841,7 @@ define double @fcvt_d_h(half %a) nounwind strictfp {
; RV64IZFH-NEXT: addi sp, sp, -16
; RV64IZFH-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64IZFH-NEXT: fcvt.s.h fa0, fa0
-; RV64IZFH-NEXT: call __extendsfdf2@plt
+; RV64IZFH-NEXT: call __extendsfdf2
; RV64IZFH-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64IZFH-NEXT: addi sp, sp, 16
; RV64IZFH-NEXT: ret
@@ -1851,7 +1851,7 @@ define double @fcvt_d_h(half %a) nounwind strictfp {
; RV32IZHINX-NEXT: addi sp, sp, -16
; RV32IZHINX-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32IZHINX-NEXT: fcvt.s.h a0, a0
-; RV32IZHINX-NEXT: call __extendsfdf2@plt
+; RV32IZHINX-NEXT: call __extendsfdf2
; RV32IZHINX-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32IZHINX-NEXT: addi sp, sp, 16
; RV32IZHINX-NEXT: ret
@@ -1861,7 +1861,7 @@ define double @fcvt_d_h(half %a) nounwind strictfp {
; RV64IZHINX-NEXT: addi sp, sp, -16
; RV64IZHINX-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64IZHINX-NEXT: fcvt.s.h a0, a0
-; RV64IZHINX-NEXT: call __extendsfdf2@plt
+; RV64IZHINX-NEXT: call __extendsfdf2
; RV64IZHINX-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64IZHINX-NEXT: addi sp, sp, 16
; RV64IZHINX-NEXT: ret
@@ -1897,7 +1897,7 @@ define double @fcvt_d_h(half %a) nounwind strictfp {
; RV32IFZFHMIN-NEXT: addi sp, sp, -16
; RV32IFZFHMIN-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32IFZFHMIN-NEXT: fcvt.s.h fa0, fa0
-; RV32IFZFHMIN-NEXT: call __extendsfdf2@plt
+; RV32IFZFHMIN-NEXT: call __extendsfdf2
; RV32IFZFHMIN-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32IFZFHMIN-NEXT: addi sp, sp, 16
; RV32IFZFHMIN-NEXT: ret
@@ -1907,7 +1907,7 @@ define double @fcvt_d_h(half %a) nounwind strictfp {
; RV64IFZFHMIN-NEXT: addi sp, sp, -16
; RV64IFZFHMIN-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64IFZFHMIN-NEXT: fcvt.s.h fa0, fa0
-; RV64IFZFHMIN-NEXT: call __extendsfdf2@plt
+; RV64IFZFHMIN-NEXT: call __extendsfdf2
; RV64IFZFHMIN-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64IFZFHMIN-NEXT: addi sp, sp, 16
; RV64IFZFHMIN-NEXT: ret
@@ -1917,7 +1917,7 @@ define double @fcvt_d_h(half %a) nounwind strictfp {
; CHECK32-IZHINXMIN-NEXT: addi sp, sp, -16
; CHECK32-IZHINXMIN-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; CHECK32-IZHINXMIN-NEXT: fcvt.s.h a0, a0
-; CHECK32-IZHINXMIN-NEXT: call __extendsfdf2@plt
+; CHECK32-IZHINXMIN-NEXT: call __extendsfdf2
; CHECK32-IZHINXMIN-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; CHECK32-IZHINXMIN-NEXT: addi sp, sp, 16
; CHECK32-IZHINXMIN-NEXT: ret
@@ -1927,7 +1927,7 @@ define double @fcvt_d_h(half %a) nounwind strictfp {
; CHECK64-IZHINXMIN-NEXT: addi sp, sp, -16
; CHECK64-IZHINXMIN-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; CHECK64-IZHINXMIN-NEXT: fcvt.s.h a0, a0
-; CHECK64-IZHINXMIN-NEXT: call __extendsfdf2@plt
+; CHECK64-IZHINXMIN-NEXT: call __extendsfdf2
; CHECK64-IZHINXMIN-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; CHECK64-IZHINXMIN-NEXT: addi sp, sp, 16
; CHECK64-IZHINXMIN-NEXT: ret
diff --git a/llvm/test/CodeGen/RISCV/half-convert.ll b/llvm/test/CodeGen/RISCV/half-convert.ll
index 2d3f40e15fe4..daaceed3941c 100644
--- a/llvm/test/CodeGen/RISCV/half-convert.ll
+++ b/llvm/test/CodeGen/RISCV/half-convert.ll
@@ -91,8 +91,8 @@ define i16 @fcvt_si_h(half %a) nounwind {
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT: slli a0, a0, 16
; RV32I-NEXT: srli a0, a0, 16
-; RV32I-NEXT: call __extendhfsf2@plt
-; RV32I-NEXT: call __fixsfsi@plt
+; RV32I-NEXT: call __extendhfsf2
+; RV32I-NEXT: call __fixsfsi
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
@@ -103,8 +103,8 @@ define i16 @fcvt_si_h(half %a) nounwind {
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT: slli a0, a0, 48
; RV64I-NEXT: srli a0, a0, 48
-; RV64I-NEXT: call __extendhfsf2@plt
-; RV64I-NEXT: call __fixsfdi@plt
+; RV64I-NEXT: call __extendhfsf2
+; RV64I-NEXT: call __fixsfdi
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
@@ -113,7 +113,7 @@ define i16 @fcvt_si_h(half %a) nounwind {
; RV32ID-ILP32: # %bb.0:
; RV32ID-ILP32-NEXT: addi sp, sp, -16
; RV32ID-ILP32-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32ID-ILP32-NEXT: call __extendhfsf2@plt
+; RV32ID-ILP32-NEXT: call __extendhfsf2
; RV32ID-ILP32-NEXT: fmv.w.x fa5, a0
; RV32ID-ILP32-NEXT: fcvt.w.s a0, fa5, rtz
; RV32ID-ILP32-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
@@ -124,7 +124,7 @@ define i16 @fcvt_si_h(half %a) nounwind {
; RV64ID-LP64: # %bb.0:
; RV64ID-LP64-NEXT: addi sp, sp, -16
; RV64ID-LP64-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
-; RV64ID-LP64-NEXT: call __extendhfsf2@plt
+; RV64ID-LP64-NEXT: call __extendhfsf2
; RV64ID-LP64-NEXT: fmv.w.x fa5, a0
; RV64ID-LP64-NEXT: fcvt.l.s a0, fa5, rtz
; RV64ID-LP64-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
@@ -135,7 +135,7 @@ define i16 @fcvt_si_h(half %a) nounwind {
; RV32ID: # %bb.0:
; RV32ID-NEXT: addi sp, sp, -16
; RV32ID-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32ID-NEXT: call __extendhfsf2@plt
+; RV32ID-NEXT: call __extendhfsf2
; RV32ID-NEXT: fcvt.w.s a0, fa0, rtz
; RV32ID-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32ID-NEXT: addi sp, sp, 16
@@ -145,7 +145,7 @@ define i16 @fcvt_si_h(half %a) nounwind {
; RV64ID: # %bb.0:
; RV64ID-NEXT: addi sp, sp, -16
; RV64ID-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
-; RV64ID-NEXT: call __extendhfsf2@plt
+; RV64ID-NEXT: call __extendhfsf2
; RV64ID-NEXT: fcvt.l.s a0, fa0, rtz
; RV64ID-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64ID-NEXT: addi sp, sp, 16
@@ -316,13 +316,13 @@ define i16 @fcvt_si_h_sat(half %a) nounwind {
; RV32I-NEXT: sw s2, 0(sp) # 4-byte Folded Spill
; RV32I-NEXT: slli a0, a0, 16
; RV32I-NEXT: srli a0, a0, 16
-; RV32I-NEXT: call __extendhfsf2@plt
+; RV32I-NEXT: call __extendhfsf2
; RV32I-NEXT: mv s0, a0
; RV32I-NEXT: lui a1, 815104
-; RV32I-NEXT: call __gesf2@plt
+; RV32I-NEXT: call __gesf2
; RV32I-NEXT: mv s2, a0
; RV32I-NEXT: mv a0, s0
-; RV32I-NEXT: call __fixsfsi@plt
+; RV32I-NEXT: call __fixsfsi
; RV32I-NEXT: mv s1, a0
; RV32I-NEXT: bgez s2, .LBB1_2
; RV32I-NEXT: # %bb.1: # %start
@@ -331,7 +331,7 @@ define i16 @fcvt_si_h_sat(half %a) nounwind {
; RV32I-NEXT: lui a0, 290816
; RV32I-NEXT: addi a1, a0, -512
; RV32I-NEXT: mv a0, s0
-; RV32I-NEXT: call __gtsf2@plt
+; RV32I-NEXT: call __gtsf2
; RV32I-NEXT: blez a0, .LBB1_4
; RV32I-NEXT: # %bb.3: # %start
; RV32I-NEXT: lui s1, 8
@@ -339,7 +339,7 @@ define i16 @fcvt_si_h_sat(half %a) nounwind {
; RV32I-NEXT: .LBB1_4: # %start
; RV32I-NEXT: mv a0, s0
; RV32I-NEXT: mv a1, s0
-; RV32I-NEXT: call __unordsf2@plt
+; RV32I-NEXT: call __unordsf2
; RV32I-NEXT: snez a0, a0
; RV32I-NEXT: addi a0, a0, -1
; RV32I-NEXT: and a0, a0, s1
@@ -359,13 +359,13 @@ define i16 @fcvt_si_h_sat(half %a) nounwind {
; RV64I-NEXT: sd s2, 0(sp) # 8-byte Folded Spill
; RV64I-NEXT: slli a0, a0, 48
; RV64I-NEXT: srli a0, a0, 48
-; RV64I-NEXT: call __extendhfsf2@plt
+; RV64I-NEXT: call __extendhfsf2
; RV64I-NEXT: mv s0, a0
; RV64I-NEXT: lui a1, 815104
-; RV64I-NEXT: call __gesf2@plt
+; RV64I-NEXT: call __gesf2
; RV64I-NEXT: mv s2, a0
; RV64I-NEXT: mv a0, s0
-; RV64I-NEXT: call __fixsfdi@plt
+; RV64I-NEXT: call __fixsfdi
; RV64I-NEXT: mv s1, a0
; RV64I-NEXT: bgez s2, .LBB1_2
; RV64I-NEXT: # %bb.1: # %start
@@ -374,7 +374,7 @@ define i16 @fcvt_si_h_sat(half %a) nounwind {
; RV64I-NEXT: lui a0, 290816
; RV64I-NEXT: addiw a1, a0, -512
; RV64I-NEXT: mv a0, s0
-; RV64I-NEXT: call __gtsf2@plt
+; RV64I-NEXT: call __gtsf2
; RV64I-NEXT: blez a0, .LBB1_4
; RV64I-NEXT: # %bb.3: # %start
; RV64I-NEXT: lui s1, 8
@@ -382,7 +382,7 @@ define i16 @fcvt_si_h_sat(half %a) nounwind {
; RV64I-NEXT: .LBB1_4: # %start
; RV64I-NEXT: mv a0, s0
; RV64I-NEXT: mv a1, s0
-; RV64I-NEXT: call __unordsf2@plt
+; RV64I-NEXT: call __unordsf2
; RV64I-NEXT: snez a0, a0
; RV64I-NEXT: addi a0, a0, -1
; RV64I-NEXT: and a0, a0, s1
@@ -397,7 +397,7 @@ define i16 @fcvt_si_h_sat(half %a) nounwind {
; RV32ID-ILP32: # %bb.0: # %start
; RV32ID-ILP32-NEXT: addi sp, sp, -16
; RV32ID-ILP32-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32ID-ILP32-NEXT: call __extendhfsf2@plt
+; RV32ID-ILP32-NEXT: call __extendhfsf2
; RV32ID-ILP32-NEXT: fmv.w.x fa5, a0
; RV32ID-ILP32-NEXT: feq.s a0, fa5, fa5
; RV32ID-ILP32-NEXT: neg a0, a0
@@ -417,7 +417,7 @@ define i16 @fcvt_si_h_sat(half %a) nounwind {
; RV64ID-LP64: # %bb.0: # %start
; RV64ID-LP64-NEXT: addi sp, sp, -16
; RV64ID-LP64-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
-; RV64ID-LP64-NEXT: call __extendhfsf2@plt
+; RV64ID-LP64-NEXT: call __extendhfsf2
; RV64ID-LP64-NEXT: fmv.w.x fa5, a0
; RV64ID-LP64-NEXT: feq.s a0, fa5, fa5
; RV64ID-LP64-NEXT: lui a1, %hi(.LCPI1_0)
@@ -437,7 +437,7 @@ define i16 @fcvt_si_h_sat(half %a) nounwind {
; RV32ID: # %bb.0: # %start
; RV32ID-NEXT: addi sp, sp, -16
; RV32ID-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32ID-NEXT: call __extendhfsf2@plt
+; RV32ID-NEXT: call __extendhfsf2
; RV32ID-NEXT: feq.s a0, fa0, fa0
; RV32ID-NEXT: neg a0, a0
; RV32ID-NEXT: lui a1, %hi(.LCPI1_0)
@@ -456,7 +456,7 @@ define i16 @fcvt_si_h_sat(half %a) nounwind {
; RV64ID: # %bb.0: # %start
; RV64ID-NEXT: addi sp, sp, -16
; RV64ID-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
-; RV64ID-NEXT: call __extendhfsf2@plt
+; RV64ID-NEXT: call __extendhfsf2
; RV64ID-NEXT: feq.s a0, fa0, fa0
; RV64ID-NEXT: lui a1, %hi(.LCPI1_0)
; RV64ID-NEXT: flw fa5, %lo(.LCPI1_0)(a1)
@@ -609,8 +609,8 @@ define i16 @fcvt_ui_h(half %a) nounwind {
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT: slli a0, a0, 16
; RV32I-NEXT: srli a0, a0, 16
-; RV32I-NEXT: call __extendhfsf2@plt
-; RV32I-NEXT: call __fixunssfsi@plt
+; RV32I-NEXT: call __extendhfsf2
+; RV32I-NEXT: call __fixunssfsi
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
@@ -621,8 +621,8 @@ define i16 @fcvt_ui_h(half %a) nounwind {
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT: slli a0, a0, 48
; RV64I-NEXT: srli a0, a0, 48
-; RV64I-NEXT: call __extendhfsf2@plt
-; RV64I-NEXT: call __fixunssfdi@plt
+; RV64I-NEXT: call __extendhfsf2
+; RV64I-NEXT: call __fixunssfdi
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
@@ -631,7 +631,7 @@ define i16 @fcvt_ui_h(half %a) nounwind {
; RV32ID-ILP32: # %bb.0:
; RV32ID-ILP32-NEXT: addi sp, sp, -16
; RV32ID-ILP32-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32ID-ILP32-NEXT: call __extendhfsf2@plt
+; RV32ID-ILP32-NEXT: call __extendhfsf2
; RV32ID-ILP32-NEXT: fmv.w.x fa5, a0
; RV32ID-ILP32-NEXT: fcvt.wu.s a0, fa5, rtz
; RV32ID-ILP32-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
@@ -642,7 +642,7 @@ define i16 @fcvt_ui_h(half %a) nounwind {
; RV64ID-LP64: # %bb.0:
; RV64ID-LP64-NEXT: addi sp, sp, -16
; RV64ID-LP64-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
-; RV64ID-LP64-NEXT: call __extendhfsf2@plt
+; RV64ID-LP64-NEXT: call __extendhfsf2
; RV64ID-LP64-NEXT: fmv.w.x fa5, a0
; RV64ID-LP64-NEXT: fcvt.lu.s a0, fa5, rtz
; RV64ID-LP64-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
@@ -653,7 +653,7 @@ define i16 @fcvt_ui_h(half %a) nounwind {
; RV32ID: # %bb.0:
; RV32ID-NEXT: addi sp, sp, -16
; RV32ID-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32ID-NEXT: call __extendhfsf2@plt
+; RV32ID-NEXT: call __extendhfsf2
; RV32ID-NEXT: fcvt.wu.s a0, fa0, rtz
; RV32ID-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32ID-NEXT: addi sp, sp, 16
@@ -663,7 +663,7 @@ define i16 @fcvt_ui_h(half %a) nounwind {
; RV64ID: # %bb.0:
; RV64ID-NEXT: addi sp, sp, -16
; RV64ID-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
-; RV64ID-NEXT: call __extendhfsf2@plt
+; RV64ID-NEXT: call __extendhfsf2
; RV64ID-NEXT: fcvt.lu.s a0, fa0, rtz
; RV64ID-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64ID-NEXT: addi sp, sp, 16
@@ -804,18 +804,18 @@ define i16 @fcvt_ui_h_sat(half %a) nounwind {
; RV32I-NEXT: lui s0, 16
; RV32I-NEXT: addi s0, s0, -1
; RV32I-NEXT: and a0, a0, s0
-; RV32I-NEXT: call __extendhfsf2@plt
+; RV32I-NEXT: call __extendhfsf2
; RV32I-NEXT: mv s3, a0
-; RV32I-NEXT: call __fixunssfsi@plt
+; RV32I-NEXT: call __fixunssfsi
; RV32I-NEXT: mv s1, a0
; RV32I-NEXT: mv a0, s3
; RV32I-NEXT: li a1, 0
-; RV32I-NEXT: call __gesf2@plt
+; RV32I-NEXT: call __gesf2
; RV32I-NEXT: mv s2, a0
; RV32I-NEXT: lui a0, 292864
; RV32I-NEXT: addi a1, a0, -256
; RV32I-NEXT: mv a0, s3
-; RV32I-NEXT: call __gtsf2@plt
+; RV32I-NEXT: call __gtsf2
; RV32I-NEXT: bgtz a0, .LBB3_2
; RV32I-NEXT: # %bb.1:
; RV32I-NEXT: slti a0, s2, 0
@@ -842,18 +842,18 @@ define i16 @fcvt_ui_h_sat(half %a) nounwind {
; RV64I-NEXT: lui s0, 16
; RV64I-NEXT: addiw s0, s0, -1
; RV64I-NEXT: and a0, a0, s0
-; RV64I-NEXT: call __extendhfsf2@plt
+; RV64I-NEXT: call __extendhfsf2
; RV64I-NEXT: mv s3, a0
-; RV64I-NEXT: call __fixunssfdi@plt
+; RV64I-NEXT: call __fixunssfdi
; RV64I-NEXT: mv s1, a0
; RV64I-NEXT: mv a0, s3
; RV64I-NEXT: li a1, 0
-; RV64I-NEXT: call __gesf2@plt
+; RV64I-NEXT: call __gesf2
; RV64I-NEXT: mv s2, a0
; RV64I-NEXT: lui a0, 292864
; RV64I-NEXT: addiw a1, a0, -256
; RV64I-NEXT: mv a0, s3
-; RV64I-NEXT: call __gtsf2@plt
+; RV64I-NEXT: call __gtsf2
; RV64I-NEXT: bgtz a0, .LBB3_2
; RV64I-NEXT: # %bb.1:
; RV64I-NEXT: slti a0, s2, 0
@@ -873,7 +873,7 @@ define i16 @fcvt_ui_h_sat(half %a) nounwind {
; RV32ID-ILP32: # %bb.0: # %start
; RV32ID-ILP32-NEXT: addi sp, sp, -16
; RV32ID-ILP32-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32ID-ILP32-NEXT: call __extendhfsf2@plt
+; RV32ID-ILP32-NEXT: call __extendhfsf2
; RV32ID-ILP32-NEXT: lui a1, %hi(.LCPI3_0)
; RV32ID-ILP32-NEXT: flw fa5, %lo(.LCPI3_0)(a1)
; RV32ID-ILP32-NEXT: fmv.w.x fa4, a0
@@ -889,7 +889,7 @@ define i16 @fcvt_ui_h_sat(half %a) nounwind {
; RV64ID-LP64: # %bb.0: # %start
; RV64ID-LP64-NEXT: addi sp, sp, -16
; RV64ID-LP64-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
-; RV64ID-LP64-NEXT: call __extendhfsf2@plt
+; RV64ID-LP64-NEXT: call __extendhfsf2
; RV64ID-LP64-NEXT: lui a1, %hi(.LCPI3_0)
; RV64ID-LP64-NEXT: flw fa5, %lo(.LCPI3_0)(a1)
; RV64ID-LP64-NEXT: fmv.w.x fa4, a0
@@ -905,7 +905,7 @@ define i16 @fcvt_ui_h_sat(half %a) nounwind {
; RV32ID: # %bb.0: # %start
; RV32ID-NEXT: addi sp, sp, -16
; RV32ID-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32ID-NEXT: call __extendhfsf2@plt
+; RV32ID-NEXT: call __extendhfsf2
; RV32ID-NEXT: lui a0, %hi(.LCPI3_0)
; RV32ID-NEXT: flw fa5, %lo(.LCPI3_0)(a0)
; RV32ID-NEXT: fmv.w.x fa4, zero
@@ -920,7 +920,7 @@ define i16 @fcvt_ui_h_sat(half %a) nounwind {
; RV64ID: # %bb.0: # %start
; RV64ID-NEXT: addi sp, sp, -16
; RV64ID-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
-; RV64ID-NEXT: call __extendhfsf2@plt
+; RV64ID-NEXT: call __extendhfsf2
; RV64ID-NEXT: lui a0, %hi(.LCPI3_0)
; RV64ID-NEXT: flw fa5, %lo(.LCPI3_0)(a0)
; RV64ID-NEXT: fmv.w.x fa4, zero
@@ -1030,8 +1030,8 @@ define i32 @fcvt_w_h(half %a) nounwind {
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT: slli a0, a0, 16
; RV32I-NEXT: srli a0, a0, 16
-; RV32I-NEXT: call __extendhfsf2@plt
-; RV32I-NEXT: call __fixsfsi@plt
+; RV32I-NEXT: call __extendhfsf2
+; RV32I-NEXT: call __fixsfsi
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
@@ -1042,8 +1042,8 @@ define i32 @fcvt_w_h(half %a) nounwind {
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT: slli a0, a0, 48
; RV64I-NEXT: srli a0, a0, 48
-; RV64I-NEXT: call __extendhfsf2@plt
-; RV64I-NEXT: call __fixsfdi@plt
+; RV64I-NEXT: call __extendhfsf2
+; RV64I-NEXT: call __fixsfdi
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
@@ -1052,7 +1052,7 @@ define i32 @fcvt_w_h(half %a) nounwind {
; RV32ID-ILP32: # %bb.0:
; RV32ID-ILP32-NEXT: addi sp, sp, -16
; RV32ID-ILP32-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32ID-ILP32-NEXT: call __extendhfsf2@plt
+; RV32ID-ILP32-NEXT: call __extendhfsf2
; RV32ID-ILP32-NEXT: fmv.w.x fa5, a0
; RV32ID-ILP32-NEXT: fcvt.w.s a0, fa5, rtz
; RV32ID-ILP32-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
@@ -1063,7 +1063,7 @@ define i32 @fcvt_w_h(half %a) nounwind {
; RV64ID-LP64: # %bb.0:
; RV64ID-LP64-NEXT: addi sp, sp, -16
; RV64ID-LP64-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
-; RV64ID-LP64-NEXT: call __extendhfsf2@plt
+; RV64ID-LP64-NEXT: call __extendhfsf2
; RV64ID-LP64-NEXT: fmv.w.x fa5, a0
; RV64ID-LP64-NEXT: fcvt.l.s a0, fa5, rtz
; RV64ID-LP64-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
@@ -1074,7 +1074,7 @@ define i32 @fcvt_w_h(half %a) nounwind {
; RV32ID: # %bb.0:
; RV32ID-NEXT: addi sp, sp, -16
; RV32ID-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32ID-NEXT: call __extendhfsf2@plt
+; RV32ID-NEXT: call __extendhfsf2
; RV32ID-NEXT: fcvt.w.s a0, fa0, rtz
; RV32ID-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32ID-NEXT: addi sp, sp, 16
@@ -1084,7 +1084,7 @@ define i32 @fcvt_w_h(half %a) nounwind {
; RV64ID: # %bb.0:
; RV64ID-NEXT: addi sp, sp, -16
; RV64ID-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
-; RV64ID-NEXT: call __extendhfsf2@plt
+; RV64ID-NEXT: call __extendhfsf2
; RV64ID-NEXT: fcvt.l.s a0, fa0, rtz
; RV64ID-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64ID-NEXT: addi sp, sp, 16
@@ -1185,13 +1185,13 @@ define i32 @fcvt_w_h_sat(half %a) nounwind {
; RV32I-NEXT: sw s3, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT: slli a0, a0, 16
; RV32I-NEXT: srli a0, a0, 16
-; RV32I-NEXT: call __extendhfsf2@plt
+; RV32I-NEXT: call __extendhfsf2
; RV32I-NEXT: mv s0, a0
; RV32I-NEXT: lui a1, 847872
-; RV32I-NEXT: call __gesf2@plt
+; RV32I-NEXT: call __gesf2
; RV32I-NEXT: mv s2, a0
; RV32I-NEXT: mv a0, s0
-; RV32I-NEXT: call __fixsfsi@plt
+; RV32I-NEXT: call __fixsfsi
; RV32I-NEXT: mv s1, a0
; RV32I-NEXT: lui s3, 524288
; RV32I-NEXT: bgez s2, .LBB5_2
@@ -1201,14 +1201,14 @@ define i32 @fcvt_w_h_sat(half %a) nounwind {
; RV32I-NEXT: lui a1, 323584
; RV32I-NEXT: addi a1, a1, -1
; RV32I-NEXT: mv a0, s0
-; RV32I-NEXT: call __gtsf2@plt
+; RV32I-NEXT: call __gtsf2
; RV32I-NEXT: blez a0, .LBB5_4
; RV32I-NEXT: # %bb.3: # %start
; RV32I-NEXT: addi s1, s3, -1
; RV32I-NEXT: .LBB5_4: # %start
; RV32I-NEXT: mv a0, s0
; RV32I-NEXT: mv a1, s0
-; RV32I-NEXT: call __unordsf2@plt
+; RV32I-NEXT: call __unordsf2
; RV32I-NEXT: snez a0, a0
; RV32I-NEXT: addi a0, a0, -1
; RV32I-NEXT: and a0, a0, s1
@@ -1230,13 +1230,13 @@ define i32 @fcvt_w_h_sat(half %a) nounwind {
; RV64I-NEXT: sd s3, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT: slli a0, a0, 48
; RV64I-NEXT: srli a0, a0, 48
-; RV64I-NEXT: call __extendhfsf2@plt
+; RV64I-NEXT: call __extendhfsf2
; RV64I-NEXT: mv s0, a0
; RV64I-NEXT: lui a1, 847872
-; RV64I-NEXT: call __gesf2@plt
+; RV64I-NEXT: call __gesf2
; RV64I-NEXT: mv s2, a0
; RV64I-NEXT: mv a0, s0
-; RV64I-NEXT: call __fixsfdi@plt
+; RV64I-NEXT: call __fixsfdi
; RV64I-NEXT: mv s1, a0
; RV64I-NEXT: lui s3, 524288
; RV64I-NEXT: bgez s2, .LBB5_2
@@ -1246,14 +1246,14 @@ define i32 @fcvt_w_h_sat(half %a) nounwind {
; RV64I-NEXT: lui a1, 323584
; RV64I-NEXT: addiw a1, a1, -1
; RV64I-NEXT: mv a0, s0
-; RV64I-NEXT: call __gtsf2@plt
+; RV64I-NEXT: call __gtsf2
; RV64I-NEXT: blez a0, .LBB5_4
; RV64I-NEXT: # %bb.3: # %start
; RV64I-NEXT: addiw s1, s3, -1
; RV64I-NEXT: .LBB5_4: # %start
; RV64I-NEXT: mv a0, s0
; RV64I-NEXT: mv a1, s0
-; RV64I-NEXT: call __unordsf2@plt
+; RV64I-NEXT: call __unordsf2
; RV64I-NEXT: snez a0, a0
; RV64I-NEXT: addi a0, a0, -1
; RV64I-NEXT: and a0, a0, s1
@@ -1269,7 +1269,7 @@ define i32 @fcvt_w_h_sat(half %a) nounwind {
; RV32ID-ILP32: # %bb.0: # %start
; RV32ID-ILP32-NEXT: addi sp, sp, -16
; RV32ID-ILP32-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32ID-ILP32-NEXT: call __extendhfsf2@plt
+; RV32ID-ILP32-NEXT: call __extendhfsf2
; RV32ID-ILP32-NEXT: fmv.w.x fa5, a0
; RV32ID-ILP32-NEXT: fcvt.w.s a0, fa5, rtz
; RV32ID-ILP32-NEXT: feq.s a1, fa5, fa5
@@ -1284,7 +1284,7 @@ define i32 @fcvt_w_h_sat(half %a) nounwind {
; RV64ID-LP64: # %bb.0: # %start
; RV64ID-LP64-NEXT: addi sp, sp, -16
; RV64ID-LP64-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
-; RV64ID-LP64-NEXT: call __extendhfsf2@plt
+; RV64ID-LP64-NEXT: call __extendhfsf2
; RV64ID-LP64-NEXT: fmv.w.x fa5, a0
; RV64ID-LP64-NEXT: fcvt.w.s a0, fa5, rtz
; RV64ID-LP64-NEXT: feq.s a1, fa5, fa5
@@ -1299,7 +1299,7 @@ define i32 @fcvt_w_h_sat(half %a) nounwind {
; RV32ID: # %bb.0: # %start
; RV32ID-NEXT: addi sp, sp, -16
; RV32ID-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32ID-NEXT: call __extendhfsf2@plt
+; RV32ID-NEXT: call __extendhfsf2
; RV32ID-NEXT: fcvt.w.s a0, fa0, rtz
; RV32ID-NEXT: feq.s a1, fa0, fa0
; RV32ID-NEXT: seqz a1, a1
@@ -1313,7 +1313,7 @@ define i32 @fcvt_w_h_sat(half %a) nounwind {
; RV64ID: # %bb.0: # %start
; RV64ID-NEXT: addi sp, sp, -16
; RV64ID-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
-; RV64ID-NEXT: call __extendhfsf2@plt
+; RV64ID-NEXT: call __extendhfsf2
; RV64ID-NEXT: fcvt.w.s a0, fa0, rtz
; RV64ID-NEXT: feq.s a1, fa0, fa0
; RV64ID-NEXT: seqz a1, a1
@@ -1420,8 +1420,8 @@ define i32 @fcvt_wu_h(half %a) nounwind {
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT: slli a0, a0, 16
; RV32I-NEXT: srli a0, a0, 16
-; RV32I-NEXT: call __extendhfsf2@plt
-; RV32I-NEXT: call __fixunssfsi@plt
+; RV32I-NEXT: call __extendhfsf2
+; RV32I-NEXT: call __fixunssfsi
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
@@ -1432,8 +1432,8 @@ define i32 @fcvt_wu_h(half %a) nounwind {
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT: slli a0, a0, 48
; RV64I-NEXT: srli a0, a0, 48
-; RV64I-NEXT: call __extendhfsf2@plt
-; RV64I-NEXT: call __fixunssfdi@plt
+; RV64I-NEXT: call __extendhfsf2
+; RV64I-NEXT: call __fixunssfdi
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
@@ -1442,7 +1442,7 @@ define i32 @fcvt_wu_h(half %a) nounwind {
; RV32ID-ILP32: # %bb.0:
; RV32ID-ILP32-NEXT: addi sp, sp, -16
; RV32ID-ILP32-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32ID-ILP32-NEXT: call __extendhfsf2@plt
+; RV32ID-ILP32-NEXT: call __extendhfsf2
; RV32ID-ILP32-NEXT: fmv.w.x fa5, a0
; RV32ID-ILP32-NEXT: fcvt.wu.s a0, fa5, rtz
; RV32ID-ILP32-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
@@ -1453,7 +1453,7 @@ define i32 @fcvt_wu_h(half %a) nounwind {
; RV64ID-LP64: # %bb.0:
; RV64ID-LP64-NEXT: addi sp, sp, -16
; RV64ID-LP64-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
-; RV64ID-LP64-NEXT: call __extendhfsf2@plt
+; RV64ID-LP64-NEXT: call __extendhfsf2
; RV64ID-LP64-NEXT: fmv.w.x fa5, a0
; RV64ID-LP64-NEXT: fcvt.lu.s a0, fa5, rtz
; RV64ID-LP64-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
@@ -1464,7 +1464,7 @@ define i32 @fcvt_wu_h(half %a) nounwind {
; RV32ID: # %bb.0:
; RV32ID-NEXT: addi sp, sp, -16
; RV32ID-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32ID-NEXT: call __extendhfsf2@plt
+; RV32ID-NEXT: call __extendhfsf2
; RV32ID-NEXT: fcvt.wu.s a0, fa0, rtz
; RV32ID-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32ID-NEXT: addi sp, sp, 16
@@ -1474,7 +1474,7 @@ define i32 @fcvt_wu_h(half %a) nounwind {
; RV64ID: # %bb.0:
; RV64ID-NEXT: addi sp, sp, -16
; RV64ID-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
-; RV64ID-NEXT: call __extendhfsf2@plt
+; RV64ID-NEXT: call __extendhfsf2
; RV64ID-NEXT: fcvt.lu.s a0, fa0, rtz
; RV64ID-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64ID-NEXT: addi sp, sp, 16
@@ -1563,8 +1563,8 @@ define i32 @fcvt_wu_h_multiple_use(half %x, ptr %y) nounwind {
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT: slli a0, a0, 16
; RV32I-NEXT: srli a0, a0, 16
-; RV32I-NEXT: call __extendhfsf2@plt
-; RV32I-NEXT: call __fixunssfsi@plt
+; RV32I-NEXT: call __extendhfsf2
+; RV32I-NEXT: call __fixunssfsi
; RV32I-NEXT: seqz a1, a0
; RV32I-NEXT: add a0, a0, a1
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
@@ -1577,8 +1577,8 @@ define i32 @fcvt_wu_h_multiple_use(half %x, ptr %y) nounwind {
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT: slli a0, a0, 48
; RV64I-NEXT: srli a0, a0, 48
-; RV64I-NEXT: call __extendhfsf2@plt
-; RV64I-NEXT: call __fixunssfdi@plt
+; RV64I-NEXT: call __extendhfsf2
+; RV64I-NEXT: call __fixunssfdi
; RV64I-NEXT: seqz a1, a0
; RV64I-NEXT: add a0, a0, a1
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
@@ -1589,7 +1589,7 @@ define i32 @fcvt_wu_h_multiple_use(half %x, ptr %y) nounwind {
; RV32ID-ILP32: # %bb.0:
; RV32ID-ILP32-NEXT: addi sp, sp, -16
; RV32ID-ILP32-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32ID-ILP32-NEXT: call __extendhfsf2@plt
+; RV32ID-ILP32-NEXT: call __extendhfsf2
; RV32ID-ILP32-NEXT: fmv.w.x fa5, a0
; RV32ID-ILP32-NEXT: fcvt.wu.s a0, fa5, rtz
; RV32ID-ILP32-NEXT: seqz a1, a0
@@ -1602,7 +1602,7 @@ define i32 @fcvt_wu_h_multiple_use(half %x, ptr %y) nounwind {
; RV64ID-LP64: # %bb.0:
; RV64ID-LP64-NEXT: addi sp, sp, -16
; RV64ID-LP64-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
-; RV64ID-LP64-NEXT: call __extendhfsf2@plt
+; RV64ID-LP64-NEXT: call __extendhfsf2
; RV64ID-LP64-NEXT: fmv.w.x fa5, a0
; RV64ID-LP64-NEXT: fcvt.lu.s a0, fa5, rtz
; RV64ID-LP64-NEXT: seqz a1, a0
@@ -1615,7 +1615,7 @@ define i32 @fcvt_wu_h_multiple_use(half %x, ptr %y) nounwind {
; RV32ID: # %bb.0:
; RV32ID-NEXT: addi sp, sp, -16
; RV32ID-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32ID-NEXT: call __extendhfsf2@plt
+; RV32ID-NEXT: call __extendhfsf2
; RV32ID-NEXT: fcvt.wu.s a0, fa0, rtz
; RV32ID-NEXT: seqz a1, a0
; RV32ID-NEXT: add a0, a0, a1
@@ -1627,7 +1627,7 @@ define i32 @fcvt_wu_h_multiple_use(half %x, ptr %y) nounwind {
; RV64ID: # %bb.0:
; RV64ID-NEXT: addi sp, sp, -16
; RV64ID-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
-; RV64ID-NEXT: call __extendhfsf2@plt
+; RV64ID-NEXT: call __extendhfsf2
; RV64ID-NEXT: fcvt.lu.s a0, fa0, rtz
; RV64ID-NEXT: seqz a1, a0
; RV64ID-NEXT: add a0, a0, a1
@@ -1778,20 +1778,20 @@ define i32 @fcvt_wu_h_sat(half %a) nounwind {
; RV32I-NEXT: sw s2, 0(sp) # 4-byte Folded Spill
; RV32I-NEXT: slli a0, a0, 16
; RV32I-NEXT: srli a0, a0, 16
-; RV32I-NEXT: call __extendhfsf2@plt
+; RV32I-NEXT: call __extendhfsf2
; RV32I-NEXT: mv s0, a0
; RV32I-NEXT: lui a1, 325632
; RV32I-NEXT: addi a1, a1, -1
-; RV32I-NEXT: call __gtsf2@plt
+; RV32I-NEXT: call __gtsf2
; RV32I-NEXT: sgtz a0, a0
; RV32I-NEXT: neg s1, a0
; RV32I-NEXT: mv a0, s0
; RV32I-NEXT: li a1, 0
-; RV32I-NEXT: call __gesf2@plt
+; RV32I-NEXT: call __gesf2
; RV32I-NEXT: slti a0, a0, 0
; RV32I-NEXT: addi s2, a0, -1
; RV32I-NEXT: mv a0, s0
-; RV32I-NEXT: call __fixunssfsi@plt
+; RV32I-NEXT: call __fixunssfsi
; RV32I-NEXT: and a0, s2, a0
; RV32I-NEXT: or a0, s1, a0
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
@@ -1810,18 +1810,18 @@ define i32 @fcvt_wu_h_sat(half %a) nounwind {
; RV64I-NEXT: sd s2, 0(sp) # 8-byte Folded Spill
; RV64I-NEXT: slli a0, a0, 48
; RV64I-NEXT: srli a0, a0, 48
-; RV64I-NEXT: call __extendhfsf2@plt
+; RV64I-NEXT: call __extendhfsf2
; RV64I-NEXT: mv s2, a0
; RV64I-NEXT: li a1, 0
-; RV64I-NEXT: call __gesf2@plt
+; RV64I-NEXT: call __gesf2
; RV64I-NEXT: mv s0, a0
; RV64I-NEXT: mv a0, s2
-; RV64I-NEXT: call __fixunssfdi@plt
+; RV64I-NEXT: call __fixunssfdi
; RV64I-NEXT: mv s1, a0
; RV64I-NEXT: lui a1, 325632
; RV64I-NEXT: addiw a1, a1, -1
; RV64I-NEXT: mv a0, s2
-; RV64I-NEXT: call __gtsf2@plt
+; RV64I-NEXT: call __gtsf2
; RV64I-NEXT: blez a0, .LBB8_2
; RV64I-NEXT: # %bb.1: # %start
; RV64I-NEXT: li a0, -1
@@ -1843,7 +1843,7 @@ define i32 @fcvt_wu_h_sat(half %a) nounwind {
; RV32ID-ILP32: # %bb.0: # %start
; RV32ID-ILP32-NEXT: addi sp, sp, -16
; RV32ID-ILP32-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32ID-ILP32-NEXT: call __extendhfsf2@plt
+; RV32ID-ILP32-NEXT: call __extendhfsf2
; RV32ID-ILP32-NEXT: fmv.w.x fa5, a0
; RV32ID-ILP32-NEXT: fcvt.wu.s a0, fa5, rtz
; RV32ID-ILP32-NEXT: feq.s a1, fa5, fa5
@@ -1858,7 +1858,7 @@ define i32 @fcvt_wu_h_sat(half %a) nounwind {
; RV64ID-LP64: # %bb.0: # %start
; RV64ID-LP64-NEXT: addi sp, sp, -16
; RV64ID-LP64-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
-; RV64ID-LP64-NEXT: call __extendhfsf2@plt
+; RV64ID-LP64-NEXT: call __extendhfsf2
; RV64ID-LP64-NEXT: fmv.w.x fa5, a0
; RV64ID-LP64-NEXT: fcvt.wu.s a0, fa5, rtz
; RV64ID-LP64-NEXT: feq.s a1, fa5, fa5
@@ -1875,7 +1875,7 @@ define i32 @fcvt_wu_h_sat(half %a) nounwind {
; RV32ID: # %bb.0: # %start
; RV32ID-NEXT: addi sp, sp, -16
; RV32ID-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32ID-NEXT: call __extendhfsf2@plt
+; RV32ID-NEXT: call __extendhfsf2
; RV32ID-NEXT: fcvt.wu.s a0, fa0, rtz
; RV32ID-NEXT: feq.s a1, fa0, fa0
; RV32ID-NEXT: seqz a1, a1
@@ -1889,7 +1889,7 @@ define i32 @fcvt_wu_h_sat(half %a) nounwind {
; RV64ID: # %bb.0: # %start
; RV64ID-NEXT: addi sp, sp, -16
; RV64ID-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
-; RV64ID-NEXT: call __extendhfsf2@plt
+; RV64ID-NEXT: call __extendhfsf2
; RV64ID-NEXT: fcvt.wu.s a0, fa0, rtz
; RV64ID-NEXT: feq.s a1, fa0, fa0
; RV64ID-NEXT: seqz a1, a1
@@ -1977,7 +1977,7 @@ define i64 @fcvt_l_h(half %a) nounwind {
; RV32IZFH: # %bb.0:
; RV32IZFH-NEXT: addi sp, sp, -16
; RV32IZFH-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IZFH-NEXT: call __fixhfdi@plt
+; RV32IZFH-NEXT: call __fixhfdi
; RV32IZFH-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32IZFH-NEXT: addi sp, sp, 16
; RV32IZFH-NEXT: ret
@@ -1991,7 +1991,7 @@ define i64 @fcvt_l_h(half %a) nounwind {
; RV32IDZFH: # %bb.0:
; RV32IDZFH-NEXT: addi sp, sp, -16
; RV32IDZFH-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IDZFH-NEXT: call __fixhfdi@plt
+; RV32IDZFH-NEXT: call __fixhfdi
; RV32IDZFH-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32IDZFH-NEXT: addi sp, sp, 16
; RV32IDZFH-NEXT: ret
@@ -2005,7 +2005,7 @@ define i64 @fcvt_l_h(half %a) nounwind {
; RV32IZHINX: # %bb.0:
; RV32IZHINX-NEXT: addi sp, sp, -16
; RV32IZHINX-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IZHINX-NEXT: call __fixhfdi@plt
+; RV32IZHINX-NEXT: call __fixhfdi
; RV32IZHINX-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32IZHINX-NEXT: addi sp, sp, 16
; RV32IZHINX-NEXT: ret
@@ -2019,7 +2019,7 @@ define i64 @fcvt_l_h(half %a) nounwind {
; RV32IZDINXZHINX: # %bb.0:
; RV32IZDINXZHINX-NEXT: addi sp, sp, -16
; RV32IZDINXZHINX-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IZDINXZHINX-NEXT: call __fixhfdi@plt
+; RV32IZDINXZHINX-NEXT: call __fixhfdi
; RV32IZDINXZHINX-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32IZDINXZHINX-NEXT: addi sp, sp, 16
; RV32IZDINXZHINX-NEXT: ret
@@ -2035,8 +2035,8 @@ define i64 @fcvt_l_h(half %a) nounwind {
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT: slli a0, a0, 16
; RV32I-NEXT: srli a0, a0, 16
-; RV32I-NEXT: call __extendhfsf2@plt
-; RV32I-NEXT: call __fixsfdi@plt
+; RV32I-NEXT: call __extendhfsf2
+; RV32I-NEXT: call __fixsfdi
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
@@ -2047,8 +2047,8 @@ define i64 @fcvt_l_h(half %a) nounwind {
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT: slli a0, a0, 48
; RV64I-NEXT: srli a0, a0, 48
-; RV64I-NEXT: call __extendhfsf2@plt
-; RV64I-NEXT: call __fixsfdi@plt
+; RV64I-NEXT: call __extendhfsf2
+; RV64I-NEXT: call __fixsfdi
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
@@ -2057,8 +2057,8 @@ define i64 @fcvt_l_h(half %a) nounwind {
; RV32ID-ILP32: # %bb.0:
; RV32ID-ILP32-NEXT: addi sp, sp, -16
; RV32ID-ILP32-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32ID-ILP32-NEXT: call __extendhfsf2@plt
-; RV32ID-ILP32-NEXT: call __fixsfdi@plt
+; RV32ID-ILP32-NEXT: call __extendhfsf2
+; RV32ID-ILP32-NEXT: call __fixsfdi
; RV32ID-ILP32-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32ID-ILP32-NEXT: addi sp, sp, 16
; RV32ID-ILP32-NEXT: ret
@@ -2067,7 +2067,7 @@ define i64 @fcvt_l_h(half %a) nounwind {
; RV64ID-LP64: # %bb.0:
; RV64ID-LP64-NEXT: addi sp, sp, -16
; RV64ID-LP64-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
-; RV64ID-LP64-NEXT: call __extendhfsf2@plt
+; RV64ID-LP64-NEXT: call __extendhfsf2
; RV64ID-LP64-NEXT: fmv.w.x fa5, a0
; RV64ID-LP64-NEXT: fcvt.l.s a0, fa5, rtz
; RV64ID-LP64-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
@@ -2078,8 +2078,8 @@ define i64 @fcvt_l_h(half %a) nounwind {
; RV32ID: # %bb.0:
; RV32ID-NEXT: addi sp, sp, -16
; RV32ID-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32ID-NEXT: call __extendhfsf2@plt
-; RV32ID-NEXT: call __fixsfdi@plt
+; RV32ID-NEXT: call __extendhfsf2
+; RV32ID-NEXT: call __fixsfdi
; RV32ID-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32ID-NEXT: addi sp, sp, 16
; RV32ID-NEXT: ret
@@ -2088,7 +2088,7 @@ define i64 @fcvt_l_h(half %a) nounwind {
; RV64ID: # %bb.0:
; RV64ID-NEXT: addi sp, sp, -16
; RV64ID-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
-; RV64ID-NEXT: call __extendhfsf2@plt
+; RV64ID-NEXT: call __extendhfsf2
; RV64ID-NEXT: fcvt.l.s a0, fa0, rtz
; RV64ID-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64ID-NEXT: addi sp, sp, 16
@@ -2098,7 +2098,7 @@ define i64 @fcvt_l_h(half %a) nounwind {
; CHECK32-IZFHMIN: # %bb.0:
; CHECK32-IZFHMIN-NEXT: addi sp, sp, -16
; CHECK32-IZFHMIN-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; CHECK32-IZFHMIN-NEXT: call __fixhfdi@plt
+; CHECK32-IZFHMIN-NEXT: call __fixhfdi
; CHECK32-IZFHMIN-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; CHECK32-IZFHMIN-NEXT: addi sp, sp, 16
; CHECK32-IZFHMIN-NEXT: ret
@@ -2113,7 +2113,7 @@ define i64 @fcvt_l_h(half %a) nounwind {
; CHECK32-IZHINXMIN: # %bb.0:
; CHECK32-IZHINXMIN-NEXT: addi sp, sp, -16
; CHECK32-IZHINXMIN-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; CHECK32-IZHINXMIN-NEXT: call __fixhfdi@plt
+; CHECK32-IZHINXMIN-NEXT: call __fixhfdi
; CHECK32-IZHINXMIN-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; CHECK32-IZHINXMIN-NEXT: addi sp, sp, 16
; CHECK32-IZHINXMIN-NEXT: ret
@@ -2128,7 +2128,7 @@ define i64 @fcvt_l_h(half %a) nounwind {
; CHECK32-IZDINXZHINXMIN: # %bb.0:
; CHECK32-IZDINXZHINXMIN-NEXT: addi sp, sp, -16
; CHECK32-IZDINXZHINXMIN-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; CHECK32-IZDINXZHINXMIN-NEXT: call __fixhfdi@plt
+; CHECK32-IZDINXZHINXMIN-NEXT: call __fixhfdi
; CHECK32-IZDINXZHINXMIN-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; CHECK32-IZDINXZHINXMIN-NEXT: addi sp, sp, 16
; CHECK32-IZDINXZHINXMIN-NEXT: ret
@@ -2154,7 +2154,7 @@ define i64 @fcvt_l_h_sat(half %a) nounwind {
; RV32IZFH-NEXT: fmv.w.x fa5, a0
; RV32IZFH-NEXT: fle.s s0, fa5, fs0
; RV32IZFH-NEXT: fmv.s fa0, fs0
-; RV32IZFH-NEXT: call __fixsfdi@plt
+; RV32IZFH-NEXT: call __fixsfdi
; RV32IZFH-NEXT: lui a4, 524288
; RV32IZFH-NEXT: lui a2, 524288
; RV32IZFH-NEXT: beqz s0, .LBB10_2
@@ -2202,7 +2202,7 @@ define i64 @fcvt_l_h_sat(half %a) nounwind {
; RV32IDZFH-NEXT: fmv.w.x fa5, a0
; RV32IDZFH-NEXT: fle.s s0, fa5, fs0
; RV32IDZFH-NEXT: fmv.s fa0, fs0
-; RV32IDZFH-NEXT: call __fixsfdi@plt
+; RV32IDZFH-NEXT: call __fixsfdi
; RV32IDZFH-NEXT: lui a4, 524288
; RV32IDZFH-NEXT: lui a2, 524288
; RV32IDZFH-NEXT: beqz s0, .LBB10_2
@@ -2257,7 +2257,7 @@ define i64 @fcvt_l_h_sat(half %a) nounwind {
; RV32IZHINX-NEXT: fle.s s3, a0, s0
; RV32IZHINX-NEXT: neg s4, s3
; RV32IZHINX-NEXT: mv a0, s0
-; RV32IZHINX-NEXT: call __fixsfdi@plt
+; RV32IZHINX-NEXT: call __fixsfdi
; RV32IZHINX-NEXT: and a0, s4, a0
; RV32IZHINX-NEXT: or a0, s2, a0
; RV32IZHINX-NEXT: feq.s a2, s0, s0
@@ -2310,7 +2310,7 @@ define i64 @fcvt_l_h_sat(half %a) nounwind {
; RV32IZDINXZHINX-NEXT: fle.s s3, a0, s0
; RV32IZDINXZHINX-NEXT: neg s4, s3
; RV32IZDINXZHINX-NEXT: mv a0, s0
-; RV32IZDINXZHINX-NEXT: call __fixsfdi@plt
+; RV32IZDINXZHINX-NEXT: call __fixsfdi
; RV32IZDINXZHINX-NEXT: and a0, s4, a0
; RV32IZDINXZHINX-NEXT: or a0, s2, a0
; RV32IZDINXZHINX-NEXT: feq.s a2, s0, s0
@@ -2355,13 +2355,13 @@ define i64 @fcvt_l_h_sat(half %a) nounwind {
; RV32I-NEXT: sw s3, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT: sw s4, 8(sp) # 4-byte Folded Spill
; RV32I-NEXT: sw s5, 4(sp) # 4-byte Folded Spill
-; RV32I-NEXT: call __extendhfsf2@plt
+; RV32I-NEXT: call __extendhfsf2
; RV32I-NEXT: mv s1, a0
; RV32I-NEXT: lui a1, 913408
-; RV32I-NEXT: call __gesf2@plt
+; RV32I-NEXT: call __gesf2
; RV32I-NEXT: mv s0, a0
; RV32I-NEXT: mv a0, s1
-; RV32I-NEXT: call __fixsfdi@plt
+; RV32I-NEXT: call __fixsfdi
; RV32I-NEXT: mv s2, a0
; RV32I-NEXT: mv s3, a1
; RV32I-NEXT: lui s5, 524288
@@ -2372,7 +2372,7 @@ define i64 @fcvt_l_h_sat(half %a) nounwind {
; RV32I-NEXT: lui a1, 389120
; RV32I-NEXT: addi a1, a1, -1
; RV32I-NEXT: mv a0, s1
-; RV32I-NEXT: call __gtsf2@plt
+; RV32I-NEXT: call __gtsf2
; RV32I-NEXT: mv s4, a0
; RV32I-NEXT: blez a0, .LBB10_4
; RV32I-NEXT: # %bb.3: # %start
@@ -2380,7 +2380,7 @@ define i64 @fcvt_l_h_sat(half %a) nounwind {
; RV32I-NEXT: .LBB10_4: # %start
; RV32I-NEXT: mv a0, s1
; RV32I-NEXT: mv a1, s1
-; RV32I-NEXT: call __unordsf2@plt
+; RV32I-NEXT: call __unordsf2
; RV32I-NEXT: snez a0, a0
; RV32I-NEXT: addi a0, a0, -1
; RV32I-NEXT: and a1, a0, s3
@@ -2411,13 +2411,13 @@ define i64 @fcvt_l_h_sat(half %a) nounwind {
; RV64I-NEXT: sd s3, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT: slli a0, a0, 48
; RV64I-NEXT: srli a0, a0, 48
-; RV64I-NEXT: call __extendhfsf2@plt
+; RV64I-NEXT: call __extendhfsf2
; RV64I-NEXT: mv s0, a0
; RV64I-NEXT: lui a1, 913408
-; RV64I-NEXT: call __gesf2@plt
+; RV64I-NEXT: call __gesf2
; RV64I-NEXT: mv s2, a0
; RV64I-NEXT: mv a0, s0
-; RV64I-NEXT: call __fixsfdi@plt
+; RV64I-NEXT: call __fixsfdi
; RV64I-NEXT: mv s1, a0
; RV64I-NEXT: li s3, -1
; RV64I-NEXT: bgez s2, .LBB10_2
@@ -2427,14 +2427,14 @@ define i64 @fcvt_l_h_sat(half %a) nounwind {
; RV64I-NEXT: lui a1, 389120
; RV64I-NEXT: addiw a1, a1, -1
; RV64I-NEXT: mv a0, s0
-; RV64I-NEXT: call __gtsf2@plt
+; RV64I-NEXT: call __gtsf2
; RV64I-NEXT: blez a0, .LBB10_4
; RV64I-NEXT: # %bb.3: # %start
; RV64I-NEXT: srli s1, s3, 1
; RV64I-NEXT: .LBB10_4: # %start
; RV64I-NEXT: mv a0, s0
; RV64I-NEXT: mv a1, s0
-; RV64I-NEXT: call __unordsf2@plt
+; RV64I-NEXT: call __unordsf2
; RV64I-NEXT: snez a0, a0
; RV64I-NEXT: addi a0, a0, -1
; RV64I-NEXT: and a0, a0, s1
@@ -2451,13 +2451,13 @@ define i64 @fcvt_l_h_sat(half %a) nounwind {
; RV32ID-ILP32-NEXT: addi sp, sp, -16
; RV32ID-ILP32-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32ID-ILP32-NEXT: sw s0, 8(sp) # 4-byte Folded Spill
-; RV32ID-ILP32-NEXT: call __extendhfsf2@plt
+; RV32ID-ILP32-NEXT: call __extendhfsf2
; RV32ID-ILP32-NEXT: fmv.w.x fa4, a0
; RV32ID-ILP32-NEXT: lui a1, 913408
; RV32ID-ILP32-NEXT: fmv.w.x fa5, a1
; RV32ID-ILP32-NEXT: fsw fa4, 4(sp) # 4-byte Folded Spill
; RV32ID-ILP32-NEXT: fle.s s0, fa5, fa4
-; RV32ID-ILP32-NEXT: call __fixsfdi@plt
+; RV32ID-ILP32-NEXT: call __fixsfdi
; RV32ID-ILP32-NEXT: lui a4, 524288
; RV32ID-ILP32-NEXT: lui a2, 524288
; RV32ID-ILP32-NEXT: beqz s0, .LBB10_2
@@ -2490,7 +2490,7 @@ define i64 @fcvt_l_h_sat(half %a) nounwind {
; RV64ID-LP64: # %bb.0: # %start
; RV64ID-LP64-NEXT: addi sp, sp, -16
; RV64ID-LP64-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
-; RV64ID-LP64-NEXT: call __extendhfsf2@plt
+; RV64ID-LP64-NEXT: call __extendhfsf2
; RV64ID-LP64-NEXT: fmv.w.x fa5, a0
; RV64ID-LP64-NEXT: fcvt.l.s a0, fa5, rtz
; RV64ID-LP64-NEXT: feq.s a1, fa5, fa5
@@ -2507,12 +2507,12 @@ define i64 @fcvt_l_h_sat(half %a) nounwind {
; RV32ID-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32ID-NEXT: sw s0, 8(sp) # 4-byte Folded Spill
; RV32ID-NEXT: fsd fs0, 0(sp) # 8-byte Folded Spill
-; RV32ID-NEXT: call __extendhfsf2@plt
+; RV32ID-NEXT: call __extendhfsf2
; RV32ID-NEXT: fmv.s fs0, fa0
; RV32ID-NEXT: lui a0, 913408
; RV32ID-NEXT: fmv.w.x fa5, a0
; RV32ID-NEXT: fle.s s0, fa5, fa0
-; RV32ID-NEXT: call __fixsfdi@plt
+; RV32ID-NEXT: call __fixsfdi
; RV32ID-NEXT: lui a4, 524288
; RV32ID-NEXT: lui a2, 524288
; RV32ID-NEXT: beqz s0, .LBB10_2
@@ -2544,7 +2544,7 @@ define i64 @fcvt_l_h_sat(half %a) nounwind {
; RV64ID: # %bb.0: # %start
; RV64ID-NEXT: addi sp, sp, -16
; RV64ID-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
-; RV64ID-NEXT: call __extendhfsf2@plt
+; RV64ID-NEXT: call __extendhfsf2
; RV64ID-NEXT: fcvt.l.s a0, fa0, rtz
; RV64ID-NEXT: feq.s a1, fa0, fa0
; RV64ID-NEXT: seqz a1, a1
@@ -2565,7 +2565,7 @@ define i64 @fcvt_l_h_sat(half %a) nounwind {
; RV32IFZFHMIN-NEXT: fmv.w.x fa5, a0
; RV32IFZFHMIN-NEXT: fle.s s0, fa5, fs0
; RV32IFZFHMIN-NEXT: fmv.s fa0, fs0
-; RV32IFZFHMIN-NEXT: call __fixsfdi@plt
+; RV32IFZFHMIN-NEXT: call __fixsfdi
; RV32IFZFHMIN-NEXT: lui a4, 524288
; RV32IFZFHMIN-NEXT: lui a2, 524288
; RV32IFZFHMIN-NEXT: beqz s0, .LBB10_2
@@ -2614,7 +2614,7 @@ define i64 @fcvt_l_h_sat(half %a) nounwind {
; RV32IDZFHMIN-NEXT: fmv.w.x fa5, a0
; RV32IDZFHMIN-NEXT: fle.s s0, fa5, fs0
; RV32IDZFHMIN-NEXT: fmv.s fa0, fs0
-; RV32IDZFHMIN-NEXT: call __fixsfdi@plt
+; RV32IDZFHMIN-NEXT: call __fixsfdi
; RV32IDZFHMIN-NEXT: lui a4, 524288
; RV32IDZFHMIN-NEXT: lui a2, 524288
; RV32IDZFHMIN-NEXT: beqz s0, .LBB10_2
@@ -2660,7 +2660,7 @@ define i64 @fcvt_l_h_sat(half %a) nounwind {
; CHECK32-IZHINXMIN-NEXT: fle.s s3, a0, s0
; CHECK32-IZHINXMIN-NEXT: neg s4, s3
; CHECK32-IZHINXMIN-NEXT: mv a0, s0
-; CHECK32-IZHINXMIN-NEXT: call __fixsfdi@plt
+; CHECK32-IZHINXMIN-NEXT: call __fixsfdi
; CHECK32-IZHINXMIN-NEXT: and a0, s4, a0
; CHECK32-IZHINXMIN-NEXT: or a0, s2, a0
; CHECK32-IZHINXMIN-NEXT: feq.s a2, s0, s0
@@ -2714,7 +2714,7 @@ define i64 @fcvt_l_h_sat(half %a) nounwind {
; CHECK32-IZDINXZHINXMIN-NEXT: fle.s s3, a0, s0
; CHECK32-IZDINXZHINXMIN-NEXT: neg s4, s3
; CHECK32-IZDINXZHINXMIN-NEXT: mv a0, s0
-; CHECK32-IZDINXZHINXMIN-NEXT: call __fixsfdi@plt
+; CHECK32-IZDINXZHINXMIN-NEXT: call __fixsfdi
; CHECK32-IZDINXZHINXMIN-NEXT: and a0, s4, a0
; CHECK32-IZDINXZHINXMIN-NEXT: or a0, s2, a0
; CHECK32-IZDINXZHINXMIN-NEXT: feq.s a2, s0, s0
@@ -2760,7 +2760,7 @@ define i64 @fcvt_lu_h(half %a) nounwind {
; RV32IZFH: # %bb.0:
; RV32IZFH-NEXT: addi sp, sp, -16
; RV32IZFH-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IZFH-NEXT: call __fixunshfdi@plt
+; RV32IZFH-NEXT: call __fixunshfdi
; RV32IZFH-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32IZFH-NEXT: addi sp, sp, 16
; RV32IZFH-NEXT: ret
@@ -2774,7 +2774,7 @@ define i64 @fcvt_lu_h(half %a) nounwind {
; RV32IDZFH: # %bb.0:
; RV32IDZFH-NEXT: addi sp, sp, -16
; RV32IDZFH-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IDZFH-NEXT: call __fixunshfdi@plt
+; RV32IDZFH-NEXT: call __fixunshfdi
; RV32IDZFH-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32IDZFH-NEXT: addi sp, sp, 16
; RV32IDZFH-NEXT: ret
@@ -2788,7 +2788,7 @@ define i64 @fcvt_lu_h(half %a) nounwind {
; RV32IZHINX: # %bb.0:
; RV32IZHINX-NEXT: addi sp, sp, -16
; RV32IZHINX-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IZHINX-NEXT: call __fixunshfdi@plt
+; RV32IZHINX-NEXT: call __fixunshfdi
; RV32IZHINX-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32IZHINX-NEXT: addi sp, sp, 16
; RV32IZHINX-NEXT: ret
@@ -2802,7 +2802,7 @@ define i64 @fcvt_lu_h(half %a) nounwind {
; RV32IZDINXZHINX: # %bb.0:
; RV32IZDINXZHINX-NEXT: addi sp, sp, -16
; RV32IZDINXZHINX-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IZDINXZHINX-NEXT: call __fixunshfdi@plt
+; RV32IZDINXZHINX-NEXT: call __fixunshfdi
; RV32IZDINXZHINX-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32IZDINXZHINX-NEXT: addi sp, sp, 16
; RV32IZDINXZHINX-NEXT: ret
@@ -2818,8 +2818,8 @@ define i64 @fcvt_lu_h(half %a) nounwind {
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT: slli a0, a0, 16
; RV32I-NEXT: srli a0, a0, 16
-; RV32I-NEXT: call __extendhfsf2@plt
-; RV32I-NEXT: call __fixunssfdi@plt
+; RV32I-NEXT: call __extendhfsf2
+; RV32I-NEXT: call __fixunssfdi
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
@@ -2830,8 +2830,8 @@ define i64 @fcvt_lu_h(half %a) nounwind {
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT: slli a0, a0, 48
; RV64I-NEXT: srli a0, a0, 48
-; RV64I-NEXT: call __extendhfsf2@plt
-; RV64I-NEXT: call __fixunssfdi@plt
+; RV64I-NEXT: call __extendhfsf2
+; RV64I-NEXT: call __fixunssfdi
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
@@ -2840,8 +2840,8 @@ define i64 @fcvt_lu_h(half %a) nounwind {
; RV32ID-ILP32: # %bb.0:
; RV32ID-ILP32-NEXT: addi sp, sp, -16
; RV32ID-ILP32-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32ID-ILP32-NEXT: call __extendhfsf2@plt
-; RV32ID-ILP32-NEXT: call __fixunssfdi@plt
+; RV32ID-ILP32-NEXT: call __extendhfsf2
+; RV32ID-ILP32-NEXT: call __fixunssfdi
; RV32ID-ILP32-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32ID-ILP32-NEXT: addi sp, sp, 16
; RV32ID-ILP32-NEXT: ret
@@ -2850,7 +2850,7 @@ define i64 @fcvt_lu_h(half %a) nounwind {
; RV64ID-LP64: # %bb.0:
; RV64ID-LP64-NEXT: addi sp, sp, -16
; RV64ID-LP64-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
-; RV64ID-LP64-NEXT: call __extendhfsf2@plt
+; RV64ID-LP64-NEXT: call __extendhfsf2
; RV64ID-LP64-NEXT: fmv.w.x fa5, a0
; RV64ID-LP64-NEXT: fcvt.lu.s a0, fa5, rtz
; RV64ID-LP64-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
@@ -2861,8 +2861,8 @@ define i64 @fcvt_lu_h(half %a) nounwind {
; RV32ID: # %bb.0:
; RV32ID-NEXT: addi sp, sp, -16
; RV32ID-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32ID-NEXT: call __extendhfsf2@plt
-; RV32ID-NEXT: call __fixunssfdi@plt
+; RV32ID-NEXT: call __extendhfsf2
+; RV32ID-NEXT: call __fixunssfdi
; RV32ID-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32ID-NEXT: addi sp, sp, 16
; RV32ID-NEXT: ret
@@ -2871,7 +2871,7 @@ define i64 @fcvt_lu_h(half %a) nounwind {
; RV64ID: # %bb.0:
; RV64ID-NEXT: addi sp, sp, -16
; RV64ID-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
-; RV64ID-NEXT: call __extendhfsf2@plt
+; RV64ID-NEXT: call __extendhfsf2
; RV64ID-NEXT: fcvt.lu.s a0, fa0, rtz
; RV64ID-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64ID-NEXT: addi sp, sp, 16
@@ -2881,7 +2881,7 @@ define i64 @fcvt_lu_h(half %a) nounwind {
; CHECK32-IZFHMIN: # %bb.0:
; CHECK32-IZFHMIN-NEXT: addi sp, sp, -16
; CHECK32-IZFHMIN-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; CHECK32-IZFHMIN-NEXT: call __fixunshfdi@plt
+; CHECK32-IZFHMIN-NEXT: call __fixunshfdi
; CHECK32-IZFHMIN-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; CHECK32-IZFHMIN-NEXT: addi sp, sp, 16
; CHECK32-IZFHMIN-NEXT: ret
@@ -2896,7 +2896,7 @@ define i64 @fcvt_lu_h(half %a) nounwind {
; CHECK32-IZHINXMIN: # %bb.0:
; CHECK32-IZHINXMIN-NEXT: addi sp, sp, -16
; CHECK32-IZHINXMIN-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; CHECK32-IZHINXMIN-NEXT: call __fixunshfdi@plt
+; CHECK32-IZHINXMIN-NEXT: call __fixunshfdi
; CHECK32-IZHINXMIN-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; CHECK32-IZHINXMIN-NEXT: addi sp, sp, 16
; CHECK32-IZHINXMIN-NEXT: ret
@@ -2911,7 +2911,7 @@ define i64 @fcvt_lu_h(half %a) nounwind {
; CHECK32-IZDINXZHINXMIN: # %bb.0:
; CHECK32-IZDINXZHINXMIN-NEXT: addi sp, sp, -16
; CHECK32-IZDINXZHINXMIN-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; CHECK32-IZDINXZHINXMIN-NEXT: call __fixunshfdi@plt
+; CHECK32-IZDINXZHINXMIN-NEXT: call __fixunshfdi
; CHECK32-IZDINXZHINXMIN-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; CHECK32-IZDINXZHINXMIN-NEXT: addi sp, sp, 16
; CHECK32-IZDINXZHINXMIN-NEXT: ret
@@ -2940,7 +2940,7 @@ define i64 @fcvt_lu_h_sat(half %a) nounwind {
; RV32IZFH-NEXT: fmv.w.x fa5, zero
; RV32IZFH-NEXT: fle.s a0, fa5, fa0
; RV32IZFH-NEXT: neg s1, a0
-; RV32IZFH-NEXT: call __fixunssfdi@plt
+; RV32IZFH-NEXT: call __fixunssfdi
; RV32IZFH-NEXT: and a0, s1, a0
; RV32IZFH-NEXT: or a0, s0, a0
; RV32IZFH-NEXT: and a1, s1, a1
@@ -2974,7 +2974,7 @@ define i64 @fcvt_lu_h_sat(half %a) nounwind {
; RV32IDZFH-NEXT: fmv.w.x fa5, zero
; RV32IDZFH-NEXT: fle.s a0, fa5, fa0
; RV32IDZFH-NEXT: neg s1, a0
-; RV32IDZFH-NEXT: call __fixunssfdi@plt
+; RV32IDZFH-NEXT: call __fixunssfdi
; RV32IDZFH-NEXT: and a0, s1, a0
; RV32IDZFH-NEXT: or a0, s0, a0
; RV32IDZFH-NEXT: and a1, s1, a1
@@ -3007,7 +3007,7 @@ define i64 @fcvt_lu_h_sat(half %a) nounwind {
; RV32IZHINX-NEXT: neg s0, a1
; RV32IZHINX-NEXT: fle.s a1, zero, a0
; RV32IZHINX-NEXT: neg s1, a1
-; RV32IZHINX-NEXT: call __fixunssfdi@plt
+; RV32IZHINX-NEXT: call __fixunssfdi
; RV32IZHINX-NEXT: and a0, s1, a0
; RV32IZHINX-NEXT: or a0, s0, a0
; RV32IZHINX-NEXT: and a1, s1, a1
@@ -3040,7 +3040,7 @@ define i64 @fcvt_lu_h_sat(half %a) nounwind {
; RV32IZDINXZHINX-NEXT: neg s0, a1
; RV32IZDINXZHINX-NEXT: fle.s a1, zero, a0
; RV32IZDINXZHINX-NEXT: neg s1, a1
-; RV32IZDINXZHINX-NEXT: call __fixunssfdi@plt
+; RV32IZDINXZHINX-NEXT: call __fixunssfdi
; RV32IZDINXZHINX-NEXT: and a0, s1, a0
; RV32IZDINXZHINX-NEXT: or a0, s0, a0
; RV32IZDINXZHINX-NEXT: and a1, s1, a1
@@ -3067,20 +3067,20 @@ define i64 @fcvt_lu_h_sat(half %a) nounwind {
; RV32I-NEXT: sw s0, 8(sp) # 4-byte Folded Spill
; RV32I-NEXT: sw s1, 4(sp) # 4-byte Folded Spill
; RV32I-NEXT: sw s2, 0(sp) # 4-byte Folded Spill
-; RV32I-NEXT: call __extendhfsf2@plt
+; RV32I-NEXT: call __extendhfsf2
; RV32I-NEXT: mv s0, a0
; RV32I-NEXT: lui a1, 391168
; RV32I-NEXT: addi a1, a1, -1
-; RV32I-NEXT: call __gtsf2@plt
+; RV32I-NEXT: call __gtsf2
; RV32I-NEXT: sgtz a0, a0
; RV32I-NEXT: neg s1, a0
; RV32I-NEXT: mv a0, s0
; RV32I-NEXT: li a1, 0
-; RV32I-NEXT: call __gesf2@plt
+; RV32I-NEXT: call __gesf2
; RV32I-NEXT: slti a0, a0, 0
; RV32I-NEXT: addi s2, a0, -1
; RV32I-NEXT: mv a0, s0
-; RV32I-NEXT: call __fixunssfdi@plt
+; RV32I-NEXT: call __fixunssfdi
; RV32I-NEXT: and a0, s2, a0
; RV32I-NEXT: or a0, s1, a0
; RV32I-NEXT: and a1, s2, a1
@@ -3101,20 +3101,20 @@ define i64 @fcvt_lu_h_sat(half %a) nounwind {
; RV64I-NEXT: sd s2, 0(sp) # 8-byte Folded Spill
; RV64I-NEXT: slli a0, a0, 48
; RV64I-NEXT: srli a0, a0, 48
-; RV64I-NEXT: call __extendhfsf2@plt
+; RV64I-NEXT: call __extendhfsf2
; RV64I-NEXT: mv s0, a0
; RV64I-NEXT: lui a1, 391168
; RV64I-NEXT: addiw a1, a1, -1
-; RV64I-NEXT: call __gtsf2@plt
+; RV64I-NEXT: call __gtsf2
; RV64I-NEXT: sgtz a0, a0
; RV64I-NEXT: neg s1, a0
; RV64I-NEXT: mv a0, s0
; RV64I-NEXT: li a1, 0
-; RV64I-NEXT: call __gesf2@plt
+; RV64I-NEXT: call __gesf2
; RV64I-NEXT: slti a0, a0, 0
; RV64I-NEXT: addi s2, a0, -1
; RV64I-NEXT: mv a0, s0
-; RV64I-NEXT: call __fixunssfdi@plt
+; RV64I-NEXT: call __fixunssfdi
; RV64I-NEXT: and a0, s2, a0
; RV64I-NEXT: or a0, s1, a0
; RV64I-NEXT: ld ra, 24(sp) # 8-byte Folded Reload
@@ -3130,7 +3130,7 @@ define i64 @fcvt_lu_h_sat(half %a) nounwind {
; RV32ID-ILP32-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32ID-ILP32-NEXT: sw s0, 8(sp) # 4-byte Folded Spill
; RV32ID-ILP32-NEXT: sw s1, 4(sp) # 4-byte Folded Spill
-; RV32ID-ILP32-NEXT: call __extendhfsf2@plt
+; RV32ID-ILP32-NEXT: call __extendhfsf2
; RV32ID-ILP32-NEXT: lui a1, %hi(.LCPI12_0)
; RV32ID-ILP32-NEXT: flw fa5, %lo(.LCPI12_0)(a1)
; RV32ID-ILP32-NEXT: fmv.w.x fa4, a0
@@ -3139,7 +3139,7 @@ define i64 @fcvt_lu_h_sat(half %a) nounwind {
; RV32ID-ILP32-NEXT: fmv.w.x fa5, zero
; RV32ID-ILP32-NEXT: fle.s a1, fa5, fa4
; RV32ID-ILP32-NEXT: neg s1, a1
-; RV32ID-ILP32-NEXT: call __fixunssfdi@plt
+; RV32ID-ILP32-NEXT: call __fixunssfdi
; RV32ID-ILP32-NEXT: and a0, s1, a0
; RV32ID-ILP32-NEXT: or a0, s0, a0
; RV32ID-ILP32-NEXT: and a1, s1, a1
@@ -3154,7 +3154,7 @@ define i64 @fcvt_lu_h_sat(half %a) nounwind {
; RV64ID-LP64: # %bb.0: # %start
; RV64ID-LP64-NEXT: addi sp, sp, -16
; RV64ID-LP64-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
-; RV64ID-LP64-NEXT: call __extendhfsf2@plt
+; RV64ID-LP64-NEXT: call __extendhfsf2
; RV64ID-LP64-NEXT: fmv.w.x fa5, a0
; RV64ID-LP64-NEXT: fcvt.lu.s a0, fa5, rtz
; RV64ID-LP64-NEXT: feq.s a1, fa5, fa5
@@ -3171,7 +3171,7 @@ define i64 @fcvt_lu_h_sat(half %a) nounwind {
; RV32ID-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32ID-NEXT: sw s0, 8(sp) # 4-byte Folded Spill
; RV32ID-NEXT: sw s1, 4(sp) # 4-byte Folded Spill
-; RV32ID-NEXT: call __extendhfsf2@plt
+; RV32ID-NEXT: call __extendhfsf2
; RV32ID-NEXT: lui a0, %hi(.LCPI12_0)
; RV32ID-NEXT: flw fa5, %lo(.LCPI12_0)(a0)
; RV32ID-NEXT: flt.s a0, fa5, fa0
@@ -3179,7 +3179,7 @@ define i64 @fcvt_lu_h_sat(half %a) nounwind {
; RV32ID-NEXT: fmv.w.x fa5, zero
; RV32ID-NEXT: fle.s a0, fa5, fa0
; RV32ID-NEXT: neg s1, a0
-; RV32ID-NEXT: call __fixunssfdi@plt
+; RV32ID-NEXT: call __fixunssfdi
; RV32ID-NEXT: and a0, s1, a0
; RV32ID-NEXT: or a0, s0, a0
; RV32ID-NEXT: and a1, s1, a1
@@ -3194,7 +3194,7 @@ define i64 @fcvt_lu_h_sat(half %a) nounwind {
; RV64ID: # %bb.0: # %start
; RV64ID-NEXT: addi sp, sp, -16
; RV64ID-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
-; RV64ID-NEXT: call __extendhfsf2@plt
+; RV64ID-NEXT: call __extendhfsf2
; RV64ID-NEXT: fcvt.lu.s a0, fa0, rtz
; RV64ID-NEXT: feq.s a1, fa0, fa0
; RV64ID-NEXT: seqz a1, a1
@@ -3218,7 +3218,7 @@ define i64 @fcvt_lu_h_sat(half %a) nounwind {
; CHECK32-IZFHMIN-NEXT: fmv.w.x fa5, zero
; CHECK32-IZFHMIN-NEXT: fle.s a0, fa5, fa0
; CHECK32-IZFHMIN-NEXT: neg s1, a0
-; CHECK32-IZFHMIN-NEXT: call __fixunssfdi@plt
+; CHECK32-IZFHMIN-NEXT: call __fixunssfdi
; CHECK32-IZFHMIN-NEXT: and a0, s1, a0
; CHECK32-IZFHMIN-NEXT: or a0, s0, a0
; CHECK32-IZFHMIN-NEXT: and a1, s1, a1
@@ -3252,7 +3252,7 @@ define i64 @fcvt_lu_h_sat(half %a) nounwind {
; CHECK32-IZHINXMIN-NEXT: neg s0, a1
; CHECK32-IZHINXMIN-NEXT: fle.s a1, zero, a0
; CHECK32-IZHINXMIN-NEXT: neg s1, a1
-; CHECK32-IZHINXMIN-NEXT: call __fixunssfdi@plt
+; CHECK32-IZHINXMIN-NEXT: call __fixunssfdi
; CHECK32-IZHINXMIN-NEXT: and a0, s1, a0
; CHECK32-IZHINXMIN-NEXT: or a0, s0, a0
; CHECK32-IZHINXMIN-NEXT: and a1, s1, a1
@@ -3286,7 +3286,7 @@ define i64 @fcvt_lu_h_sat(half %a) nounwind {
; CHECK32-IZDINXZHINXMIN-NEXT: neg s0, a1
; CHECK32-IZDINXZHINXMIN-NEXT: fle.s a1, zero, a0
; CHECK32-IZDINXZHINXMIN-NEXT: neg s1, a1
-; CHECK32-IZDINXZHINXMIN-NEXT: call __fixunssfdi@plt
+; CHECK32-IZDINXZHINXMIN-NEXT: call __fixunssfdi
; CHECK32-IZDINXZHINXMIN-NEXT: and a0, s1, a0
; CHECK32-IZDINXZHINXMIN-NEXT: or a0, s0, a0
; CHECK32-IZDINXZHINXMIN-NEXT: and a1, s1, a1
@@ -3375,8 +3375,8 @@ define half @fcvt_h_si(i16 %a) nounwind {
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT: slli a0, a0, 16
; RV32I-NEXT: srai a0, a0, 16
-; RV32I-NEXT: call __floatsisf@plt
-; RV32I-NEXT: call __truncsfhf2@plt
+; RV32I-NEXT: call __floatsisf
+; RV32I-NEXT: call __truncsfhf2
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
@@ -3387,8 +3387,8 @@ define half @fcvt_h_si(i16 %a) nounwind {
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT: slli a0, a0, 48
; RV64I-NEXT: srai a0, a0, 48
-; RV64I-NEXT: call __floatsisf@plt
-; RV64I-NEXT: call __truncsfhf2@plt
+; RV64I-NEXT: call __floatsisf
+; RV64I-NEXT: call __truncsfhf2
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
@@ -3401,7 +3401,7 @@ define half @fcvt_h_si(i16 %a) nounwind {
; RV32ID-ILP32-NEXT: srai a0, a0, 16
; RV32ID-ILP32-NEXT: fcvt.s.w fa5, a0
; RV32ID-ILP32-NEXT: fmv.x.w a0, fa5
-; RV32ID-ILP32-NEXT: call __truncsfhf2@plt
+; RV32ID-ILP32-NEXT: call __truncsfhf2
; RV32ID-ILP32-NEXT: lui a1, 1048560
; RV32ID-ILP32-NEXT: or a0, a0, a1
; RV32ID-ILP32-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
@@ -3416,7 +3416,7 @@ define half @fcvt_h_si(i16 %a) nounwind {
; RV64ID-LP64-NEXT: srai a0, a0, 48
; RV64ID-LP64-NEXT: fcvt.s.w fa5, a0
; RV64ID-LP64-NEXT: fmv.x.w a0, fa5
-; RV64ID-LP64-NEXT: call __truncsfhf2@plt
+; RV64ID-LP64-NEXT: call __truncsfhf2
; RV64ID-LP64-NEXT: lui a1, 1048560
; RV64ID-LP64-NEXT: or a0, a0, a1
; RV64ID-LP64-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
@@ -3430,7 +3430,7 @@ define half @fcvt_h_si(i16 %a) nounwind {
; RV32ID-NEXT: slli a0, a0, 16
; RV32ID-NEXT: srai a0, a0, 16
; RV32ID-NEXT: fcvt.s.w fa0, a0
-; RV32ID-NEXT: call __truncsfhf2@plt
+; RV32ID-NEXT: call __truncsfhf2
; RV32ID-NEXT: fmv.x.w a0, fa0
; RV32ID-NEXT: lui a1, 1048560
; RV32ID-NEXT: or a0, a0, a1
@@ -3446,7 +3446,7 @@ define half @fcvt_h_si(i16 %a) nounwind {
; RV64ID-NEXT: slli a0, a0, 48
; RV64ID-NEXT: srai a0, a0, 48
; RV64ID-NEXT: fcvt.s.w fa0, a0
-; RV64ID-NEXT: call __truncsfhf2@plt
+; RV64ID-NEXT: call __truncsfhf2
; RV64ID-NEXT: fmv.x.w a0, fa0
; RV64ID-NEXT: lui a1, 1048560
; RV64ID-NEXT: or a0, a0, a1
@@ -3536,8 +3536,8 @@ define half @fcvt_h_si_signext(i16 signext %a) nounwind {
; RV32I: # %bb.0:
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT: call __floatsisf@plt
-; RV32I-NEXT: call __truncsfhf2@plt
+; RV32I-NEXT: call __floatsisf
+; RV32I-NEXT: call __truncsfhf2
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
@@ -3546,8 +3546,8 @@ define half @fcvt_h_si_signext(i16 signext %a) nounwind {
; RV64I: # %bb.0:
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT: call __floatsisf@plt
-; RV64I-NEXT: call __truncsfhf2@plt
+; RV64I-NEXT: call __floatsisf
+; RV64I-NEXT: call __truncsfhf2
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
@@ -3558,7 +3558,7 @@ define half @fcvt_h_si_signext(i16 signext %a) nounwind {
; RV32ID-ILP32-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32ID-ILP32-NEXT: fcvt.s.w fa5, a0
; RV32ID-ILP32-NEXT: fmv.x.w a0, fa5
-; RV32ID-ILP32-NEXT: call __truncsfhf2@plt
+; RV32ID-ILP32-NEXT: call __truncsfhf2
; RV32ID-ILP32-NEXT: lui a1, 1048560
; RV32ID-ILP32-NEXT: or a0, a0, a1
; RV32ID-ILP32-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
@@ -3571,7 +3571,7 @@ define half @fcvt_h_si_signext(i16 signext %a) nounwind {
; RV64ID-LP64-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64ID-LP64-NEXT: fcvt.s.w fa5, a0
; RV64ID-LP64-NEXT: fmv.x.w a0, fa5
-; RV64ID-LP64-NEXT: call __truncsfhf2@plt
+; RV64ID-LP64-NEXT: call __truncsfhf2
; RV64ID-LP64-NEXT: lui a1, 1048560
; RV64ID-LP64-NEXT: or a0, a0, a1
; RV64ID-LP64-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
@@ -3583,7 +3583,7 @@ define half @fcvt_h_si_signext(i16 signext %a) nounwind {
; RV32ID-NEXT: addi sp, sp, -16
; RV32ID-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32ID-NEXT: fcvt.s.w fa0, a0
-; RV32ID-NEXT: call __truncsfhf2@plt
+; RV32ID-NEXT: call __truncsfhf2
; RV32ID-NEXT: fmv.x.w a0, fa0
; RV32ID-NEXT: lui a1, 1048560
; RV32ID-NEXT: or a0, a0, a1
@@ -3597,7 +3597,7 @@ define half @fcvt_h_si_signext(i16 signext %a) nounwind {
; RV64ID-NEXT: addi sp, sp, -16
; RV64ID-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64ID-NEXT: fcvt.s.w fa0, a0
-; RV64ID-NEXT: call __truncsfhf2@plt
+; RV64ID-NEXT: call __truncsfhf2
; RV64ID-NEXT: fmv.x.w a0, fa0
; RV64ID-NEXT: lui a1, 1048560
; RV64ID-NEXT: or a0, a0, a1
@@ -3708,8 +3708,8 @@ define half @fcvt_h_ui(i16 %a) nounwind {
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT: slli a0, a0, 16
; RV32I-NEXT: srli a0, a0, 16
-; RV32I-NEXT: call __floatunsisf@plt
-; RV32I-NEXT: call __truncsfhf2@plt
+; RV32I-NEXT: call __floatunsisf
+; RV32I-NEXT: call __truncsfhf2
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
@@ -3720,8 +3720,8 @@ define half @fcvt_h_ui(i16 %a) nounwind {
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT: slli a0, a0, 48
; RV64I-NEXT: srli a0, a0, 48
-; RV64I-NEXT: call __floatunsisf@plt
-; RV64I-NEXT: call __truncsfhf2@plt
+; RV64I-NEXT: call __floatunsisf
+; RV64I-NEXT: call __truncsfhf2
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
@@ -3734,7 +3734,7 @@ define half @fcvt_h_ui(i16 %a) nounwind {
; RV32ID-ILP32-NEXT: srli a0, a0, 16
; RV32ID-ILP32-NEXT: fcvt.s.wu fa5, a0
; RV32ID-ILP32-NEXT: fmv.x.w a0, fa5
-; RV32ID-ILP32-NEXT: call __truncsfhf2@plt
+; RV32ID-ILP32-NEXT: call __truncsfhf2
; RV32ID-ILP32-NEXT: lui a1, 1048560
; RV32ID-ILP32-NEXT: or a0, a0, a1
; RV32ID-ILP32-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
@@ -3749,7 +3749,7 @@ define half @fcvt_h_ui(i16 %a) nounwind {
; RV64ID-LP64-NEXT: srli a0, a0, 48
; RV64ID-LP64-NEXT: fcvt.s.wu fa5, a0
; RV64ID-LP64-NEXT: fmv.x.w a0, fa5
-; RV64ID-LP64-NEXT: call __truncsfhf2@plt
+; RV64ID-LP64-NEXT: call __truncsfhf2
; RV64ID-LP64-NEXT: lui a1, 1048560
; RV64ID-LP64-NEXT: or a0, a0, a1
; RV64ID-LP64-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
@@ -3763,7 +3763,7 @@ define half @fcvt_h_ui(i16 %a) nounwind {
; RV32ID-NEXT: slli a0, a0, 16
; RV32ID-NEXT: srli a0, a0, 16
; RV32ID-NEXT: fcvt.s.wu fa0, a0
-; RV32ID-NEXT: call __truncsfhf2@plt
+; RV32ID-NEXT: call __truncsfhf2
; RV32ID-NEXT: fmv.x.w a0, fa0
; RV32ID-NEXT: lui a1, 1048560
; RV32ID-NEXT: or a0, a0, a1
@@ -3779,7 +3779,7 @@ define half @fcvt_h_ui(i16 %a) nounwind {
; RV64ID-NEXT: slli a0, a0, 48
; RV64ID-NEXT: srli a0, a0, 48
; RV64ID-NEXT: fcvt.s.wu fa0, a0
-; RV64ID-NEXT: call __truncsfhf2@plt
+; RV64ID-NEXT: call __truncsfhf2
; RV64ID-NEXT: fmv.x.w a0, fa0
; RV64ID-NEXT: lui a1, 1048560
; RV64ID-NEXT: or a0, a0, a1
@@ -3869,8 +3869,8 @@ define half @fcvt_h_ui_zeroext(i16 zeroext %a) nounwind {
; RV32I: # %bb.0:
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT: call __floatunsisf@plt
-; RV32I-NEXT: call __truncsfhf2@plt
+; RV32I-NEXT: call __floatunsisf
+; RV32I-NEXT: call __truncsfhf2
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
@@ -3879,8 +3879,8 @@ define half @fcvt_h_ui_zeroext(i16 zeroext %a) nounwind {
; RV64I: # %bb.0:
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT: call __floatunsisf@plt
-; RV64I-NEXT: call __truncsfhf2@plt
+; RV64I-NEXT: call __floatunsisf
+; RV64I-NEXT: call __truncsfhf2
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
@@ -3891,7 +3891,7 @@ define half @fcvt_h_ui_zeroext(i16 zeroext %a) nounwind {
; RV32ID-ILP32-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32ID-ILP32-NEXT: fcvt.s.wu fa5, a0
; RV32ID-ILP32-NEXT: fmv.x.w a0, fa5
-; RV32ID-ILP32-NEXT: call __truncsfhf2@plt
+; RV32ID-ILP32-NEXT: call __truncsfhf2
; RV32ID-ILP32-NEXT: lui a1, 1048560
; RV32ID-ILP32-NEXT: or a0, a0, a1
; RV32ID-ILP32-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
@@ -3904,7 +3904,7 @@ define half @fcvt_h_ui_zeroext(i16 zeroext %a) nounwind {
; RV64ID-LP64-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64ID-LP64-NEXT: fcvt.s.wu fa5, a0
; RV64ID-LP64-NEXT: fmv.x.w a0, fa5
-; RV64ID-LP64-NEXT: call __truncsfhf2@plt
+; RV64ID-LP64-NEXT: call __truncsfhf2
; RV64ID-LP64-NEXT: lui a1, 1048560
; RV64ID-LP64-NEXT: or a0, a0, a1
; RV64ID-LP64-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
@@ -3916,7 +3916,7 @@ define half @fcvt_h_ui_zeroext(i16 zeroext %a) nounwind {
; RV32ID-NEXT: addi sp, sp, -16
; RV32ID-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32ID-NEXT: fcvt.s.wu fa0, a0
-; RV32ID-NEXT: call __truncsfhf2@plt
+; RV32ID-NEXT: call __truncsfhf2
; RV32ID-NEXT: fmv.x.w a0, fa0
; RV32ID-NEXT: lui a1, 1048560
; RV32ID-NEXT: or a0, a0, a1
@@ -3930,7 +3930,7 @@ define half @fcvt_h_ui_zeroext(i16 zeroext %a) nounwind {
; RV64ID-NEXT: addi sp, sp, -16
; RV64ID-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64ID-NEXT: fcvt.s.wu fa0, a0
-; RV64ID-NEXT: call __truncsfhf2@plt
+; RV64ID-NEXT: call __truncsfhf2
; RV64ID-NEXT: fmv.x.w a0, fa0
; RV64ID-NEXT: lui a1, 1048560
; RV64ID-NEXT: or a0, a0, a1
@@ -4008,8 +4008,8 @@ define half @fcvt_h_w(i32 %a) nounwind {
; RV32I: # %bb.0:
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT: call __floatsisf@plt
-; RV32I-NEXT: call __truncsfhf2@plt
+; RV32I-NEXT: call __floatsisf
+; RV32I-NEXT: call __truncsfhf2
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
@@ -4019,8 +4019,8 @@ define half @fcvt_h_w(i32 %a) nounwind {
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT: sext.w a0, a0
-; RV64I-NEXT: call __floatsisf@plt
-; RV64I-NEXT: call __truncsfhf2@plt
+; RV64I-NEXT: call __floatsisf
+; RV64I-NEXT: call __truncsfhf2
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
@@ -4031,7 +4031,7 @@ define half @fcvt_h_w(i32 %a) nounwind {
; RV32ID-ILP32-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32ID-ILP32-NEXT: fcvt.s.w fa5, a0
; RV32ID-ILP32-NEXT: fmv.x.w a0, fa5
-; RV32ID-ILP32-NEXT: call __truncsfhf2@plt
+; RV32ID-ILP32-NEXT: call __truncsfhf2
; RV32ID-ILP32-NEXT: lui a1, 1048560
; RV32ID-ILP32-NEXT: or a0, a0, a1
; RV32ID-ILP32-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
@@ -4044,7 +4044,7 @@ define half @fcvt_h_w(i32 %a) nounwind {
; RV64ID-LP64-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64ID-LP64-NEXT: fcvt.s.w fa5, a0
; RV64ID-LP64-NEXT: fmv.x.w a0, fa5
-; RV64ID-LP64-NEXT: call __truncsfhf2@plt
+; RV64ID-LP64-NEXT: call __truncsfhf2
; RV64ID-LP64-NEXT: lui a1, 1048560
; RV64ID-LP64-NEXT: or a0, a0, a1
; RV64ID-LP64-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
@@ -4056,7 +4056,7 @@ define half @fcvt_h_w(i32 %a) nounwind {
; RV32ID-NEXT: addi sp, sp, -16
; RV32ID-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32ID-NEXT: fcvt.s.w fa0, a0
-; RV32ID-NEXT: call __truncsfhf2@plt
+; RV32ID-NEXT: call __truncsfhf2
; RV32ID-NEXT: fmv.x.w a0, fa0
; RV32ID-NEXT: lui a1, 1048560
; RV32ID-NEXT: or a0, a0, a1
@@ -4070,7 +4070,7 @@ define half @fcvt_h_w(i32 %a) nounwind {
; RV64ID-NEXT: addi sp, sp, -16
; RV64ID-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64ID-NEXT: fcvt.s.w fa0, a0
-; RV64ID-NEXT: call __truncsfhf2@plt
+; RV64ID-NEXT: call __truncsfhf2
; RV64ID-NEXT: fmv.x.w a0, fa0
; RV64ID-NEXT: lui a1, 1048560
; RV64ID-NEXT: or a0, a0, a1
@@ -4157,8 +4157,8 @@ define half @fcvt_h_w_load(ptr %p) nounwind {
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT: lw a0, 0(a0)
-; RV32I-NEXT: call __floatsisf@plt
-; RV32I-NEXT: call __truncsfhf2@plt
+; RV32I-NEXT: call __floatsisf
+; RV32I-NEXT: call __truncsfhf2
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
@@ -4168,8 +4168,8 @@ define half @fcvt_h_w_load(ptr %p) nounwind {
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT: lw a0, 0(a0)
-; RV64I-NEXT: call __floatsisf@plt
-; RV64I-NEXT: call __truncsfhf2@plt
+; RV64I-NEXT: call __floatsisf
+; RV64I-NEXT: call __truncsfhf2
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
@@ -4181,7 +4181,7 @@ define half @fcvt_h_w_load(ptr %p) nounwind {
; RV32ID-ILP32-NEXT: lw a0, 0(a0)
; RV32ID-ILP32-NEXT: fcvt.s.w fa5, a0
; RV32ID-ILP32-NEXT: fmv.x.w a0, fa5
-; RV32ID-ILP32-NEXT: call __truncsfhf2@plt
+; RV32ID-ILP32-NEXT: call __truncsfhf2
; RV32ID-ILP32-NEXT: lui a1, 1048560
; RV32ID-ILP32-NEXT: or a0, a0, a1
; RV32ID-ILP32-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
@@ -4195,7 +4195,7 @@ define half @fcvt_h_w_load(ptr %p) nounwind {
; RV64ID-LP64-NEXT: lw a0, 0(a0)
; RV64ID-LP64-NEXT: fcvt.s.w fa5, a0
; RV64ID-LP64-NEXT: fmv.x.w a0, fa5
-; RV64ID-LP64-NEXT: call __truncsfhf2@plt
+; RV64ID-LP64-NEXT: call __truncsfhf2
; RV64ID-LP64-NEXT: lui a1, 1048560
; RV64ID-LP64-NEXT: or a0, a0, a1
; RV64ID-LP64-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
@@ -4208,7 +4208,7 @@ define half @fcvt_h_w_load(ptr %p) nounwind {
; RV32ID-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32ID-NEXT: lw a0, 0(a0)
; RV32ID-NEXT: fcvt.s.w fa0, a0
-; RV32ID-NEXT: call __truncsfhf2@plt
+; RV32ID-NEXT: call __truncsfhf2
; RV32ID-NEXT: fmv.x.w a0, fa0
; RV32ID-NEXT: lui a1, 1048560
; RV32ID-NEXT: or a0, a0, a1
@@ -4223,7 +4223,7 @@ define half @fcvt_h_w_load(ptr %p) nounwind {
; RV64ID-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64ID-NEXT: lw a0, 0(a0)
; RV64ID-NEXT: fcvt.s.w fa0, a0
-; RV64ID-NEXT: call __truncsfhf2@plt
+; RV64ID-NEXT: call __truncsfhf2
; RV64ID-NEXT: fmv.x.w a0, fa0
; RV64ID-NEXT: lui a1, 1048560
; RV64ID-NEXT: or a0, a0, a1
@@ -4308,8 +4308,8 @@ define half @fcvt_h_wu(i32 %a) nounwind {
; RV32I: # %bb.0:
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT: call __floatunsisf@plt
-; RV32I-NEXT: call __truncsfhf2@plt
+; RV32I-NEXT: call __floatunsisf
+; RV32I-NEXT: call __truncsfhf2
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
@@ -4319,8 +4319,8 @@ define half @fcvt_h_wu(i32 %a) nounwind {
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT: sext.w a0, a0
-; RV64I-NEXT: call __floatunsisf@plt
-; RV64I-NEXT: call __truncsfhf2@plt
+; RV64I-NEXT: call __floatunsisf
+; RV64I-NEXT: call __truncsfhf2
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
@@ -4331,7 +4331,7 @@ define half @fcvt_h_wu(i32 %a) nounwind {
; RV32ID-ILP32-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32ID-ILP32-NEXT: fcvt.s.wu fa5, a0
; RV32ID-ILP32-NEXT: fmv.x.w a0, fa5
-; RV32ID-ILP32-NEXT: call __truncsfhf2@plt
+; RV32ID-ILP32-NEXT: call __truncsfhf2
; RV32ID-ILP32-NEXT: lui a1, 1048560
; RV32ID-ILP32-NEXT: or a0, a0, a1
; RV32ID-ILP32-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
@@ -4344,7 +4344,7 @@ define half @fcvt_h_wu(i32 %a) nounwind {
; RV64ID-LP64-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64ID-LP64-NEXT: fcvt.s.wu fa5, a0
; RV64ID-LP64-NEXT: fmv.x.w a0, fa5
-; RV64ID-LP64-NEXT: call __truncsfhf2@plt
+; RV64ID-LP64-NEXT: call __truncsfhf2
; RV64ID-LP64-NEXT: lui a1, 1048560
; RV64ID-LP64-NEXT: or a0, a0, a1
; RV64ID-LP64-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
@@ -4356,7 +4356,7 @@ define half @fcvt_h_wu(i32 %a) nounwind {
; RV32ID-NEXT: addi sp, sp, -16
; RV32ID-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32ID-NEXT: fcvt.s.wu fa0, a0
-; RV32ID-NEXT: call __truncsfhf2@plt
+; RV32ID-NEXT: call __truncsfhf2
; RV32ID-NEXT: fmv.x.w a0, fa0
; RV32ID-NEXT: lui a1, 1048560
; RV32ID-NEXT: or a0, a0, a1
@@ -4370,7 +4370,7 @@ define half @fcvt_h_wu(i32 %a) nounwind {
; RV64ID-NEXT: addi sp, sp, -16
; RV64ID-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64ID-NEXT: fcvt.s.wu fa0, a0
-; RV64ID-NEXT: call __truncsfhf2@plt
+; RV64ID-NEXT: call __truncsfhf2
; RV64ID-NEXT: fmv.x.w a0, fa0
; RV64ID-NEXT: lui a1, 1048560
; RV64ID-NEXT: or a0, a0, a1
@@ -4478,8 +4478,8 @@ define half @fcvt_h_wu_load(ptr %p) nounwind {
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT: lw a0, 0(a0)
-; RV32I-NEXT: call __floatunsisf@plt
-; RV32I-NEXT: call __truncsfhf2@plt
+; RV32I-NEXT: call __floatunsisf
+; RV32I-NEXT: call __truncsfhf2
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
@@ -4489,8 +4489,8 @@ define half @fcvt_h_wu_load(ptr %p) nounwind {
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT: lw a0, 0(a0)
-; RV64I-NEXT: call __floatunsisf@plt
-; RV64I-NEXT: call __truncsfhf2@plt
+; RV64I-NEXT: call __floatunsisf
+; RV64I-NEXT: call __truncsfhf2
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
@@ -4502,7 +4502,7 @@ define half @fcvt_h_wu_load(ptr %p) nounwind {
; RV32ID-ILP32-NEXT: lw a0, 0(a0)
; RV32ID-ILP32-NEXT: fcvt.s.wu fa5, a0
; RV32ID-ILP32-NEXT: fmv.x.w a0, fa5
-; RV32ID-ILP32-NEXT: call __truncsfhf2@plt
+; RV32ID-ILP32-NEXT: call __truncsfhf2
; RV32ID-ILP32-NEXT: lui a1, 1048560
; RV32ID-ILP32-NEXT: or a0, a0, a1
; RV32ID-ILP32-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
@@ -4516,7 +4516,7 @@ define half @fcvt_h_wu_load(ptr %p) nounwind {
; RV64ID-LP64-NEXT: lwu a0, 0(a0)
; RV64ID-LP64-NEXT: fcvt.s.wu fa5, a0
; RV64ID-LP64-NEXT: fmv.x.w a0, fa5
-; RV64ID-LP64-NEXT: call __truncsfhf2@plt
+; RV64ID-LP64-NEXT: call __truncsfhf2
; RV64ID-LP64-NEXT: lui a1, 1048560
; RV64ID-LP64-NEXT: or a0, a0, a1
; RV64ID-LP64-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
@@ -4529,7 +4529,7 @@ define half @fcvt_h_wu_load(ptr %p) nounwind {
; RV32ID-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32ID-NEXT: lw a0, 0(a0)
; RV32ID-NEXT: fcvt.s.wu fa0, a0
-; RV32ID-NEXT: call __truncsfhf2@plt
+; RV32ID-NEXT: call __truncsfhf2
; RV32ID-NEXT: fmv.x.w a0, fa0
; RV32ID-NEXT: lui a1, 1048560
; RV32ID-NEXT: or a0, a0, a1
@@ -4544,7 +4544,7 @@ define half @fcvt_h_wu_load(ptr %p) nounwind {
; RV64ID-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64ID-NEXT: lwu a0, 0(a0)
; RV64ID-NEXT: fcvt.s.wu fa0, a0
-; RV64ID-NEXT: call __truncsfhf2@plt
+; RV64ID-NEXT: call __truncsfhf2
; RV64ID-NEXT: fmv.x.w a0, fa0
; RV64ID-NEXT: lui a1, 1048560
; RV64ID-NEXT: or a0, a0, a1
@@ -4604,7 +4604,7 @@ define half @fcvt_h_l(i64 %a) nounwind {
; RV32IZFH: # %bb.0:
; RV32IZFH-NEXT: addi sp, sp, -16
; RV32IZFH-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IZFH-NEXT: call __floatdihf@plt
+; RV32IZFH-NEXT: call __floatdihf
; RV32IZFH-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32IZFH-NEXT: addi sp, sp, 16
; RV32IZFH-NEXT: ret
@@ -4618,7 +4618,7 @@ define half @fcvt_h_l(i64 %a) nounwind {
; RV32IDZFH: # %bb.0:
; RV32IDZFH-NEXT: addi sp, sp, -16
; RV32IDZFH-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IDZFH-NEXT: call __floatdihf@plt
+; RV32IDZFH-NEXT: call __floatdihf
; RV32IDZFH-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32IDZFH-NEXT: addi sp, sp, 16
; RV32IDZFH-NEXT: ret
@@ -4632,7 +4632,7 @@ define half @fcvt_h_l(i64 %a) nounwind {
; RV32IZHINX: # %bb.0:
; RV32IZHINX-NEXT: addi sp, sp, -16
; RV32IZHINX-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IZHINX-NEXT: call __floatdihf@plt
+; RV32IZHINX-NEXT: call __floatdihf
; RV32IZHINX-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32IZHINX-NEXT: addi sp, sp, 16
; RV32IZHINX-NEXT: ret
@@ -4646,7 +4646,7 @@ define half @fcvt_h_l(i64 %a) nounwind {
; RV32IZDINXZHINX: # %bb.0:
; RV32IZDINXZHINX-NEXT: addi sp, sp, -16
; RV32IZDINXZHINX-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IZDINXZHINX-NEXT: call __floatdihf@plt
+; RV32IZDINXZHINX-NEXT: call __floatdihf
; RV32IZDINXZHINX-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32IZDINXZHINX-NEXT: addi sp, sp, 16
; RV32IZDINXZHINX-NEXT: ret
@@ -4660,8 +4660,8 @@ define half @fcvt_h_l(i64 %a) nounwind {
; RV32I: # %bb.0:
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT: call __floatdisf@plt
-; RV32I-NEXT: call __truncsfhf2@plt
+; RV32I-NEXT: call __floatdisf
+; RV32I-NEXT: call __truncsfhf2
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
@@ -4670,8 +4670,8 @@ define half @fcvt_h_l(i64 %a) nounwind {
; RV64I: # %bb.0:
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT: call __floatdisf@plt
-; RV64I-NEXT: call __truncsfhf2@plt
+; RV64I-NEXT: call __floatdisf
+; RV64I-NEXT: call __truncsfhf2
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
@@ -4680,8 +4680,8 @@ define half @fcvt_h_l(i64 %a) nounwind {
; RV32ID-ILP32: # %bb.0:
; RV32ID-ILP32-NEXT: addi sp, sp, -16
; RV32ID-ILP32-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32ID-ILP32-NEXT: call __floatdisf@plt
-; RV32ID-ILP32-NEXT: call __truncsfhf2@plt
+; RV32ID-ILP32-NEXT: call __floatdisf
+; RV32ID-ILP32-NEXT: call __truncsfhf2
; RV32ID-ILP32-NEXT: lui a1, 1048560
; RV32ID-ILP32-NEXT: or a0, a0, a1
; RV32ID-ILP32-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
@@ -4694,7 +4694,7 @@ define half @fcvt_h_l(i64 %a) nounwind {
; RV64ID-LP64-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64ID-LP64-NEXT: fcvt.s.l fa5, a0
; RV64ID-LP64-NEXT: fmv.x.w a0, fa5
-; RV64ID-LP64-NEXT: call __truncsfhf2@plt
+; RV64ID-LP64-NEXT: call __truncsfhf2
; RV64ID-LP64-NEXT: lui a1, 1048560
; RV64ID-LP64-NEXT: or a0, a0, a1
; RV64ID-LP64-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
@@ -4705,8 +4705,8 @@ define half @fcvt_h_l(i64 %a) nounwind {
; RV32ID: # %bb.0:
; RV32ID-NEXT: addi sp, sp, -16
; RV32ID-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32ID-NEXT: call __floatdisf@plt
-; RV32ID-NEXT: call __truncsfhf2@plt
+; RV32ID-NEXT: call __floatdisf
+; RV32ID-NEXT: call __truncsfhf2
; RV32ID-NEXT: fmv.x.w a0, fa0
; RV32ID-NEXT: lui a1, 1048560
; RV32ID-NEXT: or a0, a0, a1
@@ -4720,7 +4720,7 @@ define half @fcvt_h_l(i64 %a) nounwind {
; RV64ID-NEXT: addi sp, sp, -16
; RV64ID-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64ID-NEXT: fcvt.s.l fa0, a0
-; RV64ID-NEXT: call __truncsfhf2@plt
+; RV64ID-NEXT: call __truncsfhf2
; RV64ID-NEXT: fmv.x.w a0, fa0
; RV64ID-NEXT: lui a1, 1048560
; RV64ID-NEXT: or a0, a0, a1
@@ -4733,7 +4733,7 @@ define half @fcvt_h_l(i64 %a) nounwind {
; CHECK32-IZFHMIN: # %bb.0:
; CHECK32-IZFHMIN-NEXT: addi sp, sp, -16
; CHECK32-IZFHMIN-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; CHECK32-IZFHMIN-NEXT: call __floatdihf@plt
+; CHECK32-IZFHMIN-NEXT: call __floatdihf
; CHECK32-IZFHMIN-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; CHECK32-IZFHMIN-NEXT: addi sp, sp, 16
; CHECK32-IZFHMIN-NEXT: ret
@@ -4748,7 +4748,7 @@ define half @fcvt_h_l(i64 %a) nounwind {
; CHECK32-IZHINXMIN: # %bb.0:
; CHECK32-IZHINXMIN-NEXT: addi sp, sp, -16
; CHECK32-IZHINXMIN-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; CHECK32-IZHINXMIN-NEXT: call __floatdihf@plt
+; CHECK32-IZHINXMIN-NEXT: call __floatdihf
; CHECK32-IZHINXMIN-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; CHECK32-IZHINXMIN-NEXT: addi sp, sp, 16
; CHECK32-IZHINXMIN-NEXT: ret
@@ -4763,7 +4763,7 @@ define half @fcvt_h_l(i64 %a) nounwind {
; CHECK32-IZDINXZHINXMIN: # %bb.0:
; CHECK32-IZDINXZHINXMIN-NEXT: addi sp, sp, -16
; CHECK32-IZDINXZHINXMIN-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; CHECK32-IZDINXZHINXMIN-NEXT: call __floatdihf@plt
+; CHECK32-IZDINXZHINXMIN-NEXT: call __floatdihf
; CHECK32-IZDINXZHINXMIN-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; CHECK32-IZDINXZHINXMIN-NEXT: addi sp, sp, 16
; CHECK32-IZDINXZHINXMIN-NEXT: ret
@@ -4782,7 +4782,7 @@ define half @fcvt_h_lu(i64 %a) nounwind {
; RV32IZFH: # %bb.0:
; RV32IZFH-NEXT: addi sp, sp, -16
; RV32IZFH-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IZFH-NEXT: call __floatundihf@plt
+; RV32IZFH-NEXT: call __floatundihf
; RV32IZFH-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32IZFH-NEXT: addi sp, sp, 16
; RV32IZFH-NEXT: ret
@@ -4796,7 +4796,7 @@ define half @fcvt_h_lu(i64 %a) nounwind {
; RV32IDZFH: # %bb.0:
; RV32IDZFH-NEXT: addi sp, sp, -16
; RV32IDZFH-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IDZFH-NEXT: call __floatundihf@plt
+; RV32IDZFH-NEXT: call __floatundihf
; RV32IDZFH-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32IDZFH-NEXT: addi sp, sp, 16
; RV32IDZFH-NEXT: ret
@@ -4810,7 +4810,7 @@ define half @fcvt_h_lu(i64 %a) nounwind {
; RV32IZHINX: # %bb.0:
; RV32IZHINX-NEXT: addi sp, sp, -16
; RV32IZHINX-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IZHINX-NEXT: call __floatundihf@plt
+; RV32IZHINX-NEXT: call __floatundihf
; RV32IZHINX-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32IZHINX-NEXT: addi sp, sp, 16
; RV32IZHINX-NEXT: ret
@@ -4824,7 +4824,7 @@ define half @fcvt_h_lu(i64 %a) nounwind {
; RV32IZDINXZHINX: # %bb.0:
; RV32IZDINXZHINX-NEXT: addi sp, sp, -16
; RV32IZDINXZHINX-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IZDINXZHINX-NEXT: call __floatundihf@plt
+; RV32IZDINXZHINX-NEXT: call __floatundihf
; RV32IZDINXZHINX-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32IZDINXZHINX-NEXT: addi sp, sp, 16
; RV32IZDINXZHINX-NEXT: ret
@@ -4838,8 +4838,8 @@ define half @fcvt_h_lu(i64 %a) nounwind {
; RV32I: # %bb.0:
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT: call __floatundisf@plt
-; RV32I-NEXT: call __truncsfhf2@plt
+; RV32I-NEXT: call __floatundisf
+; RV32I-NEXT: call __truncsfhf2
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
@@ -4848,8 +4848,8 @@ define half @fcvt_h_lu(i64 %a) nounwind {
; RV64I: # %bb.0:
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT: call __floatundisf@plt
-; RV64I-NEXT: call __truncsfhf2@plt
+; RV64I-NEXT: call __floatundisf
+; RV64I-NEXT: call __truncsfhf2
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
@@ -4858,8 +4858,8 @@ define half @fcvt_h_lu(i64 %a) nounwind {
; RV32ID-ILP32: # %bb.0:
; RV32ID-ILP32-NEXT: addi sp, sp, -16
; RV32ID-ILP32-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32ID-ILP32-NEXT: call __floatundisf@plt
-; RV32ID-ILP32-NEXT: call __truncsfhf2@plt
+; RV32ID-ILP32-NEXT: call __floatundisf
+; RV32ID-ILP32-NEXT: call __truncsfhf2
; RV32ID-ILP32-NEXT: lui a1, 1048560
; RV32ID-ILP32-NEXT: or a0, a0, a1
; RV32ID-ILP32-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
@@ -4872,7 +4872,7 @@ define half @fcvt_h_lu(i64 %a) nounwind {
; RV64ID-LP64-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64ID-LP64-NEXT: fcvt.s.lu fa5, a0
; RV64ID-LP64-NEXT: fmv.x.w a0, fa5
-; RV64ID-LP64-NEXT: call __truncsfhf2@plt
+; RV64ID-LP64-NEXT: call __truncsfhf2
; RV64ID-LP64-NEXT: lui a1, 1048560
; RV64ID-LP64-NEXT: or a0, a0, a1
; RV64ID-LP64-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
@@ -4883,8 +4883,8 @@ define half @fcvt_h_lu(i64 %a) nounwind {
; RV32ID: # %bb.0:
; RV32ID-NEXT: addi sp, sp, -16
; RV32ID-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32ID-NEXT: call __floatundisf@plt
-; RV32ID-NEXT: call __truncsfhf2@plt
+; RV32ID-NEXT: call __floatundisf
+; RV32ID-NEXT: call __truncsfhf2
; RV32ID-NEXT: fmv.x.w a0, fa0
; RV32ID-NEXT: lui a1, 1048560
; RV32ID-NEXT: or a0, a0, a1
@@ -4898,7 +4898,7 @@ define half @fcvt_h_lu(i64 %a) nounwind {
; RV64ID-NEXT: addi sp, sp, -16
; RV64ID-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64ID-NEXT: fcvt.s.lu fa0, a0
-; RV64ID-NEXT: call __truncsfhf2@plt
+; RV64ID-NEXT: call __truncsfhf2
; RV64ID-NEXT: fmv.x.w a0, fa0
; RV64ID-NEXT: lui a1, 1048560
; RV64ID-NEXT: or a0, a0, a1
@@ -4911,7 +4911,7 @@ define half @fcvt_h_lu(i64 %a) nounwind {
; CHECK32-IZFHMIN: # %bb.0:
; CHECK32-IZFHMIN-NEXT: addi sp, sp, -16
; CHECK32-IZFHMIN-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; CHECK32-IZFHMIN-NEXT: call __floatundihf@plt
+; CHECK32-IZFHMIN-NEXT: call __floatundihf
; CHECK32-IZFHMIN-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; CHECK32-IZFHMIN-NEXT: addi sp, sp, 16
; CHECK32-IZFHMIN-NEXT: ret
@@ -4926,7 +4926,7 @@ define half @fcvt_h_lu(i64 %a) nounwind {
; CHECK32-IZHINXMIN: # %bb.0:
; CHECK32-IZHINXMIN-NEXT: addi sp, sp, -16
; CHECK32-IZHINXMIN-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; CHECK32-IZHINXMIN-NEXT: call __floatundihf@plt
+; CHECK32-IZHINXMIN-NEXT: call __floatundihf
; CHECK32-IZHINXMIN-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; CHECK32-IZHINXMIN-NEXT: addi sp, sp, 16
; CHECK32-IZHINXMIN-NEXT: ret
@@ -4941,7 +4941,7 @@ define half @fcvt_h_lu(i64 %a) nounwind {
; CHECK32-IZDINXZHINXMIN: # %bb.0:
; CHECK32-IZDINXZHINXMIN-NEXT: addi sp, sp, -16
; CHECK32-IZDINXZHINXMIN-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; CHECK32-IZDINXZHINXMIN-NEXT: call __floatundihf@plt
+; CHECK32-IZDINXZHINXMIN-NEXT: call __floatundihf
; CHECK32-IZDINXZHINXMIN-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; CHECK32-IZDINXZHINXMIN-NEXT: addi sp, sp, 16
; CHECK32-IZDINXZHINXMIN-NEXT: ret
@@ -4985,7 +4985,7 @@ define half @fcvt_h_s(float %a) nounwind {
; RV32I: # %bb.0:
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT: call __truncsfhf2@plt
+; RV32I-NEXT: call __truncsfhf2
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
@@ -4994,7 +4994,7 @@ define half @fcvt_h_s(float %a) nounwind {
; RV64I: # %bb.0:
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT: call __truncsfhf2@plt
+; RV64I-NEXT: call __truncsfhf2
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
@@ -5003,7 +5003,7 @@ define half @fcvt_h_s(float %a) nounwind {
; RV32ID-ILP32: # %bb.0:
; RV32ID-ILP32-NEXT: addi sp, sp, -16
; RV32ID-ILP32-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32ID-ILP32-NEXT: call __truncsfhf2@plt
+; RV32ID-ILP32-NEXT: call __truncsfhf2
; RV32ID-ILP32-NEXT: lui a1, 1048560
; RV32ID-ILP32-NEXT: or a0, a0, a1
; RV32ID-ILP32-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
@@ -5014,7 +5014,7 @@ define half @fcvt_h_s(float %a) nounwind {
; RV64ID-LP64: # %bb.0:
; RV64ID-LP64-NEXT: addi sp, sp, -16
; RV64ID-LP64-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
-; RV64ID-LP64-NEXT: call __truncsfhf2@plt
+; RV64ID-LP64-NEXT: call __truncsfhf2
; RV64ID-LP64-NEXT: lui a1, 1048560
; RV64ID-LP64-NEXT: or a0, a0, a1
; RV64ID-LP64-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
@@ -5025,7 +5025,7 @@ define half @fcvt_h_s(float %a) nounwind {
; RV32ID: # %bb.0:
; RV32ID-NEXT: addi sp, sp, -16
; RV32ID-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32ID-NEXT: call __truncsfhf2@plt
+; RV32ID-NEXT: call __truncsfhf2
; RV32ID-NEXT: fmv.x.w a0, fa0
; RV32ID-NEXT: lui a1, 1048560
; RV32ID-NEXT: or a0, a0, a1
@@ -5038,7 +5038,7 @@ define half @fcvt_h_s(float %a) nounwind {
; RV64ID: # %bb.0:
; RV64ID-NEXT: addi sp, sp, -16
; RV64ID-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
-; RV64ID-NEXT: call __truncsfhf2@plt
+; RV64ID-NEXT: call __truncsfhf2
; RV64ID-NEXT: fmv.x.w a0, fa0
; RV64ID-NEXT: lui a1, 1048560
; RV64ID-NEXT: or a0, a0, a1
@@ -5110,7 +5110,7 @@ define float @fcvt_s_h(half %a) nounwind {
; RV32I: # %bb.0:
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT: call __extendhfsf2@plt
+; RV32I-NEXT: call __extendhfsf2
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
@@ -5119,7 +5119,7 @@ define float @fcvt_s_h(half %a) nounwind {
; RV64I: # %bb.0:
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT: call __extendhfsf2@plt
+; RV64I-NEXT: call __extendhfsf2
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
@@ -5128,7 +5128,7 @@ define float @fcvt_s_h(half %a) nounwind {
; RV32ID-ILP32: # %bb.0:
; RV32ID-ILP32-NEXT: addi sp, sp, -16
; RV32ID-ILP32-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32ID-ILP32-NEXT: call __extendhfsf2@plt
+; RV32ID-ILP32-NEXT: call __extendhfsf2
; RV32ID-ILP32-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32ID-ILP32-NEXT: addi sp, sp, 16
; RV32ID-ILP32-NEXT: ret
@@ -5137,7 +5137,7 @@ define float @fcvt_s_h(half %a) nounwind {
; RV64ID-LP64: # %bb.0:
; RV64ID-LP64-NEXT: addi sp, sp, -16
; RV64ID-LP64-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
-; RV64ID-LP64-NEXT: call __extendhfsf2@plt
+; RV64ID-LP64-NEXT: call __extendhfsf2
; RV64ID-LP64-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64ID-LP64-NEXT: addi sp, sp, 16
; RV64ID-LP64-NEXT: ret
@@ -5146,7 +5146,7 @@ define float @fcvt_s_h(half %a) nounwind {
; RV32ID: # %bb.0:
; RV32ID-NEXT: addi sp, sp, -16
; RV32ID-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32ID-NEXT: call __extendhfsf2@plt
+; RV32ID-NEXT: call __extendhfsf2
; RV32ID-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32ID-NEXT: addi sp, sp, 16
; RV32ID-NEXT: ret
@@ -5155,7 +5155,7 @@ define float @fcvt_s_h(half %a) nounwind {
; RV64ID: # %bb.0:
; RV64ID-NEXT: addi sp, sp, -16
; RV64ID-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
-; RV64ID-NEXT: call __extendhfsf2@plt
+; RV64ID-NEXT: call __extendhfsf2
; RV64ID-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64ID-NEXT: addi sp, sp, 16
; RV64ID-NEXT: ret
@@ -5198,7 +5198,7 @@ define half @fcvt_h_d(double %a) nounwind {
; RV32IZFH: # %bb.0:
; RV32IZFH-NEXT: addi sp, sp, -16
; RV32IZFH-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IZFH-NEXT: call __truncdfhf2@plt
+; RV32IZFH-NEXT: call __truncdfhf2
; RV32IZFH-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32IZFH-NEXT: addi sp, sp, 16
; RV32IZFH-NEXT: ret
@@ -5207,7 +5207,7 @@ define half @fcvt_h_d(double %a) nounwind {
; RV64IZFH: # %bb.0:
; RV64IZFH-NEXT: addi sp, sp, -16
; RV64IZFH-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
-; RV64IZFH-NEXT: call __truncdfhf2@plt
+; RV64IZFH-NEXT: call __truncdfhf2
; RV64IZFH-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64IZFH-NEXT: addi sp, sp, 16
; RV64IZFH-NEXT: ret
@@ -5226,7 +5226,7 @@ define half @fcvt_h_d(double %a) nounwind {
; RV32IZHINX: # %bb.0:
; RV32IZHINX-NEXT: addi sp, sp, -16
; RV32IZHINX-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IZHINX-NEXT: call __truncdfhf2@plt
+; RV32IZHINX-NEXT: call __truncdfhf2
; RV32IZHINX-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32IZHINX-NEXT: addi sp, sp, 16
; RV32IZHINX-NEXT: ret
@@ -5235,7 +5235,7 @@ define half @fcvt_h_d(double %a) nounwind {
; RV64IZHINX: # %bb.0:
; RV64IZHINX-NEXT: addi sp, sp, -16
; RV64IZHINX-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
-; RV64IZHINX-NEXT: call __truncdfhf2@plt
+; RV64IZHINX-NEXT: call __truncdfhf2
; RV64IZHINX-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64IZHINX-NEXT: addi sp, sp, 16
; RV64IZHINX-NEXT: ret
@@ -5260,7 +5260,7 @@ define half @fcvt_h_d(double %a) nounwind {
; RV32I: # %bb.0:
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT: call __truncdfhf2@plt
+; RV32I-NEXT: call __truncdfhf2
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
@@ -5269,7 +5269,7 @@ define half @fcvt_h_d(double %a) nounwind {
; RV64I: # %bb.0:
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT: call __truncdfhf2@plt
+; RV64I-NEXT: call __truncdfhf2
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
@@ -5278,7 +5278,7 @@ define half @fcvt_h_d(double %a) nounwind {
; RV32ID-ILP32: # %bb.0:
; RV32ID-ILP32-NEXT: addi sp, sp, -16
; RV32ID-ILP32-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32ID-ILP32-NEXT: call __truncdfhf2@plt
+; RV32ID-ILP32-NEXT: call __truncdfhf2
; RV32ID-ILP32-NEXT: lui a1, 1048560
; RV32ID-ILP32-NEXT: or a0, a0, a1
; RV32ID-ILP32-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
@@ -5289,7 +5289,7 @@ define half @fcvt_h_d(double %a) nounwind {
; RV64ID-LP64: # %bb.0:
; RV64ID-LP64-NEXT: addi sp, sp, -16
; RV64ID-LP64-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
-; RV64ID-LP64-NEXT: call __truncdfhf2@plt
+; RV64ID-LP64-NEXT: call __truncdfhf2
; RV64ID-LP64-NEXT: lui a1, 1048560
; RV64ID-LP64-NEXT: or a0, a0, a1
; RV64ID-LP64-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
@@ -5300,7 +5300,7 @@ define half @fcvt_h_d(double %a) nounwind {
; RV32ID: # %bb.0:
; RV32ID-NEXT: addi sp, sp, -16
; RV32ID-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32ID-NEXT: call __truncdfhf2@plt
+; RV32ID-NEXT: call __truncdfhf2
; RV32ID-NEXT: fmv.x.w a0, fa0
; RV32ID-NEXT: lui a1, 1048560
; RV32ID-NEXT: or a0, a0, a1
@@ -5313,7 +5313,7 @@ define half @fcvt_h_d(double %a) nounwind {
; RV64ID: # %bb.0:
; RV64ID-NEXT: addi sp, sp, -16
; RV64ID-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
-; RV64ID-NEXT: call __truncdfhf2@plt
+; RV64ID-NEXT: call __truncdfhf2
; RV64ID-NEXT: fmv.x.w a0, fa0
; RV64ID-NEXT: lui a1, 1048560
; RV64ID-NEXT: or a0, a0, a1
@@ -5326,7 +5326,7 @@ define half @fcvt_h_d(double %a) nounwind {
; RV32IFZFHMIN: # %bb.0:
; RV32IFZFHMIN-NEXT: addi sp, sp, -16
; RV32IFZFHMIN-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IFZFHMIN-NEXT: call __truncdfhf2@plt
+; RV32IFZFHMIN-NEXT: call __truncdfhf2
; RV32IFZFHMIN-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32IFZFHMIN-NEXT: addi sp, sp, 16
; RV32IFZFHMIN-NEXT: ret
@@ -5335,7 +5335,7 @@ define half @fcvt_h_d(double %a) nounwind {
; RV64IFZFHMIN: # %bb.0:
; RV64IFZFHMIN-NEXT: addi sp, sp, -16
; RV64IFZFHMIN-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
-; RV64IFZFHMIN-NEXT: call __truncdfhf2@plt
+; RV64IFZFHMIN-NEXT: call __truncdfhf2
; RV64IFZFHMIN-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64IFZFHMIN-NEXT: addi sp, sp, 16
; RV64IFZFHMIN-NEXT: ret
@@ -5354,7 +5354,7 @@ define half @fcvt_h_d(double %a) nounwind {
; CHECK32-IZHINXMIN: # %bb.0:
; CHECK32-IZHINXMIN-NEXT: addi sp, sp, -16
; CHECK32-IZHINXMIN-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; CHECK32-IZHINXMIN-NEXT: call __truncdfhf2@plt
+; CHECK32-IZHINXMIN-NEXT: call __truncdfhf2
; CHECK32-IZHINXMIN-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; CHECK32-IZHINXMIN-NEXT: addi sp, sp, 16
; CHECK32-IZHINXMIN-NEXT: ret
@@ -5363,7 +5363,7 @@ define half @fcvt_h_d(double %a) nounwind {
; CHECK64-IZHINXMIN: # %bb.0:
; CHECK64-IZHINXMIN-NEXT: addi sp, sp, -16
; CHECK64-IZHINXMIN-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
-; CHECK64-IZHINXMIN-NEXT: call __truncdfhf2@plt
+; CHECK64-IZHINXMIN-NEXT: call __truncdfhf2
; CHECK64-IZHINXMIN-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; CHECK64-IZHINXMIN-NEXT: addi sp, sp, 16
; CHECK64-IZHINXMIN-NEXT: ret
@@ -5393,7 +5393,7 @@ define double @fcvt_d_h(half %a) nounwind {
; RV32IZFH-NEXT: addi sp, sp, -16
; RV32IZFH-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32IZFH-NEXT: fcvt.s.h fa0, fa0
-; RV32IZFH-NEXT: call __extendsfdf2@plt
+; RV32IZFH-NEXT: call __extendsfdf2
; RV32IZFH-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32IZFH-NEXT: addi sp, sp, 16
; RV32IZFH-NEXT: ret
@@ -5403,7 +5403,7 @@ define double @fcvt_d_h(half %a) nounwind {
; RV64IZFH-NEXT: addi sp, sp, -16
; RV64IZFH-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64IZFH-NEXT: fcvt.s.h fa0, fa0
-; RV64IZFH-NEXT: call __extendsfdf2@plt
+; RV64IZFH-NEXT: call __extendsfdf2
; RV64IZFH-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64IZFH-NEXT: addi sp, sp, 16
; RV64IZFH-NEXT: ret
@@ -5423,7 +5423,7 @@ define double @fcvt_d_h(half %a) nounwind {
; RV32IZHINX-NEXT: addi sp, sp, -16
; RV32IZHINX-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32IZHINX-NEXT: fcvt.s.h a0, a0
-; RV32IZHINX-NEXT: call __extendsfdf2@plt
+; RV32IZHINX-NEXT: call __extendsfdf2
; RV32IZHINX-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32IZHINX-NEXT: addi sp, sp, 16
; RV32IZHINX-NEXT: ret
@@ -5433,7 +5433,7 @@ define double @fcvt_d_h(half %a) nounwind {
; RV64IZHINX-NEXT: addi sp, sp, -16
; RV64IZHINX-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64IZHINX-NEXT: fcvt.s.h a0, a0
-; RV64IZHINX-NEXT: call __extendsfdf2@plt
+; RV64IZHINX-NEXT: call __extendsfdf2
; RV64IZHINX-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64IZHINX-NEXT: addi sp, sp, 16
; RV64IZHINX-NEXT: ret
@@ -5458,8 +5458,8 @@ define double @fcvt_d_h(half %a) nounwind {
; RV32I: # %bb.0:
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT: call __extendhfsf2@plt
-; RV32I-NEXT: call __extendsfdf2@plt
+; RV32I-NEXT: call __extendhfsf2
+; RV32I-NEXT: call __extendsfdf2
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
@@ -5468,8 +5468,8 @@ define double @fcvt_d_h(half %a) nounwind {
; RV64I: # %bb.0:
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT: call __extendhfsf2@plt
-; RV64I-NEXT: call __extendsfdf2@plt
+; RV64I-NEXT: call __extendhfsf2
+; RV64I-NEXT: call __extendsfdf2
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
@@ -5478,7 +5478,7 @@ define double @fcvt_d_h(half %a) nounwind {
; RV32ID-ILP32: # %bb.0:
; RV32ID-ILP32-NEXT: addi sp, sp, -16
; RV32ID-ILP32-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32ID-ILP32-NEXT: call __extendhfsf2@plt
+; RV32ID-ILP32-NEXT: call __extendhfsf2
; RV32ID-ILP32-NEXT: fmv.w.x fa5, a0
; RV32ID-ILP32-NEXT: fcvt.d.s fa5, fa5
; RV32ID-ILP32-NEXT: fsd fa5, 0(sp)
@@ -5492,7 +5492,7 @@ define double @fcvt_d_h(half %a) nounwind {
; RV64ID-LP64: # %bb.0:
; RV64ID-LP64-NEXT: addi sp, sp, -16
; RV64ID-LP64-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
-; RV64ID-LP64-NEXT: call __extendhfsf2@plt
+; RV64ID-LP64-NEXT: call __extendhfsf2
; RV64ID-LP64-NEXT: fmv.w.x fa5, a0
; RV64ID-LP64-NEXT: fcvt.d.s fa5, fa5
; RV64ID-LP64-NEXT: fmv.x.d a0, fa5
@@ -5504,7 +5504,7 @@ define double @fcvt_d_h(half %a) nounwind {
; RV32ID: # %bb.0:
; RV32ID-NEXT: addi sp, sp, -16
; RV32ID-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32ID-NEXT: call __extendhfsf2@plt
+; RV32ID-NEXT: call __extendhfsf2
; RV32ID-NEXT: fcvt.d.s fa0, fa0
; RV32ID-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32ID-NEXT: addi sp, sp, 16
@@ -5514,7 +5514,7 @@ define double @fcvt_d_h(half %a) nounwind {
; RV64ID: # %bb.0:
; RV64ID-NEXT: addi sp, sp, -16
; RV64ID-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
-; RV64ID-NEXT: call __extendhfsf2@plt
+; RV64ID-NEXT: call __extendhfsf2
; RV64ID-NEXT: fcvt.d.s fa0, fa0
; RV64ID-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64ID-NEXT: addi sp, sp, 16
@@ -5525,7 +5525,7 @@ define double @fcvt_d_h(half %a) nounwind {
; RV32IFZFHMIN-NEXT: addi sp, sp, -16
; RV32IFZFHMIN-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32IFZFHMIN-NEXT: fcvt.s.h fa0, fa0
-; RV32IFZFHMIN-NEXT: call __extendsfdf2@plt
+; RV32IFZFHMIN-NEXT: call __extendsfdf2
; RV32IFZFHMIN-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32IFZFHMIN-NEXT: addi sp, sp, 16
; RV32IFZFHMIN-NEXT: ret
@@ -5535,7 +5535,7 @@ define double @fcvt_d_h(half %a) nounwind {
; RV64IFZFHMIN-NEXT: addi sp, sp, -16
; RV64IFZFHMIN-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64IFZFHMIN-NEXT: fcvt.s.h fa0, fa0
-; RV64IFZFHMIN-NEXT: call __extendsfdf2@plt
+; RV64IFZFHMIN-NEXT: call __extendsfdf2
; RV64IFZFHMIN-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64IFZFHMIN-NEXT: addi sp, sp, 16
; RV64IFZFHMIN-NEXT: ret
@@ -5555,7 +5555,7 @@ define double @fcvt_d_h(half %a) nounwind {
; CHECK32-IZHINXMIN-NEXT: addi sp, sp, -16
; CHECK32-IZHINXMIN-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; CHECK32-IZHINXMIN-NEXT: fcvt.s.h a0, a0
-; CHECK32-IZHINXMIN-NEXT: call __extendsfdf2@plt
+; CHECK32-IZHINXMIN-NEXT: call __extendsfdf2
; CHECK32-IZHINXMIN-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; CHECK32-IZHINXMIN-NEXT: addi sp, sp, 16
; CHECK32-IZHINXMIN-NEXT: ret
@@ -5565,7 +5565,7 @@ define double @fcvt_d_h(half %a) nounwind {
; CHECK64-IZHINXMIN-NEXT: addi sp, sp, -16
; CHECK64-IZHINXMIN-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; CHECK64-IZHINXMIN-NEXT: fcvt.s.h a0, a0
-; CHECK64-IZHINXMIN-NEXT: call __extendsfdf2@plt
+; CHECK64-IZHINXMIN-NEXT: call __extendsfdf2
; CHECK64-IZHINXMIN-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; CHECK64-IZHINXMIN-NEXT: addi sp, sp, 16
; CHECK64-IZHINXMIN-NEXT: ret
@@ -5826,8 +5826,8 @@ define signext i32 @fcvt_h_w_demanded_bits(i32 signext %0, ptr %1) nounwind {
; RV32I-NEXT: mv s0, a1
; RV32I-NEXT: addi s1, a0, 1
; RV32I-NEXT: mv a0, s1
-; RV32I-NEXT: call __floatsisf@plt
-; RV32I-NEXT: call __truncsfhf2@plt
+; RV32I-NEXT: call __floatsisf
+; RV32I-NEXT: call __truncsfhf2
; RV32I-NEXT: sh a0, 0(s0)
; RV32I-NEXT: mv a0, s1
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
@@ -5845,8 +5845,8 @@ define signext i32 @fcvt_h_w_demanded_bits(i32 signext %0, ptr %1) nounwind {
; RV64I-NEXT: mv s0, a1
; RV64I-NEXT: addiw s1, a0, 1
; RV64I-NEXT: mv a0, s1
-; RV64I-NEXT: call __floatsisf@plt
-; RV64I-NEXT: call __truncsfhf2@plt
+; RV64I-NEXT: call __floatsisf
+; RV64I-NEXT: call __truncsfhf2
; RV64I-NEXT: sh a0, 0(s0)
; RV64I-NEXT: mv a0, s1
; RV64I-NEXT: ld ra, 24(sp) # 8-byte Folded Reload
@@ -5865,7 +5865,7 @@ define signext i32 @fcvt_h_w_demanded_bits(i32 signext %0, ptr %1) nounwind {
; RV32ID-ILP32-NEXT: addi s1, a0, 1
; RV32ID-ILP32-NEXT: fcvt.s.w fa5, s1
; RV32ID-ILP32-NEXT: fmv.x.w a0, fa5
-; RV32ID-ILP32-NEXT: call __truncsfhf2@plt
+; RV32ID-ILP32-NEXT: call __truncsfhf2
; RV32ID-ILP32-NEXT: sh a0, 0(s0)
; RV32ID-ILP32-NEXT: mv a0, s1
; RV32ID-ILP32-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
@@ -5884,7 +5884,7 @@ define signext i32 @fcvt_h_w_demanded_bits(i32 signext %0, ptr %1) nounwind {
; RV64ID-LP64-NEXT: addiw s1, a0, 1
; RV64ID-LP64-NEXT: fcvt.s.w fa5, s1
; RV64ID-LP64-NEXT: fmv.x.w a0, fa5
-; RV64ID-LP64-NEXT: call __truncsfhf2@plt
+; RV64ID-LP64-NEXT: call __truncsfhf2
; RV64ID-LP64-NEXT: sh a0, 0(s0)
; RV64ID-LP64-NEXT: mv a0, s1
; RV64ID-LP64-NEXT: ld ra, 24(sp) # 8-byte Folded Reload
@@ -5902,7 +5902,7 @@ define signext i32 @fcvt_h_w_demanded_bits(i32 signext %0, ptr %1) nounwind {
; RV32ID-NEXT: mv s0, a1
; RV32ID-NEXT: addi s1, a0, 1
; RV32ID-NEXT: fcvt.s.w fa0, s1
-; RV32ID-NEXT: call __truncsfhf2@plt
+; RV32ID-NEXT: call __truncsfhf2
; RV32ID-NEXT: fmv.x.w a0, fa0
; RV32ID-NEXT: sh a0, 0(s0)
; RV32ID-NEXT: mv a0, s1
@@ -5921,7 +5921,7 @@ define signext i32 @fcvt_h_w_demanded_bits(i32 signext %0, ptr %1) nounwind {
; RV64ID-NEXT: mv s0, a1
; RV64ID-NEXT: addiw s1, a0, 1
; RV64ID-NEXT: fcvt.s.w fa0, s1
-; RV64ID-NEXT: call __truncsfhf2@plt
+; RV64ID-NEXT: call __truncsfhf2
; RV64ID-NEXT: fmv.x.w a0, fa0
; RV64ID-NEXT: sh a0, 0(s0)
; RV64ID-NEXT: mv a0, s1
@@ -6051,8 +6051,8 @@ define signext i32 @fcvt_h_wu_demanded_bits(i32 signext %0, ptr %1) nounwind {
; RV32I-NEXT: mv s0, a1
; RV32I-NEXT: addi s1, a0, 1
; RV32I-NEXT: mv a0, s1
-; RV32I-NEXT: call __floatunsisf@plt
-; RV32I-NEXT: call __truncsfhf2@plt
+; RV32I-NEXT: call __floatunsisf
+; RV32I-NEXT: call __truncsfhf2
; RV32I-NEXT: sh a0, 0(s0)
; RV32I-NEXT: mv a0, s1
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
@@ -6070,8 +6070,8 @@ define signext i32 @fcvt_h_wu_demanded_bits(i32 signext %0, ptr %1) nounwind {
; RV64I-NEXT: mv s0, a1
; RV64I-NEXT: addiw s1, a0, 1
; RV64I-NEXT: mv a0, s1
-; RV64I-NEXT: call __floatunsisf@plt
-; RV64I-NEXT: call __truncsfhf2@plt
+; RV64I-NEXT: call __floatunsisf
+; RV64I-NEXT: call __truncsfhf2
; RV64I-NEXT: sh a0, 0(s0)
; RV64I-NEXT: mv a0, s1
; RV64I-NEXT: ld ra, 24(sp) # 8-byte Folded Reload
@@ -6090,7 +6090,7 @@ define signext i32 @fcvt_h_wu_demanded_bits(i32 signext %0, ptr %1) nounwind {
; RV32ID-ILP32-NEXT: addi s1, a0, 1
; RV32ID-ILP32-NEXT: fcvt.s.wu fa5, s1
; RV32ID-ILP32-NEXT: fmv.x.w a0, fa5
-; RV32ID-ILP32-NEXT: call __truncsfhf2@plt
+; RV32ID-ILP32-NEXT: call __truncsfhf2
; RV32ID-ILP32-NEXT: sh a0, 0(s0)
; RV32ID-ILP32-NEXT: mv a0, s1
; RV32ID-ILP32-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
@@ -6109,7 +6109,7 @@ define signext i32 @fcvt_h_wu_demanded_bits(i32 signext %0, ptr %1) nounwind {
; RV64ID-LP64-NEXT: addiw s1, a0, 1
; RV64ID-LP64-NEXT: fcvt.s.wu fa5, s1
; RV64ID-LP64-NEXT: fmv.x.w a0, fa5
-; RV64ID-LP64-NEXT: call __truncsfhf2@plt
+; RV64ID-LP64-NEXT: call __truncsfhf2
; RV64ID-LP64-NEXT: sh a0, 0(s0)
; RV64ID-LP64-NEXT: mv a0, s1
; RV64ID-LP64-NEXT: ld ra, 24(sp) # 8-byte Folded Reload
@@ -6127,7 +6127,7 @@ define signext i32 @fcvt_h_wu_demanded_bits(i32 signext %0, ptr %1) nounwind {
; RV32ID-NEXT: mv s0, a1
; RV32ID-NEXT: addi s1, a0, 1
; RV32ID-NEXT: fcvt.s.wu fa0, s1
-; RV32ID-NEXT: call __truncsfhf2@plt
+; RV32ID-NEXT: call __truncsfhf2
; RV32ID-NEXT: fmv.x.w a0, fa0
; RV32ID-NEXT: sh a0, 0(s0)
; RV32ID-NEXT: mv a0, s1
@@ -6146,7 +6146,7 @@ define signext i32 @fcvt_h_wu_demanded_bits(i32 signext %0, ptr %1) nounwind {
; RV64ID-NEXT: mv s0, a1
; RV64ID-NEXT: addiw s1, a0, 1
; RV64ID-NEXT: fcvt.s.wu fa0, s1
-; RV64ID-NEXT: call __truncsfhf2@plt
+; RV64ID-NEXT: call __truncsfhf2
; RV64ID-NEXT: fmv.x.w a0, fa0
; RV64ID-NEXT: sh a0, 0(s0)
; RV64ID-NEXT: mv a0, s1
@@ -6262,8 +6262,8 @@ define signext i16 @fcvt_w_s_i16(half %a) nounwind {
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT: slli a0, a0, 16
; RV32I-NEXT: srli a0, a0, 16
-; RV32I-NEXT: call __extendhfsf2@plt
-; RV32I-NEXT: call __fixsfsi@plt
+; RV32I-NEXT: call __extendhfsf2
+; RV32I-NEXT: call __fixsfsi
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
@@ -6274,8 +6274,8 @@ define signext i16 @fcvt_w_s_i16(half %a) nounwind {
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT: slli a0, a0, 48
; RV64I-NEXT: srli a0, a0, 48
-; RV64I-NEXT: call __extendhfsf2@plt
-; RV64I-NEXT: call __fixsfdi@plt
+; RV64I-NEXT: call __extendhfsf2
+; RV64I-NEXT: call __fixsfdi
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
@@ -6284,7 +6284,7 @@ define signext i16 @fcvt_w_s_i16(half %a) nounwind {
; RV32ID-ILP32: # %bb.0:
; RV32ID-ILP32-NEXT: addi sp, sp, -16
; RV32ID-ILP32-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32ID-ILP32-NEXT: call __extendhfsf2@plt
+; RV32ID-ILP32-NEXT: call __extendhfsf2
; RV32ID-ILP32-NEXT: fmv.w.x fa5, a0
; RV32ID-ILP32-NEXT: fcvt.w.s a0, fa5, rtz
; RV32ID-ILP32-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
@@ -6295,7 +6295,7 @@ define signext i16 @fcvt_w_s_i16(half %a) nounwind {
; RV64ID-LP64: # %bb.0:
; RV64ID-LP64-NEXT: addi sp, sp, -16
; RV64ID-LP64-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
-; RV64ID-LP64-NEXT: call __extendhfsf2@plt
+; RV64ID-LP64-NEXT: call __extendhfsf2
; RV64ID-LP64-NEXT: fmv.w.x fa5, a0
; RV64ID-LP64-NEXT: fcvt.l.s a0, fa5, rtz
; RV64ID-LP64-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
@@ -6306,7 +6306,7 @@ define signext i16 @fcvt_w_s_i16(half %a) nounwind {
; RV32ID: # %bb.0:
; RV32ID-NEXT: addi sp, sp, -16
; RV32ID-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32ID-NEXT: call __extendhfsf2@plt
+; RV32ID-NEXT: call __extendhfsf2
; RV32ID-NEXT: fcvt.w.s a0, fa0, rtz
; RV32ID-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32ID-NEXT: addi sp, sp, 16
@@ -6316,7 +6316,7 @@ define signext i16 @fcvt_w_s_i16(half %a) nounwind {
; RV64ID: # %bb.0:
; RV64ID-NEXT: addi sp, sp, -16
; RV64ID-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
-; RV64ID-NEXT: call __extendhfsf2@plt
+; RV64ID-NEXT: call __extendhfsf2
; RV64ID-NEXT: fcvt.l.s a0, fa0, rtz
; RV64ID-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64ID-NEXT: addi sp, sp, 16
@@ -6487,13 +6487,13 @@ define signext i16 @fcvt_w_s_sat_i16(half %a) nounwind {
; RV32I-NEXT: sw s2, 0(sp) # 4-byte Folded Spill
; RV32I-NEXT: slli a0, a0, 16
; RV32I-NEXT: srli a0, a0, 16
-; RV32I-NEXT: call __extendhfsf2@plt
+; RV32I-NEXT: call __extendhfsf2
; RV32I-NEXT: mv s0, a0
; RV32I-NEXT: lui a1, 815104
-; RV32I-NEXT: call __gesf2@plt
+; RV32I-NEXT: call __gesf2
; RV32I-NEXT: mv s2, a0
; RV32I-NEXT: mv a0, s0
-; RV32I-NEXT: call __fixsfsi@plt
+; RV32I-NEXT: call __fixsfsi
; RV32I-NEXT: mv s1, a0
; RV32I-NEXT: bgez s2, .LBB32_2
; RV32I-NEXT: # %bb.1: # %start
@@ -6502,7 +6502,7 @@ define signext i16 @fcvt_w_s_sat_i16(half %a) nounwind {
; RV32I-NEXT: lui a0, 290816
; RV32I-NEXT: addi a1, a0, -512
; RV32I-NEXT: mv a0, s0
-; RV32I-NEXT: call __gtsf2@plt
+; RV32I-NEXT: call __gtsf2
; RV32I-NEXT: blez a0, .LBB32_4
; RV32I-NEXT: # %bb.3: # %start
; RV32I-NEXT: lui s1, 8
@@ -6510,7 +6510,7 @@ define signext i16 @fcvt_w_s_sat_i16(half %a) nounwind {
; RV32I-NEXT: .LBB32_4: # %start
; RV32I-NEXT: mv a0, s0
; RV32I-NEXT: mv a1, s0
-; RV32I-NEXT: call __unordsf2@plt
+; RV32I-NEXT: call __unordsf2
; RV32I-NEXT: snez a0, a0
; RV32I-NEXT: addi a0, a0, -1
; RV32I-NEXT: and a0, a0, s1
@@ -6532,13 +6532,13 @@ define signext i16 @fcvt_w_s_sat_i16(half %a) nounwind {
; RV64I-NEXT: sd s2, 0(sp) # 8-byte Folded Spill
; RV64I-NEXT: slli a0, a0, 48
; RV64I-NEXT: srli a0, a0, 48
-; RV64I-NEXT: call __extendhfsf2@plt
+; RV64I-NEXT: call __extendhfsf2
; RV64I-NEXT: mv s0, a0
; RV64I-NEXT: lui a1, 815104
-; RV64I-NEXT: call __gesf2@plt
+; RV64I-NEXT: call __gesf2
; RV64I-NEXT: mv s2, a0
; RV64I-NEXT: mv a0, s0
-; RV64I-NEXT: call __fixsfdi@plt
+; RV64I-NEXT: call __fixsfdi
; RV64I-NEXT: mv s1, a0
; RV64I-NEXT: bgez s2, .LBB32_2
; RV64I-NEXT: # %bb.1: # %start
@@ -6547,7 +6547,7 @@ define signext i16 @fcvt_w_s_sat_i16(half %a) nounwind {
; RV64I-NEXT: lui a0, 290816
; RV64I-NEXT: addiw a1, a0, -512
; RV64I-NEXT: mv a0, s0
-; RV64I-NEXT: call __gtsf2@plt
+; RV64I-NEXT: call __gtsf2
; RV64I-NEXT: blez a0, .LBB32_4
; RV64I-NEXT: # %bb.3: # %start
; RV64I-NEXT: lui s1, 8
@@ -6555,7 +6555,7 @@ define signext i16 @fcvt_w_s_sat_i16(half %a) nounwind {
; RV64I-NEXT: .LBB32_4: # %start
; RV64I-NEXT: mv a0, s0
; RV64I-NEXT: mv a1, s0
-; RV64I-NEXT: call __unordsf2@plt
+; RV64I-NEXT: call __unordsf2
; RV64I-NEXT: snez a0, a0
; RV64I-NEXT: addi a0, a0, -1
; RV64I-NEXT: and a0, a0, s1
@@ -6572,7 +6572,7 @@ define signext i16 @fcvt_w_s_sat_i16(half %a) nounwind {
; RV32ID-ILP32: # %bb.0: # %start
; RV32ID-ILP32-NEXT: addi sp, sp, -16
; RV32ID-ILP32-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32ID-ILP32-NEXT: call __extendhfsf2@plt
+; RV32ID-ILP32-NEXT: call __extendhfsf2
; RV32ID-ILP32-NEXT: fmv.w.x fa5, a0
; RV32ID-ILP32-NEXT: feq.s a0, fa5, fa5
; RV32ID-ILP32-NEXT: neg a0, a0
@@ -6592,7 +6592,7 @@ define signext i16 @fcvt_w_s_sat_i16(half %a) nounwind {
; RV64ID-LP64: # %bb.0: # %start
; RV64ID-LP64-NEXT: addi sp, sp, -16
; RV64ID-LP64-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
-; RV64ID-LP64-NEXT: call __extendhfsf2@plt
+; RV64ID-LP64-NEXT: call __extendhfsf2
; RV64ID-LP64-NEXT: fmv.w.x fa5, a0
; RV64ID-LP64-NEXT: feq.s a0, fa5, fa5
; RV64ID-LP64-NEXT: lui a1, %hi(.LCPI32_0)
@@ -6612,7 +6612,7 @@ define signext i16 @fcvt_w_s_sat_i16(half %a) nounwind {
; RV32ID: # %bb.0: # %start
; RV32ID-NEXT: addi sp, sp, -16
; RV32ID-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32ID-NEXT: call __extendhfsf2@plt
+; RV32ID-NEXT: call __extendhfsf2
; RV32ID-NEXT: feq.s a0, fa0, fa0
; RV32ID-NEXT: neg a0, a0
; RV32ID-NEXT: lui a1, %hi(.LCPI32_0)
@@ -6631,7 +6631,7 @@ define signext i16 @fcvt_w_s_sat_i16(half %a) nounwind {
; RV64ID: # %bb.0: # %start
; RV64ID-NEXT: addi sp, sp, -16
; RV64ID-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
-; RV64ID-NEXT: call __extendhfsf2@plt
+; RV64ID-NEXT: call __extendhfsf2
; RV64ID-NEXT: feq.s a0, fa0, fa0
; RV64ID-NEXT: lui a1, %hi(.LCPI32_0)
; RV64ID-NEXT: flw fa5, %lo(.LCPI32_0)(a1)
@@ -6783,8 +6783,8 @@ define zeroext i16 @fcvt_wu_s_i16(half %a) nounwind {
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT: slli a0, a0, 16
; RV32I-NEXT: srli a0, a0, 16
-; RV32I-NEXT: call __extendhfsf2@plt
-; RV32I-NEXT: call __fixunssfsi@plt
+; RV32I-NEXT: call __extendhfsf2
+; RV32I-NEXT: call __fixunssfsi
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
@@ -6795,8 +6795,8 @@ define zeroext i16 @fcvt_wu_s_i16(half %a) nounwind {
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT: slli a0, a0, 48
; RV64I-NEXT: srli a0, a0, 48
-; RV64I-NEXT: call __extendhfsf2@plt
-; RV64I-NEXT: call __fixunssfdi@plt
+; RV64I-NEXT: call __extendhfsf2
+; RV64I-NEXT: call __fixunssfdi
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
@@ -6805,7 +6805,7 @@ define zeroext i16 @fcvt_wu_s_i16(half %a) nounwind {
; RV32ID-ILP32: # %bb.0:
; RV32ID-ILP32-NEXT: addi sp, sp, -16
; RV32ID-ILP32-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32ID-ILP32-NEXT: call __extendhfsf2@plt
+; RV32ID-ILP32-NEXT: call __extendhfsf2
; RV32ID-ILP32-NEXT: fmv.w.x fa5, a0
; RV32ID-ILP32-NEXT: fcvt.wu.s a0, fa5, rtz
; RV32ID-ILP32-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
@@ -6816,7 +6816,7 @@ define zeroext i16 @fcvt_wu_s_i16(half %a) nounwind {
; RV64ID-LP64: # %bb.0:
; RV64ID-LP64-NEXT: addi sp, sp, -16
; RV64ID-LP64-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
-; RV64ID-LP64-NEXT: call __extendhfsf2@plt
+; RV64ID-LP64-NEXT: call __extendhfsf2
; RV64ID-LP64-NEXT: fmv.w.x fa5, a0
; RV64ID-LP64-NEXT: fcvt.lu.s a0, fa5, rtz
; RV64ID-LP64-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
@@ -6827,7 +6827,7 @@ define zeroext i16 @fcvt_wu_s_i16(half %a) nounwind {
; RV32ID: # %bb.0:
; RV32ID-NEXT: addi sp, sp, -16
; RV32ID-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32ID-NEXT: call __extendhfsf2@plt
+; RV32ID-NEXT: call __extendhfsf2
; RV32ID-NEXT: fcvt.wu.s a0, fa0, rtz
; RV32ID-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32ID-NEXT: addi sp, sp, 16
@@ -6837,7 +6837,7 @@ define zeroext i16 @fcvt_wu_s_i16(half %a) nounwind {
; RV64ID: # %bb.0:
; RV64ID-NEXT: addi sp, sp, -16
; RV64ID-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
-; RV64ID-NEXT: call __extendhfsf2@plt
+; RV64ID-NEXT: call __extendhfsf2
; RV64ID-NEXT: fcvt.lu.s a0, fa0, rtz
; RV64ID-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64ID-NEXT: addi sp, sp, 16
@@ -6978,18 +6978,18 @@ define zeroext i16 @fcvt_wu_s_sat_i16(half %a) nounwind {
; RV32I-NEXT: lui s3, 16
; RV32I-NEXT: addi s3, s3, -1
; RV32I-NEXT: and a0, a0, s3
-; RV32I-NEXT: call __extendhfsf2@plt
+; RV32I-NEXT: call __extendhfsf2
; RV32I-NEXT: mv s2, a0
-; RV32I-NEXT: call __fixunssfsi@plt
+; RV32I-NEXT: call __fixunssfsi
; RV32I-NEXT: mv s0, a0
; RV32I-NEXT: mv a0, s2
; RV32I-NEXT: li a1, 0
-; RV32I-NEXT: call __gesf2@plt
+; RV32I-NEXT: call __gesf2
; RV32I-NEXT: mv s1, a0
; RV32I-NEXT: lui a0, 292864
; RV32I-NEXT: addi a1, a0, -256
; RV32I-NEXT: mv a0, s2
-; RV32I-NEXT: call __gtsf2@plt
+; RV32I-NEXT: call __gtsf2
; RV32I-NEXT: blez a0, .LBB34_2
; RV32I-NEXT: # %bb.1: # %start
; RV32I-NEXT: mv a0, s3
@@ -7019,18 +7019,18 @@ define zeroext i16 @fcvt_wu_s_sat_i16(half %a) nounwind {
; RV64I-NEXT: lui s3, 16
; RV64I-NEXT: addiw s3, s3, -1
; RV64I-NEXT: and a0, a0, s3
-; RV64I-NEXT: call __extendhfsf2@plt
+; RV64I-NEXT: call __extendhfsf2
; RV64I-NEXT: mv s2, a0
-; RV64I-NEXT: call __fixunssfdi@plt
+; RV64I-NEXT: call __fixunssfdi
; RV64I-NEXT: mv s0, a0
; RV64I-NEXT: mv a0, s2
; RV64I-NEXT: li a1, 0
-; RV64I-NEXT: call __gesf2@plt
+; RV64I-NEXT: call __gesf2
; RV64I-NEXT: mv s1, a0
; RV64I-NEXT: lui a0, 292864
; RV64I-NEXT: addiw a1, a0, -256
; RV64I-NEXT: mv a0, s2
-; RV64I-NEXT: call __gtsf2@plt
+; RV64I-NEXT: call __gtsf2
; RV64I-NEXT: blez a0, .LBB34_2
; RV64I-NEXT: # %bb.1: # %start
; RV64I-NEXT: mv a0, s3
@@ -7053,7 +7053,7 @@ define zeroext i16 @fcvt_wu_s_sat_i16(half %a) nounwind {
; RV32ID-ILP32: # %bb.0: # %start
; RV32ID-ILP32-NEXT: addi sp, sp, -16
; RV32ID-ILP32-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32ID-ILP32-NEXT: call __extendhfsf2@plt
+; RV32ID-ILP32-NEXT: call __extendhfsf2
; RV32ID-ILP32-NEXT: lui a1, %hi(.LCPI34_0)
; RV32ID-ILP32-NEXT: flw fa5, %lo(.LCPI34_0)(a1)
; RV32ID-ILP32-NEXT: fmv.w.x fa4, a0
@@ -7069,7 +7069,7 @@ define zeroext i16 @fcvt_wu_s_sat_i16(half %a) nounwind {
; RV64ID-LP64: # %bb.0: # %start
; RV64ID-LP64-NEXT: addi sp, sp, -16
; RV64ID-LP64-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
-; RV64ID-LP64-NEXT: call __extendhfsf2@plt
+; RV64ID-LP64-NEXT: call __extendhfsf2
; RV64ID-LP64-NEXT: lui a1, %hi(.LCPI34_0)
; RV64ID-LP64-NEXT: flw fa5, %lo(.LCPI34_0)(a1)
; RV64ID-LP64-NEXT: fmv.w.x fa4, a0
@@ -7085,7 +7085,7 @@ define zeroext i16 @fcvt_wu_s_sat_i16(half %a) nounwind {
; RV32ID: # %bb.0: # %start
; RV32ID-NEXT: addi sp, sp, -16
; RV32ID-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32ID-NEXT: call __extendhfsf2@plt
+; RV32ID-NEXT: call __extendhfsf2
; RV32ID-NEXT: lui a0, %hi(.LCPI34_0)
; RV32ID-NEXT: flw fa5, %lo(.LCPI34_0)(a0)
; RV32ID-NEXT: fmv.w.x fa4, zero
@@ -7100,7 +7100,7 @@ define zeroext i16 @fcvt_wu_s_sat_i16(half %a) nounwind {
; RV64ID: # %bb.0: # %start
; RV64ID-NEXT: addi sp, sp, -16
; RV64ID-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
-; RV64ID-NEXT: call __extendhfsf2@plt
+; RV64ID-NEXT: call __extendhfsf2
; RV64ID-NEXT: lui a0, %hi(.LCPI34_0)
; RV64ID-NEXT: flw fa5, %lo(.LCPI34_0)(a0)
; RV64ID-NEXT: fmv.w.x fa4, zero
@@ -7224,8 +7224,8 @@ define signext i8 @fcvt_w_s_i8(half %a) nounwind {
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT: slli a0, a0, 16
; RV32I-NEXT: srli a0, a0, 16
-; RV32I-NEXT: call __extendhfsf2@plt
-; RV32I-NEXT: call __fixsfsi@plt
+; RV32I-NEXT: call __extendhfsf2
+; RV32I-NEXT: call __fixsfsi
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
@@ -7236,8 +7236,8 @@ define signext i8 @fcvt_w_s_i8(half %a) nounwind {
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT: slli a0, a0, 48
; RV64I-NEXT: srli a0, a0, 48
-; RV64I-NEXT: call __extendhfsf2@plt
-; RV64I-NEXT: call __fixsfdi@plt
+; RV64I-NEXT: call __extendhfsf2
+; RV64I-NEXT: call __fixsfdi
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
@@ -7246,7 +7246,7 @@ define signext i8 @fcvt_w_s_i8(half %a) nounwind {
; RV32ID-ILP32: # %bb.0:
; RV32ID-ILP32-NEXT: addi sp, sp, -16
; RV32ID-ILP32-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32ID-ILP32-NEXT: call __extendhfsf2@plt
+; RV32ID-ILP32-NEXT: call __extendhfsf2
; RV32ID-ILP32-NEXT: fmv.w.x fa5, a0
; RV32ID-ILP32-NEXT: fcvt.w.s a0, fa5, rtz
; RV32ID-ILP32-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
@@ -7257,7 +7257,7 @@ define signext i8 @fcvt_w_s_i8(half %a) nounwind {
; RV64ID-LP64: # %bb.0:
; RV64ID-LP64-NEXT: addi sp, sp, -16
; RV64ID-LP64-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
-; RV64ID-LP64-NEXT: call __extendhfsf2@plt
+; RV64ID-LP64-NEXT: call __extendhfsf2
; RV64ID-LP64-NEXT: fmv.w.x fa5, a0
; RV64ID-LP64-NEXT: fcvt.l.s a0, fa5, rtz
; RV64ID-LP64-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
@@ -7268,7 +7268,7 @@ define signext i8 @fcvt_w_s_i8(half %a) nounwind {
; RV32ID: # %bb.0:
; RV32ID-NEXT: addi sp, sp, -16
; RV32ID-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32ID-NEXT: call __extendhfsf2@plt
+; RV32ID-NEXT: call __extendhfsf2
; RV32ID-NEXT: fcvt.w.s a0, fa0, rtz
; RV32ID-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32ID-NEXT: addi sp, sp, 16
@@ -7278,7 +7278,7 @@ define signext i8 @fcvt_w_s_i8(half %a) nounwind {
; RV64ID: # %bb.0:
; RV64ID-NEXT: addi sp, sp, -16
; RV64ID-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
-; RV64ID-NEXT: call __extendhfsf2@plt
+; RV64ID-NEXT: call __extendhfsf2
; RV64ID-NEXT: fcvt.l.s a0, fa0, rtz
; RV64ID-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64ID-NEXT: addi sp, sp, 16
@@ -7445,13 +7445,13 @@ define signext i8 @fcvt_w_s_sat_i8(half %a) nounwind {
; RV32I-NEXT: sw s2, 0(sp) # 4-byte Folded Spill
; RV32I-NEXT: slli a0, a0, 16
; RV32I-NEXT: srli a0, a0, 16
-; RV32I-NEXT: call __extendhfsf2@plt
+; RV32I-NEXT: call __extendhfsf2
; RV32I-NEXT: mv s0, a0
; RV32I-NEXT: lui a1, 798720
-; RV32I-NEXT: call __gesf2@plt
+; RV32I-NEXT: call __gesf2
; RV32I-NEXT: mv s2, a0
; RV32I-NEXT: mv a0, s0
-; RV32I-NEXT: call __fixsfsi@plt
+; RV32I-NEXT: call __fixsfsi
; RV32I-NEXT: mv s1, a0
; RV32I-NEXT: bgez s2, .LBB36_2
; RV32I-NEXT: # %bb.1: # %start
@@ -7459,14 +7459,14 @@ define signext i8 @fcvt_w_s_sat_i8(half %a) nounwind {
; RV32I-NEXT: .LBB36_2: # %start
; RV32I-NEXT: lui a1, 274400
; RV32I-NEXT: mv a0, s0
-; RV32I-NEXT: call __gtsf2@plt
+; RV32I-NEXT: call __gtsf2
; RV32I-NEXT: blez a0, .LBB36_4
; RV32I-NEXT: # %bb.3: # %start
; RV32I-NEXT: li s1, 127
; RV32I-NEXT: .LBB36_4: # %start
; RV32I-NEXT: mv a0, s0
; RV32I-NEXT: mv a1, s0
-; RV32I-NEXT: call __unordsf2@plt
+; RV32I-NEXT: call __unordsf2
; RV32I-NEXT: snez a0, a0
; RV32I-NEXT: addi a0, a0, -1
; RV32I-NEXT: and a0, a0, s1
@@ -7488,13 +7488,13 @@ define signext i8 @fcvt_w_s_sat_i8(half %a) nounwind {
; RV64I-NEXT: sd s2, 0(sp) # 8-byte Folded Spill
; RV64I-NEXT: slli a0, a0, 48
; RV64I-NEXT: srli a0, a0, 48
-; RV64I-NEXT: call __extendhfsf2@plt
+; RV64I-NEXT: call __extendhfsf2
; RV64I-NEXT: mv s0, a0
; RV64I-NEXT: lui a1, 798720
-; RV64I-NEXT: call __gesf2@plt
+; RV64I-NEXT: call __gesf2
; RV64I-NEXT: mv s2, a0
; RV64I-NEXT: mv a0, s0
-; RV64I-NEXT: call __fixsfdi@plt
+; RV64I-NEXT: call __fixsfdi
; RV64I-NEXT: mv s1, a0
; RV64I-NEXT: bgez s2, .LBB36_2
; RV64I-NEXT: # %bb.1: # %start
@@ -7502,14 +7502,14 @@ define signext i8 @fcvt_w_s_sat_i8(half %a) nounwind {
; RV64I-NEXT: .LBB36_2: # %start
; RV64I-NEXT: lui a1, 274400
; RV64I-NEXT: mv a0, s0
-; RV64I-NEXT: call __gtsf2@plt
+; RV64I-NEXT: call __gtsf2
; RV64I-NEXT: blez a0, .LBB36_4
; RV64I-NEXT: # %bb.3: # %start
; RV64I-NEXT: li s1, 127
; RV64I-NEXT: .LBB36_4: # %start
; RV64I-NEXT: mv a0, s0
; RV64I-NEXT: mv a1, s0
-; RV64I-NEXT: call __unordsf2@plt
+; RV64I-NEXT: call __unordsf2
; RV64I-NEXT: snez a0, a0
; RV64I-NEXT: addi a0, a0, -1
; RV64I-NEXT: and a0, a0, s1
@@ -7526,7 +7526,7 @@ define signext i8 @fcvt_w_s_sat_i8(half %a) nounwind {
; RV32ID-ILP32: # %bb.0: # %start
; RV32ID-ILP32-NEXT: addi sp, sp, -16
; RV32ID-ILP32-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32ID-ILP32-NEXT: call __extendhfsf2@plt
+; RV32ID-ILP32-NEXT: call __extendhfsf2
; RV32ID-ILP32-NEXT: fmv.w.x fa5, a0
; RV32ID-ILP32-NEXT: feq.s a0, fa5, fa5
; RV32ID-ILP32-NEXT: neg a0, a0
@@ -7546,7 +7546,7 @@ define signext i8 @fcvt_w_s_sat_i8(half %a) nounwind {
; RV64ID-LP64: # %bb.0: # %start
; RV64ID-LP64-NEXT: addi sp, sp, -16
; RV64ID-LP64-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
-; RV64ID-LP64-NEXT: call __extendhfsf2@plt
+; RV64ID-LP64-NEXT: call __extendhfsf2
; RV64ID-LP64-NEXT: fmv.w.x fa5, a0
; RV64ID-LP64-NEXT: feq.s a0, fa5, fa5
; RV64ID-LP64-NEXT: neg a0, a0
@@ -7566,7 +7566,7 @@ define signext i8 @fcvt_w_s_sat_i8(half %a) nounwind {
; RV32ID: # %bb.0: # %start
; RV32ID-NEXT: addi sp, sp, -16
; RV32ID-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32ID-NEXT: call __extendhfsf2@plt
+; RV32ID-NEXT: call __extendhfsf2
; RV32ID-NEXT: feq.s a0, fa0, fa0
; RV32ID-NEXT: neg a0, a0
; RV32ID-NEXT: lui a1, 798720
@@ -7585,7 +7585,7 @@ define signext i8 @fcvt_w_s_sat_i8(half %a) nounwind {
; RV64ID: # %bb.0: # %start
; RV64ID-NEXT: addi sp, sp, -16
; RV64ID-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
-; RV64ID-NEXT: call __extendhfsf2@plt
+; RV64ID-NEXT: call __extendhfsf2
; RV64ID-NEXT: feq.s a0, fa0, fa0
; RV64ID-NEXT: neg a0, a0
; RV64ID-NEXT: lui a1, 798720
@@ -7734,8 +7734,8 @@ define zeroext i8 @fcvt_wu_s_i8(half %a) nounwind {
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT: slli a0, a0, 16
; RV32I-NEXT: srli a0, a0, 16
-; RV32I-NEXT: call __extendhfsf2@plt
-; RV32I-NEXT: call __fixunssfsi@plt
+; RV32I-NEXT: call __extendhfsf2
+; RV32I-NEXT: call __fixunssfsi
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
@@ -7746,8 +7746,8 @@ define zeroext i8 @fcvt_wu_s_i8(half %a) nounwind {
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT: slli a0, a0, 48
; RV64I-NEXT: srli a0, a0, 48
-; RV64I-NEXT: call __extendhfsf2@plt
-; RV64I-NEXT: call __fixunssfdi@plt
+; RV64I-NEXT: call __extendhfsf2
+; RV64I-NEXT: call __fixunssfdi
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
@@ -7756,7 +7756,7 @@ define zeroext i8 @fcvt_wu_s_i8(half %a) nounwind {
; RV32ID-ILP32: # %bb.0:
; RV32ID-ILP32-NEXT: addi sp, sp, -16
; RV32ID-ILP32-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32ID-ILP32-NEXT: call __extendhfsf2@plt
+; RV32ID-ILP32-NEXT: call __extendhfsf2
; RV32ID-ILP32-NEXT: fmv.w.x fa5, a0
; RV32ID-ILP32-NEXT: fcvt.wu.s a0, fa5, rtz
; RV32ID-ILP32-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
@@ -7767,7 +7767,7 @@ define zeroext i8 @fcvt_wu_s_i8(half %a) nounwind {
; RV64ID-LP64: # %bb.0:
; RV64ID-LP64-NEXT: addi sp, sp, -16
; RV64ID-LP64-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
-; RV64ID-LP64-NEXT: call __extendhfsf2@plt
+; RV64ID-LP64-NEXT: call __extendhfsf2
; RV64ID-LP64-NEXT: fmv.w.x fa5, a0
; RV64ID-LP64-NEXT: fcvt.lu.s a0, fa5, rtz
; RV64ID-LP64-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
@@ -7778,7 +7778,7 @@ define zeroext i8 @fcvt_wu_s_i8(half %a) nounwind {
; RV32ID: # %bb.0:
; RV32ID-NEXT: addi sp, sp, -16
; RV32ID-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32ID-NEXT: call __extendhfsf2@plt
+; RV32ID-NEXT: call __extendhfsf2
; RV32ID-NEXT: fcvt.wu.s a0, fa0, rtz
; RV32ID-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32ID-NEXT: addi sp, sp, 16
@@ -7788,7 +7788,7 @@ define zeroext i8 @fcvt_wu_s_i8(half %a) nounwind {
; RV64ID: # %bb.0:
; RV64ID-NEXT: addi sp, sp, -16
; RV64ID-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
-; RV64ID-NEXT: call __extendhfsf2@plt
+; RV64ID-NEXT: call __extendhfsf2
; RV64ID-NEXT: fcvt.lu.s a0, fa0, rtz
; RV64ID-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64ID-NEXT: addi sp, sp, 16
@@ -7923,17 +7923,17 @@ define zeroext i8 @fcvt_wu_s_sat_i8(half %a) nounwind {
; RV32I-NEXT: sw s2, 0(sp) # 4-byte Folded Spill
; RV32I-NEXT: slli a0, a0, 16
; RV32I-NEXT: srli a0, a0, 16
-; RV32I-NEXT: call __extendhfsf2@plt
+; RV32I-NEXT: call __extendhfsf2
; RV32I-NEXT: mv s2, a0
; RV32I-NEXT: li a1, 0
-; RV32I-NEXT: call __gesf2@plt
+; RV32I-NEXT: call __gesf2
; RV32I-NEXT: mv s0, a0
; RV32I-NEXT: mv a0, s2
-; RV32I-NEXT: call __fixunssfsi@plt
+; RV32I-NEXT: call __fixunssfsi
; RV32I-NEXT: mv s1, a0
; RV32I-NEXT: lui a1, 276464
; RV32I-NEXT: mv a0, s2
-; RV32I-NEXT: call __gtsf2@plt
+; RV32I-NEXT: call __gtsf2
; RV32I-NEXT: blez a0, .LBB38_2
; RV32I-NEXT: # %bb.1: # %start
; RV32I-NEXT: li a0, 255
@@ -7960,17 +7960,17 @@ define zeroext i8 @fcvt_wu_s_sat_i8(half %a) nounwind {
; RV64I-NEXT: sd s2, 0(sp) # 8-byte Folded Spill
; RV64I-NEXT: slli a0, a0, 48
; RV64I-NEXT: srli a0, a0, 48
-; RV64I-NEXT: call __extendhfsf2@plt
+; RV64I-NEXT: call __extendhfsf2
; RV64I-NEXT: mv s2, a0
; RV64I-NEXT: li a1, 0
-; RV64I-NEXT: call __gesf2@plt
+; RV64I-NEXT: call __gesf2
; RV64I-NEXT: mv s0, a0
; RV64I-NEXT: mv a0, s2
-; RV64I-NEXT: call __fixunssfdi@plt
+; RV64I-NEXT: call __fixunssfdi
; RV64I-NEXT: mv s1, a0
; RV64I-NEXT: lui a1, 276464
; RV64I-NEXT: mv a0, s2
-; RV64I-NEXT: call __gtsf2@plt
+; RV64I-NEXT: call __gtsf2
; RV64I-NEXT: blez a0, .LBB38_2
; RV64I-NEXT: # %bb.1: # %start
; RV64I-NEXT: li a0, 255
@@ -7992,7 +7992,7 @@ define zeroext i8 @fcvt_wu_s_sat_i8(half %a) nounwind {
; RV32ID-ILP32: # %bb.0: # %start
; RV32ID-ILP32-NEXT: addi sp, sp, -16
; RV32ID-ILP32-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32ID-ILP32-NEXT: call __extendhfsf2@plt
+; RV32ID-ILP32-NEXT: call __extendhfsf2
; RV32ID-ILP32-NEXT: fmv.w.x fa5, a0
; RV32ID-ILP32-NEXT: fmv.w.x fa4, zero
; RV32ID-ILP32-NEXT: fmax.s fa5, fa5, fa4
@@ -8008,7 +8008,7 @@ define zeroext i8 @fcvt_wu_s_sat_i8(half %a) nounwind {
; RV64ID-LP64: # %bb.0: # %start
; RV64ID-LP64-NEXT: addi sp, sp, -16
; RV64ID-LP64-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
-; RV64ID-LP64-NEXT: call __extendhfsf2@plt
+; RV64ID-LP64-NEXT: call __extendhfsf2
; RV64ID-LP64-NEXT: fmv.w.x fa5, a0
; RV64ID-LP64-NEXT: fmv.w.x fa4, zero
; RV64ID-LP64-NEXT: fmax.s fa5, fa5, fa4
@@ -8024,7 +8024,7 @@ define zeroext i8 @fcvt_wu_s_sat_i8(half %a) nounwind {
; RV32ID: # %bb.0: # %start
; RV32ID-NEXT: addi sp, sp, -16
; RV32ID-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32ID-NEXT: call __extendhfsf2@plt
+; RV32ID-NEXT: call __extendhfsf2
; RV32ID-NEXT: fmv.w.x fa5, zero
; RV32ID-NEXT: fmax.s fa5, fa0, fa5
; RV32ID-NEXT: lui a0, 276464
@@ -8039,7 +8039,7 @@ define zeroext i8 @fcvt_wu_s_sat_i8(half %a) nounwind {
; RV64ID: # %bb.0: # %start
; RV64ID-NEXT: addi sp, sp, -16
; RV64ID-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
-; RV64ID-NEXT: call __extendhfsf2@plt
+; RV64ID-NEXT: call __extendhfsf2
; RV64ID-NEXT: fmv.w.x fa5, zero
; RV64ID-NEXT: fmax.s fa5, fa0, fa5
; RV64ID-NEXT: lui a0, 276464
@@ -8203,20 +8203,20 @@ define zeroext i32 @fcvt_wu_h_sat_zext(half %a) nounwind {
; RV32I-NEXT: sw s2, 0(sp) # 4-byte Folded Spill
; RV32I-NEXT: slli a0, a0, 16
; RV32I-NEXT: srli a0, a0, 16
-; RV32I-NEXT: call __extendhfsf2@plt
+; RV32I-NEXT: call __extendhfsf2
; RV32I-NEXT: mv s0, a0
; RV32I-NEXT: lui a1, 325632
; RV32I-NEXT: addi a1, a1, -1
-; RV32I-NEXT: call __gtsf2@plt
+; RV32I-NEXT: call __gtsf2
; RV32I-NEXT: sgtz a0, a0
; RV32I-NEXT: neg s1, a0
; RV32I-NEXT: mv a0, s0
; RV32I-NEXT: li a1, 0
-; RV32I-NEXT: call __gesf2@plt
+; RV32I-NEXT: call __gesf2
; RV32I-NEXT: slti a0, a0, 0
; RV32I-NEXT: addi s2, a0, -1
; RV32I-NEXT: mv a0, s0
-; RV32I-NEXT: call __fixunssfsi@plt
+; RV32I-NEXT: call __fixunssfsi
; RV32I-NEXT: and a0, s2, a0
; RV32I-NEXT: or a0, s1, a0
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
@@ -8235,18 +8235,18 @@ define zeroext i32 @fcvt_wu_h_sat_zext(half %a) nounwind {
; RV64I-NEXT: sd s2, 0(sp) # 8-byte Folded Spill
; RV64I-NEXT: slli a0, a0, 48
; RV64I-NEXT: srli a0, a0, 48
-; RV64I-NEXT: call __extendhfsf2@plt
+; RV64I-NEXT: call __extendhfsf2
; RV64I-NEXT: mv s2, a0
; RV64I-NEXT: li a1, 0
-; RV64I-NEXT: call __gesf2@plt
+; RV64I-NEXT: call __gesf2
; RV64I-NEXT: mv s0, a0
; RV64I-NEXT: mv a0, s2
-; RV64I-NEXT: call __fixunssfdi@plt
+; RV64I-NEXT: call __fixunssfdi
; RV64I-NEXT: mv s1, a0
; RV64I-NEXT: lui a1, 325632
; RV64I-NEXT: addiw a1, a1, -1
; RV64I-NEXT: mv a0, s2
-; RV64I-NEXT: call __gtsf2@plt
+; RV64I-NEXT: call __gtsf2
; RV64I-NEXT: blez a0, .LBB39_2
; RV64I-NEXT: # %bb.1: # %start
; RV64I-NEXT: li a0, -1
@@ -8270,7 +8270,7 @@ define zeroext i32 @fcvt_wu_h_sat_zext(half %a) nounwind {
; RV32ID-ILP32: # %bb.0: # %start
; RV32ID-ILP32-NEXT: addi sp, sp, -16
; RV32ID-ILP32-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32ID-ILP32-NEXT: call __extendhfsf2@plt
+; RV32ID-ILP32-NEXT: call __extendhfsf2
; RV32ID-ILP32-NEXT: fmv.w.x fa5, a0
; RV32ID-ILP32-NEXT: fcvt.wu.s a0, fa5, rtz
; RV32ID-ILP32-NEXT: feq.s a1, fa5, fa5
@@ -8285,7 +8285,7 @@ define zeroext i32 @fcvt_wu_h_sat_zext(half %a) nounwind {
; RV64ID-LP64: # %bb.0: # %start
; RV64ID-LP64-NEXT: addi sp, sp, -16
; RV64ID-LP64-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
-; RV64ID-LP64-NEXT: call __extendhfsf2@plt
+; RV64ID-LP64-NEXT: call __extendhfsf2
; RV64ID-LP64-NEXT: fmv.w.x fa5, a0
; RV64ID-LP64-NEXT: fcvt.wu.s a0, fa5, rtz
; RV64ID-LP64-NEXT: feq.s a1, fa5, fa5
@@ -8302,7 +8302,7 @@ define zeroext i32 @fcvt_wu_h_sat_zext(half %a) nounwind {
; RV32ID: # %bb.0: # %start
; RV32ID-NEXT: addi sp, sp, -16
; RV32ID-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32ID-NEXT: call __extendhfsf2@plt
+; RV32ID-NEXT: call __extendhfsf2
; RV32ID-NEXT: fcvt.wu.s a0, fa0, rtz
; RV32ID-NEXT: feq.s a1, fa0, fa0
; RV32ID-NEXT: seqz a1, a1
@@ -8316,7 +8316,7 @@ define zeroext i32 @fcvt_wu_h_sat_zext(half %a) nounwind {
; RV64ID: # %bb.0: # %start
; RV64ID-NEXT: addi sp, sp, -16
; RV64ID-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
-; RV64ID-NEXT: call __extendhfsf2@plt
+; RV64ID-NEXT: call __extendhfsf2
; RV64ID-NEXT: fcvt.wu.s a0, fa0, rtz
; RV64ID-NEXT: feq.s a1, fa0, fa0
; RV64ID-NEXT: seqz a1, a1
@@ -8454,13 +8454,13 @@ define signext i32 @fcvt_w_h_sat_sext(half %a) nounwind {
; RV32I-NEXT: sw s3, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT: slli a0, a0, 16
; RV32I-NEXT: srli a0, a0, 16
-; RV32I-NEXT: call __extendhfsf2@plt
+; RV32I-NEXT: call __extendhfsf2
; RV32I-NEXT: mv s0, a0
; RV32I-NEXT: lui a1, 847872
-; RV32I-NEXT: call __gesf2@plt
+; RV32I-NEXT: call __gesf2
; RV32I-NEXT: mv s2, a0
; RV32I-NEXT: mv a0, s0
-; RV32I-NEXT: call __fixsfsi@plt
+; RV32I-NEXT: call __fixsfsi
; RV32I-NEXT: mv s1, a0
; RV32I-NEXT: lui s3, 524288
; RV32I-NEXT: bgez s2, .LBB40_2
@@ -8470,14 +8470,14 @@ define signext i32 @fcvt_w_h_sat_sext(half %a) nounwind {
; RV32I-NEXT: lui a1, 323584
; RV32I-NEXT: addi a1, a1, -1
; RV32I-NEXT: mv a0, s0
-; RV32I-NEXT: call __gtsf2@plt
+; RV32I-NEXT: call __gtsf2
; RV32I-NEXT: blez a0, .LBB40_4
; RV32I-NEXT: # %bb.3: # %start
; RV32I-NEXT: addi s1, s3, -1
; RV32I-NEXT: .LBB40_4: # %start
; RV32I-NEXT: mv a0, s0
; RV32I-NEXT: mv a1, s0
-; RV32I-NEXT: call __unordsf2@plt
+; RV32I-NEXT: call __unordsf2
; RV32I-NEXT: snez a0, a0
; RV32I-NEXT: addi a0, a0, -1
; RV32I-NEXT: and a0, a0, s1
@@ -8499,13 +8499,13 @@ define signext i32 @fcvt_w_h_sat_sext(half %a) nounwind {
; RV64I-NEXT: sd s3, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT: slli a0, a0, 48
; RV64I-NEXT: srli a0, a0, 48
-; RV64I-NEXT: call __extendhfsf2@plt
+; RV64I-NEXT: call __extendhfsf2
; RV64I-NEXT: mv s0, a0
; RV64I-NEXT: lui a1, 847872
-; RV64I-NEXT: call __gesf2@plt
+; RV64I-NEXT: call __gesf2
; RV64I-NEXT: mv s2, a0
; RV64I-NEXT: mv a0, s0
-; RV64I-NEXT: call __fixsfdi@plt
+; RV64I-NEXT: call __fixsfdi
; RV64I-NEXT: mv s1, a0
; RV64I-NEXT: lui s3, 524288
; RV64I-NEXT: bgez s2, .LBB40_2
@@ -8515,14 +8515,14 @@ define signext i32 @fcvt_w_h_sat_sext(half %a) nounwind {
; RV64I-NEXT: lui a1, 323584
; RV64I-NEXT: addiw a1, a1, -1
; RV64I-NEXT: mv a0, s0
-; RV64I-NEXT: call __gtsf2@plt
+; RV64I-NEXT: call __gtsf2
; RV64I-NEXT: blez a0, .LBB40_4
; RV64I-NEXT: # %bb.3: # %start
; RV64I-NEXT: addi s1, s3, -1
; RV64I-NEXT: .LBB40_4: # %start
; RV64I-NEXT: mv a0, s0
; RV64I-NEXT: mv a1, s0
-; RV64I-NEXT: call __unordsf2@plt
+; RV64I-NEXT: call __unordsf2
; RV64I-NEXT: snez a0, a0
; RV64I-NEXT: addi a0, a0, -1
; RV64I-NEXT: and a0, a0, s1
@@ -8539,7 +8539,7 @@ define signext i32 @fcvt_w_h_sat_sext(half %a) nounwind {
; RV32ID-ILP32: # %bb.0: # %start
; RV32ID-ILP32-NEXT: addi sp, sp, -16
; RV32ID-ILP32-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32ID-ILP32-NEXT: call __extendhfsf2@plt
+; RV32ID-ILP32-NEXT: call __extendhfsf2
; RV32ID-ILP32-NEXT: fmv.w.x fa5, a0
; RV32ID-ILP32-NEXT: fcvt.w.s a0, fa5, rtz
; RV32ID-ILP32-NEXT: feq.s a1, fa5, fa5
@@ -8554,7 +8554,7 @@ define signext i32 @fcvt_w_h_sat_sext(half %a) nounwind {
; RV64ID-LP64: # %bb.0: # %start
; RV64ID-LP64-NEXT: addi sp, sp, -16
; RV64ID-LP64-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
-; RV64ID-LP64-NEXT: call __extendhfsf2@plt
+; RV64ID-LP64-NEXT: call __extendhfsf2
; RV64ID-LP64-NEXT: fmv.w.x fa5, a0
; RV64ID-LP64-NEXT: fcvt.w.s a0, fa5, rtz
; RV64ID-LP64-NEXT: feq.s a1, fa5, fa5
@@ -8569,7 +8569,7 @@ define signext i32 @fcvt_w_h_sat_sext(half %a) nounwind {
; RV32ID: # %bb.0: # %start
; RV32ID-NEXT: addi sp, sp, -16
; RV32ID-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32ID-NEXT: call __extendhfsf2@plt
+; RV32ID-NEXT: call __extendhfsf2
; RV32ID-NEXT: fcvt.w.s a0, fa0, rtz
; RV32ID-NEXT: feq.s a1, fa0, fa0
; RV32ID-NEXT: seqz a1, a1
@@ -8583,7 +8583,7 @@ define signext i32 @fcvt_w_h_sat_sext(half %a) nounwind {
; RV64ID: # %bb.0: # %start
; RV64ID-NEXT: addi sp, sp, -16
; RV64ID-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
-; RV64ID-NEXT: call __extendhfsf2@plt
+; RV64ID-NEXT: call __extendhfsf2
; RV64ID-NEXT: fcvt.w.s a0, fa0, rtz
; RV64ID-NEXT: feq.s a1, fa0, fa0
; RV64ID-NEXT: seqz a1, a1
diff --git a/llvm/test/CodeGen/RISCV/half-frem.ll b/llvm/test/CodeGen/RISCV/half-frem.ll
index 73d1760c8596..a262094190e1 100644
--- a/llvm/test/CodeGen/RISCV/half-frem.ll
+++ b/llvm/test/CodeGen/RISCV/half-frem.ll
@@ -31,7 +31,7 @@ define half @frem_f16(half %a, half %b) nounwind {
; RV32IZFH-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32IZFH-NEXT: fcvt.s.h fa0, fa0
; RV32IZFH-NEXT: fcvt.s.h fa1, fa1
-; RV32IZFH-NEXT: call fmodf@plt
+; RV32IZFH-NEXT: call fmodf
; RV32IZFH-NEXT: fcvt.h.s fa0, fa0
; RV32IZFH-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32IZFH-NEXT: addi sp, sp, 16
@@ -43,7 +43,7 @@ define half @frem_f16(half %a, half %b) nounwind {
; RV64IZFH-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64IZFH-NEXT: fcvt.s.h fa0, fa0
; RV64IZFH-NEXT: fcvt.s.h fa1, fa1
-; RV64IZFH-NEXT: call fmodf@plt
+; RV64IZFH-NEXT: call fmodf
; RV64IZFH-NEXT: fcvt.h.s fa0, fa0
; RV64IZFH-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64IZFH-NEXT: addi sp, sp, 16
@@ -55,7 +55,7 @@ define half @frem_f16(half %a, half %b) nounwind {
; RV32IZHINX-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32IZHINX-NEXT: fcvt.s.h a0, a0
; RV32IZHINX-NEXT: fcvt.s.h a1, a1
-; RV32IZHINX-NEXT: call fmodf@plt
+; RV32IZHINX-NEXT: call fmodf
; RV32IZHINX-NEXT: fcvt.h.s a0, a0
; RV32IZHINX-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32IZHINX-NEXT: addi sp, sp, 16
@@ -67,7 +67,7 @@ define half @frem_f16(half %a, half %b) nounwind {
; RV64IZHINX-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64IZHINX-NEXT: fcvt.s.h a0, a0
; RV64IZHINX-NEXT: fcvt.s.h a1, a1
-; RV64IZHINX-NEXT: call fmodf@plt
+; RV64IZHINX-NEXT: call fmodf
; RV64IZHINX-NEXT: fcvt.h.s a0, a0
; RV64IZHINX-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64IZHINX-NEXT: addi sp, sp, 16
@@ -79,7 +79,7 @@ define half @frem_f16(half %a, half %b) nounwind {
; RV32IZFHMIN-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32IZFHMIN-NEXT: fcvt.s.h fa0, fa0
; RV32IZFHMIN-NEXT: fcvt.s.h fa1, fa1
-; RV32IZFHMIN-NEXT: call fmodf@plt
+; RV32IZFHMIN-NEXT: call fmodf
; RV32IZFHMIN-NEXT: fcvt.h.s fa0, fa0
; RV32IZFHMIN-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32IZFHMIN-NEXT: addi sp, sp, 16
@@ -91,7 +91,7 @@ define half @frem_f16(half %a, half %b) nounwind {
; RV64IZFHMIN-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64IZFHMIN-NEXT: fcvt.s.h fa0, fa0
; RV64IZFHMIN-NEXT: fcvt.s.h fa1, fa1
-; RV64IZFHMIN-NEXT: call fmodf@plt
+; RV64IZFHMIN-NEXT: call fmodf
; RV64IZFHMIN-NEXT: fcvt.h.s fa0, fa0
; RV64IZFHMIN-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64IZFHMIN-NEXT: addi sp, sp, 16
@@ -103,7 +103,7 @@ define half @frem_f16(half %a, half %b) nounwind {
; RV32IZHINXMIN-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32IZHINXMIN-NEXT: fcvt.s.h a0, a0
; RV32IZHINXMIN-NEXT: fcvt.s.h a1, a1
-; RV32IZHINXMIN-NEXT: call fmodf@plt
+; RV32IZHINXMIN-NEXT: call fmodf
; RV32IZHINXMIN-NEXT: fcvt.h.s a0, a0
; RV32IZHINXMIN-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32IZHINXMIN-NEXT: addi sp, sp, 16
@@ -115,7 +115,7 @@ define half @frem_f16(half %a, half %b) nounwind {
; RV64IZHINXMIN-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64IZHINXMIN-NEXT: fcvt.s.h a0, a0
; RV64IZHINXMIN-NEXT: fcvt.s.h a1, a1
-; RV64IZHINXMIN-NEXT: call fmodf@plt
+; RV64IZHINXMIN-NEXT: call fmodf
; RV64IZHINXMIN-NEXT: fcvt.h.s a0, a0
; RV64IZHINXMIN-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64IZHINXMIN-NEXT: addi sp, sp, 16
diff --git a/llvm/test/CodeGen/RISCV/half-intrinsics.ll b/llvm/test/CodeGen/RISCV/half-intrinsics.ll
index 2d4b7538c34d..c493a9b2cb1d 100644
--- a/llvm/test/CodeGen/RISCV/half-intrinsics.ll
+++ b/llvm/test/CodeGen/RISCV/half-intrinsics.ll
@@ -75,9 +75,9 @@ define half @sqrt_f16(half %a) nounwind {
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT: slli a0, a0, 16
; RV32I-NEXT: srli a0, a0, 16
-; RV32I-NEXT: call __extendhfsf2@plt
-; RV32I-NEXT: call sqrtf@plt
-; RV32I-NEXT: call __truncsfhf2@plt
+; RV32I-NEXT: call __extendhfsf2
+; RV32I-NEXT: call sqrtf
+; RV32I-NEXT: call __truncsfhf2
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
@@ -88,9 +88,9 @@ define half @sqrt_f16(half %a) nounwind {
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT: slli a0, a0, 48
; RV64I-NEXT: srli a0, a0, 48
-; RV64I-NEXT: call __extendhfsf2@plt
-; RV64I-NEXT: call sqrtf@plt
-; RV64I-NEXT: call __truncsfhf2@plt
+; RV64I-NEXT: call __extendhfsf2
+; RV64I-NEXT: call sqrtf
+; RV64I-NEXT: call __truncsfhf2
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
@@ -120,7 +120,7 @@ define half @powi_f16(half %a, i32 %b) nounwind {
; RV32IZFH-NEXT: addi sp, sp, -16
; RV32IZFH-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32IZFH-NEXT: fcvt.s.h fa0, fa0
-; RV32IZFH-NEXT: call __powisf2@plt
+; RV32IZFH-NEXT: call __powisf2
; RV32IZFH-NEXT: fcvt.h.s fa0, fa0
; RV32IZFH-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32IZFH-NEXT: addi sp, sp, 16
@@ -132,7 +132,7 @@ define half @powi_f16(half %a, i32 %b) nounwind {
; RV64IZFH-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64IZFH-NEXT: fcvt.s.h fa0, fa0
; RV64IZFH-NEXT: sext.w a0, a0
-; RV64IZFH-NEXT: call __powisf2@plt
+; RV64IZFH-NEXT: call __powisf2
; RV64IZFH-NEXT: fcvt.h.s fa0, fa0
; RV64IZFH-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64IZFH-NEXT: addi sp, sp, 16
@@ -143,7 +143,7 @@ define half @powi_f16(half %a, i32 %b) nounwind {
; RV32IZHINX-NEXT: addi sp, sp, -16
; RV32IZHINX-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32IZHINX-NEXT: fcvt.s.h a0, a0
-; RV32IZHINX-NEXT: call __powisf2@plt
+; RV32IZHINX-NEXT: call __powisf2
; RV32IZHINX-NEXT: fcvt.h.s a0, a0
; RV32IZHINX-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32IZHINX-NEXT: addi sp, sp, 16
@@ -155,7 +155,7 @@ define half @powi_f16(half %a, i32 %b) nounwind {
; RV64IZHINX-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64IZHINX-NEXT: sext.w a1, a1
; RV64IZHINX-NEXT: fcvt.s.h a0, a0
-; RV64IZHINX-NEXT: call __powisf2@plt
+; RV64IZHINX-NEXT: call __powisf2
; RV64IZHINX-NEXT: fcvt.h.s a0, a0
; RV64IZHINX-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64IZHINX-NEXT: addi sp, sp, 16
@@ -169,10 +169,10 @@ define half @powi_f16(half %a, i32 %b) nounwind {
; RV32I-NEXT: mv s0, a1
; RV32I-NEXT: slli a0, a0, 16
; RV32I-NEXT: srli a0, a0, 16
-; RV32I-NEXT: call __extendhfsf2@plt
+; RV32I-NEXT: call __extendhfsf2
; RV32I-NEXT: mv a1, s0
-; RV32I-NEXT: call __powisf2@plt
-; RV32I-NEXT: call __truncsfhf2@plt
+; RV32I-NEXT: call __powisf2
+; RV32I-NEXT: call __truncsfhf2
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: lw s0, 8(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
@@ -186,10 +186,10 @@ define half @powi_f16(half %a, i32 %b) nounwind {
; RV64I-NEXT: mv s0, a1
; RV64I-NEXT: slli a0, a0, 48
; RV64I-NEXT: srli a0, a0, 48
-; RV64I-NEXT: call __extendhfsf2@plt
+; RV64I-NEXT: call __extendhfsf2
; RV64I-NEXT: sext.w a1, s0
-; RV64I-NEXT: call __powisf2@plt
-; RV64I-NEXT: call __truncsfhf2@plt
+; RV64I-NEXT: call __powisf2
+; RV64I-NEXT: call __truncsfhf2
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: ld s0, 0(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
@@ -200,7 +200,7 @@ define half @powi_f16(half %a, i32 %b) nounwind {
; RV32IZFHMIN-NEXT: addi sp, sp, -16
; RV32IZFHMIN-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32IZFHMIN-NEXT: fcvt.s.h fa0, fa0
-; RV32IZFHMIN-NEXT: call __powisf2@plt
+; RV32IZFHMIN-NEXT: call __powisf2
; RV32IZFHMIN-NEXT: fcvt.h.s fa0, fa0
; RV32IZFHMIN-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32IZFHMIN-NEXT: addi sp, sp, 16
@@ -212,7 +212,7 @@ define half @powi_f16(half %a, i32 %b) nounwind {
; RV64IZFHMIN-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64IZFHMIN-NEXT: fcvt.s.h fa0, fa0
; RV64IZFHMIN-NEXT: sext.w a0, a0
-; RV64IZFHMIN-NEXT: call __powisf2@plt
+; RV64IZFHMIN-NEXT: call __powisf2
; RV64IZFHMIN-NEXT: fcvt.h.s fa0, fa0
; RV64IZFHMIN-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64IZFHMIN-NEXT: addi sp, sp, 16
@@ -223,7 +223,7 @@ define half @powi_f16(half %a, i32 %b) nounwind {
; RV32IZHINXMIN-NEXT: addi sp, sp, -16
; RV32IZHINXMIN-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32IZHINXMIN-NEXT: fcvt.s.h a0, a0
-; RV32IZHINXMIN-NEXT: call __powisf2@plt
+; RV32IZHINXMIN-NEXT: call __powisf2
; RV32IZHINXMIN-NEXT: fcvt.h.s a0, a0
; RV32IZHINXMIN-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32IZHINXMIN-NEXT: addi sp, sp, 16
@@ -235,7 +235,7 @@ define half @powi_f16(half %a, i32 %b) nounwind {
; RV64IZHINXMIN-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64IZHINXMIN-NEXT: sext.w a1, a1
; RV64IZHINXMIN-NEXT: fcvt.s.h a0, a0
-; RV64IZHINXMIN-NEXT: call __powisf2@plt
+; RV64IZHINXMIN-NEXT: call __powisf2
; RV64IZHINXMIN-NEXT: fcvt.h.s a0, a0
; RV64IZHINXMIN-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64IZHINXMIN-NEXT: addi sp, sp, 16
@@ -252,7 +252,7 @@ define half @sin_f16(half %a) nounwind {
; RV32IZFH-NEXT: addi sp, sp, -16
; RV32IZFH-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32IZFH-NEXT: fcvt.s.h fa0, fa0
-; RV32IZFH-NEXT: call sinf@plt
+; RV32IZFH-NEXT: call sinf
; RV32IZFH-NEXT: fcvt.h.s fa0, fa0
; RV32IZFH-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32IZFH-NEXT: addi sp, sp, 16
@@ -263,7 +263,7 @@ define half @sin_f16(half %a) nounwind {
; RV64IZFH-NEXT: addi sp, sp, -16
; RV64IZFH-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64IZFH-NEXT: fcvt.s.h fa0, fa0
-; RV64IZFH-NEXT: call sinf@plt
+; RV64IZFH-NEXT: call sinf
; RV64IZFH-NEXT: fcvt.h.s fa0, fa0
; RV64IZFH-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64IZFH-NEXT: addi sp, sp, 16
@@ -274,7 +274,7 @@ define half @sin_f16(half %a) nounwind {
; RV32IZHINX-NEXT: addi sp, sp, -16
; RV32IZHINX-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32IZHINX-NEXT: fcvt.s.h a0, a0
-; RV32IZHINX-NEXT: call sinf@plt
+; RV32IZHINX-NEXT: call sinf
; RV32IZHINX-NEXT: fcvt.h.s a0, a0
; RV32IZHINX-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32IZHINX-NEXT: addi sp, sp, 16
@@ -285,7 +285,7 @@ define half @sin_f16(half %a) nounwind {
; RV64IZHINX-NEXT: addi sp, sp, -16
; RV64IZHINX-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64IZHINX-NEXT: fcvt.s.h a0, a0
-; RV64IZHINX-NEXT: call sinf@plt
+; RV64IZHINX-NEXT: call sinf
; RV64IZHINX-NEXT: fcvt.h.s a0, a0
; RV64IZHINX-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64IZHINX-NEXT: addi sp, sp, 16
@@ -297,9 +297,9 @@ define half @sin_f16(half %a) nounwind {
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT: slli a0, a0, 16
; RV32I-NEXT: srli a0, a0, 16
-; RV32I-NEXT: call __extendhfsf2@plt
-; RV32I-NEXT: call sinf@plt
-; RV32I-NEXT: call __truncsfhf2@plt
+; RV32I-NEXT: call __extendhfsf2
+; RV32I-NEXT: call sinf
+; RV32I-NEXT: call __truncsfhf2
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
@@ -310,9 +310,9 @@ define half @sin_f16(half %a) nounwind {
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT: slli a0, a0, 48
; RV64I-NEXT: srli a0, a0, 48
-; RV64I-NEXT: call __extendhfsf2@plt
-; RV64I-NEXT: call sinf@plt
-; RV64I-NEXT: call __truncsfhf2@plt
+; RV64I-NEXT: call __extendhfsf2
+; RV64I-NEXT: call sinf
+; RV64I-NEXT: call __truncsfhf2
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
@@ -322,7 +322,7 @@ define half @sin_f16(half %a) nounwind {
; RV32IZFHMIN-NEXT: addi sp, sp, -16
; RV32IZFHMIN-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32IZFHMIN-NEXT: fcvt.s.h fa0, fa0
-; RV32IZFHMIN-NEXT: call sinf@plt
+; RV32IZFHMIN-NEXT: call sinf
; RV32IZFHMIN-NEXT: fcvt.h.s fa0, fa0
; RV32IZFHMIN-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32IZFHMIN-NEXT: addi sp, sp, 16
@@ -333,7 +333,7 @@ define half @sin_f16(half %a) nounwind {
; RV64IZFHMIN-NEXT: addi sp, sp, -16
; RV64IZFHMIN-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64IZFHMIN-NEXT: fcvt.s.h fa0, fa0
-; RV64IZFHMIN-NEXT: call sinf@plt
+; RV64IZFHMIN-NEXT: call sinf
; RV64IZFHMIN-NEXT: fcvt.h.s fa0, fa0
; RV64IZFHMIN-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64IZFHMIN-NEXT: addi sp, sp, 16
@@ -344,7 +344,7 @@ define half @sin_f16(half %a) nounwind {
; RV32IZHINXMIN-NEXT: addi sp, sp, -16
; RV32IZHINXMIN-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32IZHINXMIN-NEXT: fcvt.s.h a0, a0
-; RV32IZHINXMIN-NEXT: call sinf@plt
+; RV32IZHINXMIN-NEXT: call sinf
; RV32IZHINXMIN-NEXT: fcvt.h.s a0, a0
; RV32IZHINXMIN-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32IZHINXMIN-NEXT: addi sp, sp, 16
@@ -355,7 +355,7 @@ define half @sin_f16(half %a) nounwind {
; RV64IZHINXMIN-NEXT: addi sp, sp, -16
; RV64IZHINXMIN-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64IZHINXMIN-NEXT: fcvt.s.h a0, a0
-; RV64IZHINXMIN-NEXT: call sinf@plt
+; RV64IZHINXMIN-NEXT: call sinf
; RV64IZHINXMIN-NEXT: fcvt.h.s a0, a0
; RV64IZHINXMIN-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64IZHINXMIN-NEXT: addi sp, sp, 16
@@ -372,7 +372,7 @@ define half @cos_f16(half %a) nounwind {
; RV32IZFH-NEXT: addi sp, sp, -16
; RV32IZFH-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32IZFH-NEXT: fcvt.s.h fa0, fa0
-; RV32IZFH-NEXT: call cosf@plt
+; RV32IZFH-NEXT: call cosf
; RV32IZFH-NEXT: fcvt.h.s fa0, fa0
; RV32IZFH-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32IZFH-NEXT: addi sp, sp, 16
@@ -383,7 +383,7 @@ define half @cos_f16(half %a) nounwind {
; RV64IZFH-NEXT: addi sp, sp, -16
; RV64IZFH-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64IZFH-NEXT: fcvt.s.h fa0, fa0
-; RV64IZFH-NEXT: call cosf@plt
+; RV64IZFH-NEXT: call cosf
; RV64IZFH-NEXT: fcvt.h.s fa0, fa0
; RV64IZFH-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64IZFH-NEXT: addi sp, sp, 16
@@ -394,7 +394,7 @@ define half @cos_f16(half %a) nounwind {
; RV32IZHINX-NEXT: addi sp, sp, -16
; RV32IZHINX-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32IZHINX-NEXT: fcvt.s.h a0, a0
-; RV32IZHINX-NEXT: call cosf@plt
+; RV32IZHINX-NEXT: call cosf
; RV32IZHINX-NEXT: fcvt.h.s a0, a0
; RV32IZHINX-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32IZHINX-NEXT: addi sp, sp, 16
@@ -405,7 +405,7 @@ define half @cos_f16(half %a) nounwind {
; RV64IZHINX-NEXT: addi sp, sp, -16
; RV64IZHINX-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64IZHINX-NEXT: fcvt.s.h a0, a0
-; RV64IZHINX-NEXT: call cosf@plt
+; RV64IZHINX-NEXT: call cosf
; RV64IZHINX-NEXT: fcvt.h.s a0, a0
; RV64IZHINX-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64IZHINX-NEXT: addi sp, sp, 16
@@ -417,9 +417,9 @@ define half @cos_f16(half %a) nounwind {
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT: slli a0, a0, 16
; RV32I-NEXT: srli a0, a0, 16
-; RV32I-NEXT: call __extendhfsf2@plt
-; RV32I-NEXT: call cosf@plt
-; RV32I-NEXT: call __truncsfhf2@plt
+; RV32I-NEXT: call __extendhfsf2
+; RV32I-NEXT: call cosf
+; RV32I-NEXT: call __truncsfhf2
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
@@ -430,9 +430,9 @@ define half @cos_f16(half %a) nounwind {
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT: slli a0, a0, 48
; RV64I-NEXT: srli a0, a0, 48
-; RV64I-NEXT: call __extendhfsf2@plt
-; RV64I-NEXT: call cosf@plt
-; RV64I-NEXT: call __truncsfhf2@plt
+; RV64I-NEXT: call __extendhfsf2
+; RV64I-NEXT: call cosf
+; RV64I-NEXT: call __truncsfhf2
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
@@ -442,7 +442,7 @@ define half @cos_f16(half %a) nounwind {
; RV32IZFHMIN-NEXT: addi sp, sp, -16
; RV32IZFHMIN-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32IZFHMIN-NEXT: fcvt.s.h fa0, fa0
-; RV32IZFHMIN-NEXT: call cosf@plt
+; RV32IZFHMIN-NEXT: call cosf
; RV32IZFHMIN-NEXT: fcvt.h.s fa0, fa0
; RV32IZFHMIN-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32IZFHMIN-NEXT: addi sp, sp, 16
@@ -453,7 +453,7 @@ define half @cos_f16(half %a) nounwind {
; RV64IZFHMIN-NEXT: addi sp, sp, -16
; RV64IZFHMIN-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64IZFHMIN-NEXT: fcvt.s.h fa0, fa0
-; RV64IZFHMIN-NEXT: call cosf@plt
+; RV64IZFHMIN-NEXT: call cosf
; RV64IZFHMIN-NEXT: fcvt.h.s fa0, fa0
; RV64IZFHMIN-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64IZFHMIN-NEXT: addi sp, sp, 16
@@ -464,7 +464,7 @@ define half @cos_f16(half %a) nounwind {
; RV32IZHINXMIN-NEXT: addi sp, sp, -16
; RV32IZHINXMIN-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32IZHINXMIN-NEXT: fcvt.s.h a0, a0
-; RV32IZHINXMIN-NEXT: call cosf@plt
+; RV32IZHINXMIN-NEXT: call cosf
; RV32IZHINXMIN-NEXT: fcvt.h.s a0, a0
; RV32IZHINXMIN-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32IZHINXMIN-NEXT: addi sp, sp, 16
@@ -475,7 +475,7 @@ define half @cos_f16(half %a) nounwind {
; RV64IZHINXMIN-NEXT: addi sp, sp, -16
; RV64IZHINXMIN-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64IZHINXMIN-NEXT: fcvt.s.h a0, a0
-; RV64IZHINXMIN-NEXT: call cosf@plt
+; RV64IZHINXMIN-NEXT: call cosf
; RV64IZHINXMIN-NEXT: fcvt.h.s a0, a0
; RV64IZHINXMIN-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64IZHINXMIN-NEXT: addi sp, sp, 16
@@ -494,10 +494,10 @@ define half @sincos_f16(half %a) nounwind {
; RV32IFZFH-NEXT: fsw fs1, 4(sp) # 4-byte Folded Spill
; RV32IFZFH-NEXT: fcvt.s.h fs0, fa0
; RV32IFZFH-NEXT: fmv.s fa0, fs0
-; RV32IFZFH-NEXT: call sinf@plt
+; RV32IFZFH-NEXT: call sinf
; RV32IFZFH-NEXT: fcvt.h.s fs1, fa0
; RV32IFZFH-NEXT: fmv.s fa0, fs0
-; RV32IFZFH-NEXT: call cosf@plt
+; RV32IFZFH-NEXT: call cosf
; RV32IFZFH-NEXT: fcvt.h.s fa5, fa0
; RV32IFZFH-NEXT: fadd.h fa0, fs1, fa5
; RV32IFZFH-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
@@ -514,10 +514,10 @@ define half @sincos_f16(half %a) nounwind {
; RV64IFZFH-NEXT: fsw fs1, 0(sp) # 4-byte Folded Spill
; RV64IFZFH-NEXT: fcvt.s.h fs0, fa0
; RV64IFZFH-NEXT: fmv.s fa0, fs0
-; RV64IFZFH-NEXT: call sinf@plt
+; RV64IFZFH-NEXT: call sinf
; RV64IFZFH-NEXT: fcvt.h.s fs1, fa0
; RV64IFZFH-NEXT: fmv.s fa0, fs0
-; RV64IFZFH-NEXT: call cosf@plt
+; RV64IFZFH-NEXT: call cosf
; RV64IFZFH-NEXT: fcvt.h.s fa5, fa0
; RV64IFZFH-NEXT: fadd.h fa0, fs1, fa5
; RV64IFZFH-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
@@ -534,10 +534,10 @@ define half @sincos_f16(half %a) nounwind {
; RV32IZHINX-NEXT: sw s1, 4(sp) # 4-byte Folded Spill
; RV32IZHINX-NEXT: fcvt.s.h s0, a0
; RV32IZHINX-NEXT: mv a0, s0
-; RV32IZHINX-NEXT: call sinf@plt
+; RV32IZHINX-NEXT: call sinf
; RV32IZHINX-NEXT: fcvt.h.s s1, a0
; RV32IZHINX-NEXT: mv a0, s0
-; RV32IZHINX-NEXT: call cosf@plt
+; RV32IZHINX-NEXT: call cosf
; RV32IZHINX-NEXT: fcvt.h.s a0, a0
; RV32IZHINX-NEXT: fadd.h a0, s1, a0
; RV32IZHINX-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
@@ -554,10 +554,10 @@ define half @sincos_f16(half %a) nounwind {
; RV64IZHINX-NEXT: sd s1, 8(sp) # 8-byte Folded Spill
; RV64IZHINX-NEXT: fcvt.s.h s0, a0
; RV64IZHINX-NEXT: mv a0, s0
-; RV64IZHINX-NEXT: call sinf@plt
+; RV64IZHINX-NEXT: call sinf
; RV64IZHINX-NEXT: fcvt.h.s s1, a0
; RV64IZHINX-NEXT: mv a0, s0
-; RV64IZHINX-NEXT: call cosf@plt
+; RV64IZHINX-NEXT: call cosf
; RV64IZHINX-NEXT: fcvt.h.s a0, a0
; RV64IZHINX-NEXT: fadd.h a0, s1, a0
; RV64IZHINX-NEXT: ld ra, 24(sp) # 8-byte Folded Reload
@@ -574,10 +574,10 @@ define half @sincos_f16(half %a) nounwind {
; RV32IDZFH-NEXT: fsd fs1, 8(sp) # 8-byte Folded Spill
; RV32IDZFH-NEXT: fcvt.s.h fs0, fa0
; RV32IDZFH-NEXT: fmv.s fa0, fs0
-; RV32IDZFH-NEXT: call sinf@plt
+; RV32IDZFH-NEXT: call sinf
; RV32IDZFH-NEXT: fcvt.h.s fs1, fa0
; RV32IDZFH-NEXT: fmv.s fa0, fs0
-; RV32IDZFH-NEXT: call cosf@plt
+; RV32IDZFH-NEXT: call cosf
; RV32IDZFH-NEXT: fcvt.h.s fa5, fa0
; RV32IDZFH-NEXT: fadd.h fa0, fs1, fa5
; RV32IDZFH-NEXT: lw ra, 28(sp) # 4-byte Folded Reload
@@ -594,10 +594,10 @@ define half @sincos_f16(half %a) nounwind {
; RV64IDZFH-NEXT: fsd fs1, 8(sp) # 8-byte Folded Spill
; RV64IDZFH-NEXT: fcvt.s.h fs0, fa0
; RV64IDZFH-NEXT: fmv.s fa0, fs0
-; RV64IDZFH-NEXT: call sinf@plt
+; RV64IDZFH-NEXT: call sinf
; RV64IDZFH-NEXT: fcvt.h.s fs1, fa0
; RV64IDZFH-NEXT: fmv.s fa0, fs0
-; RV64IDZFH-NEXT: call cosf@plt
+; RV64IDZFH-NEXT: call cosf
; RV64IDZFH-NEXT: fcvt.h.s fa5, fa0
; RV64IDZFH-NEXT: fadd.h fa0, fs1, fa5
; RV64IDZFH-NEXT: ld ra, 24(sp) # 8-byte Folded Reload
@@ -616,24 +616,24 @@ define half @sincos_f16(half %a) nounwind {
; RV32I-NEXT: lui a1, 16
; RV32I-NEXT: addi s2, a1, -1
; RV32I-NEXT: and a0, a0, s2
-; RV32I-NEXT: call __extendhfsf2@plt
+; RV32I-NEXT: call __extendhfsf2
; RV32I-NEXT: mv s0, a0
-; RV32I-NEXT: call sinf@plt
-; RV32I-NEXT: call __truncsfhf2@plt
+; RV32I-NEXT: call sinf
+; RV32I-NEXT: call __truncsfhf2
; RV32I-NEXT: mv s1, a0
; RV32I-NEXT: mv a0, s0
-; RV32I-NEXT: call cosf@plt
-; RV32I-NEXT: call __truncsfhf2@plt
+; RV32I-NEXT: call cosf
+; RV32I-NEXT: call __truncsfhf2
; RV32I-NEXT: mv s0, a0
; RV32I-NEXT: and a0, s1, s2
-; RV32I-NEXT: call __extendhfsf2@plt
+; RV32I-NEXT: call __extendhfsf2
; RV32I-NEXT: mv s1, a0
; RV32I-NEXT: and a0, s0, s2
-; RV32I-NEXT: call __extendhfsf2@plt
+; RV32I-NEXT: call __extendhfsf2
; RV32I-NEXT: mv a1, a0
; RV32I-NEXT: mv a0, s1
-; RV32I-NEXT: call __addsf3@plt
-; RV32I-NEXT: call __truncsfhf2@plt
+; RV32I-NEXT: call __addsf3
+; RV32I-NEXT: call __truncsfhf2
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: lw s0, 8(sp) # 4-byte Folded Reload
; RV32I-NEXT: lw s1, 4(sp) # 4-byte Folded Reload
@@ -651,24 +651,24 @@ define half @sincos_f16(half %a) nounwind {
; RV64I-NEXT: lui a1, 16
; RV64I-NEXT: addiw s2, a1, -1
; RV64I-NEXT: and a0, a0, s2
-; RV64I-NEXT: call __extendhfsf2@plt
+; RV64I-NEXT: call __extendhfsf2
; RV64I-NEXT: mv s0, a0
-; RV64I-NEXT: call sinf@plt
-; RV64I-NEXT: call __truncsfhf2@plt
+; RV64I-NEXT: call sinf
+; RV64I-NEXT: call __truncsfhf2
; RV64I-NEXT: mv s1, a0
; RV64I-NEXT: mv a0, s0
-; RV64I-NEXT: call cosf@plt
-; RV64I-NEXT: call __truncsfhf2@plt
+; RV64I-NEXT: call cosf
+; RV64I-NEXT: call __truncsfhf2
; RV64I-NEXT: mv s0, a0
; RV64I-NEXT: and a0, s1, s2
-; RV64I-NEXT: call __extendhfsf2@plt
+; RV64I-NEXT: call __extendhfsf2
; RV64I-NEXT: mv s1, a0
; RV64I-NEXT: and a0, s0, s2
-; RV64I-NEXT: call __extendhfsf2@plt
+; RV64I-NEXT: call __extendhfsf2
; RV64I-NEXT: mv a1, a0
; RV64I-NEXT: mv a0, s1
-; RV64I-NEXT: call __addsf3@plt
-; RV64I-NEXT: call __truncsfhf2@plt
+; RV64I-NEXT: call __addsf3
+; RV64I-NEXT: call __truncsfhf2
; RV64I-NEXT: ld ra, 24(sp) # 8-byte Folded Reload
; RV64I-NEXT: ld s0, 16(sp) # 8-byte Folded Reload
; RV64I-NEXT: ld s1, 8(sp) # 8-byte Folded Reload
@@ -684,10 +684,10 @@ define half @sincos_f16(half %a) nounwind {
; RV32IFZFHMIN-NEXT: fsw fs1, 4(sp) # 4-byte Folded Spill
; RV32IFZFHMIN-NEXT: fcvt.s.h fs0, fa0
; RV32IFZFHMIN-NEXT: fmv.s fa0, fs0
-; RV32IFZFHMIN-NEXT: call sinf@plt
+; RV32IFZFHMIN-NEXT: call sinf
; RV32IFZFHMIN-NEXT: fcvt.h.s fs1, fa0
; RV32IFZFHMIN-NEXT: fmv.s fa0, fs0
-; RV32IFZFHMIN-NEXT: call cosf@plt
+; RV32IFZFHMIN-NEXT: call cosf
; RV32IFZFHMIN-NEXT: fcvt.h.s fa5, fa0
; RV32IFZFHMIN-NEXT: fcvt.s.h fa5, fa5
; RV32IFZFHMIN-NEXT: fcvt.s.h fa4, fs1
@@ -707,10 +707,10 @@ define half @sincos_f16(half %a) nounwind {
; RV64IFZFHMIN-NEXT: fsw fs1, 0(sp) # 4-byte Folded Spill
; RV64IFZFHMIN-NEXT: fcvt.s.h fs0, fa0
; RV64IFZFHMIN-NEXT: fmv.s fa0, fs0
-; RV64IFZFHMIN-NEXT: call sinf@plt
+; RV64IFZFHMIN-NEXT: call sinf
; RV64IFZFHMIN-NEXT: fcvt.h.s fs1, fa0
; RV64IFZFHMIN-NEXT: fmv.s fa0, fs0
-; RV64IFZFHMIN-NEXT: call cosf@plt
+; RV64IFZFHMIN-NEXT: call cosf
; RV64IFZFHMIN-NEXT: fcvt.h.s fa5, fa0
; RV64IFZFHMIN-NEXT: fcvt.s.h fa5, fa5
; RV64IFZFHMIN-NEXT: fcvt.s.h fa4, fs1
@@ -730,10 +730,10 @@ define half @sincos_f16(half %a) nounwind {
; RV32IDZFHMIN-NEXT: fsd fs1, 8(sp) # 8-byte Folded Spill
; RV32IDZFHMIN-NEXT: fcvt.s.h fs0, fa0
; RV32IDZFHMIN-NEXT: fmv.s fa0, fs0
-; RV32IDZFHMIN-NEXT: call sinf@plt
+; RV32IDZFHMIN-NEXT: call sinf
; RV32IDZFHMIN-NEXT: fcvt.h.s fs1, fa0
; RV32IDZFHMIN-NEXT: fmv.s fa0, fs0
-; RV32IDZFHMIN-NEXT: call cosf@plt
+; RV32IDZFHMIN-NEXT: call cosf
; RV32IDZFHMIN-NEXT: fcvt.h.s fa5, fa0
; RV32IDZFHMIN-NEXT: fcvt.s.h fa5, fa5
; RV32IDZFHMIN-NEXT: fcvt.s.h fa4, fs1
@@ -753,10 +753,10 @@ define half @sincos_f16(half %a) nounwind {
; RV64IDZFHMIN-NEXT: fsd fs1, 8(sp) # 8-byte Folded Spill
; RV64IDZFHMIN-NEXT: fcvt.s.h fs0, fa0
; RV64IDZFHMIN-NEXT: fmv.s fa0, fs0
-; RV64IDZFHMIN-NEXT: call sinf@plt
+; RV64IDZFHMIN-NEXT: call sinf
; RV64IDZFHMIN-NEXT: fcvt.h.s fs1, fa0
; RV64IDZFHMIN-NEXT: fmv.s fa0, fs0
-; RV64IDZFHMIN-NEXT: call cosf@plt
+; RV64IDZFHMIN-NEXT: call cosf
; RV64IDZFHMIN-NEXT: fcvt.h.s fa5, fa0
; RV64IDZFHMIN-NEXT: fcvt.s.h fa5, fa5
; RV64IDZFHMIN-NEXT: fcvt.s.h fa4, fs1
@@ -776,10 +776,10 @@ define half @sincos_f16(half %a) nounwind {
; RV32IZHINXMIN-NEXT: sw s1, 4(sp) # 4-byte Folded Spill
; RV32IZHINXMIN-NEXT: fcvt.s.h s0, a0
; RV32IZHINXMIN-NEXT: mv a0, s0
-; RV32IZHINXMIN-NEXT: call sinf@plt
+; RV32IZHINXMIN-NEXT: call sinf
; RV32IZHINXMIN-NEXT: fcvt.h.s s1, a0
; RV32IZHINXMIN-NEXT: mv a0, s0
-; RV32IZHINXMIN-NEXT: call cosf@plt
+; RV32IZHINXMIN-NEXT: call cosf
; RV32IZHINXMIN-NEXT: fcvt.h.s a0, a0
; RV32IZHINXMIN-NEXT: fcvt.s.h a0, a0
; RV32IZHINXMIN-NEXT: fcvt.s.h a1, s1
@@ -799,10 +799,10 @@ define half @sincos_f16(half %a) nounwind {
; RV64IZHINXMIN-NEXT: sd s1, 8(sp) # 8-byte Folded Spill
; RV64IZHINXMIN-NEXT: fcvt.s.h s0, a0
; RV64IZHINXMIN-NEXT: mv a0, s0
-; RV64IZHINXMIN-NEXT: call sinf@plt
+; RV64IZHINXMIN-NEXT: call sinf
; RV64IZHINXMIN-NEXT: fcvt.h.s s1, a0
; RV64IZHINXMIN-NEXT: mv a0, s0
-; RV64IZHINXMIN-NEXT: call cosf@plt
+; RV64IZHINXMIN-NEXT: call cosf
; RV64IZHINXMIN-NEXT: fcvt.h.s a0, a0
; RV64IZHINXMIN-NEXT: fcvt.s.h a0, a0
; RV64IZHINXMIN-NEXT: fcvt.s.h a1, s1
@@ -828,7 +828,7 @@ define half @pow_f16(half %a, half %b) nounwind {
; RV32IZFH-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32IZFH-NEXT: fcvt.s.h fa0, fa0
; RV32IZFH-NEXT: fcvt.s.h fa1, fa1
-; RV32IZFH-NEXT: call powf@plt
+; RV32IZFH-NEXT: call powf
; RV32IZFH-NEXT: fcvt.h.s fa0, fa0
; RV32IZFH-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32IZFH-NEXT: addi sp, sp, 16
@@ -840,7 +840,7 @@ define half @pow_f16(half %a, half %b) nounwind {
; RV64IZFH-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64IZFH-NEXT: fcvt.s.h fa0, fa0
; RV64IZFH-NEXT: fcvt.s.h fa1, fa1
-; RV64IZFH-NEXT: call powf@plt
+; RV64IZFH-NEXT: call powf
; RV64IZFH-NEXT: fcvt.h.s fa0, fa0
; RV64IZFH-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64IZFH-NEXT: addi sp, sp, 16
@@ -852,7 +852,7 @@ define half @pow_f16(half %a, half %b) nounwind {
; RV32IZHINX-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32IZHINX-NEXT: fcvt.s.h a0, a0
; RV32IZHINX-NEXT: fcvt.s.h a1, a1
-; RV32IZHINX-NEXT: call powf@plt
+; RV32IZHINX-NEXT: call powf
; RV32IZHINX-NEXT: fcvt.h.s a0, a0
; RV32IZHINX-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32IZHINX-NEXT: addi sp, sp, 16
@@ -864,7 +864,7 @@ define half @pow_f16(half %a, half %b) nounwind {
; RV64IZHINX-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64IZHINX-NEXT: fcvt.s.h a0, a0
; RV64IZHINX-NEXT: fcvt.s.h a1, a1
-; RV64IZHINX-NEXT: call powf@plt
+; RV64IZHINX-NEXT: call powf
; RV64IZHINX-NEXT: fcvt.h.s a0, a0
; RV64IZHINX-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64IZHINX-NEXT: addi sp, sp, 16
@@ -881,14 +881,14 @@ define half @pow_f16(half %a, half %b) nounwind {
; RV32I-NEXT: lui a1, 16
; RV32I-NEXT: addi s2, a1, -1
; RV32I-NEXT: and a0, a0, s2
-; RV32I-NEXT: call __extendhfsf2@plt
+; RV32I-NEXT: call __extendhfsf2
; RV32I-NEXT: mv s1, a0
; RV32I-NEXT: and a0, s0, s2
-; RV32I-NEXT: call __extendhfsf2@plt
+; RV32I-NEXT: call __extendhfsf2
; RV32I-NEXT: mv a1, a0
; RV32I-NEXT: mv a0, s1
-; RV32I-NEXT: call powf@plt
-; RV32I-NEXT: call __truncsfhf2@plt
+; RV32I-NEXT: call powf
+; RV32I-NEXT: call __truncsfhf2
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: lw s0, 8(sp) # 4-byte Folded Reload
; RV32I-NEXT: lw s1, 4(sp) # 4-byte Folded Reload
@@ -907,14 +907,14 @@ define half @pow_f16(half %a, half %b) nounwind {
; RV64I-NEXT: lui a1, 16
; RV64I-NEXT: addiw s2, a1, -1
; RV64I-NEXT: and a0, a0, s2
-; RV64I-NEXT: call __extendhfsf2@plt
+; RV64I-NEXT: call __extendhfsf2
; RV64I-NEXT: mv s1, a0
; RV64I-NEXT: and a0, s0, s2
-; RV64I-NEXT: call __extendhfsf2@plt
+; RV64I-NEXT: call __extendhfsf2
; RV64I-NEXT: mv a1, a0
; RV64I-NEXT: mv a0, s1
-; RV64I-NEXT: call powf@plt
-; RV64I-NEXT: call __truncsfhf2@plt
+; RV64I-NEXT: call powf
+; RV64I-NEXT: call __truncsfhf2
; RV64I-NEXT: ld ra, 24(sp) # 8-byte Folded Reload
; RV64I-NEXT: ld s0, 16(sp) # 8-byte Folded Reload
; RV64I-NEXT: ld s1, 8(sp) # 8-byte Folded Reload
@@ -928,7 +928,7 @@ define half @pow_f16(half %a, half %b) nounwind {
; RV32IZFHMIN-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32IZFHMIN-NEXT: fcvt.s.h fa0, fa0
; RV32IZFHMIN-NEXT: fcvt.s.h fa1, fa1
-; RV32IZFHMIN-NEXT: call powf@plt
+; RV32IZFHMIN-NEXT: call powf
; RV32IZFHMIN-NEXT: fcvt.h.s fa0, fa0
; RV32IZFHMIN-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32IZFHMIN-NEXT: addi sp, sp, 16
@@ -940,7 +940,7 @@ define half @pow_f16(half %a, half %b) nounwind {
; RV64IZFHMIN-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64IZFHMIN-NEXT: fcvt.s.h fa0, fa0
; RV64IZFHMIN-NEXT: fcvt.s.h fa1, fa1
-; RV64IZFHMIN-NEXT: call powf@plt
+; RV64IZFHMIN-NEXT: call powf
; RV64IZFHMIN-NEXT: fcvt.h.s fa0, fa0
; RV64IZFHMIN-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64IZFHMIN-NEXT: addi sp, sp, 16
@@ -952,7 +952,7 @@ define half @pow_f16(half %a, half %b) nounwind {
; RV32IZHINXMIN-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32IZHINXMIN-NEXT: fcvt.s.h a0, a0
; RV32IZHINXMIN-NEXT: fcvt.s.h a1, a1
-; RV32IZHINXMIN-NEXT: call powf@plt
+; RV32IZHINXMIN-NEXT: call powf
; RV32IZHINXMIN-NEXT: fcvt.h.s a0, a0
; RV32IZHINXMIN-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32IZHINXMIN-NEXT: addi sp, sp, 16
@@ -964,7 +964,7 @@ define half @pow_f16(half %a, half %b) nounwind {
; RV64IZHINXMIN-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64IZHINXMIN-NEXT: fcvt.s.h a0, a0
; RV64IZHINXMIN-NEXT: fcvt.s.h a1, a1
-; RV64IZHINXMIN-NEXT: call powf@plt
+; RV64IZHINXMIN-NEXT: call powf
; RV64IZHINXMIN-NEXT: fcvt.h.s a0, a0
; RV64IZHINXMIN-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64IZHINXMIN-NEXT: addi sp, sp, 16
@@ -981,7 +981,7 @@ define half @exp_f16(half %a) nounwind {
; RV32IZFH-NEXT: addi sp, sp, -16
; RV32IZFH-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32IZFH-NEXT: fcvt.s.h fa0, fa0
-; RV32IZFH-NEXT: call expf@plt
+; RV32IZFH-NEXT: call expf
; RV32IZFH-NEXT: fcvt.h.s fa0, fa0
; RV32IZFH-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32IZFH-NEXT: addi sp, sp, 16
@@ -992,7 +992,7 @@ define half @exp_f16(half %a) nounwind {
; RV64IZFH-NEXT: addi sp, sp, -16
; RV64IZFH-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64IZFH-NEXT: fcvt.s.h fa0, fa0
-; RV64IZFH-NEXT: call expf@plt
+; RV64IZFH-NEXT: call expf
; RV64IZFH-NEXT: fcvt.h.s fa0, fa0
; RV64IZFH-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64IZFH-NEXT: addi sp, sp, 16
@@ -1003,7 +1003,7 @@ define half @exp_f16(half %a) nounwind {
; RV32IZHINX-NEXT: addi sp, sp, -16
; RV32IZHINX-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32IZHINX-NEXT: fcvt.s.h a0, a0
-; RV32IZHINX-NEXT: call expf@plt
+; RV32IZHINX-NEXT: call expf
; RV32IZHINX-NEXT: fcvt.h.s a0, a0
; RV32IZHINX-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32IZHINX-NEXT: addi sp, sp, 16
@@ -1014,7 +1014,7 @@ define half @exp_f16(half %a) nounwind {
; RV64IZHINX-NEXT: addi sp, sp, -16
; RV64IZHINX-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64IZHINX-NEXT: fcvt.s.h a0, a0
-; RV64IZHINX-NEXT: call expf@plt
+; RV64IZHINX-NEXT: call expf
; RV64IZHINX-NEXT: fcvt.h.s a0, a0
; RV64IZHINX-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64IZHINX-NEXT: addi sp, sp, 16
@@ -1026,9 +1026,9 @@ define half @exp_f16(half %a) nounwind {
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT: slli a0, a0, 16
; RV32I-NEXT: srli a0, a0, 16
-; RV32I-NEXT: call __extendhfsf2@plt
-; RV32I-NEXT: call expf@plt
-; RV32I-NEXT: call __truncsfhf2@plt
+; RV32I-NEXT: call __extendhfsf2
+; RV32I-NEXT: call expf
+; RV32I-NEXT: call __truncsfhf2
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
@@ -1039,9 +1039,9 @@ define half @exp_f16(half %a) nounwind {
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT: slli a0, a0, 48
; RV64I-NEXT: srli a0, a0, 48
-; RV64I-NEXT: call __extendhfsf2@plt
-; RV64I-NEXT: call expf@plt
-; RV64I-NEXT: call __truncsfhf2@plt
+; RV64I-NEXT: call __extendhfsf2
+; RV64I-NEXT: call expf
+; RV64I-NEXT: call __truncsfhf2
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
@@ -1051,7 +1051,7 @@ define half @exp_f16(half %a) nounwind {
; RV32IZFHMIN-NEXT: addi sp, sp, -16
; RV32IZFHMIN-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32IZFHMIN-NEXT: fcvt.s.h fa0, fa0
-; RV32IZFHMIN-NEXT: call expf@plt
+; RV32IZFHMIN-NEXT: call expf
; RV32IZFHMIN-NEXT: fcvt.h.s fa0, fa0
; RV32IZFHMIN-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32IZFHMIN-NEXT: addi sp, sp, 16
@@ -1062,7 +1062,7 @@ define half @exp_f16(half %a) nounwind {
; RV64IZFHMIN-NEXT: addi sp, sp, -16
; RV64IZFHMIN-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64IZFHMIN-NEXT: fcvt.s.h fa0, fa0
-; RV64IZFHMIN-NEXT: call expf@plt
+; RV64IZFHMIN-NEXT: call expf
; RV64IZFHMIN-NEXT: fcvt.h.s fa0, fa0
; RV64IZFHMIN-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64IZFHMIN-NEXT: addi sp, sp, 16
@@ -1073,7 +1073,7 @@ define half @exp_f16(half %a) nounwind {
; RV32IZHINXMIN-NEXT: addi sp, sp, -16
; RV32IZHINXMIN-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32IZHINXMIN-NEXT: fcvt.s.h a0, a0
-; RV32IZHINXMIN-NEXT: call expf@plt
+; RV32IZHINXMIN-NEXT: call expf
; RV32IZHINXMIN-NEXT: fcvt.h.s a0, a0
; RV32IZHINXMIN-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32IZHINXMIN-NEXT: addi sp, sp, 16
@@ -1084,7 +1084,7 @@ define half @exp_f16(half %a) nounwind {
; RV64IZHINXMIN-NEXT: addi sp, sp, -16
; RV64IZHINXMIN-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64IZHINXMIN-NEXT: fcvt.s.h a0, a0
-; RV64IZHINXMIN-NEXT: call expf@plt
+; RV64IZHINXMIN-NEXT: call expf
; RV64IZHINXMIN-NEXT: fcvt.h.s a0, a0
; RV64IZHINXMIN-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64IZHINXMIN-NEXT: addi sp, sp, 16
@@ -1101,7 +1101,7 @@ define half @exp2_f16(half %a) nounwind {
; RV32IZFH-NEXT: addi sp, sp, -16
; RV32IZFH-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32IZFH-NEXT: fcvt.s.h fa0, fa0
-; RV32IZFH-NEXT: call exp2f@plt
+; RV32IZFH-NEXT: call exp2f
; RV32IZFH-NEXT: fcvt.h.s fa0, fa0
; RV32IZFH-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32IZFH-NEXT: addi sp, sp, 16
@@ -1112,7 +1112,7 @@ define half @exp2_f16(half %a) nounwind {
; RV64IZFH-NEXT: addi sp, sp, -16
; RV64IZFH-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64IZFH-NEXT: fcvt.s.h fa0, fa0
-; RV64IZFH-NEXT: call exp2f@plt
+; RV64IZFH-NEXT: call exp2f
; RV64IZFH-NEXT: fcvt.h.s fa0, fa0
; RV64IZFH-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64IZFH-NEXT: addi sp, sp, 16
@@ -1123,7 +1123,7 @@ define half @exp2_f16(half %a) nounwind {
; RV32IZHINX-NEXT: addi sp, sp, -16
; RV32IZHINX-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32IZHINX-NEXT: fcvt.s.h a0, a0
-; RV32IZHINX-NEXT: call exp2f@plt
+; RV32IZHINX-NEXT: call exp2f
; RV32IZHINX-NEXT: fcvt.h.s a0, a0
; RV32IZHINX-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32IZHINX-NEXT: addi sp, sp, 16
@@ -1134,7 +1134,7 @@ define half @exp2_f16(half %a) nounwind {
; RV64IZHINX-NEXT: addi sp, sp, -16
; RV64IZHINX-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64IZHINX-NEXT: fcvt.s.h a0, a0
-; RV64IZHINX-NEXT: call exp2f@plt
+; RV64IZHINX-NEXT: call exp2f
; RV64IZHINX-NEXT: fcvt.h.s a0, a0
; RV64IZHINX-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64IZHINX-NEXT: addi sp, sp, 16
@@ -1146,9 +1146,9 @@ define half @exp2_f16(half %a) nounwind {
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT: slli a0, a0, 16
; RV32I-NEXT: srli a0, a0, 16
-; RV32I-NEXT: call __extendhfsf2@plt
-; RV32I-NEXT: call exp2f@plt
-; RV32I-NEXT: call __truncsfhf2@plt
+; RV32I-NEXT: call __extendhfsf2
+; RV32I-NEXT: call exp2f
+; RV32I-NEXT: call __truncsfhf2
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
@@ -1159,9 +1159,9 @@ define half @exp2_f16(half %a) nounwind {
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT: slli a0, a0, 48
; RV64I-NEXT: srli a0, a0, 48
-; RV64I-NEXT: call __extendhfsf2@plt
-; RV64I-NEXT: call exp2f@plt
-; RV64I-NEXT: call __truncsfhf2@plt
+; RV64I-NEXT: call __extendhfsf2
+; RV64I-NEXT: call exp2f
+; RV64I-NEXT: call __truncsfhf2
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
@@ -1171,7 +1171,7 @@ define half @exp2_f16(half %a) nounwind {
; RV32IZFHMIN-NEXT: addi sp, sp, -16
; RV32IZFHMIN-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32IZFHMIN-NEXT: fcvt.s.h fa0, fa0
-; RV32IZFHMIN-NEXT: call exp2f@plt
+; RV32IZFHMIN-NEXT: call exp2f
; RV32IZFHMIN-NEXT: fcvt.h.s fa0, fa0
; RV32IZFHMIN-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32IZFHMIN-NEXT: addi sp, sp, 16
@@ -1182,7 +1182,7 @@ define half @exp2_f16(half %a) nounwind {
; RV64IZFHMIN-NEXT: addi sp, sp, -16
; RV64IZFHMIN-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64IZFHMIN-NEXT: fcvt.s.h fa0, fa0
-; RV64IZFHMIN-NEXT: call exp2f@plt
+; RV64IZFHMIN-NEXT: call exp2f
; RV64IZFHMIN-NEXT: fcvt.h.s fa0, fa0
; RV64IZFHMIN-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64IZFHMIN-NEXT: addi sp, sp, 16
@@ -1193,7 +1193,7 @@ define half @exp2_f16(half %a) nounwind {
; RV32IZHINXMIN-NEXT: addi sp, sp, -16
; RV32IZHINXMIN-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32IZHINXMIN-NEXT: fcvt.s.h a0, a0
-; RV32IZHINXMIN-NEXT: call exp2f@plt
+; RV32IZHINXMIN-NEXT: call exp2f
; RV32IZHINXMIN-NEXT: fcvt.h.s a0, a0
; RV32IZHINXMIN-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32IZHINXMIN-NEXT: addi sp, sp, 16
@@ -1204,7 +1204,7 @@ define half @exp2_f16(half %a) nounwind {
; RV64IZHINXMIN-NEXT: addi sp, sp, -16
; RV64IZHINXMIN-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64IZHINXMIN-NEXT: fcvt.s.h a0, a0
-; RV64IZHINXMIN-NEXT: call exp2f@plt
+; RV64IZHINXMIN-NEXT: call exp2f
; RV64IZHINXMIN-NEXT: fcvt.h.s a0, a0
; RV64IZHINXMIN-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64IZHINXMIN-NEXT: addi sp, sp, 16
@@ -1221,7 +1221,7 @@ define half @log_f16(half %a) nounwind {
; RV32IZFH-NEXT: addi sp, sp, -16
; RV32IZFH-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32IZFH-NEXT: fcvt.s.h fa0, fa0
-; RV32IZFH-NEXT: call logf@plt
+; RV32IZFH-NEXT: call logf
; RV32IZFH-NEXT: fcvt.h.s fa0, fa0
; RV32IZFH-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32IZFH-NEXT: addi sp, sp, 16
@@ -1232,7 +1232,7 @@ define half @log_f16(half %a) nounwind {
; RV64IZFH-NEXT: addi sp, sp, -16
; RV64IZFH-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64IZFH-NEXT: fcvt.s.h fa0, fa0
-; RV64IZFH-NEXT: call logf@plt
+; RV64IZFH-NEXT: call logf
; RV64IZFH-NEXT: fcvt.h.s fa0, fa0
; RV64IZFH-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64IZFH-NEXT: addi sp, sp, 16
@@ -1243,7 +1243,7 @@ define half @log_f16(half %a) nounwind {
; RV32IZHINX-NEXT: addi sp, sp, -16
; RV32IZHINX-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32IZHINX-NEXT: fcvt.s.h a0, a0
-; RV32IZHINX-NEXT: call logf@plt
+; RV32IZHINX-NEXT: call logf
; RV32IZHINX-NEXT: fcvt.h.s a0, a0
; RV32IZHINX-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32IZHINX-NEXT: addi sp, sp, 16
@@ -1254,7 +1254,7 @@ define half @log_f16(half %a) nounwind {
; RV64IZHINX-NEXT: addi sp, sp, -16
; RV64IZHINX-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64IZHINX-NEXT: fcvt.s.h a0, a0
-; RV64IZHINX-NEXT: call logf@plt
+; RV64IZHINX-NEXT: call logf
; RV64IZHINX-NEXT: fcvt.h.s a0, a0
; RV64IZHINX-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64IZHINX-NEXT: addi sp, sp, 16
@@ -1266,9 +1266,9 @@ define half @log_f16(half %a) nounwind {
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT: slli a0, a0, 16
; RV32I-NEXT: srli a0, a0, 16
-; RV32I-NEXT: call __extendhfsf2@plt
-; RV32I-NEXT: call logf@plt
-; RV32I-NEXT: call __truncsfhf2@plt
+; RV32I-NEXT: call __extendhfsf2
+; RV32I-NEXT: call logf
+; RV32I-NEXT: call __truncsfhf2
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
@@ -1279,9 +1279,9 @@ define half @log_f16(half %a) nounwind {
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT: slli a0, a0, 48
; RV64I-NEXT: srli a0, a0, 48
-; RV64I-NEXT: call __extendhfsf2@plt
-; RV64I-NEXT: call logf@plt
-; RV64I-NEXT: call __truncsfhf2@plt
+; RV64I-NEXT: call __extendhfsf2
+; RV64I-NEXT: call logf
+; RV64I-NEXT: call __truncsfhf2
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
@@ -1291,7 +1291,7 @@ define half @log_f16(half %a) nounwind {
; RV32IZFHMIN-NEXT: addi sp, sp, -16
; RV32IZFHMIN-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32IZFHMIN-NEXT: fcvt.s.h fa0, fa0
-; RV32IZFHMIN-NEXT: call logf@plt
+; RV32IZFHMIN-NEXT: call logf
; RV32IZFHMIN-NEXT: fcvt.h.s fa0, fa0
; RV32IZFHMIN-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32IZFHMIN-NEXT: addi sp, sp, 16
@@ -1302,7 +1302,7 @@ define half @log_f16(half %a) nounwind {
; RV64IZFHMIN-NEXT: addi sp, sp, -16
; RV64IZFHMIN-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64IZFHMIN-NEXT: fcvt.s.h fa0, fa0
-; RV64IZFHMIN-NEXT: call logf@plt
+; RV64IZFHMIN-NEXT: call logf
; RV64IZFHMIN-NEXT: fcvt.h.s fa0, fa0
; RV64IZFHMIN-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64IZFHMIN-NEXT: addi sp, sp, 16
@@ -1313,7 +1313,7 @@ define half @log_f16(half %a) nounwind {
; RV32IZHINXMIN-NEXT: addi sp, sp, -16
; RV32IZHINXMIN-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32IZHINXMIN-NEXT: fcvt.s.h a0, a0
-; RV32IZHINXMIN-NEXT: call logf@plt
+; RV32IZHINXMIN-NEXT: call logf
; RV32IZHINXMIN-NEXT: fcvt.h.s a0, a0
; RV32IZHINXMIN-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32IZHINXMIN-NEXT: addi sp, sp, 16
@@ -1324,7 +1324,7 @@ define half @log_f16(half %a) nounwind {
; RV64IZHINXMIN-NEXT: addi sp, sp, -16
; RV64IZHINXMIN-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64IZHINXMIN-NEXT: fcvt.s.h a0, a0
-; RV64IZHINXMIN-NEXT: call logf@plt
+; RV64IZHINXMIN-NEXT: call logf
; RV64IZHINXMIN-NEXT: fcvt.h.s a0, a0
; RV64IZHINXMIN-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64IZHINXMIN-NEXT: addi sp, sp, 16
@@ -1341,7 +1341,7 @@ define half @log10_f16(half %a) nounwind {
; RV32IZFH-NEXT: addi sp, sp, -16
; RV32IZFH-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32IZFH-NEXT: fcvt.s.h fa0, fa0
-; RV32IZFH-NEXT: call log10f@plt
+; RV32IZFH-NEXT: call log10f
; RV32IZFH-NEXT: fcvt.h.s fa0, fa0
; RV32IZFH-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32IZFH-NEXT: addi sp, sp, 16
@@ -1352,7 +1352,7 @@ define half @log10_f16(half %a) nounwind {
; RV64IZFH-NEXT: addi sp, sp, -16
; RV64IZFH-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64IZFH-NEXT: fcvt.s.h fa0, fa0
-; RV64IZFH-NEXT: call log10f@plt
+; RV64IZFH-NEXT: call log10f
; RV64IZFH-NEXT: fcvt.h.s fa0, fa0
; RV64IZFH-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64IZFH-NEXT: addi sp, sp, 16
@@ -1363,7 +1363,7 @@ define half @log10_f16(half %a) nounwind {
; RV32IZHINX-NEXT: addi sp, sp, -16
; RV32IZHINX-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32IZHINX-NEXT: fcvt.s.h a0, a0
-; RV32IZHINX-NEXT: call log10f@plt
+; RV32IZHINX-NEXT: call log10f
; RV32IZHINX-NEXT: fcvt.h.s a0, a0
; RV32IZHINX-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32IZHINX-NEXT: addi sp, sp, 16
@@ -1374,7 +1374,7 @@ define half @log10_f16(half %a) nounwind {
; RV64IZHINX-NEXT: addi sp, sp, -16
; RV64IZHINX-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64IZHINX-NEXT: fcvt.s.h a0, a0
-; RV64IZHINX-NEXT: call log10f@plt
+; RV64IZHINX-NEXT: call log10f
; RV64IZHINX-NEXT: fcvt.h.s a0, a0
; RV64IZHINX-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64IZHINX-NEXT: addi sp, sp, 16
@@ -1386,9 +1386,9 @@ define half @log10_f16(half %a) nounwind {
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT: slli a0, a0, 16
; RV32I-NEXT: srli a0, a0, 16
-; RV32I-NEXT: call __extendhfsf2@plt
-; RV32I-NEXT: call log10f@plt
-; RV32I-NEXT: call __truncsfhf2@plt
+; RV32I-NEXT: call __extendhfsf2
+; RV32I-NEXT: call log10f
+; RV32I-NEXT: call __truncsfhf2
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
@@ -1399,9 +1399,9 @@ define half @log10_f16(half %a) nounwind {
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT: slli a0, a0, 48
; RV64I-NEXT: srli a0, a0, 48
-; RV64I-NEXT: call __extendhfsf2@plt
-; RV64I-NEXT: call log10f@plt
-; RV64I-NEXT: call __truncsfhf2@plt
+; RV64I-NEXT: call __extendhfsf2
+; RV64I-NEXT: call log10f
+; RV64I-NEXT: call __truncsfhf2
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
@@ -1411,7 +1411,7 @@ define half @log10_f16(half %a) nounwind {
; RV32IZFHMIN-NEXT: addi sp, sp, -16
; RV32IZFHMIN-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32IZFHMIN-NEXT: fcvt.s.h fa0, fa0
-; RV32IZFHMIN-NEXT: call log10f@plt
+; RV32IZFHMIN-NEXT: call log10f
; RV32IZFHMIN-NEXT: fcvt.h.s fa0, fa0
; RV32IZFHMIN-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32IZFHMIN-NEXT: addi sp, sp, 16
@@ -1422,7 +1422,7 @@ define half @log10_f16(half %a) nounwind {
; RV64IZFHMIN-NEXT: addi sp, sp, -16
; RV64IZFHMIN-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64IZFHMIN-NEXT: fcvt.s.h fa0, fa0
-; RV64IZFHMIN-NEXT: call log10f@plt
+; RV64IZFHMIN-NEXT: call log10f
; RV64IZFHMIN-NEXT: fcvt.h.s fa0, fa0
; RV64IZFHMIN-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64IZFHMIN-NEXT: addi sp, sp, 16
@@ -1433,7 +1433,7 @@ define half @log10_f16(half %a) nounwind {
; RV32IZHINXMIN-NEXT: addi sp, sp, -16
; RV32IZHINXMIN-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32IZHINXMIN-NEXT: fcvt.s.h a0, a0
-; RV32IZHINXMIN-NEXT: call log10f@plt
+; RV32IZHINXMIN-NEXT: call log10f
; RV32IZHINXMIN-NEXT: fcvt.h.s a0, a0
; RV32IZHINXMIN-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32IZHINXMIN-NEXT: addi sp, sp, 16
@@ -1444,7 +1444,7 @@ define half @log10_f16(half %a) nounwind {
; RV64IZHINXMIN-NEXT: addi sp, sp, -16
; RV64IZHINXMIN-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64IZHINXMIN-NEXT: fcvt.s.h a0, a0
-; RV64IZHINXMIN-NEXT: call log10f@plt
+; RV64IZHINXMIN-NEXT: call log10f
; RV64IZHINXMIN-NEXT: fcvt.h.s a0, a0
; RV64IZHINXMIN-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64IZHINXMIN-NEXT: addi sp, sp, 16
@@ -1461,7 +1461,7 @@ define half @log2_f16(half %a) nounwind {
; RV32IZFH-NEXT: addi sp, sp, -16
; RV32IZFH-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32IZFH-NEXT: fcvt.s.h fa0, fa0
-; RV32IZFH-NEXT: call log2f@plt
+; RV32IZFH-NEXT: call log2f
; RV32IZFH-NEXT: fcvt.h.s fa0, fa0
; RV32IZFH-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32IZFH-NEXT: addi sp, sp, 16
@@ -1472,7 +1472,7 @@ define half @log2_f16(half %a) nounwind {
; RV64IZFH-NEXT: addi sp, sp, -16
; RV64IZFH-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64IZFH-NEXT: fcvt.s.h fa0, fa0
-; RV64IZFH-NEXT: call log2f@plt
+; RV64IZFH-NEXT: call log2f
; RV64IZFH-NEXT: fcvt.h.s fa0, fa0
; RV64IZFH-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64IZFH-NEXT: addi sp, sp, 16
@@ -1483,7 +1483,7 @@ define half @log2_f16(half %a) nounwind {
; RV32IZHINX-NEXT: addi sp, sp, -16
; RV32IZHINX-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32IZHINX-NEXT: fcvt.s.h a0, a0
-; RV32IZHINX-NEXT: call log2f@plt
+; RV32IZHINX-NEXT: call log2f
; RV32IZHINX-NEXT: fcvt.h.s a0, a0
; RV32IZHINX-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32IZHINX-NEXT: addi sp, sp, 16
@@ -1494,7 +1494,7 @@ define half @log2_f16(half %a) nounwind {
; RV64IZHINX-NEXT: addi sp, sp, -16
; RV64IZHINX-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64IZHINX-NEXT: fcvt.s.h a0, a0
-; RV64IZHINX-NEXT: call log2f@plt
+; RV64IZHINX-NEXT: call log2f
; RV64IZHINX-NEXT: fcvt.h.s a0, a0
; RV64IZHINX-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64IZHINX-NEXT: addi sp, sp, 16
@@ -1506,9 +1506,9 @@ define half @log2_f16(half %a) nounwind {
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT: slli a0, a0, 16
; RV32I-NEXT: srli a0, a0, 16
-; RV32I-NEXT: call __extendhfsf2@plt
-; RV32I-NEXT: call log2f@plt
-; RV32I-NEXT: call __truncsfhf2@plt
+; RV32I-NEXT: call __extendhfsf2
+; RV32I-NEXT: call log2f
+; RV32I-NEXT: call __truncsfhf2
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
@@ -1519,9 +1519,9 @@ define half @log2_f16(half %a) nounwind {
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT: slli a0, a0, 48
; RV64I-NEXT: srli a0, a0, 48
-; RV64I-NEXT: call __extendhfsf2@plt
-; RV64I-NEXT: call log2f@plt
-; RV64I-NEXT: call __truncsfhf2@plt
+; RV64I-NEXT: call __extendhfsf2
+; RV64I-NEXT: call log2f
+; RV64I-NEXT: call __truncsfhf2
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
@@ -1531,7 +1531,7 @@ define half @log2_f16(half %a) nounwind {
; RV32IZFHMIN-NEXT: addi sp, sp, -16
; RV32IZFHMIN-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32IZFHMIN-NEXT: fcvt.s.h fa0, fa0
-; RV32IZFHMIN-NEXT: call log2f@plt
+; RV32IZFHMIN-NEXT: call log2f
; RV32IZFHMIN-NEXT: fcvt.h.s fa0, fa0
; RV32IZFHMIN-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32IZFHMIN-NEXT: addi sp, sp, 16
@@ -1542,7 +1542,7 @@ define half @log2_f16(half %a) nounwind {
; RV64IZFHMIN-NEXT: addi sp, sp, -16
; RV64IZFHMIN-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64IZFHMIN-NEXT: fcvt.s.h fa0, fa0
-; RV64IZFHMIN-NEXT: call log2f@plt
+; RV64IZFHMIN-NEXT: call log2f
; RV64IZFHMIN-NEXT: fcvt.h.s fa0, fa0
; RV64IZFHMIN-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64IZFHMIN-NEXT: addi sp, sp, 16
@@ -1553,7 +1553,7 @@ define half @log2_f16(half %a) nounwind {
; RV32IZHINXMIN-NEXT: addi sp, sp, -16
; RV32IZHINXMIN-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32IZHINXMIN-NEXT: fcvt.s.h a0, a0
-; RV32IZHINXMIN-NEXT: call log2f@plt
+; RV32IZHINXMIN-NEXT: call log2f
; RV32IZHINXMIN-NEXT: fcvt.h.s a0, a0
; RV32IZHINXMIN-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32IZHINXMIN-NEXT: addi sp, sp, 16
@@ -1564,7 +1564,7 @@ define half @log2_f16(half %a) nounwind {
; RV64IZHINXMIN-NEXT: addi sp, sp, -16
; RV64IZHINXMIN-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64IZHINXMIN-NEXT: fcvt.s.h a0, a0
-; RV64IZHINXMIN-NEXT: call log2f@plt
+; RV64IZHINXMIN-NEXT: call log2f
; RV64IZHINXMIN-NEXT: fcvt.h.s a0, a0
; RV64IZHINXMIN-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64IZHINXMIN-NEXT: addi sp, sp, 16
@@ -1599,18 +1599,18 @@ define half @fma_f16(half %a, half %b, half %c) nounwind {
; RV32I-NEXT: lui a1, 16
; RV32I-NEXT: addi s3, a1, -1
; RV32I-NEXT: and a0, a0, s3
-; RV32I-NEXT: call __extendhfsf2@plt
+; RV32I-NEXT: call __extendhfsf2
; RV32I-NEXT: mv s2, a0
; RV32I-NEXT: and a0, s1, s3
-; RV32I-NEXT: call __extendhfsf2@plt
+; RV32I-NEXT: call __extendhfsf2
; RV32I-NEXT: mv s1, a0
; RV32I-NEXT: and a0, s0, s3
-; RV32I-NEXT: call __extendhfsf2@plt
+; RV32I-NEXT: call __extendhfsf2
; RV32I-NEXT: mv a2, a0
; RV32I-NEXT: mv a0, s2
; RV32I-NEXT: mv a1, s1
-; RV32I-NEXT: call fmaf@plt
-; RV32I-NEXT: call __truncsfhf2@plt
+; RV32I-NEXT: call fmaf
+; RV32I-NEXT: call __truncsfhf2
; RV32I-NEXT: lw ra, 28(sp) # 4-byte Folded Reload
; RV32I-NEXT: lw s0, 24(sp) # 4-byte Folded Reload
; RV32I-NEXT: lw s1, 20(sp) # 4-byte Folded Reload
@@ -1632,18 +1632,18 @@ define half @fma_f16(half %a, half %b, half %c) nounwind {
; RV64I-NEXT: lui a1, 16
; RV64I-NEXT: addiw s3, a1, -1
; RV64I-NEXT: and a0, a0, s3
-; RV64I-NEXT: call __extendhfsf2@plt
+; RV64I-NEXT: call __extendhfsf2
; RV64I-NEXT: mv s2, a0
; RV64I-NEXT: and a0, s1, s3
-; RV64I-NEXT: call __extendhfsf2@plt
+; RV64I-NEXT: call __extendhfsf2
; RV64I-NEXT: mv s1, a0
; RV64I-NEXT: and a0, s0, s3
-; RV64I-NEXT: call __extendhfsf2@plt
+; RV64I-NEXT: call __extendhfsf2
; RV64I-NEXT: mv a2, a0
; RV64I-NEXT: mv a0, s2
; RV64I-NEXT: mv a1, s1
-; RV64I-NEXT: call fmaf@plt
-; RV64I-NEXT: call __truncsfhf2@plt
+; RV64I-NEXT: call fmaf
+; RV64I-NEXT: call __truncsfhf2
; RV64I-NEXT: ld ra, 40(sp) # 8-byte Folded Reload
; RV64I-NEXT: ld s0, 32(sp) # 8-byte Folded Reload
; RV64I-NEXT: ld s1, 24(sp) # 8-byte Folded Reload
@@ -1699,23 +1699,23 @@ define half @fmuladd_f16(half %a, half %b, half %c) nounwind {
; RV32I-NEXT: lui a1, 16
; RV32I-NEXT: addi s3, a1, -1
; RV32I-NEXT: and a0, a0, s3
-; RV32I-NEXT: call __extendhfsf2@plt
+; RV32I-NEXT: call __extendhfsf2
; RV32I-NEXT: mv s2, a0
; RV32I-NEXT: and a0, s1, s3
-; RV32I-NEXT: call __extendhfsf2@plt
+; RV32I-NEXT: call __extendhfsf2
; RV32I-NEXT: mv a1, a0
; RV32I-NEXT: mv a0, s2
-; RV32I-NEXT: call __mulsf3@plt
-; RV32I-NEXT: call __truncsfhf2@plt
+; RV32I-NEXT: call __mulsf3
+; RV32I-NEXT: call __truncsfhf2
; RV32I-NEXT: mv s1, a0
; RV32I-NEXT: and a0, s0, s3
-; RV32I-NEXT: call __extendhfsf2@plt
+; RV32I-NEXT: call __extendhfsf2
; RV32I-NEXT: mv s0, a0
; RV32I-NEXT: and a0, s1, s3
-; RV32I-NEXT: call __extendhfsf2@plt
+; RV32I-NEXT: call __extendhfsf2
; RV32I-NEXT: mv a1, s0
-; RV32I-NEXT: call __addsf3@plt
-; RV32I-NEXT: call __truncsfhf2@plt
+; RV32I-NEXT: call __addsf3
+; RV32I-NEXT: call __truncsfhf2
; RV32I-NEXT: lw ra, 28(sp) # 4-byte Folded Reload
; RV32I-NEXT: lw s0, 24(sp) # 4-byte Folded Reload
; RV32I-NEXT: lw s1, 20(sp) # 4-byte Folded Reload
@@ -1737,23 +1737,23 @@ define half @fmuladd_f16(half %a, half %b, half %c) nounwind {
; RV64I-NEXT: lui a1, 16
; RV64I-NEXT: addiw s3, a1, -1
; RV64I-NEXT: and a0, a0, s3
-; RV64I-NEXT: call __extendhfsf2@plt
+; RV64I-NEXT: call __extendhfsf2
; RV64I-NEXT: mv s2, a0
; RV64I-NEXT: and a0, s1, s3
-; RV64I-NEXT: call __extendhfsf2@plt
+; RV64I-NEXT: call __extendhfsf2
; RV64I-NEXT: mv a1, a0
; RV64I-NEXT: mv a0, s2
-; RV64I-NEXT: call __mulsf3@plt
-; RV64I-NEXT: call __truncsfhf2@plt
+; RV64I-NEXT: call __mulsf3
+; RV64I-NEXT: call __truncsfhf2
; RV64I-NEXT: mv s1, a0
; RV64I-NEXT: and a0, s0, s3
-; RV64I-NEXT: call __extendhfsf2@plt
+; RV64I-NEXT: call __extendhfsf2
; RV64I-NEXT: mv s0, a0
; RV64I-NEXT: and a0, s1, s3
-; RV64I-NEXT: call __extendhfsf2@plt
+; RV64I-NEXT: call __extendhfsf2
; RV64I-NEXT: mv a1, s0
-; RV64I-NEXT: call __addsf3@plt
-; RV64I-NEXT: call __truncsfhf2@plt
+; RV64I-NEXT: call __addsf3
+; RV64I-NEXT: call __truncsfhf2
; RV64I-NEXT: ld ra, 40(sp) # 8-byte Folded Reload
; RV64I-NEXT: ld s0, 32(sp) # 8-byte Folded Reload
; RV64I-NEXT: ld s1, 24(sp) # 8-byte Folded Reload
@@ -1867,14 +1867,14 @@ define half @minnum_f16(half %a, half %b) nounwind {
; RV32I-NEXT: lui a1, 16
; RV32I-NEXT: addi s2, a1, -1
; RV32I-NEXT: and a0, a0, s2
-; RV32I-NEXT: call __extendhfsf2@plt
+; RV32I-NEXT: call __extendhfsf2
; RV32I-NEXT: mv s1, a0
; RV32I-NEXT: and a0, s0, s2
-; RV32I-NEXT: call __extendhfsf2@plt
+; RV32I-NEXT: call __extendhfsf2
; RV32I-NEXT: mv a1, a0
; RV32I-NEXT: mv a0, s1
-; RV32I-NEXT: call fminf@plt
-; RV32I-NEXT: call __truncsfhf2@plt
+; RV32I-NEXT: call fminf
+; RV32I-NEXT: call __truncsfhf2
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: lw s0, 8(sp) # 4-byte Folded Reload
; RV32I-NEXT: lw s1, 4(sp) # 4-byte Folded Reload
@@ -1893,14 +1893,14 @@ define half @minnum_f16(half %a, half %b) nounwind {
; RV64I-NEXT: lui a1, 16
; RV64I-NEXT: addiw s2, a1, -1
; RV64I-NEXT: and a0, a0, s2
-; RV64I-NEXT: call __extendhfsf2@plt
+; RV64I-NEXT: call __extendhfsf2
; RV64I-NEXT: mv s1, a0
; RV64I-NEXT: and a0, s0, s2
-; RV64I-NEXT: call __extendhfsf2@plt
+; RV64I-NEXT: call __extendhfsf2
; RV64I-NEXT: mv a1, a0
; RV64I-NEXT: mv a0, s1
-; RV64I-NEXT: call fminf@plt
-; RV64I-NEXT: call __truncsfhf2@plt
+; RV64I-NEXT: call fminf
+; RV64I-NEXT: call __truncsfhf2
; RV64I-NEXT: ld ra, 24(sp) # 8-byte Folded Reload
; RV64I-NEXT: ld s0, 16(sp) # 8-byte Folded Reload
; RV64I-NEXT: ld s1, 8(sp) # 8-byte Folded Reload
@@ -1951,14 +1951,14 @@ define half @maxnum_f16(half %a, half %b) nounwind {
; RV32I-NEXT: lui a1, 16
; RV32I-NEXT: addi s2, a1, -1
; RV32I-NEXT: and a0, a0, s2
-; RV32I-NEXT: call __extendhfsf2@plt
+; RV32I-NEXT: call __extendhfsf2
; RV32I-NEXT: mv s1, a0
; RV32I-NEXT: and a0, s0, s2
-; RV32I-NEXT: call __extendhfsf2@plt
+; RV32I-NEXT: call __extendhfsf2
; RV32I-NEXT: mv a1, a0
; RV32I-NEXT: mv a0, s1
-; RV32I-NEXT: call fmaxf@plt
-; RV32I-NEXT: call __truncsfhf2@plt
+; RV32I-NEXT: call fmaxf
+; RV32I-NEXT: call __truncsfhf2
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: lw s0, 8(sp) # 4-byte Folded Reload
; RV32I-NEXT: lw s1, 4(sp) # 4-byte Folded Reload
@@ -1977,14 +1977,14 @@ define half @maxnum_f16(half %a, half %b) nounwind {
; RV64I-NEXT: lui a1, 16
; RV64I-NEXT: addiw s2, a1, -1
; RV64I-NEXT: and a0, a0, s2
-; RV64I-NEXT: call __extendhfsf2@plt
+; RV64I-NEXT: call __extendhfsf2
; RV64I-NEXT: mv s1, a0
; RV64I-NEXT: and a0, s0, s2
-; RV64I-NEXT: call __extendhfsf2@plt
+; RV64I-NEXT: call __extendhfsf2
; RV64I-NEXT: mv a1, a0
; RV64I-NEXT: mv a0, s1
-; RV64I-NEXT: call fmaxf@plt
-; RV64I-NEXT: call __truncsfhf2@plt
+; RV64I-NEXT: call fmaxf
+; RV64I-NEXT: call __truncsfhf2
; RV64I-NEXT: ld ra, 24(sp) # 8-byte Folded Reload
; RV64I-NEXT: ld s0, 16(sp) # 8-byte Folded Reload
; RV64I-NEXT: ld s1, 8(sp) # 8-byte Folded Reload
@@ -2159,9 +2159,9 @@ define half @floor_f16(half %a) nounwind {
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT: slli a0, a0, 16
; RV32I-NEXT: srli a0, a0, 16
-; RV32I-NEXT: call __extendhfsf2@plt
-; RV32I-NEXT: call floorf@plt
-; RV32I-NEXT: call __truncsfhf2@plt
+; RV32I-NEXT: call __extendhfsf2
+; RV32I-NEXT: call floorf
+; RV32I-NEXT: call __truncsfhf2
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
@@ -2172,9 +2172,9 @@ define half @floor_f16(half %a) nounwind {
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT: slli a0, a0, 48
; RV64I-NEXT: srli a0, a0, 48
-; RV64I-NEXT: call __extendhfsf2@plt
-; RV64I-NEXT: call floorf@plt
-; RV64I-NEXT: call __truncsfhf2@plt
+; RV64I-NEXT: call __extendhfsf2
+; RV64I-NEXT: call floorf
+; RV64I-NEXT: call __truncsfhf2
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
@@ -2250,9 +2250,9 @@ define half @ceil_f16(half %a) nounwind {
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT: slli a0, a0, 16
; RV32I-NEXT: srli a0, a0, 16
-; RV32I-NEXT: call __extendhfsf2@plt
-; RV32I-NEXT: call ceilf@plt
-; RV32I-NEXT: call __truncsfhf2@plt
+; RV32I-NEXT: call __extendhfsf2
+; RV32I-NEXT: call ceilf
+; RV32I-NEXT: call __truncsfhf2
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
@@ -2263,9 +2263,9 @@ define half @ceil_f16(half %a) nounwind {
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT: slli a0, a0, 48
; RV64I-NEXT: srli a0, a0, 48
-; RV64I-NEXT: call __extendhfsf2@plt
-; RV64I-NEXT: call ceilf@plt
-; RV64I-NEXT: call __truncsfhf2@plt
+; RV64I-NEXT: call __extendhfsf2
+; RV64I-NEXT: call ceilf
+; RV64I-NEXT: call __truncsfhf2
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
@@ -2341,9 +2341,9 @@ define half @trunc_f16(half %a) nounwind {
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT: slli a0, a0, 16
; RV32I-NEXT: srli a0, a0, 16
-; RV32I-NEXT: call __extendhfsf2@plt
-; RV32I-NEXT: call truncf@plt
-; RV32I-NEXT: call __truncsfhf2@plt
+; RV32I-NEXT: call __extendhfsf2
+; RV32I-NEXT: call truncf
+; RV32I-NEXT: call __truncsfhf2
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
@@ -2354,9 +2354,9 @@ define half @trunc_f16(half %a) nounwind {
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT: slli a0, a0, 48
; RV64I-NEXT: srli a0, a0, 48
-; RV64I-NEXT: call __extendhfsf2@plt
-; RV64I-NEXT: call truncf@plt
-; RV64I-NEXT: call __truncsfhf2@plt
+; RV64I-NEXT: call __extendhfsf2
+; RV64I-NEXT: call truncf
+; RV64I-NEXT: call __truncsfhf2
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
@@ -2432,9 +2432,9 @@ define half @rint_f16(half %a) nounwind {
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT: slli a0, a0, 16
; RV32I-NEXT: srli a0, a0, 16
-; RV32I-NEXT: call __extendhfsf2@plt
-; RV32I-NEXT: call rintf@plt
-; RV32I-NEXT: call __truncsfhf2@plt
+; RV32I-NEXT: call __extendhfsf2
+; RV32I-NEXT: call rintf
+; RV32I-NEXT: call __truncsfhf2
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
@@ -2445,9 +2445,9 @@ define half @rint_f16(half %a) nounwind {
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT: slli a0, a0, 48
; RV64I-NEXT: srli a0, a0, 48
-; RV64I-NEXT: call __extendhfsf2@plt
-; RV64I-NEXT: call rintf@plt
-; RV64I-NEXT: call __truncsfhf2@plt
+; RV64I-NEXT: call __extendhfsf2
+; RV64I-NEXT: call rintf
+; RV64I-NEXT: call __truncsfhf2
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
@@ -2494,7 +2494,7 @@ define half @nearbyint_f16(half %a) nounwind {
; RV32IZFH-NEXT: addi sp, sp, -16
; RV32IZFH-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32IZFH-NEXT: fcvt.s.h fa0, fa0
-; RV32IZFH-NEXT: call nearbyintf@plt
+; RV32IZFH-NEXT: call nearbyintf
; RV32IZFH-NEXT: fcvt.h.s fa0, fa0
; RV32IZFH-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32IZFH-NEXT: addi sp, sp, 16
@@ -2505,7 +2505,7 @@ define half @nearbyint_f16(half %a) nounwind {
; RV64IZFH-NEXT: addi sp, sp, -16
; RV64IZFH-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64IZFH-NEXT: fcvt.s.h fa0, fa0
-; RV64IZFH-NEXT: call nearbyintf@plt
+; RV64IZFH-NEXT: call nearbyintf
; RV64IZFH-NEXT: fcvt.h.s fa0, fa0
; RV64IZFH-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64IZFH-NEXT: addi sp, sp, 16
@@ -2516,7 +2516,7 @@ define half @nearbyint_f16(half %a) nounwind {
; RV32IZHINX-NEXT: addi sp, sp, -16
; RV32IZHINX-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32IZHINX-NEXT: fcvt.s.h a0, a0
-; RV32IZHINX-NEXT: call nearbyintf@plt
+; RV32IZHINX-NEXT: call nearbyintf
; RV32IZHINX-NEXT: fcvt.h.s a0, a0
; RV32IZHINX-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32IZHINX-NEXT: addi sp, sp, 16
@@ -2527,7 +2527,7 @@ define half @nearbyint_f16(half %a) nounwind {
; RV64IZHINX-NEXT: addi sp, sp, -16
; RV64IZHINX-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64IZHINX-NEXT: fcvt.s.h a0, a0
-; RV64IZHINX-NEXT: call nearbyintf@plt
+; RV64IZHINX-NEXT: call nearbyintf
; RV64IZHINX-NEXT: fcvt.h.s a0, a0
; RV64IZHINX-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64IZHINX-NEXT: addi sp, sp, 16
@@ -2539,9 +2539,9 @@ define half @nearbyint_f16(half %a) nounwind {
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT: slli a0, a0, 16
; RV32I-NEXT: srli a0, a0, 16
-; RV32I-NEXT: call __extendhfsf2@plt
-; RV32I-NEXT: call nearbyintf@plt
-; RV32I-NEXT: call __truncsfhf2@plt
+; RV32I-NEXT: call __extendhfsf2
+; RV32I-NEXT: call nearbyintf
+; RV32I-NEXT: call __truncsfhf2
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
@@ -2552,9 +2552,9 @@ define half @nearbyint_f16(half %a) nounwind {
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT: slli a0, a0, 48
; RV64I-NEXT: srli a0, a0, 48
-; RV64I-NEXT: call __extendhfsf2@plt
-; RV64I-NEXT: call nearbyintf@plt
-; RV64I-NEXT: call __truncsfhf2@plt
+; RV64I-NEXT: call __extendhfsf2
+; RV64I-NEXT: call nearbyintf
+; RV64I-NEXT: call __truncsfhf2
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
@@ -2564,7 +2564,7 @@ define half @nearbyint_f16(half %a) nounwind {
; RV32IZFHMIN-NEXT: addi sp, sp, -16
; RV32IZFHMIN-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32IZFHMIN-NEXT: fcvt.s.h fa0, fa0
-; RV32IZFHMIN-NEXT: call nearbyintf@plt
+; RV32IZFHMIN-NEXT: call nearbyintf
; RV32IZFHMIN-NEXT: fcvt.h.s fa0, fa0
; RV32IZFHMIN-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32IZFHMIN-NEXT: addi sp, sp, 16
@@ -2575,7 +2575,7 @@ define half @nearbyint_f16(half %a) nounwind {
; RV64IZFHMIN-NEXT: addi sp, sp, -16
; RV64IZFHMIN-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64IZFHMIN-NEXT: fcvt.s.h fa0, fa0
-; RV64IZFHMIN-NEXT: call nearbyintf@plt
+; RV64IZFHMIN-NEXT: call nearbyintf
; RV64IZFHMIN-NEXT: fcvt.h.s fa0, fa0
; RV64IZFHMIN-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64IZFHMIN-NEXT: addi sp, sp, 16
@@ -2586,7 +2586,7 @@ define half @nearbyint_f16(half %a) nounwind {
; RV32IZHINXMIN-NEXT: addi sp, sp, -16
; RV32IZHINXMIN-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32IZHINXMIN-NEXT: fcvt.s.h a0, a0
-; RV32IZHINXMIN-NEXT: call nearbyintf@plt
+; RV32IZHINXMIN-NEXT: call nearbyintf
; RV32IZHINXMIN-NEXT: fcvt.h.s a0, a0
; RV32IZHINXMIN-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32IZHINXMIN-NEXT: addi sp, sp, 16
@@ -2597,7 +2597,7 @@ define half @nearbyint_f16(half %a) nounwind {
; RV64IZHINXMIN-NEXT: addi sp, sp, -16
; RV64IZHINXMIN-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64IZHINXMIN-NEXT: fcvt.s.h a0, a0
-; RV64IZHINXMIN-NEXT: call nearbyintf@plt
+; RV64IZHINXMIN-NEXT: call nearbyintf
; RV64IZHINXMIN-NEXT: fcvt.h.s a0, a0
; RV64IZHINXMIN-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64IZHINXMIN-NEXT: addi sp, sp, 16
@@ -2643,9 +2643,9 @@ define half @round_f16(half %a) nounwind {
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT: slli a0, a0, 16
; RV32I-NEXT: srli a0, a0, 16
-; RV32I-NEXT: call __extendhfsf2@plt
-; RV32I-NEXT: call roundf@plt
-; RV32I-NEXT: call __truncsfhf2@plt
+; RV32I-NEXT: call __extendhfsf2
+; RV32I-NEXT: call roundf
+; RV32I-NEXT: call __truncsfhf2
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
@@ -2656,9 +2656,9 @@ define half @round_f16(half %a) nounwind {
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT: slli a0, a0, 48
; RV64I-NEXT: srli a0, a0, 48
-; RV64I-NEXT: call __extendhfsf2@plt
-; RV64I-NEXT: call roundf@plt
-; RV64I-NEXT: call __truncsfhf2@plt
+; RV64I-NEXT: call __extendhfsf2
+; RV64I-NEXT: call roundf
+; RV64I-NEXT: call __truncsfhf2
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
@@ -2734,9 +2734,9 @@ define half @roundeven_f16(half %a) nounwind {
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT: slli a0, a0, 16
; RV32I-NEXT: srli a0, a0, 16
-; RV32I-NEXT: call __extendhfsf2@plt
-; RV32I-NEXT: call roundevenf@plt
-; RV32I-NEXT: call __truncsfhf2@plt
+; RV32I-NEXT: call __extendhfsf2
+; RV32I-NEXT: call roundevenf
+; RV32I-NEXT: call __truncsfhf2
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
@@ -2747,9 +2747,9 @@ define half @roundeven_f16(half %a) nounwind {
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT: slli a0, a0, 48
; RV64I-NEXT: srli a0, a0, 48
-; RV64I-NEXT: call __extendhfsf2@plt
-; RV64I-NEXT: call roundevenf@plt
-; RV64I-NEXT: call __truncsfhf2@plt
+; RV64I-NEXT: call __extendhfsf2
+; RV64I-NEXT: call roundevenf
+; RV64I-NEXT: call __truncsfhf2
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
diff --git a/llvm/test/CodeGen/RISCV/half-mem.ll b/llvm/test/CodeGen/RISCV/half-mem.ll
index bb25d2a7443d..5b6a94a83f94 100644
--- a/llvm/test/CodeGen/RISCV/half-mem.ll
+++ b/llvm/test/CodeGen/RISCV/half-mem.ll
@@ -264,7 +264,7 @@ define half @flh_stack(half %a) nounwind {
; RV32IZFH-NEXT: fsw fs0, 8(sp) # 4-byte Folded Spill
; RV32IZFH-NEXT: fmv.h fs0, fa0
; RV32IZFH-NEXT: addi a0, sp, 4
-; RV32IZFH-NEXT: call notdead@plt
+; RV32IZFH-NEXT: call notdead
; RV32IZFH-NEXT: flh fa5, 4(sp)
; RV32IZFH-NEXT: fadd.h fa0, fa5, fs0
; RV32IZFH-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
@@ -279,7 +279,7 @@ define half @flh_stack(half %a) nounwind {
; RV64IZFH-NEXT: fsw fs0, 4(sp) # 4-byte Folded Spill
; RV64IZFH-NEXT: fmv.h fs0, fa0
; RV64IZFH-NEXT: mv a0, sp
-; RV64IZFH-NEXT: call notdead@plt
+; RV64IZFH-NEXT: call notdead
; RV64IZFH-NEXT: flh fa5, 0(sp)
; RV64IZFH-NEXT: fadd.h fa0, fa5, fs0
; RV64IZFH-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
@@ -294,7 +294,7 @@ define half @flh_stack(half %a) nounwind {
; RV32IZHINX-NEXT: sw s0, 8(sp) # 4-byte Folded Spill
; RV32IZHINX-NEXT: mv s0, a0
; RV32IZHINX-NEXT: addi a0, sp, 4
-; RV32IZHINX-NEXT: call notdead@plt
+; RV32IZHINX-NEXT: call notdead
; RV32IZHINX-NEXT: lh a0, 4(sp)
; RV32IZHINX-NEXT: fadd.h a0, a0, s0
; RV32IZHINX-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
@@ -309,7 +309,7 @@ define half @flh_stack(half %a) nounwind {
; RV64IZHINX-NEXT: sd s0, 16(sp) # 8-byte Folded Spill
; RV64IZHINX-NEXT: mv s0, a0
; RV64IZHINX-NEXT: addi a0, sp, 12
-; RV64IZHINX-NEXT: call notdead@plt
+; RV64IZHINX-NEXT: call notdead
; RV64IZHINX-NEXT: lh a0, 12(sp)
; RV64IZHINX-NEXT: fadd.h a0, a0, s0
; RV64IZHINX-NEXT: ld ra, 24(sp) # 8-byte Folded Reload
@@ -324,7 +324,7 @@ define half @flh_stack(half %a) nounwind {
; RV32IZFHMIN-NEXT: fsw fs0, 8(sp) # 4-byte Folded Spill
; RV32IZFHMIN-NEXT: fmv.s fs0, fa0
; RV32IZFHMIN-NEXT: addi a0, sp, 4
-; RV32IZFHMIN-NEXT: call notdead@plt
+; RV32IZFHMIN-NEXT: call notdead
; RV32IZFHMIN-NEXT: flh fa5, 4(sp)
; RV32IZFHMIN-NEXT: fcvt.s.h fa4, fs0
; RV32IZFHMIN-NEXT: fcvt.s.h fa5, fa5
@@ -342,7 +342,7 @@ define half @flh_stack(half %a) nounwind {
; RV64IZFHMIN-NEXT: fsw fs0, 4(sp) # 4-byte Folded Spill
; RV64IZFHMIN-NEXT: fmv.s fs0, fa0
; RV64IZFHMIN-NEXT: mv a0, sp
-; RV64IZFHMIN-NEXT: call notdead@plt
+; RV64IZFHMIN-NEXT: call notdead
; RV64IZFHMIN-NEXT: flh fa5, 0(sp)
; RV64IZFHMIN-NEXT: fcvt.s.h fa4, fs0
; RV64IZFHMIN-NEXT: fcvt.s.h fa5, fa5
@@ -360,7 +360,7 @@ define half @flh_stack(half %a) nounwind {
; RV32IZHINXMIN-NEXT: sw s0, 8(sp) # 4-byte Folded Spill
; RV32IZHINXMIN-NEXT: mv s0, a0
; RV32IZHINXMIN-NEXT: addi a0, sp, 4
-; RV32IZHINXMIN-NEXT: call notdead@plt
+; RV32IZHINXMIN-NEXT: call notdead
; RV32IZHINXMIN-NEXT: lh a0, 4(sp)
; RV32IZHINXMIN-NEXT: fcvt.s.h a1, s0
; RV32IZHINXMIN-NEXT: fcvt.s.h a0, a0
@@ -378,7 +378,7 @@ define half @flh_stack(half %a) nounwind {
; RV64IZHINXMIN-NEXT: sd s0, 16(sp) # 8-byte Folded Spill
; RV64IZHINXMIN-NEXT: mv s0, a0
; RV64IZHINXMIN-NEXT: addi a0, sp, 12
-; RV64IZHINXMIN-NEXT: call notdead@plt
+; RV64IZHINXMIN-NEXT: call notdead
; RV64IZHINXMIN-NEXT: lh a0, 12(sp)
; RV64IZHINXMIN-NEXT: fcvt.s.h a1, s0
; RV64IZHINXMIN-NEXT: fcvt.s.h a0, a0
@@ -403,7 +403,7 @@ define dso_local void @fsh_stack(half %a, half %b) nounwind {
; RV32IZFH-NEXT: fadd.h fa5, fa0, fa1
; RV32IZFH-NEXT: fsh fa5, 8(sp)
; RV32IZFH-NEXT: addi a0, sp, 8
-; RV32IZFH-NEXT: call notdead@plt
+; RV32IZFH-NEXT: call notdead
; RV32IZFH-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32IZFH-NEXT: addi sp, sp, 16
; RV32IZFH-NEXT: ret
@@ -415,7 +415,7 @@ define dso_local void @fsh_stack(half %a, half %b) nounwind {
; RV64IZFH-NEXT: fadd.h fa5, fa0, fa1
; RV64IZFH-NEXT: fsh fa5, 4(sp)
; RV64IZFH-NEXT: addi a0, sp, 4
-; RV64IZFH-NEXT: call notdead@plt
+; RV64IZFH-NEXT: call notdead
; RV64IZFH-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64IZFH-NEXT: addi sp, sp, 16
; RV64IZFH-NEXT: ret
@@ -427,7 +427,7 @@ define dso_local void @fsh_stack(half %a, half %b) nounwind {
; RV32IZHINX-NEXT: fadd.h a0, a0, a1
; RV32IZHINX-NEXT: sh a0, 8(sp)
; RV32IZHINX-NEXT: addi a0, sp, 8
-; RV32IZHINX-NEXT: call notdead@plt
+; RV32IZHINX-NEXT: call notdead
; RV32IZHINX-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32IZHINX-NEXT: addi sp, sp, 16
; RV32IZHINX-NEXT: ret
@@ -439,7 +439,7 @@ define dso_local void @fsh_stack(half %a, half %b) nounwind {
; RV64IZHINX-NEXT: fadd.h a0, a0, a1
; RV64IZHINX-NEXT: sh a0, 4(sp)
; RV64IZHINX-NEXT: addi a0, sp, 4
-; RV64IZHINX-NEXT: call notdead@plt
+; RV64IZHINX-NEXT: call notdead
; RV64IZHINX-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64IZHINX-NEXT: addi sp, sp, 16
; RV64IZHINX-NEXT: ret
@@ -454,7 +454,7 @@ define dso_local void @fsh_stack(half %a, half %b) nounwind {
; RV32IZFHMIN-NEXT: fcvt.h.s fa5, fa5
; RV32IZFHMIN-NEXT: fsh fa5, 8(sp)
; RV32IZFHMIN-NEXT: addi a0, sp, 8
-; RV32IZFHMIN-NEXT: call notdead@plt
+; RV32IZFHMIN-NEXT: call notdead
; RV32IZFHMIN-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32IZFHMIN-NEXT: addi sp, sp, 16
; RV32IZFHMIN-NEXT: ret
@@ -469,7 +469,7 @@ define dso_local void @fsh_stack(half %a, half %b) nounwind {
; RV64IZFHMIN-NEXT: fcvt.h.s fa5, fa5
; RV64IZFHMIN-NEXT: fsh fa5, 4(sp)
; RV64IZFHMIN-NEXT: addi a0, sp, 4
-; RV64IZFHMIN-NEXT: call notdead@plt
+; RV64IZFHMIN-NEXT: call notdead
; RV64IZFHMIN-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64IZFHMIN-NEXT: addi sp, sp, 16
; RV64IZFHMIN-NEXT: ret
@@ -484,7 +484,7 @@ define dso_local void @fsh_stack(half %a, half %b) nounwind {
; RV32IZHINXMIN-NEXT: fcvt.h.s a0, a0
; RV32IZHINXMIN-NEXT: sh a0, 8(sp)
; RV32IZHINXMIN-NEXT: addi a0, sp, 8
-; RV32IZHINXMIN-NEXT: call notdead@plt
+; RV32IZHINXMIN-NEXT: call notdead
; RV32IZHINXMIN-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32IZHINXMIN-NEXT: addi sp, sp, 16
; RV32IZHINXMIN-NEXT: ret
@@ -499,7 +499,7 @@ define dso_local void @fsh_stack(half %a, half %b) nounwind {
; RV64IZHINXMIN-NEXT: fcvt.h.s a0, a0
; RV64IZHINXMIN-NEXT: sh a0, 4(sp)
; RV64IZHINXMIN-NEXT: addi a0, sp, 4
-; RV64IZHINXMIN-NEXT: call notdead@plt
+; RV64IZHINXMIN-NEXT: call notdead
; RV64IZHINXMIN-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64IZHINXMIN-NEXT: addi sp, sp, 16
; RV64IZHINXMIN-NEXT: ret
diff --git a/llvm/test/CodeGen/RISCV/half-round-conv-sat.ll b/llvm/test/CodeGen/RISCV/half-round-conv-sat.ll
index 962ed8393b72..3f385909b0b5 100644
--- a/llvm/test/CodeGen/RISCV/half-round-conv-sat.ll
+++ b/llvm/test/CodeGen/RISCV/half-round-conv-sat.ll
@@ -114,7 +114,7 @@ define i64 @test_floor_si64(half %x) nounwind {
; RV32IZFH-NEXT: fmv.w.x fa5, a0
; RV32IZFH-NEXT: fle.s s0, fa5, fs0
; RV32IZFH-NEXT: fmv.s fa0, fs0
-; RV32IZFH-NEXT: call __fixsfdi@plt
+; RV32IZFH-NEXT: call __fixsfdi
; RV32IZFH-NEXT: lui a4, 524288
; RV32IZFH-NEXT: lui a2, 524288
; RV32IZFH-NEXT: beqz s0, .LBB1_4
@@ -173,7 +173,7 @@ define i64 @test_floor_si64(half %x) nounwind {
; RV32IZHINX-NEXT: fle.s s1, a0, s0
; RV32IZHINX-NEXT: neg s2, s1
; RV32IZHINX-NEXT: mv a0, s0
-; RV32IZHINX-NEXT: call __fixsfdi@plt
+; RV32IZHINX-NEXT: call __fixsfdi
; RV32IZHINX-NEXT: lui a2, %hi(.LCPI1_1)
; RV32IZHINX-NEXT: lw a2, %lo(.LCPI1_1)(a2)
; RV32IZHINX-NEXT: and a0, s2, a0
@@ -243,7 +243,7 @@ define i64 @test_floor_si64(half %x) nounwind {
; RV32IZFHMIN-NEXT: fmv.w.x fa5, a0
; RV32IZFHMIN-NEXT: fle.s s0, fa5, fs0
; RV32IZFHMIN-NEXT: fmv.s fa0, fs0
-; RV32IZFHMIN-NEXT: call __fixsfdi@plt
+; RV32IZFHMIN-NEXT: call __fixsfdi
; RV32IZFHMIN-NEXT: lui a4, 524288
; RV32IZFHMIN-NEXT: lui a2, 524288
; RV32IZFHMIN-NEXT: beqz s0, .LBB1_4
@@ -316,7 +316,7 @@ define i64 @test_floor_si64(half %x) nounwind {
; RV32IZHINXMIN-NEXT: fle.s s1, a0, s0
; RV32IZHINXMIN-NEXT: neg s2, s1
; RV32IZHINXMIN-NEXT: mv a0, s0
-; RV32IZHINXMIN-NEXT: call __fixsfdi@plt
+; RV32IZHINXMIN-NEXT: call __fixsfdi
; RV32IZHINXMIN-NEXT: lui a2, %hi(.LCPI1_0)
; RV32IZHINXMIN-NEXT: lw a2, %lo(.LCPI1_0)(a2)
; RV32IZHINXMIN-NEXT: and a0, s2, a0
@@ -529,7 +529,7 @@ define i64 @test_floor_ui64(half %x) nounwind {
; RV32IZFH-NEXT: fle.s a0, fa5, fs0
; RV32IZFH-NEXT: neg s0, a0
; RV32IZFH-NEXT: fmv.s fa0, fs0
-; RV32IZFH-NEXT: call __fixunssfdi@plt
+; RV32IZFH-NEXT: call __fixunssfdi
; RV32IZFH-NEXT: lui a2, %hi(.LCPI3_1)
; RV32IZFH-NEXT: flw fa5, %lo(.LCPI3_1)(a2)
; RV32IZFH-NEXT: and a0, s0, a0
@@ -573,7 +573,7 @@ define i64 @test_floor_ui64(half %x) nounwind {
; RV32IZHINX-NEXT: fle.s a0, zero, s0
; RV32IZHINX-NEXT: neg s1, a0
; RV32IZHINX-NEXT: mv a0, s0
-; RV32IZHINX-NEXT: call __fixunssfdi@plt
+; RV32IZHINX-NEXT: call __fixunssfdi
; RV32IZHINX-NEXT: lui a2, %hi(.LCPI3_1)
; RV32IZHINX-NEXT: lw a2, %lo(.LCPI3_1)(a2)
; RV32IZHINX-NEXT: and a0, s1, a0
@@ -630,7 +630,7 @@ define i64 @test_floor_ui64(half %x) nounwind {
; RV32IZFHMIN-NEXT: fle.s a0, fa5, fs0
; RV32IZFHMIN-NEXT: neg s0, a0
; RV32IZFHMIN-NEXT: fmv.s fa0, fs0
-; RV32IZFHMIN-NEXT: call __fixunssfdi@plt
+; RV32IZFHMIN-NEXT: call __fixunssfdi
; RV32IZFHMIN-NEXT: lui a2, %hi(.LCPI3_0)
; RV32IZFHMIN-NEXT: flw fa5, %lo(.LCPI3_0)(a2)
; RV32IZFHMIN-NEXT: and a0, s0, a0
@@ -688,7 +688,7 @@ define i64 @test_floor_ui64(half %x) nounwind {
; RV32IZHINXMIN-NEXT: fle.s a0, zero, s0
; RV32IZHINXMIN-NEXT: neg s1, a0
; RV32IZHINXMIN-NEXT: mv a0, s0
-; RV32IZHINXMIN-NEXT: call __fixunssfdi@plt
+; RV32IZHINXMIN-NEXT: call __fixunssfdi
; RV32IZHINXMIN-NEXT: lui a2, %hi(.LCPI3_0)
; RV32IZHINXMIN-NEXT: lw a2, %lo(.LCPI3_0)(a2)
; RV32IZHINXMIN-NEXT: and a0, s1, a0
@@ -826,7 +826,7 @@ define i64 @test_ceil_si64(half %x) nounwind {
; RV32IZFH-NEXT: fmv.w.x fa5, a0
; RV32IZFH-NEXT: fle.s s0, fa5, fs0
; RV32IZFH-NEXT: fmv.s fa0, fs0
-; RV32IZFH-NEXT: call __fixsfdi@plt
+; RV32IZFH-NEXT: call __fixsfdi
; RV32IZFH-NEXT: lui a4, 524288
; RV32IZFH-NEXT: lui a2, 524288
; RV32IZFH-NEXT: beqz s0, .LBB5_4
@@ -885,7 +885,7 @@ define i64 @test_ceil_si64(half %x) nounwind {
; RV32IZHINX-NEXT: fle.s s1, a0, s0
; RV32IZHINX-NEXT: neg s2, s1
; RV32IZHINX-NEXT: mv a0, s0
-; RV32IZHINX-NEXT: call __fixsfdi@plt
+; RV32IZHINX-NEXT: call __fixsfdi
; RV32IZHINX-NEXT: lui a2, %hi(.LCPI5_1)
; RV32IZHINX-NEXT: lw a2, %lo(.LCPI5_1)(a2)
; RV32IZHINX-NEXT: and a0, s2, a0
@@ -955,7 +955,7 @@ define i64 @test_ceil_si64(half %x) nounwind {
; RV32IZFHMIN-NEXT: fmv.w.x fa5, a0
; RV32IZFHMIN-NEXT: fle.s s0, fa5, fs0
; RV32IZFHMIN-NEXT: fmv.s fa0, fs0
-; RV32IZFHMIN-NEXT: call __fixsfdi@plt
+; RV32IZFHMIN-NEXT: call __fixsfdi
; RV32IZFHMIN-NEXT: lui a4, 524288
; RV32IZFHMIN-NEXT: lui a2, 524288
; RV32IZFHMIN-NEXT: beqz s0, .LBB5_4
@@ -1028,7 +1028,7 @@ define i64 @test_ceil_si64(half %x) nounwind {
; RV32IZHINXMIN-NEXT: fle.s s1, a0, s0
; RV32IZHINXMIN-NEXT: neg s2, s1
; RV32IZHINXMIN-NEXT: mv a0, s0
-; RV32IZHINXMIN-NEXT: call __fixsfdi@plt
+; RV32IZHINXMIN-NEXT: call __fixsfdi
; RV32IZHINXMIN-NEXT: lui a2, %hi(.LCPI5_0)
; RV32IZHINXMIN-NEXT: lw a2, %lo(.LCPI5_0)(a2)
; RV32IZHINXMIN-NEXT: and a0, s2, a0
@@ -1241,7 +1241,7 @@ define i64 @test_ceil_ui64(half %x) nounwind {
; RV32IZFH-NEXT: fle.s a0, fa5, fs0
; RV32IZFH-NEXT: neg s0, a0
; RV32IZFH-NEXT: fmv.s fa0, fs0
-; RV32IZFH-NEXT: call __fixunssfdi@plt
+; RV32IZFH-NEXT: call __fixunssfdi
; RV32IZFH-NEXT: lui a2, %hi(.LCPI7_1)
; RV32IZFH-NEXT: flw fa5, %lo(.LCPI7_1)(a2)
; RV32IZFH-NEXT: and a0, s0, a0
@@ -1285,7 +1285,7 @@ define i64 @test_ceil_ui64(half %x) nounwind {
; RV32IZHINX-NEXT: fle.s a0, zero, s0
; RV32IZHINX-NEXT: neg s1, a0
; RV32IZHINX-NEXT: mv a0, s0
-; RV32IZHINX-NEXT: call __fixunssfdi@plt
+; RV32IZHINX-NEXT: call __fixunssfdi
; RV32IZHINX-NEXT: lui a2, %hi(.LCPI7_1)
; RV32IZHINX-NEXT: lw a2, %lo(.LCPI7_1)(a2)
; RV32IZHINX-NEXT: and a0, s1, a0
@@ -1342,7 +1342,7 @@ define i64 @test_ceil_ui64(half %x) nounwind {
; RV32IZFHMIN-NEXT: fle.s a0, fa5, fs0
; RV32IZFHMIN-NEXT: neg s0, a0
; RV32IZFHMIN-NEXT: fmv.s fa0, fs0
-; RV32IZFHMIN-NEXT: call __fixunssfdi@plt
+; RV32IZFHMIN-NEXT: call __fixunssfdi
; RV32IZFHMIN-NEXT: lui a2, %hi(.LCPI7_0)
; RV32IZFHMIN-NEXT: flw fa5, %lo(.LCPI7_0)(a2)
; RV32IZFHMIN-NEXT: and a0, s0, a0
@@ -1400,7 +1400,7 @@ define i64 @test_ceil_ui64(half %x) nounwind {
; RV32IZHINXMIN-NEXT: fle.s a0, zero, s0
; RV32IZHINXMIN-NEXT: neg s1, a0
; RV32IZHINXMIN-NEXT: mv a0, s0
-; RV32IZHINXMIN-NEXT: call __fixunssfdi@plt
+; RV32IZHINXMIN-NEXT: call __fixunssfdi
; RV32IZHINXMIN-NEXT: lui a2, %hi(.LCPI7_0)
; RV32IZHINXMIN-NEXT: lw a2, %lo(.LCPI7_0)(a2)
; RV32IZHINXMIN-NEXT: and a0, s1, a0
@@ -1538,7 +1538,7 @@ define i64 @test_trunc_si64(half %x) nounwind {
; RV32IZFH-NEXT: fmv.w.x fa5, a0
; RV32IZFH-NEXT: fle.s s0, fa5, fs0
; RV32IZFH-NEXT: fmv.s fa0, fs0
-; RV32IZFH-NEXT: call __fixsfdi@plt
+; RV32IZFH-NEXT: call __fixsfdi
; RV32IZFH-NEXT: lui a4, 524288
; RV32IZFH-NEXT: lui a2, 524288
; RV32IZFH-NEXT: beqz s0, .LBB9_4
@@ -1597,7 +1597,7 @@ define i64 @test_trunc_si64(half %x) nounwind {
; RV32IZHINX-NEXT: fle.s s1, a0, s0
; RV32IZHINX-NEXT: neg s2, s1
; RV32IZHINX-NEXT: mv a0, s0
-; RV32IZHINX-NEXT: call __fixsfdi@plt
+; RV32IZHINX-NEXT: call __fixsfdi
; RV32IZHINX-NEXT: lui a2, %hi(.LCPI9_1)
; RV32IZHINX-NEXT: lw a2, %lo(.LCPI9_1)(a2)
; RV32IZHINX-NEXT: and a0, s2, a0
@@ -1667,7 +1667,7 @@ define i64 @test_trunc_si64(half %x) nounwind {
; RV32IZFHMIN-NEXT: fmv.w.x fa5, a0
; RV32IZFHMIN-NEXT: fle.s s0, fa5, fs0
; RV32IZFHMIN-NEXT: fmv.s fa0, fs0
-; RV32IZFHMIN-NEXT: call __fixsfdi@plt
+; RV32IZFHMIN-NEXT: call __fixsfdi
; RV32IZFHMIN-NEXT: lui a4, 524288
; RV32IZFHMIN-NEXT: lui a2, 524288
; RV32IZFHMIN-NEXT: beqz s0, .LBB9_4
@@ -1740,7 +1740,7 @@ define i64 @test_trunc_si64(half %x) nounwind {
; RV32IZHINXMIN-NEXT: fle.s s1, a0, s0
; RV32IZHINXMIN-NEXT: neg s2, s1
; RV32IZHINXMIN-NEXT: mv a0, s0
-; RV32IZHINXMIN-NEXT: call __fixsfdi@plt
+; RV32IZHINXMIN-NEXT: call __fixsfdi
; RV32IZHINXMIN-NEXT: lui a2, %hi(.LCPI9_0)
; RV32IZHINXMIN-NEXT: lw a2, %lo(.LCPI9_0)(a2)
; RV32IZHINXMIN-NEXT: and a0, s2, a0
@@ -1953,7 +1953,7 @@ define i64 @test_trunc_ui64(half %x) nounwind {
; RV32IZFH-NEXT: fle.s a0, fa5, fs0
; RV32IZFH-NEXT: neg s0, a0
; RV32IZFH-NEXT: fmv.s fa0, fs0
-; RV32IZFH-NEXT: call __fixunssfdi@plt
+; RV32IZFH-NEXT: call __fixunssfdi
; RV32IZFH-NEXT: lui a2, %hi(.LCPI11_1)
; RV32IZFH-NEXT: flw fa5, %lo(.LCPI11_1)(a2)
; RV32IZFH-NEXT: and a0, s0, a0
@@ -1997,7 +1997,7 @@ define i64 @test_trunc_ui64(half %x) nounwind {
; RV32IZHINX-NEXT: fle.s a0, zero, s0
; RV32IZHINX-NEXT: neg s1, a0
; RV32IZHINX-NEXT: mv a0, s0
-; RV32IZHINX-NEXT: call __fixunssfdi@plt
+; RV32IZHINX-NEXT: call __fixunssfdi
; RV32IZHINX-NEXT: lui a2, %hi(.LCPI11_1)
; RV32IZHINX-NEXT: lw a2, %lo(.LCPI11_1)(a2)
; RV32IZHINX-NEXT: and a0, s1, a0
@@ -2054,7 +2054,7 @@ define i64 @test_trunc_ui64(half %x) nounwind {
; RV32IZFHMIN-NEXT: fle.s a0, fa5, fs0
; RV32IZFHMIN-NEXT: neg s0, a0
; RV32IZFHMIN-NEXT: fmv.s fa0, fs0
-; RV32IZFHMIN-NEXT: call __fixunssfdi@plt
+; RV32IZFHMIN-NEXT: call __fixunssfdi
; RV32IZFHMIN-NEXT: lui a2, %hi(.LCPI11_0)
; RV32IZFHMIN-NEXT: flw fa5, %lo(.LCPI11_0)(a2)
; RV32IZFHMIN-NEXT: and a0, s0, a0
@@ -2112,7 +2112,7 @@ define i64 @test_trunc_ui64(half %x) nounwind {
; RV32IZHINXMIN-NEXT: fle.s a0, zero, s0
; RV32IZHINXMIN-NEXT: neg s1, a0
; RV32IZHINXMIN-NEXT: mv a0, s0
-; RV32IZHINXMIN-NEXT: call __fixunssfdi@plt
+; RV32IZHINXMIN-NEXT: call __fixunssfdi
; RV32IZHINXMIN-NEXT: lui a2, %hi(.LCPI11_0)
; RV32IZHINXMIN-NEXT: lw a2, %lo(.LCPI11_0)(a2)
; RV32IZHINXMIN-NEXT: and a0, s1, a0
@@ -2250,7 +2250,7 @@ define i64 @test_round_si64(half %x) nounwind {
; RV32IZFH-NEXT: fmv.w.x fa5, a0
; RV32IZFH-NEXT: fle.s s0, fa5, fs0
; RV32IZFH-NEXT: fmv.s fa0, fs0
-; RV32IZFH-NEXT: call __fixsfdi@plt
+; RV32IZFH-NEXT: call __fixsfdi
; RV32IZFH-NEXT: lui a4, 524288
; RV32IZFH-NEXT: lui a2, 524288
; RV32IZFH-NEXT: beqz s0, .LBB13_4
@@ -2309,7 +2309,7 @@ define i64 @test_round_si64(half %x) nounwind {
; RV32IZHINX-NEXT: fle.s s1, a0, s0
; RV32IZHINX-NEXT: neg s2, s1
; RV32IZHINX-NEXT: mv a0, s0
-; RV32IZHINX-NEXT: call __fixsfdi@plt
+; RV32IZHINX-NEXT: call __fixsfdi
; RV32IZHINX-NEXT: lui a2, %hi(.LCPI13_1)
; RV32IZHINX-NEXT: lw a2, %lo(.LCPI13_1)(a2)
; RV32IZHINX-NEXT: and a0, s2, a0
@@ -2379,7 +2379,7 @@ define i64 @test_round_si64(half %x) nounwind {
; RV32IZFHMIN-NEXT: fmv.w.x fa5, a0
; RV32IZFHMIN-NEXT: fle.s s0, fa5, fs0
; RV32IZFHMIN-NEXT: fmv.s fa0, fs0
-; RV32IZFHMIN-NEXT: call __fixsfdi@plt
+; RV32IZFHMIN-NEXT: call __fixsfdi
; RV32IZFHMIN-NEXT: lui a4, 524288
; RV32IZFHMIN-NEXT: lui a2, 524288
; RV32IZFHMIN-NEXT: beqz s0, .LBB13_4
@@ -2452,7 +2452,7 @@ define i64 @test_round_si64(half %x) nounwind {
; RV32IZHINXMIN-NEXT: fle.s s1, a0, s0
; RV32IZHINXMIN-NEXT: neg s2, s1
; RV32IZHINXMIN-NEXT: mv a0, s0
-; RV32IZHINXMIN-NEXT: call __fixsfdi@plt
+; RV32IZHINXMIN-NEXT: call __fixsfdi
; RV32IZHINXMIN-NEXT: lui a2, %hi(.LCPI13_0)
; RV32IZHINXMIN-NEXT: lw a2, %lo(.LCPI13_0)(a2)
; RV32IZHINXMIN-NEXT: and a0, s2, a0
@@ -2665,7 +2665,7 @@ define i64 @test_round_ui64(half %x) nounwind {
; RV32IZFH-NEXT: fle.s a0, fa5, fs0
; RV32IZFH-NEXT: neg s0, a0
; RV32IZFH-NEXT: fmv.s fa0, fs0
-; RV32IZFH-NEXT: call __fixunssfdi@plt
+; RV32IZFH-NEXT: call __fixunssfdi
; RV32IZFH-NEXT: lui a2, %hi(.LCPI15_1)
; RV32IZFH-NEXT: flw fa5, %lo(.LCPI15_1)(a2)
; RV32IZFH-NEXT: and a0, s0, a0
@@ -2709,7 +2709,7 @@ define i64 @test_round_ui64(half %x) nounwind {
; RV32IZHINX-NEXT: fle.s a0, zero, s0
; RV32IZHINX-NEXT: neg s1, a0
; RV32IZHINX-NEXT: mv a0, s0
-; RV32IZHINX-NEXT: call __fixunssfdi@plt
+; RV32IZHINX-NEXT: call __fixunssfdi
; RV32IZHINX-NEXT: lui a2, %hi(.LCPI15_1)
; RV32IZHINX-NEXT: lw a2, %lo(.LCPI15_1)(a2)
; RV32IZHINX-NEXT: and a0, s1, a0
@@ -2766,7 +2766,7 @@ define i64 @test_round_ui64(half %x) nounwind {
; RV32IZFHMIN-NEXT: fle.s a0, fa5, fs0
; RV32IZFHMIN-NEXT: neg s0, a0
; RV32IZFHMIN-NEXT: fmv.s fa0, fs0
-; RV32IZFHMIN-NEXT: call __fixunssfdi@plt
+; RV32IZFHMIN-NEXT: call __fixunssfdi
; RV32IZFHMIN-NEXT: lui a2, %hi(.LCPI15_0)
; RV32IZFHMIN-NEXT: flw fa5, %lo(.LCPI15_0)(a2)
; RV32IZFHMIN-NEXT: and a0, s0, a0
@@ -2824,7 +2824,7 @@ define i64 @test_round_ui64(half %x) nounwind {
; RV32IZHINXMIN-NEXT: fle.s a0, zero, s0
; RV32IZHINXMIN-NEXT: neg s1, a0
; RV32IZHINXMIN-NEXT: mv a0, s0
-; RV32IZHINXMIN-NEXT: call __fixunssfdi@plt
+; RV32IZHINXMIN-NEXT: call __fixunssfdi
; RV32IZHINXMIN-NEXT: lui a2, %hi(.LCPI15_0)
; RV32IZHINXMIN-NEXT: lw a2, %lo(.LCPI15_0)(a2)
; RV32IZHINXMIN-NEXT: and a0, s1, a0
@@ -2962,7 +2962,7 @@ define i64 @test_roundeven_si64(half %x) nounwind {
; RV32IZFH-NEXT: fmv.w.x fa5, a0
; RV32IZFH-NEXT: fle.s s0, fa5, fs0
; RV32IZFH-NEXT: fmv.s fa0, fs0
-; RV32IZFH-NEXT: call __fixsfdi@plt
+; RV32IZFH-NEXT: call __fixsfdi
; RV32IZFH-NEXT: lui a4, 524288
; RV32IZFH-NEXT: lui a2, 524288
; RV32IZFH-NEXT: beqz s0, .LBB17_4
@@ -3021,7 +3021,7 @@ define i64 @test_roundeven_si64(half %x) nounwind {
; RV32IZHINX-NEXT: fle.s s1, a0, s0
; RV32IZHINX-NEXT: neg s2, s1
; RV32IZHINX-NEXT: mv a0, s0
-; RV32IZHINX-NEXT: call __fixsfdi@plt
+; RV32IZHINX-NEXT: call __fixsfdi
; RV32IZHINX-NEXT: lui a2, %hi(.LCPI17_1)
; RV32IZHINX-NEXT: lw a2, %lo(.LCPI17_1)(a2)
; RV32IZHINX-NEXT: and a0, s2, a0
@@ -3091,7 +3091,7 @@ define i64 @test_roundeven_si64(half %x) nounwind {
; RV32IZFHMIN-NEXT: fmv.w.x fa5, a0
; RV32IZFHMIN-NEXT: fle.s s0, fa5, fs0
; RV32IZFHMIN-NEXT: fmv.s fa0, fs0
-; RV32IZFHMIN-NEXT: call __fixsfdi@plt
+; RV32IZFHMIN-NEXT: call __fixsfdi
; RV32IZFHMIN-NEXT: lui a4, 524288
; RV32IZFHMIN-NEXT: lui a2, 524288
; RV32IZFHMIN-NEXT: beqz s0, .LBB17_4
@@ -3164,7 +3164,7 @@ define i64 @test_roundeven_si64(half %x) nounwind {
; RV32IZHINXMIN-NEXT: fle.s s1, a0, s0
; RV32IZHINXMIN-NEXT: neg s2, s1
; RV32IZHINXMIN-NEXT: mv a0, s0
-; RV32IZHINXMIN-NEXT: call __fixsfdi@plt
+; RV32IZHINXMIN-NEXT: call __fixsfdi
; RV32IZHINXMIN-NEXT: lui a2, %hi(.LCPI17_0)
; RV32IZHINXMIN-NEXT: lw a2, %lo(.LCPI17_0)(a2)
; RV32IZHINXMIN-NEXT: and a0, s2, a0
@@ -3377,7 +3377,7 @@ define i64 @test_roundeven_ui64(half %x) nounwind {
; RV32IZFH-NEXT: fle.s a0, fa5, fs0
; RV32IZFH-NEXT: neg s0, a0
; RV32IZFH-NEXT: fmv.s fa0, fs0
-; RV32IZFH-NEXT: call __fixunssfdi@plt
+; RV32IZFH-NEXT: call __fixunssfdi
; RV32IZFH-NEXT: lui a2, %hi(.LCPI19_1)
; RV32IZFH-NEXT: flw fa5, %lo(.LCPI19_1)(a2)
; RV32IZFH-NEXT: and a0, s0, a0
@@ -3421,7 +3421,7 @@ define i64 @test_roundeven_ui64(half %x) nounwind {
; RV32IZHINX-NEXT: fle.s a0, zero, s0
; RV32IZHINX-NEXT: neg s1, a0
; RV32IZHINX-NEXT: mv a0, s0
-; RV32IZHINX-NEXT: call __fixunssfdi@plt
+; RV32IZHINX-NEXT: call __fixunssfdi
; RV32IZHINX-NEXT: lui a2, %hi(.LCPI19_1)
; RV32IZHINX-NEXT: lw a2, %lo(.LCPI19_1)(a2)
; RV32IZHINX-NEXT: and a0, s1, a0
@@ -3478,7 +3478,7 @@ define i64 @test_roundeven_ui64(half %x) nounwind {
; RV32IZFHMIN-NEXT: fle.s a0, fa5, fs0
; RV32IZFHMIN-NEXT: neg s0, a0
; RV32IZFHMIN-NEXT: fmv.s fa0, fs0
-; RV32IZFHMIN-NEXT: call __fixunssfdi@plt
+; RV32IZFHMIN-NEXT: call __fixunssfdi
; RV32IZFHMIN-NEXT: lui a2, %hi(.LCPI19_0)
; RV32IZFHMIN-NEXT: flw fa5, %lo(.LCPI19_0)(a2)
; RV32IZFHMIN-NEXT: and a0, s0, a0
@@ -3536,7 +3536,7 @@ define i64 @test_roundeven_ui64(half %x) nounwind {
; RV32IZHINXMIN-NEXT: fle.s a0, zero, s0
; RV32IZHINXMIN-NEXT: neg s1, a0
; RV32IZHINXMIN-NEXT: mv a0, s0
-; RV32IZHINXMIN-NEXT: call __fixunssfdi@plt
+; RV32IZHINXMIN-NEXT: call __fixunssfdi
; RV32IZHINXMIN-NEXT: lui a2, %hi(.LCPI19_0)
; RV32IZHINXMIN-NEXT: lw a2, %lo(.LCPI19_0)(a2)
; RV32IZHINXMIN-NEXT: and a0, s1, a0
@@ -3674,7 +3674,7 @@ define i64 @test_rint_si64(half %x) nounwind {
; RV32IZFH-NEXT: fmv.w.x fa5, a0
; RV32IZFH-NEXT: fle.s s0, fa5, fs0
; RV32IZFH-NEXT: fmv.s fa0, fs0
-; RV32IZFH-NEXT: call __fixsfdi@plt
+; RV32IZFH-NEXT: call __fixsfdi
; RV32IZFH-NEXT: lui a4, 524288
; RV32IZFH-NEXT: lui a2, 524288
; RV32IZFH-NEXT: beqz s0, .LBB21_4
@@ -3733,7 +3733,7 @@ define i64 @test_rint_si64(half %x) nounwind {
; RV32IZHINX-NEXT: fle.s s1, a0, s0
; RV32IZHINX-NEXT: neg s2, s1
; RV32IZHINX-NEXT: mv a0, s0
-; RV32IZHINX-NEXT: call __fixsfdi@plt
+; RV32IZHINX-NEXT: call __fixsfdi
; RV32IZHINX-NEXT: lui a2, %hi(.LCPI21_1)
; RV32IZHINX-NEXT: lw a2, %lo(.LCPI21_1)(a2)
; RV32IZHINX-NEXT: and a0, s2, a0
@@ -3803,7 +3803,7 @@ define i64 @test_rint_si64(half %x) nounwind {
; RV32IZFHMIN-NEXT: fmv.w.x fa5, a0
; RV32IZFHMIN-NEXT: fle.s s0, fa5, fs0
; RV32IZFHMIN-NEXT: fmv.s fa0, fs0
-; RV32IZFHMIN-NEXT: call __fixsfdi@plt
+; RV32IZFHMIN-NEXT: call __fixsfdi
; RV32IZFHMIN-NEXT: lui a4, 524288
; RV32IZFHMIN-NEXT: lui a2, 524288
; RV32IZFHMIN-NEXT: beqz s0, .LBB21_4
@@ -3876,7 +3876,7 @@ define i64 @test_rint_si64(half %x) nounwind {
; RV32IZHINXMIN-NEXT: fle.s s1, a0, s0
; RV32IZHINXMIN-NEXT: neg s2, s1
; RV32IZHINXMIN-NEXT: mv a0, s0
-; RV32IZHINXMIN-NEXT: call __fixsfdi@plt
+; RV32IZHINXMIN-NEXT: call __fixsfdi
; RV32IZHINXMIN-NEXT: lui a2, %hi(.LCPI21_0)
; RV32IZHINXMIN-NEXT: lw a2, %lo(.LCPI21_0)(a2)
; RV32IZHINXMIN-NEXT: and a0, s2, a0
@@ -4089,7 +4089,7 @@ define i64 @test_rint_ui64(half %x) nounwind {
; RV32IZFH-NEXT: fle.s a0, fa5, fs0
; RV32IZFH-NEXT: neg s0, a0
; RV32IZFH-NEXT: fmv.s fa0, fs0
-; RV32IZFH-NEXT: call __fixunssfdi@plt
+; RV32IZFH-NEXT: call __fixunssfdi
; RV32IZFH-NEXT: lui a2, %hi(.LCPI23_1)
; RV32IZFH-NEXT: flw fa5, %lo(.LCPI23_1)(a2)
; RV32IZFH-NEXT: and a0, s0, a0
@@ -4133,7 +4133,7 @@ define i64 @test_rint_ui64(half %x) nounwind {
; RV32IZHINX-NEXT: fle.s a0, zero, s0
; RV32IZHINX-NEXT: neg s1, a0
; RV32IZHINX-NEXT: mv a0, s0
-; RV32IZHINX-NEXT: call __fixunssfdi@plt
+; RV32IZHINX-NEXT: call __fixunssfdi
; RV32IZHINX-NEXT: lui a2, %hi(.LCPI23_1)
; RV32IZHINX-NEXT: lw a2, %lo(.LCPI23_1)(a2)
; RV32IZHINX-NEXT: and a0, s1, a0
@@ -4190,7 +4190,7 @@ define i64 @test_rint_ui64(half %x) nounwind {
; RV32IZFHMIN-NEXT: fle.s a0, fa5, fs0
; RV32IZFHMIN-NEXT: neg s0, a0
; RV32IZFHMIN-NEXT: fmv.s fa0, fs0
-; RV32IZFHMIN-NEXT: call __fixunssfdi@plt
+; RV32IZFHMIN-NEXT: call __fixunssfdi
; RV32IZFHMIN-NEXT: lui a2, %hi(.LCPI23_0)
; RV32IZFHMIN-NEXT: flw fa5, %lo(.LCPI23_0)(a2)
; RV32IZFHMIN-NEXT: and a0, s0, a0
@@ -4248,7 +4248,7 @@ define i64 @test_rint_ui64(half %x) nounwind {
; RV32IZHINXMIN-NEXT: fle.s a0, zero, s0
; RV32IZHINXMIN-NEXT: neg s1, a0
; RV32IZHINXMIN-NEXT: mv a0, s0
-; RV32IZHINXMIN-NEXT: call __fixunssfdi@plt
+; RV32IZHINXMIN-NEXT: call __fixunssfdi
; RV32IZHINXMIN-NEXT: lui a2, %hi(.LCPI23_0)
; RV32IZHINXMIN-NEXT: lw a2, %lo(.LCPI23_0)(a2)
; RV32IZHINXMIN-NEXT: and a0, s1, a0
diff --git a/llvm/test/CodeGen/RISCV/half-round-conv.ll b/llvm/test/CodeGen/RISCV/half-round-conv.ll
index 84ba49684fc6..173164db5b78 100644
--- a/llvm/test/CodeGen/RISCV/half-round-conv.ll
+++ b/llvm/test/CodeGen/RISCV/half-round-conv.ll
@@ -323,7 +323,7 @@ define i64 @test_floor_si64(half %x) {
; RV32IZFH-NEXT: .cfi_def_cfa_offset 16
; RV32IZFH-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32IZFH-NEXT: .cfi_offset ra, -4
-; RV32IZFH-NEXT: call __fixhfdi@plt
+; RV32IZFH-NEXT: call __fixhfdi
; RV32IZFH-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32IZFH-NEXT: addi sp, sp, 16
; RV32IZFH-NEXT: ret
@@ -349,7 +349,7 @@ define i64 @test_floor_si64(half %x) {
; RV32IZHINX-NEXT: .cfi_def_cfa_offset 16
; RV32IZHINX-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32IZHINX-NEXT: .cfi_offset ra, -4
-; RV32IZHINX-NEXT: call __fixhfdi@plt
+; RV32IZHINX-NEXT: call __fixhfdi
; RV32IZHINX-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32IZHINX-NEXT: addi sp, sp, 16
; RV32IZHINX-NEXT: ret
@@ -387,7 +387,7 @@ define i64 @test_floor_si64(half %x) {
; RV32IZFHMIN-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32IZFHMIN-NEXT: .cfi_offset ra, -4
; RV32IZFHMIN-NEXT: fcvt.h.s fa0, fa5
-; RV32IZFHMIN-NEXT: call __fixhfdi@plt
+; RV32IZFHMIN-NEXT: call __fixhfdi
; RV32IZFHMIN-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32IZFHMIN-NEXT: addi sp, sp, 16
; RV32IZFHMIN-NEXT: ret
@@ -427,7 +427,7 @@ define i64 @test_floor_si64(half %x) {
; RV32IZHINXMIN-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32IZHINXMIN-NEXT: .cfi_offset ra, -4
; RV32IZHINXMIN-NEXT: fcvt.h.s a0, a0
-; RV32IZHINXMIN-NEXT: call __fixhfdi@plt
+; RV32IZHINXMIN-NEXT: call __fixhfdi
; RV32IZHINXMIN-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32IZHINXMIN-NEXT: addi sp, sp, 16
; RV32IZHINXMIN-NEXT: ret
@@ -760,7 +760,7 @@ define i64 @test_floor_ui64(half %x) {
; RV32IZFH-NEXT: .cfi_def_cfa_offset 16
; RV32IZFH-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32IZFH-NEXT: .cfi_offset ra, -4
-; RV32IZFH-NEXT: call __fixunshfdi@plt
+; RV32IZFH-NEXT: call __fixunshfdi
; RV32IZFH-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32IZFH-NEXT: addi sp, sp, 16
; RV32IZFH-NEXT: ret
@@ -786,7 +786,7 @@ define i64 @test_floor_ui64(half %x) {
; RV32IZHINX-NEXT: .cfi_def_cfa_offset 16
; RV32IZHINX-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32IZHINX-NEXT: .cfi_offset ra, -4
-; RV32IZHINX-NEXT: call __fixunshfdi@plt
+; RV32IZHINX-NEXT: call __fixunshfdi
; RV32IZHINX-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32IZHINX-NEXT: addi sp, sp, 16
; RV32IZHINX-NEXT: ret
@@ -824,7 +824,7 @@ define i64 @test_floor_ui64(half %x) {
; RV32IZFHMIN-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32IZFHMIN-NEXT: .cfi_offset ra, -4
; RV32IZFHMIN-NEXT: fcvt.h.s fa0, fa5
-; RV32IZFHMIN-NEXT: call __fixunshfdi@plt
+; RV32IZFHMIN-NEXT: call __fixunshfdi
; RV32IZFHMIN-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32IZFHMIN-NEXT: addi sp, sp, 16
; RV32IZFHMIN-NEXT: ret
@@ -864,7 +864,7 @@ define i64 @test_floor_ui64(half %x) {
; RV32IZHINXMIN-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32IZHINXMIN-NEXT: .cfi_offset ra, -4
; RV32IZHINXMIN-NEXT: fcvt.h.s a0, a0
-; RV32IZHINXMIN-NEXT: call __fixunshfdi@plt
+; RV32IZHINXMIN-NEXT: call __fixunshfdi
; RV32IZHINXMIN-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32IZHINXMIN-NEXT: addi sp, sp, 16
; RV32IZHINXMIN-NEXT: ret
@@ -1197,7 +1197,7 @@ define i64 @test_ceil_si64(half %x) {
; RV32IZFH-NEXT: .cfi_def_cfa_offset 16
; RV32IZFH-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32IZFH-NEXT: .cfi_offset ra, -4
-; RV32IZFH-NEXT: call __fixhfdi@plt
+; RV32IZFH-NEXT: call __fixhfdi
; RV32IZFH-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32IZFH-NEXT: addi sp, sp, 16
; RV32IZFH-NEXT: ret
@@ -1223,7 +1223,7 @@ define i64 @test_ceil_si64(half %x) {
; RV32IZHINX-NEXT: .cfi_def_cfa_offset 16
; RV32IZHINX-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32IZHINX-NEXT: .cfi_offset ra, -4
-; RV32IZHINX-NEXT: call __fixhfdi@plt
+; RV32IZHINX-NEXT: call __fixhfdi
; RV32IZHINX-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32IZHINX-NEXT: addi sp, sp, 16
; RV32IZHINX-NEXT: ret
@@ -1261,7 +1261,7 @@ define i64 @test_ceil_si64(half %x) {
; RV32IZFHMIN-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32IZFHMIN-NEXT: .cfi_offset ra, -4
; RV32IZFHMIN-NEXT: fcvt.h.s fa0, fa5
-; RV32IZFHMIN-NEXT: call __fixhfdi@plt
+; RV32IZFHMIN-NEXT: call __fixhfdi
; RV32IZFHMIN-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32IZFHMIN-NEXT: addi sp, sp, 16
; RV32IZFHMIN-NEXT: ret
@@ -1301,7 +1301,7 @@ define i64 @test_ceil_si64(half %x) {
; RV32IZHINXMIN-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32IZHINXMIN-NEXT: .cfi_offset ra, -4
; RV32IZHINXMIN-NEXT: fcvt.h.s a0, a0
-; RV32IZHINXMIN-NEXT: call __fixhfdi@plt
+; RV32IZHINXMIN-NEXT: call __fixhfdi
; RV32IZHINXMIN-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32IZHINXMIN-NEXT: addi sp, sp, 16
; RV32IZHINXMIN-NEXT: ret
@@ -1634,7 +1634,7 @@ define i64 @test_ceil_ui64(half %x) {
; RV32IZFH-NEXT: .cfi_def_cfa_offset 16
; RV32IZFH-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32IZFH-NEXT: .cfi_offset ra, -4
-; RV32IZFH-NEXT: call __fixunshfdi@plt
+; RV32IZFH-NEXT: call __fixunshfdi
; RV32IZFH-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32IZFH-NEXT: addi sp, sp, 16
; RV32IZFH-NEXT: ret
@@ -1660,7 +1660,7 @@ define i64 @test_ceil_ui64(half %x) {
; RV32IZHINX-NEXT: .cfi_def_cfa_offset 16
; RV32IZHINX-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32IZHINX-NEXT: .cfi_offset ra, -4
-; RV32IZHINX-NEXT: call __fixunshfdi@plt
+; RV32IZHINX-NEXT: call __fixunshfdi
; RV32IZHINX-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32IZHINX-NEXT: addi sp, sp, 16
; RV32IZHINX-NEXT: ret
@@ -1698,7 +1698,7 @@ define i64 @test_ceil_ui64(half %x) {
; RV32IZFHMIN-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32IZFHMIN-NEXT: .cfi_offset ra, -4
; RV32IZFHMIN-NEXT: fcvt.h.s fa0, fa5
-; RV32IZFHMIN-NEXT: call __fixunshfdi@plt
+; RV32IZFHMIN-NEXT: call __fixunshfdi
; RV32IZFHMIN-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32IZFHMIN-NEXT: addi sp, sp, 16
; RV32IZFHMIN-NEXT: ret
@@ -1738,7 +1738,7 @@ define i64 @test_ceil_ui64(half %x) {
; RV32IZHINXMIN-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32IZHINXMIN-NEXT: .cfi_offset ra, -4
; RV32IZHINXMIN-NEXT: fcvt.h.s a0, a0
-; RV32IZHINXMIN-NEXT: call __fixunshfdi@plt
+; RV32IZHINXMIN-NEXT: call __fixunshfdi
; RV32IZHINXMIN-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32IZHINXMIN-NEXT: addi sp, sp, 16
; RV32IZHINXMIN-NEXT: ret
@@ -2071,7 +2071,7 @@ define i64 @test_trunc_si64(half %x) {
; RV32IZFH-NEXT: .cfi_def_cfa_offset 16
; RV32IZFH-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32IZFH-NEXT: .cfi_offset ra, -4
-; RV32IZFH-NEXT: call __fixhfdi@plt
+; RV32IZFH-NEXT: call __fixhfdi
; RV32IZFH-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32IZFH-NEXT: addi sp, sp, 16
; RV32IZFH-NEXT: ret
@@ -2097,7 +2097,7 @@ define i64 @test_trunc_si64(half %x) {
; RV32IZHINX-NEXT: .cfi_def_cfa_offset 16
; RV32IZHINX-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32IZHINX-NEXT: .cfi_offset ra, -4
-; RV32IZHINX-NEXT: call __fixhfdi@plt
+; RV32IZHINX-NEXT: call __fixhfdi
; RV32IZHINX-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32IZHINX-NEXT: addi sp, sp, 16
; RV32IZHINX-NEXT: ret
@@ -2135,7 +2135,7 @@ define i64 @test_trunc_si64(half %x) {
; RV32IZFHMIN-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32IZFHMIN-NEXT: .cfi_offset ra, -4
; RV32IZFHMIN-NEXT: fcvt.h.s fa0, fa5
-; RV32IZFHMIN-NEXT: call __fixhfdi@plt
+; RV32IZFHMIN-NEXT: call __fixhfdi
; RV32IZFHMIN-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32IZFHMIN-NEXT: addi sp, sp, 16
; RV32IZFHMIN-NEXT: ret
@@ -2175,7 +2175,7 @@ define i64 @test_trunc_si64(half %x) {
; RV32IZHINXMIN-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32IZHINXMIN-NEXT: .cfi_offset ra, -4
; RV32IZHINXMIN-NEXT: fcvt.h.s a0, a0
-; RV32IZHINXMIN-NEXT: call __fixhfdi@plt
+; RV32IZHINXMIN-NEXT: call __fixhfdi
; RV32IZHINXMIN-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32IZHINXMIN-NEXT: addi sp, sp, 16
; RV32IZHINXMIN-NEXT: ret
@@ -2508,7 +2508,7 @@ define i64 @test_trunc_ui64(half %x) {
; RV32IZFH-NEXT: .cfi_def_cfa_offset 16
; RV32IZFH-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32IZFH-NEXT: .cfi_offset ra, -4
-; RV32IZFH-NEXT: call __fixunshfdi@plt
+; RV32IZFH-NEXT: call __fixunshfdi
; RV32IZFH-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32IZFH-NEXT: addi sp, sp, 16
; RV32IZFH-NEXT: ret
@@ -2534,7 +2534,7 @@ define i64 @test_trunc_ui64(half %x) {
; RV32IZHINX-NEXT: .cfi_def_cfa_offset 16
; RV32IZHINX-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32IZHINX-NEXT: .cfi_offset ra, -4
-; RV32IZHINX-NEXT: call __fixunshfdi@plt
+; RV32IZHINX-NEXT: call __fixunshfdi
; RV32IZHINX-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32IZHINX-NEXT: addi sp, sp, 16
; RV32IZHINX-NEXT: ret
@@ -2572,7 +2572,7 @@ define i64 @test_trunc_ui64(half %x) {
; RV32IZFHMIN-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32IZFHMIN-NEXT: .cfi_offset ra, -4
; RV32IZFHMIN-NEXT: fcvt.h.s fa0, fa5
-; RV32IZFHMIN-NEXT: call __fixunshfdi@plt
+; RV32IZFHMIN-NEXT: call __fixunshfdi
; RV32IZFHMIN-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32IZFHMIN-NEXT: addi sp, sp, 16
; RV32IZFHMIN-NEXT: ret
@@ -2612,7 +2612,7 @@ define i64 @test_trunc_ui64(half %x) {
; RV32IZHINXMIN-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32IZHINXMIN-NEXT: .cfi_offset ra, -4
; RV32IZHINXMIN-NEXT: fcvt.h.s a0, a0
-; RV32IZHINXMIN-NEXT: call __fixunshfdi@plt
+; RV32IZHINXMIN-NEXT: call __fixunshfdi
; RV32IZHINXMIN-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32IZHINXMIN-NEXT: addi sp, sp, 16
; RV32IZHINXMIN-NEXT: ret
@@ -2945,7 +2945,7 @@ define i64 @test_round_si64(half %x) {
; RV32IZFH-NEXT: .cfi_def_cfa_offset 16
; RV32IZFH-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32IZFH-NEXT: .cfi_offset ra, -4
-; RV32IZFH-NEXT: call __fixhfdi@plt
+; RV32IZFH-NEXT: call __fixhfdi
; RV32IZFH-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32IZFH-NEXT: addi sp, sp, 16
; RV32IZFH-NEXT: ret
@@ -2971,7 +2971,7 @@ define i64 @test_round_si64(half %x) {
; RV32IZHINX-NEXT: .cfi_def_cfa_offset 16
; RV32IZHINX-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32IZHINX-NEXT: .cfi_offset ra, -4
-; RV32IZHINX-NEXT: call __fixhfdi@plt
+; RV32IZHINX-NEXT: call __fixhfdi
; RV32IZHINX-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32IZHINX-NEXT: addi sp, sp, 16
; RV32IZHINX-NEXT: ret
@@ -3009,7 +3009,7 @@ define i64 @test_round_si64(half %x) {
; RV32IZFHMIN-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32IZFHMIN-NEXT: .cfi_offset ra, -4
; RV32IZFHMIN-NEXT: fcvt.h.s fa0, fa5
-; RV32IZFHMIN-NEXT: call __fixhfdi@plt
+; RV32IZFHMIN-NEXT: call __fixhfdi
; RV32IZFHMIN-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32IZFHMIN-NEXT: addi sp, sp, 16
; RV32IZFHMIN-NEXT: ret
@@ -3049,7 +3049,7 @@ define i64 @test_round_si64(half %x) {
; RV32IZHINXMIN-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32IZHINXMIN-NEXT: .cfi_offset ra, -4
; RV32IZHINXMIN-NEXT: fcvt.h.s a0, a0
-; RV32IZHINXMIN-NEXT: call __fixhfdi@plt
+; RV32IZHINXMIN-NEXT: call __fixhfdi
; RV32IZHINXMIN-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32IZHINXMIN-NEXT: addi sp, sp, 16
; RV32IZHINXMIN-NEXT: ret
@@ -3382,7 +3382,7 @@ define i64 @test_round_ui64(half %x) {
; RV32IZFH-NEXT: .cfi_def_cfa_offset 16
; RV32IZFH-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32IZFH-NEXT: .cfi_offset ra, -4
-; RV32IZFH-NEXT: call __fixunshfdi@plt
+; RV32IZFH-NEXT: call __fixunshfdi
; RV32IZFH-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32IZFH-NEXT: addi sp, sp, 16
; RV32IZFH-NEXT: ret
@@ -3408,7 +3408,7 @@ define i64 @test_round_ui64(half %x) {
; RV32IZHINX-NEXT: .cfi_def_cfa_offset 16
; RV32IZHINX-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32IZHINX-NEXT: .cfi_offset ra, -4
-; RV32IZHINX-NEXT: call __fixunshfdi@plt
+; RV32IZHINX-NEXT: call __fixunshfdi
; RV32IZHINX-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32IZHINX-NEXT: addi sp, sp, 16
; RV32IZHINX-NEXT: ret
@@ -3446,7 +3446,7 @@ define i64 @test_round_ui64(half %x) {
; RV32IZFHMIN-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32IZFHMIN-NEXT: .cfi_offset ra, -4
; RV32IZFHMIN-NEXT: fcvt.h.s fa0, fa5
-; RV32IZFHMIN-NEXT: call __fixunshfdi@plt
+; RV32IZFHMIN-NEXT: call __fixunshfdi
; RV32IZFHMIN-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32IZFHMIN-NEXT: addi sp, sp, 16
; RV32IZFHMIN-NEXT: ret
@@ -3486,7 +3486,7 @@ define i64 @test_round_ui64(half %x) {
; RV32IZHINXMIN-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32IZHINXMIN-NEXT: .cfi_offset ra, -4
; RV32IZHINXMIN-NEXT: fcvt.h.s a0, a0
-; RV32IZHINXMIN-NEXT: call __fixunshfdi@plt
+; RV32IZHINXMIN-NEXT: call __fixunshfdi
; RV32IZHINXMIN-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32IZHINXMIN-NEXT: addi sp, sp, 16
; RV32IZHINXMIN-NEXT: ret
@@ -3819,7 +3819,7 @@ define i64 @test_roundeven_si64(half %x) {
; RV32IZFH-NEXT: .cfi_def_cfa_offset 16
; RV32IZFH-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32IZFH-NEXT: .cfi_offset ra, -4
-; RV32IZFH-NEXT: call __fixhfdi@plt
+; RV32IZFH-NEXT: call __fixhfdi
; RV32IZFH-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32IZFH-NEXT: addi sp, sp, 16
; RV32IZFH-NEXT: ret
@@ -3845,7 +3845,7 @@ define i64 @test_roundeven_si64(half %x) {
; RV32IZHINX-NEXT: .cfi_def_cfa_offset 16
; RV32IZHINX-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32IZHINX-NEXT: .cfi_offset ra, -4
-; RV32IZHINX-NEXT: call __fixhfdi@plt
+; RV32IZHINX-NEXT: call __fixhfdi
; RV32IZHINX-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32IZHINX-NEXT: addi sp, sp, 16
; RV32IZHINX-NEXT: ret
@@ -3883,7 +3883,7 @@ define i64 @test_roundeven_si64(half %x) {
; RV32IZFHMIN-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32IZFHMIN-NEXT: .cfi_offset ra, -4
; RV32IZFHMIN-NEXT: fcvt.h.s fa0, fa5
-; RV32IZFHMIN-NEXT: call __fixhfdi@plt
+; RV32IZFHMIN-NEXT: call __fixhfdi
; RV32IZFHMIN-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32IZFHMIN-NEXT: addi sp, sp, 16
; RV32IZFHMIN-NEXT: ret
@@ -3923,7 +3923,7 @@ define i64 @test_roundeven_si64(half %x) {
; RV32IZHINXMIN-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32IZHINXMIN-NEXT: .cfi_offset ra, -4
; RV32IZHINXMIN-NEXT: fcvt.h.s a0, a0
-; RV32IZHINXMIN-NEXT: call __fixhfdi@plt
+; RV32IZHINXMIN-NEXT: call __fixhfdi
; RV32IZHINXMIN-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32IZHINXMIN-NEXT: addi sp, sp, 16
; RV32IZHINXMIN-NEXT: ret
@@ -4256,7 +4256,7 @@ define i64 @test_roundeven_ui64(half %x) {
; RV32IZFH-NEXT: .cfi_def_cfa_offset 16
; RV32IZFH-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32IZFH-NEXT: .cfi_offset ra, -4
-; RV32IZFH-NEXT: call __fixunshfdi@plt
+; RV32IZFH-NEXT: call __fixunshfdi
; RV32IZFH-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32IZFH-NEXT: addi sp, sp, 16
; RV32IZFH-NEXT: ret
@@ -4282,7 +4282,7 @@ define i64 @test_roundeven_ui64(half %x) {
; RV32IZHINX-NEXT: .cfi_def_cfa_offset 16
; RV32IZHINX-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32IZHINX-NEXT: .cfi_offset ra, -4
-; RV32IZHINX-NEXT: call __fixunshfdi@plt
+; RV32IZHINX-NEXT: call __fixunshfdi
; RV32IZHINX-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32IZHINX-NEXT: addi sp, sp, 16
; RV32IZHINX-NEXT: ret
@@ -4320,7 +4320,7 @@ define i64 @test_roundeven_ui64(half %x) {
; RV32IZFHMIN-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32IZFHMIN-NEXT: .cfi_offset ra, -4
; RV32IZFHMIN-NEXT: fcvt.h.s fa0, fa5
-; RV32IZFHMIN-NEXT: call __fixunshfdi@plt
+; RV32IZFHMIN-NEXT: call __fixunshfdi
; RV32IZFHMIN-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32IZFHMIN-NEXT: addi sp, sp, 16
; RV32IZFHMIN-NEXT: ret
@@ -4360,7 +4360,7 @@ define i64 @test_roundeven_ui64(half %x) {
; RV32IZHINXMIN-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32IZHINXMIN-NEXT: .cfi_offset ra, -4
; RV32IZHINXMIN-NEXT: fcvt.h.s a0, a0
-; RV32IZHINXMIN-NEXT: call __fixunshfdi@plt
+; RV32IZHINXMIN-NEXT: call __fixunshfdi
; RV32IZHINXMIN-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32IZHINXMIN-NEXT: addi sp, sp, 16
; RV32IZHINXMIN-NEXT: ret
diff --git a/llvm/test/CodeGen/RISCV/hoist-global-addr-base.ll b/llvm/test/CodeGen/RISCV/hoist-global-addr-base.ll
index 55c30046366d..5f9866f08c82 100644
--- a/llvm/test/CodeGen/RISCV/hoist-global-addr-base.ll
+++ b/llvm/test/CodeGen/RISCV/hoist-global-addr-base.ll
@@ -149,7 +149,7 @@ define dso_local i32 @load_half() nounwind {
; RV32-NEXT: .LBB8_2: # %if.then
; RV32-NEXT: addi sp, sp, -16
; RV32-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32-NEXT: call abort@plt
+; RV32-NEXT: call abort
;
; RV64-LABEL: load_half:
; RV64: # %bb.0: # %entry
@@ -163,7 +163,7 @@ define dso_local i32 @load_half() nounwind {
; RV64-NEXT: .LBB8_2: # %if.then
; RV64-NEXT: addi sp, sp, -16
; RV64-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
-; RV64-NEXT: call abort@plt
+; RV64-NEXT: call abort
entry:
%0 = load i16, ptr getelementptr inbounds ([6 x i16], ptr @foo, i32 0, i32 4), align 2
%cmp = icmp eq i16 %0, 140
diff --git a/llvm/test/CodeGen/RISCV/interrupt-attr-callee.ll b/llvm/test/CodeGen/RISCV/interrupt-attr-callee.ll
index 0ead223d87ca..0c9528f291c8 100644
--- a/llvm/test/CodeGen/RISCV/interrupt-attr-callee.ll
+++ b/llvm/test/CodeGen/RISCV/interrupt-attr-callee.ll
@@ -18,14 +18,14 @@ define dso_local void @handler() nounwind {
; CHECK-RV32-NEXT: sw s0, 8(sp) # 4-byte Folded Spill
; CHECK-RV32-NEXT: lui a0, 2
; CHECK-RV32-NEXT: addi a0, a0, 4
-; CHECK-RV32-NEXT: call read@plt
+; CHECK-RV32-NEXT: call read
; CHECK-RV32-NEXT: mv s0, a0
-; CHECK-RV32-NEXT: call callee@plt
+; CHECK-RV32-NEXT: call callee
; CHECK-RV32-NEXT: mv a0, s0
; CHECK-RV32-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; CHECK-RV32-NEXT: lw s0, 8(sp) # 4-byte Folded Reload
; CHECK-RV32-NEXT: addi sp, sp, 16
-; CHECK-RV32-NEXT: tail write@plt
+; CHECK-RV32-NEXT: tail write
;
; CHECK-RV32-F-LABEL: handler:
; CHECK-RV32-F: # %bb.0: # %entry
@@ -34,14 +34,14 @@ define dso_local void @handler() nounwind {
; CHECK-RV32-F-NEXT: sw s0, 8(sp) # 4-byte Folded Spill
; CHECK-RV32-F-NEXT: lui a0, 2
; CHECK-RV32-F-NEXT: addi a0, a0, 4
-; CHECK-RV32-F-NEXT: call read@plt
+; CHECK-RV32-F-NEXT: call read
; CHECK-RV32-F-NEXT: mv s0, a0
-; CHECK-RV32-F-NEXT: call callee@plt
+; CHECK-RV32-F-NEXT: call callee
; CHECK-RV32-F-NEXT: mv a0, s0
; CHECK-RV32-F-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; CHECK-RV32-F-NEXT: lw s0, 8(sp) # 4-byte Folded Reload
; CHECK-RV32-F-NEXT: addi sp, sp, 16
-; CHECK-RV32-F-NEXT: tail write@plt
+; CHECK-RV32-F-NEXT: tail write
;
; CHECK-RV32-FD-LABEL: handler:
; CHECK-RV32-FD: # %bb.0: # %entry
@@ -50,14 +50,14 @@ define dso_local void @handler() nounwind {
; CHECK-RV32-FD-NEXT: sw s0, 8(sp) # 4-byte Folded Spill
; CHECK-RV32-FD-NEXT: lui a0, 2
; CHECK-RV32-FD-NEXT: addi a0, a0, 4
-; CHECK-RV32-FD-NEXT: call read@plt
+; CHECK-RV32-FD-NEXT: call read
; CHECK-RV32-FD-NEXT: mv s0, a0
-; CHECK-RV32-FD-NEXT: call callee@plt
+; CHECK-RV32-FD-NEXT: call callee
; CHECK-RV32-FD-NEXT: mv a0, s0
; CHECK-RV32-FD-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; CHECK-RV32-FD-NEXT: lw s0, 8(sp) # 4-byte Folded Reload
; CHECK-RV32-FD-NEXT: addi sp, sp, 16
-; CHECK-RV32-FD-NEXT: tail write@plt
+; CHECK-RV32-FD-NEXT: tail write
entry:
%call = tail call i32 @read(i32 8196)
tail call void @callee()
diff --git a/llvm/test/CodeGen/RISCV/interrupt-attr-nocall.ll b/llvm/test/CodeGen/RISCV/interrupt-attr-nocall.ll
index 7d80c34eabcb..263743d39a8e 100644
--- a/llvm/test/CodeGen/RISCV/interrupt-attr-nocall.ll
+++ b/llvm/test/CodeGen/RISCV/interrupt-attr-nocall.ll
@@ -185,7 +185,7 @@ define void @foo_float() nounwind #0 {
; CHECK-RV32-NEXT: lw a0, %lo(e)(a0)
; CHECK-RV32-NEXT: lui a1, %hi(f)
; CHECK-RV32-NEXT: lw a1, %lo(f)(a1)
-; CHECK-RV32-NEXT: call __addsf3@plt
+; CHECK-RV32-NEXT: call __addsf3
; CHECK-RV32-NEXT: lui a1, %hi(d)
; CHECK-RV32-NEXT: sw a0, %lo(d)(a1)
; CHECK-RV32-NEXT: lw ra, 60(sp) # 4-byte Folded Reload
@@ -280,7 +280,7 @@ define void @foo_fp_float() nounwind #1 {
; CHECK-RV32-NEXT: lw a0, %lo(e)(a0)
; CHECK-RV32-NEXT: lui a1, %hi(f)
; CHECK-RV32-NEXT: lw a1, %lo(f)(a1)
-; CHECK-RV32-NEXT: call __addsf3@plt
+; CHECK-RV32-NEXT: call __addsf3
; CHECK-RV32-NEXT: lui a1, %hi(d)
; CHECK-RV32-NEXT: sw a0, %lo(d)(a1)
; CHECK-RV32-NEXT: lw ra, 76(sp) # 4-byte Folded Reload
@@ -387,7 +387,7 @@ define void @foo_double() nounwind #0 {
; CHECK-RV32-NEXT: lui a3, %hi(i)
; CHECK-RV32-NEXT: lw a2, %lo(i)(a3)
; CHECK-RV32-NEXT: lw a3, %lo(i+4)(a3)
-; CHECK-RV32-NEXT: call __adddf3@plt
+; CHECK-RV32-NEXT: call __adddf3
; CHECK-RV32-NEXT: lui a2, %hi(g)
; CHECK-RV32-NEXT: sw a1, %lo(g+4)(a2)
; CHECK-RV32-NEXT: sw a0, %lo(g)(a2)
@@ -467,7 +467,7 @@ define void @foo_double() nounwind #0 {
; CHECK-RV32IF-NEXT: lui a3, %hi(i)
; CHECK-RV32IF-NEXT: lw a2, %lo(i)(a3)
; CHECK-RV32IF-NEXT: lw a3, %lo(i+4)(a3)
-; CHECK-RV32IF-NEXT: call __adddf3@plt
+; CHECK-RV32IF-NEXT: call __adddf3
; CHECK-RV32IF-NEXT: lui a2, %hi(g)
; CHECK-RV32IF-NEXT: sw a1, %lo(g+4)(a2)
; CHECK-RV32IF-NEXT: sw a0, %lo(g)(a2)
@@ -578,7 +578,7 @@ define void @foo_fp_double() nounwind #1 {
; CHECK-RV32-NEXT: lui a3, %hi(i)
; CHECK-RV32-NEXT: lw a2, %lo(i)(a3)
; CHECK-RV32-NEXT: lw a3, %lo(i+4)(a3)
-; CHECK-RV32-NEXT: call __adddf3@plt
+; CHECK-RV32-NEXT: call __adddf3
; CHECK-RV32-NEXT: lui a2, %hi(g)
; CHECK-RV32-NEXT: sw a1, %lo(g+4)(a2)
; CHECK-RV32-NEXT: sw a0, %lo(g)(a2)
@@ -661,7 +661,7 @@ define void @foo_fp_double() nounwind #1 {
; CHECK-RV32IF-NEXT: lui a3, %hi(i)
; CHECK-RV32IF-NEXT: lw a2, %lo(i)(a3)
; CHECK-RV32IF-NEXT: lw a3, %lo(i+4)(a3)
-; CHECK-RV32IF-NEXT: call __adddf3@plt
+; CHECK-RV32IF-NEXT: call __adddf3
; CHECK-RV32IF-NEXT: lui a2, %hi(g)
; CHECK-RV32IF-NEXT: sw a1, %lo(g+4)(a2)
; CHECK-RV32IF-NEXT: sw a0, %lo(g)(a2)
diff --git a/llvm/test/CodeGen/RISCV/interrupt-attr.ll b/llvm/test/CodeGen/RISCV/interrupt-attr.ll
index 5b269014b8f8..5887968042cb 100644
--- a/llvm/test/CodeGen/RISCV/interrupt-attr.ll
+++ b/llvm/test/CodeGen/RISCV/interrupt-attr.ll
@@ -63,7 +63,7 @@ define void @foo_with_call() #1 {
; CHECK-RV32-NEXT: sw t4, 8(sp) # 4-byte Folded Spill
; CHECK-RV32-NEXT: sw t5, 4(sp) # 4-byte Folded Spill
; CHECK-RV32-NEXT: sw t6, 0(sp) # 4-byte Folded Spill
-; CHECK-RV32-NEXT: call otherfoo@plt
+; CHECK-RV32-NEXT: call otherfoo
; CHECK-RV32-NEXT: lw ra, 60(sp) # 4-byte Folded Reload
; CHECK-RV32-NEXT: lw t0, 56(sp) # 4-byte Folded Reload
; CHECK-RV32-NEXT: lw t1, 52(sp) # 4-byte Folded Reload
@@ -134,7 +134,7 @@ define void @foo_with_call() #1 {
; CHECK-RV32-F-NEXT: fsw ft9, 8(sp) # 4-byte Folded Spill
; CHECK-RV32-F-NEXT: fsw ft10, 4(sp) # 4-byte Folded Spill
; CHECK-RV32-F-NEXT: fsw ft11, 0(sp) # 4-byte Folded Spill
-; CHECK-RV32-F-NEXT: call otherfoo@plt
+; CHECK-RV32-F-NEXT: call otherfoo
; CHECK-RV32-F-NEXT: lw ra, 188(sp) # 4-byte Folded Reload
; CHECK-RV32-F-NEXT: lw t0, 184(sp) # 4-byte Folded Reload
; CHECK-RV32-F-NEXT: lw t1, 180(sp) # 4-byte Folded Reload
@@ -237,7 +237,7 @@ define void @foo_with_call() #1 {
; CHECK-RV32-FD-NEXT: fsd ft9, 16(sp) # 8-byte Folded Spill
; CHECK-RV32-FD-NEXT: fsd ft10, 8(sp) # 8-byte Folded Spill
; CHECK-RV32-FD-NEXT: fsd ft11, 0(sp) # 8-byte Folded Spill
-; CHECK-RV32-FD-NEXT: call otherfoo@plt
+; CHECK-RV32-FD-NEXT: call otherfoo
; CHECK-RV32-FD-NEXT: lw ra, 316(sp) # 4-byte Folded Reload
; CHECK-RV32-FD-NEXT: lw t0, 312(sp) # 4-byte Folded Reload
; CHECK-RV32-FD-NEXT: lw t1, 308(sp) # 4-byte Folded Reload
@@ -308,7 +308,7 @@ define void @foo_with_call() #1 {
; CHECK-RV64-NEXT: sd t4, 16(sp) # 8-byte Folded Spill
; CHECK-RV64-NEXT: sd t5, 8(sp) # 8-byte Folded Spill
; CHECK-RV64-NEXT: sd t6, 0(sp) # 8-byte Folded Spill
-; CHECK-RV64-NEXT: call otherfoo@plt
+; CHECK-RV64-NEXT: call otherfoo
; CHECK-RV64-NEXT: ld ra, 120(sp) # 8-byte Folded Reload
; CHECK-RV64-NEXT: ld t0, 112(sp) # 8-byte Folded Reload
; CHECK-RV64-NEXT: ld t1, 104(sp) # 8-byte Folded Reload
@@ -379,7 +379,7 @@ define void @foo_with_call() #1 {
; CHECK-RV64-F-NEXT: fsw ft9, 8(sp) # 4-byte Folded Spill
; CHECK-RV64-F-NEXT: fsw ft10, 4(sp) # 4-byte Folded Spill
; CHECK-RV64-F-NEXT: fsw ft11, 0(sp) # 4-byte Folded Spill
-; CHECK-RV64-F-NEXT: call otherfoo@plt
+; CHECK-RV64-F-NEXT: call otherfoo
; CHECK-RV64-F-NEXT: ld ra, 248(sp) # 8-byte Folded Reload
; CHECK-RV64-F-NEXT: ld t0, 240(sp) # 8-byte Folded Reload
; CHECK-RV64-F-NEXT: ld t1, 232(sp) # 8-byte Folded Reload
@@ -482,7 +482,7 @@ define void @foo_with_call() #1 {
; CHECK-RV64-FD-NEXT: fsd ft9, 16(sp) # 8-byte Folded Spill
; CHECK-RV64-FD-NEXT: fsd ft10, 8(sp) # 8-byte Folded Spill
; CHECK-RV64-FD-NEXT: fsd ft11, 0(sp) # 8-byte Folded Spill
-; CHECK-RV64-FD-NEXT: call otherfoo@plt
+; CHECK-RV64-FD-NEXT: call otherfoo
; CHECK-RV64-FD-NEXT: ld ra, 376(sp) # 8-byte Folded Reload
; CHECK-RV64-FD-NEXT: ld t0, 368(sp) # 8-byte Folded Reload
; CHECK-RV64-FD-NEXT: ld t1, 360(sp) # 8-byte Folded Reload
@@ -563,7 +563,7 @@ define void @foo_fp_with_call() #2 {
; CHECK-RV32-NEXT: sw t5, 16(sp) # 4-byte Folded Spill
; CHECK-RV32-NEXT: sw t6, 12(sp) # 4-byte Folded Spill
; CHECK-RV32-NEXT: addi s0, sp, 80
-; CHECK-RV32-NEXT: call otherfoo@plt
+; CHECK-RV32-NEXT: call otherfoo
; CHECK-RV32-NEXT: lw ra, 76(sp) # 4-byte Folded Reload
; CHECK-RV32-NEXT: lw t0, 72(sp) # 4-byte Folded Reload
; CHECK-RV32-NEXT: lw t1, 68(sp) # 4-byte Folded Reload
@@ -637,7 +637,7 @@ define void @foo_fp_with_call() #2 {
; CHECK-RV32-F-NEXT: fsw ft10, 16(sp) # 4-byte Folded Spill
; CHECK-RV32-F-NEXT: fsw ft11, 12(sp) # 4-byte Folded Spill
; CHECK-RV32-F-NEXT: addi s0, sp, 208
-; CHECK-RV32-F-NEXT: call otherfoo@plt
+; CHECK-RV32-F-NEXT: call otherfoo
; CHECK-RV32-F-NEXT: lw ra, 204(sp) # 4-byte Folded Reload
; CHECK-RV32-F-NEXT: lw t0, 200(sp) # 4-byte Folded Reload
; CHECK-RV32-F-NEXT: lw t1, 196(sp) # 4-byte Folded Reload
@@ -743,7 +743,7 @@ define void @foo_fp_with_call() #2 {
; CHECK-RV32-FD-NEXT: fsd ft10, 16(sp) # 8-byte Folded Spill
; CHECK-RV32-FD-NEXT: fsd ft11, 8(sp) # 8-byte Folded Spill
; CHECK-RV32-FD-NEXT: addi s0, sp, 336
-; CHECK-RV32-FD-NEXT: call otherfoo@plt
+; CHECK-RV32-FD-NEXT: call otherfoo
; CHECK-RV32-FD-NEXT: lw ra, 332(sp) # 4-byte Folded Reload
; CHECK-RV32-FD-NEXT: lw t0, 328(sp) # 4-byte Folded Reload
; CHECK-RV32-FD-NEXT: lw t1, 324(sp) # 4-byte Folded Reload
@@ -817,7 +817,7 @@ define void @foo_fp_with_call() #2 {
; CHECK-RV64-NEXT: sd t5, 16(sp) # 8-byte Folded Spill
; CHECK-RV64-NEXT: sd t6, 8(sp) # 8-byte Folded Spill
; CHECK-RV64-NEXT: addi s0, sp, 144
-; CHECK-RV64-NEXT: call otherfoo@plt
+; CHECK-RV64-NEXT: call otherfoo
; CHECK-RV64-NEXT: ld ra, 136(sp) # 8-byte Folded Reload
; CHECK-RV64-NEXT: ld t0, 128(sp) # 8-byte Folded Reload
; CHECK-RV64-NEXT: ld t1, 120(sp) # 8-byte Folded Reload
@@ -891,7 +891,7 @@ define void @foo_fp_with_call() #2 {
; CHECK-RV64-F-NEXT: fsw ft10, 12(sp) # 4-byte Folded Spill
; CHECK-RV64-F-NEXT: fsw ft11, 8(sp) # 4-byte Folded Spill
; CHECK-RV64-F-NEXT: addi s0, sp, 272
-; CHECK-RV64-F-NEXT: call otherfoo@plt
+; CHECK-RV64-F-NEXT: call otherfoo
; CHECK-RV64-F-NEXT: ld ra, 264(sp) # 8-byte Folded Reload
; CHECK-RV64-F-NEXT: ld t0, 256(sp) # 8-byte Folded Reload
; CHECK-RV64-F-NEXT: ld t1, 248(sp) # 8-byte Folded Reload
@@ -997,7 +997,7 @@ define void @foo_fp_with_call() #2 {
; CHECK-RV64-FD-NEXT: fsd ft10, 16(sp) # 8-byte Folded Spill
; CHECK-RV64-FD-NEXT: fsd ft11, 8(sp) # 8-byte Folded Spill
; CHECK-RV64-FD-NEXT: addi s0, sp, 400
-; CHECK-RV64-FD-NEXT: call otherfoo@plt
+; CHECK-RV64-FD-NEXT: call otherfoo
; CHECK-RV64-FD-NEXT: ld ra, 392(sp) # 8-byte Folded Reload
; CHECK-RV64-FD-NEXT: ld t0, 384(sp) # 8-byte Folded Reload
; CHECK-RV64-FD-NEXT: ld t1, 376(sp) # 8-byte Folded Reload
diff --git a/llvm/test/CodeGen/RISCV/intrinsic-cttz-elts-vscale.ll b/llvm/test/CodeGen/RISCV/intrinsic-cttz-elts-vscale.ll
index 60f72c41e836..bafa92e06834 100644
--- a/llvm/test/CodeGen/RISCV/intrinsic-cttz-elts-vscale.ll
+++ b/llvm/test/CodeGen/RISCV/intrinsic-cttz-elts-vscale.ll
@@ -74,7 +74,7 @@ define i64 @ctz_nxv8i1_no_range(<vscale x 8 x i16> %a) {
; RV32-NEXT: li a2, 8
; RV32-NEXT: li a1, 0
; RV32-NEXT: li a3, 0
-; RV32-NEXT: call __muldi3@plt
+; RV32-NEXT: call __muldi3
; RV32-NEXT: sw a1, 20(sp)
; RV32-NEXT: sw a0, 16(sp)
; RV32-NEXT: addi a2, sp, 16
diff --git a/llvm/test/CodeGen/RISCV/libcall-tail-calls.ll b/llvm/test/CodeGen/RISCV/libcall-tail-calls.ll
index 043d48918e3b..541fb3774257 100644
--- a/llvm/test/CodeGen/RISCV/libcall-tail-calls.ll
+++ b/llvm/test/CodeGen/RISCV/libcall-tail-calls.ll
@@ -28,7 +28,7 @@ define zeroext i8 @udiv8(i8 zeroext %a, i8 zeroext %b) nounwind {
; RV32-ALL: # %bb.0:
; RV32-ALL-NEXT: addi sp, sp, -16
; RV32-ALL-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32-ALL-NEXT: call __udivsi3@plt
+; RV32-ALL-NEXT: call __udivsi3
; RV32-ALL-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32-ALL-NEXT: addi sp, sp, 16
; RV32-ALL-NEXT: ret
@@ -37,7 +37,7 @@ define zeroext i8 @udiv8(i8 zeroext %a, i8 zeroext %b) nounwind {
; RV64-ALL: # %bb.0:
; RV64-ALL-NEXT: addi sp, sp, -16
; RV64-ALL-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
-; RV64-ALL-NEXT: call __udivdi3@plt
+; RV64-ALL-NEXT: call __udivdi3
; RV64-ALL-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64-ALL-NEXT: addi sp, sp, 16
; RV64-ALL-NEXT: ret
@@ -50,7 +50,7 @@ define signext i16 @sdiv16(i16 signext %a, i16 signext %b) nounwind {
; RV32-ALL: # %bb.0:
; RV32-ALL-NEXT: addi sp, sp, -16
; RV32-ALL-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32-ALL-NEXT: call __divsi3@plt
+; RV32-ALL-NEXT: call __divsi3
; RV32-ALL-NEXT: slli a0, a0, 16
; RV32-ALL-NEXT: srai a0, a0, 16
; RV32-ALL-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
@@ -61,7 +61,7 @@ define signext i16 @sdiv16(i16 signext %a, i16 signext %b) nounwind {
; RV64-ALL: # %bb.0:
; RV64-ALL-NEXT: addi sp, sp, -16
; RV64-ALL-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
-; RV64-ALL-NEXT: call __divdi3@plt
+; RV64-ALL-NEXT: call __divdi3
; RV64-ALL-NEXT: slli a0, a0, 48
; RV64-ALL-NEXT: srai a0, a0, 48
; RV64-ALL-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
@@ -76,7 +76,7 @@ define signext i32 @mul32(i32 %a, i32 %b) nounwind {
; RV32-ALL: # %bb.0:
; RV32-ALL-NEXT: addi sp, sp, -16
; RV32-ALL-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32-ALL-NEXT: call __mulsi3@plt
+; RV32-ALL-NEXT: call __mulsi3
; RV32-ALL-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32-ALL-NEXT: addi sp, sp, 16
; RV32-ALL-NEXT: ret
@@ -85,7 +85,7 @@ define signext i32 @mul32(i32 %a, i32 %b) nounwind {
; RV64-ALL: # %bb.0:
; RV64-ALL-NEXT: addi sp, sp, -16
; RV64-ALL-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
-; RV64-ALL-NEXT: call __muldi3@plt
+; RV64-ALL-NEXT: call __muldi3
; RV64-ALL-NEXT: sext.w a0, a0
; RV64-ALL-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64-ALL-NEXT: addi sp, sp, 16
@@ -99,14 +99,14 @@ define i64 @mul64(i64 %a, i64 %b) nounwind {
; RV32-ALL: # %bb.0:
; RV32-ALL-NEXT: addi sp, sp, -16
; RV32-ALL-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32-ALL-NEXT: call __muldi3@plt
+; RV32-ALL-NEXT: call __muldi3
; RV32-ALL-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32-ALL-NEXT: addi sp, sp, 16
; RV32-ALL-NEXT: ret
;
; RV64-ALL-LABEL: mul64:
; RV64-ALL: # %bb.0:
-; RV64-ALL-NEXT: tail __muldi3@plt
+; RV64-ALL-NEXT: tail __muldi3
%1 = mul i64 %a, %b
ret i64 %1
}
@@ -120,9 +120,9 @@ define half @sin_f16(half %a) nounwind {
; RV32IFD-ILP32D: # %bb.0:
; RV32IFD-ILP32D-NEXT: addi sp, sp, -16
; RV32IFD-ILP32D-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IFD-ILP32D-NEXT: call __extendhfsf2@plt
-; RV32IFD-ILP32D-NEXT: call sinf@plt
-; RV32IFD-ILP32D-NEXT: call __truncsfhf2@plt
+; RV32IFD-ILP32D-NEXT: call __extendhfsf2
+; RV32IFD-ILP32D-NEXT: call sinf
+; RV32IFD-ILP32D-NEXT: call __truncsfhf2
; RV32IFD-ILP32D-NEXT: fmv.x.w a0, fa0
; RV32IFD-ILP32D-NEXT: lui a1, 1048560
; RV32IFD-ILP32D-NEXT: or a0, a0, a1
@@ -135,9 +135,9 @@ define half @sin_f16(half %a) nounwind {
; RV32IF-ILP32F: # %bb.0:
; RV32IF-ILP32F-NEXT: addi sp, sp, -16
; RV32IF-ILP32F-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IF-ILP32F-NEXT: call __extendhfsf2@plt
-; RV32IF-ILP32F-NEXT: call sinf@plt
-; RV32IF-ILP32F-NEXT: call __truncsfhf2@plt
+; RV32IF-ILP32F-NEXT: call __extendhfsf2
+; RV32IF-ILP32F-NEXT: call sinf
+; RV32IF-ILP32F-NEXT: call __truncsfhf2
; RV32IF-ILP32F-NEXT: fmv.x.w a0, fa0
; RV32IF-ILP32F-NEXT: lui a1, 1048560
; RV32IF-ILP32F-NEXT: or a0, a0, a1
@@ -150,9 +150,9 @@ define half @sin_f16(half %a) nounwind {
; RV32IFD-ILP32: # %bb.0:
; RV32IFD-ILP32-NEXT: addi sp, sp, -16
; RV32IFD-ILP32-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IFD-ILP32-NEXT: call __extendhfsf2@plt
-; RV32IFD-ILP32-NEXT: call sinf@plt
-; RV32IFD-ILP32-NEXT: call __truncsfhf2@plt
+; RV32IFD-ILP32-NEXT: call __extendhfsf2
+; RV32IFD-ILP32-NEXT: call sinf
+; RV32IFD-ILP32-NEXT: call __truncsfhf2
; RV32IFD-ILP32-NEXT: lui a1, 1048560
; RV32IFD-ILP32-NEXT: or a0, a0, a1
; RV32IFD-ILP32-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
@@ -165,9 +165,9 @@ define half @sin_f16(half %a) nounwind {
; RV32I-ILP32-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-ILP32-NEXT: slli a0, a0, 16
; RV32I-ILP32-NEXT: srli a0, a0, 16
-; RV32I-ILP32-NEXT: call __extendhfsf2@plt
-; RV32I-ILP32-NEXT: call sinf@plt
-; RV32I-ILP32-NEXT: call __truncsfhf2@plt
+; RV32I-ILP32-NEXT: call __extendhfsf2
+; RV32I-ILP32-NEXT: call sinf
+; RV32I-ILP32-NEXT: call __truncsfhf2
; RV32I-ILP32-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-ILP32-NEXT: addi sp, sp, 16
; RV32I-ILP32-NEXT: ret
@@ -176,9 +176,9 @@ define half @sin_f16(half %a) nounwind {
; RV64IFD-LP64D: # %bb.0:
; RV64IFD-LP64D-NEXT: addi sp, sp, -16
; RV64IFD-LP64D-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
-; RV64IFD-LP64D-NEXT: call __extendhfsf2@plt
-; RV64IFD-LP64D-NEXT: call sinf@plt
-; RV64IFD-LP64D-NEXT: call __truncsfhf2@plt
+; RV64IFD-LP64D-NEXT: call __extendhfsf2
+; RV64IFD-LP64D-NEXT: call sinf
+; RV64IFD-LP64D-NEXT: call __truncsfhf2
; RV64IFD-LP64D-NEXT: fmv.x.w a0, fa0
; RV64IFD-LP64D-NEXT: lui a1, 1048560
; RV64IFD-LP64D-NEXT: or a0, a0, a1
@@ -191,9 +191,9 @@ define half @sin_f16(half %a) nounwind {
; RV64IF-LP64F: # %bb.0:
; RV64IF-LP64F-NEXT: addi sp, sp, -16
; RV64IF-LP64F-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
-; RV64IF-LP64F-NEXT: call __extendhfsf2@plt
-; RV64IF-LP64F-NEXT: call sinf@plt
-; RV64IF-LP64F-NEXT: call __truncsfhf2@plt
+; RV64IF-LP64F-NEXT: call __extendhfsf2
+; RV64IF-LP64F-NEXT: call sinf
+; RV64IF-LP64F-NEXT: call __truncsfhf2
; RV64IF-LP64F-NEXT: fmv.x.w a0, fa0
; RV64IF-LP64F-NEXT: lui a1, 1048560
; RV64IF-LP64F-NEXT: or a0, a0, a1
@@ -206,9 +206,9 @@ define half @sin_f16(half %a) nounwind {
; RV64IFD-LP64: # %bb.0:
; RV64IFD-LP64-NEXT: addi sp, sp, -16
; RV64IFD-LP64-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
-; RV64IFD-LP64-NEXT: call __extendhfsf2@plt
-; RV64IFD-LP64-NEXT: call sinf@plt
-; RV64IFD-LP64-NEXT: call __truncsfhf2@plt
+; RV64IFD-LP64-NEXT: call __extendhfsf2
+; RV64IFD-LP64-NEXT: call sinf
+; RV64IFD-LP64-NEXT: call __truncsfhf2
; RV64IFD-LP64-NEXT: lui a1, 1048560
; RV64IFD-LP64-NEXT: or a0, a0, a1
; RV64IFD-LP64-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
@@ -221,9 +221,9 @@ define half @sin_f16(half %a) nounwind {
; RV64I-LP64-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-LP64-NEXT: slli a0, a0, 48
; RV64I-LP64-NEXT: srli a0, a0, 48
-; RV64I-LP64-NEXT: call __extendhfsf2@plt
-; RV64I-LP64-NEXT: call sinf@plt
-; RV64I-LP64-NEXT: call __truncsfhf2@plt
+; RV64I-LP64-NEXT: call __extendhfsf2
+; RV64I-LP64-NEXT: call sinf
+; RV64I-LP64-NEXT: call __truncsfhf2
; RV64I-LP64-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-LP64-NEXT: addi sp, sp, 16
; RV64I-LP64-NEXT: ret
@@ -238,17 +238,17 @@ declare float @llvm.sin.f32(float)
define float @sin_f32(float %a) nounwind {
; F-ABI-ALL-LABEL: sin_f32:
; F-ABI-ALL: # %bb.0:
-; F-ABI-ALL-NEXT: tail sinf@plt
+; F-ABI-ALL-NEXT: tail sinf
;
; RV32IFD-ILP32-LABEL: sin_f32:
; RV32IFD-ILP32: # %bb.0:
-; RV32IFD-ILP32-NEXT: tail sinf@plt
+; RV32IFD-ILP32-NEXT: tail sinf
;
; RV32I-ILP32-LABEL: sin_f32:
; RV32I-ILP32: # %bb.0:
; RV32I-ILP32-NEXT: addi sp, sp, -16
; RV32I-ILP32-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-ILP32-NEXT: call sinf@plt
+; RV32I-ILP32-NEXT: call sinf
; RV32I-ILP32-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-ILP32-NEXT: addi sp, sp, 16
; RV32I-ILP32-NEXT: ret
@@ -257,7 +257,7 @@ define float @sin_f32(float %a) nounwind {
; RV64-LP64-ALL: # %bb.0:
; RV64-LP64-ALL-NEXT: addi sp, sp, -16
; RV64-LP64-ALL-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
-; RV64-LP64-ALL-NEXT: call sinf@plt
+; RV64-LP64-ALL-NEXT: call sinf
; RV64-LP64-ALL-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64-LP64-ALL-NEXT: addi sp, sp, 16
; RV64-LP64-ALL-NEXT: ret
@@ -270,21 +270,21 @@ declare float @llvm.powi.f32.i32(float, i32)
define float @powi_f32(float %a, i32 %b) nounwind {
; RV32IFD-ILP32D-LABEL: powi_f32:
; RV32IFD-ILP32D: # %bb.0:
-; RV32IFD-ILP32D-NEXT: tail __powisf2@plt
+; RV32IFD-ILP32D-NEXT: tail __powisf2
;
; RV32IF-ILP32F-LABEL: powi_f32:
; RV32IF-ILP32F: # %bb.0:
-; RV32IF-ILP32F-NEXT: tail __powisf2@plt
+; RV32IF-ILP32F-NEXT: tail __powisf2
;
; RV32IFD-ILP32-LABEL: powi_f32:
; RV32IFD-ILP32: # %bb.0:
-; RV32IFD-ILP32-NEXT: tail __powisf2@plt
+; RV32IFD-ILP32-NEXT: tail __powisf2
;
; RV32I-ILP32-LABEL: powi_f32:
; RV32I-ILP32: # %bb.0:
; RV32I-ILP32-NEXT: addi sp, sp, -16
; RV32I-ILP32-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-ILP32-NEXT: call __powisf2@plt
+; RV32I-ILP32-NEXT: call __powisf2
; RV32I-ILP32-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-ILP32-NEXT: addi sp, sp, 16
; RV32I-ILP32-NEXT: ret
@@ -294,7 +294,7 @@ define float @powi_f32(float %a, i32 %b) nounwind {
; RV64IFD-LP64D-NEXT: addi sp, sp, -16
; RV64IFD-LP64D-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64IFD-LP64D-NEXT: sext.w a0, a0
-; RV64IFD-LP64D-NEXT: call __powisf2@plt
+; RV64IFD-LP64D-NEXT: call __powisf2
; RV64IFD-LP64D-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64IFD-LP64D-NEXT: addi sp, sp, 16
; RV64IFD-LP64D-NEXT: ret
@@ -304,7 +304,7 @@ define float @powi_f32(float %a, i32 %b) nounwind {
; RV64IF-LP64F-NEXT: addi sp, sp, -16
; RV64IF-LP64F-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64IF-LP64F-NEXT: sext.w a0, a0
-; RV64IF-LP64F-NEXT: call __powisf2@plt
+; RV64IF-LP64F-NEXT: call __powisf2
; RV64IF-LP64F-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64IF-LP64F-NEXT: addi sp, sp, 16
; RV64IF-LP64F-NEXT: ret
@@ -314,7 +314,7 @@ define float @powi_f32(float %a, i32 %b) nounwind {
; RV64-LP64-ALL-NEXT: addi sp, sp, -16
; RV64-LP64-ALL-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64-LP64-ALL-NEXT: sext.w a1, a1
-; RV64-LP64-ALL-NEXT: call __powisf2@plt
+; RV64-LP64-ALL-NEXT: call __powisf2
; RV64-LP64-ALL-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64-LP64-ALL-NEXT: addi sp, sp, 16
; RV64-LP64-ALL-NEXT: ret
@@ -329,7 +329,7 @@ define i64 @llround_f32(float %a) nounwind {
; RV32-ALL: # %bb.0:
; RV32-ALL-NEXT: addi sp, sp, -16
; RV32-ALL-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32-ALL-NEXT: call llroundf@plt
+; RV32-ALL-NEXT: call llroundf
; RV32-ALL-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32-ALL-NEXT: addi sp, sp, 16
; RV32-ALL-NEXT: ret
@@ -354,7 +354,7 @@ define i64 @llround_f32(float %a) nounwind {
; RV64I-LP64: # %bb.0:
; RV64I-LP64-NEXT: addi sp, sp, -16
; RV64I-LP64-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-LP64-NEXT: call llroundf@plt
+; RV64I-LP64-NEXT: call llroundf
; RV64I-LP64-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-LP64-NEXT: addi sp, sp, 16
; RV64I-LP64-NEXT: ret
@@ -369,13 +369,13 @@ declare double @llvm.sin.f64(double)
define double @sin_f64(double %a) nounwind {
; D-ABI-ALL-LABEL: sin_f64:
; D-ABI-ALL: # %bb.0:
-; D-ABI-ALL-NEXT: tail sin@plt
+; D-ABI-ALL-NEXT: tail sin
;
; RV32IF-ILP32F-LABEL: sin_f64:
; RV32IF-ILP32F: # %bb.0:
; RV32IF-ILP32F-NEXT: addi sp, sp, -16
; RV32IF-ILP32F-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IF-ILP32F-NEXT: call sin@plt
+; RV32IF-ILP32F-NEXT: call sin
; RV32IF-ILP32F-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32IF-ILP32F-NEXT: addi sp, sp, 16
; RV32IF-ILP32F-NEXT: ret
@@ -384,7 +384,7 @@ define double @sin_f64(double %a) nounwind {
; RV32-ILP32-ALL: # %bb.0:
; RV32-ILP32-ALL-NEXT: addi sp, sp, -16
; RV32-ILP32-ALL-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32-ILP32-ALL-NEXT: call sin@plt
+; RV32-ILP32-ALL-NEXT: call sin
; RV32-ILP32-ALL-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32-ILP32-ALL-NEXT: addi sp, sp, 16
; RV32-ILP32-ALL-NEXT: ret
@@ -393,20 +393,20 @@ define double @sin_f64(double %a) nounwind {
; RV64IF-LP64F: # %bb.0:
; RV64IF-LP64F-NEXT: addi sp, sp, -16
; RV64IF-LP64F-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
-; RV64IF-LP64F-NEXT: call sin@plt
+; RV64IF-LP64F-NEXT: call sin
; RV64IF-LP64F-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64IF-LP64F-NEXT: addi sp, sp, 16
; RV64IF-LP64F-NEXT: ret
;
; RV64IFD-LP64-LABEL: sin_f64:
; RV64IFD-LP64: # %bb.0:
-; RV64IFD-LP64-NEXT: tail sin@plt
+; RV64IFD-LP64-NEXT: tail sin
;
; RV64I-LP64-LABEL: sin_f64:
; RV64I-LP64: # %bb.0:
; RV64I-LP64-NEXT: addi sp, sp, -16
; RV64I-LP64-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-LP64-NEXT: call sin@plt
+; RV64I-LP64-NEXT: call sin
; RV64I-LP64-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-LP64-NEXT: addi sp, sp, 16
; RV64I-LP64-NEXT: ret
@@ -419,13 +419,13 @@ declare double @llvm.powi.f64.i32(double, i32)
define double @powi_f64(double %a, i32 %b) nounwind {
; RV32IFD-ILP32D-LABEL: powi_f64:
; RV32IFD-ILP32D: # %bb.0:
-; RV32IFD-ILP32D-NEXT: tail __powidf2@plt
+; RV32IFD-ILP32D-NEXT: tail __powidf2
;
; RV32IF-ILP32F-LABEL: powi_f64:
; RV32IF-ILP32F: # %bb.0:
; RV32IF-ILP32F-NEXT: addi sp, sp, -16
; RV32IF-ILP32F-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IF-ILP32F-NEXT: call __powidf2@plt
+; RV32IF-ILP32F-NEXT: call __powidf2
; RV32IF-ILP32F-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32IF-ILP32F-NEXT: addi sp, sp, 16
; RV32IF-ILP32F-NEXT: ret
@@ -434,7 +434,7 @@ define double @powi_f64(double %a, i32 %b) nounwind {
; RV32-ILP32-ALL: # %bb.0:
; RV32-ILP32-ALL-NEXT: addi sp, sp, -16
; RV32-ILP32-ALL-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32-ILP32-ALL-NEXT: call __powidf2@plt
+; RV32-ILP32-ALL-NEXT: call __powidf2
; RV32-ILP32-ALL-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32-ILP32-ALL-NEXT: addi sp, sp, 16
; RV32-ILP32-ALL-NEXT: ret
@@ -444,7 +444,7 @@ define double @powi_f64(double %a, i32 %b) nounwind {
; RV64IFD-LP64D-NEXT: addi sp, sp, -16
; RV64IFD-LP64D-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64IFD-LP64D-NEXT: sext.w a0, a0
-; RV64IFD-LP64D-NEXT: call __powidf2@plt
+; RV64IFD-LP64D-NEXT: call __powidf2
; RV64IFD-LP64D-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64IFD-LP64D-NEXT: addi sp, sp, 16
; RV64IFD-LP64D-NEXT: ret
@@ -454,7 +454,7 @@ define double @powi_f64(double %a, i32 %b) nounwind {
; RV64IF-LP64F-NEXT: addi sp, sp, -16
; RV64IF-LP64F-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64IF-LP64F-NEXT: sext.w a1, a1
-; RV64IF-LP64F-NEXT: call __powidf2@plt
+; RV64IF-LP64F-NEXT: call __powidf2
; RV64IF-LP64F-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64IF-LP64F-NEXT: addi sp, sp, 16
; RV64IF-LP64F-NEXT: ret
@@ -464,7 +464,7 @@ define double @powi_f64(double %a, i32 %b) nounwind {
; RV64-LP64-ALL-NEXT: addi sp, sp, -16
; RV64-LP64-ALL-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64-LP64-ALL-NEXT: sext.w a1, a1
-; RV64-LP64-ALL-NEXT: call __powidf2@plt
+; RV64-LP64-ALL-NEXT: call __powidf2
; RV64-LP64-ALL-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64-LP64-ALL-NEXT: addi sp, sp, 16
; RV64-LP64-ALL-NEXT: ret
@@ -479,7 +479,7 @@ define i64 @llround_f64(double %a) nounwind {
; RV32-ALL: # %bb.0:
; RV32-ALL-NEXT: addi sp, sp, -16
; RV32-ALL-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32-ALL-NEXT: call llround@plt
+; RV32-ALL-NEXT: call llround
; RV32-ALL-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32-ALL-NEXT: addi sp, sp, 16
; RV32-ALL-NEXT: ret
@@ -493,7 +493,7 @@ define i64 @llround_f64(double %a) nounwind {
; RV64IF-LP64F: # %bb.0:
; RV64IF-LP64F-NEXT: addi sp, sp, -16
; RV64IF-LP64F-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
-; RV64IF-LP64F-NEXT: call llround@plt
+; RV64IF-LP64F-NEXT: call llround
; RV64IF-LP64F-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64IF-LP64F-NEXT: addi sp, sp, 16
; RV64IF-LP64F-NEXT: ret
@@ -508,7 +508,7 @@ define i64 @llround_f64(double %a) nounwind {
; RV64I-LP64: # %bb.0:
; RV64I-LP64-NEXT: addi sp, sp, -16
; RV64I-LP64-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-LP64-NEXT: call llround@plt
+; RV64I-LP64-NEXT: call llround
; RV64I-LP64-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-LP64-NEXT: addi sp, sp, 16
; RV64I-LP64-NEXT: ret
@@ -524,7 +524,7 @@ define i8 @atomic_load_i8_unordered(ptr %a) nounwind {
; RV32-ALL-NEXT: addi sp, sp, -16
; RV32-ALL-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32-ALL-NEXT: li a1, 0
-; RV32-ALL-NEXT: call __atomic_load_1@plt
+; RV32-ALL-NEXT: call __atomic_load_1
; RV32-ALL-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32-ALL-NEXT: addi sp, sp, 16
; RV32-ALL-NEXT: ret
@@ -534,7 +534,7 @@ define i8 @atomic_load_i8_unordered(ptr %a) nounwind {
; RV64-ALL-NEXT: addi sp, sp, -16
; RV64-ALL-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64-ALL-NEXT: li a1, 0
-; RV64-ALL-NEXT: call __atomic_load_1@plt
+; RV64-ALL-NEXT: call __atomic_load_1
; RV64-ALL-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64-ALL-NEXT: addi sp, sp, 16
; RV64-ALL-NEXT: ret
@@ -548,7 +548,7 @@ define i16 @atomicrmw_add_i16_release(ptr %a, i16 %b) nounwind {
; RV32-ALL-NEXT: addi sp, sp, -16
; RV32-ALL-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32-ALL-NEXT: li a2, 3
-; RV32-ALL-NEXT: call __atomic_fetch_add_2@plt
+; RV32-ALL-NEXT: call __atomic_fetch_add_2
; RV32-ALL-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32-ALL-NEXT: addi sp, sp, 16
; RV32-ALL-NEXT: ret
@@ -558,7 +558,7 @@ define i16 @atomicrmw_add_i16_release(ptr %a, i16 %b) nounwind {
; RV64-ALL-NEXT: addi sp, sp, -16
; RV64-ALL-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64-ALL-NEXT: li a2, 3
-; RV64-ALL-NEXT: call __atomic_fetch_add_2@plt
+; RV64-ALL-NEXT: call __atomic_fetch_add_2
; RV64-ALL-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64-ALL-NEXT: addi sp, sp, 16
; RV64-ALL-NEXT: ret
@@ -572,7 +572,7 @@ define i32 @atomicrmw_xor_i32_acq_rel(ptr %a, i32 %b) nounwind {
; RV32-ALL-NEXT: addi sp, sp, -16
; RV32-ALL-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32-ALL-NEXT: li a2, 4
-; RV32-ALL-NEXT: call __atomic_fetch_xor_4@plt
+; RV32-ALL-NEXT: call __atomic_fetch_xor_4
; RV32-ALL-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32-ALL-NEXT: addi sp, sp, 16
; RV32-ALL-NEXT: ret
@@ -582,7 +582,7 @@ define i32 @atomicrmw_xor_i32_acq_rel(ptr %a, i32 %b) nounwind {
; RV64-ALL-NEXT: addi sp, sp, -16
; RV64-ALL-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64-ALL-NEXT: li a2, 4
-; RV64-ALL-NEXT: call __atomic_fetch_xor_4@plt
+; RV64-ALL-NEXT: call __atomic_fetch_xor_4
; RV64-ALL-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64-ALL-NEXT: addi sp, sp, 16
; RV64-ALL-NEXT: ret
@@ -596,7 +596,7 @@ define i64 @atomicrmw_nand_i64_seq_cst(ptr %a, i64 %b) nounwind {
; RV32-ALL-NEXT: addi sp, sp, -16
; RV32-ALL-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32-ALL-NEXT: li a3, 5
-; RV32-ALL-NEXT: call __atomic_fetch_nand_8@plt
+; RV32-ALL-NEXT: call __atomic_fetch_nand_8
; RV32-ALL-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32-ALL-NEXT: addi sp, sp, 16
; RV32-ALL-NEXT: ret
@@ -606,7 +606,7 @@ define i64 @atomicrmw_nand_i64_seq_cst(ptr %a, i64 %b) nounwind {
; RV64-ALL-NEXT: addi sp, sp, -16
; RV64-ALL-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64-ALL-NEXT: li a2, 5
-; RV64-ALL-NEXT: call __atomic_fetch_nand_8@plt
+; RV64-ALL-NEXT: call __atomic_fetch_nand_8
; RV64-ALL-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64-ALL-NEXT: addi sp, sp, 16
; RV64-ALL-NEXT: ret
diff --git a/llvm/test/CodeGen/RISCV/live-sp.mir b/llvm/test/CodeGen/RISCV/live-sp.mir
index 5df6a79755cb..df72b1ddfebc 100644
--- a/llvm/test/CodeGen/RISCV/live-sp.mir
+++ b/llvm/test/CodeGen/RISCV/live-sp.mir
@@ -79,7 +79,7 @@ body: |
; CHECK-NEXT: SW renamable $x1, $x2, 4 :: (store (s32) into %ir.a)
; CHECK-NEXT: renamable $x11 = ADDIW killed renamable $x1, 0
; CHECK-NEXT: $x10 = COPY $x0
- ; CHECK-NEXT: PseudoCALL target-flags(riscv-plt) @vararg, csr_ilp32_lp64, implicit-def dead $x1, implicit killed $x10, implicit $x11, implicit-def $x2
+ ; CHECK-NEXT: PseudoCALL target-flags(riscv-call) @vararg, csr_ilp32_lp64, implicit-def dead $x1, implicit killed $x10, implicit $x11, implicit-def $x2
; CHECK-NEXT: $x1 = LD $x2, 8 :: (load (s64) from %stack.1)
; CHECK-NEXT: $x2 = frame-destroy ADDI $x2, 16
; CHECK-NEXT: PseudoRET
@@ -87,7 +87,7 @@ body: |
renamable $x11 = ADDIW killed renamable $x1, 0
ADJCALLSTACKDOWN 0, 0, implicit-def dead $x2, implicit $x2
$x10 = COPY $x0
- PseudoCALL target-flags(riscv-plt) @vararg, csr_ilp32_lp64, implicit-def dead $x1, implicit killed $x10, implicit $x11, implicit-def $x2
+ PseudoCALL target-flags(riscv-call) @vararg, csr_ilp32_lp64, implicit-def dead $x1, implicit killed $x10, implicit $x11, implicit-def $x2
ADJCALLSTACKUP 0, 0, implicit-def dead $x2, implicit $x2
PseudoRET
diff --git a/llvm/test/CodeGen/RISCV/llvm.exp10.ll b/llvm/test/CodeGen/RISCV/llvm.exp10.ll
index bfac15e009f0..6fde86733b07 100644
--- a/llvm/test/CodeGen/RISCV/llvm.exp10.ll
+++ b/llvm/test/CodeGen/RISCV/llvm.exp10.ll
@@ -29,9 +29,9 @@ define half @exp10_f16(half %x) {
; RV32IFD-NEXT: .cfi_def_cfa_offset 16
; RV32IFD-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32IFD-NEXT: .cfi_offset ra, -4
-; RV32IFD-NEXT: call __extendhfsf2@plt
-; RV32IFD-NEXT: call exp10f@plt
-; RV32IFD-NEXT: call __truncsfhf2@plt
+; RV32IFD-NEXT: call __extendhfsf2
+; RV32IFD-NEXT: call exp10f
+; RV32IFD-NEXT: call __truncsfhf2
; RV32IFD-NEXT: fmv.x.w a0, fa0
; RV32IFD-NEXT: lui a1, 1048560
; RV32IFD-NEXT: or a0, a0, a1
@@ -46,9 +46,9 @@ define half @exp10_f16(half %x) {
; RV64IFD-NEXT: .cfi_def_cfa_offset 16
; RV64IFD-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64IFD-NEXT: .cfi_offset ra, -8
-; RV64IFD-NEXT: call __extendhfsf2@plt
-; RV64IFD-NEXT: call exp10f@plt
-; RV64IFD-NEXT: call __truncsfhf2@plt
+; RV64IFD-NEXT: call __extendhfsf2
+; RV64IFD-NEXT: call exp10f
+; RV64IFD-NEXT: call __truncsfhf2
; RV64IFD-NEXT: fmv.x.w a0, fa0
; RV64IFD-NEXT: lui a1, 1048560
; RV64IFD-NEXT: or a0, a0, a1
@@ -68,9 +68,9 @@ define <1 x half> @exp10_v1f16(<1 x half> %x) {
; RV32IFD-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32IFD-NEXT: .cfi_offset ra, -4
; RV32IFD-NEXT: fmv.w.x fa0, a0
-; RV32IFD-NEXT: call __extendhfsf2@plt
-; RV32IFD-NEXT: call exp10f@plt
-; RV32IFD-NEXT: call __truncsfhf2@plt
+; RV32IFD-NEXT: call __extendhfsf2
+; RV32IFD-NEXT: call exp10f
+; RV32IFD-NEXT: call __truncsfhf2
; RV32IFD-NEXT: fmv.x.w a0, fa0
; RV32IFD-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32IFD-NEXT: addi sp, sp, 16
@@ -83,9 +83,9 @@ define <1 x half> @exp10_v1f16(<1 x half> %x) {
; RV64IFD-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64IFD-NEXT: .cfi_offset ra, -8
; RV64IFD-NEXT: fmv.w.x fa0, a0
-; RV64IFD-NEXT: call __extendhfsf2@plt
-; RV64IFD-NEXT: call exp10f@plt
-; RV64IFD-NEXT: call __truncsfhf2@plt
+; RV64IFD-NEXT: call __extendhfsf2
+; RV64IFD-NEXT: call exp10f
+; RV64IFD-NEXT: call __truncsfhf2
; RV64IFD-NEXT: fmv.x.w a0, fa0
; RV64IFD-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64IFD-NEXT: addi sp, sp, 16
@@ -107,14 +107,14 @@ define <2 x half> @exp10_v2f16(<2 x half> %x) {
; RV32IFD-NEXT: .cfi_offset fs0, -16
; RV32IFD-NEXT: fmv.w.x fs0, a1
; RV32IFD-NEXT: fmv.w.x fa0, a0
-; RV32IFD-NEXT: call __extendhfsf2@plt
-; RV32IFD-NEXT: call exp10f@plt
-; RV32IFD-NEXT: call __truncsfhf2@plt
+; RV32IFD-NEXT: call __extendhfsf2
+; RV32IFD-NEXT: call exp10f
+; RV32IFD-NEXT: call __truncsfhf2
; RV32IFD-NEXT: fmv.x.w s0, fa0
; RV32IFD-NEXT: fmv.s fa0, fs0
-; RV32IFD-NEXT: call __extendhfsf2@plt
-; RV32IFD-NEXT: call exp10f@plt
-; RV32IFD-NEXT: call __truncsfhf2@plt
+; RV32IFD-NEXT: call __extendhfsf2
+; RV32IFD-NEXT: call exp10f
+; RV32IFD-NEXT: call __truncsfhf2
; RV32IFD-NEXT: fmv.x.w a1, fa0
; RV32IFD-NEXT: mv a0, s0
; RV32IFD-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
@@ -135,14 +135,14 @@ define <2 x half> @exp10_v2f16(<2 x half> %x) {
; RV64IFD-NEXT: .cfi_offset s1, -24
; RV64IFD-NEXT: mv s0, a1
; RV64IFD-NEXT: fmv.w.x fa0, a0
-; RV64IFD-NEXT: call __extendhfsf2@plt
-; RV64IFD-NEXT: call exp10f@plt
-; RV64IFD-NEXT: call __truncsfhf2@plt
+; RV64IFD-NEXT: call __extendhfsf2
+; RV64IFD-NEXT: call exp10f
+; RV64IFD-NEXT: call __truncsfhf2
; RV64IFD-NEXT: fmv.x.w s1, fa0
; RV64IFD-NEXT: fmv.w.x fa0, s0
-; RV64IFD-NEXT: call __extendhfsf2@plt
-; RV64IFD-NEXT: call exp10f@plt
-; RV64IFD-NEXT: call __truncsfhf2@plt
+; RV64IFD-NEXT: call __extendhfsf2
+; RV64IFD-NEXT: call exp10f
+; RV64IFD-NEXT: call __truncsfhf2
; RV64IFD-NEXT: fmv.x.w a1, fa0
; RV64IFD-NEXT: mv a0, s1
; RV64IFD-NEXT: ld ra, 24(sp) # 8-byte Folded Reload
@@ -178,24 +178,24 @@ define <3 x half> @exp10_v3f16(<3 x half> %x) {
; RV32IFD-NEXT: fmv.w.x fs0, a2
; RV32IFD-NEXT: fmv.w.x fs1, a3
; RV32IFD-NEXT: fmv.w.x fa0, a1
-; RV32IFD-NEXT: call __extendhfsf2@plt
-; RV32IFD-NEXT: call exp10f@plt
-; RV32IFD-NEXT: call __truncsfhf2@plt
+; RV32IFD-NEXT: call __extendhfsf2
+; RV32IFD-NEXT: call exp10f
+; RV32IFD-NEXT: call __truncsfhf2
; RV32IFD-NEXT: fmv.s fs2, fa0
; RV32IFD-NEXT: fmv.s fa0, fs1
-; RV32IFD-NEXT: call __extendhfsf2@plt
-; RV32IFD-NEXT: call exp10f@plt
+; RV32IFD-NEXT: call __extendhfsf2
+; RV32IFD-NEXT: call exp10f
; RV32IFD-NEXT: fmv.x.w a0, fs2
; RV32IFD-NEXT: slli s1, a0, 16
-; RV32IFD-NEXT: call __truncsfhf2@plt
+; RV32IFD-NEXT: call __truncsfhf2
; RV32IFD-NEXT: fmv.x.w a0, fa0
; RV32IFD-NEXT: slli a0, a0, 16
; RV32IFD-NEXT: srli a0, a0, 16
; RV32IFD-NEXT: or s1, a0, s1
; RV32IFD-NEXT: fmv.s fa0, fs0
-; RV32IFD-NEXT: call __extendhfsf2@plt
-; RV32IFD-NEXT: call exp10f@plt
-; RV32IFD-NEXT: call __truncsfhf2@plt
+; RV32IFD-NEXT: call __extendhfsf2
+; RV32IFD-NEXT: call exp10f
+; RV32IFD-NEXT: call __truncsfhf2
; RV32IFD-NEXT: fmv.x.w a0, fa0
; RV32IFD-NEXT: sh a0, 4(s0)
; RV32IFD-NEXT: sw s1, 0(s0)
@@ -227,24 +227,24 @@ define <3 x half> @exp10_v3f16(<3 x half> %x) {
; RV64IFD-NEXT: lhu a1, 8(a1)
; RV64IFD-NEXT: mv s0, a0
; RV64IFD-NEXT: fmv.w.x fa0, a1
-; RV64IFD-NEXT: call __extendhfsf2@plt
-; RV64IFD-NEXT: call exp10f@plt
-; RV64IFD-NEXT: call __truncsfhf2@plt
+; RV64IFD-NEXT: call __extendhfsf2
+; RV64IFD-NEXT: call exp10f
+; RV64IFD-NEXT: call __truncsfhf2
; RV64IFD-NEXT: fmv.s fs0, fa0
; RV64IFD-NEXT: fmv.w.x fa0, s2
-; RV64IFD-NEXT: call __extendhfsf2@plt
-; RV64IFD-NEXT: call exp10f@plt
+; RV64IFD-NEXT: call __extendhfsf2
+; RV64IFD-NEXT: call exp10f
; RV64IFD-NEXT: fmv.x.w a0, fs0
; RV64IFD-NEXT: slli s2, a0, 16
-; RV64IFD-NEXT: call __truncsfhf2@plt
+; RV64IFD-NEXT: call __truncsfhf2
; RV64IFD-NEXT: fmv.x.w a0, fa0
; RV64IFD-NEXT: slli a0, a0, 48
; RV64IFD-NEXT: srli a0, a0, 48
; RV64IFD-NEXT: or s2, a0, s2
; RV64IFD-NEXT: fmv.w.x fa0, s1
-; RV64IFD-NEXT: call __extendhfsf2@plt
-; RV64IFD-NEXT: call exp10f@plt
-; RV64IFD-NEXT: call __truncsfhf2@plt
+; RV64IFD-NEXT: call __extendhfsf2
+; RV64IFD-NEXT: call exp10f
+; RV64IFD-NEXT: call __truncsfhf2
; RV64IFD-NEXT: fmv.x.w a0, fa0
; RV64IFD-NEXT: sh a0, 4(s0)
; RV64IFD-NEXT: sw s2, 0(s0)
@@ -291,27 +291,27 @@ define <4 x half> @exp10_v4f16(<4 x half> %x) {
; RV32IFD-NEXT: fmv.w.x fs1, a2
; RV32IFD-NEXT: fmv.w.x fs2, a3
; RV32IFD-NEXT: fmv.w.x fa0, a1
-; RV32IFD-NEXT: call __extendhfsf2@plt
-; RV32IFD-NEXT: call exp10f@plt
-; RV32IFD-NEXT: call __truncsfhf2@plt
+; RV32IFD-NEXT: call __extendhfsf2
+; RV32IFD-NEXT: call exp10f
+; RV32IFD-NEXT: call __truncsfhf2
; RV32IFD-NEXT: fmv.s fs3, fa0
; RV32IFD-NEXT: fmv.s fa0, fs2
-; RV32IFD-NEXT: call __extendhfsf2@plt
-; RV32IFD-NEXT: call exp10f@plt
-; RV32IFD-NEXT: call __truncsfhf2@plt
+; RV32IFD-NEXT: call __extendhfsf2
+; RV32IFD-NEXT: call exp10f
+; RV32IFD-NEXT: call __truncsfhf2
; RV32IFD-NEXT: fmv.s fs2, fa0
; RV32IFD-NEXT: fmv.s fa0, fs1
-; RV32IFD-NEXT: call __extendhfsf2@plt
-; RV32IFD-NEXT: call exp10f@plt
-; RV32IFD-NEXT: call __truncsfhf2@plt
+; RV32IFD-NEXT: call __extendhfsf2
+; RV32IFD-NEXT: call exp10f
+; RV32IFD-NEXT: call __truncsfhf2
; RV32IFD-NEXT: fmv.s fs1, fa0
; RV32IFD-NEXT: fmv.s fa0, fs0
-; RV32IFD-NEXT: call __extendhfsf2@plt
-; RV32IFD-NEXT: call exp10f@plt
+; RV32IFD-NEXT: call __extendhfsf2
+; RV32IFD-NEXT: call exp10f
; RV32IFD-NEXT: fmv.x.w s1, fs1
; RV32IFD-NEXT: fmv.x.w s2, fs2
; RV32IFD-NEXT: fmv.x.w s3, fs3
-; RV32IFD-NEXT: call __truncsfhf2@plt
+; RV32IFD-NEXT: call __truncsfhf2
; RV32IFD-NEXT: fmv.x.w a0, fa0
; RV32IFD-NEXT: sh a0, 6(s0)
; RV32IFD-NEXT: sh s3, 4(s0)
@@ -355,27 +355,27 @@ define <4 x half> @exp10_v4f16(<4 x half> %x) {
; RV64IFD-NEXT: lhu a1, 16(a1)
; RV64IFD-NEXT: mv s0, a0
; RV64IFD-NEXT: fmv.w.x fa0, a1
-; RV64IFD-NEXT: call __extendhfsf2@plt
-; RV64IFD-NEXT: call exp10f@plt
-; RV64IFD-NEXT: call __truncsfhf2@plt
+; RV64IFD-NEXT: call __extendhfsf2
+; RV64IFD-NEXT: call exp10f
+; RV64IFD-NEXT: call __truncsfhf2
; RV64IFD-NEXT: fmv.s fs0, fa0
; RV64IFD-NEXT: fmv.w.x fa0, s3
-; RV64IFD-NEXT: call __extendhfsf2@plt
-; RV64IFD-NEXT: call exp10f@plt
-; RV64IFD-NEXT: call __truncsfhf2@plt
+; RV64IFD-NEXT: call __extendhfsf2
+; RV64IFD-NEXT: call exp10f
+; RV64IFD-NEXT: call __truncsfhf2
; RV64IFD-NEXT: fmv.s fs1, fa0
; RV64IFD-NEXT: fmv.w.x fa0, s2
-; RV64IFD-NEXT: call __extendhfsf2@plt
-; RV64IFD-NEXT: call exp10f@plt
-; RV64IFD-NEXT: call __truncsfhf2@plt
+; RV64IFD-NEXT: call __extendhfsf2
+; RV64IFD-NEXT: call exp10f
+; RV64IFD-NEXT: call __truncsfhf2
; RV64IFD-NEXT: fmv.s fs2, fa0
; RV64IFD-NEXT: fmv.w.x fa0, s1
-; RV64IFD-NEXT: call __extendhfsf2@plt
-; RV64IFD-NEXT: call exp10f@plt
+; RV64IFD-NEXT: call __extendhfsf2
+; RV64IFD-NEXT: call exp10f
; RV64IFD-NEXT: fmv.x.w s1, fs2
; RV64IFD-NEXT: fmv.x.w s2, fs1
; RV64IFD-NEXT: fmv.x.w s3, fs0
-; RV64IFD-NEXT: call __truncsfhf2@plt
+; RV64IFD-NEXT: call __truncsfhf2
; RV64IFD-NEXT: fmv.x.w a0, fa0
; RV64IFD-NEXT: sh a0, 6(s0)
; RV64IFD-NEXT: sh s3, 4(s0)
@@ -398,7 +398,7 @@ define <4 x half> @exp10_v4f16(<4 x half> %x) {
define float @exp10_f32(float %x) {
; CHECK-LABEL: exp10_f32:
; CHECK: # %bb.0:
-; CHECK-NEXT: tail exp10f@plt
+; CHECK-NEXT: tail exp10f
%r = call float @llvm.exp10.f32(float %x)
ret float %r
}
@@ -410,7 +410,7 @@ define <1 x float> @exp10_v1f32(<1 x float> %x) {
; RV32IFD-NEXT: .cfi_def_cfa_offset 16
; RV32IFD-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32IFD-NEXT: .cfi_offset ra, -4
-; RV32IFD-NEXT: call exp10f@plt
+; RV32IFD-NEXT: call exp10f
; RV32IFD-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32IFD-NEXT: addi sp, sp, 16
; RV32IFD-NEXT: ret
@@ -421,7 +421,7 @@ define <1 x float> @exp10_v1f32(<1 x float> %x) {
; RV64IFD-NEXT: .cfi_def_cfa_offset 16
; RV64IFD-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64IFD-NEXT: .cfi_offset ra, -8
-; RV64IFD-NEXT: call exp10f@plt
+; RV64IFD-NEXT: call exp10f
; RV64IFD-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64IFD-NEXT: addi sp, sp, 16
; RV64IFD-NEXT: ret
@@ -441,10 +441,10 @@ define <2 x float> @exp10_v2f32(<2 x float> %x) {
; RV32IFD-NEXT: .cfi_offset fs0, -16
; RV32IFD-NEXT: .cfi_offset fs1, -24
; RV32IFD-NEXT: fmv.s fs0, fa1
-; RV32IFD-NEXT: call exp10f@plt
+; RV32IFD-NEXT: call exp10f
; RV32IFD-NEXT: fmv.s fs1, fa0
; RV32IFD-NEXT: fmv.s fa0, fs0
-; RV32IFD-NEXT: call exp10f@plt
+; RV32IFD-NEXT: call exp10f
; RV32IFD-NEXT: fmv.s fa1, fa0
; RV32IFD-NEXT: fmv.s fa0, fs1
; RV32IFD-NEXT: lw ra, 28(sp) # 4-byte Folded Reload
@@ -464,10 +464,10 @@ define <2 x float> @exp10_v2f32(<2 x float> %x) {
; RV64IFD-NEXT: .cfi_offset fs0, -16
; RV64IFD-NEXT: .cfi_offset fs1, -24
; RV64IFD-NEXT: fmv.s fs0, fa1
-; RV64IFD-NEXT: call exp10f@plt
+; RV64IFD-NEXT: call exp10f
; RV64IFD-NEXT: fmv.s fs1, fa0
; RV64IFD-NEXT: fmv.s fa0, fs0
-; RV64IFD-NEXT: call exp10f@plt
+; RV64IFD-NEXT: call exp10f
; RV64IFD-NEXT: fmv.s fa1, fa0
; RV64IFD-NEXT: fmv.s fa0, fs1
; RV64IFD-NEXT: ld ra, 24(sp) # 8-byte Folded Reload
@@ -497,13 +497,13 @@ define <3 x float> @exp10_v3f32(<3 x float> %x) {
; RV32IFD-NEXT: fmv.s fs0, fa2
; RV32IFD-NEXT: fmv.s fs1, fa1
; RV32IFD-NEXT: mv s0, a0
-; RV32IFD-NEXT: call exp10f@plt
+; RV32IFD-NEXT: call exp10f
; RV32IFD-NEXT: fmv.s fs2, fa0
; RV32IFD-NEXT: fmv.s fa0, fs1
-; RV32IFD-NEXT: call exp10f@plt
+; RV32IFD-NEXT: call exp10f
; RV32IFD-NEXT: fmv.s fs1, fa0
; RV32IFD-NEXT: fmv.s fa0, fs0
-; RV32IFD-NEXT: call exp10f@plt
+; RV32IFD-NEXT: call exp10f
; RV32IFD-NEXT: fsw fa0, 8(s0)
; RV32IFD-NEXT: fsw fs1, 4(s0)
; RV32IFD-NEXT: fsw fs2, 0(s0)
@@ -533,17 +533,17 @@ define <3 x float> @exp10_v3f32(<3 x float> %x) {
; RV64IFD-NEXT: fmv.s fs1, fa0
; RV64IFD-NEXT: mv s0, a0
; RV64IFD-NEXT: fmv.s fa0, fa1
-; RV64IFD-NEXT: call exp10f@plt
+; RV64IFD-NEXT: call exp10f
; RV64IFD-NEXT: fmv.x.w a0, fa0
; RV64IFD-NEXT: slli s1, a0, 32
; RV64IFD-NEXT: fmv.s fa0, fs1
-; RV64IFD-NEXT: call exp10f@plt
+; RV64IFD-NEXT: call exp10f
; RV64IFD-NEXT: fmv.x.w a0, fa0
; RV64IFD-NEXT: slli a0, a0, 32
; RV64IFD-NEXT: srli a0, a0, 32
; RV64IFD-NEXT: or s1, a0, s1
; RV64IFD-NEXT: fmv.s fa0, fs0
-; RV64IFD-NEXT: call exp10f@plt
+; RV64IFD-NEXT: call exp10f
; RV64IFD-NEXT: fsw fa0, 8(s0)
; RV64IFD-NEXT: sd s1, 0(s0)
; RV64IFD-NEXT: ld ra, 40(sp) # 8-byte Folded Reload
@@ -578,16 +578,16 @@ define <4 x float> @exp10_v4f32(<4 x float> %x) {
; RV32IFD-NEXT: fmv.s fs1, fa2
; RV32IFD-NEXT: fmv.s fs2, fa1
; RV32IFD-NEXT: mv s0, a0
-; RV32IFD-NEXT: call exp10f@plt
+; RV32IFD-NEXT: call exp10f
; RV32IFD-NEXT: fmv.s fs3, fa0
; RV32IFD-NEXT: fmv.s fa0, fs2
-; RV32IFD-NEXT: call exp10f@plt
+; RV32IFD-NEXT: call exp10f
; RV32IFD-NEXT: fmv.s fs2, fa0
; RV32IFD-NEXT: fmv.s fa0, fs1
-; RV32IFD-NEXT: call exp10f@plt
+; RV32IFD-NEXT: call exp10f
; RV32IFD-NEXT: fmv.s fs1, fa0
; RV32IFD-NEXT: fmv.s fa0, fs0
-; RV32IFD-NEXT: call exp10f@plt
+; RV32IFD-NEXT: call exp10f
; RV32IFD-NEXT: fsw fa0, 12(s0)
; RV32IFD-NEXT: fsw fs1, 8(s0)
; RV32IFD-NEXT: fsw fs2, 4(s0)
@@ -621,16 +621,16 @@ define <4 x float> @exp10_v4f32(<4 x float> %x) {
; RV64IFD-NEXT: fmv.s fs1, fa2
; RV64IFD-NEXT: fmv.s fs2, fa1
; RV64IFD-NEXT: mv s0, a0
-; RV64IFD-NEXT: call exp10f@plt
+; RV64IFD-NEXT: call exp10f
; RV64IFD-NEXT: fmv.s fs3, fa0
; RV64IFD-NEXT: fmv.s fa0, fs2
-; RV64IFD-NEXT: call exp10f@plt
+; RV64IFD-NEXT: call exp10f
; RV64IFD-NEXT: fmv.s fs2, fa0
; RV64IFD-NEXT: fmv.s fa0, fs1
-; RV64IFD-NEXT: call exp10f@plt
+; RV64IFD-NEXT: call exp10f
; RV64IFD-NEXT: fmv.s fs1, fa0
; RV64IFD-NEXT: fmv.s fa0, fs0
-; RV64IFD-NEXT: call exp10f@plt
+; RV64IFD-NEXT: call exp10f
; RV64IFD-NEXT: fsw fa0, 12(s0)
; RV64IFD-NEXT: fsw fs1, 8(s0)
; RV64IFD-NEXT: fsw fs2, 4(s0)
@@ -650,7 +650,7 @@ define <4 x float> @exp10_v4f32(<4 x float> %x) {
define double @exp10_f64(double %x) {
; CHECK-LABEL: exp10_f64:
; CHECK: # %bb.0:
-; CHECK-NEXT: tail exp10@plt
+; CHECK-NEXT: tail exp10
%r = call double @llvm.exp10.f64(double %x)
ret double %r
}
@@ -673,10 +673,10 @@ define <2 x double> @exp10_v2f64(<2 x double> %x) {
; RV32IFD-NEXT: .cfi_offset fs0, -16
; RV32IFD-NEXT: .cfi_offset fs1, -24
; RV32IFD-NEXT: fmv.d fs0, fa1
-; RV32IFD-NEXT: call exp10@plt
+; RV32IFD-NEXT: call exp10
; RV32IFD-NEXT: fmv.d fs1, fa0
; RV32IFD-NEXT: fmv.d fa0, fs0
-; RV32IFD-NEXT: call exp10@plt
+; RV32IFD-NEXT: call exp10
; RV32IFD-NEXT: fmv.d fa1, fa0
; RV32IFD-NEXT: fmv.d fa0, fs1
; RV32IFD-NEXT: lw ra, 28(sp) # 4-byte Folded Reload
@@ -696,10 +696,10 @@ define <2 x double> @exp10_v2f64(<2 x double> %x) {
; RV64IFD-NEXT: .cfi_offset fs0, -16
; RV64IFD-NEXT: .cfi_offset fs1, -24
; RV64IFD-NEXT: fmv.d fs0, fa1
-; RV64IFD-NEXT: call exp10@plt
+; RV64IFD-NEXT: call exp10
; RV64IFD-NEXT: fmv.d fs1, fa0
; RV64IFD-NEXT: fmv.d fa0, fs0
-; RV64IFD-NEXT: call exp10@plt
+; RV64IFD-NEXT: call exp10
; RV64IFD-NEXT: fmv.d fa1, fa0
; RV64IFD-NEXT: fmv.d fa0, fs1
; RV64IFD-NEXT: ld ra, 24(sp) # 8-byte Folded Reload
@@ -729,13 +729,13 @@ define <3 x double> @exp10_v3f64(<3 x double> %x) {
; RV32IFD-NEXT: fmv.d fs0, fa2
; RV32IFD-NEXT: fmv.d fs1, fa1
; RV32IFD-NEXT: mv s0, a0
-; RV32IFD-NEXT: call exp10@plt
+; RV32IFD-NEXT: call exp10
; RV32IFD-NEXT: fmv.d fs2, fa0
; RV32IFD-NEXT: fmv.d fa0, fs1
-; RV32IFD-NEXT: call exp10@plt
+; RV32IFD-NEXT: call exp10
; RV32IFD-NEXT: fmv.d fs1, fa0
; RV32IFD-NEXT: fmv.d fa0, fs0
-; RV32IFD-NEXT: call exp10@plt
+; RV32IFD-NEXT: call exp10
; RV32IFD-NEXT: fsd fa0, 16(s0)
; RV32IFD-NEXT: fsd fs1, 8(s0)
; RV32IFD-NEXT: fsd fs2, 0(s0)
@@ -764,13 +764,13 @@ define <3 x double> @exp10_v3f64(<3 x double> %x) {
; RV64IFD-NEXT: fmv.d fs0, fa2
; RV64IFD-NEXT: fmv.d fs1, fa1
; RV64IFD-NEXT: mv s0, a0
-; RV64IFD-NEXT: call exp10@plt
+; RV64IFD-NEXT: call exp10
; RV64IFD-NEXT: fmv.d fs2, fa0
; RV64IFD-NEXT: fmv.d fa0, fs1
-; RV64IFD-NEXT: call exp10@plt
+; RV64IFD-NEXT: call exp10
; RV64IFD-NEXT: fmv.d fs1, fa0
; RV64IFD-NEXT: fmv.d fa0, fs0
-; RV64IFD-NEXT: call exp10@plt
+; RV64IFD-NEXT: call exp10
; RV64IFD-NEXT: fsd fa0, 16(s0)
; RV64IFD-NEXT: fsd fs1, 8(s0)
; RV64IFD-NEXT: fsd fs2, 0(s0)
@@ -806,16 +806,16 @@ define <4 x double> @exp10_v4f64(<4 x double> %x) {
; RV32IFD-NEXT: fmv.d fs1, fa2
; RV32IFD-NEXT: fmv.d fs2, fa1
; RV32IFD-NEXT: mv s0, a0
-; RV32IFD-NEXT: call exp10@plt
+; RV32IFD-NEXT: call exp10
; RV32IFD-NEXT: fmv.d fs3, fa0
; RV32IFD-NEXT: fmv.d fa0, fs2
-; RV32IFD-NEXT: call exp10@plt
+; RV32IFD-NEXT: call exp10
; RV32IFD-NEXT: fmv.d fs2, fa0
; RV32IFD-NEXT: fmv.d fa0, fs1
-; RV32IFD-NEXT: call exp10@plt
+; RV32IFD-NEXT: call exp10
; RV32IFD-NEXT: fmv.d fs1, fa0
; RV32IFD-NEXT: fmv.d fa0, fs0
-; RV32IFD-NEXT: call exp10@plt
+; RV32IFD-NEXT: call exp10
; RV32IFD-NEXT: fsd fa0, 24(s0)
; RV32IFD-NEXT: fsd fs1, 16(s0)
; RV32IFD-NEXT: fsd fs2, 8(s0)
@@ -849,16 +849,16 @@ define <4 x double> @exp10_v4f64(<4 x double> %x) {
; RV64IFD-NEXT: fmv.d fs1, fa2
; RV64IFD-NEXT: fmv.d fs2, fa1
; RV64IFD-NEXT: mv s0, a0
-; RV64IFD-NEXT: call exp10@plt
+; RV64IFD-NEXT: call exp10
; RV64IFD-NEXT: fmv.d fs3, fa0
; RV64IFD-NEXT: fmv.d fa0, fs2
-; RV64IFD-NEXT: call exp10@plt
+; RV64IFD-NEXT: call exp10
; RV64IFD-NEXT: fmv.d fs2, fa0
; RV64IFD-NEXT: fmv.d fa0, fs1
-; RV64IFD-NEXT: call exp10@plt
+; RV64IFD-NEXT: call exp10
; RV64IFD-NEXT: fmv.d fs1, fa0
; RV64IFD-NEXT: fmv.d fa0, fs0
-; RV64IFD-NEXT: call exp10@plt
+; RV64IFD-NEXT: call exp10
; RV64IFD-NEXT: fsd fa0, 24(s0)
; RV64IFD-NEXT: fsd fs1, 16(s0)
; RV64IFD-NEXT: fsd fs2, 8(s0)
diff --git a/llvm/test/CodeGen/RISCV/llvm.frexp.ll b/llvm/test/CodeGen/RISCV/llvm.frexp.ll
index 94b9444dfaf8..30f9dd1e5165 100644
--- a/llvm/test/CodeGen/RISCV/llvm.frexp.ll
+++ b/llvm/test/CodeGen/RISCV/llvm.frexp.ll
@@ -23,10 +23,10 @@ define { half, i32 } @test_frexp_f16_i32(half %a) nounwind {
; RV32IFD: # %bb.0:
; RV32IFD-NEXT: addi sp, sp, -16
; RV32IFD-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IFD-NEXT: call __extendhfsf2@plt
+; RV32IFD-NEXT: call __extendhfsf2
; RV32IFD-NEXT: addi a0, sp, 8
-; RV32IFD-NEXT: call frexpf@plt
-; RV32IFD-NEXT: call __truncsfhf2@plt
+; RV32IFD-NEXT: call frexpf
+; RV32IFD-NEXT: call __truncsfhf2
; RV32IFD-NEXT: fmv.x.w a1, fa0
; RV32IFD-NEXT: lw a0, 8(sp)
; RV32IFD-NEXT: lui a2, 1048560
@@ -40,10 +40,10 @@ define { half, i32 } @test_frexp_f16_i32(half %a) nounwind {
; RV64IFD: # %bb.0:
; RV64IFD-NEXT: addi sp, sp, -16
; RV64IFD-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
-; RV64IFD-NEXT: call __extendhfsf2@plt
+; RV64IFD-NEXT: call __extendhfsf2
; RV64IFD-NEXT: mv a0, sp
-; RV64IFD-NEXT: call frexpf@plt
-; RV64IFD-NEXT: call __truncsfhf2@plt
+; RV64IFD-NEXT: call frexpf
+; RV64IFD-NEXT: call __truncsfhf2
; RV64IFD-NEXT: fmv.x.w a1, fa0
; RV64IFD-NEXT: ld a0, 0(sp)
; RV64IFD-NEXT: lui a2, 1048560
@@ -57,10 +57,10 @@ define { half, i32 } @test_frexp_f16_i32(half %a) nounwind {
; RV32IZFINXZDINX: # %bb.0:
; RV32IZFINXZDINX-NEXT: addi sp, sp, -16
; RV32IZFINXZDINX-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IZFINXZDINX-NEXT: call __extendhfsf2@plt
+; RV32IZFINXZDINX-NEXT: call __extendhfsf2
; RV32IZFINXZDINX-NEXT: addi a1, sp, 8
-; RV32IZFINXZDINX-NEXT: call frexpf@plt
-; RV32IZFINXZDINX-NEXT: call __truncsfhf2@plt
+; RV32IZFINXZDINX-NEXT: call frexpf
+; RV32IZFINXZDINX-NEXT: call __truncsfhf2
; RV32IZFINXZDINX-NEXT: lw a1, 8(sp)
; RV32IZFINXZDINX-NEXT: lui a2, 1048560
; RV32IZFINXZDINX-NEXT: or a0, a0, a2
@@ -72,10 +72,10 @@ define { half, i32 } @test_frexp_f16_i32(half %a) nounwind {
; RV64IZFINXZDINX: # %bb.0:
; RV64IZFINXZDINX-NEXT: addi sp, sp, -16
; RV64IZFINXZDINX-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
-; RV64IZFINXZDINX-NEXT: call __extendhfsf2@plt
+; RV64IZFINXZDINX-NEXT: call __extendhfsf2
; RV64IZFINXZDINX-NEXT: mv a1, sp
-; RV64IZFINXZDINX-NEXT: call frexpf@plt
-; RV64IZFINXZDINX-NEXT: call __truncsfhf2@plt
+; RV64IZFINXZDINX-NEXT: call frexpf
+; RV64IZFINXZDINX-NEXT: call __truncsfhf2
; RV64IZFINXZDINX-NEXT: ld a1, 0(sp)
; RV64IZFINXZDINX-NEXT: lui a2, 1048560
; RV64IZFINXZDINX-NEXT: or a0, a0, a2
@@ -89,10 +89,10 @@ define { half, i32 } @test_frexp_f16_i32(half %a) nounwind {
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT: slli a0, a0, 16
; RV32I-NEXT: srli a0, a0, 16
-; RV32I-NEXT: call __extendhfsf2@plt
+; RV32I-NEXT: call __extendhfsf2
; RV32I-NEXT: addi a1, sp, 8
-; RV32I-NEXT: call frexpf@plt
-; RV32I-NEXT: call __truncsfhf2@plt
+; RV32I-NEXT: call frexpf
+; RV32I-NEXT: call __truncsfhf2
; RV32I-NEXT: lw a1, 8(sp)
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
@@ -104,10 +104,10 @@ define { half, i32 } @test_frexp_f16_i32(half %a) nounwind {
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT: slli a0, a0, 48
; RV64I-NEXT: srli a0, a0, 48
-; RV64I-NEXT: call __extendhfsf2@plt
+; RV64I-NEXT: call __extendhfsf2
; RV64I-NEXT: addi a1, sp, 4
-; RV64I-NEXT: call frexpf@plt
-; RV64I-NEXT: call __truncsfhf2@plt
+; RV64I-NEXT: call frexpf
+; RV64I-NEXT: call __truncsfhf2
; RV64I-NEXT: lw a1, 4(sp)
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
@@ -121,10 +121,10 @@ define half @test_frexp_f16_i32_only_use_fract(half %a) nounwind {
; RV32IFD: # %bb.0:
; RV32IFD-NEXT: addi sp, sp, -16
; RV32IFD-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IFD-NEXT: call __extendhfsf2@plt
+; RV32IFD-NEXT: call __extendhfsf2
; RV32IFD-NEXT: addi a0, sp, 8
-; RV32IFD-NEXT: call frexpf@plt
-; RV32IFD-NEXT: call __truncsfhf2@plt
+; RV32IFD-NEXT: call frexpf
+; RV32IFD-NEXT: call __truncsfhf2
; RV32IFD-NEXT: fmv.x.w a0, fa0
; RV32IFD-NEXT: lui a1, 1048560
; RV32IFD-NEXT: or a0, a0, a1
@@ -137,10 +137,10 @@ define half @test_frexp_f16_i32_only_use_fract(half %a) nounwind {
; RV64IFD: # %bb.0:
; RV64IFD-NEXT: addi sp, sp, -16
; RV64IFD-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
-; RV64IFD-NEXT: call __extendhfsf2@plt
+; RV64IFD-NEXT: call __extendhfsf2
; RV64IFD-NEXT: mv a0, sp
-; RV64IFD-NEXT: call frexpf@plt
-; RV64IFD-NEXT: call __truncsfhf2@plt
+; RV64IFD-NEXT: call frexpf
+; RV64IFD-NEXT: call __truncsfhf2
; RV64IFD-NEXT: fmv.x.w a0, fa0
; RV64IFD-NEXT: lui a1, 1048560
; RV64IFD-NEXT: or a0, a0, a1
@@ -153,10 +153,10 @@ define half @test_frexp_f16_i32_only_use_fract(half %a) nounwind {
; RV32IZFINXZDINX: # %bb.0:
; RV32IZFINXZDINX-NEXT: addi sp, sp, -16
; RV32IZFINXZDINX-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IZFINXZDINX-NEXT: call __extendhfsf2@plt
+; RV32IZFINXZDINX-NEXT: call __extendhfsf2
; RV32IZFINXZDINX-NEXT: addi a1, sp, 8
-; RV32IZFINXZDINX-NEXT: call frexpf@plt
-; RV32IZFINXZDINX-NEXT: call __truncsfhf2@plt
+; RV32IZFINXZDINX-NEXT: call frexpf
+; RV32IZFINXZDINX-NEXT: call __truncsfhf2
; RV32IZFINXZDINX-NEXT: lui a1, 1048560
; RV32IZFINXZDINX-NEXT: or a0, a0, a1
; RV32IZFINXZDINX-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
@@ -167,10 +167,10 @@ define half @test_frexp_f16_i32_only_use_fract(half %a) nounwind {
; RV64IZFINXZDINX: # %bb.0:
; RV64IZFINXZDINX-NEXT: addi sp, sp, -16
; RV64IZFINXZDINX-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
-; RV64IZFINXZDINX-NEXT: call __extendhfsf2@plt
+; RV64IZFINXZDINX-NEXT: call __extendhfsf2
; RV64IZFINXZDINX-NEXT: mv a1, sp
-; RV64IZFINXZDINX-NEXT: call frexpf@plt
-; RV64IZFINXZDINX-NEXT: call __truncsfhf2@plt
+; RV64IZFINXZDINX-NEXT: call frexpf
+; RV64IZFINXZDINX-NEXT: call __truncsfhf2
; RV64IZFINXZDINX-NEXT: lui a1, 1048560
; RV64IZFINXZDINX-NEXT: or a0, a0, a1
; RV64IZFINXZDINX-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
@@ -183,10 +183,10 @@ define half @test_frexp_f16_i32_only_use_fract(half %a) nounwind {
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT: slli a0, a0, 16
; RV32I-NEXT: srli a0, a0, 16
-; RV32I-NEXT: call __extendhfsf2@plt
+; RV32I-NEXT: call __extendhfsf2
; RV32I-NEXT: addi a1, sp, 8
-; RV32I-NEXT: call frexpf@plt
-; RV32I-NEXT: call __truncsfhf2@plt
+; RV32I-NEXT: call frexpf
+; RV32I-NEXT: call __truncsfhf2
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
@@ -197,10 +197,10 @@ define half @test_frexp_f16_i32_only_use_fract(half %a) nounwind {
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT: slli a0, a0, 48
; RV64I-NEXT: srli a0, a0, 48
-; RV64I-NEXT: call __extendhfsf2@plt
+; RV64I-NEXT: call __extendhfsf2
; RV64I-NEXT: addi a1, sp, 4
-; RV64I-NEXT: call frexpf@plt
-; RV64I-NEXT: call __truncsfhf2@plt
+; RV64I-NEXT: call frexpf
+; RV64I-NEXT: call __truncsfhf2
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
@@ -214,9 +214,9 @@ define i32 @test_frexp_f16_i32_only_use_exp(half %a) nounwind {
; RV32IFD: # %bb.0:
; RV32IFD-NEXT: addi sp, sp, -16
; RV32IFD-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IFD-NEXT: call __extendhfsf2@plt
+; RV32IFD-NEXT: call __extendhfsf2
; RV32IFD-NEXT: addi a0, sp, 8
-; RV32IFD-NEXT: call frexpf@plt
+; RV32IFD-NEXT: call frexpf
; RV32IFD-NEXT: lw a0, 8(sp)
; RV32IFD-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32IFD-NEXT: addi sp, sp, 16
@@ -226,9 +226,9 @@ define i32 @test_frexp_f16_i32_only_use_exp(half %a) nounwind {
; RV64IFD: # %bb.0:
; RV64IFD-NEXT: addi sp, sp, -16
; RV64IFD-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
-; RV64IFD-NEXT: call __extendhfsf2@plt
+; RV64IFD-NEXT: call __extendhfsf2
; RV64IFD-NEXT: mv a0, sp
-; RV64IFD-NEXT: call frexpf@plt
+; RV64IFD-NEXT: call frexpf
; RV64IFD-NEXT: ld a0, 0(sp)
; RV64IFD-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64IFD-NEXT: addi sp, sp, 16
@@ -238,9 +238,9 @@ define i32 @test_frexp_f16_i32_only_use_exp(half %a) nounwind {
; RV32IZFINXZDINX: # %bb.0:
; RV32IZFINXZDINX-NEXT: addi sp, sp, -16
; RV32IZFINXZDINX-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IZFINXZDINX-NEXT: call __extendhfsf2@plt
+; RV32IZFINXZDINX-NEXT: call __extendhfsf2
; RV32IZFINXZDINX-NEXT: addi a1, sp, 8
-; RV32IZFINXZDINX-NEXT: call frexpf@plt
+; RV32IZFINXZDINX-NEXT: call frexpf
; RV32IZFINXZDINX-NEXT: lw a0, 8(sp)
; RV32IZFINXZDINX-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32IZFINXZDINX-NEXT: addi sp, sp, 16
@@ -250,9 +250,9 @@ define i32 @test_frexp_f16_i32_only_use_exp(half %a) nounwind {
; RV64IZFINXZDINX: # %bb.0:
; RV64IZFINXZDINX-NEXT: addi sp, sp, -16
; RV64IZFINXZDINX-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
-; RV64IZFINXZDINX-NEXT: call __extendhfsf2@plt
+; RV64IZFINXZDINX-NEXT: call __extendhfsf2
; RV64IZFINXZDINX-NEXT: mv a1, sp
-; RV64IZFINXZDINX-NEXT: call frexpf@plt
+; RV64IZFINXZDINX-NEXT: call frexpf
; RV64IZFINXZDINX-NEXT: ld a0, 0(sp)
; RV64IZFINXZDINX-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64IZFINXZDINX-NEXT: addi sp, sp, 16
@@ -264,9 +264,9 @@ define i32 @test_frexp_f16_i32_only_use_exp(half %a) nounwind {
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT: slli a0, a0, 16
; RV32I-NEXT: srli a0, a0, 16
-; RV32I-NEXT: call __extendhfsf2@plt
+; RV32I-NEXT: call __extendhfsf2
; RV32I-NEXT: addi a1, sp, 8
-; RV32I-NEXT: call frexpf@plt
+; RV32I-NEXT: call frexpf
; RV32I-NEXT: lw a0, 8(sp)
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
@@ -278,9 +278,9 @@ define i32 @test_frexp_f16_i32_only_use_exp(half %a) nounwind {
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT: slli a0, a0, 48
; RV64I-NEXT: srli a0, a0, 48
-; RV64I-NEXT: call __extendhfsf2@plt
+; RV64I-NEXT: call __extendhfsf2
; RV64I-NEXT: addi a1, sp, 4
-; RV64I-NEXT: call frexpf@plt
+; RV64I-NEXT: call frexpf
; RV64I-NEXT: lw a0, 4(sp)
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
@@ -314,7 +314,7 @@ define { float, i32 } @test_frexp_f32_i32(float %a) nounwind {
; RV32IFD-NEXT: addi sp, sp, -16
; RV32IFD-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32IFD-NEXT: addi a0, sp, 8
-; RV32IFD-NEXT: call frexpf@plt
+; RV32IFD-NEXT: call frexpf
; RV32IFD-NEXT: lw a0, 8(sp)
; RV32IFD-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32IFD-NEXT: addi sp, sp, 16
@@ -325,7 +325,7 @@ define { float, i32 } @test_frexp_f32_i32(float %a) nounwind {
; RV64IFD-NEXT: addi sp, sp, -16
; RV64IFD-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64IFD-NEXT: mv a0, sp
-; RV64IFD-NEXT: call frexpf@plt
+; RV64IFD-NEXT: call frexpf
; RV64IFD-NEXT: ld a0, 0(sp)
; RV64IFD-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64IFD-NEXT: addi sp, sp, 16
@@ -336,7 +336,7 @@ define { float, i32 } @test_frexp_f32_i32(float %a) nounwind {
; RV32IZFINXZDINX-NEXT: addi sp, sp, -16
; RV32IZFINXZDINX-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32IZFINXZDINX-NEXT: addi a1, sp, 8
-; RV32IZFINXZDINX-NEXT: call frexpf@plt
+; RV32IZFINXZDINX-NEXT: call frexpf
; RV32IZFINXZDINX-NEXT: lw a1, 8(sp)
; RV32IZFINXZDINX-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32IZFINXZDINX-NEXT: addi sp, sp, 16
@@ -347,7 +347,7 @@ define { float, i32 } @test_frexp_f32_i32(float %a) nounwind {
; RV64IZFINXZDINX-NEXT: addi sp, sp, -16
; RV64IZFINXZDINX-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64IZFINXZDINX-NEXT: mv a1, sp
-; RV64IZFINXZDINX-NEXT: call frexpf@plt
+; RV64IZFINXZDINX-NEXT: call frexpf
; RV64IZFINXZDINX-NEXT: ld a1, 0(sp)
; RV64IZFINXZDINX-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64IZFINXZDINX-NEXT: addi sp, sp, 16
@@ -358,7 +358,7 @@ define { float, i32 } @test_frexp_f32_i32(float %a) nounwind {
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT: addi a1, sp, 8
-; RV32I-NEXT: call frexpf@plt
+; RV32I-NEXT: call frexpf
; RV32I-NEXT: lw a1, 8(sp)
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
@@ -369,7 +369,7 @@ define { float, i32 } @test_frexp_f32_i32(float %a) nounwind {
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT: addi a1, sp, 4
-; RV64I-NEXT: call frexpf@plt
+; RV64I-NEXT: call frexpf
; RV64I-NEXT: lw a1, 4(sp)
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
@@ -384,7 +384,7 @@ define float @test_frexp_f32_i32_only_use_fract(float %a) nounwind {
; RV32IFD-NEXT: addi sp, sp, -16
; RV32IFD-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32IFD-NEXT: addi a0, sp, 8
-; RV32IFD-NEXT: call frexpf@plt
+; RV32IFD-NEXT: call frexpf
; RV32IFD-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32IFD-NEXT: addi sp, sp, 16
; RV32IFD-NEXT: ret
@@ -394,7 +394,7 @@ define float @test_frexp_f32_i32_only_use_fract(float %a) nounwind {
; RV64IFD-NEXT: addi sp, sp, -16
; RV64IFD-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64IFD-NEXT: mv a0, sp
-; RV64IFD-NEXT: call frexpf@plt
+; RV64IFD-NEXT: call frexpf
; RV64IFD-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64IFD-NEXT: addi sp, sp, 16
; RV64IFD-NEXT: ret
@@ -404,7 +404,7 @@ define float @test_frexp_f32_i32_only_use_fract(float %a) nounwind {
; RV32IZFINXZDINX-NEXT: addi sp, sp, -16
; RV32IZFINXZDINX-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32IZFINXZDINX-NEXT: addi a1, sp, 8
-; RV32IZFINXZDINX-NEXT: call frexpf@plt
+; RV32IZFINXZDINX-NEXT: call frexpf
; RV32IZFINXZDINX-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32IZFINXZDINX-NEXT: addi sp, sp, 16
; RV32IZFINXZDINX-NEXT: ret
@@ -414,7 +414,7 @@ define float @test_frexp_f32_i32_only_use_fract(float %a) nounwind {
; RV64IZFINXZDINX-NEXT: addi sp, sp, -16
; RV64IZFINXZDINX-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64IZFINXZDINX-NEXT: mv a1, sp
-; RV64IZFINXZDINX-NEXT: call frexpf@plt
+; RV64IZFINXZDINX-NEXT: call frexpf
; RV64IZFINXZDINX-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64IZFINXZDINX-NEXT: addi sp, sp, 16
; RV64IZFINXZDINX-NEXT: ret
@@ -424,7 +424,7 @@ define float @test_frexp_f32_i32_only_use_fract(float %a) nounwind {
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT: addi a1, sp, 8
-; RV32I-NEXT: call frexpf@plt
+; RV32I-NEXT: call frexpf
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
@@ -434,7 +434,7 @@ define float @test_frexp_f32_i32_only_use_fract(float %a) nounwind {
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT: addi a1, sp, 4
-; RV64I-NEXT: call frexpf@plt
+; RV64I-NEXT: call frexpf
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
@@ -449,7 +449,7 @@ define i32 @test_frexp_f32_i32_only_use_exp(float %a) nounwind {
; RV32IFD-NEXT: addi sp, sp, -16
; RV32IFD-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32IFD-NEXT: addi a0, sp, 8
-; RV32IFD-NEXT: call frexpf@plt
+; RV32IFD-NEXT: call frexpf
; RV32IFD-NEXT: lw a0, 8(sp)
; RV32IFD-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32IFD-NEXT: addi sp, sp, 16
@@ -460,7 +460,7 @@ define i32 @test_frexp_f32_i32_only_use_exp(float %a) nounwind {
; RV64IFD-NEXT: addi sp, sp, -16
; RV64IFD-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64IFD-NEXT: mv a0, sp
-; RV64IFD-NEXT: call frexpf@plt
+; RV64IFD-NEXT: call frexpf
; RV64IFD-NEXT: ld a0, 0(sp)
; RV64IFD-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64IFD-NEXT: addi sp, sp, 16
@@ -471,7 +471,7 @@ define i32 @test_frexp_f32_i32_only_use_exp(float %a) nounwind {
; RV32IZFINXZDINX-NEXT: addi sp, sp, -16
; RV32IZFINXZDINX-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32IZFINXZDINX-NEXT: addi a1, sp, 8
-; RV32IZFINXZDINX-NEXT: call frexpf@plt
+; RV32IZFINXZDINX-NEXT: call frexpf
; RV32IZFINXZDINX-NEXT: lw a0, 8(sp)
; RV32IZFINXZDINX-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32IZFINXZDINX-NEXT: addi sp, sp, 16
@@ -482,7 +482,7 @@ define i32 @test_frexp_f32_i32_only_use_exp(float %a) nounwind {
; RV64IZFINXZDINX-NEXT: addi sp, sp, -16
; RV64IZFINXZDINX-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64IZFINXZDINX-NEXT: mv a1, sp
-; RV64IZFINXZDINX-NEXT: call frexpf@plt
+; RV64IZFINXZDINX-NEXT: call frexpf
; RV64IZFINXZDINX-NEXT: ld a0, 0(sp)
; RV64IZFINXZDINX-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64IZFINXZDINX-NEXT: addi sp, sp, 16
@@ -493,7 +493,7 @@ define i32 @test_frexp_f32_i32_only_use_exp(float %a) nounwind {
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT: addi a1, sp, 8
-; RV32I-NEXT: call frexpf@plt
+; RV32I-NEXT: call frexpf
; RV32I-NEXT: lw a0, 8(sp)
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
@@ -504,7 +504,7 @@ define i32 @test_frexp_f32_i32_only_use_exp(float %a) nounwind {
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT: addi a1, sp, 4
-; RV64I-NEXT: call frexpf@plt
+; RV64I-NEXT: call frexpf
; RV64I-NEXT: lw a0, 4(sp)
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
@@ -547,19 +547,19 @@ define { <4 x float>, <4 x i32> } @test_frexp_v4f32_v4i32(<4 x float> %a) nounwi
; RV32IFD-NEXT: fmv.s fs2, fa1
; RV32IFD-NEXT: mv s0, a0
; RV32IFD-NEXT: addi a0, sp, 8
-; RV32IFD-NEXT: call frexpf@plt
+; RV32IFD-NEXT: call frexpf
; RV32IFD-NEXT: fmv.s fs3, fa0
; RV32IFD-NEXT: addi a0, sp, 12
; RV32IFD-NEXT: fmv.s fa0, fs2
-; RV32IFD-NEXT: call frexpf@plt
+; RV32IFD-NEXT: call frexpf
; RV32IFD-NEXT: fmv.s fs2, fa0
; RV32IFD-NEXT: addi a0, sp, 16
; RV32IFD-NEXT: fmv.s fa0, fs1
-; RV32IFD-NEXT: call frexpf@plt
+; RV32IFD-NEXT: call frexpf
; RV32IFD-NEXT: fmv.s fs1, fa0
; RV32IFD-NEXT: addi a0, sp, 20
; RV32IFD-NEXT: fmv.s fa0, fs0
-; RV32IFD-NEXT: call frexpf@plt
+; RV32IFD-NEXT: call frexpf
; RV32IFD-NEXT: lw a0, 20(sp)
; RV32IFD-NEXT: lw a1, 16(sp)
; RV32IFD-NEXT: lw a2, 12(sp)
@@ -595,19 +595,19 @@ define { <4 x float>, <4 x i32> } @test_frexp_v4f32_v4i32(<4 x float> %a) nounwi
; RV64IFD-NEXT: fmv.s fs2, fa1
; RV64IFD-NEXT: mv s0, a0
; RV64IFD-NEXT: mv a0, sp
-; RV64IFD-NEXT: call frexpf@plt
+; RV64IFD-NEXT: call frexpf
; RV64IFD-NEXT: fmv.s fs3, fa0
; RV64IFD-NEXT: addi a0, sp, 8
; RV64IFD-NEXT: fmv.s fa0, fs2
-; RV64IFD-NEXT: call frexpf@plt
+; RV64IFD-NEXT: call frexpf
; RV64IFD-NEXT: fmv.s fs2, fa0
; RV64IFD-NEXT: addi a0, sp, 16
; RV64IFD-NEXT: fmv.s fa0, fs1
-; RV64IFD-NEXT: call frexpf@plt
+; RV64IFD-NEXT: call frexpf
; RV64IFD-NEXT: fmv.s fs1, fa0
; RV64IFD-NEXT: addi a0, sp, 24
; RV64IFD-NEXT: fmv.s fa0, fs0
-; RV64IFD-NEXT: call frexpf@plt
+; RV64IFD-NEXT: call frexpf
; RV64IFD-NEXT: ld a0, 24(sp)
; RV64IFD-NEXT: ld a1, 16(sp)
; RV64IFD-NEXT: ld a2, 8(sp)
@@ -645,19 +645,19 @@ define { <4 x float>, <4 x i32> } @test_frexp_v4f32_v4i32(<4 x float> %a) nounwi
; RV32IZFINXZDINX-NEXT: mv s3, a0
; RV32IZFINXZDINX-NEXT: addi a1, sp, 8
; RV32IZFINXZDINX-NEXT: mv a0, a2
-; RV32IZFINXZDINX-NEXT: call frexpf@plt
+; RV32IZFINXZDINX-NEXT: call frexpf
; RV32IZFINXZDINX-NEXT: mv s4, a0
; RV32IZFINXZDINX-NEXT: addi a1, sp, 12
; RV32IZFINXZDINX-NEXT: mv a0, s2
-; RV32IZFINXZDINX-NEXT: call frexpf@plt
+; RV32IZFINXZDINX-NEXT: call frexpf
; RV32IZFINXZDINX-NEXT: mv s2, a0
; RV32IZFINXZDINX-NEXT: addi a1, sp, 16
; RV32IZFINXZDINX-NEXT: mv a0, s1
-; RV32IZFINXZDINX-NEXT: call frexpf@plt
+; RV32IZFINXZDINX-NEXT: call frexpf
; RV32IZFINXZDINX-NEXT: mv s1, a0
; RV32IZFINXZDINX-NEXT: addi a1, sp, 20
; RV32IZFINXZDINX-NEXT: mv a0, s0
-; RV32IZFINXZDINX-NEXT: call frexpf@plt
+; RV32IZFINXZDINX-NEXT: call frexpf
; RV32IZFINXZDINX-NEXT: lw a1, 20(sp)
; RV32IZFINXZDINX-NEXT: lw a2, 16(sp)
; RV32IZFINXZDINX-NEXT: lw a3, 12(sp)
@@ -695,19 +695,19 @@ define { <4 x float>, <4 x i32> } @test_frexp_v4f32_v4i32(<4 x float> %a) nounwi
; RV64IZFINXZDINX-NEXT: mv s3, a0
; RV64IZFINXZDINX-NEXT: mv a1, sp
; RV64IZFINXZDINX-NEXT: mv a0, a2
-; RV64IZFINXZDINX-NEXT: call frexpf@plt
+; RV64IZFINXZDINX-NEXT: call frexpf
; RV64IZFINXZDINX-NEXT: mv s4, a0
; RV64IZFINXZDINX-NEXT: addi a1, sp, 8
; RV64IZFINXZDINX-NEXT: mv a0, s2
-; RV64IZFINXZDINX-NEXT: call frexpf@plt
+; RV64IZFINXZDINX-NEXT: call frexpf
; RV64IZFINXZDINX-NEXT: mv s2, a0
; RV64IZFINXZDINX-NEXT: addi a1, sp, 16
; RV64IZFINXZDINX-NEXT: mv a0, s1
-; RV64IZFINXZDINX-NEXT: call frexpf@plt
+; RV64IZFINXZDINX-NEXT: call frexpf
; RV64IZFINXZDINX-NEXT: mv s1, a0
; RV64IZFINXZDINX-NEXT: addi a1, sp, 24
; RV64IZFINXZDINX-NEXT: mv a0, s0
-; RV64IZFINXZDINX-NEXT: call frexpf@plt
+; RV64IZFINXZDINX-NEXT: call frexpf
; RV64IZFINXZDINX-NEXT: ld a1, 24(sp)
; RV64IZFINXZDINX-NEXT: ld a2, 16(sp)
; RV64IZFINXZDINX-NEXT: ld a3, 8(sp)
@@ -745,19 +745,19 @@ define { <4 x float>, <4 x i32> } @test_frexp_v4f32_v4i32(<4 x float> %a) nounwi
; RV32I-NEXT: mv s3, a0
; RV32I-NEXT: addi a1, sp, 8
; RV32I-NEXT: mv a0, a2
-; RV32I-NEXT: call frexpf@plt
+; RV32I-NEXT: call frexpf
; RV32I-NEXT: mv s4, a0
; RV32I-NEXT: addi a1, sp, 12
; RV32I-NEXT: mv a0, s2
-; RV32I-NEXT: call frexpf@plt
+; RV32I-NEXT: call frexpf
; RV32I-NEXT: mv s2, a0
; RV32I-NEXT: addi a1, sp, 16
; RV32I-NEXT: mv a0, s1
-; RV32I-NEXT: call frexpf@plt
+; RV32I-NEXT: call frexpf
; RV32I-NEXT: mv s1, a0
; RV32I-NEXT: addi a1, sp, 20
; RV32I-NEXT: mv a0, s0
-; RV32I-NEXT: call frexpf@plt
+; RV32I-NEXT: call frexpf
; RV32I-NEXT: lw a1, 8(sp)
; RV32I-NEXT: lw a2, 12(sp)
; RV32I-NEXT: lw a3, 16(sp)
@@ -795,19 +795,19 @@ define { <4 x float>, <4 x i32> } @test_frexp_v4f32_v4i32(<4 x float> %a) nounwi
; RV64I-NEXT: mv s3, a0
; RV64I-NEXT: mv a1, sp
; RV64I-NEXT: mv a0, a2
-; RV64I-NEXT: call frexpf@plt
+; RV64I-NEXT: call frexpf
; RV64I-NEXT: mv s4, a0
; RV64I-NEXT: addi a1, sp, 4
; RV64I-NEXT: mv a0, s2
-; RV64I-NEXT: call frexpf@plt
+; RV64I-NEXT: call frexpf
; RV64I-NEXT: mv s2, a0
; RV64I-NEXT: addi a1, sp, 8
; RV64I-NEXT: mv a0, s1
-; RV64I-NEXT: call frexpf@plt
+; RV64I-NEXT: call frexpf
; RV64I-NEXT: mv s1, a0
; RV64I-NEXT: addi a1, sp, 12
; RV64I-NEXT: mv a0, s0
-; RV64I-NEXT: call frexpf@plt
+; RV64I-NEXT: call frexpf
; RV64I-NEXT: lw a1, 0(sp)
; RV64I-NEXT: lw a2, 4(sp)
; RV64I-NEXT: lw a3, 8(sp)
@@ -847,19 +847,19 @@ define <4 x float> @test_frexp_v4f32_v4i32_only_use_fract(<4 x float> %a) nounwi
; RV32IFD-NEXT: fmv.s fs2, fa1
; RV32IFD-NEXT: mv s0, a0
; RV32IFD-NEXT: addi a0, sp, 8
-; RV32IFD-NEXT: call frexpf@plt
+; RV32IFD-NEXT: call frexpf
; RV32IFD-NEXT: fmv.s fs3, fa0
; RV32IFD-NEXT: addi a0, sp, 12
; RV32IFD-NEXT: fmv.s fa0, fs2
-; RV32IFD-NEXT: call frexpf@plt
+; RV32IFD-NEXT: call frexpf
; RV32IFD-NEXT: fmv.s fs2, fa0
; RV32IFD-NEXT: addi a0, sp, 16
; RV32IFD-NEXT: fmv.s fa0, fs1
-; RV32IFD-NEXT: call frexpf@plt
+; RV32IFD-NEXT: call frexpf
; RV32IFD-NEXT: fmv.s fs1, fa0
; RV32IFD-NEXT: addi a0, sp, 20
; RV32IFD-NEXT: fmv.s fa0, fs0
-; RV32IFD-NEXT: call frexpf@plt
+; RV32IFD-NEXT: call frexpf
; RV32IFD-NEXT: fsw fa0, 12(s0)
; RV32IFD-NEXT: fsw fs1, 8(s0)
; RV32IFD-NEXT: fsw fs2, 4(s0)
@@ -887,19 +887,19 @@ define <4 x float> @test_frexp_v4f32_v4i32_only_use_fract(<4 x float> %a) nounwi
; RV64IFD-NEXT: fmv.s fs2, fa1
; RV64IFD-NEXT: mv s0, a0
; RV64IFD-NEXT: mv a0, sp
-; RV64IFD-NEXT: call frexpf@plt
+; RV64IFD-NEXT: call frexpf
; RV64IFD-NEXT: fmv.s fs3, fa0
; RV64IFD-NEXT: addi a0, sp, 8
; RV64IFD-NEXT: fmv.s fa0, fs2
-; RV64IFD-NEXT: call frexpf@plt
+; RV64IFD-NEXT: call frexpf
; RV64IFD-NEXT: fmv.s fs2, fa0
; RV64IFD-NEXT: addi a0, sp, 16
; RV64IFD-NEXT: fmv.s fa0, fs1
-; RV64IFD-NEXT: call frexpf@plt
+; RV64IFD-NEXT: call frexpf
; RV64IFD-NEXT: fmv.s fs1, fa0
; RV64IFD-NEXT: addi a0, sp, 24
; RV64IFD-NEXT: fmv.s fa0, fs0
-; RV64IFD-NEXT: call frexpf@plt
+; RV64IFD-NEXT: call frexpf
; RV64IFD-NEXT: fsw fa0, 12(s0)
; RV64IFD-NEXT: fsw fs1, 8(s0)
; RV64IFD-NEXT: fsw fs2, 4(s0)
@@ -929,19 +929,19 @@ define <4 x float> @test_frexp_v4f32_v4i32_only_use_fract(<4 x float> %a) nounwi
; RV32IZFINXZDINX-NEXT: mv s3, a0
; RV32IZFINXZDINX-NEXT: addi a1, sp, 8
; RV32IZFINXZDINX-NEXT: mv a0, a2
-; RV32IZFINXZDINX-NEXT: call frexpf@plt
+; RV32IZFINXZDINX-NEXT: call frexpf
; RV32IZFINXZDINX-NEXT: mv s4, a0
; RV32IZFINXZDINX-NEXT: addi a1, sp, 12
; RV32IZFINXZDINX-NEXT: mv a0, s2
-; RV32IZFINXZDINX-NEXT: call frexpf@plt
+; RV32IZFINXZDINX-NEXT: call frexpf
; RV32IZFINXZDINX-NEXT: mv s2, a0
; RV32IZFINXZDINX-NEXT: addi a1, sp, 16
; RV32IZFINXZDINX-NEXT: mv a0, s1
-; RV32IZFINXZDINX-NEXT: call frexpf@plt
+; RV32IZFINXZDINX-NEXT: call frexpf
; RV32IZFINXZDINX-NEXT: mv s1, a0
; RV32IZFINXZDINX-NEXT: addi a1, sp, 20
; RV32IZFINXZDINX-NEXT: mv a0, s0
-; RV32IZFINXZDINX-NEXT: call frexpf@plt
+; RV32IZFINXZDINX-NEXT: call frexpf
; RV32IZFINXZDINX-NEXT: sw a0, 12(s3)
; RV32IZFINXZDINX-NEXT: sw s1, 8(s3)
; RV32IZFINXZDINX-NEXT: sw s2, 4(s3)
@@ -971,19 +971,19 @@ define <4 x float> @test_frexp_v4f32_v4i32_only_use_fract(<4 x float> %a) nounwi
; RV64IZFINXZDINX-NEXT: mv s3, a0
; RV64IZFINXZDINX-NEXT: mv a1, sp
; RV64IZFINXZDINX-NEXT: mv a0, a2
-; RV64IZFINXZDINX-NEXT: call frexpf@plt
+; RV64IZFINXZDINX-NEXT: call frexpf
; RV64IZFINXZDINX-NEXT: mv s4, a0
; RV64IZFINXZDINX-NEXT: addi a1, sp, 8
; RV64IZFINXZDINX-NEXT: mv a0, s2
-; RV64IZFINXZDINX-NEXT: call frexpf@plt
+; RV64IZFINXZDINX-NEXT: call frexpf
; RV64IZFINXZDINX-NEXT: mv s2, a0
; RV64IZFINXZDINX-NEXT: addi a1, sp, 16
; RV64IZFINXZDINX-NEXT: mv a0, s1
-; RV64IZFINXZDINX-NEXT: call frexpf@plt
+; RV64IZFINXZDINX-NEXT: call frexpf
; RV64IZFINXZDINX-NEXT: mv s1, a0
; RV64IZFINXZDINX-NEXT: addi a1, sp, 24
; RV64IZFINXZDINX-NEXT: mv a0, s0
-; RV64IZFINXZDINX-NEXT: call frexpf@plt
+; RV64IZFINXZDINX-NEXT: call frexpf
; RV64IZFINXZDINX-NEXT: sw a0, 12(s3)
; RV64IZFINXZDINX-NEXT: sw s1, 8(s3)
; RV64IZFINXZDINX-NEXT: sw s2, 4(s3)
@@ -1013,19 +1013,19 @@ define <4 x float> @test_frexp_v4f32_v4i32_only_use_fract(<4 x float> %a) nounwi
; RV32I-NEXT: mv s3, a0
; RV32I-NEXT: addi a1, sp, 8
; RV32I-NEXT: mv a0, a2
-; RV32I-NEXT: call frexpf@plt
+; RV32I-NEXT: call frexpf
; RV32I-NEXT: mv s4, a0
; RV32I-NEXT: addi a1, sp, 12
; RV32I-NEXT: mv a0, s2
-; RV32I-NEXT: call frexpf@plt
+; RV32I-NEXT: call frexpf
; RV32I-NEXT: mv s2, a0
; RV32I-NEXT: addi a1, sp, 16
; RV32I-NEXT: mv a0, s1
-; RV32I-NEXT: call frexpf@plt
+; RV32I-NEXT: call frexpf
; RV32I-NEXT: mv s1, a0
; RV32I-NEXT: addi a1, sp, 20
; RV32I-NEXT: mv a0, s0
-; RV32I-NEXT: call frexpf@plt
+; RV32I-NEXT: call frexpf
; RV32I-NEXT: sw a0, 12(s3)
; RV32I-NEXT: sw s1, 8(s3)
; RV32I-NEXT: sw s2, 4(s3)
@@ -1055,19 +1055,19 @@ define <4 x float> @test_frexp_v4f32_v4i32_only_use_fract(<4 x float> %a) nounwi
; RV64I-NEXT: mv s3, a0
; RV64I-NEXT: mv a1, sp
; RV64I-NEXT: mv a0, a2
-; RV64I-NEXT: call frexpf@plt
+; RV64I-NEXT: call frexpf
; RV64I-NEXT: mv s4, a0
; RV64I-NEXT: addi a1, sp, 4
; RV64I-NEXT: mv a0, s2
-; RV64I-NEXT: call frexpf@plt
+; RV64I-NEXT: call frexpf
; RV64I-NEXT: mv s2, a0
; RV64I-NEXT: addi a1, sp, 8
; RV64I-NEXT: mv a0, s1
-; RV64I-NEXT: call frexpf@plt
+; RV64I-NEXT: call frexpf
; RV64I-NEXT: mv s1, a0
; RV64I-NEXT: addi a1, sp, 12
; RV64I-NEXT: mv a0, s0
-; RV64I-NEXT: call frexpf@plt
+; RV64I-NEXT: call frexpf
; RV64I-NEXT: sw a0, 12(s3)
; RV64I-NEXT: sw s1, 8(s3)
; RV64I-NEXT: sw s2, 4(s3)
@@ -1099,16 +1099,16 @@ define <4 x i32> @test_frexp_v4f32_v4i32_only_use_exp(<4 x float> %a) nounwind {
; RV32IFD-NEXT: fmv.s fs2, fa1
; RV32IFD-NEXT: mv s0, a0
; RV32IFD-NEXT: mv a0, sp
-; RV32IFD-NEXT: call frexpf@plt
+; RV32IFD-NEXT: call frexpf
; RV32IFD-NEXT: addi a0, sp, 4
; RV32IFD-NEXT: fmv.s fa0, fs2
-; RV32IFD-NEXT: call frexpf@plt
+; RV32IFD-NEXT: call frexpf
; RV32IFD-NEXT: addi a0, sp, 8
; RV32IFD-NEXT: fmv.s fa0, fs1
-; RV32IFD-NEXT: call frexpf@plt
+; RV32IFD-NEXT: call frexpf
; RV32IFD-NEXT: addi a0, sp, 12
; RV32IFD-NEXT: fmv.s fa0, fs0
-; RV32IFD-NEXT: call frexpf@plt
+; RV32IFD-NEXT: call frexpf
; RV32IFD-NEXT: lw a0, 12(sp)
; RV32IFD-NEXT: lw a1, 8(sp)
; RV32IFD-NEXT: lw a2, 4(sp)
@@ -1138,16 +1138,16 @@ define <4 x i32> @test_frexp_v4f32_v4i32_only_use_exp(<4 x float> %a) nounwind {
; RV64IFD-NEXT: fmv.s fs2, fa1
; RV64IFD-NEXT: mv s0, a0
; RV64IFD-NEXT: addi a0, sp, 8
-; RV64IFD-NEXT: call frexpf@plt
+; RV64IFD-NEXT: call frexpf
; RV64IFD-NEXT: addi a0, sp, 16
; RV64IFD-NEXT: fmv.s fa0, fs2
-; RV64IFD-NEXT: call frexpf@plt
+; RV64IFD-NEXT: call frexpf
; RV64IFD-NEXT: addi a0, sp, 24
; RV64IFD-NEXT: fmv.s fa0, fs1
-; RV64IFD-NEXT: call frexpf@plt
+; RV64IFD-NEXT: call frexpf
; RV64IFD-NEXT: addi a0, sp, 32
; RV64IFD-NEXT: fmv.s fa0, fs0
-; RV64IFD-NEXT: call frexpf@plt
+; RV64IFD-NEXT: call frexpf
; RV64IFD-NEXT: ld a0, 32(sp)
; RV64IFD-NEXT: ld a1, 24(sp)
; RV64IFD-NEXT: ld a2, 16(sp)
@@ -1179,16 +1179,16 @@ define <4 x i32> @test_frexp_v4f32_v4i32_only_use_exp(<4 x float> %a) nounwind {
; RV32IZFINXZDINX-NEXT: mv s3, a0
; RV32IZFINXZDINX-NEXT: addi a1, sp, 12
; RV32IZFINXZDINX-NEXT: mv a0, a2
-; RV32IZFINXZDINX-NEXT: call frexpf@plt
+; RV32IZFINXZDINX-NEXT: call frexpf
; RV32IZFINXZDINX-NEXT: addi a1, sp, 16
; RV32IZFINXZDINX-NEXT: mv a0, s2
-; RV32IZFINXZDINX-NEXT: call frexpf@plt
+; RV32IZFINXZDINX-NEXT: call frexpf
; RV32IZFINXZDINX-NEXT: addi a1, sp, 20
; RV32IZFINXZDINX-NEXT: mv a0, s1
-; RV32IZFINXZDINX-NEXT: call frexpf@plt
+; RV32IZFINXZDINX-NEXT: call frexpf
; RV32IZFINXZDINX-NEXT: addi a1, sp, 24
; RV32IZFINXZDINX-NEXT: mv a0, s0
-; RV32IZFINXZDINX-NEXT: call frexpf@plt
+; RV32IZFINXZDINX-NEXT: call frexpf
; RV32IZFINXZDINX-NEXT: lw a0, 24(sp)
; RV32IZFINXZDINX-NEXT: lw a1, 20(sp)
; RV32IZFINXZDINX-NEXT: lw a2, 16(sp)
@@ -1220,16 +1220,16 @@ define <4 x i32> @test_frexp_v4f32_v4i32_only_use_exp(<4 x float> %a) nounwind {
; RV64IZFINXZDINX-NEXT: mv s3, a0
; RV64IZFINXZDINX-NEXT: addi a1, sp, 8
; RV64IZFINXZDINX-NEXT: mv a0, a2
-; RV64IZFINXZDINX-NEXT: call frexpf@plt
+; RV64IZFINXZDINX-NEXT: call frexpf
; RV64IZFINXZDINX-NEXT: addi a1, sp, 16
; RV64IZFINXZDINX-NEXT: mv a0, s2
-; RV64IZFINXZDINX-NEXT: call frexpf@plt
+; RV64IZFINXZDINX-NEXT: call frexpf
; RV64IZFINXZDINX-NEXT: addi a1, sp, 24
; RV64IZFINXZDINX-NEXT: mv a0, s1
-; RV64IZFINXZDINX-NEXT: call frexpf@plt
+; RV64IZFINXZDINX-NEXT: call frexpf
; RV64IZFINXZDINX-NEXT: addi a1, sp, 32
; RV64IZFINXZDINX-NEXT: mv a0, s0
-; RV64IZFINXZDINX-NEXT: call frexpf@plt
+; RV64IZFINXZDINX-NEXT: call frexpf
; RV64IZFINXZDINX-NEXT: ld a0, 32(sp)
; RV64IZFINXZDINX-NEXT: ld a1, 24(sp)
; RV64IZFINXZDINX-NEXT: ld a2, 16(sp)
@@ -1261,16 +1261,16 @@ define <4 x i32> @test_frexp_v4f32_v4i32_only_use_exp(<4 x float> %a) nounwind {
; RV32I-NEXT: mv s3, a0
; RV32I-NEXT: addi a1, sp, 12
; RV32I-NEXT: mv a0, a2
-; RV32I-NEXT: call frexpf@plt
+; RV32I-NEXT: call frexpf
; RV32I-NEXT: addi a1, sp, 16
; RV32I-NEXT: mv a0, s2
-; RV32I-NEXT: call frexpf@plt
+; RV32I-NEXT: call frexpf
; RV32I-NEXT: addi a1, sp, 20
; RV32I-NEXT: mv a0, s1
-; RV32I-NEXT: call frexpf@plt
+; RV32I-NEXT: call frexpf
; RV32I-NEXT: addi a1, sp, 24
; RV32I-NEXT: mv a0, s0
-; RV32I-NEXT: call frexpf@plt
+; RV32I-NEXT: call frexpf
; RV32I-NEXT: lw a0, 24(sp)
; RV32I-NEXT: lw a1, 20(sp)
; RV32I-NEXT: lw a2, 16(sp)
@@ -1302,16 +1302,16 @@ define <4 x i32> @test_frexp_v4f32_v4i32_only_use_exp(<4 x float> %a) nounwind {
; RV64I-NEXT: mv s3, a0
; RV64I-NEXT: addi a1, sp, 8
; RV64I-NEXT: mv a0, a2
-; RV64I-NEXT: call frexpf@plt
+; RV64I-NEXT: call frexpf
; RV64I-NEXT: addi a1, sp, 12
; RV64I-NEXT: mv a0, s2
-; RV64I-NEXT: call frexpf@plt
+; RV64I-NEXT: call frexpf
; RV64I-NEXT: addi a1, sp, 16
; RV64I-NEXT: mv a0, s1
-; RV64I-NEXT: call frexpf@plt
+; RV64I-NEXT: call frexpf
; RV64I-NEXT: addi a1, sp, 20
; RV64I-NEXT: mv a0, s0
-; RV64I-NEXT: call frexpf@plt
+; RV64I-NEXT: call frexpf
; RV64I-NEXT: lw a0, 20(sp)
; RV64I-NEXT: lw a1, 16(sp)
; RV64I-NEXT: lw a2, 12(sp)
@@ -1338,7 +1338,7 @@ define { double, i32 } @test_frexp_f64_i32(double %a) nounwind {
; RV32IFD-NEXT: addi sp, sp, -16
; RV32IFD-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32IFD-NEXT: addi a0, sp, 8
-; RV32IFD-NEXT: call frexp@plt
+; RV32IFD-NEXT: call frexp
; RV32IFD-NEXT: lw a0, 8(sp)
; RV32IFD-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32IFD-NEXT: addi sp, sp, 16
@@ -1349,7 +1349,7 @@ define { double, i32 } @test_frexp_f64_i32(double %a) nounwind {
; RV64IFD-NEXT: addi sp, sp, -16
; RV64IFD-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64IFD-NEXT: mv a0, sp
-; RV64IFD-NEXT: call frexp@plt
+; RV64IFD-NEXT: call frexp
; RV64IFD-NEXT: ld a0, 0(sp)
; RV64IFD-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64IFD-NEXT: addi sp, sp, 16
@@ -1360,7 +1360,7 @@ define { double, i32 } @test_frexp_f64_i32(double %a) nounwind {
; RV32IZFINXZDINX-NEXT: addi sp, sp, -16
; RV32IZFINXZDINX-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32IZFINXZDINX-NEXT: addi a2, sp, 8
-; RV32IZFINXZDINX-NEXT: call frexp@plt
+; RV32IZFINXZDINX-NEXT: call frexp
; RV32IZFINXZDINX-NEXT: lw a2, 8(sp)
; RV32IZFINXZDINX-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32IZFINXZDINX-NEXT: addi sp, sp, 16
@@ -1371,7 +1371,7 @@ define { double, i32 } @test_frexp_f64_i32(double %a) nounwind {
; RV64IZFINXZDINX-NEXT: addi sp, sp, -16
; RV64IZFINXZDINX-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64IZFINXZDINX-NEXT: mv a1, sp
-; RV64IZFINXZDINX-NEXT: call frexp@plt
+; RV64IZFINXZDINX-NEXT: call frexp
; RV64IZFINXZDINX-NEXT: ld a1, 0(sp)
; RV64IZFINXZDINX-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64IZFINXZDINX-NEXT: addi sp, sp, 16
@@ -1387,7 +1387,7 @@ define { double, i32 } @test_frexp_f64_i32(double %a) nounwind {
; RV32I-NEXT: addi a2, sp, 4
; RV32I-NEXT: mv a0, a1
; RV32I-NEXT: mv a1, a3
-; RV32I-NEXT: call frexp@plt
+; RV32I-NEXT: call frexp
; RV32I-NEXT: lw a2, 4(sp)
; RV32I-NEXT: sw a1, 4(s0)
; RV32I-NEXT: sw a0, 0(s0)
@@ -1402,7 +1402,7 @@ define { double, i32 } @test_frexp_f64_i32(double %a) nounwind {
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT: addi a1, sp, 4
-; RV64I-NEXT: call frexp@plt
+; RV64I-NEXT: call frexp
; RV64I-NEXT: lw a1, 4(sp)
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
@@ -1417,7 +1417,7 @@ define double @test_frexp_f64_i32_only_use_fract(double %a) nounwind {
; RV32IFD-NEXT: addi sp, sp, -16
; RV32IFD-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32IFD-NEXT: addi a0, sp, 8
-; RV32IFD-NEXT: call frexp@plt
+; RV32IFD-NEXT: call frexp
; RV32IFD-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32IFD-NEXT: addi sp, sp, 16
; RV32IFD-NEXT: ret
@@ -1427,7 +1427,7 @@ define double @test_frexp_f64_i32_only_use_fract(double %a) nounwind {
; RV64IFD-NEXT: addi sp, sp, -16
; RV64IFD-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64IFD-NEXT: mv a0, sp
-; RV64IFD-NEXT: call frexp@plt
+; RV64IFD-NEXT: call frexp
; RV64IFD-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64IFD-NEXT: addi sp, sp, 16
; RV64IFD-NEXT: ret
@@ -1437,7 +1437,7 @@ define double @test_frexp_f64_i32_only_use_fract(double %a) nounwind {
; RV32IZFINXZDINX-NEXT: addi sp, sp, -16
; RV32IZFINXZDINX-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32IZFINXZDINX-NEXT: addi a2, sp, 8
-; RV32IZFINXZDINX-NEXT: call frexp@plt
+; RV32IZFINXZDINX-NEXT: call frexp
; RV32IZFINXZDINX-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32IZFINXZDINX-NEXT: addi sp, sp, 16
; RV32IZFINXZDINX-NEXT: ret
@@ -1447,7 +1447,7 @@ define double @test_frexp_f64_i32_only_use_fract(double %a) nounwind {
; RV64IZFINXZDINX-NEXT: addi sp, sp, -16
; RV64IZFINXZDINX-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64IZFINXZDINX-NEXT: mv a1, sp
-; RV64IZFINXZDINX-NEXT: call frexp@plt
+; RV64IZFINXZDINX-NEXT: call frexp
; RV64IZFINXZDINX-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64IZFINXZDINX-NEXT: addi sp, sp, 16
; RV64IZFINXZDINX-NEXT: ret
@@ -1457,7 +1457,7 @@ define double @test_frexp_f64_i32_only_use_fract(double %a) nounwind {
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT: addi a2, sp, 8
-; RV32I-NEXT: call frexp@plt
+; RV32I-NEXT: call frexp
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
@@ -1467,7 +1467,7 @@ define double @test_frexp_f64_i32_only_use_fract(double %a) nounwind {
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT: addi a1, sp, 4
-; RV64I-NEXT: call frexp@plt
+; RV64I-NEXT: call frexp
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
@@ -1482,7 +1482,7 @@ define i32 @test_frexp_f64_i32_only_use_exp(double %a) nounwind {
; RV32IFD-NEXT: addi sp, sp, -16
; RV32IFD-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32IFD-NEXT: addi a0, sp, 8
-; RV32IFD-NEXT: call frexp@plt
+; RV32IFD-NEXT: call frexp
; RV32IFD-NEXT: lw a0, 8(sp)
; RV32IFD-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32IFD-NEXT: addi sp, sp, 16
@@ -1493,7 +1493,7 @@ define i32 @test_frexp_f64_i32_only_use_exp(double %a) nounwind {
; RV64IFD-NEXT: addi sp, sp, -16
; RV64IFD-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64IFD-NEXT: mv a0, sp
-; RV64IFD-NEXT: call frexp@plt
+; RV64IFD-NEXT: call frexp
; RV64IFD-NEXT: ld a0, 0(sp)
; RV64IFD-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64IFD-NEXT: addi sp, sp, 16
@@ -1504,7 +1504,7 @@ define i32 @test_frexp_f64_i32_only_use_exp(double %a) nounwind {
; RV32IZFINXZDINX-NEXT: addi sp, sp, -16
; RV32IZFINXZDINX-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32IZFINXZDINX-NEXT: addi a2, sp, 8
-; RV32IZFINXZDINX-NEXT: call frexp@plt
+; RV32IZFINXZDINX-NEXT: call frexp
; RV32IZFINXZDINX-NEXT: lw a0, 8(sp)
; RV32IZFINXZDINX-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32IZFINXZDINX-NEXT: addi sp, sp, 16
@@ -1515,7 +1515,7 @@ define i32 @test_frexp_f64_i32_only_use_exp(double %a) nounwind {
; RV64IZFINXZDINX-NEXT: addi sp, sp, -16
; RV64IZFINXZDINX-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64IZFINXZDINX-NEXT: mv a1, sp
-; RV64IZFINXZDINX-NEXT: call frexp@plt
+; RV64IZFINXZDINX-NEXT: call frexp
; RV64IZFINXZDINX-NEXT: ld a0, 0(sp)
; RV64IZFINXZDINX-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64IZFINXZDINX-NEXT: addi sp, sp, 16
@@ -1526,7 +1526,7 @@ define i32 @test_frexp_f64_i32_only_use_exp(double %a) nounwind {
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT: addi a2, sp, 8
-; RV32I-NEXT: call frexp@plt
+; RV32I-NEXT: call frexp
; RV32I-NEXT: lw a0, 8(sp)
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
@@ -1537,7 +1537,7 @@ define i32 @test_frexp_f64_i32_only_use_exp(double %a) nounwind {
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT: addi a1, sp, 4
-; RV64I-NEXT: call frexp@plt
+; RV64I-NEXT: call frexp
; RV64I-NEXT: lw a0, 4(sp)
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
@@ -1583,7 +1583,7 @@ define { fp128, i32 } @test_frexp_f128_i32(fp128 %a) nounwind {
; RV32IFD-NEXT: mv a1, sp
; RV32IFD-NEXT: addi a2, sp, 36
; RV32IFD-NEXT: sw a3, 0(sp)
-; RV32IFD-NEXT: call frexpl@plt
+; RV32IFD-NEXT: call frexpl
; RV32IFD-NEXT: lw a0, 36(sp)
; RV32IFD-NEXT: lw a1, 28(sp)
; RV32IFD-NEXT: lw a2, 24(sp)
@@ -1609,7 +1609,7 @@ define { fp128, i32 } @test_frexp_f128_i32(fp128 %a) nounwind {
; RV64IFD-NEXT: addi a2, sp, 12
; RV64IFD-NEXT: mv a0, a1
; RV64IFD-NEXT: mv a1, a3
-; RV64IFD-NEXT: call frexpl@plt
+; RV64IFD-NEXT: call frexpl
; RV64IFD-NEXT: lw a2, 12(sp)
; RV64IFD-NEXT: sd a1, 8(s0)
; RV64IFD-NEXT: sd a0, 0(s0)
@@ -1636,7 +1636,7 @@ define { fp128, i32 } @test_frexp_f128_i32(fp128 %a) nounwind {
; RV32IZFINXZDINX-NEXT: mv a1, sp
; RV32IZFINXZDINX-NEXT: addi a2, sp, 36
; RV32IZFINXZDINX-NEXT: sw a3, 0(sp)
-; RV32IZFINXZDINX-NEXT: call frexpl@plt
+; RV32IZFINXZDINX-NEXT: call frexpl
; RV32IZFINXZDINX-NEXT: lw a0, 36(sp)
; RV32IZFINXZDINX-NEXT: lw a1, 28(sp)
; RV32IZFINXZDINX-NEXT: lw a2, 24(sp)
@@ -1662,7 +1662,7 @@ define { fp128, i32 } @test_frexp_f128_i32(fp128 %a) nounwind {
; RV64IZFINXZDINX-NEXT: addi a2, sp, 12
; RV64IZFINXZDINX-NEXT: mv a0, a1
; RV64IZFINXZDINX-NEXT: mv a1, a3
-; RV64IZFINXZDINX-NEXT: call frexpl@plt
+; RV64IZFINXZDINX-NEXT: call frexpl
; RV64IZFINXZDINX-NEXT: lw a2, 12(sp)
; RV64IZFINXZDINX-NEXT: sd a1, 8(s0)
; RV64IZFINXZDINX-NEXT: sd a0, 0(s0)
@@ -1689,7 +1689,7 @@ define { fp128, i32 } @test_frexp_f128_i32(fp128 %a) nounwind {
; RV32I-NEXT: mv a1, sp
; RV32I-NEXT: addi a2, sp, 36
; RV32I-NEXT: sw a3, 0(sp)
-; RV32I-NEXT: call frexpl@plt
+; RV32I-NEXT: call frexpl
; RV32I-NEXT: lw a0, 36(sp)
; RV32I-NEXT: lw a1, 28(sp)
; RV32I-NEXT: lw a2, 24(sp)
@@ -1715,7 +1715,7 @@ define { fp128, i32 } @test_frexp_f128_i32(fp128 %a) nounwind {
; RV64I-NEXT: addi a2, sp, 12
; RV64I-NEXT: mv a0, a1
; RV64I-NEXT: mv a1, a3
-; RV64I-NEXT: call frexpl@plt
+; RV64I-NEXT: call frexpl
; RV64I-NEXT: lw a2, 12(sp)
; RV64I-NEXT: sd a1, 8(s0)
; RV64I-NEXT: sd a0, 0(s0)
@@ -1746,7 +1746,7 @@ define fp128 @test_frexp_f128_i32_only_use_fract(fp128 %a) nounwind {
; RV32IFD-NEXT: mv a1, sp
; RV32IFD-NEXT: addi a2, sp, 36
; RV32IFD-NEXT: sw a3, 0(sp)
-; RV32IFD-NEXT: call frexpl@plt
+; RV32IFD-NEXT: call frexpl
; RV32IFD-NEXT: lw a0, 28(sp)
; RV32IFD-NEXT: lw a1, 24(sp)
; RV32IFD-NEXT: lw a2, 20(sp)
@@ -1765,7 +1765,7 @@ define fp128 @test_frexp_f128_i32_only_use_fract(fp128 %a) nounwind {
; RV64IFD-NEXT: addi sp, sp, -16
; RV64IFD-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64IFD-NEXT: addi a2, sp, 4
-; RV64IFD-NEXT: call frexpl@plt
+; RV64IFD-NEXT: call frexpl
; RV64IFD-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64IFD-NEXT: addi sp, sp, 16
; RV64IFD-NEXT: ret
@@ -1787,7 +1787,7 @@ define fp128 @test_frexp_f128_i32_only_use_fract(fp128 %a) nounwind {
; RV32IZFINXZDINX-NEXT: mv a1, sp
; RV32IZFINXZDINX-NEXT: addi a2, sp, 36
; RV32IZFINXZDINX-NEXT: sw a3, 0(sp)
-; RV32IZFINXZDINX-NEXT: call frexpl@plt
+; RV32IZFINXZDINX-NEXT: call frexpl
; RV32IZFINXZDINX-NEXT: lw a0, 28(sp)
; RV32IZFINXZDINX-NEXT: lw a1, 24(sp)
; RV32IZFINXZDINX-NEXT: lw a2, 20(sp)
@@ -1806,7 +1806,7 @@ define fp128 @test_frexp_f128_i32_only_use_fract(fp128 %a) nounwind {
; RV64IZFINXZDINX-NEXT: addi sp, sp, -16
; RV64IZFINXZDINX-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64IZFINXZDINX-NEXT: addi a2, sp, 4
-; RV64IZFINXZDINX-NEXT: call frexpl@plt
+; RV64IZFINXZDINX-NEXT: call frexpl
; RV64IZFINXZDINX-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64IZFINXZDINX-NEXT: addi sp, sp, 16
; RV64IZFINXZDINX-NEXT: ret
@@ -1828,7 +1828,7 @@ define fp128 @test_frexp_f128_i32_only_use_fract(fp128 %a) nounwind {
; RV32I-NEXT: mv a1, sp
; RV32I-NEXT: addi a2, sp, 36
; RV32I-NEXT: sw a3, 0(sp)
-; RV32I-NEXT: call frexpl@plt
+; RV32I-NEXT: call frexpl
; RV32I-NEXT: lw a0, 28(sp)
; RV32I-NEXT: lw a1, 24(sp)
; RV32I-NEXT: lw a2, 20(sp)
@@ -1847,7 +1847,7 @@ define fp128 @test_frexp_f128_i32_only_use_fract(fp128 %a) nounwind {
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT: addi a2, sp, 4
-; RV64I-NEXT: call frexpl@plt
+; RV64I-NEXT: call frexpl
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
@@ -1872,7 +1872,7 @@ define i32 @test_frexp_f128_i32_only_use_exp(fp128 %a) nounwind {
; RV32IFD-NEXT: addi a1, sp, 8
; RV32IFD-NEXT: addi a2, sp, 40
; RV32IFD-NEXT: sw a3, 8(sp)
-; RV32IFD-NEXT: call frexpl@plt
+; RV32IFD-NEXT: call frexpl
; RV32IFD-NEXT: lw a0, 40(sp)
; RV32IFD-NEXT: lw ra, 44(sp) # 4-byte Folded Reload
; RV32IFD-NEXT: addi sp, sp, 48
@@ -1883,7 +1883,7 @@ define i32 @test_frexp_f128_i32_only_use_exp(fp128 %a) nounwind {
; RV64IFD-NEXT: addi sp, sp, -16
; RV64IFD-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64IFD-NEXT: addi a2, sp, 4
-; RV64IFD-NEXT: call frexpl@plt
+; RV64IFD-NEXT: call frexpl
; RV64IFD-NEXT: lw a0, 4(sp)
; RV64IFD-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64IFD-NEXT: addi sp, sp, 16
@@ -1904,7 +1904,7 @@ define i32 @test_frexp_f128_i32_only_use_exp(fp128 %a) nounwind {
; RV32IZFINXZDINX-NEXT: addi a1, sp, 8
; RV32IZFINXZDINX-NEXT: addi a2, sp, 40
; RV32IZFINXZDINX-NEXT: sw a3, 8(sp)
-; RV32IZFINXZDINX-NEXT: call frexpl@plt
+; RV32IZFINXZDINX-NEXT: call frexpl
; RV32IZFINXZDINX-NEXT: lw a0, 40(sp)
; RV32IZFINXZDINX-NEXT: lw ra, 44(sp) # 4-byte Folded Reload
; RV32IZFINXZDINX-NEXT: addi sp, sp, 48
@@ -1915,7 +1915,7 @@ define i32 @test_frexp_f128_i32_only_use_exp(fp128 %a) nounwind {
; RV64IZFINXZDINX-NEXT: addi sp, sp, -16
; RV64IZFINXZDINX-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64IZFINXZDINX-NEXT: addi a2, sp, 4
-; RV64IZFINXZDINX-NEXT: call frexpl@plt
+; RV64IZFINXZDINX-NEXT: call frexpl
; RV64IZFINXZDINX-NEXT: lw a0, 4(sp)
; RV64IZFINXZDINX-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64IZFINXZDINX-NEXT: addi sp, sp, 16
@@ -1936,7 +1936,7 @@ define i32 @test_frexp_f128_i32_only_use_exp(fp128 %a) nounwind {
; RV32I-NEXT: addi a1, sp, 8
; RV32I-NEXT: addi a2, sp, 40
; RV32I-NEXT: sw a3, 8(sp)
-; RV32I-NEXT: call frexpl@plt
+; RV32I-NEXT: call frexpl
; RV32I-NEXT: lw a0, 40(sp)
; RV32I-NEXT: lw ra, 44(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 48
@@ -1947,7 +1947,7 @@ define i32 @test_frexp_f128_i32_only_use_exp(fp128 %a) nounwind {
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT: addi a2, sp, 4
-; RV64I-NEXT: call frexpl@plt
+; RV64I-NEXT: call frexpl
; RV64I-NEXT: lw a0, 4(sp)
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
diff --git a/llvm/test/CodeGen/RISCV/machine-outliner-and-machine-copy-propagation.ll b/llvm/test/CodeGen/RISCV/machine-outliner-and-machine-copy-propagation.ll
index 45c582a61330..a1a6fa1f62a5 100644
--- a/llvm/test/CodeGen/RISCV/machine-outliner-and-machine-copy-propagation.ll
+++ b/llvm/test/CodeGen/RISCV/machine-outliner-and-machine-copy-propagation.ll
@@ -147,47 +147,47 @@ declare void @exit(i32 signext) noreturn
; RV64I-NEXT: sd s0, 16(sp) # 8-byte Folded Spill
; RV64I-NEXT: sd s1, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT: call t0, OUTLINED_FUNCTION_0
-; RV64I-NEXT: call nge@plt
+; RV64I-NEXT: call nge
; RV64I-NEXT: bnez a0, .LBB4_9
; RV64I-NEXT: # %bb.1: # %if.end
; RV64I-NEXT: lui a1, 524288
; RV64I-NEXT: mv a0, s0
-; RV64I-NEXT: call nge@plt
+; RV64I-NEXT: call nge
; RV64I-NEXT: li a1, -1
; RV64I-NEXT: bne a0, a1, .LBB4_9
; RV64I-NEXT: # %bb.2: # %if.end4
; RV64I-NEXT: call t0, OUTLINED_FUNCTION_0
-; RV64I-NEXT: call ngt@plt
+; RV64I-NEXT: call ngt
; RV64I-NEXT: bnez a0, .LBB4_9
; RV64I-NEXT: # %bb.3: # %if.end8
; RV64I-NEXT: lui a1, 524288
; RV64I-NEXT: mv a0, s0
-; RV64I-NEXT: call ngt@plt
+; RV64I-NEXT: call ngt
; RV64I-NEXT: li s1, -1
; RV64I-NEXT: bne a0, s1, .LBB4_9
; RV64I-NEXT: # %bb.4: # %if.end12
; RV64I-NEXT: call t0, OUTLINED_FUNCTION_0
-; RV64I-NEXT: call nle@plt
+; RV64I-NEXT: call nle
; RV64I-NEXT: bne a0, s1, .LBB4_9
; RV64I-NEXT: # %bb.5: # %if.end16
; RV64I-NEXT: lui a1, 524288
; RV64I-NEXT: mv a0, s0
-; RV64I-NEXT: call nle@plt
+; RV64I-NEXT: call nle
; RV64I-NEXT: bnez a0, .LBB4_9
; RV64I-NEXT: # %bb.6: # %if.end20
; RV64I-NEXT: call t0, OUTLINED_FUNCTION_0
-; RV64I-NEXT: call nlt@plt
+; RV64I-NEXT: call nlt
; RV64I-NEXT: li a1, -1
; RV64I-NEXT: bne a0, a1, .LBB4_9
; RV64I-NEXT: # %bb.7: # %if.end24
; RV64I-NEXT: lui a1, 524288
; RV64I-NEXT: mv a0, s0
-; RV64I-NEXT: call nlt@plt
+; RV64I-NEXT: call nlt
; RV64I-NEXT: bnez a0, .LBB4_9
; RV64I-NEXT: # %bb.8: # %if.end28
-; RV64I-NEXT: call exit@plt
+; RV64I-NEXT: call exit
; RV64I-NEXT: .LBB4_9: # %if.then
-; RV64I-NEXT: call abort@plt
+; RV64I-NEXT: call abort
;
; RV64I-LABEL: OUTLINED_FUNCTION_0:
; RV64I: # %bb.0:
diff --git a/llvm/test/CodeGen/RISCV/machine-outliner-throw.ll b/llvm/test/CodeGen/RISCV/machine-outliner-throw.ll
index 21254b630203..2de29fe2fa68 100644
--- a/llvm/test/CodeGen/RISCV/machine-outliner-throw.ll
+++ b/llvm/test/CodeGen/RISCV/machine-outliner-throw.ll
@@ -15,12 +15,12 @@ define i32 @func1(i32 %x) #0 {
; CHECK-NEXT: mul a0, a0, a0
; CHECK-NEXT: addi s0, a0, 1
; CHECK-NEXT: li a0, 4
-; CHECK-NEXT: call __cxa_allocate_exception@plt
+; CHECK-NEXT: call __cxa_allocate_exception
; CHECK-NEXT: sw s0, 0(a0)
; CHECK-NEXT: lui a1, %hi(_ZTIi)
; CHECK-NEXT: addi a1, a1, %lo(_ZTIi)
; CHECK-NEXT: li a2, 0
-; CHECK-NEXT: call __cxa_throw@plt
+; CHECK-NEXT: call __cxa_throw
entry:
%mul = mul i32 %x, %x
%add = add i32 %mul, 1
@@ -42,12 +42,12 @@ define i32 @func2(i32 %x) #0 {
; CHECK-NEXT: mul a0, a0, a0
; CHECK-NEXT: addi s0, a0, 1
; CHECK-NEXT: li a0, 4
-; CHECK-NEXT: call __cxa_allocate_exception@plt
+; CHECK-NEXT: call __cxa_allocate_exception
; CHECK-NEXT: sw s0, 0(a0)
; CHECK-NEXT: lui a1, %hi(_ZTIi)
; CHECK-NEXT: addi a1, a1, %lo(_ZTIi)
; CHECK-NEXT: li a2, 0
-; CHECK-NEXT: call __cxa_throw@plt
+; CHECK-NEXT: call __cxa_throw
entry:
%mul = mul i32 %x, %x
%add = add i32 %mul, 1
diff --git a/llvm/test/CodeGen/RISCV/machinelicm-address-pseudos.ll b/llvm/test/CodeGen/RISCV/machinelicm-address-pseudos.ll
index 17167e7f51b4..27297c978718 100644
--- a/llvm/test/CodeGen/RISCV/machinelicm-address-pseudos.ll
+++ b/llvm/test/CodeGen/RISCV/machinelicm-address-pseudos.ll
@@ -156,7 +156,7 @@ define void @test_la_tls_gd(i32 signext %n) nounwind {
; RV32I-NEXT: .LBB3_1: # %loop
; RV32I-NEXT: # =>This Inner Loop Header: Depth=1
; RV32I-NEXT: mv a0, s1
-; RV32I-NEXT: call __tls_get_addr@plt
+; RV32I-NEXT: call __tls_get_addr
; RV32I-NEXT: lw zero, 0(a0)
; RV32I-NEXT: addi s2, s2, 1
; RV32I-NEXT: blt s2, s0, .LBB3_1
@@ -183,7 +183,7 @@ define void @test_la_tls_gd(i32 signext %n) nounwind {
; RV64I-NEXT: .LBB3_1: # %loop
; RV64I-NEXT: # =>This Inner Loop Header: Depth=1
; RV64I-NEXT: mv a0, s1
-; RV64I-NEXT: call __tls_get_addr@plt
+; RV64I-NEXT: call __tls_get_addr
; RV64I-NEXT: lw zero, 0(a0)
; RV64I-NEXT: addiw s2, s2, 1
; RV64I-NEXT: blt s2, s0, .LBB3_1
diff --git a/llvm/test/CodeGen/RISCV/macro-fusion-lui-addi.ll b/llvm/test/CodeGen/RISCV/macro-fusion-lui-addi.ll
index 498e6cf23ba3..b45365e7a8b6 100644
--- a/llvm/test/CodeGen/RISCV/macro-fusion-lui-addi.ll
+++ b/llvm/test/CodeGen/RISCV/macro-fusion-lui-addi.ll
@@ -14,21 +14,21 @@ define void @foo(i32 signext %0, i32 signext %1) {
; NOFUSION-NEXT: lui a0, %hi(.L.str)
; NOFUSION-NEXT: fcvt.s.w fa0, a1
; NOFUSION-NEXT: addi a0, a0, %lo(.L.str)
-; NOFUSION-NEXT: tail bar@plt
+; NOFUSION-NEXT: tail bar
;
; FUSION-LABEL: foo:
; FUSION: # %bb.0:
; FUSION-NEXT: fcvt.s.w fa0, a1
; FUSION-NEXT: lui a0, %hi(.L.str)
; FUSION-NEXT: addi a0, a0, %lo(.L.str)
-; FUSION-NEXT: tail bar@plt
+; FUSION-NEXT: tail bar
;
; FUSION-POSTRA-LABEL: foo:
; FUSION-POSTRA: # %bb.0:
; FUSION-POSTRA-NEXT: fcvt.s.w fa0, a1
; FUSION-POSTRA-NEXT: lui a0, %hi(.L.str)
; FUSION-POSTRA-NEXT: addi a0, a0, %lo(.L.str)
-; FUSION-POSTRA-NEXT: tail bar@plt
+; FUSION-POSTRA-NEXT: tail bar
%3 = sitofp i32 %1 to float
tail call void @bar(ptr @.str, float %3)
ret void
diff --git a/llvm/test/CodeGen/RISCV/make-compressible.mir b/llvm/test/CodeGen/RISCV/make-compressible.mir
index e526b131a017..91c2d95b5051 100644
--- a/llvm/test/CodeGen/RISCV/make-compressible.mir
+++ b/llvm/test/CodeGen/RISCV/make-compressible.mir
@@ -550,18 +550,18 @@ body: |
; RV32-NEXT: renamable $f10_f = FLW $x10, 0 :: (load (s32) from %ir.g)
; RV32-NEXT: renamable $f11_f = FLW $x10, 4 :: (load (s32) from %ir.arrayidx1)
; RV32-NEXT: renamable $f12_f = FLW killed $x10, 8 :: (load (s32) from %ir.arrayidx2)
- ; RV32-NEXT: PseudoTAIL target-flags(riscv-plt) @load_common_ptr_float_1, implicit $x2, implicit $f10_f, implicit $f11_f, implicit $f12_f
+ ; RV32-NEXT: PseudoTAIL target-flags(riscv-call) @load_common_ptr_float_1, implicit $x2, implicit $f10_f, implicit $f11_f, implicit $f12_f
; RV64-LABEL: name: load_common_ptr_float
; RV64: liveins: $x16
; RV64-NEXT: {{ $}}
; RV64-NEXT: renamable $f10_f = FLW renamable $x16, 0 :: (load (s32) from %ir.g)
; RV64-NEXT: renamable $f11_f = FLW renamable $x16, 4 :: (load (s32) from %ir.arrayidx1)
; RV64-NEXT: renamable $f12_f = FLW killed renamable $x16, 8 :: (load (s32) from %ir.arrayidx2)
- ; RV64-NEXT: PseudoTAIL target-flags(riscv-plt) @load_common_ptr_float_1, implicit $x2, implicit $f10_f, implicit $f11_f, implicit $f12_f
+ ; RV64-NEXT: PseudoTAIL target-flags(riscv-call) @load_common_ptr_float_1, implicit $x2, implicit $f10_f, implicit $f11_f, implicit $f12_f
renamable $f10_f = FLW renamable $x16, 0 :: (load (s32) from %ir.g)
renamable $f11_f = FLW renamable $x16, 4 :: (load (s32) from %ir.arrayidx1)
renamable $f12_f = FLW killed renamable $x16, 8 :: (load (s32) from %ir.arrayidx2)
- PseudoTAIL target-flags(riscv-plt) @load_common_ptr_float_1, implicit $x2, implicit $f10_f, implicit $f11_f, implicit $f12_f
+ PseudoTAIL target-flags(riscv-call) @load_common_ptr_float_1, implicit $x2, implicit $f10_f, implicit $f11_f, implicit $f12_f
...
---
@@ -578,7 +578,7 @@ body: |
; RV32-NEXT: renamable $f10_d = FLD $x10, 0 :: (load (s64) from %ir.g)
; RV32-NEXT: renamable $f11_d = FLD $x10, 8 :: (load (s64) from %ir.arrayidx1)
; RV32-NEXT: renamable $f12_d = FLD killed $x10, 16 :: (load (s64) from %ir.arrayidx2)
- ; RV32-NEXT: PseudoTAIL target-flags(riscv-plt) @load_common_ptr_double_1, implicit $x2, implicit $f10_d, implicit $f11_d, implicit $f12_d
+ ; RV32-NEXT: PseudoTAIL target-flags(riscv-call) @load_common_ptr_double_1, implicit $x2, implicit $f10_d, implicit $f11_d, implicit $f12_d
; RV64-LABEL: name: load_common_ptr_double
; RV64: liveins: $x16
; RV64-NEXT: {{ $}}
@@ -586,11 +586,11 @@ body: |
; RV64-NEXT: renamable $f10_d = FLD $x10, 0 :: (load (s64) from %ir.g)
; RV64-NEXT: renamable $f11_d = FLD $x10, 8 :: (load (s64) from %ir.arrayidx1)
; RV64-NEXT: renamable $f12_d = FLD killed $x10, 16 :: (load (s64) from %ir.arrayidx2)
- ; RV64-NEXT: PseudoTAIL target-flags(riscv-plt) @load_common_ptr_double_1, implicit $x2, implicit $f10_d, implicit $f11_d, implicit $f12_d
+ ; RV64-NEXT: PseudoTAIL target-flags(riscv-call) @load_common_ptr_double_1, implicit $x2, implicit $f10_d, implicit $f11_d, implicit $f12_d
renamable $f10_d = FLD renamable $x16, 0 :: (load (s64) from %ir.g)
renamable $f11_d = FLD renamable $x16, 8 :: (load (s64) from %ir.arrayidx1)
renamable $f12_d = FLD killed renamable $x16, 16 :: (load (s64) from %ir.arrayidx2)
- PseudoTAIL target-flags(riscv-plt) @load_common_ptr_double_1, implicit $x2, implicit $f10_d, implicit $f11_d, implicit $f12_d
+ PseudoTAIL target-flags(riscv-call) @load_common_ptr_double_1, implicit $x2, implicit $f10_d, implicit $f11_d, implicit $f12_d
...
---
@@ -746,18 +746,18 @@ body: |
; RV32-NEXT: renamable $f10_f = FLW $x11, 16 :: (load (s32) from %ir.arrayidx)
; RV32-NEXT: renamable $f11_f = FLW $x11, 20 :: (load (s32) from %ir.arrayidx1)
; RV32-NEXT: renamable $f12_f = FLW killed $x11, 24 :: (load (s32) from %ir.arrayidx2)
- ; RV32-NEXT: PseudoTAIL target-flags(riscv-plt) @load_large_offset_float_1, implicit $x2, implicit $f10_f, implicit $f11_f, implicit $f12_f
+ ; RV32-NEXT: PseudoTAIL target-flags(riscv-call) @load_large_offset_float_1, implicit $x2, implicit $f10_f, implicit $f11_f, implicit $f12_f
; RV64-LABEL: name: load_large_offset_float
; RV64: liveins: $x10
; RV64-NEXT: {{ $}}
; RV64-NEXT: renamable $f10_f = FLW renamable $x10, 400 :: (load (s32) from %ir.arrayidx)
; RV64-NEXT: renamable $f11_f = FLW renamable $x10, 404 :: (load (s32) from %ir.arrayidx1)
; RV64-NEXT: renamable $f12_f = FLW killed renamable $x10, 408 :: (load (s32) from %ir.arrayidx2)
- ; RV64-NEXT: PseudoTAIL target-flags(riscv-plt) @load_large_offset_float_1, implicit $x2, implicit $f10_f, implicit $f11_f, implicit $f12_f
+ ; RV64-NEXT: PseudoTAIL target-flags(riscv-call) @load_large_offset_float_1, implicit $x2, implicit $f10_f, implicit $f11_f, implicit $f12_f
renamable $f10_f = FLW renamable $x10, 400 :: (load (s32) from %ir.arrayidx)
renamable $f11_f = FLW renamable $x10, 404 :: (load (s32) from %ir.arrayidx1)
renamable $f12_f = FLW killed renamable $x10, 408 :: (load (s32) from %ir.arrayidx2)
- PseudoTAIL target-flags(riscv-plt) @load_large_offset_float_1, implicit $x2, implicit $f10_f, implicit $f11_f, implicit $f12_f
+ PseudoTAIL target-flags(riscv-call) @load_large_offset_float_1, implicit $x2, implicit $f10_f, implicit $f11_f, implicit $f12_f
...
---
@@ -774,7 +774,7 @@ body: |
; RV32-NEXT: renamable $f10_d = FLD $x11, 32 :: (load (s64) from %ir.arrayidx)
; RV32-NEXT: renamable $f11_d = FLD $x11, 40 :: (load (s64) from %ir.arrayidx1)
; RV32-NEXT: renamable $f12_d = FLD killed $x11, 48 :: (load (s64) from %ir.arrayidx2)
- ; RV32-NEXT: PseudoTAIL target-flags(riscv-plt) @load_large_offset_double_1, implicit $x2, implicit $f10_d, implicit $f11_d, implicit $f12_d
+ ; RV32-NEXT: PseudoTAIL target-flags(riscv-call) @load_large_offset_double_1, implicit $x2, implicit $f10_d, implicit $f11_d, implicit $f12_d
; RV64-LABEL: name: load_large_offset_double
; RV64: liveins: $x10
; RV64-NEXT: {{ $}}
@@ -782,11 +782,11 @@ body: |
; RV64-NEXT: renamable $f10_d = FLD $x11, 32 :: (load (s64) from %ir.arrayidx)
; RV64-NEXT: renamable $f11_d = FLD $x11, 40 :: (load (s64) from %ir.arrayidx1)
; RV64-NEXT: renamable $f12_d = FLD killed $x11, 48 :: (load (s64) from %ir.arrayidx2)
- ; RV64-NEXT: PseudoTAIL target-flags(riscv-plt) @load_large_offset_double_1, implicit $x2, implicit $f10_d, implicit $f11_d, implicit $f12_d
+ ; RV64-NEXT: PseudoTAIL target-flags(riscv-call) @load_large_offset_double_1, implicit $x2, implicit $f10_d, implicit $f11_d, implicit $f12_d
renamable $f10_d = FLD renamable $x10, 800 :: (load (s64) from %ir.arrayidx)
renamable $f11_d = FLD renamable $x10, 808 :: (load (s64) from %ir.arrayidx1)
renamable $f12_d = FLD killed renamable $x10, 816 :: (load (s64) from %ir.arrayidx2)
- PseudoTAIL target-flags(riscv-plt) @load_large_offset_double_1, implicit $x2, implicit $f10_d, implicit $f11_d, implicit $f12_d
+ PseudoTAIL target-flags(riscv-call) @load_large_offset_double_1, implicit $x2, implicit $f10_d, implicit $f11_d, implicit $f12_d
...
---
diff --git a/llvm/test/CodeGen/RISCV/mem.ll b/llvm/test/CodeGen/RISCV/mem.ll
index 7c98d4ae1b3f..a9cb80cb6634 100644
--- a/llvm/test/CodeGen/RISCV/mem.ll
+++ b/llvm/test/CodeGen/RISCV/mem.ll
@@ -324,7 +324,7 @@ define void @addi_fold_crash(i32 %arg) nounwind {
; RV32I-NEXT: add a0, a1, a0
; RV32I-NEXT: sb zero, 0(a0)
; RV32I-NEXT: mv a0, a1
-; RV32I-NEXT: call snork@plt
+; RV32I-NEXT: call snork
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
diff --git a/llvm/test/CodeGen/RISCV/mem64.ll b/llvm/test/CodeGen/RISCV/mem64.ll
index 09b04535498c..248964146325 100644
--- a/llvm/test/CodeGen/RISCV/mem64.ll
+++ b/llvm/test/CodeGen/RISCV/mem64.ll
@@ -363,7 +363,7 @@ define void @addi_fold_crash(i64 %arg) nounwind {
; RV64I-NEXT: add a0, a1, a0
; RV64I-NEXT: sb zero, 0(a0)
; RV64I-NEXT: mv a0, a1
-; RV64I-NEXT: call snork@plt
+; RV64I-NEXT: call snork
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
diff --git a/llvm/test/CodeGen/RISCV/memcpy.ll b/llvm/test/CodeGen/RISCV/memcpy.ll
index 26ad87250919..12ec0881b20d 100644
--- a/llvm/test/CodeGen/RISCV/memcpy.ll
+++ b/llvm/test/CodeGen/RISCV/memcpy.ll
@@ -91,14 +91,14 @@ define void @t1(ptr nocapture %C) nounwind {
; RV32-NEXT: lui a1, %hi(.L.str1)
; RV32-NEXT: addi a1, a1, %lo(.L.str1)
; RV32-NEXT: li a2, 31
-; RV32-NEXT: tail memcpy@plt
+; RV32-NEXT: tail memcpy
;
; RV64-LABEL: t1:
; RV64: # %bb.0: # %entry
; RV64-NEXT: lui a1, %hi(.L.str1)
; RV64-NEXT: addi a1, a1, %lo(.L.str1)
; RV64-NEXT: li a2, 31
-; RV64-NEXT: tail memcpy@plt
+; RV64-NEXT: tail memcpy
;
; RV32-FAST-LABEL: t1:
; RV32-FAST: # %bb.0: # %entry
@@ -152,14 +152,14 @@ define void @t2(ptr nocapture %C) nounwind {
; RV32-BOTH-NEXT: lui a1, %hi(.L.str2)
; RV32-BOTH-NEXT: addi a1, a1, %lo(.L.str2)
; RV32-BOTH-NEXT: li a2, 36
-; RV32-BOTH-NEXT: tail memcpy@plt
+; RV32-BOTH-NEXT: tail memcpy
;
; RV64-LABEL: t2:
; RV64: # %bb.0: # %entry
; RV64-NEXT: lui a1, %hi(.L.str2)
; RV64-NEXT: addi a1, a1, %lo(.L.str2)
; RV64-NEXT: li a2, 36
-; RV64-NEXT: tail memcpy@plt
+; RV64-NEXT: tail memcpy
;
; RV64-FAST-LABEL: t2:
; RV64-FAST: # %bb.0: # %entry
@@ -188,14 +188,14 @@ define void @t3(ptr nocapture %C) nounwind {
; RV32-NEXT: lui a1, %hi(.L.str3)
; RV32-NEXT: addi a1, a1, %lo(.L.str3)
; RV32-NEXT: li a2, 24
-; RV32-NEXT: tail memcpy@plt
+; RV32-NEXT: tail memcpy
;
; RV64-LABEL: t3:
; RV64: # %bb.0: # %entry
; RV64-NEXT: lui a1, %hi(.L.str3)
; RV64-NEXT: addi a1, a1, %lo(.L.str3)
; RV64-NEXT: li a2, 24
-; RV64-NEXT: tail memcpy@plt
+; RV64-NEXT: tail memcpy
;
; RV32-FAST-LABEL: t3:
; RV32-FAST: # %bb.0: # %entry
@@ -241,14 +241,14 @@ define void @t4(ptr nocapture %C) nounwind {
; RV32-NEXT: lui a1, %hi(.L.str4)
; RV32-NEXT: addi a1, a1, %lo(.L.str4)
; RV32-NEXT: li a2, 18
-; RV32-NEXT: tail memcpy@plt
+; RV32-NEXT: tail memcpy
;
; RV64-LABEL: t4:
; RV64: # %bb.0: # %entry
; RV64-NEXT: lui a1, %hi(.L.str4)
; RV64-NEXT: addi a1, a1, %lo(.L.str4)
; RV64-NEXT: li a2, 18
-; RV64-NEXT: tail memcpy@plt
+; RV64-NEXT: tail memcpy
;
; RV32-FAST-LABEL: t4:
; RV32-FAST: # %bb.0: # %entry
@@ -353,7 +353,7 @@ define void @t6() nounwind {
; RV32-NEXT: lui a1, %hi(.L.str6)
; RV32-NEXT: addi a1, a1, %lo(.L.str6)
; RV32-NEXT: li a2, 14
-; RV32-NEXT: call memcpy@plt
+; RV32-NEXT: call memcpy
; RV32-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32-NEXT: addi sp, sp, 16
; RV32-NEXT: ret
@@ -367,7 +367,7 @@ define void @t6() nounwind {
; RV64-NEXT: lui a1, %hi(.L.str6)
; RV64-NEXT: addi a1, a1, %lo(.L.str6)
; RV64-NEXT: li a2, 14
-; RV64-NEXT: call memcpy@plt
+; RV64-NEXT: call memcpy
; RV64-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64-NEXT: addi sp, sp, 16
; RV64-NEXT: ret
diff --git a/llvm/test/CodeGen/RISCV/mir-target-flags.ll b/llvm/test/CodeGen/RISCV/mir-target-flags.ll
index c4c6a1435732..fdc0d894b2ca 100644
--- a/llvm/test/CodeGen/RISCV/mir-target-flags.ll
+++ b/llvm/test/CodeGen/RISCV/mir-target-flags.ll
@@ -35,7 +35,7 @@ define i32 @caller(i32 %a) nounwind {
; RV32-SMALL: target-flags(riscv-tprel-hi) @t_le
; RV32-SMALL-NEXT: target-flags(riscv-tprel-add) @t_le
; RV32-SMALL-NEXT: target-flags(riscv-tprel-lo) @t_le
-; RV32-SMALL: target-flags(riscv-plt) @callee
+; RV32-SMALL: target-flags(riscv-call) @callee
;
; RV32-MED-LABEL: name: caller
; RV32-MED: target-flags(riscv-got-hi) @g_e
@@ -44,16 +44,16 @@ define i32 @caller(i32 %a) nounwind {
; RV32-MED-NEXT: target-flags(riscv-pcrel-lo) <mcsymbol .Lpcrel_hi1>
; RV32-MED: target-flags(riscv-tls-gd-hi) @t_un
; RV32-MED-NEXT: target-flags(riscv-pcrel-lo) <mcsymbol .Lpcrel_hi2>
-; RV32-MED: target-flags(riscv-plt) &__tls_get_addr
+; RV32-MED: target-flags(riscv-call) &__tls_get_addr
; RV32-MED: target-flags(riscv-tls-gd-hi) @t_ld
; RV32-MED-NEXT: target-flags(riscv-pcrel-lo) <mcsymbol .Lpcrel_hi3>
-; RV32-MED: target-flags(riscv-plt) &__tls_get_addr
+; RV32-MED: target-flags(riscv-call) &__tls_get_addr
; RV32-MED: target-flags(riscv-tls-got-hi) @t_ie
; RV32-MED-NEXT: target-flags(riscv-pcrel-lo) <mcsymbol .Lpcrel_hi4>
; RV32-MED: target-flags(riscv-tprel-hi) @t_le
; RV32-MED-NEXT: target-flags(riscv-tprel-add) @t_le
; RV32-MED-NEXT: target-flags(riscv-tprel-lo) @t_le
-; RV32-MED: target-flags(riscv-plt) @callee
+; RV32-MED: target-flags(riscv-call) @callee
;
%b = load i32, ptr @g_e
%c = load i32, ptr @g_i
diff --git a/llvm/test/CodeGen/RISCV/miss-sp-restore-eh.ll b/llvm/test/CodeGen/RISCV/miss-sp-restore-eh.ll
index b7c0a9f29273..45db5078f150 100644
--- a/llvm/test/CodeGen/RISCV/miss-sp-restore-eh.ll
+++ b/llvm/test/CodeGen/RISCV/miss-sp-restore-eh.ll
@@ -34,7 +34,7 @@ define signext i32 @foo() #1 personality ptr @__gxx_personality_v0 {
; CHECK-NEXT: li a5, 0
; CHECK-NEXT: li a6, 0
; CHECK-NEXT: li a7, 0
-; CHECK-NEXT: call _Z3fooiiiiiiiiiiPi@plt
+; CHECK-NEXT: call _Z3fooiiiiiiiiiiPi
; CHECK-NEXT: addi sp, sp, 32
; CHECK-NEXT: .Ltmp1:
; CHECK-NEXT: # %bb.1: # %try.cont.unreachable
@@ -44,9 +44,9 @@ define signext i32 @foo() #1 personality ptr @__gxx_personality_v0 {
; CHECK-NEXT: li a2, 1
; CHECK-NEXT: bne a1, a2, .LBB0_4
; CHECK-NEXT: # %bb.3: # %catch
-; CHECK-NEXT: call __cxa_begin_catch@plt
+; CHECK-NEXT: call __cxa_begin_catch
; CHECK-NEXT: lw s1, 0(a0)
-; CHECK-NEXT: call __cxa_end_catch@plt
+; CHECK-NEXT: call __cxa_end_catch
; CHECK-NEXT: mv a0, s1
; CHECK-NEXT: addi sp, s0, -32
; CHECK-NEXT: ld ra, 24(sp) # 8-byte Folded Reload
@@ -55,7 +55,7 @@ define signext i32 @foo() #1 personality ptr @__gxx_personality_v0 {
; CHECK-NEXT: addi sp, sp, 32
; CHECK-NEXT: ret
; CHECK-NEXT: .LBB0_4: # %ehcleanup
-; CHECK-NEXT: call _Unwind_Resume@plt
+; CHECK-NEXT: call _Unwind_Resume
entry:
invoke void @_Z3fooiiiiiiiiiiPi(i32 signext poison, i32 signext poison, i32 signext poison, i32 signext poison, i32 signext poison, i32 signext poison, i32 signext poison, i32 signext poison, i32 poison, i32 poison, i32 poison)
to label %try.cont.unreachable unwind label %lpad
diff --git a/llvm/test/CodeGen/RISCV/mul.ll b/llvm/test/CodeGen/RISCV/mul.ll
index f2b7e8d26328..af341dbaadea 100644
--- a/llvm/test/CodeGen/RISCV/mul.ll
+++ b/llvm/test/CodeGen/RISCV/mul.ll
@@ -14,7 +14,7 @@ define signext i32 @square(i32 %a) nounwind {
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT: mv a1, a0
-; RV32I-NEXT: call __mulsi3@plt
+; RV32I-NEXT: call __mulsi3
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
@@ -29,7 +29,7 @@ define signext i32 @square(i32 %a) nounwind {
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT: mv a1, a0
-; RV64I-NEXT: call __muldi3@plt
+; RV64I-NEXT: call __muldi3
; RV64I-NEXT: sext.w a0, a0
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
@@ -48,7 +48,7 @@ define signext i32 @mul(i32 %a, i32 %b) nounwind {
; RV32I: # %bb.0:
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT: call __mulsi3@plt
+; RV32I-NEXT: call __mulsi3
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
@@ -62,7 +62,7 @@ define signext i32 @mul(i32 %a, i32 %b) nounwind {
; RV64I: # %bb.0:
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT: call __muldi3@plt
+; RV64I-NEXT: call __muldi3
; RV64I-NEXT: sext.w a0, a0
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
@@ -133,7 +133,7 @@ define i64 @mul64(i64 %a, i64 %b) nounwind {
; RV32I: # %bb.0:
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT: call __muldi3@plt
+; RV32I-NEXT: call __muldi3
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
@@ -150,7 +150,7 @@ define i64 @mul64(i64 %a, i64 %b) nounwind {
;
; RV64I-LABEL: mul64:
; RV64I: # %bb.0:
-; RV64I-NEXT: tail __muldi3@plt
+; RV64I-NEXT: tail __muldi3
;
; RV64IM-LABEL: mul64:
; RV64IM: # %bb.0:
@@ -208,7 +208,7 @@ define i32 @mulhs(i32 %a, i32 %b) nounwind {
; RV32I-NEXT: mv a2, a1
; RV32I-NEXT: srai a1, a0, 31
; RV32I-NEXT: srai a3, a2, 31
-; RV32I-NEXT: call __muldi3@plt
+; RV32I-NEXT: call __muldi3
; RV32I-NEXT: mv a0, a1
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
@@ -225,7 +225,7 @@ define i32 @mulhs(i32 %a, i32 %b) nounwind {
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT: sext.w a0, a0
; RV64I-NEXT: sext.w a1, a1
-; RV64I-NEXT: call __muldi3@plt
+; RV64I-NEXT: call __muldi3
; RV64I-NEXT: srli a0, a0, 32
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
@@ -343,7 +343,7 @@ define zeroext i32 @mulhu(i32 zeroext %a, i32 zeroext %b) nounwind {
; RV32I-NEXT: mv a2, a1
; RV32I-NEXT: li a1, 0
; RV32I-NEXT: li a3, 0
-; RV32I-NEXT: call __muldi3@plt
+; RV32I-NEXT: call __muldi3
; RV32I-NEXT: mv a0, a1
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
@@ -358,7 +358,7 @@ define zeroext i32 @mulhu(i32 zeroext %a, i32 zeroext %b) nounwind {
; RV64I: # %bb.0:
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT: call __muldi3@plt
+; RV64I-NEXT: call __muldi3
; RV64I-NEXT: srli a0, a0, 32
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
@@ -385,7 +385,7 @@ define i32 @mulhsu(i32 %a, i32 %b) nounwind {
; RV32I-NEXT: mv a2, a1
; RV32I-NEXT: srai a3, a1, 31
; RV32I-NEXT: li a1, 0
-; RV32I-NEXT: call __muldi3@plt
+; RV32I-NEXT: call __muldi3
; RV32I-NEXT: mv a0, a1
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
@@ -403,7 +403,7 @@ define i32 @mulhsu(i32 %a, i32 %b) nounwind {
; RV64I-NEXT: slli a0, a0, 32
; RV64I-NEXT: srli a0, a0, 32
; RV64I-NEXT: sext.w a1, a1
-; RV64I-NEXT: call __muldi3@plt
+; RV64I-NEXT: call __muldi3
; RV64I-NEXT: srli a0, a0, 32
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
@@ -751,7 +751,7 @@ define i32 @muli32_p384(i32 %a) nounwind {
; RV32I-LABEL: muli32_p384:
; RV32I: # %bb.0:
; RV32I-NEXT: li a1, 384
-; RV32I-NEXT: tail __mulsi3@plt
+; RV32I-NEXT: tail __mulsi3
;
; RV32IM-LABEL: muli32_p384:
; RV32IM: # %bb.0:
@@ -764,7 +764,7 @@ define i32 @muli32_p384(i32 %a) nounwind {
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT: li a1, 384
-; RV64I-NEXT: call __muldi3@plt
+; RV64I-NEXT: call __muldi3
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
@@ -782,7 +782,7 @@ define i32 @muli32_p12288(i32 %a) nounwind {
; RV32I-LABEL: muli32_p12288:
; RV32I: # %bb.0:
; RV32I-NEXT: lui a1, 3
-; RV32I-NEXT: tail __mulsi3@plt
+; RV32I-NEXT: tail __mulsi3
;
; RV32IM-LABEL: muli32_p12288:
; RV32IM: # %bb.0:
@@ -795,7 +795,7 @@ define i32 @muli32_p12288(i32 %a) nounwind {
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT: lui a1, 3
-; RV64I-NEXT: call __muldi3@plt
+; RV64I-NEXT: call __muldi3
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
@@ -910,7 +910,7 @@ define i32 @muli32_m4352(i32 %a) nounwind {
; RV32I: # %bb.0:
; RV32I-NEXT: li a1, -17
; RV32I-NEXT: slli a1, a1, 8
-; RV32I-NEXT: tail __mulsi3@plt
+; RV32I-NEXT: tail __mulsi3
;
; RV32IM-LABEL: muli32_m4352:
; RV32IM: # %bb.0:
@@ -925,7 +925,7 @@ define i32 @muli32_m4352(i32 %a) nounwind {
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT: li a1, -17
; RV64I-NEXT: slli a1, a1, 8
-; RV64I-NEXT: call __muldi3@plt
+; RV64I-NEXT: call __muldi3
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
@@ -1036,7 +1036,7 @@ define i64 @muli64_m4352(i64 %a) nounwind {
; RV32I-NEXT: li a2, -17
; RV32I-NEXT: slli a2, a2, 8
; RV32I-NEXT: li a3, -1
-; RV32I-NEXT: call __muldi3@plt
+; RV32I-NEXT: call __muldi3
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
@@ -1056,7 +1056,7 @@ define i64 @muli64_m4352(i64 %a) nounwind {
; RV64I: # %bb.0:
; RV64I-NEXT: li a1, -17
; RV64I-NEXT: slli a1, a1, 8
-; RV64I-NEXT: tail __muldi3@plt
+; RV64I-NEXT: tail __muldi3
;
; RV64IM-LABEL: muli64_m4352:
; RV64IM: # %bb.0:
@@ -1395,13 +1395,13 @@ define i64 @mulhsu_i64(i64 %a, i64 %b) nounwind {
; RV32I-NEXT: srai s4, a3, 31
; RV32I-NEXT: li a1, 0
; RV32I-NEXT: li a3, 0
-; RV32I-NEXT: call __muldi3@plt
+; RV32I-NEXT: call __muldi3
; RV32I-NEXT: mv s5, a1
; RV32I-NEXT: mv a0, s0
; RV32I-NEXT: li a1, 0
; RV32I-NEXT: mv a2, s3
; RV32I-NEXT: li a3, 0
-; RV32I-NEXT: call __muldi3@plt
+; RV32I-NEXT: call __muldi3
; RV32I-NEXT: add s5, a0, s5
; RV32I-NEXT: sltu a0, s5, a0
; RV32I-NEXT: add s7, a1, a0
@@ -1409,7 +1409,7 @@ define i64 @mulhsu_i64(i64 %a, i64 %b) nounwind {
; RV32I-NEXT: li a1, 0
; RV32I-NEXT: mv a2, s2
; RV32I-NEXT: li a3, 0
-; RV32I-NEXT: call __muldi3@plt
+; RV32I-NEXT: call __muldi3
; RV32I-NEXT: add s5, a0, s5
; RV32I-NEXT: sltu a0, s5, a0
; RV32I-NEXT: add a0, a1, a0
@@ -1418,7 +1418,7 @@ define i64 @mulhsu_i64(i64 %a, i64 %b) nounwind {
; RV32I-NEXT: li a1, 0
; RV32I-NEXT: mv a2, s2
; RV32I-NEXT: li a3, 0
-; RV32I-NEXT: call __muldi3@plt
+; RV32I-NEXT: call __muldi3
; RV32I-NEXT: mv s5, a0
; RV32I-NEXT: mv s6, a1
; RV32I-NEXT: add s9, a0, s8
@@ -1426,14 +1426,14 @@ define i64 @mulhsu_i64(i64 %a, i64 %b) nounwind {
; RV32I-NEXT: mv a1, s2
; RV32I-NEXT: li a2, 0
; RV32I-NEXT: li a3, 0
-; RV32I-NEXT: call __muldi3@plt
+; RV32I-NEXT: call __muldi3
; RV32I-NEXT: mv s2, a0
; RV32I-NEXT: mv s3, a1
; RV32I-NEXT: mv a0, s4
; RV32I-NEXT: mv a1, s4
; RV32I-NEXT: mv a2, s1
; RV32I-NEXT: mv a3, s0
-; RV32I-NEXT: call __muldi3@plt
+; RV32I-NEXT: call __muldi3
; RV32I-NEXT: add s2, a0, s2
; RV32I-NEXT: add a2, s9, s2
; RV32I-NEXT: sltu a3, a2, s9
@@ -1502,7 +1502,7 @@ define i64 @mulhsu_i64(i64 %a, i64 %b) nounwind {
; RV64I-NEXT: mv a2, a1
; RV64I-NEXT: srai a3, a1, 63
; RV64I-NEXT: li a1, 0
-; RV64I-NEXT: call __multi3@plt
+; RV64I-NEXT: call __multi3
; RV64I-NEXT: mv a0, a1
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
diff --git a/llvm/test/CodeGen/RISCV/nest-register.ll b/llvm/test/CodeGen/RISCV/nest-register.ll
index 97704ebae4cb..e222beee4578 100644
--- a/llvm/test/CodeGen/RISCV/nest-register.ll
+++ b/llvm/test/CodeGen/RISCV/nest-register.ll
@@ -17,7 +17,6 @@ define ptr @nest_receiver(ptr nest %arg) nounwind {
; RV64I: # %bb.0:
; RV64I-NEXT: mv a0, t2
; RV64I-NEXT: ret
-;
ret ptr %arg
}
@@ -27,7 +26,7 @@ define ptr @nest_caller(ptr %arg) nounwind {
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT: mv t2, a0
-; RV32I-NEXT: call nest_receiver@plt
+; RV32I-NEXT: call nest_receiver
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
@@ -37,11 +36,10 @@ define ptr @nest_caller(ptr %arg) nounwind {
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT: mv t2, a0
-; RV64I-NEXT: call nest_receiver@plt
+; RV64I-NEXT: call nest_receiver
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
-;
%result = call ptr @nest_receiver(ptr nest %arg)
ret ptr %result
}
diff --git a/llvm/test/CodeGen/RISCV/nomerge.ll b/llvm/test/CodeGen/RISCV/nomerge.ll
index f4e50b6697ae..8e77adfc16e2 100644
--- a/llvm/test/CodeGen/RISCV/nomerge.ll
+++ b/llvm/test/CodeGen/RISCV/nomerge.ll
@@ -13,14 +13,14 @@ define void @foo(i32 %i) nounwind {
; CHECK-NEXT: li a1, 5
; CHECK-NEXT: bne a0, a1, .LBB0_4
; CHECK-NEXT: # %bb.2: # %if.then
-; CHECK-NEXT: call bar@plt
+; CHECK-NEXT: call bar
; CHECK-NEXT: j .LBB0_4
; CHECK-NEXT: .LBB0_3: # %if.then2
-; CHECK-NEXT: call bar@plt
+; CHECK-NEXT: call bar
; CHECK-NEXT: .LBB0_4: # %if.end3
; CHECK-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; CHECK-NEXT: addi sp, sp, 16
-; CHECK-NEXT: tail bar@plt
+; CHECK-NEXT: tail bar
entry:
switch i32 %i, label %if.end3 [
i32 5, label %if.then
@@ -46,9 +46,9 @@ define void @foo_tail(i1 %i) nounwind {
; CHECK-NEXT: andi a0, a0, 1
; CHECK-NEXT: beqz a0, .LBB1_2
; CHECK-NEXT: # %bb.1: # %if.then
-; CHECK-NEXT: tail bar@plt
+; CHECK-NEXT: tail bar
; CHECK-NEXT: .LBB1_2: # %if.else
-; CHECK-NEXT: tail bar@plt
+; CHECK-NEXT: tail bar
entry:
br i1 %i, label %if.then, label %if.else
diff --git a/llvm/test/CodeGen/RISCV/opt-w-instrs.mir b/llvm/test/CodeGen/RISCV/opt-w-instrs.mir
index ebac5a42fbcd..8c22eaf917e8 100644
--- a/llvm/test/CodeGen/RISCV/opt-w-instrs.mir
+++ b/llvm/test/CodeGen/RISCV/opt-w-instrs.mir
@@ -1,31 +1,25 @@
# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py UTC_ARGS: --version 3
-# RUN: llc -mtriple=riscv64 -mattr='+d,+zfa' -verify-machineinstrs -run-pass=riscv-opt-w-instrs %s -o - | FileCheck %s --check-prefix=CHECK-ZFA
+# RUN: llc -mtriple=riscv64 -mattr='+d,+zfa,+v' -verify-machineinstrs -run-pass=riscv-opt-w-instrs %s -o - | FileCheck %s
---
name: fcvtmod_w_d
tracksRegLiveness: true
body: |
bb.0.entry:
- liveins: $x10, $x11
+ liveins: $x10
- ; CHECK-ZFA-LABEL: name: fcvtmod_w_d
- ; CHECK-ZFA: liveins: $x10, $x11
- ; CHECK-ZFA-NEXT: {{ $}}
- ; CHECK-ZFA-NEXT: [[COPY:%[0-9]+]]:fpr64 = COPY $x10
- ; CHECK-ZFA-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $x11
- ; CHECK-ZFA-NEXT: [[FCVTMOD_W_D:%[0-9]+]]:gpr = nofpexcept FCVTMOD_W_D [[COPY]], 1
- ; CHECK-ZFA-NEXT: [[ADD:%[0-9]+]]:gpr = ADD [[COPY1]], [[FCVTMOD_W_D]]
- ; CHECK-ZFA-NEXT: $x10 = COPY [[ADD]]
- ; CHECK-ZFA-NEXT: $x11 = COPY [[FCVTMOD_W_D]]
- ; CHECK-ZFA-NEXT: PseudoRET
+ ; CHECK-LABEL: name: fcvtmod_w_d
+ ; CHECK: liveins: $x10
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: [[COPY:%[0-9]+]]:fpr64 = COPY $x10
+ ; CHECK-NEXT: [[FCVTMOD_W_D:%[0-9]+]]:gpr = nofpexcept FCVTMOD_W_D [[COPY]], 1
+ ; CHECK-NEXT: $x10 = COPY [[FCVTMOD_W_D]]
+ ; CHECK-NEXT: PseudoRET
%0:fpr64 = COPY $x10
- %1:gpr = COPY $x11
- %2:gpr = nofpexcept FCVTMOD_W_D %0, 1
- %3:gpr = ADD %1, %2
- %4:gpr = ADDIW %2, 0
- $x10 = COPY %3
- $x11 = COPY %4
+ %1:gpr = nofpexcept FCVTMOD_W_D %0, 1
+ %2:gpr = ADDIW %1, 0
+ $x10 = COPY %2
PseudoRET
...
@@ -36,15 +30,61 @@ body: |
bb.0.entry:
liveins: $x10, $x11
- ; CHECK-ZFA-LABEL: name: physreg
- ; CHECK-ZFA: liveins: $x10, $x11
- ; CHECK-ZFA-NEXT: {{ $}}
- ; CHECK-ZFA-NEXT: [[COPY:%[0-9]+]]:gpr = COPY $x10
- ; CHECK-ZFA-NEXT: [[ADDIW:%[0-9]+]]:gpr = ADDIW [[COPY]], 0
- ; CHECK-ZFA-NEXT: $x10 = COPY [[ADDIW]]
- ; CHECK-ZFA-NEXT: PseudoRET
+ ; CHECK-LABEL: name: physreg
+ ; CHECK: liveins: $x10, $x11
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: [[COPY:%[0-9]+]]:gpr = COPY $x10
+ ; CHECK-NEXT: [[ADDIW:%[0-9]+]]:gpr = ADDIW [[COPY]], 0
+ ; CHECK-NEXT: $x10 = COPY [[ADDIW]]
+ ; CHECK-NEXT: PseudoRET
%0:gpr = COPY $x10
%1:gpr = ADDIW %0, 0
$x10 = COPY %1
PseudoRET
...
+---
+ name: vfirst
+ tracksRegLiveness: true
+ body: |
+ bb.0.entry:
+ liveins: $x10, $v8
+
+ ; CHECK-LABEL: name: vfirst
+ ; CHECK: liveins: $x10, $v8
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: [[COPY:%[0-9]+]]:vr = COPY $v8
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gprnox0 = COPY $x10
+ ; CHECK-NEXT: [[PseudoVFIRST_M_B1_:%[0-9]+]]:gpr = PseudoVFIRST_M_B1 [[COPY]], [[COPY1]], 0 /* e8 */
+ ; CHECK-NEXT: $x11 = COPY [[PseudoVFIRST_M_B1_]]
+ ; CHECK-NEXT: PseudoRET
+ %0:vr = COPY $v8
+ %1:gprnox0 = COPY $x10
+
+ %2:gpr = PseudoVFIRST_M_B1 %0:vr, %1:gprnox0, 0
+ %3:gpr = ADDIW %2, 0
+ $x11 = COPY %3
+ PseudoRET
+...
+---
+ name: vcpop
+ tracksRegLiveness: true
+ body: |
+ bb.0.entry:
+ liveins: $x10, $v8
+
+ ; CHECK-LABEL: name: vcpop
+ ; CHECK: liveins: $x10, $v8
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: [[COPY:%[0-9]+]]:vr = COPY $v8
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gprnox0 = COPY $x10
+ ; CHECK-NEXT: [[PseudoVCPOP_M_B1_:%[0-9]+]]:gpr = PseudoVCPOP_M_B1 [[COPY]], [[COPY1]], 0 /* e8 */
+ ; CHECK-NEXT: $x11 = COPY [[PseudoVCPOP_M_B1_]]
+ ; CHECK-NEXT: PseudoRET
+ %0:vr = COPY $v8
+ %1:gprnox0 = COPY $x10
+
+ %2:gpr = PseudoVCPOP_M_B1 %0:vr, %1:gprnox0, 0
+ %3:gpr = ADDIW %2, 0
+ $x11 = COPY %3
+ PseudoRET
+...
diff --git a/llvm/test/CodeGen/RISCV/out-of-reach-emergency-slot.mir b/llvm/test/CodeGen/RISCV/out-of-reach-emergency-slot.mir
index ca80abc54601..7c6253b897ff 100644
--- a/llvm/test/CodeGen/RISCV/out-of-reach-emergency-slot.mir
+++ b/llvm/test/CodeGen/RISCV/out-of-reach-emergency-slot.mir
@@ -40,7 +40,7 @@
; CHECK-NEXT: add a1, sp, a1
; CHECK-NEXT: sd a0, -8(a1)
; CHECK-NEXT: ld a1, 0(sp)
- ; CHECK-NEXT: call foo@plt
+ ; CHECK-NEXT: call foo
; CHECK-NEXT: lui a0, 2
; CHECK-NEXT: sub sp, s0, a0
; CHECK-NEXT: addiw a0, a0, -2032
@@ -76,7 +76,9 @@ body: |
; we have to allocate a virtual register to compute it.
  ; A later run of the register scavenger won't find an available register
; either so it will have to spill one to the emergency spill slot.
- PseudoCALL target-flags(riscv-plt) @foo, csr_ilp32_lp64, implicit-def $x1, implicit-def $x2, implicit $x1, implicit $x5, implicit $x6, implicit $x7, implicit $x10, implicit $x11, implicit $x12, implicit $x13, implicit $x14, implicit $x15, implicit $x16, implicit $x17, implicit $x28, implicit $x29, implicit $x30, implicit $x31
+ PseudoCALL target-flags(riscv-call) @foo, csr_ilp32_lp64, implicit-def $x1, implicit-def $x2, implicit $x1, implicit $x5, implicit $x6, implicit $x7, implicit $x10, implicit $x11, implicit $x12, implicit $x13, implicit $x14, implicit $x15, implicit $x16, implicit $x17, implicit $x28, implicit $x29, implicit $x30, implicit $x31
PseudoRET
...
+## NOTE: These prefixes are unused and the list is autogenerated. Do not add tests below this line:
+# DEBUG: {{.*}}
diff --git a/llvm/test/CodeGen/RISCV/overflow-intrinsics.ll b/llvm/test/CodeGen/RISCV/overflow-intrinsics.ll
index 7c3294fa81dc..4bb65f376218 100644
--- a/llvm/test/CodeGen/RISCV/overflow-intrinsics.ll
+++ b/llvm/test/CodeGen/RISCV/overflow-intrinsics.ll
@@ -451,7 +451,7 @@ define i64 @uaddo6_xor_multi_use(i64 %a, i64 %b) {
; RV32-NEXT: .LBB10_4:
; RV32-NEXT: neg s1, a2
; RV32-NEXT: and s1, s1, a3
-; RV32-NEXT: call use@plt
+; RV32-NEXT: call use
; RV32-NEXT: mv a0, s0
; RV32-NEXT: mv a1, s1
; RV32-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
@@ -474,7 +474,7 @@ define i64 @uaddo6_xor_multi_use(i64 %a, i64 %b) {
; RV64-NEXT: # %bb.1:
; RV64-NEXT: li s0, 42
; RV64-NEXT: .LBB10_2:
-; RV64-NEXT: call use@plt
+; RV64-NEXT: call use
; RV64-NEXT: mv a0, s0
; RV64-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64-NEXT: ld s0, 0(sp) # 8-byte Folded Reload
@@ -1091,7 +1091,7 @@ define i1 @usubo_ult_cmp_dominates_i64(i64 %x, i64 %y, ptr %p, i1 %cond) {
; RV32-NEXT: sltu s6, s4, s1
; RV32-NEXT: .LBB32_4: # %t
; RV32-NEXT: mv a0, s6
-; RV32-NEXT: call call@plt
+; RV32-NEXT: call call
; RV32-NEXT: beqz s6, .LBB32_8
; RV32-NEXT: # %bb.5: # %end
; RV32-NEXT: sltu a1, s4, s1
@@ -1145,7 +1145,7 @@ define i1 @usubo_ult_cmp_dominates_i64(i64 %x, i64 %y, ptr %p, i1 %cond) {
; RV64-NEXT: mv s3, a0
; RV64-NEXT: sltu s4, a0, a1
; RV64-NEXT: mv a0, s4
-; RV64-NEXT: call call@plt
+; RV64-NEXT: call call
; RV64-NEXT: bgeu s3, s2, .LBB32_3
; RV64-NEXT: # %bb.2: # %end
; RV64-NEXT: sub a0, s3, s2
diff --git a/llvm/test/CodeGen/RISCV/pr51206.ll b/llvm/test/CodeGen/RISCV/pr51206.ll
index b83903e7c55c..f54031af0de5 100644
--- a/llvm/test/CodeGen/RISCV/pr51206.ll
+++ b/llvm/test/CodeGen/RISCV/pr51206.ll
@@ -31,7 +31,7 @@ define signext i32 @wobble() nounwind {
; CHECK-NEXT: # %bb.1: # %bb10
; CHECK-NEXT: addi sp, sp, -16
; CHECK-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
-; CHECK-NEXT: call quux@plt
+; CHECK-NEXT: call quux
; CHECK-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; CHECK-NEXT: addi sp, sp, 16
; CHECK-NEXT: .LBB0_2: # %bb12
diff --git a/llvm/test/CodeGen/RISCV/pr63816.ll b/llvm/test/CodeGen/RISCV/pr63816.ll
index 6eaec08abb90..2e33a0e99499 100644
--- a/llvm/test/CodeGen/RISCV/pr63816.ll
+++ b/llvm/test/CodeGen/RISCV/pr63816.ll
@@ -19,31 +19,31 @@ define void @test(ptr %0, ptr %1) nounwind {
; CHECK-NEXT: mv s1, a0
; CHECK-NEXT: lhu a0, 12(a0)
; CHECK-NEXT: fmv.w.x fa0, a0
-; CHECK-NEXT: call __extendhfsf2@plt
+; CHECK-NEXT: call __extendhfsf2
; CHECK-NEXT: fmv.s fs0, fa0
; CHECK-NEXT: lhu a0, 10(s1)
; CHECK-NEXT: fmv.w.x fa0, a0
-; CHECK-NEXT: call __extendhfsf2@plt
+; CHECK-NEXT: call __extendhfsf2
; CHECK-NEXT: fmv.s fs1, fa0
; CHECK-NEXT: lhu a0, 8(s1)
; CHECK-NEXT: fmv.w.x fa0, a0
-; CHECK-NEXT: call __extendhfsf2@plt
+; CHECK-NEXT: call __extendhfsf2
; CHECK-NEXT: fmv.s fs2, fa0
; CHECK-NEXT: lhu a0, 6(s1)
; CHECK-NEXT: fmv.w.x fa0, a0
-; CHECK-NEXT: call __extendhfsf2@plt
+; CHECK-NEXT: call __extendhfsf2
; CHECK-NEXT: fmv.s fs3, fa0
; CHECK-NEXT: lhu a0, 4(s1)
; CHECK-NEXT: fmv.w.x fa0, a0
-; CHECK-NEXT: call __extendhfsf2@plt
+; CHECK-NEXT: call __extendhfsf2
; CHECK-NEXT: fmv.s fs4, fa0
; CHECK-NEXT: lhu a0, 2(s1)
; CHECK-NEXT: fmv.w.x fa0, a0
-; CHECK-NEXT: call __extendhfsf2@plt
+; CHECK-NEXT: call __extendhfsf2
; CHECK-NEXT: fmv.s fs5, fa0
; CHECK-NEXT: lhu a0, 0(s1)
; CHECK-NEXT: fmv.w.x fa0, a0
-; CHECK-NEXT: call __extendhfsf2@plt
+; CHECK-NEXT: call __extendhfsf2
; CHECK-NEXT: fcvt.d.s fs6, fa0
; CHECK-NEXT: fcvt.d.s fs5, fs5
; CHECK-NEXT: fcvt.d.s fs4, fs4
@@ -53,7 +53,7 @@ define void @test(ptr %0, ptr %1) nounwind {
; CHECK-NEXT: fcvt.d.s fs1, fs1
; CHECK-NEXT: fmv.w.x fa0, a0
; CHECK-NEXT: fcvt.d.s fs0, fs0
-; CHECK-NEXT: call __extendhfsf2@plt
+; CHECK-NEXT: call __extendhfsf2
; CHECK-NEXT: fcvt.d.s fa5, fa0
; CHECK-NEXT: fsd fa5, 56(s0)
; CHECK-NEXT: fsd fs0, 48(s0)
diff --git a/llvm/test/CodeGen/RISCV/push-pop-popret.ll b/llvm/test/CodeGen/RISCV/push-pop-popret.ll
index 9ff4235746ca..945e7b46f8c9 100644
--- a/llvm/test/CodeGen/RISCV/push-pop-popret.ll
+++ b/llvm/test/CodeGen/RISCV/push-pop-popret.ll
@@ -24,7 +24,7 @@ define i32 @foo() {
; RV32IZCMP-NEXT: .cfi_def_cfa_offset 528
; RV32IZCMP-NEXT: .cfi_offset ra, -4
; RV32IZCMP-NEXT: mv a0, sp
-; RV32IZCMP-NEXT: call test@plt
+; RV32IZCMP-NEXT: call test
; RV32IZCMP-NEXT: addi sp, sp, 464
; RV32IZCMP-NEXT: cm.popretz {ra}, 64
;
@@ -35,7 +35,7 @@ define i32 @foo() {
; RV64IZCMP-NEXT: .cfi_def_cfa_offset 528
; RV64IZCMP-NEXT: .cfi_offset ra, -8
; RV64IZCMP-NEXT: mv a0, sp
-; RV64IZCMP-NEXT: call test@plt
+; RV64IZCMP-NEXT: call test
; RV64IZCMP-NEXT: addi sp, sp, 464
; RV64IZCMP-NEXT: cm.popretz {ra}, 64
;
@@ -46,7 +46,7 @@ define i32 @foo() {
; RV32IZCMP-SR-NEXT: .cfi_def_cfa_offset 528
; RV32IZCMP-SR-NEXT: .cfi_offset ra, -4
; RV32IZCMP-SR-NEXT: mv a0, sp
-; RV32IZCMP-SR-NEXT: call test@plt
+; RV32IZCMP-SR-NEXT: call test
; RV32IZCMP-SR-NEXT: addi sp, sp, 464
; RV32IZCMP-SR-NEXT: cm.popretz {ra}, 64
;
@@ -57,7 +57,7 @@ define i32 @foo() {
; RV64IZCMP-SR-NEXT: .cfi_def_cfa_offset 528
; RV64IZCMP-SR-NEXT: .cfi_offset ra, -8
; RV64IZCMP-SR-NEXT: mv a0, sp
-; RV64IZCMP-SR-NEXT: call test@plt
+; RV64IZCMP-SR-NEXT: call test
; RV64IZCMP-SR-NEXT: addi sp, sp, 464
; RV64IZCMP-SR-NEXT: cm.popretz {ra}, 64
;
@@ -68,7 +68,7 @@ define i32 @foo() {
; RV32I-NEXT: sw ra, 524(sp) # 4-byte Folded Spill
; RV32I-NEXT: .cfi_offset ra, -4
; RV32I-NEXT: addi a0, sp, 12
-; RV32I-NEXT: call test@plt
+; RV32I-NEXT: call test
; RV32I-NEXT: li a0, 0
; RV32I-NEXT: lw ra, 524(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 528
@@ -81,7 +81,7 @@ define i32 @foo() {
; RV64I-NEXT: sd ra, 520(sp) # 8-byte Folded Spill
; RV64I-NEXT: .cfi_offset ra, -8
; RV64I-NEXT: addi a0, sp, 8
-; RV64I-NEXT: call test@plt
+; RV64I-NEXT: call test
; RV64I-NEXT: li a0, 0
; RV64I-NEXT: ld ra, 520(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 528
@@ -105,7 +105,7 @@ define i32 @pushpopret0(i32 signext %size){
; RV32IZCMP-NEXT: andi a0, a0, -16
; RV32IZCMP-NEXT: sub a0, sp, a0
; RV32IZCMP-NEXT: mv sp, a0
-; RV32IZCMP-NEXT: call callee_void@plt
+; RV32IZCMP-NEXT: call callee_void
; RV32IZCMP-NEXT: addi sp, s0, -16
; RV32IZCMP-NEXT: cm.popretz {ra, s0}, 16
;
@@ -123,7 +123,7 @@ define i32 @pushpopret0(i32 signext %size){
; RV64IZCMP-NEXT: andi a0, a0, -16
; RV64IZCMP-NEXT: sub a0, sp, a0
; RV64IZCMP-NEXT: mv sp, a0
-; RV64IZCMP-NEXT: call callee_void@plt
+; RV64IZCMP-NEXT: call callee_void
; RV64IZCMP-NEXT: addi sp, s0, -16
; RV64IZCMP-NEXT: cm.popretz {ra, s0}, 16
;
@@ -139,7 +139,7 @@ define i32 @pushpopret0(i32 signext %size){
; RV32IZCMP-SR-NEXT: andi a0, a0, -16
; RV32IZCMP-SR-NEXT: sub a0, sp, a0
; RV32IZCMP-SR-NEXT: mv sp, a0
-; RV32IZCMP-SR-NEXT: call callee_void@plt
+; RV32IZCMP-SR-NEXT: call callee_void
; RV32IZCMP-SR-NEXT: addi sp, s0, -16
; RV32IZCMP-SR-NEXT: cm.popretz {ra, s0}, 16
;
@@ -157,7 +157,7 @@ define i32 @pushpopret0(i32 signext %size){
; RV64IZCMP-SR-NEXT: andi a0, a0, -16
; RV64IZCMP-SR-NEXT: sub a0, sp, a0
; RV64IZCMP-SR-NEXT: mv sp, a0
-; RV64IZCMP-SR-NEXT: call callee_void@plt
+; RV64IZCMP-SR-NEXT: call callee_void
; RV64IZCMP-SR-NEXT: addi sp, s0, -16
; RV64IZCMP-SR-NEXT: cm.popretz {ra, s0}, 16
;
@@ -175,7 +175,7 @@ define i32 @pushpopret0(i32 signext %size){
; RV32I-NEXT: andi a0, a0, -16
; RV32I-NEXT: sub a0, sp, a0
; RV32I-NEXT: mv sp, a0
-; RV32I-NEXT: call callee_void@plt
+; RV32I-NEXT: call callee_void
; RV32I-NEXT: li a0, 0
; RV32I-NEXT: addi sp, s0, -16
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
@@ -199,7 +199,7 @@ define i32 @pushpopret0(i32 signext %size){
; RV64I-NEXT: andi a0, a0, -16
; RV64I-NEXT: sub a0, sp, a0
; RV64I-NEXT: mv sp, a0
-; RV64I-NEXT: call callee_void@plt
+; RV64I-NEXT: call callee_void
; RV64I-NEXT: li a0, 0
; RV64I-NEXT: addi sp, s0, -16
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
@@ -225,7 +225,7 @@ define i32 @pushpopret1(i32 signext %size) {
; RV32IZCMP-NEXT: andi a0, a0, -16
; RV32IZCMP-NEXT: sub a0, sp, a0
; RV32IZCMP-NEXT: mv sp, a0
-; RV32IZCMP-NEXT: call callee_void@plt
+; RV32IZCMP-NEXT: call callee_void
; RV32IZCMP-NEXT: li a0, 1
; RV32IZCMP-NEXT: addi sp, s0, -16
; RV32IZCMP-NEXT: cm.popret {ra, s0}, 16
@@ -244,7 +244,7 @@ define i32 @pushpopret1(i32 signext %size) {
; RV64IZCMP-NEXT: andi a0, a0, -16
; RV64IZCMP-NEXT: sub a0, sp, a0
; RV64IZCMP-NEXT: mv sp, a0
-; RV64IZCMP-NEXT: call callee_void@plt
+; RV64IZCMP-NEXT: call callee_void
; RV64IZCMP-NEXT: li a0, 1
; RV64IZCMP-NEXT: addi sp, s0, -16
; RV64IZCMP-NEXT: cm.popret {ra, s0}, 16
@@ -261,7 +261,7 @@ define i32 @pushpopret1(i32 signext %size) {
; RV32IZCMP-SR-NEXT: andi a0, a0, -16
; RV32IZCMP-SR-NEXT: sub a0, sp, a0
; RV32IZCMP-SR-NEXT: mv sp, a0
-; RV32IZCMP-SR-NEXT: call callee_void@plt
+; RV32IZCMP-SR-NEXT: call callee_void
; RV32IZCMP-SR-NEXT: li a0, 1
; RV32IZCMP-SR-NEXT: addi sp, s0, -16
; RV32IZCMP-SR-NEXT: cm.popret {ra, s0}, 16
@@ -280,7 +280,7 @@ define i32 @pushpopret1(i32 signext %size) {
; RV64IZCMP-SR-NEXT: andi a0, a0, -16
; RV64IZCMP-SR-NEXT: sub a0, sp, a0
; RV64IZCMP-SR-NEXT: mv sp, a0
-; RV64IZCMP-SR-NEXT: call callee_void@plt
+; RV64IZCMP-SR-NEXT: call callee_void
; RV64IZCMP-SR-NEXT: li a0, 1
; RV64IZCMP-SR-NEXT: addi sp, s0, -16
; RV64IZCMP-SR-NEXT: cm.popret {ra, s0}, 16
@@ -299,7 +299,7 @@ define i32 @pushpopret1(i32 signext %size) {
; RV32I-NEXT: andi a0, a0, -16
; RV32I-NEXT: sub a0, sp, a0
; RV32I-NEXT: mv sp, a0
-; RV32I-NEXT: call callee_void@plt
+; RV32I-NEXT: call callee_void
; RV32I-NEXT: li a0, 1
; RV32I-NEXT: addi sp, s0, -16
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
@@ -323,7 +323,7 @@ define i32 @pushpopret1(i32 signext %size) {
; RV64I-NEXT: andi a0, a0, -16
; RV64I-NEXT: sub a0, sp, a0
; RV64I-NEXT: mv sp, a0
-; RV64I-NEXT: call callee_void@plt
+; RV64I-NEXT: call callee_void
; RV64I-NEXT: li a0, 1
; RV64I-NEXT: addi sp, s0, -16
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
@@ -349,7 +349,7 @@ define i32 @pushpopretneg1(i32 signext %size) {
; RV32IZCMP-NEXT: andi a0, a0, -16
; RV32IZCMP-NEXT: sub a0, sp, a0
; RV32IZCMP-NEXT: mv sp, a0
-; RV32IZCMP-NEXT: call callee_void@plt
+; RV32IZCMP-NEXT: call callee_void
; RV32IZCMP-NEXT: li a0, -1
; RV32IZCMP-NEXT: addi sp, s0, -16
; RV32IZCMP-NEXT: cm.popret {ra, s0}, 16
@@ -368,7 +368,7 @@ define i32 @pushpopretneg1(i32 signext %size) {
; RV64IZCMP-NEXT: andi a0, a0, -16
; RV64IZCMP-NEXT: sub a0, sp, a0
; RV64IZCMP-NEXT: mv sp, a0
-; RV64IZCMP-NEXT: call callee_void@plt
+; RV64IZCMP-NEXT: call callee_void
; RV64IZCMP-NEXT: li a0, -1
; RV64IZCMP-NEXT: addi sp, s0, -16
; RV64IZCMP-NEXT: cm.popret {ra, s0}, 16
@@ -385,7 +385,7 @@ define i32 @pushpopretneg1(i32 signext %size) {
; RV32IZCMP-SR-NEXT: andi a0, a0, -16
; RV32IZCMP-SR-NEXT: sub a0, sp, a0
; RV32IZCMP-SR-NEXT: mv sp, a0
-; RV32IZCMP-SR-NEXT: call callee_void@plt
+; RV32IZCMP-SR-NEXT: call callee_void
; RV32IZCMP-SR-NEXT: li a0, -1
; RV32IZCMP-SR-NEXT: addi sp, s0, -16
; RV32IZCMP-SR-NEXT: cm.popret {ra, s0}, 16
@@ -404,7 +404,7 @@ define i32 @pushpopretneg1(i32 signext %size) {
; RV64IZCMP-SR-NEXT: andi a0, a0, -16
; RV64IZCMP-SR-NEXT: sub a0, sp, a0
; RV64IZCMP-SR-NEXT: mv sp, a0
-; RV64IZCMP-SR-NEXT: call callee_void@plt
+; RV64IZCMP-SR-NEXT: call callee_void
; RV64IZCMP-SR-NEXT: li a0, -1
; RV64IZCMP-SR-NEXT: addi sp, s0, -16
; RV64IZCMP-SR-NEXT: cm.popret {ra, s0}, 16
@@ -423,7 +423,7 @@ define i32 @pushpopretneg1(i32 signext %size) {
; RV32I-NEXT: andi a0, a0, -16
; RV32I-NEXT: sub a0, sp, a0
; RV32I-NEXT: mv sp, a0
-; RV32I-NEXT: call callee_void@plt
+; RV32I-NEXT: call callee_void
; RV32I-NEXT: li a0, -1
; RV32I-NEXT: addi sp, s0, -16
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
@@ -447,7 +447,7 @@ define i32 @pushpopretneg1(i32 signext %size) {
; RV64I-NEXT: andi a0, a0, -16
; RV64I-NEXT: sub a0, sp, a0
; RV64I-NEXT: mv sp, a0
-; RV64I-NEXT: call callee_void@plt
+; RV64I-NEXT: call callee_void
; RV64I-NEXT: li a0, -1
; RV64I-NEXT: addi sp, s0, -16
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
@@ -473,7 +473,7 @@ define i32 @pushpopret2(i32 signext %size) {
; RV32IZCMP-NEXT: andi a0, a0, -16
; RV32IZCMP-NEXT: sub a0, sp, a0
; RV32IZCMP-NEXT: mv sp, a0
-; RV32IZCMP-NEXT: call callee_void@plt
+; RV32IZCMP-NEXT: call callee_void
; RV32IZCMP-NEXT: li a0, 2
; RV32IZCMP-NEXT: addi sp, s0, -16
; RV32IZCMP-NEXT: cm.popret {ra, s0}, 16
@@ -492,7 +492,7 @@ define i32 @pushpopret2(i32 signext %size) {
; RV64IZCMP-NEXT: andi a0, a0, -16
; RV64IZCMP-NEXT: sub a0, sp, a0
; RV64IZCMP-NEXT: mv sp, a0
-; RV64IZCMP-NEXT: call callee_void@plt
+; RV64IZCMP-NEXT: call callee_void
; RV64IZCMP-NEXT: li a0, 2
; RV64IZCMP-NEXT: addi sp, s0, -16
; RV64IZCMP-NEXT: cm.popret {ra, s0}, 16
@@ -509,7 +509,7 @@ define i32 @pushpopret2(i32 signext %size) {
; RV32IZCMP-SR-NEXT: andi a0, a0, -16
; RV32IZCMP-SR-NEXT: sub a0, sp, a0
; RV32IZCMP-SR-NEXT: mv sp, a0
-; RV32IZCMP-SR-NEXT: call callee_void@plt
+; RV32IZCMP-SR-NEXT: call callee_void
; RV32IZCMP-SR-NEXT: li a0, 2
; RV32IZCMP-SR-NEXT: addi sp, s0, -16
; RV32IZCMP-SR-NEXT: cm.popret {ra, s0}, 16
@@ -528,7 +528,7 @@ define i32 @pushpopret2(i32 signext %size) {
; RV64IZCMP-SR-NEXT: andi a0, a0, -16
; RV64IZCMP-SR-NEXT: sub a0, sp, a0
; RV64IZCMP-SR-NEXT: mv sp, a0
-; RV64IZCMP-SR-NEXT: call callee_void@plt
+; RV64IZCMP-SR-NEXT: call callee_void
; RV64IZCMP-SR-NEXT: li a0, 2
; RV64IZCMP-SR-NEXT: addi sp, s0, -16
; RV64IZCMP-SR-NEXT: cm.popret {ra, s0}, 16
@@ -547,7 +547,7 @@ define i32 @pushpopret2(i32 signext %size) {
; RV32I-NEXT: andi a0, a0, -16
; RV32I-NEXT: sub a0, sp, a0
; RV32I-NEXT: mv sp, a0
-; RV32I-NEXT: call callee_void@plt
+; RV32I-NEXT: call callee_void
; RV32I-NEXT: li a0, 2
; RV32I-NEXT: addi sp, s0, -16
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
@@ -571,7 +571,7 @@ define i32 @pushpopret2(i32 signext %size) {
; RV64I-NEXT: andi a0, a0, -16
; RV64I-NEXT: sub a0, sp, a0
; RV64I-NEXT: mv sp, a0
-; RV64I-NEXT: call callee_void@plt
+; RV64I-NEXT: call callee_void
; RV64I-NEXT: li a0, 2
; RV64I-NEXT: addi sp, s0, -16
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
@@ -599,7 +599,7 @@ define dso_local i32 @tailcall(i32 signext %size) local_unnamed_addr #0 {
; RV32IZCMP-NEXT: mv sp, a0
; RV32IZCMP-NEXT: addi sp, s0, -16
; RV32IZCMP-NEXT: cm.pop {ra, s0}, 16
-; RV32IZCMP-NEXT: tail callee@plt
+; RV32IZCMP-NEXT: tail callee
;
; RV64IZCMP-LABEL: tailcall:
; RV64IZCMP: # %bb.0: # %entry
@@ -617,7 +617,7 @@ define dso_local i32 @tailcall(i32 signext %size) local_unnamed_addr #0 {
; RV64IZCMP-NEXT: mv sp, a0
; RV64IZCMP-NEXT: addi sp, s0, -16
; RV64IZCMP-NEXT: cm.pop {ra, s0}, 16
-; RV64IZCMP-NEXT: tail callee@plt
+; RV64IZCMP-NEXT: tail callee
;
; RV32IZCMP-SR-LABEL: tailcall:
; RV32IZCMP-SR: # %bb.0: # %entry
@@ -633,7 +633,7 @@ define dso_local i32 @tailcall(i32 signext %size) local_unnamed_addr #0 {
; RV32IZCMP-SR-NEXT: mv sp, a0
; RV32IZCMP-SR-NEXT: addi sp, s0, -16
; RV32IZCMP-SR-NEXT: cm.pop {ra, s0}, 16
-; RV32IZCMP-SR-NEXT: tail callee@plt
+; RV32IZCMP-SR-NEXT: tail callee
;
; RV64IZCMP-SR-LABEL: tailcall:
; RV64IZCMP-SR: # %bb.0: # %entry
@@ -651,7 +651,7 @@ define dso_local i32 @tailcall(i32 signext %size) local_unnamed_addr #0 {
; RV64IZCMP-SR-NEXT: mv sp, a0
; RV64IZCMP-SR-NEXT: addi sp, s0, -16
; RV64IZCMP-SR-NEXT: cm.pop {ra, s0}, 16
-; RV64IZCMP-SR-NEXT: tail callee@plt
+; RV64IZCMP-SR-NEXT: tail callee
;
; RV32I-LABEL: tailcall:
; RV32I: # %bb.0: # %entry
@@ -671,7 +671,7 @@ define dso_local i32 @tailcall(i32 signext %size) local_unnamed_addr #0 {
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: lw s0, 8(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
-; RV32I-NEXT: tail callee@plt
+; RV32I-NEXT: tail callee
;
; RV64I-LABEL: tailcall:
; RV64I: # %bb.0: # %entry
@@ -693,7 +693,7 @@ define dso_local i32 @tailcall(i32 signext %size) local_unnamed_addr #0 {
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: ld s0, 0(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
-; RV64I-NEXT: tail callee@plt
+; RV64I-NEXT: tail callee
entry:
%0 = alloca i8, i32 %size, align 16
%1 = tail call i32 @callee(i8* nonnull %0)
@@ -730,7 +730,7 @@ define i32 @nocompress(i32 signext %size) {
; RV32IZCMP-NEXT: addi s7, s1, %lo(var)
; RV32IZCMP-NEXT: lw s8, 16(s7)
; RV32IZCMP-NEXT: mv a0, s2
-; RV32IZCMP-NEXT: call callee_void@plt
+; RV32IZCMP-NEXT: call callee_void
; RV32IZCMP-NEXT: sw s8, 16(s7)
; RV32IZCMP-NEXT: sw s6, %lo(var+12)(s1)
; RV32IZCMP-NEXT: sw s5, %lo(var+8)(s1)
@@ -739,7 +739,7 @@ define i32 @nocompress(i32 signext %size) {
; RV32IZCMP-NEXT: mv a0, s2
; RV32IZCMP-NEXT: addi sp, s0, -48
; RV32IZCMP-NEXT: cm.pop {ra, s0-s8}, 48
-; RV32IZCMP-NEXT: tail callee@plt
+; RV32IZCMP-NEXT: tail callee
;
; RV64IZCMP-LABEL: nocompress:
; RV64IZCMP: # %bb.0: # %entry
@@ -771,7 +771,7 @@ define i32 @nocompress(i32 signext %size) {
; RV64IZCMP-NEXT: addi s7, s1, %lo(var)
; RV64IZCMP-NEXT: lw s8, 16(s7)
; RV64IZCMP-NEXT: mv a0, s2
-; RV64IZCMP-NEXT: call callee_void@plt
+; RV64IZCMP-NEXT: call callee_void
; RV64IZCMP-NEXT: sw s8, 16(s7)
; RV64IZCMP-NEXT: sw s6, %lo(var+12)(s1)
; RV64IZCMP-NEXT: sw s5, %lo(var+8)(s1)
@@ -780,7 +780,7 @@ define i32 @nocompress(i32 signext %size) {
; RV64IZCMP-NEXT: mv a0, s2
; RV64IZCMP-NEXT: addi sp, s0, -80
; RV64IZCMP-NEXT: cm.pop {ra, s0-s8}, 80
-; RV64IZCMP-NEXT: tail callee@plt
+; RV64IZCMP-NEXT: tail callee
;
; RV32IZCMP-SR-LABEL: nocompress:
; RV32IZCMP-SR: # %bb.0: # %entry
@@ -810,7 +810,7 @@ define i32 @nocompress(i32 signext %size) {
; RV32IZCMP-SR-NEXT: addi s7, s1, %lo(var)
; RV32IZCMP-SR-NEXT: lw s8, 16(s7)
; RV32IZCMP-SR-NEXT: mv a0, s2
-; RV32IZCMP-SR-NEXT: call callee_void@plt
+; RV32IZCMP-SR-NEXT: call callee_void
; RV32IZCMP-SR-NEXT: sw s8, 16(s7)
; RV32IZCMP-SR-NEXT: sw s6, %lo(var+12)(s1)
; RV32IZCMP-SR-NEXT: sw s5, %lo(var+8)(s1)
@@ -819,7 +819,7 @@ define i32 @nocompress(i32 signext %size) {
; RV32IZCMP-SR-NEXT: mv a0, s2
; RV32IZCMP-SR-NEXT: addi sp, s0, -48
; RV32IZCMP-SR-NEXT: cm.pop {ra, s0-s8}, 48
-; RV32IZCMP-SR-NEXT: tail callee@plt
+; RV32IZCMP-SR-NEXT: tail callee
;
; RV64IZCMP-SR-LABEL: nocompress:
; RV64IZCMP-SR: # %bb.0: # %entry
@@ -851,7 +851,7 @@ define i32 @nocompress(i32 signext %size) {
; RV64IZCMP-SR-NEXT: addi s7, s1, %lo(var)
; RV64IZCMP-SR-NEXT: lw s8, 16(s7)
; RV64IZCMP-SR-NEXT: mv a0, s2
-; RV64IZCMP-SR-NEXT: call callee_void@plt
+; RV64IZCMP-SR-NEXT: call callee_void
; RV64IZCMP-SR-NEXT: sw s8, 16(s7)
; RV64IZCMP-SR-NEXT: sw s6, %lo(var+12)(s1)
; RV64IZCMP-SR-NEXT: sw s5, %lo(var+8)(s1)
@@ -860,7 +860,7 @@ define i32 @nocompress(i32 signext %size) {
; RV64IZCMP-SR-NEXT: mv a0, s2
; RV64IZCMP-SR-NEXT: addi sp, s0, -80
; RV64IZCMP-SR-NEXT: cm.pop {ra, s0-s8}, 80
-; RV64IZCMP-SR-NEXT: tail callee@plt
+; RV64IZCMP-SR-NEXT: tail callee
;
; RV32I-LABEL: nocompress:
; RV32I: # %bb.0: # %entry
@@ -900,7 +900,7 @@ define i32 @nocompress(i32 signext %size) {
; RV32I-NEXT: addi s7, s2, %lo(var)
; RV32I-NEXT: lw s8, 16(s7)
; RV32I-NEXT: mv a0, s1
-; RV32I-NEXT: call callee_void@plt
+; RV32I-NEXT: call callee_void
; RV32I-NEXT: sw s8, 16(s7)
; RV32I-NEXT: sw s6, %lo(var+12)(s2)
; RV32I-NEXT: sw s5, %lo(var+8)(s2)
@@ -919,7 +919,7 @@ define i32 @nocompress(i32 signext %size) {
; RV32I-NEXT: lw s7, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: lw s8, 8(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 48
-; RV32I-NEXT: tail callee@plt
+; RV32I-NEXT: tail callee
;
; RV64I-LABEL: nocompress:
; RV64I: # %bb.0: # %entry
@@ -961,7 +961,7 @@ define i32 @nocompress(i32 signext %size) {
; RV64I-NEXT: addi s7, s2, %lo(var)
; RV64I-NEXT: lw s8, 16(s7)
; RV64I-NEXT: mv a0, s1
-; RV64I-NEXT: call callee_void@plt
+; RV64I-NEXT: call callee_void
; RV64I-NEXT: sw s8, 16(s7)
; RV64I-NEXT: sw s6, %lo(var+12)(s2)
; RV64I-NEXT: sw s5, %lo(var+8)(s2)
@@ -980,7 +980,7 @@ define i32 @nocompress(i32 signext %size) {
; RV64I-NEXT: ld s7, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: ld s8, 0(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 80
-; RV64I-NEXT: tail callee@plt
+; RV64I-NEXT: tail callee
entry:
%0 = alloca i8, i32 %size, align 16
%val = load [5 x i32], [5 x i32]* @var
@@ -1405,7 +1405,7 @@ define void @alloca(i32 %n) nounwind {
; RV32IZCMP-NEXT: andi a0, a0, -16
; RV32IZCMP-NEXT: sub a0, sp, a0
; RV32IZCMP-NEXT: mv sp, a0
-; RV32IZCMP-NEXT: call notdead@plt
+; RV32IZCMP-NEXT: call notdead
; RV32IZCMP-NEXT: mv sp, s1
; RV32IZCMP-NEXT: addi sp, s0, -16
; RV32IZCMP-NEXT: cm.popret {ra, s0-s1}, 16
@@ -1421,7 +1421,7 @@ define void @alloca(i32 %n) nounwind {
; RV64IZCMP-NEXT: andi a0, a0, -16
; RV64IZCMP-NEXT: sub a0, sp, a0
; RV64IZCMP-NEXT: mv sp, a0
-; RV64IZCMP-NEXT: call notdead@plt
+; RV64IZCMP-NEXT: call notdead
; RV64IZCMP-NEXT: mv sp, s1
; RV64IZCMP-NEXT: addi sp, s0, -32
; RV64IZCMP-NEXT: cm.popret {ra, s0-s1}, 32
@@ -1435,7 +1435,7 @@ define void @alloca(i32 %n) nounwind {
; RV32IZCMP-SR-NEXT: andi a0, a0, -16
; RV32IZCMP-SR-NEXT: sub a0, sp, a0
; RV32IZCMP-SR-NEXT: mv sp, a0
-; RV32IZCMP-SR-NEXT: call notdead@plt
+; RV32IZCMP-SR-NEXT: call notdead
; RV32IZCMP-SR-NEXT: mv sp, s1
; RV32IZCMP-SR-NEXT: addi sp, s0, -16
; RV32IZCMP-SR-NEXT: cm.popret {ra, s0-s1}, 16
@@ -1451,7 +1451,7 @@ define void @alloca(i32 %n) nounwind {
; RV64IZCMP-SR-NEXT: andi a0, a0, -16
; RV64IZCMP-SR-NEXT: sub a0, sp, a0
; RV64IZCMP-SR-NEXT: mv sp, a0
-; RV64IZCMP-SR-NEXT: call notdead@plt
+; RV64IZCMP-SR-NEXT: call notdead
; RV64IZCMP-SR-NEXT: mv sp, s1
; RV64IZCMP-SR-NEXT: addi sp, s0, -32
; RV64IZCMP-SR-NEXT: cm.popret {ra, s0-s1}, 32
@@ -1468,7 +1468,7 @@ define void @alloca(i32 %n) nounwind {
; RV32I-NEXT: andi a0, a0, -16
; RV32I-NEXT: sub a0, sp, a0
; RV32I-NEXT: mv sp, a0
-; RV32I-NEXT: call notdead@plt
+; RV32I-NEXT: call notdead
; RV32I-NEXT: mv sp, s1
; RV32I-NEXT: addi sp, s0, -16
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
@@ -1491,7 +1491,7 @@ define void @alloca(i32 %n) nounwind {
; RV64I-NEXT: andi a0, a0, -16
; RV64I-NEXT: sub a0, sp, a0
; RV64I-NEXT: mv sp, a0
-; RV64I-NEXT: call notdead@plt
+; RV64I-NEXT: call notdead
; RV64I-NEXT: mv sp, s1
; RV64I-NEXT: addi sp, s0, -32
; RV64I-NEXT: ld ra, 24(sp) # 8-byte Folded Reload
@@ -1529,7 +1529,7 @@ define void @foo_with_irq() nounwind "interrupt"="user" {
; RV32IZCMP-NEXT: sw t4, 12(sp) # 4-byte Folded Spill
; RV32IZCMP-NEXT: sw t5, 8(sp) # 4-byte Folded Spill
; RV32IZCMP-NEXT: sw t6, 4(sp) # 4-byte Folded Spill
-; RV32IZCMP-NEXT: call foo_test_irq@plt
+; RV32IZCMP-NEXT: call foo_test_irq
; RV32IZCMP-NEXT: lw t0, 60(sp) # 4-byte Folded Reload
; RV32IZCMP-NEXT: lw t1, 56(sp) # 4-byte Folded Reload
; RV32IZCMP-NEXT: lw t2, 52(sp) # 4-byte Folded Reload
@@ -1568,7 +1568,7 @@ define void @foo_with_irq() nounwind "interrupt"="user" {
; RV64IZCMP-NEXT: sd t4, 24(sp) # 8-byte Folded Spill
; RV64IZCMP-NEXT: sd t5, 16(sp) # 8-byte Folded Spill
; RV64IZCMP-NEXT: sd t6, 8(sp) # 8-byte Folded Spill
-; RV64IZCMP-NEXT: call foo_test_irq@plt
+; RV64IZCMP-NEXT: call foo_test_irq
; RV64IZCMP-NEXT: ld t0, 120(sp) # 8-byte Folded Reload
; RV64IZCMP-NEXT: ld t1, 112(sp) # 8-byte Folded Reload
; RV64IZCMP-NEXT: ld t2, 104(sp) # 8-byte Folded Reload
@@ -1607,7 +1607,7 @@ define void @foo_with_irq() nounwind "interrupt"="user" {
; RV32IZCMP-SR-NEXT: sw t4, 12(sp) # 4-byte Folded Spill
; RV32IZCMP-SR-NEXT: sw t5, 8(sp) # 4-byte Folded Spill
; RV32IZCMP-SR-NEXT: sw t6, 4(sp) # 4-byte Folded Spill
-; RV32IZCMP-SR-NEXT: call foo_test_irq@plt
+; RV32IZCMP-SR-NEXT: call foo_test_irq
; RV32IZCMP-SR-NEXT: lw t0, 60(sp) # 4-byte Folded Reload
; RV32IZCMP-SR-NEXT: lw t1, 56(sp) # 4-byte Folded Reload
; RV32IZCMP-SR-NEXT: lw t2, 52(sp) # 4-byte Folded Reload
@@ -1646,7 +1646,7 @@ define void @foo_with_irq() nounwind "interrupt"="user" {
; RV64IZCMP-SR-NEXT: sd t4, 24(sp) # 8-byte Folded Spill
; RV64IZCMP-SR-NEXT: sd t5, 16(sp) # 8-byte Folded Spill
; RV64IZCMP-SR-NEXT: sd t6, 8(sp) # 8-byte Folded Spill
-; RV64IZCMP-SR-NEXT: call foo_test_irq@plt
+; RV64IZCMP-SR-NEXT: call foo_test_irq
; RV64IZCMP-SR-NEXT: ld t0, 120(sp) # 8-byte Folded Reload
; RV64IZCMP-SR-NEXT: ld t1, 112(sp) # 8-byte Folded Reload
; RV64IZCMP-SR-NEXT: ld t2, 104(sp) # 8-byte Folded Reload
@@ -1685,7 +1685,7 @@ define void @foo_with_irq() nounwind "interrupt"="user" {
; RV32I-NEXT: sw t4, 8(sp) # 4-byte Folded Spill
; RV32I-NEXT: sw t5, 4(sp) # 4-byte Folded Spill
; RV32I-NEXT: sw t6, 0(sp) # 4-byte Folded Spill
-; RV32I-NEXT: call foo_test_irq@plt
+; RV32I-NEXT: call foo_test_irq
; RV32I-NEXT: lw ra, 60(sp) # 4-byte Folded Reload
; RV32I-NEXT: lw t0, 56(sp) # 4-byte Folded Reload
; RV32I-NEXT: lw t1, 52(sp) # 4-byte Folded Reload
@@ -1724,7 +1724,7 @@ define void @foo_with_irq() nounwind "interrupt"="user" {
; RV64I-NEXT: sd t4, 16(sp) # 8-byte Folded Spill
; RV64I-NEXT: sd t5, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT: sd t6, 0(sp) # 8-byte Folded Spill
-; RV64I-NEXT: call foo_test_irq@plt
+; RV64I-NEXT: call foo_test_irq
; RV64I-NEXT: ld ra, 120(sp) # 8-byte Folded Reload
; RV64I-NEXT: ld t0, 112(sp) # 8-byte Folded Reload
; RV64I-NEXT: ld t1, 104(sp) # 8-byte Folded Reload
@@ -1751,32 +1751,32 @@ define void @foo_no_irq() nounwind{
; RV32IZCMP-LABEL: foo_no_irq:
; RV32IZCMP: # %bb.0:
; RV32IZCMP-NEXT: cm.push {ra}, -16
-; RV32IZCMP-NEXT: call foo_test_irq@plt
+; RV32IZCMP-NEXT: call foo_test_irq
; RV32IZCMP-NEXT: cm.popret {ra}, 16
;
; RV64IZCMP-LABEL: foo_no_irq:
; RV64IZCMP: # %bb.0:
; RV64IZCMP-NEXT: cm.push {ra}, -16
-; RV64IZCMP-NEXT: call foo_test_irq@plt
+; RV64IZCMP-NEXT: call foo_test_irq
; RV64IZCMP-NEXT: cm.popret {ra}, 16
;
; RV32IZCMP-SR-LABEL: foo_no_irq:
; RV32IZCMP-SR: # %bb.0:
; RV32IZCMP-SR-NEXT: cm.push {ra}, -16
-; RV32IZCMP-SR-NEXT: call foo_test_irq@plt
+; RV32IZCMP-SR-NEXT: call foo_test_irq
; RV32IZCMP-SR-NEXT: cm.popret {ra}, 16
;
; RV64IZCMP-SR-LABEL: foo_no_irq:
; RV64IZCMP-SR: # %bb.0:
; RV64IZCMP-SR-NEXT: cm.push {ra}, -16
-; RV64IZCMP-SR-NEXT: call foo_test_irq@plt
+; RV64IZCMP-SR-NEXT: call foo_test_irq
; RV64IZCMP-SR-NEXT: cm.popret {ra}, 16
;
; RV32I-LABEL: foo_no_irq:
; RV32I: # %bb.0:
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT: call foo_test_irq@plt
+; RV32I-NEXT: call foo_test_irq
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
@@ -1785,7 +1785,7 @@ define void @foo_no_irq() nounwind{
; RV64I: # %bb.0:
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT: call foo_test_irq@plt
+; RV64I-NEXT: call foo_test_irq
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
@@ -3115,7 +3115,7 @@ define i32 @use_fp(i32 %x) {
; RV32IZCMP-NEXT: mv s1, a0
; RV32IZCMP-NEXT: addi a1, s0, -20
; RV32IZCMP-NEXT: mv a0, s0
-; RV32IZCMP-NEXT: call bar@plt
+; RV32IZCMP-NEXT: call bar
; RV32IZCMP-NEXT: mv a0, s1
; RV32IZCMP-NEXT: cm.popret {ra, s0-s1}, 32
;
@@ -3131,7 +3131,7 @@ define i32 @use_fp(i32 %x) {
; RV64IZCMP-NEXT: mv s1, a0
; RV64IZCMP-NEXT: addi a1, s0, -36
; RV64IZCMP-NEXT: mv a0, s0
-; RV64IZCMP-NEXT: call bar@plt
+; RV64IZCMP-NEXT: call bar
; RV64IZCMP-NEXT: mv a0, s1
; RV64IZCMP-NEXT: cm.popret {ra, s0-s1}, 48
;
@@ -3147,7 +3147,7 @@ define i32 @use_fp(i32 %x) {
; RV32IZCMP-SR-NEXT: mv s1, a0
; RV32IZCMP-SR-NEXT: addi a1, s0, -20
; RV32IZCMP-SR-NEXT: mv a0, s0
-; RV32IZCMP-SR-NEXT: call bar@plt
+; RV32IZCMP-SR-NEXT: call bar
; RV32IZCMP-SR-NEXT: mv a0, s1
; RV32IZCMP-SR-NEXT: cm.popret {ra, s0-s1}, 32
;
@@ -3163,7 +3163,7 @@ define i32 @use_fp(i32 %x) {
; RV64IZCMP-SR-NEXT: mv s1, a0
; RV64IZCMP-SR-NEXT: addi a1, s0, -36
; RV64IZCMP-SR-NEXT: mv a0, s0
-; RV64IZCMP-SR-NEXT: call bar@plt
+; RV64IZCMP-SR-NEXT: call bar
; RV64IZCMP-SR-NEXT: mv a0, s1
; RV64IZCMP-SR-NEXT: cm.popret {ra, s0-s1}, 48
;
@@ -3182,7 +3182,7 @@ define i32 @use_fp(i32 %x) {
; RV32I-NEXT: mv s1, a0
; RV32I-NEXT: addi a1, s0, -16
; RV32I-NEXT: mv a0, s0
-; RV32I-NEXT: call bar@plt
+; RV32I-NEXT: call bar
; RV32I-NEXT: mv a0, s1
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: lw s0, 8(sp) # 4-byte Folded Reload
@@ -3205,7 +3205,7 @@ define i32 @use_fp(i32 %x) {
; RV64I-NEXT: mv s1, a0
; RV64I-NEXT: addi a1, s0, -28
; RV64I-NEXT: mv a0, s0
-; RV64I-NEXT: call bar@plt
+; RV64I-NEXT: call bar
; RV64I-NEXT: mv a0, s1
; RV64I-NEXT: ld ra, 24(sp) # 8-byte Folded Reload
; RV64I-NEXT: ld s0, 16(sp) # 8-byte Folded Reload
diff --git a/llvm/test/CodeGen/RISCV/reduce-unnecessary-extension.ll b/llvm/test/CodeGen/RISCV/reduce-unnecessary-extension.ll
index bb65f408c77a..351408a7f085 100644
--- a/llvm/test/CodeGen/RISCV/reduce-unnecessary-extension.ll
+++ b/llvm/test/CodeGen/RISCV/reduce-unnecessary-extension.ll
@@ -21,15 +21,15 @@ define signext i32 @test() nounwind {
; RV64I-NEXT: beqz s0, .LBB0_2
; RV64I-NEXT: # %bb.1:
; RV64I-NEXT: mv a0, s0
-; RV64I-NEXT: call test1@plt
+; RV64I-NEXT: call test1
; RV64I-NEXT: mv a0, s0
-; RV64I-NEXT: call test2@plt
+; RV64I-NEXT: call test2
; RV64I-NEXT: mv a0, s0
-; RV64I-NEXT: call test3@plt
+; RV64I-NEXT: call test3
; RV64I-NEXT: j .LBB0_3
; RV64I-NEXT: .LBB0_2:
; RV64I-NEXT: li a0, 0
-; RV64I-NEXT: call test2@plt
+; RV64I-NEXT: call test2
; RV64I-NEXT: .LBB0_3:
; RV64I-NEXT: li a0, 0
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
@@ -68,7 +68,7 @@ define signext i32 @test_loop() nounwind {
; RV64I-NEXT: j .LBB1_2
; RV64I-NEXT: .LBB1_1: # in Loop: Header=BB1_2 Depth=1
; RV64I-NEXT: mv a0, s0
-; RV64I-NEXT: call test2@plt
+; RV64I-NEXT: call test2
; RV64I-NEXT: addiw s1, s1, 1
; RV64I-NEXT: beqz s1, .LBB1_4
; RV64I-NEXT: .LBB1_2: # =>This Inner Loop Header: Depth=1
@@ -76,11 +76,11 @@ define signext i32 @test_loop() nounwind {
; RV64I-NEXT: beqz s0, .LBB1_1
; RV64I-NEXT: # %bb.3: # in Loop: Header=BB1_2 Depth=1
; RV64I-NEXT: mv a0, s0
-; RV64I-NEXT: call test1@plt
+; RV64I-NEXT: call test1
; RV64I-NEXT: mv a0, s0
-; RV64I-NEXT: call test2@plt
+; RV64I-NEXT: call test2
; RV64I-NEXT: mv a0, s0
-; RV64I-NEXT: call test3@plt
+; RV64I-NEXT: call test3
; RV64I-NEXT: addiw s1, s1, 1
; RV64I-NEXT: bnez s1, .LBB1_2
; RV64I-NEXT: .LBB1_4:
diff --git a/llvm/test/CodeGen/RISCV/regalloc-last-chance-recoloring-failure.ll b/llvm/test/CodeGen/RISCV/regalloc-last-chance-recoloring-failure.ll
index f017d8dff2bd..804bc053728d 100644
--- a/llvm/test/CodeGen/RISCV/regalloc-last-chance-recoloring-failure.ll
+++ b/llvm/test/CodeGen/RISCV/regalloc-last-chance-recoloring-failure.ll
@@ -42,7 +42,7 @@ define void @last_chance_recoloring_failure() {
; CHECK-NEXT: add a0, sp, a0
; CHECK-NEXT: addi a0, a0, 16
; CHECK-NEXT: vs8r.v v16, (a0) # Unknown-size Folded Spill
-; CHECK-NEXT: call func@plt
+; CHECK-NEXT: call func
; CHECK-NEXT: li a0, 32
; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma
; CHECK-NEXT: vrgather.vv v16, v8, v12, v0.t
@@ -108,7 +108,7 @@ define void @last_chance_recoloring_failure() {
; SUBREGLIVENESS-NEXT: vfwadd.vv v16, v8, v12, v0.t
; SUBREGLIVENESS-NEXT: addi a0, sp, 16
; SUBREGLIVENESS-NEXT: vs8r.v v16, (a0) # Unknown-size Folded Spill
-; SUBREGLIVENESS-NEXT: call func@plt
+; SUBREGLIVENESS-NEXT: call func
; SUBREGLIVENESS-NEXT: li a0, 32
; SUBREGLIVENESS-NEXT: vsetvli zero, a0, e16, m4, ta, ma
; SUBREGLIVENESS-NEXT: vrgather.vv v16, v8, v12, v0.t
diff --git a/llvm/test/CodeGen/RISCV/rem.ll b/llvm/test/CodeGen/RISCV/rem.ll
index feece1f54ffc..5b27c4129df6 100644
--- a/llvm/test/CodeGen/RISCV/rem.ll
+++ b/llvm/test/CodeGen/RISCV/rem.ll
@@ -11,7 +11,7 @@
define i32 @urem(i32 %a, i32 %b) nounwind {
; RV32I-LABEL: urem:
; RV32I: # %bb.0:
-; RV32I-NEXT: tail __umodsi3@plt
+; RV32I-NEXT: tail __umodsi3
;
; RV32IM-LABEL: urem:
; RV32IM: # %bb.0:
@@ -26,7 +26,7 @@ define i32 @urem(i32 %a, i32 %b) nounwind {
; RV64I-NEXT: srli a0, a0, 32
; RV64I-NEXT: slli a1, a1, 32
; RV64I-NEXT: srli a1, a1, 32
-; RV64I-NEXT: call __umoddi3@plt
+; RV64I-NEXT: call __umoddi3
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
@@ -44,7 +44,7 @@ define i32 @urem_constant_lhs(i32 %a) nounwind {
; RV32I: # %bb.0:
; RV32I-NEXT: mv a1, a0
; RV32I-NEXT: li a0, 10
-; RV32I-NEXT: tail __umodsi3@plt
+; RV32I-NEXT: tail __umodsi3
;
; RV32IM-LABEL: urem_constant_lhs:
; RV32IM: # %bb.0:
@@ -59,7 +59,7 @@ define i32 @urem_constant_lhs(i32 %a) nounwind {
; RV64I-NEXT: slli a0, a0, 32
; RV64I-NEXT: srli a1, a0, 32
; RV64I-NEXT: li a0, 10
-; RV64I-NEXT: call __umoddi3@plt
+; RV64I-NEXT: call __umoddi3
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
@@ -76,7 +76,7 @@ define i32 @urem_constant_lhs(i32 %a) nounwind {
define i32 @srem(i32 %a, i32 %b) nounwind {
; RV32I-LABEL: srem:
; RV32I: # %bb.0:
-; RV32I-NEXT: tail __modsi3@plt
+; RV32I-NEXT: tail __modsi3
;
; RV32IM-LABEL: srem:
; RV32IM: # %bb.0:
@@ -89,7 +89,7 @@ define i32 @srem(i32 %a, i32 %b) nounwind {
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT: sext.w a0, a0
; RV64I-NEXT: sext.w a1, a1
-; RV64I-NEXT: call __moddi3@plt
+; RV64I-NEXT: call __moddi3
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
@@ -191,7 +191,7 @@ define i32 @srem_constant_lhs(i32 %a) nounwind {
; RV32I: # %bb.0:
; RV32I-NEXT: mv a1, a0
; RV32I-NEXT: li a0, -10
-; RV32I-NEXT: tail __modsi3@plt
+; RV32I-NEXT: tail __modsi3
;
; RV32IM-LABEL: srem_constant_lhs:
; RV32IM: # %bb.0:
@@ -205,7 +205,7 @@ define i32 @srem_constant_lhs(i32 %a) nounwind {
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT: sext.w a1, a0
; RV64I-NEXT: li a0, -10
-; RV64I-NEXT: call __moddi3@plt
+; RV64I-NEXT: call __moddi3
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
@@ -224,7 +224,7 @@ define i64 @urem64(i64 %a, i64 %b) nounwind {
; RV32I: # %bb.0:
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT: call __umoddi3@plt
+; RV32I-NEXT: call __umoddi3
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
@@ -233,14 +233,14 @@ define i64 @urem64(i64 %a, i64 %b) nounwind {
; RV32IM: # %bb.0:
; RV32IM-NEXT: addi sp, sp, -16
; RV32IM-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IM-NEXT: call __umoddi3@plt
+; RV32IM-NEXT: call __umoddi3
; RV32IM-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32IM-NEXT: addi sp, sp, 16
; RV32IM-NEXT: ret
;
; RV64I-LABEL: urem64:
; RV64I: # %bb.0:
-; RV64I-NEXT: tail __umoddi3@plt
+; RV64I-NEXT: tail __umoddi3
;
; RV64IM-LABEL: urem64:
; RV64IM: # %bb.0:
@@ -259,7 +259,7 @@ define i64 @urem64_constant_lhs(i64 %a) nounwind {
; RV32I-NEXT: mv a2, a0
; RV32I-NEXT: li a0, 10
; RV32I-NEXT: li a1, 0
-; RV32I-NEXT: call __umoddi3@plt
+; RV32I-NEXT: call __umoddi3
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
@@ -272,7 +272,7 @@ define i64 @urem64_constant_lhs(i64 %a) nounwind {
; RV32IM-NEXT: mv a2, a0
; RV32IM-NEXT: li a0, 10
; RV32IM-NEXT: li a1, 0
-; RV32IM-NEXT: call __umoddi3@plt
+; RV32IM-NEXT: call __umoddi3
; RV32IM-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32IM-NEXT: addi sp, sp, 16
; RV32IM-NEXT: ret
@@ -281,7 +281,7 @@ define i64 @urem64_constant_lhs(i64 %a) nounwind {
; RV64I: # %bb.0:
; RV64I-NEXT: mv a1, a0
; RV64I-NEXT: li a0, 10
-; RV64I-NEXT: tail __umoddi3@plt
+; RV64I-NEXT: tail __umoddi3
;
; RV64IM-LABEL: urem64_constant_lhs:
; RV64IM: # %bb.0:
@@ -297,7 +297,7 @@ define i64 @srem64(i64 %a, i64 %b) nounwind {
; RV32I: # %bb.0:
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT: call __moddi3@plt
+; RV32I-NEXT: call __moddi3
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
@@ -306,14 +306,14 @@ define i64 @srem64(i64 %a, i64 %b) nounwind {
; RV32IM: # %bb.0:
; RV32IM-NEXT: addi sp, sp, -16
; RV32IM-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IM-NEXT: call __moddi3@plt
+; RV32IM-NEXT: call __moddi3
; RV32IM-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32IM-NEXT: addi sp, sp, 16
; RV32IM-NEXT: ret
;
; RV64I-LABEL: srem64:
; RV64I: # %bb.0:
-; RV64I-NEXT: tail __moddi3@plt
+; RV64I-NEXT: tail __moddi3
;
; RV64IM-LABEL: srem64:
; RV64IM: # %bb.0:
@@ -332,7 +332,7 @@ define i64 @srem64_constant_lhs(i64 %a) nounwind {
; RV32I-NEXT: mv a2, a0
; RV32I-NEXT: li a0, -10
; RV32I-NEXT: li a1, -1
-; RV32I-NEXT: call __moddi3@plt
+; RV32I-NEXT: call __moddi3
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
@@ -345,7 +345,7 @@ define i64 @srem64_constant_lhs(i64 %a) nounwind {
; RV32IM-NEXT: mv a2, a0
; RV32IM-NEXT: li a0, -10
; RV32IM-NEXT: li a1, -1
-; RV32IM-NEXT: call __moddi3@plt
+; RV32IM-NEXT: call __moddi3
; RV32IM-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32IM-NEXT: addi sp, sp, 16
; RV32IM-NEXT: ret
@@ -354,7 +354,7 @@ define i64 @srem64_constant_lhs(i64 %a) nounwind {
; RV64I: # %bb.0:
; RV64I-NEXT: mv a1, a0
; RV64I-NEXT: li a0, -10
-; RV64I-NEXT: tail __moddi3@plt
+; RV64I-NEXT: tail __moddi3
;
; RV64IM-LABEL: srem64_constant_lhs:
; RV64IM: # %bb.0:
@@ -372,7 +372,7 @@ define i8 @urem8(i8 %a, i8 %b) nounwind {
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT: andi a0, a0, 255
; RV32I-NEXT: andi a1, a1, 255
-; RV32I-NEXT: call __umodsi3@plt
+; RV32I-NEXT: call __umodsi3
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
@@ -390,7 +390,7 @@ define i8 @urem8(i8 %a, i8 %b) nounwind {
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT: andi a0, a0, 255
; RV64I-NEXT: andi a1, a1, 255
-; RV64I-NEXT: call __umoddi3@plt
+; RV64I-NEXT: call __umoddi3
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
@@ -412,7 +412,7 @@ define i8 @urem8_constant_lhs(i8 %a) nounwind {
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT: andi a1, a0, 255
; RV32I-NEXT: li a0, 10
-; RV32I-NEXT: call __umodsi3@plt
+; RV32I-NEXT: call __umodsi3
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
@@ -430,7 +430,7 @@ define i8 @urem8_constant_lhs(i8 %a) nounwind {
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT: andi a1, a0, 255
; RV64I-NEXT: li a0, 10
-; RV64I-NEXT: call __umoddi3@plt
+; RV64I-NEXT: call __umoddi3
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
@@ -455,7 +455,7 @@ define i8 @srem8(i8 %a, i8 %b) nounwind {
; RV32I-NEXT: srai a0, a0, 24
; RV32I-NEXT: slli a1, a1, 24
; RV32I-NEXT: srai a1, a1, 24
-; RV32I-NEXT: call __modsi3@plt
+; RV32I-NEXT: call __modsi3
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
@@ -477,7 +477,7 @@ define i8 @srem8(i8 %a, i8 %b) nounwind {
; RV64I-NEXT: srai a0, a0, 56
; RV64I-NEXT: slli a1, a1, 56
; RV64I-NEXT: srai a1, a1, 56
-; RV64I-NEXT: call __moddi3@plt
+; RV64I-NEXT: call __moddi3
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
@@ -502,7 +502,7 @@ define i8 @srem8_constant_lhs(i8 %a) nounwind {
; RV32I-NEXT: slli a0, a0, 24
; RV32I-NEXT: srai a1, a0, 24
; RV32I-NEXT: li a0, -10
-; RV32I-NEXT: call __modsi3@plt
+; RV32I-NEXT: call __modsi3
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
@@ -522,7 +522,7 @@ define i8 @srem8_constant_lhs(i8 %a) nounwind {
; RV64I-NEXT: slli a0, a0, 56
; RV64I-NEXT: srai a1, a0, 56
; RV64I-NEXT: li a0, -10
-; RV64I-NEXT: call __moddi3@plt
+; RV64I-NEXT: call __moddi3
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
@@ -548,7 +548,7 @@ define i16 @urem16(i16 %a, i16 %b) nounwind {
; RV32I-NEXT: addi a2, a2, -1
; RV32I-NEXT: and a0, a0, a2
; RV32I-NEXT: and a1, a1, a2
-; RV32I-NEXT: call __umodsi3@plt
+; RV32I-NEXT: call __umodsi3
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
@@ -570,7 +570,7 @@ define i16 @urem16(i16 %a, i16 %b) nounwind {
; RV64I-NEXT: addiw a2, a2, -1
; RV64I-NEXT: and a0, a0, a2
; RV64I-NEXT: and a1, a1, a2
-; RV64I-NEXT: call __umoddi3@plt
+; RV64I-NEXT: call __umoddi3
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
@@ -595,7 +595,7 @@ define i16 @urem16_constant_lhs(i16 %a) nounwind {
; RV32I-NEXT: slli a0, a0, 16
; RV32I-NEXT: srli a1, a0, 16
; RV32I-NEXT: li a0, 10
-; RV32I-NEXT: call __umodsi3@plt
+; RV32I-NEXT: call __umodsi3
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
@@ -615,7 +615,7 @@ define i16 @urem16_constant_lhs(i16 %a) nounwind {
; RV64I-NEXT: slli a0, a0, 48
; RV64I-NEXT: srli a1, a0, 48
; RV64I-NEXT: li a0, 10
-; RV64I-NEXT: call __umoddi3@plt
+; RV64I-NEXT: call __umoddi3
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
@@ -640,7 +640,7 @@ define i16 @srem16(i16 %a, i16 %b) nounwind {
; RV32I-NEXT: srai a0, a0, 16
; RV32I-NEXT: slli a1, a1, 16
; RV32I-NEXT: srai a1, a1, 16
-; RV32I-NEXT: call __modsi3@plt
+; RV32I-NEXT: call __modsi3
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
@@ -662,7 +662,7 @@ define i16 @srem16(i16 %a, i16 %b) nounwind {
; RV64I-NEXT: srai a0, a0, 48
; RV64I-NEXT: slli a1, a1, 48
; RV64I-NEXT: srai a1, a1, 48
-; RV64I-NEXT: call __moddi3@plt
+; RV64I-NEXT: call __moddi3
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
@@ -687,7 +687,7 @@ define i16 @srem16_constant_lhs(i16 %a) nounwind {
; RV32I-NEXT: slli a0, a0, 16
; RV32I-NEXT: srai a1, a0, 16
; RV32I-NEXT: li a0, -10
-; RV32I-NEXT: call __modsi3@plt
+; RV32I-NEXT: call __modsi3
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
@@ -707,7 +707,7 @@ define i16 @srem16_constant_lhs(i16 %a) nounwind {
; RV64I-NEXT: slli a0, a0, 48
; RV64I-NEXT: srai a1, a0, 48
; RV64I-NEXT: li a0, -10
-; RV64I-NEXT: call __moddi3@plt
+; RV64I-NEXT: call __moddi3
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
diff --git a/llvm/test/CodeGen/RISCV/remat.ll b/llvm/test/CodeGen/RISCV/remat.ll
index e1c825fc8f26..92ae85f560cd 100644
--- a/llvm/test/CodeGen/RISCV/remat.ll
+++ b/llvm/test/CodeGen/RISCV/remat.ll
@@ -70,7 +70,7 @@ define i32 @test() nounwind {
; RV32I-NEXT: lw a3, %lo(d)(s5)
; RV32I-NEXT: lw a4, %lo(e)(s6)
; RV32I-NEXT: li a5, 32
-; RV32I-NEXT: call foo@plt
+; RV32I-NEXT: call foo
; RV32I-NEXT: .LBB0_5: # %if.end
; RV32I-NEXT: # in Loop: Header=BB0_3 Depth=1
; RV32I-NEXT: lw a0, %lo(k)(s2)
@@ -83,7 +83,7 @@ define i32 @test() nounwind {
; RV32I-NEXT: lw a3, %lo(e)(s6)
; RV32I-NEXT: lw a4, %lo(f)(s7)
; RV32I-NEXT: li a5, 64
-; RV32I-NEXT: call foo@plt
+; RV32I-NEXT: call foo
; RV32I-NEXT: .LBB0_7: # %if.end5
; RV32I-NEXT: # in Loop: Header=BB0_3 Depth=1
; RV32I-NEXT: lw a0, %lo(j)(s3)
@@ -96,7 +96,7 @@ define i32 @test() nounwind {
; RV32I-NEXT: lw a3, %lo(f)(s7)
; RV32I-NEXT: lw a4, %lo(g)(s8)
; RV32I-NEXT: li a5, 32
-; RV32I-NEXT: call foo@plt
+; RV32I-NEXT: call foo
; RV32I-NEXT: .LBB0_9: # %if.end9
; RV32I-NEXT: # in Loop: Header=BB0_3 Depth=1
; RV32I-NEXT: lw a0, %lo(i)(s4)
@@ -109,7 +109,7 @@ define i32 @test() nounwind {
; RV32I-NEXT: lw a3, %lo(g)(s8)
; RV32I-NEXT: lw a4, %lo(h)(s9)
; RV32I-NEXT: li a5, 32
-; RV32I-NEXT: call foo@plt
+; RV32I-NEXT: call foo
; RV32I-NEXT: j .LBB0_2
; RV32I-NEXT: .LBB0_11: # %for.end
; RV32I-NEXT: li a0, 1
diff --git a/llvm/test/CodeGen/RISCV/rv32i-rv64i-float-double.ll b/llvm/test/CodeGen/RISCV/rv32i-rv64i-float-double.ll
index e03696419467..cd7bce868eae 100644
--- a/llvm/test/CodeGen/RISCV/rv32i-rv64i-float-double.ll
+++ b/llvm/test/CodeGen/RISCV/rv32i-rv64i-float-double.ll
@@ -18,9 +18,9 @@ define float @float_test(float %a, float %b) nounwind {
; RV32IF-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32IF-NEXT: sw s0, 8(sp) # 4-byte Folded Spill
; RV32IF-NEXT: mv s0, a1
-; RV32IF-NEXT: call __addsf3@plt
+; RV32IF-NEXT: call __addsf3
; RV32IF-NEXT: mv a1, s0
-; RV32IF-NEXT: call __divsf3@plt
+; RV32IF-NEXT: call __divsf3
; RV32IF-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32IF-NEXT: lw s0, 8(sp) # 4-byte Folded Reload
; RV32IF-NEXT: addi sp, sp, 16
@@ -32,9 +32,9 @@ define float @float_test(float %a, float %b) nounwind {
; RV64IF-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64IF-NEXT: sd s0, 0(sp) # 8-byte Folded Spill
; RV64IF-NEXT: mv s0, a1
-; RV64IF-NEXT: call __addsf3@plt
+; RV64IF-NEXT: call __addsf3
; RV64IF-NEXT: mv a1, s0
-; RV64IF-NEXT: call __divsf3@plt
+; RV64IF-NEXT: call __divsf3
; RV64IF-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64IF-NEXT: ld s0, 0(sp) # 8-byte Folded Reload
; RV64IF-NEXT: addi sp, sp, 16
@@ -53,10 +53,10 @@ define double @double_test(double %a, double %b) nounwind {
; RV32IF-NEXT: sw s1, 4(sp) # 4-byte Folded Spill
; RV32IF-NEXT: mv s0, a3
; RV32IF-NEXT: mv s1, a2
-; RV32IF-NEXT: call __adddf3@plt
+; RV32IF-NEXT: call __adddf3
; RV32IF-NEXT: mv a2, s1
; RV32IF-NEXT: mv a3, s0
-; RV32IF-NEXT: call __divdf3@plt
+; RV32IF-NEXT: call __divdf3
; RV32IF-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32IF-NEXT: lw s0, 8(sp) # 4-byte Folded Reload
; RV32IF-NEXT: lw s1, 4(sp) # 4-byte Folded Reload
@@ -69,9 +69,9 @@ define double @double_test(double %a, double %b) nounwind {
; RV64IF-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64IF-NEXT: sd s0, 0(sp) # 8-byte Folded Spill
; RV64IF-NEXT: mv s0, a1
-; RV64IF-NEXT: call __adddf3@plt
+; RV64IF-NEXT: call __adddf3
; RV64IF-NEXT: mv a1, s0
-; RV64IF-NEXT: call __divdf3@plt
+; RV64IF-NEXT: call __divdf3
; RV64IF-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64IF-NEXT: ld s0, 0(sp) # 8-byte Folded Reload
; RV64IF-NEXT: addi sp, sp, 16
diff --git a/llvm/test/CodeGen/RISCV/rv32i-rv64i-half.ll b/llvm/test/CodeGen/RISCV/rv32i-rv64i-half.ll
index 0269bbe9ee33..99b111b10f66 100644
--- a/llvm/test/CodeGen/RISCV/rv32i-rv64i-half.ll
+++ b/llvm/test/CodeGen/RISCV/rv32i-rv64i-half.ll
@@ -21,20 +21,20 @@ define half @half_test(half %a, half %b) nounwind {
; RV32I-NEXT: lui a1, 16
; RV32I-NEXT: addi s2, a1, -1
; RV32I-NEXT: and a0, a0, s2
-; RV32I-NEXT: call __extendhfsf2@plt
+; RV32I-NEXT: call __extendhfsf2
; RV32I-NEXT: mv s1, a0
; RV32I-NEXT: and a0, s0, s2
-; RV32I-NEXT: call __extendhfsf2@plt
+; RV32I-NEXT: call __extendhfsf2
; RV32I-NEXT: mv s0, a0
; RV32I-NEXT: mv a0, s1
; RV32I-NEXT: mv a1, s0
-; RV32I-NEXT: call __addsf3@plt
-; RV32I-NEXT: call __truncsfhf2@plt
+; RV32I-NEXT: call __addsf3
+; RV32I-NEXT: call __truncsfhf2
; RV32I-NEXT: and a0, a0, s2
-; RV32I-NEXT: call __extendhfsf2@plt
+; RV32I-NEXT: call __extendhfsf2
; RV32I-NEXT: mv a1, s0
-; RV32I-NEXT: call __divsf3@plt
-; RV32I-NEXT: call __truncsfhf2@plt
+; RV32I-NEXT: call __divsf3
+; RV32I-NEXT: call __truncsfhf2
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: lw s0, 8(sp) # 4-byte Folded Reload
; RV32I-NEXT: lw s1, 4(sp) # 4-byte Folded Reload
@@ -53,20 +53,20 @@ define half @half_test(half %a, half %b) nounwind {
; RV64I-NEXT: lui a1, 16
; RV64I-NEXT: addiw s2, a1, -1
; RV64I-NEXT: and a0, a0, s2
-; RV64I-NEXT: call __extendhfsf2@plt
+; RV64I-NEXT: call __extendhfsf2
; RV64I-NEXT: mv s1, a0
; RV64I-NEXT: and a0, s0, s2
-; RV64I-NEXT: call __extendhfsf2@plt
+; RV64I-NEXT: call __extendhfsf2
; RV64I-NEXT: mv s0, a0
; RV64I-NEXT: mv a0, s1
; RV64I-NEXT: mv a1, s0
-; RV64I-NEXT: call __addsf3@plt
-; RV64I-NEXT: call __truncsfhf2@plt
+; RV64I-NEXT: call __addsf3
+; RV64I-NEXT: call __truncsfhf2
; RV64I-NEXT: and a0, a0, s2
-; RV64I-NEXT: call __extendhfsf2@plt
+; RV64I-NEXT: call __extendhfsf2
; RV64I-NEXT: mv a1, s0
-; RV64I-NEXT: call __divsf3@plt
-; RV64I-NEXT: call __truncsfhf2@plt
+; RV64I-NEXT: call __divsf3
+; RV64I-NEXT: call __truncsfhf2
; RV64I-NEXT: ld ra, 24(sp) # 8-byte Folded Reload
; RV64I-NEXT: ld s0, 16(sp) # 8-byte Folded Reload
; RV64I-NEXT: ld s1, 8(sp) # 8-byte Folded Reload
diff --git a/llvm/test/CodeGen/RISCV/rv32xtheadbb.ll b/llvm/test/CodeGen/RISCV/rv32xtheadbb.ll
index 321d9c612336..3731b9719445 100644
--- a/llvm/test/CodeGen/RISCV/rv32xtheadbb.ll
+++ b/llvm/test/CodeGen/RISCV/rv32xtheadbb.ll
@@ -42,7 +42,7 @@ define i32 @ctlz_i32(i32 %a) nounwind {
; RV32I-NEXT: and a0, a0, a1
; RV32I-NEXT: lui a1, 4112
; RV32I-NEXT: addi a1, a1, 257
-; RV32I-NEXT: call __mulsi3@plt
+; RV32I-NEXT: call __mulsi3
; RV32I-NEXT: srli a0, a0, 24
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
@@ -105,7 +105,7 @@ define i64 @ctlz_i64(i64 %a) nounwind {
; RV32I-NEXT: lui a1, 4112
; RV32I-NEXT: addi s3, a1, 257
; RV32I-NEXT: mv a1, s3
-; RV32I-NEXT: call __mulsi3@plt
+; RV32I-NEXT: call __mulsi3
; RV32I-NEXT: mv s1, a0
; RV32I-NEXT: srli a0, s2, 1
; RV32I-NEXT: or a0, s2, a0
@@ -129,7 +129,7 @@ define i64 @ctlz_i64(i64 %a) nounwind {
; RV32I-NEXT: add a0, a0, a1
; RV32I-NEXT: and a0, a0, s6
; RV32I-NEXT: mv a1, s3
-; RV32I-NEXT: call __mulsi3@plt
+; RV32I-NEXT: call __mulsi3
; RV32I-NEXT: bnez s0, .LBB1_2
; RV32I-NEXT: # %bb.1:
; RV32I-NEXT: srli a0, a0, 24
@@ -179,7 +179,7 @@ define i32 @cttz_i32(i32 %a) nounwind {
; RV32I-NEXT: and a0, a0, a1
; RV32I-NEXT: lui a1, 30667
; RV32I-NEXT: addi a1, a1, 1329
-; RV32I-NEXT: call __mulsi3@plt
+; RV32I-NEXT: call __mulsi3
; RV32I-NEXT: srli a0, a0, 27
; RV32I-NEXT: lui a1, %hi(.LCPI2_0)
; RV32I-NEXT: addi a1, a1, %lo(.LCPI2_0)
@@ -229,14 +229,14 @@ define i64 @cttz_i64(i64 %a) nounwind {
; RV32I-NEXT: lui a1, 30667
; RV32I-NEXT: addi s3, a1, 1329
; RV32I-NEXT: mv a1, s3
-; RV32I-NEXT: call __mulsi3@plt
+; RV32I-NEXT: call __mulsi3
; RV32I-NEXT: mv s1, a0
; RV32I-NEXT: lui a0, %hi(.LCPI3_0)
; RV32I-NEXT: addi s4, a0, %lo(.LCPI3_0)
; RV32I-NEXT: neg a0, s2
; RV32I-NEXT: and a0, s2, a0
; RV32I-NEXT: mv a1, s3
-; RV32I-NEXT: call __mulsi3@plt
+; RV32I-NEXT: call __mulsi3
; RV32I-NEXT: bnez s2, .LBB3_3
; RV32I-NEXT: # %bb.1:
; RV32I-NEXT: li a0, 32
diff --git a/llvm/test/CodeGen/RISCV/rv32zbb.ll b/llvm/test/CodeGen/RISCV/rv32zbb.ll
index 5f9ca503bcb0..36c107061795 100644
--- a/llvm/test/CodeGen/RISCV/rv32zbb.ll
+++ b/llvm/test/CodeGen/RISCV/rv32zbb.ll
@@ -42,7 +42,7 @@ define i32 @ctlz_i32(i32 %a) nounwind {
; RV32I-NEXT: and a0, a0, a1
; RV32I-NEXT: lui a1, 4112
; RV32I-NEXT: addi a1, a1, 257
-; RV32I-NEXT: call __mulsi3@plt
+; RV32I-NEXT: call __mulsi3
; RV32I-NEXT: srli a0, a0, 24
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
@@ -105,7 +105,7 @@ define i64 @ctlz_i64(i64 %a) nounwind {
; RV32I-NEXT: lui a1, 4112
; RV32I-NEXT: addi s3, a1, 257
; RV32I-NEXT: mv a1, s3
-; RV32I-NEXT: call __mulsi3@plt
+; RV32I-NEXT: call __mulsi3
; RV32I-NEXT: mv s1, a0
; RV32I-NEXT: srli a0, s2, 1
; RV32I-NEXT: or a0, s2, a0
@@ -129,7 +129,7 @@ define i64 @ctlz_i64(i64 %a) nounwind {
; RV32I-NEXT: add a0, a0, a1
; RV32I-NEXT: and a0, a0, s6
; RV32I-NEXT: mv a1, s3
-; RV32I-NEXT: call __mulsi3@plt
+; RV32I-NEXT: call __mulsi3
; RV32I-NEXT: bnez s0, .LBB1_2
; RV32I-NEXT: # %bb.1:
; RV32I-NEXT: srli a0, a0, 24
@@ -179,7 +179,7 @@ define i32 @cttz_i32(i32 %a) nounwind {
; RV32I-NEXT: and a0, a0, a1
; RV32I-NEXT: lui a1, 30667
; RV32I-NEXT: addi a1, a1, 1329
-; RV32I-NEXT: call __mulsi3@plt
+; RV32I-NEXT: call __mulsi3
; RV32I-NEXT: srli a0, a0, 27
; RV32I-NEXT: lui a1, %hi(.LCPI2_0)
; RV32I-NEXT: addi a1, a1, %lo(.LCPI2_0)
@@ -219,14 +219,14 @@ define i64 @cttz_i64(i64 %a) nounwind {
; RV32I-NEXT: lui a1, 30667
; RV32I-NEXT: addi s3, a1, 1329
; RV32I-NEXT: mv a1, s3
-; RV32I-NEXT: call __mulsi3@plt
+; RV32I-NEXT: call __mulsi3
; RV32I-NEXT: mv s1, a0
; RV32I-NEXT: lui a0, %hi(.LCPI3_0)
; RV32I-NEXT: addi s4, a0, %lo(.LCPI3_0)
; RV32I-NEXT: neg a0, s2
; RV32I-NEXT: and a0, s2, a0
; RV32I-NEXT: mv a1, s3
-; RV32I-NEXT: call __mulsi3@plt
+; RV32I-NEXT: call __mulsi3
; RV32I-NEXT: bnez s2, .LBB3_3
; RV32I-NEXT: # %bb.1:
; RV32I-NEXT: li a0, 32
@@ -295,7 +295,7 @@ define i32 @ctpop_i32(i32 %a) nounwind {
; RV32I-NEXT: and a0, a0, a1
; RV32I-NEXT: lui a1, 4112
; RV32I-NEXT: addi a1, a1, 257
-; RV32I-NEXT: call __mulsi3@plt
+; RV32I-NEXT: call __mulsi3
; RV32I-NEXT: srli a0, a0, 24
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
@@ -418,7 +418,7 @@ define <2 x i32> @ctpop_v2i32(<2 x i32> %a) nounwind {
; RV32I-NEXT: lui a1, 4112
; RV32I-NEXT: addi s1, a1, 257
; RV32I-NEXT: mv a1, s1
-; RV32I-NEXT: call __mulsi3@plt
+; RV32I-NEXT: call __mulsi3
; RV32I-NEXT: srli s2, a0, 24
; RV32I-NEXT: srli a0, s0, 1
; RV32I-NEXT: and a0, a0, s3
@@ -431,7 +431,7 @@ define <2 x i32> @ctpop_v2i32(<2 x i32> %a) nounwind {
; RV32I-NEXT: add a0, a0, a1
; RV32I-NEXT: and a0, a0, s5
; RV32I-NEXT: mv a1, s1
-; RV32I-NEXT: call __mulsi3@plt
+; RV32I-NEXT: call __mulsi3
; RV32I-NEXT: srli a1, a0, 24
; RV32I-NEXT: mv a0, s2
; RV32I-NEXT: lw ra, 28(sp) # 4-byte Folded Reload
@@ -586,7 +586,7 @@ define i64 @ctpop_i64(i64 %a) nounwind {
; RV32I-NEXT: lui a1, 4112
; RV32I-NEXT: addi s1, a1, 257
; RV32I-NEXT: mv a1, s1
-; RV32I-NEXT: call __mulsi3@plt
+; RV32I-NEXT: call __mulsi3
; RV32I-NEXT: srli s5, a0, 24
; RV32I-NEXT: srli a0, s0, 1
; RV32I-NEXT: and a0, a0, s2
@@ -599,7 +599,7 @@ define i64 @ctpop_i64(i64 %a) nounwind {
; RV32I-NEXT: add a0, a0, a1
; RV32I-NEXT: and a0, a0, s4
; RV32I-NEXT: mv a1, s1
-; RV32I-NEXT: call __mulsi3@plt
+; RV32I-NEXT: call __mulsi3
; RV32I-NEXT: srli a0, a0, 24
; RV32I-NEXT: add a0, a0, s5
; RV32I-NEXT: li a1, 0
@@ -773,7 +773,7 @@ define <2 x i64> @ctpop_v2i64(<2 x i64> %a) nounwind {
; RV32I-NEXT: lui a1, 4112
; RV32I-NEXT: addi s1, a1, 257
; RV32I-NEXT: mv a1, s1
-; RV32I-NEXT: call __mulsi3@plt
+; RV32I-NEXT: call __mulsi3
; RV32I-NEXT: srli s8, a0, 24
; RV32I-NEXT: srli a0, s6, 1
; RV32I-NEXT: and a0, a0, s3
@@ -786,7 +786,7 @@ define <2 x i64> @ctpop_v2i64(<2 x i64> %a) nounwind {
; RV32I-NEXT: add a0, a0, a1
; RV32I-NEXT: and a0, a0, s7
; RV32I-NEXT: mv a1, s1
-; RV32I-NEXT: call __mulsi3@plt
+; RV32I-NEXT: call __mulsi3
; RV32I-NEXT: srli a0, a0, 24
; RV32I-NEXT: add s8, a0, s8
; RV32I-NEXT: srli a0, s5, 1
@@ -800,7 +800,7 @@ define <2 x i64> @ctpop_v2i64(<2 x i64> %a) nounwind {
; RV32I-NEXT: add a0, a0, a1
; RV32I-NEXT: and a0, a0, s7
; RV32I-NEXT: mv a1, s1
-; RV32I-NEXT: call __mulsi3@plt
+; RV32I-NEXT: call __mulsi3
; RV32I-NEXT: srli s5, a0, 24
; RV32I-NEXT: srli a0, s2, 1
; RV32I-NEXT: and a0, a0, s3
@@ -813,7 +813,7 @@ define <2 x i64> @ctpop_v2i64(<2 x i64> %a) nounwind {
; RV32I-NEXT: add a0, a0, a1
; RV32I-NEXT: and a0, a0, s7
; RV32I-NEXT: mv a1, s1
-; RV32I-NEXT: call __mulsi3@plt
+; RV32I-NEXT: call __mulsi3
; RV32I-NEXT: srli a0, a0, 24
; RV32I-NEXT: add a0, a0, s5
; RV32I-NEXT: sw zero, 12(s0)
diff --git a/llvm/test/CodeGen/RISCV/rv64-large-stack.ll b/llvm/test/CodeGen/RISCV/rv64-large-stack.ll
index 535550e83eeb..8bd99c0b639a 100644
--- a/llvm/test/CodeGen/RISCV/rv64-large-stack.ll
+++ b/llvm/test/CodeGen/RISCV/rv64-large-stack.ll
@@ -14,7 +14,7 @@ define void @foo() nounwind {
; CHECK-NEXT: addi a0, a0, -2000
; CHECK-NEXT: sub sp, sp, a0
; CHECK-NEXT: addi a0, sp, 16
-; CHECK-NEXT: call baz@plt
+; CHECK-NEXT: call baz
; CHECK-NEXT: lui a0, 390625
; CHECK-NEXT: slli a0, a0, 1
; CHECK-NEXT: addi a0, a0, -2000
diff --git a/llvm/test/CodeGen/RISCV/rv64-legal-i32/div.ll b/llvm/test/CodeGen/RISCV/rv64-legal-i32/div.ll
index 1ae2c1cfad68..17d9e9cefe11 100644
--- a/llvm/test/CodeGen/RISCV/rv64-legal-i32/div.ll
+++ b/llvm/test/CodeGen/RISCV/rv64-legal-i32/div.ll
@@ -13,7 +13,7 @@ define i32 @udiv(i32 %a, i32 %b) nounwind {
; RV64I-NEXT: srli a0, a0, 32
; RV64I-NEXT: slli a1, a1, 32
; RV64I-NEXT: srli a1, a1, 32
-; RV64I-NEXT: call __udivdi3@plt
+; RV64I-NEXT: call __udivdi3
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
@@ -34,7 +34,7 @@ define i32 @udiv_constant(i32 %a) nounwind {
; RV64I-NEXT: slli a0, a0, 32
; RV64I-NEXT: srli a0, a0, 32
; RV64I-NEXT: li a1, 5
-; RV64I-NEXT: call __udivdi3@plt
+; RV64I-NEXT: call __udivdi3
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
@@ -74,7 +74,7 @@ define i32 @udiv_constant_lhs(i32 %a) nounwind {
; RV64I-NEXT: slli a0, a0, 32
; RV64I-NEXT: srli a1, a0, 32
; RV64I-NEXT: li a0, 10
-; RV64I-NEXT: call __udivdi3@plt
+; RV64I-NEXT: call __udivdi3
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
@@ -91,7 +91,7 @@ define i32 @udiv_constant_lhs(i32 %a) nounwind {
define i64 @udiv64(i64 %a, i64 %b) nounwind {
; RV64I-LABEL: udiv64:
; RV64I: # %bb.0:
-; RV64I-NEXT: tail __udivdi3@plt
+; RV64I-NEXT: tail __udivdi3
;
; RV64IM-LABEL: udiv64:
; RV64IM: # %bb.0:
@@ -105,7 +105,7 @@ define i64 @udiv64_constant(i64 %a) nounwind {
; RV64I-LABEL: udiv64_constant:
; RV64I: # %bb.0:
; RV64I-NEXT: li a1, 5
-; RV64I-NEXT: tail __udivdi3@plt
+; RV64I-NEXT: tail __udivdi3
;
; RV64IM-LABEL: udiv64_constant:
; RV64IM: # %bb.0:
@@ -125,7 +125,7 @@ define i64 @udiv64_constant_lhs(i64 %a) nounwind {
; RV64I: # %bb.0:
; RV64I-NEXT: mv a1, a0
; RV64I-NEXT: li a0, 10
-; RV64I-NEXT: tail __udivdi3@plt
+; RV64I-NEXT: tail __udivdi3
;
; RV64IM-LABEL: udiv64_constant_lhs:
; RV64IM: # %bb.0:
@@ -143,7 +143,7 @@ define i8 @udiv8(i8 %a, i8 %b) nounwind {
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT: andi a0, a0, 255
; RV64I-NEXT: andi a1, a1, 255
-; RV64I-NEXT: call __udivdi3@plt
+; RV64I-NEXT: call __udivdi3
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
@@ -165,7 +165,7 @@ define i8 @udiv8_constant(i8 %a) nounwind {
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT: andi a0, a0, 255
; RV64I-NEXT: li a1, 5
-; RV64I-NEXT: call __udivdi3@plt
+; RV64I-NEXT: call __udivdi3
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
@@ -204,7 +204,7 @@ define i8 @udiv8_constant_lhs(i8 %a) nounwind {
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT: andi a1, a0, 255
; RV64I-NEXT: li a0, 10
-; RV64I-NEXT: call __udivdi3@plt
+; RV64I-NEXT: call __udivdi3
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
@@ -228,7 +228,7 @@ define i16 @udiv16(i16 %a, i16 %b) nounwind {
; RV64I-NEXT: addiw a2, a2, -1
; RV64I-NEXT: and a0, a0, a2
; RV64I-NEXT: and a1, a1, a2
-; RV64I-NEXT: call __udivdi3@plt
+; RV64I-NEXT: call __udivdi3
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
@@ -253,7 +253,7 @@ define i16 @udiv16_constant(i16 %a) nounwind {
; RV64I-NEXT: slli a0, a0, 48
; RV64I-NEXT: srli a0, a0, 48
; RV64I-NEXT: li a1, 5
-; RV64I-NEXT: call __udivdi3@plt
+; RV64I-NEXT: call __udivdi3
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
@@ -294,7 +294,7 @@ define i16 @udiv16_constant_lhs(i16 %a) nounwind {
; RV64I-NEXT: slli a0, a0, 48
; RV64I-NEXT: srli a1, a0, 48
; RV64I-NEXT: li a0, 10
-; RV64I-NEXT: call __udivdi3@plt
+; RV64I-NEXT: call __udivdi3
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
@@ -317,7 +317,7 @@ define i32 @sdiv(i32 %a, i32 %b) nounwind {
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT: sext.w a0, a0
; RV64I-NEXT: sext.w a1, a1
-; RV64I-NEXT: call __divdi3@plt
+; RV64I-NEXT: call __divdi3
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
@@ -337,7 +337,7 @@ define i32 @sdiv_constant(i32 %a) nounwind {
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT: sext.w a0, a0
; RV64I-NEXT: li a1, 5
-; RV64I-NEXT: call __divdi3@plt
+; RV64I-NEXT: call __divdi3
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
@@ -403,7 +403,7 @@ define i32 @sdiv_constant_lhs(i32 %a) nounwind {
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT: sext.w a1, a0
; RV64I-NEXT: li a0, -10
-; RV64I-NEXT: call __divdi3@plt
+; RV64I-NEXT: call __divdi3
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
@@ -420,7 +420,7 @@ define i32 @sdiv_constant_lhs(i32 %a) nounwind {
define i64 @sdiv64(i64 %a, i64 %b) nounwind {
; RV64I-LABEL: sdiv64:
; RV64I: # %bb.0:
-; RV64I-NEXT: tail __divdi3@plt
+; RV64I-NEXT: tail __divdi3
;
; RV64IM-LABEL: sdiv64:
; RV64IM: # %bb.0:
@@ -434,7 +434,7 @@ define i64 @sdiv64_constant(i64 %a) nounwind {
; RV64I-LABEL: sdiv64_constant:
; RV64I: # %bb.0:
; RV64I-NEXT: li a1, 5
-; RV64I-NEXT: tail __divdi3@plt
+; RV64I-NEXT: tail __divdi3
;
; RV64IM-LABEL: sdiv64_constant:
; RV64IM: # %bb.0:
@@ -454,7 +454,7 @@ define i64 @sdiv64_constant_lhs(i64 %a) nounwind {
; RV64I: # %bb.0:
; RV64I-NEXT: mv a1, a0
; RV64I-NEXT: li a0, 10
-; RV64I-NEXT: tail __divdi3@plt
+; RV64I-NEXT: tail __divdi3
;
; RV64IM-LABEL: sdiv64_constant_lhs:
; RV64IM: # %bb.0:
@@ -473,7 +473,7 @@ define i64 @sdiv64_sext_operands(i32 %a, i32 %b) nounwind {
; RV64I: # %bb.0:
; RV64I-NEXT: sext.w a0, a0
; RV64I-NEXT: sext.w a1, a1
-; RV64I-NEXT: tail __divdi3@plt
+; RV64I-NEXT: tail __divdi3
;
; RV64IM-LABEL: sdiv64_sext_operands:
; RV64IM: # %bb.0:
@@ -496,7 +496,7 @@ define i8 @sdiv8(i8 %a, i8 %b) nounwind {
; RV64I-NEXT: sraiw a1, a1, 24
; RV64I-NEXT: slli a0, a0, 24
; RV64I-NEXT: sraiw a0, a0, 24
-; RV64I-NEXT: call __divdi3@plt
+; RV64I-NEXT: call __divdi3
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
@@ -521,7 +521,7 @@ define i8 @sdiv8_constant(i8 %a) nounwind {
; RV64I-NEXT: slli a0, a0, 24
; RV64I-NEXT: sraiw a0, a0, 24
; RV64I-NEXT: li a1, 5
-; RV64I-NEXT: call __divdi3@plt
+; RV64I-NEXT: call __divdi3
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
@@ -575,7 +575,7 @@ define i8 @sdiv8_constant_lhs(i8 %a) nounwind {
; RV64I-NEXT: slli a0, a0, 24
; RV64I-NEXT: sraiw a1, a0, 24
; RV64I-NEXT: li a0, -10
-; RV64I-NEXT: call __divdi3@plt
+; RV64I-NEXT: call __divdi3
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
@@ -600,7 +600,7 @@ define i16 @sdiv16(i16 %a, i16 %b) nounwind {
; RV64I-NEXT: sraiw a1, a1, 16
; RV64I-NEXT: slli a0, a0, 16
; RV64I-NEXT: sraiw a0, a0, 16
-; RV64I-NEXT: call __divdi3@plt
+; RV64I-NEXT: call __divdi3
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
@@ -625,7 +625,7 @@ define i16 @sdiv16_constant(i16 %a) nounwind {
; RV64I-NEXT: slli a0, a0, 16
; RV64I-NEXT: sraiw a0, a0, 16
; RV64I-NEXT: li a1, 5
-; RV64I-NEXT: call __divdi3@plt
+; RV64I-NEXT: call __divdi3
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
@@ -679,7 +679,7 @@ define i16 @sdiv16_constant_lhs(i16 %a) nounwind {
; RV64I-NEXT: slli a0, a0, 16
; RV64I-NEXT: sraiw a1, a0, 16
; RV64I-NEXT: li a0, -10
-; RV64I-NEXT: call __divdi3@plt
+; RV64I-NEXT: call __divdi3
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
diff --git a/llvm/test/CodeGen/RISCV/rv64-legal-i32/mem64.ll b/llvm/test/CodeGen/RISCV/rv64-legal-i32/mem64.ll
index 76ab0e7d5810..de4c21f32468 100644
--- a/llvm/test/CodeGen/RISCV/rv64-legal-i32/mem64.ll
+++ b/llvm/test/CodeGen/RISCV/rv64-legal-i32/mem64.ll
@@ -325,7 +325,7 @@ define void @addi_fold_crash(i64 %arg) nounwind {
; RV64I-NEXT: add a0, a1, a0
; RV64I-NEXT: sb zero, 0(a0)
; RV64I-NEXT: mv a0, a1
-; RV64I-NEXT: call snork@plt
+; RV64I-NEXT: call snork
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
diff --git a/llvm/test/CodeGen/RISCV/rv64-legal-i32/rem.ll b/llvm/test/CodeGen/RISCV/rv64-legal-i32/rem.ll
index 11adbbdd245f..9d7b77de03ee 100644
--- a/llvm/test/CodeGen/RISCV/rv64-legal-i32/rem.ll
+++ b/llvm/test/CodeGen/RISCV/rv64-legal-i32/rem.ll
@@ -13,7 +13,7 @@ define i32 @urem(i32 %a, i32 %b) nounwind {
; RV64I-NEXT: srli a0, a0, 32
; RV64I-NEXT: slli a1, a1, 32
; RV64I-NEXT: srli a1, a1, 32
-; RV64I-NEXT: call __umoddi3@plt
+; RV64I-NEXT: call __umoddi3
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
@@ -34,7 +34,7 @@ define i32 @urem_constant_lhs(i32 %a) nounwind {
; RV64I-NEXT: slli a0, a0, 32
; RV64I-NEXT: srli a1, a0, 32
; RV64I-NEXT: li a0, 10
-; RV64I-NEXT: call __umoddi3@plt
+; RV64I-NEXT: call __umoddi3
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
@@ -55,7 +55,7 @@ define i32 @srem(i32 %a, i32 %b) nounwind {
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT: sext.w a0, a0
; RV64I-NEXT: sext.w a1, a1
-; RV64I-NEXT: call __moddi3@plt
+; RV64I-NEXT: call __moddi3
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
@@ -121,7 +121,7 @@ define i32 @srem_constant_lhs(i32 %a) nounwind {
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT: sext.w a1, a0
; RV64I-NEXT: li a0, -10
-; RV64I-NEXT: call __moddi3@plt
+; RV64I-NEXT: call __moddi3
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
@@ -138,7 +138,7 @@ define i32 @srem_constant_lhs(i32 %a) nounwind {
define i64 @urem64(i64 %a, i64 %b) nounwind {
; RV64I-LABEL: urem64:
; RV64I: # %bb.0:
-; RV64I-NEXT: tail __umoddi3@plt
+; RV64I-NEXT: tail __umoddi3
;
; RV64IM-LABEL: urem64:
; RV64IM: # %bb.0:
@@ -153,7 +153,7 @@ define i64 @urem64_constant_lhs(i64 %a) nounwind {
; RV64I: # %bb.0:
; RV64I-NEXT: mv a1, a0
; RV64I-NEXT: li a0, 10
-; RV64I-NEXT: tail __umoddi3@plt
+; RV64I-NEXT: tail __umoddi3
;
; RV64IM-LABEL: urem64_constant_lhs:
; RV64IM: # %bb.0:
@@ -167,7 +167,7 @@ define i64 @urem64_constant_lhs(i64 %a) nounwind {
define i64 @srem64(i64 %a, i64 %b) nounwind {
; RV64I-LABEL: srem64:
; RV64I: # %bb.0:
-; RV64I-NEXT: tail __moddi3@plt
+; RV64I-NEXT: tail __moddi3
;
; RV64IM-LABEL: srem64:
; RV64IM: # %bb.0:
@@ -182,7 +182,7 @@ define i64 @srem64_constant_lhs(i64 %a) nounwind {
; RV64I: # %bb.0:
; RV64I-NEXT: mv a1, a0
; RV64I-NEXT: li a0, -10
-; RV64I-NEXT: tail __moddi3@plt
+; RV64I-NEXT: tail __moddi3
;
; RV64IM-LABEL: srem64_constant_lhs:
; RV64IM: # %bb.0:
@@ -200,7 +200,7 @@ define i8 @urem8(i8 %a, i8 %b) nounwind {
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT: andi a0, a0, 255
; RV64I-NEXT: andi a1, a1, 255
-; RV64I-NEXT: call __umoddi3@plt
+; RV64I-NEXT: call __umoddi3
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
@@ -222,7 +222,7 @@ define i8 @urem8_constant_lhs(i8 %a) nounwind {
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT: andi a1, a0, 255
; RV64I-NEXT: li a0, 10
-; RV64I-NEXT: call __umoddi3@plt
+; RV64I-NEXT: call __umoddi3
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
@@ -247,7 +247,7 @@ define i8 @srem8(i8 %a, i8 %b) nounwind {
; RV64I-NEXT: sraiw a1, a1, 24
; RV64I-NEXT: slli a0, a0, 24
; RV64I-NEXT: sraiw a0, a0, 24
-; RV64I-NEXT: call __moddi3@plt
+; RV64I-NEXT: call __moddi3
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
@@ -272,7 +272,7 @@ define i8 @srem8_constant_lhs(i8 %a) nounwind {
; RV64I-NEXT: slli a0, a0, 24
; RV64I-NEXT: sraiw a1, a0, 24
; RV64I-NEXT: li a0, -10
-; RV64I-NEXT: call __moddi3@plt
+; RV64I-NEXT: call __moddi3
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
@@ -298,7 +298,7 @@ define i16 @urem16(i16 %a, i16 %b) nounwind {
; RV64I-NEXT: addiw a2, a2, -1
; RV64I-NEXT: and a0, a0, a2
; RV64I-NEXT: and a1, a1, a2
-; RV64I-NEXT: call __umoddi3@plt
+; RV64I-NEXT: call __umoddi3
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
@@ -323,7 +323,7 @@ define i16 @urem16_constant_lhs(i16 %a) nounwind {
; RV64I-NEXT: slli a0, a0, 48
; RV64I-NEXT: srli a1, a0, 48
; RV64I-NEXT: li a0, 10
-; RV64I-NEXT: call __umoddi3@plt
+; RV64I-NEXT: call __umoddi3
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
@@ -348,7 +348,7 @@ define i16 @srem16(i16 %a, i16 %b) nounwind {
; RV64I-NEXT: sraiw a1, a1, 16
; RV64I-NEXT: slli a0, a0, 16
; RV64I-NEXT: sraiw a0, a0, 16
-; RV64I-NEXT: call __moddi3@plt
+; RV64I-NEXT: call __moddi3
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
@@ -373,7 +373,7 @@ define i16 @srem16_constant_lhs(i16 %a) nounwind {
; RV64I-NEXT: slli a0, a0, 16
; RV64I-NEXT: sraiw a1, a0, 16
; RV64I-NEXT: li a0, -10
-; RV64I-NEXT: call __moddi3@plt
+; RV64I-NEXT: call __moddi3
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
diff --git a/llvm/test/CodeGen/RISCV/rv64-legal-i32/rv64xtheadbb.ll b/llvm/test/CodeGen/RISCV/rv64-legal-i32/rv64xtheadbb.ll
index 3e2e6ac75af8..4ec7f2660b2a 100644
--- a/llvm/test/CodeGen/RISCV/rv64-legal-i32/rv64xtheadbb.ll
+++ b/llvm/test/CodeGen/RISCV/rv64-legal-i32/rv64xtheadbb.ll
@@ -42,7 +42,7 @@ define signext i32 @ctlz_i32(i32 signext %a) nounwind {
; RV64I-NEXT: and a0, a0, a1
; RV64I-NEXT: lui a1, 4112
; RV64I-NEXT: addiw a1, a1, 257
-; RV64I-NEXT: call __muldi3@plt
+; RV64I-NEXT: call __muldi3
; RV64I-NEXT: srliw a0, a0, 24
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
@@ -97,7 +97,7 @@ define signext i32 @log2_i32(i32 signext %a) nounwind {
; RV64I-NEXT: and a0, a0, a1
; RV64I-NEXT: lui a1, 4112
; RV64I-NEXT: addiw a1, a1, 257
-; RV64I-NEXT: call __muldi3@plt
+; RV64I-NEXT: call __muldi3
; RV64I-NEXT: srliw a0, a0, 24
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
@@ -162,7 +162,7 @@ define signext i32 @log2_ceil_i32(i32 signext %a) nounwind {
; RV64I-NEXT: and a0, a0, a1
; RV64I-NEXT: lui a1, 4112
; RV64I-NEXT: addiw a1, a1, 257
-; RV64I-NEXT: call __muldi3@plt
+; RV64I-NEXT: call __muldi3
; RV64I-NEXT: srliw a1, a0, 24
; RV64I-NEXT: .LBB2_2: # %cond.end
; RV64I-NEXT: subw a0, s0, a1
@@ -223,7 +223,7 @@ define signext i32 @findLastSet_i32(i32 signext %a) nounwind {
; RV64I-NEXT: and a0, a0, a1
; RV64I-NEXT: lui a1, 4112
; RV64I-NEXT: addiw a1, a1, 257
-; RV64I-NEXT: call __muldi3@plt
+; RV64I-NEXT: call __muldi3
; RV64I-NEXT: srliw a0, a0, 24
; RV64I-NEXT: xori a0, a0, 31
; RV64I-NEXT: snez a1, s0
@@ -290,7 +290,7 @@ define i32 @ctlz_lshr_i32(i32 signext %a) {
; RV64I-NEXT: and a0, a0, a1
; RV64I-NEXT: lui a1, 4112
; RV64I-NEXT: addiw a1, a1, 257
-; RV64I-NEXT: call __muldi3@plt
+; RV64I-NEXT: call __muldi3
; RV64I-NEXT: srliw a0, a0, 24
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
@@ -358,7 +358,7 @@ define i64 @ctlz_i64(i64 %a) nounwind {
; RV64I-NEXT: addiw a1, a1, 257
; RV64I-NEXT: slli a2, a1, 32
; RV64I-NEXT: add a1, a1, a2
-; RV64I-NEXT: call __muldi3@plt
+; RV64I-NEXT: call __muldi3
; RV64I-NEXT: srli a0, a0, 56
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
@@ -388,7 +388,7 @@ define signext i32 @cttz_i32(i32 signext %a) nounwind {
; RV64I-NEXT: and a0, a0, a1
; RV64I-NEXT: lui a1, 30667
; RV64I-NEXT: addiw a1, a1, 1329
-; RV64I-NEXT: call __muldi3@plt
+; RV64I-NEXT: call __muldi3
; RV64I-NEXT: srliw a0, a0, 27
; RV64I-NEXT: lui a1, %hi(.LCPI6_0)
; RV64I-NEXT: addi a1, a1, %lo(.LCPI6_0)
@@ -411,7 +411,7 @@ define signext i32 @cttz_i32(i32 signext %a) nounwind {
; RV64XTHEADBB-NEXT: and a0, a0, a1
; RV64XTHEADBB-NEXT: lui a1, 30667
; RV64XTHEADBB-NEXT: addiw a1, a1, 1329
-; RV64XTHEADBB-NEXT: call __muldi3@plt
+; RV64XTHEADBB-NEXT: call __muldi3
; RV64XTHEADBB-NEXT: srliw a0, a0, 27
; RV64XTHEADBB-NEXT: lui a1, %hi(.LCPI6_0)
; RV64XTHEADBB-NEXT: addi a1, a1, %lo(.LCPI6_0)
@@ -440,7 +440,7 @@ define signext i32 @cttz_zero_undef_i32(i32 signext %a) nounwind {
; RV64I-NEXT: and a0, a0, a1
; RV64I-NEXT: lui a1, 30667
; RV64I-NEXT: addiw a1, a1, 1329
-; RV64I-NEXT: call __muldi3@plt
+; RV64I-NEXT: call __muldi3
; RV64I-NEXT: srliw a0, a0, 27
; RV64I-NEXT: lui a1, %hi(.LCPI7_0)
; RV64I-NEXT: addi a1, a1, %lo(.LCPI7_0)
@@ -458,7 +458,7 @@ define signext i32 @cttz_zero_undef_i32(i32 signext %a) nounwind {
; RV64XTHEADBB-NEXT: and a0, a0, a1
; RV64XTHEADBB-NEXT: lui a1, 30667
; RV64XTHEADBB-NEXT: addiw a1, a1, 1329
-; RV64XTHEADBB-NEXT: call __muldi3@plt
+; RV64XTHEADBB-NEXT: call __muldi3
; RV64XTHEADBB-NEXT: srliw a0, a0, 27
; RV64XTHEADBB-NEXT: lui a1, %hi(.LCPI7_0)
; RV64XTHEADBB-NEXT: addi a1, a1, %lo(.LCPI7_0)
@@ -482,7 +482,7 @@ define signext i32 @findFirstSet_i32(i32 signext %a) nounwind {
; RV64I-NEXT: and a0, s0, a0
; RV64I-NEXT: lui a1, 30667
; RV64I-NEXT: addiw a1, a1, 1329
-; RV64I-NEXT: call __muldi3@plt
+; RV64I-NEXT: call __muldi3
; RV64I-NEXT: srliw a0, a0, 27
; RV64I-NEXT: lui a1, %hi(.LCPI8_0)
; RV64I-NEXT: addi a1, a1, %lo(.LCPI8_0)
@@ -506,7 +506,7 @@ define signext i32 @findFirstSet_i32(i32 signext %a) nounwind {
; RV64XTHEADBB-NEXT: and a0, s0, a0
; RV64XTHEADBB-NEXT: lui a1, 30667
; RV64XTHEADBB-NEXT: addiw a1, a1, 1329
-; RV64XTHEADBB-NEXT: call __muldi3@plt
+; RV64XTHEADBB-NEXT: call __muldi3
; RV64XTHEADBB-NEXT: srliw a0, a0, 27
; RV64XTHEADBB-NEXT: lui a1, %hi(.LCPI8_0)
; RV64XTHEADBB-NEXT: addi a1, a1, %lo(.LCPI8_0)
@@ -536,7 +536,7 @@ define signext i32 @ffs_i32(i32 signext %a) nounwind {
; RV64I-NEXT: and a0, s0, a0
; RV64I-NEXT: lui a1, 30667
; RV64I-NEXT: addiw a1, a1, 1329
-; RV64I-NEXT: call __muldi3@plt
+; RV64I-NEXT: call __muldi3
; RV64I-NEXT: srliw a0, a0, 27
; RV64I-NEXT: lui a1, %hi(.LCPI9_0)
; RV64I-NEXT: addi a1, a1, %lo(.LCPI9_0)
@@ -563,7 +563,7 @@ define signext i32 @ffs_i32(i32 signext %a) nounwind {
; RV64XTHEADBB-NEXT: and a0, s0, a0
; RV64XTHEADBB-NEXT: lui a1, 30667
; RV64XTHEADBB-NEXT: addiw a1, a1, 1329
-; RV64XTHEADBB-NEXT: call __muldi3@plt
+; RV64XTHEADBB-NEXT: call __muldi3
; RV64XTHEADBB-NEXT: srliw a0, a0, 27
; RV64XTHEADBB-NEXT: lui a1, %hi(.LCPI9_0)
; RV64XTHEADBB-NEXT: addi a1, a1, %lo(.LCPI9_0)
@@ -599,7 +599,7 @@ define i64 @cttz_i64(i64 %a) nounwind {
; RV64I-NEXT: and a0, a0, a1
; RV64I-NEXT: lui a1, %hi(.LCPI10_0)
; RV64I-NEXT: ld a1, %lo(.LCPI10_0)(a1)
-; RV64I-NEXT: call __muldi3@plt
+; RV64I-NEXT: call __muldi3
; RV64I-NEXT: srli a0, a0, 58
; RV64I-NEXT: lui a1, %hi(.LCPI10_1)
; RV64I-NEXT: addi a1, a1, %lo(.LCPI10_1)
diff --git a/llvm/test/CodeGen/RISCV/rv64-legal-i32/rv64zbb.ll b/llvm/test/CodeGen/RISCV/rv64-legal-i32/rv64zbb.ll
index 1170a3011b9b..9b3f206be4a0 100644
--- a/llvm/test/CodeGen/RISCV/rv64-legal-i32/rv64zbb.ll
+++ b/llvm/test/CodeGen/RISCV/rv64-legal-i32/rv64zbb.ll
@@ -42,7 +42,7 @@ define signext i32 @ctlz_i32(i32 signext %a) nounwind {
; RV64I-NEXT: and a0, a0, a1
; RV64I-NEXT: lui a1, 4112
; RV64I-NEXT: addiw a1, a1, 257
-; RV64I-NEXT: call __muldi3@plt
+; RV64I-NEXT: call __muldi3
; RV64I-NEXT: srliw a0, a0, 24
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
@@ -95,7 +95,7 @@ define signext i32 @log2_i32(i32 signext %a) nounwind {
; RV64I-NEXT: and a0, a0, a1
; RV64I-NEXT: lui a1, 4112
; RV64I-NEXT: addiw a1, a1, 257
-; RV64I-NEXT: call __muldi3@plt
+; RV64I-NEXT: call __muldi3
; RV64I-NEXT: srliw a0, a0, 24
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
@@ -158,7 +158,7 @@ define signext i32 @log2_ceil_i32(i32 signext %a) nounwind {
; RV64I-NEXT: and a0, a0, a1
; RV64I-NEXT: lui a1, 4112
; RV64I-NEXT: addiw a1, a1, 257
-; RV64I-NEXT: call __muldi3@plt
+; RV64I-NEXT: call __muldi3
; RV64I-NEXT: srliw a1, a0, 24
; RV64I-NEXT: .LBB2_2: # %cond.end
; RV64I-NEXT: subw a0, s0, a1
@@ -216,7 +216,7 @@ define signext i32 @findLastSet_i32(i32 signext %a) nounwind {
; RV64I-NEXT: and a0, a0, a1
; RV64I-NEXT: lui a1, 4112
; RV64I-NEXT: addiw a1, a1, 257
-; RV64I-NEXT: call __muldi3@plt
+; RV64I-NEXT: call __muldi3
; RV64I-NEXT: srliw a0, a0, 24
; RV64I-NEXT: xori a0, a0, 31
; RV64I-NEXT: snez a1, s0
@@ -281,7 +281,7 @@ define i32 @ctlz_lshr_i32(i32 signext %a) {
; RV64I-NEXT: and a0, a0, a1
; RV64I-NEXT: lui a1, 4112
; RV64I-NEXT: addiw a1, a1, 257
-; RV64I-NEXT: call __muldi3@plt
+; RV64I-NEXT: call __muldi3
; RV64I-NEXT: srliw a0, a0, 24
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
@@ -348,7 +348,7 @@ define i64 @ctlz_i64(i64 %a) nounwind {
; RV64I-NEXT: addiw a1, a1, 257
; RV64I-NEXT: slli a2, a1, 32
; RV64I-NEXT: add a1, a1, a2
-; RV64I-NEXT: call __muldi3@plt
+; RV64I-NEXT: call __muldi3
; RV64I-NEXT: srli a0, a0, 56
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
@@ -378,7 +378,7 @@ define signext i32 @cttz_i32(i32 signext %a) nounwind {
; RV64I-NEXT: and a0, a0, a1
; RV64I-NEXT: lui a1, 30667
; RV64I-NEXT: addiw a1, a1, 1329
-; RV64I-NEXT: call __muldi3@plt
+; RV64I-NEXT: call __muldi3
; RV64I-NEXT: srliw a0, a0, 27
; RV64I-NEXT: lui a1, %hi(.LCPI6_0)
; RV64I-NEXT: addi a1, a1, %lo(.LCPI6_0)
@@ -408,7 +408,7 @@ define signext i32 @cttz_zero_undef_i32(i32 signext %a) nounwind {
; RV64I-NEXT: and a0, a0, a1
; RV64I-NEXT: lui a1, 30667
; RV64I-NEXT: addiw a1, a1, 1329
-; RV64I-NEXT: call __muldi3@plt
+; RV64I-NEXT: call __muldi3
; RV64I-NEXT: srliw a0, a0, 27
; RV64I-NEXT: lui a1, %hi(.LCPI7_0)
; RV64I-NEXT: addi a1, a1, %lo(.LCPI7_0)
@@ -437,7 +437,7 @@ define signext i32 @findFirstSet_i32(i32 signext %a) nounwind {
; RV64I-NEXT: and a0, s0, a0
; RV64I-NEXT: lui a1, 30667
; RV64I-NEXT: addiw a1, a1, 1329
-; RV64I-NEXT: call __muldi3@plt
+; RV64I-NEXT: call __muldi3
; RV64I-NEXT: srliw a0, a0, 27
; RV64I-NEXT: lui a1, %hi(.LCPI8_0)
; RV64I-NEXT: addi a1, a1, %lo(.LCPI8_0)
@@ -475,7 +475,7 @@ define signext i32 @ffs_i32(i32 signext %a) nounwind {
; RV64I-NEXT: and a0, s0, a0
; RV64I-NEXT: lui a1, 30667
; RV64I-NEXT: addiw a1, a1, 1329
-; RV64I-NEXT: call __muldi3@plt
+; RV64I-NEXT: call __muldi3
; RV64I-NEXT: srliw a0, a0, 27
; RV64I-NEXT: lui a1, %hi(.LCPI9_0)
; RV64I-NEXT: addi a1, a1, %lo(.LCPI9_0)
@@ -521,7 +521,7 @@ define i64 @cttz_i64(i64 %a) nounwind {
; RV64I-NEXT: and a0, a0, a1
; RV64I-NEXT: lui a1, %hi(.LCPI10_0)
; RV64I-NEXT: ld a1, %lo(.LCPI10_0)(a1)
-; RV64I-NEXT: call __muldi3@plt
+; RV64I-NEXT: call __muldi3
; RV64I-NEXT: srli a0, a0, 58
; RV64I-NEXT: lui a1, %hi(.LCPI10_1)
; RV64I-NEXT: addi a1, a1, %lo(.LCPI10_1)
@@ -567,7 +567,7 @@ define signext i32 @ctpop_i32(i32 signext %a) nounwind {
; RV64I-NEXT: and a0, a0, a1
; RV64I-NEXT: lui a1, 4112
; RV64I-NEXT: addiw a1, a1, 257
-; RV64I-NEXT: call __muldi3@plt
+; RV64I-NEXT: call __muldi3
; RV64I-NEXT: srliw a0, a0, 24
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
@@ -605,7 +605,7 @@ define signext i32 @ctpop_i32_load(ptr %p) nounwind {
; RV64I-NEXT: and a0, a0, a1
; RV64I-NEXT: lui a1, 4112
; RV64I-NEXT: addiw a1, a1, 257
-; RV64I-NEXT: call __muldi3@plt
+; RV64I-NEXT: call __muldi3
; RV64I-NEXT: srliw a0, a0, 24
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
@@ -654,7 +654,7 @@ define i64 @ctpop_i64(i64 %a) nounwind {
; RV64I-NEXT: addiw a1, a1, 257
; RV64I-NEXT: slli a2, a1, 32
; RV64I-NEXT: add a1, a1, a2
-; RV64I-NEXT: call __muldi3@plt
+; RV64I-NEXT: call __muldi3
; RV64I-NEXT: srli a0, a0, 56
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
diff --git a/llvm/test/CodeGen/RISCV/rv64-legal-i32/rv64zbs.ll b/llvm/test/CodeGen/RISCV/rv64-legal-i32/rv64zbs.ll
index af1eb318cb46..2db8e2c9b3d1 100644
--- a/llvm/test/CodeGen/RISCV/rv64-legal-i32/rv64zbs.ll
+++ b/llvm/test/CodeGen/RISCV/rv64-legal-i32/rv64zbs.ll
@@ -374,7 +374,7 @@ define void @bext_i32_trunc(i32 signext %0, i32 signext %1) {
; RV64I-NEXT: # %bb.1:
; RV64I-NEXT: ret
; RV64I-NEXT: .LBB19_2:
-; RV64I-NEXT: tail bar@plt
+; RV64I-NEXT: tail bar
;
; RV64ZBS-LABEL: bext_i32_trunc:
; RV64ZBS: # %bb.0:
@@ -383,7 +383,7 @@ define void @bext_i32_trunc(i32 signext %0, i32 signext %1) {
; RV64ZBS-NEXT: # %bb.1:
; RV64ZBS-NEXT: ret
; RV64ZBS-NEXT: .LBB19_2:
-; RV64ZBS-NEXT: tail bar@plt
+; RV64ZBS-NEXT: tail bar
%3 = shl i32 1, %1
%4 = and i32 %3, %0
%5 = icmp eq i32 %4, 0
diff --git a/llvm/test/CodeGen/RISCV/rv64i-complex-float.ll b/llvm/test/CodeGen/RISCV/rv64i-complex-float.ll
index 690828c77943..16f4119ef20b 100644
--- a/llvm/test/CodeGen/RISCV/rv64i-complex-float.ll
+++ b/llvm/test/CodeGen/RISCV/rv64i-complex-float.ll
@@ -15,11 +15,11 @@ define i64 @complex_float_add(i64 %a.coerce, i64 %b.coerce) nounwind {
; CHECK-NEXT: sd s2, 0(sp) # 8-byte Folded Spill
; CHECK-NEXT: srli s0, a0, 32
; CHECK-NEXT: srli s1, a1, 32
-; CHECK-NEXT: call __addsf3@plt
+; CHECK-NEXT: call __addsf3
; CHECK-NEXT: mv s2, a0
; CHECK-NEXT: mv a0, s0
; CHECK-NEXT: mv a1, s1
-; CHECK-NEXT: call __addsf3@plt
+; CHECK-NEXT: call __addsf3
; CHECK-NEXT: slli a0, a0, 32
; CHECK-NEXT: slli s2, s2, 32
; CHECK-NEXT: srli a1, s2, 32
diff --git a/llvm/test/CodeGen/RISCV/rv64i-double-softfloat.ll b/llvm/test/CodeGen/RISCV/rv64i-double-softfloat.ll
index 25278caee64d..6fdf2a3a939c 100644
--- a/llvm/test/CodeGen/RISCV/rv64i-double-softfloat.ll
+++ b/llvm/test/CodeGen/RISCV/rv64i-double-softfloat.ll
@@ -15,7 +15,7 @@ define i32 @strict_fp64_to_ui32(double %a) nounwind strictfp {
; RV64I: # %bb.0: # %entry
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT: call __fixunsdfsi@plt
+; RV64I-NEXT: call __fixunsdfsi
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
@@ -24,7 +24,7 @@ define i32 @strict_fp64_to_ui32(double %a) nounwind strictfp {
; RV64IF: # %bb.0: # %entry
; RV64IF-NEXT: addi sp, sp, -16
; RV64IF-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
-; RV64IF-NEXT: call __fixunsdfsi@plt
+; RV64IF-NEXT: call __fixunsdfsi
; RV64IF-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64IF-NEXT: addi sp, sp, 16
; RV64IF-NEXT: ret
@@ -38,7 +38,7 @@ define i32 @strict_fp64_to_si32(double %a) nounwind strictfp {
; RV64I: # %bb.0: # %entry
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT: call __fixdfsi@plt
+; RV64I-NEXT: call __fixdfsi
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
@@ -47,7 +47,7 @@ define i32 @strict_fp64_to_si32(double %a) nounwind strictfp {
; RV64IF: # %bb.0: # %entry
; RV64IF-NEXT: addi sp, sp, -16
; RV64IF-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
-; RV64IF-NEXT: call __fixdfsi@plt
+; RV64IF-NEXT: call __fixdfsi
; RV64IF-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64IF-NEXT: addi sp, sp, 16
; RV64IF-NEXT: ret
diff --git a/llvm/test/CodeGen/RISCV/rv64i-single-softfloat.ll b/llvm/test/CodeGen/RISCV/rv64i-single-softfloat.ll
index b7e112bbb7b5..b645b621c75c 100644
--- a/llvm/test/CodeGen/RISCV/rv64i-single-softfloat.ll
+++ b/llvm/test/CodeGen/RISCV/rv64i-single-softfloat.ll
@@ -13,7 +13,7 @@ define i32 @strict_fp32_to_ui32(float %a) nounwind strictfp {
; RV64I: # %bb.0: # %entry
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT: call __fixunssfsi@plt
+; RV64I-NEXT: call __fixunssfsi
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
@@ -27,7 +27,7 @@ define i32 @strict_fp32_to_si32(float %a) nounwind strictfp {
; RV64I: # %bb.0: # %entry
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT: call __fixsfsi@plt
+; RV64I-NEXT: call __fixsfsi
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
diff --git a/llvm/test/CodeGen/RISCV/rv64xtheadbb.ll b/llvm/test/CodeGen/RISCV/rv64xtheadbb.ll
index c8cd710fe9ae..1f62ea9f5681 100644
--- a/llvm/test/CodeGen/RISCV/rv64xtheadbb.ll
+++ b/llvm/test/CodeGen/RISCV/rv64xtheadbb.ll
@@ -42,7 +42,7 @@ define signext i32 @ctlz_i32(i32 signext %a) nounwind {
; RV64I-NEXT: and a0, a0, a1
; RV64I-NEXT: lui a1, 4112
; RV64I-NEXT: addiw a1, a1, 257
-; RV64I-NEXT: call __muldi3@plt
+; RV64I-NEXT: call __muldi3
; RV64I-NEXT: srliw a0, a0, 24
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
@@ -97,7 +97,7 @@ define signext i32 @log2_i32(i32 signext %a) nounwind {
; RV64I-NEXT: and a0, a0, a1
; RV64I-NEXT: lui a1, 4112
; RV64I-NEXT: addiw a1, a1, 257
-; RV64I-NEXT: call __muldi3@plt
+; RV64I-NEXT: call __muldi3
; RV64I-NEXT: srliw a0, a0, 24
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
@@ -162,7 +162,7 @@ define signext i32 @log2_ceil_i32(i32 signext %a) nounwind {
; RV64I-NEXT: and a0, a0, a1
; RV64I-NEXT: lui a1, 4112
; RV64I-NEXT: addiw a1, a1, 257
-; RV64I-NEXT: call __muldi3@plt
+; RV64I-NEXT: call __muldi3
; RV64I-NEXT: srliw a1, a0, 24
; RV64I-NEXT: .LBB2_2: # %cond.end
; RV64I-NEXT: sub a0, s0, a1
@@ -222,7 +222,7 @@ define signext i32 @findLastSet_i32(i32 signext %a) nounwind {
; RV64I-NEXT: and a0, a0, a1
; RV64I-NEXT: lui a1, 4112
; RV64I-NEXT: addiw a1, a1, 257
-; RV64I-NEXT: call __muldi3@plt
+; RV64I-NEXT: call __muldi3
; RV64I-NEXT: srliw a0, a0, 24
; RV64I-NEXT: xori a0, a0, 31
; RV64I-NEXT: snez a1, s0
@@ -289,7 +289,7 @@ define i32 @ctlz_lshr_i32(i32 signext %a) {
; RV64I-NEXT: and a0, a0, a1
; RV64I-NEXT: lui a1, 4112
; RV64I-NEXT: addiw a1, a1, 257
-; RV64I-NEXT: call __muldi3@plt
+; RV64I-NEXT: call __muldi3
; RV64I-NEXT: srliw a0, a0, 24
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
@@ -358,7 +358,7 @@ define i64 @ctlz_i64(i64 %a) nounwind {
; RV64I-NEXT: addiw a1, a1, 257
; RV64I-NEXT: slli a2, a1, 32
; RV64I-NEXT: add a1, a1, a2
-; RV64I-NEXT: call __muldi3@plt
+; RV64I-NEXT: call __muldi3
; RV64I-NEXT: srli a0, a0, 56
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
@@ -388,7 +388,7 @@ define signext i32 @cttz_i32(i32 signext %a) nounwind {
; RV64I-NEXT: and a0, a0, a1
; RV64I-NEXT: lui a1, 30667
; RV64I-NEXT: addiw a1, a1, 1329
-; RV64I-NEXT: call __muldi3@plt
+; RV64I-NEXT: call __muldi3
; RV64I-NEXT: srliw a0, a0, 27
; RV64I-NEXT: lui a1, %hi(.LCPI6_0)
; RV64I-NEXT: addi a1, a1, %lo(.LCPI6_0)
@@ -428,7 +428,7 @@ define signext i32 @cttz_zero_undef_i32(i32 signext %a) nounwind {
; RV64I-NEXT: and a0, a0, a1
; RV64I-NEXT: lui a1, 30667
; RV64I-NEXT: addiw a1, a1, 1329
-; RV64I-NEXT: call __muldi3@plt
+; RV64I-NEXT: call __muldi3
; RV64I-NEXT: srliw a0, a0, 27
; RV64I-NEXT: lui a1, %hi(.LCPI7_0)
; RV64I-NEXT: addi a1, a1, %lo(.LCPI7_0)
@@ -462,7 +462,7 @@ define signext i32 @findFirstSet_i32(i32 signext %a) nounwind {
; RV64I-NEXT: and a0, s0, a0
; RV64I-NEXT: lui a1, 30667
; RV64I-NEXT: addiw a1, a1, 1329
-; RV64I-NEXT: call __muldi3@plt
+; RV64I-NEXT: call __muldi3
; RV64I-NEXT: srliw a0, a0, 27
; RV64I-NEXT: lui a1, %hi(.LCPI8_0)
; RV64I-NEXT: addi a1, a1, %lo(.LCPI8_0)
@@ -505,7 +505,7 @@ define signext i32 @ffs_i32(i32 signext %a) nounwind {
; RV64I-NEXT: and a0, s0, a0
; RV64I-NEXT: lui a1, 30667
; RV64I-NEXT: addiw a1, a1, 1329
-; RV64I-NEXT: call __muldi3@plt
+; RV64I-NEXT: call __muldi3
; RV64I-NEXT: srliw a0, a0, 27
; RV64I-NEXT: lui a1, %hi(.LCPI9_0)
; RV64I-NEXT: addi a1, a1, %lo(.LCPI9_0)
@@ -552,7 +552,7 @@ define i64 @cttz_i64(i64 %a) nounwind {
; RV64I-NEXT: and a0, a0, a1
; RV64I-NEXT: lui a1, %hi(.LCPI10_0)
; RV64I-NEXT: ld a1, %lo(.LCPI10_0)(a1)
-; RV64I-NEXT: call __muldi3@plt
+; RV64I-NEXT: call __muldi3
; RV64I-NEXT: srli a0, a0, 58
; RV64I-NEXT: lui a1, %hi(.LCPI10_1)
; RV64I-NEXT: addi a1, a1, %lo(.LCPI10_1)
diff --git a/llvm/test/CodeGen/RISCV/rv64zbb.ll b/llvm/test/CodeGen/RISCV/rv64zbb.ll
index 8123721ed316..2269d8d04c9c 100644
--- a/llvm/test/CodeGen/RISCV/rv64zbb.ll
+++ b/llvm/test/CodeGen/RISCV/rv64zbb.ll
@@ -42,7 +42,7 @@ define signext i32 @ctlz_i32(i32 signext %a) nounwind {
; RV64I-NEXT: and a0, a0, a1
; RV64I-NEXT: lui a1, 4112
; RV64I-NEXT: addiw a1, a1, 257
-; RV64I-NEXT: call __muldi3@plt
+; RV64I-NEXT: call __muldi3
; RV64I-NEXT: srliw a0, a0, 24
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
@@ -95,7 +95,7 @@ define signext i32 @log2_i32(i32 signext %a) nounwind {
; RV64I-NEXT: and a0, a0, a1
; RV64I-NEXT: lui a1, 4112
; RV64I-NEXT: addiw a1, a1, 257
-; RV64I-NEXT: call __muldi3@plt
+; RV64I-NEXT: call __muldi3
; RV64I-NEXT: srliw a0, a0, 24
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
@@ -158,7 +158,7 @@ define signext i32 @log2_ceil_i32(i32 signext %a) nounwind {
; RV64I-NEXT: and a0, a0, a1
; RV64I-NEXT: lui a1, 4112
; RV64I-NEXT: addiw a1, a1, 257
-; RV64I-NEXT: call __muldi3@plt
+; RV64I-NEXT: call __muldi3
; RV64I-NEXT: srliw a1, a0, 24
; RV64I-NEXT: .LBB2_2: # %cond.end
; RV64I-NEXT: sub a0, s0, a1
@@ -216,7 +216,7 @@ define signext i32 @findLastSet_i32(i32 signext %a) nounwind {
; RV64I-NEXT: and a0, a0, a1
; RV64I-NEXT: lui a1, 4112
; RV64I-NEXT: addiw a1, a1, 257
-; RV64I-NEXT: call __muldi3@plt
+; RV64I-NEXT: call __muldi3
; RV64I-NEXT: srliw a0, a0, 24
; RV64I-NEXT: xori a0, a0, 31
; RV64I-NEXT: snez a1, s0
@@ -281,7 +281,7 @@ define i32 @ctlz_lshr_i32(i32 signext %a) {
; RV64I-NEXT: and a0, a0, a1
; RV64I-NEXT: lui a1, 4112
; RV64I-NEXT: addiw a1, a1, 257
-; RV64I-NEXT: call __muldi3@plt
+; RV64I-NEXT: call __muldi3
; RV64I-NEXT: srliw a0, a0, 24
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
@@ -348,7 +348,7 @@ define i64 @ctlz_i64(i64 %a) nounwind {
; RV64I-NEXT: addiw a1, a1, 257
; RV64I-NEXT: slli a2, a1, 32
; RV64I-NEXT: add a1, a1, a2
-; RV64I-NEXT: call __muldi3@plt
+; RV64I-NEXT: call __muldi3
; RV64I-NEXT: srli a0, a0, 56
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
@@ -378,7 +378,7 @@ define signext i32 @cttz_i32(i32 signext %a) nounwind {
; RV64I-NEXT: and a0, a0, a1
; RV64I-NEXT: lui a1, 30667
; RV64I-NEXT: addiw a1, a1, 1329
-; RV64I-NEXT: call __muldi3@plt
+; RV64I-NEXT: call __muldi3
; RV64I-NEXT: srliw a0, a0, 27
; RV64I-NEXT: lui a1, %hi(.LCPI6_0)
; RV64I-NEXT: addi a1, a1, %lo(.LCPI6_0)
@@ -408,7 +408,7 @@ define signext i32 @cttz_zero_undef_i32(i32 signext %a) nounwind {
; RV64I-NEXT: and a0, a0, a1
; RV64I-NEXT: lui a1, 30667
; RV64I-NEXT: addiw a1, a1, 1329
-; RV64I-NEXT: call __muldi3@plt
+; RV64I-NEXT: call __muldi3
; RV64I-NEXT: srliw a0, a0, 27
; RV64I-NEXT: lui a1, %hi(.LCPI7_0)
; RV64I-NEXT: addi a1, a1, %lo(.LCPI7_0)
@@ -437,7 +437,7 @@ define signext i32 @findFirstSet_i32(i32 signext %a) nounwind {
; RV64I-NEXT: and a0, s0, a0
; RV64I-NEXT: lui a1, 30667
; RV64I-NEXT: addiw a1, a1, 1329
-; RV64I-NEXT: call __muldi3@plt
+; RV64I-NEXT: call __muldi3
; RV64I-NEXT: srliw a0, a0, 27
; RV64I-NEXT: lui a1, %hi(.LCPI8_0)
; RV64I-NEXT: addi a1, a1, %lo(.LCPI8_0)
@@ -475,7 +475,7 @@ define signext i32 @ffs_i32(i32 signext %a) nounwind {
; RV64I-NEXT: and a0, s0, a0
; RV64I-NEXT: lui a1, 30667
; RV64I-NEXT: addiw a1, a1, 1329
-; RV64I-NEXT: call __muldi3@plt
+; RV64I-NEXT: call __muldi3
; RV64I-NEXT: srliw a0, a0, 27
; RV64I-NEXT: lui a1, %hi(.LCPI9_0)
; RV64I-NEXT: addi a1, a1, %lo(.LCPI9_0)
@@ -518,7 +518,7 @@ define i64 @cttz_i64(i64 %a) nounwind {
; RV64I-NEXT: and a0, a0, a1
; RV64I-NEXT: lui a1, %hi(.LCPI10_0)
; RV64I-NEXT: ld a1, %lo(.LCPI10_0)(a1)
-; RV64I-NEXT: call __muldi3@plt
+; RV64I-NEXT: call __muldi3
; RV64I-NEXT: srli a0, a0, 58
; RV64I-NEXT: lui a1, %hi(.LCPI10_1)
; RV64I-NEXT: addi a1, a1, %lo(.LCPI10_1)
@@ -564,7 +564,7 @@ define signext i32 @ctpop_i32(i32 signext %a) nounwind {
; RV64I-NEXT: and a0, a0, a1
; RV64I-NEXT: lui a1, 4112
; RV64I-NEXT: addiw a1, a1, 257
-; RV64I-NEXT: call __muldi3@plt
+; RV64I-NEXT: call __muldi3
; RV64I-NEXT: srliw a0, a0, 24
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
@@ -678,7 +678,7 @@ define signext i32 @ctpop_i32_load(ptr %p) nounwind {
; RV64I-NEXT: and a0, a0, a1
; RV64I-NEXT: lui a1, 4112
; RV64I-NEXT: addiw a1, a1, 257
-; RV64I-NEXT: call __muldi3@plt
+; RV64I-NEXT: call __muldi3
; RV64I-NEXT: srliw a0, a0, 24
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
@@ -727,7 +727,7 @@ define <2 x i32> @ctpop_v2i32(<2 x i32> %a) nounwind {
; RV64I-NEXT: lui a1, 4112
; RV64I-NEXT: addiw s1, a1, 257
; RV64I-NEXT: mv a1, s1
-; RV64I-NEXT: call __muldi3@plt
+; RV64I-NEXT: call __muldi3
; RV64I-NEXT: srliw s2, a0, 24
; RV64I-NEXT: srli a0, s0, 1
; RV64I-NEXT: and a0, a0, s3
@@ -740,7 +740,7 @@ define <2 x i32> @ctpop_v2i32(<2 x i32> %a) nounwind {
; RV64I-NEXT: add a0, a0, a1
; RV64I-NEXT: and a0, a0, s5
; RV64I-NEXT: mv a1, s1
-; RV64I-NEXT: call __muldi3@plt
+; RV64I-NEXT: call __muldi3
; RV64I-NEXT: srliw a1, a0, 24
; RV64I-NEXT: mv a0, s2
; RV64I-NEXT: ld ra, 56(sp) # 8-byte Folded Reload
@@ -903,7 +903,7 @@ define i64 @ctpop_i64(i64 %a) nounwind {
; RV64I-NEXT: addiw a1, a1, 257
; RV64I-NEXT: slli a2, a1, 32
; RV64I-NEXT: add a1, a1, a2
-; RV64I-NEXT: call __muldi3@plt
+; RV64I-NEXT: call __muldi3
; RV64I-NEXT: srli a0, a0, 56
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
@@ -1034,7 +1034,7 @@ define <2 x i64> @ctpop_v2i64(<2 x i64> %a) nounwind {
; RV64I-NEXT: slli a1, s1, 32
; RV64I-NEXT: add s1, s1, a1
; RV64I-NEXT: mv a1, s1
-; RV64I-NEXT: call __muldi3@plt
+; RV64I-NEXT: call __muldi3
; RV64I-NEXT: srli s2, a0, 56
; RV64I-NEXT: srli a0, s0, 1
; RV64I-NEXT: and a0, a0, s3
@@ -1047,7 +1047,7 @@ define <2 x i64> @ctpop_v2i64(<2 x i64> %a) nounwind {
; RV64I-NEXT: add a0, a0, a1
; RV64I-NEXT: and a0, a0, s5
; RV64I-NEXT: mv a1, s1
-; RV64I-NEXT: call __muldi3@plt
+; RV64I-NEXT: call __muldi3
; RV64I-NEXT: srli a1, a0, 56
; RV64I-NEXT: mv a0, s2
; RV64I-NEXT: ld ra, 56(sp) # 8-byte Folded Reload
diff --git a/llvm/test/CodeGen/RISCV/rv64zbs.ll b/llvm/test/CodeGen/RISCV/rv64zbs.ll
index 016b0924eaf1..d370b1877470 100644
--- a/llvm/test/CodeGen/RISCV/rv64zbs.ll
+++ b/llvm/test/CodeGen/RISCV/rv64zbs.ll
@@ -376,7 +376,7 @@ define void @bext_i32_trunc(i32 signext %0, i32 signext %1) {
; RV64I-NEXT: # %bb.1:
; RV64I-NEXT: ret
; RV64I-NEXT: .LBB19_2:
-; RV64I-NEXT: tail bar@plt
+; RV64I-NEXT: tail bar
;
; RV64ZBS-LABEL: bext_i32_trunc:
; RV64ZBS: # %bb.0:
@@ -385,7 +385,7 @@ define void @bext_i32_trunc(i32 signext %0, i32 signext %1) {
; RV64ZBS-NEXT: # %bb.1:
; RV64ZBS-NEXT: ret
; RV64ZBS-NEXT: .LBB19_2:
-; RV64ZBS-NEXT: tail bar@plt
+; RV64ZBS-NEXT: tail bar
%3 = shl i32 1, %1
%4 = and i32 %3, %0
%5 = icmp eq i32 %4, 0
diff --git a/llvm/test/CodeGen/RISCV/rvv/addi-rvv-stack-object.mir b/llvm/test/CodeGen/RISCV/rvv/addi-rvv-stack-object.mir
index f807c7693332..83fc1fc994cf 100644
--- a/llvm/test/CodeGen/RISCV/rvv/addi-rvv-stack-object.mir
+++ b/llvm/test/CodeGen/RISCV/rvv/addi-rvv-stack-object.mir
@@ -53,7 +53,7 @@ body: |
bb.0 (%ir-block.0):
ADJCALLSTACKDOWN 0, 0, implicit-def dead $x2, implicit $x2
$x10 = ADDI %stack.0.local0, 0
- PseudoCALL target-flags(riscv-plt) @extern, csr_ilp32d_lp64d, implicit-def dead $x1, implicit killed $x10, implicit-def $x2
+ PseudoCALL target-flags(riscv-call) @extern, csr_ilp32d_lp64d, implicit-def dead $x1, implicit killed $x10, implicit-def $x2
ADJCALLSTACKUP 0, 0, implicit-def dead $x2, implicit $x2
PseudoRET
diff --git a/llvm/test/CodeGen/RISCV/rvv/calling-conv-fastcc.ll b/llvm/test/CodeGen/RISCV/rvv/calling-conv-fastcc.ll
index 94218455a984..661b79141fee 100644
--- a/llvm/test/CodeGen/RISCV/rvv/calling-conv-fastcc.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/calling-conv-fastcc.ll
@@ -313,7 +313,7 @@ define fastcc <vscale x 32 x i32> @ret_nxv32i32_call_nxv32i32_nxv32i32_i32(<vsca
; RV32-NEXT: vs8r.v v16, (a1)
; RV32-NEXT: vmv8r.v v8, v0
; RV32-NEXT: vmv8r.v v16, v24
-; RV32-NEXT: call ext2@plt
+; RV32-NEXT: call ext2
; RV32-NEXT: addi sp, s0, -144
; RV32-NEXT: lw ra, 140(sp) # 4-byte Folded Reload
; RV32-NEXT: lw s0, 136(sp) # 4-byte Folded Reload
@@ -347,7 +347,7 @@ define fastcc <vscale x 32 x i32> @ret_nxv32i32_call_nxv32i32_nxv32i32_i32(<vsca
; RV64-NEXT: vs8r.v v16, (a1)
; RV64-NEXT: vmv8r.v v8, v0
; RV64-NEXT: vmv8r.v v16, v24
-; RV64-NEXT: call ext2@plt
+; RV64-NEXT: call ext2
; RV64-NEXT: addi sp, s0, -144
; RV64-NEXT: ld ra, 136(sp) # 8-byte Folded Reload
; RV64-NEXT: ld s0, 128(sp) # 8-byte Folded Reload
@@ -419,7 +419,7 @@ define fastcc <vscale x 32 x i32> @ret_nxv32i32_call_nxv32i32_nxv32i32_nxv32i32_
; RV32-NEXT: add a1, sp, a1
; RV32-NEXT: addi a1, a1, 128
; RV32-NEXT: vl8r.v v16, (a1) # Unknown-size Folded Reload
-; RV32-NEXT: call ext3@plt
+; RV32-NEXT: call ext3
; RV32-NEXT: addi sp, s0, -144
; RV32-NEXT: lw ra, 140(sp) # 4-byte Folded Reload
; RV32-NEXT: lw s0, 136(sp) # 4-byte Folded Reload
@@ -487,7 +487,7 @@ define fastcc <vscale x 32 x i32> @ret_nxv32i32_call_nxv32i32_nxv32i32_nxv32i32_
; RV64-NEXT: add a1, sp, a1
; RV64-NEXT: addi a1, a1, 128
; RV64-NEXT: vl8r.v v16, (a1) # Unknown-size Folded Reload
-; RV64-NEXT: call ext3@plt
+; RV64-NEXT: call ext3
; RV64-NEXT: addi sp, s0, -144
; RV64-NEXT: ld ra, 136(sp) # 8-byte Folded Reload
; RV64-NEXT: ld s0, 128(sp) # 8-byte Folded Reload
@@ -562,7 +562,7 @@ define fastcc <vscale x 32 x i32> @pass_vector_arg_indirect_stack(<vscale x 32 x
; RV32-NEXT: vs8r.v v8, (a0)
; RV32-NEXT: li a0, 0
; RV32-NEXT: vmv.v.i v16, 0
-; RV32-NEXT: call vector_arg_indirect_stack@plt
+; RV32-NEXT: call vector_arg_indirect_stack
; RV32-NEXT: addi sp, s0, -144
; RV32-NEXT: lw ra, 140(sp) # 4-byte Folded Reload
; RV32-NEXT: lw s0, 136(sp) # 4-byte Folded Reload
@@ -613,7 +613,7 @@ define fastcc <vscale x 32 x i32> @pass_vector_arg_indirect_stack(<vscale x 32 x
; RV64-NEXT: vs8r.v v8, (a0)
; RV64-NEXT: li a0, 0
; RV64-NEXT: vmv.v.i v16, 0
-; RV64-NEXT: call vector_arg_indirect_stack@plt
+; RV64-NEXT: call vector_arg_indirect_stack
; RV64-NEXT: addi sp, s0, -144
; RV64-NEXT: ld ra, 136(sp) # 8-byte Folded Reload
; RV64-NEXT: ld s0, 128(sp) # 8-byte Folded Reload
diff --git a/llvm/test/CodeGen/RISCV/rvv/calling-conv.ll b/llvm/test/CodeGen/RISCV/rvv/calling-conv.ll
index a9f5f8570950..78385a80b47e 100644
--- a/llvm/test/CodeGen/RISCV/rvv/calling-conv.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/calling-conv.ll
@@ -45,7 +45,7 @@ define <vscale x 32 x i32> @caller_scalable_vector_split_indirect(<vscale x 32 x
; RV32-NEXT: vmv.v.i v8, 0
; RV32-NEXT: addi a0, sp, 128
; RV32-NEXT: vmv.v.i v16, 0
-; RV32-NEXT: call callee_scalable_vector_split_indirect@plt
+; RV32-NEXT: call callee_scalable_vector_split_indirect
; RV32-NEXT: addi sp, s0, -144
; RV32-NEXT: lw ra, 140(sp) # 4-byte Folded Reload
; RV32-NEXT: lw s0, 136(sp) # 4-byte Folded Reload
@@ -76,7 +76,7 @@ define <vscale x 32 x i32> @caller_scalable_vector_split_indirect(<vscale x 32 x
; RV64-NEXT: vmv.v.i v8, 0
; RV64-NEXT: addi a0, sp, 128
; RV64-NEXT: vmv.v.i v16, 0
-; RV64-NEXT: call callee_scalable_vector_split_indirect@plt
+; RV64-NEXT: call callee_scalable_vector_split_indirect
; RV64-NEXT: addi sp, s0, -144
; RV64-NEXT: ld ra, 136(sp) # 8-byte Folded Reload
; RV64-NEXT: ld s0, 128(sp) # 8-byte Folded Reload
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-calling-conv-fastcc.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-calling-conv-fastcc.ll
index d531ff62a1d5..9ec15e5710f7 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-calling-conv-fastcc.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-calling-conv-fastcc.ll
@@ -254,7 +254,7 @@ define fastcc <32 x i32> @ret_v32i32_call_v32i32_v32i32_i32(<32 x i32> %x, <32 x
; LMULMAX8-NEXT: li a1, 2
; LMULMAX8-NEXT: vmv8r.v v8, v16
; LMULMAX8-NEXT: vmv8r.v v16, v24
-; LMULMAX8-NEXT: call ext2@plt
+; LMULMAX8-NEXT: call ext2
; LMULMAX8-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; LMULMAX8-NEXT: addi sp, sp, 16
; LMULMAX8-NEXT: ret
@@ -272,7 +272,7 @@ define fastcc <32 x i32> @ret_v32i32_call_v32i32_v32i32_i32(<32 x i32> %x, <32 x
; LMULMAX4-NEXT: vmv4r.v v12, v20
; LMULMAX4-NEXT: vmv4r.v v16, v28
; LMULMAX4-NEXT: vmv4r.v v20, v24
-; LMULMAX4-NEXT: call ext2@plt
+; LMULMAX4-NEXT: call ext2
; LMULMAX4-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; LMULMAX4-NEXT: addi sp, sp, 16
; LMULMAX4-NEXT: ret
@@ -300,7 +300,7 @@ define fastcc <32 x i32> @ret_v32i32_call_v32i32_v32i32_v32i32_i32(<32 x i32> %x
; LMULMAX8-NEXT: li a2, 42
; LMULMAX8-NEXT: vse32.v v8, (a3)
; LMULMAX8-NEXT: vmv.v.v v8, v24
-; LMULMAX8-NEXT: call ext3@plt
+; LMULMAX8-NEXT: call ext3
; LMULMAX8-NEXT: addi sp, s0, -256
; LMULMAX8-NEXT: ld ra, 248(sp) # 8-byte Folded Reload
; LMULMAX8-NEXT: ld s0, 240(sp) # 8-byte Folded Reload
@@ -330,7 +330,7 @@ define fastcc <32 x i32> @ret_v32i32_call_v32i32_v32i32_v32i32_i32(<32 x i32> %x
; LMULMAX4-NEXT: vse32.v v8, (a1)
; LMULMAX4-NEXT: vmv.v.v v8, v24
; LMULMAX4-NEXT: vmv.v.v v12, v28
-; LMULMAX4-NEXT: call ext3@plt
+; LMULMAX4-NEXT: call ext3
; LMULMAX4-NEXT: addi sp, s0, -256
; LMULMAX4-NEXT: ld ra, 248(sp) # 8-byte Folded Reload
; LMULMAX4-NEXT: ld s0, 240(sp) # 8-byte Folded Reload
@@ -394,7 +394,7 @@ define fastcc <32 x i32> @pass_vector_arg_indirect_stack(<32 x i32> %x, <32 x i3
; LMULMAX8-NEXT: vse32.v v8, (a0)
; LMULMAX8-NEXT: li a0, 0
; LMULMAX8-NEXT: vmv.v.i v16, 0
-; LMULMAX8-NEXT: call vector_arg_indirect_stack@plt
+; LMULMAX8-NEXT: call vector_arg_indirect_stack
; LMULMAX8-NEXT: addi sp, s0, -256
; LMULMAX8-NEXT: ld ra, 248(sp) # 8-byte Folded Reload
; LMULMAX8-NEXT: ld s0, 240(sp) # 8-byte Folded Reload
@@ -431,7 +431,7 @@ define fastcc <32 x i32> @pass_vector_arg_indirect_stack(<32 x i32> %x, <32 x i3
; LMULMAX4-NEXT: vmv.v.i v12, 0
; LMULMAX4-NEXT: vmv.v.i v16, 0
; LMULMAX4-NEXT: vmv.v.i v20, 0
-; LMULMAX4-NEXT: call vector_arg_indirect_stack@plt
+; LMULMAX4-NEXT: call vector_arg_indirect_stack
; LMULMAX4-NEXT: addi sp, s0, -256
; LMULMAX4-NEXT: ld ra, 248(sp) # 8-byte Folded Reload
; LMULMAX4-NEXT: ld s0, 240(sp) # 8-byte Folded Reload
@@ -501,7 +501,7 @@ define fastcc <32 x i32> @pass_vector_arg_direct_stack(<32 x i32> %x, <32 x i32>
; LMULMAX8-NEXT: sd a0, 0(sp)
; LMULMAX8-NEXT: li a0, 0
; LMULMAX8-NEXT: vmv.v.i v16, 0
-; LMULMAX8-NEXT: call vector_arg_direct_stack@plt
+; LMULMAX8-NEXT: call vector_arg_direct_stack
; LMULMAX8-NEXT: ld ra, 152(sp) # 8-byte Folded Reload
; LMULMAX8-NEXT: addi sp, sp, 160
; LMULMAX8-NEXT: ret
@@ -538,7 +538,7 @@ define fastcc <32 x i32> @pass_vector_arg_direct_stack(<32 x i32> %x, <32 x i32>
; LMULMAX4-NEXT: vmv.v.i v12, 0
; LMULMAX4-NEXT: vmv.v.i v16, 0
; LMULMAX4-NEXT: vmv.v.i v20, 0
-; LMULMAX4-NEXT: call vector_arg_direct_stack@plt
+; LMULMAX4-NEXT: call vector_arg_direct_stack
; LMULMAX4-NEXT: ld ra, 152(sp) # 8-byte Folded Reload
; LMULMAX4-NEXT: addi sp, sp, 160
; LMULMAX4-NEXT: ret
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-calling-conv.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-calling-conv.ll
index 775fd4984b89..8e3a432b8ac8 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-calling-conv.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-calling-conv.ll
@@ -693,7 +693,7 @@ define <32 x i32> @ret_v32i32_call_v32i32_v32i32_i32(<32 x i32> %x, <32 x i32> %
; LMULMAX8-NEXT: li a1, 2
; LMULMAX8-NEXT: vmv8r.v v8, v16
; LMULMAX8-NEXT: vmv8r.v v16, v24
-; LMULMAX8-NEXT: call ext2@plt
+; LMULMAX8-NEXT: call ext2
; LMULMAX8-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; LMULMAX8-NEXT: addi sp, sp, 16
; LMULMAX8-NEXT: ret
@@ -711,7 +711,7 @@ define <32 x i32> @ret_v32i32_call_v32i32_v32i32_i32(<32 x i32> %x, <32 x i32> %
; LMULMAX4-NEXT: vmv4r.v v12, v20
; LMULMAX4-NEXT: vmv4r.v v16, v28
; LMULMAX4-NEXT: vmv4r.v v20, v24
-; LMULMAX4-NEXT: call ext2@plt
+; LMULMAX4-NEXT: call ext2
; LMULMAX4-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; LMULMAX4-NEXT: addi sp, sp, 16
; LMULMAX4-NEXT: ret
@@ -735,7 +735,7 @@ define <32 x i32> @ret_v32i32_call_v32i32_v32i32_i32(<32 x i32> %x, <32 x i32> %
; LMULMAX2-NEXT: vmv2r.v v18, v28
; LMULMAX2-NEXT: vmv2r.v v20, v26
; LMULMAX2-NEXT: vmv2r.v v22, v24
-; LMULMAX2-NEXT: call ext2@plt
+; LMULMAX2-NEXT: call ext2
; LMULMAX2-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; LMULMAX2-NEXT: addi sp, sp, 16
; LMULMAX2-NEXT: ret
@@ -771,7 +771,7 @@ define <32 x i32> @ret_v32i32_call_v32i32_v32i32_i32(<32 x i32> %x, <32 x i32> %
; LMULMAX1-NEXT: vmv1r.v v21, v26
; LMULMAX1-NEXT: vmv1r.v v22, v25
; LMULMAX1-NEXT: vmv1r.v v23, v24
-; LMULMAX1-NEXT: call ext2@plt
+; LMULMAX1-NEXT: call ext2
; LMULMAX1-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; LMULMAX1-NEXT: addi sp, sp, 16
; LMULMAX1-NEXT: ret
@@ -799,7 +799,7 @@ define <32 x i32> @ret_v32i32_call_v32i32_v32i32_v32i32_i32(<32 x i32> %x, <32 x
; LMULMAX8-NEXT: li a2, 42
; LMULMAX8-NEXT: vse32.v v8, (a3)
; LMULMAX8-NEXT: vmv.v.v v8, v24
-; LMULMAX8-NEXT: call ext3@plt
+; LMULMAX8-NEXT: call ext3
; LMULMAX8-NEXT: addi sp, s0, -256
; LMULMAX8-NEXT: ld ra, 248(sp) # 8-byte Folded Reload
; LMULMAX8-NEXT: ld s0, 240(sp) # 8-byte Folded Reload
@@ -829,7 +829,7 @@ define <32 x i32> @ret_v32i32_call_v32i32_v32i32_v32i32_i32(<32 x i32> %x, <32 x
; LMULMAX4-NEXT: vse32.v v8, (a1)
; LMULMAX4-NEXT: vmv.v.v v8, v24
; LMULMAX4-NEXT: vmv.v.v v12, v28
-; LMULMAX4-NEXT: call ext3@plt
+; LMULMAX4-NEXT: call ext3
; LMULMAX4-NEXT: addi sp, s0, -256
; LMULMAX4-NEXT: ld ra, 248(sp) # 8-byte Folded Reload
; LMULMAX4-NEXT: ld s0, 240(sp) # 8-byte Folded Reload
@@ -869,7 +869,7 @@ define <32 x i32> @ret_v32i32_call_v32i32_v32i32_v32i32_i32(<32 x i32> %x, <32 x
; LMULMAX2-NEXT: vmv.v.v v10, v26
; LMULMAX2-NEXT: vmv.v.v v12, v28
; LMULMAX2-NEXT: vmv.v.v v14, v30
-; LMULMAX2-NEXT: call ext3@plt
+; LMULMAX2-NEXT: call ext3
; LMULMAX2-NEXT: addi sp, s0, -256
; LMULMAX2-NEXT: ld ra, 248(sp) # 8-byte Folded Reload
; LMULMAX2-NEXT: ld s0, 240(sp) # 8-byte Folded Reload
@@ -936,7 +936,7 @@ define <32 x i32> @ret_v32i32_call_v32i32_v32i32_v32i32_i32(<32 x i32> %x, <32 x
; LMULMAX1-NEXT: vmv.v.v v13, v29
; LMULMAX1-NEXT: vmv.v.v v14, v30
; LMULMAX1-NEXT: vmv.v.v v15, v31
-; LMULMAX1-NEXT: call ext3@plt
+; LMULMAX1-NEXT: call ext3
; LMULMAX1-NEXT: addi sp, sp, 16
; LMULMAX1-NEXT: addi sp, s0, -256
; LMULMAX1-NEXT: ld ra, 248(sp) # 8-byte Folded Reload
@@ -1043,7 +1043,7 @@ define <32 x i32> @call_split_vector_args(ptr %pa, ptr %pb) {
; LMULMAX8-NEXT: vmv1r.v v10, v8
; LMULMAX8-NEXT: vmv1r.v v11, v8
; LMULMAX8-NEXT: vmv1r.v v12, v8
-; LMULMAX8-NEXT: call split_vector_args@plt
+; LMULMAX8-NEXT: call split_vector_args
; LMULMAX8-NEXT: addi sp, s0, -256
; LMULMAX8-NEXT: ld ra, 248(sp) # 8-byte Folded Reload
; LMULMAX8-NEXT: ld s0, 240(sp) # 8-byte Folded Reload
@@ -1076,7 +1076,7 @@ define <32 x i32> @call_split_vector_args(ptr %pa, ptr %pb) {
; LMULMAX4-NEXT: vmv1r.v v10, v8
; LMULMAX4-NEXT: vmv1r.v v11, v8
; LMULMAX4-NEXT: vmv1r.v v12, v8
-; LMULMAX4-NEXT: call split_vector_args@plt
+; LMULMAX4-NEXT: call split_vector_args
; LMULMAX4-NEXT: addi sp, s0, -256
; LMULMAX4-NEXT: ld ra, 248(sp) # 8-byte Folded Reload
; LMULMAX4-NEXT: ld s0, 240(sp) # 8-byte Folded Reload
@@ -1116,7 +1116,7 @@ define <32 x i32> @call_split_vector_args(ptr %pa, ptr %pb) {
; LMULMAX2-NEXT: vmv1r.v v11, v8
; LMULMAX2-NEXT: vmv1r.v v12, v8
; LMULMAX2-NEXT: vmv.v.v v22, v14
-; LMULMAX2-NEXT: call split_vector_args@plt
+; LMULMAX2-NEXT: call split_vector_args
; LMULMAX2-NEXT: addi sp, s0, -128
; LMULMAX2-NEXT: ld ra, 120(sp) # 8-byte Folded Reload
; LMULMAX2-NEXT: ld s0, 112(sp) # 8-byte Folded Reload
@@ -1170,7 +1170,7 @@ define <32 x i32> @call_split_vector_args(ptr %pa, ptr %pb) {
; LMULMAX1-NEXT: vmv.v.v v21, v13
; LMULMAX1-NEXT: vmv.v.v v22, v14
; LMULMAX1-NEXT: vmv.v.v v23, v15
-; LMULMAX1-NEXT: call split_vector_args@plt
+; LMULMAX1-NEXT: call split_vector_args
; LMULMAX1-NEXT: addi sp, s0, -128
; LMULMAX1-NEXT: ld ra, 120(sp) # 8-byte Folded Reload
; LMULMAX1-NEXT: ld s0, 112(sp) # 8-byte Folded Reload
@@ -1273,7 +1273,7 @@ define <32 x i32> @pass_vector_arg_via_stack(<32 x i32> %x, <32 x i32> %y, <32 x
; LMULMAX8-NEXT: sd a0, 128(sp)
; LMULMAX8-NEXT: li a0, 0
; LMULMAX8-NEXT: vmv.v.i v16, 0
-; LMULMAX8-NEXT: call vector_arg_via_stack@plt
+; LMULMAX8-NEXT: call vector_arg_via_stack
; LMULMAX8-NEXT: ld ra, 136(sp) # 8-byte Folded Reload
; LMULMAX8-NEXT: addi sp, sp, 144
; LMULMAX8-NEXT: ret
@@ -1302,7 +1302,7 @@ define <32 x i32> @pass_vector_arg_via_stack(<32 x i32> %x, <32 x i32> %y, <32 x
; LMULMAX4-NEXT: vmv.v.i v12, 0
; LMULMAX4-NEXT: vmv.v.i v16, 0
; LMULMAX4-NEXT: vmv.v.i v20, 0
-; LMULMAX4-NEXT: call vector_arg_via_stack@plt
+; LMULMAX4-NEXT: call vector_arg_via_stack
; LMULMAX4-NEXT: ld ra, 136(sp) # 8-byte Folded Reload
; LMULMAX4-NEXT: addi sp, sp, 144
; LMULMAX4-NEXT: ret
@@ -1339,7 +1339,7 @@ define <32 x i32> @pass_vector_arg_via_stack(<32 x i32> %x, <32 x i32> %y, <32 x
; LMULMAX2-NEXT: vmv.v.i v18, 0
; LMULMAX2-NEXT: vmv.v.i v20, 0
; LMULMAX2-NEXT: vmv.v.i v22, 0
-; LMULMAX2-NEXT: call vector_arg_via_stack@plt
+; LMULMAX2-NEXT: call vector_arg_via_stack
; LMULMAX2-NEXT: ld ra, 136(sp) # 8-byte Folded Reload
; LMULMAX2-NEXT: addi sp, sp, 144
; LMULMAX2-NEXT: ret
@@ -1392,7 +1392,7 @@ define <32 x i32> @pass_vector_arg_via_stack(<32 x i32> %x, <32 x i32> %y, <32 x
; LMULMAX1-NEXT: vmv.v.i v21, 0
; LMULMAX1-NEXT: vmv.v.i v22, 0
; LMULMAX1-NEXT: vmv.v.i v23, 0
-; LMULMAX1-NEXT: call vector_arg_via_stack@plt
+; LMULMAX1-NEXT: call vector_arg_via_stack
; LMULMAX1-NEXT: ld ra, 136(sp) # 8-byte Folded Reload
; LMULMAX1-NEXT: addi sp, sp, 144
; LMULMAX1-NEXT: ret
@@ -1447,7 +1447,7 @@ define <4 x i1> @pass_vector_mask_arg_via_stack(<4 x i1> %v) {
; LMULMAX8-NEXT: li a3, 0
; LMULMAX8-NEXT: li a4, 0
; LMULMAX8-NEXT: vmv8r.v v16, v8
-; LMULMAX8-NEXT: call vector_mask_arg_via_stack@plt
+; LMULMAX8-NEXT: call vector_mask_arg_via_stack
; LMULMAX8-NEXT: ld ra, 152(sp) # 8-byte Folded Reload
; LMULMAX8-NEXT: addi sp, sp, 160
; LMULMAX8-NEXT: ret
@@ -1487,7 +1487,7 @@ define <4 x i1> @pass_vector_mask_arg_via_stack(<4 x i1> %v) {
; LMULMAX4-NEXT: vmv4r.v v12, v8
; LMULMAX4-NEXT: vmv4r.v v16, v8
; LMULMAX4-NEXT: vmv4r.v v20, v8
-; LMULMAX4-NEXT: call vector_mask_arg_via_stack@plt
+; LMULMAX4-NEXT: call vector_mask_arg_via_stack
; LMULMAX4-NEXT: ld ra, 152(sp) # 8-byte Folded Reload
; LMULMAX4-NEXT: addi sp, sp, 160
; LMULMAX4-NEXT: ret
@@ -1535,7 +1535,7 @@ define <4 x i1> @pass_vector_mask_arg_via_stack(<4 x i1> %v) {
; LMULMAX2-NEXT: vmv2r.v v18, v8
; LMULMAX2-NEXT: vmv2r.v v20, v8
; LMULMAX2-NEXT: vmv2r.v v22, v8
-; LMULMAX2-NEXT: call vector_mask_arg_via_stack@plt
+; LMULMAX2-NEXT: call vector_mask_arg_via_stack
; LMULMAX2-NEXT: ld ra, 152(sp) # 8-byte Folded Reload
; LMULMAX2-NEXT: addi sp, sp, 160
; LMULMAX2-NEXT: ret
@@ -1599,7 +1599,7 @@ define <4 x i1> @pass_vector_mask_arg_via_stack(<4 x i1> %v) {
; LMULMAX1-NEXT: vmv1r.v v21, v8
; LMULMAX1-NEXT: vmv1r.v v22, v8
; LMULMAX1-NEXT: vmv1r.v v23, v8
-; LMULMAX1-NEXT: call vector_mask_arg_via_stack@plt
+; LMULMAX1-NEXT: call vector_mask_arg_via_stack
; LMULMAX1-NEXT: ld ra, 152(sp) # 8-byte Folded Reload
; LMULMAX1-NEXT: addi sp, sp, 160
; LMULMAX1-NEXT: ret
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-emergency-slot.mir b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-emergency-slot.mir
index c261019e2e12..5fbfbc9f37ce 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-emergency-slot.mir
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-emergency-slot.mir
@@ -18,7 +18,7 @@
; CHECK-NEXT: addi a1, sp, 24
; CHECK-NEXT: vs1r.v v25, (a1) # Unknown-size Folded Spill
; CHECK-NEXT: ld a1, 0(sp)
- ; CHECK-NEXT: call fixedlen_vector_spillslot@plt
+ ; CHECK-NEXT: call fixedlen_vector_spillslot
; CHECK-NEXT: ld ra, 40(sp) # 8-byte Folded Reload
; CHECK-NEXT: addi sp, sp, 48
; CHECK-NEXT: ret
@@ -55,6 +55,6 @@ body: |
; we have to allocate a virtual register to compute it.
 ; A later run of the register scavenger won't find an available register
; either so it will have to spill one to the emergency spill slot.
- PseudoCALL target-flags(riscv-plt) @fixedlen_vector_spillslot, csr_ilp32_lp64, implicit-def $x1, implicit-def $x2, implicit $x1, implicit $x5, implicit $x6, implicit $x7, implicit $x10, implicit $x11, implicit $x12, implicit $x13, implicit $x14, implicit $x15, implicit $x16, implicit $x17, implicit $x28, implicit $x29, implicit $x30, implicit $x31
+ PseudoCALL target-flags(riscv-call) @fixedlen_vector_spillslot, csr_ilp32_lp64, implicit-def $x1, implicit-def $x2, implicit $x1, implicit $x5, implicit $x6, implicit $x7, implicit $x10, implicit $x11, implicit $x12, implicit $x13, implicit $x14, implicit $x15, implicit $x16, implicit $x17, implicit $x28, implicit $x29, implicit $x30, implicit $x31
PseudoRET
...
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-extract.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-extract.ll
index 06d1ada300a1..e969da6fd45b 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-extract.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-extract.ll
@@ -616,7 +616,7 @@ define i32 @extractelt_v32i32_idx(ptr %x, i32 zeroext %idx) nounwind {
; RV32NOM-NEXT: mv s2, a0
; RV32NOM-NEXT: andi a0, a1, 31
; RV32NOM-NEXT: li a1, 4
-; RV32NOM-NEXT: call __mulsi3@plt
+; RV32NOM-NEXT: call __mulsi3
; RV32NOM-NEXT: li a1, 32
; RV32NOM-NEXT: vsetvli zero, a1, e32, m8, ta, ma
; RV32NOM-NEXT: vle32.v v8, (s2)
@@ -666,7 +666,7 @@ define i32 @extractelt_v32i32_idx(ptr %x, i32 zeroext %idx) nounwind {
; RV64NOM-NEXT: mv s2, a0
; RV64NOM-NEXT: andi a0, a1, 31
; RV64NOM-NEXT: li a1, 4
-; RV64NOM-NEXT: call __muldi3@plt
+; RV64NOM-NEXT: call __muldi3
; RV64NOM-NEXT: li a1, 32
; RV64NOM-NEXT: vsetvli zero, a1, e32, m8, ta, ma
; RV64NOM-NEXT: vle32.v v8, (s2)
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-llrint.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-llrint.ll
index 7c5047bbdf63..d55683e653d2 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-llrint.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-llrint.ll
@@ -13,7 +13,7 @@ define <1 x i64> @llrint_v1i64_v1f32(<1 x float> %x) {
; RV32-NEXT: .cfi_offset ra, -4
; RV32-NEXT: vsetivli zero, 1, e32, mf2, ta, ma
; RV32-NEXT: vfmv.f.s fa0, v8
-; RV32-NEXT: call llrintf@plt
+; RV32-NEXT: call llrintf
; RV32-NEXT: vsetivli zero, 2, e32, mf2, ta, ma
; RV32-NEXT: vmv.v.x v8, a0
; RV32-NEXT: vslide1down.vx v8, v8, a1
@@ -49,7 +49,7 @@ define <2 x i64> @llrint_v2i64_v2f32(<2 x float> %x) {
; RV32-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
; RV32-NEXT: vsetivli zero, 1, e32, mf2, ta, ma
; RV32-NEXT: vfmv.f.s fa0, v8
-; RV32-NEXT: call llrintf@plt
+; RV32-NEXT: call llrintf
; RV32-NEXT: vsetivli zero, 4, e32, m1, ta, ma
; RV32-NEXT: vmv.v.x v8, a0
; RV32-NEXT: vslide1down.vx v8, v8, a1
@@ -62,7 +62,7 @@ define <2 x i64> @llrint_v2i64_v2f32(<2 x float> %x) {
; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
; RV32-NEXT: vslidedown.vi v8, v8, 1
; RV32-NEXT: vfmv.f.s fa0, v8
-; RV32-NEXT: call llrintf@plt
+; RV32-NEXT: call llrintf
; RV32-NEXT: vsetivli zero, 4, e32, m1, ta, ma
; RV32-NEXT: csrr a2, vlenb
; RV32-NEXT: add a2, sp, a2
@@ -112,7 +112,7 @@ define <3 x i64> @llrint_v3i64_v3f32(<3 x float> %x) {
; RV32-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
; RV32-NEXT: vsetivli zero, 1, e32, m1, ta, ma
; RV32-NEXT: vfmv.f.s fa0, v8
-; RV32-NEXT: call llrintf@plt
+; RV32-NEXT: call llrintf
; RV32-NEXT: vsetivli zero, 8, e32, m2, ta, ma
; RV32-NEXT: vmv.v.x v8, a0
; RV32-NEXT: vslide1down.vx v8, v8, a1
@@ -126,7 +126,7 @@ define <3 x i64> @llrint_v3i64_v3f32(<3 x float> %x) {
; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
; RV32-NEXT: vslidedown.vi v8, v8, 1
; RV32-NEXT: vfmv.f.s fa0, v8
-; RV32-NEXT: call llrintf@plt
+; RV32-NEXT: call llrintf
; RV32-NEXT: vsetivli zero, 8, e32, m2, ta, ma
; RV32-NEXT: addi a2, sp, 16
; RV32-NEXT: vl2r.v v8, (a2) # Unknown-size Folded Reload
@@ -142,7 +142,7 @@ define <3 x i64> @llrint_v3i64_v3f32(<3 x float> %x) {
; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
; RV32-NEXT: vslidedown.vi v8, v8, 2
; RV32-NEXT: vfmv.f.s fa0, v8
-; RV32-NEXT: call llrintf@plt
+; RV32-NEXT: call llrintf
; RV32-NEXT: vsetivli zero, 8, e32, m2, ta, ma
; RV32-NEXT: addi a2, sp, 16
; RV32-NEXT: vl2r.v v8, (a2) # Unknown-size Folded Reload
@@ -158,7 +158,7 @@ define <3 x i64> @llrint_v3i64_v3f32(<3 x float> %x) {
; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
; RV32-NEXT: vslidedown.vi v8, v8, 3
; RV32-NEXT: vfmv.f.s fa0, v8
-; RV32-NEXT: call llrintf@plt
+; RV32-NEXT: call llrintf
; RV32-NEXT: vsetivli zero, 8, e32, m2, ta, ma
; RV32-NEXT: addi a2, sp, 16
; RV32-NEXT: vl2r.v v8, (a2) # Unknown-size Folded Reload
@@ -218,7 +218,7 @@ define <4 x i64> @llrint_v4i64_v4f32(<4 x float> %x) {
; RV32-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
; RV32-NEXT: vsetivli zero, 1, e32, m1, ta, ma
; RV32-NEXT: vfmv.f.s fa0, v8
-; RV32-NEXT: call llrintf@plt
+; RV32-NEXT: call llrintf
; RV32-NEXT: vsetivli zero, 8, e32, m2, ta, ma
; RV32-NEXT: vmv.v.x v8, a0
; RV32-NEXT: vslide1down.vx v8, v8, a1
@@ -232,7 +232,7 @@ define <4 x i64> @llrint_v4i64_v4f32(<4 x float> %x) {
; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
; RV32-NEXT: vslidedown.vi v8, v8, 1
; RV32-NEXT: vfmv.f.s fa0, v8
-; RV32-NEXT: call llrintf@plt
+; RV32-NEXT: call llrintf
; RV32-NEXT: vsetivli zero, 8, e32, m2, ta, ma
; RV32-NEXT: addi a2, sp, 16
; RV32-NEXT: vl2r.v v8, (a2) # Unknown-size Folded Reload
@@ -248,7 +248,7 @@ define <4 x i64> @llrint_v4i64_v4f32(<4 x float> %x) {
; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
; RV32-NEXT: vslidedown.vi v8, v8, 2
; RV32-NEXT: vfmv.f.s fa0, v8
-; RV32-NEXT: call llrintf@plt
+; RV32-NEXT: call llrintf
; RV32-NEXT: vsetivli zero, 8, e32, m2, ta, ma
; RV32-NEXT: addi a2, sp, 16
; RV32-NEXT: vl2r.v v8, (a2) # Unknown-size Folded Reload
@@ -264,7 +264,7 @@ define <4 x i64> @llrint_v4i64_v4f32(<4 x float> %x) {
; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
; RV32-NEXT: vslidedown.vi v8, v8, 3
; RV32-NEXT: vfmv.f.s fa0, v8
-; RV32-NEXT: call llrintf@plt
+; RV32-NEXT: call llrintf
; RV32-NEXT: vsetivli zero, 8, e32, m2, ta, ma
; RV32-NEXT: addi a2, sp, 16
; RV32-NEXT: vl2r.v v8, (a2) # Unknown-size Folded Reload
@@ -325,7 +325,7 @@ define <8 x i64> @llrint_v8i64_v8f32(<8 x float> %x) {
; RV32-NEXT: vs2r.v v8, (a0) # Unknown-size Folded Spill
; RV32-NEXT: vsetivli zero, 1, e32, m1, ta, ma
; RV32-NEXT: vfmv.f.s fa0, v8
-; RV32-NEXT: call llrintf@plt
+; RV32-NEXT: call llrintf
; RV32-NEXT: sw a1, 68(sp)
; RV32-NEXT: sw a0, 64(sp)
; RV32-NEXT: vsetivli zero, 1, e32, m2, ta, ma
@@ -333,7 +333,7 @@ define <8 x i64> @llrint_v8i64_v8f32(<8 x float> %x) {
; RV32-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload
; RV32-NEXT: vslidedown.vi v8, v8, 7
; RV32-NEXT: vfmv.f.s fa0, v8
-; RV32-NEXT: call llrintf@plt
+; RV32-NEXT: call llrintf
; RV32-NEXT: sw a1, 124(sp)
; RV32-NEXT: sw a0, 120(sp)
; RV32-NEXT: vsetivli zero, 1, e32, m2, ta, ma
@@ -341,7 +341,7 @@ define <8 x i64> @llrint_v8i64_v8f32(<8 x float> %x) {
; RV32-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload
; RV32-NEXT: vslidedown.vi v8, v8, 6
; RV32-NEXT: vfmv.f.s fa0, v8
-; RV32-NEXT: call llrintf@plt
+; RV32-NEXT: call llrintf
; RV32-NEXT: sw a1, 116(sp)
; RV32-NEXT: sw a0, 112(sp)
; RV32-NEXT: vsetivli zero, 1, e32, m2, ta, ma
@@ -349,7 +349,7 @@ define <8 x i64> @llrint_v8i64_v8f32(<8 x float> %x) {
; RV32-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload
; RV32-NEXT: vslidedown.vi v8, v8, 5
; RV32-NEXT: vfmv.f.s fa0, v8
-; RV32-NEXT: call llrintf@plt
+; RV32-NEXT: call llrintf
; RV32-NEXT: sw a1, 108(sp)
; RV32-NEXT: sw a0, 104(sp)
; RV32-NEXT: vsetivli zero, 1, e32, m2, ta, ma
@@ -357,7 +357,7 @@ define <8 x i64> @llrint_v8i64_v8f32(<8 x float> %x) {
; RV32-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload
; RV32-NEXT: vslidedown.vi v8, v8, 4
; RV32-NEXT: vfmv.f.s fa0, v8
-; RV32-NEXT: call llrintf@plt
+; RV32-NEXT: call llrintf
; RV32-NEXT: sw a1, 100(sp)
; RV32-NEXT: sw a0, 96(sp)
; RV32-NEXT: vsetivli zero, 1, e32, m1, ta, ma
@@ -365,7 +365,7 @@ define <8 x i64> @llrint_v8i64_v8f32(<8 x float> %x) {
; RV32-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload
; RV32-NEXT: vslidedown.vi v8, v8, 3
; RV32-NEXT: vfmv.f.s fa0, v8
-; RV32-NEXT: call llrintf@plt
+; RV32-NEXT: call llrintf
; RV32-NEXT: sw a1, 92(sp)
; RV32-NEXT: sw a0, 88(sp)
; RV32-NEXT: vsetivli zero, 1, e32, m1, ta, ma
@@ -373,7 +373,7 @@ define <8 x i64> @llrint_v8i64_v8f32(<8 x float> %x) {
; RV32-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload
; RV32-NEXT: vslidedown.vi v8, v8, 2
; RV32-NEXT: vfmv.f.s fa0, v8
-; RV32-NEXT: call llrintf@plt
+; RV32-NEXT: call llrintf
; RV32-NEXT: sw a1, 84(sp)
; RV32-NEXT: sw a0, 80(sp)
; RV32-NEXT: vsetivli zero, 1, e32, m1, ta, ma
@@ -381,7 +381,7 @@ define <8 x i64> @llrint_v8i64_v8f32(<8 x float> %x) {
; RV32-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload
; RV32-NEXT: vslidedown.vi v8, v8, 1
; RV32-NEXT: vfmv.f.s fa0, v8
-; RV32-NEXT: call llrintf@plt
+; RV32-NEXT: call llrintf
; RV32-NEXT: sw a1, 76(sp)
; RV32-NEXT: sw a0, 72(sp)
; RV32-NEXT: addi a0, sp, 64
@@ -471,42 +471,42 @@ define <16 x i64> @llrint_v16i64_v16f32(<16 x float> %x) {
; RV32-NEXT: vsetivli zero, 16, e32, m4, ta, ma
; RV32-NEXT: vse32.v v8, (a0)
; RV32-NEXT: flw fa0, 124(sp)
-; RV32-NEXT: call llrintf@plt
+; RV32-NEXT: call llrintf
; RV32-NEXT: sw a1, 252(sp)
; RV32-NEXT: sw a0, 248(sp)
; RV32-NEXT: flw fa0, 120(sp)
-; RV32-NEXT: call llrintf@plt
+; RV32-NEXT: call llrintf
; RV32-NEXT: sw a1, 244(sp)
; RV32-NEXT: sw a0, 240(sp)
; RV32-NEXT: flw fa0, 116(sp)
-; RV32-NEXT: call llrintf@plt
+; RV32-NEXT: call llrintf
; RV32-NEXT: sw a1, 236(sp)
; RV32-NEXT: sw a0, 232(sp)
; RV32-NEXT: flw fa0, 112(sp)
-; RV32-NEXT: call llrintf@plt
+; RV32-NEXT: call llrintf
; RV32-NEXT: sw a1, 228(sp)
; RV32-NEXT: sw a0, 224(sp)
; RV32-NEXT: flw fa0, 108(sp)
-; RV32-NEXT: call llrintf@plt
+; RV32-NEXT: call llrintf
; RV32-NEXT: sw a1, 220(sp)
; RV32-NEXT: sw a0, 216(sp)
; RV32-NEXT: flw fa0, 104(sp)
-; RV32-NEXT: call llrintf@plt
+; RV32-NEXT: call llrintf
; RV32-NEXT: sw a1, 212(sp)
; RV32-NEXT: sw a0, 208(sp)
; RV32-NEXT: flw fa0, 100(sp)
-; RV32-NEXT: call llrintf@plt
+; RV32-NEXT: call llrintf
; RV32-NEXT: sw a1, 204(sp)
; RV32-NEXT: sw a0, 200(sp)
; RV32-NEXT: flw fa0, 96(sp)
-; RV32-NEXT: call llrintf@plt
+; RV32-NEXT: call llrintf
; RV32-NEXT: sw a1, 196(sp)
; RV32-NEXT: sw a0, 192(sp)
; RV32-NEXT: vsetivli zero, 1, e32, m1, ta, ma
; RV32-NEXT: addi a0, sp, 384
; RV32-NEXT: vl4r.v v8, (a0) # Unknown-size Folded Reload
; RV32-NEXT: vfmv.f.s fa0, v8
-; RV32-NEXT: call llrintf@plt
+; RV32-NEXT: call llrintf
; RV32-NEXT: sw a1, 132(sp)
; RV32-NEXT: sw a0, 128(sp)
; RV32-NEXT: vsetivli zero, 1, e32, m1, ta, ma
@@ -514,7 +514,7 @@ define <16 x i64> @llrint_v16i64_v16f32(<16 x float> %x) {
; RV32-NEXT: vl4r.v v8, (a0) # Unknown-size Folded Reload
; RV32-NEXT: vslidedown.vi v8, v8, 3
; RV32-NEXT: vfmv.f.s fa0, v8
-; RV32-NEXT: call llrintf@plt
+; RV32-NEXT: call llrintf
; RV32-NEXT: sw a1, 156(sp)
; RV32-NEXT: sw a0, 152(sp)
; RV32-NEXT: vsetivli zero, 1, e32, m1, ta, ma
@@ -522,7 +522,7 @@ define <16 x i64> @llrint_v16i64_v16f32(<16 x float> %x) {
; RV32-NEXT: vl4r.v v8, (a0) # Unknown-size Folded Reload
; RV32-NEXT: vslidedown.vi v8, v8, 2
; RV32-NEXT: vfmv.f.s fa0, v8
-; RV32-NEXT: call llrintf@plt
+; RV32-NEXT: call llrintf
; RV32-NEXT: sw a1, 148(sp)
; RV32-NEXT: sw a0, 144(sp)
; RV32-NEXT: vsetivli zero, 1, e32, m1, ta, ma
@@ -530,7 +530,7 @@ define <16 x i64> @llrint_v16i64_v16f32(<16 x float> %x) {
; RV32-NEXT: vl4r.v v8, (a0) # Unknown-size Folded Reload
; RV32-NEXT: vslidedown.vi v8, v8, 1
; RV32-NEXT: vfmv.f.s fa0, v8
-; RV32-NEXT: call llrintf@plt
+; RV32-NEXT: call llrintf
; RV32-NEXT: sw a1, 140(sp)
; RV32-NEXT: sw a0, 136(sp)
; RV32-NEXT: vsetivli zero, 1, e32, m2, ta, ma
@@ -538,7 +538,7 @@ define <16 x i64> @llrint_v16i64_v16f32(<16 x float> %x) {
; RV32-NEXT: vl4r.v v8, (a0) # Unknown-size Folded Reload
; RV32-NEXT: vslidedown.vi v8, v8, 7
; RV32-NEXT: vfmv.f.s fa0, v8
-; RV32-NEXT: call llrintf@plt
+; RV32-NEXT: call llrintf
; RV32-NEXT: sw a1, 188(sp)
; RV32-NEXT: sw a0, 184(sp)
; RV32-NEXT: vsetivli zero, 1, e32, m2, ta, ma
@@ -546,7 +546,7 @@ define <16 x i64> @llrint_v16i64_v16f32(<16 x float> %x) {
; RV32-NEXT: vl4r.v v8, (a0) # Unknown-size Folded Reload
; RV32-NEXT: vslidedown.vi v8, v8, 6
; RV32-NEXT: vfmv.f.s fa0, v8
-; RV32-NEXT: call llrintf@plt
+; RV32-NEXT: call llrintf
; RV32-NEXT: sw a1, 180(sp)
; RV32-NEXT: sw a0, 176(sp)
; RV32-NEXT: vsetivli zero, 1, e32, m2, ta, ma
@@ -554,7 +554,7 @@ define <16 x i64> @llrint_v16i64_v16f32(<16 x float> %x) {
; RV32-NEXT: vl4r.v v8, (a0) # Unknown-size Folded Reload
; RV32-NEXT: vslidedown.vi v8, v8, 5
; RV32-NEXT: vfmv.f.s fa0, v8
-; RV32-NEXT: call llrintf@plt
+; RV32-NEXT: call llrintf
; RV32-NEXT: sw a1, 172(sp)
; RV32-NEXT: sw a0, 168(sp)
; RV32-NEXT: vsetivli zero, 1, e32, m2, ta, ma
@@ -562,7 +562,7 @@ define <16 x i64> @llrint_v16i64_v16f32(<16 x float> %x) {
; RV32-NEXT: vl4r.v v8, (a0) # Unknown-size Folded Reload
; RV32-NEXT: vslidedown.vi v8, v8, 4
; RV32-NEXT: vfmv.f.s fa0, v8
-; RV32-NEXT: call llrintf@plt
+; RV32-NEXT: call llrintf
; RV32-NEXT: sw a1, 164(sp)
; RV32-NEXT: sw a0, 160(sp)
; RV32-NEXT: li a0, 32
@@ -668,7 +668,7 @@ define <1 x i64> @llrint_v1i64_v1f64(<1 x double> %x) {
; RV32-NEXT: .cfi_offset ra, -4
; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, ma
; RV32-NEXT: vfmv.f.s fa0, v8
-; RV32-NEXT: call llrint@plt
+; RV32-NEXT: call llrint
; RV32-NEXT: vsetivli zero, 2, e32, mf2, ta, ma
; RV32-NEXT: vmv.v.x v8, a0
; RV32-NEXT: vslide1down.vx v8, v8, a1
@@ -703,7 +703,7 @@ define <2 x i64> @llrint_v2i64_v2f64(<2 x double> %x) {
; RV32-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, ma
; RV32-NEXT: vfmv.f.s fa0, v8
-; RV32-NEXT: call llrint@plt
+; RV32-NEXT: call llrint
; RV32-NEXT: vsetivli zero, 4, e32, m1, ta, ma
; RV32-NEXT: vmv.v.x v8, a0
; RV32-NEXT: vslide1down.vx v8, v8, a1
@@ -716,7 +716,7 @@ define <2 x i64> @llrint_v2i64_v2f64(<2 x double> %x) {
; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
; RV32-NEXT: vslidedown.vi v8, v8, 1
; RV32-NEXT: vfmv.f.s fa0, v8
-; RV32-NEXT: call llrint@plt
+; RV32-NEXT: call llrint
; RV32-NEXT: vsetivli zero, 4, e32, m1, ta, ma
; RV32-NEXT: csrr a2, vlenb
; RV32-NEXT: add a2, sp, a2
@@ -766,7 +766,7 @@ define <4 x i64> @llrint_v4i64_v4f64(<4 x double> %x) {
; RV32-NEXT: vs2r.v v8, (a0) # Unknown-size Folded Spill
; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, ma
; RV32-NEXT: vfmv.f.s fa0, v8
-; RV32-NEXT: call llrint@plt
+; RV32-NEXT: call llrint
; RV32-NEXT: vsetivli zero, 8, e32, m2, ta, ma
; RV32-NEXT: vmv.v.x v8, a0
; RV32-NEXT: vslide1down.vx v8, v8, a1
@@ -780,7 +780,7 @@ define <4 x i64> @llrint_v4i64_v4f64(<4 x double> %x) {
; RV32-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload
; RV32-NEXT: vslidedown.vi v8, v8, 1
; RV32-NEXT: vfmv.f.s fa0, v8
-; RV32-NEXT: call llrint@plt
+; RV32-NEXT: call llrint
; RV32-NEXT: vsetivli zero, 8, e32, m2, ta, ma
; RV32-NEXT: addi a2, sp, 16
; RV32-NEXT: vl2r.v v8, (a2) # Unknown-size Folded Reload
@@ -796,7 +796,7 @@ define <4 x i64> @llrint_v4i64_v4f64(<4 x double> %x) {
; RV32-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload
; RV32-NEXT: vslidedown.vi v8, v8, 2
; RV32-NEXT: vfmv.f.s fa0, v8
-; RV32-NEXT: call llrint@plt
+; RV32-NEXT: call llrint
; RV32-NEXT: vsetivli zero, 8, e32, m2, ta, ma
; RV32-NEXT: addi a2, sp, 16
; RV32-NEXT: vl2r.v v8, (a2) # Unknown-size Folded Reload
@@ -812,7 +812,7 @@ define <4 x i64> @llrint_v4i64_v4f64(<4 x double> %x) {
; RV32-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload
; RV32-NEXT: vslidedown.vi v8, v8, 3
; RV32-NEXT: vfmv.f.s fa0, v8
-; RV32-NEXT: call llrint@plt
+; RV32-NEXT: call llrint
; RV32-NEXT: vsetivli zero, 8, e32, m2, ta, ma
; RV32-NEXT: addi a2, sp, 16
; RV32-NEXT: vl2r.v v8, (a2) # Unknown-size Folded Reload
@@ -875,26 +875,26 @@ define <8 x i64> @llrint_v8i64_v8f64(<8 x double> %x) {
; RV32-NEXT: vsetivli zero, 8, e64, m4, ta, ma
; RV32-NEXT: vse64.v v8, (a0)
; RV32-NEXT: fld fa0, 120(sp)
-; RV32-NEXT: call llrint@plt
+; RV32-NEXT: call llrint
; RV32-NEXT: sw a1, 188(sp)
; RV32-NEXT: sw a0, 184(sp)
; RV32-NEXT: fld fa0, 112(sp)
-; RV32-NEXT: call llrint@plt
+; RV32-NEXT: call llrint
; RV32-NEXT: sw a1, 180(sp)
; RV32-NEXT: sw a0, 176(sp)
; RV32-NEXT: fld fa0, 104(sp)
-; RV32-NEXT: call llrint@plt
+; RV32-NEXT: call llrint
; RV32-NEXT: sw a1, 172(sp)
; RV32-NEXT: sw a0, 168(sp)
; RV32-NEXT: fld fa0, 96(sp)
-; RV32-NEXT: call llrint@plt
+; RV32-NEXT: call llrint
; RV32-NEXT: sw a1, 164(sp)
; RV32-NEXT: sw a0, 160(sp)
; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, ma
; RV32-NEXT: addi a0, sp, 256
; RV32-NEXT: vl4r.v v8, (a0) # Unknown-size Folded Reload
; RV32-NEXT: vfmv.f.s fa0, v8
-; RV32-NEXT: call llrint@plt
+; RV32-NEXT: call llrint
; RV32-NEXT: sw a1, 132(sp)
; RV32-NEXT: sw a0, 128(sp)
; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, ma
@@ -902,7 +902,7 @@ define <8 x i64> @llrint_v8i64_v8f64(<8 x double> %x) {
; RV32-NEXT: vl4r.v v8, (a0) # Unknown-size Folded Reload
; RV32-NEXT: vslidedown.vi v8, v8, 1
; RV32-NEXT: vfmv.f.s fa0, v8
-; RV32-NEXT: call llrint@plt
+; RV32-NEXT: call llrint
; RV32-NEXT: sw a1, 140(sp)
; RV32-NEXT: sw a0, 136(sp)
; RV32-NEXT: vsetivli zero, 1, e64, m2, ta, ma
@@ -910,7 +910,7 @@ define <8 x i64> @llrint_v8i64_v8f64(<8 x double> %x) {
; RV32-NEXT: vl4r.v v8, (a0) # Unknown-size Folded Reload
; RV32-NEXT: vslidedown.vi v8, v8, 3
; RV32-NEXT: vfmv.f.s fa0, v8
-; RV32-NEXT: call llrint@plt
+; RV32-NEXT: call llrint
; RV32-NEXT: sw a1, 156(sp)
; RV32-NEXT: sw a0, 152(sp)
; RV32-NEXT: vsetivli zero, 1, e64, m2, ta, ma
@@ -918,7 +918,7 @@ define <8 x i64> @llrint_v8i64_v8f64(<8 x double> %x) {
; RV32-NEXT: vl4r.v v8, (a0) # Unknown-size Folded Reload
; RV32-NEXT: vslidedown.vi v8, v8, 2
; RV32-NEXT: vfmv.f.s fa0, v8
-; RV32-NEXT: call llrint@plt
+; RV32-NEXT: call llrint
; RV32-NEXT: sw a1, 148(sp)
; RV32-NEXT: sw a0, 144(sp)
; RV32-NEXT: addi a0, sp, 128
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-reduction-int-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-reduction-int-vp.ll
index 4e576f12e107..34339a6f613f 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-reduction-int-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-reduction-int-vp.ll
@@ -1429,7 +1429,7 @@ define i8 @vpreduce_mul_v1i8(i8 %s, <1 x i8> %v, <1 x i1> %m, i32 zeroext %evl)
; RV32-NEXT: vmerge.vvm v8, v9, v8, v0
; RV32-NEXT: vmv.x.s a0, v8
; RV32-NEXT: mv a1, a2
-; RV32-NEXT: call __mulsi3@plt
+; RV32-NEXT: call __mulsi3
; RV32-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32-NEXT: addi sp, sp, 16
; RV32-NEXT: ret
@@ -1450,7 +1450,7 @@ define i8 @vpreduce_mul_v1i8(i8 %s, <1 x i8> %v, <1 x i1> %m, i32 zeroext %evl)
; RV64-NEXT: vmerge.vvm v8, v9, v8, v0
; RV64-NEXT: vmv.x.s a0, v8
; RV64-NEXT: mv a1, a2
-; RV64-NEXT: call __muldi3@plt
+; RV64-NEXT: call __muldi3
; RV64-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64-NEXT: addi sp, sp, 16
; RV64-NEXT: ret
@@ -1479,7 +1479,7 @@ define signext i8 @vpreduce_mul_v2i8(i8 signext %s, <2 x i8> %v, <2 x i1> %m, i3
; RV32-NEXT: vmul.vv v8, v8, v9
; RV32-NEXT: vmv.x.s a0, v8
; RV32-NEXT: mv a1, a2
-; RV32-NEXT: call __mulsi3@plt
+; RV32-NEXT: call __mulsi3
; RV32-NEXT: slli a0, a0, 24
; RV32-NEXT: srai a0, a0, 24
; RV32-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
@@ -1504,7 +1504,7 @@ define signext i8 @vpreduce_mul_v2i8(i8 signext %s, <2 x i8> %v, <2 x i1> %m, i3
; RV64-NEXT: vmul.vv v8, v8, v9
; RV64-NEXT: vmv.x.s a0, v8
; RV64-NEXT: mv a1, a2
-; RV64-NEXT: call __muldi3@plt
+; RV64-NEXT: call __muldi3
; RV64-NEXT: slli a0, a0, 56
; RV64-NEXT: srai a0, a0, 56
; RV64-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
@@ -1537,7 +1537,7 @@ define signext i8 @vpreduce_mul_v4i8(i8 signext %s, <4 x i8> %v, <4 x i1> %m, i3
; RV32-NEXT: vmul.vv v8, v8, v9
; RV32-NEXT: vmv.x.s a0, v8
; RV32-NEXT: mv a1, a2
-; RV32-NEXT: call __mulsi3@plt
+; RV32-NEXT: call __mulsi3
; RV32-NEXT: slli a0, a0, 24
; RV32-NEXT: srai a0, a0, 24
; RV32-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
@@ -1564,7 +1564,7 @@ define signext i8 @vpreduce_mul_v4i8(i8 signext %s, <4 x i8> %v, <4 x i1> %m, i3
; RV64-NEXT: vmul.vv v8, v8, v9
; RV64-NEXT: vmv.x.s a0, v8
; RV64-NEXT: mv a1, a2
-; RV64-NEXT: call __muldi3@plt
+; RV64-NEXT: call __muldi3
; RV64-NEXT: slli a0, a0, 56
; RV64-NEXT: srai a0, a0, 56
; RV64-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
@@ -1599,7 +1599,7 @@ define signext i8 @vpreduce_mul_v8i8(i8 signext %s, <8 x i8> %v, <8 x i1> %m, i3
; RV32-NEXT: vmul.vv v8, v8, v9
; RV32-NEXT: vmv.x.s a0, v8
; RV32-NEXT: mv a1, a2
-; RV32-NEXT: call __mulsi3@plt
+; RV32-NEXT: call __mulsi3
; RV32-NEXT: slli a0, a0, 24
; RV32-NEXT: srai a0, a0, 24
; RV32-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
@@ -1628,7 +1628,7 @@ define signext i8 @vpreduce_mul_v8i8(i8 signext %s, <8 x i8> %v, <8 x i1> %m, i3
; RV64-NEXT: vmul.vv v8, v8, v9
; RV64-NEXT: vmv.x.s a0, v8
; RV64-NEXT: mv a1, a2
-; RV64-NEXT: call __muldi3@plt
+; RV64-NEXT: call __muldi3
; RV64-NEXT: slli a0, a0, 56
; RV64-NEXT: srai a0, a0, 56
; RV64-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
@@ -1665,7 +1665,7 @@ define signext i8 @vpreduce_mul_v16i8(i8 signext %s, <16 x i8> %v, <16 x i1> %m,
; RV32-NEXT: vmul.vv v8, v8, v9
; RV32-NEXT: vmv.x.s a0, v8
; RV32-NEXT: mv a1, a2
-; RV32-NEXT: call __mulsi3@plt
+; RV32-NEXT: call __mulsi3
; RV32-NEXT: slli a0, a0, 24
; RV32-NEXT: srai a0, a0, 24
; RV32-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
@@ -1696,7 +1696,7 @@ define signext i8 @vpreduce_mul_v16i8(i8 signext %s, <16 x i8> %v, <16 x i1> %m,
; RV64-NEXT: vmul.vv v8, v8, v9
; RV64-NEXT: vmv.x.s a0, v8
; RV64-NEXT: mv a1, a2
-; RV64-NEXT: call __muldi3@plt
+; RV64-NEXT: call __muldi3
; RV64-NEXT: slli a0, a0, 56
; RV64-NEXT: srai a0, a0, 56
; RV64-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
@@ -1736,7 +1736,7 @@ define signext i8 @vpreduce_mul_v32i8(i8 signext %s, <32 x i8> %v, <32 x i1> %m,
; RV32-NEXT: vmul.vv v8, v8, v10
; RV32-NEXT: vmv.x.s a0, v8
; RV32-NEXT: mv a1, a2
-; RV32-NEXT: call __mulsi3@plt
+; RV32-NEXT: call __mulsi3
; RV32-NEXT: slli a0, a0, 24
; RV32-NEXT: srai a0, a0, 24
; RV32-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
@@ -1770,7 +1770,7 @@ define signext i8 @vpreduce_mul_v32i8(i8 signext %s, <32 x i8> %v, <32 x i1> %m,
; RV64-NEXT: vmul.vv v8, v8, v10
; RV64-NEXT: vmv.x.s a0, v8
; RV64-NEXT: mv a1, a2
-; RV64-NEXT: call __muldi3@plt
+; RV64-NEXT: call __muldi3
; RV64-NEXT: slli a0, a0, 56
; RV64-NEXT: srai a0, a0, 56
; RV64-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
@@ -1820,7 +1820,7 @@ define signext i8 @vpreduce_mul_v64i8(i8 signext %s, <64 x i8> %v, <64 x i1> %m,
; RV32-NEXT: vmul.vv v8, v8, v12
; RV32-NEXT: vmv.x.s a0, v8
; RV32-NEXT: mv a1, a2
-; RV32-NEXT: call __mulsi3@plt
+; RV32-NEXT: call __mulsi3
; RV32-NEXT: slli a0, a0, 24
; RV32-NEXT: srai a0, a0, 24
; RV32-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
@@ -1864,7 +1864,7 @@ define signext i8 @vpreduce_mul_v64i8(i8 signext %s, <64 x i8> %v, <64 x i1> %m,
; RV64-NEXT: vmul.vv v8, v8, v12
; RV64-NEXT: vmv.x.s a0, v8
; RV64-NEXT: mv a1, a2
-; RV64-NEXT: call __muldi3@plt
+; RV64-NEXT: call __muldi3
; RV64-NEXT: slli a0, a0, 56
; RV64-NEXT: srai a0, a0, 56
; RV64-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
diff --git a/llvm/test/CodeGen/RISCV/rvv/fpclamptosat_vec.ll b/llvm/test/CodeGen/RISCV/rvv/fpclamptosat_vec.ll
index f1a82b9e427e..783738f918d0 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fpclamptosat_vec.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fpclamptosat_vec.ll
@@ -352,17 +352,17 @@ define <4 x i32> @stest_f16i32(<4 x half> %x) {
; CHECK-NOV-NEXT: lhu a1, 16(a1)
; CHECK-NOV-NEXT: mv s0, a0
; CHECK-NOV-NEXT: fmv.w.x fa0, a1
-; CHECK-NOV-NEXT: call __extendhfsf2@plt
+; CHECK-NOV-NEXT: call __extendhfsf2
; CHECK-NOV-NEXT: fmv.s fs2, fa0
; CHECK-NOV-NEXT: fmv.w.x fa0, s3
-; CHECK-NOV-NEXT: call __extendhfsf2@plt
+; CHECK-NOV-NEXT: call __extendhfsf2
; CHECK-NOV-NEXT: fmv.s fs1, fa0
; CHECK-NOV-NEXT: fmv.w.x fa0, s2
-; CHECK-NOV-NEXT: call __extendhfsf2@plt
+; CHECK-NOV-NEXT: call __extendhfsf2
; CHECK-NOV-NEXT: fmv.s fs0, fa0
; CHECK-NOV-NEXT: fmv.w.x fa0, s1
; CHECK-NOV-NEXT: fcvt.l.s s1, fs2, rtz
-; CHECK-NOV-NEXT: call __extendhfsf2@plt
+; CHECK-NOV-NEXT: call __extendhfsf2
; CHECK-NOV-NEXT: fcvt.l.s a0, fa0, rtz
; CHECK-NOV-NEXT: lui a1, 524288
; CHECK-NOV-NEXT: addiw a4, a1, -1
@@ -446,14 +446,14 @@ define <4 x i32> @stest_f16i32(<4 x half> %x) {
; CHECK-V-NEXT: lhu s2, 0(a0)
; CHECK-V-NEXT: lhu a0, 8(a0)
; CHECK-V-NEXT: fmv.w.x fa0, a0
-; CHECK-V-NEXT: call __extendhfsf2@plt
+; CHECK-V-NEXT: call __extendhfsf2
; CHECK-V-NEXT: fcvt.l.s a0, fa0, rtz
; CHECK-V-NEXT: vsetivli zero, 1, e64, m1, ta, ma
; CHECK-V-NEXT: vmv.s.x v8, a0
; CHECK-V-NEXT: addi a0, sp, 16
; CHECK-V-NEXT: vs2r.v v8, (a0) # Unknown-size Folded Spill
; CHECK-V-NEXT: fmv.w.x fa0, s2
-; CHECK-V-NEXT: call __extendhfsf2@plt
+; CHECK-V-NEXT: call __extendhfsf2
; CHECK-V-NEXT: fcvt.l.s a0, fa0, rtz
; CHECK-V-NEXT: vsetivli zero, 2, e64, m2, tu, ma
; CHECK-V-NEXT: vmv.s.x v8, a0
@@ -466,7 +466,7 @@ define <4 x i32> @stest_f16i32(<4 x half> %x) {
; CHECK-V-NEXT: addi a0, a0, 16
; CHECK-V-NEXT: vs2r.v v8, (a0) # Unknown-size Folded Spill
; CHECK-V-NEXT: fmv.w.x fa0, s1
-; CHECK-V-NEXT: call __extendhfsf2@plt
+; CHECK-V-NEXT: call __extendhfsf2
; CHECK-V-NEXT: fcvt.l.s a0, fa0, rtz
; CHECK-V-NEXT: vsetivli zero, 3, e64, m2, tu, ma
; CHECK-V-NEXT: vmv.s.x v8, a0
@@ -482,7 +482,7 @@ define <4 x i32> @stest_f16i32(<4 x half> %x) {
; CHECK-V-NEXT: addi a0, a0, 16
; CHECK-V-NEXT: vs2r.v v10, (a0) # Unknown-size Folded Spill
; CHECK-V-NEXT: fmv.w.x fa0, s0
-; CHECK-V-NEXT: call __extendhfsf2@plt
+; CHECK-V-NEXT: call __extendhfsf2
; CHECK-V-NEXT: fcvt.l.s a0, fa0, rtz
; CHECK-V-NEXT: vsetivli zero, 4, e64, m2, ta, ma
; CHECK-V-NEXT: vmv.s.x v8, a0
@@ -540,17 +540,17 @@ define <4 x i32> @utesth_f16i32(<4 x half> %x) {
; CHECK-NOV-NEXT: lhu a1, 8(a1)
; CHECK-NOV-NEXT: mv s0, a0
; CHECK-NOV-NEXT: fmv.w.x fa0, a1
-; CHECK-NOV-NEXT: call __extendhfsf2@plt
+; CHECK-NOV-NEXT: call __extendhfsf2
; CHECK-NOV-NEXT: fmv.s fs2, fa0
; CHECK-NOV-NEXT: fmv.w.x fa0, s3
-; CHECK-NOV-NEXT: call __extendhfsf2@plt
+; CHECK-NOV-NEXT: call __extendhfsf2
; CHECK-NOV-NEXT: fmv.s fs1, fa0
; CHECK-NOV-NEXT: fmv.w.x fa0, s2
-; CHECK-NOV-NEXT: call __extendhfsf2@plt
+; CHECK-NOV-NEXT: call __extendhfsf2
; CHECK-NOV-NEXT: fmv.s fs0, fa0
; CHECK-NOV-NEXT: fmv.w.x fa0, s1
; CHECK-NOV-NEXT: fcvt.lu.s s1, fs2, rtz
-; CHECK-NOV-NEXT: call __extendhfsf2@plt
+; CHECK-NOV-NEXT: call __extendhfsf2
; CHECK-NOV-NEXT: fcvt.lu.s a0, fa0, rtz
; CHECK-NOV-NEXT: li a1, -1
; CHECK-NOV-NEXT: srli a1, a1, 32
@@ -614,14 +614,14 @@ define <4 x i32> @utesth_f16i32(<4 x half> %x) {
; CHECK-V-NEXT: lhu s2, 0(a0)
; CHECK-V-NEXT: lhu a0, 8(a0)
; CHECK-V-NEXT: fmv.w.x fa0, a0
-; CHECK-V-NEXT: call __extendhfsf2@plt
+; CHECK-V-NEXT: call __extendhfsf2
; CHECK-V-NEXT: fcvt.lu.s a0, fa0, rtz
; CHECK-V-NEXT: vsetivli zero, 1, e64, m1, ta, ma
; CHECK-V-NEXT: vmv.s.x v8, a0
; CHECK-V-NEXT: addi a0, sp, 16
; CHECK-V-NEXT: vs2r.v v8, (a0) # Unknown-size Folded Spill
; CHECK-V-NEXT: fmv.w.x fa0, s2
-; CHECK-V-NEXT: call __extendhfsf2@plt
+; CHECK-V-NEXT: call __extendhfsf2
; CHECK-V-NEXT: fcvt.lu.s a0, fa0, rtz
; CHECK-V-NEXT: vsetivli zero, 2, e64, m2, tu, ma
; CHECK-V-NEXT: vmv.s.x v8, a0
@@ -634,7 +634,7 @@ define <4 x i32> @utesth_f16i32(<4 x half> %x) {
; CHECK-V-NEXT: addi a0, a0, 16
; CHECK-V-NEXT: vs2r.v v8, (a0) # Unknown-size Folded Spill
; CHECK-V-NEXT: fmv.w.x fa0, s1
-; CHECK-V-NEXT: call __extendhfsf2@plt
+; CHECK-V-NEXT: call __extendhfsf2
; CHECK-V-NEXT: fcvt.lu.s a0, fa0, rtz
; CHECK-V-NEXT: vsetivli zero, 3, e64, m2, tu, ma
; CHECK-V-NEXT: vmv.s.x v8, a0
@@ -650,7 +650,7 @@ define <4 x i32> @utesth_f16i32(<4 x half> %x) {
; CHECK-V-NEXT: addi a0, a0, 16
; CHECK-V-NEXT: vs2r.v v10, (a0) # Unknown-size Folded Spill
; CHECK-V-NEXT: fmv.w.x fa0, s0
-; CHECK-V-NEXT: call __extendhfsf2@plt
+; CHECK-V-NEXT: call __extendhfsf2
; CHECK-V-NEXT: fcvt.lu.s a0, fa0, rtz
; CHECK-V-NEXT: vsetivli zero, 4, e64, m2, ta, ma
; CHECK-V-NEXT: vmv.s.x v8, a0
@@ -706,17 +706,17 @@ define <4 x i32> @ustest_f16i32(<4 x half> %x) {
; CHECK-NOV-NEXT: lhu a1, 8(a1)
; CHECK-NOV-NEXT: mv s0, a0
; CHECK-NOV-NEXT: fmv.w.x fa0, a1
-; CHECK-NOV-NEXT: call __extendhfsf2@plt
+; CHECK-NOV-NEXT: call __extendhfsf2
; CHECK-NOV-NEXT: fmv.s fs2, fa0
; CHECK-NOV-NEXT: fmv.w.x fa0, s3
-; CHECK-NOV-NEXT: call __extendhfsf2@plt
+; CHECK-NOV-NEXT: call __extendhfsf2
; CHECK-NOV-NEXT: fmv.s fs1, fa0
; CHECK-NOV-NEXT: fmv.w.x fa0, s2
-; CHECK-NOV-NEXT: call __extendhfsf2@plt
+; CHECK-NOV-NEXT: call __extendhfsf2
; CHECK-NOV-NEXT: fmv.s fs0, fa0
; CHECK-NOV-NEXT: fmv.w.x fa0, s1
; CHECK-NOV-NEXT: fcvt.l.s s1, fs2, rtz
-; CHECK-NOV-NEXT: call __extendhfsf2@plt
+; CHECK-NOV-NEXT: call __extendhfsf2
; CHECK-NOV-NEXT: fcvt.l.s a0, fa0, rtz
; CHECK-NOV-NEXT: li a2, -1
; CHECK-NOV-NEXT: srli a2, a2, 32
@@ -792,14 +792,14 @@ define <4 x i32> @ustest_f16i32(<4 x half> %x) {
; CHECK-V-NEXT: lhu s2, 0(a0)
; CHECK-V-NEXT: lhu a0, 8(a0)
; CHECK-V-NEXT: fmv.w.x fa0, a0
-; CHECK-V-NEXT: call __extendhfsf2@plt
+; CHECK-V-NEXT: call __extendhfsf2
; CHECK-V-NEXT: fcvt.l.s a0, fa0, rtz
; CHECK-V-NEXT: vsetivli zero, 1, e64, m1, ta, ma
; CHECK-V-NEXT: vmv.s.x v8, a0
; CHECK-V-NEXT: addi a0, sp, 16
; CHECK-V-NEXT: vs2r.v v8, (a0) # Unknown-size Folded Spill
; CHECK-V-NEXT: fmv.w.x fa0, s2
-; CHECK-V-NEXT: call __extendhfsf2@plt
+; CHECK-V-NEXT: call __extendhfsf2
; CHECK-V-NEXT: fcvt.l.s a0, fa0, rtz
; CHECK-V-NEXT: vsetivli zero, 2, e64, m2, tu, ma
; CHECK-V-NEXT: vmv.s.x v8, a0
@@ -812,7 +812,7 @@ define <4 x i32> @ustest_f16i32(<4 x half> %x) {
; CHECK-V-NEXT: addi a0, a0, 16
; CHECK-V-NEXT: vs2r.v v8, (a0) # Unknown-size Folded Spill
; CHECK-V-NEXT: fmv.w.x fa0, s1
-; CHECK-V-NEXT: call __extendhfsf2@plt
+; CHECK-V-NEXT: call __extendhfsf2
; CHECK-V-NEXT: fcvt.l.s a0, fa0, rtz
; CHECK-V-NEXT: vsetivli zero, 3, e64, m2, tu, ma
; CHECK-V-NEXT: vmv.s.x v8, a0
@@ -828,7 +828,7 @@ define <4 x i32> @ustest_f16i32(<4 x half> %x) {
; CHECK-V-NEXT: addi a0, a0, 16
; CHECK-V-NEXT: vs2r.v v10, (a0) # Unknown-size Folded Spill
; CHECK-V-NEXT: fmv.w.x fa0, s0
-; CHECK-V-NEXT: call __extendhfsf2@plt
+; CHECK-V-NEXT: call __extendhfsf2
; CHECK-V-NEXT: fcvt.l.s a0, fa0, rtz
; CHECK-V-NEXT: vsetivli zero, 4, e64, m2, ta, ma
; CHECK-V-NEXT: vmv.s.x v8, a0
@@ -1236,29 +1236,29 @@ define <8 x i16> @stest_f16i16(<8 x half> %x) {
; CHECK-NOV-NEXT: lhu a1, 48(a1)
; CHECK-NOV-NEXT: mv s0, a0
; CHECK-NOV-NEXT: fmv.w.x fa0, a1
-; CHECK-NOV-NEXT: call __extendhfsf2@plt
+; CHECK-NOV-NEXT: call __extendhfsf2
; CHECK-NOV-NEXT: fmv.s fs6, fa0
; CHECK-NOV-NEXT: fmv.w.x fa0, s7
-; CHECK-NOV-NEXT: call __extendhfsf2@plt
+; CHECK-NOV-NEXT: call __extendhfsf2
; CHECK-NOV-NEXT: fmv.s fs5, fa0
; CHECK-NOV-NEXT: fmv.w.x fa0, s6
-; CHECK-NOV-NEXT: call __extendhfsf2@plt
+; CHECK-NOV-NEXT: call __extendhfsf2
; CHECK-NOV-NEXT: fmv.s fs4, fa0
; CHECK-NOV-NEXT: fmv.w.x fa0, s5
-; CHECK-NOV-NEXT: call __extendhfsf2@plt
+; CHECK-NOV-NEXT: call __extendhfsf2
; CHECK-NOV-NEXT: fmv.s fs3, fa0
; CHECK-NOV-NEXT: fmv.w.x fa0, s4
-; CHECK-NOV-NEXT: call __extendhfsf2@plt
+; CHECK-NOV-NEXT: call __extendhfsf2
; CHECK-NOV-NEXT: fmv.s fs2, fa0
; CHECK-NOV-NEXT: fmv.w.x fa0, s3
-; CHECK-NOV-NEXT: call __extendhfsf2@plt
+; CHECK-NOV-NEXT: call __extendhfsf2
; CHECK-NOV-NEXT: fmv.s fs1, fa0
; CHECK-NOV-NEXT: fmv.w.x fa0, s2
-; CHECK-NOV-NEXT: call __extendhfsf2@plt
+; CHECK-NOV-NEXT: call __extendhfsf2
; CHECK-NOV-NEXT: fmv.s fs0, fa0
; CHECK-NOV-NEXT: fmv.w.x fa0, s1
; CHECK-NOV-NEXT: fcvt.l.s s1, fs6, rtz
-; CHECK-NOV-NEXT: call __extendhfsf2@plt
+; CHECK-NOV-NEXT: call __extendhfsf2
; CHECK-NOV-NEXT: fcvt.l.s a0, fa0, rtz
; CHECK-NOV-NEXT: lui a7, 8
; CHECK-NOV-NEXT: addiw a7, a7, -1
@@ -1416,14 +1416,14 @@ define <8 x i16> @stest_f16i16(<8 x half> %x) {
; CHECK-V-NEXT: lhu s6, 0(a0)
; CHECK-V-NEXT: lhu a0, 8(a0)
; CHECK-V-NEXT: fmv.w.x fa0, a0
-; CHECK-V-NEXT: call __extendhfsf2@plt
+; CHECK-V-NEXT: call __extendhfsf2
; CHECK-V-NEXT: fcvt.l.s a0, fa0, rtz
; CHECK-V-NEXT: vsetivli zero, 1, e32, mf2, ta, ma
; CHECK-V-NEXT: vmv.s.x v8, a0
; CHECK-V-NEXT: addi a0, sp, 16
; CHECK-V-NEXT: vs2r.v v8, (a0) # Unknown-size Folded Spill
; CHECK-V-NEXT: fmv.w.x fa0, s6
-; CHECK-V-NEXT: call __extendhfsf2@plt
+; CHECK-V-NEXT: call __extendhfsf2
; CHECK-V-NEXT: fcvt.l.s a0, fa0, rtz
; CHECK-V-NEXT: vsetivli zero, 2, e32, m2, tu, ma
; CHECK-V-NEXT: vmv.s.x v8, a0
@@ -1432,7 +1432,7 @@ define <8 x i16> @stest_f16i16(<8 x half> %x) {
; CHECK-V-NEXT: vslideup.vi v8, v10, 1
; CHECK-V-NEXT: vs2r.v v8, (a0) # Unknown-size Folded Spill
; CHECK-V-NEXT: fmv.w.x fa0, s5
-; CHECK-V-NEXT: call __extendhfsf2@plt
+; CHECK-V-NEXT: call __extendhfsf2
; CHECK-V-NEXT: fcvt.l.s a0, fa0, rtz
; CHECK-V-NEXT: vsetivli zero, 3, e32, m2, tu, ma
; CHECK-V-NEXT: vmv.s.x v8, a0
@@ -1441,7 +1441,7 @@ define <8 x i16> @stest_f16i16(<8 x half> %x) {
; CHECK-V-NEXT: vslideup.vi v10, v8, 2
; CHECK-V-NEXT: vs2r.v v10, (a0) # Unknown-size Folded Spill
; CHECK-V-NEXT: fmv.w.x fa0, s4
-; CHECK-V-NEXT: call __extendhfsf2@plt
+; CHECK-V-NEXT: call __extendhfsf2
; CHECK-V-NEXT: fcvt.l.s a0, fa0, rtz
; CHECK-V-NEXT: vsetivli zero, 4, e32, m2, tu, ma
; CHECK-V-NEXT: vmv.s.x v8, a0
@@ -1450,7 +1450,7 @@ define <8 x i16> @stest_f16i16(<8 x half> %x) {
; CHECK-V-NEXT: vslideup.vi v10, v8, 3
; CHECK-V-NEXT: vs2r.v v10, (a0) # Unknown-size Folded Spill
; CHECK-V-NEXT: fmv.w.x fa0, s3
-; CHECK-V-NEXT: call __extendhfsf2@plt
+; CHECK-V-NEXT: call __extendhfsf2
; CHECK-V-NEXT: fcvt.l.s a0, fa0, rtz
; CHECK-V-NEXT: vsetivli zero, 5, e32, m2, tu, ma
; CHECK-V-NEXT: vmv.s.x v8, a0
@@ -1459,7 +1459,7 @@ define <8 x i16> @stest_f16i16(<8 x half> %x) {
; CHECK-V-NEXT: vslideup.vi v10, v8, 4
; CHECK-V-NEXT: vs2r.v v10, (a0) # Unknown-size Folded Spill
; CHECK-V-NEXT: fmv.w.x fa0, s2
-; CHECK-V-NEXT: call __extendhfsf2@plt
+; CHECK-V-NEXT: call __extendhfsf2
; CHECK-V-NEXT: fcvt.l.s a0, fa0, rtz
; CHECK-V-NEXT: vsetivli zero, 6, e32, m2, tu, ma
; CHECK-V-NEXT: vmv.s.x v8, a0
@@ -1468,7 +1468,7 @@ define <8 x i16> @stest_f16i16(<8 x half> %x) {
; CHECK-V-NEXT: vslideup.vi v10, v8, 5
; CHECK-V-NEXT: vs2r.v v10, (a0) # Unknown-size Folded Spill
; CHECK-V-NEXT: fmv.w.x fa0, s1
-; CHECK-V-NEXT: call __extendhfsf2@plt
+; CHECK-V-NEXT: call __extendhfsf2
; CHECK-V-NEXT: fcvt.l.s a0, fa0, rtz
; CHECK-V-NEXT: vsetivli zero, 7, e32, m2, tu, ma
; CHECK-V-NEXT: vmv.s.x v8, a0
@@ -1477,7 +1477,7 @@ define <8 x i16> @stest_f16i16(<8 x half> %x) {
; CHECK-V-NEXT: vslideup.vi v10, v8, 6
; CHECK-V-NEXT: vs2r.v v10, (a0) # Unknown-size Folded Spill
; CHECK-V-NEXT: fmv.w.x fa0, s0
-; CHECK-V-NEXT: call __extendhfsf2@plt
+; CHECK-V-NEXT: call __extendhfsf2
; CHECK-V-NEXT: fcvt.l.s a0, fa0, rtz
; CHECK-V-NEXT: vsetivli zero, 8, e32, m2, ta, ma
; CHECK-V-NEXT: vmv.s.x v8, a0
@@ -1556,29 +1556,29 @@ define <8 x i16> @utesth_f16i16(<8 x half> %x) {
; CHECK-NOV-NEXT: lhu a1, 8(a1)
; CHECK-NOV-NEXT: mv s0, a0
; CHECK-NOV-NEXT: fmv.w.x fa0, a1
-; CHECK-NOV-NEXT: call __extendhfsf2@plt
+; CHECK-NOV-NEXT: call __extendhfsf2
; CHECK-NOV-NEXT: fmv.s fs6, fa0
; CHECK-NOV-NEXT: fmv.w.x fa0, s7
-; CHECK-NOV-NEXT: call __extendhfsf2@plt
+; CHECK-NOV-NEXT: call __extendhfsf2
; CHECK-NOV-NEXT: fmv.s fs5, fa0
; CHECK-NOV-NEXT: fmv.w.x fa0, s6
-; CHECK-NOV-NEXT: call __extendhfsf2@plt
+; CHECK-NOV-NEXT: call __extendhfsf2
; CHECK-NOV-NEXT: fmv.s fs4, fa0
; CHECK-NOV-NEXT: fmv.w.x fa0, s5
-; CHECK-NOV-NEXT: call __extendhfsf2@plt
+; CHECK-NOV-NEXT: call __extendhfsf2
; CHECK-NOV-NEXT: fmv.s fs3, fa0
; CHECK-NOV-NEXT: fmv.w.x fa0, s4
-; CHECK-NOV-NEXT: call __extendhfsf2@plt
+; CHECK-NOV-NEXT: call __extendhfsf2
; CHECK-NOV-NEXT: fmv.s fs2, fa0
; CHECK-NOV-NEXT: fmv.w.x fa0, s3
-; CHECK-NOV-NEXT: call __extendhfsf2@plt
+; CHECK-NOV-NEXT: call __extendhfsf2
; CHECK-NOV-NEXT: fmv.s fs1, fa0
; CHECK-NOV-NEXT: fmv.w.x fa0, s2
-; CHECK-NOV-NEXT: call __extendhfsf2@plt
+; CHECK-NOV-NEXT: call __extendhfsf2
; CHECK-NOV-NEXT: fmv.s fs0, fa0
; CHECK-NOV-NEXT: fmv.w.x fa0, s1
; CHECK-NOV-NEXT: fcvt.lu.s s1, fs6, rtz
-; CHECK-NOV-NEXT: call __extendhfsf2@plt
+; CHECK-NOV-NEXT: call __extendhfsf2
; CHECK-NOV-NEXT: fcvt.lu.s a0, fa0, rtz
; CHECK-NOV-NEXT: lui a1, 16
; CHECK-NOV-NEXT: addiw a1, a1, -1
@@ -1694,14 +1694,14 @@ define <8 x i16> @utesth_f16i16(<8 x half> %x) {
; CHECK-V-NEXT: lhu s6, 0(a0)
; CHECK-V-NEXT: lhu a0, 8(a0)
; CHECK-V-NEXT: fmv.w.x fa0, a0
-; CHECK-V-NEXT: call __extendhfsf2@plt
+; CHECK-V-NEXT: call __extendhfsf2
; CHECK-V-NEXT: fcvt.lu.s a0, fa0, rtz
; CHECK-V-NEXT: vsetivli zero, 1, e32, mf2, ta, ma
; CHECK-V-NEXT: vmv.s.x v8, a0
; CHECK-V-NEXT: addi a0, sp, 16
; CHECK-V-NEXT: vs2r.v v8, (a0) # Unknown-size Folded Spill
; CHECK-V-NEXT: fmv.w.x fa0, s6
-; CHECK-V-NEXT: call __extendhfsf2@plt
+; CHECK-V-NEXT: call __extendhfsf2
; CHECK-V-NEXT: fcvt.lu.s a0, fa0, rtz
; CHECK-V-NEXT: vsetivli zero, 2, e32, m2, tu, ma
; CHECK-V-NEXT: vmv.s.x v8, a0
@@ -1710,7 +1710,7 @@ define <8 x i16> @utesth_f16i16(<8 x half> %x) {
; CHECK-V-NEXT: vslideup.vi v8, v10, 1
; CHECK-V-NEXT: vs2r.v v8, (a0) # Unknown-size Folded Spill
; CHECK-V-NEXT: fmv.w.x fa0, s5
-; CHECK-V-NEXT: call __extendhfsf2@plt
+; CHECK-V-NEXT: call __extendhfsf2
; CHECK-V-NEXT: fcvt.lu.s a0, fa0, rtz
; CHECK-V-NEXT: vsetivli zero, 3, e32, m2, tu, ma
; CHECK-V-NEXT: vmv.s.x v8, a0
@@ -1719,7 +1719,7 @@ define <8 x i16> @utesth_f16i16(<8 x half> %x) {
; CHECK-V-NEXT: vslideup.vi v10, v8, 2
; CHECK-V-NEXT: vs2r.v v10, (a0) # Unknown-size Folded Spill
; CHECK-V-NEXT: fmv.w.x fa0, s4
-; CHECK-V-NEXT: call __extendhfsf2@plt
+; CHECK-V-NEXT: call __extendhfsf2
; CHECK-V-NEXT: fcvt.lu.s a0, fa0, rtz
; CHECK-V-NEXT: vsetivli zero, 4, e32, m2, tu, ma
; CHECK-V-NEXT: vmv.s.x v8, a0
@@ -1728,7 +1728,7 @@ define <8 x i16> @utesth_f16i16(<8 x half> %x) {
; CHECK-V-NEXT: vslideup.vi v10, v8, 3
; CHECK-V-NEXT: vs2r.v v10, (a0) # Unknown-size Folded Spill
; CHECK-V-NEXT: fmv.w.x fa0, s3
-; CHECK-V-NEXT: call __extendhfsf2@plt
+; CHECK-V-NEXT: call __extendhfsf2
; CHECK-V-NEXT: fcvt.lu.s a0, fa0, rtz
; CHECK-V-NEXT: vsetivli zero, 5, e32, m2, tu, ma
; CHECK-V-NEXT: vmv.s.x v8, a0
@@ -1737,7 +1737,7 @@ define <8 x i16> @utesth_f16i16(<8 x half> %x) {
; CHECK-V-NEXT: vslideup.vi v10, v8, 4
; CHECK-V-NEXT: vs2r.v v10, (a0) # Unknown-size Folded Spill
; CHECK-V-NEXT: fmv.w.x fa0, s2
-; CHECK-V-NEXT: call __extendhfsf2@plt
+; CHECK-V-NEXT: call __extendhfsf2
; CHECK-V-NEXT: fcvt.lu.s a0, fa0, rtz
; CHECK-V-NEXT: vsetivli zero, 6, e32, m2, tu, ma
; CHECK-V-NEXT: vmv.s.x v8, a0
@@ -1746,7 +1746,7 @@ define <8 x i16> @utesth_f16i16(<8 x half> %x) {
; CHECK-V-NEXT: vslideup.vi v10, v8, 5
; CHECK-V-NEXT: vs2r.v v10, (a0) # Unknown-size Folded Spill
; CHECK-V-NEXT: fmv.w.x fa0, s1
-; CHECK-V-NEXT: call __extendhfsf2@plt
+; CHECK-V-NEXT: call __extendhfsf2
; CHECK-V-NEXT: fcvt.lu.s a0, fa0, rtz
; CHECK-V-NEXT: vsetivli zero, 7, e32, m2, tu, ma
; CHECK-V-NEXT: vmv.s.x v8, a0
@@ -1755,7 +1755,7 @@ define <8 x i16> @utesth_f16i16(<8 x half> %x) {
; CHECK-V-NEXT: vslideup.vi v10, v8, 6
; CHECK-V-NEXT: vs2r.v v10, (a0) # Unknown-size Folded Spill
; CHECK-V-NEXT: fmv.w.x fa0, s0
-; CHECK-V-NEXT: call __extendhfsf2@plt
+; CHECK-V-NEXT: call __extendhfsf2
; CHECK-V-NEXT: fcvt.lu.s a0, fa0, rtz
; CHECK-V-NEXT: vsetivli zero, 8, e32, m2, ta, ma
; CHECK-V-NEXT: vmv.s.x v8, a0
@@ -1832,29 +1832,29 @@ define <8 x i16> @ustest_f16i16(<8 x half> %x) {
; CHECK-NOV-NEXT: lhu a1, 8(a1)
; CHECK-NOV-NEXT: mv s0, a0
; CHECK-NOV-NEXT: fmv.w.x fa0, a1
-; CHECK-NOV-NEXT: call __extendhfsf2@plt
+; CHECK-NOV-NEXT: call __extendhfsf2
; CHECK-NOV-NEXT: fmv.s fs6, fa0
; CHECK-NOV-NEXT: fmv.w.x fa0, s7
-; CHECK-NOV-NEXT: call __extendhfsf2@plt
+; CHECK-NOV-NEXT: call __extendhfsf2
; CHECK-NOV-NEXT: fmv.s fs5, fa0
; CHECK-NOV-NEXT: fmv.w.x fa0, s6
-; CHECK-NOV-NEXT: call __extendhfsf2@plt
+; CHECK-NOV-NEXT: call __extendhfsf2
; CHECK-NOV-NEXT: fmv.s fs4, fa0
; CHECK-NOV-NEXT: fmv.w.x fa0, s5
-; CHECK-NOV-NEXT: call __extendhfsf2@plt
+; CHECK-NOV-NEXT: call __extendhfsf2
; CHECK-NOV-NEXT: fmv.s fs3, fa0
; CHECK-NOV-NEXT: fmv.w.x fa0, s4
-; CHECK-NOV-NEXT: call __extendhfsf2@plt
+; CHECK-NOV-NEXT: call __extendhfsf2
; CHECK-NOV-NEXT: fmv.s fs2, fa0
; CHECK-NOV-NEXT: fmv.w.x fa0, s3
-; CHECK-NOV-NEXT: call __extendhfsf2@plt
+; CHECK-NOV-NEXT: call __extendhfsf2
; CHECK-NOV-NEXT: fmv.s fs1, fa0
; CHECK-NOV-NEXT: fmv.w.x fa0, s2
-; CHECK-NOV-NEXT: call __extendhfsf2@plt
+; CHECK-NOV-NEXT: call __extendhfsf2
; CHECK-NOV-NEXT: fmv.s fs0, fa0
; CHECK-NOV-NEXT: fmv.w.x fa0, s1
; CHECK-NOV-NEXT: fcvt.l.s s1, fs6, rtz
-; CHECK-NOV-NEXT: call __extendhfsf2@plt
+; CHECK-NOV-NEXT: call __extendhfsf2
; CHECK-NOV-NEXT: fcvt.l.s a0, fa0, rtz
; CHECK-NOV-NEXT: lui a3, 16
; CHECK-NOV-NEXT: addiw a3, a3, -1
@@ -1994,14 +1994,14 @@ define <8 x i16> @ustest_f16i16(<8 x half> %x) {
; CHECK-V-NEXT: lhu s6, 0(a0)
; CHECK-V-NEXT: lhu a0, 8(a0)
; CHECK-V-NEXT: fmv.w.x fa0, a0
-; CHECK-V-NEXT: call __extendhfsf2@plt
+; CHECK-V-NEXT: call __extendhfsf2
; CHECK-V-NEXT: fcvt.l.s a0, fa0, rtz
; CHECK-V-NEXT: vsetivli zero, 1, e32, mf2, ta, ma
; CHECK-V-NEXT: vmv.s.x v8, a0
; CHECK-V-NEXT: addi a0, sp, 16
; CHECK-V-NEXT: vs2r.v v8, (a0) # Unknown-size Folded Spill
; CHECK-V-NEXT: fmv.w.x fa0, s6
-; CHECK-V-NEXT: call __extendhfsf2@plt
+; CHECK-V-NEXT: call __extendhfsf2
; CHECK-V-NEXT: fcvt.l.s a0, fa0, rtz
; CHECK-V-NEXT: vsetivli zero, 2, e32, m2, tu, ma
; CHECK-V-NEXT: vmv.s.x v8, a0
@@ -2010,7 +2010,7 @@ define <8 x i16> @ustest_f16i16(<8 x half> %x) {
; CHECK-V-NEXT: vslideup.vi v8, v10, 1
; CHECK-V-NEXT: vs2r.v v8, (a0) # Unknown-size Folded Spill
; CHECK-V-NEXT: fmv.w.x fa0, s5
-; CHECK-V-NEXT: call __extendhfsf2@plt
+; CHECK-V-NEXT: call __extendhfsf2
; CHECK-V-NEXT: fcvt.l.s a0, fa0, rtz
; CHECK-V-NEXT: vsetivli zero, 3, e32, m2, tu, ma
; CHECK-V-NEXT: vmv.s.x v8, a0
@@ -2019,7 +2019,7 @@ define <8 x i16> @ustest_f16i16(<8 x half> %x) {
; CHECK-V-NEXT: vslideup.vi v10, v8, 2
; CHECK-V-NEXT: vs2r.v v10, (a0) # Unknown-size Folded Spill
; CHECK-V-NEXT: fmv.w.x fa0, s4
-; CHECK-V-NEXT: call __extendhfsf2@plt
+; CHECK-V-NEXT: call __extendhfsf2
; CHECK-V-NEXT: fcvt.l.s a0, fa0, rtz
; CHECK-V-NEXT: vsetivli zero, 4, e32, m2, tu, ma
; CHECK-V-NEXT: vmv.s.x v8, a0
@@ -2028,7 +2028,7 @@ define <8 x i16> @ustest_f16i16(<8 x half> %x) {
; CHECK-V-NEXT: vslideup.vi v10, v8, 3
; CHECK-V-NEXT: vs2r.v v10, (a0) # Unknown-size Folded Spill
; CHECK-V-NEXT: fmv.w.x fa0, s3
-; CHECK-V-NEXT: call __extendhfsf2@plt
+; CHECK-V-NEXT: call __extendhfsf2
; CHECK-V-NEXT: fcvt.l.s a0, fa0, rtz
; CHECK-V-NEXT: vsetivli zero, 5, e32, m2, tu, ma
; CHECK-V-NEXT: vmv.s.x v8, a0
@@ -2037,7 +2037,7 @@ define <8 x i16> @ustest_f16i16(<8 x half> %x) {
; CHECK-V-NEXT: vslideup.vi v10, v8, 4
; CHECK-V-NEXT: vs2r.v v10, (a0) # Unknown-size Folded Spill
; CHECK-V-NEXT: fmv.w.x fa0, s2
-; CHECK-V-NEXT: call __extendhfsf2@plt
+; CHECK-V-NEXT: call __extendhfsf2
; CHECK-V-NEXT: fcvt.l.s a0, fa0, rtz
; CHECK-V-NEXT: vsetivli zero, 6, e32, m2, tu, ma
; CHECK-V-NEXT: vmv.s.x v8, a0
@@ -2046,7 +2046,7 @@ define <8 x i16> @ustest_f16i16(<8 x half> %x) {
; CHECK-V-NEXT: vslideup.vi v10, v8, 5
; CHECK-V-NEXT: vs2r.v v10, (a0) # Unknown-size Folded Spill
; CHECK-V-NEXT: fmv.w.x fa0, s1
-; CHECK-V-NEXT: call __extendhfsf2@plt
+; CHECK-V-NEXT: call __extendhfsf2
; CHECK-V-NEXT: fcvt.l.s a0, fa0, rtz
; CHECK-V-NEXT: vsetivli zero, 7, e32, m2, tu, ma
; CHECK-V-NEXT: vmv.s.x v8, a0
@@ -2055,7 +2055,7 @@ define <8 x i16> @ustest_f16i16(<8 x half> %x) {
; CHECK-V-NEXT: vslideup.vi v10, v8, 6
; CHECK-V-NEXT: vs2r.v v10, (a0) # Unknown-size Folded Spill
; CHECK-V-NEXT: fmv.w.x fa0, s0
-; CHECK-V-NEXT: call __extendhfsf2@plt
+; CHECK-V-NEXT: call __extendhfsf2
; CHECK-V-NEXT: fcvt.l.s a0, fa0, rtz
; CHECK-V-NEXT: vsetivli zero, 8, e32, m2, ta, ma
; CHECK-V-NEXT: vmv.s.x v8, a0
@@ -2107,11 +2107,11 @@ define <2 x i64> @stest_f64i64(<2 x double> %x) {
; CHECK-NOV-NEXT: .cfi_offset s1, -24
; CHECK-NOV-NEXT: .cfi_offset fs0, -32
; CHECK-NOV-NEXT: fmv.d fs0, fa1
-; CHECK-NOV-NEXT: call __fixdfti@plt
+; CHECK-NOV-NEXT: call __fixdfti
; CHECK-NOV-NEXT: mv s0, a0
; CHECK-NOV-NEXT: mv s1, a1
; CHECK-NOV-NEXT: fmv.d fa0, fs0
-; CHECK-NOV-NEXT: call __fixdfti@plt
+; CHECK-NOV-NEXT: call __fixdfti
; CHECK-NOV-NEXT: mv a2, a0
; CHECK-NOV-NEXT: li a0, -1
; CHECK-NOV-NEXT: srli a3, a0, 1
@@ -2192,14 +2192,14 @@ define <2 x i64> @stest_f64i64(<2 x double> %x) {
; CHECK-V-NEXT: vsetivli zero, 1, e64, m1, ta, ma
; CHECK-V-NEXT: vslidedown.vi v9, v8, 1
; CHECK-V-NEXT: vfmv.f.s fa0, v9
-; CHECK-V-NEXT: call __fixdfti@plt
+; CHECK-V-NEXT: call __fixdfti
; CHECK-V-NEXT: mv s0, a0
; CHECK-V-NEXT: mv s1, a1
; CHECK-V-NEXT: vsetivli zero, 1, e64, m1, ta, ma
; CHECK-V-NEXT: addi a0, sp, 32
; CHECK-V-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
; CHECK-V-NEXT: vfmv.f.s fa0, v8
-; CHECK-V-NEXT: call __fixdfti@plt
+; CHECK-V-NEXT: call __fixdfti
; CHECK-V-NEXT: li a2, -1
; CHECK-V-NEXT: srli a3, a2, 1
; CHECK-V-NEXT: beqz a1, .LBB18_3
@@ -2287,11 +2287,11 @@ define <2 x i64> @utest_f64i64(<2 x double> %x) {
; CHECK-NOV-NEXT: .cfi_offset s1, -24
; CHECK-NOV-NEXT: .cfi_offset fs0, -32
; CHECK-NOV-NEXT: fmv.d fs0, fa1
-; CHECK-NOV-NEXT: call __fixunsdfti@plt
+; CHECK-NOV-NEXT: call __fixunsdfti
; CHECK-NOV-NEXT: mv s0, a0
; CHECK-NOV-NEXT: mv s1, a1
; CHECK-NOV-NEXT: fmv.d fa0, fs0
-; CHECK-NOV-NEXT: call __fixunsdfti@plt
+; CHECK-NOV-NEXT: call __fixunsdfti
; CHECK-NOV-NEXT: snez a1, a1
; CHECK-NOV-NEXT: snez a2, s1
; CHECK-NOV-NEXT: addi a2, a2, -1
@@ -2325,14 +2325,14 @@ define <2 x i64> @utest_f64i64(<2 x double> %x) {
; CHECK-V-NEXT: vsetivli zero, 1, e64, m1, ta, ma
; CHECK-V-NEXT: vslidedown.vi v9, v8, 1
; CHECK-V-NEXT: vfmv.f.s fa0, v9
-; CHECK-V-NEXT: call __fixunsdfti@plt
+; CHECK-V-NEXT: call __fixunsdfti
; CHECK-V-NEXT: mv s0, a0
; CHECK-V-NEXT: mv s1, a1
; CHECK-V-NEXT: vsetivli zero, 1, e64, m1, ta, ma
; CHECK-V-NEXT: addi a0, sp, 32
; CHECK-V-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
; CHECK-V-NEXT: vfmv.f.s fa0, v8
-; CHECK-V-NEXT: call __fixunsdfti@plt
+; CHECK-V-NEXT: call __fixunsdfti
; CHECK-V-NEXT: snez a1, a1
; CHECK-V-NEXT: snez a2, s1
; CHECK-V-NEXT: addi a2, a2, -1
@@ -2373,11 +2373,11 @@ define <2 x i64> @ustest_f64i64(<2 x double> %x) {
; CHECK-NOV-NEXT: .cfi_offset s1, -24
; CHECK-NOV-NEXT: .cfi_offset fs0, -32
; CHECK-NOV-NEXT: fmv.d fs0, fa1
-; CHECK-NOV-NEXT: call __fixdfti@plt
+; CHECK-NOV-NEXT: call __fixdfti
; CHECK-NOV-NEXT: mv s0, a0
; CHECK-NOV-NEXT: mv s1, a1
; CHECK-NOV-NEXT: fmv.d fa0, fs0
-; CHECK-NOV-NEXT: call __fixdfti@plt
+; CHECK-NOV-NEXT: call __fixdfti
; CHECK-NOV-NEXT: mv a2, s1
; CHECK-NOV-NEXT: blez s1, .LBB20_2
; CHECK-NOV-NEXT: # %bb.1: # %entry
@@ -2437,14 +2437,14 @@ define <2 x i64> @ustest_f64i64(<2 x double> %x) {
; CHECK-V-NEXT: vsetivli zero, 1, e64, m1, ta, ma
; CHECK-V-NEXT: vslidedown.vi v9, v8, 1
; CHECK-V-NEXT: vfmv.f.s fa0, v9
-; CHECK-V-NEXT: call __fixdfti@plt
+; CHECK-V-NEXT: call __fixdfti
; CHECK-V-NEXT: mv s0, a0
; CHECK-V-NEXT: mv s1, a1
; CHECK-V-NEXT: vsetivli zero, 1, e64, m1, ta, ma
; CHECK-V-NEXT: addi a0, sp, 32
; CHECK-V-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
; CHECK-V-NEXT: vfmv.f.s fa0, v8
-; CHECK-V-NEXT: call __fixdfti@plt
+; CHECK-V-NEXT: call __fixdfti
; CHECK-V-NEXT: mv a2, s1
; CHECK-V-NEXT: blez s1, .LBB20_2
; CHECK-V-NEXT: # %bb.1: # %entry
@@ -2514,11 +2514,11 @@ define <2 x i64> @stest_f32i64(<2 x float> %x) {
; CHECK-NOV-NEXT: .cfi_offset s1, -24
; CHECK-NOV-NEXT: .cfi_offset fs0, -32
; CHECK-NOV-NEXT: fmv.s fs0, fa1
-; CHECK-NOV-NEXT: call __fixsfti@plt
+; CHECK-NOV-NEXT: call __fixsfti
; CHECK-NOV-NEXT: mv s0, a0
; CHECK-NOV-NEXT: mv s1, a1
; CHECK-NOV-NEXT: fmv.s fa0, fs0
-; CHECK-NOV-NEXT: call __fixsfti@plt
+; CHECK-NOV-NEXT: call __fixsfti
; CHECK-NOV-NEXT: mv a2, a0
; CHECK-NOV-NEXT: li a0, -1
; CHECK-NOV-NEXT: srli a3, a0, 1
@@ -2599,14 +2599,14 @@ define <2 x i64> @stest_f32i64(<2 x float> %x) {
; CHECK-V-NEXT: vsetivli zero, 1, e32, mf2, ta, ma
; CHECK-V-NEXT: vslidedown.vi v9, v8, 1
; CHECK-V-NEXT: vfmv.f.s fa0, v9
-; CHECK-V-NEXT: call __fixsfti@plt
+; CHECK-V-NEXT: call __fixsfti
; CHECK-V-NEXT: mv s0, a0
; CHECK-V-NEXT: mv s1, a1
; CHECK-V-NEXT: vsetivli zero, 1, e32, mf2, ta, ma
; CHECK-V-NEXT: addi a0, sp, 32
; CHECK-V-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
; CHECK-V-NEXT: vfmv.f.s fa0, v8
-; CHECK-V-NEXT: call __fixsfti@plt
+; CHECK-V-NEXT: call __fixsfti
; CHECK-V-NEXT: li a2, -1
; CHECK-V-NEXT: srli a3, a2, 1
; CHECK-V-NEXT: beqz a1, .LBB21_3
@@ -2694,11 +2694,11 @@ define <2 x i64> @utest_f32i64(<2 x float> %x) {
; CHECK-NOV-NEXT: .cfi_offset s1, -24
; CHECK-NOV-NEXT: .cfi_offset fs0, -32
; CHECK-NOV-NEXT: fmv.s fs0, fa1
-; CHECK-NOV-NEXT: call __fixunssfti@plt
+; CHECK-NOV-NEXT: call __fixunssfti
; CHECK-NOV-NEXT: mv s0, a0
; CHECK-NOV-NEXT: mv s1, a1
; CHECK-NOV-NEXT: fmv.s fa0, fs0
-; CHECK-NOV-NEXT: call __fixunssfti@plt
+; CHECK-NOV-NEXT: call __fixunssfti
; CHECK-NOV-NEXT: snez a1, a1
; CHECK-NOV-NEXT: snez a2, s1
; CHECK-NOV-NEXT: addi a2, a2, -1
@@ -2732,14 +2732,14 @@ define <2 x i64> @utest_f32i64(<2 x float> %x) {
; CHECK-V-NEXT: vsetivli zero, 1, e32, mf2, ta, ma
; CHECK-V-NEXT: vslidedown.vi v9, v8, 1
; CHECK-V-NEXT: vfmv.f.s fa0, v9
-; CHECK-V-NEXT: call __fixunssfti@plt
+; CHECK-V-NEXT: call __fixunssfti
; CHECK-V-NEXT: mv s0, a0
; CHECK-V-NEXT: mv s1, a1
; CHECK-V-NEXT: vsetivli zero, 1, e32, mf2, ta, ma
; CHECK-V-NEXT: addi a0, sp, 32
; CHECK-V-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
; CHECK-V-NEXT: vfmv.f.s fa0, v8
-; CHECK-V-NEXT: call __fixunssfti@plt
+; CHECK-V-NEXT: call __fixunssfti
; CHECK-V-NEXT: snez a1, a1
; CHECK-V-NEXT: snez a2, s1
; CHECK-V-NEXT: addi a2, a2, -1
@@ -2780,11 +2780,11 @@ define <2 x i64> @ustest_f32i64(<2 x float> %x) {
; CHECK-NOV-NEXT: .cfi_offset s1, -24
; CHECK-NOV-NEXT: .cfi_offset fs0, -32
; CHECK-NOV-NEXT: fmv.s fs0, fa1
-; CHECK-NOV-NEXT: call __fixsfti@plt
+; CHECK-NOV-NEXT: call __fixsfti
; CHECK-NOV-NEXT: mv s0, a0
; CHECK-NOV-NEXT: mv s1, a1
; CHECK-NOV-NEXT: fmv.s fa0, fs0
-; CHECK-NOV-NEXT: call __fixsfti@plt
+; CHECK-NOV-NEXT: call __fixsfti
; CHECK-NOV-NEXT: mv a2, s1
; CHECK-NOV-NEXT: blez s1, .LBB23_2
; CHECK-NOV-NEXT: # %bb.1: # %entry
@@ -2844,14 +2844,14 @@ define <2 x i64> @ustest_f32i64(<2 x float> %x) {
; CHECK-V-NEXT: vsetivli zero, 1, e32, mf2, ta, ma
; CHECK-V-NEXT: vslidedown.vi v9, v8, 1
; CHECK-V-NEXT: vfmv.f.s fa0, v9
-; CHECK-V-NEXT: call __fixsfti@plt
+; CHECK-V-NEXT: call __fixsfti
; CHECK-V-NEXT: mv s0, a0
; CHECK-V-NEXT: mv s1, a1
; CHECK-V-NEXT: vsetivli zero, 1, e32, mf2, ta, ma
; CHECK-V-NEXT: addi a0, sp, 32
; CHECK-V-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
; CHECK-V-NEXT: vfmv.f.s fa0, v8
-; CHECK-V-NEXT: call __fixsfti@plt
+; CHECK-V-NEXT: call __fixsfti
; CHECK-V-NEXT: mv a2, s1
; CHECK-V-NEXT: blez s1, .LBB23_2
; CHECK-V-NEXT: # %bb.1: # %entry
@@ -2922,13 +2922,13 @@ define <2 x i64> @stest_f16i64(<2 x half> %x) {
; CHECK-NOV-NEXT: .cfi_offset s2, -32
; CHECK-NOV-NEXT: mv s2, a1
; CHECK-NOV-NEXT: fmv.w.x fa0, a0
-; CHECK-NOV-NEXT: call __extendhfsf2@plt
-; CHECK-NOV-NEXT: call __fixsfti@plt
+; CHECK-NOV-NEXT: call __extendhfsf2
+; CHECK-NOV-NEXT: call __fixsfti
; CHECK-NOV-NEXT: mv s0, a0
; CHECK-NOV-NEXT: mv s1, a1
; CHECK-NOV-NEXT: fmv.w.x fa0, s2
-; CHECK-NOV-NEXT: call __extendhfsf2@plt
-; CHECK-NOV-NEXT: call __fixsfti@plt
+; CHECK-NOV-NEXT: call __extendhfsf2
+; CHECK-NOV-NEXT: call __fixsfti
; CHECK-NOV-NEXT: mv a2, a0
; CHECK-NOV-NEXT: li a0, -1
; CHECK-NOV-NEXT: srli a3, a0, 1
@@ -3004,13 +3004,13 @@ define <2 x i64> @stest_f16i64(<2 x half> %x) {
; CHECK-V-NEXT: .cfi_offset s2, -32
; CHECK-V-NEXT: mv s2, a1
; CHECK-V-NEXT: fmv.w.x fa0, a0
-; CHECK-V-NEXT: call __extendhfsf2@plt
-; CHECK-V-NEXT: call __fixsfti@plt
+; CHECK-V-NEXT: call __extendhfsf2
+; CHECK-V-NEXT: call __fixsfti
; CHECK-V-NEXT: mv s0, a0
; CHECK-V-NEXT: mv s1, a1
; CHECK-V-NEXT: fmv.w.x fa0, s2
-; CHECK-V-NEXT: call __extendhfsf2@plt
-; CHECK-V-NEXT: call __fixsfti@plt
+; CHECK-V-NEXT: call __extendhfsf2
+; CHECK-V-NEXT: call __fixsfti
; CHECK-V-NEXT: li a2, -1
; CHECK-V-NEXT: srli a3, a2, 1
; CHECK-V-NEXT: beqz a1, .LBB24_3
@@ -3097,13 +3097,13 @@ define <2 x i64> @utesth_f16i64(<2 x half> %x) {
; CHECK-NOV-NEXT: .cfi_offset s2, -32
; CHECK-NOV-NEXT: mv s0, a1
; CHECK-NOV-NEXT: fmv.w.x fa0, a0
-; CHECK-NOV-NEXT: call __extendhfsf2@plt
-; CHECK-NOV-NEXT: call __fixunssfti@plt
+; CHECK-NOV-NEXT: call __extendhfsf2
+; CHECK-NOV-NEXT: call __fixunssfti
; CHECK-NOV-NEXT: mv s1, a0
; CHECK-NOV-NEXT: mv s2, a1
; CHECK-NOV-NEXT: fmv.w.x fa0, s0
-; CHECK-NOV-NEXT: call __extendhfsf2@plt
-; CHECK-NOV-NEXT: call __fixunssfti@plt
+; CHECK-NOV-NEXT: call __extendhfsf2
+; CHECK-NOV-NEXT: call __fixunssfti
; CHECK-NOV-NEXT: snez a1, a1
; CHECK-NOV-NEXT: snez a2, s2
; CHECK-NOV-NEXT: addi a2, a2, -1
@@ -3132,13 +3132,13 @@ define <2 x i64> @utesth_f16i64(<2 x half> %x) {
; CHECK-V-NEXT: .cfi_offset s2, -32
; CHECK-V-NEXT: mv s0, a1
; CHECK-V-NEXT: fmv.w.x fa0, a0
-; CHECK-V-NEXT: call __extendhfsf2@plt
-; CHECK-V-NEXT: call __fixunssfti@plt
+; CHECK-V-NEXT: call __extendhfsf2
+; CHECK-V-NEXT: call __fixunssfti
; CHECK-V-NEXT: mv s1, a0
; CHECK-V-NEXT: mv s2, a1
; CHECK-V-NEXT: fmv.w.x fa0, s0
-; CHECK-V-NEXT: call __extendhfsf2@plt
-; CHECK-V-NEXT: call __fixunssfti@plt
+; CHECK-V-NEXT: call __extendhfsf2
+; CHECK-V-NEXT: call __fixunssfti
; CHECK-V-NEXT: snez a1, a1
; CHECK-V-NEXT: snez a2, s2
; CHECK-V-NEXT: addi a2, a2, -1
@@ -3178,13 +3178,13 @@ define <2 x i64> @ustest_f16i64(<2 x half> %x) {
; CHECK-NOV-NEXT: .cfi_offset s2, -32
; CHECK-NOV-NEXT: mv s2, a1
; CHECK-NOV-NEXT: fmv.w.x fa0, a0
-; CHECK-NOV-NEXT: call __extendhfsf2@plt
-; CHECK-NOV-NEXT: call __fixsfti@plt
+; CHECK-NOV-NEXT: call __extendhfsf2
+; CHECK-NOV-NEXT: call __fixsfti
; CHECK-NOV-NEXT: mv s0, a0
; CHECK-NOV-NEXT: mv s1, a1
; CHECK-NOV-NEXT: fmv.w.x fa0, s2
-; CHECK-NOV-NEXT: call __extendhfsf2@plt
-; CHECK-NOV-NEXT: call __fixsfti@plt
+; CHECK-NOV-NEXT: call __extendhfsf2
+; CHECK-NOV-NEXT: call __fixsfti
; CHECK-NOV-NEXT: mv a2, s1
; CHECK-NOV-NEXT: blez s1, .LBB26_2
; CHECK-NOV-NEXT: # %bb.1: # %entry
@@ -3239,13 +3239,13 @@ define <2 x i64> @ustest_f16i64(<2 x half> %x) {
; CHECK-V-NEXT: .cfi_offset s2, -32
; CHECK-V-NEXT: mv s2, a1
; CHECK-V-NEXT: fmv.w.x fa0, a0
-; CHECK-V-NEXT: call __extendhfsf2@plt
-; CHECK-V-NEXT: call __fixsfti@plt
+; CHECK-V-NEXT: call __extendhfsf2
+; CHECK-V-NEXT: call __fixsfti
; CHECK-V-NEXT: mv s0, a0
; CHECK-V-NEXT: mv s1, a1
; CHECK-V-NEXT: fmv.w.x fa0, s2
-; CHECK-V-NEXT: call __extendhfsf2@plt
-; CHECK-V-NEXT: call __fixsfti@plt
+; CHECK-V-NEXT: call __extendhfsf2
+; CHECK-V-NEXT: call __fixsfti
; CHECK-V-NEXT: mv a2, s1
; CHECK-V-NEXT: blez s1, .LBB26_2
; CHECK-V-NEXT: # %bb.1: # %entry
@@ -3639,17 +3639,17 @@ define <4 x i32> @stest_f16i32_mm(<4 x half> %x) {
; CHECK-NOV-NEXT: lhu a1, 16(a1)
; CHECK-NOV-NEXT: mv s0, a0
; CHECK-NOV-NEXT: fmv.w.x fa0, a1
-; CHECK-NOV-NEXT: call __extendhfsf2@plt
+; CHECK-NOV-NEXT: call __extendhfsf2
; CHECK-NOV-NEXT: fmv.s fs2, fa0
; CHECK-NOV-NEXT: fmv.w.x fa0, s3
-; CHECK-NOV-NEXT: call __extendhfsf2@plt
+; CHECK-NOV-NEXT: call __extendhfsf2
; CHECK-NOV-NEXT: fmv.s fs1, fa0
; CHECK-NOV-NEXT: fmv.w.x fa0, s2
-; CHECK-NOV-NEXT: call __extendhfsf2@plt
+; CHECK-NOV-NEXT: call __extendhfsf2
; CHECK-NOV-NEXT: fmv.s fs0, fa0
; CHECK-NOV-NEXT: fmv.w.x fa0, s1
; CHECK-NOV-NEXT: fcvt.l.s s1, fs2, rtz
-; CHECK-NOV-NEXT: call __extendhfsf2@plt
+; CHECK-NOV-NEXT: call __extendhfsf2
; CHECK-NOV-NEXT: fcvt.l.s a0, fa0, rtz
; CHECK-NOV-NEXT: lui a1, 524288
; CHECK-NOV-NEXT: addiw a4, a1, -1
@@ -3733,14 +3733,14 @@ define <4 x i32> @stest_f16i32_mm(<4 x half> %x) {
; CHECK-V-NEXT: lhu s2, 0(a0)
; CHECK-V-NEXT: lhu a0, 8(a0)
; CHECK-V-NEXT: fmv.w.x fa0, a0
-; CHECK-V-NEXT: call __extendhfsf2@plt
+; CHECK-V-NEXT: call __extendhfsf2
; CHECK-V-NEXT: fcvt.l.s a0, fa0, rtz
; CHECK-V-NEXT: vsetivli zero, 1, e64, m1, ta, ma
; CHECK-V-NEXT: vmv.s.x v8, a0
; CHECK-V-NEXT: addi a0, sp, 16
; CHECK-V-NEXT: vs2r.v v8, (a0) # Unknown-size Folded Spill
; CHECK-V-NEXT: fmv.w.x fa0, s2
-; CHECK-V-NEXT: call __extendhfsf2@plt
+; CHECK-V-NEXT: call __extendhfsf2
; CHECK-V-NEXT: fcvt.l.s a0, fa0, rtz
; CHECK-V-NEXT: vsetivli zero, 2, e64, m2, tu, ma
; CHECK-V-NEXT: vmv.s.x v8, a0
@@ -3753,7 +3753,7 @@ define <4 x i32> @stest_f16i32_mm(<4 x half> %x) {
; CHECK-V-NEXT: addi a0, a0, 16
; CHECK-V-NEXT: vs2r.v v8, (a0) # Unknown-size Folded Spill
; CHECK-V-NEXT: fmv.w.x fa0, s1
-; CHECK-V-NEXT: call __extendhfsf2@plt
+; CHECK-V-NEXT: call __extendhfsf2
; CHECK-V-NEXT: fcvt.l.s a0, fa0, rtz
; CHECK-V-NEXT: vsetivli zero, 3, e64, m2, tu, ma
; CHECK-V-NEXT: vmv.s.x v8, a0
@@ -3769,7 +3769,7 @@ define <4 x i32> @stest_f16i32_mm(<4 x half> %x) {
; CHECK-V-NEXT: addi a0, a0, 16
; CHECK-V-NEXT: vs2r.v v10, (a0) # Unknown-size Folded Spill
; CHECK-V-NEXT: fmv.w.x fa0, s0
-; CHECK-V-NEXT: call __extendhfsf2@plt
+; CHECK-V-NEXT: call __extendhfsf2
; CHECK-V-NEXT: fcvt.l.s a0, fa0, rtz
; CHECK-V-NEXT: vsetivli zero, 4, e64, m2, ta, ma
; CHECK-V-NEXT: vmv.s.x v8, a0
@@ -3825,17 +3825,17 @@ define <4 x i32> @utesth_f16i32_mm(<4 x half> %x) {
; CHECK-NOV-NEXT: lhu a1, 8(a1)
; CHECK-NOV-NEXT: mv s0, a0
; CHECK-NOV-NEXT: fmv.w.x fa0, a1
-; CHECK-NOV-NEXT: call __extendhfsf2@plt
+; CHECK-NOV-NEXT: call __extendhfsf2
; CHECK-NOV-NEXT: fmv.s fs2, fa0
; CHECK-NOV-NEXT: fmv.w.x fa0, s3
-; CHECK-NOV-NEXT: call __extendhfsf2@plt
+; CHECK-NOV-NEXT: call __extendhfsf2
; CHECK-NOV-NEXT: fmv.s fs1, fa0
; CHECK-NOV-NEXT: fmv.w.x fa0, s2
-; CHECK-NOV-NEXT: call __extendhfsf2@plt
+; CHECK-NOV-NEXT: call __extendhfsf2
; CHECK-NOV-NEXT: fmv.s fs0, fa0
; CHECK-NOV-NEXT: fmv.w.x fa0, s1
; CHECK-NOV-NEXT: fcvt.lu.s s1, fs2, rtz
-; CHECK-NOV-NEXT: call __extendhfsf2@plt
+; CHECK-NOV-NEXT: call __extendhfsf2
; CHECK-NOV-NEXT: fcvt.lu.s a0, fa0, rtz
; CHECK-NOV-NEXT: li a1, -1
; CHECK-NOV-NEXT: srli a1, a1, 32
@@ -3899,14 +3899,14 @@ define <4 x i32> @utesth_f16i32_mm(<4 x half> %x) {
; CHECK-V-NEXT: lhu s2, 0(a0)
; CHECK-V-NEXT: lhu a0, 8(a0)
; CHECK-V-NEXT: fmv.w.x fa0, a0
-; CHECK-V-NEXT: call __extendhfsf2@plt
+; CHECK-V-NEXT: call __extendhfsf2
; CHECK-V-NEXT: fcvt.lu.s a0, fa0, rtz
; CHECK-V-NEXT: vsetivli zero, 1, e64, m1, ta, ma
; CHECK-V-NEXT: vmv.s.x v8, a0
; CHECK-V-NEXT: addi a0, sp, 16
; CHECK-V-NEXT: vs2r.v v8, (a0) # Unknown-size Folded Spill
; CHECK-V-NEXT: fmv.w.x fa0, s2
-; CHECK-V-NEXT: call __extendhfsf2@plt
+; CHECK-V-NEXT: call __extendhfsf2
; CHECK-V-NEXT: fcvt.lu.s a0, fa0, rtz
; CHECK-V-NEXT: vsetivli zero, 2, e64, m2, tu, ma
; CHECK-V-NEXT: vmv.s.x v8, a0
@@ -3919,7 +3919,7 @@ define <4 x i32> @utesth_f16i32_mm(<4 x half> %x) {
; CHECK-V-NEXT: addi a0, a0, 16
; CHECK-V-NEXT: vs2r.v v8, (a0) # Unknown-size Folded Spill
; CHECK-V-NEXT: fmv.w.x fa0, s1
-; CHECK-V-NEXT: call __extendhfsf2@plt
+; CHECK-V-NEXT: call __extendhfsf2
; CHECK-V-NEXT: fcvt.lu.s a0, fa0, rtz
; CHECK-V-NEXT: vsetivli zero, 3, e64, m2, tu, ma
; CHECK-V-NEXT: vmv.s.x v8, a0
@@ -3935,7 +3935,7 @@ define <4 x i32> @utesth_f16i32_mm(<4 x half> %x) {
; CHECK-V-NEXT: addi a0, a0, 16
; CHECK-V-NEXT: vs2r.v v10, (a0) # Unknown-size Folded Spill
; CHECK-V-NEXT: fmv.w.x fa0, s0
-; CHECK-V-NEXT: call __extendhfsf2@plt
+; CHECK-V-NEXT: call __extendhfsf2
; CHECK-V-NEXT: fcvt.lu.s a0, fa0, rtz
; CHECK-V-NEXT: vsetivli zero, 4, e64, m2, ta, ma
; CHECK-V-NEXT: vmv.s.x v8, a0
@@ -3990,17 +3990,17 @@ define <4 x i32> @ustest_f16i32_mm(<4 x half> %x) {
; CHECK-NOV-NEXT: lhu a1, 16(a1)
; CHECK-NOV-NEXT: mv s0, a0
; CHECK-NOV-NEXT: fmv.w.x fa0, a1
-; CHECK-NOV-NEXT: call __extendhfsf2@plt
+; CHECK-NOV-NEXT: call __extendhfsf2
; CHECK-NOV-NEXT: fmv.s fs2, fa0
; CHECK-NOV-NEXT: fmv.w.x fa0, s3
-; CHECK-NOV-NEXT: call __extendhfsf2@plt
+; CHECK-NOV-NEXT: call __extendhfsf2
; CHECK-NOV-NEXT: fmv.s fs1, fa0
; CHECK-NOV-NEXT: fmv.w.x fa0, s2
-; CHECK-NOV-NEXT: call __extendhfsf2@plt
+; CHECK-NOV-NEXT: call __extendhfsf2
; CHECK-NOV-NEXT: fmv.s fs0, fa0
; CHECK-NOV-NEXT: fmv.w.x fa0, s1
; CHECK-NOV-NEXT: fcvt.l.s s1, fs2, rtz
-; CHECK-NOV-NEXT: call __extendhfsf2@plt
+; CHECK-NOV-NEXT: call __extendhfsf2
; CHECK-NOV-NEXT: fcvt.l.s a0, fa0, rtz
; CHECK-NOV-NEXT: li a2, -1
; CHECK-NOV-NEXT: srli a2, a2, 32
@@ -4076,14 +4076,14 @@ define <4 x i32> @ustest_f16i32_mm(<4 x half> %x) {
; CHECK-V-NEXT: lhu s2, 0(a0)
; CHECK-V-NEXT: lhu a0, 8(a0)
; CHECK-V-NEXT: fmv.w.x fa0, a0
-; CHECK-V-NEXT: call __extendhfsf2@plt
+; CHECK-V-NEXT: call __extendhfsf2
; CHECK-V-NEXT: fcvt.l.s a0, fa0, rtz
; CHECK-V-NEXT: vsetivli zero, 1, e64, m1, ta, ma
; CHECK-V-NEXT: vmv.s.x v8, a0
; CHECK-V-NEXT: addi a0, sp, 16
; CHECK-V-NEXT: vs2r.v v8, (a0) # Unknown-size Folded Spill
; CHECK-V-NEXT: fmv.w.x fa0, s2
-; CHECK-V-NEXT: call __extendhfsf2@plt
+; CHECK-V-NEXT: call __extendhfsf2
; CHECK-V-NEXT: fcvt.l.s a0, fa0, rtz
; CHECK-V-NEXT: vsetivli zero, 2, e64, m2, tu, ma
; CHECK-V-NEXT: vmv.s.x v8, a0
@@ -4096,7 +4096,7 @@ define <4 x i32> @ustest_f16i32_mm(<4 x half> %x) {
; CHECK-V-NEXT: addi a0, a0, 16
; CHECK-V-NEXT: vs2r.v v8, (a0) # Unknown-size Folded Spill
; CHECK-V-NEXT: fmv.w.x fa0, s1
-; CHECK-V-NEXT: call __extendhfsf2@plt
+; CHECK-V-NEXT: call __extendhfsf2
; CHECK-V-NEXT: fcvt.l.s a0, fa0, rtz
; CHECK-V-NEXT: vsetivli zero, 3, e64, m2, tu, ma
; CHECK-V-NEXT: vmv.s.x v8, a0
@@ -4112,7 +4112,7 @@ define <4 x i32> @ustest_f16i32_mm(<4 x half> %x) {
; CHECK-V-NEXT: addi a0, a0, 16
; CHECK-V-NEXT: vs2r.v v10, (a0) # Unknown-size Folded Spill
; CHECK-V-NEXT: fmv.w.x fa0, s0
-; CHECK-V-NEXT: call __extendhfsf2@plt
+; CHECK-V-NEXT: call __extendhfsf2
; CHECK-V-NEXT: fcvt.l.s a0, fa0, rtz
; CHECK-V-NEXT: vsetivli zero, 4, e64, m2, ta, ma
; CHECK-V-NEXT: vmv.s.x v8, a0
@@ -4508,29 +4508,29 @@ define <8 x i16> @stest_f16i16_mm(<8 x half> %x) {
; CHECK-NOV-NEXT: lhu a1, 48(a1)
; CHECK-NOV-NEXT: mv s0, a0
; CHECK-NOV-NEXT: fmv.w.x fa0, a1
-; CHECK-NOV-NEXT: call __extendhfsf2@plt
+; CHECK-NOV-NEXT: call __extendhfsf2
; CHECK-NOV-NEXT: fmv.s fs6, fa0
; CHECK-NOV-NEXT: fmv.w.x fa0, s7
-; CHECK-NOV-NEXT: call __extendhfsf2@plt
+; CHECK-NOV-NEXT: call __extendhfsf2
; CHECK-NOV-NEXT: fmv.s fs5, fa0
; CHECK-NOV-NEXT: fmv.w.x fa0, s6
-; CHECK-NOV-NEXT: call __extendhfsf2@plt
+; CHECK-NOV-NEXT: call __extendhfsf2
; CHECK-NOV-NEXT: fmv.s fs4, fa0
; CHECK-NOV-NEXT: fmv.w.x fa0, s5
-; CHECK-NOV-NEXT: call __extendhfsf2@plt
+; CHECK-NOV-NEXT: call __extendhfsf2
; CHECK-NOV-NEXT: fmv.s fs3, fa0
; CHECK-NOV-NEXT: fmv.w.x fa0, s4
-; CHECK-NOV-NEXT: call __extendhfsf2@plt
+; CHECK-NOV-NEXT: call __extendhfsf2
; CHECK-NOV-NEXT: fmv.s fs2, fa0
; CHECK-NOV-NEXT: fmv.w.x fa0, s3
-; CHECK-NOV-NEXT: call __extendhfsf2@plt
+; CHECK-NOV-NEXT: call __extendhfsf2
; CHECK-NOV-NEXT: fmv.s fs1, fa0
; CHECK-NOV-NEXT: fmv.w.x fa0, s2
-; CHECK-NOV-NEXT: call __extendhfsf2@plt
+; CHECK-NOV-NEXT: call __extendhfsf2
; CHECK-NOV-NEXT: fmv.s fs0, fa0
; CHECK-NOV-NEXT: fmv.w.x fa0, s1
; CHECK-NOV-NEXT: fcvt.l.s s1, fs6, rtz
-; CHECK-NOV-NEXT: call __extendhfsf2@plt
+; CHECK-NOV-NEXT: call __extendhfsf2
; CHECK-NOV-NEXT: fcvt.l.s a0, fa0, rtz
; CHECK-NOV-NEXT: lui a7, 8
; CHECK-NOV-NEXT: addiw a7, a7, -1
@@ -4688,14 +4688,14 @@ define <8 x i16> @stest_f16i16_mm(<8 x half> %x) {
; CHECK-V-NEXT: lhu s6, 0(a0)
; CHECK-V-NEXT: lhu a0, 8(a0)
; CHECK-V-NEXT: fmv.w.x fa0, a0
-; CHECK-V-NEXT: call __extendhfsf2@plt
+; CHECK-V-NEXT: call __extendhfsf2
; CHECK-V-NEXT: fcvt.l.s a0, fa0, rtz
; CHECK-V-NEXT: vsetivli zero, 1, e32, mf2, ta, ma
; CHECK-V-NEXT: vmv.s.x v8, a0
; CHECK-V-NEXT: addi a0, sp, 16
; CHECK-V-NEXT: vs2r.v v8, (a0) # Unknown-size Folded Spill
; CHECK-V-NEXT: fmv.w.x fa0, s6
-; CHECK-V-NEXT: call __extendhfsf2@plt
+; CHECK-V-NEXT: call __extendhfsf2
; CHECK-V-NEXT: fcvt.l.s a0, fa0, rtz
; CHECK-V-NEXT: vsetivli zero, 2, e32, m2, tu, ma
; CHECK-V-NEXT: vmv.s.x v8, a0
@@ -4704,7 +4704,7 @@ define <8 x i16> @stest_f16i16_mm(<8 x half> %x) {
; CHECK-V-NEXT: vslideup.vi v8, v10, 1
; CHECK-V-NEXT: vs2r.v v8, (a0) # Unknown-size Folded Spill
; CHECK-V-NEXT: fmv.w.x fa0, s5
-; CHECK-V-NEXT: call __extendhfsf2@plt
+; CHECK-V-NEXT: call __extendhfsf2
; CHECK-V-NEXT: fcvt.l.s a0, fa0, rtz
; CHECK-V-NEXT: vsetivli zero, 3, e32, m2, tu, ma
; CHECK-V-NEXT: vmv.s.x v8, a0
@@ -4713,7 +4713,7 @@ define <8 x i16> @stest_f16i16_mm(<8 x half> %x) {
; CHECK-V-NEXT: vslideup.vi v10, v8, 2
; CHECK-V-NEXT: vs2r.v v10, (a0) # Unknown-size Folded Spill
; CHECK-V-NEXT: fmv.w.x fa0, s4
-; CHECK-V-NEXT: call __extendhfsf2@plt
+; CHECK-V-NEXT: call __extendhfsf2
; CHECK-V-NEXT: fcvt.l.s a0, fa0, rtz
; CHECK-V-NEXT: vsetivli zero, 4, e32, m2, tu, ma
; CHECK-V-NEXT: vmv.s.x v8, a0
@@ -4722,7 +4722,7 @@ define <8 x i16> @stest_f16i16_mm(<8 x half> %x) {
; CHECK-V-NEXT: vslideup.vi v10, v8, 3
; CHECK-V-NEXT: vs2r.v v10, (a0) # Unknown-size Folded Spill
; CHECK-V-NEXT: fmv.w.x fa0, s3
-; CHECK-V-NEXT: call __extendhfsf2@plt
+; CHECK-V-NEXT: call __extendhfsf2
; CHECK-V-NEXT: fcvt.l.s a0, fa0, rtz
; CHECK-V-NEXT: vsetivli zero, 5, e32, m2, tu, ma
; CHECK-V-NEXT: vmv.s.x v8, a0
@@ -4731,7 +4731,7 @@ define <8 x i16> @stest_f16i16_mm(<8 x half> %x) {
; CHECK-V-NEXT: vslideup.vi v10, v8, 4
; CHECK-V-NEXT: vs2r.v v10, (a0) # Unknown-size Folded Spill
; CHECK-V-NEXT: fmv.w.x fa0, s2
-; CHECK-V-NEXT: call __extendhfsf2@plt
+; CHECK-V-NEXT: call __extendhfsf2
; CHECK-V-NEXT: fcvt.l.s a0, fa0, rtz
; CHECK-V-NEXT: vsetivli zero, 6, e32, m2, tu, ma
; CHECK-V-NEXT: vmv.s.x v8, a0
@@ -4740,7 +4740,7 @@ define <8 x i16> @stest_f16i16_mm(<8 x half> %x) {
; CHECK-V-NEXT: vslideup.vi v10, v8, 5
; CHECK-V-NEXT: vs2r.v v10, (a0) # Unknown-size Folded Spill
; CHECK-V-NEXT: fmv.w.x fa0, s1
-; CHECK-V-NEXT: call __extendhfsf2@plt
+; CHECK-V-NEXT: call __extendhfsf2
; CHECK-V-NEXT: fcvt.l.s a0, fa0, rtz
; CHECK-V-NEXT: vsetivli zero, 7, e32, m2, tu, ma
; CHECK-V-NEXT: vmv.s.x v8, a0
@@ -4749,7 +4749,7 @@ define <8 x i16> @stest_f16i16_mm(<8 x half> %x) {
; CHECK-V-NEXT: vslideup.vi v10, v8, 6
; CHECK-V-NEXT: vs2r.v v10, (a0) # Unknown-size Folded Spill
; CHECK-V-NEXT: fmv.w.x fa0, s0
-; CHECK-V-NEXT: call __extendhfsf2@plt
+; CHECK-V-NEXT: call __extendhfsf2
; CHECK-V-NEXT: fcvt.l.s a0, fa0, rtz
; CHECK-V-NEXT: vsetivli zero, 8, e32, m2, ta, ma
; CHECK-V-NEXT: vmv.s.x v8, a0
@@ -4826,31 +4826,31 @@ define <8 x i16> @utesth_f16i16_mm(<8 x half> %x) {
; CHECK-NOV-NEXT: lhu a1, 8(a1)
; CHECK-NOV-NEXT: mv s0, a0
; CHECK-NOV-NEXT: fmv.w.x fa0, a1
-; CHECK-NOV-NEXT: call __extendhfsf2@plt
+; CHECK-NOV-NEXT: call __extendhfsf2
; CHECK-NOV-NEXT: fmv.s fs5, fa0
; CHECK-NOV-NEXT: fmv.w.x fa0, s7
-; CHECK-NOV-NEXT: call __extendhfsf2@plt
+; CHECK-NOV-NEXT: call __extendhfsf2
; CHECK-NOV-NEXT: fmv.s fs6, fa0
; CHECK-NOV-NEXT: fmv.w.x fa0, s6
-; CHECK-NOV-NEXT: call __extendhfsf2@plt
+; CHECK-NOV-NEXT: call __extendhfsf2
; CHECK-NOV-NEXT: fmv.s fs4, fa0
; CHECK-NOV-NEXT: fmv.w.x fa0, s5
-; CHECK-NOV-NEXT: call __extendhfsf2@plt
+; CHECK-NOV-NEXT: call __extendhfsf2
; CHECK-NOV-NEXT: fmv.s fs3, fa0
; CHECK-NOV-NEXT: fmv.w.x fa0, s4
-; CHECK-NOV-NEXT: call __extendhfsf2@plt
+; CHECK-NOV-NEXT: call __extendhfsf2
; CHECK-NOV-NEXT: fmv.s fs2, fa0
; CHECK-NOV-NEXT: fmv.w.x fa0, s3
-; CHECK-NOV-NEXT: call __extendhfsf2@plt
+; CHECK-NOV-NEXT: call __extendhfsf2
; CHECK-NOV-NEXT: fmv.s fs1, fa0
; CHECK-NOV-NEXT: fmv.w.x fa0, s2
-; CHECK-NOV-NEXT: call __extendhfsf2@plt
+; CHECK-NOV-NEXT: call __extendhfsf2
; CHECK-NOV-NEXT: fmv.s fs0, fa0
; CHECK-NOV-NEXT: fcvt.lu.s s2, fs6, rtz
; CHECK-NOV-NEXT: fcvt.lu.s a0, fs5, rtz
; CHECK-NOV-NEXT: fmv.w.x fa0, s1
; CHECK-NOV-NEXT: sext.w s1, a0
-; CHECK-NOV-NEXT: call __extendhfsf2@plt
+; CHECK-NOV-NEXT: call __extendhfsf2
; CHECK-NOV-NEXT: fcvt.lu.s a0, fa0, rtz
; CHECK-NOV-NEXT: sext.w a0, a0
; CHECK-NOV-NEXT: lui a1, 16
@@ -4962,14 +4962,14 @@ define <8 x i16> @utesth_f16i16_mm(<8 x half> %x) {
; CHECK-V-NEXT: lhu s6, 0(a0)
; CHECK-V-NEXT: lhu a0, 8(a0)
; CHECK-V-NEXT: fmv.w.x fa0, a0
-; CHECK-V-NEXT: call __extendhfsf2@plt
+; CHECK-V-NEXT: call __extendhfsf2
; CHECK-V-NEXT: fcvt.lu.s a0, fa0, rtz
; CHECK-V-NEXT: vsetivli zero, 1, e32, mf2, ta, ma
; CHECK-V-NEXT: vmv.s.x v8, a0
; CHECK-V-NEXT: addi a0, sp, 16
; CHECK-V-NEXT: vs2r.v v8, (a0) # Unknown-size Folded Spill
; CHECK-V-NEXT: fmv.w.x fa0, s6
-; CHECK-V-NEXT: call __extendhfsf2@plt
+; CHECK-V-NEXT: call __extendhfsf2
; CHECK-V-NEXT: fcvt.lu.s a0, fa0, rtz
; CHECK-V-NEXT: vsetivli zero, 2, e32, m2, tu, ma
; CHECK-V-NEXT: vmv.s.x v8, a0
@@ -4978,7 +4978,7 @@ define <8 x i16> @utesth_f16i16_mm(<8 x half> %x) {
; CHECK-V-NEXT: vslideup.vi v8, v10, 1
; CHECK-V-NEXT: vs2r.v v8, (a0) # Unknown-size Folded Spill
; CHECK-V-NEXT: fmv.w.x fa0, s5
-; CHECK-V-NEXT: call __extendhfsf2@plt
+; CHECK-V-NEXT: call __extendhfsf2
; CHECK-V-NEXT: fcvt.lu.s a0, fa0, rtz
; CHECK-V-NEXT: vsetivli zero, 3, e32, m2, tu, ma
; CHECK-V-NEXT: vmv.s.x v8, a0
@@ -4987,7 +4987,7 @@ define <8 x i16> @utesth_f16i16_mm(<8 x half> %x) {
; CHECK-V-NEXT: vslideup.vi v10, v8, 2
; CHECK-V-NEXT: vs2r.v v10, (a0) # Unknown-size Folded Spill
; CHECK-V-NEXT: fmv.w.x fa0, s4
-; CHECK-V-NEXT: call __extendhfsf2@plt
+; CHECK-V-NEXT: call __extendhfsf2
; CHECK-V-NEXT: fcvt.lu.s a0, fa0, rtz
; CHECK-V-NEXT: vsetivli zero, 4, e32, m2, tu, ma
; CHECK-V-NEXT: vmv.s.x v8, a0
@@ -4996,7 +4996,7 @@ define <8 x i16> @utesth_f16i16_mm(<8 x half> %x) {
; CHECK-V-NEXT: vslideup.vi v10, v8, 3
; CHECK-V-NEXT: vs2r.v v10, (a0) # Unknown-size Folded Spill
; CHECK-V-NEXT: fmv.w.x fa0, s3
-; CHECK-V-NEXT: call __extendhfsf2@plt
+; CHECK-V-NEXT: call __extendhfsf2
; CHECK-V-NEXT: fcvt.lu.s a0, fa0, rtz
; CHECK-V-NEXT: vsetivli zero, 5, e32, m2, tu, ma
; CHECK-V-NEXT: vmv.s.x v8, a0
@@ -5005,7 +5005,7 @@ define <8 x i16> @utesth_f16i16_mm(<8 x half> %x) {
; CHECK-V-NEXT: vslideup.vi v10, v8, 4
; CHECK-V-NEXT: vs2r.v v10, (a0) # Unknown-size Folded Spill
; CHECK-V-NEXT: fmv.w.x fa0, s2
-; CHECK-V-NEXT: call __extendhfsf2@plt
+; CHECK-V-NEXT: call __extendhfsf2
; CHECK-V-NEXT: fcvt.lu.s a0, fa0, rtz
; CHECK-V-NEXT: vsetivli zero, 6, e32, m2, tu, ma
; CHECK-V-NEXT: vmv.s.x v8, a0
@@ -5014,7 +5014,7 @@ define <8 x i16> @utesth_f16i16_mm(<8 x half> %x) {
; CHECK-V-NEXT: vslideup.vi v10, v8, 5
; CHECK-V-NEXT: vs2r.v v10, (a0) # Unknown-size Folded Spill
; CHECK-V-NEXT: fmv.w.x fa0, s1
-; CHECK-V-NEXT: call __extendhfsf2@plt
+; CHECK-V-NEXT: call __extendhfsf2
; CHECK-V-NEXT: fcvt.lu.s a0, fa0, rtz
; CHECK-V-NEXT: vsetivli zero, 7, e32, m2, tu, ma
; CHECK-V-NEXT: vmv.s.x v8, a0
@@ -5023,7 +5023,7 @@ define <8 x i16> @utesth_f16i16_mm(<8 x half> %x) {
; CHECK-V-NEXT: vslideup.vi v10, v8, 6
; CHECK-V-NEXT: vs2r.v v10, (a0) # Unknown-size Folded Spill
; CHECK-V-NEXT: fmv.w.x fa0, s0
-; CHECK-V-NEXT: call __extendhfsf2@plt
+; CHECK-V-NEXT: call __extendhfsf2
; CHECK-V-NEXT: fcvt.lu.s a0, fa0, rtz
; CHECK-V-NEXT: vsetivli zero, 8, e32, m2, ta, ma
; CHECK-V-NEXT: vmv.s.x v8, a0
@@ -5099,29 +5099,29 @@ define <8 x i16> @ustest_f16i16_mm(<8 x half> %x) {
; CHECK-NOV-NEXT: lhu a1, 48(a1)
; CHECK-NOV-NEXT: mv s0, a0
; CHECK-NOV-NEXT: fmv.w.x fa0, a1
-; CHECK-NOV-NEXT: call __extendhfsf2@plt
+; CHECK-NOV-NEXT: call __extendhfsf2
; CHECK-NOV-NEXT: fmv.s fs6, fa0
; CHECK-NOV-NEXT: fmv.w.x fa0, s7
-; CHECK-NOV-NEXT: call __extendhfsf2@plt
+; CHECK-NOV-NEXT: call __extendhfsf2
; CHECK-NOV-NEXT: fmv.s fs5, fa0
; CHECK-NOV-NEXT: fmv.w.x fa0, s6
-; CHECK-NOV-NEXT: call __extendhfsf2@plt
+; CHECK-NOV-NEXT: call __extendhfsf2
; CHECK-NOV-NEXT: fmv.s fs4, fa0
; CHECK-NOV-NEXT: fmv.w.x fa0, s5
-; CHECK-NOV-NEXT: call __extendhfsf2@plt
+; CHECK-NOV-NEXT: call __extendhfsf2
; CHECK-NOV-NEXT: fmv.s fs3, fa0
; CHECK-NOV-NEXT: fmv.w.x fa0, s4
-; CHECK-NOV-NEXT: call __extendhfsf2@plt
+; CHECK-NOV-NEXT: call __extendhfsf2
; CHECK-NOV-NEXT: fmv.s fs2, fa0
; CHECK-NOV-NEXT: fmv.w.x fa0, s3
-; CHECK-NOV-NEXT: call __extendhfsf2@plt
+; CHECK-NOV-NEXT: call __extendhfsf2
; CHECK-NOV-NEXT: fmv.s fs1, fa0
; CHECK-NOV-NEXT: fmv.w.x fa0, s2
-; CHECK-NOV-NEXT: call __extendhfsf2@plt
+; CHECK-NOV-NEXT: call __extendhfsf2
; CHECK-NOV-NEXT: fmv.s fs0, fa0
; CHECK-NOV-NEXT: fmv.w.x fa0, s1
; CHECK-NOV-NEXT: fcvt.l.s s1, fs6, rtz
-; CHECK-NOV-NEXT: call __extendhfsf2@plt
+; CHECK-NOV-NEXT: call __extendhfsf2
; CHECK-NOV-NEXT: fcvt.l.s a0, fa0, rtz
; CHECK-NOV-NEXT: lui a3, 16
; CHECK-NOV-NEXT: addiw a3, a3, -1
@@ -5261,14 +5261,14 @@ define <8 x i16> @ustest_f16i16_mm(<8 x half> %x) {
; CHECK-V-NEXT: lhu s6, 0(a0)
; CHECK-V-NEXT: lhu a0, 8(a0)
; CHECK-V-NEXT: fmv.w.x fa0, a0
-; CHECK-V-NEXT: call __extendhfsf2@plt
+; CHECK-V-NEXT: call __extendhfsf2
; CHECK-V-NEXT: fcvt.l.s a0, fa0, rtz
; CHECK-V-NEXT: vsetivli zero, 1, e32, mf2, ta, ma
; CHECK-V-NEXT: vmv.s.x v8, a0
; CHECK-V-NEXT: addi a0, sp, 16
; CHECK-V-NEXT: vs2r.v v8, (a0) # Unknown-size Folded Spill
; CHECK-V-NEXT: fmv.w.x fa0, s6
-; CHECK-V-NEXT: call __extendhfsf2@plt
+; CHECK-V-NEXT: call __extendhfsf2
; CHECK-V-NEXT: fcvt.l.s a0, fa0, rtz
; CHECK-V-NEXT: vsetivli zero, 2, e32, m2, tu, ma
; CHECK-V-NEXT: vmv.s.x v8, a0
@@ -5277,7 +5277,7 @@ define <8 x i16> @ustest_f16i16_mm(<8 x half> %x) {
; CHECK-V-NEXT: vslideup.vi v8, v10, 1
; CHECK-V-NEXT: vs2r.v v8, (a0) # Unknown-size Folded Spill
; CHECK-V-NEXT: fmv.w.x fa0, s5
-; CHECK-V-NEXT: call __extendhfsf2@plt
+; CHECK-V-NEXT: call __extendhfsf2
; CHECK-V-NEXT: fcvt.l.s a0, fa0, rtz
; CHECK-V-NEXT: vsetivli zero, 3, e32, m2, tu, ma
; CHECK-V-NEXT: vmv.s.x v8, a0
@@ -5286,7 +5286,7 @@ define <8 x i16> @ustest_f16i16_mm(<8 x half> %x) {
; CHECK-V-NEXT: vslideup.vi v10, v8, 2
; CHECK-V-NEXT: vs2r.v v10, (a0) # Unknown-size Folded Spill
; CHECK-V-NEXT: fmv.w.x fa0, s4
-; CHECK-V-NEXT: call __extendhfsf2@plt
+; CHECK-V-NEXT: call __extendhfsf2
; CHECK-V-NEXT: fcvt.l.s a0, fa0, rtz
; CHECK-V-NEXT: vsetivli zero, 4, e32, m2, tu, ma
; CHECK-V-NEXT: vmv.s.x v8, a0
@@ -5295,7 +5295,7 @@ define <8 x i16> @ustest_f16i16_mm(<8 x half> %x) {
; CHECK-V-NEXT: vslideup.vi v10, v8, 3
; CHECK-V-NEXT: vs2r.v v10, (a0) # Unknown-size Folded Spill
; CHECK-V-NEXT: fmv.w.x fa0, s3
-; CHECK-V-NEXT: call __extendhfsf2@plt
+; CHECK-V-NEXT: call __extendhfsf2
; CHECK-V-NEXT: fcvt.l.s a0, fa0, rtz
; CHECK-V-NEXT: vsetivli zero, 5, e32, m2, tu, ma
; CHECK-V-NEXT: vmv.s.x v8, a0
@@ -5304,7 +5304,7 @@ define <8 x i16> @ustest_f16i16_mm(<8 x half> %x) {
; CHECK-V-NEXT: vslideup.vi v10, v8, 4
; CHECK-V-NEXT: vs2r.v v10, (a0) # Unknown-size Folded Spill
; CHECK-V-NEXT: fmv.w.x fa0, s2
-; CHECK-V-NEXT: call __extendhfsf2@plt
+; CHECK-V-NEXT: call __extendhfsf2
; CHECK-V-NEXT: fcvt.l.s a0, fa0, rtz
; CHECK-V-NEXT: vsetivli zero, 6, e32, m2, tu, ma
; CHECK-V-NEXT: vmv.s.x v8, a0
@@ -5313,7 +5313,7 @@ define <8 x i16> @ustest_f16i16_mm(<8 x half> %x) {
; CHECK-V-NEXT: vslideup.vi v10, v8, 5
; CHECK-V-NEXT: vs2r.v v10, (a0) # Unknown-size Folded Spill
; CHECK-V-NEXT: fmv.w.x fa0, s1
-; CHECK-V-NEXT: call __extendhfsf2@plt
+; CHECK-V-NEXT: call __extendhfsf2
; CHECK-V-NEXT: fcvt.l.s a0, fa0, rtz
; CHECK-V-NEXT: vsetivli zero, 7, e32, m2, tu, ma
; CHECK-V-NEXT: vmv.s.x v8, a0
@@ -5322,7 +5322,7 @@ define <8 x i16> @ustest_f16i16_mm(<8 x half> %x) {
; CHECK-V-NEXT: vslideup.vi v10, v8, 6
; CHECK-V-NEXT: vs2r.v v10, (a0) # Unknown-size Folded Spill
; CHECK-V-NEXT: fmv.w.x fa0, s0
-; CHECK-V-NEXT: call __extendhfsf2@plt
+; CHECK-V-NEXT: call __extendhfsf2
; CHECK-V-NEXT: fcvt.l.s a0, fa0, rtz
; CHECK-V-NEXT: vsetivli zero, 8, e32, m2, ta, ma
; CHECK-V-NEXT: vmv.s.x v8, a0
@@ -5372,11 +5372,11 @@ define <2 x i64> @stest_f64i64_mm(<2 x double> %x) {
; CHECK-NOV-NEXT: .cfi_offset s1, -24
; CHECK-NOV-NEXT: .cfi_offset fs0, -32
; CHECK-NOV-NEXT: fmv.d fs0, fa1
-; CHECK-NOV-NEXT: call __fixdfti@plt
+; CHECK-NOV-NEXT: call __fixdfti
; CHECK-NOV-NEXT: mv s0, a0
; CHECK-NOV-NEXT: mv s1, a1
; CHECK-NOV-NEXT: fmv.d fa0, fs0
-; CHECK-NOV-NEXT: call __fixdfti@plt
+; CHECK-NOV-NEXT: call __fixdfti
; CHECK-NOV-NEXT: mv a2, a0
; CHECK-NOV-NEXT: li a0, -1
; CHECK-NOV-NEXT: srli a3, a0, 1
@@ -5460,14 +5460,14 @@ define <2 x i64> @stest_f64i64_mm(<2 x double> %x) {
; CHECK-V-NEXT: vsetivli zero, 1, e64, m1, ta, ma
; CHECK-V-NEXT: vslidedown.vi v9, v8, 1
; CHECK-V-NEXT: vfmv.f.s fa0, v9
-; CHECK-V-NEXT: call __fixdfti@plt
+; CHECK-V-NEXT: call __fixdfti
; CHECK-V-NEXT: mv s0, a0
; CHECK-V-NEXT: mv s1, a1
; CHECK-V-NEXT: vsetivli zero, 1, e64, m1, ta, ma
; CHECK-V-NEXT: addi a0, sp, 32
; CHECK-V-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
; CHECK-V-NEXT: vfmv.f.s fa0, v8
-; CHECK-V-NEXT: call __fixdfti@plt
+; CHECK-V-NEXT: call __fixdfti
; CHECK-V-NEXT: li a2, -1
; CHECK-V-NEXT: srli a3, a2, 1
; CHECK-V-NEXT: beqz a1, .LBB45_2
@@ -5557,11 +5557,11 @@ define <2 x i64> @utest_f64i64_mm(<2 x double> %x) {
; CHECK-NOV-NEXT: .cfi_offset fs0, -32
; CHECK-NOV-NEXT: fmv.d fs0, fa0
; CHECK-NOV-NEXT: fmv.d fa0, fa1
-; CHECK-NOV-NEXT: call __fixunsdfti@plt
+; CHECK-NOV-NEXT: call __fixunsdfti
; CHECK-NOV-NEXT: mv s0, a0
; CHECK-NOV-NEXT: mv s1, a1
; CHECK-NOV-NEXT: fmv.d fa0, fs0
-; CHECK-NOV-NEXT: call __fixunsdfti@plt
+; CHECK-NOV-NEXT: call __fixunsdfti
; CHECK-NOV-NEXT: snez a1, a1
; CHECK-NOV-NEXT: addi a1, a1, -1
; CHECK-NOV-NEXT: and a0, a1, a0
@@ -5593,7 +5593,7 @@ define <2 x i64> @utest_f64i64_mm(<2 x double> %x) {
; CHECK-V-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
; CHECK-V-NEXT: vsetivli zero, 1, e64, m1, ta, ma
; CHECK-V-NEXT: vfmv.f.s fa0, v8
-; CHECK-V-NEXT: call __fixunsdfti@plt
+; CHECK-V-NEXT: call __fixunsdfti
; CHECK-V-NEXT: mv s0, a0
; CHECK-V-NEXT: mv s1, a1
; CHECK-V-NEXT: vsetivli zero, 1, e64, m1, ta, ma
@@ -5601,7 +5601,7 @@ define <2 x i64> @utest_f64i64_mm(<2 x double> %x) {
; CHECK-V-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
; CHECK-V-NEXT: vslidedown.vi v8, v8, 1
; CHECK-V-NEXT: vfmv.f.s fa0, v8
-; CHECK-V-NEXT: call __fixunsdfti@plt
+; CHECK-V-NEXT: call __fixunsdfti
; CHECK-V-NEXT: snez a1, a1
; CHECK-V-NEXT: addi a1, a1, -1
; CHECK-V-NEXT: and a0, a1, a0
@@ -5641,11 +5641,11 @@ define <2 x i64> @ustest_f64i64_mm(<2 x double> %x) {
; CHECK-NOV-NEXT: .cfi_offset s1, -24
; CHECK-NOV-NEXT: .cfi_offset fs0, -32
; CHECK-NOV-NEXT: fmv.d fs0, fa1
-; CHECK-NOV-NEXT: call __fixdfti@plt
+; CHECK-NOV-NEXT: call __fixdfti
; CHECK-NOV-NEXT: mv s0, a0
; CHECK-NOV-NEXT: mv s1, a1
; CHECK-NOV-NEXT: fmv.d fa0, fs0
-; CHECK-NOV-NEXT: call __fixdfti@plt
+; CHECK-NOV-NEXT: call __fixdfti
; CHECK-NOV-NEXT: mv a2, a1
; CHECK-NOV-NEXT: blez a1, .LBB47_2
; CHECK-NOV-NEXT: # %bb.1: # %entry
@@ -5694,14 +5694,14 @@ define <2 x i64> @ustest_f64i64_mm(<2 x double> %x) {
; CHECK-V-NEXT: vsetivli zero, 1, e64, m1, ta, ma
; CHECK-V-NEXT: vslidedown.vi v9, v8, 1
; CHECK-V-NEXT: vfmv.f.s fa0, v9
-; CHECK-V-NEXT: call __fixdfti@plt
+; CHECK-V-NEXT: call __fixdfti
; CHECK-V-NEXT: mv s0, a0
; CHECK-V-NEXT: mv s1, a1
; CHECK-V-NEXT: vsetivli zero, 1, e64, m1, ta, ma
; CHECK-V-NEXT: addi a0, sp, 32
; CHECK-V-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
; CHECK-V-NEXT: vfmv.f.s fa0, v8
-; CHECK-V-NEXT: call __fixdfti@plt
+; CHECK-V-NEXT: call __fixdfti
; CHECK-V-NEXT: mv a2, a1
; CHECK-V-NEXT: blez a1, .LBB47_2
; CHECK-V-NEXT: # %bb.1: # %entry
@@ -5758,11 +5758,11 @@ define <2 x i64> @stest_f32i64_mm(<2 x float> %x) {
; CHECK-NOV-NEXT: .cfi_offset s1, -24
; CHECK-NOV-NEXT: .cfi_offset fs0, -32
; CHECK-NOV-NEXT: fmv.s fs0, fa1
-; CHECK-NOV-NEXT: call __fixsfti@plt
+; CHECK-NOV-NEXT: call __fixsfti
; CHECK-NOV-NEXT: mv s0, a0
; CHECK-NOV-NEXT: mv s1, a1
; CHECK-NOV-NEXT: fmv.s fa0, fs0
-; CHECK-NOV-NEXT: call __fixsfti@plt
+; CHECK-NOV-NEXT: call __fixsfti
; CHECK-NOV-NEXT: mv a2, a0
; CHECK-NOV-NEXT: li a0, -1
; CHECK-NOV-NEXT: srli a3, a0, 1
@@ -5846,14 +5846,14 @@ define <2 x i64> @stest_f32i64_mm(<2 x float> %x) {
; CHECK-V-NEXT: vsetivli zero, 1, e32, mf2, ta, ma
; CHECK-V-NEXT: vslidedown.vi v9, v8, 1
; CHECK-V-NEXT: vfmv.f.s fa0, v9
-; CHECK-V-NEXT: call __fixsfti@plt
+; CHECK-V-NEXT: call __fixsfti
; CHECK-V-NEXT: mv s0, a0
; CHECK-V-NEXT: mv s1, a1
; CHECK-V-NEXT: vsetivli zero, 1, e32, mf2, ta, ma
; CHECK-V-NEXT: addi a0, sp, 32
; CHECK-V-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
; CHECK-V-NEXT: vfmv.f.s fa0, v8
-; CHECK-V-NEXT: call __fixsfti@plt
+; CHECK-V-NEXT: call __fixsfti
; CHECK-V-NEXT: li a2, -1
; CHECK-V-NEXT: srli a3, a2, 1
; CHECK-V-NEXT: beqz a1, .LBB48_2
@@ -5943,11 +5943,11 @@ define <2 x i64> @utest_f32i64_mm(<2 x float> %x) {
; CHECK-NOV-NEXT: .cfi_offset fs0, -32
; CHECK-NOV-NEXT: fmv.s fs0, fa0
; CHECK-NOV-NEXT: fmv.s fa0, fa1
-; CHECK-NOV-NEXT: call __fixunssfti@plt
+; CHECK-NOV-NEXT: call __fixunssfti
; CHECK-NOV-NEXT: mv s0, a0
; CHECK-NOV-NEXT: mv s1, a1
; CHECK-NOV-NEXT: fmv.s fa0, fs0
-; CHECK-NOV-NEXT: call __fixunssfti@plt
+; CHECK-NOV-NEXT: call __fixunssfti
; CHECK-NOV-NEXT: snez a1, a1
; CHECK-NOV-NEXT: addi a1, a1, -1
; CHECK-NOV-NEXT: and a0, a1, a0
@@ -5979,7 +5979,7 @@ define <2 x i64> @utest_f32i64_mm(<2 x float> %x) {
; CHECK-V-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
; CHECK-V-NEXT: vsetivli zero, 1, e32, mf2, ta, ma
; CHECK-V-NEXT: vfmv.f.s fa0, v8
-; CHECK-V-NEXT: call __fixunssfti@plt
+; CHECK-V-NEXT: call __fixunssfti
; CHECK-V-NEXT: mv s0, a0
; CHECK-V-NEXT: mv s1, a1
; CHECK-V-NEXT: vsetivli zero, 1, e32, mf2, ta, ma
@@ -5987,7 +5987,7 @@ define <2 x i64> @utest_f32i64_mm(<2 x float> %x) {
; CHECK-V-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
; CHECK-V-NEXT: vslidedown.vi v8, v8, 1
; CHECK-V-NEXT: vfmv.f.s fa0, v8
-; CHECK-V-NEXT: call __fixunssfti@plt
+; CHECK-V-NEXT: call __fixunssfti
; CHECK-V-NEXT: snez a1, a1
; CHECK-V-NEXT: addi a1, a1, -1
; CHECK-V-NEXT: and a0, a1, a0
@@ -6027,11 +6027,11 @@ define <2 x i64> @ustest_f32i64_mm(<2 x float> %x) {
; CHECK-NOV-NEXT: .cfi_offset s1, -24
; CHECK-NOV-NEXT: .cfi_offset fs0, -32
; CHECK-NOV-NEXT: fmv.s fs0, fa1
-; CHECK-NOV-NEXT: call __fixsfti@plt
+; CHECK-NOV-NEXT: call __fixsfti
; CHECK-NOV-NEXT: mv s0, a0
; CHECK-NOV-NEXT: mv s1, a1
; CHECK-NOV-NEXT: fmv.s fa0, fs0
-; CHECK-NOV-NEXT: call __fixsfti@plt
+; CHECK-NOV-NEXT: call __fixsfti
; CHECK-NOV-NEXT: mv a2, a1
; CHECK-NOV-NEXT: blez a1, .LBB50_2
; CHECK-NOV-NEXT: # %bb.1: # %entry
@@ -6080,14 +6080,14 @@ define <2 x i64> @ustest_f32i64_mm(<2 x float> %x) {
; CHECK-V-NEXT: vsetivli zero, 1, e32, mf2, ta, ma
; CHECK-V-NEXT: vslidedown.vi v9, v8, 1
; CHECK-V-NEXT: vfmv.f.s fa0, v9
-; CHECK-V-NEXT: call __fixsfti@plt
+; CHECK-V-NEXT: call __fixsfti
; CHECK-V-NEXT: mv s0, a0
; CHECK-V-NEXT: mv s1, a1
; CHECK-V-NEXT: vsetivli zero, 1, e32, mf2, ta, ma
; CHECK-V-NEXT: addi a0, sp, 32
; CHECK-V-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
; CHECK-V-NEXT: vfmv.f.s fa0, v8
-; CHECK-V-NEXT: call __fixsfti@plt
+; CHECK-V-NEXT: call __fixsfti
; CHECK-V-NEXT: mv a2, a1
; CHECK-V-NEXT: blez a1, .LBB50_2
; CHECK-V-NEXT: # %bb.1: # %entry
@@ -6145,13 +6145,13 @@ define <2 x i64> @stest_f16i64_mm(<2 x half> %x) {
; CHECK-NOV-NEXT: .cfi_offset s2, -32
; CHECK-NOV-NEXT: mv s2, a1
; CHECK-NOV-NEXT: fmv.w.x fa0, a0
-; CHECK-NOV-NEXT: call __extendhfsf2@plt
-; CHECK-NOV-NEXT: call __fixsfti@plt
+; CHECK-NOV-NEXT: call __extendhfsf2
+; CHECK-NOV-NEXT: call __fixsfti
; CHECK-NOV-NEXT: mv s0, a0
; CHECK-NOV-NEXT: mv s1, a1
; CHECK-NOV-NEXT: fmv.w.x fa0, s2
-; CHECK-NOV-NEXT: call __extendhfsf2@plt
-; CHECK-NOV-NEXT: call __fixsfti@plt
+; CHECK-NOV-NEXT: call __extendhfsf2
+; CHECK-NOV-NEXT: call __fixsfti
; CHECK-NOV-NEXT: mv a2, a0
; CHECK-NOV-NEXT: li a0, -1
; CHECK-NOV-NEXT: srli a3, a0, 1
@@ -6230,13 +6230,13 @@ define <2 x i64> @stest_f16i64_mm(<2 x half> %x) {
; CHECK-V-NEXT: .cfi_offset s2, -32
; CHECK-V-NEXT: mv s2, a1
; CHECK-V-NEXT: fmv.w.x fa0, a0
-; CHECK-V-NEXT: call __extendhfsf2@plt
-; CHECK-V-NEXT: call __fixsfti@plt
+; CHECK-V-NEXT: call __extendhfsf2
+; CHECK-V-NEXT: call __fixsfti
; CHECK-V-NEXT: mv s0, a0
; CHECK-V-NEXT: mv s1, a1
; CHECK-V-NEXT: fmv.w.x fa0, s2
-; CHECK-V-NEXT: call __extendhfsf2@plt
-; CHECK-V-NEXT: call __fixsfti@plt
+; CHECK-V-NEXT: call __extendhfsf2
+; CHECK-V-NEXT: call __fixsfti
; CHECK-V-NEXT: li a2, -1
; CHECK-V-NEXT: srli a3, a2, 1
; CHECK-V-NEXT: beqz a1, .LBB51_2
@@ -6324,13 +6324,13 @@ define <2 x i64> @utesth_f16i64_mm(<2 x half> %x) {
; CHECK-NOV-NEXT: .cfi_offset s2, -32
; CHECK-NOV-NEXT: mv s0, a0
; CHECK-NOV-NEXT: fmv.w.x fa0, a1
-; CHECK-NOV-NEXT: call __extendhfsf2@plt
-; CHECK-NOV-NEXT: call __fixunssfti@plt
+; CHECK-NOV-NEXT: call __extendhfsf2
+; CHECK-NOV-NEXT: call __fixunssfti
; CHECK-NOV-NEXT: mv s1, a0
; CHECK-NOV-NEXT: mv s2, a1
; CHECK-NOV-NEXT: fmv.w.x fa0, s0
-; CHECK-NOV-NEXT: call __extendhfsf2@plt
-; CHECK-NOV-NEXT: call __fixunssfti@plt
+; CHECK-NOV-NEXT: call __extendhfsf2
+; CHECK-NOV-NEXT: call __fixunssfti
; CHECK-NOV-NEXT: snez a1, a1
; CHECK-NOV-NEXT: addi a1, a1, -1
; CHECK-NOV-NEXT: and a0, a1, a0
@@ -6358,13 +6358,13 @@ define <2 x i64> @utesth_f16i64_mm(<2 x half> %x) {
; CHECK-V-NEXT: .cfi_offset s2, -32
; CHECK-V-NEXT: mv s0, a0
; CHECK-V-NEXT: fmv.w.x fa0, a1
-; CHECK-V-NEXT: call __extendhfsf2@plt
-; CHECK-V-NEXT: call __fixunssfti@plt
+; CHECK-V-NEXT: call __extendhfsf2
+; CHECK-V-NEXT: call __fixunssfti
; CHECK-V-NEXT: mv s1, a0
; CHECK-V-NEXT: mv s2, a1
; CHECK-V-NEXT: fmv.w.x fa0, s0
-; CHECK-V-NEXT: call __extendhfsf2@plt
-; CHECK-V-NEXT: call __fixunssfti@plt
+; CHECK-V-NEXT: call __extendhfsf2
+; CHECK-V-NEXT: call __fixunssfti
; CHECK-V-NEXT: snez a1, a1
; CHECK-V-NEXT: addi a1, a1, -1
; CHECK-V-NEXT: and a0, a1, a0
@@ -6403,13 +6403,13 @@ define <2 x i64> @ustest_f16i64_mm(<2 x half> %x) {
; CHECK-NOV-NEXT: .cfi_offset s2, -32
; CHECK-NOV-NEXT: mv s2, a1
; CHECK-NOV-NEXT: fmv.w.x fa0, a0
-; CHECK-NOV-NEXT: call __extendhfsf2@plt
-; CHECK-NOV-NEXT: call __fixsfti@plt
+; CHECK-NOV-NEXT: call __extendhfsf2
+; CHECK-NOV-NEXT: call __fixsfti
; CHECK-NOV-NEXT: mv s0, a0
; CHECK-NOV-NEXT: mv s1, a1
; CHECK-NOV-NEXT: fmv.w.x fa0, s2
-; CHECK-NOV-NEXT: call __extendhfsf2@plt
-; CHECK-NOV-NEXT: call __fixsfti@plt
+; CHECK-NOV-NEXT: call __extendhfsf2
+; CHECK-NOV-NEXT: call __fixsfti
; CHECK-NOV-NEXT: mv a2, a1
; CHECK-NOV-NEXT: blez a1, .LBB53_2
; CHECK-NOV-NEXT: # %bb.1: # %entry
@@ -6453,13 +6453,13 @@ define <2 x i64> @ustest_f16i64_mm(<2 x half> %x) {
; CHECK-V-NEXT: .cfi_offset s2, -32
; CHECK-V-NEXT: mv s2, a1
; CHECK-V-NEXT: fmv.w.x fa0, a0
-; CHECK-V-NEXT: call __extendhfsf2@plt
-; CHECK-V-NEXT: call __fixsfti@plt
+; CHECK-V-NEXT: call __extendhfsf2
+; CHECK-V-NEXT: call __fixsfti
; CHECK-V-NEXT: mv s0, a0
; CHECK-V-NEXT: mv s1, a1
; CHECK-V-NEXT: fmv.w.x fa0, s2
-; CHECK-V-NEXT: call __extendhfsf2@plt
-; CHECK-V-NEXT: call __fixsfti@plt
+; CHECK-V-NEXT: call __extendhfsf2
+; CHECK-V-NEXT: call __fixsfti
; CHECK-V-NEXT: mv a2, a1
; CHECK-V-NEXT: blez a1, .LBB53_2
; CHECK-V-NEXT: # %bb.1: # %entry
diff --git a/llvm/test/CodeGen/RISCV/rvv/large-rvv-stack-size.mir b/llvm/test/CodeGen/RISCV/rvv/large-rvv-stack-size.mir
index de44fbc04e3a..b4d8805b65bd 100644
--- a/llvm/test/CodeGen/RISCV/rvv/large-rvv-stack-size.mir
+++ b/llvm/test/CodeGen/RISCV/rvv/large-rvv-stack-size.mir
@@ -32,7 +32,7 @@
; CHECK-NEXT: addi a0, a0, 241
; CHECK-NEXT: vs1r.v v25, (a0) # Unknown-size Folded Spill
; CHECK-NEXT: ld a0, 8(sp)
- ; CHECK-NEXT: call spillslot@plt
+ ; CHECK-NEXT: call spillslot
; CHECK-NEXT: addi sp, s0, -2048
; CHECK-NEXT: addi sp, sp, -256
; CHECK-NEXT: addi sp, sp, 272
@@ -87,6 +87,6 @@ body: |
 ; A later run of the register scavenger won't find available registers
; either so it will have to spill two to the emergency spill slots
; required for this RVV computation.
- PseudoCALL target-flags(riscv-plt) @spillslot, csr_ilp32_lp64, implicit-def $x1, implicit-def $x2, implicit $x1, implicit $x5, implicit $x6, implicit $x7, implicit $x10, implicit $x11, implicit $x12, implicit $x13, implicit $x14, implicit $x15, implicit $x16, implicit $x17, implicit $x28, implicit $x29, implicit $x30, implicit $x31
+ PseudoCALL target-flags(riscv-call) @spillslot, csr_ilp32_lp64, implicit-def $x1, implicit-def $x2, implicit $x1, implicit $x5, implicit $x6, implicit $x7, implicit $x10, implicit $x11, implicit $x12, implicit $x13, implicit $x14, implicit $x15, implicit $x16, implicit $x17, implicit $x28, implicit $x29, implicit $x30, implicit $x31
PseudoRET
...
diff --git a/llvm/test/CodeGen/RISCV/rvv/localvar.ll b/llvm/test/CodeGen/RISCV/rvv/localvar.ll
index 8c9a749d5ea1..1ee88f897b6e 100644
--- a/llvm/test/CodeGen/RISCV/rvv/localvar.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/localvar.ll
@@ -215,7 +215,7 @@ define void @local_var_m2_with_varsize_object(i64 %n) {
; RV64IV-NEXT: slli s1, s1, 1
; RV64IV-NEXT: sub s1, s0, s1
; RV64IV-NEXT: addi s1, s1, -32
-; RV64IV-NEXT: call notdead@plt
+; RV64IV-NEXT: call notdead
; RV64IV-NEXT: vl2r.v v8, (s1)
; RV64IV-NEXT: csrr a0, vlenb
; RV64IV-NEXT: slli a0, a0, 2
@@ -270,7 +270,7 @@ define void @local_var_m2_with_bp(i64 %n) {
; RV64IV-NEXT: slli s2, s2, 1
; RV64IV-NEXT: add s2, s1, s2
; RV64IV-NEXT: addi s2, s2, 224
-; RV64IV-NEXT: call notdead2@plt
+; RV64IV-NEXT: call notdead2
; RV64IV-NEXT: lw zero, 124(s1)
; RV64IV-NEXT: vl2r.v v8, (s2)
; RV64IV-NEXT: addi a0, s1, 224
diff --git a/llvm/test/CodeGen/RISCV/rvv/memory-args.ll b/llvm/test/CodeGen/RISCV/rvv/memory-args.ll
index 5cd9c374ed41..bdfec92d2305 100644
--- a/llvm/test/CodeGen/RISCV/rvv/memory-args.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/memory-args.ll
@@ -58,7 +58,7 @@ define <vscale x 64 x i8> @caller() {
; RV64IV-NEXT: addi a1, sp, 64
; RV64IV-NEXT: addi a0, sp, 64
; RV64IV-NEXT: vs8r.v v24, (a1)
-; RV64IV-NEXT: call callee@plt
+; RV64IV-NEXT: call callee
; RV64IV-NEXT: addi sp, s0, -80
; RV64IV-NEXT: ld ra, 72(sp) # 8-byte Folded Reload
; RV64IV-NEXT: ld s0, 64(sp) # 8-byte Folded Reload
diff --git a/llvm/test/CodeGen/RISCV/rvv/no-reserved-frame.ll b/llvm/test/CodeGen/RISCV/rvv/no-reserved-frame.ll
index 705ec2df126b..47b88ba71d55 100644
--- a/llvm/test/CodeGen/RISCV/rvv/no-reserved-frame.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/no-reserved-frame.ll
@@ -38,7 +38,7 @@ define signext i32 @foo(i32 signext %aa) #0 {
; CHECK-NEXT: addi a1, s1, 48
; CHECK-NEXT: sd t1, 0(sp)
; CHECK-NEXT: mv a0, t0
-; CHECK-NEXT: call gfunc@plt
+; CHECK-NEXT: call gfunc
; CHECK-NEXT: addi sp, sp, 32
; CHECK-NEXT: li a0, 0
; CHECK-NEXT: addi sp, s0, -96
diff --git a/llvm/test/CodeGen/RISCV/rvv/pr63596.ll b/llvm/test/CodeGen/RISCV/rvv/pr63596.ll
index 65dca0daed8c..c27488b18a01 100644
--- a/llvm/test/CodeGen/RISCV/rvv/pr63596.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/pr63596.ll
@@ -14,16 +14,16 @@ define <4 x float> @foo(ptr %0) nounwind {
; CHECK-NEXT: lhu s2, 0(a0)
; CHECK-NEXT: lhu a0, 2(a0)
; CHECK-NEXT: fmv.w.x fa0, a0
-; CHECK-NEXT: call __extendhfsf2@plt
+; CHECK-NEXT: call __extendhfsf2
; CHECK-NEXT: fsw fa0, 8(sp)
; CHECK-NEXT: fmv.w.x fa0, s2
-; CHECK-NEXT: call __extendhfsf2@plt
+; CHECK-NEXT: call __extendhfsf2
; CHECK-NEXT: fsw fa0, 0(sp)
; CHECK-NEXT: fmv.w.x fa0, s1
-; CHECK-NEXT: call __extendhfsf2@plt
+; CHECK-NEXT: call __extendhfsf2
; CHECK-NEXT: fsw fa0, 12(sp)
; CHECK-NEXT: fmv.w.x fa0, s0
-; CHECK-NEXT: call __extendhfsf2@plt
+; CHECK-NEXT: call __extendhfsf2
; CHECK-NEXT: fsw fa0, 4(sp)
; CHECK-NEXT: addi a0, sp, 8
; CHECK-NEXT: vsetivli zero, 1, e32, mf2, ta, ma
diff --git a/llvm/test/CodeGen/RISCV/rvv/reg-alloc-reserve-bp.ll b/llvm/test/CodeGen/RISCV/rvv/reg-alloc-reserve-bp.ll
index 978d1c8fb7b0..600ac594380b 100644
--- a/llvm/test/CodeGen/RISCV/rvv/reg-alloc-reserve-bp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/reg-alloc-reserve-bp.ll
@@ -38,7 +38,7 @@ define void @foo(ptr nocapture noundef %p1) {
; CHECK-NEXT: li a6, 7
; CHECK-NEXT: li a7, 8
; CHECK-NEXT: sd t0, 0(sp)
-; CHECK-NEXT: call bar@plt
+; CHECK-NEXT: call bar
; CHECK-NEXT: addi sp, sp, 16
; CHECK-NEXT: vsetivli zero, 8, e32, m2, ta, ma
; CHECK-NEXT: vle32.v v8, (s2)
diff --git a/llvm/test/CodeGen/RISCV/rvv/rv32-spill-vector-csr.ll b/llvm/test/CodeGen/RISCV/rvv/rv32-spill-vector-csr.ll
index c44f5ebcde48..129fbcfb8832 100644
--- a/llvm/test/CodeGen/RISCV/rvv/rv32-spill-vector-csr.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/rv32-spill-vector-csr.ll
@@ -28,7 +28,7 @@ define <vscale x 1 x double> @foo(<vscale x 1 x double> %a, <vscale x 1 x double
; SPILL-O0-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
; SPILL-O0-NEXT: lui a0, %hi(.L.str)
; SPILL-O0-NEXT: addi a0, a0, %lo(.L.str)
-; SPILL-O0-NEXT: call puts@plt
+; SPILL-O0-NEXT: call puts
; SPILL-O0-NEXT: addi a1, sp, 16
; SPILL-O0-NEXT: vl1r.v v10, (a1) # Unknown-size Folded Reload
; SPILL-O0-NEXT: csrr a1, vlenb
@@ -66,7 +66,7 @@ define <vscale x 1 x double> @foo(<vscale x 1 x double> %a, <vscale x 1 x double
; SPILL-O2-NEXT: vs1r.v v9, (a0) # Unknown-size Folded Spill
; SPILL-O2-NEXT: lui a0, %hi(.L.str)
; SPILL-O2-NEXT: addi a0, a0, %lo(.L.str)
-; SPILL-O2-NEXT: call puts@plt
+; SPILL-O2-NEXT: call puts
; SPILL-O2-NEXT: vsetvli zero, s0, e64, m1, ta, ma
; SPILL-O2-NEXT: csrr a0, vlenb
; SPILL-O2-NEXT: add a0, sp, a0
diff --git a/llvm/test/CodeGen/RISCV/rvv/rv64-spill-vector-csr.ll b/llvm/test/CodeGen/RISCV/rvv/rv64-spill-vector-csr.ll
index 8beffdd04f98..34eb58ee4d1c 100644
--- a/llvm/test/CodeGen/RISCV/rvv/rv64-spill-vector-csr.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/rv64-spill-vector-csr.ll
@@ -31,7 +31,7 @@ define <vscale x 1 x double> @foo(<vscale x 1 x double> %a, <vscale x 1 x double
; SPILL-O0-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
; SPILL-O0-NEXT: lui a0, %hi(.L.str)
; SPILL-O0-NEXT: addi a0, a0, %lo(.L.str)
-; SPILL-O0-NEXT: call puts@plt
+; SPILL-O0-NEXT: call puts
; SPILL-O0-NEXT: addi a1, sp, 32
; SPILL-O0-NEXT: vl1r.v v10, (a1) # Unknown-size Folded Reload
; SPILL-O0-NEXT: csrr a1, vlenb
@@ -69,7 +69,7 @@ define <vscale x 1 x double> @foo(<vscale x 1 x double> %a, <vscale x 1 x double
; SPILL-O2-NEXT: vs1r.v v9, (a0) # Unknown-size Folded Spill
; SPILL-O2-NEXT: lui a0, %hi(.L.str)
; SPILL-O2-NEXT: addi a0, a0, %lo(.L.str)
-; SPILL-O2-NEXT: call puts@plt
+; SPILL-O2-NEXT: call puts
; SPILL-O2-NEXT: vsetvli zero, s0, e64, m1, ta, ma
; SPILL-O2-NEXT: csrr a0, vlenb
; SPILL-O2-NEXT: add a0, sp, a0
@@ -101,7 +101,7 @@ define <vscale x 1 x double> @foo(<vscale x 1 x double> %a, <vscale x 1 x double
; SPILL-O2-VLEN128-NEXT: vs1r.v v9, (a0) # Unknown-size Folded Spill
; SPILL-O2-VLEN128-NEXT: lui a0, %hi(.L.str)
; SPILL-O2-VLEN128-NEXT: addi a0, a0, %lo(.L.str)
-; SPILL-O2-VLEN128-NEXT: call puts@plt
+; SPILL-O2-VLEN128-NEXT: call puts
; SPILL-O2-VLEN128-NEXT: vsetvli zero, s0, e64, m1, ta, ma
; SPILL-O2-VLEN128-NEXT: addi a0, sp, 32
; SPILL-O2-VLEN128-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
diff --git a/llvm/test/CodeGen/RISCV/rvv/rvv-args-by-mem.ll b/llvm/test/CodeGen/RISCV/rvv/rvv-args-by-mem.ll
index 27a5059e5a9e..c164b62a679b 100644
--- a/llvm/test/CodeGen/RISCV/rvv/rvv-args-by-mem.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/rvv-args-by-mem.ll
@@ -48,7 +48,7 @@ define <vscale x 16 x i32> @foo(i32 %0, i32 %1, i32 %2, i32 %3, i32 %4, i32 %5,
; CHECK-NEXT: sd t0, 8(sp)
; CHECK-NEXT: sd t1, 0(sp)
; CHECK-NEXT: vmv8r.v v16, v8
-; CHECK-NEXT: call bar@plt
+; CHECK-NEXT: call bar
; CHECK-NEXT: addi sp, sp, 16
; CHECK-NEXT: addi sp, s0, -96
; CHECK-NEXT: ld ra, 88(sp) # 8-byte Folded Reload
diff --git a/llvm/test/CodeGen/RISCV/rvv/rvv-stack-align.mir b/llvm/test/CodeGen/RISCV/rvv/rvv-stack-align.mir
index 686401da41a4..d98e18b22f69 100644
--- a/llvm/test/CodeGen/RISCV/rvv/rvv-stack-align.mir
+++ b/llvm/test/CodeGen/RISCV/rvv/rvv-stack-align.mir
@@ -24,7 +24,7 @@
; RV32-NEXT: addi a0, sp, 32
; RV32-NEXT: addi a1, sp, 16
; RV32-NEXT: addi a2, sp, 8
- ; RV32-NEXT: call extern@plt
+ ; RV32-NEXT: call extern
; RV32-NEXT: csrr a0, vlenb
; RV32-NEXT: slli a0, a0, 1
; RV32-NEXT: add sp, sp, a0
@@ -42,7 +42,7 @@
; RV64-NEXT: addi a0, sp, 32
; RV64-NEXT: addi a1, sp, 16
; RV64-NEXT: addi a2, sp, 8
- ; RV64-NEXT: call extern@plt
+ ; RV64-NEXT: call extern
; RV64-NEXT: csrr a0, vlenb
; RV64-NEXT: slli a0, a0, 1
; RV64-NEXT: add sp, sp, a0
@@ -67,7 +67,7 @@
; RV32-NEXT: addi a0, sp, 32
; RV32-NEXT: addi a1, sp, 16
; RV32-NEXT: addi a2, sp, 8
- ; RV32-NEXT: call extern@plt
+ ; RV32-NEXT: call extern
; RV32-NEXT: csrr a0, vlenb
; RV32-NEXT: slli a0, a0, 1
; RV32-NEXT: add sp, sp, a0
@@ -85,7 +85,7 @@
; RV64-NEXT: addi a0, sp, 32
; RV64-NEXT: addi a1, sp, 16
; RV64-NEXT: addi a2, sp, 8
- ; RV64-NEXT: call extern@plt
+ ; RV64-NEXT: call extern
; RV64-NEXT: csrr a0, vlenb
; RV64-NEXT: slli a0, a0, 1
; RV64-NEXT: add sp, sp, a0
@@ -113,7 +113,7 @@
; RV32-NEXT: addi a0, sp, 32
; RV32-NEXT: addi a1, sp, 16
; RV32-NEXT: addi a2, sp, 8
- ; RV32-NEXT: call extern@plt
+ ; RV32-NEXT: call extern
; RV32-NEXT: addi sp, s0, -48
; RV32-NEXT: lw ra, 44(sp) # 4-byte Folded Reload
; RV32-NEXT: lw s0, 40(sp) # 4-byte Folded Reload
@@ -133,7 +133,7 @@
; RV64-NEXT: addi a0, sp, 64
; RV64-NEXT: addi a1, sp, 40
; RV64-NEXT: addi a2, sp, 32
- ; RV64-NEXT: call extern@plt
+ ; RV64-NEXT: call extern
; RV64-NEXT: addi sp, s0, -80
; RV64-NEXT: ld ra, 72(sp) # 8-byte Folded Reload
; RV64-NEXT: ld s0, 64(sp) # 8-byte Folded Reload
@@ -188,7 +188,7 @@ body: |
$x10 = ADDI %stack.0.a, 0
$x11 = ADDI %stack.1.b, 0
$x12 = ADDI %stack.2.c, 0
- PseudoCALL target-flags(riscv-plt) @extern, csr_ilp32d_lp64d, implicit-def dead $x1, implicit killed $x10, implicit-def $x2
+ PseudoCALL target-flags(riscv-call) @extern, csr_ilp32d_lp64d, implicit-def dead $x1, implicit killed $x10, implicit-def $x2
ADJCALLSTACKUP 0, 0, implicit-def dead $x2, implicit $x2
PseudoRET
@@ -233,7 +233,7 @@ body: |
$x10 = ADDI %stack.0.a, 0
$x11 = ADDI %stack.1.b, 0
$x12 = ADDI %stack.2.c, 0
- PseudoCALL target-flags(riscv-plt) @extern, csr_ilp32d_lp64d, implicit-def dead $x1, implicit killed $x10, implicit-def $x2
+ PseudoCALL target-flags(riscv-call) @extern, csr_ilp32d_lp64d, implicit-def dead $x1, implicit killed $x10, implicit-def $x2
ADJCALLSTACKUP 0, 0, implicit-def dead $x2, implicit $x2
PseudoRET
@@ -278,7 +278,7 @@ body: |
$x10 = ADDI %stack.0.a, 0
$x11 = ADDI %stack.1.b, 0
$x12 = ADDI %stack.2.c, 0
- PseudoCALL target-flags(riscv-plt) @extern, csr_ilp32d_lp64d, implicit-def dead $x1, implicit killed $x10, implicit-def $x2
+ PseudoCALL target-flags(riscv-call) @extern, csr_ilp32d_lp64d, implicit-def dead $x1, implicit killed $x10, implicit-def $x2
ADJCALLSTACKUP 0, 0, implicit-def dead $x2, implicit $x2
PseudoRET
diff --git a/llvm/test/CodeGen/RISCV/rvv/scalar-stack-align.ll b/llvm/test/CodeGen/RISCV/rvv/scalar-stack-align.ll
index 76773bb38313..7aaafe9874fb 100644
--- a/llvm/test/CodeGen/RISCV/rvv/scalar-stack-align.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/scalar-stack-align.ll
@@ -19,7 +19,7 @@ define ptr @scalar_stack_align16() nounwind {
; RV32-NEXT: slli a0, a0, 1
; RV32-NEXT: sub sp, sp, a0
; RV32-NEXT: addi a0, sp, 32
-; RV32-NEXT: call extern@plt
+; RV32-NEXT: call extern
; RV32-NEXT: addi a0, sp, 16
; RV32-NEXT: csrr a1, vlenb
; RV32-NEXT: slli a1, a1, 1
@@ -36,7 +36,7 @@ define ptr @scalar_stack_align16() nounwind {
; RV64-NEXT: slli a0, a0, 1
; RV64-NEXT: sub sp, sp, a0
; RV64-NEXT: addi a0, sp, 32
-; RV64-NEXT: call extern@plt
+; RV64-NEXT: call extern
; RV64-NEXT: addi a0, sp, 16
; RV64-NEXT: csrr a1, vlenb
; RV64-NEXT: slli a1, a1, 1
diff --git a/llvm/test/CodeGen/RISCV/rvv/vsetvli-insert-crossbb.ll b/llvm/test/CodeGen/RISCV/rvv/vsetvli-insert-crossbb.ll
index 73f651225da6..07fcec13146c 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vsetvli-insert-crossbb.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vsetvli-insert-crossbb.ll
@@ -343,7 +343,7 @@ define <vscale x 1 x double> @test8(i64 %avl, i8 zeroext %cond, <vscale x 1 x do
; CHECK-NEXT: vs1r.v v9, (a0) # Unknown-size Folded Spill
; CHECK-NEXT: addi a0, sp, 16
; CHECK-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
-; CHECK-NEXT: call foo@plt
+; CHECK-NEXT: call foo
; CHECK-NEXT: vsetvli zero, s0, e64, m1, ta, ma
; CHECK-NEXT: csrr a0, vlenb
; CHECK-NEXT: add a0, a0, sp
@@ -400,7 +400,7 @@ define <vscale x 1 x double> @test9(i64 %avl, i8 zeroext %cond, <vscale x 1 x do
; CHECK-NEXT: add a0, a0, sp
; CHECK-NEXT: addi a0, a0, 16
; CHECK-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
-; CHECK-NEXT: call foo@plt
+; CHECK-NEXT: call foo
; CHECK-NEXT: addi a0, sp, 16
; CHECK-NEXT: vl1r.v v9, (a0) # Unknown-size Folded Reload
; CHECK-NEXT: csrr a0, vlenb
diff --git a/llvm/test/CodeGen/RISCV/rvv/vsetvli-insert.mir b/llvm/test/CodeGen/RISCV/rvv/vsetvli-insert.mir
index 7bda7a387c68..d515022b7418 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vsetvli-insert.mir
+++ b/llvm/test/CodeGen/RISCV/rvv/vsetvli-insert.mir
@@ -72,6 +72,10 @@
ret void
}
+ define void @pre_same_sewlmul_ratio() {
+ ret void
+ }
+
declare <vscale x 1 x i64> @llvm.riscv.vadd.nxv1i64.nxv1i64.i64(<vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, i64) #1
declare <vscale x 1 x i64> @llvm.riscv.vle.nxv1i64.i64(<vscale x 1 x i64>, <vscale x 1 x i64>* nocapture, i64) #4
@@ -446,3 +450,56 @@ body: |
%4:vr = PseudoVMV_V_I_MF4 undef %4, 0, 4, 3, 0
PseudoRET
...
+---
+# Make sure we don't try to perform PRE when one of the blocks demands only the
+# SEW/LMUL ratio.
+name: pre_same_sewlmul_ratio
+tracksRegLiveness: true
+body: |
+ ; CHECK-LABEL: name: pre_same_sewlmul_ratio
+ ; CHECK: bb.0:
+ ; CHECK-NEXT: successors: %bb.2(0x40000000), %bb.1(0x40000000)
+ ; CHECK-NEXT: liveins: $x10
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: %cond:gpr = COPY $x10
+ ; CHECK-NEXT: dead $x0 = PseudoVSETIVLI 2, 215 /* e32, mf2, ta, ma */, implicit-def $vl, implicit-def $vtype
+ ; CHECK-NEXT: [[PseudoVMV_V_I_MF2_:%[0-9]+]]:vr = PseudoVMV_V_I_MF2 $noreg, 1, 2, 5 /* e32 */, 0 /* tu, mu */, implicit $vl, implicit $vtype
+ ; CHECK-NEXT: BEQ %cond, $x0, %bb.2
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: bb.1:
+ ; CHECK-NEXT: successors: %bb.2(0x80000000)
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: dead $x0 = PseudoVSETVLIX0 killed $x0, 216 /* e64, m1, ta, ma */, implicit-def $vl, implicit-def $vtype, implicit $vl
+ ; CHECK-NEXT: [[PseudoVMV_V_I_M1_:%[0-9]+]]:vr = PseudoVMV_V_I_M1 $noreg, 1, 2, 6 /* e64 */, 0 /* tu, mu */, implicit $vl, implicit $vtype
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: bb.2:
+ ; CHECK-NEXT: successors: %bb.4(0x40000000), %bb.3(0x40000000)
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: BEQ %cond, $x0, %bb.4
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: bb.3:
+ ; CHECK-NEXT: successors: %bb.4(0x80000000)
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: PseudoCALL $noreg, csr_ilp32_lp64
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: bb.4:
+ ; CHECK-NEXT: $x0 = PseudoVSETIVLI 2, 215 /* e32, mf2, ta, ma */, implicit-def $vl, implicit-def $vtype
+ ; CHECK-NEXT: [[PseudoVMV_X_S_MF2_:%[0-9]+]]:gpr = PseudoVMV_X_S_MF2 $noreg, 5 /* e32 */, implicit $vtype
+ ; CHECK-NEXT: [[PseudoVMV_V_I_MF2_1:%[0-9]+]]:vr = PseudoVMV_V_I_MF2 $noreg, 1, 2, 5 /* e32 */, 0 /* tu, mu */, implicit $vl, implicit $vtype
+ ; CHECK-NEXT: PseudoRET
+ bb.0:
+ liveins: $x10
+ %cond:gpr = COPY $x10
+ %1:vr = PseudoVMV_V_I_MF2 $noreg, 1, 2, 5, 0
+ BEQ %cond, $x0, %bb.2
+ bb.1:
+ %2:vr = PseudoVMV_V_I_M1 $noreg, 1, 2, 6, 0
+ bb.2: ; the exit info here should have sew/lmul ratio only
+ BEQ %cond, $x0, %bb.4
+ bb.3:
+ PseudoCALL $noreg, csr_ilp32_lp64
+ bb.4: ; this block will have PRE attempted on it
+ %4:gpr = PseudoVMV_X_S_MF2 $noreg, 5
+ %5:vr = PseudoVMV_V_I_MF2 $noreg, 1, 2, 5, 0
+ PseudoRET
+...
diff --git a/llvm/test/CodeGen/RISCV/rvv/vxrm-insert.ll b/llvm/test/CodeGen/RISCV/rvv/vxrm-insert.ll
index e323bc69ee90..215b1ddd5de3 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vxrm-insert.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vxrm-insert.ll
@@ -83,7 +83,7 @@ define <vscale x 1 x i8> @test3(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vsc
; RV32-NEXT: vsetvli zero, a0, e8, mf8, ta, ma
; RV32-NEXT: csrwi vxrm, 0
; RV32-NEXT: vaadd.vv v8, v8, v9
-; RV32-NEXT: call foo@plt
+; RV32-NEXT: call foo
; RV32-NEXT: vsetvli zero, s0, e8, mf8, ta, ma
; RV32-NEXT: csrwi vxrm, 0
; RV32-NEXT: addi a0, sp, 16
@@ -111,7 +111,7 @@ define <vscale x 1 x i8> @test3(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vsc
; RV64-NEXT: vsetvli zero, a0, e8, mf8, ta, ma
; RV64-NEXT: csrwi vxrm, 0
; RV64-NEXT: vaadd.vv v8, v8, v9
-; RV64-NEXT: call foo@plt
+; RV64-NEXT: call foo
; RV64-NEXT: vsetvli zero, s0, e8, mf8, ta, ma
; RV64-NEXT: csrwi vxrm, 0
; RV64-NEXT: addi a0, sp, 16
diff --git a/llvm/test/CodeGen/RISCV/rvv/wrong-stack-offset-for-rvv-object.mir b/llvm/test/CodeGen/RISCV/rvv/wrong-stack-offset-for-rvv-object.mir
index 6d05a8e62ea1..e629727d26c3 100644
--- a/llvm/test/CodeGen/RISCV/rvv/wrong-stack-offset-for-rvv-object.mir
+++ b/llvm/test/CodeGen/RISCV/rvv/wrong-stack-offset-for-rvv-object.mir
@@ -189,7 +189,7 @@ body: |
; CHECK-NEXT: renamable $v8 = VL1RE8_V killed $x10 :: (load unknown-size from %stack.1, align 8)
; CHECK-NEXT: PseudoVSE8_V_MF8 killed renamable $v8, renamable $x8, 2, 3 /* e8 */, implicit $vl, implicit $vtype :: (store (s16) into %ir.0, align 1)
; CHECK-NEXT: $x10 = COPY renamable $x9
- ; CHECK-NEXT: PseudoCALL target-flags(riscv-plt) @fprintf, csr_ilp32d_lp64d, implicit-def dead $x1, implicit killed $x10, implicit-def $x2, implicit-def dead $x10
+ ; CHECK-NEXT: PseudoCALL target-flags(riscv-call) @fprintf, csr_ilp32d_lp64d, implicit-def dead $x1, implicit killed $x10, implicit-def $x2, implicit-def dead $x10
; CHECK-NEXT: PseudoBR %bb.1
bb.0.entry:
successors: %bb.1(0x80000000)
@@ -219,7 +219,7 @@ body: |
PseudoVSE8_V_MF8 killed renamable $v8, renamable $x8, 2, 3, implicit $vl, implicit $vtype :: (store (s16) into %ir.0, align 1)
ADJCALLSTACKDOWN 0, 0, implicit-def dead $x2, implicit $x2
$x10 = COPY renamable $x9
- PseudoCALL target-flags(riscv-plt) @fprintf, csr_ilp32d_lp64d, implicit-def dead $x1, implicit killed $x10, implicit-def $x2, implicit-def dead $x10
+ PseudoCALL target-flags(riscv-call) @fprintf, csr_ilp32d_lp64d, implicit-def dead $x1, implicit killed $x10, implicit-def $x2, implicit-def dead $x10
ADJCALLSTACKUP 0, 0, implicit-def dead $x2, implicit $x2
PseudoBR %bb.1
diff --git a/llvm/test/CodeGen/RISCV/select-and.ll b/llvm/test/CodeGen/RISCV/select-and.ll
index 5ba489043f0b..d305993f0e96 100644
--- a/llvm/test/CodeGen/RISCV/select-and.ll
+++ b/llvm/test/CodeGen/RISCV/select-and.ll
@@ -44,10 +44,10 @@ define signext i32 @if_of_and(i1 zeroext %a, i1 zeroext %b) nounwind {
; RV32I-NEXT: # %bb.1:
; RV32I-NEXT: beqz a1, .LBB1_3
; RV32I-NEXT: # %bb.2: # %if.then
-; RV32I-NEXT: call both@plt
+; RV32I-NEXT: call both
; RV32I-NEXT: j .LBB1_4
; RV32I-NEXT: .LBB1_3: # %if.else
-; RV32I-NEXT: call neither@plt
+; RV32I-NEXT: call neither
; RV32I-NEXT: .LBB1_4: # %if.end
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
@@ -61,10 +61,10 @@ define signext i32 @if_of_and(i1 zeroext %a, i1 zeroext %b) nounwind {
; RV64I-NEXT: # %bb.1:
; RV64I-NEXT: beqz a1, .LBB1_3
; RV64I-NEXT: # %bb.2: # %if.then
-; RV64I-NEXT: call both@plt
+; RV64I-NEXT: call both
; RV64I-NEXT: j .LBB1_4
; RV64I-NEXT: .LBB1_3: # %if.else
-; RV64I-NEXT: call neither@plt
+; RV64I-NEXT: call neither
; RV64I-NEXT: .LBB1_4: # %if.end
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
diff --git a/llvm/test/CodeGen/RISCV/select-cc.ll b/llvm/test/CodeGen/RISCV/select-cc.ll
index 01ad7b338ce7..31e25702da8b 100644
--- a/llvm/test/CodeGen/RISCV/select-cc.ll
+++ b/llvm/test/CodeGen/RISCV/select-cc.ll
@@ -230,7 +230,7 @@ define signext i16 @numsignbits(i16 signext %0, i16 signext %1, i16 signext %2,
; RV32I-NEXT: beqz a1, .LBB1_4
; RV32I-NEXT: # %bb.3:
; RV32I-NEXT: mv a0, s0
-; RV32I-NEXT: call bar@plt
+; RV32I-NEXT: call bar
; RV32I-NEXT: .LBB1_4:
; RV32I-NEXT: mv a0, s0
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
@@ -251,7 +251,7 @@ define signext i16 @numsignbits(i16 signext %0, i16 signext %1, i16 signext %2,
; RV64I-NEXT: beqz a1, .LBB1_4
; RV64I-NEXT: # %bb.3:
; RV64I-NEXT: mv a0, s0
-; RV64I-NEXT: call bar@plt
+; RV64I-NEXT: call bar
; RV64I-NEXT: .LBB1_4:
; RV64I-NEXT: mv a0, s0
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
diff --git a/llvm/test/CodeGen/RISCV/select-or.ll b/llvm/test/CodeGen/RISCV/select-or.ll
index d378bb4dd563..20a5ec15290c 100644
--- a/llvm/test/CodeGen/RISCV/select-or.ll
+++ b/llvm/test/CodeGen/RISCV/select-or.ll
@@ -44,10 +44,10 @@ define signext i32 @if_of_or(i1 zeroext %a, i1 zeroext %b) nounwind {
; RV32I-NEXT: # %bb.1:
; RV32I-NEXT: bnez a1, .LBB1_3
; RV32I-NEXT: # %bb.2: # %if.else
-; RV32I-NEXT: call neither@plt
+; RV32I-NEXT: call neither
; RV32I-NEXT: j .LBB1_4
; RV32I-NEXT: .LBB1_3: # %if.then
-; RV32I-NEXT: call either@plt
+; RV32I-NEXT: call either
; RV32I-NEXT: .LBB1_4: # %if.end
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
@@ -61,10 +61,10 @@ define signext i32 @if_of_or(i1 zeroext %a, i1 zeroext %b) nounwind {
; RV64I-NEXT: # %bb.1:
; RV64I-NEXT: bnez a1, .LBB1_3
; RV64I-NEXT: # %bb.2: # %if.else
-; RV64I-NEXT: call neither@plt
+; RV64I-NEXT: call neither
; RV64I-NEXT: j .LBB1_4
; RV64I-NEXT: .LBB1_3: # %if.then
-; RV64I-NEXT: call either@plt
+; RV64I-NEXT: call either
; RV64I-NEXT: .LBB1_4: # %if.end
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
diff --git a/llvm/test/CodeGen/RISCV/setcc-logic.ll b/llvm/test/CodeGen/RISCV/setcc-logic.ll
index 14e76d1ff172..2b87026e98e6 100644
--- a/llvm/test/CodeGen/RISCV/setcc-logic.ll
+++ b/llvm/test/CodeGen/RISCV/setcc-logic.ll
@@ -304,7 +304,7 @@ define void @and_sge_eq(i32 signext %0, i32 signext %1, i32 signext %2, i32 sign
; RV32I-NEXT: # %bb.2:
; RV32I-NEXT: ret
; RV32I-NEXT: .LBB13_3:
-; RV32I-NEXT: tail bar@plt
+; RV32I-NEXT: tail bar
;
; RV64I-LABEL: and_sge_eq:
; RV64I: # %bb.0:
@@ -314,7 +314,7 @@ define void @and_sge_eq(i32 signext %0, i32 signext %1, i32 signext %2, i32 sign
; RV64I-NEXT: # %bb.2:
; RV64I-NEXT: ret
; RV64I-NEXT: .LBB13_3:
-; RV64I-NEXT: tail bar@plt
+; RV64I-NEXT: tail bar
%5 = icmp sge i32 %0, %1
%6 = icmp eq i32 %2, %3
%7 = and i1 %5, %6
@@ -337,7 +337,7 @@ define void @and_sle_eq(i32 signext %0, i32 signext %1, i32 signext %2, i32 sign
; RV32I-NEXT: # %bb.2:
; RV32I-NEXT: ret
; RV32I-NEXT: .LBB14_3:
-; RV32I-NEXT: tail bar@plt
+; RV32I-NEXT: tail bar
;
; RV64I-LABEL: and_sle_eq:
; RV64I: # %bb.0:
@@ -347,7 +347,7 @@ define void @and_sle_eq(i32 signext %0, i32 signext %1, i32 signext %2, i32 sign
; RV64I-NEXT: # %bb.2:
; RV64I-NEXT: ret
; RV64I-NEXT: .LBB14_3:
-; RV64I-NEXT: tail bar@plt
+; RV64I-NEXT: tail bar
%5 = icmp sle i32 %0, %1
%6 = icmp eq i32 %2, %3
%7 = and i1 %5, %6
@@ -370,7 +370,7 @@ define void @and_uge_eq(i32 signext %0, i32 signext %1, i32 signext %2, i32 sign
; RV32I-NEXT: # %bb.2:
; RV32I-NEXT: ret
; RV32I-NEXT: .LBB15_3:
-; RV32I-NEXT: tail bar@plt
+; RV32I-NEXT: tail bar
;
; RV64I-LABEL: and_uge_eq:
; RV64I: # %bb.0:
@@ -380,7 +380,7 @@ define void @and_uge_eq(i32 signext %0, i32 signext %1, i32 signext %2, i32 sign
; RV64I-NEXT: # %bb.2:
; RV64I-NEXT: ret
; RV64I-NEXT: .LBB15_3:
-; RV64I-NEXT: tail bar@plt
+; RV64I-NEXT: tail bar
%5 = icmp uge i32 %0, %1
%6 = icmp eq i32 %2, %3
%7 = and i1 %5, %6
@@ -403,7 +403,7 @@ define void @and_ule_eq(i32 signext %0, i32 signext %1, i32 signext %2, i32 sign
; RV32I-NEXT: # %bb.2:
; RV32I-NEXT: ret
; RV32I-NEXT: .LBB16_3:
-; RV32I-NEXT: tail bar@plt
+; RV32I-NEXT: tail bar
;
; RV64I-LABEL: and_ule_eq:
; RV64I: # %bb.0:
@@ -413,7 +413,7 @@ define void @and_ule_eq(i32 signext %0, i32 signext %1, i32 signext %2, i32 sign
; RV64I-NEXT: # %bb.2:
; RV64I-NEXT: ret
; RV64I-NEXT: .LBB16_3:
-; RV64I-NEXT: tail bar@plt
+; RV64I-NEXT: tail bar
%5 = icmp ule i32 %0, %1
%6 = icmp eq i32 %2, %3
%7 = and i1 %5, %6
@@ -436,7 +436,7 @@ define void @and_sge_ne(i32 signext %0, i32 signext %1, i32 signext %2, i32 sign
; RV32I-NEXT: # %bb.2:
; RV32I-NEXT: ret
; RV32I-NEXT: .LBB17_3:
-; RV32I-NEXT: tail bar@plt
+; RV32I-NEXT: tail bar
;
; RV64I-LABEL: and_sge_ne:
; RV64I: # %bb.0:
@@ -446,7 +446,7 @@ define void @and_sge_ne(i32 signext %0, i32 signext %1, i32 signext %2, i32 sign
; RV64I-NEXT: # %bb.2:
; RV64I-NEXT: ret
; RV64I-NEXT: .LBB17_3:
-; RV64I-NEXT: tail bar@plt
+; RV64I-NEXT: tail bar
%5 = icmp sge i32 %0, %1
%6 = icmp ne i32 %2, %3
%7 = and i1 %5, %6
@@ -469,7 +469,7 @@ define void @and_sle_ne(i32 signext %0, i32 signext %1, i32 signext %2, i32 sign
; RV32I-NEXT: # %bb.2:
; RV32I-NEXT: ret
; RV32I-NEXT: .LBB18_3:
-; RV32I-NEXT: tail bar@plt
+; RV32I-NEXT: tail bar
;
; RV64I-LABEL: and_sle_ne:
; RV64I: # %bb.0:
@@ -479,7 +479,7 @@ define void @and_sle_ne(i32 signext %0, i32 signext %1, i32 signext %2, i32 sign
; RV64I-NEXT: # %bb.2:
; RV64I-NEXT: ret
; RV64I-NEXT: .LBB18_3:
-; RV64I-NEXT: tail bar@plt
+; RV64I-NEXT: tail bar
%5 = icmp sle i32 %0, %1
%6 = icmp ne i32 %2, %3
%7 = and i1 %5, %6
@@ -502,7 +502,7 @@ define void @and_uge_ne(i32 signext %0, i32 signext %1, i32 signext %2, i32 sign
; RV32I-NEXT: # %bb.2:
; RV32I-NEXT: ret
; RV32I-NEXT: .LBB19_3:
-; RV32I-NEXT: tail bar@plt
+; RV32I-NEXT: tail bar
;
; RV64I-LABEL: and_uge_ne:
; RV64I: # %bb.0:
@@ -512,7 +512,7 @@ define void @and_uge_ne(i32 signext %0, i32 signext %1, i32 signext %2, i32 sign
; RV64I-NEXT: # %bb.2:
; RV64I-NEXT: ret
; RV64I-NEXT: .LBB19_3:
-; RV64I-NEXT: tail bar@plt
+; RV64I-NEXT: tail bar
%5 = icmp uge i32 %0, %1
%6 = icmp ne i32 %2, %3
%7 = and i1 %5, %6
@@ -535,7 +535,7 @@ define void @and_ule_ne(i32 signext %0, i32 signext %1, i32 signext %2, i32 sign
; RV32I-NEXT: # %bb.2:
; RV32I-NEXT: ret
; RV32I-NEXT: .LBB20_3:
-; RV32I-NEXT: tail bar@plt
+; RV32I-NEXT: tail bar
;
; RV64I-LABEL: and_ule_ne:
; RV64I: # %bb.0:
@@ -545,7 +545,7 @@ define void @and_ule_ne(i32 signext %0, i32 signext %1, i32 signext %2, i32 sign
; RV64I-NEXT: # %bb.2:
; RV64I-NEXT: ret
; RV64I-NEXT: .LBB20_3:
-; RV64I-NEXT: tail bar@plt
+; RV64I-NEXT: tail bar
%5 = icmp ule i32 %0, %1
%6 = icmp ne i32 %2, %3
%7 = and i1 %5, %6
@@ -566,7 +566,7 @@ define void @or_sge_eq(i32 signext %0, i32 signext %1, i32 signext %2, i32 signe
; RV32I-NEXT: # %bb.1:
; RV32I-NEXT: beq a2, a3, .LBB21_3
; RV32I-NEXT: # %bb.2:
-; RV32I-NEXT: tail bar@plt
+; RV32I-NEXT: tail bar
; RV32I-NEXT: .LBB21_3:
; RV32I-NEXT: ret
;
@@ -576,7 +576,7 @@ define void @or_sge_eq(i32 signext %0, i32 signext %1, i32 signext %2, i32 signe
; RV64I-NEXT: # %bb.1:
; RV64I-NEXT: beq a2, a3, .LBB21_3
; RV64I-NEXT: # %bb.2:
-; RV64I-NEXT: tail bar@plt
+; RV64I-NEXT: tail bar
; RV64I-NEXT: .LBB21_3:
; RV64I-NEXT: ret
%5 = icmp sge i32 %0, %1
@@ -599,7 +599,7 @@ define void @or_sle_eq(i32 signext %0, i32 signext %1, i32 signext %2, i32 signe
; RV32I-NEXT: # %bb.1:
; RV32I-NEXT: beq a2, a3, .LBB22_3
; RV32I-NEXT: # %bb.2:
-; RV32I-NEXT: tail bar@plt
+; RV32I-NEXT: tail bar
; RV32I-NEXT: .LBB22_3:
; RV32I-NEXT: ret
;
@@ -609,7 +609,7 @@ define void @or_sle_eq(i32 signext %0, i32 signext %1, i32 signext %2, i32 signe
; RV64I-NEXT: # %bb.1:
; RV64I-NEXT: beq a2, a3, .LBB22_3
; RV64I-NEXT: # %bb.2:
-; RV64I-NEXT: tail bar@plt
+; RV64I-NEXT: tail bar
; RV64I-NEXT: .LBB22_3:
; RV64I-NEXT: ret
%5 = icmp sle i32 %0, %1
@@ -632,7 +632,7 @@ define void @or_uge_eq(i32 signext %0, i32 signext %1, i32 signext %2, i32 signe
; RV32I-NEXT: # %bb.1:
; RV32I-NEXT: beq a2, a3, .LBB23_3
; RV32I-NEXT: # %bb.2:
-; RV32I-NEXT: tail bar@plt
+; RV32I-NEXT: tail bar
; RV32I-NEXT: .LBB23_3:
; RV32I-NEXT: ret
;
@@ -642,7 +642,7 @@ define void @or_uge_eq(i32 signext %0, i32 signext %1, i32 signext %2, i32 signe
; RV64I-NEXT: # %bb.1:
; RV64I-NEXT: beq a2, a3, .LBB23_3
; RV64I-NEXT: # %bb.2:
-; RV64I-NEXT: tail bar@plt
+; RV64I-NEXT: tail bar
; RV64I-NEXT: .LBB23_3:
; RV64I-NEXT: ret
%5 = icmp uge i32 %0, %1
@@ -665,7 +665,7 @@ define void @or_ule_eq(i32 signext %0, i32 signext %1, i32 signext %2, i32 signe
; RV32I-NEXT: # %bb.1:
; RV32I-NEXT: beq a2, a3, .LBB24_3
; RV32I-NEXT: # %bb.2:
-; RV32I-NEXT: tail bar@plt
+; RV32I-NEXT: tail bar
; RV32I-NEXT: .LBB24_3:
; RV32I-NEXT: ret
;
@@ -675,7 +675,7 @@ define void @or_ule_eq(i32 signext %0, i32 signext %1, i32 signext %2, i32 signe
; RV64I-NEXT: # %bb.1:
; RV64I-NEXT: beq a2, a3, .LBB24_3
; RV64I-NEXT: # %bb.2:
-; RV64I-NEXT: tail bar@plt
+; RV64I-NEXT: tail bar
; RV64I-NEXT: .LBB24_3:
; RV64I-NEXT: ret
%5 = icmp ule i32 %0, %1
@@ -698,7 +698,7 @@ define void @or_sge_ne(i32 signext %0, i32 signext %1, i32 signext %2, i32 signe
; RV32I-NEXT: # %bb.1:
; RV32I-NEXT: bne a2, a3, .LBB25_3
; RV32I-NEXT: # %bb.2:
-; RV32I-NEXT: tail bar@plt
+; RV32I-NEXT: tail bar
; RV32I-NEXT: .LBB25_3:
; RV32I-NEXT: ret
;
@@ -708,7 +708,7 @@ define void @or_sge_ne(i32 signext %0, i32 signext %1, i32 signext %2, i32 signe
; RV64I-NEXT: # %bb.1:
; RV64I-NEXT: bne a2, a3, .LBB25_3
; RV64I-NEXT: # %bb.2:
-; RV64I-NEXT: tail bar@plt
+; RV64I-NEXT: tail bar
; RV64I-NEXT: .LBB25_3:
; RV64I-NEXT: ret
%5 = icmp sge i32 %0, %1
@@ -731,7 +731,7 @@ define void @or_sle_ne(i32 signext %0, i32 signext %1, i32 signext %2, i32 signe
; RV32I-NEXT: # %bb.1:
; RV32I-NEXT: bne a2, a3, .LBB26_3
; RV32I-NEXT: # %bb.2:
-; RV32I-NEXT: tail bar@plt
+; RV32I-NEXT: tail bar
; RV32I-NEXT: .LBB26_3:
; RV32I-NEXT: ret
;
@@ -741,7 +741,7 @@ define void @or_sle_ne(i32 signext %0, i32 signext %1, i32 signext %2, i32 signe
; RV64I-NEXT: # %bb.1:
; RV64I-NEXT: bne a2, a3, .LBB26_3
; RV64I-NEXT: # %bb.2:
-; RV64I-NEXT: tail bar@plt
+; RV64I-NEXT: tail bar
; RV64I-NEXT: .LBB26_3:
; RV64I-NEXT: ret
%5 = icmp sle i32 %0, %1
@@ -764,7 +764,7 @@ define void @or_uge_ne(i32 signext %0, i32 signext %1, i32 signext %2, i32 signe
; RV32I-NEXT: # %bb.1:
; RV32I-NEXT: bne a2, a3, .LBB27_3
; RV32I-NEXT: # %bb.2:
-; RV32I-NEXT: tail bar@plt
+; RV32I-NEXT: tail bar
; RV32I-NEXT: .LBB27_3:
; RV32I-NEXT: ret
;
@@ -774,7 +774,7 @@ define void @or_uge_ne(i32 signext %0, i32 signext %1, i32 signext %2, i32 signe
; RV64I-NEXT: # %bb.1:
; RV64I-NEXT: bne a2, a3, .LBB27_3
; RV64I-NEXT: # %bb.2:
-; RV64I-NEXT: tail bar@plt
+; RV64I-NEXT: tail bar
; RV64I-NEXT: .LBB27_3:
; RV64I-NEXT: ret
%5 = icmp uge i32 %0, %1
@@ -797,7 +797,7 @@ define void @or_ule_ne(i32 signext %0, i32 signext %1, i32 signext %2, i32 signe
; RV32I-NEXT: # %bb.1:
; RV32I-NEXT: bne a2, a3, .LBB28_3
; RV32I-NEXT: # %bb.2:
-; RV32I-NEXT: tail bar@plt
+; RV32I-NEXT: tail bar
; RV32I-NEXT: .LBB28_3:
; RV32I-NEXT: ret
;
@@ -807,7 +807,7 @@ define void @or_ule_ne(i32 signext %0, i32 signext %1, i32 signext %2, i32 signe
; RV64I-NEXT: # %bb.1:
; RV64I-NEXT: bne a2, a3, .LBB28_3
; RV64I-NEXT: # %bb.2:
-; RV64I-NEXT: tail bar@plt
+; RV64I-NEXT: tail bar
; RV64I-NEXT: .LBB28_3:
; RV64I-NEXT: ret
%5 = icmp ule i32 %0, %1
@@ -832,7 +832,7 @@ define void @and_eq_sge(i32 signext %0, i32 signext %1, i32 signext %2, i32 sign
; RV32I-NEXT: # %bb.2:
; RV32I-NEXT: ret
; RV32I-NEXT: .LBB29_3:
-; RV32I-NEXT: tail bar@plt
+; RV32I-NEXT: tail bar
;
; RV64I-LABEL: and_eq_sge:
; RV64I: # %bb.0:
@@ -842,7 +842,7 @@ define void @and_eq_sge(i32 signext %0, i32 signext %1, i32 signext %2, i32 sign
; RV64I-NEXT: # %bb.2:
; RV64I-NEXT: ret
; RV64I-NEXT: .LBB29_3:
-; RV64I-NEXT: tail bar@plt
+; RV64I-NEXT: tail bar
%5 = icmp eq i32 %0, %1
%6 = icmp sge i32 %2, %3
%7 = and i1 %5, %6
@@ -865,7 +865,7 @@ define void @and_eq_sle(i32 signext %0, i32 signext %1, i32 signext %2, i32 sign
; RV32I-NEXT: # %bb.2:
; RV32I-NEXT: ret
; RV32I-NEXT: .LBB30_3:
-; RV32I-NEXT: tail bar@plt
+; RV32I-NEXT: tail bar
;
; RV64I-LABEL: and_eq_sle:
; RV64I: # %bb.0:
@@ -875,7 +875,7 @@ define void @and_eq_sle(i32 signext %0, i32 signext %1, i32 signext %2, i32 sign
; RV64I-NEXT: # %bb.2:
; RV64I-NEXT: ret
; RV64I-NEXT: .LBB30_3:
-; RV64I-NEXT: tail bar@plt
+; RV64I-NEXT: tail bar
%5 = icmp eq i32 %0, %1
%6 = icmp sle i32 %2, %3
%7 = and i1 %5, %6
@@ -898,7 +898,7 @@ define void @and_eq_uge(i32 signext %0, i32 signext %1, i32 signext %2, i32 sign
; RV32I-NEXT: # %bb.2:
; RV32I-NEXT: ret
; RV32I-NEXT: .LBB31_3:
-; RV32I-NEXT: tail bar@plt
+; RV32I-NEXT: tail bar
;
; RV64I-LABEL: and_eq_uge:
; RV64I: # %bb.0:
@@ -908,7 +908,7 @@ define void @and_eq_uge(i32 signext %0, i32 signext %1, i32 signext %2, i32 sign
; RV64I-NEXT: # %bb.2:
; RV64I-NEXT: ret
; RV64I-NEXT: .LBB31_3:
-; RV64I-NEXT: tail bar@plt
+; RV64I-NEXT: tail bar
%5 = icmp eq i32 %0, %1
%6 = icmp uge i32 %2, %3
%7 = and i1 %5, %6
@@ -931,7 +931,7 @@ define void @and_eq_ule(i32 signext %0, i32 signext %1, i32 signext %2, i32 sign
; RV32I-NEXT: # %bb.2:
; RV32I-NEXT: ret
; RV32I-NEXT: .LBB32_3:
-; RV32I-NEXT: tail bar@plt
+; RV32I-NEXT: tail bar
;
; RV64I-LABEL: and_eq_ule:
; RV64I: # %bb.0:
@@ -941,7 +941,7 @@ define void @and_eq_ule(i32 signext %0, i32 signext %1, i32 signext %2, i32 sign
; RV64I-NEXT: # %bb.2:
; RV64I-NEXT: ret
; RV64I-NEXT: .LBB32_3:
-; RV64I-NEXT: tail bar@plt
+; RV64I-NEXT: tail bar
%5 = icmp eq i32 %0, %1
%6 = icmp ule i32 %2, %3
%7 = and i1 %5, %6
@@ -964,7 +964,7 @@ define void @and_ne_sge(i32 signext %0, i32 signext %1, i32 signext %2, i32 sign
; RV32I-NEXT: # %bb.2:
; RV32I-NEXT: ret
; RV32I-NEXT: .LBB33_3:
-; RV32I-NEXT: tail bar@plt
+; RV32I-NEXT: tail bar
;
; RV64I-LABEL: and_ne_sge:
; RV64I: # %bb.0:
@@ -974,7 +974,7 @@ define void @and_ne_sge(i32 signext %0, i32 signext %1, i32 signext %2, i32 sign
; RV64I-NEXT: # %bb.2:
; RV64I-NEXT: ret
; RV64I-NEXT: .LBB33_3:
-; RV64I-NEXT: tail bar@plt
+; RV64I-NEXT: tail bar
%5 = icmp ne i32 %0, %1
%6 = icmp sge i32 %2, %3
%7 = and i1 %5, %6
@@ -997,7 +997,7 @@ define void @and_ne_sle(i32 signext %0, i32 signext %1, i32 signext %2, i32 sign
; RV32I-NEXT: # %bb.2:
; RV32I-NEXT: ret
; RV32I-NEXT: .LBB34_3:
-; RV32I-NEXT: tail bar@plt
+; RV32I-NEXT: tail bar
;
; RV64I-LABEL: and_ne_sle:
; RV64I: # %bb.0:
@@ -1007,7 +1007,7 @@ define void @and_ne_sle(i32 signext %0, i32 signext %1, i32 signext %2, i32 sign
; RV64I-NEXT: # %bb.2:
; RV64I-NEXT: ret
; RV64I-NEXT: .LBB34_3:
-; RV64I-NEXT: tail bar@plt
+; RV64I-NEXT: tail bar
%5 = icmp ne i32 %0, %1
%6 = icmp sle i32 %2, %3
%7 = and i1 %5, %6
@@ -1030,7 +1030,7 @@ define void @and_ne_uge(i32 signext %0, i32 signext %1, i32 signext %2, i32 sign
; RV32I-NEXT: # %bb.2:
; RV32I-NEXT: ret
; RV32I-NEXT: .LBB35_3:
-; RV32I-NEXT: tail bar@plt
+; RV32I-NEXT: tail bar
;
; RV64I-LABEL: and_ne_uge:
; RV64I: # %bb.0:
@@ -1040,7 +1040,7 @@ define void @and_ne_uge(i32 signext %0, i32 signext %1, i32 signext %2, i32 sign
; RV64I-NEXT: # %bb.2:
; RV64I-NEXT: ret
; RV64I-NEXT: .LBB35_3:
-; RV64I-NEXT: tail bar@plt
+; RV64I-NEXT: tail bar
%5 = icmp ne i32 %0, %1
%6 = icmp uge i32 %2, %3
%7 = and i1 %5, %6
@@ -1063,7 +1063,7 @@ define void @and_ne_ule(i32 signext %0, i32 signext %1, i32 signext %2, i32 sign
; RV32I-NEXT: # %bb.2:
; RV32I-NEXT: ret
; RV32I-NEXT: .LBB36_3:
-; RV32I-NEXT: tail bar@plt
+; RV32I-NEXT: tail bar
;
; RV64I-LABEL: and_ne_ule:
; RV64I: # %bb.0:
@@ -1073,7 +1073,7 @@ define void @and_ne_ule(i32 signext %0, i32 signext %1, i32 signext %2, i32 sign
; RV64I-NEXT: # %bb.2:
; RV64I-NEXT: ret
; RV64I-NEXT: .LBB36_3:
-; RV64I-NEXT: tail bar@plt
+; RV64I-NEXT: tail bar
%5 = icmp ne i32 %0, %1
%6 = icmp ule i32 %2, %3
%7 = and i1 %5, %6
@@ -1096,7 +1096,7 @@ define void @and_sge_gt0(i32 signext %0, i32 signext %1, i32 signext %2) {
; RV32I-NEXT: # %bb.2:
; RV32I-NEXT: ret
; RV32I-NEXT: .LBB37_3:
-; RV32I-NEXT: tail bar@plt
+; RV32I-NEXT: tail bar
;
; RV64I-LABEL: and_sge_gt0:
; RV64I: # %bb.0:
@@ -1106,7 +1106,7 @@ define void @and_sge_gt0(i32 signext %0, i32 signext %1, i32 signext %2) {
; RV64I-NEXT: # %bb.2:
; RV64I-NEXT: ret
; RV64I-NEXT: .LBB37_3:
-; RV64I-NEXT: tail bar@plt
+; RV64I-NEXT: tail bar
%4 = icmp sge i32 %0, %1
%5 = icmp sgt i32 %2, 0
%6 = and i1 %4, %5
@@ -1129,7 +1129,7 @@ define void @and_sle_lt1(i32 signext %0, i32 signext %1, i32 signext %2) {
; RV32I-NEXT: # %bb.2:
; RV32I-NEXT: ret
; RV32I-NEXT: .LBB38_3:
-; RV32I-NEXT: tail bar@plt
+; RV32I-NEXT: tail bar
;
; RV64I-LABEL: and_sle_lt1:
; RV64I: # %bb.0:
@@ -1139,7 +1139,7 @@ define void @and_sle_lt1(i32 signext %0, i32 signext %1, i32 signext %2) {
; RV64I-NEXT: # %bb.2:
; RV64I-NEXT: ret
; RV64I-NEXT: .LBB38_3:
-; RV64I-NEXT: tail bar@plt
+; RV64I-NEXT: tail bar
%4 = icmp sle i32 %0, %1
%5 = icmp slt i32 %2, 1
%6 = and i1 %4, %5
@@ -1160,7 +1160,7 @@ define void @or_uge_gt0(i32 signext %0, i32 signext %1, i32 signext %2) {
; RV32I-NEXT: # %bb.1:
; RV32I-NEXT: bgtz a2, .LBB39_3
; RV32I-NEXT: # %bb.2:
-; RV32I-NEXT: tail bar@plt
+; RV32I-NEXT: tail bar
; RV32I-NEXT: .LBB39_3:
; RV32I-NEXT: ret
;
@@ -1170,7 +1170,7 @@ define void @or_uge_gt0(i32 signext %0, i32 signext %1, i32 signext %2) {
; RV64I-NEXT: # %bb.1:
; RV64I-NEXT: bgtz a2, .LBB39_3
; RV64I-NEXT: # %bb.2:
-; RV64I-NEXT: tail bar@plt
+; RV64I-NEXT: tail bar
; RV64I-NEXT: .LBB39_3:
; RV64I-NEXT: ret
%4 = icmp uge i32 %0, %1
@@ -1193,7 +1193,7 @@ define void @or_ule_lt1(i32 signext %0, i32 signext %1, i32 signext %2) {
; RV32I-NEXT: # %bb.1:
; RV32I-NEXT: blez a2, .LBB40_3
; RV32I-NEXT: # %bb.2:
-; RV32I-NEXT: tail bar@plt
+; RV32I-NEXT: tail bar
; RV32I-NEXT: .LBB40_3:
; RV32I-NEXT: ret
;
@@ -1203,7 +1203,7 @@ define void @or_ule_lt1(i32 signext %0, i32 signext %1, i32 signext %2) {
; RV64I-NEXT: # %bb.1:
; RV64I-NEXT: blez a2, .LBB40_3
; RV64I-NEXT: # %bb.2:
-; RV64I-NEXT: tail bar@plt
+; RV64I-NEXT: tail bar
; RV64I-NEXT: .LBB40_3:
; RV64I-NEXT: ret
%4 = icmp ule i32 %0, %1
diff --git a/llvm/test/CodeGen/RISCV/sextw-removal.ll b/llvm/test/CodeGen/RISCV/sextw-removal.ll
index e730092674e0..9c56c03f3609 100644
--- a/llvm/test/CodeGen/RISCV/sextw-removal.ll
+++ b/llvm/test/CodeGen/RISCV/sextw-removal.ll
@@ -18,7 +18,7 @@ define void @test1(i32 signext %arg, i32 signext %arg1) nounwind {
; CHECK-NEXT: .LBB0_1: # %bb2
; CHECK-NEXT: # =>This Inner Loop Header: Depth=1
; CHECK-NEXT: mv a0, s1
-; CHECK-NEXT: call bar@plt
+; CHECK-NEXT: call bar
; CHECK-NEXT: sllw s1, s1, s0
; CHECK-NEXT: bnez a0, .LBB0_1
; CHECK-NEXT: # %bb.2: # %bb7
@@ -39,7 +39,7 @@ define void @test1(i32 signext %arg, i32 signext %arg1) nounwind {
; NOREMOVAL-NEXT: .LBB0_1: # %bb2
; NOREMOVAL-NEXT: # =>This Inner Loop Header: Depth=1
; NOREMOVAL-NEXT: sext.w a0, s1
-; NOREMOVAL-NEXT: call bar@plt
+; NOREMOVAL-NEXT: call bar
; NOREMOVAL-NEXT: sllw s1, s1, s0
; NOREMOVAL-NEXT: bnez a0, .LBB0_1
; NOREMOVAL-NEXT: # %bb.2: # %bb7
@@ -186,7 +186,7 @@ define void @test5(i32 signext %arg, i32 signext %arg1) nounwind {
; RV64I-NEXT: addi s3, a1, 257
; RV64I-NEXT: .LBB4_1: # %bb2
; RV64I-NEXT: # =>This Inner Loop Header: Depth=1
-; RV64I-NEXT: call bar@plt
+; RV64I-NEXT: call bar
; RV64I-NEXT: mv a1, a0
; RV64I-NEXT: srli a0, a0, 1
; RV64I-NEXT: and a0, a0, s0
@@ -217,7 +217,7 @@ define void @test5(i32 signext %arg, i32 signext %arg1) nounwind {
; RV64ZBB-NEXT: sraw a0, a0, a1
; RV64ZBB-NEXT: .LBB4_1: # %bb2
; RV64ZBB-NEXT: # =>This Inner Loop Header: Depth=1
-; RV64ZBB-NEXT: call bar@plt
+; RV64ZBB-NEXT: call bar
; RV64ZBB-NEXT: mv a1, a0
; RV64ZBB-NEXT: cpopw a0, a0
; RV64ZBB-NEXT: bnez a1, .LBB4_1
@@ -234,7 +234,7 @@ define void @test5(i32 signext %arg, i32 signext %arg1) nounwind {
; NOREMOVAL-NEXT: .LBB4_1: # %bb2
; NOREMOVAL-NEXT: # =>This Inner Loop Header: Depth=1
; NOREMOVAL-NEXT: sext.w a0, a1
-; NOREMOVAL-NEXT: call bar@plt
+; NOREMOVAL-NEXT: call bar
; NOREMOVAL-NEXT: cpopw a1, a0
; NOREMOVAL-NEXT: bnez a0, .LBB4_1
; NOREMOVAL-NEXT: # %bb.2: # %bb7
@@ -268,7 +268,7 @@ define void @test6(i32 signext %arg, i32 signext %arg1) nounwind {
; CHECK-NEXT: fmv.w.x fs0, zero
; CHECK-NEXT: .LBB5_1: # %bb2
; CHECK-NEXT: # =>This Inner Loop Header: Depth=1
-; CHECK-NEXT: call baz@plt
+; CHECK-NEXT: call baz
; CHECK-NEXT: feq.s a1, fa0, fs0
; CHECK-NEXT: fcvt.w.s a0, fa0, rtz
; CHECK-NEXT: beqz a1, .LBB5_1
@@ -288,7 +288,7 @@ define void @test6(i32 signext %arg, i32 signext %arg1) nounwind {
; NOREMOVAL-NEXT: .LBB5_1: # %bb2
; NOREMOVAL-NEXT: # =>This Inner Loop Header: Depth=1
; NOREMOVAL-NEXT: sext.w a0, a0
-; NOREMOVAL-NEXT: call baz@plt
+; NOREMOVAL-NEXT: call baz
; NOREMOVAL-NEXT: feq.s a1, fa0, fs0
; NOREMOVAL-NEXT: fcvt.w.s a0, fa0, rtz
; NOREMOVAL-NEXT: beqz a1, .LBB5_1
@@ -341,7 +341,7 @@ define void @test7(i32 signext %arg, i32 signext %arg1) nounwind {
; RV64I-NEXT: add s3, s3, a1
; RV64I-NEXT: .LBB6_1: # %bb2
; RV64I-NEXT: # =>This Inner Loop Header: Depth=1
-; RV64I-NEXT: call foo@plt
+; RV64I-NEXT: call foo
; RV64I-NEXT: srli a1, a0, 1
; RV64I-NEXT: and a1, a1, s0
; RV64I-NEXT: sub a0, a0, a1
@@ -371,7 +371,7 @@ define void @test7(i32 signext %arg, i32 signext %arg1) nounwind {
; RV64ZBB-NEXT: sraw a0, a0, a1
; RV64ZBB-NEXT: .LBB6_1: # %bb2
; RV64ZBB-NEXT: # =>This Inner Loop Header: Depth=1
-; RV64ZBB-NEXT: call foo@plt
+; RV64ZBB-NEXT: call foo
; RV64ZBB-NEXT: cpop a0, a0
; RV64ZBB-NEXT: bnez a0, .LBB6_1
; RV64ZBB-NEXT: # %bb.2: # %bb7
@@ -387,7 +387,7 @@ define void @test7(i32 signext %arg, i32 signext %arg1) nounwind {
; NOREMOVAL-NEXT: .LBB6_1: # %bb2
; NOREMOVAL-NEXT: # =>This Inner Loop Header: Depth=1
; NOREMOVAL-NEXT: sext.w a0, a0
-; NOREMOVAL-NEXT: call foo@plt
+; NOREMOVAL-NEXT: call foo
; NOREMOVAL-NEXT: cpop a0, a0
; NOREMOVAL-NEXT: bnez a0, .LBB6_1
; NOREMOVAL-NEXT: # %bb.2: # %bb7
@@ -420,7 +420,7 @@ define void @test8(i32 signext %arg, i32 signext %arg1) nounwind {
; CHECK-NEXT: sraw a0, a0, a1
; CHECK-NEXT: .LBB7_1: # %bb2
; CHECK-NEXT: # =>This Inner Loop Header: Depth=1
-; CHECK-NEXT: call foo@plt
+; CHECK-NEXT: call foo
; CHECK-NEXT: ori a0, a0, -256
; CHECK-NEXT: bnez a0, .LBB7_1
; CHECK-NEXT: # %bb.2: # %bb7
@@ -436,7 +436,7 @@ define void @test8(i32 signext %arg, i32 signext %arg1) nounwind {
; NOREMOVAL-NEXT: .LBB7_1: # %bb2
; NOREMOVAL-NEXT: # =>This Inner Loop Header: Depth=1
; NOREMOVAL-NEXT: sext.w a0, a0
-; NOREMOVAL-NEXT: call foo@plt
+; NOREMOVAL-NEXT: call foo
; NOREMOVAL-NEXT: ori a0, a0, -256
; NOREMOVAL-NEXT: bnez a0, .LBB7_1
; NOREMOVAL-NEXT: # %bb.2: # %bb7
@@ -471,7 +471,7 @@ define void @test9(i32 signext %arg, i32 signext %arg1) nounwind {
; CHECK-NEXT: li s0, 254
; CHECK-NEXT: .LBB8_1: # %bb2
; CHECK-NEXT: # =>This Inner Loop Header: Depth=1
-; CHECK-NEXT: call bar@plt
+; CHECK-NEXT: call bar
; CHECK-NEXT: mv a1, a0
; CHECK-NEXT: slti a0, a0, 255
; CHECK-NEXT: blt s0, a1, .LBB8_1
@@ -491,7 +491,7 @@ define void @test9(i32 signext %arg, i32 signext %arg1) nounwind {
; NOREMOVAL-NEXT: .LBB8_1: # %bb2
; NOREMOVAL-NEXT: # =>This Inner Loop Header: Depth=1
; NOREMOVAL-NEXT: sext.w a0, a1
-; NOREMOVAL-NEXT: call bar@plt
+; NOREMOVAL-NEXT: call bar
; NOREMOVAL-NEXT: slti a1, a0, 255
; NOREMOVAL-NEXT: blt s0, a0, .LBB8_1
; NOREMOVAL-NEXT: # %bb.2: # %bb7
@@ -525,7 +525,7 @@ define void @test10(i32 signext %arg, i32 signext %arg1) nounwind {
; CHECK-NEXT: fmv.w.x fs0, zero
; CHECK-NEXT: .LBB9_1: # %bb2
; CHECK-NEXT: # =>This Inner Loop Header: Depth=1
-; CHECK-NEXT: call baz@plt
+; CHECK-NEXT: call baz
; CHECK-NEXT: feq.s a1, fa0, fs0
; CHECK-NEXT: fmv.x.w a0, fa0
; CHECK-NEXT: beqz a1, .LBB9_1
@@ -545,7 +545,7 @@ define void @test10(i32 signext %arg, i32 signext %arg1) nounwind {
; NOREMOVAL-NEXT: .LBB9_1: # %bb2
; NOREMOVAL-NEXT: # =>This Inner Loop Header: Depth=1
; NOREMOVAL-NEXT: sext.w a0, a0
-; NOREMOVAL-NEXT: call baz@plt
+; NOREMOVAL-NEXT: call baz
; NOREMOVAL-NEXT: feq.s a1, fa0, fs0
; NOREMOVAL-NEXT: fmv.x.w a0, fa0
; NOREMOVAL-NEXT: beqz a1, .LBB9_1
@@ -1152,12 +1152,12 @@ define void @test16(i32 signext %arg, i32 signext %arg1) nounwind {
; CHECK-NEXT: sd s0, 16(sp) # 8-byte Folded Spill
; CHECK-NEXT: sd s1, 8(sp) # 8-byte Folded Spill
; CHECK-NEXT: mv s0, a1
-; CHECK-NEXT: call bar@plt
+; CHECK-NEXT: call bar
; CHECK-NEXT: mv s1, a0
; CHECK-NEXT: .LBB19_1: # %bb2
; CHECK-NEXT: # =>This Inner Loop Header: Depth=1
; CHECK-NEXT: mv a0, s1
-; CHECK-NEXT: call bar@plt
+; CHECK-NEXT: call bar
; CHECK-NEXT: sllw s1, s1, s0
; CHECK-NEXT: bnez a0, .LBB19_1
; CHECK-NEXT: # %bb.2: # %bb7
@@ -1174,12 +1174,12 @@ define void @test16(i32 signext %arg, i32 signext %arg1) nounwind {
; NOREMOVAL-NEXT: sd s0, 16(sp) # 8-byte Folded Spill
; NOREMOVAL-NEXT: sd s1, 8(sp) # 8-byte Folded Spill
; NOREMOVAL-NEXT: mv s0, a1
-; NOREMOVAL-NEXT: call bar@plt
+; NOREMOVAL-NEXT: call bar
; NOREMOVAL-NEXT: mv s1, a0
; NOREMOVAL-NEXT: .LBB19_1: # %bb2
; NOREMOVAL-NEXT: # =>This Inner Loop Header: Depth=1
; NOREMOVAL-NEXT: sext.w a0, s1
-; NOREMOVAL-NEXT: call bar@plt
+; NOREMOVAL-NEXT: call bar
; NOREMOVAL-NEXT: sllw s1, s1, s0
; NOREMOVAL-NEXT: bnez a0, .LBB19_1
; NOREMOVAL-NEXT: # %bb.2: # %bb7
@@ -1211,12 +1211,12 @@ define void @test17(i32 signext %arg, i32 signext %arg1) nounwind {
; CHECK-NEXT: sd s0, 16(sp) # 8-byte Folded Spill
; CHECK-NEXT: sd s1, 8(sp) # 8-byte Folded Spill
; CHECK-NEXT: mv s0, a1
-; CHECK-NEXT: call bat@plt
+; CHECK-NEXT: call bat
; CHECK-NEXT: mv s1, a0
; CHECK-NEXT: .LBB20_1: # %bb2
; CHECK-NEXT: # =>This Inner Loop Header: Depth=1
; CHECK-NEXT: mv a0, s1
-; CHECK-NEXT: call bar@plt
+; CHECK-NEXT: call bar
; CHECK-NEXT: sllw s1, s1, s0
; CHECK-NEXT: bnez a0, .LBB20_1
; CHECK-NEXT: # %bb.2: # %bb7
@@ -1233,12 +1233,12 @@ define void @test17(i32 signext %arg, i32 signext %arg1) nounwind {
; NOREMOVAL-NEXT: sd s0, 16(sp) # 8-byte Folded Spill
; NOREMOVAL-NEXT: sd s1, 8(sp) # 8-byte Folded Spill
; NOREMOVAL-NEXT: mv s0, a1
-; NOREMOVAL-NEXT: call bat@plt
+; NOREMOVAL-NEXT: call bat
; NOREMOVAL-NEXT: mv s1, a0
; NOREMOVAL-NEXT: .LBB20_1: # %bb2
; NOREMOVAL-NEXT: # =>This Inner Loop Header: Depth=1
; NOREMOVAL-NEXT: sext.w a0, s1
-; NOREMOVAL-NEXT: call bar@plt
+; NOREMOVAL-NEXT: call bar
; NOREMOVAL-NEXT: sllw s1, s1, s0
; NOREMOVAL-NEXT: bnez a0, .LBB20_1
; NOREMOVAL-NEXT: # %bb.2: # %bb7
@@ -1276,7 +1276,7 @@ define void @test18(i32 signext %arg, i32 signext %arg1) nounwind {
; CHECK-NEXT: .LBB21_1: # %bb2
; CHECK-NEXT: # =>This Inner Loop Header: Depth=1
; CHECK-NEXT: mv a0, s1
-; CHECK-NEXT: call bar@plt
+; CHECK-NEXT: call bar
; CHECK-NEXT: sllw s1, s1, s0
; CHECK-NEXT: bnez a0, .LBB21_1
; CHECK-NEXT: # %bb.2: # %bb7
@@ -1297,7 +1297,7 @@ define void @test18(i32 signext %arg, i32 signext %arg1) nounwind {
; NOREMOVAL-NEXT: .LBB21_1: # %bb2
; NOREMOVAL-NEXT: # =>This Inner Loop Header: Depth=1
; NOREMOVAL-NEXT: sext.w a0, s1
-; NOREMOVAL-NEXT: call bar@plt
+; NOREMOVAL-NEXT: call bar
; NOREMOVAL-NEXT: sllw s1, s1, s0
; NOREMOVAL-NEXT: bnez a0, .LBB21_1
; NOREMOVAL-NEXT: # %bb.2: # %bb7
@@ -1385,10 +1385,10 @@ define signext i32 @test19(i64 %arg, i1 zeroext %c1, i1 zeroext %c2, ptr %p) nou
; CHECK-NEXT: beqz a2, .LBB23_2
; CHECK-NEXT: # %bb.1: # %bb2
; CHECK-NEXT: li a0, 0
-; CHECK-NEXT: call bar@plt
+; CHECK-NEXT: call bar
; CHECK-NEXT: mv s0, a0
; CHECK-NEXT: .LBB23_2: # %bb7
-; CHECK-NEXT: call side_effect@plt
+; CHECK-NEXT: call side_effect
; CHECK-NEXT: sext.w a0, s0
; CHECK-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; CHECK-NEXT: ld s0, 0(sp) # 8-byte Folded Reload
@@ -1409,10 +1409,10 @@ define signext i32 @test19(i64 %arg, i1 zeroext %c1, i1 zeroext %c2, ptr %p) nou
; NOREMOVAL-NEXT: beqz a2, .LBB23_2
; NOREMOVAL-NEXT: # %bb.1: # %bb2
; NOREMOVAL-NEXT: li a0, 0
-; NOREMOVAL-NEXT: call bar@plt
+; NOREMOVAL-NEXT: call bar
; NOREMOVAL-NEXT: mv s0, a0
; NOREMOVAL-NEXT: .LBB23_2: # %bb7
-; NOREMOVAL-NEXT: call side_effect@plt
+; NOREMOVAL-NEXT: call side_effect
; NOREMOVAL-NEXT: sext.w a0, s0
; NOREMOVAL-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; NOREMOVAL-NEXT: ld s0, 0(sp) # 8-byte Folded Reload
@@ -1450,7 +1450,7 @@ define void @test20(<vscale x 1 x i32> %arg, i32 signext %arg1) nounwind {
; CHECK-NEXT: .LBB24_1: # %bb2
; CHECK-NEXT: # =>This Inner Loop Header: Depth=1
; CHECK-NEXT: mv a0, s1
-; CHECK-NEXT: call bar@plt
+; CHECK-NEXT: call bar
; CHECK-NEXT: sllw s1, s1, s0
; CHECK-NEXT: bnez a0, .LBB24_1
; CHECK-NEXT: # %bb.2: # %bb7
@@ -1472,7 +1472,7 @@ define void @test20(<vscale x 1 x i32> %arg, i32 signext %arg1) nounwind {
; NOREMOVAL-NEXT: .LBB24_1: # %bb2
; NOREMOVAL-NEXT: # =>This Inner Loop Header: Depth=1
; NOREMOVAL-NEXT: sext.w a0, s1
-; NOREMOVAL-NEXT: call bar@plt
+; NOREMOVAL-NEXT: call bar
; NOREMOVAL-NEXT: sllw s1, s1, s0
; NOREMOVAL-NEXT: bnez a0, .LBB24_1
; NOREMOVAL-NEXT: # %bb.2: # %bb7
diff --git a/llvm/test/CodeGen/RISCV/shadowcallstack.ll b/llvm/test/CodeGen/RISCV/shadowcallstack.ll
index fee067ee3ad1..b41b87aaf4d0 100644
--- a/llvm/test/CodeGen/RISCV/shadowcallstack.ll
+++ b/llvm/test/CodeGen/RISCV/shadowcallstack.ll
@@ -20,11 +20,11 @@ declare void @foo()
define void @f2() shadowcallstack {
; RV32-LABEL: f2:
; RV32: # %bb.0:
-; RV32-NEXT: tail foo@plt
+; RV32-NEXT: tail foo
;
; RV64-LABEL: f2:
; RV64: # %bb.0:
-; RV64-NEXT: tail foo@plt
+; RV64-NEXT: tail foo
tail call void @foo()
ret void
}
@@ -41,7 +41,7 @@ define i32 @f3() shadowcallstack {
; RV32-NEXT: .cfi_def_cfa_offset 16
; RV32-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32-NEXT: .cfi_offset ra, -4
-; RV32-NEXT: call bar@plt
+; RV32-NEXT: call bar
; RV32-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32-NEXT: addi sp, sp, 16
; RV32-NEXT: lw ra, -4(gp)
@@ -58,7 +58,7 @@ define i32 @f3() shadowcallstack {
; RV64-NEXT: .cfi_def_cfa_offset 16
; RV64-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64-NEXT: .cfi_offset ra, -8
-; RV64-NEXT: call bar@plt
+; RV64-NEXT: call bar
; RV64-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64-NEXT: addi sp, sp, 16
; RV64-NEXT: ld ra, -8(gp)
@@ -86,13 +86,13 @@ define i32 @f4() shadowcallstack {
; RV32-NEXT: .cfi_offset s0, -8
; RV32-NEXT: .cfi_offset s1, -12
; RV32-NEXT: .cfi_offset s2, -16
-; RV32-NEXT: call bar@plt
+; RV32-NEXT: call bar
; RV32-NEXT: mv s0, a0
-; RV32-NEXT: call bar@plt
+; RV32-NEXT: call bar
; RV32-NEXT: mv s1, a0
-; RV32-NEXT: call bar@plt
+; RV32-NEXT: call bar
; RV32-NEXT: mv s2, a0
-; RV32-NEXT: call bar@plt
+; RV32-NEXT: call bar
; RV32-NEXT: add s0, s0, s1
; RV32-NEXT: add a0, s2, a0
; RV32-NEXT: add a0, s0, a0
@@ -121,13 +121,13 @@ define i32 @f4() shadowcallstack {
; RV64-NEXT: .cfi_offset s0, -16
; RV64-NEXT: .cfi_offset s1, -24
; RV64-NEXT: .cfi_offset s2, -32
-; RV64-NEXT: call bar@plt
+; RV64-NEXT: call bar
; RV64-NEXT: mv s0, a0
-; RV64-NEXT: call bar@plt
+; RV64-NEXT: call bar
; RV64-NEXT: mv s1, a0
-; RV64-NEXT: call bar@plt
+; RV64-NEXT: call bar
; RV64-NEXT: mv s2, a0
-; RV64-NEXT: call bar@plt
+; RV64-NEXT: call bar
; RV64-NEXT: add s0, s0, s1
; RV64-NEXT: add a0, s2, a0
; RV64-NEXT: addw a0, s0, a0
@@ -157,7 +157,7 @@ define i32 @f5() shadowcallstack nounwind {
; RV32-NEXT: sw ra, -4(gp)
; RV32-NEXT: addi sp, sp, -16
; RV32-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32-NEXT: call bar@plt
+; RV32-NEXT: call bar
; RV32-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32-NEXT: addi sp, sp, 16
; RV32-NEXT: lw ra, -4(gp)
@@ -170,7 +170,7 @@ define i32 @f5() shadowcallstack nounwind {
; RV64-NEXT: sd ra, -8(gp)
; RV64-NEXT: addi sp, sp, -16
; RV64-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
-; RV64-NEXT: call bar@plt
+; RV64-NEXT: call bar
; RV64-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64-NEXT: addi sp, sp, 16
; RV64-NEXT: ld ra, -8(gp)
diff --git a/llvm/test/CodeGen/RISCV/shifts.ll b/llvm/test/CodeGen/RISCV/shifts.ll
index 97121c275a29..f61cbfd3ed72 100644
--- a/llvm/test/CodeGen/RISCV/shifts.ll
+++ b/llvm/test/CodeGen/RISCV/shifts.ll
@@ -43,7 +43,7 @@ define i64 @lshr64_minsize(i64 %a, i64 %b) minsize nounwind {
; RV32I: # %bb.0:
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT: call __lshrdi3@plt
+; RV32I-NEXT: call __lshrdi3
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
@@ -89,7 +89,7 @@ define i64 @ashr64_minsize(i64 %a, i64 %b) minsize nounwind {
; RV32I: # %bb.0:
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT: call __ashrdi3@plt
+; RV32I-NEXT: call __ashrdi3
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
@@ -135,7 +135,7 @@ define i64 @shl64_minsize(i64 %a, i64 %b) minsize nounwind {
; RV32I: # %bb.0:
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT: call __ashldi3@plt
+; RV32I-NEXT: call __ashldi3
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
diff --git a/llvm/test/CodeGen/RISCV/short-forward-branch-opt.ll b/llvm/test/CodeGen/RISCV/short-forward-branch-opt.ll
index 725b8fd6eeea..d007c245d21b 100644
--- a/llvm/test/CodeGen/RISCV/short-forward-branch-opt.ll
+++ b/llvm/test/CodeGen/RISCV/short-forward-branch-opt.ll
@@ -431,7 +431,7 @@ define void @sextw_removal_ccor(i1 %c, i32 signext %arg, i32 signext %arg1, i32
; NOSFB-NEXT: .LBB15_1: # %bb2
; NOSFB-NEXT: # =>This Inner Loop Header: Depth=1
; NOSFB-NEXT: mv a0, s1
-; NOSFB-NEXT: call bar@plt
+; NOSFB-NEXT: call bar
; NOSFB-NEXT: sllw s1, s1, s0
; NOSFB-NEXT: bnez a0, .LBB15_1
; NOSFB-NEXT: # %bb.2: # %bb7
@@ -457,7 +457,7 @@ define void @sextw_removal_ccor(i1 %c, i32 signext %arg, i32 signext %arg1, i32
; RV64SFB-NEXT: .LBB15_1: # %bb2
; RV64SFB-NEXT: # =>This Inner Loop Header: Depth=1
; RV64SFB-NEXT: mv a0, s0
-; RV64SFB-NEXT: call bar@plt
+; RV64SFB-NEXT: call bar
; RV64SFB-NEXT: sllw s0, s0, s1
; RV64SFB-NEXT: bnez a0, .LBB15_1
; RV64SFB-NEXT: # %bb.2: # %bb7
@@ -483,7 +483,7 @@ define void @sextw_removal_ccor(i1 %c, i32 signext %arg, i32 signext %arg1, i32
; ZICOND-NEXT: .LBB15_1: # %bb2
; ZICOND-NEXT: # =>This Inner Loop Header: Depth=1
; ZICOND-NEXT: mv a0, s0
-; ZICOND-NEXT: call bar@plt
+; ZICOND-NEXT: call bar
; ZICOND-NEXT: sllw s0, s0, s1
; ZICOND-NEXT: bnez a0, .LBB15_1
; ZICOND-NEXT: # %bb.2: # %bb7
@@ -509,7 +509,7 @@ define void @sextw_removal_ccor(i1 %c, i32 signext %arg, i32 signext %arg1, i32
; RV32SFB-NEXT: .LBB15_1: # %bb2
; RV32SFB-NEXT: # =>This Inner Loop Header: Depth=1
; RV32SFB-NEXT: mv a0, s0
-; RV32SFB-NEXT: call bar@plt
+; RV32SFB-NEXT: call bar
; RV32SFB-NEXT: sll s0, s0, s1
; RV32SFB-NEXT: bnez a0, .LBB15_1
; RV32SFB-NEXT: # %bb.2: # %bb7
@@ -550,7 +550,7 @@ define void @sextw_removal_ccaddw(i1 %c, i32 signext %arg, i32 signext %arg1, i3
; NOSFB-NEXT: .LBB16_1: # %bb2
; NOSFB-NEXT: # =>This Inner Loop Header: Depth=1
; NOSFB-NEXT: mv a0, s1
-; NOSFB-NEXT: call bar@plt
+; NOSFB-NEXT: call bar
; NOSFB-NEXT: sllw s1, s1, s0
; NOSFB-NEXT: bnez a0, .LBB16_1
; NOSFB-NEXT: # %bb.2: # %bb7
@@ -576,7 +576,7 @@ define void @sextw_removal_ccaddw(i1 %c, i32 signext %arg, i32 signext %arg1, i3
; RV64SFB-NEXT: .LBB16_1: # %bb2
; RV64SFB-NEXT: # =>This Inner Loop Header: Depth=1
; RV64SFB-NEXT: mv a0, s0
-; RV64SFB-NEXT: call bar@plt
+; RV64SFB-NEXT: call bar
; RV64SFB-NEXT: sllw s0, s0, s1
; RV64SFB-NEXT: bnez a0, .LBB16_1
; RV64SFB-NEXT: # %bb.2: # %bb7
@@ -602,7 +602,7 @@ define void @sextw_removal_ccaddw(i1 %c, i32 signext %arg, i32 signext %arg1, i3
; ZICOND-NEXT: .LBB16_1: # %bb2
; ZICOND-NEXT: # =>This Inner Loop Header: Depth=1
; ZICOND-NEXT: mv a0, s0
-; ZICOND-NEXT: call bar@plt
+; ZICOND-NEXT: call bar
; ZICOND-NEXT: sllw s0, s0, s1
; ZICOND-NEXT: bnez a0, .LBB16_1
; ZICOND-NEXT: # %bb.2: # %bb7
@@ -628,7 +628,7 @@ define void @sextw_removal_ccaddw(i1 %c, i32 signext %arg, i32 signext %arg1, i3
; RV32SFB-NEXT: .LBB16_1: # %bb2
; RV32SFB-NEXT: # =>This Inner Loop Header: Depth=1
; RV32SFB-NEXT: mv a0, s0
-; RV32SFB-NEXT: call bar@plt
+; RV32SFB-NEXT: call bar
; RV32SFB-NEXT: sll s0, s0, s1
; RV32SFB-NEXT: bnez a0, .LBB16_1
; RV32SFB-NEXT: # %bb.2: # %bb7
diff --git a/llvm/test/CodeGen/RISCV/shrinkwrap-jump-table.ll b/llvm/test/CodeGen/RISCV/shrinkwrap-jump-table.ll
index 1c57b0f7e603..5e557de37423 100644
--- a/llvm/test/CodeGen/RISCV/shrinkwrap-jump-table.ll
+++ b/llvm/test/CodeGen/RISCV/shrinkwrap-jump-table.ll
@@ -23,21 +23,21 @@ define dso_local signext i32 @test_shrinkwrap_jump_table(ptr noundef %m) local_u
; CHECK-NEXT: lw a1, 0(a1)
; CHECK-NEXT: jr a1
; CHECK-NEXT: .LBB0_2: # %sw.bb
-; CHECK-NEXT: tail func1@plt
+; CHECK-NEXT: tail func1
; CHECK-NEXT: .LBB0_3: # %sw.bb7
-; CHECK-NEXT: tail func5@plt
+; CHECK-NEXT: tail func5
; CHECK-NEXT: .LBB0_4: # %sw.bb3
-; CHECK-NEXT: tail func3@plt
+; CHECK-NEXT: tail func3
; CHECK-NEXT: .LBB0_5: # %sw.bb5
-; CHECK-NEXT: tail func4@plt
+; CHECK-NEXT: tail func4
; CHECK-NEXT: .LBB0_6: # %sw.bb1
-; CHECK-NEXT: tail func2@plt
+; CHECK-NEXT: tail func2
; CHECK-NEXT: .LBB0_7: # %sw.default
; CHECK-NEXT: addi sp, sp, -16
; CHECK-NEXT: .cfi_def_cfa_offset 16
; CHECK-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; CHECK-NEXT: .cfi_offset ra, -8
-; CHECK-NEXT: call default_func@plt
+; CHECK-NEXT: call default_func
; CHECK-NEXT: li a0, 0
; CHECK-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; CHECK-NEXT: addi sp, sp, 16
diff --git a/llvm/test/CodeGen/RISCV/shrinkwrap.ll b/llvm/test/CodeGen/RISCV/shrinkwrap.ll
index 16f062a78323..40577701e1e2 100644
--- a/llvm/test/CodeGen/RISCV/shrinkwrap.ll
+++ b/llvm/test/CodeGen/RISCV/shrinkwrap.ll
@@ -22,7 +22,7 @@ define void @eliminate_restore(i32 %n) nounwind {
; RV32I-SW-NO-NEXT: addi sp, sp, 16
; RV32I-SW-NO-NEXT: ret
; RV32I-SW-NO-NEXT: .LBB0_2: # %if.then
-; RV32I-SW-NO-NEXT: call abort@plt
+; RV32I-SW-NO-NEXT: call abort
;
; RV32I-SW-LABEL: eliminate_restore:
; RV32I-SW: # %bb.0:
@@ -33,7 +33,7 @@ define void @eliminate_restore(i32 %n) nounwind {
; RV32I-SW-NEXT: .LBB0_2: # %if.then
; RV32I-SW-NEXT: addi sp, sp, -16
; RV32I-SW-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-SW-NEXT: call abort@plt
+; RV32I-SW-NEXT: call abort
;
; RV32I-SW-SR-LABEL: eliminate_restore:
; RV32I-SW-SR: # %bb.0:
@@ -43,7 +43,7 @@ define void @eliminate_restore(i32 %n) nounwind {
; RV32I-SW-SR-NEXT: ret
; RV32I-SW-SR-NEXT: .LBB0_2: # %if.then
; RV32I-SW-SR-NEXT: call t0, __riscv_save_0
-; RV32I-SW-SR-NEXT: call abort@plt
+; RV32I-SW-SR-NEXT: call abort
;
; RV64I-SW-LABEL: eliminate_restore:
; RV64I-SW: # %bb.0:
@@ -55,7 +55,7 @@ define void @eliminate_restore(i32 %n) nounwind {
; RV64I-SW-NEXT: .LBB0_2: # %if.then
; RV64I-SW-NEXT: addi sp, sp, -16
; RV64I-SW-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-SW-NEXT: call abort@plt
+; RV64I-SW-NEXT: call abort
%cmp = icmp ule i32 %n, 32
br i1 %cmp, label %if.then, label %if.end
@@ -83,7 +83,7 @@ define void @conditional_alloca(i32 %n) nounwind {
; RV32I-SW-NO-NEXT: andi a0, a0, -16
; RV32I-SW-NO-NEXT: sub a0, sp, a0
; RV32I-SW-NO-NEXT: mv sp, a0
-; RV32I-SW-NO-NEXT: call notdead@plt
+; RV32I-SW-NO-NEXT: call notdead
; RV32I-SW-NO-NEXT: .LBB1_2: # %if.end
; RV32I-SW-NO-NEXT: addi sp, s0, -16
; RV32I-SW-NO-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
@@ -104,7 +104,7 @@ define void @conditional_alloca(i32 %n) nounwind {
; RV32I-SW-NEXT: andi a0, a0, -16
; RV32I-SW-NEXT: sub a0, sp, a0
; RV32I-SW-NEXT: mv sp, a0
-; RV32I-SW-NEXT: call notdead@plt
+; RV32I-SW-NEXT: call notdead
; RV32I-SW-NEXT: addi sp, s0, -16
; RV32I-SW-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-SW-NEXT: lw s0, 8(sp) # 4-byte Folded Reload
@@ -123,7 +123,7 @@ define void @conditional_alloca(i32 %n) nounwind {
; RV32I-SW-SR-NEXT: andi a0, a0, -16
; RV32I-SW-SR-NEXT: sub a0, sp, a0
; RV32I-SW-SR-NEXT: mv sp, a0
-; RV32I-SW-SR-NEXT: call notdead@plt
+; RV32I-SW-SR-NEXT: call notdead
; RV32I-SW-SR-NEXT: addi sp, s0, -16
; RV32I-SW-SR-NEXT: tail __riscv_restore_1
; RV32I-SW-SR-NEXT: .LBB1_2: # %if.end
@@ -145,7 +145,7 @@ define void @conditional_alloca(i32 %n) nounwind {
; RV64I-SW-NEXT: andi a0, a0, -16
; RV64I-SW-NEXT: sub a0, sp, a0
; RV64I-SW-NEXT: mv sp, a0
-; RV64I-SW-NEXT: call notdead@plt
+; RV64I-SW-NEXT: call notdead
; RV64I-SW-NEXT: addi sp, s0, -16
; RV64I-SW-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-SW-NEXT: ld s0, 0(sp) # 8-byte Folded Reload
diff --git a/llvm/test/CodeGen/RISCV/split-sp-adjust.ll b/llvm/test/CodeGen/RISCV/split-sp-adjust.ll
index 63a2c04de6c1..7889e005399f 100644
--- a/llvm/test/CodeGen/RISCV/split-sp-adjust.ll
+++ b/llvm/test/CodeGen/RISCV/split-sp-adjust.ll
@@ -10,7 +10,7 @@ define i32 @SplitSP() nounwind {
; RV32I-NEXT: sw ra, 2028(sp) # 4-byte Folded Spill
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: addi a0, sp, 16
-; RV32I-NEXT: call foo@plt
+; RV32I-NEXT: call foo
; RV32I-NEXT: li a0, 0
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: lw ra, 2028(sp) # 4-byte Folded Reload
@@ -29,7 +29,7 @@ define i32 @NoSplitSP() nounwind {
; RV32I-NEXT: addi sp, sp, -2032
; RV32I-NEXT: sw ra, 2028(sp) # 4-byte Folded Spill
; RV32I-NEXT: addi a0, sp, 4
-; RV32I-NEXT: call foo@plt
+; RV32I-NEXT: call foo
; RV32I-NEXT: li a0, 0
; RV32I-NEXT: lw ra, 2028(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 2032
diff --git a/llvm/test/CodeGen/RISCV/split-udiv-by-constant.ll b/llvm/test/CodeGen/RISCV/split-udiv-by-constant.ll
index b15f17ea9fb5..5fa802b7f27c 100644
--- a/llvm/test/CodeGen/RISCV/split-udiv-by-constant.ll
+++ b/llvm/test/CodeGen/RISCV/split-udiv-by-constant.ll
@@ -121,7 +121,7 @@ define iXLen2 @test_udiv_7(iXLen2 %x) nounwind {
; RV32-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32-NEXT: li a2, 7
; RV32-NEXT: li a3, 0
-; RV32-NEXT: call __udivdi3@plt
+; RV32-NEXT: call __udivdi3
; RV32-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32-NEXT: addi sp, sp, 16
; RV32-NEXT: ret
@@ -132,7 +132,7 @@ define iXLen2 @test_udiv_7(iXLen2 %x) nounwind {
; RV64-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64-NEXT: li a2, 7
; RV64-NEXT: li a3, 0
-; RV64-NEXT: call __udivti3@plt
+; RV64-NEXT: call __udivti3
; RV64-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64-NEXT: addi sp, sp, 16
; RV64-NEXT: ret
@@ -147,7 +147,7 @@ define iXLen2 @test_udiv_9(iXLen2 %x) nounwind {
; RV32-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32-NEXT: li a2, 9
; RV32-NEXT: li a3, 0
-; RV32-NEXT: call __udivdi3@plt
+; RV32-NEXT: call __udivdi3
; RV32-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32-NEXT: addi sp, sp, 16
; RV32-NEXT: ret
@@ -158,7 +158,7 @@ define iXLen2 @test_udiv_9(iXLen2 %x) nounwind {
; RV64-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64-NEXT: li a2, 9
; RV64-NEXT: li a3, 0
-; RV64-NEXT: call __udivti3@plt
+; RV64-NEXT: call __udivti3
; RV64-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64-NEXT: addi sp, sp, 16
; RV64-NEXT: ret
diff --git a/llvm/test/CodeGen/RISCV/split-urem-by-constant.ll b/llvm/test/CodeGen/RISCV/split-urem-by-constant.ll
index cdfb1ef0ab4d..8444520fcc77 100644
--- a/llvm/test/CodeGen/RISCV/split-urem-by-constant.ll
+++ b/llvm/test/CodeGen/RISCV/split-urem-by-constant.ll
@@ -83,7 +83,7 @@ define iXLen2 @test_urem_7(iXLen2 %x) nounwind {
; RV32-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32-NEXT: li a2, 7
; RV32-NEXT: li a3, 0
-; RV32-NEXT: call __umoddi3@plt
+; RV32-NEXT: call __umoddi3
; RV32-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32-NEXT: addi sp, sp, 16
; RV32-NEXT: ret
@@ -94,7 +94,7 @@ define iXLen2 @test_urem_7(iXLen2 %x) nounwind {
; RV64-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64-NEXT: li a2, 7
; RV64-NEXT: li a3, 0
-; RV64-NEXT: call __umodti3@plt
+; RV64-NEXT: call __umodti3
; RV64-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64-NEXT: addi sp, sp, 16
; RV64-NEXT: ret
@@ -109,7 +109,7 @@ define iXLen2 @test_urem_9(iXLen2 %x) nounwind {
; RV32-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32-NEXT: li a2, 9
; RV32-NEXT: li a3, 0
-; RV32-NEXT: call __umoddi3@plt
+; RV32-NEXT: call __umoddi3
; RV32-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32-NEXT: addi sp, sp, 16
; RV32-NEXT: ret
@@ -120,7 +120,7 @@ define iXLen2 @test_urem_9(iXLen2 %x) nounwind {
; RV64-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64-NEXT: li a2, 9
; RV64-NEXT: li a3, 0
-; RV64-NEXT: call __umodti3@plt
+; RV64-NEXT: call __umodti3
; RV64-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64-NEXT: addi sp, sp, 16
; RV64-NEXT: ret
diff --git a/llvm/test/CodeGen/RISCV/srem-lkk.ll b/llvm/test/CodeGen/RISCV/srem-lkk.ll
index 1dcb04382392..7c291bbceedc 100644
--- a/llvm/test/CodeGen/RISCV/srem-lkk.ll
+++ b/llvm/test/CodeGen/RISCV/srem-lkk.ll
@@ -12,7 +12,7 @@ define i32 @fold_srem_positive_odd(i32 %x) nounwind {
; RV32I-LABEL: fold_srem_positive_odd:
; RV32I: # %bb.0:
; RV32I-NEXT: li a1, 95
-; RV32I-NEXT: tail __modsi3@plt
+; RV32I-NEXT: tail __modsi3
;
; RV32IM-LABEL: fold_srem_positive_odd:
; RV32IM: # %bb.0:
@@ -34,7 +34,7 @@ define i32 @fold_srem_positive_odd(i32 %x) nounwind {
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT: sext.w a0, a0
; RV64I-NEXT: li a1, 95
-; RV64I-NEXT: call __moddi3@plt
+; RV64I-NEXT: call __moddi3
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
@@ -63,7 +63,7 @@ define i32 @fold_srem_positive_even(i32 %x) nounwind {
; RV32I-LABEL: fold_srem_positive_even:
; RV32I: # %bb.0:
; RV32I-NEXT: li a1, 1060
-; RV32I-NEXT: tail __modsi3@plt
+; RV32I-NEXT: tail __modsi3
;
; RV32IM-LABEL: fold_srem_positive_even:
; RV32IM: # %bb.0:
@@ -84,7 +84,7 @@ define i32 @fold_srem_positive_even(i32 %x) nounwind {
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT: sext.w a0, a0
; RV64I-NEXT: li a1, 1060
-; RV64I-NEXT: call __moddi3@plt
+; RV64I-NEXT: call __moddi3
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
@@ -111,7 +111,7 @@ define i32 @fold_srem_negative_odd(i32 %x) nounwind {
; RV32I-LABEL: fold_srem_negative_odd:
; RV32I: # %bb.0:
; RV32I-NEXT: li a1, -723
-; RV32I-NEXT: tail __modsi3@plt
+; RV32I-NEXT: tail __modsi3
;
; RV32IM-LABEL: fold_srem_negative_odd:
; RV32IM: # %bb.0:
@@ -132,7 +132,7 @@ define i32 @fold_srem_negative_odd(i32 %x) nounwind {
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT: sext.w a0, a0
; RV64I-NEXT: li a1, -723
-; RV64I-NEXT: call __moddi3@plt
+; RV64I-NEXT: call __moddi3
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
@@ -160,7 +160,7 @@ define i32 @fold_srem_negative_even(i32 %x) nounwind {
; RV32I: # %bb.0:
; RV32I-NEXT: lui a1, 1048570
; RV32I-NEXT: addi a1, a1, 1595
-; RV32I-NEXT: tail __modsi3@plt
+; RV32I-NEXT: tail __modsi3
;
; RV32IM-LABEL: fold_srem_negative_even:
; RV32IM: # %bb.0:
@@ -183,7 +183,7 @@ define i32 @fold_srem_negative_even(i32 %x) nounwind {
; RV64I-NEXT: sext.w a0, a0
; RV64I-NEXT: lui a1, 1048570
; RV64I-NEXT: addiw a1, a1, 1595
-; RV64I-NEXT: call __moddi3@plt
+; RV64I-NEXT: call __moddi3
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
@@ -217,11 +217,11 @@ define i32 @combine_srem_sdiv(i32 %x) nounwind {
; RV32I-NEXT: sw s1, 4(sp) # 4-byte Folded Spill
; RV32I-NEXT: mv s0, a0
; RV32I-NEXT: li a1, 95
-; RV32I-NEXT: call __modsi3@plt
+; RV32I-NEXT: call __modsi3
; RV32I-NEXT: mv s1, a0
; RV32I-NEXT: li a1, 95
; RV32I-NEXT: mv a0, s0
-; RV32I-NEXT: call __divsi3@plt
+; RV32I-NEXT: call __divsi3
; RV32I-NEXT: add a0, s1, a0
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: lw s0, 8(sp) # 4-byte Folded Reload
@@ -253,11 +253,11 @@ define i32 @combine_srem_sdiv(i32 %x) nounwind {
; RV64I-NEXT: sext.w s0, a0
; RV64I-NEXT: li a1, 95
; RV64I-NEXT: mv a0, s0
-; RV64I-NEXT: call __moddi3@plt
+; RV64I-NEXT: call __moddi3
; RV64I-NEXT: mv s1, a0
; RV64I-NEXT: li a1, 95
; RV64I-NEXT: mv a0, s0
-; RV64I-NEXT: call __divdi3@plt
+; RV64I-NEXT: call __divdi3
; RV64I-NEXT: addw a0, s1, a0
; RV64I-NEXT: ld ra, 24(sp) # 8-byte Folded Reload
; RV64I-NEXT: ld s0, 16(sp) # 8-byte Folded Reload
@@ -391,7 +391,7 @@ define i64 @dont_fold_srem_i64(i64 %x) nounwind {
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT: li a2, 98
; RV32I-NEXT: li a3, 0
-; RV32I-NEXT: call __moddi3@plt
+; RV32I-NEXT: call __moddi3
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
@@ -402,7 +402,7 @@ define i64 @dont_fold_srem_i64(i64 %x) nounwind {
; RV32IM-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32IM-NEXT: li a2, 98
; RV32IM-NEXT: li a3, 0
-; RV32IM-NEXT: call __moddi3@plt
+; RV32IM-NEXT: call __moddi3
; RV32IM-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32IM-NEXT: addi sp, sp, 16
; RV32IM-NEXT: ret
@@ -410,7 +410,7 @@ define i64 @dont_fold_srem_i64(i64 %x) nounwind {
; RV64I-LABEL: dont_fold_srem_i64:
; RV64I: # %bb.0:
; RV64I-NEXT: li a1, 98
-; RV64I-NEXT: tail __moddi3@plt
+; RV64I-NEXT: tail __moddi3
;
; RV64IM-LABEL: dont_fold_srem_i64:
; RV64IM: # %bb.0:
diff --git a/llvm/test/CodeGen/RISCV/srem-seteq-illegal-types.ll b/llvm/test/CodeGen/RISCV/srem-seteq-illegal-types.ll
index 122388c1b73e..30ac8de517f6 100644
--- a/llvm/test/CodeGen/RISCV/srem-seteq-illegal-types.ll
+++ b/llvm/test/CodeGen/RISCV/srem-seteq-illegal-types.ll
@@ -13,7 +13,7 @@ define i1 @test_srem_odd(i29 %X) nounwind {
; RV32-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32-NEXT: lui a1, 128424
; RV32-NEXT: addi a1, a1, 331
-; RV32-NEXT: call __mulsi3@plt
+; RV32-NEXT: call __mulsi3
; RV32-NEXT: lui a1, 662
; RV32-NEXT: addi a1, a1, -83
; RV32-NEXT: add a0, a0, a1
@@ -32,7 +32,7 @@ define i1 @test_srem_odd(i29 %X) nounwind {
; RV64-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64-NEXT: lui a1, 128424
; RV64-NEXT: addiw a1, a1, 331
-; RV64-NEXT: call __muldi3@plt
+; RV64-NEXT: call __muldi3
; RV64-NEXT: lui a1, 662
; RV64-NEXT: addi a1, a1, -83
; RV64-NEXT: add a0, a0, a1
@@ -117,7 +117,7 @@ define i1 @test_srem_even(i4 %X) nounwind {
; RV32-NEXT: slli a0, a0, 28
; RV32-NEXT: srai a0, a0, 28
; RV32-NEXT: li a1, 6
-; RV32-NEXT: call __modsi3@plt
+; RV32-NEXT: call __modsi3
; RV32-NEXT: addi a0, a0, -1
; RV32-NEXT: seqz a0, a0
; RV32-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
@@ -131,7 +131,7 @@ define i1 @test_srem_even(i4 %X) nounwind {
; RV64-NEXT: slli a0, a0, 60
; RV64-NEXT: srai a0, a0, 60
; RV64-NEXT: li a1, 6
-; RV64-NEXT: call __moddi3@plt
+; RV64-NEXT: call __moddi3
; RV64-NEXT: addi a0, a0, -1
; RV64-NEXT: seqz a0, a0
; RV64-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
@@ -330,21 +330,21 @@ define void @test_srem_vec(ptr %X) nounwind {
; RV32-NEXT: srai a1, a1, 31
; RV32-NEXT: li a2, 6
; RV32-NEXT: li a3, 0
-; RV32-NEXT: call __moddi3@plt
+; RV32-NEXT: call __moddi3
; RV32-NEXT: mv s5, a0
; RV32-NEXT: mv s6, a1
; RV32-NEXT: li a2, 7
; RV32-NEXT: mv a0, s2
; RV32-NEXT: mv a1, s4
; RV32-NEXT: li a3, 0
-; RV32-NEXT: call __moddi3@plt
+; RV32-NEXT: call __moddi3
; RV32-NEXT: mv s2, a0
; RV32-NEXT: mv s4, a1
; RV32-NEXT: li a2, -5
; RV32-NEXT: li a3, -1
; RV32-NEXT: mv a0, s1
; RV32-NEXT: mv a1, s3
-; RV32-NEXT: call __moddi3@plt
+; RV32-NEXT: call __moddi3
; RV32-NEXT: or a2, s5, s6
; RV32-NEXT: snez a2, a2
; RV32-NEXT: xori a0, a0, 2
@@ -403,18 +403,18 @@ define void @test_srem_vec(ptr %X) nounwind {
; RV64-NEXT: slli a2, a2, 31
; RV64-NEXT: srai s2, a2, 31
; RV64-NEXT: li a1, 7
-; RV64-NEXT: call __moddi3@plt
+; RV64-NEXT: call __moddi3
; RV64-NEXT: mv s3, a0
; RV64-NEXT: li a1, -5
; RV64-NEXT: mv a0, s1
-; RV64-NEXT: call __moddi3@plt
+; RV64-NEXT: call __moddi3
; RV64-NEXT: mv s1, a0
; RV64-NEXT: lui a0, 699051
; RV64-NEXT: addiw a1, a0, -1365
; RV64-NEXT: slli a0, a1, 32
; RV64-NEXT: add a1, a1, a0
; RV64-NEXT: mv a0, s2
-; RV64-NEXT: call __muldi3@plt
+; RV64-NEXT: call __muldi3
; RV64-NEXT: lui a1, %hi(.LCPI3_0)
; RV64-NEXT: ld a1, %lo(.LCPI3_0)(a1)
; RV64-NEXT: add a0, a0, a1
@@ -482,21 +482,21 @@ define void @test_srem_vec(ptr %X) nounwind {
; RV32M-NEXT: srai a1, a1, 31
; RV32M-NEXT: li a2, 6
; RV32M-NEXT: li a3, 0
-; RV32M-NEXT: call __moddi3@plt
+; RV32M-NEXT: call __moddi3
; RV32M-NEXT: mv s5, a0
; RV32M-NEXT: mv s6, a1
; RV32M-NEXT: li a2, 7
; RV32M-NEXT: mv a0, s2
; RV32M-NEXT: mv a1, s4
; RV32M-NEXT: li a3, 0
-; RV32M-NEXT: call __moddi3@plt
+; RV32M-NEXT: call __moddi3
; RV32M-NEXT: mv s2, a0
; RV32M-NEXT: mv s4, a1
; RV32M-NEXT: li a2, -5
; RV32M-NEXT: li a3, -1
; RV32M-NEXT: mv a0, s1
; RV32M-NEXT: mv a1, s3
-; RV32M-NEXT: call __moddi3@plt
+; RV32M-NEXT: call __moddi3
; RV32M-NEXT: or a2, s5, s6
; RV32M-NEXT: snez a2, a2
; RV32M-NEXT: xori a0, a0, 2
@@ -632,7 +632,7 @@ define void @test_srem_vec(ptr %X) nounwind {
; RV32MV-NEXT: srai a1, a1, 31
; RV32MV-NEXT: li a2, 6
; RV32MV-NEXT: li a3, 0
-; RV32MV-NEXT: call __moddi3@plt
+; RV32MV-NEXT: call __moddi3
; RV32MV-NEXT: vsetivli zero, 8, e32, m2, ta, ma
; RV32MV-NEXT: vmv.v.x v8, a0
; RV32MV-NEXT: vslide1down.vx v8, v8, a1
@@ -642,7 +642,7 @@ define void @test_srem_vec(ptr %X) nounwind {
; RV32MV-NEXT: mv a0, s2
; RV32MV-NEXT: mv a1, s4
; RV32MV-NEXT: li a3, 0
-; RV32MV-NEXT: call __moddi3@plt
+; RV32MV-NEXT: call __moddi3
; RV32MV-NEXT: vsetivli zero, 8, e32, m2, ta, ma
; RV32MV-NEXT: addi a2, sp, 16
; RV32MV-NEXT: vl2r.v v8, (a2) # Unknown-size Folded Reload
@@ -654,7 +654,7 @@ define void @test_srem_vec(ptr %X) nounwind {
; RV32MV-NEXT: li a3, -1
; RV32MV-NEXT: mv a0, s1
; RV32MV-NEXT: mv a1, s3
-; RV32MV-NEXT: call __moddi3@plt
+; RV32MV-NEXT: call __moddi3
; RV32MV-NEXT: vsetivli zero, 8, e32, m2, ta, ma
; RV32MV-NEXT: addi a2, sp, 16
; RV32MV-NEXT: vl2r.v v8, (a2) # Unknown-size Folded Reload
diff --git a/llvm/test/CodeGen/RISCV/srem-vector-lkk.ll b/llvm/test/CodeGen/RISCV/srem-vector-lkk.ll
index 3335ca3a34b6..ec6e978c2c68 100644
--- a/llvm/test/CodeGen/RISCV/srem-vector-lkk.ll
+++ b/llvm/test/CodeGen/RISCV/srem-vector-lkk.ll
@@ -25,19 +25,19 @@ define <4 x i16> @fold_srem_vec_1(<4 x i16> %x) nounwind {
; RV32I-NEXT: mv s3, a0
; RV32I-NEXT: li a1, 95
; RV32I-NEXT: mv a0, a2
-; RV32I-NEXT: call __modsi3@plt
+; RV32I-NEXT: call __modsi3
; RV32I-NEXT: mv s4, a0
; RV32I-NEXT: li a1, -124
; RV32I-NEXT: mv a0, s2
-; RV32I-NEXT: call __modsi3@plt
+; RV32I-NEXT: call __modsi3
; RV32I-NEXT: mv s2, a0
; RV32I-NEXT: li a1, 98
; RV32I-NEXT: mv a0, s1
-; RV32I-NEXT: call __modsi3@plt
+; RV32I-NEXT: call __modsi3
; RV32I-NEXT: mv s1, a0
; RV32I-NEXT: li a1, -1003
; RV32I-NEXT: mv a0, s0
-; RV32I-NEXT: call __modsi3@plt
+; RV32I-NEXT: call __modsi3
; RV32I-NEXT: sh a0, 6(s3)
; RV32I-NEXT: sh s1, 4(s3)
; RV32I-NEXT: sh s2, 2(s3)
@@ -117,19 +117,19 @@ define <4 x i16> @fold_srem_vec_1(<4 x i16> %x) nounwind {
; RV64I-NEXT: mv s3, a0
; RV64I-NEXT: li a1, 95
; RV64I-NEXT: mv a0, a2
-; RV64I-NEXT: call __moddi3@plt
+; RV64I-NEXT: call __moddi3
; RV64I-NEXT: mv s4, a0
; RV64I-NEXT: li a1, -124
; RV64I-NEXT: mv a0, s2
-; RV64I-NEXT: call __moddi3@plt
+; RV64I-NEXT: call __moddi3
; RV64I-NEXT: mv s2, a0
; RV64I-NEXT: li a1, 98
; RV64I-NEXT: mv a0, s1
-; RV64I-NEXT: call __moddi3@plt
+; RV64I-NEXT: call __moddi3
; RV64I-NEXT: mv s1, a0
; RV64I-NEXT: li a1, -1003
; RV64I-NEXT: mv a0, s0
-; RV64I-NEXT: call __moddi3@plt
+; RV64I-NEXT: call __moddi3
; RV64I-NEXT: sh a0, 6(s3)
; RV64I-NEXT: sh s1, 4(s3)
; RV64I-NEXT: sh s2, 2(s3)
@@ -213,19 +213,19 @@ define <4 x i16> @fold_srem_vec_2(<4 x i16> %x) nounwind {
; RV32I-NEXT: mv s3, a0
; RV32I-NEXT: li a1, 95
; RV32I-NEXT: mv a0, a2
-; RV32I-NEXT: call __modsi3@plt
+; RV32I-NEXT: call __modsi3
; RV32I-NEXT: mv s4, a0
; RV32I-NEXT: li a1, 95
; RV32I-NEXT: mv a0, s2
-; RV32I-NEXT: call __modsi3@plt
+; RV32I-NEXT: call __modsi3
; RV32I-NEXT: mv s2, a0
; RV32I-NEXT: li a1, 95
; RV32I-NEXT: mv a0, s1
-; RV32I-NEXT: call __modsi3@plt
+; RV32I-NEXT: call __modsi3
; RV32I-NEXT: mv s1, a0
; RV32I-NEXT: li a1, 95
; RV32I-NEXT: mv a0, s0
-; RV32I-NEXT: call __modsi3@plt
+; RV32I-NEXT: call __modsi3
; RV32I-NEXT: sh a0, 6(s3)
; RV32I-NEXT: sh s1, 4(s3)
; RV32I-NEXT: sh s2, 2(s3)
@@ -298,19 +298,19 @@ define <4 x i16> @fold_srem_vec_2(<4 x i16> %x) nounwind {
; RV64I-NEXT: mv s3, a0
; RV64I-NEXT: li a1, 95
; RV64I-NEXT: mv a0, a2
-; RV64I-NEXT: call __moddi3@plt
+; RV64I-NEXT: call __moddi3
; RV64I-NEXT: mv s4, a0
; RV64I-NEXT: li a1, 95
; RV64I-NEXT: mv a0, s2
-; RV64I-NEXT: call __moddi3@plt
+; RV64I-NEXT: call __moddi3
; RV64I-NEXT: mv s2, a0
; RV64I-NEXT: li a1, 95
; RV64I-NEXT: mv a0, s1
-; RV64I-NEXT: call __moddi3@plt
+; RV64I-NEXT: call __moddi3
; RV64I-NEXT: mv s1, a0
; RV64I-NEXT: li a1, 95
; RV64I-NEXT: mv a0, s0
-; RV64I-NEXT: call __moddi3@plt
+; RV64I-NEXT: call __moddi3
; RV64I-NEXT: sh a0, 6(s3)
; RV64I-NEXT: sh s1, 4(s3)
; RV64I-NEXT: sh s2, 2(s3)
@@ -393,35 +393,35 @@ define <4 x i16> @combine_srem_sdiv(<4 x i16> %x) nounwind {
; RV32I-NEXT: mv s0, a0
; RV32I-NEXT: li a1, 95
; RV32I-NEXT: mv a0, s4
-; RV32I-NEXT: call __modsi3@plt
+; RV32I-NEXT: call __modsi3
; RV32I-NEXT: mv s5, a0
; RV32I-NEXT: li a1, 95
; RV32I-NEXT: mv a0, s3
-; RV32I-NEXT: call __modsi3@plt
+; RV32I-NEXT: call __modsi3
; RV32I-NEXT: mv s6, a0
; RV32I-NEXT: li a1, 95
; RV32I-NEXT: mv a0, s2
-; RV32I-NEXT: call __modsi3@plt
+; RV32I-NEXT: call __modsi3
; RV32I-NEXT: mv s7, a0
; RV32I-NEXT: li a1, 95
; RV32I-NEXT: mv a0, s1
-; RV32I-NEXT: call __modsi3@plt
+; RV32I-NEXT: call __modsi3
; RV32I-NEXT: mv s8, a0
; RV32I-NEXT: li a1, 95
; RV32I-NEXT: mv a0, s4
-; RV32I-NEXT: call __divsi3@plt
+; RV32I-NEXT: call __divsi3
; RV32I-NEXT: mv s4, a0
; RV32I-NEXT: li a1, 95
; RV32I-NEXT: mv a0, s3
-; RV32I-NEXT: call __divsi3@plt
+; RV32I-NEXT: call __divsi3
; RV32I-NEXT: mv s3, a0
; RV32I-NEXT: li a1, 95
; RV32I-NEXT: mv a0, s2
-; RV32I-NEXT: call __divsi3@plt
+; RV32I-NEXT: call __divsi3
; RV32I-NEXT: mv s2, a0
; RV32I-NEXT: li a1, 95
; RV32I-NEXT: mv a0, s1
-; RV32I-NEXT: call __divsi3@plt
+; RV32I-NEXT: call __divsi3
; RV32I-NEXT: add a0, s8, a0
; RV32I-NEXT: add s2, s7, s2
; RV32I-NEXT: add s3, s6, s3
@@ -510,35 +510,35 @@ define <4 x i16> @combine_srem_sdiv(<4 x i16> %x) nounwind {
; RV64I-NEXT: mv s0, a0
; RV64I-NEXT: li a1, 95
; RV64I-NEXT: mv a0, s4
-; RV64I-NEXT: call __moddi3@plt
+; RV64I-NEXT: call __moddi3
; RV64I-NEXT: mv s5, a0
; RV64I-NEXT: li a1, 95
; RV64I-NEXT: mv a0, s3
-; RV64I-NEXT: call __moddi3@plt
+; RV64I-NEXT: call __moddi3
; RV64I-NEXT: mv s6, a0
; RV64I-NEXT: li a1, 95
; RV64I-NEXT: mv a0, s2
-; RV64I-NEXT: call __moddi3@plt
+; RV64I-NEXT: call __moddi3
; RV64I-NEXT: mv s7, a0
; RV64I-NEXT: li a1, 95
; RV64I-NEXT: mv a0, s1
-; RV64I-NEXT: call __moddi3@plt
+; RV64I-NEXT: call __moddi3
; RV64I-NEXT: mv s8, a0
; RV64I-NEXT: li a1, 95
; RV64I-NEXT: mv a0, s4
-; RV64I-NEXT: call __divdi3@plt
+; RV64I-NEXT: call __divdi3
; RV64I-NEXT: mv s4, a0
; RV64I-NEXT: li a1, 95
; RV64I-NEXT: mv a0, s3
-; RV64I-NEXT: call __divdi3@plt
+; RV64I-NEXT: call __divdi3
; RV64I-NEXT: mv s3, a0
; RV64I-NEXT: li a1, 95
; RV64I-NEXT: mv a0, s2
-; RV64I-NEXT: call __divdi3@plt
+; RV64I-NEXT: call __divdi3
; RV64I-NEXT: mv s2, a0
; RV64I-NEXT: li a1, 95
; RV64I-NEXT: mv a0, s1
-; RV64I-NEXT: call __divdi3@plt
+; RV64I-NEXT: call __divdi3
; RV64I-NEXT: add a0, s8, a0
; RV64I-NEXT: add s2, s7, s2
; RV64I-NEXT: add s3, s6, s3
@@ -640,7 +640,7 @@ define <4 x i16> @dont_fold_srem_power_of_two(<4 x i16> %x) nounwind {
; RV32I-NEXT: andi a1, a1, -8
; RV32I-NEXT: sub s3, a3, a1
; RV32I-NEXT: li a1, 95
-; RV32I-NEXT: call __modsi3@plt
+; RV32I-NEXT: call __modsi3
; RV32I-NEXT: sh a0, 6(s0)
; RV32I-NEXT: sh s3, 4(s0)
; RV32I-NEXT: sh s2, 2(s0)
@@ -713,7 +713,7 @@ define <4 x i16> @dont_fold_srem_power_of_two(<4 x i16> %x) nounwind {
; RV64I-NEXT: andi a1, a1, -8
; RV64I-NEXT: subw s3, a3, a1
; RV64I-NEXT: li a1, 95
-; RV64I-NEXT: call __moddi3@plt
+; RV64I-NEXT: call __moddi3
; RV64I-NEXT: sh a0, 6(s0)
; RV64I-NEXT: sh s3, 4(s0)
; RV64I-NEXT: sh s2, 2(s0)
@@ -779,16 +779,16 @@ define <4 x i16> @dont_fold_srem_one(<4 x i16> %x) nounwind {
; RV32I-NEXT: mv s2, a0
; RV32I-NEXT: li a1, 654
; RV32I-NEXT: mv a0, a2
-; RV32I-NEXT: call __modsi3@plt
+; RV32I-NEXT: call __modsi3
; RV32I-NEXT: mv s3, a0
; RV32I-NEXT: li a1, 23
; RV32I-NEXT: mv a0, s1
-; RV32I-NEXT: call __modsi3@plt
+; RV32I-NEXT: call __modsi3
; RV32I-NEXT: mv s1, a0
; RV32I-NEXT: lui a0, 1
; RV32I-NEXT: addi a1, a0, 1327
; RV32I-NEXT: mv a0, s0
-; RV32I-NEXT: call __modsi3@plt
+; RV32I-NEXT: call __modsi3
; RV32I-NEXT: sh a0, 6(s2)
; RV32I-NEXT: sh s1, 4(s2)
; RV32I-NEXT: sh s3, 2(s2)
@@ -856,16 +856,16 @@ define <4 x i16> @dont_fold_srem_one(<4 x i16> %x) nounwind {
; RV64I-NEXT: mv s2, a0
; RV64I-NEXT: li a1, 654
; RV64I-NEXT: mv a0, a2
-; RV64I-NEXT: call __moddi3@plt
+; RV64I-NEXT: call __moddi3
; RV64I-NEXT: mv s3, a0
; RV64I-NEXT: li a1, 23
; RV64I-NEXT: mv a0, s1
-; RV64I-NEXT: call __moddi3@plt
+; RV64I-NEXT: call __moddi3
; RV64I-NEXT: mv s1, a0
; RV64I-NEXT: lui a0, 1
; RV64I-NEXT: addiw a1, a0, 1327
; RV64I-NEXT: mv a0, s0
-; RV64I-NEXT: call __moddi3@plt
+; RV64I-NEXT: call __moddi3
; RV64I-NEXT: sh a0, 6(s2)
; RV64I-NEXT: sh s1, 4(s2)
; RV64I-NEXT: sh s3, 2(s2)
@@ -941,12 +941,12 @@ define <4 x i16> @dont_fold_urem_i16_smax(<4 x i16> %x) nounwind {
; RV32I-NEXT: and a1, a1, a3
; RV32I-NEXT: sub s3, a2, a1
; RV32I-NEXT: li a1, 23
-; RV32I-NEXT: call __modsi3@plt
+; RV32I-NEXT: call __modsi3
; RV32I-NEXT: mv s2, a0
; RV32I-NEXT: lui a0, 1
; RV32I-NEXT: addi a1, a0, 1327
; RV32I-NEXT: mv a0, s1
-; RV32I-NEXT: call __modsi3@plt
+; RV32I-NEXT: call __modsi3
; RV32I-NEXT: sh a0, 6(s0)
; RV32I-NEXT: sh s2, 4(s0)
; RV32I-NEXT: sh zero, 0(s0)
@@ -1013,12 +1013,12 @@ define <4 x i16> @dont_fold_urem_i16_smax(<4 x i16> %x) nounwind {
; RV64I-NEXT: and a1, a1, a3
; RV64I-NEXT: subw s3, a2, a1
; RV64I-NEXT: li a1, 23
-; RV64I-NEXT: call __moddi3@plt
+; RV64I-NEXT: call __moddi3
; RV64I-NEXT: mv s2, a0
; RV64I-NEXT: lui a0, 1
; RV64I-NEXT: addiw a1, a0, 1327
; RV64I-NEXT: mv a0, s1
-; RV64I-NEXT: call __moddi3@plt
+; RV64I-NEXT: call __moddi3
; RV64I-NEXT: sh a0, 6(s0)
; RV64I-NEXT: sh s2, 4(s0)
; RV64I-NEXT: sh zero, 0(s0)
@@ -1097,21 +1097,21 @@ define <4 x i64> @dont_fold_srem_i64(<4 x i64> %x) nounwind {
; RV32I-NEXT: li a2, 1
; RV32I-NEXT: mv a0, a3
; RV32I-NEXT: li a3, 0
-; RV32I-NEXT: call __moddi3@plt
+; RV32I-NEXT: call __moddi3
; RV32I-NEXT: mv s7, a0
; RV32I-NEXT: mv s8, a1
; RV32I-NEXT: li a2, 654
; RV32I-NEXT: mv a0, s4
; RV32I-NEXT: mv a1, s5
; RV32I-NEXT: li a3, 0
-; RV32I-NEXT: call __moddi3@plt
+; RV32I-NEXT: call __moddi3
; RV32I-NEXT: mv s4, a0
; RV32I-NEXT: mv s5, a1
; RV32I-NEXT: li a2, 23
; RV32I-NEXT: mv a0, s2
; RV32I-NEXT: mv a1, s3
; RV32I-NEXT: li a3, 0
-; RV32I-NEXT: call __moddi3@plt
+; RV32I-NEXT: call __moddi3
; RV32I-NEXT: mv s2, a0
; RV32I-NEXT: mv s3, a1
; RV32I-NEXT: lui a0, 1
@@ -1119,7 +1119,7 @@ define <4 x i64> @dont_fold_srem_i64(<4 x i64> %x) nounwind {
; RV32I-NEXT: mv a0, s0
; RV32I-NEXT: mv a1, s1
; RV32I-NEXT: li a3, 0
-; RV32I-NEXT: call __moddi3@plt
+; RV32I-NEXT: call __moddi3
; RV32I-NEXT: sw a1, 28(s6)
; RV32I-NEXT: sw a0, 24(s6)
; RV32I-NEXT: sw s3, 20(s6)
@@ -1166,21 +1166,21 @@ define <4 x i64> @dont_fold_srem_i64(<4 x i64> %x) nounwind {
; RV32IM-NEXT: li a2, 1
; RV32IM-NEXT: mv a0, a3
; RV32IM-NEXT: li a3, 0
-; RV32IM-NEXT: call __moddi3@plt
+; RV32IM-NEXT: call __moddi3
; RV32IM-NEXT: mv s7, a0
; RV32IM-NEXT: mv s8, a1
; RV32IM-NEXT: li a2, 654
; RV32IM-NEXT: mv a0, s4
; RV32IM-NEXT: mv a1, s5
; RV32IM-NEXT: li a3, 0
-; RV32IM-NEXT: call __moddi3@plt
+; RV32IM-NEXT: call __moddi3
; RV32IM-NEXT: mv s4, a0
; RV32IM-NEXT: mv s5, a1
; RV32IM-NEXT: li a2, 23
; RV32IM-NEXT: mv a0, s2
; RV32IM-NEXT: mv a1, s3
; RV32IM-NEXT: li a3, 0
-; RV32IM-NEXT: call __moddi3@plt
+; RV32IM-NEXT: call __moddi3
; RV32IM-NEXT: mv s2, a0
; RV32IM-NEXT: mv s3, a1
; RV32IM-NEXT: lui a0, 1
@@ -1188,7 +1188,7 @@ define <4 x i64> @dont_fold_srem_i64(<4 x i64> %x) nounwind {
; RV32IM-NEXT: mv a0, s0
; RV32IM-NEXT: mv a1, s1
; RV32IM-NEXT: li a3, 0
-; RV32IM-NEXT: call __moddi3@plt
+; RV32IM-NEXT: call __moddi3
; RV32IM-NEXT: sw a1, 28(s6)
; RV32IM-NEXT: sw a0, 24(s6)
; RV32IM-NEXT: sw s3, 20(s6)
@@ -1224,16 +1224,16 @@ define <4 x i64> @dont_fold_srem_i64(<4 x i64> %x) nounwind {
; RV64I-NEXT: mv s2, a0
; RV64I-NEXT: li a1, 654
; RV64I-NEXT: mv a0, a2
-; RV64I-NEXT: call __moddi3@plt
+; RV64I-NEXT: call __moddi3
; RV64I-NEXT: mv s3, a0
; RV64I-NEXT: li a1, 23
; RV64I-NEXT: mv a0, s1
-; RV64I-NEXT: call __moddi3@plt
+; RV64I-NEXT: call __moddi3
; RV64I-NEXT: mv s1, a0
; RV64I-NEXT: lui a0, 1
; RV64I-NEXT: addiw a1, a0, 1327
; RV64I-NEXT: mv a0, s0
-; RV64I-NEXT: call __moddi3@plt
+; RV64I-NEXT: call __moddi3
; RV64I-NEXT: sd a0, 24(s2)
; RV64I-NEXT: sd s1, 16(s2)
; RV64I-NEXT: sd s3, 8(s2)
diff --git a/llvm/test/CodeGen/RISCV/stack-protector-target.ll b/llvm/test/CodeGen/RISCV/stack-protector-target.ll
index 410b89df1f35..13abde7878f3 100644
--- a/llvm/test/CodeGen/RISCV/stack-protector-target.ll
+++ b/llvm/test/CodeGen/RISCV/stack-protector-target.ll
@@ -12,7 +12,7 @@ define void @func() sspreq nounwind {
; FUCHSIA-RISCV64-NEXT: ld a0, -16(tp)
; FUCHSIA-RISCV64-NEXT: sd a0, 16(sp)
; FUCHSIA-RISCV64-NEXT: addi a0, sp, 12
-; FUCHSIA-RISCV64-NEXT: call capture@plt
+; FUCHSIA-RISCV64-NEXT: call capture
; FUCHSIA-RISCV64-NEXT: ld a0, -16(tp)
; FUCHSIA-RISCV64-NEXT: ld a1, 16(sp)
; FUCHSIA-RISCV64-NEXT: bne a0, a1, .LBB0_2
@@ -21,7 +21,7 @@ define void @func() sspreq nounwind {
; FUCHSIA-RISCV64-NEXT: addi sp, sp, 32
; FUCHSIA-RISCV64-NEXT: ret
; FUCHSIA-RISCV64-NEXT: .LBB0_2: # %CallStackCheckFailBlk
-; FUCHSIA-RISCV64-NEXT: call __stack_chk_fail@plt
+; FUCHSIA-RISCV64-NEXT: call __stack_chk_fail
%1 = alloca i32, align 4
call void @capture(ptr %1)
ret void
diff --git a/llvm/test/CodeGen/RISCV/stack-realignment-with-variable-sized-objects.ll b/llvm/test/CodeGen/RISCV/stack-realignment-with-variable-sized-objects.ll
index f10dfbd313cc..c93153e8f9d1 100644
--- a/llvm/test/CodeGen/RISCV/stack-realignment-with-variable-sized-objects.ll
+++ b/llvm/test/CodeGen/RISCV/stack-realignment-with-variable-sized-objects.ll
@@ -26,7 +26,7 @@ define void @caller(i32 %n) {
; RV32I-NEXT: sub a0, sp, a0
; RV32I-NEXT: mv sp, a0
; RV32I-NEXT: mv a1, s1
-; RV32I-NEXT: call callee@plt
+; RV32I-NEXT: call callee
; RV32I-NEXT: addi sp, s0, -64
; RV32I-NEXT: lw ra, 60(sp) # 4-byte Folded Reload
; RV32I-NEXT: lw s0, 56(sp) # 4-byte Folded Reload
@@ -55,7 +55,7 @@ define void @caller(i32 %n) {
; RV64I-NEXT: sub a0, sp, a0
; RV64I-NEXT: mv sp, a0
; RV64I-NEXT: mv a1, s1
-; RV64I-NEXT: call callee@plt
+; RV64I-NEXT: call callee
; RV64I-NEXT: addi sp, s0, -64
; RV64I-NEXT: ld ra, 56(sp) # 8-byte Folded Reload
; RV64I-NEXT: ld s0, 48(sp) # 8-byte Folded Reload
diff --git a/llvm/test/CodeGen/RISCV/stack-realignment.ll b/llvm/test/CodeGen/RISCV/stack-realignment.ll
index d87db54fd5bf..afa8efedbff3 100644
--- a/llvm/test/CodeGen/RISCV/stack-realignment.ll
+++ b/llvm/test/CodeGen/RISCV/stack-realignment.ll
@@ -19,7 +19,7 @@ define void @caller32() {
; RV32I-NEXT: .cfi_def_cfa s0, 0
; RV32I-NEXT: andi sp, sp, -32
; RV32I-NEXT: mv a0, sp
-; RV32I-NEXT: call callee@plt
+; RV32I-NEXT: call callee
; RV32I-NEXT: addi sp, s0, -32
; RV32I-NEXT: lw ra, 28(sp) # 4-byte Folded Reload
; RV32I-NEXT: lw s0, 24(sp) # 4-byte Folded Reload
@@ -38,7 +38,7 @@ define void @caller32() {
; RV64I-NEXT: .cfi_def_cfa s0, 0
; RV64I-NEXT: andi sp, sp, -32
; RV64I-NEXT: mv a0, sp
-; RV64I-NEXT: call callee@plt
+; RV64I-NEXT: call callee
; RV64I-NEXT: addi sp, s0, -32
; RV64I-NEXT: ld ra, 24(sp) # 8-byte Folded Reload
; RV64I-NEXT: ld s0, 16(sp) # 8-byte Folded Reload
@@ -57,7 +57,7 @@ define void @caller_no_realign32() "no-realign-stack" {
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT: .cfi_offset ra, -4
; RV32I-NEXT: mv a0, sp
-; RV32I-NEXT: call callee@plt
+; RV32I-NEXT: call callee
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
@@ -69,7 +69,7 @@ define void @caller_no_realign32() "no-realign-stack" {
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT: .cfi_offset ra, -8
; RV64I-NEXT: mv a0, sp
-; RV64I-NEXT: call callee@plt
+; RV64I-NEXT: call callee
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
@@ -91,7 +91,7 @@ define void @caller64() {
; RV32I-NEXT: .cfi_def_cfa s0, 0
; RV32I-NEXT: andi sp, sp, -64
; RV32I-NEXT: mv a0, sp
-; RV32I-NEXT: call callee@plt
+; RV32I-NEXT: call callee
; RV32I-NEXT: addi sp, s0, -64
; RV32I-NEXT: lw ra, 60(sp) # 4-byte Folded Reload
; RV32I-NEXT: lw s0, 56(sp) # 4-byte Folded Reload
@@ -110,7 +110,7 @@ define void @caller64() {
; RV64I-NEXT: .cfi_def_cfa s0, 0
; RV64I-NEXT: andi sp, sp, -64
; RV64I-NEXT: mv a0, sp
-; RV64I-NEXT: call callee@plt
+; RV64I-NEXT: call callee
; RV64I-NEXT: addi sp, s0, -64
; RV64I-NEXT: ld ra, 56(sp) # 8-byte Folded Reload
; RV64I-NEXT: ld s0, 48(sp) # 8-byte Folded Reload
@@ -129,7 +129,7 @@ define void @caller_no_realign64() "no-realign-stack" {
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT: .cfi_offset ra, -4
; RV32I-NEXT: mv a0, sp
-; RV32I-NEXT: call callee@plt
+; RV32I-NEXT: call callee
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
@@ -141,7 +141,7 @@ define void @caller_no_realign64() "no-realign-stack" {
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT: .cfi_offset ra, -8
; RV64I-NEXT: mv a0, sp
-; RV64I-NEXT: call callee@plt
+; RV64I-NEXT: call callee
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
@@ -163,7 +163,7 @@ define void @caller128() {
; RV32I-NEXT: .cfi_def_cfa s0, 0
; RV32I-NEXT: andi sp, sp, -128
; RV32I-NEXT: mv a0, sp
-; RV32I-NEXT: call callee@plt
+; RV32I-NEXT: call callee
; RV32I-NEXT: addi sp, s0, -128
; RV32I-NEXT: lw ra, 124(sp) # 4-byte Folded Reload
; RV32I-NEXT: lw s0, 120(sp) # 4-byte Folded Reload
@@ -182,7 +182,7 @@ define void @caller128() {
; RV64I-NEXT: .cfi_def_cfa s0, 0
; RV64I-NEXT: andi sp, sp, -128
; RV64I-NEXT: mv a0, sp
-; RV64I-NEXT: call callee@plt
+; RV64I-NEXT: call callee
; RV64I-NEXT: addi sp, s0, -128
; RV64I-NEXT: ld ra, 120(sp) # 8-byte Folded Reload
; RV64I-NEXT: ld s0, 112(sp) # 8-byte Folded Reload
@@ -201,7 +201,7 @@ define void @caller_no_realign128() "no-realign-stack" {
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT: .cfi_offset ra, -4
; RV32I-NEXT: mv a0, sp
-; RV32I-NEXT: call callee@plt
+; RV32I-NEXT: call callee
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
@@ -213,7 +213,7 @@ define void @caller_no_realign128() "no-realign-stack" {
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT: .cfi_offset ra, -8
; RV64I-NEXT: mv a0, sp
-; RV64I-NEXT: call callee@plt
+; RV64I-NEXT: call callee
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
@@ -235,7 +235,7 @@ define void @caller256() {
; RV32I-NEXT: .cfi_def_cfa s0, 0
; RV32I-NEXT: andi sp, sp, -256
; RV32I-NEXT: mv a0, sp
-; RV32I-NEXT: call callee@plt
+; RV32I-NEXT: call callee
; RV32I-NEXT: addi sp, s0, -256
; RV32I-NEXT: lw ra, 252(sp) # 4-byte Folded Reload
; RV32I-NEXT: lw s0, 248(sp) # 4-byte Folded Reload
@@ -254,7 +254,7 @@ define void @caller256() {
; RV64I-NEXT: .cfi_def_cfa s0, 0
; RV64I-NEXT: andi sp, sp, -256
; RV64I-NEXT: mv a0, sp
-; RV64I-NEXT: call callee@plt
+; RV64I-NEXT: call callee
; RV64I-NEXT: addi sp, s0, -256
; RV64I-NEXT: ld ra, 248(sp) # 8-byte Folded Reload
; RV64I-NEXT: ld s0, 240(sp) # 8-byte Folded Reload
@@ -273,7 +273,7 @@ define void @caller_no_realign256() "no-realign-stack" {
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT: .cfi_offset ra, -4
; RV32I-NEXT: mv a0, sp
-; RV32I-NEXT: call callee@plt
+; RV32I-NEXT: call callee
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
@@ -285,7 +285,7 @@ define void @caller_no_realign256() "no-realign-stack" {
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT: .cfi_offset ra, -8
; RV64I-NEXT: mv a0, sp
-; RV64I-NEXT: call callee@plt
+; RV64I-NEXT: call callee
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
@@ -307,7 +307,7 @@ define void @caller512() {
; RV32I-NEXT: .cfi_def_cfa s0, 0
; RV32I-NEXT: andi sp, sp, -512
; RV32I-NEXT: addi a0, sp, 512
-; RV32I-NEXT: call callee@plt
+; RV32I-NEXT: call callee
; RV32I-NEXT: addi sp, s0, -1024
; RV32I-NEXT: lw ra, 1020(sp) # 4-byte Folded Reload
; RV32I-NEXT: lw s0, 1016(sp) # 4-byte Folded Reload
@@ -326,7 +326,7 @@ define void @caller512() {
; RV64I-NEXT: .cfi_def_cfa s0, 0
; RV64I-NEXT: andi sp, sp, -512
; RV64I-NEXT: addi a0, sp, 512
-; RV64I-NEXT: call callee@plt
+; RV64I-NEXT: call callee
; RV64I-NEXT: addi sp, s0, -1024
; RV64I-NEXT: ld ra, 1016(sp) # 8-byte Folded Reload
; RV64I-NEXT: ld s0, 1008(sp) # 8-byte Folded Reload
@@ -345,7 +345,7 @@ define void @caller_no_realign512() "no-realign-stack" {
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT: .cfi_offset ra, -4
; RV32I-NEXT: mv a0, sp
-; RV32I-NEXT: call callee@plt
+; RV32I-NEXT: call callee
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
@@ -357,7 +357,7 @@ define void @caller_no_realign512() "no-realign-stack" {
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT: .cfi_offset ra, -8
; RV64I-NEXT: mv a0, sp
-; RV64I-NEXT: call callee@plt
+; RV64I-NEXT: call callee
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
@@ -380,7 +380,7 @@ define void @caller1024() {
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: andi sp, sp, -1024
; RV32I-NEXT: addi a0, sp, 1024
-; RV32I-NEXT: call callee@plt
+; RV32I-NEXT: call callee
; RV32I-NEXT: addi sp, s0, -2048
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: lw ra, 2028(sp) # 4-byte Folded Reload
@@ -401,7 +401,7 @@ define void @caller1024() {
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: andi sp, sp, -1024
; RV64I-NEXT: addi a0, sp, 1024
-; RV64I-NEXT: call callee@plt
+; RV64I-NEXT: call callee
; RV64I-NEXT: addi sp, s0, -2048
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ld ra, 2024(sp) # 8-byte Folded Reload
@@ -421,7 +421,7 @@ define void @caller_no_realign1024() "no-realign-stack" {
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT: .cfi_offset ra, -4
; RV32I-NEXT: mv a0, sp
-; RV32I-NEXT: call callee@plt
+; RV32I-NEXT: call callee
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
@@ -433,7 +433,7 @@ define void @caller_no_realign1024() "no-realign-stack" {
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT: .cfi_offset ra, -8
; RV64I-NEXT: mv a0, sp
-; RV64I-NEXT: call callee@plt
+; RV64I-NEXT: call callee
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
@@ -458,7 +458,7 @@ define void @caller2048() {
; RV32I-NEXT: andi sp, sp, -2048
; RV32I-NEXT: addi a0, sp, 2047
; RV32I-NEXT: addi a0, a0, 1
-; RV32I-NEXT: call callee@plt
+; RV32I-NEXT: call callee
; RV32I-NEXT: lui a0, 1
; RV32I-NEXT: sub sp, s0, a0
; RV32I-NEXT: addi sp, sp, 2032
@@ -483,7 +483,7 @@ define void @caller2048() {
; RV64I-NEXT: andi sp, sp, -2048
; RV64I-NEXT: addi a0, sp, 2047
; RV64I-NEXT: addi a0, a0, 1
-; RV64I-NEXT: call callee@plt
+; RV64I-NEXT: call callee
; RV64I-NEXT: lui a0, 1
; RV64I-NEXT: sub sp, s0, a0
; RV64I-NEXT: addi sp, sp, 2032
@@ -505,7 +505,7 @@ define void @caller_no_realign2048() "no-realign-stack" {
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT: .cfi_offset ra, -4
; RV32I-NEXT: mv a0, sp
-; RV32I-NEXT: call callee@plt
+; RV32I-NEXT: call callee
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
@@ -517,7 +517,7 @@ define void @caller_no_realign2048() "no-realign-stack" {
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT: .cfi_offset ra, -8
; RV64I-NEXT: mv a0, sp
-; RV64I-NEXT: call callee@plt
+; RV64I-NEXT: call callee
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
@@ -544,7 +544,7 @@ define void @caller4096() {
; RV32I-NEXT: slli sp, a0, 12
; RV32I-NEXT: lui a0, 1
; RV32I-NEXT: add a0, sp, a0
-; RV32I-NEXT: call callee@plt
+; RV32I-NEXT: call callee
; RV32I-NEXT: lui a0, 2
; RV32I-NEXT: sub sp, s0, a0
; RV32I-NEXT: addi a0, a0, -2032
@@ -571,7 +571,7 @@ define void @caller4096() {
; RV64I-NEXT: slli sp, a0, 12
; RV64I-NEXT: lui a0, 1
; RV64I-NEXT: add a0, sp, a0
-; RV64I-NEXT: call callee@plt
+; RV64I-NEXT: call callee
; RV64I-NEXT: lui a0, 2
; RV64I-NEXT: sub sp, s0, a0
; RV64I-NEXT: addiw a0, a0, -2032
@@ -593,7 +593,7 @@ define void @caller_no_realign4096() "no-realign-stack" {
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT: .cfi_offset ra, -4
; RV32I-NEXT: mv a0, sp
-; RV32I-NEXT: call callee@plt
+; RV32I-NEXT: call callee
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
@@ -605,7 +605,7 @@ define void @caller_no_realign4096() "no-realign-stack" {
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT: .cfi_offset ra, -8
; RV64I-NEXT: mv a0, sp
-; RV64I-NEXT: call callee@plt
+; RV64I-NEXT: call callee
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
diff --git a/llvm/test/CodeGen/RISCV/stack-slot-size.ll b/llvm/test/CodeGen/RISCV/stack-slot-size.ll
index ab6bd20e2f22..1388eaac3a67 100644
--- a/llvm/test/CodeGen/RISCV/stack-slot-size.ll
+++ b/llvm/test/CodeGen/RISCV/stack-slot-size.ll
@@ -26,7 +26,7 @@ define i32 @caller129() nounwind {
; RV32I-NEXT: sw zero, 4(sp)
; RV32I-NEXT: mv a0, sp
; RV32I-NEXT: sw zero, 0(sp)
-; RV32I-NEXT: call callee129@plt
+; RV32I-NEXT: call callee129
; RV32I-NEXT: lw a0, 24(sp)
; RV32I-NEXT: lw ra, 28(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 32
@@ -42,7 +42,7 @@ define i32 @caller129() nounwind {
; RV64I-NEXT: sd zero, 8(sp)
; RV64I-NEXT: mv a0, sp
; RV64I-NEXT: sd zero, 0(sp)
-; RV64I-NEXT: call callee129@plt
+; RV64I-NEXT: call callee129
; RV64I-NEXT: lw a0, 36(sp)
; RV64I-NEXT: ld ra, 40(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 48
@@ -67,7 +67,7 @@ define i32 @caller160() nounwind {
; RV32I-NEXT: sw zero, 4(sp)
; RV32I-NEXT: mv a0, sp
; RV32I-NEXT: sw zero, 0(sp)
-; RV32I-NEXT: call callee160@plt
+; RV32I-NEXT: call callee160
; RV32I-NEXT: lw a0, 24(sp)
; RV32I-NEXT: lw ra, 28(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 32
@@ -83,7 +83,7 @@ define i32 @caller160() nounwind {
; RV64I-NEXT: sd zero, 8(sp)
; RV64I-NEXT: mv a0, sp
; RV64I-NEXT: sd zero, 0(sp)
-; RV64I-NEXT: call callee160@plt
+; RV64I-NEXT: call callee160
; RV64I-NEXT: lw a0, 36(sp)
; RV64I-NEXT: ld ra, 40(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 48
@@ -109,7 +109,7 @@ define i32 @caller161() nounwind {
; RV32I-NEXT: sw zero, 4(sp)
; RV32I-NEXT: mv a0, sp
; RV32I-NEXT: sw zero, 0(sp)
-; RV32I-NEXT: call callee161@plt
+; RV32I-NEXT: call callee161
; RV32I-NEXT: lw a0, 24(sp)
; RV32I-NEXT: lw ra, 28(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 32
@@ -125,7 +125,7 @@ define i32 @caller161() nounwind {
; RV64I-NEXT: sd zero, 8(sp)
; RV64I-NEXT: mv a0, sp
; RV64I-NEXT: sd zero, 0(sp)
-; RV64I-NEXT: call callee161@plt
+; RV64I-NEXT: call callee161
; RV64I-NEXT: lw a0, 36(sp)
; RV64I-NEXT: ld ra, 40(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 48
diff --git a/llvm/test/CodeGen/RISCV/stack-store-check.ll b/llvm/test/CodeGen/RISCV/stack-store-check.ll
index 651df94bab49..91cfb2a4cef7 100644
--- a/llvm/test/CodeGen/RISCV/stack-store-check.ll
+++ b/llvm/test/CodeGen/RISCV/stack-store-check.ll
@@ -44,7 +44,7 @@ define void @main() local_unnamed_addr nounwind {
; CHECK-NEXT: addi a1, sp, 600
; CHECK-NEXT: addi a2, sp, 584
; CHECK-NEXT: sw s6, 584(sp)
-; CHECK-NEXT: call __subtf3@plt
+; CHECK-NEXT: call __subtf3
; CHECK-NEXT: lw s1, 616(sp)
; CHECK-NEXT: lw s2, 620(sp)
; CHECK-NEXT: lw s3, 624(sp)
@@ -60,7 +60,7 @@ define void @main() local_unnamed_addr nounwind {
; CHECK-NEXT: addi a1, sp, 552
; CHECK-NEXT: addi a2, sp, 536
; CHECK-NEXT: sw s1, 552(sp)
-; CHECK-NEXT: call __subtf3@plt
+; CHECK-NEXT: call __subtf3
; CHECK-NEXT: lw a0, 568(sp)
; CHECK-NEXT: sw a0, 40(sp) # 4-byte Folded Spill
; CHECK-NEXT: lw a0, 572(sp)
@@ -80,7 +80,7 @@ define void @main() local_unnamed_addr nounwind {
; CHECK-NEXT: addi a1, sp, 504
; CHECK-NEXT: addi a2, sp, 488
; CHECK-NEXT: sw s6, 504(sp)
-; CHECK-NEXT: call __addtf3@plt
+; CHECK-NEXT: call __addtf3
; CHECK-NEXT: lw s9, 520(sp)
; CHECK-NEXT: lw s11, 524(sp)
; CHECK-NEXT: lw s5, 528(sp)
@@ -106,7 +106,7 @@ define void @main() local_unnamed_addr nounwind {
; CHECK-NEXT: addi a1, sp, 312
; CHECK-NEXT: addi a2, sp, 296
; CHECK-NEXT: sw s1, 312(sp)
-; CHECK-NEXT: call __multf3@plt
+; CHECK-NEXT: call __multf3
; CHECK-NEXT: lw a0, 328(sp)
; CHECK-NEXT: sw a0, 44(sp) # 4-byte Folded Spill
; CHECK-NEXT: lw a0, 332(sp)
@@ -126,7 +126,7 @@ define void @main() local_unnamed_addr nounwind {
; CHECK-NEXT: addi a1, sp, 456
; CHECK-NEXT: addi a2, sp, 440
; CHECK-NEXT: sw s9, 440(sp)
-; CHECK-NEXT: call __addtf3@plt
+; CHECK-NEXT: call __addtf3
; CHECK-NEXT: lw a3, 472(sp)
; CHECK-NEXT: lw a0, 476(sp)
; CHECK-NEXT: lw a1, 480(sp)
@@ -142,7 +142,7 @@ define void @main() local_unnamed_addr nounwind {
; CHECK-NEXT: addi a1, sp, 408
; CHECK-NEXT: addi a2, sp, 392
; CHECK-NEXT: sw a3, 392(sp)
-; CHECK-NEXT: call __subtf3@plt
+; CHECK-NEXT: call __subtf3
; CHECK-NEXT: lw a0, 424(sp)
; CHECK-NEXT: lw a1, 436(sp)
; CHECK-NEXT: lw a2, 432(sp)
@@ -171,7 +171,7 @@ define void @main() local_unnamed_addr nounwind {
; CHECK-NEXT: addi a2, sp, 200
; CHECK-NEXT: lw s0, 40(sp) # 4-byte Folded Reload
; CHECK-NEXT: sw s0, 216(sp)
-; CHECK-NEXT: call __multf3@plt
+; CHECK-NEXT: call __multf3
; CHECK-NEXT: lw s1, 232(sp)
; CHECK-NEXT: lw a0, 236(sp)
; CHECK-NEXT: sw a0, 0(sp) # 4-byte Folded Spill
@@ -189,7 +189,7 @@ define void @main() local_unnamed_addr nounwind {
; CHECK-NEXT: addi a1, sp, 360
; CHECK-NEXT: addi a2, sp, 344
; CHECK-NEXT: sw s9, 360(sp)
-; CHECK-NEXT: call __multf3@plt
+; CHECK-NEXT: call __multf3
; CHECK-NEXT: lw a0, 376(sp)
; CHECK-NEXT: lw a1, 388(sp)
; CHECK-NEXT: lw a2, 384(sp)
@@ -215,7 +215,7 @@ define void @main() local_unnamed_addr nounwind {
; CHECK-NEXT: addi a2, sp, 248
; CHECK-NEXT: lw a3, 44(sp) # 4-byte Folded Reload
; CHECK-NEXT: sw a3, 264(sp)
-; CHECK-NEXT: call __subtf3@plt
+; CHECK-NEXT: call __subtf3
; CHECK-NEXT: lw a0, 280(sp)
; CHECK-NEXT: lw a1, 292(sp)
; CHECK-NEXT: lw a2, 288(sp)
@@ -237,7 +237,7 @@ define void @main() local_unnamed_addr nounwind {
; CHECK-NEXT: addi a1, sp, 168
; CHECK-NEXT: addi a2, sp, 152
; CHECK-NEXT: sw s1, 168(sp)
-; CHECK-NEXT: call __addtf3@plt
+; CHECK-NEXT: call __addtf3
; CHECK-NEXT: lw a0, 184(sp)
; CHECK-NEXT: lw a1, 196(sp)
; CHECK-NEXT: lw a2, 192(sp)
@@ -259,7 +259,7 @@ define void @main() local_unnamed_addr nounwind {
; CHECK-NEXT: addi a2, sp, 104
; CHECK-NEXT: lw a3, 52(sp) # 4-byte Folded Reload
; CHECK-NEXT: sw a3, 120(sp)
-; CHECK-NEXT: call __multf3@plt
+; CHECK-NEXT: call __multf3
; CHECK-NEXT: lw a3, 136(sp)
; CHECK-NEXT: lw a0, 140(sp)
; CHECK-NEXT: lw a1, 144(sp)
@@ -276,7 +276,7 @@ define void @main() local_unnamed_addr nounwind {
; CHECK-NEXT: addi a1, sp, 72
; CHECK-NEXT: addi a2, sp, 56
; CHECK-NEXT: sw a3, 72(sp)
-; CHECK-NEXT: call __addtf3@plt
+; CHECK-NEXT: call __addtf3
; CHECK-NEXT: lw a0, 96(sp)
; CHECK-NEXT: lw a1, 100(sp)
; CHECK-NEXT: lw a2, 88(sp)
diff --git a/llvm/test/CodeGen/RISCV/tls-models.ll b/llvm/test/CodeGen/RISCV/tls-models.ll
index d9b37cb0c7c2..c2ed44073baa 100644
--- a/llvm/test/CodeGen/RISCV/tls-models.ll
+++ b/llvm/test/CodeGen/RISCV/tls-models.ll
@@ -26,7 +26,7 @@ define ptr @f1() nounwind {
; RV32-PIC-NEXT: .Lpcrel_hi0:
; RV32-PIC-NEXT: auipc a0, %tls_gd_pcrel_hi(unspecified)
; RV32-PIC-NEXT: addi a0, a0, %pcrel_lo(.Lpcrel_hi0)
-; RV32-PIC-NEXT: call __tls_get_addr@plt
+; RV32-PIC-NEXT: call __tls_get_addr
; RV32-PIC-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32-PIC-NEXT: addi sp, sp, 16
; RV32-PIC-NEXT: ret
@@ -38,7 +38,7 @@ define ptr @f1() nounwind {
; RV64-PIC-NEXT: .Lpcrel_hi0:
; RV64-PIC-NEXT: auipc a0, %tls_gd_pcrel_hi(unspecified)
; RV64-PIC-NEXT: addi a0, a0, %pcrel_lo(.Lpcrel_hi0)
-; RV64-PIC-NEXT: call __tls_get_addr@plt
+; RV64-PIC-NEXT: call __tls_get_addr
; RV64-PIC-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64-PIC-NEXT: addi sp, sp, 16
; RV64-PIC-NEXT: ret
@@ -73,7 +73,7 @@ define ptr @f2() nounwind {
; RV32-PIC-NEXT: .Lpcrel_hi1:
; RV32-PIC-NEXT: auipc a0, %tls_gd_pcrel_hi(ld)
; RV32-PIC-NEXT: addi a0, a0, %pcrel_lo(.Lpcrel_hi1)
-; RV32-PIC-NEXT: call __tls_get_addr@plt
+; RV32-PIC-NEXT: call __tls_get_addr
; RV32-PIC-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32-PIC-NEXT: addi sp, sp, 16
; RV32-PIC-NEXT: ret
@@ -85,7 +85,7 @@ define ptr @f2() nounwind {
; RV64-PIC-NEXT: .Lpcrel_hi1:
; RV64-PIC-NEXT: auipc a0, %tls_gd_pcrel_hi(ld)
; RV64-PIC-NEXT: addi a0, a0, %pcrel_lo(.Lpcrel_hi1)
-; RV64-PIC-NEXT: call __tls_get_addr@plt
+; RV64-PIC-NEXT: call __tls_get_addr
; RV64-PIC-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64-PIC-NEXT: addi sp, sp, 16
; RV64-PIC-NEXT: ret
diff --git a/llvm/test/CodeGen/RISCV/unfold-masked-merge-scalar-variablemask.ll b/llvm/test/CodeGen/RISCV/unfold-masked-merge-scalar-variablemask.ll
index 0772109a5525..22c0b798e146 100644
--- a/llvm/test/CodeGen/RISCV/unfold-masked-merge-scalar-variablemask.ll
+++ b/llvm/test/CodeGen/RISCV/unfold-masked-merge-scalar-variablemask.ll
@@ -915,7 +915,7 @@ define i32 @in_multiuse_A(i32 %x, i32 %y, i32 %z, i32 %mask) nounwind {
; RV32-NEXT: xor a0, a0, a1
; RV32-NEXT: and s1, a0, a3
; RV32-NEXT: mv a0, s1
-; RV32-NEXT: call use32@plt
+; RV32-NEXT: call use32
; RV32-NEXT: xor a0, s1, s0
; RV32-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32-NEXT: lw s0, 8(sp) # 4-byte Folded Reload
@@ -933,7 +933,7 @@ define i32 @in_multiuse_A(i32 %x, i32 %y, i32 %z, i32 %mask) nounwind {
; RV64-NEXT: xor a0, a0, a1
; RV64-NEXT: and s1, a0, a3
; RV64-NEXT: mv a0, s1
-; RV64-NEXT: call use32@plt
+; RV64-NEXT: call use32
; RV64-NEXT: xor a0, s1, s0
; RV64-NEXT: ld ra, 24(sp) # 8-byte Folded Reload
; RV64-NEXT: ld s0, 16(sp) # 8-byte Folded Reload
@@ -957,7 +957,7 @@ define i32 @in_multiuse_B(i32 %x, i32 %y, i32 %z, i32 %mask) nounwind {
; RV32-NEXT: mv s0, a1
; RV32-NEXT: xor a0, a0, a1
; RV32-NEXT: and s1, a0, a3
-; RV32-NEXT: call use32@plt
+; RV32-NEXT: call use32
; RV32-NEXT: xor a0, s1, s0
; RV32-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32-NEXT: lw s0, 8(sp) # 4-byte Folded Reload
@@ -974,7 +974,7 @@ define i32 @in_multiuse_B(i32 %x, i32 %y, i32 %z, i32 %mask) nounwind {
; RV64-NEXT: mv s0, a1
; RV64-NEXT: xor a0, a0, a1
; RV64-NEXT: and s1, a0, a3
-; RV64-NEXT: call use32@plt
+; RV64-NEXT: call use32
; RV64-NEXT: xor a0, s1, s0
; RV64-NEXT: ld ra, 24(sp) # 8-byte Folded Reload
; RV64-NEXT: ld s0, 16(sp) # 8-byte Folded Reload
diff --git a/llvm/test/CodeGen/RISCV/urem-lkk.ll b/llvm/test/CodeGen/RISCV/urem-lkk.ll
index 1b2cc1398ec1..f83a933c0b5c 100644
--- a/llvm/test/CodeGen/RISCV/urem-lkk.ll
+++ b/llvm/test/CodeGen/RISCV/urem-lkk.ll
@@ -12,7 +12,7 @@ define i32 @fold_urem_positive_odd(i32 %x) nounwind {
; RV32I-LABEL: fold_urem_positive_odd:
; RV32I: # %bb.0:
; RV32I-NEXT: li a1, 95
-; RV32I-NEXT: tail __umodsi3@plt
+; RV32I-NEXT: tail __umodsi3
;
; RV32IM-LABEL: fold_urem_positive_odd:
; RV32IM: # %bb.0:
@@ -35,7 +35,7 @@ define i32 @fold_urem_positive_odd(i32 %x) nounwind {
; RV64I-NEXT: slli a0, a0, 32
; RV64I-NEXT: srli a0, a0, 32
; RV64I-NEXT: li a1, 95
-; RV64I-NEXT: call __umoddi3@plt
+; RV64I-NEXT: call __umoddi3
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
@@ -65,7 +65,7 @@ define i32 @fold_urem_positive_even(i32 %x) nounwind {
; RV32I-LABEL: fold_urem_positive_even:
; RV32I: # %bb.0:
; RV32I-NEXT: li a1, 1060
-; RV32I-NEXT: tail __umodsi3@plt
+; RV32I-NEXT: tail __umodsi3
;
; RV32IM-LABEL: fold_urem_positive_even:
; RV32IM: # %bb.0:
@@ -85,7 +85,7 @@ define i32 @fold_urem_positive_even(i32 %x) nounwind {
; RV64I-NEXT: slli a0, a0, 32
; RV64I-NEXT: srli a0, a0, 32
; RV64I-NEXT: li a1, 1060
-; RV64I-NEXT: call __umoddi3@plt
+; RV64I-NEXT: call __umoddi3
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
@@ -117,11 +117,11 @@ define i32 @combine_urem_udiv(i32 %x) nounwind {
; RV32I-NEXT: sw s1, 4(sp) # 4-byte Folded Spill
; RV32I-NEXT: mv s0, a0
; RV32I-NEXT: li a1, 95
-; RV32I-NEXT: call __umodsi3@plt
+; RV32I-NEXT: call __umodsi3
; RV32I-NEXT: mv s1, a0
; RV32I-NEXT: li a1, 95
; RV32I-NEXT: mv a0, s0
-; RV32I-NEXT: call __udivsi3@plt
+; RV32I-NEXT: call __udivsi3
; RV32I-NEXT: add a0, s1, a0
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: lw s0, 8(sp) # 4-byte Folded Reload
@@ -154,11 +154,11 @@ define i32 @combine_urem_udiv(i32 %x) nounwind {
; RV64I-NEXT: srli s0, a0, 32
; RV64I-NEXT: li a1, 95
; RV64I-NEXT: mv a0, s0
-; RV64I-NEXT: call __umoddi3@plt
+; RV64I-NEXT: call __umoddi3
; RV64I-NEXT: mv s1, a0
; RV64I-NEXT: li a1, 95
; RV64I-NEXT: mv a0, s0
-; RV64I-NEXT: call __udivdi3@plt
+; RV64I-NEXT: call __udivdi3
; RV64I-NEXT: add a0, s1, a0
; RV64I-NEXT: ld ra, 24(sp) # 8-byte Folded Reload
; RV64I-NEXT: ld s0, 16(sp) # 8-byte Folded Reload
@@ -226,7 +226,7 @@ define i64 @dont_fold_urem_i64(i64 %x) nounwind {
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT: li a2, 98
; RV32I-NEXT: li a3, 0
-; RV32I-NEXT: call __umoddi3@plt
+; RV32I-NEXT: call __umoddi3
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
@@ -237,7 +237,7 @@ define i64 @dont_fold_urem_i64(i64 %x) nounwind {
; RV32IM-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32IM-NEXT: li a2, 98
; RV32IM-NEXT: li a3, 0
-; RV32IM-NEXT: call __umoddi3@plt
+; RV32IM-NEXT: call __umoddi3
; RV32IM-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32IM-NEXT: addi sp, sp, 16
; RV32IM-NEXT: ret
@@ -245,7 +245,7 @@ define i64 @dont_fold_urem_i64(i64 %x) nounwind {
; RV64I-LABEL: dont_fold_urem_i64:
; RV64I: # %bb.0:
; RV64I-NEXT: li a1, 98
-; RV64I-NEXT: tail __umoddi3@plt
+; RV64I-NEXT: tail __umoddi3
;
; RV64IM-LABEL: dont_fold_urem_i64:
; RV64IM: # %bb.0:
diff --git a/llvm/test/CodeGen/RISCV/urem-seteq-illegal-types.ll b/llvm/test/CodeGen/RISCV/urem-seteq-illegal-types.ll
index f629c0d17891..4544cbaf8521 100644
--- a/llvm/test/CodeGen/RISCV/urem-seteq-illegal-types.ll
+++ b/llvm/test/CodeGen/RISCV/urem-seteq-illegal-types.ll
@@ -13,7 +13,7 @@ define i1 @test_urem_odd(i13 %X) nounwind {
; RV32-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32-NEXT: lui a1, 1
; RV32-NEXT: addi a1, a1, -819
-; RV32-NEXT: call __mulsi3@plt
+; RV32-NEXT: call __mulsi3
; RV32-NEXT: slli a0, a0, 19
; RV32-NEXT: srli a0, a0, 19
; RV32-NEXT: sltiu a0, a0, 1639
@@ -27,7 +27,7 @@ define i1 @test_urem_odd(i13 %X) nounwind {
; RV64-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64-NEXT: lui a1, 1
; RV64-NEXT: addiw a1, a1, -819
-; RV64-NEXT: call __muldi3@plt
+; RV64-NEXT: call __muldi3
; RV64-NEXT: slli a0, a0, 51
; RV64-NEXT: srli a0, a0, 51
; RV64-NEXT: sltiu a0, a0, 1639
@@ -86,7 +86,7 @@ define i1 @test_urem_even(i27 %X) nounwind {
; RV32-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32-NEXT: lui a1, 28087
; RV32-NEXT: addi a1, a1, -585
-; RV32-NEXT: call __mulsi3@plt
+; RV32-NEXT: call __mulsi3
; RV32-NEXT: slli a1, a0, 26
; RV32-NEXT: slli a0, a0, 5
; RV32-NEXT: srli a0, a0, 6
@@ -106,7 +106,7 @@ define i1 @test_urem_even(i27 %X) nounwind {
; RV64-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64-NEXT: lui a1, 28087
; RV64-NEXT: addiw a1, a1, -585
-; RV64-NEXT: call __muldi3@plt
+; RV64-NEXT: call __muldi3
; RV64-NEXT: slli a1, a0, 26
; RV64-NEXT: slli a0, a0, 37
; RV64-NEXT: srli a0, a0, 38
@@ -259,7 +259,7 @@ define i1 @test_urem_negative_odd(i9 %X) nounwind {
; RV32-NEXT: addi sp, sp, -16
; RV32-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32-NEXT: li a1, 307
-; RV32-NEXT: call __mulsi3@plt
+; RV32-NEXT: call __mulsi3
; RV32-NEXT: andi a0, a0, 511
; RV32-NEXT: sltiu a0, a0, 2
; RV32-NEXT: xori a0, a0, 1
@@ -272,7 +272,7 @@ define i1 @test_urem_negative_odd(i9 %X) nounwind {
; RV64-NEXT: addi sp, sp, -16
; RV64-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64-NEXT: li a1, 307
-; RV64-NEXT: call __muldi3@plt
+; RV64-NEXT: call __muldi3
; RV64-NEXT: andi a0, a0, 511
; RV64-NEXT: sltiu a0, a0, 2
; RV64-NEXT: xori a0, a0, 1
@@ -338,7 +338,7 @@ define void @test_urem_vec(ptr %X) nounwind {
; RV32-NEXT: srli s2, a1, 11
; RV32-NEXT: andi a0, a1, 2047
; RV32-NEXT: li a1, 683
-; RV32-NEXT: call __mulsi3@plt
+; RV32-NEXT: call __mulsi3
; RV32-NEXT: slli a1, a0, 10
; RV32-NEXT: slli a0, a0, 21
; RV32-NEXT: srli a0, a0, 22
@@ -347,13 +347,13 @@ define void @test_urem_vec(ptr %X) nounwind {
; RV32-NEXT: sltiu s3, a0, 342
; RV32-NEXT: li a1, 819
; RV32-NEXT: mv a0, s1
-; RV32-NEXT: call __mulsi3@plt
+; RV32-NEXT: call __mulsi3
; RV32-NEXT: addi a0, a0, -1638
; RV32-NEXT: andi a0, a0, 2047
; RV32-NEXT: sltiu s1, a0, 2
; RV32-NEXT: li a1, 1463
; RV32-NEXT: mv a0, s2
-; RV32-NEXT: call __mulsi3@plt
+; RV32-NEXT: call __mulsi3
; RV32-NEXT: addi a0, a0, -1463
; RV32-NEXT: andi a0, a0, 2047
; RV32-NEXT: sltiu a0, a0, 293
@@ -395,7 +395,7 @@ define void @test_urem_vec(ptr %X) nounwind {
; RV64-NEXT: srli s2, a0, 11
; RV64-NEXT: andi a0, a0, 2047
; RV64-NEXT: li a1, 683
-; RV64-NEXT: call __muldi3@plt
+; RV64-NEXT: call __muldi3
; RV64-NEXT: slli a1, a0, 10
; RV64-NEXT: slli a0, a0, 53
; RV64-NEXT: srli a0, a0, 54
@@ -404,13 +404,13 @@ define void @test_urem_vec(ptr %X) nounwind {
; RV64-NEXT: sltiu s3, a0, 342
; RV64-NEXT: li a1, 1463
; RV64-NEXT: mv a0, s2
-; RV64-NEXT: call __muldi3@plt
+; RV64-NEXT: call __muldi3
; RV64-NEXT: addi a0, a0, -1463
; RV64-NEXT: andi a0, a0, 2047
; RV64-NEXT: sltiu s2, a0, 293
; RV64-NEXT: li a1, 819
; RV64-NEXT: mv a0, s1
-; RV64-NEXT: call __muldi3@plt
+; RV64-NEXT: call __muldi3
; RV64-NEXT: addi a0, a0, -1638
; RV64-NEXT: andi a0, a0, 2047
; RV64-NEXT: sltiu a0, a0, 2
diff --git a/llvm/test/CodeGen/RISCV/urem-vector-lkk.ll b/llvm/test/CodeGen/RISCV/urem-vector-lkk.ll
index 32aca29d16e9..eea8e64f2ddd 100644
--- a/llvm/test/CodeGen/RISCV/urem-vector-lkk.ll
+++ b/llvm/test/CodeGen/RISCV/urem-vector-lkk.ll
@@ -26,19 +26,19 @@ define <4 x i16> @fold_urem_vec_1(<4 x i16> %x) nounwind {
; RV32I-NEXT: mv s3, a0
; RV32I-NEXT: li a1, 95
; RV32I-NEXT: mv a0, a2
-; RV32I-NEXT: call __umodsi3@plt
+; RV32I-NEXT: call __umodsi3
; RV32I-NEXT: mv s4, a0
; RV32I-NEXT: li a1, 124
; RV32I-NEXT: mv a0, s2
-; RV32I-NEXT: call __umodsi3@plt
+; RV32I-NEXT: call __umodsi3
; RV32I-NEXT: mv s2, a0
; RV32I-NEXT: li a1, 98
; RV32I-NEXT: mv a0, s1
-; RV32I-NEXT: call __umodsi3@plt
+; RV32I-NEXT: call __umodsi3
; RV32I-NEXT: mv s1, a0
; RV32I-NEXT: li a1, 1003
; RV32I-NEXT: mv a0, s0
-; RV32I-NEXT: call __umodsi3@plt
+; RV32I-NEXT: call __umodsi3
; RV32I-NEXT: sh a0, 6(s3)
; RV32I-NEXT: sh s1, 4(s3)
; RV32I-NEXT: sh s2, 2(s3)
@@ -104,19 +104,19 @@ define <4 x i16> @fold_urem_vec_1(<4 x i16> %x) nounwind {
; RV64I-NEXT: mv s3, a0
; RV64I-NEXT: li a1, 95
; RV64I-NEXT: mv a0, a2
-; RV64I-NEXT: call __umoddi3@plt
+; RV64I-NEXT: call __umoddi3
; RV64I-NEXT: mv s4, a0
; RV64I-NEXT: li a1, 124
; RV64I-NEXT: mv a0, s2
-; RV64I-NEXT: call __umoddi3@plt
+; RV64I-NEXT: call __umoddi3
; RV64I-NEXT: mv s2, a0
; RV64I-NEXT: li a1, 98
; RV64I-NEXT: mv a0, s1
-; RV64I-NEXT: call __umoddi3@plt
+; RV64I-NEXT: call __umoddi3
; RV64I-NEXT: mv s1, a0
; RV64I-NEXT: li a1, 1003
; RV64I-NEXT: mv a0, s0
-; RV64I-NEXT: call __umoddi3@plt
+; RV64I-NEXT: call __umoddi3
; RV64I-NEXT: sh a0, 6(s3)
; RV64I-NEXT: sh s1, 4(s3)
; RV64I-NEXT: sh s2, 2(s3)
@@ -186,19 +186,19 @@ define <4 x i16> @fold_urem_vec_2(<4 x i16> %x) nounwind {
; RV32I-NEXT: mv s3, a0
; RV32I-NEXT: li a1, 95
; RV32I-NEXT: mv a0, a2
-; RV32I-NEXT: call __umodsi3@plt
+; RV32I-NEXT: call __umodsi3
; RV32I-NEXT: mv s4, a0
; RV32I-NEXT: li a1, 95
; RV32I-NEXT: mv a0, s2
-; RV32I-NEXT: call __umodsi3@plt
+; RV32I-NEXT: call __umodsi3
; RV32I-NEXT: mv s2, a0
; RV32I-NEXT: li a1, 95
; RV32I-NEXT: mv a0, s1
-; RV32I-NEXT: call __umodsi3@plt
+; RV32I-NEXT: call __umodsi3
; RV32I-NEXT: mv s1, a0
; RV32I-NEXT: li a1, 95
; RV32I-NEXT: mv a0, s0
-; RV32I-NEXT: call __umodsi3@plt
+; RV32I-NEXT: call __umodsi3
; RV32I-NEXT: sh a0, 6(s3)
; RV32I-NEXT: sh s1, 4(s3)
; RV32I-NEXT: sh s2, 2(s3)
@@ -255,19 +255,19 @@ define <4 x i16> @fold_urem_vec_2(<4 x i16> %x) nounwind {
; RV64I-NEXT: mv s3, a0
; RV64I-NEXT: li a1, 95
; RV64I-NEXT: mv a0, a2
-; RV64I-NEXT: call __umoddi3@plt
+; RV64I-NEXT: call __umoddi3
; RV64I-NEXT: mv s4, a0
; RV64I-NEXT: li a1, 95
; RV64I-NEXT: mv a0, s2
-; RV64I-NEXT: call __umoddi3@plt
+; RV64I-NEXT: call __umoddi3
; RV64I-NEXT: mv s2, a0
; RV64I-NEXT: li a1, 95
; RV64I-NEXT: mv a0, s1
-; RV64I-NEXT: call __umoddi3@plt
+; RV64I-NEXT: call __umoddi3
; RV64I-NEXT: mv s1, a0
; RV64I-NEXT: li a1, 95
; RV64I-NEXT: mv a0, s0
-; RV64I-NEXT: call __umoddi3@plt
+; RV64I-NEXT: call __umoddi3
; RV64I-NEXT: sh a0, 6(s3)
; RV64I-NEXT: sh s1, 4(s3)
; RV64I-NEXT: sh s2, 2(s3)
@@ -334,35 +334,35 @@ define <4 x i16> @combine_urem_udiv(<4 x i16> %x) nounwind {
; RV32I-NEXT: mv s0, a0
; RV32I-NEXT: li a1, 95
; RV32I-NEXT: mv a0, s4
-; RV32I-NEXT: call __umodsi3@plt
+; RV32I-NEXT: call __umodsi3
; RV32I-NEXT: mv s5, a0
; RV32I-NEXT: li a1, 95
; RV32I-NEXT: mv a0, s3
-; RV32I-NEXT: call __umodsi3@plt
+; RV32I-NEXT: call __umodsi3
; RV32I-NEXT: mv s6, a0
; RV32I-NEXT: li a1, 95
; RV32I-NEXT: mv a0, s2
-; RV32I-NEXT: call __umodsi3@plt
+; RV32I-NEXT: call __umodsi3
; RV32I-NEXT: mv s7, a0
; RV32I-NEXT: li a1, 95
; RV32I-NEXT: mv a0, s1
-; RV32I-NEXT: call __umodsi3@plt
+; RV32I-NEXT: call __umodsi3
; RV32I-NEXT: mv s8, a0
; RV32I-NEXT: li a1, 95
; RV32I-NEXT: mv a0, s4
-; RV32I-NEXT: call __udivsi3@plt
+; RV32I-NEXT: call __udivsi3
; RV32I-NEXT: mv s4, a0
; RV32I-NEXT: li a1, 95
; RV32I-NEXT: mv a0, s3
-; RV32I-NEXT: call __udivsi3@plt
+; RV32I-NEXT: call __udivsi3
; RV32I-NEXT: mv s3, a0
; RV32I-NEXT: li a1, 95
; RV32I-NEXT: mv a0, s2
-; RV32I-NEXT: call __udivsi3@plt
+; RV32I-NEXT: call __udivsi3
; RV32I-NEXT: mv s2, a0
; RV32I-NEXT: li a1, 95
; RV32I-NEXT: mv a0, s1
-; RV32I-NEXT: call __udivsi3@plt
+; RV32I-NEXT: call __udivsi3
; RV32I-NEXT: add a0, s8, a0
; RV32I-NEXT: add s2, s7, s2
; RV32I-NEXT: add s3, s6, s3
@@ -435,35 +435,35 @@ define <4 x i16> @combine_urem_udiv(<4 x i16> %x) nounwind {
; RV64I-NEXT: mv s0, a0
; RV64I-NEXT: li a1, 95
; RV64I-NEXT: mv a0, s4
-; RV64I-NEXT: call __umoddi3@plt
+; RV64I-NEXT: call __umoddi3
; RV64I-NEXT: mv s5, a0
; RV64I-NEXT: li a1, 95
; RV64I-NEXT: mv a0, s3
-; RV64I-NEXT: call __umoddi3@plt
+; RV64I-NEXT: call __umoddi3
; RV64I-NEXT: mv s6, a0
; RV64I-NEXT: li a1, 95
; RV64I-NEXT: mv a0, s2
-; RV64I-NEXT: call __umoddi3@plt
+; RV64I-NEXT: call __umoddi3
; RV64I-NEXT: mv s7, a0
; RV64I-NEXT: li a1, 95
; RV64I-NEXT: mv a0, s1
-; RV64I-NEXT: call __umoddi3@plt
+; RV64I-NEXT: call __umoddi3
; RV64I-NEXT: mv s8, a0
; RV64I-NEXT: li a1, 95
; RV64I-NEXT: mv a0, s4
-; RV64I-NEXT: call __udivdi3@plt
+; RV64I-NEXT: call __udivdi3
; RV64I-NEXT: mv s4, a0
; RV64I-NEXT: li a1, 95
; RV64I-NEXT: mv a0, s3
-; RV64I-NEXT: call __udivdi3@plt
+; RV64I-NEXT: call __udivdi3
; RV64I-NEXT: mv s3, a0
; RV64I-NEXT: li a1, 95
; RV64I-NEXT: mv a0, s2
-; RV64I-NEXT: call __udivdi3@plt
+; RV64I-NEXT: call __udivdi3
; RV64I-NEXT: mv s2, a0
; RV64I-NEXT: li a1, 95
; RV64I-NEXT: mv a0, s1
-; RV64I-NEXT: call __udivdi3@plt
+; RV64I-NEXT: call __udivdi3
; RV64I-NEXT: add a0, s8, a0
; RV64I-NEXT: add s2, s7, s2
; RV64I-NEXT: add s3, s6, s3
@@ -538,7 +538,7 @@ define <4 x i16> @dont_fold_urem_power_of_two(<4 x i16> %x) nounwind {
; RV32I-NEXT: mv s0, a0
; RV32I-NEXT: li a1, 95
; RV32I-NEXT: mv a0, a2
-; RV32I-NEXT: call __umodsi3@plt
+; RV32I-NEXT: call __umodsi3
; RV32I-NEXT: andi a1, s3, 63
; RV32I-NEXT: andi a2, s2, 31
; RV32I-NEXT: andi s1, s1, 7
@@ -590,7 +590,7 @@ define <4 x i16> @dont_fold_urem_power_of_two(<4 x i16> %x) nounwind {
; RV64I-NEXT: mv s0, a0
; RV64I-NEXT: li a1, 95
; RV64I-NEXT: mv a0, a2
-; RV64I-NEXT: call __umoddi3@plt
+; RV64I-NEXT: call __umoddi3
; RV64I-NEXT: andi a1, s3, 63
; RV64I-NEXT: andi a2, s2, 31
; RV64I-NEXT: andi s1, s1, 7
@@ -646,16 +646,16 @@ define <4 x i16> @dont_fold_urem_one(<4 x i16> %x) nounwind {
; RV32I-NEXT: mv s2, a0
; RV32I-NEXT: li a1, 654
; RV32I-NEXT: mv a0, a2
-; RV32I-NEXT: call __umodsi3@plt
+; RV32I-NEXT: call __umodsi3
; RV32I-NEXT: mv s3, a0
; RV32I-NEXT: li a1, 23
; RV32I-NEXT: mv a0, s1
-; RV32I-NEXT: call __umodsi3@plt
+; RV32I-NEXT: call __umodsi3
; RV32I-NEXT: mv s1, a0
; RV32I-NEXT: lui a0, 1
; RV32I-NEXT: addi a1, a0, 1327
; RV32I-NEXT: mv a0, s0
-; RV32I-NEXT: call __umodsi3@plt
+; RV32I-NEXT: call __umodsi3
; RV32I-NEXT: sh a0, 6(s2)
; RV32I-NEXT: sh s1, 4(s2)
; RV32I-NEXT: sh s3, 2(s2)
@@ -712,16 +712,16 @@ define <4 x i16> @dont_fold_urem_one(<4 x i16> %x) nounwind {
; RV64I-NEXT: mv s2, a0
; RV64I-NEXT: li a1, 654
; RV64I-NEXT: mv a0, a2
-; RV64I-NEXT: call __umoddi3@plt
+; RV64I-NEXT: call __umoddi3
; RV64I-NEXT: mv s3, a0
; RV64I-NEXT: li a1, 23
; RV64I-NEXT: mv a0, s1
-; RV64I-NEXT: call __umoddi3@plt
+; RV64I-NEXT: call __umoddi3
; RV64I-NEXT: mv s1, a0
; RV64I-NEXT: lui a0, 1
; RV64I-NEXT: addiw a1, a0, 1327
; RV64I-NEXT: mv a0, s0
-; RV64I-NEXT: call __umoddi3@plt
+; RV64I-NEXT: call __umoddi3
; RV64I-NEXT: sh a0, 6(s2)
; RV64I-NEXT: sh s1, 4(s2)
; RV64I-NEXT: sh s3, 2(s2)
@@ -803,21 +803,21 @@ define <4 x i64> @dont_fold_urem_i64(<4 x i64> %x) nounwind {
; RV32I-NEXT: li a2, 1
; RV32I-NEXT: mv a0, a3
; RV32I-NEXT: li a3, 0
-; RV32I-NEXT: call __umoddi3@plt
+; RV32I-NEXT: call __umoddi3
; RV32I-NEXT: mv s7, a0
; RV32I-NEXT: mv s8, a1
; RV32I-NEXT: li a2, 654
; RV32I-NEXT: mv a0, s4
; RV32I-NEXT: mv a1, s5
; RV32I-NEXT: li a3, 0
-; RV32I-NEXT: call __umoddi3@plt
+; RV32I-NEXT: call __umoddi3
; RV32I-NEXT: mv s4, a0
; RV32I-NEXT: mv s5, a1
; RV32I-NEXT: li a2, 23
; RV32I-NEXT: mv a0, s2
; RV32I-NEXT: mv a1, s3
; RV32I-NEXT: li a3, 0
-; RV32I-NEXT: call __umoddi3@plt
+; RV32I-NEXT: call __umoddi3
; RV32I-NEXT: mv s2, a0
; RV32I-NEXT: mv s3, a1
; RV32I-NEXT: lui a0, 1
@@ -825,7 +825,7 @@ define <4 x i64> @dont_fold_urem_i64(<4 x i64> %x) nounwind {
; RV32I-NEXT: mv a0, s0
; RV32I-NEXT: mv a1, s1
; RV32I-NEXT: li a3, 0
-; RV32I-NEXT: call __umoddi3@plt
+; RV32I-NEXT: call __umoddi3
; RV32I-NEXT: sw a1, 28(s6)
; RV32I-NEXT: sw a0, 24(s6)
; RV32I-NEXT: sw s3, 20(s6)
@@ -872,21 +872,21 @@ define <4 x i64> @dont_fold_urem_i64(<4 x i64> %x) nounwind {
; RV32IM-NEXT: li a2, 1
; RV32IM-NEXT: mv a0, a3
; RV32IM-NEXT: li a3, 0
-; RV32IM-NEXT: call __umoddi3@plt
+; RV32IM-NEXT: call __umoddi3
; RV32IM-NEXT: mv s7, a0
; RV32IM-NEXT: mv s8, a1
; RV32IM-NEXT: li a2, 654
; RV32IM-NEXT: mv a0, s4
; RV32IM-NEXT: mv a1, s5
; RV32IM-NEXT: li a3, 0
-; RV32IM-NEXT: call __umoddi3@plt
+; RV32IM-NEXT: call __umoddi3
; RV32IM-NEXT: mv s4, a0
; RV32IM-NEXT: mv s5, a1
; RV32IM-NEXT: li a2, 23
; RV32IM-NEXT: mv a0, s2
; RV32IM-NEXT: mv a1, s3
; RV32IM-NEXT: li a3, 0
-; RV32IM-NEXT: call __umoddi3@plt
+; RV32IM-NEXT: call __umoddi3
; RV32IM-NEXT: mv s2, a0
; RV32IM-NEXT: mv s3, a1
; RV32IM-NEXT: lui a0, 1
@@ -894,7 +894,7 @@ define <4 x i64> @dont_fold_urem_i64(<4 x i64> %x) nounwind {
; RV32IM-NEXT: mv a0, s0
; RV32IM-NEXT: mv a1, s1
; RV32IM-NEXT: li a3, 0
-; RV32IM-NEXT: call __umoddi3@plt
+; RV32IM-NEXT: call __umoddi3
; RV32IM-NEXT: sw a1, 28(s6)
; RV32IM-NEXT: sw a0, 24(s6)
; RV32IM-NEXT: sw s3, 20(s6)
@@ -930,16 +930,16 @@ define <4 x i64> @dont_fold_urem_i64(<4 x i64> %x) nounwind {
; RV64I-NEXT: mv s2, a0
; RV64I-NEXT: li a1, 654
; RV64I-NEXT: mv a0, a2
-; RV64I-NEXT: call __umoddi3@plt
+; RV64I-NEXT: call __umoddi3
; RV64I-NEXT: mv s3, a0
; RV64I-NEXT: li a1, 23
; RV64I-NEXT: mv a0, s1
-; RV64I-NEXT: call __umoddi3@plt
+; RV64I-NEXT: call __umoddi3
; RV64I-NEXT: mv s1, a0
; RV64I-NEXT: lui a0, 1
; RV64I-NEXT: addiw a1, a0, 1327
; RV64I-NEXT: mv a0, s0
-; RV64I-NEXT: call __umoddi3@plt
+; RV64I-NEXT: call __umoddi3
; RV64I-NEXT: sd a0, 24(s2)
; RV64I-NEXT: sd s1, 16(s2)
; RV64I-NEXT: sd s3, 8(s2)
diff --git a/llvm/test/CodeGen/RISCV/vararg.ll b/llvm/test/CodeGen/RISCV/vararg.ll
index 67d1bfac4d61..8adce4bc466d 100644
--- a/llvm/test/CodeGen/RISCV/vararg.ll
+++ b/llvm/test/CodeGen/RISCV/vararg.ll
@@ -269,7 +269,7 @@ define i32 @va1_va_arg_alloca(ptr %fmt, ...) nounwind {
; ILP32-ILP32F-FPELIM-NEXT: andi a0, a0, -16
; ILP32-ILP32F-FPELIM-NEXT: sub a0, sp, a0
; ILP32-ILP32F-FPELIM-NEXT: mv sp, a0
-; ILP32-ILP32F-FPELIM-NEXT: call notdead@plt
+; ILP32-ILP32F-FPELIM-NEXT: call notdead
; ILP32-ILP32F-FPELIM-NEXT: mv a0, s1
; ILP32-ILP32F-FPELIM-NEXT: addi sp, s0, -16
; ILP32-ILP32F-FPELIM-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
@@ -299,7 +299,7 @@ define i32 @va1_va_arg_alloca(ptr %fmt, ...) nounwind {
; ILP32-ILP32F-WITHFP-NEXT: andi a0, a0, -16
; ILP32-ILP32F-WITHFP-NEXT: sub a0, sp, a0
; ILP32-ILP32F-WITHFP-NEXT: mv sp, a0
-; ILP32-ILP32F-WITHFP-NEXT: call notdead@plt
+; ILP32-ILP32F-WITHFP-NEXT: call notdead
; ILP32-ILP32F-WITHFP-NEXT: mv a0, s1
; ILP32-ILP32F-WITHFP-NEXT: addi sp, s0, -16
; ILP32-ILP32F-WITHFP-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
@@ -329,7 +329,7 @@ define i32 @va1_va_arg_alloca(ptr %fmt, ...) nounwind {
; RV32D-ILP32-ILP32F-ILP32D-FPELIM-NEXT: andi a0, a0, -16
; RV32D-ILP32-ILP32F-ILP32D-FPELIM-NEXT: sub a0, sp, a0
; RV32D-ILP32-ILP32F-ILP32D-FPELIM-NEXT: mv sp, a0
-; RV32D-ILP32-ILP32F-ILP32D-FPELIM-NEXT: call notdead@plt
+; RV32D-ILP32-ILP32F-ILP32D-FPELIM-NEXT: call notdead
; RV32D-ILP32-ILP32F-ILP32D-FPELIM-NEXT: mv a0, s1
; RV32D-ILP32-ILP32F-ILP32D-FPELIM-NEXT: addi sp, s0, -16
; RV32D-ILP32-ILP32F-ILP32D-FPELIM-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
@@ -361,7 +361,7 @@ define i32 @va1_va_arg_alloca(ptr %fmt, ...) nounwind {
; LP64-LP64F-LP64D-FPELIM-NEXT: andi a0, a0, -16
; LP64-LP64F-LP64D-FPELIM-NEXT: sub a0, sp, a0
; LP64-LP64F-LP64D-FPELIM-NEXT: mv sp, a0
-; LP64-LP64F-LP64D-FPELIM-NEXT: call notdead@plt
+; LP64-LP64F-LP64D-FPELIM-NEXT: call notdead
; LP64-LP64F-LP64D-FPELIM-NEXT: mv a0, s1
; LP64-LP64F-LP64D-FPELIM-NEXT: addi sp, s0, -32
; LP64-LP64F-LP64D-FPELIM-NEXT: ld ra, 24(sp) # 8-byte Folded Reload
@@ -393,7 +393,7 @@ define i32 @va1_va_arg_alloca(ptr %fmt, ...) nounwind {
; LP64-LP64F-LP64D-WITHFP-NEXT: andi a0, a0, -16
; LP64-LP64F-LP64D-WITHFP-NEXT: sub a0, sp, a0
; LP64-LP64F-LP64D-WITHFP-NEXT: mv sp, a0
-; LP64-LP64F-LP64D-WITHFP-NEXT: call notdead@plt
+; LP64-LP64F-LP64D-WITHFP-NEXT: call notdead
; LP64-LP64F-LP64D-WITHFP-NEXT: mv a0, s1
; LP64-LP64F-LP64D-WITHFP-NEXT: addi sp, s0, -32
; LP64-LP64F-LP64D-WITHFP-NEXT: ld ra, 24(sp) # 8-byte Folded Reload
@@ -419,7 +419,7 @@ define void @va1_caller() nounwind {
; ILP32-ILP32F-FPELIM-NEXT: lui a3, 261888
; ILP32-ILP32F-FPELIM-NEXT: li a4, 2
; ILP32-ILP32F-FPELIM-NEXT: li a2, 0
-; ILP32-ILP32F-FPELIM-NEXT: call va1@plt
+; ILP32-ILP32F-FPELIM-NEXT: call va1
; ILP32-ILP32F-FPELIM-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; ILP32-ILP32F-FPELIM-NEXT: addi sp, sp, 16
; ILP32-ILP32F-FPELIM-NEXT: ret
@@ -433,7 +433,7 @@ define void @va1_caller() nounwind {
; ILP32-ILP32F-WITHFP-NEXT: lui a3, 261888
; ILP32-ILP32F-WITHFP-NEXT: li a4, 2
; ILP32-ILP32F-WITHFP-NEXT: li a2, 0
-; ILP32-ILP32F-WITHFP-NEXT: call va1@plt
+; ILP32-ILP32F-WITHFP-NEXT: call va1
; ILP32-ILP32F-WITHFP-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; ILP32-ILP32F-WITHFP-NEXT: lw s0, 8(sp) # 4-byte Folded Reload
; ILP32-ILP32F-WITHFP-NEXT: addi sp, sp, 16
@@ -446,7 +446,7 @@ define void @va1_caller() nounwind {
; RV32D-ILP32-ILP32F-ILP32D-FPELIM-NEXT: lui a3, 261888
; RV32D-ILP32-ILP32F-ILP32D-FPELIM-NEXT: li a4, 2
; RV32D-ILP32-ILP32F-ILP32D-FPELIM-NEXT: li a2, 0
-; RV32D-ILP32-ILP32F-ILP32D-FPELIM-NEXT: call va1@plt
+; RV32D-ILP32-ILP32F-ILP32D-FPELIM-NEXT: call va1
; RV32D-ILP32-ILP32F-ILP32D-FPELIM-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32D-ILP32-ILP32F-ILP32D-FPELIM-NEXT: addi sp, sp, 16
; RV32D-ILP32-ILP32F-ILP32D-FPELIM-NEXT: ret
@@ -458,7 +458,7 @@ define void @va1_caller() nounwind {
; LP64-LP64F-LP64D-FPELIM-NEXT: li a1, 1023
; LP64-LP64F-LP64D-FPELIM-NEXT: slli a1, a1, 52
; LP64-LP64F-LP64D-FPELIM-NEXT: li a2, 2
-; LP64-LP64F-LP64D-FPELIM-NEXT: call va1@plt
+; LP64-LP64F-LP64D-FPELIM-NEXT: call va1
; LP64-LP64F-LP64D-FPELIM-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; LP64-LP64F-LP64D-FPELIM-NEXT: addi sp, sp, 16
; LP64-LP64F-LP64D-FPELIM-NEXT: ret
@@ -472,7 +472,7 @@ define void @va1_caller() nounwind {
; LP64-LP64F-LP64D-WITHFP-NEXT: li a1, 1023
; LP64-LP64F-LP64D-WITHFP-NEXT: slli a1, a1, 52
; LP64-LP64F-LP64D-WITHFP-NEXT: li a2, 2
-; LP64-LP64F-LP64D-WITHFP-NEXT: call va1@plt
+; LP64-LP64F-LP64D-WITHFP-NEXT: call va1
; LP64-LP64F-LP64D-WITHFP-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; LP64-LP64F-LP64D-WITHFP-NEXT: ld s0, 0(sp) # 8-byte Folded Reload
; LP64-LP64F-LP64D-WITHFP-NEXT: addi sp, sp, 16
@@ -725,7 +725,7 @@ define void @va2_caller() nounwind {
; ILP32-ILP32F-FPELIM-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; ILP32-ILP32F-FPELIM-NEXT: lui a3, 261888
; ILP32-ILP32F-FPELIM-NEXT: li a2, 0
-; ILP32-ILP32F-FPELIM-NEXT: call va2@plt
+; ILP32-ILP32F-FPELIM-NEXT: call va2
; ILP32-ILP32F-FPELIM-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; ILP32-ILP32F-FPELIM-NEXT: addi sp, sp, 16
; ILP32-ILP32F-FPELIM-NEXT: ret
@@ -738,7 +738,7 @@ define void @va2_caller() nounwind {
; ILP32-ILP32F-WITHFP-NEXT: addi s0, sp, 16
; ILP32-ILP32F-WITHFP-NEXT: lui a3, 261888
; ILP32-ILP32F-WITHFP-NEXT: li a2, 0
-; ILP32-ILP32F-WITHFP-NEXT: call va2@plt
+; ILP32-ILP32F-WITHFP-NEXT: call va2
; ILP32-ILP32F-WITHFP-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; ILP32-ILP32F-WITHFP-NEXT: lw s0, 8(sp) # 4-byte Folded Reload
; ILP32-ILP32F-WITHFP-NEXT: addi sp, sp, 16
@@ -750,7 +750,7 @@ define void @va2_caller() nounwind {
; RV32D-ILP32-ILP32F-ILP32D-FPELIM-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32D-ILP32-ILP32F-ILP32D-FPELIM-NEXT: lui a3, 261888
; RV32D-ILP32-ILP32F-ILP32D-FPELIM-NEXT: li a2, 0
-; RV32D-ILP32-ILP32F-ILP32D-FPELIM-NEXT: call va2@plt
+; RV32D-ILP32-ILP32F-ILP32D-FPELIM-NEXT: call va2
; RV32D-ILP32-ILP32F-ILP32D-FPELIM-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32D-ILP32-ILP32F-ILP32D-FPELIM-NEXT: addi sp, sp, 16
; RV32D-ILP32-ILP32F-ILP32D-FPELIM-NEXT: ret
@@ -761,7 +761,7 @@ define void @va2_caller() nounwind {
; LP64-LP64F-LP64D-FPELIM-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; LP64-LP64F-LP64D-FPELIM-NEXT: li a1, 1023
; LP64-LP64F-LP64D-FPELIM-NEXT: slli a1, a1, 52
-; LP64-LP64F-LP64D-FPELIM-NEXT: call va2@plt
+; LP64-LP64F-LP64D-FPELIM-NEXT: call va2
; LP64-LP64F-LP64D-FPELIM-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; LP64-LP64F-LP64D-FPELIM-NEXT: addi sp, sp, 16
; LP64-LP64F-LP64D-FPELIM-NEXT: ret
@@ -774,7 +774,7 @@ define void @va2_caller() nounwind {
; LP64-LP64F-LP64D-WITHFP-NEXT: addi s0, sp, 16
; LP64-LP64F-LP64D-WITHFP-NEXT: li a1, 1023
; LP64-LP64F-LP64D-WITHFP-NEXT: slli a1, a1, 52
-; LP64-LP64F-LP64D-WITHFP-NEXT: call va2@plt
+; LP64-LP64F-LP64D-WITHFP-NEXT: call va2
; LP64-LP64F-LP64D-WITHFP-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; LP64-LP64F-LP64D-WITHFP-NEXT: ld s0, 0(sp) # 8-byte Folded Reload
; LP64-LP64F-LP64D-WITHFP-NEXT: addi sp, sp, 16
@@ -1040,7 +1040,7 @@ define void @va3_caller() nounwind {
; ILP32-ILP32F-FPELIM-NEXT: lui a5, 262144
; ILP32-ILP32F-FPELIM-NEXT: li a2, 0
; ILP32-ILP32F-FPELIM-NEXT: li a4, 0
-; ILP32-ILP32F-FPELIM-NEXT: call va3@plt
+; ILP32-ILP32F-FPELIM-NEXT: call va3
; ILP32-ILP32F-FPELIM-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; ILP32-ILP32F-FPELIM-NEXT: addi sp, sp, 16
; ILP32-ILP32F-FPELIM-NEXT: ret
@@ -1056,7 +1056,7 @@ define void @va3_caller() nounwind {
; ILP32-ILP32F-WITHFP-NEXT: lui a5, 262144
; ILP32-ILP32F-WITHFP-NEXT: li a2, 0
; ILP32-ILP32F-WITHFP-NEXT: li a4, 0
-; ILP32-ILP32F-WITHFP-NEXT: call va3@plt
+; ILP32-ILP32F-WITHFP-NEXT: call va3
; ILP32-ILP32F-WITHFP-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; ILP32-ILP32F-WITHFP-NEXT: lw s0, 8(sp) # 4-byte Folded Reload
; ILP32-ILP32F-WITHFP-NEXT: addi sp, sp, 16
@@ -1071,7 +1071,7 @@ define void @va3_caller() nounwind {
; RV32D-ILP32-ILP32F-ILP32D-FPELIM-NEXT: lui a5, 262144
; RV32D-ILP32-ILP32F-ILP32D-FPELIM-NEXT: li a2, 0
; RV32D-ILP32-ILP32F-ILP32D-FPELIM-NEXT: li a4, 0
-; RV32D-ILP32-ILP32F-ILP32D-FPELIM-NEXT: call va3@plt
+; RV32D-ILP32-ILP32F-ILP32D-FPELIM-NEXT: call va3
; RV32D-ILP32-ILP32F-ILP32D-FPELIM-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32D-ILP32-ILP32F-ILP32D-FPELIM-NEXT: addi sp, sp, 16
; RV32D-ILP32-ILP32F-ILP32D-FPELIM-NEXT: ret
@@ -1084,7 +1084,7 @@ define void @va3_caller() nounwind {
; LP64-LP64F-LP64D-FPELIM-NEXT: slli a2, a2, 62
; LP64-LP64F-LP64D-FPELIM-NEXT: li a0, 2
; LP64-LP64F-LP64D-FPELIM-NEXT: li a1, 1111
-; LP64-LP64F-LP64D-FPELIM-NEXT: call va3@plt
+; LP64-LP64F-LP64D-FPELIM-NEXT: call va3
; LP64-LP64F-LP64D-FPELIM-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; LP64-LP64F-LP64D-FPELIM-NEXT: addi sp, sp, 16
; LP64-LP64F-LP64D-FPELIM-NEXT: ret
@@ -1099,7 +1099,7 @@ define void @va3_caller() nounwind {
; LP64-LP64F-LP64D-WITHFP-NEXT: slli a2, a2, 62
; LP64-LP64F-LP64D-WITHFP-NEXT: li a0, 2
; LP64-LP64F-LP64D-WITHFP-NEXT: li a1, 1111
-; LP64-LP64F-LP64D-WITHFP-NEXT: call va3@plt
+; LP64-LP64F-LP64D-WITHFP-NEXT: call va3
; LP64-LP64F-LP64D-WITHFP-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; LP64-LP64F-LP64D-WITHFP-NEXT: ld s0, 0(sp) # 8-byte Folded Reload
; LP64-LP64F-LP64D-WITHFP-NEXT: addi sp, sp, 16
@@ -1127,7 +1127,7 @@ define i32 @va4_va_copy(i32 %argno, ...) nounwind {
; ILP32-ILP32F-FPELIM-NEXT: addi a0, sp, 24
; ILP32-ILP32F-FPELIM-NEXT: sw a0, 4(sp)
; ILP32-ILP32F-FPELIM-NEXT: sw a0, 0(sp)
-; ILP32-ILP32F-FPELIM-NEXT: call notdead@plt
+; ILP32-ILP32F-FPELIM-NEXT: call notdead
; ILP32-ILP32F-FPELIM-NEXT: lw a0, 4(sp)
; ILP32-ILP32F-FPELIM-NEXT: addi a0, a0, 3
; ILP32-ILP32F-FPELIM-NEXT: andi a0, a0, -4
@@ -1170,7 +1170,7 @@ define i32 @va4_va_copy(i32 %argno, ...) nounwind {
; ILP32-ILP32F-WITHFP-NEXT: addi a0, s0, 8
; ILP32-ILP32F-WITHFP-NEXT: sw a0, -16(s0)
; ILP32-ILP32F-WITHFP-NEXT: sw a0, -20(s0)
-; ILP32-ILP32F-WITHFP-NEXT: call notdead@plt
+; ILP32-ILP32F-WITHFP-NEXT: call notdead
; ILP32-ILP32F-WITHFP-NEXT: lw a0, -16(s0)
; ILP32-ILP32F-WITHFP-NEXT: addi a0, a0, 3
; ILP32-ILP32F-WITHFP-NEXT: andi a0, a0, -4
@@ -1212,7 +1212,7 @@ define i32 @va4_va_copy(i32 %argno, ...) nounwind {
; RV32D-ILP32-ILP32F-ILP32D-FPELIM-NEXT: addi a0, sp, 24
; RV32D-ILP32-ILP32F-ILP32D-FPELIM-NEXT: sw a0, 4(sp)
; RV32D-ILP32-ILP32F-ILP32D-FPELIM-NEXT: sw a0, 0(sp)
-; RV32D-ILP32-ILP32F-ILP32D-FPELIM-NEXT: call notdead@plt
+; RV32D-ILP32-ILP32F-ILP32D-FPELIM-NEXT: call notdead
; RV32D-ILP32-ILP32F-ILP32D-FPELIM-NEXT: lw a0, 4(sp)
; RV32D-ILP32-ILP32F-ILP32D-FPELIM-NEXT: addi a0, a0, 3
; RV32D-ILP32-ILP32F-ILP32D-FPELIM-NEXT: andi a0, a0, -4
@@ -1253,7 +1253,7 @@ define i32 @va4_va_copy(i32 %argno, ...) nounwind {
; LP64-LP64F-LP64D-FPELIM-NEXT: addi a0, sp, 48
; LP64-LP64F-LP64D-FPELIM-NEXT: sd a0, 8(sp)
; LP64-LP64F-LP64D-FPELIM-NEXT: sd a0, 0(sp)
-; LP64-LP64F-LP64D-FPELIM-NEXT: call notdead@plt
+; LP64-LP64F-LP64D-FPELIM-NEXT: call notdead
; LP64-LP64F-LP64D-FPELIM-NEXT: ld a0, 8(sp)
; LP64-LP64F-LP64D-FPELIM-NEXT: addi a0, a0, 3
; LP64-LP64F-LP64D-FPELIM-NEXT: andi a0, a0, -4
@@ -1296,7 +1296,7 @@ define i32 @va4_va_copy(i32 %argno, ...) nounwind {
; LP64-LP64F-LP64D-WITHFP-NEXT: addi a0, s0, 16
; LP64-LP64F-LP64D-WITHFP-NEXT: sd a0, -32(s0)
; LP64-LP64F-LP64D-WITHFP-NEXT: sd a0, -40(s0)
-; LP64-LP64F-LP64D-WITHFP-NEXT: call notdead@plt
+; LP64-LP64F-LP64D-WITHFP-NEXT: call notdead
; LP64-LP64F-LP64D-WITHFP-NEXT: ld a0, -32(s0)
; LP64-LP64F-LP64D-WITHFP-NEXT: addi a0, a0, 3
; LP64-LP64F-LP64D-WITHFP-NEXT: andi a0, a0, -4
@@ -1384,7 +1384,7 @@ define void @va5_aligned_stack_caller() nounwind {
; ILP32-ILP32F-FPELIM-NEXT: li a4, 13
; ILP32-ILP32F-FPELIM-NEXT: li a7, 4
; ILP32-ILP32F-FPELIM-NEXT: sw a5, 32(sp)
-; ILP32-ILP32F-FPELIM-NEXT: call va5_aligned_stack_callee@plt
+; ILP32-ILP32F-FPELIM-NEXT: call va5_aligned_stack_callee
; ILP32-ILP32F-FPELIM-NEXT: lw ra, 60(sp) # 4-byte Folded Reload
; ILP32-ILP32F-FPELIM-NEXT: addi sp, sp, 64
; ILP32-ILP32F-FPELIM-NEXT: ret
@@ -1429,7 +1429,7 @@ define void @va5_aligned_stack_caller() nounwind {
; ILP32-ILP32F-WITHFP-NEXT: li a4, 13
; ILP32-ILP32F-WITHFP-NEXT: li a7, 4
; ILP32-ILP32F-WITHFP-NEXT: sw a5, -32(s0)
-; ILP32-ILP32F-WITHFP-NEXT: call va5_aligned_stack_callee@plt
+; ILP32-ILP32F-WITHFP-NEXT: call va5_aligned_stack_callee
; ILP32-ILP32F-WITHFP-NEXT: lw ra, 60(sp) # 4-byte Folded Reload
; ILP32-ILP32F-WITHFP-NEXT: lw s0, 56(sp) # 4-byte Folded Reload
; ILP32-ILP32F-WITHFP-NEXT: addi sp, sp, 64
@@ -1473,7 +1473,7 @@ define void @va5_aligned_stack_caller() nounwind {
; RV32D-ILP32-ILP32F-ILP32D-FPELIM-NEXT: li a4, 13
; RV32D-ILP32-ILP32F-ILP32D-FPELIM-NEXT: li a7, 4
; RV32D-ILP32-ILP32F-ILP32D-FPELIM-NEXT: sw a5, 32(sp)
-; RV32D-ILP32-ILP32F-ILP32D-FPELIM-NEXT: call va5_aligned_stack_callee@plt
+; RV32D-ILP32-ILP32F-ILP32D-FPELIM-NEXT: call va5_aligned_stack_callee
; RV32D-ILP32-ILP32F-ILP32D-FPELIM-NEXT: lw ra, 60(sp) # 4-byte Folded Reload
; RV32D-ILP32-ILP32F-ILP32D-FPELIM-NEXT: addi sp, sp, 64
; RV32D-ILP32-ILP32F-ILP32D-FPELIM-NEXT: ret
@@ -1503,7 +1503,7 @@ define void @va5_aligned_stack_caller() nounwind {
; LP64-LP64F-LP64D-FPELIM-NEXT: li a5, 13
; LP64-LP64F-LP64D-FPELIM-NEXT: li a7, 14
; LP64-LP64F-LP64D-FPELIM-NEXT: sd t0, 0(sp)
-; LP64-LP64F-LP64D-FPELIM-NEXT: call va5_aligned_stack_callee@plt
+; LP64-LP64F-LP64D-FPELIM-NEXT: call va5_aligned_stack_callee
; LP64-LP64F-LP64D-FPELIM-NEXT: ld ra, 40(sp) # 8-byte Folded Reload
; LP64-LP64F-LP64D-FPELIM-NEXT: addi sp, sp, 48
; LP64-LP64F-LP64D-FPELIM-NEXT: ret
@@ -1535,7 +1535,7 @@ define void @va5_aligned_stack_caller() nounwind {
; LP64-LP64F-LP64D-WITHFP-NEXT: li a5, 13
; LP64-LP64F-LP64D-WITHFP-NEXT: li a7, 14
; LP64-LP64F-LP64D-WITHFP-NEXT: sd t0, 0(sp)
-; LP64-LP64F-LP64D-WITHFP-NEXT: call va5_aligned_stack_callee@plt
+; LP64-LP64F-LP64D-WITHFP-NEXT: call va5_aligned_stack_callee
; LP64-LP64F-LP64D-WITHFP-NEXT: ld ra, 40(sp) # 8-byte Folded Reload
; LP64-LP64F-LP64D-WITHFP-NEXT: ld s0, 32(sp) # 8-byte Folded Reload
; LP64-LP64F-LP64D-WITHFP-NEXT: addi sp, sp, 48
diff --git a/llvm/test/CodeGen/RISCV/vector-abi.ll b/llvm/test/CodeGen/RISCV/vector-abi.ll
index ad371a447438..9e786e576fcb 100644
--- a/llvm/test/CodeGen/RISCV/vector-abi.ll
+++ b/llvm/test/CodeGen/RISCV/vector-abi.ll
@@ -21,7 +21,7 @@ define void @caller() {
; RV32: SW killed [[ADDI3]], %stack.0, 0 :: (store (s32) into %stack.0)
; RV32: [[ADDI4:%[0-9]+]]:gpr = ADDI %stack.0, 0
; RV32: $x10 = COPY [[ADDI4]]
- ; RV32: PseudoCALL target-flags(riscv-plt) @callee, csr_ilp32_lp64, implicit-def dead $x1, implicit $x10, implicit-def $x2
+ ; RV32: PseudoCALL target-flags(riscv-call) @callee, csr_ilp32_lp64, implicit-def dead $x1, implicit $x10, implicit-def $x2
; RV32: ADJCALLSTACKUP 0, 0, implicit-def dead $x2, implicit $x2
; RV32: PseudoRET
; RV64-LABEL: name: caller
@@ -41,7 +41,7 @@ define void @caller() {
; RV64: SD killed [[ADDI3]], %stack.0, 0 :: (store (s64) into %stack.0)
; RV64: [[ADDI4:%[0-9]+]]:gpr = ADDI %stack.0, 0
; RV64: $x10 = COPY [[ADDI4]]
- ; RV64: PseudoCALL target-flags(riscv-plt) @callee, csr_ilp32_lp64, implicit-def dead $x1, implicit $x10, implicit-def $x2
+ ; RV64: PseudoCALL target-flags(riscv-call) @callee, csr_ilp32_lp64, implicit-def dead $x1, implicit $x10, implicit-def $x2
; RV64: ADJCALLSTACKUP 0, 0, implicit-def dead $x2, implicit $x2
; RV64: PseudoRET
call void @callee(<4 x i8> <i8 4, i8 5, i8 6, i8 7>)
diff --git a/llvm/test/CodeGen/RISCV/vlenb.ll b/llvm/test/CodeGen/RISCV/vlenb.ll
index 6ce7f5372623..1d6c1b5d1acb 100644
--- a/llvm/test/CodeGen/RISCV/vlenb.ll
+++ b/llvm/test/CodeGen/RISCV/vlenb.ll
@@ -53,7 +53,7 @@ define i32 @sink_to_use_call() {
; CHECK-NEXT: .cfi_offset ra, -4
; CHECK-NEXT: .cfi_offset s0, -8
; CHECK-NEXT: csrr s0, vlenb
-; CHECK-NEXT: call unknown@plt
+; CHECK-NEXT: call unknown
; CHECK-NEXT: mv a0, s0
; CHECK-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; CHECK-NEXT: lw s0, 8(sp) # 4-byte Folded Reload
@@ -75,7 +75,7 @@ define void @machine_licm() {
; CHECK-NEXT: .LBB4_1: # %loop
; CHECK-NEXT: # =>This Inner Loop Header: Depth=1
; CHECK-NEXT: csrr a0, vlenb
-; CHECK-NEXT: call use@plt
+; CHECK-NEXT: call use
; CHECK-NEXT: j .LBB4_1
entry:
br label %loop
diff --git a/llvm/test/CodeGen/RISCV/zbb-cmp-combine.ll b/llvm/test/CodeGen/RISCV/zbb-cmp-combine.ll
index 74bf1a8929cb..6d1521c719ed 100644
--- a/llvm/test/CodeGen/RISCV/zbb-cmp-combine.ll
+++ b/llvm/test/CodeGen/RISCV/zbb-cmp-combine.ll
@@ -218,11 +218,11 @@ define i1 @flo(float %c, float %a, float %b) {
; CHECK-RV64I-NEXT: mv s1, a0
; CHECK-RV64I-NEXT: mv a0, a1
; CHECK-RV64I-NEXT: mv a1, s1
-; CHECK-RV64I-NEXT: call __gesf2@plt
+; CHECK-RV64I-NEXT: call __gesf2
; CHECK-RV64I-NEXT: mv s2, a0
; CHECK-RV64I-NEXT: mv a0, s0
; CHECK-RV64I-NEXT: mv a1, s1
-; CHECK-RV64I-NEXT: call __gesf2@plt
+; CHECK-RV64I-NEXT: call __gesf2
; CHECK-RV64I-NEXT: or a0, s2, a0
; CHECK-RV64I-NEXT: slti a0, a0, 0
; CHECK-RV64I-NEXT: ld ra, 24(sp) # 8-byte Folded Reload
@@ -264,11 +264,11 @@ define i1 @dlo(double %c, double %a, double %b) {
; CHECK-NEXT: mv s1, a0
; CHECK-NEXT: mv a0, a1
; CHECK-NEXT: mv a1, s1
-; CHECK-NEXT: call __gedf2@plt
+; CHECK-NEXT: call __gedf2
; CHECK-NEXT: mv s2, a0
; CHECK-NEXT: mv a0, s0
; CHECK-NEXT: mv a1, s1
-; CHECK-NEXT: call __gedf2@plt
+; CHECK-NEXT: call __gedf2
; CHECK-NEXT: or a0, s2, a0
; CHECK-NEXT: slti a0, a0, 0
; CHECK-NEXT: ld ra, 24(sp) # 8-byte Folded Reload
diff --git a/llvm/test/CodeGen/RISCV/zcmp-with-float.ll b/llvm/test/CodeGen/RISCV/zcmp-with-float.ll
index 72213019b8c8..93f95e9709b6 100644
--- a/llvm/test/CodeGen/RISCV/zcmp-with-float.ll
+++ b/llvm/test/CodeGen/RISCV/zcmp-with-float.ll
@@ -15,7 +15,7 @@ define float @foo(float %arg) {
; RV32-NEXT: .cfi_offset ra, -4
; RV32-NEXT: .cfi_offset fs0, -20
; RV32-NEXT: fmv.s fs0, fa0
-; RV32-NEXT: call callee@plt
+; RV32-NEXT: call callee
; RV32-NEXT: fmv.s fa0, fs0
; RV32-NEXT: flw fs0, 12(sp) # 4-byte Folded Reload
; RV32-NEXT: cm.popret {ra}, 32
@@ -28,7 +28,7 @@ define float @foo(float %arg) {
; RV64-NEXT: .cfi_offset ra, -8
; RV64-NEXT: .cfi_offset fs0, -20
; RV64-NEXT: fmv.s fs0, fa0
-; RV64-NEXT: call callee@plt
+; RV64-NEXT: call callee
; RV64-NEXT: fmv.s fa0, fs0
; RV64-NEXT: flw fs0, 12(sp) # 4-byte Folded Reload
; RV64-NEXT: cm.popret {ra}, 32
@@ -48,12 +48,12 @@ define void @foo2(i32 %x, float %y) {
; RV32-NEXT: .cfi_offset fs0, -20
; RV32-NEXT: fmv.s fs0, fa0
; RV32-NEXT: mv s0, a0
-; RV32-NEXT: call bar@plt
+; RV32-NEXT: call bar
; RV32-NEXT: mv a0, s0
; RV32-NEXT: fmv.s fa0, fs0
; RV32-NEXT: flw fs0, 12(sp) # 4-byte Folded Reload
; RV32-NEXT: cm.pop {ra, s0}, 32
-; RV32-NEXT: tail func@plt
+; RV32-NEXT: tail func
;
; RV64-LABEL: foo2:
; RV64: # %bb.0: # %entry
@@ -65,12 +65,12 @@ define void @foo2(i32 %x, float %y) {
; RV64-NEXT: .cfi_offset fs0, -20
; RV64-NEXT: fmv.s fs0, fa0
; RV64-NEXT: mv s0, a0
-; RV64-NEXT: call bar@plt
+; RV64-NEXT: call bar
; RV64-NEXT: mv a0, s0
; RV64-NEXT: fmv.s fa0, fs0
; RV64-NEXT: flw fs0, 12(sp) # 4-byte Folded Reload
; RV64-NEXT: cm.pop {ra, s0}, 32
-; RV64-NEXT: tail func@plt
+; RV64-NEXT: tail func
entry:
tail call void @bar()
tail call void @func(i32 %x, float %y)
diff --git a/llvm/test/CodeGen/RISCV/zfh-half-intrinsics-strict.ll b/llvm/test/CodeGen/RISCV/zfh-half-intrinsics-strict.ll
index 4d573d672bbd..348ca8e52962 100644
--- a/llvm/test/CodeGen/RISCV/zfh-half-intrinsics-strict.ll
+++ b/llvm/test/CodeGen/RISCV/zfh-half-intrinsics-strict.ll
@@ -68,7 +68,7 @@ define half @floor_f16(half %a) nounwind strictfp {
; RV32IZFH-NEXT: addi sp, sp, -16
; RV32IZFH-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32IZFH-NEXT: fcvt.s.h fa0, fa0
-; RV32IZFH-NEXT: call floorf@plt
+; RV32IZFH-NEXT: call floorf
; RV32IZFH-NEXT: fcvt.h.s fa0, fa0
; RV32IZFH-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32IZFH-NEXT: addi sp, sp, 16
@@ -79,7 +79,7 @@ define half @floor_f16(half %a) nounwind strictfp {
; RV64IZFH-NEXT: addi sp, sp, -16
; RV64IZFH-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64IZFH-NEXT: fcvt.s.h fa0, fa0
-; RV64IZFH-NEXT: call floorf@plt
+; RV64IZFH-NEXT: call floorf
; RV64IZFH-NEXT: fcvt.h.s fa0, fa0
; RV64IZFH-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64IZFH-NEXT: addi sp, sp, 16
@@ -90,7 +90,7 @@ define half @floor_f16(half %a) nounwind strictfp {
; RV32IZHINX-NEXT: addi sp, sp, -16
; RV32IZHINX-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32IZHINX-NEXT: fcvt.s.h a0, a0
-; RV32IZHINX-NEXT: call floorf@plt
+; RV32IZHINX-NEXT: call floorf
; RV32IZHINX-NEXT: fcvt.h.s a0, a0
; RV32IZHINX-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32IZHINX-NEXT: addi sp, sp, 16
@@ -101,7 +101,7 @@ define half @floor_f16(half %a) nounwind strictfp {
; RV64IZHINX-NEXT: addi sp, sp, -16
; RV64IZHINX-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64IZHINX-NEXT: fcvt.s.h a0, a0
-; RV64IZHINX-NEXT: call floorf@plt
+; RV64IZHINX-NEXT: call floorf
; RV64IZHINX-NEXT: fcvt.h.s a0, a0
; RV64IZHINX-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64IZHINX-NEXT: addi sp, sp, 16
@@ -112,7 +112,7 @@ define half @floor_f16(half %a) nounwind strictfp {
; RV32IZDINXZHINX-NEXT: addi sp, sp, -16
; RV32IZDINXZHINX-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32IZDINXZHINX-NEXT: fcvt.s.h a0, a0
-; RV32IZDINXZHINX-NEXT: call floorf@plt
+; RV32IZDINXZHINX-NEXT: call floorf
; RV32IZDINXZHINX-NEXT: fcvt.h.s a0, a0
; RV32IZDINXZHINX-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32IZDINXZHINX-NEXT: addi sp, sp, 16
@@ -123,7 +123,7 @@ define half @floor_f16(half %a) nounwind strictfp {
; RV64IZDINXZHINX-NEXT: addi sp, sp, -16
; RV64IZDINXZHINX-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64IZDINXZHINX-NEXT: fcvt.s.h a0, a0
-; RV64IZDINXZHINX-NEXT: call floorf@plt
+; RV64IZDINXZHINX-NEXT: call floorf
; RV64IZDINXZHINX-NEXT: fcvt.h.s a0, a0
; RV64IZDINXZHINX-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64IZDINXZHINX-NEXT: addi sp, sp, 16
@@ -140,7 +140,7 @@ define half @ceil_f16(half %a) nounwind strictfp {
; RV32IZFH-NEXT: addi sp, sp, -16
; RV32IZFH-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32IZFH-NEXT: fcvt.s.h fa0, fa0
-; RV32IZFH-NEXT: call ceilf@plt
+; RV32IZFH-NEXT: call ceilf
; RV32IZFH-NEXT: fcvt.h.s fa0, fa0
; RV32IZFH-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32IZFH-NEXT: addi sp, sp, 16
@@ -151,7 +151,7 @@ define half @ceil_f16(half %a) nounwind strictfp {
; RV64IZFH-NEXT: addi sp, sp, -16
; RV64IZFH-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64IZFH-NEXT: fcvt.s.h fa0, fa0
-; RV64IZFH-NEXT: call ceilf@plt
+; RV64IZFH-NEXT: call ceilf
; RV64IZFH-NEXT: fcvt.h.s fa0, fa0
; RV64IZFH-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64IZFH-NEXT: addi sp, sp, 16
@@ -162,7 +162,7 @@ define half @ceil_f16(half %a) nounwind strictfp {
; RV32IZHINX-NEXT: addi sp, sp, -16
; RV32IZHINX-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32IZHINX-NEXT: fcvt.s.h a0, a0
-; RV32IZHINX-NEXT: call ceilf@plt
+; RV32IZHINX-NEXT: call ceilf
; RV32IZHINX-NEXT: fcvt.h.s a0, a0
; RV32IZHINX-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32IZHINX-NEXT: addi sp, sp, 16
@@ -173,7 +173,7 @@ define half @ceil_f16(half %a) nounwind strictfp {
; RV64IZHINX-NEXT: addi sp, sp, -16
; RV64IZHINX-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64IZHINX-NEXT: fcvt.s.h a0, a0
-; RV64IZHINX-NEXT: call ceilf@plt
+; RV64IZHINX-NEXT: call ceilf
; RV64IZHINX-NEXT: fcvt.h.s a0, a0
; RV64IZHINX-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64IZHINX-NEXT: addi sp, sp, 16
@@ -184,7 +184,7 @@ define half @ceil_f16(half %a) nounwind strictfp {
; RV32IZDINXZHINX-NEXT: addi sp, sp, -16
; RV32IZDINXZHINX-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32IZDINXZHINX-NEXT: fcvt.s.h a0, a0
-; RV32IZDINXZHINX-NEXT: call ceilf@plt
+; RV32IZDINXZHINX-NEXT: call ceilf
; RV32IZDINXZHINX-NEXT: fcvt.h.s a0, a0
; RV32IZDINXZHINX-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32IZDINXZHINX-NEXT: addi sp, sp, 16
@@ -195,7 +195,7 @@ define half @ceil_f16(half %a) nounwind strictfp {
; RV64IZDINXZHINX-NEXT: addi sp, sp, -16
; RV64IZDINXZHINX-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64IZDINXZHINX-NEXT: fcvt.s.h a0, a0
-; RV64IZDINXZHINX-NEXT: call ceilf@plt
+; RV64IZDINXZHINX-NEXT: call ceilf
; RV64IZDINXZHINX-NEXT: fcvt.h.s a0, a0
; RV64IZDINXZHINX-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64IZDINXZHINX-NEXT: addi sp, sp, 16
@@ -212,7 +212,7 @@ define half @trunc_f16(half %a) nounwind strictfp {
; RV32IZFH-NEXT: addi sp, sp, -16
; RV32IZFH-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32IZFH-NEXT: fcvt.s.h fa0, fa0
-; RV32IZFH-NEXT: call truncf@plt
+; RV32IZFH-NEXT: call truncf
; RV32IZFH-NEXT: fcvt.h.s fa0, fa0
; RV32IZFH-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32IZFH-NEXT: addi sp, sp, 16
@@ -223,7 +223,7 @@ define half @trunc_f16(half %a) nounwind strictfp {
; RV64IZFH-NEXT: addi sp, sp, -16
; RV64IZFH-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64IZFH-NEXT: fcvt.s.h fa0, fa0
-; RV64IZFH-NEXT: call truncf@plt
+; RV64IZFH-NEXT: call truncf
; RV64IZFH-NEXT: fcvt.h.s fa0, fa0
; RV64IZFH-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64IZFH-NEXT: addi sp, sp, 16
@@ -234,7 +234,7 @@ define half @trunc_f16(half %a) nounwind strictfp {
; RV32IZHINX-NEXT: addi sp, sp, -16
; RV32IZHINX-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32IZHINX-NEXT: fcvt.s.h a0, a0
-; RV32IZHINX-NEXT: call truncf@plt
+; RV32IZHINX-NEXT: call truncf
; RV32IZHINX-NEXT: fcvt.h.s a0, a0
; RV32IZHINX-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32IZHINX-NEXT: addi sp, sp, 16
@@ -245,7 +245,7 @@ define half @trunc_f16(half %a) nounwind strictfp {
; RV64IZHINX-NEXT: addi sp, sp, -16
; RV64IZHINX-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64IZHINX-NEXT: fcvt.s.h a0, a0
-; RV64IZHINX-NEXT: call truncf@plt
+; RV64IZHINX-NEXT: call truncf
; RV64IZHINX-NEXT: fcvt.h.s a0, a0
; RV64IZHINX-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64IZHINX-NEXT: addi sp, sp, 16
@@ -256,7 +256,7 @@ define half @trunc_f16(half %a) nounwind strictfp {
; RV32IZDINXZHINX-NEXT: addi sp, sp, -16
; RV32IZDINXZHINX-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32IZDINXZHINX-NEXT: fcvt.s.h a0, a0
-; RV32IZDINXZHINX-NEXT: call truncf@plt
+; RV32IZDINXZHINX-NEXT: call truncf
; RV32IZDINXZHINX-NEXT: fcvt.h.s a0, a0
; RV32IZDINXZHINX-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32IZDINXZHINX-NEXT: addi sp, sp, 16
@@ -267,7 +267,7 @@ define half @trunc_f16(half %a) nounwind strictfp {
; RV64IZDINXZHINX-NEXT: addi sp, sp, -16
; RV64IZDINXZHINX-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64IZDINXZHINX-NEXT: fcvt.s.h a0, a0
-; RV64IZDINXZHINX-NEXT: call truncf@plt
+; RV64IZDINXZHINX-NEXT: call truncf
; RV64IZDINXZHINX-NEXT: fcvt.h.s a0, a0
; RV64IZDINXZHINX-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64IZDINXZHINX-NEXT: addi sp, sp, 16
@@ -284,7 +284,7 @@ define half @rint_f16(half %a) nounwind strictfp {
; RV32IZFH-NEXT: addi sp, sp, -16
; RV32IZFH-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32IZFH-NEXT: fcvt.s.h fa0, fa0
-; RV32IZFH-NEXT: call rintf@plt
+; RV32IZFH-NEXT: call rintf
; RV32IZFH-NEXT: fcvt.h.s fa0, fa0
; RV32IZFH-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32IZFH-NEXT: addi sp, sp, 16
@@ -295,7 +295,7 @@ define half @rint_f16(half %a) nounwind strictfp {
; RV64IZFH-NEXT: addi sp, sp, -16
; RV64IZFH-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64IZFH-NEXT: fcvt.s.h fa0, fa0
-; RV64IZFH-NEXT: call rintf@plt
+; RV64IZFH-NEXT: call rintf
; RV64IZFH-NEXT: fcvt.h.s fa0, fa0
; RV64IZFH-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64IZFH-NEXT: addi sp, sp, 16
@@ -306,7 +306,7 @@ define half @rint_f16(half %a) nounwind strictfp {
; RV32IZHINX-NEXT: addi sp, sp, -16
; RV32IZHINX-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32IZHINX-NEXT: fcvt.s.h a0, a0
-; RV32IZHINX-NEXT: call rintf@plt
+; RV32IZHINX-NEXT: call rintf
; RV32IZHINX-NEXT: fcvt.h.s a0, a0
; RV32IZHINX-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32IZHINX-NEXT: addi sp, sp, 16
@@ -317,7 +317,7 @@ define half @rint_f16(half %a) nounwind strictfp {
; RV64IZHINX-NEXT: addi sp, sp, -16
; RV64IZHINX-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64IZHINX-NEXT: fcvt.s.h a0, a0
-; RV64IZHINX-NEXT: call rintf@plt
+; RV64IZHINX-NEXT: call rintf
; RV64IZHINX-NEXT: fcvt.h.s a0, a0
; RV64IZHINX-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64IZHINX-NEXT: addi sp, sp, 16
@@ -328,7 +328,7 @@ define half @rint_f16(half %a) nounwind strictfp {
; RV32IZDINXZHINX-NEXT: addi sp, sp, -16
; RV32IZDINXZHINX-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32IZDINXZHINX-NEXT: fcvt.s.h a0, a0
-; RV32IZDINXZHINX-NEXT: call rintf@plt
+; RV32IZDINXZHINX-NEXT: call rintf
; RV32IZDINXZHINX-NEXT: fcvt.h.s a0, a0
; RV32IZDINXZHINX-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32IZDINXZHINX-NEXT: addi sp, sp, 16
@@ -339,7 +339,7 @@ define half @rint_f16(half %a) nounwind strictfp {
; RV64IZDINXZHINX-NEXT: addi sp, sp, -16
; RV64IZDINXZHINX-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64IZDINXZHINX-NEXT: fcvt.s.h a0, a0
-; RV64IZDINXZHINX-NEXT: call rintf@plt
+; RV64IZDINXZHINX-NEXT: call rintf
; RV64IZDINXZHINX-NEXT: fcvt.h.s a0, a0
; RV64IZDINXZHINX-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64IZDINXZHINX-NEXT: addi sp, sp, 16
@@ -356,7 +356,7 @@ define half @nearbyint_f16(half %a) nounwind strictfp {
; RV32IZFH-NEXT: addi sp, sp, -16
; RV32IZFH-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32IZFH-NEXT: fcvt.s.h fa0, fa0
-; RV32IZFH-NEXT: call nearbyintf@plt
+; RV32IZFH-NEXT: call nearbyintf
; RV32IZFH-NEXT: fcvt.h.s fa0, fa0
; RV32IZFH-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32IZFH-NEXT: addi sp, sp, 16
@@ -367,7 +367,7 @@ define half @nearbyint_f16(half %a) nounwind strictfp {
; RV64IZFH-NEXT: addi sp, sp, -16
; RV64IZFH-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64IZFH-NEXT: fcvt.s.h fa0, fa0
-; RV64IZFH-NEXT: call nearbyintf@plt
+; RV64IZFH-NEXT: call nearbyintf
; RV64IZFH-NEXT: fcvt.h.s fa0, fa0
; RV64IZFH-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64IZFH-NEXT: addi sp, sp, 16
@@ -378,7 +378,7 @@ define half @nearbyint_f16(half %a) nounwind strictfp {
; RV32IZHINX-NEXT: addi sp, sp, -16
; RV32IZHINX-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32IZHINX-NEXT: fcvt.s.h a0, a0
-; RV32IZHINX-NEXT: call nearbyintf@plt
+; RV32IZHINX-NEXT: call nearbyintf
; RV32IZHINX-NEXT: fcvt.h.s a0, a0
; RV32IZHINX-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32IZHINX-NEXT: addi sp, sp, 16
@@ -389,7 +389,7 @@ define half @nearbyint_f16(half %a) nounwind strictfp {
; RV64IZHINX-NEXT: addi sp, sp, -16
; RV64IZHINX-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64IZHINX-NEXT: fcvt.s.h a0, a0
-; RV64IZHINX-NEXT: call nearbyintf@plt
+; RV64IZHINX-NEXT: call nearbyintf
; RV64IZHINX-NEXT: fcvt.h.s a0, a0
; RV64IZHINX-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64IZHINX-NEXT: addi sp, sp, 16
@@ -400,7 +400,7 @@ define half @nearbyint_f16(half %a) nounwind strictfp {
; RV32IZDINXZHINX-NEXT: addi sp, sp, -16
; RV32IZDINXZHINX-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32IZDINXZHINX-NEXT: fcvt.s.h a0, a0
-; RV32IZDINXZHINX-NEXT: call nearbyintf@plt
+; RV32IZDINXZHINX-NEXT: call nearbyintf
; RV32IZDINXZHINX-NEXT: fcvt.h.s a0, a0
; RV32IZDINXZHINX-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32IZDINXZHINX-NEXT: addi sp, sp, 16
@@ -411,7 +411,7 @@ define half @nearbyint_f16(half %a) nounwind strictfp {
; RV64IZDINXZHINX-NEXT: addi sp, sp, -16
; RV64IZDINXZHINX-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64IZDINXZHINX-NEXT: fcvt.s.h a0, a0
-; RV64IZDINXZHINX-NEXT: call nearbyintf@plt
+; RV64IZDINXZHINX-NEXT: call nearbyintf
; RV64IZDINXZHINX-NEXT: fcvt.h.s a0, a0
; RV64IZDINXZHINX-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64IZDINXZHINX-NEXT: addi sp, sp, 16
@@ -428,7 +428,7 @@ define half @round_f16(half %a) nounwind strictfp {
; RV32IZFH-NEXT: addi sp, sp, -16
; RV32IZFH-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32IZFH-NEXT: fcvt.s.h fa0, fa0
-; RV32IZFH-NEXT: call roundf@plt
+; RV32IZFH-NEXT: call roundf
; RV32IZFH-NEXT: fcvt.h.s fa0, fa0
; RV32IZFH-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32IZFH-NEXT: addi sp, sp, 16
@@ -439,7 +439,7 @@ define half @round_f16(half %a) nounwind strictfp {
; RV64IZFH-NEXT: addi sp, sp, -16
; RV64IZFH-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64IZFH-NEXT: fcvt.s.h fa0, fa0
-; RV64IZFH-NEXT: call roundf@plt
+; RV64IZFH-NEXT: call roundf
; RV64IZFH-NEXT: fcvt.h.s fa0, fa0
; RV64IZFH-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64IZFH-NEXT: addi sp, sp, 16
@@ -450,7 +450,7 @@ define half @round_f16(half %a) nounwind strictfp {
; RV32IZHINX-NEXT: addi sp, sp, -16
; RV32IZHINX-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32IZHINX-NEXT: fcvt.s.h a0, a0
-; RV32IZHINX-NEXT: call roundf@plt
+; RV32IZHINX-NEXT: call roundf
; RV32IZHINX-NEXT: fcvt.h.s a0, a0
; RV32IZHINX-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32IZHINX-NEXT: addi sp, sp, 16
@@ -461,7 +461,7 @@ define half @round_f16(half %a) nounwind strictfp {
; RV64IZHINX-NEXT: addi sp, sp, -16
; RV64IZHINX-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64IZHINX-NEXT: fcvt.s.h a0, a0
-; RV64IZHINX-NEXT: call roundf@plt
+; RV64IZHINX-NEXT: call roundf
; RV64IZHINX-NEXT: fcvt.h.s a0, a0
; RV64IZHINX-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64IZHINX-NEXT: addi sp, sp, 16
@@ -472,7 +472,7 @@ define half @round_f16(half %a) nounwind strictfp {
; RV32IZDINXZHINX-NEXT: addi sp, sp, -16
; RV32IZDINXZHINX-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32IZDINXZHINX-NEXT: fcvt.s.h a0, a0
-; RV32IZDINXZHINX-NEXT: call roundf@plt
+; RV32IZDINXZHINX-NEXT: call roundf
; RV32IZDINXZHINX-NEXT: fcvt.h.s a0, a0
; RV32IZDINXZHINX-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32IZDINXZHINX-NEXT: addi sp, sp, 16
@@ -483,7 +483,7 @@ define half @round_f16(half %a) nounwind strictfp {
; RV64IZDINXZHINX-NEXT: addi sp, sp, -16
; RV64IZDINXZHINX-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64IZDINXZHINX-NEXT: fcvt.s.h a0, a0
-; RV64IZDINXZHINX-NEXT: call roundf@plt
+; RV64IZDINXZHINX-NEXT: call roundf
; RV64IZDINXZHINX-NEXT: fcvt.h.s a0, a0
; RV64IZDINXZHINX-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64IZDINXZHINX-NEXT: addi sp, sp, 16
@@ -500,7 +500,7 @@ define half @roundeven_f16(half %a) nounwind strictfp {
; RV32IZFH-NEXT: addi sp, sp, -16
; RV32IZFH-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32IZFH-NEXT: fcvt.s.h fa0, fa0
-; RV32IZFH-NEXT: call roundevenf@plt
+; RV32IZFH-NEXT: call roundevenf
; RV32IZFH-NEXT: fcvt.h.s fa0, fa0
; RV32IZFH-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32IZFH-NEXT: addi sp, sp, 16
@@ -511,7 +511,7 @@ define half @roundeven_f16(half %a) nounwind strictfp {
; RV64IZFH-NEXT: addi sp, sp, -16
; RV64IZFH-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64IZFH-NEXT: fcvt.s.h fa0, fa0
-; RV64IZFH-NEXT: call roundevenf@plt
+; RV64IZFH-NEXT: call roundevenf
; RV64IZFH-NEXT: fcvt.h.s fa0, fa0
; RV64IZFH-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64IZFH-NEXT: addi sp, sp, 16
@@ -522,7 +522,7 @@ define half @roundeven_f16(half %a) nounwind strictfp {
; RV32IZHINX-NEXT: addi sp, sp, -16
; RV32IZHINX-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32IZHINX-NEXT: fcvt.s.h a0, a0
-; RV32IZHINX-NEXT: call roundevenf@plt
+; RV32IZHINX-NEXT: call roundevenf
; RV32IZHINX-NEXT: fcvt.h.s a0, a0
; RV32IZHINX-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32IZHINX-NEXT: addi sp, sp, 16
@@ -533,7 +533,7 @@ define half @roundeven_f16(half %a) nounwind strictfp {
; RV64IZHINX-NEXT: addi sp, sp, -16
; RV64IZHINX-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64IZHINX-NEXT: fcvt.s.h a0, a0
-; RV64IZHINX-NEXT: call roundevenf@plt
+; RV64IZHINX-NEXT: call roundevenf
; RV64IZHINX-NEXT: fcvt.h.s a0, a0
; RV64IZHINX-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64IZHINX-NEXT: addi sp, sp, 16
@@ -544,7 +544,7 @@ define half @roundeven_f16(half %a) nounwind strictfp {
; RV32IZDINXZHINX-NEXT: addi sp, sp, -16
; RV32IZDINXZHINX-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32IZDINXZHINX-NEXT: fcvt.s.h a0, a0
-; RV32IZDINXZHINX-NEXT: call roundevenf@plt
+; RV32IZDINXZHINX-NEXT: call roundevenf
; RV32IZDINXZHINX-NEXT: fcvt.h.s a0, a0
; RV32IZDINXZHINX-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32IZDINXZHINX-NEXT: addi sp, sp, 16
@@ -555,7 +555,7 @@ define half @roundeven_f16(half %a) nounwind strictfp {
; RV64IZDINXZHINX-NEXT: addi sp, sp, -16
; RV64IZDINXZHINX-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64IZDINXZHINX-NEXT: fcvt.s.h a0, a0
-; RV64IZDINXZHINX-NEXT: call roundevenf@plt
+; RV64IZDINXZHINX-NEXT: call roundevenf
; RV64IZDINXZHINX-NEXT: fcvt.h.s a0, a0
; RV64IZDINXZHINX-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64IZDINXZHINX-NEXT: addi sp, sp, 16
@@ -644,7 +644,7 @@ define i64 @llrint_f16(half %a) nounwind strictfp {
; RV32IZFH-NEXT: addi sp, sp, -16
; RV32IZFH-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32IZFH-NEXT: fcvt.s.h fa0, fa0
-; RV32IZFH-NEXT: call llrintf@plt
+; RV32IZFH-NEXT: call llrintf
; RV32IZFH-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32IZFH-NEXT: addi sp, sp, 16
; RV32IZFH-NEXT: ret
@@ -659,7 +659,7 @@ define i64 @llrint_f16(half %a) nounwind strictfp {
; RV32IZHINX-NEXT: addi sp, sp, -16
; RV32IZHINX-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32IZHINX-NEXT: fcvt.s.h a0, a0
-; RV32IZHINX-NEXT: call llrintf@plt
+; RV32IZHINX-NEXT: call llrintf
; RV32IZHINX-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32IZHINX-NEXT: addi sp, sp, 16
; RV32IZHINX-NEXT: ret
@@ -674,7 +674,7 @@ define i64 @llrint_f16(half %a) nounwind strictfp {
; RV32IZDINXZHINX-NEXT: addi sp, sp, -16
; RV32IZDINXZHINX-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32IZDINXZHINX-NEXT: fcvt.s.h a0, a0
-; RV32IZDINXZHINX-NEXT: call llrintf@plt
+; RV32IZDINXZHINX-NEXT: call llrintf
; RV32IZDINXZHINX-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32IZDINXZHINX-NEXT: addi sp, sp, 16
; RV32IZDINXZHINX-NEXT: ret
@@ -695,7 +695,7 @@ define i64 @llround_f16(half %a) nounwind strictfp {
; RV32IZFH-NEXT: addi sp, sp, -16
; RV32IZFH-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32IZFH-NEXT: fcvt.s.h fa0, fa0
-; RV32IZFH-NEXT: call llroundf@plt
+; RV32IZFH-NEXT: call llroundf
; RV32IZFH-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32IZFH-NEXT: addi sp, sp, 16
; RV32IZFH-NEXT: ret
@@ -710,7 +710,7 @@ define i64 @llround_f16(half %a) nounwind strictfp {
; RV32IZHINX-NEXT: addi sp, sp, -16
; RV32IZHINX-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32IZHINX-NEXT: fcvt.s.h a0, a0
-; RV32IZHINX-NEXT: call llroundf@plt
+; RV32IZHINX-NEXT: call llroundf
; RV32IZHINX-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32IZHINX-NEXT: addi sp, sp, 16
; RV32IZHINX-NEXT: ret
@@ -725,7 +725,7 @@ define i64 @llround_f16(half %a) nounwind strictfp {
; RV32IZDINXZHINX-NEXT: addi sp, sp, -16
; RV32IZDINXZHINX-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32IZDINXZHINX-NEXT: fcvt.s.h a0, a0
-; RV32IZDINXZHINX-NEXT: call llroundf@plt
+; RV32IZDINXZHINX-NEXT: call llroundf
; RV32IZDINXZHINX-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32IZDINXZHINX-NEXT: addi sp, sp, 16
; RV32IZDINXZHINX-NEXT: ret
diff --git a/llvm/test/CodeGen/RISCV/zfhmin-half-intrinsics-strict.ll b/llvm/test/CodeGen/RISCV/zfhmin-half-intrinsics-strict.ll
index 0475b941ec0f..097d1e0f6ee5 100644
--- a/llvm/test/CodeGen/RISCV/zfhmin-half-intrinsics-strict.ll
+++ b/llvm/test/CodeGen/RISCV/zfhmin-half-intrinsics-strict.ll
@@ -80,7 +80,7 @@ define half @floor_f16(half %a) nounwind strictfp {
; RV32IZFHMIN-NEXT: addi sp, sp, -16
; RV32IZFHMIN-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32IZFHMIN-NEXT: fcvt.s.h fa0, fa0
-; RV32IZFHMIN-NEXT: call floorf@plt
+; RV32IZFHMIN-NEXT: call floorf
; RV32IZFHMIN-NEXT: fcvt.h.s fa0, fa0
; RV32IZFHMIN-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32IZFHMIN-NEXT: addi sp, sp, 16
@@ -91,7 +91,7 @@ define half @floor_f16(half %a) nounwind strictfp {
; RV64IZFHMIN-NEXT: addi sp, sp, -16
; RV64IZFHMIN-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64IZFHMIN-NEXT: fcvt.s.h fa0, fa0
-; RV64IZFHMIN-NEXT: call floorf@plt
+; RV64IZFHMIN-NEXT: call floorf
; RV64IZFHMIN-NEXT: fcvt.h.s fa0, fa0
; RV64IZFHMIN-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64IZFHMIN-NEXT: addi sp, sp, 16
@@ -102,7 +102,7 @@ define half @floor_f16(half %a) nounwind strictfp {
; RV32IZHINXMIN-STRICT-NEXT: addi sp, sp, -16
; RV32IZHINXMIN-STRICT-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32IZHINXMIN-STRICT-NEXT: fcvt.s.h a0, a0
-; RV32IZHINXMIN-STRICT-NEXT: call floorf@plt
+; RV32IZHINXMIN-STRICT-NEXT: call floorf
; RV32IZHINXMIN-STRICT-NEXT: fcvt.h.s a0, a0
; RV32IZHINXMIN-STRICT-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32IZHINXMIN-STRICT-NEXT: addi sp, sp, 16
@@ -113,7 +113,7 @@ define half @floor_f16(half %a) nounwind strictfp {
; RV64IZHINXMIN-STRICT-NEXT: addi sp, sp, -16
; RV64IZHINXMIN-STRICT-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64IZHINXMIN-STRICT-NEXT: fcvt.s.h a0, a0
-; RV64IZHINXMIN-STRICT-NEXT: call floorf@plt
+; RV64IZHINXMIN-STRICT-NEXT: call floorf
; RV64IZHINXMIN-STRICT-NEXT: fcvt.h.s a0, a0
; RV64IZHINXMIN-STRICT-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64IZHINXMIN-STRICT-NEXT: addi sp, sp, 16
@@ -124,7 +124,7 @@ define half @floor_f16(half %a) nounwind strictfp {
; RV32IZDINXZHINXMIN-NEXT: addi sp, sp, -16
; RV32IZDINXZHINXMIN-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32IZDINXZHINXMIN-NEXT: fcvt.s.h a0, a0
-; RV32IZDINXZHINXMIN-NEXT: call floorf@plt
+; RV32IZDINXZHINXMIN-NEXT: call floorf
; RV32IZDINXZHINXMIN-NEXT: fcvt.h.s a0, a0
; RV32IZDINXZHINXMIN-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32IZDINXZHINXMIN-NEXT: addi sp, sp, 16
@@ -135,7 +135,7 @@ define half @floor_f16(half %a) nounwind strictfp {
; RV64IZDINXZHINXMIN-NEXT: addi sp, sp, -16
; RV64IZDINXZHINXMIN-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64IZDINXZHINXMIN-NEXT: fcvt.s.h a0, a0
-; RV64IZDINXZHINXMIN-NEXT: call floorf@plt
+; RV64IZDINXZHINXMIN-NEXT: call floorf
; RV64IZDINXZHINXMIN-NEXT: fcvt.h.s a0, a0
; RV64IZDINXZHINXMIN-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64IZDINXZHINXMIN-NEXT: addi sp, sp, 16
@@ -152,7 +152,7 @@ define half @ceil_f16(half %a) nounwind strictfp {
; RV32IZFHMIN-NEXT: addi sp, sp, -16
; RV32IZFHMIN-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32IZFHMIN-NEXT: fcvt.s.h fa0, fa0
-; RV32IZFHMIN-NEXT: call ceilf@plt
+; RV32IZFHMIN-NEXT: call ceilf
; RV32IZFHMIN-NEXT: fcvt.h.s fa0, fa0
; RV32IZFHMIN-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32IZFHMIN-NEXT: addi sp, sp, 16
@@ -163,7 +163,7 @@ define half @ceil_f16(half %a) nounwind strictfp {
; RV64IZFHMIN-NEXT: addi sp, sp, -16
; RV64IZFHMIN-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64IZFHMIN-NEXT: fcvt.s.h fa0, fa0
-; RV64IZFHMIN-NEXT: call ceilf@plt
+; RV64IZFHMIN-NEXT: call ceilf
; RV64IZFHMIN-NEXT: fcvt.h.s fa0, fa0
; RV64IZFHMIN-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64IZFHMIN-NEXT: addi sp, sp, 16
@@ -174,7 +174,7 @@ define half @ceil_f16(half %a) nounwind strictfp {
; RV32IZHINXMIN-STRICT-NEXT: addi sp, sp, -16
; RV32IZHINXMIN-STRICT-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32IZHINXMIN-STRICT-NEXT: fcvt.s.h a0, a0
-; RV32IZHINXMIN-STRICT-NEXT: call ceilf@plt
+; RV32IZHINXMIN-STRICT-NEXT: call ceilf
; RV32IZHINXMIN-STRICT-NEXT: fcvt.h.s a0, a0
; RV32IZHINXMIN-STRICT-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32IZHINXMIN-STRICT-NEXT: addi sp, sp, 16
@@ -185,7 +185,7 @@ define half @ceil_f16(half %a) nounwind strictfp {
; RV64IZHINXMIN-STRICT-NEXT: addi sp, sp, -16
; RV64IZHINXMIN-STRICT-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64IZHINXMIN-STRICT-NEXT: fcvt.s.h a0, a0
-; RV64IZHINXMIN-STRICT-NEXT: call ceilf@plt
+; RV64IZHINXMIN-STRICT-NEXT: call ceilf
; RV64IZHINXMIN-STRICT-NEXT: fcvt.h.s a0, a0
; RV64IZHINXMIN-STRICT-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64IZHINXMIN-STRICT-NEXT: addi sp, sp, 16
@@ -196,7 +196,7 @@ define half @ceil_f16(half %a) nounwind strictfp {
; RV32IZDINXZHINXMIN-NEXT: addi sp, sp, -16
; RV32IZDINXZHINXMIN-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32IZDINXZHINXMIN-NEXT: fcvt.s.h a0, a0
-; RV32IZDINXZHINXMIN-NEXT: call ceilf@plt
+; RV32IZDINXZHINXMIN-NEXT: call ceilf
; RV32IZDINXZHINXMIN-NEXT: fcvt.h.s a0, a0
; RV32IZDINXZHINXMIN-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32IZDINXZHINXMIN-NEXT: addi sp, sp, 16
@@ -207,7 +207,7 @@ define half @ceil_f16(half %a) nounwind strictfp {
; RV64IZDINXZHINXMIN-NEXT: addi sp, sp, -16
; RV64IZDINXZHINXMIN-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64IZDINXZHINXMIN-NEXT: fcvt.s.h a0, a0
-; RV64IZDINXZHINXMIN-NEXT: call ceilf@plt
+; RV64IZDINXZHINXMIN-NEXT: call ceilf
; RV64IZDINXZHINXMIN-NEXT: fcvt.h.s a0, a0
; RV64IZDINXZHINXMIN-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64IZDINXZHINXMIN-NEXT: addi sp, sp, 16
@@ -224,7 +224,7 @@ define half @trunc_f16(half %a) nounwind strictfp {
; RV32IZFHMIN-NEXT: addi sp, sp, -16
; RV32IZFHMIN-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32IZFHMIN-NEXT: fcvt.s.h fa0, fa0
-; RV32IZFHMIN-NEXT: call truncf@plt
+; RV32IZFHMIN-NEXT: call truncf
; RV32IZFHMIN-NEXT: fcvt.h.s fa0, fa0
; RV32IZFHMIN-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32IZFHMIN-NEXT: addi sp, sp, 16
@@ -235,7 +235,7 @@ define half @trunc_f16(half %a) nounwind strictfp {
; RV64IZFHMIN-NEXT: addi sp, sp, -16
; RV64IZFHMIN-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64IZFHMIN-NEXT: fcvt.s.h fa0, fa0
-; RV64IZFHMIN-NEXT: call truncf@plt
+; RV64IZFHMIN-NEXT: call truncf
; RV64IZFHMIN-NEXT: fcvt.h.s fa0, fa0
; RV64IZFHMIN-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64IZFHMIN-NEXT: addi sp, sp, 16
@@ -246,7 +246,7 @@ define half @trunc_f16(half %a) nounwind strictfp {
; RV32IZHINXMIN-STRICT-NEXT: addi sp, sp, -16
; RV32IZHINXMIN-STRICT-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32IZHINXMIN-STRICT-NEXT: fcvt.s.h a0, a0
-; RV32IZHINXMIN-STRICT-NEXT: call truncf@plt
+; RV32IZHINXMIN-STRICT-NEXT: call truncf
; RV32IZHINXMIN-STRICT-NEXT: fcvt.h.s a0, a0
; RV32IZHINXMIN-STRICT-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32IZHINXMIN-STRICT-NEXT: addi sp, sp, 16
@@ -257,7 +257,7 @@ define half @trunc_f16(half %a) nounwind strictfp {
; RV64IZHINXMIN-STRICT-NEXT: addi sp, sp, -16
; RV64IZHINXMIN-STRICT-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64IZHINXMIN-STRICT-NEXT: fcvt.s.h a0, a0
-; RV64IZHINXMIN-STRICT-NEXT: call truncf@plt
+; RV64IZHINXMIN-STRICT-NEXT: call truncf
; RV64IZHINXMIN-STRICT-NEXT: fcvt.h.s a0, a0
; RV64IZHINXMIN-STRICT-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64IZHINXMIN-STRICT-NEXT: addi sp, sp, 16
@@ -268,7 +268,7 @@ define half @trunc_f16(half %a) nounwind strictfp {
; RV32IZDINXZHINXMIN-NEXT: addi sp, sp, -16
; RV32IZDINXZHINXMIN-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32IZDINXZHINXMIN-NEXT: fcvt.s.h a0, a0
-; RV32IZDINXZHINXMIN-NEXT: call truncf@plt
+; RV32IZDINXZHINXMIN-NEXT: call truncf
; RV32IZDINXZHINXMIN-NEXT: fcvt.h.s a0, a0
; RV32IZDINXZHINXMIN-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32IZDINXZHINXMIN-NEXT: addi sp, sp, 16
@@ -279,7 +279,7 @@ define half @trunc_f16(half %a) nounwind strictfp {
; RV64IZDINXZHINXMIN-NEXT: addi sp, sp, -16
; RV64IZDINXZHINXMIN-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64IZDINXZHINXMIN-NEXT: fcvt.s.h a0, a0
-; RV64IZDINXZHINXMIN-NEXT: call truncf@plt
+; RV64IZDINXZHINXMIN-NEXT: call truncf
; RV64IZDINXZHINXMIN-NEXT: fcvt.h.s a0, a0
; RV64IZDINXZHINXMIN-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64IZDINXZHINXMIN-NEXT: addi sp, sp, 16
@@ -296,7 +296,7 @@ define half @rint_f16(half %a) nounwind strictfp {
; RV32IZFHMIN-NEXT: addi sp, sp, -16
; RV32IZFHMIN-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32IZFHMIN-NEXT: fcvt.s.h fa0, fa0
-; RV32IZFHMIN-NEXT: call rintf@plt
+; RV32IZFHMIN-NEXT: call rintf
; RV32IZFHMIN-NEXT: fcvt.h.s fa0, fa0
; RV32IZFHMIN-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32IZFHMIN-NEXT: addi sp, sp, 16
@@ -307,7 +307,7 @@ define half @rint_f16(half %a) nounwind strictfp {
; RV64IZFHMIN-NEXT: addi sp, sp, -16
; RV64IZFHMIN-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64IZFHMIN-NEXT: fcvt.s.h fa0, fa0
-; RV64IZFHMIN-NEXT: call rintf@plt
+; RV64IZFHMIN-NEXT: call rintf
; RV64IZFHMIN-NEXT: fcvt.h.s fa0, fa0
; RV64IZFHMIN-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64IZFHMIN-NEXT: addi sp, sp, 16
@@ -318,7 +318,7 @@ define half @rint_f16(half %a) nounwind strictfp {
; RV32IZHINXMIN-STRICT-NEXT: addi sp, sp, -16
; RV32IZHINXMIN-STRICT-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32IZHINXMIN-STRICT-NEXT: fcvt.s.h a0, a0
-; RV32IZHINXMIN-STRICT-NEXT: call rintf@plt
+; RV32IZHINXMIN-STRICT-NEXT: call rintf
; RV32IZHINXMIN-STRICT-NEXT: fcvt.h.s a0, a0
; RV32IZHINXMIN-STRICT-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32IZHINXMIN-STRICT-NEXT: addi sp, sp, 16
@@ -329,7 +329,7 @@ define half @rint_f16(half %a) nounwind strictfp {
; RV64IZHINXMIN-STRICT-NEXT: addi sp, sp, -16
; RV64IZHINXMIN-STRICT-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64IZHINXMIN-STRICT-NEXT: fcvt.s.h a0, a0
-; RV64IZHINXMIN-STRICT-NEXT: call rintf@plt
+; RV64IZHINXMIN-STRICT-NEXT: call rintf
; RV64IZHINXMIN-STRICT-NEXT: fcvt.h.s a0, a0
; RV64IZHINXMIN-STRICT-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64IZHINXMIN-STRICT-NEXT: addi sp, sp, 16
@@ -340,7 +340,7 @@ define half @rint_f16(half %a) nounwind strictfp {
; RV32IZDINXZHINXMIN-NEXT: addi sp, sp, -16
; RV32IZDINXZHINXMIN-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32IZDINXZHINXMIN-NEXT: fcvt.s.h a0, a0
-; RV32IZDINXZHINXMIN-NEXT: call rintf@plt
+; RV32IZDINXZHINXMIN-NEXT: call rintf
; RV32IZDINXZHINXMIN-NEXT: fcvt.h.s a0, a0
; RV32IZDINXZHINXMIN-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32IZDINXZHINXMIN-NEXT: addi sp, sp, 16
@@ -351,7 +351,7 @@ define half @rint_f16(half %a) nounwind strictfp {
; RV64IZDINXZHINXMIN-NEXT: addi sp, sp, -16
; RV64IZDINXZHINXMIN-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64IZDINXZHINXMIN-NEXT: fcvt.s.h a0, a0
-; RV64IZDINXZHINXMIN-NEXT: call rintf@plt
+; RV64IZDINXZHINXMIN-NEXT: call rintf
; RV64IZDINXZHINXMIN-NEXT: fcvt.h.s a0, a0
; RV64IZDINXZHINXMIN-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64IZDINXZHINXMIN-NEXT: addi sp, sp, 16
@@ -368,7 +368,7 @@ define half @nearbyint_f16(half %a) nounwind strictfp {
; RV32IZFHMIN-NEXT: addi sp, sp, -16
; RV32IZFHMIN-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32IZFHMIN-NEXT: fcvt.s.h fa0, fa0
-; RV32IZFHMIN-NEXT: call nearbyintf@plt
+; RV32IZFHMIN-NEXT: call nearbyintf
; RV32IZFHMIN-NEXT: fcvt.h.s fa0, fa0
; RV32IZFHMIN-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32IZFHMIN-NEXT: addi sp, sp, 16
@@ -379,7 +379,7 @@ define half @nearbyint_f16(half %a) nounwind strictfp {
; RV64IZFHMIN-NEXT: addi sp, sp, -16
; RV64IZFHMIN-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64IZFHMIN-NEXT: fcvt.s.h fa0, fa0
-; RV64IZFHMIN-NEXT: call nearbyintf@plt
+; RV64IZFHMIN-NEXT: call nearbyintf
; RV64IZFHMIN-NEXT: fcvt.h.s fa0, fa0
; RV64IZFHMIN-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64IZFHMIN-NEXT: addi sp, sp, 16
@@ -390,7 +390,7 @@ define half @nearbyint_f16(half %a) nounwind strictfp {
; RV32IZHINXMIN-STRICT-NEXT: addi sp, sp, -16
; RV32IZHINXMIN-STRICT-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32IZHINXMIN-STRICT-NEXT: fcvt.s.h a0, a0
-; RV32IZHINXMIN-STRICT-NEXT: call nearbyintf@plt
+; RV32IZHINXMIN-STRICT-NEXT: call nearbyintf
; RV32IZHINXMIN-STRICT-NEXT: fcvt.h.s a0, a0
; RV32IZHINXMIN-STRICT-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32IZHINXMIN-STRICT-NEXT: addi sp, sp, 16
@@ -401,7 +401,7 @@ define half @nearbyint_f16(half %a) nounwind strictfp {
; RV64IZHINXMIN-STRICT-NEXT: addi sp, sp, -16
; RV64IZHINXMIN-STRICT-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64IZHINXMIN-STRICT-NEXT: fcvt.s.h a0, a0
-; RV64IZHINXMIN-STRICT-NEXT: call nearbyintf@plt
+; RV64IZHINXMIN-STRICT-NEXT: call nearbyintf
; RV64IZHINXMIN-STRICT-NEXT: fcvt.h.s a0, a0
; RV64IZHINXMIN-STRICT-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64IZHINXMIN-STRICT-NEXT: addi sp, sp, 16
@@ -412,7 +412,7 @@ define half @nearbyint_f16(half %a) nounwind strictfp {
; RV32IZDINXZHINXMIN-NEXT: addi sp, sp, -16
; RV32IZDINXZHINXMIN-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32IZDINXZHINXMIN-NEXT: fcvt.s.h a0, a0
-; RV32IZDINXZHINXMIN-NEXT: call nearbyintf@plt
+; RV32IZDINXZHINXMIN-NEXT: call nearbyintf
; RV32IZDINXZHINXMIN-NEXT: fcvt.h.s a0, a0
; RV32IZDINXZHINXMIN-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32IZDINXZHINXMIN-NEXT: addi sp, sp, 16
@@ -423,7 +423,7 @@ define half @nearbyint_f16(half %a) nounwind strictfp {
; RV64IZDINXZHINXMIN-NEXT: addi sp, sp, -16
; RV64IZDINXZHINXMIN-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64IZDINXZHINXMIN-NEXT: fcvt.s.h a0, a0
-; RV64IZDINXZHINXMIN-NEXT: call nearbyintf@plt
+; RV64IZDINXZHINXMIN-NEXT: call nearbyintf
; RV64IZDINXZHINXMIN-NEXT: fcvt.h.s a0, a0
; RV64IZDINXZHINXMIN-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64IZDINXZHINXMIN-NEXT: addi sp, sp, 16
@@ -440,7 +440,7 @@ define half @round_f16(half %a) nounwind strictfp {
; RV32IZFHMIN-NEXT: addi sp, sp, -16
; RV32IZFHMIN-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32IZFHMIN-NEXT: fcvt.s.h fa0, fa0
-; RV32IZFHMIN-NEXT: call roundf@plt
+; RV32IZFHMIN-NEXT: call roundf
; RV32IZFHMIN-NEXT: fcvt.h.s fa0, fa0
; RV32IZFHMIN-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32IZFHMIN-NEXT: addi sp, sp, 16
@@ -451,7 +451,7 @@ define half @round_f16(half %a) nounwind strictfp {
; RV64IZFHMIN-NEXT: addi sp, sp, -16
; RV64IZFHMIN-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64IZFHMIN-NEXT: fcvt.s.h fa0, fa0
-; RV64IZFHMIN-NEXT: call roundf@plt
+; RV64IZFHMIN-NEXT: call roundf
; RV64IZFHMIN-NEXT: fcvt.h.s fa0, fa0
; RV64IZFHMIN-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64IZFHMIN-NEXT: addi sp, sp, 16
@@ -462,7 +462,7 @@ define half @round_f16(half %a) nounwind strictfp {
; RV32IZHINXMIN-STRICT-NEXT: addi sp, sp, -16
; RV32IZHINXMIN-STRICT-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32IZHINXMIN-STRICT-NEXT: fcvt.s.h a0, a0
-; RV32IZHINXMIN-STRICT-NEXT: call roundf@plt
+; RV32IZHINXMIN-STRICT-NEXT: call roundf
; RV32IZHINXMIN-STRICT-NEXT: fcvt.h.s a0, a0
; RV32IZHINXMIN-STRICT-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32IZHINXMIN-STRICT-NEXT: addi sp, sp, 16
@@ -473,7 +473,7 @@ define half @round_f16(half %a) nounwind strictfp {
; RV64IZHINXMIN-STRICT-NEXT: addi sp, sp, -16
; RV64IZHINXMIN-STRICT-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64IZHINXMIN-STRICT-NEXT: fcvt.s.h a0, a0
-; RV64IZHINXMIN-STRICT-NEXT: call roundf@plt
+; RV64IZHINXMIN-STRICT-NEXT: call roundf
; RV64IZHINXMIN-STRICT-NEXT: fcvt.h.s a0, a0
; RV64IZHINXMIN-STRICT-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64IZHINXMIN-STRICT-NEXT: addi sp, sp, 16
@@ -484,7 +484,7 @@ define half @round_f16(half %a) nounwind strictfp {
; RV32IZDINXZHINXMIN-NEXT: addi sp, sp, -16
; RV32IZDINXZHINXMIN-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32IZDINXZHINXMIN-NEXT: fcvt.s.h a0, a0
-; RV32IZDINXZHINXMIN-NEXT: call roundf@plt
+; RV32IZDINXZHINXMIN-NEXT: call roundf
; RV32IZDINXZHINXMIN-NEXT: fcvt.h.s a0, a0
; RV32IZDINXZHINXMIN-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32IZDINXZHINXMIN-NEXT: addi sp, sp, 16
@@ -495,7 +495,7 @@ define half @round_f16(half %a) nounwind strictfp {
; RV64IZDINXZHINXMIN-NEXT: addi sp, sp, -16
; RV64IZDINXZHINXMIN-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64IZDINXZHINXMIN-NEXT: fcvt.s.h a0, a0
-; RV64IZDINXZHINXMIN-NEXT: call roundf@plt
+; RV64IZDINXZHINXMIN-NEXT: call roundf
; RV64IZDINXZHINXMIN-NEXT: fcvt.h.s a0, a0
; RV64IZDINXZHINXMIN-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64IZDINXZHINXMIN-NEXT: addi sp, sp, 16
@@ -512,7 +512,7 @@ define half @roundeven_f16(half %a) nounwind strictfp {
; RV32IZFHMIN-NEXT: addi sp, sp, -16
; RV32IZFHMIN-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32IZFHMIN-NEXT: fcvt.s.h fa0, fa0
-; RV32IZFHMIN-NEXT: call roundevenf@plt
+; RV32IZFHMIN-NEXT: call roundevenf
; RV32IZFHMIN-NEXT: fcvt.h.s fa0, fa0
; RV32IZFHMIN-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32IZFHMIN-NEXT: addi sp, sp, 16
@@ -523,7 +523,7 @@ define half @roundeven_f16(half %a) nounwind strictfp {
; RV64IZFHMIN-NEXT: addi sp, sp, -16
; RV64IZFHMIN-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64IZFHMIN-NEXT: fcvt.s.h fa0, fa0
-; RV64IZFHMIN-NEXT: call roundevenf@plt
+; RV64IZFHMIN-NEXT: call roundevenf
; RV64IZFHMIN-NEXT: fcvt.h.s fa0, fa0
; RV64IZFHMIN-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64IZFHMIN-NEXT: addi sp, sp, 16
@@ -534,7 +534,7 @@ define half @roundeven_f16(half %a) nounwind strictfp {
; RV32IZHINXMIN-STRICT-NEXT: addi sp, sp, -16
; RV32IZHINXMIN-STRICT-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32IZHINXMIN-STRICT-NEXT: fcvt.s.h a0, a0
-; RV32IZHINXMIN-STRICT-NEXT: call roundevenf@plt
+; RV32IZHINXMIN-STRICT-NEXT: call roundevenf
; RV32IZHINXMIN-STRICT-NEXT: fcvt.h.s a0, a0
; RV32IZHINXMIN-STRICT-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32IZHINXMIN-STRICT-NEXT: addi sp, sp, 16
@@ -545,7 +545,7 @@ define half @roundeven_f16(half %a) nounwind strictfp {
; RV64IZHINXMIN-STRICT-NEXT: addi sp, sp, -16
; RV64IZHINXMIN-STRICT-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64IZHINXMIN-STRICT-NEXT: fcvt.s.h a0, a0
-; RV64IZHINXMIN-STRICT-NEXT: call roundevenf@plt
+; RV64IZHINXMIN-STRICT-NEXT: call roundevenf
; RV64IZHINXMIN-STRICT-NEXT: fcvt.h.s a0, a0
; RV64IZHINXMIN-STRICT-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64IZHINXMIN-STRICT-NEXT: addi sp, sp, 16
@@ -556,7 +556,7 @@ define half @roundeven_f16(half %a) nounwind strictfp {
; RV32IZDINXZHINXMIN-NEXT: addi sp, sp, -16
; RV32IZDINXZHINXMIN-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32IZDINXZHINXMIN-NEXT: fcvt.s.h a0, a0
-; RV32IZDINXZHINXMIN-NEXT: call roundevenf@plt
+; RV32IZDINXZHINXMIN-NEXT: call roundevenf
; RV32IZDINXZHINXMIN-NEXT: fcvt.h.s a0, a0
; RV32IZDINXZHINXMIN-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32IZDINXZHINXMIN-NEXT: addi sp, sp, 16
@@ -567,7 +567,7 @@ define half @roundeven_f16(half %a) nounwind strictfp {
; RV64IZDINXZHINXMIN-NEXT: addi sp, sp, -16
; RV64IZDINXZHINXMIN-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64IZDINXZHINXMIN-NEXT: fcvt.s.h a0, a0
-; RV64IZDINXZHINXMIN-NEXT: call roundevenf@plt
+; RV64IZDINXZHINXMIN-NEXT: call roundevenf
; RV64IZDINXZHINXMIN-NEXT: fcvt.h.s a0, a0
; RV64IZDINXZHINXMIN-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64IZDINXZHINXMIN-NEXT: addi sp, sp, 16
@@ -668,7 +668,7 @@ define i64 @llrint_f16(half %a) nounwind strictfp {
; RV32IZFHMIN-NEXT: addi sp, sp, -16
; RV32IZFHMIN-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32IZFHMIN-NEXT: fcvt.s.h fa0, fa0
-; RV32IZFHMIN-NEXT: call llrintf@plt
+; RV32IZFHMIN-NEXT: call llrintf
; RV32IZFHMIN-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32IZFHMIN-NEXT: addi sp, sp, 16
; RV32IZFHMIN-NEXT: ret
@@ -684,7 +684,7 @@ define i64 @llrint_f16(half %a) nounwind strictfp {
; RV32IZHINXMIN-STRICT-NEXT: addi sp, sp, -16
; RV32IZHINXMIN-STRICT-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32IZHINXMIN-STRICT-NEXT: fcvt.s.h a0, a0
-; RV32IZHINXMIN-STRICT-NEXT: call llrintf@plt
+; RV32IZHINXMIN-STRICT-NEXT: call llrintf
; RV32IZHINXMIN-STRICT-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32IZHINXMIN-STRICT-NEXT: addi sp, sp, 16
; RV32IZHINXMIN-STRICT-NEXT: ret
@@ -700,7 +700,7 @@ define i64 @llrint_f16(half %a) nounwind strictfp {
; RV32IZDINXZHINXMIN-NEXT: addi sp, sp, -16
; RV32IZDINXZHINXMIN-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32IZDINXZHINXMIN-NEXT: fcvt.s.h a0, a0
-; RV32IZDINXZHINXMIN-NEXT: call llrintf@plt
+; RV32IZDINXZHINXMIN-NEXT: call llrintf
; RV32IZDINXZHINXMIN-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32IZDINXZHINXMIN-NEXT: addi sp, sp, 16
; RV32IZDINXZHINXMIN-NEXT: ret
@@ -722,7 +722,7 @@ define i64 @llround_f16(half %a) nounwind strictfp {
; RV32IZFHMIN-NEXT: addi sp, sp, -16
; RV32IZFHMIN-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32IZFHMIN-NEXT: fcvt.s.h fa0, fa0
-; RV32IZFHMIN-NEXT: call llroundf@plt
+; RV32IZFHMIN-NEXT: call llroundf
; RV32IZFHMIN-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32IZFHMIN-NEXT: addi sp, sp, 16
; RV32IZFHMIN-NEXT: ret
@@ -738,7 +738,7 @@ define i64 @llround_f16(half %a) nounwind strictfp {
; RV32IZHINXMIN-STRICT-NEXT: addi sp, sp, -16
; RV32IZHINXMIN-STRICT-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32IZHINXMIN-STRICT-NEXT: fcvt.s.h a0, a0
-; RV32IZHINXMIN-STRICT-NEXT: call llroundf@plt
+; RV32IZHINXMIN-STRICT-NEXT: call llroundf
; RV32IZHINXMIN-STRICT-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32IZHINXMIN-STRICT-NEXT: addi sp, sp, 16
; RV32IZHINXMIN-STRICT-NEXT: ret
@@ -754,7 +754,7 @@ define i64 @llround_f16(half %a) nounwind strictfp {
; RV32IZDINXZHINXMIN-NEXT: addi sp, sp, -16
; RV32IZDINXZHINXMIN-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32IZDINXZHINXMIN-NEXT: fcvt.s.h a0, a0
-; RV32IZDINXZHINXMIN-NEXT: call llroundf@plt
+; RV32IZDINXZHINXMIN-NEXT: call llroundf
; RV32IZDINXZHINXMIN-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32IZDINXZHINXMIN-NEXT: addi sp, sp, 16
; RV32IZDINXZHINXMIN-NEXT: ret
diff --git a/llvm/test/CodeGen/SPIRV/scfg-add-pre-headers.ll b/llvm/test/CodeGen/SPIRV/scfg-add-pre-headers.ll
new file mode 100644
index 000000000000..d351c9c4d2a4
--- /dev/null
+++ b/llvm/test/CodeGen/SPIRV/scfg-add-pre-headers.ll
@@ -0,0 +1,66 @@
+; RUN: llc -mtriple=spirv-unknown-unknown -O0 %s -o - | FileCheck %s
+
+; CHECK-DAG: %[[#bool:]] = OpTypeBool
+; CHECK-DAG: %[[#uint:]] = OpTypeInt 32 0
+; CHECK-DAG: %[[#uint_0:]] = OpConstant %[[#uint]] 0
+
+define void @main() #1 {
+ %1 = icmp ne i32 0, 0
+ br i1 %1, label %l1, label %l2
+
+; CHECK: %[[#cond:]] = OpINotEqual %[[#bool]] %[[#uint_0]] %[[#uint_0]]
+; CHECK: OpBranchConditional %[[#cond]] %[[#l1_pre:]] %[[#l2_pre:]]
+
+; CHECK-DAG: %[[#l2_pre]] = OpLabel
+; CHECK-NEXT: OpBranch %[[#l2_header:]]
+
+; CHECK-DAG: %[[#l1_pre]] = OpLabel
+; CHECK-NEXT: OpBranch %[[#l1_header:]]
+
+l1:
+ br i1 %1, label %l1_body, label %l1_end
+; CHECK-DAG: %[[#l1_header]] = OpLabel
+; CHECK-NEXT: OpBranchConditional %[[#cond]] %[[#l1_body:]] %[[#l1_end:]]
+
+l1_body:
+ br label %l1_continue
+; CHECK-DAG: %[[#l1_body]] = OpLabel
+; CHECK-NEXT: OpBranch %[[#l1_continue:]]
+
+l1_continue:
+ br label %l1
+; CHECK-DAG: %[[#l1_continue]] = OpLabel
+; CHECK-NEXT: OpBranch %[[#l1_header]]
+
+l1_end:
+ br label %end
+; CHECK-DAG: %[[#l1_end]] = OpLabel
+; CHECK-NEXT: OpBranch %[[#end:]]
+
+l2:
+ br i1 %1, label %l2_body, label %l2_end
+; CHECK-DAG: %[[#l2_header]] = OpLabel
+; CHECK-NEXT: OpBranchConditional %[[#cond]] %[[#l2_body:]] %[[#l2_end:]]
+
+l2_body:
+ br label %l2_continue
+; CHECK-DAG: %[[#l2_body]] = OpLabel
+; CHECK-NEXT: OpBranch %[[#l2_continue:]]
+
+l2_continue:
+ br label %l2
+; CHECK-DAG: %[[#l2_continue]] = OpLabel
+; CHECK-NEXT: OpBranch %[[#l2_header]]
+
+l2_end:
+ br label %end
+; CHECK-DAG: %[[#l2_end]] = OpLabel
+; CHECK-NEXT: OpBranch %[[#end:]]
+
+end:
+ ret void
+; CHECK-DAG: %[[#end]] = OpLabel
+; CHECK-NEXT: OpReturn
+}
+
+attributes #1 = { "hlsl.numthreads"="4,8,16" "hlsl.shader"="compute" convergent }
diff --git a/llvm/test/CodeGen/SystemZ/vec-perm-14.ll b/llvm/test/CodeGen/SystemZ/vec-perm-14.ll
index fb3ece96017b..0b392676fa3e 100644
--- a/llvm/test/CodeGen/SystemZ/vec-perm-14.ll
+++ b/llvm/test/CodeGen/SystemZ/vec-perm-14.ll
@@ -1,16 +1,14 @@
; RUN: llc < %s -mtriple=s390x-linux-gnu -mcpu=z14 | FileCheck %s
-;
-; Test that only one vperm of the vector compare is needed for both extracts.
+; Test that no vperm of the vector compare is needed for the extracts.
define void @fun() {
; CHECK-LABEL: fun:
; CHECK: # %bb.0: # %bb
; CHECK-NEXT: vlrepf %v0, 0(%r1)
; CHECK-NEXT: vgbm %v1, 0
-; CHECK-NEXT: larl %r1, .LCPI0_0
; CHECK-NEXT: vceqb %v0, %v0, %v1
-; CHECK-NEXT: vl %v1, 0(%r1), 3
-; CHECK-NEXT: vperm %v0, %v0, %v0, %v1
+; CHECK-NEXT: vuphb %v0, %v0
+; CHECK-NEXT: vuphh %v0, %v0
; CHECK-NEXT: vlgvf %r0, %v0, 0
; CHECK-NEXT: tmll %r0, 1
; CHECK-NEXT: je .LBB0_2
diff --git a/llvm/test/CodeGen/X86/O0-pipeline.ll b/llvm/test/CodeGen/X86/O0-pipeline.ll
index 402645ed1e2e..11025b0e6bf2 100644
--- a/llvm/test/CodeGen/X86/O0-pipeline.ll
+++ b/llvm/test/CodeGen/X86/O0-pipeline.ll
@@ -68,7 +68,7 @@
; CHECK-NEXT: Implement the 'patchable-function' attribute
; CHECK-NEXT: X86 Indirect Branch Tracking
; CHECK-NEXT: X86 vzeroupper inserter
-; CHECK-NEXT: Compressing EVEX instrs to VEX encoding when possibl
+; CHECK-NEXT: Compressing EVEX instrs when possible
; CHECK-NEXT: X86 Discriminate Memory Operands
; CHECK-NEXT: X86 Insert Cache Prefetches
; CHECK-NEXT: X86 insert wait instruction
diff --git a/llvm/test/CodeGen/X86/avx2-gather.ll b/llvm/test/CodeGen/X86/avx2-gather.ll
index e02ae09a0981..4b77edefa820 100644
--- a/llvm/test/CodeGen/X86/avx2-gather.ll
+++ b/llvm/test/CodeGen/X86/avx2-gather.ll
@@ -1,18 +1,18 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc < %s -mtriple=i686-unknown-unknown -mattr=+avx2 | FileCheck %s --check-prefix=X32
+; RUN: llc < %s -mtriple=i686-unknown-unknown -mattr=+avx2 | FileCheck %s --check-prefix=X86
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx2 | FileCheck %s --check-prefix=X64
declare <4 x float> @llvm.x86.avx2.gather.d.ps(<4 x float>, ptr,
<4 x i32>, <4 x float>, i8) nounwind readonly
define <4 x float> @test_x86_avx2_gather_d_ps(ptr %a1, <4 x i32> %idx, <4 x float> %mask) {
-; X32-LABEL: test_x86_avx2_gather_d_ps:
-; X32: # %bb.0:
-; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
-; X32-NEXT: vxorps %xmm2, %xmm2, %xmm2
-; X32-NEXT: vgatherdps %xmm1, (%eax,%xmm0,2), %xmm2
-; X32-NEXT: vmovaps %xmm2, %xmm0
-; X32-NEXT: retl
+; X86-LABEL: test_x86_avx2_gather_d_ps:
+; X86: # %bb.0:
+; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT: vxorps %xmm2, %xmm2, %xmm2
+; X86-NEXT: vgatherdps %xmm1, (%eax,%xmm0,2), %xmm2
+; X86-NEXT: vmovaps %xmm2, %xmm0
+; X86-NEXT: retl
;
; X64-LABEL: test_x86_avx2_gather_d_ps:
; X64: # %bb.0:
@@ -29,13 +29,13 @@ declare <2 x double> @llvm.x86.avx2.gather.d.pd(<2 x double>, ptr,
<4 x i32>, <2 x double>, i8) nounwind readonly
define <2 x double> @test_x86_avx2_gather_d_pd(ptr %a1, <4 x i32> %idx, <2 x double> %mask) {
-; X32-LABEL: test_x86_avx2_gather_d_pd:
-; X32: # %bb.0:
-; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
-; X32-NEXT: vxorpd %xmm2, %xmm2, %xmm2
-; X32-NEXT: vgatherdpd %xmm1, (%eax,%xmm0,2), %xmm2
-; X32-NEXT: vmovapd %xmm2, %xmm0
-; X32-NEXT: retl
+; X86-LABEL: test_x86_avx2_gather_d_pd:
+; X86: # %bb.0:
+; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT: vxorpd %xmm2, %xmm2, %xmm2
+; X86-NEXT: vgatherdpd %xmm1, (%eax,%xmm0,2), %xmm2
+; X86-NEXT: vmovapd %xmm2, %xmm0
+; X86-NEXT: retl
;
; X64-LABEL: test_x86_avx2_gather_d_pd:
; X64: # %bb.0:
@@ -52,13 +52,13 @@ declare <8 x float> @llvm.x86.avx2.gather.d.ps.256(<8 x float>, ptr,
<8 x i32>, <8 x float>, i8) nounwind readonly
define <8 x float> @test_x86_avx2_gather_d_ps_256(ptr %a1, <8 x i32> %idx, <8 x float> %mask) {
-; X32-LABEL: test_x86_avx2_gather_d_ps_256:
-; X32: # %bb.0:
-; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
-; X32-NEXT: vxorps %xmm2, %xmm2, %xmm2
-; X32-NEXT: vgatherdps %ymm1, (%eax,%ymm0,4), %ymm2
-; X32-NEXT: vmovaps %ymm2, %ymm0
-; X32-NEXT: retl
+; X86-LABEL: test_x86_avx2_gather_d_ps_256:
+; X86: # %bb.0:
+; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT: vxorps %xmm2, %xmm2, %xmm2
+; X86-NEXT: vgatherdps %ymm1, (%eax,%ymm0,4), %ymm2
+; X86-NEXT: vmovaps %ymm2, %ymm0
+; X86-NEXT: retl
;
; X64-LABEL: test_x86_avx2_gather_d_ps_256:
; X64: # %bb.0:
@@ -75,13 +75,13 @@ declare <4 x double> @llvm.x86.avx2.gather.d.pd.256(<4 x double>, ptr,
<4 x i32>, <4 x double>, i8) nounwind readonly
define <4 x double> @test_x86_avx2_gather_d_pd_256(ptr %a1, <4 x i32> %idx, <4 x double> %mask) {
-; X32-LABEL: test_x86_avx2_gather_d_pd_256:
-; X32: # %bb.0:
-; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
-; X32-NEXT: vxorpd %xmm2, %xmm2, %xmm2
-; X32-NEXT: vgatherdpd %ymm1, (%eax,%xmm0,8), %ymm2
-; X32-NEXT: vmovapd %ymm2, %ymm0
-; X32-NEXT: retl
+; X86-LABEL: test_x86_avx2_gather_d_pd_256:
+; X86: # %bb.0:
+; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT: vxorpd %xmm2, %xmm2, %xmm2
+; X86-NEXT: vgatherdpd %ymm1, (%eax,%xmm0,8), %ymm2
+; X86-NEXT: vmovapd %ymm2, %ymm0
+; X86-NEXT: retl
;
; X64-LABEL: test_x86_avx2_gather_d_pd_256:
; X64: # %bb.0:
@@ -95,14 +95,14 @@ define <4 x double> @test_x86_avx2_gather_d_pd_256(ptr %a1, <4 x i32> %idx, <4 x
}
define <2 x i64> @test_mm_i32gather_epi32(ptr%a0, <2 x i64> %a1) {
-; X32-LABEL: test_mm_i32gather_epi32:
-; X32: # %bb.0:
-; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
-; X32-NEXT: vpcmpeqd %xmm2, %xmm2, %xmm2
-; X32-NEXT: vpxor %xmm1, %xmm1, %xmm1
-; X32-NEXT: vpgatherdd %xmm2, (%eax,%xmm0,2), %xmm1
-; X32-NEXT: vmovdqa %xmm1, %xmm0
-; X32-NEXT: retl
+; X86-LABEL: test_mm_i32gather_epi32:
+; X86: # %bb.0:
+; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT: vpcmpeqd %xmm2, %xmm2, %xmm2
+; X86-NEXT: vpxor %xmm1, %xmm1, %xmm1
+; X86-NEXT: vpgatherdd %xmm2, (%eax,%xmm0,2), %xmm1
+; X86-NEXT: vmovdqa %xmm1, %xmm0
+; X86-NEXT: retl
;
; X64-LABEL: test_mm_i32gather_epi32:
; X64: # %bb.0:
@@ -121,14 +121,14 @@ define <2 x i64> @test_mm_i32gather_epi32(ptr%a0, <2 x i64> %a1) {
declare <4 x i32> @llvm.x86.avx2.gather.d.d(<4 x i32>, ptr, <4 x i32>, <4 x i32>, i8) nounwind readonly
define <2 x double> @test_mm_i32gather_pd(ptr%a0, <2 x i64> %a1) {
-; X32-LABEL: test_mm_i32gather_pd:
-; X32: # %bb.0:
-; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
-; X32-NEXT: vxorpd %xmm1, %xmm1, %xmm1
-; X32-NEXT: vpcmpeqd %xmm2, %xmm2, %xmm2
-; X32-NEXT: vgatherdpd %xmm2, (%eax,%xmm0,2), %xmm1
-; X32-NEXT: vmovapd %xmm1, %xmm0
-; X32-NEXT: retl
+; X86-LABEL: test_mm_i32gather_pd:
+; X86: # %bb.0:
+; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT: vxorpd %xmm1, %xmm1, %xmm1
+; X86-NEXT: vpcmpeqd %xmm2, %xmm2, %xmm2
+; X86-NEXT: vgatherdpd %xmm2, (%eax,%xmm0,2), %xmm1
+; X86-NEXT: vmovapd %xmm1, %xmm0
+; X86-NEXT: retl
;
; X64-LABEL: test_mm_i32gather_pd:
; X64: # %bb.0:
@@ -149,14 +149,14 @@ define <2 x double> @test_mm_i32gather_pd(ptr%a0, <2 x i64> %a1) {
@x = dso_local global [1024 x float] zeroinitializer, align 16
define <4 x float> @gather_global(<4 x i64>, ptr nocapture readnone) {
-; X32-LABEL: gather_global:
-; X32: # %bb.0:
-; X32-NEXT: vpcmpeqd %xmm2, %xmm2, %xmm2
-; X32-NEXT: vxorps %xmm1, %xmm1, %xmm1
-; X32-NEXT: vgatherqps %xmm2, x(,%ymm0,4), %xmm1
-; X32-NEXT: vmovaps %xmm1, %xmm0
-; X32-NEXT: vzeroupper
-; X32-NEXT: retl
+; X86-LABEL: gather_global:
+; X86: # %bb.0:
+; X86-NEXT: vpcmpeqd %xmm2, %xmm2, %xmm2
+; X86-NEXT: vxorps %xmm1, %xmm1, %xmm1
+; X86-NEXT: vgatherqps %xmm2, x(,%ymm0,4), %xmm1
+; X86-NEXT: vmovaps %xmm1, %xmm0
+; X86-NEXT: vzeroupper
+; X86-NEXT: retl
;
; X64-LABEL: gather_global:
; X64: # %bb.0:
diff --git a/llvm/test/CodeGen/X86/avx2-nontemporal.ll b/llvm/test/CodeGen/X86/avx2-nontemporal.ll
index 95568e7b3d0a..cd16b3018448 100644
--- a/llvm/test/CodeGen/X86/avx2-nontemporal.ll
+++ b/llvm/test/CodeGen/X86/avx2-nontemporal.ll
@@ -1,41 +1,41 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc < %s -mtriple=i686-unknown-linux-gnu -mattr=+avx2 | FileCheck %s --check-prefix=X32
+; RUN: llc < %s -mtriple=i686-unknown-linux-gnu -mattr=+avx2 | FileCheck %s --check-prefix=X86
; RUN: llc < %s -mtriple=x86_64-unknown-linux-gnu -mattr=+avx2 | FileCheck %s --check-prefix=X64
define i32 @f(<8 x float> %A, ptr %B, <4 x double> %C, <4 x i64> %E, <8 x i32> %F, <16 x i16> %G, <32 x i8> %H, ptr %loadptr) nounwind {
-; X32-LABEL: f:
-; X32: # %bb.0:
-; X32-NEXT: pushl %ebp
-; X32-NEXT: movl %esp, %ebp
-; X32-NEXT: andl $-32, %esp
-; X32-NEXT: subl $32, %esp
-; X32-NEXT: vmovdqa 104(%ebp), %ymm3
-; X32-NEXT: vmovdqa 72(%ebp), %ymm4
-; X32-NEXT: vmovdqa 40(%ebp), %ymm5
-; X32-NEXT: movl 8(%ebp), %ecx
-; X32-NEXT: movl 136(%ebp), %edx
-; X32-NEXT: movl (%edx), %eax
-; X32-NEXT: vaddps {{\.?LCPI[0-9]+_[0-9]+}}, %ymm0, %ymm0
-; X32-NEXT: vmovntps %ymm0, (%ecx)
-; X32-NEXT: vpaddq {{\.?LCPI[0-9]+_[0-9]+}}, %ymm2, %ymm0
-; X32-NEXT: addl (%edx), %eax
-; X32-NEXT: vmovntdq %ymm0, (%ecx)
-; X32-NEXT: vaddpd {{\.?LCPI[0-9]+_[0-9]+}}, %ymm1, %ymm0
-; X32-NEXT: addl (%edx), %eax
-; X32-NEXT: vmovntpd %ymm0, (%ecx)
-; X32-NEXT: vpaddd {{\.?LCPI[0-9]+_[0-9]+}}, %ymm5, %ymm0
-; X32-NEXT: addl (%edx), %eax
-; X32-NEXT: vmovntdq %ymm0, (%ecx)
-; X32-NEXT: vpaddw {{\.?LCPI[0-9]+_[0-9]+}}, %ymm4, %ymm0
-; X32-NEXT: addl (%edx), %eax
-; X32-NEXT: vmovntdq %ymm0, (%ecx)
-; X32-NEXT: vpaddb {{\.?LCPI[0-9]+_[0-9]+}}, %ymm3, %ymm0
-; X32-NEXT: addl (%edx), %eax
-; X32-NEXT: vmovntdq %ymm0, (%ecx)
-; X32-NEXT: movl %ebp, %esp
-; X32-NEXT: popl %ebp
-; X32-NEXT: vzeroupper
-; X32-NEXT: retl
+; X86-LABEL: f:
+; X86: # %bb.0:
+; X86-NEXT: pushl %ebp
+; X86-NEXT: movl %esp, %ebp
+; X86-NEXT: andl $-32, %esp
+; X86-NEXT: subl $32, %esp
+; X86-NEXT: vmovdqa 104(%ebp), %ymm3
+; X86-NEXT: vmovdqa 72(%ebp), %ymm4
+; X86-NEXT: vmovdqa 40(%ebp), %ymm5
+; X86-NEXT: movl 8(%ebp), %ecx
+; X86-NEXT: movl 136(%ebp), %edx
+; X86-NEXT: movl (%edx), %eax
+; X86-NEXT: vaddps {{\.?LCPI[0-9]+_[0-9]+}}, %ymm0, %ymm0
+; X86-NEXT: vmovntps %ymm0, (%ecx)
+; X86-NEXT: vpaddq {{\.?LCPI[0-9]+_[0-9]+}}, %ymm2, %ymm0
+; X86-NEXT: addl (%edx), %eax
+; X86-NEXT: vmovntdq %ymm0, (%ecx)
+; X86-NEXT: vaddpd {{\.?LCPI[0-9]+_[0-9]+}}, %ymm1, %ymm0
+; X86-NEXT: addl (%edx), %eax
+; X86-NEXT: vmovntpd %ymm0, (%ecx)
+; X86-NEXT: vpaddd {{\.?LCPI[0-9]+_[0-9]+}}, %ymm5, %ymm0
+; X86-NEXT: addl (%edx), %eax
+; X86-NEXT: vmovntdq %ymm0, (%ecx)
+; X86-NEXT: vpaddw {{\.?LCPI[0-9]+_[0-9]+}}, %ymm4, %ymm0
+; X86-NEXT: addl (%edx), %eax
+; X86-NEXT: vmovntdq %ymm0, (%ecx)
+; X86-NEXT: vpaddb {{\.?LCPI[0-9]+_[0-9]+}}, %ymm3, %ymm0
+; X86-NEXT: addl (%edx), %eax
+; X86-NEXT: vmovntdq %ymm0, (%ecx)
+; X86-NEXT: movl %ebp, %esp
+; X86-NEXT: popl %ebp
+; X86-NEXT: vzeroupper
+; X86-NEXT: retl
;
; X64-LABEL: f:
; X64: # %bb.0:
diff --git a/llvm/test/CodeGen/X86/combine-bextr.ll b/llvm/test/CodeGen/X86/combine-bextr.ll
index c36723732b5c..6eea67cb43ec 100644
--- a/llvm/test/CodeGen/X86/combine-bextr.ll
+++ b/llvm/test/CodeGen/X86/combine-bextr.ll
@@ -1,10 +1,10 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc < %s -mtriple=i686-unknown-unknown -mattr=+sse2,+bmi | FileCheck %s --check-prefixes=CHECK,X32
+; RUN: llc < %s -mtriple=i686-unknown-unknown -mattr=+sse2,+bmi | FileCheck %s --check-prefixes=CHECK,X86
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+sse2,+bmi | FileCheck %s --check-prefixes=CHECK,X64
declare i32 @llvm.x86.bmi.bextr.32(i32, i32)
-define i32 @bextr_zero_length(i32 %x, i32 %y) {
+define i32 @bextr_zero_length(i32 %x, i32 %y) nounwind {
; CHECK-LABEL: bextr_zero_length:
; CHECK: # %bb.0:
; CHECK-NEXT: xorl %eax, %eax
@@ -14,13 +14,13 @@ define i32 @bextr_zero_length(i32 %x, i32 %y) {
ret i32 %2
}
-define i32 @bextr_big_shift(i32 %x, i32 %y) {
-; X32-LABEL: bextr_big_shift:
-; X32: # %bb.0:
-; X32-NEXT: movl $255, %eax
-; X32-NEXT: orl {{[0-9]+}}(%esp), %eax
-; X32-NEXT: bextrl %eax, {{[0-9]+}}(%esp), %eax
-; X32-NEXT: retl
+define i32 @bextr_big_shift(i32 %x, i32 %y) nounwind {
+; X86-LABEL: bextr_big_shift:
+; X86: # %bb.0:
+; X86-NEXT: movl $255, %eax
+; X86-NEXT: orl {{[0-9]+}}(%esp), %eax
+; X86-NEXT: bextrl %eax, {{[0-9]+}}(%esp), %eax
+; X86-NEXT: retl
;
; X64-LABEL: bextr_big_shift:
; X64: # %bb.0:
@@ -32,22 +32,20 @@ define i32 @bextr_big_shift(i32 %x, i32 %y) {
ret i32 %2
}
-define float @bextr_uitofp(i32 %x, i32 %y) {
-; X32-LABEL: bextr_uitofp:
-; X32: # %bb.0:
-; X32-NEXT: pushl %eax
-; X32-NEXT: .cfi_def_cfa_offset 8
-; X32-NEXT: movl $3855, %eax # imm = 0xF0F
-; X32-NEXT: bextrl %eax, {{[0-9]+}}(%esp), %eax
-; X32-NEXT: movd %eax, %xmm0
-; X32-NEXT: por {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0
-; X32-NEXT: subsd {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0
-; X32-NEXT: cvtsd2ss %xmm0, %xmm0
-; X32-NEXT: movss %xmm0, (%esp)
-; X32-NEXT: flds (%esp)
-; X32-NEXT: popl %eax
-; X32-NEXT: .cfi_def_cfa_offset 4
-; X32-NEXT: retl
+define float @bextr_uitofp(i32 %x, i32 %y) nounwind {
+; X86-LABEL: bextr_uitofp:
+; X86: # %bb.0:
+; X86-NEXT: pushl %eax
+; X86-NEXT: movl $3855, %eax # imm = 0xF0F
+; X86-NEXT: bextrl %eax, {{[0-9]+}}(%esp), %eax
+; X86-NEXT: movd %eax, %xmm0
+; X86-NEXT: por {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0
+; X86-NEXT: subsd {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0
+; X86-NEXT: cvtsd2ss %xmm0, %xmm0
+; X86-NEXT: movss %xmm0, (%esp)
+; X86-NEXT: flds (%esp)
+; X86-NEXT: popl %eax
+; X86-NEXT: retl
;
; X64-LABEL: bextr_uitofp:
; X64: # %bb.0:
diff --git a/llvm/test/CodeGen/X86/crc32-intrinsics-fast-isel-x86.ll b/llvm/test/CodeGen/X86/crc32-intrinsics-fast-isel-x86.ll
index 873986e99777..fe5182e5ef73 100644
--- a/llvm/test/CodeGen/X86/crc32-intrinsics-fast-isel-x86.ll
+++ b/llvm/test/CodeGen/X86/crc32-intrinsics-fast-isel-x86.ll
@@ -29,7 +29,7 @@ define i32 @test_mm_crc32_u8(i32 %a0, i32 %a1) nounwind {
; EGPR-LABEL: test_mm_crc32_u8:
; EGPR: # %bb.0:
; EGPR-NEXT: movl %edi, %eax # encoding: [0x89,0xf8]
-; EGPR-NEXT: crc32b %sil, %eax # encoding: [0x62,0xf4,0x7c,0x08,0xf0,0xc6]
+; EGPR-NEXT: crc32b %sil, %eax # EVEX TO LEGACY Compression encoding: [0xf2,0x40,0x0f,0x38,0xf0,0xc6]
; EGPR-NEXT: retq # encoding: [0xc3]
%trunc = trunc i32 %a1 to i8
%res = call i32 @llvm.x86.sse42.crc32.32.8(i32 %a0, i8 %trunc)
@@ -55,7 +55,7 @@ define i32 @test_mm_crc32_u16(i32 %a0, i32 %a1) nounwind {
; EGPR-LABEL: test_mm_crc32_u16:
; EGPR: # %bb.0:
; EGPR-NEXT: movl %edi, %eax # encoding: [0x89,0xf8]
-; EGPR-NEXT: crc32w %si, %eax # encoding: [0x62,0xf4,0x7d,0x08,0xf1,0xc6]
+; EGPR-NEXT: crc32w %si, %eax # EVEX TO LEGACY Compression encoding: [0x66,0xf2,0x0f,0x38,0xf1,0xc6]
; EGPR-NEXT: retq # encoding: [0xc3]
%trunc = trunc i32 %a1 to i16
%res = call i32 @llvm.x86.sse42.crc32.32.16(i32 %a0, i16 %trunc)
@@ -79,7 +79,7 @@ define i32 @test_mm_crc32_u32(i32 %a0, i32 %a1) nounwind {
; EGPR-LABEL: test_mm_crc32_u32:
; EGPR: # %bb.0:
; EGPR-NEXT: movl %edi, %eax # encoding: [0x89,0xf8]
-; EGPR-NEXT: crc32l %esi, %eax # encoding: [0x62,0xf4,0x7c,0x08,0xf1,0xc6]
+; EGPR-NEXT: crc32l %esi, %eax # EVEX TO LEGACY Compression encoding: [0xf2,0x0f,0x38,0xf1,0xc6]
; EGPR-NEXT: retq # encoding: [0xc3]
%res = call i32 @llvm.x86.sse42.crc32.32.32(i32 %a0, i32 %a1)
ret i32 %res
diff --git a/llvm/test/CodeGen/X86/crc32-intrinsics-fast-isel-x86_64.ll b/llvm/test/CodeGen/X86/crc32-intrinsics-fast-isel-x86_64.ll
index 71d955bda752..ba5f846c22db 100644
--- a/llvm/test/CodeGen/X86/crc32-intrinsics-fast-isel-x86_64.ll
+++ b/llvm/test/CodeGen/X86/crc32-intrinsics-fast-isel-x86_64.ll
@@ -15,7 +15,7 @@ define i64 @test_mm_crc64_u8(i64 %a0, i32 %a1) nounwind{
;
; EGPR-LABEL: test_mm_crc64_u8:
; EGPR: # %bb.0:
-; EGPR-NEXT: crc32b %sil, %edi # encoding: [0x62,0xf4,0x7c,0x08,0xf0,0xfe]
+; EGPR-NEXT: crc32b %sil, %edi # EVEX TO LEGACY Compression encoding: [0xf2,0x40,0x0f,0x38,0xf0,0xfe]
; EGPR-NEXT: movl %edi, %eax # encoding: [0x89,0xf8]
; EGPR-NEXT: retq # encoding: [0xc3]
%trunc = trunc i32 %a1 to i8
@@ -34,7 +34,7 @@ define i64 @test_mm_crc64_u64(i64 %a0, i64 %a1) nounwind{
; EGPR-LABEL: test_mm_crc64_u64:
; EGPR: # %bb.0:
; EGPR-NEXT: movq %rdi, %rax # encoding: [0x48,0x89,0xf8]
-; EGPR-NEXT: crc32q %rsi, %rax # encoding: [0x62,0xf4,0xfc,0x08,0xf1,0xc6]
+; EGPR-NEXT: crc32q %rsi, %rax # EVEX TO LEGACY Compression encoding: [0xf2,0x48,0x0f,0x38,0xf1,0xc6]
; EGPR-NEXT: retq # encoding: [0xc3]
%res = call i64 @llvm.x86.sse42.crc32.64.64(i64 %a0, i64 %a1)
ret i64 %res
diff --git a/llvm/test/CodeGen/X86/crc32-intrinsics-x86.ll b/llvm/test/CodeGen/X86/crc32-intrinsics-x86.ll
index 84c7f90cfe3c..ea4e0ffb109c 100644
--- a/llvm/test/CodeGen/X86/crc32-intrinsics-x86.ll
+++ b/llvm/test/CodeGen/X86/crc32-intrinsics-x86.ll
@@ -19,7 +19,7 @@ define i32 @crc32_32_8(i32 %a, i8 %b) nounwind {
; EGPR-LABEL: crc32_32_8:
; EGPR: ## %bb.0:
; EGPR-NEXT: movl %edi, %eax ## encoding: [0x89,0xf8]
-; EGPR-NEXT: crc32b %sil, %eax ## encoding: [0x62,0xf4,0x7c,0x08,0xf0,0xc6]
+; EGPR-NEXT: crc32b %sil, %eax ## EVEX TO LEGACY Compression encoding: [0xf2,0x40,0x0f,0x38,0xf0,0xc6]
; EGPR-NEXT: retq ## encoding: [0xc3]
%tmp = call i32 @llvm.x86.sse42.crc32.32.8(i32 %a, i8 %b)
ret i32 %tmp
@@ -42,7 +42,7 @@ define i32 @crc32_32_16(i32 %a, i16 %b) nounwind {
; EGPR-LABEL: crc32_32_16:
; EGPR: ## %bb.0:
; EGPR-NEXT: movl %edi, %eax ## encoding: [0x89,0xf8]
-; EGPR-NEXT: crc32w %si, %eax ## encoding: [0x62,0xf4,0x7d,0x08,0xf1,0xc6]
+; EGPR-NEXT: crc32w %si, %eax ## EVEX TO LEGACY Compression encoding: [0x66,0xf2,0x0f,0x38,0xf1,0xc6]
; EGPR-NEXT: retq ## encoding: [0xc3]
%tmp = call i32 @llvm.x86.sse42.crc32.32.16(i32 %a, i16 %b)
ret i32 %tmp
@@ -65,7 +65,7 @@ define i32 @crc32_32_32(i32 %a, i32 %b) nounwind {
; EGPR-LABEL: crc32_32_32:
; EGPR: ## %bb.0:
; EGPR-NEXT: movl %edi, %eax ## encoding: [0x89,0xf8]
-; EGPR-NEXT: crc32l %esi, %eax ## encoding: [0x62,0xf4,0x7c,0x08,0xf1,0xc6]
+; EGPR-NEXT: crc32l %esi, %eax ## EVEX TO LEGACY Compression encoding: [0xf2,0x0f,0x38,0xf1,0xc6]
; EGPR-NEXT: retq ## encoding: [0xc3]
%tmp = call i32 @llvm.x86.sse42.crc32.32.32(i32 %a, i32 %b)
ret i32 %tmp
diff --git a/llvm/test/CodeGen/X86/crc32-intrinsics-x86_64.ll b/llvm/test/CodeGen/X86/crc32-intrinsics-x86_64.ll
index bda26a15b277..af2b590b1f6b 100644
--- a/llvm/test/CodeGen/X86/crc32-intrinsics-x86_64.ll
+++ b/llvm/test/CodeGen/X86/crc32-intrinsics-x86_64.ll
@@ -15,7 +15,7 @@ define i64 @crc32_64_8(i64 %a, i8 %b) nounwind {
; EGPR-LABEL: crc32_64_8:
; EGPR: ## %bb.0:
; EGPR-NEXT: movq %rdi, %rax ## encoding: [0x48,0x89,0xf8]
-; EGPR-NEXT: crc32b %sil, %eax ## encoding: [0x62,0xf4,0x7c,0x08,0xf0,0xc6]
+; EGPR-NEXT: crc32b %sil, %eax ## EVEX TO LEGACY Compression encoding: [0xf2,0x40,0x0f,0x38,0xf0,0xc6]
; EGPR-NEXT: retq ## encoding: [0xc3]
%tmp = call i64 @llvm.x86.sse42.crc32.64.8(i64 %a, i8 %b)
ret i64 %tmp
@@ -31,7 +31,7 @@ define i64 @crc32_64_64(i64 %a, i64 %b) nounwind {
; EGPR-LABEL: crc32_64_64:
; EGPR: ## %bb.0:
; EGPR-NEXT: movq %rdi, %rax ## encoding: [0x48,0x89,0xf8]
-; EGPR-NEXT: crc32q %rsi, %rax ## encoding: [0x62,0xf4,0xfc,0x08,0xf1,0xc6]
+; EGPR-NEXT: crc32q %rsi, %rax ## EVEX TO LEGACY Compression encoding: [0xf2,0x48,0x0f,0x38,0xf1,0xc6]
; EGPR-NEXT: retq ## encoding: [0xc3]
%tmp = call i64 @llvm.x86.sse42.crc32.64.64(i64 %a, i64 %b)
ret i64 %tmp
diff --git a/llvm/test/CodeGen/X86/evex-to-vex-compress.mir b/llvm/test/CodeGen/X86/evex-to-vex-compress.mir
index 06d3c1532c3e..548cf24b9200 100644
--- a/llvm/test/CodeGen/X86/evex-to-vex-compress.mir
+++ b/llvm/test/CodeGen/X86/evex-to-vex-compress.mir
@@ -1,4 +1,4 @@
-# RUN: llc -mtriple=x86_64-- -run-pass x86-evex-to-vex-compress -verify-machineinstrs -mcpu=skx -o - %s | FileCheck %s
+# RUN: llc -mtriple=x86_64-- -run-pass x86-compress-evex -verify-machineinstrs -mcpu=skx -o - %s | FileCheck %s
# This test verifies VEX encoding for AVX-512 instructions that use registers of low indexes and
# do not use zmm or mask registers and have a corresponding AVX/AVX2 opcode
diff --git a/llvm/test/CodeGen/X86/ftrunc.ll b/llvm/test/CodeGen/X86/ftrunc.ll
index d52d14572082..08705e9cdc59 100644
--- a/llvm/test/CodeGen/X86/ftrunc.ll
+++ b/llvm/test/CodeGen/X86/ftrunc.ll
@@ -1,8 +1,8 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -mtriple=x86_64-- -mattr=+sse2 | FileCheck %s --check-prefixes=SSE,SSE2
; RUN: llc < %s -mtriple=x86_64-- -mattr=+sse4.1 | FileCheck %s --check-prefixes=SSE,SSE41
-; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx | FileCheck %s --check-prefixes=X64_AVX1
-; RUN: llc < %s -mtriple=i686-- -mattr=+avx | FileCheck %s --check-prefixes=X32_AVX1
+; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx | FileCheck %s --check-prefixes=AVX,X64-AVX1
+; RUN: llc < %s -mtriple=i686-- -mattr=+avx | FileCheck %s --check-prefixes=AVX,X86-AVX1
declare i32 @llvm.fptoui.sat.i32.f32(float)
declare i64 @llvm.fptosi.sat.i64.f64(double)
@@ -21,20 +21,20 @@ define float @trunc_unsigned_f32(float %x) #0 {
; SSE41-NEXT: roundss $11, %xmm0, %xmm0
; SSE41-NEXT: retq
;
-; X64_AVX1-LABEL: trunc_unsigned_f32:
-; X64_AVX1: # %bb.0:
-; X64_AVX1-NEXT: vroundss $11, %xmm0, %xmm0, %xmm0
-; X64_AVX1-NEXT: retq
-;
-; X32_AVX1-LABEL: trunc_unsigned_f32:
-; X32_AVX1: # %bb.0:
-; X32_AVX1-NEXT: pushl %eax
-; X32_AVX1-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
-; X32_AVX1-NEXT: vroundss $11, %xmm0, %xmm0, %xmm0
-; X32_AVX1-NEXT: vmovss %xmm0, (%esp)
-; X32_AVX1-NEXT: flds (%esp)
-; X32_AVX1-NEXT: popl %eax
-; X32_AVX1-NEXT: retl
+; X64-AVX1-LABEL: trunc_unsigned_f32:
+; X64-AVX1: # %bb.0:
+; X64-AVX1-NEXT: vroundss $11, %xmm0, %xmm0, %xmm0
+; X64-AVX1-NEXT: retq
+;
+; X86-AVX1-LABEL: trunc_unsigned_f32:
+; X86-AVX1: # %bb.0:
+; X86-AVX1-NEXT: pushl %eax
+; X86-AVX1-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; X86-AVX1-NEXT: vroundss $11, %xmm0, %xmm0, %xmm0
+; X86-AVX1-NEXT: vmovss %xmm0, (%esp)
+; X86-AVX1-NEXT: flds (%esp)
+; X86-AVX1-NEXT: popl %eax
+; X86-AVX1-NEXT: retl
%i = fptoui float %x to i32
%r = uitofp i32 %i to float
ret float %r
@@ -63,24 +63,24 @@ define double @trunc_unsigned_f64(double %x) #0 {
; SSE41-NEXT: roundsd $11, %xmm0, %xmm0
; SSE41-NEXT: retq
;
-; X64_AVX1-LABEL: trunc_unsigned_f64:
-; X64_AVX1: # %bb.0:
-; X64_AVX1-NEXT: vroundsd $11, %xmm0, %xmm0, %xmm0
-; X64_AVX1-NEXT: retq
-;
-; X32_AVX1-LABEL: trunc_unsigned_f64:
-; X32_AVX1: # %bb.0:
-; X32_AVX1-NEXT: pushl %ebp
-; X32_AVX1-NEXT: movl %esp, %ebp
-; X32_AVX1-NEXT: andl $-8, %esp
-; X32_AVX1-NEXT: subl $8, %esp
-; X32_AVX1-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
-; X32_AVX1-NEXT: vroundsd $11, %xmm0, %xmm0, %xmm0
-; X32_AVX1-NEXT: vmovsd %xmm0, (%esp)
-; X32_AVX1-NEXT: fldl (%esp)
-; X32_AVX1-NEXT: movl %ebp, %esp
-; X32_AVX1-NEXT: popl %ebp
-; X32_AVX1-NEXT: retl
+; X64-AVX1-LABEL: trunc_unsigned_f64:
+; X64-AVX1: # %bb.0:
+; X64-AVX1-NEXT: vroundsd $11, %xmm0, %xmm0, %xmm0
+; X64-AVX1-NEXT: retq
+;
+; X86-AVX1-LABEL: trunc_unsigned_f64:
+; X86-AVX1: # %bb.0:
+; X86-AVX1-NEXT: pushl %ebp
+; X86-AVX1-NEXT: movl %esp, %ebp
+; X86-AVX1-NEXT: andl $-8, %esp
+; X86-AVX1-NEXT: subl $8, %esp
+; X86-AVX1-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
+; X86-AVX1-NEXT: vroundsd $11, %xmm0, %xmm0, %xmm0
+; X86-AVX1-NEXT: vmovsd %xmm0, (%esp)
+; X86-AVX1-NEXT: fldl (%esp)
+; X86-AVX1-NEXT: movl %ebp, %esp
+; X86-AVX1-NEXT: popl %ebp
+; X86-AVX1-NEXT: retl
%i = fptoui double %x to i64
%r = uitofp i64 %i to double
ret double %r
@@ -110,15 +110,10 @@ define <4 x float> @trunc_unsigned_v4f32(<4 x float> %x) #0 {
; SSE41-NEXT: roundps $11, %xmm0, %xmm0
; SSE41-NEXT: retq
;
-; X64_AVX1-LABEL: trunc_unsigned_v4f32:
-; X64_AVX1: # %bb.0:
-; X64_AVX1-NEXT: vroundps $11, %xmm0, %xmm0
-; X64_AVX1-NEXT: retq
-;
-; X32_AVX1-LABEL: trunc_unsigned_v4f32:
-; X32_AVX1: # %bb.0:
-; X32_AVX1-NEXT: vroundps $11, %xmm0, %xmm0
-; X32_AVX1-NEXT: retl
+; AVX-LABEL: trunc_unsigned_v4f32:
+; AVX: # %bb.0:
+; AVX-NEXT: vroundps $11, %xmm0, %xmm0
+; AVX-NEXT: ret{{[l|q]}}
%i = fptoui <4 x float> %x to <4 x i32>
%r = uitofp <4 x i32> %i to <4 x float>
ret <4 x float> %r
@@ -162,15 +157,10 @@ define <2 x double> @trunc_unsigned_v2f64(<2 x double> %x) #0 {
; SSE41-NEXT: roundpd $11, %xmm0, %xmm0
; SSE41-NEXT: retq
;
-; X64_AVX1-LABEL: trunc_unsigned_v2f64:
-; X64_AVX1: # %bb.0:
-; X64_AVX1-NEXT: vroundpd $11, %xmm0, %xmm0
-; X64_AVX1-NEXT: retq
-;
-; X32_AVX1-LABEL: trunc_unsigned_v2f64:
-; X32_AVX1: # %bb.0:
-; X32_AVX1-NEXT: vroundpd $11, %xmm0, %xmm0
-; X32_AVX1-NEXT: retl
+; AVX-LABEL: trunc_unsigned_v2f64:
+; AVX: # %bb.0:
+; AVX-NEXT: vroundpd $11, %xmm0, %xmm0
+; AVX-NEXT: ret{{[l|q]}}
%i = fptoui <2 x double> %x to <2 x i64>
%r = uitofp <2 x i64> %i to <2 x double>
ret <2 x double> %r
@@ -244,15 +234,10 @@ define <4 x double> @trunc_unsigned_v4f64(<4 x double> %x) #0 {
; SSE41-NEXT: roundpd $11, %xmm1, %xmm1
; SSE41-NEXT: retq
;
-; X64_AVX1-LABEL: trunc_unsigned_v4f64:
-; X64_AVX1: # %bb.0:
-; X64_AVX1-NEXT: vroundpd $11, %ymm0, %ymm0
-; X64_AVX1-NEXT: retq
-;
-; X32_AVX1-LABEL: trunc_unsigned_v4f64:
-; X32_AVX1: # %bb.0:
-; X32_AVX1-NEXT: vroundpd $11, %ymm0, %ymm0
-; X32_AVX1-NEXT: retl
+; AVX-LABEL: trunc_unsigned_v4f64:
+; AVX: # %bb.0:
+; AVX-NEXT: vroundpd $11, %ymm0, %ymm0
+; AVX-NEXT: ret{{[l|q]}}
%i = fptoui <4 x double> %x to <4 x i64>
%r = uitofp <4 x i64> %i to <4 x double>
ret <4 x double> %r
@@ -265,24 +250,24 @@ define float @trunc_signed_f32_no_fast_math(float %x) {
; SSE-NEXT: cvtdq2ps %xmm0, %xmm0
; SSE-NEXT: retq
;
-; X64_AVX1-LABEL: trunc_signed_f32_no_fast_math:
-; X64_AVX1: # %bb.0:
-; X64_AVX1-NEXT: vcvttps2dq %xmm0, %xmm0
-; X64_AVX1-NEXT: vcvtdq2ps %xmm0, %xmm0
-; X64_AVX1-NEXT: retq
-;
-; X32_AVX1-LABEL: trunc_signed_f32_no_fast_math:
-; X32_AVX1: # %bb.0:
-; X32_AVX1-NEXT: pushl %eax
-; X32_AVX1-NEXT: .cfi_def_cfa_offset 8
-; X32_AVX1-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
-; X32_AVX1-NEXT: vcvttps2dq %xmm0, %xmm0
-; X32_AVX1-NEXT: vcvtdq2ps %xmm0, %xmm0
-; X32_AVX1-NEXT: vmovss %xmm0, (%esp)
-; X32_AVX1-NEXT: flds (%esp)
-; X32_AVX1-NEXT: popl %eax
-; X32_AVX1-NEXT: .cfi_def_cfa_offset 4
-; X32_AVX1-NEXT: retl
+; X64-AVX1-LABEL: trunc_signed_f32_no_fast_math:
+; X64-AVX1: # %bb.0:
+; X64-AVX1-NEXT: vcvttps2dq %xmm0, %xmm0
+; X64-AVX1-NEXT: vcvtdq2ps %xmm0, %xmm0
+; X64-AVX1-NEXT: retq
+;
+; X86-AVX1-LABEL: trunc_signed_f32_no_fast_math:
+; X86-AVX1: # %bb.0:
+; X86-AVX1-NEXT: pushl %eax
+; X86-AVX1-NEXT: .cfi_def_cfa_offset 8
+; X86-AVX1-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; X86-AVX1-NEXT: vcvttps2dq %xmm0, %xmm0
+; X86-AVX1-NEXT: vcvtdq2ps %xmm0, %xmm0
+; X86-AVX1-NEXT: vmovss %xmm0, (%esp)
+; X86-AVX1-NEXT: flds (%esp)
+; X86-AVX1-NEXT: popl %eax
+; X86-AVX1-NEXT: .cfi_def_cfa_offset 4
+; X86-AVX1-NEXT: retl
%i = fptosi float %x to i32
%r = sitofp i32 %i to float
ret float %r
@@ -302,20 +287,20 @@ define float @trunc_signed_f32_nsz(float %x) #0 {
; SSE41-NEXT: roundss $11, %xmm0, %xmm0
; SSE41-NEXT: retq
;
-; X64_AVX1-LABEL: trunc_signed_f32_nsz:
-; X64_AVX1: # %bb.0:
-; X64_AVX1-NEXT: vroundss $11, %xmm0, %xmm0, %xmm0
-; X64_AVX1-NEXT: retq
-;
-; X32_AVX1-LABEL: trunc_signed_f32_nsz:
-; X32_AVX1: # %bb.0:
-; X32_AVX1-NEXT: pushl %eax
-; X32_AVX1-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
-; X32_AVX1-NEXT: vroundss $11, %xmm0, %xmm0, %xmm0
-; X32_AVX1-NEXT: vmovss %xmm0, (%esp)
-; X32_AVX1-NEXT: flds (%esp)
-; X32_AVX1-NEXT: popl %eax
-; X32_AVX1-NEXT: retl
+; X64-AVX1-LABEL: trunc_signed_f32_nsz:
+; X64-AVX1: # %bb.0:
+; X64-AVX1-NEXT: vroundss $11, %xmm0, %xmm0, %xmm0
+; X64-AVX1-NEXT: retq
+;
+; X86-AVX1-LABEL: trunc_signed_f32_nsz:
+; X86-AVX1: # %bb.0:
+; X86-AVX1-NEXT: pushl %eax
+; X86-AVX1-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; X86-AVX1-NEXT: vroundss $11, %xmm0, %xmm0, %xmm0
+; X86-AVX1-NEXT: vmovss %xmm0, (%esp)
+; X86-AVX1-NEXT: flds (%esp)
+; X86-AVX1-NEXT: popl %eax
+; X86-AVX1-NEXT: retl
%i = fptosi float %x to i32
%r = sitofp i32 %i to float
ret float %r
@@ -328,30 +313,30 @@ define double @trunc_signed32_f64_no_fast_math(double %x) {
; SSE-NEXT: cvtdq2pd %xmm0, %xmm0
; SSE-NEXT: retq
;
-; X64_AVX1-LABEL: trunc_signed32_f64_no_fast_math:
-; X64_AVX1: # %bb.0:
-; X64_AVX1-NEXT: vcvttpd2dq %xmm0, %xmm0
-; X64_AVX1-NEXT: vcvtdq2pd %xmm0, %xmm0
-; X64_AVX1-NEXT: retq
-;
-; X32_AVX1-LABEL: trunc_signed32_f64_no_fast_math:
-; X32_AVX1: # %bb.0:
-; X32_AVX1-NEXT: pushl %ebp
-; X32_AVX1-NEXT: .cfi_def_cfa_offset 8
-; X32_AVX1-NEXT: .cfi_offset %ebp, -8
-; X32_AVX1-NEXT: movl %esp, %ebp
-; X32_AVX1-NEXT: .cfi_def_cfa_register %ebp
-; X32_AVX1-NEXT: andl $-8, %esp
-; X32_AVX1-NEXT: subl $8, %esp
-; X32_AVX1-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
-; X32_AVX1-NEXT: vcvttpd2dq %xmm0, %xmm0
-; X32_AVX1-NEXT: vcvtdq2pd %xmm0, %xmm0
-; X32_AVX1-NEXT: vmovlps %xmm0, (%esp)
-; X32_AVX1-NEXT: fldl (%esp)
-; X32_AVX1-NEXT: movl %ebp, %esp
-; X32_AVX1-NEXT: popl %ebp
-; X32_AVX1-NEXT: .cfi_def_cfa %esp, 4
-; X32_AVX1-NEXT: retl
+; X64-AVX1-LABEL: trunc_signed32_f64_no_fast_math:
+; X64-AVX1: # %bb.0:
+; X64-AVX1-NEXT: vcvttpd2dq %xmm0, %xmm0
+; X64-AVX1-NEXT: vcvtdq2pd %xmm0, %xmm0
+; X64-AVX1-NEXT: retq
+;
+; X86-AVX1-LABEL: trunc_signed32_f64_no_fast_math:
+; X86-AVX1: # %bb.0:
+; X86-AVX1-NEXT: pushl %ebp
+; X86-AVX1-NEXT: .cfi_def_cfa_offset 8
+; X86-AVX1-NEXT: .cfi_offset %ebp, -8
+; X86-AVX1-NEXT: movl %esp, %ebp
+; X86-AVX1-NEXT: .cfi_def_cfa_register %ebp
+; X86-AVX1-NEXT: andl $-8, %esp
+; X86-AVX1-NEXT: subl $8, %esp
+; X86-AVX1-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
+; X86-AVX1-NEXT: vcvttpd2dq %xmm0, %xmm0
+; X86-AVX1-NEXT: vcvtdq2pd %xmm0, %xmm0
+; X86-AVX1-NEXT: vmovlps %xmm0, (%esp)
+; X86-AVX1-NEXT: fldl (%esp)
+; X86-AVX1-NEXT: movl %ebp, %esp
+; X86-AVX1-NEXT: popl %ebp
+; X86-AVX1-NEXT: .cfi_def_cfa %esp, 4
+; X86-AVX1-NEXT: retl
%i = fptosi double %x to i32
%r = sitofp i32 %i to double
ret double %r
@@ -369,24 +354,24 @@ define double @trunc_signed32_f64_nsz(double %x) #0 {
; SSE41-NEXT: roundsd $11, %xmm0, %xmm0
; SSE41-NEXT: retq
;
-; X64_AVX1-LABEL: trunc_signed32_f64_nsz:
-; X64_AVX1: # %bb.0:
-; X64_AVX1-NEXT: vroundsd $11, %xmm0, %xmm0, %xmm0
-; X64_AVX1-NEXT: retq
-;
-; X32_AVX1-LABEL: trunc_signed32_f64_nsz:
-; X32_AVX1: # %bb.0:
-; X32_AVX1-NEXT: pushl %ebp
-; X32_AVX1-NEXT: movl %esp, %ebp
-; X32_AVX1-NEXT: andl $-8, %esp
-; X32_AVX1-NEXT: subl $8, %esp
-; X32_AVX1-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
-; X32_AVX1-NEXT: vroundsd $11, %xmm0, %xmm0, %xmm0
-; X32_AVX1-NEXT: vmovsd %xmm0, (%esp)
-; X32_AVX1-NEXT: fldl (%esp)
-; X32_AVX1-NEXT: movl %ebp, %esp
-; X32_AVX1-NEXT: popl %ebp
-; X32_AVX1-NEXT: retl
+; X64-AVX1-LABEL: trunc_signed32_f64_nsz:
+; X64-AVX1: # %bb.0:
+; X64-AVX1-NEXT: vroundsd $11, %xmm0, %xmm0, %xmm0
+; X64-AVX1-NEXT: retq
+;
+; X86-AVX1-LABEL: trunc_signed32_f64_nsz:
+; X86-AVX1: # %bb.0:
+; X86-AVX1-NEXT: pushl %ebp
+; X86-AVX1-NEXT: movl %esp, %ebp
+; X86-AVX1-NEXT: andl $-8, %esp
+; X86-AVX1-NEXT: subl $8, %esp
+; X86-AVX1-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
+; X86-AVX1-NEXT: vroundsd $11, %xmm0, %xmm0, %xmm0
+; X86-AVX1-NEXT: vmovsd %xmm0, (%esp)
+; X86-AVX1-NEXT: fldl (%esp)
+; X86-AVX1-NEXT: movl %ebp, %esp
+; X86-AVX1-NEXT: popl %ebp
+; X86-AVX1-NEXT: retl
%i = fptosi double %x to i32
%r = sitofp i32 %i to double
ret double %r
@@ -399,30 +384,30 @@ define double @trunc_f32_signed32_f64_no_fast_math(float %x) {
; SSE-NEXT: cvtdq2pd %xmm0, %xmm0
; SSE-NEXT: retq
;
-; X64_AVX1-LABEL: trunc_f32_signed32_f64_no_fast_math:
-; X64_AVX1: # %bb.0:
-; X64_AVX1-NEXT: vcvttps2dq %xmm0, %xmm0
-; X64_AVX1-NEXT: vcvtdq2pd %xmm0, %xmm0
-; X64_AVX1-NEXT: retq
-;
-; X32_AVX1-LABEL: trunc_f32_signed32_f64_no_fast_math:
-; X32_AVX1: # %bb.0:
-; X32_AVX1-NEXT: pushl %ebp
-; X32_AVX1-NEXT: .cfi_def_cfa_offset 8
-; X32_AVX1-NEXT: .cfi_offset %ebp, -8
-; X32_AVX1-NEXT: movl %esp, %ebp
-; X32_AVX1-NEXT: .cfi_def_cfa_register %ebp
-; X32_AVX1-NEXT: andl $-8, %esp
-; X32_AVX1-NEXT: subl $8, %esp
-; X32_AVX1-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
-; X32_AVX1-NEXT: vcvttps2dq %xmm0, %xmm0
-; X32_AVX1-NEXT: vcvtdq2pd %xmm0, %xmm0
-; X32_AVX1-NEXT: vmovlps %xmm0, (%esp)
-; X32_AVX1-NEXT: fldl (%esp)
-; X32_AVX1-NEXT: movl %ebp, %esp
-; X32_AVX1-NEXT: popl %ebp
-; X32_AVX1-NEXT: .cfi_def_cfa %esp, 4
-; X32_AVX1-NEXT: retl
+; X64-AVX1-LABEL: trunc_f32_signed32_f64_no_fast_math:
+; X64-AVX1: # %bb.0:
+; X64-AVX1-NEXT: vcvttps2dq %xmm0, %xmm0
+; X64-AVX1-NEXT: vcvtdq2pd %xmm0, %xmm0
+; X64-AVX1-NEXT: retq
+;
+; X86-AVX1-LABEL: trunc_f32_signed32_f64_no_fast_math:
+; X86-AVX1: # %bb.0:
+; X86-AVX1-NEXT: pushl %ebp
+; X86-AVX1-NEXT: .cfi_def_cfa_offset 8
+; X86-AVX1-NEXT: .cfi_offset %ebp, -8
+; X86-AVX1-NEXT: movl %esp, %ebp
+; X86-AVX1-NEXT: .cfi_def_cfa_register %ebp
+; X86-AVX1-NEXT: andl $-8, %esp
+; X86-AVX1-NEXT: subl $8, %esp
+; X86-AVX1-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; X86-AVX1-NEXT: vcvttps2dq %xmm0, %xmm0
+; X86-AVX1-NEXT: vcvtdq2pd %xmm0, %xmm0
+; X86-AVX1-NEXT: vmovlps %xmm0, (%esp)
+; X86-AVX1-NEXT: fldl (%esp)
+; X86-AVX1-NEXT: movl %ebp, %esp
+; X86-AVX1-NEXT: popl %ebp
+; X86-AVX1-NEXT: .cfi_def_cfa %esp, 4
+; X86-AVX1-NEXT: retl
%i = fptosi float %x to i32
%r = sitofp i32 %i to double
ret double %r
@@ -435,26 +420,26 @@ define double @trunc_f32_signed32_f64_nsz(float %x) #0 {
; SSE-NEXT: cvtdq2pd %xmm0, %xmm0
; SSE-NEXT: retq
;
-; X64_AVX1-LABEL: trunc_f32_signed32_f64_nsz:
-; X64_AVX1: # %bb.0:
-; X64_AVX1-NEXT: vcvttps2dq %xmm0, %xmm0
-; X64_AVX1-NEXT: vcvtdq2pd %xmm0, %xmm0
-; X64_AVX1-NEXT: retq
-;
-; X32_AVX1-LABEL: trunc_f32_signed32_f64_nsz:
-; X32_AVX1: # %bb.0:
-; X32_AVX1-NEXT: pushl %ebp
-; X32_AVX1-NEXT: movl %esp, %ebp
-; X32_AVX1-NEXT: andl $-8, %esp
-; X32_AVX1-NEXT: subl $8, %esp
-; X32_AVX1-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
-; X32_AVX1-NEXT: vcvttps2dq %xmm0, %xmm0
-; X32_AVX1-NEXT: vcvtdq2pd %xmm0, %xmm0
-; X32_AVX1-NEXT: vmovlps %xmm0, (%esp)
-; X32_AVX1-NEXT: fldl (%esp)
-; X32_AVX1-NEXT: movl %ebp, %esp
-; X32_AVX1-NEXT: popl %ebp
-; X32_AVX1-NEXT: retl
+; X64-AVX1-LABEL: trunc_f32_signed32_f64_nsz:
+; X64-AVX1: # %bb.0:
+; X64-AVX1-NEXT: vcvttps2dq %xmm0, %xmm0
+; X64-AVX1-NEXT: vcvtdq2pd %xmm0, %xmm0
+; X64-AVX1-NEXT: retq
+;
+; X86-AVX1-LABEL: trunc_f32_signed32_f64_nsz:
+; X86-AVX1: # %bb.0:
+; X86-AVX1-NEXT: pushl %ebp
+; X86-AVX1-NEXT: movl %esp, %ebp
+; X86-AVX1-NEXT: andl $-8, %esp
+; X86-AVX1-NEXT: subl $8, %esp
+; X86-AVX1-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; X86-AVX1-NEXT: vcvttps2dq %xmm0, %xmm0
+; X86-AVX1-NEXT: vcvtdq2pd %xmm0, %xmm0
+; X86-AVX1-NEXT: vmovlps %xmm0, (%esp)
+; X86-AVX1-NEXT: fldl (%esp)
+; X86-AVX1-NEXT: movl %ebp, %esp
+; X86-AVX1-NEXT: popl %ebp
+; X86-AVX1-NEXT: retl
%i = fptosi float %x to i32
%r = sitofp i32 %i to double
ret double %r
@@ -467,24 +452,24 @@ define float @trunc_f64_signed32_f32_no_fast_math(double %x) {
; SSE-NEXT: cvtdq2ps %xmm0, %xmm0
; SSE-NEXT: retq
;
-; X64_AVX1-LABEL: trunc_f64_signed32_f32_no_fast_math:
-; X64_AVX1: # %bb.0:
-; X64_AVX1-NEXT: vcvttpd2dq %xmm0, %xmm0
-; X64_AVX1-NEXT: vcvtdq2ps %xmm0, %xmm0
-; X64_AVX1-NEXT: retq
-;
-; X32_AVX1-LABEL: trunc_f64_signed32_f32_no_fast_math:
-; X32_AVX1: # %bb.0:
-; X32_AVX1-NEXT: pushl %eax
-; X32_AVX1-NEXT: .cfi_def_cfa_offset 8
-; X32_AVX1-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
-; X32_AVX1-NEXT: vcvttpd2dq %xmm0, %xmm0
-; X32_AVX1-NEXT: vcvtdq2ps %xmm0, %xmm0
-; X32_AVX1-NEXT: vmovss %xmm0, (%esp)
-; X32_AVX1-NEXT: flds (%esp)
-; X32_AVX1-NEXT: popl %eax
-; X32_AVX1-NEXT: .cfi_def_cfa_offset 4
-; X32_AVX1-NEXT: retl
+; X64-AVX1-LABEL: trunc_f64_signed32_f32_no_fast_math:
+; X64-AVX1: # %bb.0:
+; X64-AVX1-NEXT: vcvttpd2dq %xmm0, %xmm0
+; X64-AVX1-NEXT: vcvtdq2ps %xmm0, %xmm0
+; X64-AVX1-NEXT: retq
+;
+; X86-AVX1-LABEL: trunc_f64_signed32_f32_no_fast_math:
+; X86-AVX1: # %bb.0:
+; X86-AVX1-NEXT: pushl %eax
+; X86-AVX1-NEXT: .cfi_def_cfa_offset 8
+; X86-AVX1-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
+; X86-AVX1-NEXT: vcvttpd2dq %xmm0, %xmm0
+; X86-AVX1-NEXT: vcvtdq2ps %xmm0, %xmm0
+; X86-AVX1-NEXT: vmovss %xmm0, (%esp)
+; X86-AVX1-NEXT: flds (%esp)
+; X86-AVX1-NEXT: popl %eax
+; X86-AVX1-NEXT: .cfi_def_cfa_offset 4
+; X86-AVX1-NEXT: retl
%i = fptosi double %x to i32
%r = sitofp i32 %i to float
ret float %r
@@ -497,22 +482,22 @@ define float @trunc_f64_signed32_f32_nsz(double %x) #0 {
; SSE-NEXT: cvtdq2ps %xmm0, %xmm0
; SSE-NEXT: retq
;
-; X64_AVX1-LABEL: trunc_f64_signed32_f32_nsz:
-; X64_AVX1: # %bb.0:
-; X64_AVX1-NEXT: vcvttpd2dq %xmm0, %xmm0
-; X64_AVX1-NEXT: vcvtdq2ps %xmm0, %xmm0
-; X64_AVX1-NEXT: retq
-;
-; X32_AVX1-LABEL: trunc_f64_signed32_f32_nsz:
-; X32_AVX1: # %bb.0:
-; X32_AVX1-NEXT: pushl %eax
-; X32_AVX1-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
-; X32_AVX1-NEXT: vcvttpd2dq %xmm0, %xmm0
-; X32_AVX1-NEXT: vcvtdq2ps %xmm0, %xmm0
-; X32_AVX1-NEXT: vmovss %xmm0, (%esp)
-; X32_AVX1-NEXT: flds (%esp)
-; X32_AVX1-NEXT: popl %eax
-; X32_AVX1-NEXT: retl
+; X64-AVX1-LABEL: trunc_f64_signed32_f32_nsz:
+; X64-AVX1: # %bb.0:
+; X64-AVX1-NEXT: vcvttpd2dq %xmm0, %xmm0
+; X64-AVX1-NEXT: vcvtdq2ps %xmm0, %xmm0
+; X64-AVX1-NEXT: retq
+;
+; X86-AVX1-LABEL: trunc_f64_signed32_f32_nsz:
+; X86-AVX1: # %bb.0:
+; X86-AVX1-NEXT: pushl %eax
+; X86-AVX1-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
+; X86-AVX1-NEXT: vcvttpd2dq %xmm0, %xmm0
+; X86-AVX1-NEXT: vcvtdq2ps %xmm0, %xmm0
+; X86-AVX1-NEXT: vmovss %xmm0, (%esp)
+; X86-AVX1-NEXT: flds (%esp)
+; X86-AVX1-NEXT: popl %eax
+; X86-AVX1-NEXT: retl
%i = fptosi double %x to i32
%r = sitofp i32 %i to float
ret float %r
@@ -526,34 +511,34 @@ define double @trunc_signed_f64_no_fast_math(double %x) {
; SSE-NEXT: cvtsi2sd %rax, %xmm0
; SSE-NEXT: retq
;
-; X64_AVX1-LABEL: trunc_signed_f64_no_fast_math:
-; X64_AVX1: # %bb.0:
-; X64_AVX1-NEXT: vcvttsd2si %xmm0, %rax
-; X64_AVX1-NEXT: vcvtsi2sd %rax, %xmm1, %xmm0
-; X64_AVX1-NEXT: retq
-;
-; X32_AVX1-LABEL: trunc_signed_f64_no_fast_math:
-; X32_AVX1: # %bb.0:
-; X32_AVX1-NEXT: pushl %ebp
-; X32_AVX1-NEXT: .cfi_def_cfa_offset 8
-; X32_AVX1-NEXT: .cfi_offset %ebp, -8
-; X32_AVX1-NEXT: movl %esp, %ebp
-; X32_AVX1-NEXT: .cfi_def_cfa_register %ebp
-; X32_AVX1-NEXT: andl $-8, %esp
-; X32_AVX1-NEXT: subl $24, %esp
-; X32_AVX1-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
-; X32_AVX1-NEXT: vmovsd %xmm0, (%esp)
-; X32_AVX1-NEXT: fldl (%esp)
-; X32_AVX1-NEXT: fisttpll (%esp)
-; X32_AVX1-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
-; X32_AVX1-NEXT: vmovlps %xmm0, {{[0-9]+}}(%esp)
-; X32_AVX1-NEXT: fildll {{[0-9]+}}(%esp)
-; X32_AVX1-NEXT: fstpl {{[0-9]+}}(%esp)
-; X32_AVX1-NEXT: fldl {{[0-9]+}}(%esp)
-; X32_AVX1-NEXT: movl %ebp, %esp
-; X32_AVX1-NEXT: popl %ebp
-; X32_AVX1-NEXT: .cfi_def_cfa %esp, 4
-; X32_AVX1-NEXT: retl
+; X64-AVX1-LABEL: trunc_signed_f64_no_fast_math:
+; X64-AVX1: # %bb.0:
+; X64-AVX1-NEXT: vcvttsd2si %xmm0, %rax
+; X64-AVX1-NEXT: vcvtsi2sd %rax, %xmm1, %xmm0
+; X64-AVX1-NEXT: retq
+;
+; X86-AVX1-LABEL: trunc_signed_f64_no_fast_math:
+; X86-AVX1: # %bb.0:
+; X86-AVX1-NEXT: pushl %ebp
+; X86-AVX1-NEXT: .cfi_def_cfa_offset 8
+; X86-AVX1-NEXT: .cfi_offset %ebp, -8
+; X86-AVX1-NEXT: movl %esp, %ebp
+; X86-AVX1-NEXT: .cfi_def_cfa_register %ebp
+; X86-AVX1-NEXT: andl $-8, %esp
+; X86-AVX1-NEXT: subl $24, %esp
+; X86-AVX1-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
+; X86-AVX1-NEXT: vmovsd %xmm0, (%esp)
+; X86-AVX1-NEXT: fldl (%esp)
+; X86-AVX1-NEXT: fisttpll (%esp)
+; X86-AVX1-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
+; X86-AVX1-NEXT: vmovlps %xmm0, {{[0-9]+}}(%esp)
+; X86-AVX1-NEXT: fildll {{[0-9]+}}(%esp)
+; X86-AVX1-NEXT: fstpl {{[0-9]+}}(%esp)
+; X86-AVX1-NEXT: fldl {{[0-9]+}}(%esp)
+; X86-AVX1-NEXT: movl %ebp, %esp
+; X86-AVX1-NEXT: popl %ebp
+; X86-AVX1-NEXT: .cfi_def_cfa %esp, 4
+; X86-AVX1-NEXT: retl
%i = fptosi double %x to i64
%r = sitofp i64 %i to double
ret double %r
@@ -572,24 +557,24 @@ define double @trunc_signed_f64_nsz(double %x) #0 {
; SSE41-NEXT: roundsd $11, %xmm0, %xmm0
; SSE41-NEXT: retq
;
-; X64_AVX1-LABEL: trunc_signed_f64_nsz:
-; X64_AVX1: # %bb.0:
-; X64_AVX1-NEXT: vroundsd $11, %xmm0, %xmm0, %xmm0
-; X64_AVX1-NEXT: retq
-;
-; X32_AVX1-LABEL: trunc_signed_f64_nsz:
-; X32_AVX1: # %bb.0:
-; X32_AVX1-NEXT: pushl %ebp
-; X32_AVX1-NEXT: movl %esp, %ebp
-; X32_AVX1-NEXT: andl $-8, %esp
-; X32_AVX1-NEXT: subl $8, %esp
-; X32_AVX1-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
-; X32_AVX1-NEXT: vroundsd $11, %xmm0, %xmm0, %xmm0
-; X32_AVX1-NEXT: vmovsd %xmm0, (%esp)
-; X32_AVX1-NEXT: fldl (%esp)
-; X32_AVX1-NEXT: movl %ebp, %esp
-; X32_AVX1-NEXT: popl %ebp
-; X32_AVX1-NEXT: retl
+; X64-AVX1-LABEL: trunc_signed_f64_nsz:
+; X64-AVX1: # %bb.0:
+; X64-AVX1-NEXT: vroundsd $11, %xmm0, %xmm0, %xmm0
+; X64-AVX1-NEXT: retq
+;
+; X86-AVX1-LABEL: trunc_signed_f64_nsz:
+; X86-AVX1: # %bb.0:
+; X86-AVX1-NEXT: pushl %ebp
+; X86-AVX1-NEXT: movl %esp, %ebp
+; X86-AVX1-NEXT: andl $-8, %esp
+; X86-AVX1-NEXT: subl $8, %esp
+; X86-AVX1-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
+; X86-AVX1-NEXT: vroundsd $11, %xmm0, %xmm0, %xmm0
+; X86-AVX1-NEXT: vmovsd %xmm0, (%esp)
+; X86-AVX1-NEXT: fldl (%esp)
+; X86-AVX1-NEXT: movl %ebp, %esp
+; X86-AVX1-NEXT: popl %ebp
+; X86-AVX1-NEXT: retl
%i = fptosi double %x to i64
%r = sitofp i64 %i to double
ret double %r
@@ -607,15 +592,10 @@ define <4 x float> @trunc_signed_v4f32_nsz(<4 x float> %x) #0 {
; SSE41-NEXT: roundps $11, %xmm0, %xmm0
; SSE41-NEXT: retq
;
-; X64_AVX1-LABEL: trunc_signed_v4f32_nsz:
-; X64_AVX1: # %bb.0:
-; X64_AVX1-NEXT: vroundps $11, %xmm0, %xmm0
-; X64_AVX1-NEXT: retq
-;
-; X32_AVX1-LABEL: trunc_signed_v4f32_nsz:
-; X32_AVX1: # %bb.0:
-; X32_AVX1-NEXT: vroundps $11, %xmm0, %xmm0
-; X32_AVX1-NEXT: retl
+; AVX-LABEL: trunc_signed_v4f32_nsz:
+; AVX: # %bb.0:
+; AVX-NEXT: vroundps $11, %xmm0, %xmm0
+; AVX-NEXT: ret{{[l|q]}}
%i = fptosi <4 x float> %x to <4 x i32>
%r = sitofp <4 x i32> %i to <4 x float>
ret <4 x float> %r
@@ -638,15 +618,10 @@ define <2 x double> @trunc_signed_v2f64_nsz(<2 x double> %x) #0 {
; SSE41-NEXT: roundpd $11, %xmm0, %xmm0
; SSE41-NEXT: retq
;
-; X64_AVX1-LABEL: trunc_signed_v2f64_nsz:
-; X64_AVX1: # %bb.0:
-; X64_AVX1-NEXT: vroundpd $11, %xmm0, %xmm0
-; X64_AVX1-NEXT: retq
-;
-; X32_AVX1-LABEL: trunc_signed_v2f64_nsz:
-; X32_AVX1: # %bb.0:
-; X32_AVX1-NEXT: vroundpd $11, %xmm0, %xmm0
-; X32_AVX1-NEXT: retl
+; AVX-LABEL: trunc_signed_v2f64_nsz:
+; AVX: # %bb.0:
+; AVX-NEXT: vroundpd $11, %xmm0, %xmm0
+; AVX-NEXT: ret{{[l|q]}}
%i = fptosi <2 x double> %x to <2 x i64>
%r = sitofp <2 x i64> %i to <2 x double>
ret <2 x double> %r
@@ -678,15 +653,10 @@ define <4 x double> @trunc_signed_v4f64_nsz(<4 x double> %x) #0 {
; SSE41-NEXT: roundpd $11, %xmm1, %xmm1
; SSE41-NEXT: retq
;
-; X64_AVX1-LABEL: trunc_signed_v4f64_nsz:
-; X64_AVX1: # %bb.0:
-; X64_AVX1-NEXT: vroundpd $11, %ymm0, %ymm0
-; X64_AVX1-NEXT: retq
-;
-; X32_AVX1-LABEL: trunc_signed_v4f64_nsz:
-; X32_AVX1: # %bb.0:
-; X32_AVX1-NEXT: vroundpd $11, %ymm0, %ymm0
-; X32_AVX1-NEXT: retl
+; AVX-LABEL: trunc_signed_v4f64_nsz:
+; AVX: # %bb.0:
+; AVX-NEXT: vroundpd $11, %ymm0, %ymm0
+; AVX-NEXT: ret{{[l|q]}}
%i = fptosi <4 x double> %x to <4 x i64>
%r = sitofp <4 x i64> %i to <4 x double>
ret <4 x double> %r
@@ -715,45 +685,45 @@ define float @trunc_unsigned_f32_disable_via_intrinsic(float %x) #0 {
; SSE-NEXT: cvtsi2ss %rax, %xmm0
; SSE-NEXT: retq
;
-; X64_AVX1-LABEL: trunc_unsigned_f32_disable_via_intrinsic:
-; X64_AVX1: # %bb.0:
-; X64_AVX1-NEXT: vcvttss2si %xmm0, %rax
-; X64_AVX1-NEXT: xorl %ecx, %ecx
-; X64_AVX1-NEXT: vxorps %xmm1, %xmm1, %xmm1
-; X64_AVX1-NEXT: vucomiss %xmm1, %xmm0
-; X64_AVX1-NEXT: cmovael %eax, %ecx
-; X64_AVX1-NEXT: vucomiss {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
-; X64_AVX1-NEXT: movl $-1, %eax
-; X64_AVX1-NEXT: cmovbel %ecx, %eax
-; X64_AVX1-NEXT: vcvtsi2ss %rax, %xmm2, %xmm0
-; X64_AVX1-NEXT: retq
-;
-; X32_AVX1-LABEL: trunc_unsigned_f32_disable_via_intrinsic:
-; X32_AVX1: # %bb.0:
-; X32_AVX1-NEXT: pushl %eax
-; X32_AVX1-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
-; X32_AVX1-NEXT: vcvttss2si %xmm0, %eax
-; X32_AVX1-NEXT: movl %eax, %ecx
-; X32_AVX1-NEXT: sarl $31, %ecx
-; X32_AVX1-NEXT: vsubss {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm1
-; X32_AVX1-NEXT: vcvttss2si %xmm1, %edx
-; X32_AVX1-NEXT: andl %ecx, %edx
-; X32_AVX1-NEXT: orl %eax, %edx
-; X32_AVX1-NEXT: xorl %eax, %eax
-; X32_AVX1-NEXT: vxorps %xmm1, %xmm1, %xmm1
-; X32_AVX1-NEXT: vucomiss %xmm1, %xmm0
-; X32_AVX1-NEXT: cmovael %edx, %eax
-; X32_AVX1-NEXT: vucomiss {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0
-; X32_AVX1-NEXT: movl $-1, %ecx
-; X32_AVX1-NEXT: cmovbel %eax, %ecx
-; X32_AVX1-NEXT: vmovd %ecx, %xmm0
-; X32_AVX1-NEXT: vpor {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0
-; X32_AVX1-NEXT: vsubsd {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0
-; X32_AVX1-NEXT: vcvtsd2ss %xmm0, %xmm0, %xmm0
-; X32_AVX1-NEXT: vmovss %xmm0, (%esp)
-; X32_AVX1-NEXT: flds (%esp)
-; X32_AVX1-NEXT: popl %eax
-; X32_AVX1-NEXT: retl
+; X64-AVX1-LABEL: trunc_unsigned_f32_disable_via_intrinsic:
+; X64-AVX1: # %bb.0:
+; X64-AVX1-NEXT: vcvttss2si %xmm0, %rax
+; X64-AVX1-NEXT: xorl %ecx, %ecx
+; X64-AVX1-NEXT: vxorps %xmm1, %xmm1, %xmm1
+; X64-AVX1-NEXT: vucomiss %xmm1, %xmm0
+; X64-AVX1-NEXT: cmovael %eax, %ecx
+; X64-AVX1-NEXT: vucomiss {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
+; X64-AVX1-NEXT: movl $-1, %eax
+; X64-AVX1-NEXT: cmovbel %ecx, %eax
+; X64-AVX1-NEXT: vcvtsi2ss %rax, %xmm2, %xmm0
+; X64-AVX1-NEXT: retq
+;
+; X86-AVX1-LABEL: trunc_unsigned_f32_disable_via_intrinsic:
+; X86-AVX1: # %bb.0:
+; X86-AVX1-NEXT: pushl %eax
+; X86-AVX1-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; X86-AVX1-NEXT: vcvttss2si %xmm0, %eax
+; X86-AVX1-NEXT: movl %eax, %ecx
+; X86-AVX1-NEXT: sarl $31, %ecx
+; X86-AVX1-NEXT: vsubss {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm1
+; X86-AVX1-NEXT: vcvttss2si %xmm1, %edx
+; X86-AVX1-NEXT: andl %ecx, %edx
+; X86-AVX1-NEXT: orl %eax, %edx
+; X86-AVX1-NEXT: xorl %eax, %eax
+; X86-AVX1-NEXT: vxorps %xmm1, %xmm1, %xmm1
+; X86-AVX1-NEXT: vucomiss %xmm1, %xmm0
+; X86-AVX1-NEXT: cmovael %edx, %eax
+; X86-AVX1-NEXT: vucomiss {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0
+; X86-AVX1-NEXT: movl $-1, %ecx
+; X86-AVX1-NEXT: cmovbel %eax, %ecx
+; X86-AVX1-NEXT: vmovd %ecx, %xmm0
+; X86-AVX1-NEXT: vpor {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0
+; X86-AVX1-NEXT: vsubsd {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0
+; X86-AVX1-NEXT: vcvtsd2ss %xmm0, %xmm0, %xmm0
+; X86-AVX1-NEXT: vmovss %xmm0, (%esp)
+; X86-AVX1-NEXT: flds (%esp)
+; X86-AVX1-NEXT: popl %eax
+; X86-AVX1-NEXT: retl
%i = call i32 @llvm.fptoui.sat.i32.f32(float %x)
%r = uitofp i32 %i to float
ret float %r
@@ -773,56 +743,56 @@ define double @trunc_signed_f64_disable_via_intrinsic(double %x) #0 {
; SSE-NEXT: cvtsi2sd %rax, %xmm0
; SSE-NEXT: retq
;
-; X64_AVX1-LABEL: trunc_signed_f64_disable_via_intrinsic:
-; X64_AVX1: # %bb.0:
-; X64_AVX1-NEXT: vcvttsd2si %xmm0, %rax
-; X64_AVX1-NEXT: vucomisd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
-; X64_AVX1-NEXT: movabsq $9223372036854775807, %rcx # imm = 0x7FFFFFFFFFFFFFFF
-; X64_AVX1-NEXT: cmovbeq %rax, %rcx
-; X64_AVX1-NEXT: xorl %eax, %eax
-; X64_AVX1-NEXT: vucomisd %xmm0, %xmm0
-; X64_AVX1-NEXT: cmovnpq %rcx, %rax
-; X64_AVX1-NEXT: vcvtsi2sd %rax, %xmm1, %xmm0
-; X64_AVX1-NEXT: retq
-;
-; X32_AVX1-LABEL: trunc_signed_f64_disable_via_intrinsic:
-; X32_AVX1: # %bb.0:
-; X32_AVX1-NEXT: pushl %ebp
-; X32_AVX1-NEXT: movl %esp, %ebp
-; X32_AVX1-NEXT: pushl %esi
-; X32_AVX1-NEXT: andl $-8, %esp
-; X32_AVX1-NEXT: subl $32, %esp
-; X32_AVX1-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
-; X32_AVX1-NEXT: vmovsd %xmm0, (%esp)
-; X32_AVX1-NEXT: fldl (%esp)
-; X32_AVX1-NEXT: fisttpll (%esp)
-; X32_AVX1-NEXT: xorl %eax, %eax
-; X32_AVX1-NEXT: vucomisd {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0
-; X32_AVX1-NEXT: movl $-2147483648, %ecx # imm = 0x80000000
-; X32_AVX1-NEXT: movl $0, %edx
-; X32_AVX1-NEXT: jb .LBB19_2
-; X32_AVX1-NEXT: # %bb.1:
-; X32_AVX1-NEXT: movl {{[0-9]+}}(%esp), %ecx
-; X32_AVX1-NEXT: movl (%esp), %edx
-; X32_AVX1-NEXT: .LBB19_2:
-; X32_AVX1-NEXT: vucomisd {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0
-; X32_AVX1-NEXT: movl $-1, %esi
-; X32_AVX1-NEXT: cmovbel %edx, %esi
-; X32_AVX1-NEXT: movl $2147483647, %edx # imm = 0x7FFFFFFF
-; X32_AVX1-NEXT: cmovbel %ecx, %edx
-; X32_AVX1-NEXT: vucomisd %xmm0, %xmm0
-; X32_AVX1-NEXT: cmovpl %eax, %edx
-; X32_AVX1-NEXT: cmovpl %eax, %esi
-; X32_AVX1-NEXT: vmovd %esi, %xmm0
-; X32_AVX1-NEXT: vpinsrd $1, %edx, %xmm0, %xmm0
-; X32_AVX1-NEXT: vmovq %xmm0, {{[0-9]+}}(%esp)
-; X32_AVX1-NEXT: fildll {{[0-9]+}}(%esp)
-; X32_AVX1-NEXT: fstpl {{[0-9]+}}(%esp)
-; X32_AVX1-NEXT: fldl {{[0-9]+}}(%esp)
-; X32_AVX1-NEXT: leal -4(%ebp), %esp
-; X32_AVX1-NEXT: popl %esi
-; X32_AVX1-NEXT: popl %ebp
-; X32_AVX1-NEXT: retl
+; X64-AVX1-LABEL: trunc_signed_f64_disable_via_intrinsic:
+; X64-AVX1: # %bb.0:
+; X64-AVX1-NEXT: vcvttsd2si %xmm0, %rax
+; X64-AVX1-NEXT: vucomisd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
+; X64-AVX1-NEXT: movabsq $9223372036854775807, %rcx # imm = 0x7FFFFFFFFFFFFFFF
+; X64-AVX1-NEXT: cmovbeq %rax, %rcx
+; X64-AVX1-NEXT: xorl %eax, %eax
+; X64-AVX1-NEXT: vucomisd %xmm0, %xmm0
+; X64-AVX1-NEXT: cmovnpq %rcx, %rax
+; X64-AVX1-NEXT: vcvtsi2sd %rax, %xmm1, %xmm0
+; X64-AVX1-NEXT: retq
+;
+; X86-AVX1-LABEL: trunc_signed_f64_disable_via_intrinsic:
+; X86-AVX1: # %bb.0:
+; X86-AVX1-NEXT: pushl %ebp
+; X86-AVX1-NEXT: movl %esp, %ebp
+; X86-AVX1-NEXT: pushl %esi
+; X86-AVX1-NEXT: andl $-8, %esp
+; X86-AVX1-NEXT: subl $32, %esp
+; X86-AVX1-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
+; X86-AVX1-NEXT: vmovsd %xmm0, (%esp)
+; X86-AVX1-NEXT: fldl (%esp)
+; X86-AVX1-NEXT: fisttpll (%esp)
+; X86-AVX1-NEXT: xorl %eax, %eax
+; X86-AVX1-NEXT: vucomisd {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0
+; X86-AVX1-NEXT: movl $-2147483648, %ecx # imm = 0x80000000
+; X86-AVX1-NEXT: movl $0, %edx
+; X86-AVX1-NEXT: jb .LBB19_2
+; X86-AVX1-NEXT: # %bb.1:
+; X86-AVX1-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X86-AVX1-NEXT: movl (%esp), %edx
+; X86-AVX1-NEXT: .LBB19_2:
+; X86-AVX1-NEXT: vucomisd {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0
+; X86-AVX1-NEXT: movl $-1, %esi
+; X86-AVX1-NEXT: cmovbel %edx, %esi
+; X86-AVX1-NEXT: movl $2147483647, %edx # imm = 0x7FFFFFFF
+; X86-AVX1-NEXT: cmovbel %ecx, %edx
+; X86-AVX1-NEXT: vucomisd %xmm0, %xmm0
+; X86-AVX1-NEXT: cmovpl %eax, %edx
+; X86-AVX1-NEXT: cmovpl %eax, %esi
+; X86-AVX1-NEXT: vmovd %esi, %xmm0
+; X86-AVX1-NEXT: vpinsrd $1, %edx, %xmm0, %xmm0
+; X86-AVX1-NEXT: vmovq %xmm0, {{[0-9]+}}(%esp)
+; X86-AVX1-NEXT: fildll {{[0-9]+}}(%esp)
+; X86-AVX1-NEXT: fstpl {{[0-9]+}}(%esp)
+; X86-AVX1-NEXT: fldl {{[0-9]+}}(%esp)
+; X86-AVX1-NEXT: leal -4(%ebp), %esp
+; X86-AVX1-NEXT: popl %esi
+; X86-AVX1-NEXT: popl %ebp
+; X86-AVX1-NEXT: retl
%i = call i64 @llvm.fptosi.sat.i64.f64(double %x)
%r = sitofp i64 %i to double
ret double %r
diff --git a/llvm/test/CodeGen/X86/i64-mem-copy.ll b/llvm/test/CodeGen/X86/i64-mem-copy.ll
index 500c6c787694..4cdb079d4399 100644
--- a/llvm/test/CodeGen/X86/i64-mem-copy.ll
+++ b/llvm/test/CodeGen/X86/i64-mem-copy.ll
@@ -1,33 +1,33 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=sse2 | FileCheck %s --check-prefix=X64
-; RUN: llc < %s -mtriple=i386-unknown-unknown -mattr=sse2 | FileCheck %s --check-prefix=X32
-; RUN: llc < %s -mtriple=i386-unknown-unknown -mattr=avx2 | FileCheck %s --check-prefix=X32AVX
+; RUN: llc < %s -mtriple=i386-unknown-unknown -mattr=sse2 | FileCheck %s --check-prefix=X86
+; RUN: llc < %s -mtriple=i386-unknown-unknown -mattr=avx2 | FileCheck %s --check-prefix=X86AVX
; Use movq or movsd to load / store i64 values if sse2 is available.
; rdar://6659858
-define void @foo(ptr %x, ptr %y) {
+define void @foo(ptr %x, ptr %y) nounwind {
; X64-LABEL: foo:
; X64: # %bb.0:
; X64-NEXT: movq (%rsi), %rax
; X64-NEXT: movq %rax, (%rdi)
; X64-NEXT: retq
;
-; X32-LABEL: foo:
-; X32: # %bb.0:
-; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
-; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx
-; X32-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
-; X32-NEXT: movsd %xmm0, (%eax)
-; X32-NEXT: retl
+; X86-LABEL: foo:
+; X86: # %bb.0:
+; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
+; X86-NEXT: movsd %xmm0, (%eax)
+; X86-NEXT: retl
;
-; X32AVX-LABEL: foo:
-; X32AVX: # %bb.0:
-; X32AVX-NEXT: movl {{[0-9]+}}(%esp), %eax
-; X32AVX-NEXT: movl {{[0-9]+}}(%esp), %ecx
-; X32AVX-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
-; X32AVX-NEXT: vmovsd %xmm0, (%eax)
-; X32AVX-NEXT: retl
+; X86AVX-LABEL: foo:
+; X86AVX: # %bb.0:
+; X86AVX-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86AVX-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X86AVX-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
+; X86AVX-NEXT: vmovsd %xmm0, (%eax)
+; X86AVX-NEXT: retl
%tmp1 = load i64, ptr %y, align 8
store i64 %tmp1, ptr %x, align 8
ret void
@@ -36,26 +36,26 @@ define void @foo(ptr %x, ptr %y) {
; Verify that a 64-bit chunk extracted from a vector is stored with a movq
; regardless of whether the system is 64-bit.
-define void @store_i64_from_vector(<8 x i16> %x, <8 x i16> %y, ptr %i) {
+define void @store_i64_from_vector(<8 x i16> %x, <8 x i16> %y, ptr %i) nounwind {
; X64-LABEL: store_i64_from_vector:
; X64: # %bb.0:
; X64-NEXT: paddw %xmm1, %xmm0
; X64-NEXT: movq %xmm0, (%rdi)
; X64-NEXT: retq
;
-; X32-LABEL: store_i64_from_vector:
-; X32: # %bb.0:
-; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
-; X32-NEXT: paddw %xmm1, %xmm0
-; X32-NEXT: movq %xmm0, (%eax)
-; X32-NEXT: retl
+; X86-LABEL: store_i64_from_vector:
+; X86: # %bb.0:
+; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT: paddw %xmm1, %xmm0
+; X86-NEXT: movq %xmm0, (%eax)
+; X86-NEXT: retl
;
-; X32AVX-LABEL: store_i64_from_vector:
-; X32AVX: # %bb.0:
-; X32AVX-NEXT: movl {{[0-9]+}}(%esp), %eax
-; X32AVX-NEXT: vpaddw %xmm1, %xmm0, %xmm0
-; X32AVX-NEXT: vmovq %xmm0, (%eax)
-; X32AVX-NEXT: retl
+; X86AVX-LABEL: store_i64_from_vector:
+; X86AVX: # %bb.0:
+; X86AVX-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86AVX-NEXT: vpaddw %xmm1, %xmm0, %xmm0
+; X86AVX-NEXT: vmovq %xmm0, (%eax)
+; X86AVX-NEXT: retl
%z = add <8 x i16> %x, %y ; force execution domain
%bc = bitcast <8 x i16> %z to <2 x i64>
%vecext = extractelement <2 x i64> %bc, i32 0
@@ -63,39 +63,35 @@ define void @store_i64_from_vector(<8 x i16> %x, <8 x i16> %y, ptr %i) {
ret void
}
-define void @store_i64_from_vector256(<16 x i16> %x, <16 x i16> %y, ptr %i) {
+define void @store_i64_from_vector256(<16 x i16> %x, <16 x i16> %y, ptr %i) nounwind {
; X64-LABEL: store_i64_from_vector256:
; X64: # %bb.0:
; X64-NEXT: paddw %xmm3, %xmm1
; X64-NEXT: movq %xmm1, (%rdi)
; X64-NEXT: retq
;
-; X32-LABEL: store_i64_from_vector256:
-; X32: # %bb.0:
-; X32-NEXT: pushl %ebp
-; X32-NEXT: .cfi_def_cfa_offset 8
-; X32-NEXT: .cfi_offset %ebp, -8
-; X32-NEXT: movl %esp, %ebp
-; X32-NEXT: .cfi_def_cfa_register %ebp
-; X32-NEXT: andl $-16, %esp
-; X32-NEXT: subl $16, %esp
-; X32-NEXT: movl 24(%ebp), %eax
-; X32-NEXT: paddw 8(%ebp), %xmm1
-; X32-NEXT: movq %xmm1, (%eax)
-; X32-NEXT: movl %ebp, %esp
-; X32-NEXT: popl %ebp
-; X32-NEXT: .cfi_def_cfa %esp, 4
-; X32-NEXT: retl
+; X86-LABEL: store_i64_from_vector256:
+; X86: # %bb.0:
+; X86-NEXT: pushl %ebp
+; X86-NEXT: movl %esp, %ebp
+; X86-NEXT: andl $-16, %esp
+; X86-NEXT: subl $16, %esp
+; X86-NEXT: movl 24(%ebp), %eax
+; X86-NEXT: paddw 8(%ebp), %xmm1
+; X86-NEXT: movq %xmm1, (%eax)
+; X86-NEXT: movl %ebp, %esp
+; X86-NEXT: popl %ebp
+; X86-NEXT: retl
;
-; X32AVX-LABEL: store_i64_from_vector256:
-; X32AVX: # %bb.0:
-; X32AVX-NEXT: movl {{[0-9]+}}(%esp), %eax
-; X32AVX-NEXT: vextracti128 $1, %ymm1, %xmm1
-; X32AVX-NEXT: vextracti128 $1, %ymm0, %xmm0
-; X32AVX-NEXT: vpaddw %xmm1, %xmm0, %xmm0
-; X32AVX-NEXT: vmovq %xmm0, (%eax)
-; X32AVX-NEXT: vzeroupper
-; X32AVX-NEXT: retl
+; X86AVX-LABEL: store_i64_from_vector256:
+; X86AVX: # %bb.0:
+; X86AVX-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86AVX-NEXT: vextracti128 $1, %ymm1, %xmm1
+; X86AVX-NEXT: vextracti128 $1, %ymm0, %xmm0
+; X86AVX-NEXT: vpaddw %xmm1, %xmm0, %xmm0
+; X86AVX-NEXT: vmovq %xmm0, (%eax)
+; X86AVX-NEXT: vzeroupper
+; X86AVX-NEXT: retl
%z = add <16 x i16> %x, %y ; force execution domain
%bc = bitcast <16 x i16> %z to <4 x i64>
%vecext = extractelement <4 x i64> %bc, i32 2
@@ -125,46 +121,46 @@ define void @PR23476(<5 x i64> %in, ptr %out, i32 %index) nounwind {
; X64-NEXT: movq %rax, (%r9)
; X64-NEXT: retq
;
-; X32-LABEL: PR23476:
-; X32: # %bb.0:
-; X32-NEXT: pushl %ebp
-; X32-NEXT: movl %esp, %ebp
-; X32-NEXT: andl $-16, %esp
-; X32-NEXT: subl $80, %esp
-; X32-NEXT: movl 52(%ebp), %eax
-; X32-NEXT: andl $7, %eax
-; X32-NEXT: movl 48(%ebp), %ecx
-; X32-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
-; X32-NEXT: movups 8(%ebp), %xmm1
-; X32-NEXT: movups 24(%ebp), %xmm2
-; X32-NEXT: movaps %xmm2, {{[0-9]+}}(%esp)
-; X32-NEXT: movaps %xmm1, (%esp)
-; X32-NEXT: movaps %xmm0, {{[0-9]+}}(%esp)
-; X32-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
-; X32-NEXT: movsd %xmm0, (%ecx)
-; X32-NEXT: movl %ebp, %esp
-; X32-NEXT: popl %ebp
-; X32-NEXT: retl
+; X86-LABEL: PR23476:
+; X86: # %bb.0:
+; X86-NEXT: pushl %ebp
+; X86-NEXT: movl %esp, %ebp
+; X86-NEXT: andl $-16, %esp
+; X86-NEXT: subl $80, %esp
+; X86-NEXT: movl 52(%ebp), %eax
+; X86-NEXT: andl $7, %eax
+; X86-NEXT: movl 48(%ebp), %ecx
+; X86-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
+; X86-NEXT: movups 8(%ebp), %xmm1
+; X86-NEXT: movups 24(%ebp), %xmm2
+; X86-NEXT: movaps %xmm2, {{[0-9]+}}(%esp)
+; X86-NEXT: movaps %xmm1, (%esp)
+; X86-NEXT: movaps %xmm0, {{[0-9]+}}(%esp)
+; X86-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
+; X86-NEXT: movsd %xmm0, (%ecx)
+; X86-NEXT: movl %ebp, %esp
+; X86-NEXT: popl %ebp
+; X86-NEXT: retl
;
-; X32AVX-LABEL: PR23476:
-; X32AVX: # %bb.0:
-; X32AVX-NEXT: pushl %ebp
-; X32AVX-NEXT: movl %esp, %ebp
-; X32AVX-NEXT: andl $-32, %esp
-; X32AVX-NEXT: subl $96, %esp
-; X32AVX-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
-; X32AVX-NEXT: movl 52(%ebp), %eax
-; X32AVX-NEXT: andl $7, %eax
-; X32AVX-NEXT: movl 48(%ebp), %ecx
-; X32AVX-NEXT: vmovups 8(%ebp), %ymm1
-; X32AVX-NEXT: vmovaps %ymm1, (%esp)
-; X32AVX-NEXT: vmovaps %ymm0, {{[0-9]+}}(%esp)
-; X32AVX-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
-; X32AVX-NEXT: vmovsd %xmm0, (%ecx)
-; X32AVX-NEXT: movl %ebp, %esp
-; X32AVX-NEXT: popl %ebp
-; X32AVX-NEXT: vzeroupper
-; X32AVX-NEXT: retl
+; X86AVX-LABEL: PR23476:
+; X86AVX: # %bb.0:
+; X86AVX-NEXT: pushl %ebp
+; X86AVX-NEXT: movl %esp, %ebp
+; X86AVX-NEXT: andl $-32, %esp
+; X86AVX-NEXT: subl $96, %esp
+; X86AVX-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
+; X86AVX-NEXT: movl 52(%ebp), %eax
+; X86AVX-NEXT: andl $7, %eax
+; X86AVX-NEXT: movl 48(%ebp), %ecx
+; X86AVX-NEXT: vmovups 8(%ebp), %ymm1
+; X86AVX-NEXT: vmovaps %ymm1, (%esp)
+; X86AVX-NEXT: vmovaps %ymm0, {{[0-9]+}}(%esp)
+; X86AVX-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
+; X86AVX-NEXT: vmovsd %xmm0, (%ecx)
+; X86AVX-NEXT: movl %ebp, %esp
+; X86AVX-NEXT: popl %ebp
+; X86AVX-NEXT: vzeroupper
+; X86AVX-NEXT: retl
%ext = extractelement <5 x i64> %in, i32 %index
store i64 %ext, ptr %out, align 8
ret void
diff --git a/llvm/test/CodeGen/X86/inline-sse.ll b/llvm/test/CodeGen/X86/inline-sse.ll
index 4e09359afa82..87aa882a1f49 100644
--- a/llvm/test/CodeGen/X86/inline-sse.ll
+++ b/llvm/test/CodeGen/X86/inline-sse.ll
@@ -1,23 +1,23 @@
-; RUN: llc < %s -mtriple=i686-unknown-unknown -mattr=+sse | FileCheck %s --check-prefix=X32
-; RUN: llc < %s -mtriple=i686-unknown-unknown -mattr=+sse2 | FileCheck %s --check-prefix=X32
+; RUN: llc < %s -mtriple=i686-unknown-unknown -mattr=+sse | FileCheck %s --check-prefix=X86
+; RUN: llc < %s -mtriple=i686-unknown-unknown -mattr=+sse2 | FileCheck %s --check-prefix=X86
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=-sse2 | FileCheck %s --check-prefix=X64
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+sse2 | FileCheck %s --check-prefix=X64
; PR16133 - we must treat XMM registers as v4f32 as SSE1 targets don't permit other vector types.
define void @nop() nounwind {
-; X32-LABEL: nop:
-; X32: # %bb.0:
-; X32-NEXT: pushl %ebp
-; X32-NEXT: movl %esp, %ebp
-; X32-NEXT: andl $-16, %esp
-; X32-NEXT: subl $32, %esp
-; X32-NEXT: #APP
-; X32-NEXT: #NO_APP
-; X32-NEXT: movaps %xmm0, (%esp)
-; X32-NEXT: movl %ebp, %esp
-; X32-NEXT: popl %ebp
-; X32-NEXT: retl
+; X86-LABEL: nop:
+; X86: # %bb.0:
+; X86-NEXT: pushl %ebp
+; X86-NEXT: movl %esp, %ebp
+; X86-NEXT: andl $-16, %esp
+; X86-NEXT: subl $32, %esp
+; X86-NEXT: #APP
+; X86-NEXT: #NO_APP
+; X86-NEXT: movaps %xmm0, (%esp)
+; X86-NEXT: movl %ebp, %esp
+; X86-NEXT: popl %ebp
+; X86-NEXT: retl
;
; X64-LABEL: nop:
; X64: # %bb.0:
diff --git a/llvm/test/CodeGen/X86/invpcid-intrinsic.ll b/llvm/test/CodeGen/X86/invpcid-intrinsic.ll
index 19a6249fc708..66f7855239c0 100644
--- a/llvm/test/CodeGen/X86/invpcid-intrinsic.ll
+++ b/llvm/test/CodeGen/X86/invpcid-intrinsic.ll
@@ -20,7 +20,7 @@ define void @test_invpcid(i32 %type, ptr %descriptor) {
; EGPR-LABEL: test_invpcid:
; EGPR: # %bb.0: # %entry
; EGPR-NEXT: movl %edi, %eax # encoding: [0x89,0xf8]
-; EGPR-NEXT: invpcid (%rsi), %rax # encoding: [0x62,0xf4,0x7e,0x08,0xf2,0x06]
+; EGPR-NEXT: invpcid (%rsi), %rax # EVEX TO LEGACY Compression encoding: [0x66,0x0f,0x38,0x82,0x06]
; EGPR-NEXT: retq # encoding: [0xc3]
entry:
call void @llvm.x86.invpcid(i32 %type, ptr %descriptor)
@@ -45,7 +45,7 @@ define void @test_invpcid2(ptr readonly %type, ptr %descriptor) {
; EGPR-LABEL: test_invpcid2:
; EGPR: # %bb.0: # %entry
; EGPR-NEXT: movl (%rdi), %eax # encoding: [0x8b,0x07]
-; EGPR-NEXT: invpcid (%rsi), %rax # encoding: [0x62,0xf4,0x7e,0x08,0xf2,0x06]
+; EGPR-NEXT: invpcid (%rsi), %rax # EVEX TO LEGACY Compression encoding: [0x66,0x0f,0x38,0x82,0x06]
; EGPR-NEXT: retq # encoding: [0xc3]
entry:
%0 = load i32, ptr %type, align 4
diff --git a/llvm/test/CodeGen/X86/lea-2.ll b/llvm/test/CodeGen/X86/lea-2.ll
index c91e2f297405..a48c02ff3e0b 100644
--- a/llvm/test/CodeGen/X86/lea-2.ll
+++ b/llvm/test/CodeGen/X86/lea-2.ll
@@ -1,5 +1,5 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc < %s -mtriple=i686-linux | FileCheck %s --check-prefix=X32
+; RUN: llc < %s -mtriple=i686-linux | FileCheck %s --check-prefix=X86
; RUN: llc < %s -mtriple=x86_64-linux | FileCheck %s --check-prefix=X64
; RUN: llc < %s -mtriple=x86_64-linux-gnux32 | FileCheck %s --check-prefix=X64
; RUN: llc < %s -mtriple=x86_64-nacl | FileCheck %s --check-prefix=X64
@@ -7,12 +7,12 @@
; The computation of %t4 should match a single lea, without using actual add instructions.
define i32 @test1(i32 %A, i32 %B) {
-; X32-LABEL: test1:
-; X32: # %bb.0:
-; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
-; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx
-; X32-NEXT: leal -5(%ecx,%eax,4), %eax
-; X32-NEXT: retl
+; X86-LABEL: test1:
+; X86: # %bb.0:
+; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT: leal -5(%ecx,%eax,4), %eax
+; X86-NEXT: retl
;
; X64-LABEL: test1:
; X64: # %bb.0:
@@ -29,16 +29,16 @@ define i32 @test1(i32 %A, i32 %B) {
; The addlike OR instruction should fold into the LEA.
define i64 @test2(i32 %a0, i64 %a1) {
-; X32-LABEL: test2:
-; X32: # %bb.0:
-; X32-NEXT: movl {{[0-9]+}}(%esp), %edx
-; X32-NEXT: movl %edx, %eax
-; X32-NEXT: andl $2147483640, %eax # imm = 0x7FFFFFF8
-; X32-NEXT: shrl $31, %edx
-; X32-NEXT: leal 4(%eax,%eax), %eax
-; X32-NEXT: addl {{[0-9]+}}(%esp), %eax
-; X32-NEXT: adcl {{[0-9]+}}(%esp), %edx
-; X32-NEXT: retl
+; X86-LABEL: test2:
+; X86: # %bb.0:
+; X86-NEXT: movl {{[0-9]+}}(%esp), %edx
+; X86-NEXT: movl %edx, %eax
+; X86-NEXT: andl $2147483640, %eax # imm = 0x7FFFFFF8
+; X86-NEXT: shrl $31, %edx
+; X86-NEXT: leal 4(%eax,%eax), %eax
+; X86-NEXT: addl {{[0-9]+}}(%esp), %eax
+; X86-NEXT: adcl {{[0-9]+}}(%esp), %edx
+; X86-NEXT: retl
;
; X64-LABEL: test2:
; X64: # %bb.0:
diff --git a/llvm/test/CodeGen/X86/legalize-shl-vec.ll b/llvm/test/CodeGen/X86/legalize-shl-vec.ll
index cf423227f23b..5e168a82e03e 100644
--- a/llvm/test/CodeGen/X86/legalize-shl-vec.ll
+++ b/llvm/test/CodeGen/X86/legalize-shl-vec.ll
@@ -1,46 +1,46 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc < %s -mtriple=i686-unknown-unknown | FileCheck %s --check-prefix=X32
+; RUN: llc < %s -mtriple=i686-unknown-unknown | FileCheck %s --check-prefix=X86
; RUN: llc < %s -mtriple=x86_64-unknown-unknown | FileCheck %s --check-prefix=X64
-define <2 x i256> @test_shl(<2 x i256> %In) {
-; X32-LABEL: test_shl:
-; X32: # %bb.0:
-; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
-; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx
-; X32-NEXT: movl {{[0-9]+}}(%esp), %edx
-; X32-NEXT: shldl $2, %ecx, %edx
-; X32-NEXT: movl %edx, 60(%eax)
-; X32-NEXT: movl {{[0-9]+}}(%esp), %edx
-; X32-NEXT: shldl $2, %edx, %ecx
-; X32-NEXT: movl %ecx, 56(%eax)
-; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx
-; X32-NEXT: shldl $2, %ecx, %edx
-; X32-NEXT: movl %edx, 52(%eax)
-; X32-NEXT: movl {{[0-9]+}}(%esp), %edx
-; X32-NEXT: shldl $2, %edx, %ecx
-; X32-NEXT: movl %ecx, 48(%eax)
-; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx
-; X32-NEXT: shldl $2, %ecx, %edx
-; X32-NEXT: movl %edx, 44(%eax)
-; X32-NEXT: movl {{[0-9]+}}(%esp), %edx
-; X32-NEXT: shldl $2, %edx, %ecx
-; X32-NEXT: movl %ecx, 40(%eax)
-; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx
-; X32-NEXT: shldl $2, %ecx, %edx
-; X32-NEXT: movl %edx, 36(%eax)
-; X32-NEXT: shll $2, %ecx
-; X32-NEXT: movl %ecx, 32(%eax)
-; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx
-; X32-NEXT: shll $31, %ecx
-; X32-NEXT: movl %ecx, 28(%eax)
-; X32-NEXT: movl $0, 24(%eax)
-; X32-NEXT: movl $0, 20(%eax)
-; X32-NEXT: movl $0, 16(%eax)
-; X32-NEXT: movl $0, 12(%eax)
-; X32-NEXT: movl $0, 8(%eax)
-; X32-NEXT: movl $0, 4(%eax)
-; X32-NEXT: movl $0, (%eax)
-; X32-NEXT: retl $4
+define <2 x i256> @test_shl(<2 x i256> %In) nounwind {
+; X86-LABEL: test_shl:
+; X86: # %bb.0:
+; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT: movl {{[0-9]+}}(%esp), %edx
+; X86-NEXT: shldl $2, %ecx, %edx
+; X86-NEXT: movl %edx, 60(%eax)
+; X86-NEXT: movl {{[0-9]+}}(%esp), %edx
+; X86-NEXT: shldl $2, %edx, %ecx
+; X86-NEXT: movl %ecx, 56(%eax)
+; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT: shldl $2, %ecx, %edx
+; X86-NEXT: movl %edx, 52(%eax)
+; X86-NEXT: movl {{[0-9]+}}(%esp), %edx
+; X86-NEXT: shldl $2, %edx, %ecx
+; X86-NEXT: movl %ecx, 48(%eax)
+; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT: shldl $2, %ecx, %edx
+; X86-NEXT: movl %edx, 44(%eax)
+; X86-NEXT: movl {{[0-9]+}}(%esp), %edx
+; X86-NEXT: shldl $2, %edx, %ecx
+; X86-NEXT: movl %ecx, 40(%eax)
+; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT: shldl $2, %ecx, %edx
+; X86-NEXT: movl %edx, 36(%eax)
+; X86-NEXT: shll $2, %ecx
+; X86-NEXT: movl %ecx, 32(%eax)
+; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT: shll $31, %ecx
+; X86-NEXT: movl %ecx, 28(%eax)
+; X86-NEXT: movl $0, 24(%eax)
+; X86-NEXT: movl $0, 20(%eax)
+; X86-NEXT: movl $0, 16(%eax)
+; X86-NEXT: movl $0, 12(%eax)
+; X86-NEXT: movl $0, 8(%eax)
+; X86-NEXT: movl $0, 4(%eax)
+; X86-NEXT: movl $0, (%eax)
+; X86-NEXT: retl $4
;
; X64-LABEL: test_shl:
; X64: # %bb.0:
@@ -67,76 +67,62 @@ define <2 x i256> @test_shl(<2 x i256> %In) {
ret <2 x i256> %Out
}
-define <2 x i256> @test_srl(<2 x i256> %In) {
-; X32-LABEL: test_srl:
-; X32: # %bb.0:
-; X32-NEXT: pushl %ebp
-; X32-NEXT: .cfi_def_cfa_offset 8
-; X32-NEXT: pushl %ebx
-; X32-NEXT: .cfi_def_cfa_offset 12
-; X32-NEXT: pushl %edi
-; X32-NEXT: .cfi_def_cfa_offset 16
-; X32-NEXT: pushl %esi
-; X32-NEXT: .cfi_def_cfa_offset 20
-; X32-NEXT: subl $8, %esp
-; X32-NEXT: .cfi_def_cfa_offset 28
-; X32-NEXT: .cfi_offset %esi, -20
-; X32-NEXT: .cfi_offset %edi, -16
-; X32-NEXT: .cfi_offset %ebx, -12
-; X32-NEXT: .cfi_offset %ebp, -8
-; X32-NEXT: movl {{[0-9]+}}(%esp), %edx
-; X32-NEXT: movl {{[0-9]+}}(%esp), %ebp
-; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx
-; X32-NEXT: movl {{[0-9]+}}(%esp), %ebx
-; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
-; X32-NEXT: movl {{[0-9]+}}(%esp), %edi
-; X32-NEXT: movl %ebp, %esi
-; X32-NEXT: shldl $28, %edx, %esi
-; X32-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X32-NEXT: shldl $28, %ebx, %edx
-; X32-NEXT: movl %edx, (%esp) # 4-byte Spill
-; X32-NEXT: shldl $28, %ecx, %ebx
-; X32-NEXT: movl %ecx, %esi
-; X32-NEXT: shldl $28, %edi, %esi
-; X32-NEXT: shldl $28, %eax, %edi
-; X32-NEXT: movl %eax, %edx
-; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
-; X32-NEXT: shldl $28, %eax, %edx
-; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx
-; X32-NEXT: shrdl $4, %eax, %ecx
-; X32-NEXT: shrl $4, %ebp
-; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
-; X32-NEXT: movl %ebp, 60(%eax)
-; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebp # 4-byte Reload
-; X32-NEXT: movl %ebp, 56(%eax)
-; X32-NEXT: movl (%esp), %ebp # 4-byte Reload
-; X32-NEXT: movl %ebp, 52(%eax)
-; X32-NEXT: movl %ebx, 48(%eax)
-; X32-NEXT: movl %esi, 44(%eax)
-; X32-NEXT: movl %edi, 40(%eax)
-; X32-NEXT: movl %edx, 36(%eax)
-; X32-NEXT: movl %ecx, 32(%eax)
-; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx
-; X32-NEXT: shrl $31, %ecx
-; X32-NEXT: movl %ecx, (%eax)
-; X32-NEXT: movl $0, 28(%eax)
-; X32-NEXT: movl $0, 24(%eax)
-; X32-NEXT: movl $0, 20(%eax)
-; X32-NEXT: movl $0, 16(%eax)
-; X32-NEXT: movl $0, 12(%eax)
-; X32-NEXT: movl $0, 8(%eax)
-; X32-NEXT: movl $0, 4(%eax)
-; X32-NEXT: addl $8, %esp
-; X32-NEXT: .cfi_def_cfa_offset 20
-; X32-NEXT: popl %esi
-; X32-NEXT: .cfi_def_cfa_offset 16
-; X32-NEXT: popl %edi
-; X32-NEXT: .cfi_def_cfa_offset 12
-; X32-NEXT: popl %ebx
-; X32-NEXT: .cfi_def_cfa_offset 8
-; X32-NEXT: popl %ebp
-; X32-NEXT: .cfi_def_cfa_offset 4
-; X32-NEXT: retl $4
+define <2 x i256> @test_srl(<2 x i256> %In) nounwind {
+; X86-LABEL: test_srl:
+; X86: # %bb.0:
+; X86-NEXT: pushl %ebp
+; X86-NEXT: pushl %ebx
+; X86-NEXT: pushl %edi
+; X86-NEXT: pushl %esi
+; X86-NEXT: subl $8, %esp
+; X86-NEXT: movl {{[0-9]+}}(%esp), %edx
+; X86-NEXT: movl {{[0-9]+}}(%esp), %ebp
+; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT: movl {{[0-9]+}}(%esp), %ebx
+; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT: movl {{[0-9]+}}(%esp), %edi
+; X86-NEXT: movl %ebp, %esi
+; X86-NEXT: shldl $28, %edx, %esi
+; X86-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X86-NEXT: shldl $28, %ebx, %edx
+; X86-NEXT: movl %edx, (%esp) # 4-byte Spill
+; X86-NEXT: shldl $28, %ecx, %ebx
+; X86-NEXT: movl %ecx, %esi
+; X86-NEXT: shldl $28, %edi, %esi
+; X86-NEXT: shldl $28, %eax, %edi
+; X86-NEXT: movl %eax, %edx
+; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT: shldl $28, %eax, %edx
+; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT: shrdl $4, %eax, %ecx
+; X86-NEXT: shrl $4, %ebp
+; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT: movl %ebp, 60(%eax)
+; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebp # 4-byte Reload
+; X86-NEXT: movl %ebp, 56(%eax)
+; X86-NEXT: movl (%esp), %ebp # 4-byte Reload
+; X86-NEXT: movl %ebp, 52(%eax)
+; X86-NEXT: movl %ebx, 48(%eax)
+; X86-NEXT: movl %esi, 44(%eax)
+; X86-NEXT: movl %edi, 40(%eax)
+; X86-NEXT: movl %edx, 36(%eax)
+; X86-NEXT: movl %ecx, 32(%eax)
+; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT: shrl $31, %ecx
+; X86-NEXT: movl %ecx, (%eax)
+; X86-NEXT: movl $0, 28(%eax)
+; X86-NEXT: movl $0, 24(%eax)
+; X86-NEXT: movl $0, 20(%eax)
+; X86-NEXT: movl $0, 16(%eax)
+; X86-NEXT: movl $0, 12(%eax)
+; X86-NEXT: movl $0, 8(%eax)
+; X86-NEXT: movl $0, 4(%eax)
+; X86-NEXT: addl $8, %esp
+; X86-NEXT: popl %esi
+; X86-NEXT: popl %edi
+; X86-NEXT: popl %ebx
+; X86-NEXT: popl %ebp
+; X86-NEXT: retl $4
;
; X64-LABEL: test_srl:
; X64: # %bb.0:
@@ -163,76 +149,62 @@ define <2 x i256> @test_srl(<2 x i256> %In) {
ret <2 x i256> %Out
}
-define <2 x i256> @test_sra(<2 x i256> %In) {
-; X32-LABEL: test_sra:
-; X32: # %bb.0:
-; X32-NEXT: pushl %ebp
-; X32-NEXT: .cfi_def_cfa_offset 8
-; X32-NEXT: pushl %ebx
-; X32-NEXT: .cfi_def_cfa_offset 12
-; X32-NEXT: pushl %edi
-; X32-NEXT: .cfi_def_cfa_offset 16
-; X32-NEXT: pushl %esi
-; X32-NEXT: .cfi_def_cfa_offset 20
-; X32-NEXT: subl $8, %esp
-; X32-NEXT: .cfi_def_cfa_offset 28
-; X32-NEXT: .cfi_offset %esi, -20
-; X32-NEXT: .cfi_offset %edi, -16
-; X32-NEXT: .cfi_offset %ebx, -12
-; X32-NEXT: .cfi_offset %ebp, -8
-; X32-NEXT: movl {{[0-9]+}}(%esp), %edx
-; X32-NEXT: movl {{[0-9]+}}(%esp), %ebp
-; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx
-; X32-NEXT: movl {{[0-9]+}}(%esp), %ebx
-; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
-; X32-NEXT: movl {{[0-9]+}}(%esp), %edi
-; X32-NEXT: movl %ebp, %esi
-; X32-NEXT: shldl $26, %edx, %esi
-; X32-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X32-NEXT: shldl $26, %ebx, %edx
-; X32-NEXT: movl %edx, (%esp) # 4-byte Spill
-; X32-NEXT: shldl $26, %ecx, %ebx
-; X32-NEXT: movl %ecx, %esi
-; X32-NEXT: shldl $26, %edi, %esi
-; X32-NEXT: shldl $26, %eax, %edi
-; X32-NEXT: movl %eax, %edx
-; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
-; X32-NEXT: shldl $26, %eax, %edx
-; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx
-; X32-NEXT: shrdl $6, %eax, %ecx
-; X32-NEXT: sarl $6, %ebp
-; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
-; X32-NEXT: movl %ebp, 60(%eax)
-; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebp # 4-byte Reload
-; X32-NEXT: movl %ebp, 56(%eax)
-; X32-NEXT: movl (%esp), %ebp # 4-byte Reload
-; X32-NEXT: movl %ebp, 52(%eax)
-; X32-NEXT: movl %ebx, 48(%eax)
-; X32-NEXT: movl %esi, 44(%eax)
-; X32-NEXT: movl %edi, 40(%eax)
-; X32-NEXT: movl %edx, 36(%eax)
-; X32-NEXT: movl %ecx, 32(%eax)
-; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx
-; X32-NEXT: sarl $31, %ecx
-; X32-NEXT: movl %ecx, 28(%eax)
-; X32-NEXT: movl %ecx, 24(%eax)
-; X32-NEXT: movl %ecx, 20(%eax)
-; X32-NEXT: movl %ecx, 16(%eax)
-; X32-NEXT: movl %ecx, 12(%eax)
-; X32-NEXT: movl %ecx, 8(%eax)
-; X32-NEXT: movl %ecx, 4(%eax)
-; X32-NEXT: movl %ecx, (%eax)
-; X32-NEXT: addl $8, %esp
-; X32-NEXT: .cfi_def_cfa_offset 20
-; X32-NEXT: popl %esi
-; X32-NEXT: .cfi_def_cfa_offset 16
-; X32-NEXT: popl %edi
-; X32-NEXT: .cfi_def_cfa_offset 12
-; X32-NEXT: popl %ebx
-; X32-NEXT: .cfi_def_cfa_offset 8
-; X32-NEXT: popl %ebp
-; X32-NEXT: .cfi_def_cfa_offset 4
-; X32-NEXT: retl $4
+define <2 x i256> @test_sra(<2 x i256> %In) nounwind {
+; X86-LABEL: test_sra:
+; X86: # %bb.0:
+; X86-NEXT: pushl %ebp
+; X86-NEXT: pushl %ebx
+; X86-NEXT: pushl %edi
+; X86-NEXT: pushl %esi
+; X86-NEXT: subl $8, %esp
+; X86-NEXT: movl {{[0-9]+}}(%esp), %edx
+; X86-NEXT: movl {{[0-9]+}}(%esp), %ebp
+; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT: movl {{[0-9]+}}(%esp), %ebx
+; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT: movl {{[0-9]+}}(%esp), %edi
+; X86-NEXT: movl %ebp, %esi
+; X86-NEXT: shldl $26, %edx, %esi
+; X86-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X86-NEXT: shldl $26, %ebx, %edx
+; X86-NEXT: movl %edx, (%esp) # 4-byte Spill
+; X86-NEXT: shldl $26, %ecx, %ebx
+; X86-NEXT: movl %ecx, %esi
+; X86-NEXT: shldl $26, %edi, %esi
+; X86-NEXT: shldl $26, %eax, %edi
+; X86-NEXT: movl %eax, %edx
+; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT: shldl $26, %eax, %edx
+; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT: shrdl $6, %eax, %ecx
+; X86-NEXT: sarl $6, %ebp
+; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT: movl %ebp, 60(%eax)
+; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebp # 4-byte Reload
+; X86-NEXT: movl %ebp, 56(%eax)
+; X86-NEXT: movl (%esp), %ebp # 4-byte Reload
+; X86-NEXT: movl %ebp, 52(%eax)
+; X86-NEXT: movl %ebx, 48(%eax)
+; X86-NEXT: movl %esi, 44(%eax)
+; X86-NEXT: movl %edi, 40(%eax)
+; X86-NEXT: movl %edx, 36(%eax)
+; X86-NEXT: movl %ecx, 32(%eax)
+; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT: sarl $31, %ecx
+; X86-NEXT: movl %ecx, 28(%eax)
+; X86-NEXT: movl %ecx, 24(%eax)
+; X86-NEXT: movl %ecx, 20(%eax)
+; X86-NEXT: movl %ecx, 16(%eax)
+; X86-NEXT: movl %ecx, 12(%eax)
+; X86-NEXT: movl %ecx, 8(%eax)
+; X86-NEXT: movl %ecx, 4(%eax)
+; X86-NEXT: movl %ecx, (%eax)
+; X86-NEXT: addl $8, %esp
+; X86-NEXT: popl %esi
+; X86-NEXT: popl %edi
+; X86-NEXT: popl %ebx
+; X86-NEXT: popl %ebp
+; X86-NEXT: retl $4
;
; X64-LABEL: test_sra:
; X64: # %bb.0:
diff --git a/llvm/test/CodeGen/X86/movdir-intrinsic-x86.ll b/llvm/test/CodeGen/X86/movdir-intrinsic-x86.ll
index 4d03510ad5d4..023dfb110502 100644
--- a/llvm/test/CodeGen/X86/movdir-intrinsic-x86.ll
+++ b/llvm/test/CodeGen/X86/movdir-intrinsic-x86.ll
@@ -18,7 +18,7 @@ define void @test_movdiri(ptr %p, i32 %v) {
;
; EGPR-LABEL: test_movdiri:
; EGPR: # %bb.0: # %entry
-; EGPR-NEXT: movdiri %esi, (%rdi) # encoding: [0x62,0xf4,0x7c,0x08,0xf9,0x37]
+; EGPR-NEXT: movdiri %esi, (%rdi) # EVEX TO LEGACY Compression encoding: [0x0f,0x38,0xf9,0x37]
; EGPR-NEXT: retq # encoding: [0xc3]
entry:
call void @llvm.x86.directstore32(ptr %p, i32 %v)
@@ -42,7 +42,7 @@ define void @test_movdir64b(ptr %dst, ptr %src) {
;
; EGPR-LABEL: test_movdir64b:
; EGPR: # %bb.0: # %entry
-; EGPR-NEXT: movdir64b (%rsi), %rdi # encoding: [0x62,0xf4,0x7d,0x08,0xf8,0x3e]
+; EGPR-NEXT: movdir64b (%rsi), %rdi # EVEX TO LEGACY Compression encoding: [0x66,0x0f,0x38,0xf8,0x3e]
; EGPR-NEXT: retq # encoding: [0xc3]
entry:
call void @llvm.x86.movdir64b(ptr %dst, ptr %src)
diff --git a/llvm/test/CodeGen/X86/movdir-intrinsic-x86_64.ll b/llvm/test/CodeGen/X86/movdir-intrinsic-x86_64.ll
index ddd44f6d73d5..e3736e29a582 100644
--- a/llvm/test/CodeGen/X86/movdir-intrinsic-x86_64.ll
+++ b/llvm/test/CodeGen/X86/movdir-intrinsic-x86_64.ll
@@ -10,7 +10,7 @@ define void @test_movdiri(ptr %p, i64 %v) {
;
; EGPR-LABEL: test_movdiri:
; EGPR: # %bb.0: # %entry
-; EGPR-NEXT: movdiri %rsi, (%rdi) # encoding: [0x62,0xf4,0xfc,0x08,0xf9,0x37]
+; EGPR-NEXT: movdiri %rsi, (%rdi) # EVEX TO LEGACY Compression encoding: [0x48,0x0f,0x38,0xf9,0x37]
; EGPR-NEXT: retq # encoding: [0xc3]
entry:
call void @llvm.x86.directstore64(ptr %p, i64 %v)
diff --git a/llvm/test/CodeGen/X86/opt-pipeline.ll b/llvm/test/CodeGen/X86/opt-pipeline.ll
index fb8d2335b341..6f2bba84a6ec 100644
--- a/llvm/test/CodeGen/X86/opt-pipeline.ll
+++ b/llvm/test/CodeGen/X86/opt-pipeline.ll
@@ -205,7 +205,7 @@
; CHECK-NEXT: X86 LEA Fixup
; CHECK-NEXT: X86 Fixup Inst Tuning
; CHECK-NEXT: X86 Fixup Vector Constants
-; CHECK-NEXT: Compressing EVEX instrs to VEX encoding when possible
+; CHECK-NEXT: Compressing EVEX instrs when possible
; CHECK-NEXT: X86 Discriminate Memory Operands
; CHECK-NEXT: X86 Insert Cache Prefetches
; CHECK-NEXT: X86 insert wait instruction
diff --git a/llvm/test/CodeGen/X86/sha.ll b/llvm/test/CodeGen/X86/sha.ll
index d8fa354a3913..65222ba74023 100644
--- a/llvm/test/CodeGen/X86/sha.ll
+++ b/llvm/test/CodeGen/X86/sha.ll
@@ -18,7 +18,7 @@ define <4 x i32> @test_sha1rnds4rr(<4 x i32> %a, <4 x i32> %b) nounwind uwtable
;
; EGPR-LABEL: test_sha1rnds4rr:
; EGPR: # %bb.0: # %entry
-; EGPR-NEXT: sha1rnds4 $3, %xmm1, %xmm0 # encoding: [0x62,0xf4,0x7c,0x08,0xd4,0xc1,0x03]
+; EGPR-NEXT: sha1rnds4 $3, %xmm1, %xmm0 # EVEX TO LEGACY Compression encoding: [0x0f,0x3a,0xcc,0xc1,0x03]
; EGPR-NEXT: retq # encoding: [0xc3]
entry:
%0 = tail call <4 x i32> @llvm.x86.sha1rnds4(<4 x i32> %a, <4 x i32> %b, i8 3)
@@ -38,7 +38,7 @@ define <4 x i32> @test_sha1rnds4rm(<4 x i32> %a, ptr %b) nounwind uwtable {
;
; EGPR-LABEL: test_sha1rnds4rm:
; EGPR: # %bb.0: # %entry
-; EGPR-NEXT: sha1rnds4 $3, (%rdi), %xmm0 # encoding: [0x62,0xf4,0x7c,0x08,0xd4,0x07,0x03]
+; EGPR-NEXT: sha1rnds4 $3, (%rdi), %xmm0 # EVEX TO LEGACY Compression encoding: [0x0f,0x3a,0xcc,0x07,0x03]
; EGPR-NEXT: retq # encoding: [0xc3]
entry:
%0 = load <4 x i32>, ptr %b
@@ -61,7 +61,7 @@ define <4 x i32> @test_sha1nexterr(<4 x i32> %a, <4 x i32> %b) nounwind uwtable
;
; EGPR-LABEL: test_sha1nexterr:
; EGPR: # %bb.0: # %entry
-; EGPR-NEXT: sha1nexte %xmm1, %xmm0 # encoding: [0x62,0xf4,0x7c,0x08,0xd8,0xc1]
+; EGPR-NEXT: sha1nexte %xmm1, %xmm0 # EVEX TO LEGACY Compression encoding: [0x0f,0x38,0xc8,0xc1]
; EGPR-NEXT: retq # encoding: [0xc3]
entry:
%0 = tail call <4 x i32> @llvm.x86.sha1nexte(<4 x i32> %a, <4 x i32> %b)
@@ -81,7 +81,7 @@ define <4 x i32> @test_sha1nexterm(<4 x i32> %a, ptr %b) nounwind uwtable {
;
; EGPR-LABEL: test_sha1nexterm:
; EGPR: # %bb.0: # %entry
-; EGPR-NEXT: sha1nexte (%rdi), %xmm0 # encoding: [0x62,0xf4,0x7c,0x08,0xd8,0x07]
+; EGPR-NEXT: sha1nexte (%rdi), %xmm0 # EVEX TO LEGACY Compression encoding: [0x0f,0x38,0xc8,0x07]
; EGPR-NEXT: retq # encoding: [0xc3]
entry:
%0 = load <4 x i32>, ptr %b
@@ -104,7 +104,7 @@ define <4 x i32> @test_sha1msg1rr(<4 x i32> %a, <4 x i32> %b) nounwind uwtable {
;
; EGPR-LABEL: test_sha1msg1rr:
; EGPR: # %bb.0: # %entry
-; EGPR-NEXT: sha1msg1 %xmm1, %xmm0 # encoding: [0x62,0xf4,0x7c,0x08,0xd9,0xc1]
+; EGPR-NEXT: sha1msg1 %xmm1, %xmm0 # EVEX TO LEGACY Compression encoding: [0x0f,0x38,0xc9,0xc1]
; EGPR-NEXT: retq # encoding: [0xc3]
entry:
%0 = tail call <4 x i32> @llvm.x86.sha1msg1(<4 x i32> %a, <4 x i32> %b)
@@ -124,7 +124,7 @@ define <4 x i32> @test_sha1msg1rm(<4 x i32> %a, ptr %b) nounwind uwtable {
;
; EGPR-LABEL: test_sha1msg1rm:
; EGPR: # %bb.0: # %entry
-; EGPR-NEXT: sha1msg1 (%rdi), %xmm0 # encoding: [0x62,0xf4,0x7c,0x08,0xd9,0x07]
+; EGPR-NEXT: sha1msg1 (%rdi), %xmm0 # EVEX TO LEGACY Compression encoding: [0x0f,0x38,0xc9,0x07]
; EGPR-NEXT: retq # encoding: [0xc3]
entry:
%0 = load <4 x i32>, ptr %b
@@ -147,7 +147,7 @@ define <4 x i32> @test_sha1msg2rr(<4 x i32> %a, <4 x i32> %b) nounwind uwtable {
;
; EGPR-LABEL: test_sha1msg2rr:
; EGPR: # %bb.0: # %entry
-; EGPR-NEXT: sha1msg2 %xmm1, %xmm0 # encoding: [0x62,0xf4,0x7c,0x08,0xda,0xc1]
+; EGPR-NEXT: sha1msg2 %xmm1, %xmm0 # EVEX TO LEGACY Compression encoding: [0x0f,0x38,0xca,0xc1]
; EGPR-NEXT: retq # encoding: [0xc3]
entry:
%0 = tail call <4 x i32> @llvm.x86.sha1msg2(<4 x i32> %a, <4 x i32> %b)
@@ -167,7 +167,7 @@ define <4 x i32> @test_sha1msg2rm(<4 x i32> %a, ptr %b) nounwind uwtable {
;
; EGPR-LABEL: test_sha1msg2rm:
; EGPR: # %bb.0: # %entry
-; EGPR-NEXT: sha1msg2 (%rdi), %xmm0 # encoding: [0x62,0xf4,0x7c,0x08,0xda,0x07]
+; EGPR-NEXT: sha1msg2 (%rdi), %xmm0 # EVEX TO LEGACY Compression encoding: [0x0f,0x38,0xca,0x07]
; EGPR-NEXT: retq # encoding: [0xc3]
entry:
%0 = load <4 x i32>, ptr %b
@@ -198,7 +198,7 @@ define <4 x i32> @test_sha256rnds2rr(<4 x i32> %a, <4 x i32> %b, <4 x i32> %c) n
; EGPR: # %bb.0: # %entry
; EGPR-NEXT: movaps %xmm0, %xmm3 # encoding: [0x0f,0x28,0xd8]
; EGPR-NEXT: movaps %xmm2, %xmm0 # encoding: [0x0f,0x28,0xc2]
-; EGPR-NEXT: sha256rnds2 %xmm0, %xmm1, %xmm3 # encoding: [0x62,0xf4,0x7c,0x08,0xdb,0xd9]
+; EGPR-NEXT: sha256rnds2 %xmm0, %xmm1, %xmm3 # EVEX TO LEGACY Compression encoding: [0x0f,0x38,0xcb,0xd9]
; EGPR-NEXT: movaps %xmm3, %xmm0 # encoding: [0x0f,0x28,0xc3]
; EGPR-NEXT: retq # encoding: [0xc3]
entry:
@@ -227,7 +227,7 @@ define <4 x i32> @test_sha256rnds2rm(<4 x i32> %a, ptr %b, <4 x i32> %c) nounwin
; EGPR: # %bb.0: # %entry
; EGPR-NEXT: movaps %xmm0, %xmm2 # encoding: [0x0f,0x28,0xd0]
; EGPR-NEXT: movaps %xmm1, %xmm0 # encoding: [0x0f,0x28,0xc1]
-; EGPR-NEXT: sha256rnds2 %xmm0, (%rdi), %xmm2 # encoding: [0x62,0xf4,0x7c,0x08,0xdb,0x17]
+; EGPR-NEXT: sha256rnds2 %xmm0, (%rdi), %xmm2 # EVEX TO LEGACY Compression encoding: [0x0f,0x38,0xcb,0x17]
; EGPR-NEXT: movaps %xmm2, %xmm0 # encoding: [0x0f,0x28,0xc2]
; EGPR-NEXT: retq # encoding: [0xc3]
entry:
@@ -251,7 +251,7 @@ define <4 x i32> @test_sha256msg1rr(<4 x i32> %a, <4 x i32> %b) nounwind uwtable
;
; EGPR-LABEL: test_sha256msg1rr:
; EGPR: # %bb.0: # %entry
-; EGPR-NEXT: sha256msg1 %xmm1, %xmm0 # encoding: [0x62,0xf4,0x7c,0x08,0xdc,0xc1]
+; EGPR-NEXT: sha256msg1 %xmm1, %xmm0 # EVEX TO LEGACY Compression encoding: [0x0f,0x38,0xcc,0xc1]
; EGPR-NEXT: retq # encoding: [0xc3]
entry:
%0 = tail call <4 x i32> @llvm.x86.sha256msg1(<4 x i32> %a, <4 x i32> %b)
@@ -271,7 +271,7 @@ define <4 x i32> @test_sha256msg1rm(<4 x i32> %a, ptr %b) nounwind uwtable {
;
; EGPR-LABEL: test_sha256msg1rm:
; EGPR: # %bb.0: # %entry
-; EGPR-NEXT: sha256msg1 (%rdi), %xmm0 # encoding: [0x62,0xf4,0x7c,0x08,0xdc,0x07]
+; EGPR-NEXT: sha256msg1 (%rdi), %xmm0 # EVEX TO LEGACY Compression encoding: [0x0f,0x38,0xcc,0x07]
; EGPR-NEXT: retq # encoding: [0xc3]
entry:
%0 = load <4 x i32>, ptr %b
@@ -294,7 +294,7 @@ define <4 x i32> @test_sha256msg2rr(<4 x i32> %a, <4 x i32> %b) nounwind uwtable
;
; EGPR-LABEL: test_sha256msg2rr:
; EGPR: # %bb.0: # %entry
-; EGPR-NEXT: sha256msg2 %xmm1, %xmm0 # encoding: [0x62,0xf4,0x7c,0x08,0xdd,0xc1]
+; EGPR-NEXT: sha256msg2 %xmm1, %xmm0 # EVEX TO LEGACY Compression encoding: [0x0f,0x38,0xcd,0xc1]
; EGPR-NEXT: retq # encoding: [0xc3]
entry:
%0 = tail call <4 x i32> @llvm.x86.sha256msg2(<4 x i32> %a, <4 x i32> %b)
@@ -314,7 +314,7 @@ define <4 x i32> @test_sha256msg2rm(<4 x i32> %a, ptr %b) nounwind uwtable {
;
; EGPR-LABEL: test_sha256msg2rm:
; EGPR: # %bb.0: # %entry
-; EGPR-NEXT: sha256msg2 (%rdi), %xmm0 # encoding: [0x62,0xf4,0x7c,0x08,0xdd,0x07]
+; EGPR-NEXT: sha256msg2 (%rdi), %xmm0 # EVEX TO LEGACY Compression encoding: [0x0f,0x38,0xcd,0x07]
; EGPR-NEXT: retq # encoding: [0xc3]
entry:
%0 = load <4 x i32>, ptr %b
@@ -338,7 +338,7 @@ define <8 x i32> @test_sha1rnds4_zero_extend(<4 x i32> %a, ptr %b) nounwind uwta
;
; EGPR-LABEL: test_sha1rnds4_zero_extend:
; EGPR: # %bb.0: # %entry
-; EGPR-NEXT: sha1rnds4 $3, (%rdi), %xmm0 # encoding: [0x62,0xf4,0x7c,0x08,0xd4,0x07,0x03]
+; EGPR-NEXT: sha1rnds4 $3, (%rdi), %xmm0 # EVEX TO LEGACY Compression encoding: [0x0f,0x3a,0xcc,0x07,0x03]
; EGPR-NEXT: xorps %xmm1, %xmm1 # encoding: [0x0f,0x57,0xc9]
; EGPR-NEXT: retq # encoding: [0xc3]
entry:
diff --git a/llvm/test/CodeGen/X86/test-shrink-bug.ll b/llvm/test/CodeGen/X86/test-shrink-bug.ll
index f05459f751bc..51a00d211421 100644
--- a/llvm/test/CodeGen/X86/test-shrink-bug.ll
+++ b/llvm/test/CodeGen/X86/test-shrink-bug.ll
@@ -68,8 +68,8 @@ define dso_local void @fail(i16 %a, <2 x i8> %b) {
; CHECK-X64-NEXT: je .LBB1_3
; CHECK-X64-NEXT: # %bb.1:
; CHECK-X64-NEXT: pcmpeqb {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
-; CHECK-X64-NEXT: pslldq {{.*#+}} xmm0 = zero,zero,zero,zero,zero,zero,zero,xmm0[0,1,2,3,4,5,6,7,8]
-; CHECK-X64-NEXT: pextrw $4, %xmm0, %eax
+; CHECK-X64-NEXT: pslld $8, %xmm0
+; CHECK-X64-NEXT: pextrw $1, %xmm0, %eax
; CHECK-X64-NEXT: testb $1, %al
; CHECK-X64-NEXT: jne .LBB1_3
; CHECK-X64-NEXT: # %bb.2: # %no
diff --git a/llvm/test/CodeGen/X86/vec_extract-avx.ll b/llvm/test/CodeGen/X86/vec_extract-avx.ll
index 6ca4e73d2f98..341a703a21bd 100644
--- a/llvm/test/CodeGen/X86/vec_extract-avx.ll
+++ b/llvm/test/CodeGen/X86/vec_extract-avx.ll
@@ -1,5 +1,5 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc < %s -mtriple=i686-unknown-unknown -mattr=+avx | FileCheck %s --check-prefix=X32
+; RUN: llc < %s -mtriple=i686-unknown-unknown -mattr=+avx | FileCheck %s --check-prefix=X86
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx | FileCheck %s --check-prefix=X64
; When extracting multiple consecutive elements from a larger
@@ -9,12 +9,12 @@
; Extracting the low elements only requires using the right kind of store.
define void @low_v8f32_to_v4f32(<8 x float> %v, ptr %ptr) {
-; X32-LABEL: low_v8f32_to_v4f32:
-; X32: # %bb.0:
-; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
-; X32-NEXT: vmovaps %xmm0, (%eax)
-; X32-NEXT: vzeroupper
-; X32-NEXT: retl
+; X86-LABEL: low_v8f32_to_v4f32:
+; X86: # %bb.0:
+; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT: vmovaps %xmm0, (%eax)
+; X86-NEXT: vzeroupper
+; X86-NEXT: retl
;
; X64-LABEL: low_v8f32_to_v4f32:
; X64: # %bb.0:
@@ -35,12 +35,12 @@ define void @low_v8f32_to_v4f32(<8 x float> %v, ptr %ptr) {
; Extracting the high elements requires just one AVX instruction.
define void @high_v8f32_to_v4f32(<8 x float> %v, ptr %ptr) {
-; X32-LABEL: high_v8f32_to_v4f32:
-; X32: # %bb.0:
-; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
-; X32-NEXT: vextractf128 $1, %ymm0, (%eax)
-; X32-NEXT: vzeroupper
-; X32-NEXT: retl
+; X86-LABEL: high_v8f32_to_v4f32:
+; X86: # %bb.0:
+; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT: vextractf128 $1, %ymm0, (%eax)
+; X86-NEXT: vzeroupper
+; X86-NEXT: retl
;
; X64-LABEL: high_v8f32_to_v4f32:
; X64: # %bb.0:
@@ -63,12 +63,12 @@ define void @high_v8f32_to_v4f32(<8 x float> %v, ptr %ptr) {
; if we were actually using the vector in this function and
; have AVX2, we should generate vextracti128 (the int version).
define void @high_v8i32_to_v4i32(<8 x i32> %v, ptr %ptr) {
-; X32-LABEL: high_v8i32_to_v4i32:
-; X32: # %bb.0:
-; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
-; X32-NEXT: vextractf128 $1, %ymm0, (%eax)
-; X32-NEXT: vzeroupper
-; X32-NEXT: retl
+; X86-LABEL: high_v8i32_to_v4i32:
+; X86: # %bb.0:
+; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT: vextractf128 $1, %ymm0, (%eax)
+; X86-NEXT: vzeroupper
+; X86-NEXT: retl
;
; X64-LABEL: high_v8i32_to_v4i32:
; X64: # %bb.0:
@@ -89,12 +89,12 @@ define void @high_v8i32_to_v4i32(<8 x i32> %v, ptr %ptr) {
; Make sure that element size doesn't alter the codegen.
define void @high_v4f64_to_v2f64(<4 x double> %v, ptr %ptr) {
-; X32-LABEL: high_v4f64_to_v2f64:
-; X32: # %bb.0:
-; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
-; X32-NEXT: vextractf128 $1, %ymm0, (%eax)
-; X32-NEXT: vzeroupper
-; X32-NEXT: retl
+; X86-LABEL: high_v4f64_to_v2f64:
+; X86: # %bb.0:
+; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT: vextractf128 $1, %ymm0, (%eax)
+; X86-NEXT: vzeroupper
+; X86-NEXT: retl
;
; X64-LABEL: high_v4f64_to_v2f64:
; X64: # %bb.0:
@@ -113,16 +113,16 @@ define void @high_v4f64_to_v2f64(<4 x double> %v, ptr %ptr) {
; FIXME - Ideally these should just call VMOVD/VMOVQ/VMOVSS/VMOVSD
define void @legal_vzmovl_2i32_8i32(ptr %in, ptr %out) {
-; X32-LABEL: legal_vzmovl_2i32_8i32:
-; X32: # %bb.0:
-; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
-; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx
-; X32-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
-; X32-NEXT: vxorps %xmm1, %xmm1, %xmm1
-; X32-NEXT: vblendps {{.*#+}} xmm0 = xmm0[0],xmm1[1,2,3]
-; X32-NEXT: vmovaps %ymm0, (%eax)
-; X32-NEXT: vzeroupper
-; X32-NEXT: retl
+; X86-LABEL: legal_vzmovl_2i32_8i32:
+; X86: # %bb.0:
+; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
+; X86-NEXT: vxorps %xmm1, %xmm1, %xmm1
+; X86-NEXT: vblendps {{.*#+}} xmm0 = xmm0[0],xmm1[1,2,3]
+; X86-NEXT: vmovaps %ymm0, (%eax)
+; X86-NEXT: vzeroupper
+; X86-NEXT: retl
;
; X64-LABEL: legal_vzmovl_2i32_8i32:
; X64: # %bb.0:
@@ -140,14 +140,14 @@ define void @legal_vzmovl_2i32_8i32(ptr %in, ptr %out) {
}
define void @legal_vzmovl_2i64_4i64(ptr %in, ptr %out) {
-; X32-LABEL: legal_vzmovl_2i64_4i64:
-; X32: # %bb.0:
-; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
-; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx
-; X32-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
-; X32-NEXT: vmovaps %ymm0, (%eax)
-; X32-NEXT: vzeroupper
-; X32-NEXT: retl
+; X86-LABEL: legal_vzmovl_2i64_4i64:
+; X86: # %bb.0:
+; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
+; X86-NEXT: vmovaps %ymm0, (%eax)
+; X86-NEXT: vzeroupper
+; X86-NEXT: retl
;
; X64-LABEL: legal_vzmovl_2i64_4i64:
; X64: # %bb.0:
@@ -163,16 +163,16 @@ define void @legal_vzmovl_2i64_4i64(ptr %in, ptr %out) {
}
define void @legal_vzmovl_2f32_8f32(ptr %in, ptr %out) {
-; X32-LABEL: legal_vzmovl_2f32_8f32:
-; X32: # %bb.0:
-; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
-; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx
-; X32-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
-; X32-NEXT: vxorps %xmm1, %xmm1, %xmm1
-; X32-NEXT: vblendps {{.*#+}} xmm0 = xmm0[0],xmm1[1,2,3]
-; X32-NEXT: vmovaps %ymm0, (%eax)
-; X32-NEXT: vzeroupper
-; X32-NEXT: retl
+; X86-LABEL: legal_vzmovl_2f32_8f32:
+; X86: # %bb.0:
+; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
+; X86-NEXT: vxorps %xmm1, %xmm1, %xmm1
+; X86-NEXT: vblendps {{.*#+}} xmm0 = xmm0[0],xmm1[1,2,3]
+; X86-NEXT: vmovaps %ymm0, (%eax)
+; X86-NEXT: vzeroupper
+; X86-NEXT: retl
;
; X64-LABEL: legal_vzmovl_2f32_8f32:
; X64: # %bb.0:
@@ -190,14 +190,14 @@ define void @legal_vzmovl_2f32_8f32(ptr %in, ptr %out) {
}
define void @legal_vzmovl_2f64_4f64(ptr %in, ptr %out) {
-; X32-LABEL: legal_vzmovl_2f64_4f64:
-; X32: # %bb.0:
-; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
-; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx
-; X32-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
-; X32-NEXT: vmovaps %ymm0, (%eax)
-; X32-NEXT: vzeroupper
-; X32-NEXT: retl
+; X86-LABEL: legal_vzmovl_2f64_4f64:
+; X86: # %bb.0:
+; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
+; X86-NEXT: vmovaps %ymm0, (%eax)
+; X86-NEXT: vzeroupper
+; X86-NEXT: retl
;
; X64-LABEL: legal_vzmovl_2f64_4f64:
; X64: # %bb.0:
diff --git a/llvm/test/CodeGen/X86/vec_extract-mmx.ll b/llvm/test/CodeGen/X86/vec_extract-mmx.ll
index d9afc6f45931..672b4591316c 100644
--- a/llvm/test/CodeGen/X86/vec_extract-mmx.ll
+++ b/llvm/test/CodeGen/X86/vec_extract-mmx.ll
@@ -1,15 +1,15 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc < %s -mtriple=i686-unknown -mattr=+mmx,+sse2 | FileCheck %s --check-prefix=X32
+; RUN: llc < %s -mtriple=i686-unknown -mattr=+mmx,+sse2 | FileCheck %s --check-prefix=X86
; RUN: llc < %s -mtriple=x86_64-unknown -mattr=+mmx,+sse2 | FileCheck %s --check-prefix=X64
define i32 @test0(ptr %v4) nounwind {
-; X32-LABEL: test0:
-; X32: # %bb.0: # %entry
-; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
-; X32-NEXT: pshufw $238, (%eax), %mm0 # mm0 = mem[2,3,2,3]
-; X32-NEXT: movd %mm0, %eax
-; X32-NEXT: addl $32, %eax
-; X32-NEXT: retl
+; X86-LABEL: test0:
+; X86: # %bb.0: # %entry
+; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT: pshufw $238, (%eax), %mm0 # mm0 = mem[2,3,2,3]
+; X86-NEXT: movd %mm0, %eax
+; X86-NEXT: addl $32, %eax
+; X86-NEXT: retl
;
; X64-LABEL: test0:
; X64: # %bb.0: # %entry
@@ -32,14 +32,14 @@ entry:
}
define i32 @test1(ptr nocapture readonly %ptr) nounwind {
-; X32-LABEL: test1:
-; X32: # %bb.0: # %entry
-; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
-; X32-NEXT: movd (%eax), %mm0
-; X32-NEXT: pshufw $232, %mm0, %mm0 # mm0 = mm0[0,2,2,3]
-; X32-NEXT: movd %mm0, %eax
-; X32-NEXT: emms
-; X32-NEXT: retl
+; X86-LABEL: test1:
+; X86: # %bb.0: # %entry
+; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT: movd (%eax), %mm0
+; X86-NEXT: pshufw $232, %mm0, %mm0 # mm0 = mm0[0,2,2,3]
+; X86-NEXT: movd %mm0, %eax
+; X86-NEXT: emms
+; X86-NEXT: retl
;
; X64-LABEL: test1:
; X64: # %bb.0: # %entry
@@ -67,13 +67,13 @@ entry:
}
define i32 @test2(ptr nocapture readonly %ptr) nounwind {
-; X32-LABEL: test2:
-; X32: # %bb.0: # %entry
-; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
-; X32-NEXT: pshufw $232, (%eax), %mm0 # mm0 = mem[0,2,2,3]
-; X32-NEXT: movd %mm0, %eax
-; X32-NEXT: emms
-; X32-NEXT: retl
+; X86-LABEL: test2:
+; X86: # %bb.0: # %entry
+; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT: pshufw $232, (%eax), %mm0 # mm0 = mem[0,2,2,3]
+; X86-NEXT: movd %mm0, %eax
+; X86-NEXT: emms
+; X86-NEXT: retl
;
; X64-LABEL: test2:
; X64: # %bb.0: # %entry
@@ -94,10 +94,10 @@ entry:
}
define i32 @test3(x86_mmx %a) nounwind {
-; X32-LABEL: test3:
-; X32: # %bb.0:
-; X32-NEXT: movd %mm0, %eax
-; X32-NEXT: retl
+; X86-LABEL: test3:
+; X86: # %bb.0:
+; X86-NEXT: movd %mm0, %eax
+; X86-NEXT: retl
;
; X64-LABEL: test3:
; X64: # %bb.0:
@@ -110,12 +110,12 @@ define i32 @test3(x86_mmx %a) nounwind {
; Verify we don't muck with extractelts from the upper lane.
define i32 @test4(x86_mmx %a) nounwind {
-; X32-LABEL: test4:
-; X32: # %bb.0:
-; X32-NEXT: movq2dq %mm0, %xmm0
-; X32-NEXT: pshufd {{.*#+}} xmm0 = xmm0[1,1,1,1]
-; X32-NEXT: movd %xmm0, %eax
-; X32-NEXT: retl
+; X86-LABEL: test4:
+; X86: # %bb.0:
+; X86-NEXT: movq2dq %mm0, %xmm0
+; X86-NEXT: pshufd {{.*#+}} xmm0 = xmm0[1,1,1,1]
+; X86-NEXT: movd %xmm0, %eax
+; X86-NEXT: retl
;
; X64-LABEL: test4:
; X64: # %bb.0:
diff --git a/llvm/test/CodeGen/X86/vec_extract-sse4.ll b/llvm/test/CodeGen/X86/vec_extract-sse4.ll
index ea444d3a00af..1f384861b373 100644
--- a/llvm/test/CodeGen/X86/vec_extract-sse4.ll
+++ b/llvm/test/CodeGen/X86/vec_extract-sse4.ll
@@ -1,15 +1,15 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc < %s -mtriple=i686-unknown -mattr=+sse4.1 | FileCheck %s --check-prefix=X32
+; RUN: llc < %s -mtriple=i686-unknown -mattr=+sse4.1 | FileCheck %s --check-prefix=X86
; RUN: llc < %s -mtriple=x86_64-unknown -mattr=+sse4.1 | FileCheck %s --check-prefix=X64
define void @t1(ptr %R, ptr %P1) nounwind {
-; X32-LABEL: t1:
-; X32: # %bb.0:
-; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
-; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx
-; X32-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
-; X32-NEXT: movss %xmm0, (%eax)
-; X32-NEXT: retl
+; X86-LABEL: t1:
+; X86: # %bb.0:
+; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; X86-NEXT: movss %xmm0, (%eax)
+; X86-NEXT: retl
;
; X64-LABEL: t1:
; X64: # %bb.0:
@@ -23,11 +23,11 @@ define void @t1(ptr %R, ptr %P1) nounwind {
}
define float @t2(ptr %P1) nounwind {
-; X32-LABEL: t2:
-; X32: # %bb.0:
-; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
-; X32-NEXT: flds 8(%eax)
-; X32-NEXT: retl
+; X86-LABEL: t2:
+; X86: # %bb.0:
+; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT: flds 8(%eax)
+; X86-NEXT: retl
;
; X64-LABEL: t2:
; X64: # %bb.0:
@@ -39,13 +39,13 @@ define float @t2(ptr %P1) nounwind {
}
define void @t3(ptr %R, ptr %P1) nounwind {
-; X32-LABEL: t3:
-; X32: # %bb.0:
-; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
-; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx
-; X32-NEXT: movl 12(%ecx), %ecx
-; X32-NEXT: movl %ecx, (%eax)
-; X32-NEXT: retl
+; X86-LABEL: t3:
+; X86: # %bb.0:
+; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT: movl 12(%ecx), %ecx
+; X86-NEXT: movl %ecx, (%eax)
+; X86-NEXT: retl
;
; X64-LABEL: t3:
; X64: # %bb.0:
@@ -59,11 +59,11 @@ define void @t3(ptr %R, ptr %P1) nounwind {
}
define i32 @t4(ptr %P1) nounwind {
-; X32-LABEL: t4:
-; X32: # %bb.0:
-; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
-; X32-NEXT: movl 12(%eax), %eax
-; X32-NEXT: retl
+; X86-LABEL: t4:
+; X86: # %bb.0:
+; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT: movl 12(%eax), %eax
+; X86-NEXT: retl
;
; X64-LABEL: t4:
; X64: # %bb.0:
diff --git a/llvm/test/CodeGen/X86/vec_extract.ll b/llvm/test/CodeGen/X86/vec_extract.ll
index e753019593d8..087cd30abee9 100644
--- a/llvm/test/CodeGen/X86/vec_extract.ll
+++ b/llvm/test/CodeGen/X86/vec_extract.ll
@@ -1,16 +1,16 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc < %s -mtriple=i686-unknown-linux-gnu -mattr=+sse2,-sse4.1 | FileCheck %s --check-prefix=X32
+; RUN: llc < %s -mtriple=i686-unknown-linux-gnu -mattr=+sse2,-sse4.1 | FileCheck %s --check-prefix=X86
; RUN: llc < %s -mtriple=x86_64-unknown-linux-gnu -mattr=+sse2,-sse4.1 | FileCheck %s --check-prefix=X64
define void @test1(ptr %F, ptr %f) nounwind {
-; X32-LABEL: test1:
-; X32: # %bb.0: # %entry
-; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
-; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx
-; X32-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
-; X32-NEXT: addss %xmm0, %xmm0
-; X32-NEXT: movss %xmm0, (%eax)
-; X32-NEXT: retl
+; X86-LABEL: test1:
+; X86: # %bb.0: # %entry
+; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; X86-NEXT: addss %xmm0, %xmm0
+; X86-NEXT: movss %xmm0, (%eax)
+; X86-NEXT: retl
;
; X64-LABEL: test1:
; X64: # %bb.0: # %entry
@@ -27,17 +27,17 @@ entry:
}
define float @test2(ptr %F, ptr %f) nounwind {
-; X32-LABEL: test2:
-; X32: # %bb.0: # %entry
-; X32-NEXT: pushl %eax
-; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
-; X32-NEXT: movaps (%eax), %xmm0
-; X32-NEXT: addps %xmm0, %xmm0
-; X32-NEXT: movhlps {{.*#+}} xmm0 = xmm0[1,1]
-; X32-NEXT: movss %xmm0, (%esp)
-; X32-NEXT: flds (%esp)
-; X32-NEXT: popl %eax
-; X32-NEXT: retl
+; X86-LABEL: test2:
+; X86: # %bb.0: # %entry
+; X86-NEXT: pushl %eax
+; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT: movaps (%eax), %xmm0
+; X86-NEXT: addps %xmm0, %xmm0
+; X86-NEXT: movhlps {{.*#+}} xmm0 = xmm0[1,1]
+; X86-NEXT: movss %xmm0, (%esp)
+; X86-NEXT: flds (%esp)
+; X86-NEXT: popl %eax
+; X86-NEXT: retl
;
; X64-LABEL: test2:
; X64: # %bb.0: # %entry
@@ -53,14 +53,14 @@ entry:
}
define void @test3(ptr %R, ptr %P1) nounwind {
-; X32-LABEL: test3:
-; X32: # %bb.0: # %entry
-; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
-; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx
-; X32-NEXT: movaps (%ecx), %xmm0
-; X32-NEXT: shufps {{.*#+}} xmm0 = xmm0[3,3,3,3]
-; X32-NEXT: movss %xmm0, (%eax)
-; X32-NEXT: retl
+; X86-LABEL: test3:
+; X86: # %bb.0: # %entry
+; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT: movaps (%ecx), %xmm0
+; X86-NEXT: shufps {{.*#+}} xmm0 = xmm0[3,3,3,3]
+; X86-NEXT: movss %xmm0, (%eax)
+; X86-NEXT: retl
;
; X64-LABEL: test3:
; X64: # %bb.0: # %entry
@@ -76,16 +76,16 @@ entry:
}
define double @test4(double %A) nounwind {
-; X32-LABEL: test4:
-; X32: # %bb.0: # %entry
-; X32-NEXT: subl $12, %esp
-; X32-NEXT: calll foo@PLT
-; X32-NEXT: unpckhpd {{.*#+}} xmm0 = xmm0[1,1]
-; X32-NEXT: addsd {{[0-9]+}}(%esp), %xmm0
-; X32-NEXT: movsd %xmm0, (%esp)
-; X32-NEXT: fldl (%esp)
-; X32-NEXT: addl $12, %esp
-; X32-NEXT: retl
+; X86-LABEL: test4:
+; X86: # %bb.0: # %entry
+; X86-NEXT: subl $12, %esp
+; X86-NEXT: calll foo@PLT
+; X86-NEXT: unpckhpd {{.*#+}} xmm0 = xmm0[1,1]
+; X86-NEXT: addsd {{[0-9]+}}(%esp), %xmm0
+; X86-NEXT: movsd %xmm0, (%esp)
+; X86-NEXT: fldl (%esp)
+; X86-NEXT: addl $12, %esp
+; X86-NEXT: retl
;
; X64-LABEL: test4:
; X64: # %bb.0: # %entry
@@ -107,11 +107,11 @@ declare <2 x double> @foo()
; OSS-Fuzz #15662
; https://bugs.chromium.org/p/oss-fuzz/issues/detail?id=15662
define <4 x i32> @ossfuzz15662(ptr %in) {
-; X32-LABEL: ossfuzz15662:
-; X32: # %bb.0:
-; X32-NEXT: xorps %xmm0, %xmm0
-; X32-NEXT: movaps %xmm0, (%eax)
-; X32-NEXT: retl
+; X86-LABEL: ossfuzz15662:
+; X86: # %bb.0:
+; X86-NEXT: xorps %xmm0, %xmm0
+; X86-NEXT: movaps %xmm0, (%eax)
+; X86-NEXT: retl
;
; X64-LABEL: ossfuzz15662:
; X64: # %bb.0:
diff --git a/llvm/test/CodeGen/X86/vec_setcc.ll b/llvm/test/CodeGen/X86/vec_setcc.ll
index e7232a34f471..87e29261eaa4 100644
--- a/llvm/test/CodeGen/X86/vec_setcc.ll
+++ b/llvm/test/CodeGen/X86/vec_setcc.ll
@@ -308,9 +308,9 @@ define <3 x i1> @test_setcc_v3i1_v3i16(ptr %a) nounwind {
; SSE2-LABEL: test_setcc_v3i1_v3i16:
; SSE2: # %bb.0:
; SSE2-NEXT: movq {{.*#+}} xmm0 = mem[0],zero
+; SSE2-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3]
; SSE2-NEXT: pxor %xmm1, %xmm1
; SSE2-NEXT: pcmpeqw %xmm0, %xmm1
-; SSE2-NEXT: punpcklwd {{.*#+}} xmm1 = xmm1[0,0,1,1,2,2,3,3]
; SSE2-NEXT: movdqa %xmm1, -{{[0-9]+}}(%rsp)
; SSE2-NEXT: movzbl -{{[0-9]+}}(%rsp), %eax
; SSE2-NEXT: movzbl -{{[0-9]+}}(%rsp), %edx
diff --git a/llvm/test/CodeGen/X86/vector-lzcnt-256.ll b/llvm/test/CodeGen/X86/vector-lzcnt-256.ll
index 3c53d211bae5..fe6836c045f3 100644
--- a/llvm/test/CodeGen/X86/vector-lzcnt-256.ll
+++ b/llvm/test/CodeGen/X86/vector-lzcnt-256.ll
@@ -7,7 +7,7 @@
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512dq,+avx512cd | FileCheck %s --check-prefixes=X64,AVX512,AVX512CD
;
; Just one 32-bit run to make sure we do reasonable things for i64 lzcnt.
-; RUN: llc < %s -mtriple=i686-unknown-unknown -mattr=+avx2 | FileCheck %s --check-prefix=X32-AVX
+; RUN: llc < %s -mtriple=i686-unknown-unknown -mattr=+avx2 | FileCheck %s --check-prefix=X86-AVX
define <4 x i64> @testv4i64(<4 x i64> %in) nounwind {
; AVX1-LABEL: testv4i64:
@@ -162,34 +162,34 @@ define <4 x i64> @testv4i64(<4 x i64> %in) nounwind {
; AVX512CD-NEXT: # kill: def $ymm0 killed $ymm0 killed $zmm0
; AVX512CD-NEXT: retq
;
-; X32-AVX-LABEL: testv4i64:
-; X32-AVX: # %bb.0:
-; X32-AVX-NEXT: vbroadcasti128 {{.*#+}} ymm1 = [4,3,2,2,1,1,1,1,0,0,0,0,0,0,0,0,4,3,2,2,1,1,1,1,0,0,0,0,0,0,0,0]
-; X32-AVX-NEXT: # ymm1 = mem[0,1,0,1]
-; X32-AVX-NEXT: vpshufb %ymm0, %ymm1, %ymm2
-; X32-AVX-NEXT: vpsrlw $4, %ymm0, %ymm3
-; X32-AVX-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}, %ymm3, %ymm3
-; X32-AVX-NEXT: vpxor %xmm4, %xmm4, %xmm4
-; X32-AVX-NEXT: vpcmpeqb %ymm4, %ymm3, %ymm5
-; X32-AVX-NEXT: vpand %ymm5, %ymm2, %ymm2
-; X32-AVX-NEXT: vpshufb %ymm3, %ymm1, %ymm1
-; X32-AVX-NEXT: vpaddb %ymm1, %ymm2, %ymm1
-; X32-AVX-NEXT: vpcmpeqb %ymm4, %ymm0, %ymm2
-; X32-AVX-NEXT: vpsrlw $8, %ymm2, %ymm2
-; X32-AVX-NEXT: vpand %ymm2, %ymm1, %ymm2
-; X32-AVX-NEXT: vpsrlw $8, %ymm1, %ymm1
-; X32-AVX-NEXT: vpaddw %ymm2, %ymm1, %ymm1
-; X32-AVX-NEXT: vpcmpeqw %ymm4, %ymm0, %ymm2
-; X32-AVX-NEXT: vpsrld $16, %ymm2, %ymm2
-; X32-AVX-NEXT: vpand %ymm2, %ymm1, %ymm2
-; X32-AVX-NEXT: vpsrld $16, %ymm1, %ymm1
-; X32-AVX-NEXT: vpaddd %ymm2, %ymm1, %ymm1
-; X32-AVX-NEXT: vpcmpeqd %ymm4, %ymm0, %ymm0
-; X32-AVX-NEXT: vpsrlq $32, %ymm0, %ymm0
-; X32-AVX-NEXT: vpand %ymm0, %ymm1, %ymm0
-; X32-AVX-NEXT: vpsrlq $32, %ymm1, %ymm1
-; X32-AVX-NEXT: vpaddq %ymm0, %ymm1, %ymm0
-; X32-AVX-NEXT: retl
+; X86-AVX-LABEL: testv4i64:
+; X86-AVX: # %bb.0:
+; X86-AVX-NEXT: vbroadcasti128 {{.*#+}} ymm1 = [4,3,2,2,1,1,1,1,0,0,0,0,0,0,0,0,4,3,2,2,1,1,1,1,0,0,0,0,0,0,0,0]
+; X86-AVX-NEXT: # ymm1 = mem[0,1,0,1]
+; X86-AVX-NEXT: vpshufb %ymm0, %ymm1, %ymm2
+; X86-AVX-NEXT: vpsrlw $4, %ymm0, %ymm3
+; X86-AVX-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}, %ymm3, %ymm3
+; X86-AVX-NEXT: vpxor %xmm4, %xmm4, %xmm4
+; X86-AVX-NEXT: vpcmpeqb %ymm4, %ymm3, %ymm5
+; X86-AVX-NEXT: vpand %ymm5, %ymm2, %ymm2
+; X86-AVX-NEXT: vpshufb %ymm3, %ymm1, %ymm1
+; X86-AVX-NEXT: vpaddb %ymm1, %ymm2, %ymm1
+; X86-AVX-NEXT: vpcmpeqb %ymm4, %ymm0, %ymm2
+; X86-AVX-NEXT: vpsrlw $8, %ymm2, %ymm2
+; X86-AVX-NEXT: vpand %ymm2, %ymm1, %ymm2
+; X86-AVX-NEXT: vpsrlw $8, %ymm1, %ymm1
+; X86-AVX-NEXT: vpaddw %ymm2, %ymm1, %ymm1
+; X86-AVX-NEXT: vpcmpeqw %ymm4, %ymm0, %ymm2
+; X86-AVX-NEXT: vpsrld $16, %ymm2, %ymm2
+; X86-AVX-NEXT: vpand %ymm2, %ymm1, %ymm2
+; X86-AVX-NEXT: vpsrld $16, %ymm1, %ymm1
+; X86-AVX-NEXT: vpaddd %ymm2, %ymm1, %ymm1
+; X86-AVX-NEXT: vpcmpeqd %ymm4, %ymm0, %ymm0
+; X86-AVX-NEXT: vpsrlq $32, %ymm0, %ymm0
+; X86-AVX-NEXT: vpand %ymm0, %ymm1, %ymm0
+; X86-AVX-NEXT: vpsrlq $32, %ymm1, %ymm1
+; X86-AVX-NEXT: vpaddq %ymm0, %ymm1, %ymm0
+; X86-AVX-NEXT: retl
%out = call <4 x i64> @llvm.ctlz.v4i64(<4 x i64> %in, i1 0)
ret <4 x i64> %out
@@ -348,34 +348,34 @@ define <4 x i64> @testv4i64u(<4 x i64> %in) nounwind {
; AVX512CD-NEXT: # kill: def $ymm0 killed $ymm0 killed $zmm0
; AVX512CD-NEXT: retq
;
-; X32-AVX-LABEL: testv4i64u:
-; X32-AVX: # %bb.0:
-; X32-AVX-NEXT: vbroadcasti128 {{.*#+}} ymm1 = [4,3,2,2,1,1,1,1,0,0,0,0,0,0,0,0,4,3,2,2,1,1,1,1,0,0,0,0,0,0,0,0]
-; X32-AVX-NEXT: # ymm1 = mem[0,1,0,1]
-; X32-AVX-NEXT: vpshufb %ymm0, %ymm1, %ymm2
-; X32-AVX-NEXT: vpsrlw $4, %ymm0, %ymm3
-; X32-AVX-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}, %ymm3, %ymm3
-; X32-AVX-NEXT: vpxor %xmm4, %xmm4, %xmm4
-; X32-AVX-NEXT: vpcmpeqb %ymm4, %ymm3, %ymm5
-; X32-AVX-NEXT: vpand %ymm5, %ymm2, %ymm2
-; X32-AVX-NEXT: vpshufb %ymm3, %ymm1, %ymm1
-; X32-AVX-NEXT: vpaddb %ymm1, %ymm2, %ymm1
-; X32-AVX-NEXT: vpcmpeqb %ymm4, %ymm0, %ymm2
-; X32-AVX-NEXT: vpsrlw $8, %ymm2, %ymm2
-; X32-AVX-NEXT: vpand %ymm2, %ymm1, %ymm2
-; X32-AVX-NEXT: vpsrlw $8, %ymm1, %ymm1
-; X32-AVX-NEXT: vpaddw %ymm2, %ymm1, %ymm1
-; X32-AVX-NEXT: vpcmpeqw %ymm4, %ymm0, %ymm2
-; X32-AVX-NEXT: vpsrld $16, %ymm2, %ymm2
-; X32-AVX-NEXT: vpand %ymm2, %ymm1, %ymm2
-; X32-AVX-NEXT: vpsrld $16, %ymm1, %ymm1
-; X32-AVX-NEXT: vpaddd %ymm2, %ymm1, %ymm1
-; X32-AVX-NEXT: vpcmpeqd %ymm4, %ymm0, %ymm0
-; X32-AVX-NEXT: vpsrlq $32, %ymm0, %ymm0
-; X32-AVX-NEXT: vpand %ymm0, %ymm1, %ymm0
-; X32-AVX-NEXT: vpsrlq $32, %ymm1, %ymm1
-; X32-AVX-NEXT: vpaddq %ymm0, %ymm1, %ymm0
-; X32-AVX-NEXT: retl
+; X86-AVX-LABEL: testv4i64u:
+; X86-AVX: # %bb.0:
+; X86-AVX-NEXT: vbroadcasti128 {{.*#+}} ymm1 = [4,3,2,2,1,1,1,1,0,0,0,0,0,0,0,0,4,3,2,2,1,1,1,1,0,0,0,0,0,0,0,0]
+; X86-AVX-NEXT: # ymm1 = mem[0,1,0,1]
+; X86-AVX-NEXT: vpshufb %ymm0, %ymm1, %ymm2
+; X86-AVX-NEXT: vpsrlw $4, %ymm0, %ymm3
+; X86-AVX-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}, %ymm3, %ymm3
+; X86-AVX-NEXT: vpxor %xmm4, %xmm4, %xmm4
+; X86-AVX-NEXT: vpcmpeqb %ymm4, %ymm3, %ymm5
+; X86-AVX-NEXT: vpand %ymm5, %ymm2, %ymm2
+; X86-AVX-NEXT: vpshufb %ymm3, %ymm1, %ymm1
+; X86-AVX-NEXT: vpaddb %ymm1, %ymm2, %ymm1
+; X86-AVX-NEXT: vpcmpeqb %ymm4, %ymm0, %ymm2
+; X86-AVX-NEXT: vpsrlw $8, %ymm2, %ymm2
+; X86-AVX-NEXT: vpand %ymm2, %ymm1, %ymm2
+; X86-AVX-NEXT: vpsrlw $8, %ymm1, %ymm1
+; X86-AVX-NEXT: vpaddw %ymm2, %ymm1, %ymm1
+; X86-AVX-NEXT: vpcmpeqw %ymm4, %ymm0, %ymm2
+; X86-AVX-NEXT: vpsrld $16, %ymm2, %ymm2
+; X86-AVX-NEXT: vpand %ymm2, %ymm1, %ymm2
+; X86-AVX-NEXT: vpsrld $16, %ymm1, %ymm1
+; X86-AVX-NEXT: vpaddd %ymm2, %ymm1, %ymm1
+; X86-AVX-NEXT: vpcmpeqd %ymm4, %ymm0, %ymm0
+; X86-AVX-NEXT: vpsrlq $32, %ymm0, %ymm0
+; X86-AVX-NEXT: vpand %ymm0, %ymm1, %ymm0
+; X86-AVX-NEXT: vpsrlq $32, %ymm1, %ymm1
+; X86-AVX-NEXT: vpaddq %ymm0, %ymm1, %ymm0
+; X86-AVX-NEXT: retl
%out = call <4 x i64> @llvm.ctlz.v4i64(<4 x i64> %in, i1 -1)
ret <4 x i64> %out
@@ -509,29 +509,29 @@ define <8 x i32> @testv8i32(<8 x i32> %in) nounwind {
; AVX512CD-NEXT: # kill: def $ymm0 killed $ymm0 killed $zmm0
; AVX512CD-NEXT: retq
;
-; X32-AVX-LABEL: testv8i32:
-; X32-AVX: # %bb.0:
-; X32-AVX-NEXT: vbroadcasti128 {{.*#+}} ymm1 = [4,3,2,2,1,1,1,1,0,0,0,0,0,0,0,0,4,3,2,2,1,1,1,1,0,0,0,0,0,0,0,0]
-; X32-AVX-NEXT: # ymm1 = mem[0,1,0,1]
-; X32-AVX-NEXT: vpshufb %ymm0, %ymm1, %ymm2
-; X32-AVX-NEXT: vpsrlw $4, %ymm0, %ymm3
-; X32-AVX-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}, %ymm3, %ymm3
-; X32-AVX-NEXT: vpxor %xmm4, %xmm4, %xmm4
-; X32-AVX-NEXT: vpcmpeqb %ymm4, %ymm3, %ymm5
-; X32-AVX-NEXT: vpand %ymm5, %ymm2, %ymm2
-; X32-AVX-NEXT: vpshufb %ymm3, %ymm1, %ymm1
-; X32-AVX-NEXT: vpaddb %ymm1, %ymm2, %ymm1
-; X32-AVX-NEXT: vpcmpeqb %ymm4, %ymm0, %ymm2
-; X32-AVX-NEXT: vpsrlw $8, %ymm2, %ymm2
-; X32-AVX-NEXT: vpand %ymm2, %ymm1, %ymm2
-; X32-AVX-NEXT: vpsrlw $8, %ymm1, %ymm1
-; X32-AVX-NEXT: vpaddw %ymm2, %ymm1, %ymm1
-; X32-AVX-NEXT: vpcmpeqw %ymm4, %ymm0, %ymm0
-; X32-AVX-NEXT: vpsrld $16, %ymm0, %ymm0
-; X32-AVX-NEXT: vpand %ymm0, %ymm1, %ymm0
-; X32-AVX-NEXT: vpsrld $16, %ymm1, %ymm1
-; X32-AVX-NEXT: vpaddd %ymm0, %ymm1, %ymm0
-; X32-AVX-NEXT: retl
+; X86-AVX-LABEL: testv8i32:
+; X86-AVX: # %bb.0:
+; X86-AVX-NEXT: vbroadcasti128 {{.*#+}} ymm1 = [4,3,2,2,1,1,1,1,0,0,0,0,0,0,0,0,4,3,2,2,1,1,1,1,0,0,0,0,0,0,0,0]
+; X86-AVX-NEXT: # ymm1 = mem[0,1,0,1]
+; X86-AVX-NEXT: vpshufb %ymm0, %ymm1, %ymm2
+; X86-AVX-NEXT: vpsrlw $4, %ymm0, %ymm3
+; X86-AVX-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}, %ymm3, %ymm3
+; X86-AVX-NEXT: vpxor %xmm4, %xmm4, %xmm4
+; X86-AVX-NEXT: vpcmpeqb %ymm4, %ymm3, %ymm5
+; X86-AVX-NEXT: vpand %ymm5, %ymm2, %ymm2
+; X86-AVX-NEXT: vpshufb %ymm3, %ymm1, %ymm1
+; X86-AVX-NEXT: vpaddb %ymm1, %ymm2, %ymm1
+; X86-AVX-NEXT: vpcmpeqb %ymm4, %ymm0, %ymm2
+; X86-AVX-NEXT: vpsrlw $8, %ymm2, %ymm2
+; X86-AVX-NEXT: vpand %ymm2, %ymm1, %ymm2
+; X86-AVX-NEXT: vpsrlw $8, %ymm1, %ymm1
+; X86-AVX-NEXT: vpaddw %ymm2, %ymm1, %ymm1
+; X86-AVX-NEXT: vpcmpeqw %ymm4, %ymm0, %ymm0
+; X86-AVX-NEXT: vpsrld $16, %ymm0, %ymm0
+; X86-AVX-NEXT: vpand %ymm0, %ymm1, %ymm0
+; X86-AVX-NEXT: vpsrld $16, %ymm1, %ymm1
+; X86-AVX-NEXT: vpaddd %ymm0, %ymm1, %ymm0
+; X86-AVX-NEXT: retl
%out = call <8 x i32> @llvm.ctlz.v8i32(<8 x i32> %in, i1 0)
ret <8 x i32> %out
@@ -665,29 +665,29 @@ define <8 x i32> @testv8i32u(<8 x i32> %in) nounwind {
; AVX512CD-NEXT: # kill: def $ymm0 killed $ymm0 killed $zmm0
; AVX512CD-NEXT: retq
;
-; X32-AVX-LABEL: testv8i32u:
-; X32-AVX: # %bb.0:
-; X32-AVX-NEXT: vbroadcasti128 {{.*#+}} ymm1 = [4,3,2,2,1,1,1,1,0,0,0,0,0,0,0,0,4,3,2,2,1,1,1,1,0,0,0,0,0,0,0,0]
-; X32-AVX-NEXT: # ymm1 = mem[0,1,0,1]
-; X32-AVX-NEXT: vpshufb %ymm0, %ymm1, %ymm2
-; X32-AVX-NEXT: vpsrlw $4, %ymm0, %ymm3
-; X32-AVX-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}, %ymm3, %ymm3
-; X32-AVX-NEXT: vpxor %xmm4, %xmm4, %xmm4
-; X32-AVX-NEXT: vpcmpeqb %ymm4, %ymm3, %ymm5
-; X32-AVX-NEXT: vpand %ymm5, %ymm2, %ymm2
-; X32-AVX-NEXT: vpshufb %ymm3, %ymm1, %ymm1
-; X32-AVX-NEXT: vpaddb %ymm1, %ymm2, %ymm1
-; X32-AVX-NEXT: vpcmpeqb %ymm4, %ymm0, %ymm2
-; X32-AVX-NEXT: vpsrlw $8, %ymm2, %ymm2
-; X32-AVX-NEXT: vpand %ymm2, %ymm1, %ymm2
-; X32-AVX-NEXT: vpsrlw $8, %ymm1, %ymm1
-; X32-AVX-NEXT: vpaddw %ymm2, %ymm1, %ymm1
-; X32-AVX-NEXT: vpcmpeqw %ymm4, %ymm0, %ymm0
-; X32-AVX-NEXT: vpsrld $16, %ymm0, %ymm0
-; X32-AVX-NEXT: vpand %ymm0, %ymm1, %ymm0
-; X32-AVX-NEXT: vpsrld $16, %ymm1, %ymm1
-; X32-AVX-NEXT: vpaddd %ymm0, %ymm1, %ymm0
-; X32-AVX-NEXT: retl
+; X86-AVX-LABEL: testv8i32u:
+; X86-AVX: # %bb.0:
+; X86-AVX-NEXT: vbroadcasti128 {{.*#+}} ymm1 = [4,3,2,2,1,1,1,1,0,0,0,0,0,0,0,0,4,3,2,2,1,1,1,1,0,0,0,0,0,0,0,0]
+; X86-AVX-NEXT: # ymm1 = mem[0,1,0,1]
+; X86-AVX-NEXT: vpshufb %ymm0, %ymm1, %ymm2
+; X86-AVX-NEXT: vpsrlw $4, %ymm0, %ymm3
+; X86-AVX-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}, %ymm3, %ymm3
+; X86-AVX-NEXT: vpxor %xmm4, %xmm4, %xmm4
+; X86-AVX-NEXT: vpcmpeqb %ymm4, %ymm3, %ymm5
+; X86-AVX-NEXT: vpand %ymm5, %ymm2, %ymm2
+; X86-AVX-NEXT: vpshufb %ymm3, %ymm1, %ymm1
+; X86-AVX-NEXT: vpaddb %ymm1, %ymm2, %ymm1
+; X86-AVX-NEXT: vpcmpeqb %ymm4, %ymm0, %ymm2
+; X86-AVX-NEXT: vpsrlw $8, %ymm2, %ymm2
+; X86-AVX-NEXT: vpand %ymm2, %ymm1, %ymm2
+; X86-AVX-NEXT: vpsrlw $8, %ymm1, %ymm1
+; X86-AVX-NEXT: vpaddw %ymm2, %ymm1, %ymm1
+; X86-AVX-NEXT: vpcmpeqw %ymm4, %ymm0, %ymm0
+; X86-AVX-NEXT: vpsrld $16, %ymm0, %ymm0
+; X86-AVX-NEXT: vpand %ymm0, %ymm1, %ymm0
+; X86-AVX-NEXT: vpsrld $16, %ymm1, %ymm1
+; X86-AVX-NEXT: vpaddd %ymm0, %ymm1, %ymm0
+; X86-AVX-NEXT: retl
%out = call <8 x i32> @llvm.ctlz.v8i32(<8 x i32> %in, i1 -1)
ret <8 x i32> %out
@@ -792,24 +792,24 @@ define <16 x i16> @testv16i16(<16 x i16> %in) nounwind {
; AVX512-NEXT: vpsubw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0, %ymm0
; AVX512-NEXT: retq
;
-; X32-AVX-LABEL: testv16i16:
-; X32-AVX: # %bb.0:
-; X32-AVX-NEXT: vbroadcasti128 {{.*#+}} ymm1 = [4,3,2,2,1,1,1,1,0,0,0,0,0,0,0,0,4,3,2,2,1,1,1,1,0,0,0,0,0,0,0,0]
-; X32-AVX-NEXT: # ymm1 = mem[0,1,0,1]
-; X32-AVX-NEXT: vpshufb %ymm0, %ymm1, %ymm2
-; X32-AVX-NEXT: vpsrlw $4, %ymm0, %ymm3
-; X32-AVX-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}, %ymm3, %ymm3
-; X32-AVX-NEXT: vpxor %xmm4, %xmm4, %xmm4
-; X32-AVX-NEXT: vpcmpeqb %ymm4, %ymm3, %ymm5
-; X32-AVX-NEXT: vpand %ymm5, %ymm2, %ymm2
-; X32-AVX-NEXT: vpshufb %ymm3, %ymm1, %ymm1
-; X32-AVX-NEXT: vpaddb %ymm1, %ymm2, %ymm1
-; X32-AVX-NEXT: vpcmpeqb %ymm4, %ymm0, %ymm0
-; X32-AVX-NEXT: vpsrlw $8, %ymm0, %ymm0
-; X32-AVX-NEXT: vpand %ymm0, %ymm1, %ymm0
-; X32-AVX-NEXT: vpsrlw $8, %ymm1, %ymm1
-; X32-AVX-NEXT: vpaddw %ymm0, %ymm1, %ymm0
-; X32-AVX-NEXT: retl
+; X86-AVX-LABEL: testv16i16:
+; X86-AVX: # %bb.0:
+; X86-AVX-NEXT: vbroadcasti128 {{.*#+}} ymm1 = [4,3,2,2,1,1,1,1,0,0,0,0,0,0,0,0,4,3,2,2,1,1,1,1,0,0,0,0,0,0,0,0]
+; X86-AVX-NEXT: # ymm1 = mem[0,1,0,1]
+; X86-AVX-NEXT: vpshufb %ymm0, %ymm1, %ymm2
+; X86-AVX-NEXT: vpsrlw $4, %ymm0, %ymm3
+; X86-AVX-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}, %ymm3, %ymm3
+; X86-AVX-NEXT: vpxor %xmm4, %xmm4, %xmm4
+; X86-AVX-NEXT: vpcmpeqb %ymm4, %ymm3, %ymm5
+; X86-AVX-NEXT: vpand %ymm5, %ymm2, %ymm2
+; X86-AVX-NEXT: vpshufb %ymm3, %ymm1, %ymm1
+; X86-AVX-NEXT: vpaddb %ymm1, %ymm2, %ymm1
+; X86-AVX-NEXT: vpcmpeqb %ymm4, %ymm0, %ymm0
+; X86-AVX-NEXT: vpsrlw $8, %ymm0, %ymm0
+; X86-AVX-NEXT: vpand %ymm0, %ymm1, %ymm0
+; X86-AVX-NEXT: vpsrlw $8, %ymm1, %ymm1
+; X86-AVX-NEXT: vpaddw %ymm0, %ymm1, %ymm0
+; X86-AVX-NEXT: retl
%out = call <16 x i16> @llvm.ctlz.v16i16(<16 x i16> %in, i1 0)
ret <16 x i16> %out
}
@@ -913,24 +913,24 @@ define <16 x i16> @testv16i16u(<16 x i16> %in) nounwind {
; AVX512-NEXT: vpsubw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0, %ymm0
; AVX512-NEXT: retq
;
-; X32-AVX-LABEL: testv16i16u:
-; X32-AVX: # %bb.0:
-; X32-AVX-NEXT: vbroadcasti128 {{.*#+}} ymm1 = [4,3,2,2,1,1,1,1,0,0,0,0,0,0,0,0,4,3,2,2,1,1,1,1,0,0,0,0,0,0,0,0]
-; X32-AVX-NEXT: # ymm1 = mem[0,1,0,1]
-; X32-AVX-NEXT: vpshufb %ymm0, %ymm1, %ymm2
-; X32-AVX-NEXT: vpsrlw $4, %ymm0, %ymm3
-; X32-AVX-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}, %ymm3, %ymm3
-; X32-AVX-NEXT: vpxor %xmm4, %xmm4, %xmm4
-; X32-AVX-NEXT: vpcmpeqb %ymm4, %ymm3, %ymm5
-; X32-AVX-NEXT: vpand %ymm5, %ymm2, %ymm2
-; X32-AVX-NEXT: vpshufb %ymm3, %ymm1, %ymm1
-; X32-AVX-NEXT: vpaddb %ymm1, %ymm2, %ymm1
-; X32-AVX-NEXT: vpcmpeqb %ymm4, %ymm0, %ymm0
-; X32-AVX-NEXT: vpsrlw $8, %ymm0, %ymm0
-; X32-AVX-NEXT: vpand %ymm0, %ymm1, %ymm0
-; X32-AVX-NEXT: vpsrlw $8, %ymm1, %ymm1
-; X32-AVX-NEXT: vpaddw %ymm0, %ymm1, %ymm0
-; X32-AVX-NEXT: retl
+; X86-AVX-LABEL: testv16i16u:
+; X86-AVX: # %bb.0:
+; X86-AVX-NEXT: vbroadcasti128 {{.*#+}} ymm1 = [4,3,2,2,1,1,1,1,0,0,0,0,0,0,0,0,4,3,2,2,1,1,1,1,0,0,0,0,0,0,0,0]
+; X86-AVX-NEXT: # ymm1 = mem[0,1,0,1]
+; X86-AVX-NEXT: vpshufb %ymm0, %ymm1, %ymm2
+; X86-AVX-NEXT: vpsrlw $4, %ymm0, %ymm3
+; X86-AVX-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}, %ymm3, %ymm3
+; X86-AVX-NEXT: vpxor %xmm4, %xmm4, %xmm4
+; X86-AVX-NEXT: vpcmpeqb %ymm4, %ymm3, %ymm5
+; X86-AVX-NEXT: vpand %ymm5, %ymm2, %ymm2
+; X86-AVX-NEXT: vpshufb %ymm3, %ymm1, %ymm1
+; X86-AVX-NEXT: vpaddb %ymm1, %ymm2, %ymm1
+; X86-AVX-NEXT: vpcmpeqb %ymm4, %ymm0, %ymm0
+; X86-AVX-NEXT: vpsrlw $8, %ymm0, %ymm0
+; X86-AVX-NEXT: vpand %ymm0, %ymm1, %ymm0
+; X86-AVX-NEXT: vpsrlw $8, %ymm1, %ymm1
+; X86-AVX-NEXT: vpaddw %ymm0, %ymm1, %ymm0
+; X86-AVX-NEXT: retl
%out = call <16 x i16> @llvm.ctlz.v16i16(<16 x i16> %in, i1 -1)
ret <16 x i16> %out
}
@@ -1014,19 +1014,19 @@ define <32 x i8> @testv32i8(<32 x i8> %in) nounwind {
; AVX512-NEXT: vpsubb {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0, %ymm0
; AVX512-NEXT: retq
;
-; X32-AVX-LABEL: testv32i8:
-; X32-AVX: # %bb.0:
-; X32-AVX-NEXT: vbroadcasti128 {{.*#+}} ymm1 = [4,3,2,2,1,1,1,1,0,0,0,0,0,0,0,0,4,3,2,2,1,1,1,1,0,0,0,0,0,0,0,0]
-; X32-AVX-NEXT: # ymm1 = mem[0,1,0,1]
-; X32-AVX-NEXT: vpshufb %ymm0, %ymm1, %ymm2
-; X32-AVX-NEXT: vpsrlw $4, %ymm0, %ymm0
-; X32-AVX-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}, %ymm0, %ymm0
-; X32-AVX-NEXT: vpxor %xmm3, %xmm3, %xmm3
-; X32-AVX-NEXT: vpcmpeqb %ymm3, %ymm0, %ymm3
-; X32-AVX-NEXT: vpand %ymm3, %ymm2, %ymm2
-; X32-AVX-NEXT: vpshufb %ymm0, %ymm1, %ymm0
-; X32-AVX-NEXT: vpaddb %ymm0, %ymm2, %ymm0
-; X32-AVX-NEXT: retl
+; X86-AVX-LABEL: testv32i8:
+; X86-AVX: # %bb.0:
+; X86-AVX-NEXT: vbroadcasti128 {{.*#+}} ymm1 = [4,3,2,2,1,1,1,1,0,0,0,0,0,0,0,0,4,3,2,2,1,1,1,1,0,0,0,0,0,0,0,0]
+; X86-AVX-NEXT: # ymm1 = mem[0,1,0,1]
+; X86-AVX-NEXT: vpshufb %ymm0, %ymm1, %ymm2
+; X86-AVX-NEXT: vpsrlw $4, %ymm0, %ymm0
+; X86-AVX-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}, %ymm0, %ymm0
+; X86-AVX-NEXT: vpxor %xmm3, %xmm3, %xmm3
+; X86-AVX-NEXT: vpcmpeqb %ymm3, %ymm0, %ymm3
+; X86-AVX-NEXT: vpand %ymm3, %ymm2, %ymm2
+; X86-AVX-NEXT: vpshufb %ymm0, %ymm1, %ymm0
+; X86-AVX-NEXT: vpaddb %ymm0, %ymm2, %ymm0
+; X86-AVX-NEXT: retl
%out = call <32 x i8> @llvm.ctlz.v32i8(<32 x i8> %in, i1 0)
ret <32 x i8> %out
}
@@ -1110,19 +1110,19 @@ define <32 x i8> @testv32i8u(<32 x i8> %in) nounwind {
; AVX512-NEXT: vpsubb {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0, %ymm0
; AVX512-NEXT: retq
;
-; X32-AVX-LABEL: testv32i8u:
-; X32-AVX: # %bb.0:
-; X32-AVX-NEXT: vbroadcasti128 {{.*#+}} ymm1 = [4,3,2,2,1,1,1,1,0,0,0,0,0,0,0,0,4,3,2,2,1,1,1,1,0,0,0,0,0,0,0,0]
-; X32-AVX-NEXT: # ymm1 = mem[0,1,0,1]
-; X32-AVX-NEXT: vpshufb %ymm0, %ymm1, %ymm2
-; X32-AVX-NEXT: vpsrlw $4, %ymm0, %ymm0
-; X32-AVX-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}, %ymm0, %ymm0
-; X32-AVX-NEXT: vpxor %xmm3, %xmm3, %xmm3
-; X32-AVX-NEXT: vpcmpeqb %ymm3, %ymm0, %ymm3
-; X32-AVX-NEXT: vpand %ymm3, %ymm2, %ymm2
-; X32-AVX-NEXT: vpshufb %ymm0, %ymm1, %ymm0
-; X32-AVX-NEXT: vpaddb %ymm0, %ymm2, %ymm0
-; X32-AVX-NEXT: retl
+; X86-AVX-LABEL: testv32i8u:
+; X86-AVX: # %bb.0:
+; X86-AVX-NEXT: vbroadcasti128 {{.*#+}} ymm1 = [4,3,2,2,1,1,1,1,0,0,0,0,0,0,0,0,4,3,2,2,1,1,1,1,0,0,0,0,0,0,0,0]
+; X86-AVX-NEXT: # ymm1 = mem[0,1,0,1]
+; X86-AVX-NEXT: vpshufb %ymm0, %ymm1, %ymm2
+; X86-AVX-NEXT: vpsrlw $4, %ymm0, %ymm0
+; X86-AVX-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}, %ymm0, %ymm0
+; X86-AVX-NEXT: vpxor %xmm3, %xmm3, %xmm3
+; X86-AVX-NEXT: vpcmpeqb %ymm3, %ymm0, %ymm3
+; X86-AVX-NEXT: vpand %ymm3, %ymm2, %ymm2
+; X86-AVX-NEXT: vpshufb %ymm0, %ymm1, %ymm0
+; X86-AVX-NEXT: vpaddb %ymm0, %ymm2, %ymm0
+; X86-AVX-NEXT: retl
%out = call <32 x i8> @llvm.ctlz.v32i8(<32 x i8> %in, i1 -1)
ret <32 x i8> %out
}
@@ -1133,10 +1133,10 @@ define <4 x i64> @foldv4i64() nounwind {
; X64-NEXT: vmovaps {{.*#+}} ymm0 = [55,0,64,56]
; X64-NEXT: retq
;
-; X32-AVX-LABEL: foldv4i64:
-; X32-AVX: # %bb.0:
-; X32-AVX-NEXT: vmovaps {{.*#+}} ymm0 = [55,0,0,0,64,0,56,0]
-; X32-AVX-NEXT: retl
+; X86-AVX-LABEL: foldv4i64:
+; X86-AVX: # %bb.0:
+; X86-AVX-NEXT: vmovaps {{.*#+}} ymm0 = [55,0,0,0,64,0,56,0]
+; X86-AVX-NEXT: retl
%out = call <4 x i64> @llvm.ctlz.v4i64(<4 x i64> <i64 256, i64 -1, i64 0, i64 255>, i1 0)
ret <4 x i64> %out
}
@@ -1147,10 +1147,10 @@ define <4 x i64> @foldv4i64u() nounwind {
; X64-NEXT: vmovaps {{.*#+}} ymm0 = [55,0,64,56]
; X64-NEXT: retq
;
-; X32-AVX-LABEL: foldv4i64u:
-; X32-AVX: # %bb.0:
-; X32-AVX-NEXT: vmovaps {{.*#+}} ymm0 = [55,0,0,0,64,0,56,0]
-; X32-AVX-NEXT: retl
+; X86-AVX-LABEL: foldv4i64u:
+; X86-AVX: # %bb.0:
+; X86-AVX-NEXT: vmovaps {{.*#+}} ymm0 = [55,0,0,0,64,0,56,0]
+; X86-AVX-NEXT: retl
%out = call <4 x i64> @llvm.ctlz.v4i64(<4 x i64> <i64 256, i64 -1, i64 0, i64 255>, i1 -1)
ret <4 x i64> %out
}
@@ -1161,10 +1161,10 @@ define <8 x i32> @foldv8i32() nounwind {
; X64-NEXT: vmovaps {{.*#+}} ymm0 = [23,0,32,24,0,29,27,25]
; X64-NEXT: retq
;
-; X32-AVX-LABEL: foldv8i32:
-; X32-AVX: # %bb.0:
-; X32-AVX-NEXT: vmovaps {{.*#+}} ymm0 = [23,0,32,24,0,29,27,25]
-; X32-AVX-NEXT: retl
+; X86-AVX-LABEL: foldv8i32:
+; X86-AVX: # %bb.0:
+; X86-AVX-NEXT: vmovaps {{.*#+}} ymm0 = [23,0,32,24,0,29,27,25]
+; X86-AVX-NEXT: retl
%out = call <8 x i32> @llvm.ctlz.v8i32(<8 x i32> <i32 256, i32 -1, i32 0, i32 255, i32 -65536, i32 7, i32 24, i32 88>, i1 0)
ret <8 x i32> %out
}
@@ -1175,10 +1175,10 @@ define <8 x i32> @foldv8i32u() nounwind {
; X64-NEXT: vmovaps {{.*#+}} ymm0 = [23,0,32,24,0,29,27,25]
; X64-NEXT: retq
;
-; X32-AVX-LABEL: foldv8i32u:
-; X32-AVX: # %bb.0:
-; X32-AVX-NEXT: vmovaps {{.*#+}} ymm0 = [23,0,32,24,0,29,27,25]
-; X32-AVX-NEXT: retl
+; X86-AVX-LABEL: foldv8i32u:
+; X86-AVX: # %bb.0:
+; X86-AVX-NEXT: vmovaps {{.*#+}} ymm0 = [23,0,32,24,0,29,27,25]
+; X86-AVX-NEXT: retl
%out = call <8 x i32> @llvm.ctlz.v8i32(<8 x i32> <i32 256, i32 -1, i32 0, i32 255, i32 -65536, i32 7, i32 24, i32 88>, i1 -1)
ret <8 x i32> %out
}
@@ -1189,10 +1189,10 @@ define <16 x i16> @foldv16i16() nounwind {
; X64-NEXT: vmovaps {{.*#+}} ymm0 = [7,0,16,8,16,13,11,9,0,8,15,14,13,12,11,10]
; X64-NEXT: retq
;
-; X32-AVX-LABEL: foldv16i16:
-; X32-AVX: # %bb.0:
-; X32-AVX-NEXT: vmovaps {{.*#+}} ymm0 = [7,0,16,8,16,13,11,9,0,8,15,14,13,12,11,10]
-; X32-AVX-NEXT: retl
+; X86-AVX-LABEL: foldv16i16:
+; X86-AVX: # %bb.0:
+; X86-AVX-NEXT: vmovaps {{.*#+}} ymm0 = [7,0,16,8,16,13,11,9,0,8,15,14,13,12,11,10]
+; X86-AVX-NEXT: retl
%out = call <16 x i16> @llvm.ctlz.v16i16(<16 x i16> <i16 256, i16 -1, i16 0, i16 255, i16 -65536, i16 7, i16 24, i16 88, i16 -2, i16 254, i16 1, i16 2, i16 4, i16 8, i16 16, i16 32>, i1 0)
ret <16 x i16> %out
}
@@ -1203,10 +1203,10 @@ define <16 x i16> @foldv16i16u() nounwind {
; X64-NEXT: vmovaps {{.*#+}} ymm0 = [7,0,16,8,16,13,11,9,0,8,15,14,13,12,11,10]
; X64-NEXT: retq
;
-; X32-AVX-LABEL: foldv16i16u:
-; X32-AVX: # %bb.0:
-; X32-AVX-NEXT: vmovaps {{.*#+}} ymm0 = [7,0,16,8,16,13,11,9,0,8,15,14,13,12,11,10]
-; X32-AVX-NEXT: retl
+; X86-AVX-LABEL: foldv16i16u:
+; X86-AVX: # %bb.0:
+; X86-AVX-NEXT: vmovaps {{.*#+}} ymm0 = [7,0,16,8,16,13,11,9,0,8,15,14,13,12,11,10]
+; X86-AVX-NEXT: retl
%out = call <16 x i16> @llvm.ctlz.v16i16(<16 x i16> <i16 256, i16 -1, i16 0, i16 255, i16 -65536, i16 7, i16 24, i16 88, i16 -2, i16 254, i16 1, i16 2, i16 4, i16 8, i16 16, i16 32>, i1 -1)
ret <16 x i16> %out
}
@@ -1217,10 +1217,10 @@ define <32 x i8> @foldv32i8() nounwind {
; X64-NEXT: vmovaps {{.*#+}} ymm0 = [8,0,8,0,8,5,3,1,0,0,7,6,5,4,3,2,1,0,8,8,0,0,0,0,0,0,0,0,6,5,5,1]
; X64-NEXT: retq
;
-; X32-AVX-LABEL: foldv32i8:
-; X32-AVX: # %bb.0:
-; X32-AVX-NEXT: vmovaps {{.*#+}} ymm0 = [8,0,8,0,8,5,3,1,0,0,7,6,5,4,3,2,1,0,8,8,0,0,0,0,0,0,0,0,6,5,5,1]
-; X32-AVX-NEXT: retl
+; X86-AVX-LABEL: foldv32i8:
+; X86-AVX: # %bb.0:
+; X86-AVX-NEXT: vmovaps {{.*#+}} ymm0 = [8,0,8,0,8,5,3,1,0,0,7,6,5,4,3,2,1,0,8,8,0,0,0,0,0,0,0,0,6,5,5,1]
+; X86-AVX-NEXT: retl
%out = call <32 x i8> @llvm.ctlz.v32i8(<32 x i8> <i8 256, i8 -1, i8 0, i8 255, i8 -65536, i8 7, i8 24, i8 88, i8 -2, i8 254, i8 1, i8 2, i8 4, i8 8, i8 16, i8 32, i8 64, i8 128, i8 256, i8 -256, i8 -128, i8 -64, i8 -32, i8 -16, i8 -8, i8 -4, i8 -2, i8 -1, i8 3, i8 5, i8 7, i8 127>, i1 0)
ret <32 x i8> %out
}
@@ -1231,10 +1231,10 @@ define <32 x i8> @foldv32i8u() nounwind {
; X64-NEXT: vmovaps {{.*#+}} ymm0 = [8,0,8,0,8,5,3,1,0,0,7,6,5,4,3,2,1,0,8,8,0,0,0,0,0,0,0,0,6,5,5,1]
; X64-NEXT: retq
;
-; X32-AVX-LABEL: foldv32i8u:
-; X32-AVX: # %bb.0:
-; X32-AVX-NEXT: vmovaps {{.*#+}} ymm0 = [8,0,8,0,8,5,3,1,0,0,7,6,5,4,3,2,1,0,8,8,0,0,0,0,0,0,0,0,6,5,5,1]
-; X32-AVX-NEXT: retl
+; X86-AVX-LABEL: foldv32i8u:
+; X86-AVX: # %bb.0:
+; X86-AVX-NEXT: vmovaps {{.*#+}} ymm0 = [8,0,8,0,8,5,3,1,0,0,7,6,5,4,3,2,1,0,8,8,0,0,0,0,0,0,0,0,6,5,5,1]
+; X86-AVX-NEXT: retl
%out = call <32 x i8> @llvm.ctlz.v32i8(<32 x i8> <i8 256, i8 -1, i8 0, i8 255, i8 -65536, i8 7, i8 24, i8 88, i8 -2, i8 254, i8 1, i8 2, i8 4, i8 8, i8 16, i8 32, i8 64, i8 128, i8 256, i8 -256, i8 -128, i8 -64, i8 -32, i8 -16, i8 -8, i8 -4, i8 -2, i8 -1, i8 3, i8 5, i8 7, i8 127>, i1 -1)
ret <32 x i8> %out
}
diff --git a/llvm/test/CodeGen/X86/vector-shuffle-mmx.ll b/llvm/test/CodeGen/X86/vector-shuffle-mmx.ll
index 422f522e11f8..709be6534d77 100644
--- a/llvm/test/CodeGen/X86/vector-shuffle-mmx.ll
+++ b/llvm/test/CodeGen/X86/vector-shuffle-mmx.ll
@@ -1,17 +1,17 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc < %s -mtriple=i686-darwin -mattr=+mmx,+sse2 | FileCheck --check-prefix=X32 %s
+; RUN: llc < %s -mtriple=i686-darwin -mattr=+mmx,+sse2 | FileCheck --check-prefix=X86 %s
; RUN: llc < %s -mtriple=x86_64-darwin -mattr=+mmx,+sse2 | FileCheck --check-prefix=X64 %s
; If there is no explicit MMX type usage, always promote to XMM.
-define void @test0(ptr %x) {
-; X32-LABEL: test0:
-; X32: ## %bb.0: ## %entry
-; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
-; X32-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
-; X32-NEXT: shufps {{.*#+}} xmm0 = xmm0[1,1,1,1]
-; X32-NEXT: movlps %xmm0, (%eax)
-; X32-NEXT: retl
+define void @test0(ptr %x) nounwind {
+; X86-LABEL: test0:
+; X86: ## %bb.0: ## %entry
+; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
+; X86-NEXT: shufps {{.*#+}} xmm0 = xmm0[1,1,1,1]
+; X86-NEXT: movlps %xmm0, (%eax)
+; X86-NEXT: retl
;
; X64-LABEL: test0:
; X64: ## %bb.0: ## %entry
@@ -28,18 +28,16 @@ entry:
ret void
}
-define void @test1() {
-; X32-LABEL: test1:
-; X32: ## %bb.0: ## %entry
-; X32-NEXT: pushl %edi
-; X32-NEXT: .cfi_def_cfa_offset 8
-; X32-NEXT: .cfi_offset %edi, -8
-; X32-NEXT: pxor %mm0, %mm0
-; X32-NEXT: movq {{\.?LCPI[0-9]+_[0-9]+}}, %mm1 ## mm1 = 0x7070606040400000
-; X32-NEXT: xorl %edi, %edi
-; X32-NEXT: maskmovq %mm1, %mm0
-; X32-NEXT: popl %edi
-; X32-NEXT: retl
+define void @test1() nounwind {
+; X86-LABEL: test1:
+; X86: ## %bb.0: ## %entry
+; X86-NEXT: pushl %edi
+; X86-NEXT: pxor %mm0, %mm0
+; X86-NEXT: movq {{\.?LCPI[0-9]+_[0-9]+}}, %mm1 ## mm1 = 0x7070606040400000
+; X86-NEXT: xorl %edi, %edi
+; X86-NEXT: maskmovq %mm1, %mm0
+; X86-NEXT: popl %edi
+; X86-NEXT: retl
;
; X64-LABEL: test1:
; X64: ## %bb.0: ## %entry
@@ -63,13 +61,13 @@ entry:
@tmp_V2i = common global <2 x i32> zeroinitializer
define void @test2() nounwind {
-; X32-LABEL: test2:
-; X32: ## %bb.0: ## %entry
-; X32-NEXT: movl L_tmp_V2i$non_lazy_ptr, %eax
-; X32-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
-; X32-NEXT: unpcklps {{.*#+}} xmm0 = xmm0[0,0,1,1]
-; X32-NEXT: movlps %xmm0, (%eax)
-; X32-NEXT: retl
+; X86-LABEL: test2:
+; X86: ## %bb.0: ## %entry
+; X86-NEXT: movl L_tmp_V2i$non_lazy_ptr, %eax
+; X86-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
+; X86-NEXT: unpcklps {{.*#+}} xmm0 = xmm0[0,0,1,1]
+; X86-NEXT: movlps %xmm0, (%eax)
+; X86-NEXT: retl
;
; X64-LABEL: test2:
; X64: ## %bb.0: ## %entry
@@ -86,21 +84,21 @@ entry:
}
define <4 x float> @pr35869() nounwind {
-; X32-LABEL: pr35869:
-; X32: ## %bb.0:
-; X32-NEXT: movl $64, %eax
-; X32-NEXT: movd %eax, %mm0
-; X32-NEXT: pxor %mm1, %mm1
-; X32-NEXT: punpcklbw %mm1, %mm0 ## mm0 = mm0[0],mm1[0],mm0[1],mm1[1],mm0[2],mm1[2],mm0[3],mm1[3]
-; X32-NEXT: pcmpgtw %mm0, %mm1
-; X32-NEXT: movq %mm0, %mm2
-; X32-NEXT: punpckhwd %mm1, %mm2 ## mm2 = mm2[2],mm1[2],mm2[3],mm1[3]
-; X32-NEXT: xorps %xmm0, %xmm0
-; X32-NEXT: cvtpi2ps %mm2, %xmm0
-; X32-NEXT: movlhps {{.*#+}} xmm0 = xmm0[0,0]
-; X32-NEXT: punpcklwd %mm1, %mm0 ## mm0 = mm0[0],mm1[0],mm0[1],mm1[1]
-; X32-NEXT: cvtpi2ps %mm0, %xmm0
-; X32-NEXT: retl
+; X86-LABEL: pr35869:
+; X86: ## %bb.0:
+; X86-NEXT: movl $64, %eax
+; X86-NEXT: movd %eax, %mm0
+; X86-NEXT: pxor %mm1, %mm1
+; X86-NEXT: punpcklbw %mm1, %mm0 ## mm0 = mm0[0],mm1[0],mm0[1],mm1[1],mm0[2],mm1[2],mm0[3],mm1[3]
+; X86-NEXT: pcmpgtw %mm0, %mm1
+; X86-NEXT: movq %mm0, %mm2
+; X86-NEXT: punpckhwd %mm1, %mm2 ## mm2 = mm2[2],mm1[2],mm2[3],mm1[3]
+; X86-NEXT: xorps %xmm0, %xmm0
+; X86-NEXT: cvtpi2ps %mm2, %xmm0
+; X86-NEXT: movlhps {{.*#+}} xmm0 = xmm0[0,0]
+; X86-NEXT: punpcklwd %mm1, %mm0 ## mm0 = mm0[0],mm1[0],mm0[1],mm1[1]
+; X86-NEXT: cvtpi2ps %mm0, %xmm0
+; X86-NEXT: retl
;
; X64-LABEL: pr35869:
; X64: ## %bb.0:
diff --git a/llvm/test/CodeGen/X86/vector-tzcnt-256.ll b/llvm/test/CodeGen/X86/vector-tzcnt-256.ll
index cf3803aa460e..5bcdf0e22a5a 100644
--- a/llvm/test/CodeGen/X86/vector-tzcnt-256.ll
+++ b/llvm/test/CodeGen/X86/vector-tzcnt-256.ll
@@ -9,7 +9,7 @@
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512bitalg,+avx512vl | FileCheck %s --check-prefixes=ALL,BITALG
;
; Just one 32-bit run to make sure we do reasonable things for i64 tzcnt.
-; RUN: llc < %s -mtriple=i686-unknown-unknown -mattr=+avx2 | FileCheck %s --check-prefixes=ALL,X32-AVX
+; RUN: llc < %s -mtriple=i686-unknown-unknown -mattr=+avx2 | FileCheck %s --check-prefixes=ALL,X86-AVX
define <4 x i64> @testv4i64(<4 x i64> %in) nounwind {
; AVX1-LABEL: testv4i64:
@@ -115,23 +115,23 @@ define <4 x i64> @testv4i64(<4 x i64> %in) nounwind {
; BITALG-NEXT: vpsadbw %ymm1, %ymm0, %ymm0
; BITALG-NEXT: retq
;
-; X32-AVX-LABEL: testv4i64:
-; X32-AVX: # %bb.0:
-; X32-AVX-NEXT: vpcmpeqd %ymm1, %ymm1, %ymm1
-; X32-AVX-NEXT: vpaddq %ymm1, %ymm0, %ymm1
-; X32-AVX-NEXT: vpandn %ymm1, %ymm0, %ymm0
-; X32-AVX-NEXT: vpbroadcastb {{.*#+}} ymm1 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
-; X32-AVX-NEXT: vpand %ymm1, %ymm0, %ymm2
-; X32-AVX-NEXT: vbroadcasti128 {{.*#+}} ymm3 = [0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4,0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4]
-; X32-AVX-NEXT: # ymm3 = mem[0,1,0,1]
-; X32-AVX-NEXT: vpshufb %ymm2, %ymm3, %ymm2
-; X32-AVX-NEXT: vpsrlw $4, %ymm0, %ymm0
-; X32-AVX-NEXT: vpand %ymm1, %ymm0, %ymm0
-; X32-AVX-NEXT: vpshufb %ymm0, %ymm3, %ymm0
-; X32-AVX-NEXT: vpaddb %ymm2, %ymm0, %ymm0
-; X32-AVX-NEXT: vpxor %xmm1, %xmm1, %xmm1
-; X32-AVX-NEXT: vpsadbw %ymm1, %ymm0, %ymm0
-; X32-AVX-NEXT: retl
+; X86-AVX-LABEL: testv4i64:
+; X86-AVX: # %bb.0:
+; X86-AVX-NEXT: vpcmpeqd %ymm1, %ymm1, %ymm1
+; X86-AVX-NEXT: vpaddq %ymm1, %ymm0, %ymm1
+; X86-AVX-NEXT: vpandn %ymm1, %ymm0, %ymm0
+; X86-AVX-NEXT: vpbroadcastb {{.*#+}} ymm1 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
+; X86-AVX-NEXT: vpand %ymm1, %ymm0, %ymm2
+; X86-AVX-NEXT: vbroadcasti128 {{.*#+}} ymm3 = [0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4,0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4]
+; X86-AVX-NEXT: # ymm3 = mem[0,1,0,1]
+; X86-AVX-NEXT: vpshufb %ymm2, %ymm3, %ymm2
+; X86-AVX-NEXT: vpsrlw $4, %ymm0, %ymm0
+; X86-AVX-NEXT: vpand %ymm1, %ymm0, %ymm0
+; X86-AVX-NEXT: vpshufb %ymm0, %ymm3, %ymm0
+; X86-AVX-NEXT: vpaddb %ymm2, %ymm0, %ymm0
+; X86-AVX-NEXT: vpxor %xmm1, %xmm1, %xmm1
+; X86-AVX-NEXT: vpsadbw %ymm1, %ymm0, %ymm0
+; X86-AVX-NEXT: retl
%out = call <4 x i64> @llvm.cttz.v4i64(<4 x i64> %in, i1 0)
ret <4 x i64> %out
}
@@ -240,23 +240,23 @@ define <4 x i64> @testv4i64u(<4 x i64> %in) nounwind {
; BITALG-NEXT: vpsadbw %ymm1, %ymm0, %ymm0
; BITALG-NEXT: retq
;
-; X32-AVX-LABEL: testv4i64u:
-; X32-AVX: # %bb.0:
-; X32-AVX-NEXT: vpcmpeqd %ymm1, %ymm1, %ymm1
-; X32-AVX-NEXT: vpaddq %ymm1, %ymm0, %ymm1
-; X32-AVX-NEXT: vpandn %ymm1, %ymm0, %ymm0
-; X32-AVX-NEXT: vpbroadcastb {{.*#+}} ymm1 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
-; X32-AVX-NEXT: vpand %ymm1, %ymm0, %ymm2
-; X32-AVX-NEXT: vbroadcasti128 {{.*#+}} ymm3 = [0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4,0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4]
-; X32-AVX-NEXT: # ymm3 = mem[0,1,0,1]
-; X32-AVX-NEXT: vpshufb %ymm2, %ymm3, %ymm2
-; X32-AVX-NEXT: vpsrlw $4, %ymm0, %ymm0
-; X32-AVX-NEXT: vpand %ymm1, %ymm0, %ymm0
-; X32-AVX-NEXT: vpshufb %ymm0, %ymm3, %ymm0
-; X32-AVX-NEXT: vpaddb %ymm2, %ymm0, %ymm0
-; X32-AVX-NEXT: vpxor %xmm1, %xmm1, %xmm1
-; X32-AVX-NEXT: vpsadbw %ymm1, %ymm0, %ymm0
-; X32-AVX-NEXT: retl
+; X86-AVX-LABEL: testv4i64u:
+; X86-AVX: # %bb.0:
+; X86-AVX-NEXT: vpcmpeqd %ymm1, %ymm1, %ymm1
+; X86-AVX-NEXT: vpaddq %ymm1, %ymm0, %ymm1
+; X86-AVX-NEXT: vpandn %ymm1, %ymm0, %ymm0
+; X86-AVX-NEXT: vpbroadcastb {{.*#+}} ymm1 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
+; X86-AVX-NEXT: vpand %ymm1, %ymm0, %ymm2
+; X86-AVX-NEXT: vbroadcasti128 {{.*#+}} ymm3 = [0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4,0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4]
+; X86-AVX-NEXT: # ymm3 = mem[0,1,0,1]
+; X86-AVX-NEXT: vpshufb %ymm2, %ymm3, %ymm2
+; X86-AVX-NEXT: vpsrlw $4, %ymm0, %ymm0
+; X86-AVX-NEXT: vpand %ymm1, %ymm0, %ymm0
+; X86-AVX-NEXT: vpshufb %ymm0, %ymm3, %ymm0
+; X86-AVX-NEXT: vpaddb %ymm2, %ymm0, %ymm0
+; X86-AVX-NEXT: vpxor %xmm1, %xmm1, %xmm1
+; X86-AVX-NEXT: vpsadbw %ymm1, %ymm0, %ymm0
+; X86-AVX-NEXT: retl
%out = call <4 x i64> @llvm.cttz.v4i64(<4 x i64> %in, i1 -1)
ret <4 x i64> %out
}
@@ -385,27 +385,27 @@ define <8 x i32> @testv8i32(<8 x i32> %in) nounwind {
; BITALG-NEXT: vpackuswb %ymm2, %ymm0, %ymm0
; BITALG-NEXT: retq
;
-; X32-AVX-LABEL: testv8i32:
-; X32-AVX: # %bb.0:
-; X32-AVX-NEXT: vpcmpeqd %ymm1, %ymm1, %ymm1
-; X32-AVX-NEXT: vpaddd %ymm1, %ymm0, %ymm1
-; X32-AVX-NEXT: vpandn %ymm1, %ymm0, %ymm0
-; X32-AVX-NEXT: vpbroadcastb {{.*#+}} ymm1 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
-; X32-AVX-NEXT: vpand %ymm1, %ymm0, %ymm2
-; X32-AVX-NEXT: vbroadcasti128 {{.*#+}} ymm3 = [0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4,0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4]
-; X32-AVX-NEXT: # ymm3 = mem[0,1,0,1]
-; X32-AVX-NEXT: vpshufb %ymm2, %ymm3, %ymm2
-; X32-AVX-NEXT: vpsrlw $4, %ymm0, %ymm0
-; X32-AVX-NEXT: vpand %ymm1, %ymm0, %ymm0
-; X32-AVX-NEXT: vpshufb %ymm0, %ymm3, %ymm0
-; X32-AVX-NEXT: vpaddb %ymm2, %ymm0, %ymm0
-; X32-AVX-NEXT: vpxor %xmm1, %xmm1, %xmm1
-; X32-AVX-NEXT: vpunpckhdq {{.*#+}} ymm2 = ymm0[2],ymm1[2],ymm0[3],ymm1[3],ymm0[6],ymm1[6],ymm0[7],ymm1[7]
-; X32-AVX-NEXT: vpsadbw %ymm1, %ymm2, %ymm2
-; X32-AVX-NEXT: vpunpckldq {{.*#+}} ymm0 = ymm0[0],ymm1[0],ymm0[1],ymm1[1],ymm0[4],ymm1[4],ymm0[5],ymm1[5]
-; X32-AVX-NEXT: vpsadbw %ymm1, %ymm0, %ymm0
-; X32-AVX-NEXT: vpackuswb %ymm2, %ymm0, %ymm0
-; X32-AVX-NEXT: retl
+; X86-AVX-LABEL: testv8i32:
+; X86-AVX: # %bb.0:
+; X86-AVX-NEXT: vpcmpeqd %ymm1, %ymm1, %ymm1
+; X86-AVX-NEXT: vpaddd %ymm1, %ymm0, %ymm1
+; X86-AVX-NEXT: vpandn %ymm1, %ymm0, %ymm0
+; X86-AVX-NEXT: vpbroadcastb {{.*#+}} ymm1 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
+; X86-AVX-NEXT: vpand %ymm1, %ymm0, %ymm2
+; X86-AVX-NEXT: vbroadcasti128 {{.*#+}} ymm3 = [0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4,0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4]
+; X86-AVX-NEXT: # ymm3 = mem[0,1,0,1]
+; X86-AVX-NEXT: vpshufb %ymm2, %ymm3, %ymm2
+; X86-AVX-NEXT: vpsrlw $4, %ymm0, %ymm0
+; X86-AVX-NEXT: vpand %ymm1, %ymm0, %ymm0
+; X86-AVX-NEXT: vpshufb %ymm0, %ymm3, %ymm0
+; X86-AVX-NEXT: vpaddb %ymm2, %ymm0, %ymm0
+; X86-AVX-NEXT: vpxor %xmm1, %xmm1, %xmm1
+; X86-AVX-NEXT: vpunpckhdq {{.*#+}} ymm2 = ymm0[2],ymm1[2],ymm0[3],ymm1[3],ymm0[6],ymm1[6],ymm0[7],ymm1[7]
+; X86-AVX-NEXT: vpsadbw %ymm1, %ymm2, %ymm2
+; X86-AVX-NEXT: vpunpckldq {{.*#+}} ymm0 = ymm0[0],ymm1[0],ymm0[1],ymm1[1],ymm0[4],ymm1[4],ymm0[5],ymm1[5]
+; X86-AVX-NEXT: vpsadbw %ymm1, %ymm0, %ymm0
+; X86-AVX-NEXT: vpackuswb %ymm2, %ymm0, %ymm0
+; X86-AVX-NEXT: retl
%out = call <8 x i32> @llvm.cttz.v8i32(<8 x i32> %in, i1 0)
ret <8 x i32> %out
}
@@ -534,27 +534,27 @@ define <8 x i32> @testv8i32u(<8 x i32> %in) nounwind {
; BITALG-NEXT: vpackuswb %ymm2, %ymm0, %ymm0
; BITALG-NEXT: retq
;
-; X32-AVX-LABEL: testv8i32u:
-; X32-AVX: # %bb.0:
-; X32-AVX-NEXT: vpcmpeqd %ymm1, %ymm1, %ymm1
-; X32-AVX-NEXT: vpaddd %ymm1, %ymm0, %ymm1
-; X32-AVX-NEXT: vpandn %ymm1, %ymm0, %ymm0
-; X32-AVX-NEXT: vpbroadcastb {{.*#+}} ymm1 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
-; X32-AVX-NEXT: vpand %ymm1, %ymm0, %ymm2
-; X32-AVX-NEXT: vbroadcasti128 {{.*#+}} ymm3 = [0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4,0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4]
-; X32-AVX-NEXT: # ymm3 = mem[0,1,0,1]
-; X32-AVX-NEXT: vpshufb %ymm2, %ymm3, %ymm2
-; X32-AVX-NEXT: vpsrlw $4, %ymm0, %ymm0
-; X32-AVX-NEXT: vpand %ymm1, %ymm0, %ymm0
-; X32-AVX-NEXT: vpshufb %ymm0, %ymm3, %ymm0
-; X32-AVX-NEXT: vpaddb %ymm2, %ymm0, %ymm0
-; X32-AVX-NEXT: vpxor %xmm1, %xmm1, %xmm1
-; X32-AVX-NEXT: vpunpckhdq {{.*#+}} ymm2 = ymm0[2],ymm1[2],ymm0[3],ymm1[3],ymm0[6],ymm1[6],ymm0[7],ymm1[7]
-; X32-AVX-NEXT: vpsadbw %ymm1, %ymm2, %ymm2
-; X32-AVX-NEXT: vpunpckldq {{.*#+}} ymm0 = ymm0[0],ymm1[0],ymm0[1],ymm1[1],ymm0[4],ymm1[4],ymm0[5],ymm1[5]
-; X32-AVX-NEXT: vpsadbw %ymm1, %ymm0, %ymm0
-; X32-AVX-NEXT: vpackuswb %ymm2, %ymm0, %ymm0
-; X32-AVX-NEXT: retl
+; X86-AVX-LABEL: testv8i32u:
+; X86-AVX: # %bb.0:
+; X86-AVX-NEXT: vpcmpeqd %ymm1, %ymm1, %ymm1
+; X86-AVX-NEXT: vpaddd %ymm1, %ymm0, %ymm1
+; X86-AVX-NEXT: vpandn %ymm1, %ymm0, %ymm0
+; X86-AVX-NEXT: vpbroadcastb {{.*#+}} ymm1 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
+; X86-AVX-NEXT: vpand %ymm1, %ymm0, %ymm2
+; X86-AVX-NEXT: vbroadcasti128 {{.*#+}} ymm3 = [0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4,0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4]
+; X86-AVX-NEXT: # ymm3 = mem[0,1,0,1]
+; X86-AVX-NEXT: vpshufb %ymm2, %ymm3, %ymm2
+; X86-AVX-NEXT: vpsrlw $4, %ymm0, %ymm0
+; X86-AVX-NEXT: vpand %ymm1, %ymm0, %ymm0
+; X86-AVX-NEXT: vpshufb %ymm0, %ymm3, %ymm0
+; X86-AVX-NEXT: vpaddb %ymm2, %ymm0, %ymm0
+; X86-AVX-NEXT: vpxor %xmm1, %xmm1, %xmm1
+; X86-AVX-NEXT: vpunpckhdq {{.*#+}} ymm2 = ymm0[2],ymm1[2],ymm0[3],ymm1[3],ymm0[6],ymm1[6],ymm0[7],ymm1[7]
+; X86-AVX-NEXT: vpsadbw %ymm1, %ymm2, %ymm2
+; X86-AVX-NEXT: vpunpckldq {{.*#+}} ymm0 = ymm0[0],ymm1[0],ymm0[1],ymm1[1],ymm0[4],ymm1[4],ymm0[5],ymm1[5]
+; X86-AVX-NEXT: vpsadbw %ymm1, %ymm0, %ymm0
+; X86-AVX-NEXT: vpackuswb %ymm2, %ymm0, %ymm0
+; X86-AVX-NEXT: retl
%out = call <8 x i32> @llvm.cttz.v8i32(<8 x i32> %in, i1 -1)
ret <8 x i32> %out
}
@@ -685,24 +685,24 @@ define <16 x i16> @testv16i16(<16 x i16> %in) nounwind {
; BITALG-NEXT: vpopcntw %ymm0, %ymm0
; BITALG-NEXT: retq
;
-; X32-AVX-LABEL: testv16i16:
-; X32-AVX: # %bb.0:
-; X32-AVX-NEXT: vpcmpeqd %ymm1, %ymm1, %ymm1
-; X32-AVX-NEXT: vpaddw %ymm1, %ymm0, %ymm1
-; X32-AVX-NEXT: vpandn %ymm1, %ymm0, %ymm0
-; X32-AVX-NEXT: vpbroadcastb {{.*#+}} ymm1 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
-; X32-AVX-NEXT: vpand %ymm1, %ymm0, %ymm2
-; X32-AVX-NEXT: vbroadcasti128 {{.*#+}} ymm3 = [0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4,0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4]
-; X32-AVX-NEXT: # ymm3 = mem[0,1,0,1]
-; X32-AVX-NEXT: vpshufb %ymm2, %ymm3, %ymm2
-; X32-AVX-NEXT: vpsrlw $4, %ymm0, %ymm0
-; X32-AVX-NEXT: vpand %ymm1, %ymm0, %ymm0
-; X32-AVX-NEXT: vpshufb %ymm0, %ymm3, %ymm0
-; X32-AVX-NEXT: vpaddb %ymm2, %ymm0, %ymm0
-; X32-AVX-NEXT: vpsllw $8, %ymm0, %ymm1
-; X32-AVX-NEXT: vpaddb %ymm0, %ymm1, %ymm0
-; X32-AVX-NEXT: vpsrlw $8, %ymm0, %ymm0
-; X32-AVX-NEXT: retl
+; X86-AVX-LABEL: testv16i16:
+; X86-AVX: # %bb.0:
+; X86-AVX-NEXT: vpcmpeqd %ymm1, %ymm1, %ymm1
+; X86-AVX-NEXT: vpaddw %ymm1, %ymm0, %ymm1
+; X86-AVX-NEXT: vpandn %ymm1, %ymm0, %ymm0
+; X86-AVX-NEXT: vpbroadcastb {{.*#+}} ymm1 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
+; X86-AVX-NEXT: vpand %ymm1, %ymm0, %ymm2
+; X86-AVX-NEXT: vbroadcasti128 {{.*#+}} ymm3 = [0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4,0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4]
+; X86-AVX-NEXT: # ymm3 = mem[0,1,0,1]
+; X86-AVX-NEXT: vpshufb %ymm2, %ymm3, %ymm2
+; X86-AVX-NEXT: vpsrlw $4, %ymm0, %ymm0
+; X86-AVX-NEXT: vpand %ymm1, %ymm0, %ymm0
+; X86-AVX-NEXT: vpshufb %ymm0, %ymm3, %ymm0
+; X86-AVX-NEXT: vpaddb %ymm2, %ymm0, %ymm0
+; X86-AVX-NEXT: vpsllw $8, %ymm0, %ymm1
+; X86-AVX-NEXT: vpaddb %ymm0, %ymm1, %ymm0
+; X86-AVX-NEXT: vpsrlw $8, %ymm0, %ymm0
+; X86-AVX-NEXT: retl
%out = call <16 x i16> @llvm.cttz.v16i16(<16 x i16> %in, i1 0)
ret <16 x i16> %out
}
@@ -833,24 +833,24 @@ define <16 x i16> @testv16i16u(<16 x i16> %in) nounwind {
; BITALG-NEXT: vpopcntw %ymm0, %ymm0
; BITALG-NEXT: retq
;
-; X32-AVX-LABEL: testv16i16u:
-; X32-AVX: # %bb.0:
-; X32-AVX-NEXT: vpcmpeqd %ymm1, %ymm1, %ymm1
-; X32-AVX-NEXT: vpaddw %ymm1, %ymm0, %ymm1
-; X32-AVX-NEXT: vpandn %ymm1, %ymm0, %ymm0
-; X32-AVX-NEXT: vpbroadcastb {{.*#+}} ymm1 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
-; X32-AVX-NEXT: vpand %ymm1, %ymm0, %ymm2
-; X32-AVX-NEXT: vbroadcasti128 {{.*#+}} ymm3 = [0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4,0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4]
-; X32-AVX-NEXT: # ymm3 = mem[0,1,0,1]
-; X32-AVX-NEXT: vpshufb %ymm2, %ymm3, %ymm2
-; X32-AVX-NEXT: vpsrlw $4, %ymm0, %ymm0
-; X32-AVX-NEXT: vpand %ymm1, %ymm0, %ymm0
-; X32-AVX-NEXT: vpshufb %ymm0, %ymm3, %ymm0
-; X32-AVX-NEXT: vpaddb %ymm2, %ymm0, %ymm0
-; X32-AVX-NEXT: vpsllw $8, %ymm0, %ymm1
-; X32-AVX-NEXT: vpaddb %ymm0, %ymm1, %ymm0
-; X32-AVX-NEXT: vpsrlw $8, %ymm0, %ymm0
-; X32-AVX-NEXT: retl
+; X86-AVX-LABEL: testv16i16u:
+; X86-AVX: # %bb.0:
+; X86-AVX-NEXT: vpcmpeqd %ymm1, %ymm1, %ymm1
+; X86-AVX-NEXT: vpaddw %ymm1, %ymm0, %ymm1
+; X86-AVX-NEXT: vpandn %ymm1, %ymm0, %ymm0
+; X86-AVX-NEXT: vpbroadcastb {{.*#+}} ymm1 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
+; X86-AVX-NEXT: vpand %ymm1, %ymm0, %ymm2
+; X86-AVX-NEXT: vbroadcasti128 {{.*#+}} ymm3 = [0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4,0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4]
+; X86-AVX-NEXT: # ymm3 = mem[0,1,0,1]
+; X86-AVX-NEXT: vpshufb %ymm2, %ymm3, %ymm2
+; X86-AVX-NEXT: vpsrlw $4, %ymm0, %ymm0
+; X86-AVX-NEXT: vpand %ymm1, %ymm0, %ymm0
+; X86-AVX-NEXT: vpshufb %ymm0, %ymm3, %ymm0
+; X86-AVX-NEXT: vpaddb %ymm2, %ymm0, %ymm0
+; X86-AVX-NEXT: vpsllw $8, %ymm0, %ymm1
+; X86-AVX-NEXT: vpaddb %ymm0, %ymm1, %ymm0
+; X86-AVX-NEXT: vpsrlw $8, %ymm0, %ymm0
+; X86-AVX-NEXT: retl
%out = call <16 x i16> @llvm.cttz.v16i16(<16 x i16> %in, i1 -1)
ret <16 x i16> %out
}
@@ -978,21 +978,21 @@ define <32 x i8> @testv32i8(<32 x i8> %in) nounwind {
; BITALG-NEXT: vpopcntb %ymm0, %ymm0
; BITALG-NEXT: retq
;
-; X32-AVX-LABEL: testv32i8:
-; X32-AVX: # %bb.0:
-; X32-AVX-NEXT: vpcmpeqd %ymm1, %ymm1, %ymm1
-; X32-AVX-NEXT: vpaddb %ymm1, %ymm0, %ymm1
-; X32-AVX-NEXT: vpandn %ymm1, %ymm0, %ymm0
-; X32-AVX-NEXT: vpbroadcastb {{.*#+}} ymm1 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
-; X32-AVX-NEXT: vpand %ymm1, %ymm0, %ymm2
-; X32-AVX-NEXT: vbroadcasti128 {{.*#+}} ymm3 = [0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4,0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4]
-; X32-AVX-NEXT: # ymm3 = mem[0,1,0,1]
-; X32-AVX-NEXT: vpshufb %ymm2, %ymm3, %ymm2
-; X32-AVX-NEXT: vpsrlw $4, %ymm0, %ymm0
-; X32-AVX-NEXT: vpand %ymm1, %ymm0, %ymm0
-; X32-AVX-NEXT: vpshufb %ymm0, %ymm3, %ymm0
-; X32-AVX-NEXT: vpaddb %ymm2, %ymm0, %ymm0
-; X32-AVX-NEXT: retl
+; X86-AVX-LABEL: testv32i8:
+; X86-AVX: # %bb.0:
+; X86-AVX-NEXT: vpcmpeqd %ymm1, %ymm1, %ymm1
+; X86-AVX-NEXT: vpaddb %ymm1, %ymm0, %ymm1
+; X86-AVX-NEXT: vpandn %ymm1, %ymm0, %ymm0
+; X86-AVX-NEXT: vpbroadcastb {{.*#+}} ymm1 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
+; X86-AVX-NEXT: vpand %ymm1, %ymm0, %ymm2
+; X86-AVX-NEXT: vbroadcasti128 {{.*#+}} ymm3 = [0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4,0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4]
+; X86-AVX-NEXT: # ymm3 = mem[0,1,0,1]
+; X86-AVX-NEXT: vpshufb %ymm2, %ymm3, %ymm2
+; X86-AVX-NEXT: vpsrlw $4, %ymm0, %ymm0
+; X86-AVX-NEXT: vpand %ymm1, %ymm0, %ymm0
+; X86-AVX-NEXT: vpshufb %ymm0, %ymm3, %ymm0
+; X86-AVX-NEXT: vpaddb %ymm2, %ymm0, %ymm0
+; X86-AVX-NEXT: retl
%out = call <32 x i8> @llvm.cttz.v32i8(<32 x i8> %in, i1 0)
ret <32 x i8> %out
}
@@ -1120,21 +1120,21 @@ define <32 x i8> @testv32i8u(<32 x i8> %in) nounwind {
; BITALG-NEXT: vpopcntb %ymm0, %ymm0
; BITALG-NEXT: retq
;
-; X32-AVX-LABEL: testv32i8u:
-; X32-AVX: # %bb.0:
-; X32-AVX-NEXT: vpcmpeqd %ymm1, %ymm1, %ymm1
-; X32-AVX-NEXT: vpaddb %ymm1, %ymm0, %ymm1
-; X32-AVX-NEXT: vpandn %ymm1, %ymm0, %ymm0
-; X32-AVX-NEXT: vpbroadcastb {{.*#+}} ymm1 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
-; X32-AVX-NEXT: vpand %ymm1, %ymm0, %ymm2
-; X32-AVX-NEXT: vbroadcasti128 {{.*#+}} ymm3 = [0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4,0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4]
-; X32-AVX-NEXT: # ymm3 = mem[0,1,0,1]
-; X32-AVX-NEXT: vpshufb %ymm2, %ymm3, %ymm2
-; X32-AVX-NEXT: vpsrlw $4, %ymm0, %ymm0
-; X32-AVX-NEXT: vpand %ymm1, %ymm0, %ymm0
-; X32-AVX-NEXT: vpshufb %ymm0, %ymm3, %ymm0
-; X32-AVX-NEXT: vpaddb %ymm2, %ymm0, %ymm0
-; X32-AVX-NEXT: retl
+; X86-AVX-LABEL: testv32i8u:
+; X86-AVX: # %bb.0:
+; X86-AVX-NEXT: vpcmpeqd %ymm1, %ymm1, %ymm1
+; X86-AVX-NEXT: vpaddb %ymm1, %ymm0, %ymm1
+; X86-AVX-NEXT: vpandn %ymm1, %ymm0, %ymm0
+; X86-AVX-NEXT: vpbroadcastb {{.*#+}} ymm1 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
+; X86-AVX-NEXT: vpand %ymm1, %ymm0, %ymm2
+; X86-AVX-NEXT: vbroadcasti128 {{.*#+}} ymm3 = [0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4,0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4]
+; X86-AVX-NEXT: # ymm3 = mem[0,1,0,1]
+; X86-AVX-NEXT: vpshufb %ymm2, %ymm3, %ymm2
+; X86-AVX-NEXT: vpsrlw $4, %ymm0, %ymm0
+; X86-AVX-NEXT: vpand %ymm1, %ymm0, %ymm0
+; X86-AVX-NEXT: vpshufb %ymm0, %ymm3, %ymm0
+; X86-AVX-NEXT: vpaddb %ymm2, %ymm0, %ymm0
+; X86-AVX-NEXT: retl
%out = call <32 x i8> @llvm.cttz.v32i8(<32 x i8> %in, i1 -1)
ret <32 x i8> %out
}
@@ -1155,10 +1155,10 @@ define <4 x i64> @foldv4i64() nounwind {
; BITALG-NEXT: vmovaps {{.*#+}} ymm0 = [8,0,64,0]
; BITALG-NEXT: retq
;
-; X32-AVX-LABEL: foldv4i64:
-; X32-AVX: # %bb.0:
-; X32-AVX-NEXT: vmovaps {{.*#+}} ymm0 = [8,0,0,0,64,0,0,0]
-; X32-AVX-NEXT: retl
+; X86-AVX-LABEL: foldv4i64:
+; X86-AVX: # %bb.0:
+; X86-AVX-NEXT: vmovaps {{.*#+}} ymm0 = [8,0,0,0,64,0,0,0]
+; X86-AVX-NEXT: retl
%out = call <4 x i64> @llvm.cttz.v4i64(<4 x i64> <i64 256, i64 -1, i64 0, i64 255>, i1 0)
ret <4 x i64> %out
}
@@ -1179,10 +1179,10 @@ define <4 x i64> @foldv4i64u() nounwind {
; BITALG-NEXT: vmovaps {{.*#+}} ymm0 = [8,0,64,0]
; BITALG-NEXT: retq
;
-; X32-AVX-LABEL: foldv4i64u:
-; X32-AVX: # %bb.0:
-; X32-AVX-NEXT: vmovaps {{.*#+}} ymm0 = [8,0,0,0,64,0,0,0]
-; X32-AVX-NEXT: retl
+; X86-AVX-LABEL: foldv4i64u:
+; X86-AVX: # %bb.0:
+; X86-AVX-NEXT: vmovaps {{.*#+}} ymm0 = [8,0,0,0,64,0,0,0]
+; X86-AVX-NEXT: retl
%out = call <4 x i64> @llvm.cttz.v4i64(<4 x i64> <i64 256, i64 -1, i64 0, i64 255>, i1 -1)
ret <4 x i64> %out
}
diff --git a/llvm/test/CodeGen/X86/x64-cet-intrinsics.ll b/llvm/test/CodeGen/X86/x64-cet-intrinsics.ll
index bf87ae5cac05..f73e26a309aa 100644
--- a/llvm/test/CodeGen/X86/x64-cet-intrinsics.ll
+++ b/llvm/test/CodeGen/X86/x64-cet-intrinsics.ll
@@ -119,7 +119,7 @@ define void @test_wrssd(i32 %a, ptr %__p) {
;
; EGPR-LABEL: test_wrssd:
; EGPR: ## %bb.0: ## %entry
-; EGPR-NEXT: wrssd %edi, (%rsi) ## encoding: [0x62,0xf4,0x7c,0x08,0x66,0x3e]
+; EGPR-NEXT: wrssd %edi, (%rsi) ## EVEX TO LEGACY Compression encoding: [0x0f,0x38,0xf6,0x3e]
; EGPR-NEXT: retq ## encoding: [0xc3]
entry:
tail call void @llvm.x86.wrssd(i32 %a, ptr %__p)
@@ -136,7 +136,7 @@ define void @test_wrssq(i64 %a, ptr %__p) {
;
; EGPR-LABEL: test_wrssq:
; EGPR: ## %bb.0: ## %entry
-; EGPR-NEXT: wrssq %rdi, (%rsi) ## encoding: [0x62,0xf4,0xfc,0x08,0x66,0x3e]
+; EGPR-NEXT: wrssq %rdi, (%rsi) ## EVEX TO LEGACY Compression encoding: [0x48,0x0f,0x38,0xf6,0x3e]
; EGPR-NEXT: retq ## encoding: [0xc3]
entry:
tail call void @llvm.x86.wrssq(i64 %a, ptr %__p)
@@ -153,7 +153,7 @@ define void @test_wrussd(i32 %a, ptr %__p) {
;
; EGPR-LABEL: test_wrussd:
; EGPR: ## %bb.0: ## %entry
-; EGPR-NEXT: wrussd %edi, (%rsi) ## encoding: [0x62,0xf4,0x7d,0x08,0x65,0x3e]
+; EGPR-NEXT: wrussd %edi, (%rsi) ## EVEX TO LEGACY Compression encoding: [0x66,0x0f,0x38,0xf5,0x3e]
; EGPR-NEXT: retq ## encoding: [0xc3]
entry:
tail call void @llvm.x86.wrussd(i32 %a, ptr %__p)
@@ -170,7 +170,7 @@ define void @test_wrussq(i64 %a, ptr %__p) {
;
; EGPR-LABEL: test_wrussq:
; EGPR: ## %bb.0: ## %entry
-; EGPR-NEXT: wrussq %rdi, (%rsi) ## encoding: [0x62,0xf4,0xfd,0x08,0x65,0x3e]
+; EGPR-NEXT: wrussq %rdi, (%rsi) ## EVEX TO LEGACY Compression encoding: [0x66,0x48,0x0f,0x38,0xf5,0x3e]
; EGPR-NEXT: retq ## encoding: [0xc3]
entry:
tail call void @llvm.x86.wrussq(i64 %a, ptr %__p)
diff --git a/llvm/test/DebugInfo/X86/sdag-dangling-dbgvalue.ll b/llvm/test/DebugInfo/X86/sdag-dangling-dbgvalue.ll
index 629c236f6831..600d6d837964 100644
--- a/llvm/test/DebugInfo/X86/sdag-dangling-dbgvalue.ll
+++ b/llvm/test/DebugInfo/X86/sdag-dangling-dbgvalue.ll
@@ -5,6 +5,16 @@
; RUN: -experimental-debug-variable-locations=true \
; RUN: | FileCheck %s --check-prefixes=CHECK,INSTRREF
+; Repeat checks with experimental debuginfo iterators.
+; RUN: llc %s -stop-before finalize-isel -o - \
+; RUN: -try-experimental-debuginfo-iterators \
+; RUN: -experimental-debug-variable-locations=false \
+; RUN: | FileCheck %s --check-prefixes=CHECK,DBGVALUE
+; RUN: llc %s -stop-before finalize-isel -o - \
+; RUN: -try-experimental-debuginfo-iterators \
+; RUN: -experimental-debug-variable-locations=true \
+; RUN: | FileCheck %s --check-prefixes=CHECK,INSTRREF
+
;--------------------------------------------------------------------
; This test case is basically generated from the following C code.
; Compiled with "--target=x86_64-apple-darwin -S -g -O3" to get debug
diff --git a/llvm/test/Instrumentation/AddressSanitizer/asan-stack-safety.ll b/llvm/test/Instrumentation/AddressSanitizer/asan-stack-safety.ll
index 9ba2dce6145f..02c58a1f4922 100644
--- a/llvm/test/Instrumentation/AddressSanitizer/asan-stack-safety.ll
+++ b/llvm/test/Instrumentation/AddressSanitizer/asan-stack-safety.ll
@@ -50,3 +50,21 @@ define void @cmpxchg(i8 %compare_to, i8 %new_value) sanitize_address {
; NOSAFETY: call void @__asan_store1
ret void
}
+
+%struct.S = type { i32, i32 }
+
+; CHECK-LABEL: define %struct.S @exchange(
+; NOSAFETY: call i64 @__asan_stack_malloc
+; CHECK: call ptr @__asan_memcpy(
+; CHECK: call ptr @__asan_memcpy(
+; NOSAFETY: call void @__asan_loadN(
+define %struct.S @exchange(ptr %a, ptr %b) sanitize_address {
+entry:
+ %tmp = alloca %struct.S, align 4
+ call void @llvm.memcpy.p0.p0.i64(ptr align 4 %tmp, ptr align 4 %a, i64 8, i1 false)
+ call void @llvm.memcpy.p0.p0.i64(ptr align 4 %a, ptr align 4 %b, i64 8, i1 false)
+ %ret = load %struct.S, ptr %tmp
+ ret %struct.S %ret
+}
+
+declare void @llvm.memcpy.p0.p0.i64(ptr nocapture, ptr nocapture readonly, i64, i1) nounwind
diff --git a/llvm/test/Instrumentation/AddressSanitizer/debug_info_noninstrumented_alloca2.ll b/llvm/test/Instrumentation/AddressSanitizer/debug_info_noninstrumented_alloca2.ll
index dff4e4be4acf..fb37b8742365 100644
--- a/llvm/test/Instrumentation/AddressSanitizer/debug_info_noninstrumented_alloca2.ll
+++ b/llvm/test/Instrumentation/AddressSanitizer/debug_info_noninstrumented_alloca2.ll
@@ -1,17 +1,18 @@
; Make sure we don't break the IR when moving non-instrumented allocas
-; RUN: opt < %s -passes=asan -S | FileCheck %s
-; RUN: opt < %s -passes=asan -asan-instrument-dynamic-allocas -S | FileCheck %s
+; RUN: opt < %s -passes=asan -asan-use-stack-safety=1 -S | FileCheck %s
+; RUN: opt < %s -passes=asan -asan-use-stack-safety=1 -asan-instrument-dynamic-allocas -S | FileCheck %s
target datalayout = "e-m:o-i64:64-f80:128-n8:16:32:64-S128"
target triple = "x86_64-apple-macosx10.10.0"
-define i32 @foo() sanitize_address {
+define i32 @foo(i64 %i) sanitize_address {
entry:
%non_instrumented1 = alloca i32, align 4
%t = load i32, ptr %non_instrumented1, align 4
- %instrumented = alloca i32, align 4
- %ptr = ptrtoint ptr %instrumented to i32
+ %instrumented = alloca [2 x i32], align 4
+ %ai = getelementptr inbounds [2 x i32], ptr %instrumented, i64 0, i64 %i
+ store volatile i8 0, ptr %ai, align 4
ret i32 %t
}
diff --git a/llvm/test/Instrumentation/AddressSanitizer/hoist-argument-init-insts.ll b/llvm/test/Instrumentation/AddressSanitizer/hoist-argument-init-insts.ll
index 5fab725565c9..5ecd4dc7fb94 100644
--- a/llvm/test/Instrumentation/AddressSanitizer/hoist-argument-init-insts.ll
+++ b/llvm/test/Instrumentation/AddressSanitizer/hoist-argument-init-insts.ll
@@ -1,4 +1,4 @@
-; RUN: opt < %s -passes=asan -S | FileCheck %s
+; RUN: opt < %s -passes=asan -asan-use-after-scope -asan-use-stack-safety=1 -S | FileCheck %s
; Source (-O0 -fsanitize=address -fsanitize-address-use-after-scope):
;; struct S { int x, y; };
@@ -18,15 +18,16 @@ target triple = "x86_64-apple-macosx10.14.0"
; CHECK-LABEL: define {{.*}} @_Z4swapP1SS0_b(
; First come the argument allocas.
-; CHECK: [[argA:%.*]] = alloca ptr,
-; CHECK-NEXT: [[argB:%.*]] = alloca ptr,
-; CHECK-NEXT: [[argDoit:%.*]] = alloca i8,
+; CHECK: %a.addr = alloca ptr, align 8
+; CHECK-NEXT: %b.addr = alloca ptr, align 8
+; CHECK-NEXT: %doit.addr = alloca i8, align 1
; Next, the stores into the argument allocas.
-; CHECK-NEXT: store ptr {{.*}}, ptr [[argA]]
-; CHECK-NEXT: store ptr {{.*}}, ptr [[argB]]
+; CHECK-NEXT: store ptr {{.*}}, ptr %a.addr
+; CHECK-NEXT: store ptr {{.*}}, ptr %b.addr
; CHECK-NEXT: [[frombool:%.*]] = zext i1 {{.*}} to i8
-; CHECK-NEXT: store i8 [[frombool]], ptr [[argDoit]]
+; CHECK-NEXT: store i8 %frombool, ptr %doit.addr, align 1
+; CHECK-NEXT: [[stack_base:%.*]] = alloca i64, align 8
define void @_Z4swapP1SS0_b(ptr %a, ptr %b, i1 zeroext %doit) sanitize_address {
entry:
@@ -65,9 +66,12 @@ return: ; preds = %if.end, %if.then
; CHECK: [[argA:%.*]] = alloca ptr,
; CHECK-NEXT: [[argB:%.*]] = alloca ptr,
; CHECK-NEXT: [[argDoit:%.*]] = alloca i8,
+; CHECK-NEXT: %tmp = alloca %struct.S, align 4
; CHECK-NEXT: store ptr {{.*}}, ptr [[argA]]
; CHECK-NEXT: store ptr {{.*}}, ptr [[argB]]
-; CHECK-NEXT: [[stack_base:%.*]] = alloca i64
+; CHECK-NEXT: %0 = load i8, ptr %doit.addr, align 1
+; CHECK-NEXT: %frombool = zext i1 %doit to i8
+; CHECK-NEXT: store i8 %frombool, ptr %doit.addr, align 1
define void @func_with_load_in_arginit_sequence(ptr %a, ptr %b, i1 zeroext %doit) sanitize_address {
entry:
%a.addr = alloca ptr, align 8
@@ -108,10 +112,13 @@ return: ; preds = %if.end, %if.then
; CHECK: [[argA:%.*]] = alloca ptr,
; CHECK-NEXT: [[argB:%.*]] = alloca ptr,
; CHECK-NEXT: [[argDoit:%.*]] = alloca i8,
+; CHECK-NEXT: %tmp = alloca %struct.S, align 4
+; CHECK-NEXT: %tmp2 = alloca %struct.S, align 4
; CHECK-NEXT: store ptr {{.*}}, ptr [[argA]]
; CHECK-NEXT: store ptr {{.*}}, ptr [[argB]]
; CHECK-NEXT: [[frombool:%.*]] = zext i1 {{.*}} to i8
; CHECK-NEXT: store i8 [[frombool]], ptr [[argDoit]]
+; CHECK-NEXT: %0 = load i8, ptr %doit.addr, align 1
define void @func_with_multiple_interesting_allocas(ptr %a, ptr %b, i1 zeroext %doit) sanitize_address {
entry:
%a.addr = alloca ptr, align 8
diff --git a/llvm/test/Instrumentation/AddressSanitizer/lifetime.ll b/llvm/test/Instrumentation/AddressSanitizer/lifetime.ll
index b57605a65567..7f158487a47a 100644
--- a/llvm/test/Instrumentation/AddressSanitizer/lifetime.ll
+++ b/llvm/test/Instrumentation/AddressSanitizer/lifetime.ll
@@ -8,23 +8,24 @@ target triple = "x86_64-unknown-linux-gnu"
declare void @llvm.lifetime.start.p0(i64, ptr nocapture) nounwind
declare void @llvm.lifetime.end.p0(i64, ptr nocapture) nounwind
-define void @lifetime_no_size() sanitize_address {
- ; CHECK-LABEL: define void @lifetime_no_size()
+; CHECK-LABEL: define void @lifetime_no_size(
+define void @lifetime_no_size(i64 %i) sanitize_address {
entry:
- %i = alloca i32, align 4
+ %a = alloca [2 x i32], align 4
- ; Poison memory in prologue: F1F1F1F104F3F3F3
- ; CHECK: store i64 -868083100587789839, ptr %{{[0-9]+}}
+ ; Poison memory in prologue: 0xf3f3f300f1f1f1f1
+ ; CHECK: store i64 -868083117767659023, ptr %[[#]]
- call void @llvm.lifetime.start.p0(i64 -1, ptr %i)
+ call void @llvm.lifetime.start.p0(i64 -1, ptr %a)
; Check that lifetime with no size are ignored.
; CHECK-NOT: store
; CHECK: call void @llvm.lifetime.start
- store volatile i8 0, ptr %i
+ %ai = getelementptr inbounds [2 x i32], ptr %a, i64 0, i64 %i
+ store volatile i8 0, ptr %ai, align 4
; CHECK: store volatile
- call void @llvm.lifetime.end.p0(i64 -1, ptr %i)
+ call void @llvm.lifetime.end.p0(i64 -1, ptr %a)
; Check that lifetime with no size are ignored.
; CHECK-NOT: store
; CHECK: call void @llvm.lifetime.end
@@ -128,24 +129,26 @@ bb1:
}
; Check that arguments of lifetime may come from getelementptr nodes.
-define void @getelementptr_args() sanitize_address{
+define void @getelementptr_args(i64 %i) sanitize_address{
; CHECK-LABEL: define void @getelementptr_args
entry:
%x = alloca [1024 x i8], align 16
- %d = alloca ptr, align 8
+ %a = alloca [2 x ptr], align 8
; F1F1F1F1
; CHECK: store i32 -235802127, ptr %{{[0-9]+}}
- ; F3F3F3F3F3F3F3F3
- ; CHECK: store i64 -868082074056920077, ptr %{{[0-9]+}}
- ; F3F3F3F3F3F3F3F3
- ; CHECK: store i64 -868082074056920077, ptr %{{[0-9]+}}
+ ; CHECK: call void @__asan_set_shadow_f8(i64 %[[#]], i64 128)
+ ; 0xf2f2f2f2f2f2f2f2
+ ; CHECK: store i64 -940422246894996750, ptr %[[#]]
+ ; 0xf2f2f2f2f2f2f2f2
+ ; CHECK: store i64 -940422246894996750, ptr %[[#]]
call void @llvm.lifetime.start.p0(i64 1024, ptr %x)
; CHECK: call void @__asan_set_shadow_00(i64 %{{[0-9]+}}, i64 128)
; CHECK-NEXT: call void @llvm.lifetime.start
- store ptr %x, ptr %d, align 8
+ %ai = getelementptr inbounds [2 x ptr], ptr %a, i64 0, i64 %i
+ store ptr %x, ptr %ai, align 8
; CHECK: store ptr
call void @llvm.lifetime.end.p0(i64 1024, ptr %x)
@@ -154,6 +157,7 @@ entry:
ret void
; CHECK: call void @__asan_set_shadow_00(i64 %{{[0-9]+}}, i64 148)
+ ; CHECK: store i16 0, ptr %[[#]], align 1
; CHECK-NEXT: ret void
}
diff --git a/llvm/test/Instrumentation/InstrProfiling/platform.ll b/llvm/test/Instrumentation/InstrProfiling/platform.ll
index 81912f3c0b6a..9c76a5caf2a5 100644
--- a/llvm/test/Instrumentation/InstrProfiling/platform.ll
+++ b/llvm/test/Instrumentation/InstrProfiling/platform.ll
@@ -8,6 +8,7 @@
; RUN: opt < %s -mtriple=x86_64-pc-solaris -passes=instrprof -S | FileCheck %s -check-prefixes=SOLARIS,ELF
; RUN: opt < %s -mtriple=x86_64-pc-windows -passes=instrprof -S | FileCheck %s -check-prefix=WINDOWS
; RUN: opt < %s -mtriple=powerpc64-ibm-aix-xcoff -passes=instrprof -S | FileCheck %s -check-prefix=AIX
+; RUN: opt < %s -mtriple=arm-elf -passes=instrprof -S | FileCheck %s -check-prefix=BAREMETAL
@__profn_foo = private constant [3 x i8] c"foo"
; MACHO-NOT: __profn_foo
@@ -46,6 +47,7 @@ declare void @llvm.instrprof.increment(ptr, i64, i32, i32)
; PS4-NOT: define internal void @__llvm_profile_register_functions
; WINDOWS-NOT: define internal void @__llvm_profile_register_functions
; AIX-NOT: define internal void @__llvm_profile_register_functions
+; BAREMETAL-NOT: define internal void @__llvm_profile_register_functions
;; PR38340: When dynamic registration is used, we had a bug where we'd register
;; something that's not a __profd_* variable.
@@ -57,3 +59,4 @@ declare void @llvm.instrprof.increment(ptr, i64, i32, i32)
; PS4-NOT: define internal void @__llvm_profile_init
; WINDOWS-NOT: define internal void @__llvm_profile_init
; AIX-NOT: define internal void @__llvm_profile_init
+; BAREMETAL-NOT: define internal void @__llvm_profile_init
diff --git a/llvm/test/Instrumentation/InstrProfiling/profiling.ll b/llvm/test/Instrumentation/InstrProfiling/profiling.ll
index caff611b98cc..e7678a9dce08 100644
--- a/llvm/test/Instrumentation/InstrProfiling/profiling.ll
+++ b/llvm/test/Instrumentation/InstrProfiling/profiling.ll
@@ -1,7 +1,6 @@
;; Test runtime symbols and various linkages.
; RUN: opt < %s -mtriple=x86_64-apple-macosx10.10.0 -passes=instrprof -S | FileCheck %s --check-prefixes=MACHO
-; RUN: opt < %s -mtriple=x86_64 -passes=instrprof -S | FileCheck %s --check-prefix=ELF_GENERIC
; RUN: opt < %s -mtriple=x86_64-unknown-linux -passes=instrprof -S | FileCheck %s --check-prefixes=ELF,ELFRT
; RUN: opt < %s -mtriple=x86_64-unknown-fuchsia -passes=instrprof -S | FileCheck %s --check-prefixes=ELF,ELFRT
; RUN: opt < %s -mtriple=x86_64-scei-ps4 -passes=instrprof -S | FileCheck %s --check-prefixes=ELF,PS
@@ -9,12 +8,13 @@
; RUN: opt < %s -mtriple=x86_64-pc-win32-coff -passes=instrprof -S | FileCheck %s --check-prefixes=COFF
; RUN: opt < %s -mtriple=powerpc64-ibm-aix-xcoff -passes=instrprof -S | FileCheck %s --check-prefixes=XCOFF
; RUN: opt < %s -mtriple=x86_64-pc-freebsd13 -passes=instrprof -S | FileCheck %s --check-prefixes=ELF
+; RUN: opt < %s -mtriple=wasm32-unknown-unknown -passes=instrprof -S | FileCheck %s --check-prefix=WASM
; MACHO: @__llvm_profile_runtime = external hidden global i32
-; ELF_GENERIC: @__llvm_profile_runtime = external hidden global i32
; ELF-NOT: @__llvm_profile_runtime = external global i32
; XCOFF-NOT: @__llvm_profile_runtime = external hidden global i32
; COFF: @__llvm_profile_runtime = external hidden global i32
+; WASM: @__llvm_profile_runtime = external hidden global i32
; ELF: $__profc_foo = comdat nodeduplicate
; ELF: $__profc_foo_weak = comdat nodeduplicate
@@ -98,10 +98,10 @@ define available_externally void @foo_extern() {
declare void @llvm.instrprof.increment(ptr, i64, i32, i32)
; ELF: @llvm.compiler.used = appending global {{.*}} [{{.*}}ptr @__profd_foo, ptr @__profd_foo_weak, ptr @"__profd_linkage.ll:foo_internal", ptr @__profd_foo_inline, ptr @__profd_foo_extern{{.*}}]
-; ELF_GENERIC: @llvm.compiler.used = appending global [6 x ptr] [ptr @__llvm_profile_runtime, ptr @__profd_foo, ptr @__profd_foo_weak, ptr @"__profd_linkage.ll:foo_internal", ptr @__profd_foo_inline, ptr @__profd_foo_extern]
; MACHO: @llvm.compiler.used = appending global [6 x ptr] [ptr @__llvm_profile_runtime_user, ptr @__profd_foo, {{.*}}
; COFF: @llvm.compiler.used = appending global [6 x ptr] [ptr @__llvm_profile_runtime_user, ptr @__profd_foo, ptr @__profd_foo_weak, ptr @"__profd_linkage.ll:foo_internal", ptr @__profd_foo_inline, ptr @__profd_foo_extern]
; XCOFF: @llvm.used = appending global [6 x ptr] [ptr @__profd_foo, ptr @__profd_foo_weak, ptr @"__profd_linkage.ll:foo_internal", ptr @__profd_foo_inline, ptr @__profd_foo_extern, ptr @__llvm_prf_nm]
+; WASM: @llvm.used = appending global [7 x ptr] [ptr @__llvm_profile_runtime_user, ptr @__profd_foo, ptr @__profd_foo_weak, ptr @"__profd_linkage.ll:foo_internal", ptr @__profd_foo_inline, ptr @__profd_foo_extern, ptr @__llvm_prf_nm]
; MACHO: define linkonce_odr hidden i32 @__llvm_profile_runtime_user() {{.*}} {
; MACHO: %[[REG:.*]] = load i32, ptr @__llvm_profile_runtime
@@ -114,12 +114,11 @@ declare void @llvm.instrprof.increment(ptr, i64, i32, i32)
; PS: %[[REG:.*]] = load i32, ptr @__llvm_profile_runtime
; XCOFF-NOT: define .* __llvm_profile_runtime_user
-; ELF_GENERIC: define internal void @__llvm_profile_register_functions() unnamed_addr {
-; ELF_GENERIC-NEXT: call void @__llvm_profile_register_function(ptr @__llvm_profile_runtime)
-; ELF_GENERIC-NEXT: call void @__llvm_profile_register_function(ptr @__profd_foo)
-; ELF_GENERIC-NEXT: call void @__llvm_profile_register_function(ptr @__profd_foo_weak)
-; ELF_GENERIC: call void @__llvm_profile_register_names_function(ptr @__llvm_prf_nm
-; ELF_GENERIC-NEXT: ret void
-; ELF_GENERIC-NEXT: }
+; WASM: define internal void @__llvm_profile_register_functions() unnamed_addr {
+; WASM-NEXT: call void @__llvm_profile_register_function(ptr @__profd_foo)
+; WASM-NEXT: call void @__llvm_profile_register_function(ptr @__profd_foo_weak)
+; WASM: call void @__llvm_profile_register_names_function(ptr @__llvm_prf_nm
+; WASM-NEXT: ret void
+; WASM-NEXT: }
; XCOFF-NOT: internal void @__llvm_profile_register_functions()
diff --git a/llvm/test/MC/AMDGPU/gfx12_asm_vflat.s b/llvm/test/MC/AMDGPU/gfx12_asm_vflat.s
index 95d352b421a2..daf25d314c78 100644
--- a/llvm/test/MC/AMDGPU/gfx12_asm_vflat.s
+++ b/llvm/test/MC/AMDGPU/gfx12_asm_vflat.s
@@ -1920,6 +1920,33 @@ global_store_d16_hi_b8 v[0:1], v2, off offset:64
global_store_d16_hi_b8 v[3:4], v1, off
// GFX12: encoding: [0x7c,0x00,0x09,0xee,0x00,0x00,0x80,0x00,0x03,0x00,0x00,0x00]
+global_inv
+// GFX12: encoding: [0x7c,0xc0,0x0a,0xee,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00]
+
+global_inv scope:SCOPE_DEV
+// GFX12: encoding: [0x7c,0xc0,0x0a,0xee,0x00,0x00,0x08,0x00,0x00,0x00,0x00,0x00]
+
+global_inv scope:SCOPE_SYS
+// GFX12: encoding: [0x7c,0xc0,0x0a,0xee,0x00,0x00,0x0c,0x00,0x00,0x00,0x00,0x00]
+
+global_wb
+// GFX12: encoding: [0x7c,0x00,0x0b,0xee,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00]
+
+global_wb scope:SCOPE_DEV
+// GFX12: encoding: [0x7c,0x00,0x0b,0xee,0x00,0x00,0x08,0x00,0x00,0x00,0x00,0x00]
+
+global_wb scope:SCOPE_SYS
+// GFX12: encoding: [0x7c,0x00,0x0b,0xee,0x00,0x00,0x0c,0x00,0x00,0x00,0x00,0x00]
+
+global_wbinv
+// GFX12: encoding: [0x7c,0xc0,0x13,0xee,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00]
+
+global_wbinv scope:SCOPE_DEV
+// GFX12: encoding: [0x7c,0xc0,0x13,0xee,0x00,0x00,0x08,0x00,0x00,0x00,0x00,0x00]
+
+global_wbinv scope:SCOPE_SYS
+// GFX12: encoding: [0x7c,0xc0,0x13,0xee,0x00,0x00,0x0c,0x00,0x00,0x00,0x00,0x00]
+
scratch_load_b128 v[1:4], off, off offset:-64
// GFX12: encoding: [0x7c,0xc0,0x05,0xed,0x01,0x00,0x00,0x00,0x00,0xc0,0xff,0xff]
diff --git a/llvm/test/MC/Disassembler/AMDGPU/gfx12_dasm_vflat.txt b/llvm/test/MC/Disassembler/AMDGPU/gfx12_dasm_vflat.txt
index f4038cf10f50..7365adb864fd 100644
--- a/llvm/test/MC/Disassembler/AMDGPU/gfx12_dasm_vflat.txt
+++ b/llvm/test/MC/Disassembler/AMDGPU/gfx12_dasm_vflat.txt
@@ -1137,6 +1137,33 @@
# GFX12: global_store_d16_hi_b8 v[3:4], v1, off ; encoding: [0x7c,0x00,0x09,0xee,0x00,0x00,0x80,0x00,0x03,0x00,0x00,0x00]
0x7c,0x00,0x09,0xee,0x00,0x00,0x80,0x00,0x03,0x00,0x00,0x00
+# GFX12: global_inv ; encoding: [0x7c,0xc0,0x0a,0xee,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00]
+0x7c,0xc0,0x0a,0xee,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00
+
+# GFX12: global_inv scope:SCOPE_DEV ; encoding: [0x7c,0xc0,0x0a,0xee,0x00,0x00,0x08,0x00,0x00,0x00,0x00,0x00]
+0x7c,0xc0,0x0a,0xee,0x00,0x00,0x08,0x00,0x00,0x00,0x00,0x00
+
+# GFX12: global_inv scope:SCOPE_SYS ; encoding: [0x7c,0xc0,0x0a,0xee,0x00,0x00,0x0c,0x00,0x00,0x00,0x00,0x00]
+0x7c,0xc0,0x0a,0xee,0x00,0x00,0x0c,0x00,0x00,0x00,0x00,0x00
+
+# GFX12: global_wb ; encoding: [0x7c,0x00,0x0b,0xee,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00]
+0x7c,0x00,0x0b,0xee,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00
+
+# GFX12: global_wb scope:SCOPE_DEV ; encoding: [0x7c,0x00,0x0b,0xee,0x00,0x00,0x08,0x00,0x00,0x00,0x00,0x00]
+0x7c,0x00,0x0b,0xee,0x00,0x00,0x08,0x00,0x00,0x00,0x00,0x00
+
+# GFX12: global_wb scope:SCOPE_SYS ; encoding: [0x7c,0x00,0x0b,0xee,0x00,0x00,0x0c,0x00,0x00,0x00,0x00,0x00]
+0x7c,0x00,0x0b,0xee,0x00,0x00,0x0c,0x00,0x00,0x00,0x00,0x00
+
+# GFX12: global_wbinv ; encoding: [0x7c,0xc0,0x13,0xee,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00]
+0x7c,0xc0,0x13,0xee,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00
+
+# GFX12: global_wbinv scope:SCOPE_DEV ; encoding: [0x7c,0xc0,0x13,0xee,0x00,0x00,0x08,0x00,0x00,0x00,0x00,0x00]
+0x7c,0xc0,0x13,0xee,0x00,0x00,0x08,0x00,0x00,0x00,0x00,0x00
+
+# GFX12: global_wbinv scope:SCOPE_SYS ; encoding: [0x7c,0xc0,0x13,0xee,0x00,0x00,0x0c,0x00,0x00,0x00,0x00,0x00]
+0x7c,0xc0,0x13,0xee,0x00,0x00,0x0c,0x00,0x00,0x00,0x00,0x00
+
# GFX12: scratch_load_b128 v[1:4], off, off offset:-64 ; encoding: [0x7c,0xc0,0x05,0xed,0x01,0x00,0x00,0x00,0x00,0xc0,0xff,0xff]
0x7c,0xc0,0x05,0xed,0x01,0x00,0x00,0x00,0x00,0xc0,0xff,0xff
diff --git a/llvm/test/MC/RISCV/function-call.s b/llvm/test/MC/RISCV/function-call.s
index e0650593ec91..1521ae7e55e1 100644
--- a/llvm/test/MC/RISCV/function-call.s
+++ b/llvm/test/MC/RISCV/function-call.s
@@ -50,7 +50,7 @@ call foo@plt
# RELOC: R_RISCV_CALL_PLT foo 0x0
# INSTR: auipc ra, 0
# INSTR: jalr ra
-# FIXUP: fixup A - offset: 0, value: foo@plt, kind: fixup_riscv_call_plt
+# FIXUP: fixup A - offset: 0, value: foo, kind: fixup_riscv_call_plt
# Ensure that an explicit register operand can be parsed.
@@ -64,4 +64,4 @@ call a0, foo@plt
# RELOC: R_RISCV_CALL_PLT foo 0x0
# INSTR: auipc a0, 0
# INSTR: jalr a0
-# FIXUP: fixup A - offset: 0, value: foo@plt, kind: fixup_riscv_call_plt
+# FIXUP: fixup A - offset: 0, value: foo, kind: fixup_riscv_call_plt
diff --git a/llvm/test/MC/RISCV/tail-call.s b/llvm/test/MC/RISCV/tail-call.s
index 3670c7749ab1..c94af672edda 100644
--- a/llvm/test/MC/RISCV/tail-call.s
+++ b/llvm/test/MC/RISCV/tail-call.s
@@ -50,4 +50,4 @@ tail foo@plt
# RELOC: R_RISCV_CALL_PLT foo 0x0
# INSTR: auipc t1, 0
# INSTR: jr t1
-# FIXUP: fixup A - offset: 0, value: foo@plt, kind:
+# FIXUP: fixup A - offset: 0, value: foo, kind: fixup_riscv_call_plt
diff --git a/llvm/test/Other/print-at-pass-number.ll b/llvm/test/Other/print-at-pass-number.ll
index 8b2d3144e092..b9c09a36ca1f 100644
--- a/llvm/test/Other/print-at-pass-number.ll
+++ b/llvm/test/Other/print-at-pass-number.ll
@@ -1,13 +1,9 @@
; RUN: opt -passes="loop(indvars,loop-deletion,loop-unroll-full)" -print-pass-numbers -S -o /dev/null %s 2>&1 | FileCheck %s --check-prefix=NUMBER
-; RUN: opt -passes="loop(indvars,loop-deletion,loop-unroll-full)" -print-module-scope -print-at-pass-number=3 -S -o /dev/null %s 2>&1 | FileCheck %s --check-prefix=AT
-; RUN: opt -passes="loop(indvars,loop-deletion,loop-unroll-full)" -print-module-scope -print-at-pass-number=4 -S -o /dev/null %s 2>&1 | FileCheck %s --check-prefix=AT-INVALIDATE
+; RUN: opt -passes="loop(indvars,loop-deletion,loop-unroll-full)" -print-module-scope -print-before-pass-number=3 -S -o /dev/null %s 2>&1 | FileCheck %s --check-prefix=BEFORE
define i32 @bar(i32 %arg) {
-; AT: *** IR Dump At 3-IndVarSimplifyPass on bb1 ***
-; AT: define i32 @bar(i32 %arg) {
-
-; AT-INVALIDATE: *** IR Dump At 4-LoopDeletionPass on bb1 (invalidated) ***
-; AT-INVALIDATE: define i32 @bar(i32 %arg) {
+; BEFORE: *** IR Dump Before 3-IndVarSimplifyPass on bb1 ***
+; BEFORE: define i32 @bar(i32 %arg) {
bb:
br label %bb1
diff --git a/llvm/test/Transforms/ConstraintElimination/monotonic-int-phis-signed.ll b/llvm/test/Transforms/ConstraintElimination/monotonic-int-phis-signed.ll
index 1e95fab5c10c..7273469fc59e 100644
--- a/llvm/test/Transforms/ConstraintElimination/monotonic-int-phis-signed.ll
+++ b/llvm/test/Transforms/ConstraintElimination/monotonic-int-phis-signed.ll
@@ -15,10 +15,8 @@ define void @signed_iv_step_1(i64 %end) {
; CHECK-NEXT: [[CMP_I_NOT:%.*]] = icmp eq i64 [[IV]], [[END]]
; CHECK-NEXT: br i1 [[CMP_I_NOT]], label [[EXIT]], label [[LOOP_LATCH]]
; CHECK: loop.latch:
-; CHECK-NEXT: [[CMP2:%.*]] = icmp slt i64 [[IV]], [[END]]
-; CHECK-NEXT: call void @use(i1 [[CMP2]])
-; CHECK-NEXT: [[CMP3:%.*]] = icmp sge i64 [[IV]], -10
-; CHECK-NEXT: call void @use(i1 [[CMP3]])
+; CHECK-NEXT: call void @use(i1 true)
+; CHECK-NEXT: call void @use(i1 true)
; CHECK-NEXT: br label [[LOOP]]
; CHECK: exit:
; CHECK-NEXT: ret void
@@ -141,10 +139,8 @@ define void @signed_iv_step_4_start_4(i64 %count) {
; CHECK-NEXT: [[CMP_I_NOT:%.*]] = icmp eq i64 [[IV]], [[END]]
; CHECK-NEXT: br i1 [[CMP_I_NOT]], label [[EXIT]], label [[LOOP_LATCH]]
; CHECK: loop.latch:
-; CHECK-NEXT: [[CMP2:%.*]] = icmp slt i64 [[IV]], [[END]]
-; CHECK-NEXT: call void @use(i1 [[CMP2]])
-; CHECK-NEXT: [[CMP3:%.*]] = icmp sge i64 [[IV]], 4
-; CHECK-NEXT: call void @use(i1 [[CMP3]])
+; CHECK-NEXT: call void @use(i1 true)
+; CHECK-NEXT: call void @use(i1 true)
; CHECK-NEXT: br label [[LOOP]]
; CHECK: exit:
; CHECK-NEXT: ret void
@@ -226,10 +222,8 @@ define void @signed_iv_step_minus1(i64 %end) {
; CHECK-NEXT: [[CMP_I_NOT:%.*]] = icmp eq i64 [[IV]], [[END]]
; CHECK-NEXT: br i1 [[CMP_I_NOT]], label [[EXIT]], label [[LOOP_LATCH]]
; CHECK: loop.latch:
-; CHECK-NEXT: [[CMP2:%.*]] = icmp sgt i64 [[IV]], [[END]]
-; CHECK-NEXT: call void @use(i1 [[CMP2]])
-; CHECK-NEXT: [[CMP3:%.*]] = icmp sle i64 [[IV]], 10
-; CHECK-NEXT: call void @use(i1 [[CMP3]])
+; CHECK-NEXT: call void @use(i1 true)
+; CHECK-NEXT: call void @use(i1 true)
; CHECK-NEXT: br label [[LOOP]]
; CHECK: exit:
; CHECK-NEXT: ret void
diff --git a/llvm/test/Transforms/InstCombine/icmp.ll b/llvm/test/Transforms/InstCombine/icmp.ll
index 1f554c7b6025..10ab1fe11834 100644
--- a/llvm/test/Transforms/InstCombine/icmp.ll
+++ b/llvm/test/Transforms/InstCombine/icmp.ll
@@ -815,6 +815,51 @@ define i1 @test46(i32 %X, i32 %Y, i32 %Z) {
ret i1 %C
}
+define i1 @test46_multiuse1(i32 %X, i32 %Y, i32 %Z) {
+; CHECK-LABEL: @test46_multiuse1(
+; CHECK-NEXT: [[A:%.*]] = ashr exact i32 [[X:%.*]], [[Z:%.*]]
+; CHECK-NEXT: call void @use_i32(i32 [[A]])
+; CHECK-NEXT: [[C:%.*]] = icmp ult i32 [[X]], [[Y:%.*]]
+; CHECK-NEXT: ret i1 [[C]]
+;
+ %A = ashr exact i32 %X, %Z
+ call void @use_i32(i32 %A)
+ %B = ashr exact i32 %Y, %Z
+ %C = icmp ult i32 %A, %B
+ ret i1 %C
+}
+
+define i1 @test46_multiuse2(i32 %X, i32 %Y, i32 %Z) {
+; CHECK-LABEL: @test46_multiuse2(
+; CHECK-NEXT: [[B:%.*]] = ashr exact i32 [[Y:%.*]], [[Z:%.*]]
+; CHECK-NEXT: call void @use_i32(i32 [[B]])
+; CHECK-NEXT: [[C:%.*]] = icmp ult i32 [[X:%.*]], [[Y]]
+; CHECK-NEXT: ret i1 [[C]]
+;
+ %A = ashr exact i32 %X, %Z
+ %B = ashr exact i32 %Y, %Z
+ call void @use_i32(i32 %B)
+ %C = icmp ult i32 %A, %B
+ ret i1 %C
+}
+
+define i1 @test46_multiuse3(i32 %X, i32 %Y, i32 %Z) {
+; CHECK-LABEL: @test46_multiuse3(
+; CHECK-NEXT: [[A:%.*]] = ashr exact i32 [[X:%.*]], [[Z:%.*]]
+; CHECK-NEXT: call void @use_i32(i32 [[A]])
+; CHECK-NEXT: [[B:%.*]] = ashr exact i32 [[Y:%.*]], [[Z]]
+; CHECK-NEXT: call void @use_i32(i32 [[B]])
+; CHECK-NEXT: [[C:%.*]] = icmp ult i32 [[A]], [[B]]
+; CHECK-NEXT: ret i1 [[C]]
+;
+ %A = ashr exact i32 %X, %Z
+ call void @use_i32(i32 %A)
+ %B = ashr exact i32 %Y, %Z
+ call void @use_i32(i32 %B)
+ %C = icmp ult i32 %A, %B
+ ret i1 %C
+}
+
; PR9343 #5
define i1 @test47(i32 %X, i32 %Y, i32 %Z) {
; CHECK-LABEL: @test47(
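The three multiuse tests added above exercise the existing fold from @test46 — icmp ult (ashr exact X, Z), (ashr exact Y, Z) simplifying to icmp ult X, Y — when one or both shifted values have additional uses. A minimal standalone C++ sketch (illustration only, not part of the patch) of why the fold is sound: the exact flag guarantees no set bits are shifted out, so the unsigned order of the shifted values matches the unsigned order of the originals.

#include <cassert>
#include <cstdint>

// Mirrors '%A = ashr exact i32 %X, %Z' followed by 'icmp ult i32 %A, %B':
// under the exact precondition (the shifted-out bits are zero), comparing the
// shifted values is equivalent to comparing X and Y directly.
static bool foldHolds(uint32_t X, uint32_t Y, unsigned Z) {
  assert(Z < 32 && "shift amount must be in range");
  uint32_t Mask = (Z == 0) ? 0u : ((1u << Z) - 1u);
  assert((X & Mask) == 0 && (Y & Mask) == 0 && "exact-shift precondition");
  // Signed >> is an arithmetic shift on mainstream targets (guaranteed in C++20).
  uint32_t A = static_cast<uint32_t>(static_cast<int32_t>(X) >> Z);
  uint32_t B = static_cast<uint32_t>(static_cast<int32_t>(Y) >> Z);
  return (A < B) == (X < Y);
}

int main() {
  assert(foldHolds(0x10u, 0x20u, 4));
  assert(foldHolds(0xfffffff0u, 0x20u, 4)); // X is negative when viewed as signed
  assert(foldHolds(0x0u, 0xffffff00u, 8));
  return 0;
}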
diff --git a/llvm/test/Transforms/InstSimplify/select.ll b/llvm/test/Transforms/InstSimplify/select.ll
index 1b229f551093..fe93a0c3f212 100644
--- a/llvm/test/Transforms/InstSimplify/select.ll
+++ b/llvm/test/Transforms/InstSimplify/select.ll
@@ -1733,3 +1733,22 @@ define i8 @select_or_disjoint_eq(i8 %x, i8 %y) {
%sel = select i1 %cmp, i8 %x, i8 %or
ret i8 %sel
}
+
+define <4 x i32> @select_vector_cmp_with_bitcasts(<2 x i64> %x, <4 x i32> %y) {
+; CHECK-LABEL: @select_vector_cmp_with_bitcasts(
+; CHECK-NEXT: [[X_BC:%.*]] = bitcast <2 x i64> [[X:%.*]] to <4 x i32>
+; CHECK-NEXT: [[Y_BC:%.*]] = bitcast <4 x i32> [[Y:%.*]] to <2 x i64>
+; CHECK-NEXT: [[SUB:%.*]] = sub <2 x i64> [[X]], [[Y_BC]]
+; CHECK-NEXT: [[SUB_BC:%.*]] = bitcast <2 x i64> [[SUB]] to <4 x i32>
+; CHECK-NEXT: [[CMP:%.*]] = icmp eq <4 x i32> [[Y]], [[X_BC]]
+; CHECK-NEXT: [[SEL:%.*]] = select <4 x i1> [[CMP]], <4 x i32> [[SUB_BC]], <4 x i32> zeroinitializer
+; CHECK-NEXT: ret <4 x i32> [[SEL]]
+;
+ %x.bc = bitcast <2 x i64> %x to <4 x i32>
+ %y.bc = bitcast <4 x i32> %y to <2 x i64>
+ %sub = sub <2 x i64> %x, %y.bc
+ %sub.bc = bitcast <2 x i64> %sub to <4 x i32>
+ %cmp = icmp eq <4 x i32> %y, %x.bc
+ %sel = select <4 x i1> %cmp, <4 x i32> %sub.bc, <4 x i32> zeroinitializer
+ ret <4 x i32> %sel
+}
diff --git a/llvm/test/Transforms/LoopVectorize/X86/drop-poison-generating-flags.ll b/llvm/test/Transforms/LoopVectorize/X86/drop-poison-generating-flags.ll
index bc5f137564b3..3c6cba1b0022 100644
--- a/llvm/test/Transforms/LoopVectorize/X86/drop-poison-generating-flags.ll
+++ b/llvm/test/Transforms/LoopVectorize/X86/drop-poison-generating-flags.ll
@@ -566,6 +566,57 @@ exit:
ret void
}
+; %B.gep.0 and pointers based on it can preserve inbounds, as the inbounds
+; version is used unconditionally in the store in the latch.
+; FIXME: at the moment, inbounds is dropped from both the GEPs feeding the vector load and store.
+define void @Bgep_inbounds_unconditionally_due_to_store(ptr noalias %B, ptr readonly %C) #0 {
+; CHECK-LABEL: define void @Bgep_inbounds_unconditionally_due_to_store(
+; CHECK: vector.body:
+; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %vector.ph ], [ [[INDEX_NEXT:%.*]], %vector.body ]
+; CHECK-NEXT: [[TMP0:%.*]] = add i64 [[INDEX]], 0
+; CHECK-NEXT: [[TMP1:%.*]] = getelementptr inbounds i32, ptr %C, i64 [[TMP0]]
+; CHECK-NEXT: [[TMP2:%.*]] = getelementptr inbounds i32, ptr [[TMP1]], i32 0
+; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <4 x i32>, ptr [[TMP2]], align 4
+; CHECK-NEXT: [[TMP3:%.*]] = icmp eq <4 x i32> [[WIDE_LOAD]], <i32 20, i32 20, i32 20, i32 20>
+; CHECK-NEXT: [[TMP4:%.*]] = getelementptr float, ptr %B, i64 [[TMP0]]
+; CHECK-NEXT: [[TMP5:%.*]] = getelementptr float, ptr [[TMP4]], i32 0
+; CHECK-NEXT: [[WIDE_LOAD2:%.*]] = load <4 x float>, ptr [[TMP5]], align 4
+; CHECK-NEXT: [[TMP6:%.*]] = fadd <4 x float> [[WIDE_LOAD2]], <float 2.000000e+00, float 2.000000e+00, float 2.000000e+00, float 2.000000e+00>
+; CHECK-NEXT: [[TMP7:%.*]] = xor <4 x i1> [[TMP3]], <i1 true, i1 true, i1 true, i1 true>
+; CHECK-NEXT: [[PREDPHI:%.*]] = select <4 x i1> [[TMP3]], <4 x float> <float 3.300000e+01, float 3.300000e+01, float 3.300000e+01, float 3.300000e+01>, <4 x float> [[TMP6]]
+; CHECK-NEXT: [[TMP8:%.*]] = getelementptr inbounds float, ptr [[TMP4]], i32 0
+; CHECK-NEXT: store <4 x float> [[PREDPHI]], ptr [[TMP8]], align 4
+; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 4
+; CHECK-NEXT: [[TMP9:%.*]] = icmp eq i64 [[INDEX_NEXT]], 10000
+; CHECK-NEXT: br i1 [[TMP9]], label %middle.block, label %vector.body
+
+entry:
+ br label %loop.body
+
+loop.body:
+ %iv1 = phi i64 [ 0, %entry ], [ %iv.next, %loop.latch ]
+ %C.gep = getelementptr inbounds i32, ptr %C, i64 %iv1
+ %C.lv = load i32, ptr %C.gep, align 4
+ %cmp = icmp eq i32 %C.lv, 20
+ %B.gep.0 = getelementptr inbounds float, ptr %B, i64 %iv1
+ br i1 %cmp, label %loop.latch, label %else
+
+else:
+ %B.lv = load float, ptr %B.gep.0, align 4
+ %add = fadd float %B.lv, 2.0
+ br label %loop.latch
+
+loop.latch:
+ %add.sink = phi float [ %add, %else ], [ 33.0, %loop.body ]
+ store float %add.sink, ptr %B.gep.0, align 4
+ %iv.next = add nuw nsw i64 %iv1, 1
+ %exitcond.not = icmp eq i64 %iv.next, 10000
+ br i1 %exitcond.not, label %exit, label %loop.body
+
+exit:
+ ret void
+}
+
attributes #0 = { noinline nounwind uwtable "target-features"="+avx512bw,+avx512cd,+avx512dq,+avx512f,+avx512vl" }
!0 = !{}
diff --git a/llvm/test/Transforms/LoopVectorize/reduction-small-size.ll b/llvm/test/Transforms/LoopVectorize/reduction-small-size.ll
index 3973a288fe71..2a58748d8fb6 100644
--- a/llvm/test/Transforms/LoopVectorize/reduction-small-size.ll
+++ b/llvm/test/Transforms/LoopVectorize/reduction-small-size.ll
@@ -11,15 +11,12 @@ define i8 @PR34687(i1 %c, i32 %x, i32 %n) {
; CHECK: vector.ph:
; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i32 [[N]], 4
; CHECK-NEXT: [[N_VEC:%.*]] = sub i32 [[N]], [[N_MOD_VF]]
-; CHECK-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <4 x i1> poison, i1 [[C:%.*]], i64 0
-; CHECK-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <4 x i1> [[BROADCAST_SPLATINSERT]], <4 x i1> poison, <4 x i32> zeroinitializer
; CHECK-NEXT: [[BROADCAST_SPLATINSERT1:%.*]] = insertelement <4 x i32> poison, i32 [[X:%.*]], i64 0
; CHECK-NEXT: [[BROADCAST_SPLAT2:%.*]] = shufflevector <4 x i32> [[BROADCAST_SPLATINSERT1]], <4 x i32> poison, <4 x i32> zeroinitializer
; CHECK-NEXT: br label [[VECTOR_BODY:%.*]]
; CHECK: vector.body:
; CHECK-NEXT: [[INDEX:%.*]] = phi i32 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
; CHECK-NEXT: [[VEC_PHI:%.*]] = phi <4 x i32> [ zeroinitializer, [[VECTOR_PH]] ], [ [[TMP4:%.*]], [[VECTOR_BODY]] ]
-; CHECK-NEXT: [[TMP0:%.*]] = select <4 x i1> [[BROADCAST_SPLAT]], <4 x i32> undef, <4 x i32> <i32 1, i32 1, i32 1, i32 1>
; CHECK-NEXT: [[TMP1:%.*]] = and <4 x i32> [[VEC_PHI]], <i32 255, i32 255, i32 255, i32 255>
; CHECK-NEXT: [[TMP2:%.*]] = add <4 x i32> [[TMP1]], [[BROADCAST_SPLAT2]]
; CHECK-NEXT: [[TMP3:%.*]] = trunc <4 x i32> [[TMP2]] to <4 x i8>
@@ -40,7 +37,7 @@ define i8 @PR34687(i1 %c, i32 %x, i32 %n) {
; CHECK: for.body:
; CHECK-NEXT: [[I:%.*]] = phi i32 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[I_NEXT:%.*]], [[IF_END:%.*]] ]
; CHECK-NEXT: [[R:%.*]] = phi i32 [ [[BC_MERGE_RDX]], [[SCALAR_PH]] ], [ [[R_NEXT:%.*]], [[IF_END]] ]
-; CHECK-NEXT: br i1 [[C]], label [[IF_THEN:%.*]], label [[IF_END]]
+; CHECK-NEXT: br i1 [[C:%.*]], label [[IF_THEN:%.*]], label [[IF_END]]
; CHECK: if.then:
; CHECK-NEXT: [[T0:%.*]] = sdiv i32 undef, undef
; CHECK-NEXT: br label [[IF_END]]
diff --git a/llvm/test/Transforms/PhaseOrdering/AArch64/hoisting-sinking-required-for-vectorization.ll b/llvm/test/Transforms/PhaseOrdering/AArch64/hoisting-sinking-required-for-vectorization.ll
index b43fcc8548ce..cf4dfc0b76ac 100644
--- a/llvm/test/Transforms/PhaseOrdering/AArch64/hoisting-sinking-required-for-vectorization.ll
+++ b/llvm/test/Transforms/PhaseOrdering/AArch64/hoisting-sinking-required-for-vectorization.ll
@@ -150,29 +150,30 @@ define void @loop2(ptr %A, ptr %B, ptr %C, float %x) {
; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
; CHECK-NEXT: [[TMP0:%.*]] = getelementptr inbounds i32, ptr [[C]], i64 [[INDEX]]
; CHECK-NEXT: [[TMP1:%.*]] = getelementptr inbounds i32, ptr [[TMP0]], i64 4
-; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <4 x i32>, ptr [[TMP0]], align 4, !alias.scope !4
-; CHECK-NEXT: [[WIDE_LOAD7:%.*]] = load <4 x i32>, ptr [[TMP1]], align 4, !alias.scope !4
+; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <4 x i32>, ptr [[TMP0]], align 4, !alias.scope [[META4:![0-9]+]]
+; CHECK-NEXT: [[WIDE_LOAD7:%.*]] = load <4 x i32>, ptr [[TMP1]], align 4, !alias.scope [[META4]]
; CHECK-NEXT: [[TMP2:%.*]] = icmp eq <4 x i32> [[WIDE_LOAD]], <i32 20, i32 20, i32 20, i32 20>
; CHECK-NEXT: [[TMP3:%.*]] = icmp eq <4 x i32> [[WIDE_LOAD7]], <i32 20, i32 20, i32 20, i32 20>
; CHECK-NEXT: [[TMP4:%.*]] = getelementptr inbounds float, ptr [[A]], i64 [[INDEX]]
; CHECK-NEXT: [[TMP5:%.*]] = getelementptr inbounds float, ptr [[TMP4]], i64 4
-; CHECK-NEXT: [[WIDE_LOAD8:%.*]] = load <4 x float>, ptr [[TMP4]], align 4, !alias.scope !7
-; CHECK-NEXT: [[WIDE_LOAD9:%.*]] = load <4 x float>, ptr [[TMP5]], align 4, !alias.scope !7
+; CHECK-NEXT: [[WIDE_LOAD8:%.*]] = load <4 x float>, ptr [[TMP4]], align 4, !alias.scope [[META7:![0-9]+]]
+; CHECK-NEXT: [[WIDE_LOAD9:%.*]] = load <4 x float>, ptr [[TMP5]], align 4, !alias.scope [[META7]]
; CHECK-NEXT: [[TMP6:%.*]] = fmul <4 x float> [[WIDE_LOAD8]], [[BROADCAST_SPLAT]]
; CHECK-NEXT: [[TMP7:%.*]] = fmul <4 x float> [[WIDE_LOAD9]], [[BROADCAST_SPLAT]]
; CHECK-NEXT: [[TMP8:%.*]] = getelementptr float, ptr [[B]], i64 [[INDEX]]
; CHECK-NEXT: [[TMP9:%.*]] = getelementptr float, ptr [[TMP8]], i64 4
-; CHECK-NEXT: [[WIDE_LOAD10:%.*]] = load <4 x float>, ptr [[TMP8]], align 4, !alias.scope !9, !noalias !11
-; CHECK-NEXT: [[WIDE_LOAD11:%.*]] = load <4 x float>, ptr [[TMP9]], align 4, !alias.scope !9, !noalias !11
+; CHECK-NEXT: [[WIDE_LOAD10:%.*]] = load <4 x float>, ptr [[TMP8]], align 4, !alias.scope [[META9:![0-9]+]], !noalias [[META11:![0-9]+]]
+; CHECK-NEXT: [[WIDE_LOAD11:%.*]] = load <4 x float>, ptr [[TMP9]], align 4, !alias.scope [[META9]], !noalias [[META11]]
; CHECK-NEXT: [[TMP10:%.*]] = select <4 x i1> [[TMP2]], <4 x float> <float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00>, <4 x float> [[WIDE_LOAD10]]
; CHECK-NEXT: [[PREDPHI:%.*]] = fadd <4 x float> [[TMP6]], [[TMP10]]
; CHECK-NEXT: [[TMP11:%.*]] = select <4 x i1> [[TMP3]], <4 x float> <float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00>, <4 x float> [[WIDE_LOAD11]]
; CHECK-NEXT: [[PREDPHI12:%.*]] = fadd <4 x float> [[TMP7]], [[TMP11]]
-; CHECK-NEXT: store <4 x float> [[PREDPHI]], ptr [[TMP8]], align 4, !alias.scope !9, !noalias !11
-; CHECK-NEXT: store <4 x float> [[PREDPHI12]], ptr [[TMP9]], align 4, !alias.scope !9, !noalias !11
+; CHECK-NEXT: [[TMP12:%.*]] = getelementptr inbounds float, ptr [[TMP8]], i64 4
+; CHECK-NEXT: store <4 x float> [[PREDPHI]], ptr [[TMP8]], align 4, !alias.scope [[META9]], !noalias [[META11]]
+; CHECK-NEXT: store <4 x float> [[PREDPHI12]], ptr [[TMP12]], align 4, !alias.scope [[META9]], !noalias [[META11]]
; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 8
-; CHECK-NEXT: [[TMP12:%.*]] = icmp eq i64 [[INDEX_NEXT]], 10000
-; CHECK-NEXT: br i1 [[TMP12]], label [[EXIT:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP12:![0-9]+]]
+; CHECK-NEXT: [[TMP13:%.*]] = icmp eq i64 [[INDEX_NEXT]], 10000
+; CHECK-NEXT: br i1 [[TMP13]], label [[EXIT:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP12:![0-9]+]]
; CHECK: loop.body:
; CHECK-NEXT: [[IV1:%.*]] = phi i64 [ [[IV_NEXT:%.*]], [[LOOP_LATCH:%.*]] ], [ 0, [[ENTRY:%.*]] ]
; CHECK-NEXT: [[C_GEP:%.*]] = getelementptr inbounds i32, ptr [[C]], i64 [[IV1]]
diff --git a/llvm/test/Transforms/SCCP/switch.ll b/llvm/test/Transforms/SCCP/switch.ll
index 19e72217fd03..306f0eebf2b4 100644
--- a/llvm/test/Transforms/SCCP/switch.ll
+++ b/llvm/test/Transforms/SCCP/switch.ll
@@ -4,6 +4,8 @@
; Make sure we always consider the default edge executable for a switch
; with no cases.
declare void @foo()
+declare i32 @g(i32)
+
define void @test1() {
; CHECK-LABEL: @test1(
; CHECK-NEXT: switch i32 undef, label [[D:%.*]] [
@@ -115,17 +117,16 @@ switch.1:
ret i32 %phi
}
-; TODO: Determine that the default destination is dead.
define i32 @test_local_range(ptr %p) {
; CHECK-LABEL: @test_local_range(
; CHECK-NEXT: [[X:%.*]] = load i32, ptr [[P:%.*]], align 4, !range [[RNG0]]
-; CHECK-NEXT: switch i32 [[X]], label [[SWITCH_DEFAULT:%.*]] [
+; CHECK-NEXT: switch i32 [[X]], label [[DEFAULT_UNREACHABLE:%.*]] [
; CHECK-NEXT: i32 0, label [[SWITCH_0:%.*]]
; CHECK-NEXT: i32 1, label [[SWITCH_1:%.*]]
; CHECK-NEXT: i32 2, label [[SWITCH_2:%.*]]
; CHECK-NEXT: ]
-; CHECK: switch.default:
-; CHECK-NEXT: ret i32 -1
+; CHECK: default.unreachable:
+; CHECK-NEXT: unreachable
; CHECK: switch.0:
; CHECK-NEXT: ret i32 0
; CHECK: switch.1:
@@ -161,14 +162,14 @@ switch.3:
define i32 @test_duplicate_successors(ptr %p) {
; CHECK-LABEL: @test_duplicate_successors(
; CHECK-NEXT: [[X:%.*]] = load i32, ptr [[P:%.*]], align 4, !range [[RNG0]]
-; CHECK-NEXT: switch i32 [[X]], label [[SWITCH_DEFAULT:%.*]] [
+; CHECK-NEXT: switch i32 [[X]], label [[DEFAULT_UNREACHABLE:%.*]] [
; CHECK-NEXT: i32 0, label [[SWITCH_0:%.*]]
; CHECK-NEXT: i32 1, label [[SWITCH_0]]
; CHECK-NEXT: i32 2, label [[SWITCH_1:%.*]]
; CHECK-NEXT: i32 3, label [[SWITCH_1]]
; CHECK-NEXT: ]
-; CHECK: switch.default:
-; CHECK-NEXT: ret i32 -1
+; CHECK: default.unreachable:
+; CHECK-NEXT: unreachable
; CHECK: switch.0:
; CHECK-NEXT: ret i32 0
; CHECK: switch.1:
@@ -201,13 +202,13 @@ switch.2:
; range information.
define internal i32 @test_ip_range(i32 %x) {
; CHECK-LABEL: @test_ip_range(
-; CHECK-NEXT: switch i32 [[X:%.*]], label [[SWITCH_DEFAULT:%.*]] [
+; CHECK-NEXT: switch i32 [[X:%.*]], label [[DEFAULT_UNREACHABLE:%.*]] [
; CHECK-NEXT: i32 3, label [[SWITCH_3:%.*]]
; CHECK-NEXT: i32 1, label [[SWITCH_1:%.*]]
; CHECK-NEXT: i32 2, label [[SWITCH_2:%.*]]
; CHECK-NEXT: ], !prof [[PROF1:![0-9]+]]
-; CHECK: switch.default:
-; CHECK-NEXT: ret i32 -1
+; CHECK: default.unreachable:
+; CHECK-NEXT: unreachable
; CHECK: switch.1:
; CHECK-NEXT: ret i32 1
; CHECK: switch.2:
@@ -240,8 +241,8 @@ switch.3:
define void @call_test_ip_range() {
; CHECK-LABEL: @call_test_ip_range(
-; CHECK-NEXT: [[TMP1:%.*]] = call i32 @test_ip_range(i32 1)
-; CHECK-NEXT: [[TMP2:%.*]] = call i32 @test_ip_range(i32 3)
+; CHECK-NEXT: [[TMP1:%.*]] = call i32 @test_ip_range(i32 1), !range [[RNG2:![0-9]+]]
+; CHECK-NEXT: [[TMP2:%.*]] = call i32 @test_ip_range(i32 3), !range [[RNG2]]
; CHECK-NEXT: ret void
;
call i32 @test_ip_range(i32 1)
@@ -301,6 +302,72 @@ end.2:
ret i32 20
}
+define i32 @test_default_unreachable_by_dom_cond(i32 %x) {
+; CHECK-LABEL: @test_default_unreachable_by_dom_cond(
+; CHECK-NEXT: entry:
+; CHECK-NEXT: [[OR_COND:%.*]] = icmp ult i32 [[X:%.*]], 4
+; CHECK-NEXT: br i1 [[OR_COND]], label [[IF_THEN:%.*]], label [[RETURN:%.*]]
+; CHECK: if.then:
+; CHECK-NEXT: switch i32 [[X]], label [[DEFAULT_UNREACHABLE:%.*]] [
+; CHECK-NEXT: i32 0, label [[SW_BB:%.*]]
+; CHECK-NEXT: i32 1, label [[SW_BB2:%.*]]
+; CHECK-NEXT: i32 2, label [[SW_BB4:%.*]]
+; CHECK-NEXT: i32 3, label [[SW_BB6:%.*]]
+; CHECK-NEXT: ]
+; CHECK: sw.bb:
+; CHECK-NEXT: [[CALL:%.*]] = tail call i32 @g(i32 2)
+; CHECK-NEXT: br label [[RETURN]]
+; CHECK: sw.bb2:
+; CHECK-NEXT: [[CALL3:%.*]] = tail call i32 @g(i32 3)
+; CHECK-NEXT: br label [[RETURN]]
+; CHECK: sw.bb4:
+; CHECK-NEXT: [[CALL5:%.*]] = tail call i32 @g(i32 4)
+; CHECK-NEXT: br label [[RETURN]]
+; CHECK: sw.bb6:
+; CHECK-NEXT: [[CALL7:%.*]] = tail call i32 @g(i32 5)
+; CHECK-NEXT: br label [[RETURN]]
+; CHECK: default.unreachable:
+; CHECK-NEXT: unreachable
+; CHECK: return:
+; CHECK-NEXT: [[RETVAL_0:%.*]] = phi i32 [ [[CALL7]], [[SW_BB6]] ], [ [[CALL5]], [[SW_BB4]] ], [ [[CALL3]], [[SW_BB2]] ], [ [[CALL]], [[SW_BB]] ], [ -23, [[ENTRY:%.*]] ]
+; CHECK-NEXT: ret i32 [[RETVAL_0]]
+;
+entry:
+ %or.cond = icmp ult i32 %x, 4
+ br i1 %or.cond, label %if.then, label %return
+
+if.then:
+ switch i32 %x, label %sw.epilog [
+ i32 0, label %sw.bb
+ i32 1, label %sw.bb2
+ i32 2, label %sw.bb4
+ i32 3, label %sw.bb6
+ ]
+
+sw.bb:
+ %call = tail call i32 @g(i32 2)
+ br label %return
+
+sw.bb2:
+ %call3 = tail call i32 @g(i32 3)
+ br label %return
+
+sw.bb4:
+ %call5 = tail call i32 @g(i32 4)
+ br label %return
+
+sw.bb6:
+ %call7 = tail call i32 @g(i32 5)
+ br label %return
+
+sw.epilog:
+ call void @foo()
+ br label %return
+
+return:
+ %retval.0 = phi i32 [ %call7, %sw.bb6 ], [ %call5, %sw.bb4 ], [ %call3, %sw.bb2 ], [ %call, %sw.bb ], [ -23, %sw.epilog ], [ -23, %entry ]
+ ret i32 %retval.0
+}
declare void @llvm.assume(i1)
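The @test_default_unreachable_by_dom_cond case added above checks that SCCP can combine the dominating x < 4 guard with the switch cases 0..3 to prove the default destination dead and replace it with unreachable. In source terms it roughly corresponds to a pattern like the following (illustrative C++ only, not taken from the patch; g is the externally declared helper):

int g(int);

int dispatch(unsigned x) {
  if (x < 4) {             // dominating range check: x is in [0, 3] below
    switch (x) {
    case 0: return g(2);
    case 1: return g(3);
    case 2: return g(4);
    case 3: return g(5);
    default: return -23;   // provably dead given x < 4; SCCP may rewrite the
                           // default destination to 'unreachable'
    }
  }
  return -23;
}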
diff --git a/llvm/test/Transforms/SLPVectorizer/X86/splat-buildvector.ll b/llvm/test/Transforms/SLPVectorizer/X86/splat-buildvector.ll
new file mode 100644
index 000000000000..5e5981bdaaa8
--- /dev/null
+++ b/llvm/test/Transforms/SLPVectorizer/X86/splat-buildvector.ll
@@ -0,0 +1,25 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 4
+; RUN: opt --passes=slp-vectorizer -S -mtriple=x86_64-unknown-linux-gnu %s -o - -slp-threshold=-100 | FileCheck %s
+declare i64 @llvm.smax.i64(i64, i64)
+
+define i8 @foo(i64 %val_i64_57) {
+; CHECK-LABEL: define i8 @foo(
+; CHECK-SAME: i64 [[VAL_I64_57:%.*]]) {
+; CHECK-NEXT: entry_1:
+; CHECK-NEXT: [[VAL_I64_58:%.*]] = call i64 @llvm.smax.i64(i64 0, i64 1)
+; CHECK-NEXT: [[TMP0:%.*]] = insertelement <4 x i64> <i64 0, i64 poison, i64 poison, i64 0>, i64 [[VAL_I64_57]], i32 1
+; CHECK-NEXT: [[TMP1:%.*]] = insertelement <4 x i64> [[TMP0]], i64 [[VAL_I64_58]], i32 2
+; CHECK-NEXT: [[TMP2:%.*]] = shufflevector <4 x i64> [[TMP1]], <4 x i64> poison, <4 x i32> <i32 2, i32 2, i32 0, i32 1>
+; CHECK-NEXT: [[TMP3:%.*]] = icmp ule <4 x i64> [[TMP1]], [[TMP2]]
+; CHECK-NEXT: [[TMP4:%.*]] = icmp sle <4 x i64> [[TMP1]], [[TMP2]]
+; CHECK-NEXT: [[TMP5:%.*]] = shufflevector <4 x i1> [[TMP3]], <4 x i1> [[TMP4]], <4 x i32> <i32 0, i32 1, i32 2, i32 7>
+; CHECK-NEXT: ret i8 0
+;
+entry_1:
+ %val_i64_58 = call i64 @llvm.smax.i64(i64 0, i64 1)
+ %val_i1_89 = icmp ule i64 %val_i64_57, %val_i64_58
+ %val_i1_95 = icmp sle i64 0, undef
+ %val_i1_98 = icmp uge i64 %val_i64_58, %val_i64_58
+ %val_i1_99 = icmp ule i64 0, %val_i64_58
+ ret i8 0
+}
diff --git a/llvm/test/Transforms/SimpleLoopUnswitch/memssa-readnone-access.ll b/llvm/test/Transforms/SimpleLoopUnswitch/memssa-readnone-access.ll
new file mode 100644
index 000000000000..2aaf777683e1
--- /dev/null
+++ b/llvm/test/Transforms/SimpleLoopUnswitch/memssa-readnone-access.ll
@@ -0,0 +1,117 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 4
+; RUN: opt -S -passes="loop-mssa(loop-instsimplify,simple-loop-unswitch<nontrivial>)" < %s | FileCheck %s
+
+@vtable = constant ptr @foo
+
+declare void @foo() memory(none)
+declare void @bar()
+
+; The call becomes known readnone after simplification, but still has a
+; MemoryAccess. Make sure this does not lead to an assertion failure.
+define void @test(i1 %c) {
+; CHECK-LABEL: define void @test(
+; CHECK-SAME: i1 [[C:%.*]]) {
+; CHECK-NEXT: [[C_FR:%.*]] = freeze i1 [[C]]
+; CHECK-NEXT: br i1 [[C_FR]], label [[DOTSPLIT_US:%.*]], label [[DOTSPLIT:%.*]]
+; CHECK: .split.us:
+; CHECK-NEXT: br label [[LOOP_US:%.*]]
+; CHECK: loop.us:
+; CHECK-NEXT: call void @foo()
+; CHECK-NEXT: br label [[EXIT_SPLIT_US:%.*]]
+; CHECK: exit.split.us:
+; CHECK-NEXT: br label [[EXIT:%.*]]
+; CHECK: .split:
+; CHECK-NEXT: br label [[LOOP:%.*]]
+; CHECK: loop:
+; CHECK-NEXT: call void @foo()
+; CHECK-NEXT: br label [[LOOP]]
+; CHECK: exit:
+; CHECK-NEXT: ret void
+;
+ br label %loop
+
+loop:
+ %fn = load ptr, ptr @vtable, align 8
+ call void %fn()
+ br i1 %c, label %exit, label %loop
+
+exit:
+ ret void
+}
+
+; Variant with another access after the call.
+define void @test2(i1 %c, ptr %p) {
+; CHECK-LABEL: define void @test2(
+; CHECK-SAME: i1 [[C:%.*]], ptr [[P:%.*]]) {
+; CHECK-NEXT: [[C_FR:%.*]] = freeze i1 [[C]]
+; CHECK-NEXT: br i1 [[C_FR]], label [[DOTSPLIT_US:%.*]], label [[DOTSPLIT:%.*]]
+; CHECK: .split.us:
+; CHECK-NEXT: br label [[LOOP_US:%.*]]
+; CHECK: loop.us:
+; CHECK-NEXT: call void @foo()
+; CHECK-NEXT: call void @bar()
+; CHECK-NEXT: br label [[EXIT_SPLIT_US:%.*]]
+; CHECK: exit.split.us:
+; CHECK-NEXT: br label [[EXIT:%.*]]
+; CHECK: .split:
+; CHECK-NEXT: br label [[LOOP:%.*]]
+; CHECK: loop:
+; CHECK-NEXT: call void @foo()
+; CHECK-NEXT: call void @bar()
+; CHECK-NEXT: br label [[LOOP]]
+; CHECK: exit:
+; CHECK-NEXT: ret void
+;
+ br label %loop
+
+loop:
+ %fn = load ptr, ptr @vtable, align 8
+ call void %fn()
+ call void @bar()
+ br i1 %c, label %exit, label %loop
+
+exit:
+ ret void
+}
+
+; Variant with another access after the call and no access before the call.
+define void @test3(i1 %c, ptr %p) {
+; CHECK-LABEL: define void @test3(
+; CHECK-SAME: i1 [[C:%.*]], ptr [[P:%.*]]) {
+; CHECK-NEXT: [[C_FR:%.*]] = freeze i1 [[C]]
+; CHECK-NEXT: br i1 [[C_FR]], label [[DOTSPLIT_US:%.*]], label [[DOTSPLIT:%.*]]
+; CHECK: .split.us:
+; CHECK-NEXT: br label [[LOOP_US:%.*]]
+; CHECK: loop.us:
+; CHECK-NEXT: br label [[SPLIT_US:%.*]]
+; CHECK: split.us:
+; CHECK-NEXT: call void @foo()
+; CHECK-NEXT: call void @bar()
+; CHECK-NEXT: br label [[EXIT_SPLIT_US:%.*]]
+; CHECK: exit.split.us:
+; CHECK-NEXT: br label [[EXIT:%.*]]
+; CHECK: .split:
+; CHECK-NEXT: br label [[LOOP:%.*]]
+; CHECK: loop:
+; CHECK-NEXT: br label [[SPLIT:%.*]]
+; CHECK: split:
+; CHECK-NEXT: call void @foo()
+; CHECK-NEXT: call void @bar()
+; CHECK-NEXT: br label [[LOOP]]
+; CHECK: exit:
+; CHECK-NEXT: ret void
+;
+ br label %loop
+
+loop:
+ %fn = load ptr, ptr @vtable, align 8
+ br label %split
+
+split:
+ call void %fn()
+ call void @bar()
+ br i1 %c, label %exit, label %loop
+
+exit:
+ ret void
+}
diff --git a/llvm/test/tools/dsymutil/ARM/dwarf5-dwarf4-combination-macho.test b/llvm/test/tools/dsymutil/ARM/dwarf5-dwarf4-combination-macho.test
index 0199bf28681b..d5b78bd1487e 100644
--- a/llvm/test/tools/dsymutil/ARM/dwarf5-dwarf4-combination-macho.test
+++ b/llvm/test/tools/dsymutil/ARM/dwarf5-dwarf4-combination-macho.test
@@ -73,7 +73,7 @@ CHECK-NEXT: DW_AT_low_pc [DW_FORM_addrx] (indexed (00000000) address = 0x[[
CHECK: DW_AT_linkage_name [DW_FORM_strx] (indexed (00000005) string = "_Z4foo2i")
CHECK: DW_AT_name [DW_FORM_strx] (indexed (00000006) string = "foo2")
CHECK: DW_TAG_formal_parameter
-CHECK-NEXT: DW_AT_location [DW_FORM_sec_offset] (0x[[LOCLIST_OFFSET:[0-9a-f]+]]:
+CHECK-NEXT: DW_AT_location [DW_FORM_sec_offset] (0x[[LOCLIST_OFFSET:[0-9a-f]+]]:
CHECK-NEXT: [0x[[#%.16x,LOCLIST_PAIR_START:]], 0x[[#%.16x,LOCLIST_PAIR_END:]]): [[LOCLIST_EXPR:.*]]
CHECK-NEXT: [0x[[#%.16x,LOCLIST_PAIR_START2:]], 0x[[#%.16x,LOCLIST_PAIR_END2:]]): [[LOCLIST_EXPR2:.*]])
CHECK: DW_AT_name [DW_FORM_strx] (indexed (00000007) string = "a")
@@ -93,7 +93,7 @@ CHECK-NEXT: DW_AT_low_pc [DW_FORM_addr] (0x[[#%.16x,LOC_LOWPC
CHECK: DW_AT_linkage_name [DW_FORM_strp] ( .debug_str[0x000000e6] = "_Z3bari")
CHECK: DW_AT_name [DW_FORM_strp] ( .debug_str[0x000000ee] = "bar")
CHECK: DW_TAG_formal_parameter
-CHECK-NEXT: DW_AT_location [DW_FORM_sec_offset] (0x[[LOC_OFFSET:[0-9a-f]+]]:
+CHECK-NEXT: DW_AT_location [DW_FORM_sec_offset] (0x[[LOC_OFFSET:[0-9a-f]+]]:
CHECK-NEXT: [0x[[#%.16x,LOC_PAIR_START:]], 0x[[#%.16x,LOC_PAIR_END:]]): [[LOC_EXPR:.*]]
CHECK-NEXT: [0x[[#%.16x,LOC_PAIR_START2:]], 0x[[#%.16x,LOC_PAIR_END2:]]): [[LOC_EXPR2:.*]])
CHECK: DW_AT_name [DW_FORM_strp] ( .debug_str[0x000000f2] = "x")
@@ -105,7 +105,7 @@ CHECK-NEXT: (0x[[#sub(LOC_PAIR_START2,LOC_LOWPC)]], 0x[[#sub(LOC_PAIR
CHECK: .debug_loclists contents:
CHECK-NEXT: 0x00000000: locations list header: length = 0x00000018, format = DWARF32, version = 0x0005, addr_size = 0x08, seg_size = 0x00, offset_entry_count = 0x00000000
-CHECK-NEXT: 0x[[LOCLIST_OFFSET]]:
+CHECK-NEXT: 0x[[LOCLIST_OFFSET]]:
CHECK-NEXT: DW_LLE_base_addressx (0x0000000000000000)
CHECK-NEXT: DW_LLE_offset_pair (0x[[#sub(LOCLIST_PAIR_START,LOCLIST_LOWPC)]], 0x[[#sub(LOCLIST_PAIR_END,LOCLIST_LOWPC)]])
CHECK-NEXT: DW_LLE_offset_pair (0x[[#sub(LOCLIST_PAIR_START2,LOCLIST_LOWPC)]], 0x[[#sub(LOCLIST_PAIR_END2,LOCLIST_LOWPC)]])
@@ -114,12 +114,12 @@ CHECK-NEXT: DW_LLE_end_of_list ()
CHECK: .debug_line contents:
CHECK-NEXT: debug_line[0x00000000]
CHECK-NEXT: Line table prologue:
-CHECK-NEXT: total_length: 0x00000048
+CHECK-NEXT: total_length: 0x0000005a
CHECK-NEXT: format: DWARF32
CHECK-NEXT: version: 5
CHECK-NEXT: address_size: 8
CHECK-NEXT: seg_select_size: 0
-CHECK-NEXT: prologue_length: 0x00000025
+CHECK-NEXT: prologue_length: 0x00000037
CHECK-NEXT: min_inst_length: 1
CHECK-NEXT: max_ops_per_inst: 1
CHECK-NEXT: default_is_stmt: 1
@@ -143,7 +143,7 @@ CHECK-NEXT: file_names[ 0]:
CHECK-NEXT: name: .debug_line_str[0x00000029] = "a.cpp"
CHECK-NEXT: dir_index: 0
-CHECK: debug_line[0x0000004c]
+CHECK: debug_line[0x0000005e]
CHECK-NEXT: Line table prologue:
CHECK-NEXT: total_length: 0x0000003b
CHECK-NEXT: format: DWARF32
diff --git a/llvm/test/tools/dsymutil/ARM/dwarf5-macho.test b/llvm/test/tools/dsymutil/ARM/dwarf5-macho.test
index 13409b2a07a3..f6d42a18424c 100644
--- a/llvm/test/tools/dsymutil/ARM/dwarf5-macho.test
+++ b/llvm/test/tools/dsymutil/ARM/dwarf5-macho.test
@@ -49,13 +49,13 @@ CHECK-NEXT: DW_AT_addr_base [DW_FORM_sec_offset] (0x00000008)
CHECK: DW_TAG_subprogram
CHECK-NEXT: DW_AT_low_pc [DW_FORM_addrx] (indexed (00000000) address = 0x[[#%.16x,LOCLIST_LOWPC:]])
CHECK: DW_TAG_formal_parameter
-CHECK-NEXT: DW_AT_location [DW_FORM_sec_offset] (0x[[LOC_OFFSET:[0-9a-f]+]]:
+CHECK-NEXT: DW_AT_location [DW_FORM_sec_offset] (0x[[LOC_OFFSET:[0-9a-f]+]]:
CHECK-NEXT: [0x[[#%.16x,LOCLIST_PAIR_START:]], 0x[[#%.16x,LOCLIST_PAIR_END:]]): [[LOCLIST_EXPR:.*]]
CHECK-NEXT: [0x[[#%.16x,LOCLIST_PAIR_START2:]], 0x[[#%.16x,LOCLIST_PAIR_END2:]]): [[LOCLIST_EXPR2:.*]])
CHECK: .debug_loclists contents:
CHECK-NEXT: 0x00000000: locations list header: length = 0x00000018, format = DWARF32, version = 0x0005, addr_size = 0x08, seg_size = 0x00, offset_entry_count = 0x00000000
-CHECK-NEXT: 0x[[LOC_OFFSET]]:
+CHECK-NEXT: 0x[[LOC_OFFSET]]:
CHECK-NEXT: DW_LLE_base_addressx (0x0000000000000000)
CHECK-NEXT: DW_LLE_offset_pair (0x[[#sub(LOCLIST_PAIR_START,LOCLIST_LOWPC)]], 0x[[#sub(LOCLIST_PAIR_END,LOCLIST_LOWPC)]])
CHECK-NEXT: DW_LLE_offset_pair (0x[[#sub(LOCLIST_PAIR_START2,LOCLIST_LOWPC)]], 0x[[#sub(LOCLIST_PAIR_END2,LOCLIST_LOWPC)]])
@@ -64,12 +64,12 @@ CHECK-NEXT: DW_LLE_end_of_list ()
CHECK: .debug_line contents:
CHECK-NEXT: debug_line[0x00000000]
CHECK-NEXT: Line table prologue:
-CHECK-NEXT: total_length: 0x00000048
+CHECK-NEXT: total_length: 0x0000005a
CHECK-NEXT: format: DWARF32
CHECK-NEXT: version: 5
CHECK-NEXT: address_size: 8
CHECK-NEXT: seg_select_size: 0
-CHECK-NEXT: prologue_length: 0x00000025
+CHECK-NEXT: prologue_length: 0x00000037
CHECK-NEXT: min_inst_length: 1
CHECK-NEXT: max_ops_per_inst: 1
CHECK-NEXT: default_is_stmt: 1
@@ -92,6 +92,7 @@ CHECK-NEXT: include_directories[ 0] = .debug_line_str[0x00000000] = "/Users/sh
CHECK-NEXT: file_names[ 0]:
CHECK-NEXT: name: .debug_line_str[0x00000029] = "a.cpp"
CHECK-NEXT: dir_index: 0
+CHECK-NEXT: md5_checksum: 2675ab7ce3623b564cfd8a2906a462e5
CHECK: .debug_str contents:
diff --git a/llvm/test/tools/llvm-dwarfdump/AArch64/verify-no-file.yaml b/llvm/test/tools/llvm-dwarfdump/AArch64/verify-no-file.yaml
index 1327cc26ee9f..808cc7be638b 100644
--- a/llvm/test/tools/llvm-dwarfdump/AArch64/verify-no-file.yaml
+++ b/llvm/test/tools/llvm-dwarfdump/AArch64/verify-no-file.yaml
@@ -1,5 +1,6 @@
# RUN: yaml2obj %s -o %t.o
# RUN: llvm-dwarfdump -arch arm64 --debug-line --verify %t.o 2>&1 | FileCheck %s
+# REQUIRES: system-darwin
# CHECK-NOT: error: .debug_line[0x{{[0-9a-f]+}}][0] has invalid file index 1 (valid values are [1,0]):
--- !mach-o
diff --git a/llvm/unittests/Analysis/VectorFunctionABITest.cpp b/llvm/unittests/Analysis/VectorFunctionABITest.cpp
index b72b4b3b21d4..d8a7d8245bb0 100644
--- a/llvm/unittests/Analysis/VectorFunctionABITest.cpp
+++ b/llvm/unittests/Analysis/VectorFunctionABITest.cpp
@@ -88,14 +88,6 @@ protected:
/// Returns whether the parsed function contains a mask.
bool isMasked() const { return Info.isMasked(); }
- /// Check if the number of vectorized parameters matches the scalar ones. This
- /// requires a correct scalar FunctionType string to be fed to the
- /// 'invokeParser'. Mask parameters that are only required by the vector
- /// function call are ignored.
- bool matchParametersNum() {
- return (Parameters.size() - isMasked()) == ScalarFTy->getNumParams();
- }
-
FunctionType *getFunctionType() {
return VFABI::createFunctionType(Info, ScalarFTy);
}
@@ -162,7 +154,6 @@ TEST_F(VFABIParserTest, ParamListParsing) {
invokeParser("_ZGVnN2vl16Ls32R3l_foo", "void(i32, i32, i32, ptr, i32)"));
EXPECT_EQ(ISA, VFISAKind::AdvancedSIMD);
EXPECT_EQ(false, isMasked());
- EXPECT_TRUE(matchParametersNum());
FunctionType *FTy = FunctionType::get(
Type::getVoidTy(Ctx),
{VectorType::get(Type::getInt32Ty(Ctx), ElementCount::getFixed(2)),
@@ -184,7 +175,6 @@ TEST_F(VFABIParserTest, ScalarNameAndVectorName_01) {
EXPECT_TRUE(invokeParser("_ZGVnM2v_foo(vector_foo)", "void(i32)"));
EXPECT_EQ(ISA, VFISAKind::AdvancedSIMD);
EXPECT_EQ(true, isMasked());
- EXPECT_TRUE(matchParametersNum());
EXPECT_EQ(getFunctionType(), FTyMaskVLen2_i32);
EXPECT_EQ(ScalarName, "foo");
EXPECT_EQ(VectorName, "vector_foo");
@@ -194,7 +184,6 @@ TEST_F(VFABIParserTest, ScalarNameAndVectorName_02) {
EXPECT_TRUE(invokeParser("_ZGVnM2v_foo(vector_foo)", "void(i32)"));
EXPECT_EQ(ISA, VFISAKind::AdvancedSIMD);
EXPECT_EQ(true, isMasked());
- EXPECT_TRUE(matchParametersNum());
EXPECT_EQ(getFunctionType(), FTyMaskVLen2_i32);
EXPECT_EQ(ScalarName, "foo");
EXPECT_EQ(VectorName, "vector_foo");
@@ -205,14 +194,13 @@ TEST_F(VFABIParserTest, ScalarNameAndVectorName_03) {
invokeParser("_ZGVnM2v___foo_bar_abc(fooBarAbcVec)", "void(i32)"));
EXPECT_EQ(ISA, VFISAKind::AdvancedSIMD);
EXPECT_EQ(true, isMasked());
- EXPECT_TRUE(matchParametersNum());
EXPECT_EQ(getFunctionType(), FTyMaskVLen2_i32);
EXPECT_EQ(ScalarName, "__foo_bar_abc");
EXPECT_EQ(VectorName, "fooBarAbcVec");
}
TEST_F(VFABIParserTest, ScalarNameOnly) {
- EXPECT_TRUE(invokeParser("_ZGVnM2v___foo_bar_abc"));
+ EXPECT_TRUE(invokeParser("_ZGVnM2v___foo_bar_abc", "void(i32)"));
EXPECT_EQ(ISA, VFISAKind::AdvancedSIMD);
EXPECT_EQ(true, isMasked());
EXPECT_EQ(ScalarName, "__foo_bar_abc");
@@ -227,7 +215,6 @@ TEST_F(VFABIParserTest, Parse) {
"void(i32, i32, i32, i32, ptr, i32, i32, i32, ptr)"));
EXPECT_EQ(ISA, VFISAKind::AdvancedSIMD);
EXPECT_FALSE(isMasked());
- EXPECT_TRUE(matchParametersNum());
FunctionType *FTy = FunctionType::get(
Type::getVoidTy(Ctx),
{
@@ -262,7 +249,6 @@ TEST_F(VFABIParserTest, ParseVectorName) {
EXPECT_TRUE(invokeParser("_ZGVnN2v_foo(vector_foo)", "void(i32)"));
EXPECT_EQ(ISA, VFISAKind::AdvancedSIMD);
EXPECT_FALSE(isMasked());
- EXPECT_TRUE(matchParametersNum());
EXPECT_EQ(getFunctionType(), FTyNoMaskVLen2_i32);
EXPECT_EQ(VF, ElementCount::getFixed(2));
EXPECT_EQ(Parameters.size(), (unsigned)1);
@@ -276,7 +262,6 @@ TEST_F(VFABIParserTest, LinearWithCompileTimeNegativeStep) {
"void(i32, i32, i32, ptr)"));
EXPECT_EQ(ISA, VFISAKind::AdvancedSIMD);
EXPECT_FALSE(isMasked());
- EXPECT_TRUE(matchParametersNum());
FunctionType *FTy = FunctionType::get(
Type::getVoidTy(Ctx),
{Type::getInt32Ty(Ctx), Type::getInt32Ty(Ctx), Type::getInt32Ty(Ctx),
@@ -297,7 +282,6 @@ TEST_F(VFABIParserTest, ParseScalableSVE) {
EXPECT_TRUE(invokeParser("_ZGVsMxv_foo(vector_foo)", "void(i32)"));
EXPECT_EQ(ISA, VFISAKind::SVE);
EXPECT_TRUE(isMasked());
- EXPECT_TRUE(matchParametersNum());
EXPECT_EQ(getFunctionType(), FTyMaskedVLA_i32);
EXPECT_EQ(VF, ElementCount::getScalable(4));
EXPECT_EQ(Parameters.size(), (unsigned)2);
@@ -311,7 +295,6 @@ TEST_F(VFABIParserTest, ParseFixedWidthSVE) {
EXPECT_TRUE(invokeParser("_ZGVsM2v_foo(vector_foo)", "void(i32)"));
EXPECT_EQ(ISA, VFISAKind::SVE);
EXPECT_TRUE(isMasked());
- EXPECT_TRUE(matchParametersNum());
EXPECT_EQ(getFunctionType(), FTyMaskVLen2_i32);
EXPECT_EQ(VF, ElementCount::getFixed(2));
EXPECT_EQ(Parameters.size(), (unsigned)2);
@@ -329,16 +312,16 @@ TEST_F(VFABIParserTest, NotAVectorFunctionABIName) {
TEST_F(VFABIParserTest, LinearWithRuntimeStep) {
EXPECT_FALSE(invokeParser("_ZGVnN2ls_foo"))
<< "A number should be present after \"ls\".";
- EXPECT_TRUE(invokeParser("_ZGVnN2ls2_foo"));
+ EXPECT_TRUE(invokeParser("_ZGVnN2ls2_foo", "void(i32)"));
EXPECT_FALSE(invokeParser("_ZGVnN2Rs_foo"))
<< "A number should be present after \"Rs\".";
- EXPECT_TRUE(invokeParser("_ZGVnN2Rs4_foo"));
+ EXPECT_TRUE(invokeParser("_ZGVnN2Rs4_foo", "void(i32)"));
EXPECT_FALSE(invokeParser("_ZGVnN2Ls_foo"))
<< "A number should be present after \"Ls\".";
- EXPECT_TRUE(invokeParser("_ZGVnN2Ls6_foo"));
+ EXPECT_TRUE(invokeParser("_ZGVnN2Ls6_foo", "void(i32)"));
EXPECT_FALSE(invokeParser("_ZGVnN2Us_foo"))
<< "A number should be present after \"Us\".";
- EXPECT_TRUE(invokeParser("_ZGVnN2Us8_foo"));
+ EXPECT_TRUE(invokeParser("_ZGVnN2Us8_foo", "void(i32)"));
}
TEST_F(VFABIParserTest, LinearWithoutCompileTime) {
@@ -346,7 +329,6 @@ TEST_F(VFABIParserTest, LinearWithoutCompileTime) {
"void(i32, i32, ptr, i32, i32, i32, ptr, i32)"));
EXPECT_EQ(ISA, VFISAKind::AdvancedSIMD);
EXPECT_FALSE(isMasked());
- EXPECT_TRUE(matchParametersNum());
FunctionType *FTy = FunctionType::get(
Type::getVoidTy(Ctx),
{Type::getInt32Ty(Ctx), Type::getInt32Ty(Ctx),
@@ -373,7 +355,6 @@ TEST_F(VFABIParserTest, LLVM_ISA) {
EXPECT_TRUE(invokeParser("_ZGV_LLVM_N2v_foo(vector_foo)", "void(i32)"));
EXPECT_EQ(ISA, VFISAKind::LLVM);
EXPECT_FALSE(isMasked());
- EXPECT_TRUE(matchParametersNum());
EXPECT_EQ(getFunctionType(), FTyNoMaskVLen2_i32);
EXPECT_EQ(Parameters.size(), (unsigned)1);
EXPECT_EQ(Parameters[0], VFParameter({0, VFParamKind::Vector}));
@@ -393,7 +374,6 @@ TEST_F(VFABIParserTest, Align) {
EXPECT_TRUE(invokeParser("_ZGVsN2l2a2_foo(vector_foo)", "void(i32)"));
EXPECT_EQ(ISA, VFISAKind::SVE);
EXPECT_FALSE(isMasked());
- EXPECT_TRUE(matchParametersNum());
EXPECT_EQ(Parameters.size(), (unsigned)1);
EXPECT_EQ(Parameters[0].Alignment, Align(2));
EXPECT_EQ(ScalarName, "foo");
@@ -409,7 +389,7 @@ TEST_F(VFABIParserTest, Align) {
EXPECT_FALSE(invokeParser("_ZGVsM2a2_foo"));
// Alignment must be a power of 2.
EXPECT_FALSE(invokeParser("_ZGVsN2l2a0_foo"));
- EXPECT_TRUE(invokeParser("_ZGVsN2l2a1_foo"));
+ EXPECT_TRUE(invokeParser("_ZGVsN2l2a1_foo", "void(i32)"));
EXPECT_FALSE(invokeParser("_ZGVsN2l2a3_foo"));
EXPECT_FALSE(invokeParser("_ZGVsN2l2a6_foo"));
}
@@ -418,7 +398,6 @@ TEST_F(VFABIParserTest, ParseUniform) {
EXPECT_TRUE(invokeParser("_ZGVnN2u_foo(vector_foo)", "void(i32)"));
EXPECT_EQ(ISA, VFISAKind::AdvancedSIMD);
EXPECT_FALSE(isMasked());
- EXPECT_TRUE(matchParametersNum());
FunctionType *FTy =
FunctionType::get(Type::getVoidTy(Ctx), {Type::getInt32Ty(Ctx)}, false);
EXPECT_EQ(getFunctionType(), FTy);
@@ -463,7 +442,6 @@ TEST_F(VFABIParserTest, ISAIndependentMangling) {
do { \
EXPECT_EQ(VF, ElementCount::getFixed(2)); \
EXPECT_FALSE(isMasked()); \
- EXPECT_TRUE(matchParametersNum()); \
EXPECT_EQ(getFunctionType(), FTy); \
EXPECT_EQ(Parameters.size(), (unsigned)10); \
EXPECT_EQ(Parameters, ExpectedParams); \
@@ -539,7 +517,6 @@ TEST_F(VFABIParserTest, ParseMaskingNEON) {
EXPECT_TRUE(invokeParser("_ZGVnM2v_foo(vector_foo)", "void(i32)"));
EXPECT_EQ(ISA, VFISAKind::AdvancedSIMD);
EXPECT_TRUE(isMasked());
- EXPECT_TRUE(matchParametersNum());
EXPECT_EQ(getFunctionType(), FTyMaskVLen2_i32);
EXPECT_EQ(VF, ElementCount::getFixed(2));
EXPECT_EQ(Parameters.size(), (unsigned)2);
@@ -553,7 +530,6 @@ TEST_F(VFABIParserTest, ParseMaskingSVE) {
EXPECT_TRUE(invokeParser("_ZGVsM2v_foo(vector_foo)", "void(i32)"));
EXPECT_EQ(ISA, VFISAKind::SVE);
EXPECT_TRUE(isMasked());
- EXPECT_TRUE(matchParametersNum());
EXPECT_EQ(getFunctionType(), FTyMaskVLen2_i32);
EXPECT_EQ(VF, ElementCount::getFixed(2));
EXPECT_EQ(Parameters.size(), (unsigned)2);
@@ -567,7 +543,6 @@ TEST_F(VFABIParserTest, ParseMaskingSSE) {
EXPECT_TRUE(invokeParser("_ZGVbM2v_foo(vector_foo)", "void(i32)"));
EXPECT_EQ(ISA, VFISAKind::SSE);
EXPECT_TRUE(isMasked());
- EXPECT_TRUE(matchParametersNum());
EXPECT_EQ(getFunctionType(), FTyMaskVLen2_i32);
EXPECT_EQ(VF, ElementCount::getFixed(2));
EXPECT_EQ(Parameters.size(), (unsigned)2);
@@ -581,7 +556,6 @@ TEST_F(VFABIParserTest, ParseMaskingAVX) {
EXPECT_TRUE(invokeParser("_ZGVcM2v_foo(vector_foo)", "void(i32)"));
EXPECT_EQ(ISA, VFISAKind::AVX);
EXPECT_TRUE(isMasked());
- EXPECT_TRUE(matchParametersNum());
EXPECT_EQ(getFunctionType(), FTyMaskVLen2_i32);
EXPECT_EQ(VF, ElementCount::getFixed(2));
EXPECT_EQ(Parameters.size(), (unsigned)2);
@@ -595,7 +569,6 @@ TEST_F(VFABIParserTest, ParseMaskingAVX2) {
EXPECT_TRUE(invokeParser("_ZGVdM2v_foo(vector_foo)", "void(i32)"));
EXPECT_EQ(ISA, VFISAKind::AVX2);
EXPECT_TRUE(isMasked());
- EXPECT_TRUE(matchParametersNum());
EXPECT_EQ(getFunctionType(), FTyMaskVLen2_i32);
EXPECT_EQ(VF, ElementCount::getFixed(2));
EXPECT_EQ(Parameters.size(), (unsigned)2);
@@ -609,7 +582,6 @@ TEST_F(VFABIParserTest, ParseMaskingAVX512) {
EXPECT_TRUE(invokeParser("_ZGVeM2v_foo(vector_foo)", "void(i32)"));
EXPECT_EQ(ISA, VFISAKind::AVX512);
EXPECT_TRUE(isMasked());
- EXPECT_TRUE(matchParametersNum());
EXPECT_EQ(getFunctionType(), FTyMaskVLen2_i32);
EXPECT_EQ(VF, ElementCount::getFixed(2));
EXPECT_EQ(Parameters.size(), (unsigned)2);
@@ -623,7 +595,6 @@ TEST_F(VFABIParserTest, ParseMaskingLLVM) {
EXPECT_TRUE(invokeParser("_ZGV_LLVM_M2v_foo(vector_foo)", "void(i32)"));
EXPECT_EQ(ISA, VFISAKind::LLVM);
EXPECT_TRUE(isMasked());
- EXPECT_TRUE(matchParametersNum());
EXPECT_EQ(getFunctionType(), FTyMaskVLen2_i32);
EXPECT_EQ(VF, ElementCount::getFixed(2));
EXPECT_EQ(Parameters.size(), (unsigned)2);
@@ -642,7 +613,6 @@ TEST_F(VFABIParserTest, LLVM_InternalISA) {
EXPECT_TRUE(invokeParser("_ZGV_LLVM_N2v_foo(vector_foo)", "void(i32)"));
EXPECT_EQ(ISA, VFISAKind::LLVM);
EXPECT_FALSE(isMasked());
- EXPECT_TRUE(matchParametersNum());
EXPECT_EQ(getFunctionType(), FTyNoMaskVLen2_i32);
EXPECT_EQ(Parameters.size(), (unsigned)1);
EXPECT_EQ(Parameters[0], VFParameter({0, VFParamKind::Vector}));
@@ -655,7 +625,6 @@ TEST_F(VFABIParserTest, LLVM_Intrinsics) {
"void(float, float)"));
EXPECT_EQ(ISA, VFISAKind::LLVM);
EXPECT_FALSE(isMasked());
- EXPECT_TRUE(matchParametersNum());
FunctionType *FTy = FunctionType::get(
Type::getVoidTy(Ctx),
{
@@ -678,7 +647,6 @@ TEST_F(VFABIParserTest, ParseScalableRequiresDeclaration) {
EXPECT_TRUE(invokeParser(MangledName, "void(i32)"));
EXPECT_EQ(ISA, VFISAKind::SVE);
EXPECT_TRUE(isMasked());
- EXPECT_TRUE(matchParametersNum());
EXPECT_EQ(getFunctionType(), FTyMaskedVLA_i32);
EXPECT_EQ(Parameters.size(), (unsigned)2);
EXPECT_EQ(Parameters[0], VFParameter({0, VFParamKind::Vector}));
@@ -698,7 +666,6 @@ TEST_F(VFABIParserTest, ParseScalableMaskingSVE) {
EXPECT_TRUE(invokeParser("_ZGVsMxv_foo(vector_foo)", "i32(i32)"));
EXPECT_EQ(ISA, VFISAKind::SVE);
EXPECT_TRUE(isMasked());
- EXPECT_TRUE(matchParametersNum());
FunctionType *FTy = FunctionType::get(
VectorType::get(Type::getInt32Ty(Ctx), ElementCount::getScalable(4)),
{VectorType::get(Type::getInt32Ty(Ctx), ElementCount::getScalable(4)),
@@ -718,7 +685,6 @@ TEST_F(VFABIParserTest, ParseScalableMaskingSVESincos) {
"void(double, ptr, ptr)"));
EXPECT_EQ(ISA, VFISAKind::SVE);
EXPECT_TRUE(isMasked());
- EXPECT_TRUE(matchParametersNum());
FunctionType *FTy = FunctionType::get(
Type::getVoidTy(Ctx),
{
@@ -745,7 +711,6 @@ TEST_F(VFABIParserTest, ParseWiderReturnTypeSVE) {
EXPECT_TRUE(invokeParser("_ZGVsMxvv_foo(vector_foo)", "i64(i32, i32)"));
EXPECT_EQ(ISA, VFISAKind::SVE);
EXPECT_TRUE(isMasked());
- EXPECT_TRUE(matchParametersNum());
FunctionType *FTy = FunctionType::get(
VectorType::get(Type::getInt64Ty(Ctx), ElementCount::getScalable(2)),
{
@@ -769,7 +734,6 @@ TEST_F(VFABIParserTest, ParseVoidReturnTypeSVE) {
EXPECT_TRUE(invokeParser("_ZGVsMxv_foo(vector_foo)", "void(i16)"));
EXPECT_EQ(ISA, VFISAKind::SVE);
EXPECT_TRUE(isMasked());
- EXPECT_TRUE(matchParametersNum());
FunctionType *FTy = FunctionType::get(
Type::getVoidTy(Ctx),
{
diff --git a/llvm/unittests/Object/ELFObjectFileTest.cpp b/llvm/unittests/Object/ELFObjectFileTest.cpp
index 48bebc54ccc2..3a2a8e3d3264 100644
--- a/llvm/unittests/Object/ELFObjectFileTest.cpp
+++ b/llvm/unittests/Object/ELFObjectFileTest.cpp
@@ -1017,10 +1017,23 @@ Sections:
BrProb: 0xffffffff
- BBFreq: 1000
Successors: []
-)");
+ - Name: .llvm_bb_addr_map_5
+ Type: SHT_LLVM_BB_ADDR_MAP
+  # Link: 0 (by default, can be overridden)
+ Entries:
+ - Version: 2
+ Address: 0x55555
+ Feature: 0x0
+ BBEntries:
+ - ID: 2
+ AddressOffset: 0x0
+ Size: 0x2
+ Metadata: 0x4
+ PGOAnalyses: [{}]
+ )");
BBAddrMap E1(0x11111, {{1, 0x0, 0x1, {false, true, false, false, false}}});
- PGOAnalysisMap P1 = {892, {{}}, {true, false, false}};
+ PGOAnalysisMap P1 = {892, {}, {true, false, false}};
BBAddrMap E2(0x22222, {{2, 0x0, 0x2, {false, false, true, false, false}}});
PGOAnalysisMap P2 = {{}, {{BlockFrequency(343), {}}}, {false, true, false}};
BBAddrMap E3(0x33333, {{0, 0x0, 0x3, {false, true, true, false, false}},
@@ -1049,16 +1062,18 @@ Sections:
{BlockFrequency(18), {{3, BranchProbability::getRaw(0xffff'ffff)}}},
{BlockFrequency(1000), {}}},
{true, true, true}};
+ BBAddrMap E5(0x55555, {{2, 0x0, 0x2, {false, false, true, false, false}}});
+ PGOAnalysisMap P5 = {{}, {}, {false, false, false}};
- std::vector<BBAddrMap> Section0BBAddrMaps = {E4};
+ std::vector<BBAddrMap> Section0BBAddrMaps = {E4, E5};
std::vector<BBAddrMap> Section1BBAddrMaps = {E3};
std::vector<BBAddrMap> Section2BBAddrMaps = {E1, E2};
- std::vector<BBAddrMap> AllBBAddrMaps = {E1, E2, E3, E4};
+ std::vector<BBAddrMap> AllBBAddrMaps = {E1, E2, E3, E4, E5};
- std::vector<PGOAnalysisMap> Section0PGOAnalysisMaps = {P4};
+ std::vector<PGOAnalysisMap> Section0PGOAnalysisMaps = {P4, P5};
std::vector<PGOAnalysisMap> Section1PGOAnalysisMaps = {P3};
std::vector<PGOAnalysisMap> Section2PGOAnalysisMaps = {P1, P2};
- std::vector<PGOAnalysisMap> AllPGOAnalysisMaps = {P1, P2, P3, P4};
+ std::vector<PGOAnalysisMap> AllPGOAnalysisMaps = {P1, P2, P3, P4, P5};
auto DoCheckSucceeds =
[&](StringRef YamlString, std::optional<unsigned> TextSectionIndex,
@@ -1081,6 +1096,10 @@ Sections:
if (ExpectedPGO) {
EXPECT_EQ(BBAddrMaps->size(), PGOAnalyses.size());
EXPECT_EQ(PGOAnalyses, *ExpectedPGO);
+ for (auto &&[BB, PGO] : llvm::zip(*BBAddrMaps, PGOAnalyses)) {
+ if (PGO.FeatEnable.BBFreq || PGO.FeatEnable.BrProb)
+ EXPECT_EQ(BB.getBBEntries().size(), PGO.BBEntries.size());
+ }
}
};
@@ -1132,9 +1151,9 @@ Sections:
Link: 10
)";
- DoCheckFails(InvalidLinkedYamlString, /*TextSectionIndex=*/4,
+ DoCheckFails(InvalidLinkedYamlString, /*TextSectionIndex=*/5,
"unable to get the linked-to section for "
- "SHT_LLVM_BB_ADDR_MAP section with index 4: invalid section "
+ "SHT_LLVM_BB_ADDR_MAP section with index 5: invalid section "
"index: 10");
// Linked sections are not checked when we don't target a specific text
// section.
@@ -1150,7 +1169,7 @@ Sections:
)";
DoCheckFails(TruncatedYamlString, /*TextSectionIndex=*/std::nullopt,
- "unable to read SHT_LLVM_BB_ADDR_MAP section with index 4: "
+ "unable to read SHT_LLVM_BB_ADDR_MAP section with index 5: "
"unable to decode LEB128 at offset 0x0000000a: malformed "
"uleb128, extends past end");
// Check that we can read the other section's bb-address-maps which are
diff --git a/llvm/utils/TableGen/CMakeLists.txt b/llvm/utils/TableGen/CMakeLists.txt
index f765cc36d3be..0100bf345ec2 100644
--- a/llvm/utils/TableGen/CMakeLists.txt
+++ b/llvm/utils/TableGen/CMakeLists.txt
@@ -82,7 +82,7 @@ add_tablegen(llvm-tblgen LLVM
Types.cpp
VarLenCodeEmitterGen.cpp
X86DisassemblerTables.cpp
- X86EVEX2VEXTablesEmitter.cpp
+ X86CompressEVEXTablesEmitter.cpp
X86FoldTablesEmitter.cpp
X86MnemonicTables.cpp
X86ModRMFilters.cpp
diff --git a/llvm/utils/TableGen/X86CompressEVEXTablesEmitter.cpp b/llvm/utils/TableGen/X86CompressEVEXTablesEmitter.cpp
new file mode 100644
index 000000000000..aa8527e75380
--- /dev/null
+++ b/llvm/utils/TableGen/X86CompressEVEXTablesEmitter.cpp
@@ -0,0 +1,206 @@
+//==- utils/TableGen/X86CompressEVEXTablesEmitter.cpp - X86 backend-*- C++ -*-//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+///
+/// This tablegen backend is responsible for emitting the X86 backend EVEX
+/// compression tables.
+///
+//===----------------------------------------------------------------------===//
+
+#include "CodeGenInstruction.h"
+#include "CodeGenTarget.h"
+#include "X86RecognizableInstr.h"
+#include "llvm/TableGen/Error.h"
+#include "llvm/TableGen/Record.h"
+#include "llvm/TableGen/TableGenBackend.h"
+#include <map>
+#include <set>
+
+using namespace llvm;
+using namespace X86Disassembler;
+
+namespace {
+
+const std::map<StringRef, StringRef> ManualMap = {
+#define ENTRY(OLD, NEW) {#OLD, #NEW},
+#include "X86ManualCompressEVEXTables.def"
+};
+const std::set<StringRef> NoCompressSet = {
+#define NOCOMP(INSN) #INSN,
+#include "X86ManualCompressEVEXTables.def"
+};
+
+class X86CompressEVEXTablesEmitter {
+ RecordKeeper &Records;
+ CodeGenTarget Target;
+
+  // Hold all potentially compressible EVEX instructions
+ std::vector<const CodeGenInstruction *> PreCompressionInsts;
+  // Hold all compressed instructions. Divided into groups with the same
+  // opcode to make the search more efficient.
+ std::map<uint64_t, std::vector<const CodeGenInstruction *>> CompressedInsts;
+
+ typedef std::pair<const CodeGenInstruction *, const CodeGenInstruction *>
+ Entry;
+
+ std::vector<Entry> Table;
+
+public:
+ X86CompressEVEXTablesEmitter(RecordKeeper &R) : Records(R), Target(R) {}
+
+ // run - Output X86 EVEX compression tables.
+ void run(raw_ostream &OS);
+
+private:
+ // Prints the given table as a C++ array of type X86CompressEVEXTableEntry
+ void printTable(const std::vector<Entry> &Table, raw_ostream &OS);
+};
+
+void X86CompressEVEXTablesEmitter::printTable(const std::vector<Entry> &Table,
+ raw_ostream &OS) {
+
+ OS << "static const X86CompressEVEXTableEntry X86CompressEVEXTable[] = { \n";
+
+ // Print all entries added to the table
+ for (const auto &Pair : Table)
+ OS << " { X86::" << Pair.first->TheDef->getName()
+ << ", X86::" << Pair.second->TheDef->getName() << " },\n";
+
+ OS << "};\n\n";
+}
+
+static uint8_t byteFromBitsInit(const BitsInit *B) {
+ unsigned N = B->getNumBits();
+ assert(N <= 8 && "Field is too large for uint8_t!");
+
+ uint8_t Value = 0;
+ for (unsigned I = 0; I != N; ++I) {
+ BitInit *Bit = cast<BitInit>(B->getBit(I));
+ Value |= Bit->getValue() << I;
+ }
+ return Value;
+}
+
+class IsMatch {
+ const CodeGenInstruction *OldInst;
+
+public:
+ IsMatch(const CodeGenInstruction *OldInst) : OldInst(OldInst) {}
+
+ bool operator()(const CodeGenInstruction *NewInst) {
+ RecognizableInstrBase NewRI(*NewInst);
+ RecognizableInstrBase OldRI(*OldInst);
+
+    // Return false if any of the following fields does not match.
+ if (std::make_tuple(OldRI.IsCodeGenOnly, OldRI.OpMap, NewRI.OpPrefix,
+ OldRI.HasVEX_4V, OldRI.HasVEX_L, OldRI.HasREX_W,
+ OldRI.Form) !=
+ std::make_tuple(NewRI.IsCodeGenOnly, NewRI.OpMap, OldRI.OpPrefix,
+ NewRI.HasVEX_4V, NewRI.HasVEX_L, NewRI.HasREX_W,
+ NewRI.Form))
+ return false;
+
+ for (unsigned I = 0, E = OldInst->Operands.size(); I < E; ++I) {
+ Record *OldOpRec = OldInst->Operands[I].Rec;
+ Record *NewOpRec = NewInst->Operands[I].Rec;
+
+ if (OldOpRec == NewOpRec)
+ continue;
+
+ if (isRegisterOperand(OldOpRec) && isRegisterOperand(NewOpRec)) {
+ if (getRegOperandSize(OldOpRec) != getRegOperandSize(NewOpRec))
+ return false;
+ } else if (isMemoryOperand(OldOpRec) && isMemoryOperand(NewOpRec)) {
+ if (getMemOperandSize(OldOpRec) != getMemOperandSize(NewOpRec))
+ return false;
+ } else if (isImmediateOperand(OldOpRec) && isImmediateOperand(NewOpRec)) {
+ if (OldOpRec->getValueAsDef("Type") != NewOpRec->getValueAsDef("Type"))
+ return false;
+ }
+ }
+
+ return true;
+ }
+};
+
+void X86CompressEVEXTablesEmitter::run(raw_ostream &OS) {
+ emitSourceFileHeader("X86 EVEX compression tables", OS);
+
+ ArrayRef<const CodeGenInstruction *> NumberedInstructions =
+ Target.getInstructionsByEnumValue();
+
+ for (const CodeGenInstruction *Inst : NumberedInstructions) {
+ const Record *Rec = Inst->TheDef;
+ StringRef Name = Rec->getName();
+    // _REV instructions should not appear before encoding optimization.
+ if (!Rec->isSubClassOf("X86Inst") ||
+ Rec->getValueAsBit("isAsmParserOnly") || Name.ends_with("_REV"))
+ continue;
+
+    // A promoted legacy instruction is in EVEX space and has a REX2-encoding
+    // alternative. It's added due to HW design and never emitted by the compiler.
+ if (byteFromBitsInit(Rec->getValueAsBitsInit("OpMapBits")) ==
+ X86Local::T_MAP4 &&
+ byteFromBitsInit(Rec->getValueAsBitsInit("explicitOpPrefixBits")) ==
+ X86Local::ExplicitEVEX)
+ continue;
+
+ if (NoCompressSet.find(Name) != NoCompressSet.end())
+ continue;
+
+ RecognizableInstrBase RI(*Inst);
+
+ bool IsND = RI.OpMap == X86Local::T_MAP4 && RI.HasEVEX_B && RI.HasVEX_4V;
+ // Add VEX encoded instructions to one of CompressedInsts vectors according
+    // to its opcode.
+ if (RI.Encoding == X86Local::VEX)
+ CompressedInsts[RI.Opcode].push_back(Inst);
+ // Add relevant EVEX encoded instructions to PreCompressionInsts
+ else if (RI.Encoding == X86Local::EVEX && !RI.HasEVEX_K && !RI.HasEVEX_L2 &&
+ (!RI.HasEVEX_B || IsND))
+ PreCompressionInsts.push_back(Inst);
+ }
+
+ for (const CodeGenInstruction *Inst : PreCompressionInsts) {
+ const Record *Rec = Inst->TheDef;
+ uint8_t Opcode = byteFromBitsInit(Rec->getValueAsBitsInit("Opcode"));
+ StringRef Name = Rec->getName();
+ const CodeGenInstruction *NewInst = nullptr;
+ if (ManualMap.find(Name) != ManualMap.end()) {
+ Record *NewRec = Records.getDef(ManualMap.at(Rec->getName()));
+ assert(NewRec && "Instruction not found!");
+ NewInst = &Target.getInstruction(NewRec);
+ } else if (Name.ends_with("_EVEX")) {
+ if (auto *NewRec = Records.getDef(Name.drop_back(5)))
+ NewInst = &Target.getInstruction(NewRec);
+ } else if (Name.ends_with("_ND")) {
+ if (auto *NewRec = Records.getDef(Name.drop_back(3))) {
+ auto &TempInst = Target.getInstruction(NewRec);
+ if (isRegisterOperand(TempInst.Operands[0].Rec))
+ NewInst = &TempInst;
+ }
+ } else {
+ // For each pre-compression instruction look for a match in the appropriate
+ // vector (instructions with the same opcode) using function object
+ // IsMatch.
+ auto Match = llvm::find_if(CompressedInsts[Opcode], IsMatch(Inst));
+ if (Match != CompressedInsts[Opcode].end())
+ NewInst = *Match;
+ }
+
+ if (!NewInst)
+ continue;
+
+ Table.push_back(std::make_pair(Inst, NewInst));
+ }
+
+ printTable(Table, OS);
+}
+} // namespace
+
+static TableGen::Emitter::OptClass<X86CompressEVEXTablesEmitter>
+ X("gen-x86-compress-evex-tables", "Generate X86 EVEX compression tables");
diff --git a/llvm/utils/TableGen/X86EVEX2VEXTablesEmitter.cpp b/llvm/utils/TableGen/X86EVEX2VEXTablesEmitter.cpp
deleted file mode 100644
index c80d9a199fa3..000000000000
--- a/llvm/utils/TableGen/X86EVEX2VEXTablesEmitter.cpp
+++ /dev/null
@@ -1,210 +0,0 @@
-//===- utils/TableGen/X86EVEX2VEXTablesEmitter.cpp - X86 backend-*- C++ -*-===//
-//
-// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
-// See https://llvm.org/LICENSE.txt for license information.
-// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
-//
-//===----------------------------------------------------------------------===//
-///
-/// This tablegen backend is responsible for emitting the X86 backend EVEX2VEX
-/// compression tables.
-///
-//===----------------------------------------------------------------------===//
-
-#include "CodeGenInstruction.h"
-#include "CodeGenTarget.h"
-#include "X86RecognizableInstr.h"
-#include "llvm/TableGen/Error.h"
-#include "llvm/TableGen/Record.h"
-#include "llvm/TableGen/TableGenBackend.h"
-
-using namespace llvm;
-using namespace X86Disassembler;
-
-namespace {
-
-class X86EVEX2VEXTablesEmitter {
- RecordKeeper &Records;
- CodeGenTarget Target;
-
- // Hold all non-masked & non-broadcasted EVEX encoded instructions
- std::vector<const CodeGenInstruction *> EVEXInsts;
- // Hold all VEX encoded instructions. Divided into groups with same opcodes
- // to make the search more efficient
- std::map<uint64_t, std::vector<const CodeGenInstruction *>> VEXInsts;
-
- typedef std::pair<const CodeGenInstruction *, const CodeGenInstruction *>
- Entry;
-
- // Represent both compress tables
- std::vector<Entry> EVEX2VEX128;
- std::vector<Entry> EVEX2VEX256;
-
-public:
- X86EVEX2VEXTablesEmitter(RecordKeeper &R) : Records(R), Target(R) {}
-
- // run - Output X86 EVEX2VEX tables.
- void run(raw_ostream &OS);
-
-private:
- // Prints the given table as a C++ array of type
- // X86EvexToVexCompressTableEntry
- void printTable(const std::vector<Entry> &Table, raw_ostream &OS);
-};
-
-void X86EVEX2VEXTablesEmitter::printTable(const std::vector<Entry> &Table,
- raw_ostream &OS) {
- StringRef Size = (Table == EVEX2VEX128) ? "128" : "256";
-
- OS << "// X86 EVEX encoded instructions that have a VEX " << Size
- << " encoding\n"
- << "// (table format: <EVEX opcode, VEX-" << Size << " opcode>).\n"
- << "static const X86EvexToVexCompressTableEntry X86EvexToVex" << Size
- << "CompressTable[] = {\n"
- << " // EVEX scalar with corresponding VEX.\n";
-
- // Print all entries added to the table
- for (const auto &Pair : Table) {
- OS << " { X86::" << Pair.first->TheDef->getName()
- << ", X86::" << Pair.second->TheDef->getName() << " },\n";
- }
-
- OS << "};\n\n";
-}
-
-// Return true if the 2 BitsInits are equal
-// Calculates the integer value residing BitsInit object
-static inline uint64_t getValueFromBitsInit(const BitsInit *B) {
- uint64_t Value = 0;
- for (unsigned i = 0, e = B->getNumBits(); i != e; ++i) {
- if (BitInit *Bit = dyn_cast<BitInit>(B->getBit(i)))
- Value |= uint64_t(Bit->getValue()) << i;
- else
- PrintFatalError("Invalid VectSize bit");
- }
- return Value;
-}
-
-// Function object - Operator() returns true if the given VEX instruction
-// matches the EVEX instruction of this object.
-class IsMatch {
- const CodeGenInstruction *EVEXInst;
-
-public:
- IsMatch(const CodeGenInstruction *EVEXInst) : EVEXInst(EVEXInst) {}
-
- bool operator()(const CodeGenInstruction *VEXInst) {
- RecognizableInstrBase VEXRI(*VEXInst);
- RecognizableInstrBase EVEXRI(*EVEXInst);
- bool VEX_W = VEXRI.HasREX_W;
- bool EVEX_W = EVEXRI.HasREX_W;
- bool VEX_WIG = VEXRI.IgnoresW;
- bool EVEX_WIG = EVEXRI.IgnoresW;
- bool EVEX_W1_VEX_W0 = EVEXInst->TheDef->getValueAsBit("EVEX_W1_VEX_W0");
-
- if (VEXRI.IsCodeGenOnly != EVEXRI.IsCodeGenOnly ||
- // VEX/EVEX fields
- VEXRI.OpPrefix != EVEXRI.OpPrefix || VEXRI.OpMap != EVEXRI.OpMap ||
- VEXRI.HasVEX_4V != EVEXRI.HasVEX_4V ||
- VEXRI.HasVEX_L != EVEXRI.HasVEX_L ||
- // Match is allowed if either is VEX_WIG, or they match, or EVEX
- // is VEX_W1X and VEX is VEX_W0.
- (!(VEX_WIG || (!EVEX_WIG && EVEX_W == VEX_W) ||
- (EVEX_W1_VEX_W0 && EVEX_W && !VEX_W))) ||
- // Instruction's format
- VEXRI.Form != EVEXRI.Form)
- return false;
-
- // This is needed for instructions with intrinsic version (_Int).
- // Where the only difference is the size of the operands.
- // For example: VUCOMISDZrm and Int_VUCOMISDrm
- // Also for instructions that their EVEX version was upgraded to work with
- // k-registers. For example VPCMPEQBrm (xmm output register) and
- // VPCMPEQBZ128rm (k register output register).
- for (unsigned i = 0, e = EVEXInst->Operands.size(); i < e; i++) {
- Record *OpRec1 = EVEXInst->Operands[i].Rec;
- Record *OpRec2 = VEXInst->Operands[i].Rec;
-
- if (OpRec1 == OpRec2)
- continue;
-
- if (isRegisterOperand(OpRec1) && isRegisterOperand(OpRec2)) {
- if (getRegOperandSize(OpRec1) != getRegOperandSize(OpRec2))
- return false;
- } else if (isMemoryOperand(OpRec1) && isMemoryOperand(OpRec2)) {
- return false;
- } else if (isImmediateOperand(OpRec1) && isImmediateOperand(OpRec2)) {
- if (OpRec1->getValueAsDef("Type") != OpRec2->getValueAsDef("Type")) {
- return false;
- }
- } else
- return false;
- }
-
- return true;
- }
-};
-
-void X86EVEX2VEXTablesEmitter::run(raw_ostream &OS) {
- emitSourceFileHeader("X86 EVEX2VEX tables", OS);
-
- ArrayRef<const CodeGenInstruction *> NumberedInstructions =
- Target.getInstructionsByEnumValue();
-
- for (const CodeGenInstruction *Inst : NumberedInstructions) {
- const Record *Def = Inst->TheDef;
- // Filter non-X86 instructions.
- if (!Def->isSubClassOf("X86Inst"))
- continue;
- // _REV instruction should not appear before encoding optimization
- if (Def->getName().ends_with("_REV"))
- continue;
- RecognizableInstrBase RI(*Inst);
-
- // Add VEX encoded instructions to one of VEXInsts vectors according to
- // it's opcode.
- if (RI.Encoding == X86Local::VEX)
- VEXInsts[RI.Opcode].push_back(Inst);
- // Add relevant EVEX encoded instructions to EVEXInsts
- else if (RI.Encoding == X86Local::EVEX && !RI.HasEVEX_K && !RI.HasEVEX_B &&
- !RI.HasEVEX_L2 && !Def->getValueAsBit("notEVEX2VEXConvertible"))
- EVEXInsts.push_back(Inst);
- }
-
- for (const CodeGenInstruction *EVEXInst : EVEXInsts) {
- uint64_t Opcode = getValueFromBitsInit(EVEXInst->TheDef->
- getValueAsBitsInit("Opcode"));
- // For each EVEX instruction look for a VEX match in the appropriate vector
- // (instructions with the same opcode) using function object IsMatch.
- // Allow EVEX2VEXOverride to explicitly specify a match.
- const CodeGenInstruction *VEXInst = nullptr;
- if (!EVEXInst->TheDef->isValueUnset("EVEX2VEXOverride")) {
- StringRef AltInstStr =
- EVEXInst->TheDef->getValueAsString("EVEX2VEXOverride");
- Record *AltInstRec = Records.getDef(AltInstStr);
- assert(AltInstRec && "EVEX2VEXOverride instruction not found!");
- VEXInst = &Target.getInstruction(AltInstRec);
- } else {
- auto Match = llvm::find_if(VEXInsts[Opcode], IsMatch(EVEXInst));
- if (Match != VEXInsts[Opcode].end())
- VEXInst = *Match;
- }
-
- if (!VEXInst)
- continue;
-
- // In case a match is found add new entry to the appropriate table
- if (EVEXInst->TheDef->getValueAsBit("hasVEX_L"))
- EVEX2VEX256.push_back(std::make_pair(EVEXInst, VEXInst)); // {0,1}
- else
- EVEX2VEX128.push_back(std::make_pair(EVEXInst, VEXInst)); // {0,0}
- }
-
- // Print both tables
- printTable(EVEX2VEX128, OS);
- printTable(EVEX2VEX256, OS);
-}
-} // namespace
-
-static TableGen::Emitter::OptClass<X86EVEX2VEXTablesEmitter>
- X("gen-x86-EVEX2VEX-tables", "Generate X86 EVEX to VEX compress tables");
diff --git a/llvm/utils/TableGen/X86FoldTablesEmitter.cpp b/llvm/utils/TableGen/X86FoldTablesEmitter.cpp
index 101b75e2f087..8a860d0945bb 100644
--- a/llvm/utils/TableGen/X86FoldTablesEmitter.cpp
+++ b/llvm/utils/TableGen/X86FoldTablesEmitter.cpp
@@ -374,8 +374,7 @@ public:
RegRI.HasEVEX_L2, RegRI.HasEVEX_NF,
RegRec->getValueAsBit("hasEVEX_RC"),
RegRec->getValueAsBit("hasLockPrefix"),
- RegRec->getValueAsBit("hasNoTrackPrefix"),
- RegRec->getValueAsBit("EVEX_W1_VEX_W0")) !=
+ RegRec->getValueAsBit("hasNoTrackPrefix")) !=
std::make_tuple(MemRI.Encoding, MemRI.Opcode, MemRI.OpPrefix,
MemRI.OpMap, MemRI.OpSize, MemRI.AdSize, MemRI.HasREX_W,
MemRI.HasVEX_4V, MemRI.HasVEX_L, MemRI.IgnoresVEX_L,
@@ -383,8 +382,7 @@ public:
MemRI.HasEVEX_L2, MemRI.HasEVEX_NF,
MemRec->getValueAsBit("hasEVEX_RC"),
MemRec->getValueAsBit("hasLockPrefix"),
- MemRec->getValueAsBit("hasNoTrackPrefix"),
- MemRec->getValueAsBit("EVEX_W1_VEX_W0")))
+ MemRec->getValueAsBit("hasNoTrackPrefix")))
return false;
// Make sure the sizes of the operands of both instructions suit each other.
diff --git a/llvm/utils/TableGen/X86ManualCompressEVEXTables.def b/llvm/utils/TableGen/X86ManualCompressEVEXTables.def
new file mode 100644
index 000000000000..58ca10e9e10f
--- /dev/null
+++ b/llvm/utils/TableGen/X86ManualCompressEVEXTables.def
@@ -0,0 +1,331 @@
+//===- X86ManualCompressEVEXTables.def ---------------------------*- C++ -*-==//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+// \file
+// This file defines all the entries in X86 EVEX compression tables that need
+// special handling.
+//===----------------------------------------------------------------------===//
+
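+// The table emitter defines NOCOMP and ENTRY before including this file to
+// build its NoCompressSet and ManualMap; minimal expansions could look like:
+//   #define NOCOMP(INSN) #INSN,
+//   #define ENTRY(OLD, NEW) {#OLD, #NEW},
+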
+#ifndef NOCOMP
+#define NOCOMP(INSN)
+#endif
+NOCOMP(VCVTQQ2PDZ128rr)
+NOCOMP(VCVTQQ2PSZ128rm)
+NOCOMP(VCVTQQ2PSZ128rr)
+NOCOMP(VDBPSADBWZ128rmi)
+NOCOMP(VDBPSADBWZ128rri)
+NOCOMP(VPMAXSQZ128rm)
+NOCOMP(VPMAXSQZ128rr)
+NOCOMP(VPMAXUQZ128rm)
+NOCOMP(VPMAXUQZ128rr)
+NOCOMP(VPMINSQZ128rm)
+NOCOMP(VPMINSQZ128rr)
+NOCOMP(VPMINUQZ128rm)
+NOCOMP(VPMINUQZ128rr)
+NOCOMP(VPMULLQZ128rm)
+NOCOMP(VPMULLQZ128rr)
+NOCOMP(VPSRAQZ128ri)
+NOCOMP(VPSRAQZ128rm)
+NOCOMP(VPSRAQZ128rr)
+NOCOMP(VSCALEFPSZ128rm)
+NOCOMP(VDBPSADBWZ256rmi)
+NOCOMP(VDBPSADBWZ256rri)
+NOCOMP(VPMAXSQZ256rm)
+NOCOMP(VPMAXSQZ256rr)
+NOCOMP(VPMAXUQZ256rm)
+NOCOMP(VPMAXUQZ256rr)
+NOCOMP(VPMINSQZ256rm)
+NOCOMP(VPMINSQZ256rr)
+NOCOMP(VPMINUQZ256rm)
+NOCOMP(VPMINUQZ256rr)
+NOCOMP(VPMULLQZ256rm)
+NOCOMP(VPMULLQZ256rr)
+NOCOMP(VPSRAQZ256ri)
+NOCOMP(VPSRAQZ256rm)
+NOCOMP(VPSRAQZ256rr)
+NOCOMP(VSCALEFPSZ256rm)
+#undef NOCOMP
+
+#ifndef ENTRY
+#define ENTRY(OLD, NEW)
+#endif
+ENTRY(VALIGNDZ128rmi, VPALIGNRrmi)
+ENTRY(VALIGNDZ128rri, VPALIGNRrri)
+ENTRY(VALIGNQZ128rmi, VPALIGNRrmi)
+ENTRY(VALIGNQZ128rri, VPALIGNRrri)
+ENTRY(VMAXSDZrm, VMAXSDrm)
+ENTRY(VMAXSDZrr, VMAXSDrr)
+ENTRY(VMAXSSZrm, VMAXSSrm)
+ENTRY(VMAXSSZrr, VMAXSSrr)
+ENTRY(VMINSDZrm, VMINSDrm)
+ENTRY(VMINSDZrr, VMINSDrr)
+ENTRY(VMINSSZrm, VMINSSrm)
+ENTRY(VMINSSZrr, VMINSSrr)
+ENTRY(VMOVDQU16Z128mr, VMOVDQUmr)
+ENTRY(VMOVDQU16Z128rm, VMOVDQUrm)
+ENTRY(VMOVDQU16Z128rr, VMOVDQUrr)
+ENTRY(VMOVDQU8Z128mr, VMOVDQUmr)
+ENTRY(VMOVDQU8Z128rm, VMOVDQUrm)
+ENTRY(VMOVDQU8Z128rr, VMOVDQUrr)
+ENTRY(VMOVDQU16Z256mr, VMOVDQUYmr)
+ENTRY(VMOVDQU16Z256rm, VMOVDQUYrm)
+ENTRY(VMOVDQU16Z256rr, VMOVDQUYrr)
+ENTRY(VMOVDQU8Z256mr, VMOVDQUYmr)
+ENTRY(VMOVDQU8Z256rm, VMOVDQUYrm)
+ENTRY(VMOVDQU8Z256rr, VMOVDQUYrr)
+ENTRY(VSHUFF32X4Z256rmi, VPERM2F128rm)
+ENTRY(VSHUFF32X4Z256rri, VPERM2F128rr)
+ENTRY(VSHUFF64X2Z256rmi, VPERM2F128rm)
+ENTRY(VSHUFF64X2Z256rri, VPERM2F128rr)
+ENTRY(VSHUFI32X4Z256rmi, VPERM2I128rm)
+ENTRY(VSHUFI32X4Z256rri, VPERM2I128rr)
+ENTRY(VSHUFI64X2Z256rmi, VPERM2I128rm)
+ENTRY(VSHUFI64X2Z256rri, VPERM2I128rr)
+// W bit does not match
+ENTRY(VADDPDZ128rm, VADDPDrm)
+ENTRY(VADDPDZ128rr, VADDPDrr)
+ENTRY(VADDSDZrm, VADDSDrm)
+ENTRY(VADDSDZrm_Int, VADDSDrm_Int)
+ENTRY(VADDSDZrr, VADDSDrr)
+ENTRY(VADDSDZrr_Int, VADDSDrr_Int)
+ENTRY(VANDNPDZ128rm, VANDNPDrm)
+ENTRY(VANDNPDZ128rr, VANDNPDrr)
+ENTRY(VANDPDZ128rm, VANDPDrm)
+ENTRY(VANDPDZ128rr, VANDPDrr)
+ENTRY(VCOMISDZrm, VCOMISDrm)
+ENTRY(VCOMISDZrm_Int, VCOMISDrm_Int)
+ENTRY(VCOMISDZrr, VCOMISDrr)
+ENTRY(VCOMISDZrr_Int, VCOMISDrr_Int)
+ENTRY(VCVTPD2DQZ128rm, VCVTPD2DQrm)
+ENTRY(VCVTPD2DQZ128rr, VCVTPD2DQrr)
+ENTRY(VCVTPD2PSZ128rm, VCVTPD2PSrm)
+ENTRY(VCVTPD2PSZ128rr, VCVTPD2PSrr)
+ENTRY(VCVTSD2SSZrm, VCVTSD2SSrm)
+ENTRY(VCVTSD2SSZrm_Int, VCVTSD2SSrm_Int)
+ENTRY(VCVTSD2SSZrr, VCVTSD2SSrr)
+ENTRY(VCVTSD2SSZrr_Int, VCVTSD2SSrr_Int)
+ENTRY(VCVTTPD2DQZ128rm, VCVTTPD2DQrm)
+ENTRY(VCVTTPD2DQZ128rr, VCVTTPD2DQrr)
+ENTRY(VDIVPDZ128rm, VDIVPDrm)
+ENTRY(VDIVPDZ128rr, VDIVPDrr)
+ENTRY(VDIVSDZrm, VDIVSDrm)
+ENTRY(VDIVSDZrm_Int, VDIVSDrm_Int)
+ENTRY(VDIVSDZrr, VDIVSDrr)
+ENTRY(VDIVSDZrr_Int, VDIVSDrr_Int)
+ENTRY(VMAXCPDZ128rm, VMAXCPDrm)
+ENTRY(VMAXCPDZ128rr, VMAXCPDrr)
+ENTRY(VMAXCSDZrm, VMAXCSDrm)
+ENTRY(VMAXCSDZrr, VMAXCSDrr)
+ENTRY(VMAXPDZ128rm, VMAXPDrm)
+ENTRY(VMAXPDZ128rr, VMAXPDrr)
+ENTRY(VMAXSDZrm_Int, VMAXSDrm_Int)
+ENTRY(VMAXSDZrr_Int, VMAXSDrr_Int)
+ENTRY(VMINCPDZ128rm, VMINCPDrm)
+ENTRY(VMINCPDZ128rr, VMINCPDrr)
+ENTRY(VMINCSDZrm, VMINCSDrm)
+ENTRY(VMINCSDZrr, VMINCSDrr)
+ENTRY(VMINPDZ128rm, VMINPDrm)
+ENTRY(VMINPDZ128rr, VMINPDrr)
+ENTRY(VMINSDZrm_Int, VMINSDrm_Int)
+ENTRY(VMINSDZrr_Int, VMINSDrr_Int)
+ENTRY(VMOVAPDZ128mr, VMOVAPDmr)
+ENTRY(VMOVAPDZ128rm, VMOVAPDrm)
+ENTRY(VMOVAPDZ128rr, VMOVAPDrr)
+ENTRY(VMOVDDUPZ128rm, VMOVDDUPrm)
+ENTRY(VMOVDDUPZ128rr, VMOVDDUPrr)
+ENTRY(VMOVDQA64Z128mr, VMOVDQAmr)
+ENTRY(VMOVDQA64Z128rm, VMOVDQArm)
+ENTRY(VMOVDQA64Z128rr, VMOVDQArr)
+ENTRY(VMOVDQU64Z128mr, VMOVDQUmr)
+ENTRY(VMOVDQU64Z128rm, VMOVDQUrm)
+ENTRY(VMOVDQU64Z128rr, VMOVDQUrr)
+ENTRY(VMOVHPDZ128mr, VMOVHPDmr)
+ENTRY(VMOVHPDZ128rm, VMOVHPDrm)
+ENTRY(VMOVLPDZ128mr, VMOVLPDmr)
+ENTRY(VMOVLPDZ128rm, VMOVLPDrm)
+ENTRY(VMOVNTPDZ128mr, VMOVNTPDmr)
+ENTRY(VMOVPQI2QIZmr, VMOVPQI2QImr)
+ENTRY(VMOVPQI2QIZrr, VMOVPQI2QIrr)
+ENTRY(VMOVQI2PQIZrm, VMOVQI2PQIrm)
+ENTRY(VMOVSDZmr, VMOVSDmr)
+ENTRY(VMOVSDZrm, VMOVSDrm)
+ENTRY(VMOVSDZrm_alt, VMOVSDrm_alt)
+ENTRY(VMOVSDZrr, VMOVSDrr)
+ENTRY(VMOVUPDZ128mr, VMOVUPDmr)
+ENTRY(VMOVUPDZ128rm, VMOVUPDrm)
+ENTRY(VMOVUPDZ128rr, VMOVUPDrr)
+ENTRY(VMOVZPQILo2PQIZrr, VMOVZPQILo2PQIrr)
+ENTRY(VMULPDZ128rm, VMULPDrm)
+ENTRY(VMULPDZ128rr, VMULPDrr)
+ENTRY(VMULSDZrm, VMULSDrm)
+ENTRY(VMULSDZrm_Int, VMULSDrm_Int)
+ENTRY(VMULSDZrr, VMULSDrr)
+ENTRY(VMULSDZrr_Int, VMULSDrr_Int)
+ENTRY(VORPDZ128rm, VORPDrm)
+ENTRY(VORPDZ128rr, VORPDrr)
+ENTRY(VPADDQZ128rm, VPADDQrm)
+ENTRY(VPADDQZ128rr, VPADDQrr)
+ENTRY(VPANDNQZ128rm, VPANDNrm)
+ENTRY(VPANDNQZ128rr, VPANDNrr)
+ENTRY(VPANDQZ128rm, VPANDrm)
+ENTRY(VPANDQZ128rr, VPANDrr)
+ENTRY(VPERMILPDZ128mi, VPERMILPDmi)
+ENTRY(VPERMILPDZ128ri, VPERMILPDri)
+ENTRY(VPERMILPDZ128rm, VPERMILPDrm)
+ENTRY(VPERMILPDZ128rr, VPERMILPDrr)
+ENTRY(VPMULDQZ128rm, VPMULDQrm)
+ENTRY(VPMULDQZ128rr, VPMULDQrr)
+ENTRY(VPMULUDQZ128rm, VPMULUDQrm)
+ENTRY(VPMULUDQZ128rr, VPMULUDQrr)
+ENTRY(VPORQZ128rm, VPORrm)
+ENTRY(VPORQZ128rr, VPORrr)
+ENTRY(VPSLLQZ128ri, VPSLLQri)
+ENTRY(VPSLLQZ128rm, VPSLLQrm)
+ENTRY(VPSLLQZ128rr, VPSLLQrr)
+ENTRY(VPSRLQZ128ri, VPSRLQri)
+ENTRY(VPSRLQZ128rm, VPSRLQrm)
+ENTRY(VPSRLQZ128rr, VPSRLQrr)
+ENTRY(VPSUBQZ128rm, VPSUBQrm)
+ENTRY(VPSUBQZ128rr, VPSUBQrr)
+ENTRY(VPUNPCKHQDQZ128rm, VPUNPCKHQDQrm)
+ENTRY(VPUNPCKHQDQZ128rr, VPUNPCKHQDQrr)
+ENTRY(VPUNPCKLQDQZ128rm, VPUNPCKLQDQrm)
+ENTRY(VPUNPCKLQDQZ128rr, VPUNPCKLQDQrr)
+ENTRY(VPXORQZ128rm, VPXORrm)
+ENTRY(VPXORQZ128rr, VPXORrr)
+ENTRY(VRNDSCALEPDZ128rmi, VROUNDPDm)
+ENTRY(VRNDSCALEPDZ128rri, VROUNDPDr)
+ENTRY(VRNDSCALESDZm, VROUNDSDm)
+ENTRY(VRNDSCALESDZm_Int, VROUNDSDm_Int)
+ENTRY(VRNDSCALESDZr, VROUNDSDr)
+ENTRY(VRNDSCALESDZr_Int, VROUNDSDr_Int)
+ENTRY(VSHUFPDZ128rmi, VSHUFPDrmi)
+ENTRY(VSHUFPDZ128rri, VSHUFPDrri)
+ENTRY(VSQRTPDZ128m, VSQRTPDm)
+ENTRY(VSQRTPDZ128r, VSQRTPDr)
+ENTRY(VSQRTSDZm, VSQRTSDm)
+ENTRY(VSQRTSDZm_Int, VSQRTSDm_Int)
+ENTRY(VSQRTSDZr, VSQRTSDr)
+ENTRY(VSQRTSDZr_Int, VSQRTSDr_Int)
+ENTRY(VSUBPDZ128rm, VSUBPDrm)
+ENTRY(VSUBPDZ128rr, VSUBPDrr)
+ENTRY(VSUBSDZrm, VSUBSDrm)
+ENTRY(VSUBSDZrm_Int, VSUBSDrm_Int)
+ENTRY(VSUBSDZrr, VSUBSDrr)
+ENTRY(VSUBSDZrr_Int, VSUBSDrr_Int)
+ENTRY(VUCOMISDZrm, VUCOMISDrm)
+ENTRY(VUCOMISDZrm_Int, VUCOMISDrm_Int)
+ENTRY(VUCOMISDZrr, VUCOMISDrr)
+ENTRY(VUCOMISDZrr_Int, VUCOMISDrr_Int)
+ENTRY(VUNPCKHPDZ128rm, VUNPCKHPDrm)
+ENTRY(VUNPCKHPDZ128rr, VUNPCKHPDrr)
+ENTRY(VUNPCKLPDZ128rm, VUNPCKLPDrm)
+ENTRY(VUNPCKLPDZ128rr, VUNPCKLPDrr)
+ENTRY(VXORPDZ128rm, VXORPDrm)
+ENTRY(VXORPDZ128rr, VXORPDrr)
+ENTRY(VADDPDZ256rm, VADDPDYrm)
+ENTRY(VADDPDZ256rr, VADDPDYrr)
+ENTRY(VANDNPDZ256rm, VANDNPDYrm)
+ENTRY(VANDNPDZ256rr, VANDNPDYrr)
+ENTRY(VANDPDZ256rm, VANDPDYrm)
+ENTRY(VANDPDZ256rr, VANDPDYrr)
+ENTRY(VCVTPD2DQZ256rm, VCVTPD2DQYrm)
+ENTRY(VCVTPD2DQZ256rr, VCVTPD2DQYrr)
+ENTRY(VCVTPD2PSZ256rm, VCVTPD2PSYrm)
+ENTRY(VCVTPD2PSZ256rr, VCVTPD2PSYrr)
+ENTRY(VCVTTPD2DQZ256rm, VCVTTPD2DQYrm)
+ENTRY(VCVTTPD2DQZ256rr, VCVTTPD2DQYrr)
+ENTRY(VDIVPDZ256rm, VDIVPDYrm)
+ENTRY(VDIVPDZ256rr, VDIVPDYrr)
+ENTRY(VEXTRACTF64x2Z256mr, VEXTRACTF128mr)
+ENTRY(VEXTRACTF64x2Z256rr, VEXTRACTF128rr)
+ENTRY(VEXTRACTI64x2Z256mr, VEXTRACTI128mr)
+ENTRY(VEXTRACTI64x2Z256rr, VEXTRACTI128rr)
+ENTRY(VINSERTF64x2Z256rm, VINSERTF128rm)
+ENTRY(VINSERTF64x2Z256rr, VINSERTF128rr)
+ENTRY(VINSERTI64x2Z256rm, VINSERTI128rm)
+ENTRY(VINSERTI64x2Z256rr, VINSERTI128rr)
+ENTRY(VMAXCPDZ256rm, VMAXCPDYrm)
+ENTRY(VMAXCPDZ256rr, VMAXCPDYrr)
+ENTRY(VMAXPDZ256rm, VMAXPDYrm)
+ENTRY(VMAXPDZ256rr, VMAXPDYrr)
+ENTRY(VMINCPDZ256rm, VMINCPDYrm)
+ENTRY(VMINCPDZ256rr, VMINCPDYrr)
+ENTRY(VMINPDZ256rm, VMINPDYrm)
+ENTRY(VMINPDZ256rr, VMINPDYrr)
+ENTRY(VMOVAPDZ256mr, VMOVAPDYmr)
+ENTRY(VMOVAPDZ256rm, VMOVAPDYrm)
+ENTRY(VMOVAPDZ256rr, VMOVAPDYrr)
+ENTRY(VMOVDDUPZ256rm, VMOVDDUPYrm)
+ENTRY(VMOVDDUPZ256rr, VMOVDDUPYrr)
+ENTRY(VMOVDQA64Z256mr, VMOVDQAYmr)
+ENTRY(VMOVDQA64Z256rm, VMOVDQAYrm)
+ENTRY(VMOVDQA64Z256rr, VMOVDQAYrr)
+ENTRY(VMOVDQU64Z256mr, VMOVDQUYmr)
+ENTRY(VMOVDQU64Z256rm, VMOVDQUYrm)
+ENTRY(VMOVDQU64Z256rr, VMOVDQUYrr)
+ENTRY(VMOVNTPDZ256mr, VMOVNTPDYmr)
+ENTRY(VMOVUPDZ256mr, VMOVUPDYmr)
+ENTRY(VMOVUPDZ256rm, VMOVUPDYrm)
+ENTRY(VMOVUPDZ256rr, VMOVUPDYrr)
+ENTRY(VMULPDZ256rm, VMULPDYrm)
+ENTRY(VMULPDZ256rr, VMULPDYrr)
+ENTRY(VORPDZ256rm, VORPDYrm)
+ENTRY(VORPDZ256rr, VORPDYrr)
+ENTRY(VPADDQZ256rm, VPADDQYrm)
+ENTRY(VPADDQZ256rr, VPADDQYrr)
+ENTRY(VPANDNQZ256rm, VPANDNYrm)
+ENTRY(VPANDNQZ256rr, VPANDNYrr)
+ENTRY(VPANDQZ256rm, VPANDYrm)
+ENTRY(VPANDQZ256rr, VPANDYrr)
+ENTRY(VPERMILPDZ256mi, VPERMILPDYmi)
+ENTRY(VPERMILPDZ256ri, VPERMILPDYri)
+ENTRY(VPERMILPDZ256rm, VPERMILPDYrm)
+ENTRY(VPERMILPDZ256rr, VPERMILPDYrr)
+ENTRY(VPMULDQZ256rm, VPMULDQYrm)
+ENTRY(VPMULDQZ256rr, VPMULDQYrr)
+ENTRY(VPMULUDQZ256rm, VPMULUDQYrm)
+ENTRY(VPMULUDQZ256rr, VPMULUDQYrr)
+ENTRY(VPORQZ256rm, VPORYrm)
+ENTRY(VPORQZ256rr, VPORYrr)
+ENTRY(VPSLLQZ256ri, VPSLLQYri)
+ENTRY(VPSLLQZ256rm, VPSLLQYrm)
+ENTRY(VPSLLQZ256rr, VPSLLQYrr)
+ENTRY(VPSRLQZ256ri, VPSRLQYri)
+ENTRY(VPSRLQZ256rm, VPSRLQYrm)
+ENTRY(VPSRLQZ256rr, VPSRLQYrr)
+ENTRY(VPSUBQZ256rm, VPSUBQYrm)
+ENTRY(VPSUBQZ256rr, VPSUBQYrr)
+ENTRY(VPUNPCKHQDQZ256rm, VPUNPCKHQDQYrm)
+ENTRY(VPUNPCKHQDQZ256rr, VPUNPCKHQDQYrr)
+ENTRY(VPUNPCKLQDQZ256rm, VPUNPCKLQDQYrm)
+ENTRY(VPUNPCKLQDQZ256rr, VPUNPCKLQDQYrr)
+ENTRY(VPXORQZ256rm, VPXORYrm)
+ENTRY(VPXORQZ256rr, VPXORYrr)
+ENTRY(VRNDSCALEPDZ256rmi, VROUNDPDYm)
+ENTRY(VRNDSCALEPDZ256rri, VROUNDPDYr)
+ENTRY(VSHUFPDZ256rmi, VSHUFPDYrmi)
+ENTRY(VSHUFPDZ256rri, VSHUFPDYrri)
+ENTRY(VSQRTPDZ256m, VSQRTPDYm)
+ENTRY(VSQRTPDZ256r, VSQRTPDYr)
+ENTRY(VSUBPDZ256rm, VSUBPDYrm)
+ENTRY(VSUBPDZ256rr, VSUBPDYrr)
+ENTRY(VUNPCKHPDZ256rm, VUNPCKHPDYrm)
+ENTRY(VUNPCKHPDZ256rr, VUNPCKHPDYrr)
+ENTRY(VUNPCKLPDZ256rm, VUNPCKLPDYrm)
+ENTRY(VUNPCKLPDZ256rr, VUNPCKLPDYrr)
+ENTRY(VXORPDZ256rm, VXORPDYrm)
+ENTRY(VXORPDZ256rr, VXORPDYrr)
+ENTRY(VPBROADCASTQZ128rm, VPBROADCASTQrm)
+ENTRY(VPBROADCASTQZ128rr, VPBROADCASTQrr)
+ENTRY(VBROADCASTF64X2Z128rm, VBROADCASTF128rm)
+ENTRY(VBROADCASTI64X2Z128rm, VBROADCASTI128rm)
+ENTRY(VBROADCASTSDZ256rm, VBROADCASTSDYrm)
+ENTRY(VBROADCASTSDZ256rr, VBROADCASTSDYrr)
+ENTRY(VPBROADCASTQZ256rm, VPBROADCASTQYrm)
+ENTRY(VPBROADCASTQZ256rr, VPBROADCASTQYrr)
+#undef ENTRY
diff --git a/llvm/utils/gn/secondary/libcxx/src/BUILD.gn b/llvm/utils/gn/secondary/libcxx/src/BUILD.gn
index c1211bb384d3..095cd5bb5b7c 100644
--- a/llvm/utils/gn/secondary/libcxx/src/BUILD.gn
+++ b/llvm/utils/gn/secondary/libcxx/src/BUILD.gn
@@ -122,6 +122,7 @@ cxx_sources = [
"condition_variable_destructor.cpp",
"error_category.cpp",
"exception.cpp",
+ "fstream.cpp",
"functional.cpp",
"future.cpp",
"hash.cpp",
diff --git a/llvm/utils/gn/secondary/llvm/lib/Target/X86/BUILD.gn b/llvm/utils/gn/secondary/llvm/lib/Target/X86/BUILD.gn
index e8aa57fc9bc1..12d45f1af1db 100644
--- a/llvm/utils/gn/secondary/llvm/lib/Target/X86/BUILD.gn
+++ b/llvm/utils/gn/secondary/llvm/lib/Target/X86/BUILD.gn
@@ -12,9 +12,9 @@ tablegen("X86GenDAGISel") {
td_file = "X86.td"
}
-tablegen("X86GenEVEX2VEXTables") {
+tablegen("X86GenCompressEVEXTables") {
visibility = [ ":LLVMX86CodeGen" ]
- args = [ "-gen-x86-EVEX2VEX-tables" ]
+ args = [ "-gen-x86-compress-evex-tables" ]
td_file = "X86.td"
}
@@ -48,8 +48,8 @@ tablegen("X86GenRegisterBank") {
static_library("LLVMX86CodeGen") {
deps = [
":X86GenCallingConv",
+ ":X86GenCompressEVEXTables",
":X86GenDAGISel",
- ":X86GenEVEX2VEXTables",
":X86GenFastISel",
":X86GenFoldTables",
":X86GenGlobalISel",
@@ -83,10 +83,10 @@ static_library("LLVMX86CodeGen") {
"X86CallFrameOptimization.cpp",
"X86CallingConv.cpp",
"X86CmovConversion.cpp",
+ "X86CompressEVEX.cpp",
"X86DiscriminateMemOps.cpp",
"X86DomainReassignment.cpp",
"X86DynAllocaExpander.cpp",
- "X86EvexToVex.cpp",
"X86ExpandPseudo.cpp",
"X86FastISel.cpp",
"X86FastPreTileConfig.cpp",
diff --git a/llvm/utils/gn/secondary/llvm/utils/TableGen/BUILD.gn b/llvm/utils/gn/secondary/llvm/utils/TableGen/BUILD.gn
index ac0dd8674355..9c321fbc27c4 100644
--- a/llvm/utils/gn/secondary/llvm/utils/TableGen/BUILD.gn
+++ b/llvm/utils/gn/secondary/llvm/utils/TableGen/BUILD.gn
@@ -56,9 +56,9 @@ executable("llvm-tblgen") {
"DisassemblerEmitter.cpp",
"ExegesisEmitter.cpp",
"FastISelEmitter.cpp",
+ "GlobalISelCombinerEmitter.cpp",
"GlobalISelEmitter.cpp",
"GlobalISelMatchTable.cpp",
- "GlobalISelCombinerEmitter.cpp",
"GlobalISelMatchTableExecutorEmitter.cpp",
"InfoByHwMode.cpp",
"InstrDocsEmitter.cpp",
@@ -77,8 +77,8 @@ executable("llvm-tblgen") {
"Types.cpp",
"VarLenCodeEmitterGen.cpp",
"WebAssemblyDisassemblerEmitter.cpp",
+ "X86CompressEVEXTablesEmitter.cpp",
"X86DisassemblerTables.cpp",
- "X86EVEX2VEXTablesEmitter.cpp",
"X86FoldTablesEmitter.cpp",
"X86MnemonicTables.cpp",
"X86ModRMFilters.cpp",
diff --git a/mlir/examples/toy/Ch6/toyc.cpp b/mlir/examples/toy/Ch6/toyc.cpp
index 534f0d60e800..ddc0c2516bb3 100644
--- a/mlir/examples/toy/Ch6/toyc.cpp
+++ b/mlir/examples/toy/Ch6/toyc.cpp
@@ -187,8 +187,7 @@ int loadAndProcessMLIR(mlir::MLIRContext &context,
// This is necessary to have line tables emitted and basic
// debugger working. In the future we will add proper debug information
// emission directly from our frontend.
- pm.addNestedPass<mlir::LLVM::LLVMFuncOp>(
- mlir::LLVM::createDIScopeForLLVMFuncOpPass());
+ pm.addPass(mlir::LLVM::createDIScopeForLLVMFuncOpPass());
}
if (mlir::failed(pm.run(*module)))
diff --git a/mlir/examples/toy/Ch7/toyc.cpp b/mlir/examples/toy/Ch7/toyc.cpp
index e4af0a3a3dce..5eb40b779bcd 100644
--- a/mlir/examples/toy/Ch7/toyc.cpp
+++ b/mlir/examples/toy/Ch7/toyc.cpp
@@ -188,8 +188,7 @@ int loadAndProcessMLIR(mlir::MLIRContext &context,
// This is necessary to have line tables emitted and basic
// debugger working. In the future we will add proper debug information
// emission directly from our frontend.
- pm.addNestedPass<mlir::LLVM::LLVMFuncOp>(
- mlir::LLVM::createDIScopeForLLVMFuncOpPass());
+ pm.addPass(mlir::LLVM::createDIScopeForLLVMFuncOpPass());
}
if (mlir::failed(pm.run(*module)))
diff --git a/mlir/include/mlir/Analysis/Presburger/Barvinok.h b/mlir/include/mlir/Analysis/Presburger/Barvinok.h
new file mode 100644
index 000000000000..15e805860db2
--- /dev/null
+++ b/mlir/include/mlir/Analysis/Presburger/Barvinok.h
@@ -0,0 +1,84 @@
+//===- Barvinok.h - Barvinok's Algorithm ------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// Implementation of Barvinok's algorithm and related utility functions.
+// Currently a work in progress.
+// These include functions to manipulate cones (define a cone object, get its
+// dual, and find its index).
+//
+// The implementation is based on:
+// 1. Barvinok, Alexander, and James E. Pommersheim. "An algorithmic theory of
+// lattice points in polyhedra." New perspectives in algebraic combinatorics
+// 38 (1999): 91-147.
+// 2. Verdoolaege, Sven, et al. "Counting integer points in parametric
+// polytopes using Barvinok's rational functions." Algorithmica 48 (2007):
+// 37-66.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef MLIR_ANALYSIS_PRESBURGER_BARVINOK_H
+#define MLIR_ANALYSIS_PRESBURGER_BARVINOK_H
+
+#include "mlir/Analysis/Presburger/IntegerRelation.h"
+#include "mlir/Analysis/Presburger/Matrix.h"
+#include <optional>
+
+namespace mlir {
+namespace presburger {
+namespace detail {
+
+/// A polyhedron in H-representation is a set of inequalities
+/// in d variables with integer coefficients.
+using PolyhedronH = IntegerRelation;
+
+/// A polyhedron in V-representation is a set of rays and points, i.e.,
+/// vectors, stored as rows of a matrix.
+using PolyhedronV = IntMatrix;
+
+/// A cone in either representation is a special case of
+/// a polyhedron in that representation.
+using ConeH = PolyhedronH;
+using ConeV = PolyhedronV;
+
+inline ConeH defineHRep(int numVars) {
+  // We don't distinguish between domain and range variables, so we set the
+  // number of domain variables to 0 and the number of range variables to the
+  // number of actual variables.
+ // There are no symbols (we don't work with parametric cones) and no local
+ // (existentially quantified) variables.
+ // Once the cone is defined, we use `addInequality()` to set inequalities.
+ return ConeH(PresburgerSpace::getSetSpace(/*numDims=*/numVars,
+ /*numSymbols=*/0,
+ /*numLocals=*/0));
+}
+
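+// For instance, the 2-D cone {x >= 0, y >= 0}, pointed at the origin, could
+// be set up as:
+//   ConeH cone = defineHRep(2);
+//   cone.addInequality({1, 0, 0}); // x >= 0; each row is [a1, ..., an, b].
+//   cone.addInequality({0, 1, 0}); // y >= 0
+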
+/// Get the index of a cone, i.e., the volume of the parallelepiped
+/// spanned by its generators, which is equal to the number of integer
+/// points in its fundamental parallelepiped.
+/// If the index is 1, the cone is unimodular.
+/// If the cone has more rays than the dimension, return 0.
+/// See Barvinok, A., and J. E. Pommersheim, "An algorithmic theory of lattice
+/// points in polyhedra.", p. 107.
+MPInt getIndex(ConeV cone);
+
+/// Given a cone in H-representation, return its dual. The dual cone is in
+/// V-representation.
+/// This assumes that the input is pointed at the origin; it assert-fails
+/// otherwise.
+ConeV getDual(ConeH cone);
+
+/// Given a cone in V-representation, return its dual. The dual cone is in
+/// H-representation.
+/// The returned cone is pointed at the origin.
+ConeH getDual(ConeV cone);
+
+} // namespace detail
+} // namespace presburger
+} // namespace mlir
+
+#endif // MLIR_ANALYSIS_PRESBURGER_BARVINOK_H
diff --git a/mlir/lib/Analysis/Presburger/GeneratingFunction.h b/mlir/include/mlir/Analysis/Presburger/GeneratingFunction.h
index f7deba921ea5..eaf0449fe8a1 100644
--- a/mlir/lib/Analysis/Presburger/GeneratingFunction.h
+++ b/mlir/include/mlir/Analysis/Presburger/GeneratingFunction.h
@@ -19,9 +19,10 @@
namespace mlir {
namespace presburger {
+namespace detail {
// A parametric point is a vector, each of whose elements
-// is an affine function of n parameters. Each row
+// is an affine function of n parameters. Each column
// in the matrix represents the affine function and
// has n+1 elements.
using ParamPoint = FracMatrix;
@@ -55,7 +56,7 @@ public:
: numParam(numParam), signs(signs), numerators(nums), denominators(dens) {
#ifndef NDEBUG
for (const ParamPoint &term : numerators)
- assert(term.getNumColumns() == numParam + 1 &&
+ assert(term.getNumRows() == numParam + 1 &&
"dimensionality of numerator exponents does not match number of "
"parameters!");
#endif // NDEBUG
@@ -83,7 +84,8 @@ public:
std::vector<std::vector<Point>> sumDenominators = denominators;
sumDenominators.insert(sumDenominators.end(), gf.denominators.begin(),
gf.denominators.end());
- return GeneratingFunction(0, sumSigns, sumNumerators, sumDenominators);
+ return GeneratingFunction(numParam, sumSigns, sumNumerators,
+ sumDenominators);
}
llvm::raw_ostream &print(llvm::raw_ostream &os) const {
@@ -128,6 +130,7 @@ private:
std::vector<std::vector<Point>> denominators;
};
+} // namespace detail
} // namespace presburger
} // namespace mlir
diff --git a/mlir/include/mlir/Analysis/Presburger/IntegerRelation.h b/mlir/include/mlir/Analysis/Presburger/IntegerRelation.h
index 4c6b810f92e9..cd957280eb74 100644
--- a/mlir/include/mlir/Analysis/Presburger/IntegerRelation.h
+++ b/mlir/include/mlir/Analysis/Presburger/IntegerRelation.h
@@ -577,6 +577,11 @@ public:
convertVarKind(kind, varStart, varLimit, VarKind::Local);
}
+ /// Merge and align symbol variables of `this` and `other` with respect to
+ /// identifiers. After this operation the symbol variables of both relations
+ /// have the same identifiers in the same order.
+ void mergeAndAlignSymbols(IntegerRelation &other);
+
/// Adds additional local vars to the sets such that they both have the union
/// of the local vars in each set, without changing the set of points that
/// lie in `this` and `other`.
diff --git a/mlir/include/mlir/Dialect/Bufferization/Transforms/BufferUtils.h b/mlir/include/mlir/Dialect/Bufferization/Transforms/BufferUtils.h
index 85e9c47ad530..e5f3b6d571f4 100644
--- a/mlir/include/mlir/Dialect/Bufferization/Transforms/BufferUtils.h
+++ b/mlir/include/mlir/Dialect/Bufferization/Transforms/BufferUtils.h
@@ -73,39 +73,36 @@ private:
AllocEntryList allocs;
};
+/// Finds a common dominator for the given value while taking the positions
+/// of the values in the value set into account. It supports dominator and
+/// post-dominator analyses via template arguments. If no common dominator
+/// can be found, this function will return "nullptr".
+template <typename DominatorT>
+Block *findCommonDominator(Value value,
+ const BufferViewFlowAnalysis::ValueSetT &values,
+ const DominatorT &doms) {
+ // Store blocks in a set before querying `DominanceInfo` to filter out
+ // duplicate blocks (for performance reasons).
+ llvm::SmallPtrSet<Block *, 16> blocks;
+ // Start with the current block the value is defined in.
+ blocks.insert(value.getParentBlock());
+ for (Value childValue : values) {
+ for (Operation *user : childValue.getUsers()) {
+ // Find an appropriate dominator block that takes the current use into
+ // account.
+ blocks.insert(user->getBlock());
+ }
+ // Take values without any users into account.
+ blocks.insert(childValue.getParentBlock());
+ }
+ return doms.findNearestCommonDominator(blocks);
+}
+
/// The base class for all BufferPlacement transformations.
class BufferPlacementTransformationBase {
public:
using ValueSetT = BufferViewFlowAnalysis::ValueSetT;
- /// Finds a common dominator for the given value while taking the positions
- /// of the values in the value set into account. It supports dominator and
- /// post-dominator analyses via template arguments.
- template <typename DominatorT>
- static Block *findCommonDominator(Value value, const ValueSetT &values,
- const DominatorT &doms) {
- // Start with the current block the value is defined in.
- Block *dom = value.getParentBlock();
- // Iterate over all aliases and their uses to find a safe placement block
- // according to the given dominator information.
- for (Value childValue : values) {
- for (Operation *user : childValue.getUsers()) {
- // Move upwards in the dominator tree to find an appropriate
- // dominator block that takes the current use into account.
- dom = doms.findNearestCommonDominator(dom, user->getBlock());
- }
- // Take values without any users into account.
- dom = doms.findNearestCommonDominator(dom, childValue.getParentBlock());
- }
- return dom;
- }
-
- /// Returns true if the given operation represents a loop by testing whether
- /// it implements the `LoopLikeOpInterface` or the `RegionBranchOpInterface`.
- /// In the case of a `RegionBranchOpInterface`, it checks all region-based
- /// control-flow edges for cycles.
- static bool isLoop(Operation *op);
-
/// Constructs a new operation base using the given root operation.
BufferPlacementTransformationBase(Operation *op);
diff --git a/mlir/include/mlir/Dialect/GPU/IR/GPUOps.td b/mlir/include/mlir/Dialect/GPU/IR/GPUOps.td
index efef61b5c6e7..8d4a110ee801 100644
--- a/mlir/include/mlir/Dialect/GPU/IR/GPUOps.td
+++ b/mlir/include/mlir/Dialect/GPU/IR/GPUOps.td
@@ -678,6 +678,9 @@ def GPU_LaunchOp : GPU_Op<"launch", [
Arguments<(ins Variadic<GPU_AsyncToken>:$asyncDependencies,
Index:$gridSizeX, Index:$gridSizeY, Index:$gridSizeZ,
Index:$blockSizeX, Index:$blockSizeY, Index:$blockSizeZ,
+ Optional<Index>:$clusterSizeX,
+ Optional<Index>:$clusterSizeY,
+ Optional<Index>:$clusterSizeZ,
Optional<I32>:$dynamicSharedMemorySize)>,
Results<(outs Optional<GPU_AsyncToken>:$asyncToken)> {
let summary = "GPU kernel launch operation";
@@ -700,8 +703,11 @@ def GPU_LaunchOp : GPU_Op<"launch", [
to the amount of dynamic shared memory a kernel's workgroup should be
allocated; when this operand is not present, a zero size is assumed.
- The body region has at least _twelve_ arguments, grouped as follows:
+ The body region has at least _twelve_ arguments, or _eighteen_ if cluster
+ dimensions are present, grouped as follows:
+ - three optional arguments that contain cluster identifiers along x,y,z
+ dimensions;
- three arguments that contain block identifiers along x,y,z dimensions;
- three arguments that contain thread identifiers along x,y,z dimensions;
- operands of the `gpu.launch` operation as is (i.e. the operands for
@@ -713,6 +719,7 @@ def GPU_LaunchOp : GPU_Op<"launch", [
```
operation ::= `gpu.launch` (`async` (`[` ssa-id-list `]`)? )?
+ ( `clusters` `(` ssa-id-list `)` `in` ssa-reassignment )?
`blocks` `(` ssa-id-list `)` `in` ssa-reassignment
`threads` `(` ssa-id-list `)` `in` ssa-reassignment
(dynamic_shared_memory_size ssa-use)?
@@ -763,6 +770,16 @@ def GPU_LaunchOp : GPU_Op<"launch", [
// Assuming %val1 is defined outside the gpu.launch region.
%42 = load %workgroup[%bx] : memref<32xf32, 3>
}
+
+ // Launch with clusters.
+ gpu.launch clusters(%cx, %cy, %cz) in (%sz_cx = %0, %sz_cy = %1, %sz_cz = %2)
+ blocks(%bx, %by, %bz) in (%sz_bx = %3, %sz_by = %4, %sz_bz = %5)
+ threads(%tx, %ty, %tz) in (%sz_tx = %6, %sz_ty = %7, %sz_tz = %8)
+ {
+      // Cluster, block and thread identifiers, as well as cluster/block/grid
+      // sizes, are immediately usable inside the body region.
+ "some_op"(%cx, %bx, %tx) : (index, index, index) -> ()
+ }
```
Rationale: using operation/block arguments gives analyses a clear way of
@@ -784,7 +801,10 @@ def GPU_LaunchOp : GPU_Op<"launch", [
CArg<"Type", "nullptr">:$asyncTokenType,
CArg<"ValueRange", "{}">:$asyncDependencies,
CArg<"TypeRange", "{}">:$workgroupAttributions,
- CArg<"TypeRange", "{}">:$privateAttributions)>
+ CArg<"TypeRange", "{}">:$privateAttributions,
+ CArg<"Value", "nullptr">:$clusterSizeX,
+ CArg<"Value", "nullptr">:$clusterSizeY,
+ CArg<"Value", "nullptr">:$clusterSizeZ)>
];
let extraClassDeclaration = [{
@@ -792,17 +812,24 @@ def GPU_LaunchOp : GPU_Op<"launch", [
KernelDim3 getBlockIds();
/// Get the SSA values corresponding to kernel thread identifiers.
KernelDim3 getThreadIds();
+ /// Get the SSA values corresponding to kernel cluster identifiers.
+ std::optional<KernelDim3> getClusterIds();
/// Get the SSA values corresponding to kernel grid size.
KernelDim3 getGridSize();
/// Get the SSA values corresponding to kernel block size.
KernelDim3 getBlockSize();
+ /// Get the SSA values corresponding to kernel cluster size.
+ std::optional<KernelDim3> getClusterSize();
/// Get the SSA values passed as operands to specify the grid size.
KernelDim3 getGridSizeOperandValues();
/// Get the SSA values passed as operands to specify the block size.
KernelDim3 getBlockSizeOperandValues();
+ /// Get the SSA values passed as operands to specify the cluster size.
+ std::optional<KernelDim3> getClusterSizeOperandValues();
static StringRef getBlocksKeyword() { return "blocks"; }
+ static StringRef getClustersKeyword() { return "clusters"; }
static StringRef getThreadsKeyword() { return "threads"; }
static StringRef getDynamicSharedMemorySizeKeyword() {
return "dynamic_shared_memory_size";
@@ -816,6 +843,21 @@ def GPU_LaunchOp : GPU_Op<"launch", [
/// placed in the leading positions of the argument list.
static constexpr unsigned kNumConfigRegionAttributes = 12;
+ /// Returns true if cluster size is specified.
+ bool hasClusterSize() {
+    return getClusterSizeX() && getClusterSizeY() && getClusterSizeZ();
+ }
+ /// Returns the number of operands including cluster size
+ unsigned getNumConfigOperands() {
+ return kNumConfigOperands + (hasClusterSize() ? 3 : 0);
+ }
+ /// Returns the number of region attributes including cluster size
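+    /// (the extra six being the three cluster ids and the three cluster sizes).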
+ unsigned getNumConfigRegionAttributes() {
+ return kNumConfigRegionAttributes + (hasClusterSize() ? 6 : 0);
+ }
+
/// Returns the keywords used in the custom syntax for this Op.
static StringRef getWorkgroupKeyword() { return "workgroup"; }
static StringRef getPrivateKeyword() { return "private"; }
@@ -831,7 +873,7 @@ def GPU_LaunchOp : GPU_Op<"launch", [
/// the workgroup memory
ArrayRef<BlockArgument> getWorkgroupAttributions() {
auto begin =
- std::next(getBody().args_begin(), kNumConfigRegionAttributes);
+ std::next(getBody().args_begin(), getNumConfigRegionAttributes());
auto end = std::next(begin, getNumWorkgroupAttributions());
return {begin, end};
}
@@ -842,7 +884,7 @@ def GPU_LaunchOp : GPU_Op<"launch", [
/// Returns the number of buffers located in the private memory.
unsigned getNumPrivateAttributions() {
- return getBody().getNumArguments() - kNumConfigRegionAttributes -
+ return getBody().getNumArguments() - getNumConfigRegionAttributes() -
getNumWorkgroupAttributions();
}
@@ -853,7 +895,7 @@ def GPU_LaunchOp : GPU_Op<"launch", [
// memory.
auto begin =
std::next(getBody().args_begin(),
- kNumConfigRegionAttributes + getNumWorkgroupAttributions());
+ getNumConfigRegionAttributes() + getNumWorkgroupAttributions());
return {begin, getBody().args_end()};
}
@@ -871,6 +913,7 @@ def GPU_LaunchOp : GPU_Op<"launch", [
let hasCanonicalizer = 1;
let hasCustomAssemblyFormat = 1;
let hasRegionVerifier = 1;
+ let hasVerifier = 1;
}
def GPU_PrintfOp : GPU_Op<"printf", [MemoryEffects<[MemWrite]>]>,
diff --git a/mlir/include/mlir/Dialect/LLVMIR/LLVMAttrDefs.td b/mlir/include/mlir/Dialect/LLVMIR/LLVMAttrDefs.td
index f36ec0d02cf7..86ba9f4d3840 100644
--- a/mlir/include/mlir/Dialect/LLVMIR/LLVMAttrDefs.td
+++ b/mlir/include/mlir/Dialect/LLVMIR/LLVMAttrDefs.td
@@ -342,6 +342,7 @@ def LLVM_DIBasicTypeAttr : LLVM_Attr<"DIBasicType", "di_basic_type",
def LLVM_DICompileUnitAttr : LLVM_Attr<"DICompileUnit", "di_compile_unit",
/*traits=*/[], "DIScopeAttr"> {
let parameters = (ins
+ "DistinctAttr":$id,
LLVM_DILanguageParameter:$sourceLanguage,
"DIFileAttr":$file,
OptionalParameter<"StringAttr">:$producer,
@@ -516,6 +517,7 @@ def LLVM_DILocalVariableAttr : LLVM_Attr<"DILocalVariable", "di_local_variable",
def LLVM_DISubprogramAttr : LLVM_Attr<"DISubprogram", "di_subprogram",
/*traits=*/[], "DIScopeAttr"> {
let parameters = (ins
+ OptionalParameter<"DistinctAttr">:$id,
OptionalParameter<"DICompileUnitAttr">:$compileUnit,
"DIScopeAttr":$scope,
OptionalParameter<"StringAttr">:$name,
@@ -528,13 +530,13 @@ def LLVM_DISubprogramAttr : LLVM_Attr<"DISubprogram", "di_subprogram",
);
let builders = [
AttrBuilderWithInferredContext<(ins
- "DICompileUnitAttr":$compileUnit, "DIScopeAttr":$scope, "StringRef":$name,
- "StringRef":$linkageName, "DIFileAttr":$file, "unsigned":$line,
- "unsigned":$scopeLine, "DISubprogramFlags":$subprogramFlags,
- "DISubroutineTypeAttr":$type
+ "DistinctAttr":$id, "DICompileUnitAttr":$compileUnit,
+ "DIScopeAttr":$scope, "StringRef":$name, "StringRef":$linkageName,
+ "DIFileAttr":$file, "unsigned":$line, "unsigned":$scopeLine,
+ "DISubprogramFlags":$subprogramFlags, "DISubroutineTypeAttr":$type
), [{
MLIRContext *ctx = file.getContext();
- return $_get(ctx, compileUnit, scope, StringAttr::get(ctx, name),
+ return $_get(ctx, id, compileUnit, scope, StringAttr::get(ctx, name),
StringAttr::get(ctx, linkageName), file, line,
scopeLine, subprogramFlags, type);
}]>
diff --git a/mlir/include/mlir/Dialect/LLVMIR/Transforms/Passes.td b/mlir/include/mlir/Dialect/LLVMIR/Transforms/Passes.td
index 6ebbd08acfc4..0242cfd9abb7 100644
--- a/mlir/include/mlir/Dialect/LLVMIR/Transforms/Passes.td
+++ b/mlir/include/mlir/Dialect/LLVMIR/Transforms/Passes.td
@@ -66,7 +66,7 @@ def NVVMOptimizeForTarget : Pass<"llvm-optimize-for-nvvm-target"> {
let constructor = "::mlir::NVVM::createOptimizeForTargetPass()";
}
-def DIScopeForLLVMFuncOp : Pass<"ensure-debug-info-scope-on-llvm-func", "LLVM::LLVMFuncOp"> {
+def DIScopeForLLVMFuncOp : Pass<"ensure-debug-info-scope-on-llvm-func", "::mlir::ModuleOp"> {
let summary = "Materialize LLVM debug info subprogram attribute on every LLVMFuncOp";
let description = [{
Having a debug info subprogram attribute on a function is required for
diff --git a/mlir/include/mlir/Dialect/SPIRV/IR/SPIRVAtomicOps.td b/mlir/include/mlir/Dialect/SPIRV/IR/SPIRVAtomicOps.td
index f2032e940d08..88f902110b86 100644
--- a/mlir/include/mlir/Dialect/SPIRV/IR/SPIRVAtomicOps.td
+++ b/mlir/include/mlir/Dialect/SPIRV/IR/SPIRVAtomicOps.td
@@ -14,8 +14,20 @@
#ifndef MLIR_DIALECT_SPIRV_IR_ATOMIC_OPS
#define MLIR_DIALECT_SPIRV_IR_ATOMIC_OPS
-class SPIRV_AtomicUpdateOp<string mnemonic, list<Trait> traits = []> :
- SPIRV_Op<mnemonic, traits> {
+include "mlir/Dialect/SPIRV/IR/SPIRVBase.td"
+include "mlir/Interfaces/SideEffectInterfaces.td"
+
+class PointeeTypeMatchTrait<string pointer, string name>
+ : TypesMatchWith<
+ "`" # name # "` type matches pointee type of " # "`" # pointer # "`",
+ pointer, name, "llvm::cast<PointerType>($_self).getPointeeType()">;
+
+// -----
+
+class SPIRV_AtomicUpdateOp<string mnemonic, list<Trait> traits = []>
+ : SPIRV_Op<mnemonic,
+ !listconcat(traits,
+ [PointeeTypeMatchTrait<"pointer", "result">])> {
let arguments = (ins
SPIRV_AnyPtr:$pointer,
SPIRV_ScopeAttr:$memory_scope,
@@ -25,10 +37,19 @@ class SPIRV_AtomicUpdateOp<string mnemonic, list<Trait> traits = []> :
let results = (outs
SPIRV_Integer:$result
);
+
+ let assemblyFormat = [{
+ $memory_scope $semantics operands attr-dict `:` type($pointer)
+ }];
+
+ let hasCustomAssemblyFormat = 0;
}
-class SPIRV_AtomicUpdateWithValueOp<string mnemonic, list<Trait> traits = []> :
- SPIRV_Op<mnemonic, traits> {
+class SPIRV_AtomicUpdateWithValueOp<string mnemonic, list<Trait> traits = []>
+ : SPIRV_Op<mnemonic, !listconcat(traits, [
+ PointeeTypeMatchTrait<"pointer", "value">,
+ PointeeTypeMatchTrait<"pointer", "result">,
+ ])> {
let arguments = (ins
SPIRV_AnyPtr:$pointer,
SPIRV_ScopeAttr:$memory_scope,
@@ -40,11 +61,11 @@ class SPIRV_AtomicUpdateWithValueOp<string mnemonic, list<Trait> traits = []> :
SPIRV_Integer:$result
);
- let builders = [
- OpBuilder<(ins "Value":$pointer, "::mlir::spirv::Scope":$scope,
- "::mlir::spirv::MemorySemantics":$memory, "Value":$value),
- [{build($_builder, $_state, value.getType(), pointer, scope, memory, value);}]>
- ];
+ let assemblyFormat = [{
+ $memory_scope $semantics operands attr-dict `:` type($pointer)
+ }];
+
+ let hasCustomAssemblyFormat = 0;
}
// -----
@@ -73,20 +94,10 @@ def SPIRV_AtomicAndOp : SPIRV_AtomicUpdateWithValueOp<"AtomicAnd", []> {
<!-- End of AutoGen section -->
- ```
- scope ::= `"CrossDevice"` | `"Device"` | `"Workgroup"` | ...
-
- memory-semantics ::= `"None"` | `"Acquire"` | "Release"` | ...
-
- atomic-and-op ::=
- `spirv.AtomicAnd` scope memory-semantics
- ssa-use `,` ssa-use `:` spv-pointer-type
- ```
-
#### Example:
```mlir
- %0 = spirv.AtomicAnd "Device" "None" %pointer, %value :
+ %0 = spirv.AtomicAnd <Device> <None> %pointer, %value :
!spirv.ptr<i32, StorageBuffer>
```
}];
@@ -94,7 +105,11 @@ def SPIRV_AtomicAndOp : SPIRV_AtomicUpdateWithValueOp<"AtomicAnd", []> {
// -----
-def SPIRV_AtomicCompareExchangeOp : SPIRV_Op<"AtomicCompareExchange", []> {
+def SPIRV_AtomicCompareExchangeOp : SPIRV_Op<"AtomicCompareExchange", [
+ PointeeTypeMatchTrait<"pointer", "result">,
+ PointeeTypeMatchTrait<"pointer", "value">,
+ PointeeTypeMatchTrait<"pointer", "comparator">,
+]> {
let summary = [{
Perform the following steps atomically with respect to any other atomic
accesses within Scope to the same location:
@@ -129,17 +144,10 @@ def SPIRV_AtomicCompareExchangeOp : SPIRV_Op<"AtomicCompareExchange", []> {
<!-- End of AutoGen section -->
- ```
- atomic-compare-exchange-op ::=
- `spirv.AtomicCompareExchange` scope memory-semantics memory-semantics
- ssa-use `,` ssa-use `,` ssa-use
- `:` spv-pointer-type
- ```
-
#### Example:
```
- %0 = spirv.AtomicCompareExchange "Workgroup" "Acquire" "None"
+ %0 = spirv.AtomicCompareExchange <Workgroup> <Acquire> <None>
%pointer, %value, %comparator
: !spirv.ptr<i32, WorkGroup>
```
@@ -157,11 +165,23 @@ def SPIRV_AtomicCompareExchangeOp : SPIRV_Op<"AtomicCompareExchange", []> {
let results = (outs
SPIRV_Integer:$result
);
+
+ let assemblyFormat = [{
+ $memory_scope $equal_semantics $unequal_semantics operands attr-dict `:`
+ type($pointer)
+ }];
+
+ let hasCustomAssemblyFormat = 0;
+ let hasVerifier = 0;
}
// -----
-def SPIRV_AtomicCompareExchangeWeakOp : SPIRV_Op<"AtomicCompareExchangeWeak", []> {
+def SPIRV_AtomicCompareExchangeWeakOp : SPIRV_Op<"AtomicCompareExchangeWeak", [
+ PointeeTypeMatchTrait<"pointer", "result">,
+ PointeeTypeMatchTrait<"pointer", "value">,
+ PointeeTypeMatchTrait<"pointer", "comparator">,
+]> {
let summary = "Deprecated (use OpAtomicCompareExchange).";
let description = [{
@@ -171,17 +191,10 @@ def SPIRV_AtomicCompareExchangeWeakOp : SPIRV_Op<"AtomicCompareExchangeWeak", []
<!-- End of AutoGen section -->
- ```
- atomic-compare-exchange-weak-op ::=
- `spirv.AtomicCompareExchangeWeak` scope memory-semantics memory-semantics
- ssa-use `,` ssa-use `,` ssa-use
- `:` spv-pointer-type
- ```
-
#### Example:
```mlir
- %0 = spirv.AtomicCompareExchangeWeak "Workgroup" "Acquire" "None"
+ %0 = spirv.AtomicCompareExchangeWeak <Workgroup> <Acquire> <None>
%pointer, %value, %comparator
: !spirv.ptr<i32, WorkGroup>
```
@@ -206,11 +219,22 @@ def SPIRV_AtomicCompareExchangeWeakOp : SPIRV_Op<"AtomicCompareExchangeWeak", []
let results = (outs
SPIRV_Integer:$result
);
+
+ let assemblyFormat = [{
+ $memory_scope $equal_semantics $unequal_semantics operands attr-dict `:`
+ type($pointer)
+ }];
+
+ let hasCustomAssemblyFormat = 0;
+ let hasVerifier = 0;
}
// -----
-def SPIRV_AtomicExchangeOp : SPIRV_Op<"AtomicExchange", []> {
+def SPIRV_AtomicExchangeOp : SPIRV_Op<"AtomicExchange", [
+ PointeeTypeMatchTrait<"pointer", "value">,
+ PointeeTypeMatchTrait<"pointer", "result">,
+]> {
let summary = [{
Perform the following steps atomically with respect to any other atomic
accesses within Scope to the same location:
@@ -234,16 +258,10 @@ def SPIRV_AtomicExchangeOp : SPIRV_Op<"AtomicExchange", []> {
<!-- End of AutoGen section -->
- ```
- atomic-exchange-op ::=
- `spirv.AtomicCompareExchange` scope memory-semantics
- ssa-use `,` ssa-use `:` spv-pointer-type
- ```
-
#### Example:
```mlir
- %0 = spirv.AtomicExchange "Workgroup" "Acquire" %pointer, %value,
+ %0 = spirv.AtomicExchange <Workgroup> <Acquire> %pointer, %value,
: !spirv.ptr<i32, WorkGroup>
```
}];
@@ -258,11 +276,21 @@ def SPIRV_AtomicExchangeOp : SPIRV_Op<"AtomicExchange", []> {
let results = (outs
SPIRV_Numerical:$result
);
+
+ let assemblyFormat = [{
+ $memory_scope $semantics operands attr-dict `:` type($pointer)
+ }];
+
+ let hasCustomAssemblyFormat = 0;
+ let hasVerifier = 0;
}
// -----
-def SPIRV_EXTAtomicFAddOp : SPIRV_ExtVendorOp<"AtomicFAdd", []> {
+def SPIRV_EXTAtomicFAddOp : SPIRV_ExtVendorOp<"AtomicFAdd", [
+ PointeeTypeMatchTrait<"pointer", "result">,
+ PointeeTypeMatchTrait<"pointer", "value">,
+]> {
let summary = "TBD";
let description = [{
@@ -288,16 +316,10 @@ def SPIRV_EXTAtomicFAddOp : SPIRV_ExtVendorOp<"AtomicFAdd", []> {
Memory must be a valid memory Scope.
- ```
- atomic-fadd-op ::=
- `spirv.EXT.AtomicFAdd` scope memory-semantics
- ssa-use `,` ssa-use `:` spv-pointer-type
- ```
-
#### Example:
```mlir
- %0 = spirv.EXT.AtomicFAdd "Device" "None" %pointer, %value :
+ %0 = spirv.EXT.AtomicFAdd <Device> <None> %pointer, %value :
!spirv.ptr<f32, StorageBuffer>
```
}];
@@ -319,6 +341,10 @@ def SPIRV_EXTAtomicFAddOp : SPIRV_ExtVendorOp<"AtomicFAdd", []> {
let results = (outs
SPIRV_Float:$result
);
+
+ let assemblyFormat = [{
+ $memory_scope $semantics operands attr-dict `:` type($pointer)
+ }];
}
// -----
@@ -347,16 +373,10 @@ def SPIRV_AtomicIAddOp : SPIRV_AtomicUpdateWithValueOp<"AtomicIAdd", []> {
<!-- End of AutoGen section -->
- ```
- atomic-iadd-op ::=
- `spirv.AtomicIAdd` scope memory-semantics
- ssa-use `,` ssa-use `:` spv-pointer-type
- ```
-
#### Example:
```mlir
- %0 = spirv.AtomicIAdd "Device" "None" %pointer, %value :
+ %0 = spirv.AtomicIAdd <Device> <None> %pointer, %value :
!spirv.ptr<i32, StorageBuffer>
```
}];
@@ -387,16 +407,10 @@ def SPIRV_AtomicIDecrementOp : SPIRV_AtomicUpdateOp<"AtomicIDecrement", []> {
<!-- End of AutoGen section -->
- ```
- atomic-idecrement-op ::=
- `spirv.AtomicIDecrement` scope memory-semantics ssa-use
- `:` spv-pointer-type
- ```
-
#### Example:
```mlir
- %0 = spirv.AtomicIDecrement "Device" "None" %pointer :
+ %0 = spirv.AtomicIDecrement <Device> <None> %pointer :
!spirv.ptr<i32, StorageBuffer>
```
}];
@@ -426,16 +440,10 @@ def SPIRV_AtomicIIncrementOp : SPIRV_AtomicUpdateOp<"AtomicIIncrement", []> {
<!-- End of AutoGen section -->
- ```
- atomic-iincrement-op ::=
- `spirv.AtomicIIncrement` scope memory-semantics ssa-use
- `:` spv-pointer-type
- ```
-
#### Example:
```mlir
- %0 = spirv.AtomicIncrement "Device" "None" %pointer :
+ %0 = spirv.AtomicIncrement <Device> <None> %pointer :
!spirv.ptr<i32, StorageBuffer>
```
}];
@@ -468,16 +476,10 @@ def SPIRV_AtomicISubOp : SPIRV_AtomicUpdateWithValueOp<"AtomicISub", []> {
<!-- End of AutoGen section -->
- ```
- atomic-isub-op ::=
- `spirv.AtomicISub` scope memory-semantics
- ssa-use `,` ssa-use `:` spv-pointer-type
- ```
-
#### Example:
```mlir
- %0 = spirv.AtomicISub "Device" "None" %pointer, %value :
+ %0 = spirv.AtomicISub <Device> <None> %pointer, %value :
!spirv.ptr<i32, StorageBuffer>
```
}];
@@ -509,16 +511,10 @@ def SPIRV_AtomicOrOp : SPIRV_AtomicUpdateWithValueOp<"AtomicOr", []> {
<!-- End of AutoGen section -->
- ```
- atomic-or-op ::=
- `spirv.AtomicOr` scope memory-semantics
- ssa-use `,` ssa-use `:` spv-pointer-type
- ```
-
#### Example:
```mlir
- %0 = spirv.AtomicOr "Device" "None" %pointer, %value :
+ %0 = spirv.AtomicOr <Device> <None> %pointer, %value :
!spirv.ptr<i32, StorageBuffer>
```
}];
@@ -551,16 +547,10 @@ def SPIRV_AtomicSMaxOp : SPIRV_AtomicUpdateWithValueOp<"AtomicSMax", []> {
<!-- End of AutoGen section -->
- ```
- atomic-smax-op ::=
- `spirv.AtomicSMax` scope memory-semantics
- ssa-use `,` ssa-use `:` spv-pointer-type
- ```
-
#### Example:
```mlir
- %0 = spirv.AtomicSMax "Device" "None" %pointer, %value :
+ %0 = spirv.AtomicSMax <Device> <None> %pointer, %value :
!spirv.ptr<i32, StorageBuffer>
```
}];
@@ -593,16 +583,10 @@ def SPIRV_AtomicSMinOp : SPIRV_AtomicUpdateWithValueOp<"AtomicSMin", []> {
<!-- End of AutoGen section -->
- ```
- atomic-smin-op ::=
- `spirv.AtomicSMin` scope memory-semantics
- ssa-use `,` ssa-use `:` spv-pointer-type
- ```
-
#### Example:
```mlir
- %0 = spirv.AtomicSMin "Device" "None" %pointer, %value :
+ %0 = spirv.AtomicSMin <Device> <None> %pointer, %value :
!spirv.ptr<i32, StorageBuffer>
```
}];
@@ -635,16 +619,10 @@ def SPIRV_AtomicUMaxOp : SPIRV_AtomicUpdateWithValueOp<"AtomicUMax", [UnsignedOp
<!-- End of AutoGen section -->
- ```
- atomic-umax-op ::=
- `spirv.AtomicUMax` scope memory-semantics
- ssa-use `,` ssa-use `:` spv-pointer-type
- ```
-
#### Example:
```mlir
- %0 = spirv.AtomicUMax "Device" "None" %pointer, %value :
+ %0 = spirv.AtomicUMax <Device> <None> %pointer, %value :
!spirv.ptr<i32, StorageBuffer>
```
}];
@@ -677,16 +655,10 @@ def SPIRV_AtomicUMinOp : SPIRV_AtomicUpdateWithValueOp<"AtomicUMin", [UnsignedOp
<!-- End of AutoGen section -->
- ```
- atomic-umin-op ::=
- `spirv.AtomicUMin` scope memory-semantics
- ssa-use `,` ssa-use `:` spv-pointer-type
- ```
-
#### Example:
```mlir
- %0 = spirv.AtomicUMin "Device" "None" %pointer, %value :
+ %0 = spirv.AtomicUMin <Device> <None> %pointer, %value :
!spirv.ptr<i32, StorageBuffer>
```
}];
@@ -719,16 +691,10 @@ def SPIRV_AtomicXorOp : SPIRV_AtomicUpdateWithValueOp<"AtomicXor", []> {
<!-- End of AutoGen section -->
- ```
- atomic-xor-op ::=
- `spirv.AtomicXor` scope memory-semantics
- ssa-use `,` ssa-use `:` spv-pointer-type
- ```
-
#### Example:
```mlir
- %0 = spirv.AtomicXor "Device" "None" %pointer, %value :
+ %0 = spirv.AtomicXor <Device> <None> %pointer, %value :
!spirv.ptr<i32, StorageBuffer>
```
}];
diff --git a/mlir/include/mlir/Dialect/SPIRV/IR/SPIRVStructureOps.td b/mlir/include/mlir/Dialect/SPIRV/IR/SPIRVStructureOps.td
index 5fd25e3b576f..fbf750d64303 100644
--- a/mlir/include/mlir/Dialect/SPIRV/IR/SPIRVStructureOps.td
+++ b/mlir/include/mlir/Dialect/SPIRV/IR/SPIRVStructureOps.td
@@ -267,6 +267,15 @@ def SPIRV_FuncOp : SPIRV_Op<"func", [
This op itself takes no operands and generates no results. Its region
can take zero or more arguments and return zero or one values.
+ From `SPV_KHR_physical_storage_buffer`:
+    If a parameter of the function is
+ - a pointer (or contains a pointer) in the PhysicalStorageBuffer storage
+ class, the function parameter must be decorated with exactly one of
+ `Aliased` or `Restrict`.
+ - a pointer (or contains a pointer) and the type it points to is a pointer
+ in the PhysicalStorageBuffer storage class, the function parameter must
+ be decorated with exactly one of `AliasedPointer` or `RestrictPointer`.
+
<!-- End of AutoGen section -->
```
@@ -280,6 +289,20 @@ def SPIRV_FuncOp : SPIRV_Op<"func", [
```mlir
spirv.func @foo() -> () "None" { ... }
spirv.func @bar() -> () "Inline|Pure" { ... }
+
+ spirv.func @aliased_pointer(%arg0: !spirv.ptr<i32, PhysicalStorageBuffer>,
+ { spirv.decoration = #spirv.decoration<Aliased> }) -> () "None" { ... }
+
+ spirv.func @restrict_pointer(%arg0: !spirv.ptr<i32, PhysicalStorageBuffer>,
+ { spirv.decoration = #spirv.decoration<Restrict> }) -> () "None" { ... }
+
+ spirv.func @aliased_pointee(%arg0: !spirv.ptr<!spirv.ptr<i32,
+ PhysicalStorageBuffer>, Generic> { spirv.decoration =
+ #spirv.decoration<AliasedPointer> }) -> () "None" { ... }
+
+ spirv.func @restrict_pointee(%arg0: !spirv.ptr<!spirv.ptr<i32,
+ PhysicalStorageBuffer>, Generic> { spirv.decoration =
+ #spirv.decoration<RestrictPointer> }) -> () "None" { ... }
```
}];
diff --git a/mlir/include/mlir/ExecutionEngine/RunnerUtils.h b/mlir/include/mlir/ExecutionEngine/RunnerUtils.h
index b426465c5192..ebf95f90f374 100644
--- a/mlir/include/mlir/ExecutionEngine/RunnerUtils.h
+++ b/mlir/include/mlir/ExecutionEngine/RunnerUtils.h
@@ -217,14 +217,14 @@ void printMemRefShape(UnrankedMemRefType<T> &m) {
template <typename T>
void printMemRef(const DynamicMemRefType<T> &m) {
printMemRefMetaData(std::cout, m);
- std::cout << " data = " << std::endl;
+ std::cout << " data = \n";
if (m.rank == 0)
std::cout << "[";
MemRefDataPrinter<T>::print(std::cout, m.data, m.rank, m.rank, m.offset,
m.sizes, m.strides);
if (m.rank == 0)
std::cout << "]";
- std::cout << std::endl;
+ std::cout << '\n' << std::flush;
}
template <typename T, int N>
diff --git a/mlir/include/mlir/IR/Dominance.h b/mlir/include/mlir/IR/Dominance.h
index 82bf34c1910e..2536ce585b3f 100644
--- a/mlir/include/mlir/IR/Dominance.h
+++ b/mlir/include/mlir/IR/Dominance.h
@@ -54,6 +54,21 @@ public:
/// nullptr.
Block *findNearestCommonDominator(Block *a, Block *b) const;
+ /// Finds the nearest common dominator block for the given range of blocks.
+ /// If no common dominator can be found, this function will return nullptr.
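+  /// This is used, for example, by buffer placement utilities to find a block
+  /// that dominates all users of a value (see findCommonDominator in
+  /// BufferUtils.h).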
+ template <typename BlockRangeT>
+ Block *findNearestCommonDominator(BlockRangeT &&blocks) const {
+ if (blocks.begin() == blocks.end())
+ return nullptr;
+ Block *dom = *blocks.begin();
+ for (auto it = ++blocks.begin(); it != blocks.end(); ++it) {
+ dom = findNearestCommonDominator(dom, *it);
+ if (!dom)
+ return nullptr;
+ }
+ return dom;
+ }
+
/// Get the root dominance node of the given region. Note that this operation
/// is only defined for multi-block regions!
DominanceInfoNode *getRootNode(Region *region) {
diff --git a/mlir/include/mlir/Interfaces/ControlFlowInterfaces.td b/mlir/include/mlir/Interfaces/ControlFlowInterfaces.td
index 120ddf01ebce..95ac5dea243a 100644
--- a/mlir/include/mlir/Interfaces/ControlFlowInterfaces.td
+++ b/mlir/include/mlir/Interfaces/ControlFlowInterfaces.td
@@ -272,6 +272,10 @@ def RegionBranchOpInterface : OpInterface<"RegionBranchOpInterface"> {
/// eventually branch back to the same region. (Maybe after passing through
/// other regions.)
bool isRepetitiveRegion(unsigned index);
+
+ /// Return `true` if there is a loop in the region branching graph. Only
+ /// reachable regions (starting from the entry regions) are considered.
+ bool hasLoop();
}];
}
diff --git a/mlir/lib/Analysis/Presburger/Barvinok.cpp b/mlir/lib/Analysis/Presburger/Barvinok.cpp
new file mode 100644
index 000000000000..9152b66968a1
--- /dev/null
+++ b/mlir/lib/Analysis/Presburger/Barvinok.cpp
@@ -0,0 +1,65 @@
+//===- Barvinok.cpp - Barvinok's Algorithm ----------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "mlir/Analysis/Presburger/Barvinok.h"
+
+using namespace mlir;
+using namespace presburger;
+using namespace mlir::presburger::detail;
+
+/// Assuming that the input cone is pointed at the origin,
+/// converts it to its dual in V-representation.
+/// Essentially we just remove the all-zeroes constant column.
+ConeV mlir::presburger::detail::getDual(ConeH cone) {
+ unsigned numIneq = cone.getNumInequalities();
+ unsigned numVar = cone.getNumCols() - 1;
+ ConeV dual(numIneq, numVar, 0, 0);
+ // Assuming that an inequality of the form
+ // a1*x1 + ... + an*xn + b ≥ 0
+ // is represented as a row [a1, ..., an, b]
+ // and that b = 0.
+
+ for (unsigned i = 0; i < numIneq; ++i) {
+ assert(cone.atIneq(i, numVar) == 0 &&
+ "H-representation of cone is not centred at the origin!");
+ for (unsigned j = 0; j < numVar; ++j) {
+ dual.at(i, j) = cone.atIneq(i, j);
+ }
+ }
+
+ // Now dual is of the form [ [a1, ..., an] , ... ]
+ // which is the V-representation of the dual.
+ return dual;
+}
+
+/// Converts a cone in V-representation to the H-representation
+/// of its dual, pointed at the origin (not at the original vertex).
+/// Essentially adds a column consisting only of zeroes to the end.
+ConeH mlir::presburger::detail::getDual(ConeV cone) {
+ unsigned rows = cone.getNumRows();
+ unsigned columns = cone.getNumColumns();
+ ConeH dual = defineHRep(columns);
+ // Add a new column (for constants) at the end.
+ // This will be initialized to zero.
+ cone.insertColumn(columns);
+
+ for (unsigned i = 0; i < rows; ++i)
+ dual.addInequality(cone.getRow(i));
+
+ // Now dual is of the form [ [a1, ..., an, 0] , ... ]
+ // which is the H-representation of the dual.
+ return dual;
+}
+
+/// Find the index of a cone in V-representation.
+MPInt mlir::presburger::detail::getIndex(ConeV cone) {
+ if (cone.getNumRows() > cone.getNumColumns())
+ return MPInt(0);
+
+ return cone.determinant();
+}
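A small usage sketch (not part of this patch) for the conversions above, assuming the `ConeH`/`ConeV` aliases and the `defineHRep` helper declared in Barvinok.h are in scope:

  // The cone x >= 0, y >= 0 pointed at the origin, in H-representation:
  // rows [1, 0, 0] and [0, 1, 0], with the constant column all zero.
  ConeH cone = defineHRep(2);
  cone.addInequality({MPInt(1), MPInt(0), MPInt(0)});
  cone.addInequality({MPInt(0), MPInt(1), MPInt(0)});

  // Dual V-representation: rows [1, 0] and [0, 1] (constant column dropped).
  ConeV dual = getDual(cone);

  // Square generator matrix with |det| = 1, i.e. the dual cone is unimodular.
  MPInt index = getIndex(dual);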
diff --git a/mlir/lib/Analysis/Presburger/CMakeLists.txt b/mlir/lib/Analysis/Presburger/CMakeLists.txt
index e77e1623dae1..83d0514c9e7d 100644
--- a/mlir/lib/Analysis/Presburger/CMakeLists.txt
+++ b/mlir/lib/Analysis/Presburger/CMakeLists.txt
@@ -1,4 +1,5 @@
add_mlir_library(MLIRPresburger
+ Barvinok.cpp
IntegerRelation.cpp
LinearTransform.cpp
Matrix.cpp
diff --git a/mlir/lib/Analysis/Presburger/IntegerRelation.cpp b/mlir/lib/Analysis/Presburger/IntegerRelation.cpp
index 0109384f1689..f78e21ccd38e 100644
--- a/mlir/lib/Analysis/Presburger/IntegerRelation.cpp
+++ b/mlir/lib/Analysis/Presburger/IntegerRelation.cpp
@@ -1288,6 +1288,40 @@ void IntegerRelation::eliminateRedundantLocalVar(unsigned posA, unsigned posB) {
removeVar(posB);
}
+/// mergeAndAlignSymbols's implementation can be broken down into two steps:
+/// 1. Merge and align identifiers into `other` from `this`. If an identifier
+/// from `this` exists in `other`, we align it. Otherwise, we assume it is a
+/// new identifier and insert it into `other` at the same position as in
+/// `this`.
+/// 2. Add identifiers that are in `other` but not in `this` to `this`.
+void IntegerRelation::mergeAndAlignSymbols(IntegerRelation &other) {
+  assert(space.isUsingIds() && other.space.isUsingIds() &&
+         "both relations need to have identifiers to merge and align");
+
+ unsigned i = 0;
+ for (const Identifier identifier : space.getIds(VarKind::Symbol)) {
+ // Search in `other` starting at position `i` since the left of `i` is
+ // aligned.
+ const Identifier *findBegin =
+ other.space.getIds(VarKind::Symbol).begin() + i;
+ const Identifier *findEnd = other.space.getIds(VarKind::Symbol).end();
+ const Identifier *itr = std::find(findBegin, findEnd, identifier);
+ if (itr != findEnd) {
+ other.swapVar(other.getVarKindOffset(VarKind::Symbol) + i,
+ other.getVarKindOffset(VarKind::Symbol) + i +
+ std::distance(findBegin, itr));
+ } else {
+ other.insertVar(VarKind::Symbol, i);
+ other.space.getId(VarKind::Symbol, i) = identifier;
+ }
+ ++i;
+ }
+
+ for (unsigned e = other.getNumVarKind(VarKind::Symbol); i < e; ++i) {
+ insertVar(VarKind::Symbol, i);
+ space.getId(VarKind::Symbol, i) = other.space.getId(VarKind::Symbol, i);
+ }
+}
+
/// Adds additional local ids to the sets such that they both have the union
/// of the local ids in each set, without changing the set of points that
/// lie in `this` and `other`.
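A small worked example for `mergeAndAlignSymbols` above (not from the patch): if `this` carries the symbol identifiers (M, N) and `other` carries (N, P), the first loop inserts M into `other` at position 0 and finds N already aligned at position 1; the second loop then appends P to `this`. Afterwards both relations carry the symbols (M, N, P) in the same order.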
diff --git a/mlir/lib/Conversion/GPUCommon/GPUOpsLowering.cpp b/mlir/lib/Conversion/GPUCommon/GPUOpsLowering.cpp
index 6a005e67ca95..eeb8fbbb180b 100644
--- a/mlir/lib/Conversion/GPUCommon/GPUOpsLowering.cpp
+++ b/mlir/lib/Conversion/GPUCommon/GPUOpsLowering.cpp
@@ -85,8 +85,26 @@ GPUFuncOpLowering::matchAndRewrite(gpu::GPUFuncOp gpuFuncOp, OpAdaptor adaptor,
// Add a dialect specific kernel attribute in addition to GPU kernel
// attribute. The former is necessary for further translation while the
// latter is expected by gpu.launch_func.
- if (gpuFuncOp.isKernel())
+ if (gpuFuncOp.isKernel()) {
attributes.emplace_back(kernelAttributeName, rewriter.getUnitAttr());
+
+ // Set the block size attribute if it is present.
+ if (kernelBlockSizeAttributeName.has_value()) {
+ std::optional<int32_t> dimX =
+ gpuFuncOp.getKnownBlockSize(gpu::Dimension::x);
+ std::optional<int32_t> dimY =
+ gpuFuncOp.getKnownBlockSize(gpu::Dimension::y);
+ std::optional<int32_t> dimZ =
+ gpuFuncOp.getKnownBlockSize(gpu::Dimension::z);
+ if (dimX.has_value() || dimY.has_value() || dimZ.has_value()) {
+ // If any of the dimensions are missing, fill them in with 1.
+ attributes.emplace_back(
+ kernelBlockSizeAttributeName.value(),
+ rewriter.getI32ArrayAttr(
+ {dimX.value_or(1), dimY.value_or(1), dimZ.value_or(1)}));
+ }
+ }
+ }
auto llvmFuncOp = rewriter.create<LLVM::LLVMFuncOp>(
gpuFuncOp.getLoc(), gpuFuncOp.getName(), funcType,
LLVM::Linkage::External, /*dsoLocal=*/false, /*cconv=*/LLVM::CConv::C,
diff --git a/mlir/lib/Conversion/GPUCommon/GPUOpsLowering.h b/mlir/lib/Conversion/GPUCommon/GPUOpsLowering.h
index a77db4a036ba..471a688e8546 100644
--- a/mlir/lib/Conversion/GPUCommon/GPUOpsLowering.h
+++ b/mlir/lib/Conversion/GPUCommon/GPUOpsLowering.h
@@ -36,13 +36,15 @@ private:
};
struct GPUFuncOpLowering : ConvertOpToLLVMPattern<gpu::GPUFuncOp> {
- GPUFuncOpLowering(const LLVMTypeConverter &converter,
- unsigned allocaAddrSpace, unsigned workgroupAddrSpace,
- StringAttr kernelAttributeName)
+ GPUFuncOpLowering(
+ const LLVMTypeConverter &converter, unsigned allocaAddrSpace,
+ unsigned workgroupAddrSpace, StringAttr kernelAttributeName,
+ std::optional<StringAttr> kernelBlockSizeAttributeName = std::nullopt)
: ConvertOpToLLVMPattern<gpu::GPUFuncOp>(converter),
allocaAddrSpace(allocaAddrSpace),
workgroupAddrSpace(workgroupAddrSpace),
- kernelAttributeName(kernelAttributeName) {}
+ kernelAttributeName(kernelAttributeName),
+ kernelBlockSizeAttributeName(kernelBlockSizeAttributeName) {}
LogicalResult
matchAndRewrite(gpu::GPUFuncOp gpuFuncOp, OpAdaptor adaptor,
@@ -56,6 +58,9 @@ private:
/// The attribute name to use instead of `gpu.kernel`.
StringAttr kernelAttributeName;
+
+  /// The attribute name used to set the block size.
+ std::optional<StringAttr> kernelBlockSizeAttributeName;
};
/// The lowering of gpu.printf to a call to HIP hostcalls
diff --git a/mlir/lib/Conversion/GPUToNVVM/LowerGpuOpsToNVVMOps.cpp b/mlir/lib/Conversion/GPUToNVVM/LowerGpuOpsToNVVMOps.cpp
index e60fe5cbd760..a7ac2332961a 100644
--- a/mlir/lib/Conversion/GPUToNVVM/LowerGpuOpsToNVVMOps.cpp
+++ b/mlir/lib/Conversion/GPUToNVVM/LowerGpuOpsToNVVMOps.cpp
@@ -352,7 +352,9 @@ void mlir::populateGpuToNVVMConversionPatterns(LLVMTypeConverter &converter,
/*workgroupAddrSpace=*/
static_cast<unsigned>(NVVM::NVVMMemorySpace::kSharedMemorySpace),
StringAttr::get(&converter.getContext(),
- NVVM::NVVMDialect::getKernelFuncAttrName()));
+ NVVM::NVVMDialect::getKernelFuncAttrName()),
+ StringAttr::get(&converter.getContext(),
+ NVVM::NVVMDialect::getMaxntidAttrName()));
populateOpPatterns<math::AbsFOp>(converter, patterns, "__nv_fabsf",
"__nv_fabs");
diff --git a/mlir/lib/Dialect/Bufferization/Transforms/BufferOptimizations.cpp b/mlir/lib/Dialect/Bufferization/Transforms/BufferOptimizations.cpp
index 9f5d6a466780..9dc2f262a511 100644
--- a/mlir/lib/Dialect/Bufferization/Transforms/BufferOptimizations.cpp
+++ b/mlir/lib/Dialect/Bufferization/Transforms/BufferOptimizations.cpp
@@ -40,6 +40,25 @@ static bool isKnownControlFlowInterface(Operation *op) {
return isa<LoopLikeOpInterface, RegionBranchOpInterface>(op);
}
+/// Returns true if the given operation represents a loop by testing whether it
+/// implements the `LoopLikeOpInterface` or the `RegionBranchOpInterface`. In
+/// the case of a `RegionBranchOpInterface`, it checks all region-based control-
+/// flow edges for cycles.
+static bool isLoop(Operation *op) {
+ // If the operation implements the `LoopLikeOpInterface` it can be considered
+ // a loop.
+ if (isa<LoopLikeOpInterface>(op))
+ return true;
+
+ // If the operation does not implement the `RegionBranchOpInterface`, it is
+ // (currently) not possible to detect a loop.
+ auto regionInterface = dyn_cast<RegionBranchOpInterface>(op);
+ if (!regionInterface)
+ return false;
+
+ return regionInterface.hasLoop();
+}
+
/// Returns true if the given operation implements the AllocationOpInterface
/// and it supports the dominate block hoisting.
static bool allowAllocDominateBlockHoisting(Operation *op) {
@@ -115,8 +134,7 @@ static bool hasAllocationScope(Value alloc,
// Check if the operation is a known control flow interface and break the
// loop to avoid transformation in loops. Furthermore skip transformation
    // if the operation does not implement a RegionBranchOpInterface.
- if (BufferPlacementTransformationBase::isLoop(parentOp) ||
- !isKnownControlFlowInterface(parentOp))
+ if (isLoop(parentOp) || !isKnownControlFlowInterface(parentOp))
break;
}
} while ((region = region->getParentRegion()));
@@ -290,9 +308,7 @@ struct BufferAllocationHoistingState : BufferAllocationHoistingStateBase {
}
/// Returns true if the given operation does not represent a loop.
- bool isLegalPlacement(Operation *op) {
- return !BufferPlacementTransformationBase::isLoop(op);
- }
+ bool isLegalPlacement(Operation *op) { return !isLoop(op); }
/// Returns true if the given operation should be considered for hoisting.
static bool shouldHoistOpType(Operation *op) {
@@ -327,7 +343,7 @@ struct BufferAllocationLoopHoistingState : BufferAllocationHoistingStateBase {
/// given loop operation. If this is the case, it indicates that the
/// allocation is passed via a back edge.
bool isLegalPlacement(Operation *op) {
- return BufferPlacementTransformationBase::isLoop(op) &&
+ return isLoop(op) &&
!dominators->dominates(aliasDominatorBlock, op->getBlock());
}
diff --git a/mlir/lib/Dialect/Bufferization/Transforms/BufferUtils.cpp b/mlir/lib/Dialect/Bufferization/Transforms/BufferUtils.cpp
index 119801f9cc92..8fffdbf664c3 100644
--- a/mlir/lib/Dialect/Bufferization/Transforms/BufferUtils.cpp
+++ b/mlir/lib/Dialect/Bufferization/Transforms/BufferUtils.cpp
@@ -96,53 +96,6 @@ BufferPlacementTransformationBase::BufferPlacementTransformationBase(
Operation *op)
: aliases(op), allocs(op), liveness(op) {}
-/// Returns true if the given operation represents a loop by testing whether it
-/// implements the `LoopLikeOpInterface` or the `RegionBranchOpInterface`. In
-/// the case of a `RegionBranchOpInterface`, it checks all region-based control-
-/// flow edges for cycles.
-bool BufferPlacementTransformationBase::isLoop(Operation *op) {
- // If the operation implements the `LoopLikeOpInterface` it can be considered
- // a loop.
- if (isa<LoopLikeOpInterface>(op))
- return true;
-
- // If the operation does not implement the `RegionBranchOpInterface`, it is
- // (currently) not possible to detect a loop.
- RegionBranchOpInterface regionInterface;
- if (!(regionInterface = dyn_cast<RegionBranchOpInterface>(op)))
- return false;
-
- // Recurses into a region using the current region interface to find potential
- // cycles.
- SmallPtrSet<Region *, 4> visitedRegions;
- std::function<bool(Region *)> recurse = [&](Region *current) {
- if (!current)
- return false;
- // If we have found a back edge, the parent operation induces a loop.
- if (!visitedRegions.insert(current).second)
- return true;
- // Recurses into all region successors.
- SmallVector<RegionSuccessor, 2> successors;
- regionInterface.getSuccessorRegions(current, successors);
- for (RegionSuccessor &regionEntry : successors)
- if (recurse(regionEntry.getSuccessor()))
- return true;
- return false;
- };
-
- // Start with all entry regions and test whether they induce a loop.
- SmallVector<RegionSuccessor, 2> successorRegions;
- regionInterface.getSuccessorRegions(/*point=*/RegionBranchPoint::parent(),
- successorRegions);
- for (RegionSuccessor &regionEntry : successorRegions) {
- if (recurse(regionEntry.getSuccessor()))
- return true;
- visitedRegions.clear();
- }
-
- return false;
-}
-
//===----------------------------------------------------------------------===//
// BufferPlacementTransformationBase
//===----------------------------------------------------------------------===//
diff --git a/mlir/lib/Dialect/Bufferization/Transforms/BufferViewFlowAnalysis.cpp b/mlir/lib/Dialect/Bufferization/Transforms/BufferViewFlowAnalysis.cpp
index 98a60a48763a..88ef1b639fc5 100644
--- a/mlir/lib/Dialect/Bufferization/Transforms/BufferViewFlowAnalysis.cpp
+++ b/mlir/lib/Dialect/Bufferization/Transforms/BufferViewFlowAnalysis.cpp
@@ -49,7 +49,7 @@ void BufferViewFlowAnalysis::rename(Value from, Value to) {
dependencies[to] = dependencies[from];
dependencies.erase(from);
- for (auto &[key, value] : dependencies) {
+ for (auto &[_, value] : dependencies) {
if (value.contains(from)) {
value.insert(to);
value.erase(from);
diff --git a/mlir/lib/Dialect/Bufferization/Transforms/EmptyTensorElimination.cpp b/mlir/lib/Dialect/Bufferization/Transforms/EmptyTensorElimination.cpp
index 4a418a05e6ff..eba1273b36e2 100644
--- a/mlir/lib/Dialect/Bufferization/Transforms/EmptyTensorElimination.cpp
+++ b/mlir/lib/Dialect/Bufferization/Transforms/EmptyTensorElimination.cpp
@@ -53,10 +53,9 @@ neededValuesDominateInsertionPoint(const DominanceInfo &domInfo,
static bool insertionPointDominatesUses(const DominanceInfo &domInfo,
Operation *insertionPoint,
Operation *emptyTensorOp) {
- for (Operation *user : emptyTensorOp->getUsers())
- if (!domInfo.dominates(insertionPoint, user))
- return false;
- return true;
+ return llvm::all_of(emptyTensorOp->getUsers(), [&](Operation *user) {
+ return domInfo.dominates(insertionPoint, user);
+ });
}
/// Find a valid insertion point for a replacement of `emptyTensorOp`, assuming
diff --git a/mlir/lib/Dialect/GPU/IR/GPUDialect.cpp b/mlir/lib/Dialect/GPU/IR/GPUDialect.cpp
index dd482f305fcb..020900934c9f 100644
--- a/mlir/lib/Dialect/GPU/IR/GPUDialect.cpp
+++ b/mlir/lib/Dialect/GPU/IR/GPUDialect.cpp
@@ -646,7 +646,8 @@ void LaunchOp::build(OpBuilder &builder, OperationState &result,
Value getBlockSizeZ, Value dynamicSharedMemorySize,
Type asyncTokenType, ValueRange asyncDependencies,
TypeRange workgroupAttributions,
- TypeRange privateAttributions) {
+ TypeRange privateAttributions, Value clusterSizeX,
+ Value clusterSizeY, Value clusterSizeZ) {
// Add a WorkGroup attribution attribute. This attribute is required to
  // identify private attributions in the list of block arguments.
result.addAttribute(getNumWorkgroupAttributionsAttrName(),
@@ -660,6 +661,12 @@ void LaunchOp::build(OpBuilder &builder, OperationState &result,
// Add grid and block sizes as op operands, followed by the data operands.
result.addOperands({gridSizeX, gridSizeY, gridSizeZ, getBlockSizeX,
getBlockSizeY, getBlockSizeZ});
+ if (clusterSizeX)
+ result.addOperands(clusterSizeX);
+ if (clusterSizeY)
+ result.addOperands(clusterSizeY);
+ if (clusterSizeZ)
+ result.addOperands(clusterSizeZ);
if (dynamicSharedMemorySize)
result.addOperands(dynamicSharedMemorySize);
@@ -678,9 +685,12 @@ void LaunchOp::build(OpBuilder &builder, OperationState &result,
body->addArgument(argTy, result.location);
kernelRegion->push_back(body);
// Fill OperandSegmentSize Attribute.
- SmallVector<int32_t, 8> segmentSizes(8, 1);
+ SmallVector<int32_t, 11> segmentSizes(11, 1);
segmentSizes.front() = asyncDependencies.size();
segmentSizes.back() = dynamicSharedMemorySize ? 1 : 0;
+ segmentSizes[7] = clusterSizeX ? 1 : 0;
+ segmentSizes[8] = clusterSizeY ? 1 : 0;
+ segmentSizes[9] = clusterSizeZ ? 1 : 0;
result.addAttribute(getOperandSegmentSizeAttr(),
builder.getDenseI32ArrayAttr(segmentSizes));
}
@@ -709,6 +719,22 @@ KernelDim3 LaunchOp::getBlockSize() {
return KernelDim3{args[9], args[10], args[11]};
}
+std::optional<KernelDim3> LaunchOp::getClusterIds() {
+ assert(!getBody().empty() && "LaunchOp body must not be empty.");
+ if (!hasClusterSize())
+ return std::nullopt;
+ auto args = getBody().getArguments();
+ return KernelDim3{args[12], args[13], args[14]};
+}
+
+std::optional<KernelDim3> LaunchOp::getClusterSize() {
+ assert(!getBody().empty() && "LaunchOp body must not be empty.");
+ if (!hasClusterSize())
+ return std::nullopt;
+ auto args = getBody().getArguments();
+ return KernelDim3{args[15], args[16], args[17]};
+}
+
KernelDim3 LaunchOp::getGridSizeOperandValues() {
auto operands = getOperands().drop_front(getAsyncDependencies().size());
return KernelDim3{operands[0], operands[1], operands[2]};
@@ -719,6 +745,20 @@ KernelDim3 LaunchOp::getBlockSizeOperandValues() {
return KernelDim3{operands[3], operands[4], operands[5]};
}
+std::optional<KernelDim3> LaunchOp::getClusterSizeOperandValues() {
+ auto operands = getOperands().drop_front(getAsyncDependencies().size());
+ if (!hasClusterSize())
+ return std::nullopt;
+ return KernelDim3{operands[6], operands[7], operands[8]};
+}
+
+LogicalResult LaunchOp::verify() {
+ if (!(hasClusterSize()) &&
+ (getClusterSizeX() || getClusterSizeY() || getClusterSizeZ()))
+ return emitOpError() << "cluster size must be all present";
+ return success();
+}
+
LogicalResult LaunchOp::verifyRegions() {
// Kernel launch takes kNumConfigOperands leading operands for grid/block
// sizes and transforms them into kNumConfigRegionAttributes region arguments
@@ -778,6 +818,12 @@ void LaunchOp::print(OpAsmPrinter &p) {
p << " [" << getAsyncDependencies() << ']';
}
// Print the launch configuration.
+ if (hasClusterSize()) {
+ p << ' ' << getClustersKeyword();
+ printSizeAssignment(p, getClusterSize().value(),
+ getClusterSizeOperandValues().value(),
+ getClusterIds().value());
+ }
p << ' ' << getBlocksKeyword();
printSizeAssignment(p, getGridSize(), getGridSizeOperandValues(),
getBlockIds());
@@ -831,6 +877,7 @@ parseSizeAssignment(OpAsmParser &parser,
/// Parses a Launch operation.
/// operation ::= `gpu.launch` (`async` `[` ssa-id-list `]`)?
+/// `clusters` `(` ssa-id-list `)` `in` ssa-reassignment (Optional)
/// `blocks` `(` ssa-id-list `)` `in` ssa-reassignment
/// `threads` `(` ssa-id-list `)` `in` ssa-reassignment
/// memory-attribution
@@ -840,7 +887,6 @@ ParseResult LaunchOp::parse(OpAsmParser &parser, OperationState &result) {
// Sizes of the grid and block.
SmallVector<OpAsmParser::UnresolvedOperand, LaunchOp::kNumConfigOperands>
sizes(LaunchOp::kNumConfigOperands);
- MutableArrayRef<OpAsmParser::UnresolvedOperand> sizesRef(sizes);
// Actual (data) operands passed to the kernel.
SmallVector<OpAsmParser::UnresolvedOperand, 4> dataOperands;
@@ -848,7 +894,6 @@ ParseResult LaunchOp::parse(OpAsmParser &parser, OperationState &result) {
// Region arguments to be created.
SmallVector<OpAsmParser::UnresolvedOperand, 16> regionArgs(
LaunchOp::kNumConfigRegionAttributes);
- MutableArrayRef<OpAsmParser::UnresolvedOperand> regionArgsRef(regionArgs);
// Parse optional async dependencies.
SmallVector<OpAsmParser::UnresolvedOperand, 4> asyncDependencies;
@@ -861,6 +906,24 @@ ParseResult LaunchOp::parse(OpAsmParser &parser, OperationState &result) {
if (parser.getNumResults() > 0)
result.types.push_back(asyncTokenType);
+ bool hasCluster = false;
+ if (succeeded(
+ parser.parseOptionalKeyword(LaunchOp::getClustersKeyword().data()))) {
+ hasCluster = true;
+ sizes.resize(9);
+ regionArgs.resize(18);
+ }
+ MutableArrayRef<OpAsmParser::UnresolvedOperand> sizesRef(sizes);
+ MutableArrayRef<OpAsmParser::UnresolvedOperand> regionArgsRef(regionArgs);
+
+  // The last three size segments assign the cluster size. In the region
+  // argument list, these are the last six arguments.
+ if (hasCluster) {
+ if (parseSizeAssignment(parser, sizesRef.drop_front(6),
+ regionArgsRef.slice(15, 3),
+ regionArgsRef.slice(12, 3)))
+ return failure();
+ }
// Parse the size assignment segments: the first segment assigns grid sizes
// and defines values for block identifiers; the second segment assigns block
// sizes and defines values for thread identifiers. In the region argument
@@ -898,7 +961,7 @@ ParseResult LaunchOp::parse(OpAsmParser &parser, OperationState &result) {
// LaunchOp::getNumWorkgroupAttributionsAttrName().
Type index = parser.getBuilder().getIndexType();
SmallVector<Type, LaunchOp::kNumConfigRegionAttributes> dataTypes(
- LaunchOp::kNumConfigRegionAttributes, index);
+ LaunchOp::kNumConfigRegionAttributes + 6, index);
SmallVector<OpAsmParser::Argument> regionArguments;
for (auto ssaValueAndType : llvm::zip(regionArgs, dataTypes)) {
@@ -916,8 +979,9 @@ ParseResult LaunchOp::parse(OpAsmParser &parser, OperationState &result) {
// Store the number of operands we just parsed as the number of workgroup
// memory attributions.
- unsigned numWorkgroupAttrs =
- regionArguments.size() - LaunchOp::kNumConfigRegionAttributes;
+ unsigned numWorkgroupAttrs = regionArguments.size() -
+ LaunchOp::kNumConfigRegionAttributes -
+ (hasCluster ? 6 : 0);
result.addAttribute(LaunchOp::getNumWorkgroupAttributionsAttrName(),
builder.getI64IntegerAttr(numWorkgroupAttrs));
@@ -934,8 +998,14 @@ ParseResult LaunchOp::parse(OpAsmParser &parser, OperationState &result) {
parser.parseOptionalAttrDict(result.attributes))
return failure();
- SmallVector<int32_t, 8> segmentSizes(8, 1);
+ SmallVector<int32_t, 11> segmentSizes(11, 1);
segmentSizes.front() = asyncDependencies.size();
+
+ if (!hasCluster) {
+ segmentSizes[7] = 0;
+ segmentSizes[8] = 0;
+ segmentSizes[9] = 0;
+ }
segmentSizes.back() = hasDynamicSharedMemorySize ? 1 : 0;
result.addAttribute(LaunchOp::getOperandSegmentSizeAttr(),
parser.getBuilder().getDenseI32ArrayAttr(segmentSizes));
@@ -992,7 +1062,7 @@ BlockArgument LaunchOp::addWorkgroupAttribution(Type type, Location loc) {
(*this)->setAttr(attrName,
IntegerAttr::get(attr.getType(), attr.getValue() + 1));
return getBody().insertArgument(
- LaunchOp::kNumConfigRegionAttributes + attr.getInt(), type, loc);
+ LaunchOp::getNumConfigRegionAttributes() + attr.getInt(), type, loc);
}
/// Adds a new block argument that corresponds to buffers located in
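A minimal C++ sketch (not part of this patch) of querying the new cluster accessors added above, e.g. from a lowering pattern:

  if (launchOp.hasClusterSize()) {
    // SSA operands giving the cluster size in each dimension.
    gpu::KernelDim3 cluster = *launchOp.getClusterSizeOperandValues();
    Value cx = cluster.x, cy = cluster.y, cz = cluster.z;
  }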
diff --git a/mlir/lib/Dialect/GPU/Transforms/KernelOutlining.cpp b/mlir/lib/Dialect/GPU/Transforms/KernelOutlining.cpp
index 7432a58f18b4..2436113dc423 100644
--- a/mlir/lib/Dialect/GPU/Transforms/KernelOutlining.cpp
+++ b/mlir/lib/Dialect/GPU/Transforms/KernelOutlining.cpp
@@ -49,15 +49,21 @@ static void createForAllDimensions(OpBuilder &builder, Location loc,
/// entry block of `launchOpBody`, to the corresponding result value of the
/// added operations.
static void injectGpuIndexOperations(Location loc, Region &launchFuncOpBody,
- Region &launchOpBody, IRMapping &map) {
+ Region &launchOpBody, IRMapping &map,
+ bool hasCluster = false) {
OpBuilder builder(loc->getContext());
Block &firstBlock = launchOpBody.front();
builder.setInsertionPointToStart(&launchFuncOpBody.front());
- SmallVector<Value, 12> indexOps;
+ SmallVector<Value> indexOps;
+  // The order is important here: it must match the order of the launch
+  // region's leading block arguments, which are replaced below.
createForAllDimensions<gpu::BlockIdOp>(builder, loc, indexOps);
createForAllDimensions<gpu::ThreadIdOp>(builder, loc, indexOps);
createForAllDimensions<gpu::GridDimOp>(builder, loc, indexOps);
createForAllDimensions<gpu::BlockDimOp>(builder, loc, indexOps);
+ if (hasCluster) {
+ createForAllDimensions<gpu::ClusterIdOp>(builder, loc, indexOps);
+ createForAllDimensions<gpu::ClusterDimOp>(builder, loc, indexOps);
+ }
// Replace the leading 12 function args with the respective thread/block index
// operations. Iterate backwards since args are erased and indices change.
for (const auto &indexOp : enumerate(indexOps))
@@ -212,9 +218,11 @@ static gpu::GPUFuncOp outlineKernelFuncImpl(gpu::LaunchOp launchOp,
IRMapping map;
// Map the arguments corresponding to the launch parameters like blockIdx,
- // threadIdx, etc.
+  // threadIdx, etc. If a cluster size is present, clusterIdx and clusterDim
+  // are also generated.
Region &outlinedFuncBody = outlinedFunc.getBody();
- injectGpuIndexOperations(loc, outlinedFuncBody, launchOpBody, map);
+ injectGpuIndexOperations(loc, outlinedFuncBody, launchOpBody, map,
+ launchOp.hasClusterSize());
  // Map memory attributions from the LaunchOp to the GPUFuncOp attributions.
for (const auto &[launchArg, funcArg] :
@@ -278,12 +286,14 @@ static void convertToLaunchFuncOp(gpu::LaunchOp launchOp,
// The launch op has an optional dynamic shared memory size. If it doesn't
// exist, we use zero.
Value asyncToken = launchOp.getAsyncToken();
+ std::optional<gpu::KernelDim3> clusterSize =
+ launchOp.getClusterSizeOperandValues();
auto launchFunc = builder.create<gpu::LaunchFuncOp>(
launchOp.getLoc(), kernelFunc, launchOp.getGridSizeOperandValues(),
launchOp.getBlockSizeOperandValues(),
launchOp.getDynamicSharedMemorySize(), operands,
asyncToken ? asyncToken.getType() : nullptr,
- launchOp.getAsyncDependencies());
+ launchOp.getAsyncDependencies(), clusterSize);
launchOp.replaceAllUsesWith(launchFunc);
launchOp.erase();
}
diff --git a/mlir/lib/Dialect/LLVMIR/IR/LLVMInlining.cpp b/mlir/lib/Dialect/LLVMIR/IR/LLVMInlining.cpp
index 65c1daee6711..4a6154ea6d30 100644
--- a/mlir/lib/Dialect/LLVMIR/IR/LLVMInlining.cpp
+++ b/mlir/lib/Dialect/LLVMIR/IR/LLVMInlining.cpp
@@ -663,6 +663,10 @@ struct LLVMInlinerInterface : public DialectInlinerInterface {
<< "Cannot inline: callable is not an LLVM::LLVMFuncOp\n");
return false;
}
+ if (funcOp.isVarArg()) {
+ LLVM_DEBUG(llvm::dbgs() << "Cannot inline: callable is variadic\n");
+ return false;
+ }
// TODO: Generate aliasing metadata from noalias argument/result attributes.
if (auto attrs = funcOp.getArgAttrs()) {
for (DictionaryAttr attrDict : attrs->getAsRange<DictionaryAttr>()) {
@@ -704,7 +708,8 @@ struct LLVMInlinerInterface : public DialectInlinerInterface {
}
bool isLegalToInline(Operation *op, Region *, bool, IRMapping &) const final {
- return true;
+ // The inliner cannot handle variadic function arguments.
+ return !isa<LLVM::VaStartOp>(op);
}
/// Handle the given inlined return by replacing it with a branch. This
diff --git a/mlir/lib/Dialect/LLVMIR/Transforms/DIScopeForLLVMFuncOp.cpp b/mlir/lib/Dialect/LLVMIR/Transforms/DIScopeForLLVMFuncOp.cpp
index ecdadd3062d3..de6fb1b16977 100644
--- a/mlir/lib/Dialect/LLVMIR/Transforms/DIScopeForLLVMFuncOp.cpp
+++ b/mlir/lib/Dialect/LLVMIR/Transforms/DIScopeForLLVMFuncOp.cpp
@@ -34,64 +34,95 @@ static FileLineColLoc extractFileLoc(Location loc) {
return FileLineColLoc();
}
+/// Creates a DISubprogramAttr with the provided compile unit and attaches it
+/// to the function. Does nothing when the function already has an attached
+/// subprogram.
+static void addScopeToFunction(LLVM::LLVMFuncOp llvmFunc,
+ LLVM::DICompileUnitAttr compileUnitAttr) {
+
+ Location loc = llvmFunc.getLoc();
+ if (loc->findInstanceOf<mlir::FusedLocWith<LLVM::DISubprogramAttr>>())
+ return;
+
+ MLIRContext *context = llvmFunc->getContext();
+
+  // Filename, line and column to associate with the function.
+ LLVM::DIFileAttr fileAttr;
+ int64_t line = 1, col = 1;
+ FileLineColLoc fileLoc = extractFileLoc(loc);
+ if (!fileLoc && compileUnitAttr) {
+ fileAttr = compileUnitAttr.getFile();
+ } else if (!fileLoc) {
+ fileAttr = LLVM::DIFileAttr::get(context, "<unknown>", "");
+ } else {
+ line = fileLoc.getLine();
+ col = fileLoc.getColumn();
+ StringRef inputFilePath = fileLoc.getFilename().getValue();
+ fileAttr =
+ LLVM::DIFileAttr::get(context, llvm::sys::path::filename(inputFilePath),
+ llvm::sys::path::parent_path(inputFilePath));
+ }
+ auto subroutineTypeAttr =
+ LLVM::DISubroutineTypeAttr::get(context, llvm::dwarf::DW_CC_normal, {});
+
+ StringAttr funcNameAttr = llvmFunc.getNameAttr();
+ // Only definitions need a distinct identifier and a compilation unit.
+ mlir::DistinctAttr id;
+ if (!llvmFunc.isExternal())
+ id = mlir::DistinctAttr::create(mlir::UnitAttr::get(context));
+ else
+ compileUnitAttr = {};
+ mlir::LLVM::DISubprogramAttr subprogramAttr = LLVM::DISubprogramAttr::get(
+ context, id, compileUnitAttr, fileAttr, funcNameAttr, funcNameAttr,
+ fileAttr,
+ /*line=*/line,
+ /*scopeline=*/col,
+ LLVM::DISubprogramFlags::Definition | LLVM::DISubprogramFlags::Optimized,
+ subroutineTypeAttr);
+ llvmFunc->setLoc(FusedLoc::get(context, {loc}, subprogramAttr));
+}
+
namespace {
/// Add a debug info scope to LLVMFuncOps that are missing it.
struct DIScopeForLLVMFuncOp
: public LLVM::impl::DIScopeForLLVMFuncOpBase<DIScopeForLLVMFuncOp> {
void runOnOperation() override {
- LLVM::LLVMFuncOp llvmFunc = getOperation();
- Location loc = llvmFunc.getLoc();
- if (loc->findInstanceOf<mlir::FusedLocWith<LLVM::DISubprogramAttr>>())
- return;
+ ModuleOp module = getOperation();
+ Location loc = module.getLoc();
MLIRContext *context = &getContext();
// To find a DICompileUnitAttr attached to a parent (the module for
// example), otherwise create a default one.
+ // Find a DICompileUnitAttr attached to the module, otherwise create a
+ // default one.
LLVM::DICompileUnitAttr compileUnitAttr;
- if (ModuleOp module = llvmFunc->getParentOfType<ModuleOp>()) {
- auto fusedCompileUnitAttr =
- module->getLoc()
- ->findInstanceOf<mlir::FusedLocWith<LLVM::DICompileUnitAttr>>();
- if (fusedCompileUnitAttr)
- compileUnitAttr = fusedCompileUnitAttr.getMetadata();
- }
-
- // Filename, line and colmun to associate to the function.
- LLVM::DIFileAttr fileAttr;
- int64_t line = 1, col = 1;
- FileLineColLoc fileLoc = extractFileLoc(loc);
- if (!fileLoc && compileUnitAttr) {
- fileAttr = compileUnitAttr.getFile();
- } else if (!fileLoc) {
- fileAttr = LLVM::DIFileAttr::get(context, "<unknown>", "");
+ auto fusedCompileUnitAttr =
+ module->getLoc()
+ ->findInstanceOf<mlir::FusedLocWith<LLVM::DICompileUnitAttr>>();
+ if (fusedCompileUnitAttr) {
+ compileUnitAttr = fusedCompileUnitAttr.getMetadata();
} else {
- line = fileLoc.getLine();
- col = fileLoc.getColumn();
- StringRef inputFilePath = fileLoc.getFilename().getValue();
- fileAttr = LLVM::DIFileAttr::get(
- context, llvm::sys::path::filename(inputFilePath),
- llvm::sys::path::parent_path(inputFilePath));
- }
- if (!compileUnitAttr) {
+ LLVM::DIFileAttr fileAttr;
+ if (FileLineColLoc fileLoc = extractFileLoc(loc)) {
+ StringRef inputFilePath = fileLoc.getFilename().getValue();
+ fileAttr = LLVM::DIFileAttr::get(
+ context, llvm::sys::path::filename(inputFilePath),
+ llvm::sys::path::parent_path(inputFilePath));
+ } else {
+ fileAttr = LLVM::DIFileAttr::get(context, "<unknown>", "");
+ }
+
compileUnitAttr = LLVM::DICompileUnitAttr::get(
- context, llvm::dwarf::DW_LANG_C, fileAttr,
- StringAttr::get(context, "MLIR"), /*isOptimized=*/true,
- LLVM::DIEmissionKind::LineTablesOnly);
+ context, DistinctAttr::create(UnitAttr::get(context)),
+ llvm::dwarf::DW_LANG_C, fileAttr, StringAttr::get(context, "MLIR"),
+ /*isOptimized=*/true, LLVM::DIEmissionKind::LineTablesOnly);
}
- auto subroutineTypeAttr =
- LLVM::DISubroutineTypeAttr::get(context, llvm::dwarf::DW_CC_normal, {});
-
- StringAttr funcNameAttr = llvmFunc.getNameAttr();
- auto subprogramAttr =
- LLVM::DISubprogramAttr::get(context, compileUnitAttr, fileAttr,
- funcNameAttr, funcNameAttr, fileAttr,
- /*line=*/line,
- /*scopeline=*/col,
- LLVM::DISubprogramFlags::Definition |
- LLVM::DISubprogramFlags::Optimized,
- subroutineTypeAttr);
- llvmFunc->setLoc(FusedLoc::get(context, {loc}, subprogramAttr));
+
+ // Create subprograms for each function with the same distinct compile unit.
+ module.walk([&](LLVM::LLVMFuncOp func) {
+ addScopeToFunction(func, compileUnitAttr);
+ });
}
};
@@ -99,4 +130,4 @@ struct DIScopeForLLVMFuncOp
std::unique_ptr<Pass> mlir::LLVM::createDIScopeForLLVMFuncOpPass() {
return std::make_unique<DIScopeForLLVMFuncOp>();
-}
\ No newline at end of file
+}
diff --git a/mlir/lib/Dialect/SPIRV/IR/AtomicOps.cpp b/mlir/lib/Dialect/SPIRV/IR/AtomicOps.cpp
index 3efa955e7d8b..7e33e91414e0 100644
--- a/mlir/lib/Dialect/SPIRV/IR/AtomicOps.cpp
+++ b/mlir/lib/Dialect/SPIRV/IR/AtomicOps.cpp
@@ -19,49 +19,6 @@ using namespace mlir::spirv::AttrNames;
namespace mlir::spirv {
-// Parses an atomic update op. If the update op does not take a value (like
-// AtomicIIncrement) `hasValue` must be false.
-static ParseResult parseAtomicUpdateOp(OpAsmParser &parser,
- OperationState &state, bool hasValue) {
- spirv::Scope scope;
- spirv::MemorySemantics memoryScope;
- SmallVector<OpAsmParser::UnresolvedOperand, 2> operandInfo;
- OpAsmParser::UnresolvedOperand ptrInfo, valueInfo;
- Type type;
- SMLoc loc;
- if (parseEnumStrAttr<spirv::ScopeAttr>(scope, parser, state,
- kMemoryScopeAttrName) ||
- parseEnumStrAttr<spirv::MemorySemanticsAttr>(memoryScope, parser, state,
- kSemanticsAttrName) ||
- parser.parseOperandList(operandInfo, (hasValue ? 2 : 1)) ||
- parser.getCurrentLocation(&loc) || parser.parseColonType(type))
- return failure();
-
- auto ptrType = llvm::dyn_cast<spirv::PointerType>(type);
- if (!ptrType)
- return parser.emitError(loc, "expected pointer type");
-
- SmallVector<Type, 2> operandTypes;
- operandTypes.push_back(ptrType);
- if (hasValue)
- operandTypes.push_back(ptrType.getPointeeType());
- if (parser.resolveOperands(operandInfo, operandTypes, parser.getNameLoc(),
- state.operands))
- return failure();
- return parser.addTypeToList(ptrType.getPointeeType(), state.types);
-}
-
-// Prints an atomic update op.
-static void printAtomicUpdateOp(Operation *op, OpAsmPrinter &printer) {
- printer << " \"";
- auto scopeAttr = op->getAttrOfType<spirv::ScopeAttr>(kMemoryScopeAttrName);
- printer << spirv::stringifyScope(scopeAttr.getValue()) << "\" \"";
- auto memorySemanticsAttr =
- op->getAttrOfType<spirv::MemorySemanticsAttr>(kSemanticsAttrName);
- printer << spirv::stringifyMemorySemantics(memorySemanticsAttr.getValue())
- << "\" " << op->getOperands() << " : " << op->getOperand(0).getType();
-}
-
template <typename T>
static StringRef stringifyTypeName();
@@ -85,13 +42,6 @@ static LogicalResult verifyAtomicUpdateOp(Operation *op) {
<< stringifyTypeName<ExpectedElementType>()
<< " value, found " << elementType;
- if (op->getNumOperands() > 1) {
- auto valueType = op->getOperand(1).getType();
- if (valueType != elementType)
- return op->emitOpError("expected value to have the same type as the "
- "pointer operand's pointee type ")
- << elementType << ", but found " << valueType;
- }
auto memorySemantics =
op->getAttrOfType<spirv::MemorySemanticsAttr>(kSemanticsAttrName)
.getValue();
@@ -101,78 +51,6 @@ static LogicalResult verifyAtomicUpdateOp(Operation *op) {
return success();
}
-template <typename T>
-static void printAtomicCompareExchangeImpl(T atomOp, OpAsmPrinter &printer) {
- printer << " \"" << stringifyScope(atomOp.getMemoryScope()) << "\" \""
- << stringifyMemorySemantics(atomOp.getEqualSemantics()) << "\" \""
- << stringifyMemorySemantics(atomOp.getUnequalSemantics()) << "\" "
- << atomOp.getOperands() << " : " << atomOp.getPointer().getType();
-}
-
-static ParseResult parseAtomicCompareExchangeImpl(OpAsmParser &parser,
- OperationState &state) {
- spirv::Scope memoryScope;
- spirv::MemorySemantics equalSemantics, unequalSemantics;
- SmallVector<OpAsmParser::UnresolvedOperand, 3> operandInfo;
- Type type;
- if (parseEnumStrAttr<spirv::ScopeAttr>(memoryScope, parser, state,
- kMemoryScopeAttrName) ||
- parseEnumStrAttr<spirv::MemorySemanticsAttr>(
- equalSemantics, parser, state, kEqualSemanticsAttrName) ||
- parseEnumStrAttr<spirv::MemorySemanticsAttr>(
- unequalSemantics, parser, state, kUnequalSemanticsAttrName) ||
- parser.parseOperandList(operandInfo, 3))
- return failure();
-
- auto loc = parser.getCurrentLocation();
- if (parser.parseColonType(type))
- return failure();
-
- auto ptrType = llvm::dyn_cast<spirv::PointerType>(type);
- if (!ptrType)
- return parser.emitError(loc, "expected pointer type");
-
- if (parser.resolveOperands(
- operandInfo,
- {ptrType, ptrType.getPointeeType(), ptrType.getPointeeType()},
- parser.getNameLoc(), state.operands))
- return failure();
-
- return parser.addTypeToList(ptrType.getPointeeType(), state.types);
-}
-
-template <typename T>
-static LogicalResult verifyAtomicCompareExchangeImpl(T atomOp) {
- // According to the spec:
- // "The type of Value must be the same as Result Type. The type of the value
- // pointed to by Pointer must be the same as Result Type. This type must also
- // match the type of Comparator."
- if (atomOp.getType() != atomOp.getValue().getType())
- return atomOp.emitOpError("value operand must have the same type as the op "
- "result, but found ")
- << atomOp.getValue().getType() << " vs " << atomOp.getType();
-
- if (atomOp.getType() != atomOp.getComparator().getType())
- return atomOp.emitOpError(
- "comparator operand must have the same type as the op "
- "result, but found ")
- << atomOp.getComparator().getType() << " vs " << atomOp.getType();
-
- Type pointeeType =
- llvm::cast<spirv::PointerType>(atomOp.getPointer().getType())
- .getPointeeType();
- if (atomOp.getType() != pointeeType)
- return atomOp.emitOpError(
- "pointer operand's pointee type must have the same "
- "as the op result type, but found ")
- << pointeeType << " vs " << atomOp.getType();
-
- // TODO: Unequal cannot be set to Release or Acquire and Release.
- // In addition, Unequal cannot be set to a stronger memory-order then Equal.
-
- return success();
-}
-
//===----------------------------------------------------------------------===//
// spirv.AtomicAndOp
//===----------------------------------------------------------------------===//
@@ -181,100 +59,6 @@ LogicalResult AtomicAndOp::verify() {
return verifyAtomicUpdateOp<IntegerType>(getOperation());
}
-ParseResult AtomicAndOp::parse(OpAsmParser &parser, OperationState &result) {
- return parseAtomicUpdateOp(parser, result, true);
-}
-
-void AtomicAndOp::print(OpAsmPrinter &p) { printAtomicUpdateOp(*this, p); }
-
-//===----------------------------------------------------------------------===//
-// spirv.AtomicCompareExchangeOp
-//===----------------------------------------------------------------------===//
-
-LogicalResult AtomicCompareExchangeOp::verify() {
- return verifyAtomicCompareExchangeImpl(*this);
-}
-
-ParseResult AtomicCompareExchangeOp::parse(OpAsmParser &parser,
- OperationState &result) {
- return parseAtomicCompareExchangeImpl(parser, result);
-}
-
-void AtomicCompareExchangeOp::print(OpAsmPrinter &p) {
- printAtomicCompareExchangeImpl(*this, p);
-}
-
-//===----------------------------------------------------------------------===//
-// spirv.AtomicCompareExchangeWeakOp
-//===----------------------------------------------------------------------===//
-
-LogicalResult AtomicCompareExchangeWeakOp::verify() {
- return verifyAtomicCompareExchangeImpl(*this);
-}
-
-ParseResult AtomicCompareExchangeWeakOp::parse(OpAsmParser &parser,
- OperationState &result) {
- return parseAtomicCompareExchangeImpl(parser, result);
-}
-
-void AtomicCompareExchangeWeakOp::print(OpAsmPrinter &p) {
- printAtomicCompareExchangeImpl(*this, p);
-}
-
-//===----------------------------------------------------------------------===//
-// spirv.AtomicExchange
-//===----------------------------------------------------------------------===//
-
-void AtomicExchangeOp::print(OpAsmPrinter &printer) {
- printer << " \"" << stringifyScope(getMemoryScope()) << "\" \""
- << stringifyMemorySemantics(getSemantics()) << "\" " << getOperands()
- << " : " << getPointer().getType();
-}
-
-ParseResult AtomicExchangeOp::parse(OpAsmParser &parser,
- OperationState &result) {
- spirv::Scope memoryScope;
- spirv::MemorySemantics semantics;
- SmallVector<OpAsmParser::UnresolvedOperand, 2> operandInfo;
- Type type;
- if (parseEnumStrAttr<spirv::ScopeAttr>(memoryScope, parser, result,
- kMemoryScopeAttrName) ||
- parseEnumStrAttr<spirv::MemorySemanticsAttr>(semantics, parser, result,
- kSemanticsAttrName) ||
- parser.parseOperandList(operandInfo, 2))
- return failure();
-
- auto loc = parser.getCurrentLocation();
- if (parser.parseColonType(type))
- return failure();
-
- auto ptrType = llvm::dyn_cast<spirv::PointerType>(type);
- if (!ptrType)
- return parser.emitError(loc, "expected pointer type");
-
- if (parser.resolveOperands(operandInfo, {ptrType, ptrType.getPointeeType()},
- parser.getNameLoc(), result.operands))
- return failure();
-
- return parser.addTypeToList(ptrType.getPointeeType(), result.types);
-}
-
-LogicalResult AtomicExchangeOp::verify() {
- if (getType() != getValue().getType())
- return emitOpError("value operand must have the same type as the op "
- "result, but found ")
- << getValue().getType() << " vs " << getType();
-
- Type pointeeType =
- llvm::cast<spirv::PointerType>(getPointer().getType()).getPointeeType();
- if (getType() != pointeeType)
- return emitOpError("pointer operand's pointee type must have the same "
- "as the op result type, but found ")
- << pointeeType << " vs " << getType();
-
- return success();
-}
-
//===----------------------------------------------------------------------===//
// spirv.AtomicIAddOp
//===----------------------------------------------------------------------===//
@@ -283,12 +67,6 @@ LogicalResult AtomicIAddOp::verify() {
return verifyAtomicUpdateOp<IntegerType>(getOperation());
}
-ParseResult AtomicIAddOp::parse(OpAsmParser &parser, OperationState &result) {
- return parseAtomicUpdateOp(parser, result, true);
-}
-
-void AtomicIAddOp::print(OpAsmPrinter &p) { printAtomicUpdateOp(*this, p); }
-
//===----------------------------------------------------------------------===//
// spirv.EXT.AtomicFAddOp
//===----------------------------------------------------------------------===//
@@ -297,15 +75,6 @@ LogicalResult EXTAtomicFAddOp::verify() {
return verifyAtomicUpdateOp<FloatType>(getOperation());
}
-ParseResult EXTAtomicFAddOp::parse(OpAsmParser &parser,
- OperationState &result) {
- return parseAtomicUpdateOp(parser, result, true);
-}
-
-void spirv::EXTAtomicFAddOp::print(OpAsmPrinter &p) {
- printAtomicUpdateOp(*this, p);
-}
-
//===----------------------------------------------------------------------===//
// spirv.AtomicIDecrementOp
//===----------------------------------------------------------------------===//
@@ -314,15 +83,6 @@ LogicalResult AtomicIDecrementOp::verify() {
return verifyAtomicUpdateOp<IntegerType>(getOperation());
}
-ParseResult AtomicIDecrementOp::parse(OpAsmParser &parser,
- OperationState &result) {
- return parseAtomicUpdateOp(parser, result, false);
-}
-
-void AtomicIDecrementOp::print(OpAsmPrinter &p) {
- printAtomicUpdateOp(*this, p);
-}
-
//===----------------------------------------------------------------------===//
// spirv.AtomicIIncrementOp
//===----------------------------------------------------------------------===//
@@ -331,15 +91,6 @@ LogicalResult AtomicIIncrementOp::verify() {
return verifyAtomicUpdateOp<IntegerType>(getOperation());
}
-ParseResult AtomicIIncrementOp::parse(OpAsmParser &parser,
- OperationState &result) {
- return parseAtomicUpdateOp(parser, result, false);
-}
-
-void AtomicIIncrementOp::print(OpAsmPrinter &p) {
- printAtomicUpdateOp(*this, p);
-}
-
//===----------------------------------------------------------------------===//
// spirv.AtomicISubOp
//===----------------------------------------------------------------------===//
@@ -348,12 +99,6 @@ LogicalResult AtomicISubOp::verify() {
return verifyAtomicUpdateOp<IntegerType>(getOperation());
}
-ParseResult AtomicISubOp::parse(OpAsmParser &parser, OperationState &result) {
- return parseAtomicUpdateOp(parser, result, true);
-}
-
-void AtomicISubOp::print(OpAsmPrinter &p) { printAtomicUpdateOp(*this, p); }
-
//===----------------------------------------------------------------------===//
// spirv.AtomicOrOp
//===----------------------------------------------------------------------===//
@@ -362,12 +107,6 @@ LogicalResult AtomicOrOp::verify() {
return verifyAtomicUpdateOp<IntegerType>(getOperation());
}
-ParseResult AtomicOrOp::parse(OpAsmParser &parser, OperationState &result) {
- return parseAtomicUpdateOp(parser, result, true);
-}
-
-void AtomicOrOp::print(OpAsmPrinter &p) { printAtomicUpdateOp(*this, p); }
-
//===----------------------------------------------------------------------===//
// spirv.AtomicSMaxOp
//===----------------------------------------------------------------------===//
@@ -376,12 +115,6 @@ LogicalResult AtomicSMaxOp::verify() {
return verifyAtomicUpdateOp<IntegerType>(getOperation());
}
-ParseResult AtomicSMaxOp::parse(OpAsmParser &parser, OperationState &result) {
- return parseAtomicUpdateOp(parser, result, true);
-}
-
-void AtomicSMaxOp::print(OpAsmPrinter &p) { printAtomicUpdateOp(*this, p); }
-
//===----------------------------------------------------------------------===//
// spirv.AtomicSMinOp
//===----------------------------------------------------------------------===//
@@ -390,12 +123,6 @@ LogicalResult AtomicSMinOp::verify() {
return verifyAtomicUpdateOp<IntegerType>(getOperation());
}
-ParseResult AtomicSMinOp::parse(OpAsmParser &parser, OperationState &result) {
- return parseAtomicUpdateOp(parser, result, true);
-}
-
-void AtomicSMinOp::print(OpAsmPrinter &p) { printAtomicUpdateOp(*this, p); }
-
//===----------------------------------------------------------------------===//
// spirv.AtomicUMaxOp
//===----------------------------------------------------------------------===//
@@ -404,12 +131,6 @@ LogicalResult AtomicUMaxOp::verify() {
return verifyAtomicUpdateOp<IntegerType>(getOperation());
}
-ParseResult AtomicUMaxOp::parse(OpAsmParser &parser, OperationState &result) {
- return parseAtomicUpdateOp(parser, result, true);
-}
-
-void AtomicUMaxOp::print(OpAsmPrinter &p) { printAtomicUpdateOp(*this, p); }
-
//===----------------------------------------------------------------------===//
// spirv.AtomicUMinOp
//===----------------------------------------------------------------------===//
@@ -418,12 +139,6 @@ LogicalResult AtomicUMinOp::verify() {
return verifyAtomicUpdateOp<IntegerType>(getOperation());
}
-ParseResult AtomicUMinOp::parse(OpAsmParser &parser, OperationState &result) {
- return parseAtomicUpdateOp(parser, result, true);
-}
-
-void AtomicUMinOp::print(OpAsmPrinter &p) { printAtomicUpdateOp(*this, p); }
-
//===----------------------------------------------------------------------===//
// spirv.AtomicXorOp
//===----------------------------------------------------------------------===//
@@ -432,10 +147,4 @@ LogicalResult AtomicXorOp::verify() {
return verifyAtomicUpdateOp<IntegerType>(getOperation());
}
-ParseResult AtomicXorOp::parse(OpAsmParser &parser, OperationState &result) {
- return parseAtomicUpdateOp(parser, result, true);
-}
-
-void AtomicXorOp::print(OpAsmPrinter &p) { printAtomicUpdateOp(*this, p); }
-
} // namespace mlir::spirv
diff --git a/mlir/lib/Dialect/SPIRV/IR/SPIRVDialect.cpp b/mlir/lib/Dialect/SPIRV/IR/SPIRVDialect.cpp
index 8a68decc5878..d7944d600b0a 100644
--- a/mlir/lib/Dialect/SPIRV/IR/SPIRVDialect.cpp
+++ b/mlir/lib/Dialect/SPIRV/IR/SPIRVDialect.cpp
@@ -992,30 +992,39 @@ static LogicalResult verifyRegionAttribute(Location loc, Type valueType,
StringRef symbol = attribute.getName().strref();
Attribute attr = attribute.getValue();
- if (symbol != spirv::getInterfaceVarABIAttrName())
- return emitError(loc, "found unsupported '")
- << symbol << "' attribute on region argument";
-
- auto varABIAttr = llvm::dyn_cast<spirv::InterfaceVarABIAttr>(attr);
- if (!varABIAttr)
- return emitError(loc, "'")
- << symbol << "' must be a spirv::InterfaceVarABIAttr";
-
- if (varABIAttr.getStorageClass() && !valueType.isIntOrIndexOrFloat())
- return emitError(loc, "'") << symbol
- << "' attribute cannot specify storage class "
- "when attaching to a non-scalar value";
+ if (symbol == spirv::getInterfaceVarABIAttrName()) {
+ auto varABIAttr = llvm::dyn_cast<spirv::InterfaceVarABIAttr>(attr);
+ if (!varABIAttr)
+ return emitError(loc, "'")
+ << symbol << "' must be a spirv::InterfaceVarABIAttr";
+
+ if (varABIAttr.getStorageClass() && !valueType.isIntOrIndexOrFloat())
+ return emitError(loc, "'") << symbol
+ << "' attribute cannot specify storage class "
+ "when attaching to a non-scalar value";
+ return success();
+ }
+ if (symbol == spirv::DecorationAttr::name) {
+ if (!isa<spirv::DecorationAttr>(attr))
+ return emitError(loc, "'")
+ << symbol << "' must be a spirv::DecorationAttr";
+ return success();
+ }
- return success();
+ return emitError(loc, "found unsupported '")
+ << symbol << "' attribute on region argument";
}
LogicalResult SPIRVDialect::verifyRegionArgAttribute(Operation *op,
unsigned regionIndex,
unsigned argIndex,
NamedAttribute attribute) {
- return verifyRegionAttribute(
- op->getLoc(), op->getRegion(regionIndex).getArgument(argIndex).getType(),
- attribute);
+ auto funcOp = dyn_cast<FunctionOpInterface>(op);
+ if (!funcOp)
+ return success();
+ Type argType = funcOp.getArgumentTypes()[argIndex];
+
+ return verifyRegionAttribute(op->getLoc(), argType, attribute);
}
LogicalResult SPIRVDialect::verifyRegionResultAttribute(
diff --git a/mlir/lib/Dialect/SPIRV/IR/SPIRVOps.cpp b/mlir/lib/Dialect/SPIRV/IR/SPIRVOps.cpp
index 5343a12132a9..3b159030cab7 100644
--- a/mlir/lib/Dialect/SPIRV/IR/SPIRVOps.cpp
+++ b/mlir/lib/Dialect/SPIRV/IR/SPIRVOps.cpp
@@ -972,8 +972,73 @@ void spirv::FuncOp::print(OpAsmPrinter &printer) {
}
LogicalResult spirv::FuncOp::verifyType() {
- if (getFunctionType().getNumResults() > 1)
+ FunctionType fnType = getFunctionType();
+ if (fnType.getNumResults() > 1)
return emitOpError("cannot have more than one result");
+
+ auto hasDecorationAttr = [&](spirv::Decoration decoration,
+ unsigned argIndex) {
+ auto func = llvm::cast<FunctionOpInterface>(getOperation());
+ for (auto argAttr : cast<FunctionOpInterface>(func).getArgAttrs(argIndex)) {
+ if (argAttr.getName() != spirv::DecorationAttr::name)
+ continue;
+ if (auto decAttr = dyn_cast<spirv::DecorationAttr>(argAttr.getValue()))
+ return decAttr.getValue() == decoration;
+ }
+ return false;
+ };
+
+ for (unsigned i = 0, e = this->getNumArguments(); i != e; ++i) {
+ Type param = fnType.getInputs()[i];
+ auto inputPtrType = dyn_cast<spirv::PointerType>(param);
+ if (!inputPtrType)
+ continue;
+
+ auto pointeePtrType =
+ dyn_cast<spirv::PointerType>(inputPtrType.getPointeeType());
+ if (pointeePtrType) {
+ // SPIR-V spec, from SPV_KHR_physical_storage_buffer:
+ // > If an OpFunctionParameter is a pointer (or contains a pointer)
+ // > and the type it points to is a pointer in the PhysicalStorageBuffer
+ // > storage class, the function parameter must be decorated with exactly
+ // > one of AliasedPointer or RestrictPointer.
+ if (pointeePtrType.getStorageClass() !=
+ spirv::StorageClass::PhysicalStorageBuffer)
+ continue;
+
+ bool hasAliasedPtr =
+ hasDecorationAttr(spirv::Decoration::AliasedPointer, i);
+ bool hasRestrictPtr =
+ hasDecorationAttr(spirv::Decoration::RestrictPointer, i);
+ if (!hasAliasedPtr && !hasRestrictPtr)
+ return emitOpError()
+ << "with a pointer points to a physical buffer pointer must "
+ "be decorated either 'AliasedPointer' or 'RestrictPointer'";
+ continue;
+ }
+ // SPIR-V spec, from SPV_KHR_physical_storage_buffer:
+ // > If an OpFunctionParameter is a pointer (or contains a pointer) in
+ // > the PhysicalStorageBuffer storage class, the function parameter must
+ // > be decorated with exactly one of Aliased or Restrict.
+ if (auto pointeeArrayType =
+ dyn_cast<spirv::ArrayType>(inputPtrType.getPointeeType())) {
+ pointeePtrType =
+ dyn_cast<spirv::PointerType>(pointeeArrayType.getElementType());
+ } else {
+ pointeePtrType = inputPtrType;
+ }
+
+ if (!pointeePtrType || pointeePtrType.getStorageClass() !=
+ spirv::StorageClass::PhysicalStorageBuffer)
+ continue;
+
+ bool hasAliased = hasDecorationAttr(spirv::Decoration::Aliased, i);
+ bool hasRestrict = hasDecorationAttr(spirv::Decoration::Restrict, i);
+ if (!hasAliased && !hasRestrict)
+ return emitOpError() << "with physical buffer pointer must be decorated "
+ "either 'Aliased' or 'Restrict'";
+ }
+
return success();
}
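For illustration (not from the patch): with this verifier in place, a `spirv.func` taking a `!spirv.ptr<i32, PhysicalStorageBuffer>` argument with no `spirv.decoration` argument attribute should now be rejected with the "with physical buffer pointer must be decorated ..." error above, while attaching either the `Aliased` or the `Restrict` decoration (as in the examples added to SPIRVStructureOps.td) lets it verify.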
diff --git a/mlir/lib/Interfaces/ControlFlowInterfaces.cpp b/mlir/lib/Interfaces/ControlFlowInterfaces.cpp
index a563ec5cb8db..39e5e9997f7d 100644
--- a/mlir/lib/Interfaces/ControlFlowInterfaces.cpp
+++ b/mlir/lib/Interfaces/ControlFlowInterfaces.cpp
@@ -219,11 +219,18 @@ LogicalResult detail::verifyTypesAlongControlFlowEdges(Operation *op) {
return success();
}
-/// Return `true` if region `r` is reachable from region `begin` according to
-/// the RegionBranchOpInterface (by taking a branch).
-static bool isRegionReachable(Region *begin, Region *r) {
- assert(begin->getParentOp() == r->getParentOp() &&
- "expected that both regions belong to the same op");
+/// Stop condition for `traverseRegionGraph`. The traversal is interrupted if
+/// this function returns "true" for a successor region. The first parameter is
+/// the successor region. The second parameter indicates all already visited
+/// regions.
+using StopConditionFn = function_ref<bool(Region *, ArrayRef<bool> visited)>;
+
+/// Traverse the region graph starting at `begin`. The traversal is interrupted
+/// if `stopCondition` evaluates to "true" for a successor region. In that case,
+/// this function returns "true". Otherwise, if the traversal was not
+/// interrupted, this function returns "false".
+static bool traverseRegionGraph(Region *begin,
+ StopConditionFn stopConditionFn) {
auto op = cast<RegionBranchOpInterface>(begin->getParentOp());
SmallVector<bool> visited(op->getNumRegions(), false);
visited[begin->getRegionNumber()] = true;
@@ -242,7 +249,7 @@ static bool isRegionReachable(Region *begin, Region *r) {
// Process all regions in the worklist via DFS.
while (!worklist.empty()) {
Region *nextRegion = worklist.pop_back_val();
- if (nextRegion == r)
+ if (stopConditionFn(nextRegion, visited))
return true;
if (visited[nextRegion->getRegionNumber()])
continue;
@@ -253,6 +260,18 @@ static bool isRegionReachable(Region *begin, Region *r) {
return false;
}
+/// Return `true` if region `r` is reachable from region `begin` according to
+/// the RegionBranchOpInterface (by taking a branch).
+static bool isRegionReachable(Region *begin, Region *r) {
+ assert(begin->getParentOp() == r->getParentOp() &&
+ "expected that both regions belong to the same op");
+ return traverseRegionGraph(begin,
+ [&](Region *nextRegion, ArrayRef<bool> visited) {
+ // Interrupt traversal if `r` was reached.
+ return nextRegion == r;
+ });
+}
+
/// Return `true` if `a` and `b` are in mutually exclusive regions.
///
/// 1. Find the first common of `a` and `b` (ancestor) that implements
@@ -306,6 +325,21 @@ bool RegionBranchOpInterface::isRepetitiveRegion(unsigned index) {
return isRegionReachable(region, region);
}
+bool RegionBranchOpInterface::hasLoop() {
+ SmallVector<RegionSuccessor> entryRegions;
+ getSuccessorRegions(RegionBranchPoint::parent(), entryRegions);
+ for (RegionSuccessor successor : entryRegions)
+ if (!successor.isParent() &&
+ traverseRegionGraph(successor.getSuccessor(),
+ [](Region *nextRegion, ArrayRef<bool> visited) {
+ // Interrupt traversal if the region was already
+ // visited.
+ return visited[nextRegion->getRegionNumber()];
+ }))
+ return true;
+ return false;
+}
+
Region *mlir::getEnclosingRepetitiveRegion(Operation *op) {
while (Region *region = op->getParentRegion()) {
op = region->getParentOp();
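
The new RegionBranchOpInterface::hasLoop() reuses traverseRegionGraph: starting from every entry region, it reports a loop as soon as the traversal reaches a region that was already visited. An illustrative C++ usage sketch, assuming `op` is an `Operation *` whose operation implements RegionBranchOpInterface (the variable names are illustrative):

    // Sketch: query the new hasLoop() helper on a region-branching op.
    if (auto branchOp = dyn_cast<RegionBranchOpInterface>(op)) {
      // True iff some region can branch (transitively) back to an already
      // visited region, i.e. the op models a region-level loop.
      bool loops = branchOp.hasLoop();
      (void)loops;
    }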
diff --git a/mlir/lib/Target/LLVMIR/DebugImporter.cpp b/mlir/lib/Target/LLVMIR/DebugImporter.cpp
index afc6918eae97..652129523009 100644
--- a/mlir/lib/Target/LLVMIR/DebugImporter.cpp
+++ b/mlir/lib/Target/LLVMIR/DebugImporter.cpp
@@ -50,10 +50,11 @@ DIBasicTypeAttr DebugImporter::translateImpl(llvm::DIBasicType *node) {
DICompileUnitAttr DebugImporter::translateImpl(llvm::DICompileUnit *node) {
std::optional<DIEmissionKind> emissionKind =
symbolizeDIEmissionKind(node->getEmissionKind());
- return DICompileUnitAttr::get(context, node->getSourceLanguage(),
- translate(node->getFile()),
- getStringAttrOrNull(node->getRawProducer()),
- node->isOptimized(), emissionKind.value());
+ return DICompileUnitAttr::get(
+ context, DistinctAttr::create(UnitAttr::get(context)),
+ node->getSourceLanguage(), translate(node->getFile()),
+ getStringAttrOrNull(node->getRawProducer()), node->isOptimized(),
+ emissionKind.value());
}
DICompositeTypeAttr DebugImporter::translateImpl(llvm::DICompositeType *node) {
@@ -162,6 +163,10 @@ DINamespaceAttr DebugImporter::translateImpl(llvm::DINamespace *node) {
}
DISubprogramAttr DebugImporter::translateImpl(llvm::DISubprogram *node) {
+ // Only definitions require a distinct identifier.
+ mlir::DistinctAttr id;
+ if (node->isDistinct())
+ id = DistinctAttr::create(UnitAttr::get(context));
std::optional<DISubprogramFlags> subprogramFlags =
symbolizeDISubprogramFlags(node->getSubprogram()->getSPFlags());
// Return nullptr if the scope or type is a cyclic dependency.
@@ -171,7 +176,7 @@ DISubprogramAttr DebugImporter::translateImpl(llvm::DISubprogram *node) {
DISubroutineTypeAttr type = translate(node->getType());
if (node->getType() && !type)
return nullptr;
- return DISubprogramAttr::get(context, translate(node->getUnit()), scope,
+ return DISubprogramAttr::get(context, id, translate(node->getUnit()), scope,
getStringAttrOrNull(node->getRawName()),
getStringAttrOrNull(node->getRawLinkageName()),
translate(node->getFile()), node->getLine(),
diff --git a/mlir/lib/Target/SPIRV/Deserialization/Deserializer.cpp b/mlir/lib/Target/SPIRV/Deserialization/Deserializer.cpp
index 00645d2c4551..0c521adb1133 100644
--- a/mlir/lib/Target/SPIRV/Deserialization/Deserializer.cpp
+++ b/mlir/lib/Target/SPIRV/Deserialization/Deserializer.cpp
@@ -239,8 +239,7 @@ LogicalResult spirv::Deserializer::processDecoration(ArrayRef<uint32_t> words) {
if (decorationName.empty()) {
return emitError(unknownLoc, "invalid Decoration code : ") << words[1];
}
- auto attrName = llvm::convertToSnakeFromCamelCase(decorationName);
- auto symbol = opBuilder.getStringAttr(attrName);
+ auto symbol = getSymbolDecoration(decorationName);
switch (static_cast<spirv::Decoration>(words[1])) {
case spirv::Decoration::FPFastMathMode:
if (words.size() != 3) {
@@ -298,6 +297,7 @@ LogicalResult spirv::Deserializer::processDecoration(ArrayRef<uint32_t> words) {
break;
}
case spirv::Decoration::Aliased:
+ case spirv::Decoration::AliasedPointer:
case spirv::Decoration::Block:
case spirv::Decoration::BufferBlock:
case spirv::Decoration::Flat:
@@ -308,6 +308,7 @@ LogicalResult spirv::Deserializer::processDecoration(ArrayRef<uint32_t> words) {
case spirv::Decoration::NoUnsignedWrap:
case spirv::Decoration::RelaxedPrecision:
case spirv::Decoration::Restrict:
+ case spirv::Decoration::RestrictPointer:
if (words.size() != 2) {
return emitError(unknownLoc, "OpDecoration with ")
<< decorationName << "needs a single target <id>";
@@ -369,6 +370,46 @@ LogicalResult spirv::Deserializer::processMemberName(ArrayRef<uint32_t> words) {
return success();
}
+LogicalResult spirv::Deserializer::setFunctionArgAttrs(
+ uint32_t argID, SmallVectorImpl<Attribute> &argAttrs, size_t argIndex) {
+ if (!decorations.contains(argID)) {
+ argAttrs[argIndex] = DictionaryAttr::get(context, {});
+ return success();
+ }
+
+ spirv::DecorationAttr foundDecorationAttr;
+ for (NamedAttribute decAttr : decorations[argID]) {
+ for (auto decoration :
+ {spirv::Decoration::Aliased, spirv::Decoration::Restrict,
+ spirv::Decoration::AliasedPointer,
+ spirv::Decoration::RestrictPointer}) {
+
+ if (decAttr.getName() !=
+ getSymbolDecoration(stringifyDecoration(decoration)))
+ continue;
+
+ if (foundDecorationAttr)
+ return emitError(unknownLoc,
+ "more than one Aliased/Restrict decorations for "
+ "function argument with result <id> ")
+ << argID;
+
+ foundDecorationAttr = spirv::DecorationAttr::get(context, decoration);
+ break;
+ }
+ }
+
+ if (!foundDecorationAttr)
+ return emitError(unknownLoc, "unimplemented decoration support for "
+ "function argument with result <id> ")
+ << argID;
+
+ NamedAttribute attr(StringAttr::get(context, spirv::DecorationAttr::name),
+ foundDecorationAttr);
+ argAttrs[argIndex] = DictionaryAttr::get(context, attr);
+ return success();
+}
+
LogicalResult
spirv::Deserializer::processFunction(ArrayRef<uint32_t> operands) {
if (curFunction) {
@@ -430,6 +471,9 @@ spirv::Deserializer::processFunction(ArrayRef<uint32_t> operands) {
logger.indent();
});
+ SmallVector<Attribute> argAttrs;
+ argAttrs.resize(functionType.getNumInputs());
+
// Parse the op argument instructions
if (functionType.getNumInputs()) {
for (size_t i = 0, e = functionType.getNumInputs(); i != e; ++i) {
@@ -463,11 +507,21 @@ spirv::Deserializer::processFunction(ArrayRef<uint32_t> operands) {
return emitError(unknownLoc, "duplicate definition of result <id> ")
<< operands[1];
}
+ if (failed(setFunctionArgAttrs(operands[1], argAttrs, i))) {
+ return failure();
+ }
+
auto argValue = funcOp.getArgument(i);
valueMap[operands[1]] = argValue;
}
}
+ if (llvm::any_of(argAttrs, [](Attribute attr) {
+ auto argAttr = cast<DictionaryAttr>(attr);
+ return !argAttr.empty();
+ }))
+ funcOp.setArgAttrsAttr(ArrayAttr::get(context, argAttrs));
+
// entryBlock is needed to access the arguments, Once that is done, we can
// erase the block for functions with 'Import' LinkageAttributes, since these
// are essentially function declarations, so they have no body.
diff --git a/mlir/lib/Target/SPIRV/Deserialization/Deserializer.h b/mlir/lib/Target/SPIRV/Deserialization/Deserializer.h
index 69be47851ef3..fc9a8f5f9364 100644
--- a/mlir/lib/Target/SPIRV/Deserialization/Deserializer.h
+++ b/mlir/lib/Target/SPIRV/Deserialization/Deserializer.h
@@ -233,6 +233,19 @@ private:
return globalVariableMap.lookup(id);
}
+ /// Sets the function argument's attributes. |argID| is the function
+ /// argument's result <id>, and |argIndex| is its index in the function's
+ /// argument list.
+ LogicalResult setFunctionArgAttrs(uint32_t argID,
+ SmallVectorImpl<Attribute> &argAttrs,
+ size_t argIndex);
+
+ /// Gets the symbol name derived from the given decoration name.
+ StringAttr getSymbolDecoration(StringRef decorationName) {
+ auto attrName = llvm::convertToSnakeFromCamelCase(decorationName);
+ return opBuilder.getStringAttr(attrName);
+ }
+
//===--------------------------------------------------------------------===//
// Type
//===--------------------------------------------------------------------===//
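
The getSymbolDecoration helper keys per-argument decorations by the snake-cased decoration name, so the deserializer looks decorations up under the same attribute names the serializer emits. An illustrative mapping, assuming the usual behavior of llvm::convertToSnakeFromCamelCase (the concrete strings below are examples):

    // getSymbolDecoration("Aliased")          -> StringAttr "aliased"
    // getSymbolDecoration("RestrictPointer")  -> StringAttr "restrict_pointer"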
diff --git a/mlir/lib/Target/SPIRV/Serialization/SerializeOps.cpp b/mlir/lib/Target/SPIRV/Serialization/SerializeOps.cpp
index 7bfcca5b4dcd..41d2c0310d00 100644
--- a/mlir/lib/Target/SPIRV/Serialization/SerializeOps.cpp
+++ b/mlir/lib/Target/SPIRV/Serialization/SerializeOps.cpp
@@ -177,6 +177,34 @@ LogicalResult Serializer::processUndefOp(spirv::UndefOp op) {
return success();
}
+LogicalResult Serializer::processFuncParameter(spirv::FuncOp op) {
+ for (auto [idx, arg] : llvm::enumerate(op.getArguments())) {
+ uint32_t argTypeID = 0;
+ if (failed(processType(op.getLoc(), arg.getType(), argTypeID))) {
+ return failure();
+ }
+ auto argValueID = getNextID();
+
+ // Process decoration attributes of arguments.
+ auto funcOp = cast<FunctionOpInterface>(*op);
+ for (auto argAttr : funcOp.getArgAttrs(idx)) {
+ if (argAttr.getName() != DecorationAttr::name)
+ continue;
+
+ if (auto decAttr = dyn_cast<DecorationAttr>(argAttr.getValue())) {
+ if (failed(processDecorationAttr(op->getLoc(), argValueID,
+ decAttr.getValue(), decAttr)))
+ return failure();
+ }
+ }
+
+ valueIDMap[arg] = argValueID;
+ encodeInstructionInto(functionHeader, spirv::Opcode::OpFunctionParameter,
+ {argTypeID, argValueID});
+ }
+ return success();
+}
+
LogicalResult Serializer::processFuncOp(spirv::FuncOp op) {
LLVM_DEBUG(llvm::dbgs() << "-- start function '" << op.getName() << "' --\n");
assert(functionHeader.empty() && functionBody.empty());
@@ -229,32 +257,15 @@ LogicalResult Serializer::processFuncOp(spirv::FuncOp op) {
// is going to return false for this function from now on)
// Hence, we'll remove the body once we are done with the serialization.
op.addEntryBlock();
- for (auto arg : op.getArguments()) {
- uint32_t argTypeID = 0;
- if (failed(processType(op.getLoc(), arg.getType(), argTypeID))) {
- return failure();
- }
- auto argValueID = getNextID();
- valueIDMap[arg] = argValueID;
- encodeInstructionInto(functionHeader, spirv::Opcode::OpFunctionParameter,
- {argTypeID, argValueID});
- }
+ if (failed(processFuncParameter(op)))
+ return failure();
// Don't need to process the added block, there is nothing to process,
// the fake body was added just to get the arguments, remove the body,
// since it's use is done.
op.eraseBody();
} else {
- // Declare the parameters.
- for (auto arg : op.getArguments()) {
- uint32_t argTypeID = 0;
- if (failed(processType(op.getLoc(), arg.getType(), argTypeID))) {
- return failure();
- }
- auto argValueID = getNextID();
- valueIDMap[arg] = argValueID;
- encodeInstructionInto(functionHeader, spirv::Opcode::OpFunctionParameter,
- {argTypeID, argValueID});
- }
+ if (failed(processFuncParameter(op)))
+ return failure();
// Some instructions (e.g., OpVariable) in a function must be in the first
// block in the function. These instructions will be put in
diff --git a/mlir/lib/Target/SPIRV/Serialization/Serializer.cpp b/mlir/lib/Target/SPIRV/Serialization/Serializer.cpp
index 9e9a16456cc1..1029fb933175 100644
--- a/mlir/lib/Target/SPIRV/Serialization/Serializer.cpp
+++ b/mlir/lib/Target/SPIRV/Serialization/Serializer.cpp
@@ -215,23 +215,15 @@ static std::string getDecorationName(StringRef attrName) {
return llvm::convertToCamelFromSnakeCase(attrName, /*capitalizeFirst=*/true);
}
-LogicalResult Serializer::processDecoration(Location loc, uint32_t resultID,
- NamedAttribute attr) {
- auto attrName = attr.getName().strref();
- auto decorationName = getDecorationName(attrName);
- auto decoration = spirv::symbolizeDecoration(decorationName);
- if (!decoration) {
- return emitError(
- loc, "non-argument attributes expected to have snake-case-ified "
- "decoration name, unhandled attribute with name : ")
- << attrName;
- }
+LogicalResult Serializer::processDecorationAttr(Location loc, uint32_t resultID,
+ Decoration decoration,
+ Attribute attr) {
SmallVector<uint32_t, 1> args;
- switch (*decoration) {
+ switch (decoration) {
case spirv::Decoration::LinkageAttributes: {
// Get the value of the Linkage Attributes
// e.g., LinkageAttributes=["linkageName", linkageType].
- auto linkageAttr = llvm::dyn_cast<spirv::LinkageAttributesAttr>(attr.getValue());
+ auto linkageAttr = llvm::dyn_cast<spirv::LinkageAttributesAttr>(attr);
auto linkageName = linkageAttr.getLinkageName();
auto linkageType = linkageAttr.getLinkageType().getValue();
// Encode the Linkage Name (string literal to uint32_t).
@@ -241,32 +233,36 @@ LogicalResult Serializer::processDecoration(Location loc, uint32_t resultID,
break;
}
case spirv::Decoration::FPFastMathMode:
- if (auto intAttr = dyn_cast<FPFastMathModeAttr>(attr.getValue())) {
+ if (auto intAttr = dyn_cast<FPFastMathModeAttr>(attr)) {
args.push_back(static_cast<uint32_t>(intAttr.getValue()));
break;
}
return emitError(loc, "expected FPFastMathModeAttr attribute for ")
- << attrName;
+ << stringifyDecoration(decoration);
case spirv::Decoration::Binding:
case spirv::Decoration::DescriptorSet:
case spirv::Decoration::Location:
- if (auto intAttr = dyn_cast<IntegerAttr>(attr.getValue())) {
+ if (auto intAttr = dyn_cast<IntegerAttr>(attr)) {
args.push_back(intAttr.getValue().getZExtValue());
break;
}
- return emitError(loc, "expected integer attribute for ") << attrName;
+ return emitError(loc, "expected integer attribute for ")
+ << stringifyDecoration(decoration);
case spirv::Decoration::BuiltIn:
- if (auto strAttr = dyn_cast<StringAttr>(attr.getValue())) {
+ if (auto strAttr = dyn_cast<StringAttr>(attr)) {
auto enumVal = spirv::symbolizeBuiltIn(strAttr.getValue());
if (enumVal) {
args.push_back(static_cast<uint32_t>(*enumVal));
break;
}
return emitError(loc, "invalid ")
- << attrName << " attribute " << strAttr.getValue();
+ << stringifyDecoration(decoration) << " decoration attribute "
+ << strAttr.getValue();
}
- return emitError(loc, "expected string attribute for ") << attrName;
+ return emitError(loc, "expected string attribute for ")
+ << stringifyDecoration(decoration);
case spirv::Decoration::Aliased:
+ case spirv::Decoration::AliasedPointer:
case spirv::Decoration::Flat:
case spirv::Decoration::NonReadable:
case spirv::Decoration::NonWritable:
@@ -275,14 +271,34 @@ LogicalResult Serializer::processDecoration(Location loc, uint32_t resultID,
case spirv::Decoration::NoUnsignedWrap:
case spirv::Decoration::RelaxedPrecision:
case spirv::Decoration::Restrict:
- // For unit attributes, the args list has no values so we do nothing
- if (auto unitAttr = dyn_cast<UnitAttr>(attr.getValue()))
+ case spirv::Decoration::RestrictPointer:
+ // For unit attributes and decoration attributes, the args list
+ // has no values, so we do nothing.
+ if (isa<UnitAttr, DecorationAttr>(attr))
break;
- return emitError(loc, "expected unit attribute for ") << attrName;
+ return emitError(loc,
+ "expected unit attribute or decoration attribute for ")
+ << stringifyDecoration(decoration);
default:
- return emitError(loc, "unhandled decoration ") << decorationName;
+ return emitError(loc, "unhandled decoration ")
+ << stringifyDecoration(decoration);
+ }
+ return emitDecoration(resultID, decoration, args);
+}
+
+LogicalResult Serializer::processDecoration(Location loc, uint32_t resultID,
+ NamedAttribute attr) {
+ StringRef attrName = attr.getName().strref();
+ std::string decorationName = getDecorationName(attrName);
+ std::optional<Decoration> decoration =
+ spirv::symbolizeDecoration(decorationName);
+ if (!decoration) {
+ return emitError(
+ loc, "non-argument attributes expected to have snake-case-ified "
+ "decoration name, unhandled attribute with name : ")
+ << attrName;
}
- return emitDecoration(resultID, *decoration, args);
+ return processDecorationAttr(loc, resultID, *decoration, attr.getValue());
}
LogicalResult Serializer::processName(uint32_t resultID, StringRef name) {
diff --git a/mlir/lib/Target/SPIRV/Serialization/Serializer.h b/mlir/lib/Target/SPIRV/Serialization/Serializer.h
index 4b2ebf610bd7..9edb0f4af008 100644
--- a/mlir/lib/Target/SPIRV/Serialization/Serializer.h
+++ b/mlir/lib/Target/SPIRV/Serialization/Serializer.h
@@ -127,6 +127,7 @@ private:
/// Processes a SPIR-V function op.
LogicalResult processFuncOp(spirv::FuncOp op);
+ LogicalResult processFuncParameter(spirv::FuncOp op);
LogicalResult processVariableOp(spirv::VariableOp op);
@@ -134,6 +135,8 @@ private:
LogicalResult processGlobalVariableOp(spirv::GlobalVariableOp varOp);
/// Process attributes that translate to decorations on the result <id>
+ LogicalResult processDecorationAttr(Location loc, uint32_t resultID,
+ Decoration decoration, Attribute attr);
LogicalResult processDecoration(Location loc, uint32_t resultID,
NamedAttribute attr);
diff --git a/mlir/lib/Transforms/Utils/GreedyPatternRewriteDriver.cpp b/mlir/lib/Transforms/Utils/GreedyPatternRewriteDriver.cpp
index 82438e2bf706..67c2d9d59f4c 100644
--- a/mlir/lib/Transforms/Utils/GreedyPatternRewriteDriver.cpp
+++ b/mlir/lib/Transforms/Utils/GreedyPatternRewriteDriver.cpp
@@ -434,10 +434,10 @@ bool GreedyPatternRewriteDriver::processWorklist() {
SmallVector<OpFoldResult> foldResults;
if (succeeded(op->fold(foldResults))) {
LLVM_DEBUG(logResultWithLine("success", "operation was folded"));
- changed = true;
if (foldResults.empty()) {
// Op was modified in-place.
notifyOperationModified(op);
+ changed = true;
#if MLIR_ENABLE_EXPENSIVE_PATTERN_API_CHECKS
if (config.scope && failed(verify(config.scope->getParentOp())))
llvm::report_fatal_error("IR failed to verify after folding");
@@ -451,6 +451,7 @@ bool GreedyPatternRewriteDriver::processWorklist() {
OpBuilder::InsertionGuard g(*this);
setInsertionPoint(op);
SmallVector<Value> replacements;
+ bool materializationSucceeded = true;
for (auto [ofr, resultType] :
llvm::zip_equal(foldResults, op->getResultTypes())) {
if (auto value = ofr.dyn_cast<Value>()) {
@@ -462,18 +463,41 @@ bool GreedyPatternRewriteDriver::processWorklist() {
// Materialize Attributes as SSA values.
Operation *constOp = op->getDialect()->materializeConstant(
*this, ofr.get<Attribute>(), resultType, op->getLoc());
+
+ if (!constOp) {
+ // If materialization fails, clean up any operations generated for
+ // the previous results.
+ llvm::SmallDenseSet<Operation *> replacementOps;
+ for (Value replacement : replacements) {
+ assert(replacement.use_empty() &&
+ "folder reused existing op for one result but constant "
+ "materialization failed for another result");
+ replacementOps.insert(replacement.getDefiningOp());
+ }
+ for (Operation *op : replacementOps) {
+ eraseOp(op);
+ }
+
+ materializationSucceeded = false;
+ break;
+ }
+
assert(constOp->hasTrait<OpTrait::ConstantLike>() &&
"materializeConstant produced op that is not a ConstantLike");
assert(constOp->getResultTypes()[0] == resultType &&
"materializeConstant produced incorrect result type");
replacements.push_back(constOp->getResult(0));
}
- replaceOp(op, replacements);
+
+ if (materializationSucceeded) {
+ replaceOp(op, replacements);
+ changed = true;
#if MLIR_ENABLE_EXPENSIVE_PATTERN_API_CHECKS
- if (config.scope && failed(verify(config.scope->getParentOp())))
- llvm::report_fatal_error("IR failed to verify after folding");
+ if (config.scope && failed(verify(config.scope->getParentOp())))
+ llvm::report_fatal_error("IR failed to verify after folding");
#endif // MLIR_ENABLE_EXPENSIVE_PATTERN_API_CHECKS
- continue;
+ continue;
+ }
}
}
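
With this change the driver only marks the fold as a successful change and replaces the op when constant materialization succeeded for every folded result; otherwise the constants created so far are erased and the op is left untouched. The behavior is driven by the dialect's materializeConstant hook; a hedged C++ sketch of such a hook (the dialect name and the IntegerAttr restriction are assumptions for illustration):

    Operation *MyDialect::materializeConstant(OpBuilder &builder,
                                              Attribute value, Type type,
                                              Location loc) {
      // Returning nullptr signals that this attribute/type combination cannot
      // be materialized; the driver then cleans up constants it already
      // created for the other fold results instead of asserting.
      if (!llvm::isa<IntegerAttr>(value))
        return nullptr;
      return arith::ConstantOp::materialize(builder, value, type, loc);
    }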
diff --git a/mlir/python/mlir/ir.py b/mlir/python/mlir/ir.py
index 6d21da3b4179..eb7f035fec7c 100644
--- a/mlir/python/mlir/ir.py
+++ b/mlir/python/mlir/ir.py
@@ -263,6 +263,11 @@ def _typeArrayAttr(x, context):
return _arrayAttr([TypeAttr.get(t, context=context) for t in x], context)
+@register_attribute_builder("MemRefTypeAttr")
+def _memref_type_attr(x, context):
+ return _typeAttr(x, context)
+
+
try:
import numpy as np
diff --git a/mlir/test/Conversion/GPUToNVVM/gpu-to-nvvm.mlir b/mlir/test/Conversion/GPUToNVVM/gpu-to-nvvm.mlir
index 20a200e812c1..c7f1d4f124c1 100644
--- a/mlir/test/Conversion/GPUToNVVM/gpu-to-nvvm.mlir
+++ b/mlir/test/Conversion/GPUToNVVM/gpu-to-nvvm.mlir
@@ -627,6 +627,15 @@ gpu.module @test_module_31 {
}
}
+gpu.module @gpumodule {
+// CHECK-LABEL: func @kernel_with_block_size()
+// CHECK: attributes {gpu.kernel, gpu.known_block_size = array<i32: 128, 1, 1>, nvvm.kernel, nvvm.maxntid = [128 : i32, 1 : i32, 1 : i32]}
+ gpu.func @kernel_with_block_size() kernel attributes {gpu.known_block_size = array<i32: 128, 1, 1>} {
+ gpu.return
+ }
+}
+
+
module attributes {transform.with_named_sequence} {
transform.named_sequence @__transform_main(%toplevel_module: !transform.any_op {transform.readonly}) {
%gpu_module = transform.structured.match ops{["gpu.module"]} in %toplevel_module
diff --git a/mlir/test/Conversion/MemRefToSPIRV/alloc.mlir b/mlir/test/Conversion/MemRefToSPIRV/alloc.mlir
index 2a5f81544f20..f4fbfdf01196 100644
--- a/mlir/test/Conversion/MemRefToSPIRV/alloc.mlir
+++ b/mlir/test/Conversion/MemRefToSPIRV/alloc.mlir
@@ -48,8 +48,8 @@ module attributes {
// CHECK: %{{.+}} = spirv.Load "Workgroup" %[[PTR]] : i32
// CHECK: %[[LOC:.+]] = spirv.SDiv
// CHECK: %[[PTR:.+]] = spirv.AccessChain %[[VAR]][%{{.+}}, %[[LOC]]]
-// CHECK: %{{.+}} = spirv.AtomicAnd "Workgroup" "AcquireRelease" %[[PTR]], %{{.+}} : !spirv.ptr<i32, Workgroup>
-// CHECK: %{{.+}} = spirv.AtomicOr "Workgroup" "AcquireRelease" %[[PTR]], %{{.+}} : !spirv.ptr<i32, Workgroup>
+// CHECK: %{{.+}} = spirv.AtomicAnd <Workgroup> <AcquireRelease> %[[PTR]], %{{.+}} : !spirv.ptr<i32, Workgroup>
+// CHECK: %{{.+}} = spirv.AtomicOr <Workgroup> <AcquireRelease> %[[PTR]], %{{.+}} : !spirv.ptr<i32, Workgroup>
// -----
diff --git a/mlir/test/Conversion/MemRefToSPIRV/atomic.mlir b/mlir/test/Conversion/MemRefToSPIRV/atomic.mlir
index f72e12611a97..4729cccfb622 100644
--- a/mlir/test/Conversion/MemRefToSPIRV/atomic.mlir
+++ b/mlir/test/Conversion/MemRefToSPIRV/atomic.mlir
@@ -6,7 +6,7 @@ module attributes {spirv.target_env = #spirv.target_env<#spirv.vce<v1.3, [Shader
// CHECK-SAME: (%[[VAL:.+]]: i32,
func.func @atomic_addi_storage_buffer(%value: i32, %memref: memref<2x3x4xi32, #spirv.storage_class<StorageBuffer>>, %i0: index, %i1: index, %i2: index) -> i32 {
// CHECK: %[[AC:.+]] = spirv.AccessChain
- // CHECK: %[[ATOMIC:.+]] = spirv.AtomicIAdd "Device" "AcquireRelease" %[[AC]], %[[VAL]] : !spirv.ptr<i32, StorageBuffer>
+ // CHECK: %[[ATOMIC:.+]] = spirv.AtomicIAdd <Device> <AcquireRelease> %[[AC]], %[[VAL]] : !spirv.ptr<i32, StorageBuffer>
// CHECK: return %[[ATOMIC]]
%0 = memref.atomic_rmw "addi" %value, %memref[%i0, %i1, %i2] : (i32, memref<2x3x4xi32, #spirv.storage_class<StorageBuffer>>) -> i32
return %0: i32
@@ -16,7 +16,7 @@ func.func @atomic_addi_storage_buffer(%value: i32, %memref: memref<2x3x4xi32, #s
// CHECK-SAME: (%[[VAL:.+]]: i32,
func.func @atomic_maxs_workgroup(%value: i32, %memref: memref<2x3x4xi32, #spirv.storage_class<Workgroup>>, %i0: index, %i1: index, %i2: index) -> i32 {
// CHECK: %[[AC:.+]] = spirv.AccessChain
- // CHECK: %[[ATOMIC:.+]] = spirv.AtomicSMax "Workgroup" "AcquireRelease" %[[AC]], %[[VAL]] : !spirv.ptr<i32, Workgroup>
+ // CHECK: %[[ATOMIC:.+]] = spirv.AtomicSMax <Workgroup> <AcquireRelease> %[[AC]], %[[VAL]] : !spirv.ptr<i32, Workgroup>
// CHECK: return %[[ATOMIC]]
%0 = memref.atomic_rmw "maxs" %value, %memref[%i0, %i1, %i2] : (i32, memref<2x3x4xi32, #spirv.storage_class<Workgroup>>) -> i32
return %0: i32
@@ -26,7 +26,7 @@ func.func @atomic_maxs_workgroup(%value: i32, %memref: memref<2x3x4xi32, #spirv.
// CHECK-SAME: (%[[VAL:.+]]: i32,
func.func @atomic_maxu_storage_buffer(%value: i32, %memref: memref<2x3x4xi32, #spirv.storage_class<StorageBuffer>>, %i0: index, %i1: index, %i2: index) -> i32 {
// CHECK: %[[AC:.+]] = spirv.AccessChain
- // CHECK: %[[ATOMIC:.+]] = spirv.AtomicUMax "Device" "AcquireRelease" %[[AC]], %[[VAL]] : !spirv.ptr<i32, StorageBuffer>
+ // CHECK: %[[ATOMIC:.+]] = spirv.AtomicUMax <Device> <AcquireRelease> %[[AC]], %[[VAL]] : !spirv.ptr<i32, StorageBuffer>
// CHECK: return %[[ATOMIC]]
%0 = memref.atomic_rmw "maxu" %value, %memref[%i0, %i1, %i2] : (i32, memref<2x3x4xi32, #spirv.storage_class<StorageBuffer>>) -> i32
return %0: i32
@@ -36,7 +36,7 @@ func.func @atomic_maxu_storage_buffer(%value: i32, %memref: memref<2x3x4xi32, #s
// CHECK-SAME: (%[[VAL:.+]]: i32,
func.func @atomic_mins_workgroup(%value: i32, %memref: memref<2x3x4xi32, #spirv.storage_class<Workgroup>>, %i0: index, %i1: index, %i2: index) -> i32 {
// CHECK: %[[AC:.+]] = spirv.AccessChain
- // CHECK: %[[ATOMIC:.+]] = spirv.AtomicSMin "Workgroup" "AcquireRelease" %[[AC]], %[[VAL]] : !spirv.ptr<i32, Workgroup>
+ // CHECK: %[[ATOMIC:.+]] = spirv.AtomicSMin <Workgroup> <AcquireRelease> %[[AC]], %[[VAL]] : !spirv.ptr<i32, Workgroup>
// CHECK: return %[[ATOMIC]]
%0 = memref.atomic_rmw "mins" %value, %memref[%i0, %i1, %i2] : (i32, memref<2x3x4xi32, #spirv.storage_class<Workgroup>>) -> i32
return %0: i32
@@ -46,7 +46,7 @@ func.func @atomic_mins_workgroup(%value: i32, %memref: memref<2x3x4xi32, #spirv.
// CHECK-SAME: (%[[VAL:.+]]: i32,
func.func @atomic_minu_storage_buffer(%value: i32, %memref: memref<2x3x4xi32, #spirv.storage_class<StorageBuffer>>, %i0: index, %i1: index, %i2: index) -> i32 {
// CHECK: %[[AC:.+]] = spirv.AccessChain
- // CHECK: %[[ATOMIC:.+]] = spirv.AtomicUMin "Device" "AcquireRelease" %[[AC]], %[[VAL]] : !spirv.ptr<i32, StorageBuffer>
+ // CHECK: %[[ATOMIC:.+]] = spirv.AtomicUMin <Device> <AcquireRelease> %[[AC]], %[[VAL]] : !spirv.ptr<i32, StorageBuffer>
// CHECK: return %[[ATOMIC]]
%0 = memref.atomic_rmw "minu" %value, %memref[%i0, %i1, %i2] : (i32, memref<2x3x4xi32, #spirv.storage_class<StorageBuffer>>) -> i32
return %0: i32
@@ -56,7 +56,7 @@ func.func @atomic_minu_storage_buffer(%value: i32, %memref: memref<2x3x4xi32, #s
// CHECK-SAME: (%[[VAL:.+]]: i32,
func.func @atomic_ori_workgroup(%value: i32, %memref: memref<2x3x4xi32, #spirv.storage_class<Workgroup>>, %i0: index, %i1: index, %i2: index) -> i32 {
// CHECK: %[[AC:.+]] = spirv.AccessChain
- // CHECK: %[[ATOMIC:.+]] = spirv.AtomicOr "Workgroup" "AcquireRelease" %[[AC]], %[[VAL]] : !spirv.ptr<i32, Workgroup>
+ // CHECK: %[[ATOMIC:.+]] = spirv.AtomicOr <Workgroup> <AcquireRelease> %[[AC]], %[[VAL]] : !spirv.ptr<i32, Workgroup>
// CHECK: return %[[ATOMIC]]
%0 = memref.atomic_rmw "ori" %value, %memref[%i0, %i1, %i2] : (i32, memref<2x3x4xi32, #spirv.storage_class<Workgroup>>) -> i32
return %0: i32
@@ -66,7 +66,7 @@ func.func @atomic_ori_workgroup(%value: i32, %memref: memref<2x3x4xi32, #spirv.s
// CHECK-SAME: (%[[VAL:.+]]: i32,
func.func @atomic_andi_storage_buffer(%value: i32, %memref: memref<2x3x4xi32, #spirv.storage_class<StorageBuffer>>, %i0: index, %i1: index, %i2: index) -> i32 {
// CHECK: %[[AC:.+]] = spirv.AccessChain
- // CHECK: %[[ATOMIC:.+]] = spirv.AtomicAnd "Device" "AcquireRelease" %[[AC]], %[[VAL]] : !spirv.ptr<i32, StorageBuffer>
+ // CHECK: %[[ATOMIC:.+]] = spirv.AtomicAnd <Device> <AcquireRelease> %[[AC]], %[[VAL]] : !spirv.ptr<i32, StorageBuffer>
// CHECK: return %[[ATOMIC]]
%0 = memref.atomic_rmw "andi" %value, %memref[%i0, %i1, %i2] : (i32, memref<2x3x4xi32, #spirv.storage_class<StorageBuffer>>) -> i32
return %0: i32
diff --git a/mlir/test/Conversion/MemRefToSPIRV/bitwidth-emulation.mlir b/mlir/test/Conversion/MemRefToSPIRV/bitwidth-emulation.mlir
index 928bd82c2cb2..470c8531e2e0 100644
--- a/mlir/test/Conversion/MemRefToSPIRV/bitwidth-emulation.mlir
+++ b/mlir/test/Conversion/MemRefToSPIRV/bitwidth-emulation.mlir
@@ -122,8 +122,8 @@ func.func @store_i1(%arg0: memref<i1, #spirv.storage_class<StorageBuffer>>, %val
// CHECK: %[[STORE_VAL:.+]] = spirv.ShiftLeftLogical %[[CASTED_ARG1]], %[[OFFSET]] : i32, i32
// CHECK: %[[ACCESS_IDX:.+]] = spirv.SDiv %[[ZERO]], %[[FOUR]] : i32
// CHECK: %[[PTR:.+]] = spirv.AccessChain %[[ARG0_CAST]][%[[ZERO]], %[[ACCESS_IDX]]]
- // CHECK: spirv.AtomicAnd "Device" "AcquireRelease" %[[PTR]], %[[MASK]]
- // CHECK: spirv.AtomicOr "Device" "AcquireRelease" %[[PTR]], %[[STORE_VAL]]
+ // CHECK: spirv.AtomicAnd <Device> <AcquireRelease> %[[PTR]], %[[MASK]]
+ // CHECK: spirv.AtomicOr <Device> <AcquireRelease> %[[PTR]], %[[STORE_VAL]]
memref.store %value, %arg0[] : memref<i1, #spirv.storage_class<StorageBuffer>>
return
}
@@ -147,8 +147,8 @@ func.func @store_i8(%arg0: memref<i8, #spirv.storage_class<StorageBuffer>>, %val
// CHECK: %[[STORE_VAL:.+]] = spirv.ShiftLeftLogical %[[CLAMPED_VAL]], %[[OFFSET]] : i32, i32
// CHECK: %[[ACCESS_IDX:.+]] = spirv.SDiv %[[ZERO]], %[[FOUR]] : i32
// CHECK: %[[PTR:.+]] = spirv.AccessChain %[[ARG0_CAST]][%[[ZERO]], %[[ACCESS_IDX]]]
- // CHECK: spirv.AtomicAnd "Device" "AcquireRelease" %[[PTR]], %[[MASK]]
- // CHECK: spirv.AtomicOr "Device" "AcquireRelease" %[[PTR]], %[[STORE_VAL]]
+ // CHECK: spirv.AtomicAnd <Device> <AcquireRelease> %[[PTR]], %[[MASK]]
+ // CHECK: spirv.AtomicOr <Device> <AcquireRelease> %[[PTR]], %[[STORE_VAL]]
// INDEX64-DAG: %[[ARG1_CAST:.+]] = builtin.unrealized_conversion_cast %[[ARG1]] : i8 to i32
// INDEX64-DAG: %[[ARG0_CAST:.+]] = builtin.unrealized_conversion_cast %[[ARG0]]
@@ -164,8 +164,8 @@ func.func @store_i8(%arg0: memref<i8, #spirv.storage_class<StorageBuffer>>, %val
// INDEX64: %[[STORE_VAL:.+]] = spirv.ShiftLeftLogical %[[CLAMPED_VAL]], %[[OFFSET]] : i32, i64
// INDEX64: %[[ACCESS_IDX:.+]] = spirv.SDiv %[[ZERO]], %[[FOUR]] : i64
// INDEX64: %[[PTR:.+]] = spirv.AccessChain %[[ARG0_CAST]][%[[ZERO]], %[[ACCESS_IDX]]] : {{.+}}, i64, i64
- // INDEX64: spirv.AtomicAnd "Device" "AcquireRelease" %[[PTR]], %[[MASK]]
- // INDEX64: spirv.AtomicOr "Device" "AcquireRelease" %[[PTR]], %[[STORE_VAL]]
+ // INDEX64: spirv.AtomicAnd <Device> <AcquireRelease> %[[PTR]], %[[MASK]]
+ // INDEX64: spirv.AtomicOr <Device> <AcquireRelease> %[[PTR]], %[[STORE_VAL]]
memref.store %value, %arg0[] : memref<i8, #spirv.storage_class<StorageBuffer>>
return
}
@@ -191,8 +191,8 @@ func.func @store_i16(%arg0: memref<10xi16, #spirv.storage_class<StorageBuffer>>,
// CHECK: %[[STORE_VAL:.+]] = spirv.ShiftLeftLogical %[[CLAMPED_VAL]], %[[OFFSET]] : i32, i32
// CHECK: %[[ACCESS_IDX:.+]] = spirv.SDiv %[[FLAT_IDX]], %[[TWO]] : i32
// CHECK: %[[PTR:.+]] = spirv.AccessChain %[[ARG0_CAST]][%[[ZERO]], %[[ACCESS_IDX]]]
- // CHECK: spirv.AtomicAnd "Device" "AcquireRelease" %[[PTR]], %[[MASK]]
- // CHECK: spirv.AtomicOr "Device" "AcquireRelease" %[[PTR]], %[[STORE_VAL]]
+ // CHECK: spirv.AtomicAnd <Device> <AcquireRelease> %[[PTR]], %[[MASK]]
+ // CHECK: spirv.AtomicOr <Device> <AcquireRelease> %[[PTR]], %[[STORE_VAL]]
memref.store %value, %arg0[%index] : memref<10xi16, #spirv.storage_class<StorageBuffer>>
return
}
@@ -262,8 +262,8 @@ func.func @store_i4(%arg0: memref<?xi4, #spirv.storage_class<StorageBuffer>>, %v
// CHECK: %[[STORE_VAL:.+]] = spirv.ShiftLeftLogical %[[CLAMPED_VAL]], %[[BITS]] : i32, i32
// CHECK: %[[ACCESS_INDEX:.+]] = spirv.SDiv %[[OFFSET]], %[[EIGHT]] : i32
// CHECK: %[[PTR:.+]] = spirv.AccessChain %{{.+}}[%[[ZERO]], %[[ACCESS_INDEX]]]
- // CHECK: spirv.AtomicAnd "Device" "AcquireRelease" %[[PTR]], %[[MASK2]]
- // CHECK: spirv.AtomicOr "Device" "AcquireRelease" %[[PTR]], %[[STORE_VAL]]
+ // CHECK: spirv.AtomicAnd <Device> <AcquireRelease> %[[PTR]], %[[MASK2]]
+ // CHECK: spirv.AtomicOr <Device> <AcquireRelease> %[[PTR]], %[[STORE_VAL]]
memref.store %value, %arg0[%i] : memref<?xi4, #spirv.storage_class<StorageBuffer>>
return
}
@@ -338,8 +338,8 @@ func.func @store_i8(%arg0: memref<i8, #spirv.storage_class<StorageBuffer>>, %val
// CHECK: %[[STORE_VAL:.+]] = spirv.ShiftLeftLogical %[[CLAMPED_VAL]], %[[OFFSET]] : i32, i32
// CHECK: %[[ACCESS_IDX:.+]] = spirv.SDiv %[[ZERO]], %[[FOUR]] : i32
// CHECK: %[[PTR:.+]] = spirv.AccessChain %[[ARG0_CAST]][%[[ZERO]], %[[ACCESS_IDX]]]
- // CHECK: spirv.AtomicAnd "Device" "AcquireRelease" %[[PTR]], %[[MASK]]
- // CHECK: spirv.AtomicOr "Device" "AcquireRelease" %[[PTR]], %[[STORE_VAL]]
+ // CHECK: spirv.AtomicAnd <Device> <AcquireRelease> %[[PTR]], %[[MASK]]
+ // CHECK: spirv.AtomicOr <Device> <AcquireRelease> %[[PTR]], %[[STORE_VAL]]
// INDEX64-DAG: %[[ARG0_CAST:.+]] = builtin.unrealized_conversion_cast %[[ARG0]]
// INDEX64: %[[ZERO:.+]] = spirv.Constant 0 : i64
@@ -355,8 +355,8 @@ func.func @store_i8(%arg0: memref<i8, #spirv.storage_class<StorageBuffer>>, %val
// INDEX64: %[[STORE_VAL:.+]] = spirv.ShiftLeftLogical %[[CLAMPED_VAL]], %[[OFFSET]] : i32, i64
// INDEX64: %[[ACCESS_IDX:.+]] = spirv.SDiv %[[ZERO]], %[[FOUR]] : i64
// INDEX64: %[[PTR:.+]] = spirv.AccessChain %[[ARG0_CAST]][%[[ZERO]], %[[ACCESS_IDX]]] : {{.+}}, i64, i64
- // INDEX64: spirv.AtomicAnd "Device" "AcquireRelease" %[[PTR]], %[[MASK]]
- // INDEX64: spirv.AtomicOr "Device" "AcquireRelease" %[[PTR]], %[[STORE_VAL]]
+ // INDEX64: spirv.AtomicAnd <Device> <AcquireRelease> %[[PTR]], %[[MASK]]
+ // INDEX64: spirv.AtomicOr <Device> <AcquireRelease> %[[PTR]], %[[STORE_VAL]]
memref.store %value, %arg0[] : memref<i8, #spirv.storage_class<StorageBuffer>>
return
}
diff --git a/mlir/test/Conversion/SCFToGPU/no_blocks_no_threads.mlir b/mlir/test/Conversion/SCFToGPU/no_blocks_no_threads.mlir
index a058365a104a..79eef8ae7eb8 100644
--- a/mlir/test/Conversion/SCFToGPU/no_blocks_no_threads.mlir
+++ b/mlir/test/Conversion/SCFToGPU/no_blocks_no_threads.mlir
@@ -17,8 +17,8 @@ func.func @one_d_loop(%A : memref<?xf32>, %B : memref<?xf32>) {
// CHECK-BLOCKS-NEXT: %{{.*}} = arith.constant 1 : index
// CHECK-BLOCKS-NEXT: %[[ONE:.*]] = arith.constant 1 : index
- // CHECK-THREADS-NEXT: gpu.launch blocks(%[[B0:.*]], %[[B1:.*]], %[[B2:.*]]) in (%{{.*}} = %[[ONE]], %{{.*}} = %[[ONE]], %{{.*}}0 = %[[ONE]]) threads(%[[T0:.*]], %[[T1:.*]], %[[T2:.*]]) in (%{{.*}} = %[[BOUND]], %{{.*}} = %[[ONE]], %{{.*}} = %[[ONE]])
- // CHECK-BLOCKS-NEXT: gpu.launch blocks(%[[B0:.*]], %[[B1:.*]], %[[B2:.*]]) in (%{{.*}} = %[[BOUND]], %{{.*}} = %[[ONE]], %{{.*}}0 = %[[ONE]]) threads(%[[T0:.*]], %[[T1:.*]], %[[T2:.*]]) in (%{{.*}} = %[[ONE]], %{{.*}} = %[[ONE]], %{{.*}} = %[[ONE]])
+ // CHECK-THREADS-NEXT: gpu.launch blocks(%[[B0:.*]], %[[B1:.*]], %[[B2:.*]]) in (%{{.*}} = %[[ONE]], %{{.*}} = %[[ONE]], %{{.*}} = %[[ONE]]) threads(%[[T0:.*]], %[[T1:.*]], %[[T2:.*]]) in (%{{.*}} = %[[BOUND]], %{{.*}} = %[[ONE]], %{{.*}} = %[[ONE]])
+ // CHECK-BLOCKS-NEXT: gpu.launch blocks(%[[B0:.*]], %[[B1:.*]], %[[B2:.*]]) in (%{{.*}} = %[[BOUND]], %{{.*}} = %[[ONE]], %{{.*}} = %[[ONE]]) threads(%[[T0:.*]], %[[T1:.*]], %[[T2:.*]]) in (%{{.*}} = %[[ONE]], %{{.*}} = %[[ONE]], %{{.*}} = %[[ONE]])
affine.for %i = 0 to 42 {
// CHECK-THREADS-NEXT: %[[INDEX:.*]] = arith.addi %{{.*}}, %[[T0]]
// CHECK-THREADS-NEXT: memref.load %{{.*}}[%[[INDEX]]]
diff --git a/mlir/test/Conversion/SPIRVToLLVM/spirv-storage-class-mapping.mlir b/mlir/test/Conversion/SPIRVToLLVM/spirv-storage-class-mapping.mlir
index b3991cbdbe8a..b9c56a3fcffd 100644
--- a/mlir/test/Conversion/SPIRVToLLVM/spirv-storage-class-mapping.mlir
+++ b/mlir/test/Conversion/SPIRVToLLVM/spirv-storage-class-mapping.mlir
@@ -81,7 +81,7 @@ spirv.func @pointerIncomingRayPayloadKHR(!spirv.ptr<i1, IncomingRayPayloadKHR>)
spirv.func @pointerShaderRecordBufferKHR(!spirv.ptr<i1, ShaderRecordBufferKHR>) "None"
// CHECK-ALL: llvm.func @pointerPhysicalStorageBuffer(!llvm.ptr)
-spirv.func @pointerPhysicalStorageBuffer(!spirv.ptr<i1, PhysicalStorageBuffer>) "None"
+spirv.func @pointerPhysicalStorageBuffer(!spirv.ptr<i1, PhysicalStorageBuffer> { spirv.decoration = #spirv.decoration<Aliased> }) "None"
// CHECK-ALL: llvm.func @pointerCodeSectionINTEL(!llvm.ptr)
spirv.func @pointerCodeSectionINTEL(!spirv.ptr<i1, CodeSectionINTEL>) "None"
diff --git a/mlir/test/Dialect/GPU/invalid.mlir b/mlir/test/Dialect/GPU/invalid.mlir
index 8a34d6432607..4d3a898fdd15 100644
--- a/mlir/test/Dialect/GPU/invalid.mlir
+++ b/mlir/test/Dialect/GPU/invalid.mlir
@@ -16,7 +16,7 @@ func.func @no_region_attrs(%sz : index) {
^bb1(%bx: index, %by: index, %bz: index,
%tx: index, %ty: index, %tz: index):
gpu.terminator
- }) {operandSegmentSizes = array<i32: 0, 1, 1, 1, 1, 1, 1, 0>} : (index, index, index, index, index, index) -> ()
+ }) {operandSegmentSizes = array<i32: 0, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0>} : (index, index, index, index, index, index) -> ()
return
}
diff --git a/mlir/test/Dialect/GPU/outlining.mlir b/mlir/test/Dialect/GPU/outlining.mlir
index 8020f6dfa65b..601add9a9f91 100644
--- a/mlir/test/Dialect/GPU/outlining.mlir
+++ b/mlir/test/Dialect/GPU/outlining.mlir
@@ -407,3 +407,77 @@ func.func @launch_memory_attributions_1(%arg0 : memref<*xf32>) {
}
// CHECK-DL-LABEL: gpu.module @launch_memory_attributions_1_kernel attributes {dlti.dl_spec = #dlti.dl_spec<#dlti.dl_entry<index, 32 : i32>>}
+
+// -----
+// CHECK: module attributes {gpu.container_module}
+
+// CHECK-LABEL: func @launch_cluster()
+func.func @launch_cluster() {
+ // CHECK: %[[ARG0:.*]] = "op"() : () -> f32
+ %0 = "op"() : () -> (f32)
+ // CHECK: %[[ARG1:.*]] = "op"() : () -> memref<?xf32, 1>
+ %1 = "op"() : () -> (memref<?xf32, 1>)
+ // CHECK: %[[CDIMX:.*]] = arith.constant 1
+ %cDimX = arith.constant 1 : index
+ // CHECK: %[[CDIMY:.*]] = arith.constant 2
+ %cDimY = arith.constant 2 : index
+ // CHECK: %[[CDIMZ:.*]] = arith.constant 1
+ %cDimZ = arith.constant 1 : index
+ // CHECK: %[[GDIMX:.*]] = arith.constant 8
+ %gDimX = arith.constant 8 : index
+ // CHECK: %[[GDIMY:.*]] = arith.constant 12
+ %gDimY = arith.constant 12 : index
+ // CHECK: %[[GDIMZ:.*]] = arith.constant 16
+ %gDimZ = arith.constant 16 : index
+ // CHECK: %[[BDIMX:.*]] = arith.constant 20
+ %bDimX = arith.constant 20 : index
+ // CHECK: %[[BDIMY:.*]] = arith.constant 24
+ %bDimY = arith.constant 24 : index
+ // CHECK: %[[BDIMZ:.*]] = arith.constant 28
+ %bDimZ = arith.constant 28 : index
+
+ // CHECK: gpu.launch_func @launch_cluster_kernel::@launch_cluster_kernel clusters in (%[[CDIMX]], %[[CDIMY]], %[[CDIMZ]]) blocks in (%[[GDIMX]], %[[GDIMY]], %[[GDIMZ]]) threads in (%[[BDIMX]], %[[BDIMY]], %[[BDIMZ]]) args(%[[ARG0]] : f32, %[[ARG1]] : memref<?xf32, 1>)
+ // CHECK-NOT: gpu.launch blocks
+ gpu.launch clusters(%cx, %cy, %cz) in (%cluster_x = %cDimX, %cluster_y = %cDimY,
+ %cluster_z = %cDimZ)
+ blocks(%bx, %by, %bz) in (%grid_x = %gDimX, %grid_y = %gDimY,
+ %grid_z = %gDimZ)
+ threads(%tx, %ty, %tz) in (%block_x = %bDimX, %block_y = %bDimY,
+ %block_z = %bDimZ) {
+ "use"(%0): (f32) -> ()
+ "some_op"(%cx, %bx, %block_x) : (index, index, index) -> ()
+ %42 = memref.load %1[%tx] : memref<?xf32, 1>
+ gpu.terminator
+ }
+ return
+}
+
+// CHECK-LABEL: gpu.module @launch_cluster_kernel
+// CHECK-NEXT: gpu.func @launch_cluster_kernel
+// CHECK-SAME: (%[[KERNEL_ARG0:.*]]: f32, %[[KERNEL_ARG1:.*]]: memref<?xf32, 1>)
+// CHECK-SAME: gpu.known_block_size = array<i32: 20, 24, 28>
+// CHECK-SAME: gpu.known_grid_size = array<i32: 8, 12, 16>
+// CHECK-NEXT: %[[BID:.*]] = gpu.block_id x
+// CHECK-NEXT: = gpu.block_id y
+// CHECK-NEXT: = gpu.block_id z
+// CHECK-NEXT: %[[TID:.*]] = gpu.thread_id x
+// CHECK-NEXT: = gpu.thread_id y
+// CHECK-NEXT: = gpu.thread_id z
+// CHECK-NEXT: = gpu.grid_dim x
+// CHECK-NEXT: = gpu.grid_dim y
+// CHECK-NEXT: = gpu.grid_dim z
+// CHECK-NEXT: %[[BDIM:.*]] = gpu.block_dim x
+// CHECK-NEXT: = gpu.block_dim y
+// CHECK-NEXT: = gpu.block_dim z
+// CHECK-NEXT: %[[CID:.*]] = gpu.cluster_id x
+// CHECK-NEXT: = gpu.cluster_id y
+// CHECK-NEXT: = gpu.cluster_id z
+// CHECK-NEXT: %[[CDIM:.*]] = gpu.cluster_dim x
+// CHECK-NEXT: = gpu.cluster_dim y
+// CHECK-NEXT: = gpu.cluster_dim z
+// CHECK-NEXT: cf.br ^[[BLOCK:.*]]
+// CHECK-NEXT: ^[[BLOCK]]:
+// CHECK-NEXT: "use"(%[[KERNEL_ARG0]]) : (f32) -> ()
+// CHECK-NEXT: "some_op"(%[[CID]], %[[BID]], %[[BDIM]]) : (index, index, index) -> ()
+// CHECK-NEXT: = memref.load %[[KERNEL_ARG1]][%[[TID]]] : memref<?xf32, 1>
+
diff --git a/mlir/test/Dialect/LLVMIR/add-debuginfo-func-scope.mlir b/mlir/test/Dialect/LLVMIR/add-debuginfo-func-scope.mlir
index eff4c55e6bc0..f63132d42ab7 100644
--- a/mlir/test/Dialect/LLVMIR/add-debuginfo-func-scope.mlir
+++ b/mlir/test/Dialect/LLVMIR/add-debuginfo-func-scope.mlir
@@ -1,17 +1,26 @@
-// RUN: mlir-opt %s --pass-pipeline="builtin.module(llvm.func(ensure-debug-info-scope-on-llvm-func))" --split-input-file --mlir-print-debuginfo | FileCheck %s
-
-
+// RUN: mlir-opt %s --pass-pipeline="builtin.module(ensure-debug-info-scope-on-llvm-func)" --split-input-file --mlir-print-debuginfo | FileCheck %s
// CHECK-LABEL: llvm.func @func_no_debug()
// CHECK: llvm.return loc(#loc
// CHECK: loc(#loc[[LOC:[0-9]+]])
// CHECK: #di_file = #llvm.di_file<"<unknown>" in "">
-// CHECK: #di_subprogram = #llvm.di_subprogram<compileUnit = #di_compile_unit, scope = #di_file, name = "func_no_debug", linkageName = "func_no_debug", file = #di_file, line = 1, scopeLine = 1, subprogramFlags = "Definition|Optimized", type = #di_subroutine_type>
+// CHECK: #di_subprogram = #llvm.di_subprogram<id = distinct[{{.*}}]<>, compileUnit = #di_compile_unit, scope = #di_file, name = "func_no_debug", linkageName = "func_no_debug", file = #di_file, line = 1, scopeLine = 1, subprogramFlags = "Definition|Optimized", type = #di_subroutine_type>
// CHECK: #loc[[LOC]] = loc(fused<#di_subprogram>
-llvm.func @func_no_debug() {
- llvm.return loc(unknown)
+module {
+ llvm.func @func_no_debug() {
+ llvm.return loc(unknown)
+ } loc(unknown)
} loc(unknown)
+// -----
+
+// Test that the declaration's subprogram is not made distinct.
+// CHECK-LABEL: llvm.func @func_decl_no_debug()
+// CHECK: #di_subprogram = #llvm.di_subprogram<
+// CHECK-NOT: id = distinct
+module {
+ llvm.func @func_decl_no_debug() loc(unknown)
+} loc(unknown)
// -----
@@ -20,7 +29,7 @@ llvm.func @func_no_debug() {
// CHECK: llvm.return loc(#loc
// CHECK: loc(#loc[[LOC:[0-9]+]])
// CHECK: #di_file = #llvm.di_file<"<unknown>" in "">
-// CHECK: #di_subprogram = #llvm.di_subprogram<compileUnit = #di_compile_unit, scope = #di_file, name = "func_with_debug", linkageName = "func_with_debug", file = #di_file, line = 42, scopeLine = 42, subprogramFlags = "Definition|Optimized", type = #di_subroutine_type>
+// CHECK: #di_subprogram = #llvm.di_subprogram<id = distinct[{{.*}}]<>, compileUnit = #di_compile_unit, scope = #di_file, name = "func_with_debug", linkageName = "func_with_debug", file = #di_file, line = 42, scopeLine = 42, subprogramFlags = "Definition|Optimized", type = #di_subroutine_type>
// CHECK: #loc[[LOC]] = loc(fused<#di_subprogram>
module {
llvm.func @func_with_debug() {
@@ -31,8 +40,8 @@ module {
#di_subroutine_type = #llvm.di_subroutine_type<callingConvention = DW_CC_normal>
#loc = loc("foo":0:0)
#loc1 = loc(unknown)
-#di_compile_unit = #llvm.di_compile_unit<sourceLanguage = DW_LANG_C, file = #di_file, producer = "MLIR", isOptimized = true, emissionKind = LineTablesOnly>
-#di_subprogram = #llvm.di_subprogram<compileUnit = #di_compile_unit, scope = #di_file, name = "func_with_debug", linkageName = "func_with_debug", file = #di_file, line = 42, scopeLine = 42, subprogramFlags = "Definition|Optimized", type = #di_subroutine_type>
+#di_compile_unit = #llvm.di_compile_unit<id = distinct[0]<>, sourceLanguage = DW_LANG_C, file = #di_file, producer = "MLIR", isOptimized = true, emissionKind = LineTablesOnly>
+#di_subprogram = #llvm.di_subprogram<id = distinct[1]<>, compileUnit = #di_compile_unit, scope = #di_file, name = "func_with_debug", linkageName = "func_with_debug", file = #di_file, line = 42, scopeLine = 42, subprogramFlags = "Definition|Optimized", type = #di_subroutine_type>
#loc2 = loc(fused<#di_subprogram>[#loc1])
// -----
@@ -45,8 +54,8 @@ module {
// CHECK-DAG: #[[DI_FILE_MODULE:.+]] = #llvm.di_file<"bar.mlir" in "baz">
// CHECK-DAG: #[[DI_FILE_FUNC:.+]] = #llvm.di_file<"file.mlir" in "">
// CHECK-DAG: #loc[[FUNCFILELOC:[0-9]+]] = loc("file.mlir":9:8)
-// CHECK-DAG: #di_compile_unit = #llvm.di_compile_unit<sourceLanguage = DW_LANG_C, file = #[[DI_FILE_MODULE]], producer = "MLIR", isOptimized = true, emissionKind = LineTablesOnly>
-// CHECK-DAG: #di_subprogram = #llvm.di_subprogram<compileUnit = #di_compile_unit, scope = #[[DI_FILE_FUNC]], name = "propagate_compile_unit", linkageName = "propagate_compile_unit", file = #[[DI_FILE_FUNC]], line = 9, scopeLine = 8, subprogramFlags = "Definition|Optimized", type = #di_subroutine_type>
+// CHECK-DAG: #di_compile_unit = #llvm.di_compile_unit<id = distinct[{{.*}}]<>, sourceLanguage = DW_LANG_C, file = #[[DI_FILE_MODULE]], producer = "MLIR", isOptimized = true, emissionKind = LineTablesOnly>
+// CHECK-DAG: #di_subprogram = #llvm.di_subprogram<id = distinct[{{.*}}]<>, compileUnit = #di_compile_unit, scope = #[[DI_FILE_FUNC]], name = "propagate_compile_unit", linkageName = "propagate_compile_unit", file = #[[DI_FILE_FUNC]], line = 9, scopeLine = 8, subprogramFlags = "Definition|Optimized", type = #di_subroutine_type>
// CHECK-DAG: #loc[[MODULELOC]] = loc(fused<#di_compile_unit>[#loc])
// CHECK-DAG: #loc[[FUNCLOC]] = loc(fused<#di_subprogram>[#loc[[FUNCFILELOC]]
module {
@@ -55,5 +64,20 @@ module {
} loc("file.mlir":9:8)
} loc(#loc)
#di_file = #llvm.di_file<"bar.mlir" in "baz">
-#di_compile_unit = #llvm.di_compile_unit<sourceLanguage = DW_LANG_C, file = #di_file, producer = "MLIR", isOptimized = true, emissionKind = LineTablesOnly>
+#di_compile_unit = #llvm.di_compile_unit<id = distinct[0]<>, sourceLanguage = DW_LANG_C, file = #di_file, producer = "MLIR", isOptimized = true, emissionKind = LineTablesOnly>
#loc = loc(fused<#di_compile_unit>["foo.mlir":2:1])
+
+// -----
+
+// Test that only one compile unit is created.
+// CHECK-LABEL: module @multiple_funcs
+// CHECK: llvm.di_compile_unit
+// CHECK-NOT: llvm.di_compile_unit
+module @multiple_funcs {
+ llvm.func @func0() {
+ llvm.return loc(unknown)
+ } loc(unknown)
+ llvm.func @func1() {
+ llvm.return loc(unknown)
+ } loc(unknown)
+} loc(unknown)
diff --git a/mlir/test/Dialect/LLVMIR/call-location.mlir b/mlir/test/Dialect/LLVMIR/call-location.mlir
index 1b473743bcd0..4a98c9e8d720 100644
--- a/mlir/test/Dialect/LLVMIR/call-location.mlir
+++ b/mlir/test/Dialect/LLVMIR/call-location.mlir
@@ -2,7 +2,7 @@
#di_file = #llvm.di_file<"file.cpp" in "/folder/">
#di_compile_unit = #llvm.di_compile_unit<
- sourceLanguage = DW_LANG_C_plus_plus_14, file = #di_file,
+ id = distinct[0]<>, sourceLanguage = DW_LANG_C_plus_plus_14, file = #di_file,
isOptimized = true, emissionKind = Full
>
#di_subprogram = #llvm.di_subprogram<
diff --git a/mlir/test/Dialect/LLVMIR/debuginfo.mlir b/mlir/test/Dialect/LLVMIR/debuginfo.mlir
index 53c38b479703..cef2ced391d6 100644
--- a/mlir/test/Dialect/LLVMIR/debuginfo.mlir
+++ b/mlir/test/Dialect/LLVMIR/debuginfo.mlir
@@ -3,10 +3,10 @@
// CHECK-DAG: #[[FILE:.*]] = #llvm.di_file<"debuginfo.mlir" in "/test/">
#file = #llvm.di_file<"debuginfo.mlir" in "/test/">
-// CHECK-DAG: #[[CU:.*]] = #llvm.di_compile_unit<sourceLanguage = DW_LANG_C, file = #[[FILE]], producer = "MLIR", isOptimized = true, emissionKind = Full>
+// CHECK-DAG: #[[CU:.*]] = #llvm.di_compile_unit<id = distinct[0]<>, sourceLanguage = DW_LANG_C, file = #[[FILE]], producer = "MLIR", isOptimized = true, emissionKind = Full>
#cu = #llvm.di_compile_unit<
- sourceLanguage = DW_LANG_C, file = #file, producer = "MLIR",
- isOptimized = true, emissionKind = Full
+ id = distinct[0]<>, sourceLanguage = DW_LANG_C, file = #file,
+ producer = "MLIR", isOptimized = true, emissionKind = Full
>
// CHECK-DAG: #[[NULL:.*]] = #llvm.di_null_type
diff --git a/mlir/test/Dialect/LLVMIR/global.mlir b/mlir/test/Dialect/LLVMIR/global.mlir
index 8133aa8913f3..0649e814bfdf 100644
--- a/mlir/test/Dialect/LLVMIR/global.mlir
+++ b/mlir/test/Dialect/LLVMIR/global.mlir
@@ -263,7 +263,7 @@ llvm.mlir.global @target_fail(0 : i64) : !llvm.target<"spirv.Image", i32, 0>
// CHECK-DAG: #[[TYPE:.*]] = #llvm.di_basic_type<tag = DW_TAG_base_type, name = "uint64_t", sizeInBits = 64, encoding = DW_ATE_unsigned>
// CHECK-DAG: #[[FILE:.*]] = #llvm.di_file<"not" in "existence">
-// CHECK-DAG: #[[CU:.*]] = #llvm.di_compile_unit<sourceLanguage = DW_LANG_C, file = #[[FILE]], producer = "MLIR", isOptimized = true, emissionKind = Full>
+// CHECK-DAG: #[[CU:.*]] = #llvm.di_compile_unit<id = distinct[0]<>, sourceLanguage = DW_LANG_C, file = #[[FILE]], producer = "MLIR", isOptimized = true, emissionKind = Full>
// CHECK-DAG: #[[GVAR0:.*]] = #llvm.di_global_variable<scope = #[[CU]], name = "global_with_expr_1", linkageName = "global_with_expr_1", file = #[[FILE]], line = 370, type = #[[TYPE]], isLocalToUnit = true, isDefined = true, alignInBits = 8>
// CHECK-DAG: #[[GVAR1:.*]] = #llvm.di_global_variable<scope = #[[CU]], name = "global_with_expr_2", linkageName = "global_with_expr_2", file = #[[FILE]], line = 371, type = #[[TYPE]], isLocalToUnit = true, isDefined = true, alignInBits = 8>
// CHECK-DAG: #[[GVAR2:.*]] = #llvm.di_global_variable<scope = #[[CU]], name = "global_with_expr_3", linkageName = "global_with_expr_3", file = #[[FILE]], line = 372, type = #[[TYPE]], isLocalToUnit = true, isDefined = true, alignInBits = 8>
@@ -278,7 +278,7 @@ llvm.mlir.global @target_fail(0 : i64) : !llvm.target<"spirv.Image", i32, 0>
// CHECK-DAG: llvm.mlir.global external @global_with_expr4() {addr_space = 0 : i32, dbg_expr = #[[EXPR3]]} : i64
#di_file = #llvm.di_file<"not" in "existence">
-#di_compile_unit = #llvm.di_compile_unit<sourceLanguage = DW_LANG_C, file = #di_file, producer = "MLIR", isOptimized = true, emissionKind = Full>
+#di_compile_unit = #llvm.di_compile_unit<id = distinct[0]<>, sourceLanguage = DW_LANG_C, file = #di_file, producer = "MLIR", isOptimized = true, emissionKind = Full>
#di_basic_type = #llvm.di_basic_type<tag = DW_TAG_base_type, name = "uint64_t", sizeInBits = 64, encoding = DW_ATE_unsigned>
llvm.mlir.global external @global_with_expr1() {addr_space = 0 : i32, dbg_expr = #llvm.di_global_variable_expression<var = <scope = #di_compile_unit, name = "global_with_expr_1", linkageName = "global_with_expr_1", file = #di_file, line = 370, type = #di_basic_type, isLocalToUnit = true, isDefined = true, alignInBits = 8>, expr = <>>} : i64
llvm.mlir.global external @global_with_expr2() {addr_space = 0 : i32, dbg_expr = #llvm.di_global_variable_expression<var = <scope = #di_compile_unit, name = "global_with_expr_2", linkageName = "global_with_expr_2", file = #di_file, line = 371, type = #di_basic_type, isLocalToUnit = true, isDefined = true, alignInBits = 8>, expr = <[DW_OP_push_object_address, DW_OP_deref]>>} : i64
diff --git a/mlir/test/Dialect/LLVMIR/inlining.mlir b/mlir/test/Dialect/LLVMIR/inlining.mlir
index 63e7a46f1bdb..3af8753bc318 100644
--- a/mlir/test/Dialect/LLVMIR/inlining.mlir
+++ b/mlir/test/Dialect/LLVMIR/inlining.mlir
@@ -644,3 +644,28 @@ llvm.func @caller(%ptr : !llvm.ptr) -> i32 {
llvm.store %c5, %ptr { access_groups = [#caller] } : i32, !llvm.ptr
llvm.return %0 : i32
}
+
+// -----
+
+llvm.func @vararg_func(...) {
+ llvm.return
+}
+
+llvm.func @vararg_intrinsics() {
+ %0 = llvm.mlir.constant(1 : i32) : i32
+ %list = llvm.alloca %0 x !llvm.struct<"struct.va_list_opaque", (ptr)> : (i32) -> !llvm.ptr
+ // The vararg intrinsics should normally be part of a variadic function.
+ // However, this test uses a non-variadic function to ensure the presence of
+ // the intrinsic alone suffices to prevent inlining.
+ llvm.intr.vastart %list : !llvm.ptr
+ llvm.return
+}
+
+// CHECK-LABEL: func @caller
+llvm.func @caller() {
+ // CHECK-NEXT: llvm.call @vararg_func()
+ llvm.call @vararg_func() vararg(!llvm.func<void (...)>) : () -> ()
+ // CHECK-NEXT: llvm.call @vararg_intrinsics()
+ llvm.call @vararg_intrinsics() : () -> ()
+ llvm.return
+}
diff --git a/mlir/test/Dialect/LLVMIR/invalid-call-location.mlir b/mlir/test/Dialect/LLVMIR/invalid-call-location.mlir
index ff819b656812..38b4ed9f6e83 100644
--- a/mlir/test/Dialect/LLVMIR/invalid-call-location.mlir
+++ b/mlir/test/Dialect/LLVMIR/invalid-call-location.mlir
@@ -5,8 +5,8 @@
#di_file = #llvm.di_file<"file.cpp" in "/folder/">
#di_compile_unit = #llvm.di_compile_unit<
- sourceLanguage = DW_LANG_C_plus_plus_14, file = #di_file,
- isOptimized = true, emissionKind = Full
+ id = distinct[0]<>, sourceLanguage = DW_LANG_C_plus_plus_14,
+ file = #di_file, isOptimized = true, emissionKind = Full
>
#di_subprogram = #llvm.di_subprogram<
compileUnit = #di_compile_unit, scope = #di_file,
diff --git a/mlir/test/Dialect/LLVMIR/loop-metadata.mlir b/mlir/test/Dialect/LLVMIR/loop-metadata.mlir
index bae20d1d6818..97d3aa8f7ec7 100644
--- a/mlir/test/Dialect/LLVMIR/loop-metadata.mlir
+++ b/mlir/test/Dialect/LLVMIR/loop-metadata.mlir
@@ -90,7 +90,7 @@ llvm.func @loop_annotation() {
// CHECK: #[[END_LOC:.*]] = loc("loop-metadata.mlir":52:4)
#loc2 = loc("loop-metadata.mlir":52:4)
-#di_compile_unit = #llvm.di_compile_unit<sourceLanguage = DW_LANG_C, file = #di_file, isOptimized = false, emissionKind = None>
+#di_compile_unit = #llvm.di_compile_unit<id = distinct[0]<>, sourceLanguage = DW_LANG_C, file = #di_file, isOptimized = false, emissionKind = None>
// CHECK: #[[SUBPROGRAM:.*]] = #llvm.di_subprogram<
#di_subprogram = #llvm.di_subprogram<compileUnit = #di_compile_unit, scope = #di_file, name = "loop_locs", file = #di_file, subprogramFlags = Definition>
diff --git a/mlir/test/Dialect/LLVMIR/mem2reg-dbginfo.mlir b/mlir/test/Dialect/LLVMIR/mem2reg-dbginfo.mlir
index bb96256f3af2..f7ddb4a7abe5 100644
--- a/mlir/test/Dialect/LLVMIR/mem2reg-dbginfo.mlir
+++ b/mlir/test/Dialect/LLVMIR/mem2reg-dbginfo.mlir
@@ -5,7 +5,7 @@ llvm.func @use_ptr(!llvm.ptr)
#di_basic_type = #llvm.di_basic_type<tag = DW_TAG_base_type, name = "ptr sized type", sizeInBits = 64>
#di_file = #llvm.di_file<"test.ll" in "">
-#di_compile_unit = #llvm.di_compile_unit<sourceLanguage = DW_LANG_C_plus_plus_14, file = #di_file, producer = "clang", isOptimized = false, emissionKind = Full>
+#di_compile_unit = #llvm.di_compile_unit<id = distinct[0]<>, sourceLanguage = DW_LANG_C_plus_plus_14, file = #di_file, producer = "clang", isOptimized = false, emissionKind = Full>
#di_subprogram = #llvm.di_subprogram<compileUnit = #di_compile_unit, scope = #di_file, name = "blah", linkageName = "blah", file = #di_file, line = 7, subprogramFlags = Definition>
// CHECK: #[[$VAR:.*]] = #llvm.di_local_variable<{{.*}}name = "ptr sized var"{{.*}}>
#di_local_variable = #llvm.di_local_variable<scope = #di_subprogram, name = "ptr sized var", file = #di_file, line = 7, arg = 1, type = #di_basic_type>
diff --git a/mlir/test/Dialect/SPIRV/IR/atomic-ops.mlir b/mlir/test/Dialect/SPIRV/IR/atomic-ops.mlir
index ed0e19534c17..cc0abd3a42dc 100644
--- a/mlir/test/Dialect/SPIRV/IR/atomic-ops.mlir
+++ b/mlir/test/Dialect/SPIRV/IR/atomic-ops.mlir
@@ -5,15 +5,15 @@
//===----------------------------------------------------------------------===//
func.func @atomic_and(%ptr : !spirv.ptr<i32, StorageBuffer>, %value : i32) -> i32 {
- // CHECK: spirv.AtomicAnd "Device" "None" %{{.*}}, %{{.*}} : !spirv.ptr<i32, StorageBuffer>
- %0 = spirv.AtomicAnd "Device" "None" %ptr, %value : !spirv.ptr<i32, StorageBuffer>
+ // CHECK: spirv.AtomicAnd <Device> <None> %{{.*}}, %{{.*}} : !spirv.ptr<i32, StorageBuffer>
+ %0 = spirv.AtomicAnd <Device> <None> %ptr, %value : !spirv.ptr<i32, StorageBuffer>
return %0 : i32
}
// -----
func.func @atomic_and(%ptr : !spirv.ptr<f32, StorageBuffer>, %value : i32) -> i32 {
- // expected-error @+1 {{pointer operand must point to an integer value, found 'f32'}}
+ // expected-error @+1 {{'spirv.AtomicAnd' op failed to verify that `value` type matches pointee type of `pointer`}}
%0 = "spirv.AtomicAnd"(%ptr, %value) {memory_scope = #spirv.scope<Workgroup>, semantics = #spirv.memory_semantics<AcquireRelease>} : (!spirv.ptr<f32, StorageBuffer>, i32) -> (i32)
return %0 : i32
}
@@ -22,7 +22,7 @@ func.func @atomic_and(%ptr : !spirv.ptr<f32, StorageBuffer>, %value : i32) -> i3
// -----
func.func @atomic_and(%ptr : !spirv.ptr<i32, StorageBuffer>, %value : i64) -> i64 {
- // expected-error @+1 {{expected value to have the same type as the pointer operand's pointee type 'i32', but found 'i64'}}
+ // expected-error @+1 {{'spirv.AtomicAnd' op failed to verify that `value` type matches pointee type of `pointer`}}
%0 = "spirv.AtomicAnd"(%ptr, %value) {memory_scope = #spirv.scope<Workgroup>, semantics = #spirv.memory_semantics<AcquireRelease>} : (!spirv.ptr<i32, StorageBuffer>, i64) -> (i64)
return %0 : i64
}
@@ -31,7 +31,7 @@ func.func @atomic_and(%ptr : !spirv.ptr<i32, StorageBuffer>, %value : i64) -> i6
func.func @atomic_and(%ptr : !spirv.ptr<i32, StorageBuffer>, %value : i32) -> i32 {
// expected-error @+1 {{expected at most one of these four memory constraints to be set: `Acquire`, `Release`,`AcquireRelease` or `SequentiallyConsistent`}}
- %0 = spirv.AtomicAnd "Device" "Acquire|Release" %ptr, %value : !spirv.ptr<i32, StorageBuffer>
+ %0 = spirv.AtomicAnd <Device> <Acquire|Release> %ptr, %value : !spirv.ptr<i32, StorageBuffer>
return %0 : i32
}
@@ -42,15 +42,15 @@ func.func @atomic_and(%ptr : !spirv.ptr<i32, StorageBuffer>, %value : i32) -> i3
//===----------------------------------------------------------------------===//
func.func @atomic_compare_exchange(%ptr: !spirv.ptr<i32, Workgroup>, %value: i32, %comparator: i32) -> i32 {
- // CHECK: spirv.AtomicCompareExchange "Workgroup" "Release" "Acquire" %{{.*}}, %{{.*}}, %{{.*}} : !spirv.ptr<i32, Workgroup>
- %0 = spirv.AtomicCompareExchange "Workgroup" "Release" "Acquire" %ptr, %value, %comparator: !spirv.ptr<i32, Workgroup>
+ // CHECK: spirv.AtomicCompareExchange <Workgroup> <Release> <Acquire> %{{.*}}, %{{.*}}, %{{.*}} : !spirv.ptr<i32, Workgroup>
+ %0 = spirv.AtomicCompareExchange <Workgroup> <Release> <Acquire> %ptr, %value, %comparator: !spirv.ptr<i32, Workgroup>
return %0: i32
}
// -----
func.func @atomic_compare_exchange(%ptr: !spirv.ptr<i32, Workgroup>, %value: i64, %comparator: i32) -> i32 {
- // expected-error @+1 {{value operand must have the same type as the op result, but found 'i64' vs 'i32'}}
+ // expected-error @+1 {{'spirv.AtomicCompareExchange' op failed to verify that `value` type matches pointee type of `pointer`}}
%0 = "spirv.AtomicCompareExchange"(%ptr, %value, %comparator) {memory_scope = #spirv.scope<Workgroup>, equal_semantics = #spirv.memory_semantics<AcquireRelease>, unequal_semantics = #spirv.memory_semantics<AcquireRelease>} : (!spirv.ptr<i32, Workgroup>, i64, i32) -> (i32)
return %0: i32
}
@@ -58,7 +58,7 @@ func.func @atomic_compare_exchange(%ptr: !spirv.ptr<i32, Workgroup>, %value: i64
// -----
func.func @atomic_compare_exchange(%ptr: !spirv.ptr<i32, Workgroup>, %value: i32, %comparator: i16) -> i32 {
- // expected-error @+1 {{comparator operand must have the same type as the op result, but found 'i16' vs 'i32'}}
+ // expected-error @+1 {{'spirv.AtomicCompareExchange' op failed to verify that `comparator` type matches pointee type of `pointer`}}
%0 = "spirv.AtomicCompareExchange"(%ptr, %value, %comparator) {memory_scope = #spirv.scope<Workgroup>, equal_semantics = #spirv.memory_semantics<AcquireRelease>, unequal_semantics = #spirv.memory_semantics<AcquireRelease>} : (!spirv.ptr<i32, Workgroup>, i32, i16) -> (i32)
return %0: i32
}
@@ -66,7 +66,7 @@ func.func @atomic_compare_exchange(%ptr: !spirv.ptr<i32, Workgroup>, %value: i32
// -----
func.func @atomic_compare_exchange(%ptr: !spirv.ptr<i64, Workgroup>, %value: i32, %comparator: i32) -> i32 {
- // expected-error @+1 {{pointer operand's pointee type must have the same as the op result type, but found 'i64' vs 'i32'}}
+ // expected-error @+1 {{'spirv.AtomicCompareExchange' op failed to verify that `result` type matches pointee type of `pointer`}}
%0 = "spirv.AtomicCompareExchange"(%ptr, %value, %comparator) {memory_scope = #spirv.scope<Workgroup>, equal_semantics = #spirv.memory_semantics<AcquireRelease>, unequal_semantics = #spirv.memory_semantics<AcquireRelease>} : (!spirv.ptr<i64, Workgroup>, i32, i32) -> (i32)
return %0: i32
}
@@ -78,15 +78,15 @@ func.func @atomic_compare_exchange(%ptr: !spirv.ptr<i64, Workgroup>, %value: i32
//===----------------------------------------------------------------------===//
func.func @atomic_compare_exchange_weak(%ptr: !spirv.ptr<i32, Workgroup>, %value: i32, %comparator: i32) -> i32 {
- // CHECK: spirv.AtomicCompareExchangeWeak "Workgroup" "Release" "Acquire" %{{.*}}, %{{.*}}, %{{.*}} : !spirv.ptr<i32, Workgroup>
- %0 = spirv.AtomicCompareExchangeWeak "Workgroup" "Release" "Acquire" %ptr, %value, %comparator: !spirv.ptr<i32, Workgroup>
+ // CHECK: spirv.AtomicCompareExchangeWeak <Workgroup> <Release> <Acquire> %{{.*}}, %{{.*}}, %{{.*}} : !spirv.ptr<i32, Workgroup>
+ %0 = spirv.AtomicCompareExchangeWeak <Workgroup> <Release> <Acquire> %ptr, %value, %comparator: !spirv.ptr<i32, Workgroup>
return %0: i32
}
// -----
func.func @atomic_compare_exchange_weak(%ptr: !spirv.ptr<i32, Workgroup>, %value: i64, %comparator: i32) -> i32 {
- // expected-error @+1 {{value operand must have the same type as the op result, but found 'i64' vs 'i32'}}
+ // expected-error @+1 {{'spirv.AtomicCompareExchangeWeak' op failed to verify that `value` type matches pointee type of `pointer`}}
%0 = "spirv.AtomicCompareExchangeWeak"(%ptr, %value, %comparator) {memory_scope = #spirv.scope<Workgroup>, equal_semantics = #spirv.memory_semantics<AcquireRelease>, unequal_semantics = #spirv.memory_semantics<AcquireRelease>} : (!spirv.ptr<i32, Workgroup>, i64, i32) -> (i32)
return %0: i32
}
@@ -94,7 +94,7 @@ func.func @atomic_compare_exchange_weak(%ptr: !spirv.ptr<i32, Workgroup>, %value
// -----
func.func @atomic_compare_exchange_weak(%ptr: !spirv.ptr<i32, Workgroup>, %value: i32, %comparator: i16) -> i32 {
- // expected-error @+1 {{comparator operand must have the same type as the op result, but found 'i16' vs 'i32'}}
+ // expected-error @+1 {{'spirv.AtomicCompareExchangeWeak' op failed to verify that `comparator` type matches pointee type of `pointer`}}
%0 = "spirv.AtomicCompareExchangeWeak"(%ptr, %value, %comparator) {memory_scope = #spirv.scope<Workgroup>, equal_semantics = #spirv.memory_semantics<AcquireRelease>, unequal_semantics = #spirv.memory_semantics<AcquireRelease>} : (!spirv.ptr<i32, Workgroup>, i32, i16) -> (i32)
return %0: i32
}
@@ -102,7 +102,7 @@ func.func @atomic_compare_exchange_weak(%ptr: !spirv.ptr<i32, Workgroup>, %value
// -----
func.func @atomic_compare_exchange_weak(%ptr: !spirv.ptr<i64, Workgroup>, %value: i32, %comparator: i32) -> i32 {
- // expected-error @+1 {{pointer operand's pointee type must have the same as the op result type, but found 'i64' vs 'i32'}}
+ // expected-error @+1 {{'spirv.AtomicCompareExchangeWeak' op failed to verify that `result` type matches pointee type of `pointer`}}
%0 = "spirv.AtomicCompareExchangeWeak"(%ptr, %value, %comparator) {memory_scope = #spirv.scope<Workgroup>, equal_semantics = #spirv.memory_semantics<AcquireRelease>, unequal_semantics = #spirv.memory_semantics<AcquireRelease>} : (!spirv.ptr<i64, Workgroup>, i32, i32) -> (i32)
return %0: i32
}
@@ -114,15 +114,15 @@ func.func @atomic_compare_exchange_weak(%ptr: !spirv.ptr<i64, Workgroup>, %value
//===----------------------------------------------------------------------===//
func.func @atomic_exchange(%ptr: !spirv.ptr<i32, Workgroup>, %value: i32) -> i32 {
- // CHECK: spirv.AtomicExchange "Workgroup" "Release" %{{.*}}, %{{.*}} : !spirv.ptr<i32, Workgroup>
- %0 = spirv.AtomicExchange "Workgroup" "Release" %ptr, %value: !spirv.ptr<i32, Workgroup>
+ // CHECK: spirv.AtomicExchange <Workgroup> <Release> %{{.*}}, %{{.*}} : !spirv.ptr<i32, Workgroup>
+ %0 = spirv.AtomicExchange <Workgroup> <Release> %ptr, %value: !spirv.ptr<i32, Workgroup>
return %0: i32
}
// -----
func.func @atomic_exchange(%ptr: !spirv.ptr<i32, Workgroup>, %value: i64) -> i32 {
- // expected-error @+1 {{value operand must have the same type as the op result, but found 'i64' vs 'i32'}}
+ // expected-error @+1 {{'spirv.AtomicExchange' op failed to verify that `value` type matches pointee type of `pointer`}}
%0 = "spirv.AtomicExchange"(%ptr, %value) {memory_scope = #spirv.scope<Workgroup>, semantics = #spirv.memory_semantics<AcquireRelease>} : (!spirv.ptr<i32, Workgroup>, i64) -> (i32)
return %0: i32
}
@@ -130,7 +130,7 @@ func.func @atomic_exchange(%ptr: !spirv.ptr<i32, Workgroup>, %value: i64) -> i32
// -----
func.func @atomic_exchange(%ptr: !spirv.ptr<i64, Workgroup>, %value: i32) -> i32 {
- // expected-error @+1 {{pointer operand's pointee type must have the same as the op result type, but found 'i64' vs 'i32'}}
+ // expected-error @+1 {{'spirv.AtomicExchange' op failed to verify that `value` type matches pointee type of `pointer`}}
%0 = "spirv.AtomicExchange"(%ptr, %value) {memory_scope = #spirv.scope<Workgroup>, semantics = #spirv.memory_semantics<AcquireRelease>} : (!spirv.ptr<i64, Workgroup>, i32) -> (i32)
return %0: i32
}
@@ -142,8 +142,8 @@ func.func @atomic_exchange(%ptr: !spirv.ptr<i64, Workgroup>, %value: i32) -> i32
//===----------------------------------------------------------------------===//
func.func @atomic_iadd(%ptr : !spirv.ptr<i32, StorageBuffer>, %value : i32) -> i32 {
- // CHECK: spirv.AtomicIAdd "Workgroup" "None" %{{.*}}, %{{.*}} : !spirv.ptr<i32, StorageBuffer>
- %0 = spirv.AtomicIAdd "Workgroup" "None" %ptr, %value : !spirv.ptr<i32, StorageBuffer>
+ // CHECK: spirv.AtomicIAdd <Workgroup> <None> %{{.*}}, %{{.*}} : !spirv.ptr<i32, StorageBuffer>
+ %0 = spirv.AtomicIAdd <Workgroup> <None> %ptr, %value : !spirv.ptr<i32, StorageBuffer>
return %0 : i32
}
@@ -152,8 +152,8 @@ func.func @atomic_iadd(%ptr : !spirv.ptr<i32, StorageBuffer>, %value : i32) -> i
//===----------------------------------------------------------------------===//
func.func @atomic_idecrement(%ptr : !spirv.ptr<i32, StorageBuffer>) -> i32 {
- // CHECK: spirv.AtomicIDecrement "Workgroup" "None" %{{.*}} : !spirv.ptr<i32, StorageBuffer>
- %0 = spirv.AtomicIDecrement "Workgroup" "None" %ptr : !spirv.ptr<i32, StorageBuffer>
+ // CHECK: spirv.AtomicIDecrement <Workgroup> <None> %{{.*}} : !spirv.ptr<i32, StorageBuffer>
+ %0 = spirv.AtomicIDecrement <Workgroup> <None> %ptr : !spirv.ptr<i32, StorageBuffer>
return %0 : i32
}
@@ -162,8 +162,8 @@ func.func @atomic_idecrement(%ptr : !spirv.ptr<i32, StorageBuffer>) -> i32 {
//===----------------------------------------------------------------------===//
func.func @atomic_iincrement(%ptr : !spirv.ptr<i32, StorageBuffer>) -> i32 {
- // CHECK: spirv.AtomicIIncrement "Workgroup" "None" %{{.*}} : !spirv.ptr<i32, StorageBuffer>
- %0 = spirv.AtomicIIncrement "Workgroup" "None" %ptr : !spirv.ptr<i32, StorageBuffer>
+ // CHECK: spirv.AtomicIIncrement <Workgroup> <None> %{{.*}} : !spirv.ptr<i32, StorageBuffer>
+ %0 = spirv.AtomicIIncrement <Workgroup> <None> %ptr : !spirv.ptr<i32, StorageBuffer>
return %0 : i32
}
@@ -172,8 +172,8 @@ func.func @atomic_iincrement(%ptr : !spirv.ptr<i32, StorageBuffer>) -> i32 {
//===----------------------------------------------------------------------===//
func.func @atomic_isub(%ptr : !spirv.ptr<i32, StorageBuffer>, %value : i32) -> i32 {
- // CHECK: spirv.AtomicISub "Workgroup" "None" %{{.*}}, %{{.*}} : !spirv.ptr<i32, StorageBuffer>
- %0 = spirv.AtomicISub "Workgroup" "None" %ptr, %value : !spirv.ptr<i32, StorageBuffer>
+ // CHECK: spirv.AtomicISub <Workgroup> <None> %{{.*}}, %{{.*}} : !spirv.ptr<i32, StorageBuffer>
+ %0 = spirv.AtomicISub <Workgroup> <None> %ptr, %value : !spirv.ptr<i32, StorageBuffer>
return %0 : i32
}
@@ -182,8 +182,8 @@ func.func @atomic_isub(%ptr : !spirv.ptr<i32, StorageBuffer>, %value : i32) -> i
//===----------------------------------------------------------------------===//
func.func @atomic_or(%ptr : !spirv.ptr<i32, StorageBuffer>, %value : i32) -> i32 {
- // CHECK: spirv.AtomicOr "Workgroup" "None" %{{.*}}, %{{.*}} : !spirv.ptr<i32, StorageBuffer>
- %0 = spirv.AtomicOr "Workgroup" "None" %ptr, %value : !spirv.ptr<i32, StorageBuffer>
+ // CHECK: spirv.AtomicOr <Workgroup> <None> %{{.*}}, %{{.*}} : !spirv.ptr<i32, StorageBuffer>
+ %0 = spirv.AtomicOr <Workgroup> <None> %ptr, %value : !spirv.ptr<i32, StorageBuffer>
return %0 : i32
}
@@ -192,8 +192,8 @@ func.func @atomic_or(%ptr : !spirv.ptr<i32, StorageBuffer>, %value : i32) -> i32
//===----------------------------------------------------------------------===//
func.func @atomic_smax(%ptr : !spirv.ptr<i32, StorageBuffer>, %value : i32) -> i32 {
- // CHECK: spirv.AtomicSMax "Workgroup" "None" %{{.*}}, %{{.*}} : !spirv.ptr<i32, StorageBuffer>
- %0 = spirv.AtomicSMax "Workgroup" "None" %ptr, %value : !spirv.ptr<i32, StorageBuffer>
+ // CHECK: spirv.AtomicSMax <Workgroup> <None> %{{.*}}, %{{.*}} : !spirv.ptr<i32, StorageBuffer>
+ %0 = spirv.AtomicSMax <Workgroup> <None> %ptr, %value : !spirv.ptr<i32, StorageBuffer>
return %0 : i32
}
@@ -202,8 +202,8 @@ func.func @atomic_smax(%ptr : !spirv.ptr<i32, StorageBuffer>, %value : i32) -> i
//===----------------------------------------------------------------------===//
func.func @atomic_smin(%ptr : !spirv.ptr<i32, StorageBuffer>, %value : i32) -> i32 {
- // CHECK: spirv.AtomicSMin "Workgroup" "None" %{{.*}}, %{{.*}} : !spirv.ptr<i32, StorageBuffer>
- %0 = spirv.AtomicSMin "Workgroup" "None" %ptr, %value : !spirv.ptr<i32, StorageBuffer>
+ // CHECK: spirv.AtomicSMin <Workgroup> <None> %{{.*}}, %{{.*}} : !spirv.ptr<i32, StorageBuffer>
+ %0 = spirv.AtomicSMin <Workgroup> <None> %ptr, %value : !spirv.ptr<i32, StorageBuffer>
return %0 : i32
}
@@ -212,8 +212,8 @@ func.func @atomic_smin(%ptr : !spirv.ptr<i32, StorageBuffer>, %value : i32) -> i
//===----------------------------------------------------------------------===//
func.func @atomic_umax(%ptr : !spirv.ptr<i32, StorageBuffer>, %value : i32) -> i32 {
- // CHECK: spirv.AtomicUMax "Workgroup" "None" %{{.*}}, %{{.*}} : !spirv.ptr<i32, StorageBuffer>
- %0 = spirv.AtomicUMax "Workgroup" "None" %ptr, %value : !spirv.ptr<i32, StorageBuffer>
+ // CHECK: spirv.AtomicUMax <Workgroup> <None> %{{.*}}, %{{.*}} : !spirv.ptr<i32, StorageBuffer>
+ %0 = spirv.AtomicUMax <Workgroup> <None> %ptr, %value : !spirv.ptr<i32, StorageBuffer>
return %0 : i32
}
@@ -222,8 +222,8 @@ func.func @atomic_umax(%ptr : !spirv.ptr<i32, StorageBuffer>, %value : i32) -> i
//===----------------------------------------------------------------------===//
func.func @atomic_umin(%ptr : !spirv.ptr<i32, StorageBuffer>, %value : i32) -> i32 {
- // CHECK: spirv.AtomicUMin "Workgroup" "None" %{{.*}}, %{{.*}} : !spirv.ptr<i32, StorageBuffer>
- %0 = spirv.AtomicUMin "Workgroup" "None" %ptr, %value : !spirv.ptr<i32, StorageBuffer>
+ // CHECK: spirv.AtomicUMin <Workgroup> <None> %{{.*}}, %{{.*}} : !spirv.ptr<i32, StorageBuffer>
+ %0 = spirv.AtomicUMin <Workgroup> <None> %ptr, %value : !spirv.ptr<i32, StorageBuffer>
return %0 : i32
}
@@ -232,8 +232,8 @@ func.func @atomic_umin(%ptr : !spirv.ptr<i32, StorageBuffer>, %value : i32) -> i
//===----------------------------------------------------------------------===//
func.func @atomic_xor(%ptr : !spirv.ptr<i32, StorageBuffer>, %value : i32) -> i32 {
- // CHECK: spirv.AtomicXor "Workgroup" "None" %{{.*}}, %{{.*}} : !spirv.ptr<i32, StorageBuffer>
- %0 = spirv.AtomicXor "Workgroup" "None" %ptr, %value : !spirv.ptr<i32, StorageBuffer>
+ // CHECK: spirv.AtomicXor <Workgroup> <None> %{{.*}}, %{{.*}} : !spirv.ptr<i32, StorageBuffer>
+ %0 = spirv.AtomicXor <Workgroup> <None> %ptr, %value : !spirv.ptr<i32, StorageBuffer>
return %0 : i32
}
@@ -244,15 +244,15 @@ func.func @atomic_xor(%ptr : !spirv.ptr<i32, StorageBuffer>, %value : i32) -> i3
//===----------------------------------------------------------------------===//
func.func @atomic_fadd(%ptr : !spirv.ptr<f32, StorageBuffer>, %value : f32) -> f32 {
- // CHECK: spirv.EXT.AtomicFAdd "Device" "None" %{{.*}}, %{{.*}} : !spirv.ptr<f32, StorageBuffer>
- %0 = spirv.EXT.AtomicFAdd "Device" "None" %ptr, %value : !spirv.ptr<f32, StorageBuffer>
+ // CHECK: spirv.EXT.AtomicFAdd <Device> <None> %{{.*}}, %{{.*}} : !spirv.ptr<f32, StorageBuffer>
+ %0 = spirv.EXT.AtomicFAdd <Device> <None> %ptr, %value : !spirv.ptr<f32, StorageBuffer>
return %0 : f32
}
// -----
func.func @atomic_fadd(%ptr : !spirv.ptr<i32, StorageBuffer>, %value : f32) -> f32 {
- // expected-error @+1 {{pointer operand must point to an float value, found 'i32'}}
+ // expected-error @+1 {{'spirv.EXT.AtomicFAdd' op failed to verify that `result` type matches pointee type of `pointer`}}
%0 = "spirv.EXT.AtomicFAdd"(%ptr, %value) {memory_scope = #spirv.scope<Workgroup>, semantics = #spirv.memory_semantics<AcquireRelease>} : (!spirv.ptr<i32, StorageBuffer>, f32) -> (f32)
return %0 : f32
}
@@ -260,7 +260,7 @@ func.func @atomic_fadd(%ptr : !spirv.ptr<i32, StorageBuffer>, %value : f32) -> f
// -----
func.func @atomic_fadd(%ptr : !spirv.ptr<f32, StorageBuffer>, %value : f64) -> f64 {
- // expected-error @+1 {{expected value to have the same type as the pointer operand's pointee type 'f32', but found 'f64'}}
+ // expected-error @+1 {{'spirv.EXT.AtomicFAdd' op failed to verify that `result` type matches pointee type of `pointer`}}
%0 = "spirv.EXT.AtomicFAdd"(%ptr, %value) {memory_scope = #spirv.scope<Device>, semantics = #spirv.memory_semantics<AcquireRelease>} : (!spirv.ptr<f32, StorageBuffer>, f64) -> (f64)
return %0 : f64
}
@@ -269,6 +269,6 @@ func.func @atomic_fadd(%ptr : !spirv.ptr<f32, StorageBuffer>, %value : f64) -> f
func.func @atomic_fadd(%ptr : !spirv.ptr<f32, StorageBuffer>, %value : f32) -> f32 {
// expected-error @+1 {{expected at most one of these four memory constraints to be set: `Acquire`, `Release`,`AcquireRelease` or `SequentiallyConsistent`}}
- %0 = spirv.EXT.AtomicFAdd "Device" "Acquire|Release" %ptr, %value : !spirv.ptr<f32, StorageBuffer>
+ %0 = spirv.EXT.AtomicFAdd <Device> <Acquire|Release> %ptr, %value : !spirv.ptr<f32, StorageBuffer>
return %0 : f32
}
diff --git a/mlir/test/Dialect/SPIRV/IR/availability.mlir b/mlir/test/Dialect/SPIRV/IR/availability.mlir
index fb95a0c567b9..c583a48eba27 100644
--- a/mlir/test/Dialect/SPIRV/IR/availability.mlir
+++ b/mlir/test/Dialect/SPIRV/IR/availability.mlir
@@ -16,7 +16,7 @@ func.func @atomic_compare_exchange_weak(%ptr: !spirv.ptr<i32, Workgroup>, %value
// CHECK: max version: v1.3
// CHECK: extensions: [ ]
// CHECK: capabilities: [ [Kernel] ]
- %0 = spirv.AtomicCompareExchangeWeak "Workgroup" "Release" "Acquire" %ptr, %value, %comparator: !spirv.ptr<i32, Workgroup>
+ %0 = spirv.AtomicCompareExchangeWeak <Workgroup> <Release> <Acquire> %ptr, %value, %comparator: !spirv.ptr<i32, Workgroup>
return %0: i32
}
diff --git a/mlir/test/Dialect/SPIRV/IR/cast-ops.mlir b/mlir/test/Dialect/SPIRV/IR/cast-ops.mlir
index 4f4a72da7c05..e289dbf28ad2 100644
--- a/mlir/test/Dialect/SPIRV/IR/cast-ops.mlir
+++ b/mlir/test/Dialect/SPIRV/IR/cast-ops.mlir
@@ -414,7 +414,7 @@ spirv.module Physical64 OpenCL requires #spirv.vce<v1.0, [Kernel, Addresses], []
// -----
spirv.module PhysicalStorageBuffer64 OpenCL requires #spirv.vce<v1.0, [Kernel, Addresses, PhysicalStorageBufferAddresses], []> {
- spirv.func @covert_ptr_to_u_PhysicalStorageBuffer(%arg0 : !spirv.ptr<i32, PhysicalStorageBuffer>) "None" {
+ spirv.func @covert_ptr_to_u_PhysicalStorageBuffer(%arg0 : !spirv.ptr<i32, PhysicalStorageBuffer> { spirv.decoration = #spirv.decoration<Aliased> }) "None" {
// CHECK: {{%.*}} = spirv.ConvertPtrToU {{%.*}} : !spirv.ptr<i32, PhysicalStorageBuffer> to i32
%0 = spirv.ConvertPtrToU %arg0 : !spirv.ptr<i32, PhysicalStorageBuffer> to i32
spirv.Return
diff --git a/mlir/test/Dialect/SPIRV/IR/function-decorations.mlir b/mlir/test/Dialect/SPIRV/IR/function-decorations.mlir
index 2e39421df13c..d915f8820c4f 100644
--- a/mlir/test/Dialect/SPIRV/IR/function-decorations.mlir
+++ b/mlir/test/Dialect/SPIRV/IR/function-decorations.mlir
@@ -17,3 +17,59 @@ spirv.module Logical GLSL450 requires #spirv.vce<v1.0, [Shader, Linkage], []> {
}
spirv.func @inside.func() -> () "Pure" attributes {} {spirv.Return}
}
+
+// -----
+
+// CHECK: spirv.func @arg_decoration_pointer(%{{.+}}: !spirv.ptr<i32, PhysicalStorageBuffer> {spirv.decoration = #spirv.decoration<Aliased>}) "None"
+spirv.func @arg_decoration_pointer(%arg0: !spirv.ptr<i32, PhysicalStorageBuffer> { spirv.decoration = #spirv.decoration<Aliased> }) "None" {
+ spirv.Return
+}
+
+// -----
+
+// CHECK: spirv.func @arg_decoration_pointer(%{{.+}}: !spirv.ptr<i32, PhysicalStorageBuffer> {spirv.decoration = #spirv.decoration<Restrict>}) "None"
+spirv.func @arg_decoration_pointer(%arg0: !spirv.ptr<i32, PhysicalStorageBuffer> { spirv.decoration = #spirv.decoration<Restrict> }) "None" {
+ spirv.Return
+}
+
+// -----
+
+// CHECK: spirv.func @arg_decoration_pointer(%{{.+}}: !spirv.ptr<!spirv.ptr<i32, PhysicalStorageBuffer>, Generic> {spirv.decoration = #spirv.decoration<AliasedPointer>}) "None"
+spirv.func @arg_decoration_pointer(%arg0: !spirv.ptr<!spirv.ptr<i32, PhysicalStorageBuffer>, Generic> { spirv.decoration = #spirv.decoration<AliasedPointer> }) "None" {
+ spirv.Return
+}
+
+// -----
+
+// CHECK: spirv.func @arg_decoration_pointer(%{{.+}}: !spirv.ptr<!spirv.ptr<i32, PhysicalStorageBuffer>, Generic> {spirv.decoration = #spirv.decoration<RestrictPointer>}) "None"
+spirv.func @arg_decoration_pointer(%arg0: !spirv.ptr<!spirv.ptr<i32, PhysicalStorageBuffer>, Generic> { spirv.decoration = #spirv.decoration<RestrictPointer> }) "None" {
+ spirv.Return
+}
+
+// -----
+
+// expected-error @+1 {{'spirv.func' op with physical buffer pointer must be decorated either 'Aliased' or 'Restrict'}}
+spirv.func @no_arg_decoration_pointer(%arg0: !spirv.ptr<i32, PhysicalStorageBuffer>) "None" {
+ spirv.Return
+}
+
+// -----
+
+// expected-error @+1 {{'spirv.func' op with a pointer points to a physical buffer pointer must be decorated either 'AliasedPointer' or 'RestrictPointer'}}
+spirv.func @no_arg_decoration_pointer(%arg0: !spirv.ptr<!spirv.ptr<i32, PhysicalStorageBuffer>, Function>) "None" {
+ spirv.Return
+}
+
+// -----
+
+// expected-error @+1 {{'spirv.func' op with physical buffer pointer must be decorated either 'Aliased' or 'Restrict'}}
+spirv.func @no_decoration_name_attr(%arg0 : !spirv.ptr<i32, PhysicalStorageBuffer> { random_attr = #spirv.decoration<Aliased> }) "None" {
+ spirv.Return
+}
+
+// -----
+
+// expected-error @+1 {{'spirv.func' op arguments may only have dialect attributes}}
+spirv.func @no_decoration_name_attr(%arg0 : !spirv.ptr<i32, PhysicalStorageBuffer> { spirv.decoration = #spirv.decoration<Restrict>, random_attr = #spirv.decoration<Aliased> }) "None" {
+ spirv.Return
+}
diff --git a/mlir/test/Dialect/SPIRV/IR/target-env.mlir b/mlir/test/Dialect/SPIRV/IR/target-env.mlir
index 5f3292dd6814..273aa33e15ea 100644
--- a/mlir/test/Dialect/SPIRV/IR/target-env.mlir
+++ b/mlir/test/Dialect/SPIRV/IR/target-env.mlir
@@ -49,7 +49,7 @@ func.func @main() {
func.func @cmp_exchange_weak_suitable_version_capabilities(%ptr: !spirv.ptr<i32, Workgroup>, %value: i32, %comparator: i32) -> i32 attributes {
spirv.target_env = #spirv.target_env<#spirv.vce<v1.1, [Kernel, AtomicStorage], []>, #spirv.resource_limits<>>
} {
- // CHECK: spirv.AtomicCompareExchangeWeak "Workgroup" "AcquireRelease|AtomicCounterMemory" "Acquire"
+ // CHECK: spirv.AtomicCompareExchangeWeak <Workgroup> <AcquireRelease|AtomicCounterMemory> <Acquire>
%0 = "test.convert_to_atomic_compare_exchange_weak_op"(%ptr, %value, %comparator): (!spirv.ptr<i32, Workgroup>, i32, i32) -> (i32)
return %0: i32
}
diff --git a/mlir/test/Dialect/SPIRV/Transforms/inlining.mlir b/mlir/test/Dialect/SPIRV/Transforms/inlining.mlir
index d1937c44262f..8d663b4edc45 100644
--- a/mlir/test/Dialect/SPIRV/Transforms/inlining.mlir
+++ b/mlir/test/Dialect/SPIRV/Transforms/inlining.mlir
@@ -206,7 +206,7 @@ spirv.module Logical GLSL450 {
// CHECK: [[STOREPTR:%.*]] = spirv.AccessChain [[ADDRESS_ARG1]]
%7 = spirv.AccessChain %3[%1] : !spirv.ptr<!spirv.struct<(i32 [0])>, StorageBuffer>, i32
// CHECK-NOT: spirv.FunctionCall
- // CHECK: spirv.AtomicIAdd "Device" "AcquireRelease" [[STOREPTR]], [[VAL]]
+ // CHECK: spirv.AtomicIAdd <Device> <AcquireRelease> [[STOREPTR]], [[VAL]]
// CHECK: spirv.Branch
spirv.FunctionCall @atomic_add(%5, %7) : (i32, !spirv.ptr<i32, StorageBuffer>) -> ()
spirv.Branch ^bb2
@@ -217,7 +217,7 @@ spirv.module Logical GLSL450 {
spirv.Return
}
spirv.func @atomic_add(%arg0: i32, %arg1: !spirv.ptr<i32, StorageBuffer>) "None" {
- %0 = spirv.AtomicIAdd "Device" "AcquireRelease" %arg1, %arg0 : !spirv.ptr<i32, StorageBuffer>
+ %0 = spirv.AtomicIAdd <Device> <AcquireRelease> %arg1, %arg0 : !spirv.ptr<i32, StorageBuffer>
spirv.Return
}
spirv.EntryPoint "GLCompute" @inline_into_selection_region
diff --git a/mlir/test/Dialect/SPIRV/Transforms/vce-deduction.mlir b/mlir/test/Dialect/SPIRV/Transforms/vce-deduction.mlir
index 4eaa21d2f94e..931034f3d5f6 100644
--- a/mlir/test/Dialect/SPIRV/Transforms/vce-deduction.mlir
+++ b/mlir/test/Dialect/SPIRV/Transforms/vce-deduction.mlir
@@ -66,7 +66,7 @@ spirv.module PhysicalStorageBuffer64 GLSL450 attributes {
spirv.target_env = #spirv.target_env<
#spirv.vce<v1.0, [Shader, PhysicalStorageBufferAddresses], [SPV_EXT_physical_storage_buffer]>, #spirv.resource_limits<>>
} {
- spirv.func @physical_ptr(%val : !spirv.ptr<f32, PhysicalStorageBuffer>) "None" {
+ spirv.func @physical_ptr(%val : !spirv.ptr<f32, PhysicalStorageBuffer> { spirv.decoration = #spirv.decoration<Aliased> }) "None" {
spirv.Return
}
}
diff --git a/mlir/test/Target/LLVMIR/Import/debug-info.ll b/mlir/test/Target/LLVMIR/Import/debug-info.ll
index f8bf00bbf3f6..9ef6580bcf24 100644
--- a/mlir/test/Target/LLVMIR/Import/debug-info.ll
+++ b/mlir/test/Target/LLVMIR/Import/debug-info.ll
@@ -30,8 +30,8 @@ define i32 @instruction_loc(i32 %arg1) {
}
; CHECK-DAG: #[[RAW_FILE_LOC:.+]] = loc("debug-info.ll":1:2)
-; CHECK-DAG: #[[SP:.+]] = #llvm.di_subprogram<compileUnit = #{{.*}}, scope = #{{.*}}, name = "instruction_loc"
-; CHECK-DAG: #[[CALLEE:.+]] = #llvm.di_subprogram<compileUnit = #{{.*}}, scope = #{{.*}}, name = "callee"
+; CHECK-DAG: #[[SP:.+]] = #llvm.di_subprogram<id = distinct[{{.*}}]<>, compileUnit = #{{.*}}, scope = #{{.*}}, name = "instruction_loc"
+; CHECK-DAG: #[[CALLEE:.+]] = #llvm.di_subprogram<id = distinct[{{.*}}]<>, compileUnit = #{{.*}}, scope = #{{.*}}, name = "callee"
; CHECK-DAG: #[[FILE_LOC]] = loc(fused<#[[SP]]>[#[[RAW_FILE_LOC]]])
; CHECK-DAG: #[[RAW_CALLEE_LOC:.+]] = loc("debug-info.ll":7:4)
; CHECK-DAG: #[[CALLEE_LOC:.+]] = loc(fused<#[[CALLEE]]>[#[[RAW_CALLEE_LOC]]])
@@ -63,7 +63,7 @@ define i32 @lexical_block(i32 %arg1) {
ret i32 %2
}
; CHECK: #[[FILE:.+]] = #llvm.di_file<"debug-info.ll" in "/">
-; CHECK: #[[SP:.+]] = #llvm.di_subprogram<compileUnit =
+; CHECK: #[[SP:.+]] = #llvm.di_subprogram<id = distinct[{{.*}}]<>, compileUnit =
; CHECK: #[[LB0:.+]] = #llvm.di_lexical_block<scope = #[[SP]]>
; CHECK: #[[LB1:.+]] = #llvm.di_lexical_block<scope = #[[SP]], file = #[[FILE]], line = 2, column = 2>
; CHECK: #[[LOC0]] = loc(fused<#[[LB0]]>[{{.*}}])
@@ -93,7 +93,7 @@ define i32 @lexical_block_file(i32 %arg1) {
ret i32 %2
}
; CHECK: #[[FILE:.+]] = #llvm.di_file<"debug-info.ll" in "/">
-; CHECK: #[[SP:.+]] = #llvm.di_subprogram<compileUnit =
+; CHECK: #[[SP:.+]] = #llvm.di_subprogram<id = distinct[{{.*}}]<>, compileUnit =
; CHECK: #[[LB0:.+]] = #llvm.di_lexical_block_file<scope = #[[SP]], discriminator = 0>
; CHECK: #[[LB1:.+]] = #llvm.di_lexical_block_file<scope = #[[SP]], file = #[[FILE]], discriminator = 0>
; CHECK: #[[LOC0]] = loc(fused<#[[LB0]]>[
@@ -197,10 +197,10 @@ define void @composite_type() !dbg !3 {
; // -----
; CHECK-DAG: #[[FILE:.+]] = #llvm.di_file<"debug-info.ll" in "/">
-; CHECK-DAG: #[[CU:.+]] = #llvm.di_compile_unit<sourceLanguage = DW_LANG_C, file = #[[FILE]], isOptimized = false, emissionKind = None>
+; CHECK-DAG: #[[CU:.+]] = #llvm.di_compile_unit<id = distinct[0]<>, sourceLanguage = DW_LANG_C, file = #[[FILE]], isOptimized = false, emissionKind = None>
; Verify an empty subroutine types list is supported.
; CHECK-DAG: #[[SP_TYPE:.+]] = #llvm.di_subroutine_type<callingConvention = DW_CC_normal>
-; CHECK-DAG: #[[SP:.+]] = #llvm.di_subprogram<compileUnit = #[[CU]], scope = #[[FILE]], name = "subprogram", linkageName = "subprogram", file = #[[FILE]], line = 42, scopeLine = 42, subprogramFlags = Definition, type = #[[SP_TYPE]]>
+; CHECK-DAG: #[[SP:.+]] = #llvm.di_subprogram<id = distinct[{{.*}}]<>, compileUnit = #[[CU]], scope = #[[FILE]], name = "subprogram", linkageName = "subprogram", file = #[[FILE]], line = 42, scopeLine = 42, subprogramFlags = Definition, type = #[[SP_TYPE]]>
define void @subprogram() !dbg !3 {
ret void
@@ -224,7 +224,7 @@ define void @func_loc() !dbg !3 {
}
; CHECK-DAG: #[[NAME_LOC:.+]] = loc("func_loc")
; CHECK-DAG: #[[FILE_LOC:.+]] = loc("debug-info.ll":42:0)
-; CHECK-DAG: #[[SP:.+]] = #llvm.di_subprogram<compileUnit = #{{.*}}, scope = #{{.*}}, name = "func_loc", file = #{{.*}}, line = 42, subprogramFlags = Definition>
+; CHECK-DAG: #[[SP:.+]] = #llvm.di_subprogram<id = distinct[{{.*}}]<>, compileUnit = #{{.*}}, scope = #{{.*}}, name = "func_loc", file = #{{.*}}, line = 42, subprogramFlags = Definition>
; CHECK: loc(fused<#[[SP]]>[#[[NAME_LOC]], #[[FILE_LOC]]]
@@ -300,7 +300,7 @@ define void @class_method() {
; CHECK: #[[COMP:.+]] = #llvm.di_composite_type<tag = DW_TAG_class_type, name = "class_name", file = #{{.*}}, line = 42, flags = "TypePassByReference|NonTrivial">
; CHECK: #[[COMP_PTR:.+]] = #llvm.di_derived_type<tag = DW_TAG_pointer_type, baseType = #[[COMP]], sizeInBits = 64>
; CHECK: #[[SP_TYPE:.+]] = #llvm.di_subroutine_type<types = #{{.*}}, #[[COMP_PTR]]>
-; CHECK: #[[SP:.+]] = #llvm.di_subprogram<compileUnit = #{{.*}}, scope = #[[COMP]], name = "class_method", file = #{{.*}}, subprogramFlags = Definition, type = #[[SP_TYPE]]>
+; CHECK: #[[SP:.+]] = #llvm.di_subprogram<id = distinct[{{.*}}]<>, compileUnit = #{{.*}}, scope = #[[COMP]], name = "class_method", file = #{{.*}}, subprogramFlags = Definition, type = #[[SP_TYPE]]>
; CHECK: #[[LOC]] = loc(fused<#[[SP]]>
!llvm.dbg.cu = !{!1}
@@ -485,7 +485,7 @@ declare void @llvm.dbg.value(metadata, metadata, metadata)
; // -----
; CHECK-DAG: #[[NAMESPACE:.+]] = #llvm.di_namespace<name = "std", exportSymbols = false>
-; CHECK-DAG: #[[SUBPROGRAM:.+]] = #llvm.di_subprogram<compileUnit = #{{.*}}, scope = #[[NAMESPACE]], name = "namespace"
+; CHECK-DAG: #[[SUBPROGRAM:.+]] = #llvm.di_subprogram<id = distinct[{{.*}}]<>, compileUnit = #{{.*}}, scope = #[[NAMESPACE]], name = "namespace"
define void @namespace(ptr %arg) {
call void @llvm.dbg.value(metadata ptr %arg, metadata !7, metadata !DIExpression()), !dbg !9
@@ -506,7 +506,7 @@ declare void @llvm.dbg.value(metadata, metadata, metadata)
; // -----
-; CHECK-DAG: #[[SUBPROGRAM:.+]] = #llvm.di_subprogram<compileUnit = #{{.*}}, scope = #{{.*}}, name = "noname_variable"
+; CHECK-DAG: #[[SUBPROGRAM:.+]] = #llvm.di_subprogram<id = distinct[{{.*}}]<>, compileUnit = #{{.*}}, scope = #{{.*}}, name = "noname_variable"
; CHECK-DAG: #[[LOCAL_VARIABLE:.+]] = #llvm.di_local_variable<scope = #[[SUBPROGRAM]]>
define void @noname_variable(ptr %arg) {
@@ -527,7 +527,7 @@ declare void @llvm.dbg.value(metadata, metadata, metadata)
; // -----
-; CHECK: #[[SUBPROGRAM:.*]] = #llvm.di_subprogram<compileUnit = #{{.*}}, scope = #{{.*}}, file = #{{.*}}, subprogramFlags = Definition>
+; CHECK: #[[SUBPROGRAM:.*]] = #llvm.di_subprogram<id = distinct[{{.*}}]<>, compileUnit = #{{.*}}, scope = #{{.*}}, file = #{{.*}}, subprogramFlags = Definition>
; CHECK: #[[FUNC_LOC:.*]] = loc(fused<#[[SUBPROGRAM]]>[{{.*}}])
define void @noname_subprogram(ptr %arg) !dbg !8 {
ret void
@@ -547,7 +547,7 @@ define void @noname_subprogram(ptr %arg) !dbg !8 {
; CHECK-SAME: configMacros = "bar", includePath = "/",
; CHECK-SAME: apinotes = "/", line = 42, isDecl = true
; CHECK-SAME: >
-; CHECK: #[[SUBPROGRAM:.+]] = #llvm.di_subprogram<compileUnit = #{{.*}}, scope = #[[MODULE]], name = "func_in_module"
+; CHECK: #[[SUBPROGRAM:.+]] = #llvm.di_subprogram<id = distinct[{{.*}}]<>, compileUnit = #{{.*}}, scope = #[[MODULE]], name = "func_in_module"
define void @func_in_module(ptr %arg) !dbg !8 {
ret void
@@ -589,3 +589,41 @@ declare void @llvm.dbg.value(metadata, metadata, metadata)
!5 = !DICompositeType(tag: DW_TAG_array_type, size: 42, baseType: !6)
!6 = !DIDerivedType(tag: DW_TAG_pointer_type, baseType: !5)
!7 = !DILocation(line: 0, scope: !3)
+
+; // -----
+
+; Verifies that imported compile units respect the distinctness of the input.
+; CHECK-LABEL: @distinct_cu_func0
+define void @distinct_cu_func0() !dbg !4 {
+ ret void
+}
+
+define void @distinct_cu_func1() !dbg !5 {
+ ret void
+}
+
+!llvm.dbg.cu = !{!0, !1}
+!llvm.module.flags = !{!3}
+
+; CHECK-COUNT-2: #llvm.di_compile_unit<id = distinct[{{[0-9]+}}]<>
+
+!0 = distinct !DICompileUnit(language: DW_LANG_C_plus_plus_14, file: !2, producer: "clang")
+!1 = distinct !DICompileUnit(language: DW_LANG_C_plus_plus_14, file: !2, producer: "clang")
+!2 = !DIFile(filename: "other.cpp", directory: "/")
+!3 = !{i32 2, !"Debug Info Version", i32 3}
+!4 = distinct !DISubprogram(name: "func", linkageName: "func", scope: !6, file: !6, line: 1, scopeLine: 1, flags: DIFlagArtificial, spFlags: DISPFlagDefinition, unit: !0)
+!5 = distinct !DISubprogram(name: "func", linkageName: "func", scope: !6, file: !6, line: 1, scopeLine: 1, flags: DIFlagArtificial, spFlags: DISPFlagDefinition, unit: !1)
+!6 = !DIFile(filename: "file.hpp", directory: "/")
+
+; // -----
+
+; CHECK-LABEL: @declaration
+declare !dbg !1 void @declaration()
+
+; CHECK: #di_subprogram = #llvm.di_subprogram<
+; CHECK-NOT: id = distinct
+
+!llvm.module.flags = !{!0}
+!0 = !{i32 2, !"Debug Info Version", i32 3}
+!1 = !DISubprogram(name: "declaration", scope: !2, file: !2, flags: DIFlagPrototyped, spFlags: DISPFlagOptimized)
+!2 = !DIFile(filename: "debug-info.ll", directory: "/")
diff --git a/mlir/test/Target/LLVMIR/Import/global-variables.ll b/mlir/test/Target/LLVMIR/Import/global-variables.ll
index ab930084323c..9d9734045988 100644
--- a/mlir/test/Target/LLVMIR/Import/global-variables.ll
+++ b/mlir/test/Target/LLVMIR/Import/global-variables.ll
@@ -249,8 +249,8 @@ define void @bar() {
; CHECK-DAG: #[[TYPE:.*]] = #llvm.di_basic_type<tag = DW_TAG_base_type, name = "int", sizeInBits = 32, encoding = DW_ATE_signed>
; CHECK-DAG: #[[FILE:.*]] = #llvm.di_file<"source.c" in "/path/to/file">
-; CHECK-DAG: #[[CU:.*]] = #llvm.di_compile_unit<sourceLanguage = DW_LANG_C99, file = #[[FILE]], isOptimized = false, emissionKind = None>
-; CHECK-DAG: #[[SPROG:.*]] = #llvm.di_subprogram<scope = #[[CU]], name = "foo", file = #[[FILE]], line = 5, subprogramFlags = Definition>
+; CHECK-DAG: #[[CU:.*]] = #llvm.di_compile_unit<id = distinct[{{.*}}]<>, sourceLanguage = DW_LANG_C99, file = #[[FILE]], isOptimized = false, emissionKind = None>
+; CHECK-DAG: #[[SPROG:.*]] = #llvm.di_subprogram<id = distinct[{{.*}}]<>, scope = #[[CU]], name = "foo", file = #[[FILE]], line = 5, subprogramFlags = Definition>
; CHECK-DAG: #[[GVAR0:.*]] = #llvm.di_global_variable<scope = #[[SPROG]], name = "foo", linkageName = "foo", file = #[[FILE]], line = 7, type = #[[TYPE]], isLocalToUnit = true>
; CHECK-DAG: #[[GVAR1:.*]] = #llvm.di_global_variable<scope = #[[SPROG]], name = "bar", linkageName = "bar", file = #[[FILE]], line = 8, type = #[[TYPE]], isLocalToUnit = true>
; CHECK-DAG: #[[EXPR0:.*]] = #llvm.di_global_variable_expression<var = #[[GVAR0]], expr = <[DW_OP_LLVM_fragment(0, 16)]>>
diff --git a/mlir/test/Target/LLVMIR/llvmir-debug.mlir b/mlir/test/Target/LLVMIR/llvmir-debug.mlir
index 1133f57d6b61..476ed165887c 100644
--- a/mlir/test/Target/LLVMIR/llvmir-debug.mlir
+++ b/mlir/test/Target/LLVMIR/llvmir-debug.mlir
@@ -35,8 +35,8 @@ llvm.func @func_no_debug() {
tag = DW_TAG_pointer_type, name = "named", baseType = #si32
>
#cu = #llvm.di_compile_unit<
- sourceLanguage = DW_LANG_C, file = #file, producer = "MLIR",
- isOptimized = true, emissionKind = Full
+ id = distinct[0]<>, sourceLanguage = DW_LANG_C, file = #file,
+ producer = "MLIR", isOptimized = true, emissionKind = Full
>
#composite = #llvm.di_composite_type<
tag = DW_TAG_structure_type, name = "composite", file = #file,
@@ -172,8 +172,8 @@ llvm.func @empty_types() {
#di_basic_type = #llvm.di_basic_type<tag = DW_TAG_base_type, name = "int", sizeInBits = 32, encoding = DW_ATE_signed>
#di_file = #llvm.di_file<"foo.mlir" in "/test/">
#di_compile_unit = #llvm.di_compile_unit<
- sourceLanguage = DW_LANG_C, file = #di_file, producer = "MLIR",
- isOptimized = true, emissionKind = Full
+ id = distinct[0]<>, sourceLanguage = DW_LANG_C, file = #di_file,
+ producer = "MLIR", isOptimized = true, emissionKind = Full
>
#di_subprogram = #llvm.di_subprogram<
compileUnit = #di_compile_unit, scope = #di_file, name = "outer_func",
@@ -216,8 +216,8 @@ llvm.func @func_with_inlined_dbg_value(%arg0: i32) -> (i32) {
#di_basic_type = #llvm.di_basic_type<tag = DW_TAG_base_type, name = "int", sizeInBits = 32, encoding = DW_ATE_signed>
#di_file = #llvm.di_file<"foo.mlir" in "/test/">
#di_compile_unit = #llvm.di_compile_unit<
- sourceLanguage = DW_LANG_C, file = #di_file, producer = "MLIR",
- isOptimized = true, emissionKind = Full
+ id = distinct[0]<>, sourceLanguage = DW_LANG_C, file = #di_file,
+ producer = "MLIR", isOptimized = true, emissionKind = Full
>
#di_subprogram = #llvm.di_subprogram<
compileUnit = #di_compile_unit, scope = #di_file, name = "func",
@@ -245,8 +245,8 @@ llvm.func @func_without_subprogram(%0 : i32) {
#di_file = #llvm.di_file<"foo.mlir" in "/test/">
#di_compile_unit = #llvm.di_compile_unit<
- sourceLanguage = DW_LANG_C, file = #di_file, producer = "MLIR",
- isOptimized = true, emissionKind = Full
+ id = distinct[0]<>, sourceLanguage = DW_LANG_C, file = #di_file,
+ producer = "MLIR", isOptimized = true, emissionKind = Full
>
#di_subprogram = #llvm.di_subprogram<
compileUnit = #di_compile_unit, scope = #di_file, name = "outer_func",
@@ -285,7 +285,7 @@ llvm.func @dbg_intrinsics_with_no_location(%arg0: i32) -> (i32) {
// CHECK-DAG: ![[GVALS]] = !{![[GEXPR0]], ![[GEXPR1]]}
#di_file_2 = #llvm.di_file<"not" in "existence">
-#di_compile_unit_2 = #llvm.di_compile_unit<sourceLanguage = DW_LANG_C, file = #di_file_2, producer = "MLIR", isOptimized = true, emissionKind = Full>
+#di_compile_unit_2 = #llvm.di_compile_unit<id = distinct[0]<>, sourceLanguage = DW_LANG_C, file = #di_file_2, producer = "MLIR", isOptimized = true, emissionKind = Full>
#di_basic_type_2 = #llvm.di_basic_type<tag = DW_TAG_base_type, name = "uint64_t", sizeInBits = 64, encoding = DW_ATE_unsigned>
llvm.mlir.global external @global_with_expr_1() {addr_space = 0 : i32, dbg_expr = #llvm.di_global_variable_expression<var = <scope = #di_compile_unit_2, name = "global_with_expr_1", linkageName = "global_with_expr_1", file = #di_file_2, line = 370, type = #di_basic_type_2>, expr = <>>} : i64
llvm.mlir.global external @global_with_expr_2() {addr_space = 0 : i32, dbg_expr = #llvm.di_global_variable_expression<var = <scope = #di_compile_unit_2, name = "global_with_expr_2", linkageName = "global_with_expr_2", file = #di_file_2, line = 371, type = #di_basic_type_2, isLocalToUnit = true, isDefined = true, alignInBits = 8>, expr = <>>} : i64
@@ -315,9 +315,9 @@ llvm.mlir.global external constant @".str.1"() {addr_space = 0 : i32, dbg_expr =
// CHECK-DAG: ![[FILE2:.*]] = !DIFile(filename: "foo2.mlir", directory: "/test/")
#di_file_2 = #llvm.di_file<"foo2.mlir" in "/test/">
// CHECK-DAG: ![[SCOPE2:.*]] = distinct !DICompileUnit(language: DW_LANG_C, file: ![[FILE2]], producer: "MLIR", isOptimized: true, runtimeVersion: 0, emissionKind: DebugDirectivesOnly)
-#di_compile_unit_1 = #llvm.di_compile_unit<sourceLanguage = DW_LANG_C, file = #di_file_1, producer = "MLIR", isOptimized = true, emissionKind = LineTablesOnly>
+#di_compile_unit_1 = #llvm.di_compile_unit<id = distinct[0]<>, sourceLanguage = DW_LANG_C, file = #di_file_1, producer = "MLIR", isOptimized = true, emissionKind = LineTablesOnly>
// CHECK-DAG: ![[SCOPE1:.*]] = distinct !DICompileUnit(language: DW_LANG_C, file: ![[FILE1]], producer: "MLIR", isOptimized: true, runtimeVersion: 0, emissionKind: LineTablesOnly)
-#di_compile_unit_2 = #llvm.di_compile_unit<sourceLanguage = DW_LANG_C, file = #di_file_2, producer = "MLIR", isOptimized = true, emissionKind = DebugDirectivesOnly>
+#di_compile_unit_2 = #llvm.di_compile_unit<id = distinct[1]<>, sourceLanguage = DW_LANG_C, file = #di_file_2, producer = "MLIR", isOptimized = true, emissionKind = DebugDirectivesOnly>
#di_subprogram_1 = #llvm.di_subprogram<compileUnit = #di_compile_unit_1, scope = #di_file_1, name = "func1", file = #di_file_1, subprogramFlags = "Definition|Optimized">
#di_subprogram_2 = #llvm.di_subprogram<compileUnit = #di_compile_unit_2, scope = #di_file_2, name = "func2", file = #di_file_2, subprogramFlags = "Definition|Optimized">
diff --git a/mlir/test/Target/LLVMIR/loop-metadata.mlir b/mlir/test/Target/LLVMIR/loop-metadata.mlir
index a9aeebfa4d82..2fe4a994aeb6 100644
--- a/mlir/test/Target/LLVMIR/loop-metadata.mlir
+++ b/mlir/test/Target/LLVMIR/loop-metadata.mlir
@@ -297,7 +297,7 @@ llvm.func @loopOptions(%arg1 : i32, %arg2 : i32) {
#loc1 = loc("loop-metadata.mlir":42:4)
#loc2 = loc("loop-metadata.mlir":52:4)
-#di_compile_unit = #llvm.di_compile_unit<sourceLanguage = DW_LANG_C, file = #di_file, isOptimized = false, emissionKind = None>
+#di_compile_unit = #llvm.di_compile_unit<id = distinct[0]<>, sourceLanguage = DW_LANG_C, file = #di_file, isOptimized = false, emissionKind = None>
#di_subprogram = #llvm.di_subprogram<compileUnit = #di_compile_unit, scope = #di_file, name = "loop_locs", file = #di_file, subprogramFlags = Definition>
#start_loc_fused = loc(fused<#di_subprogram>[#loc1])
diff --git a/mlir/test/Target/SPIRV/atomic-ops.mlir b/mlir/test/Target/SPIRV/atomic-ops.mlir
index 594c6faef923..cb7d9626e6c6 100644
--- a/mlir/test/Target/SPIRV/atomic-ops.mlir
+++ b/mlir/test/Target/SPIRV/atomic-ops.mlir
@@ -3,41 +3,41 @@
spirv.module Logical GLSL450 requires #spirv.vce<v1.0, [Shader], []> {
// CHECK-LABEL: @test_int_atomics
spirv.func @test_int_atomics(%ptr: !spirv.ptr<i32, Workgroup>, %value: i32, %comparator: i32) -> i32 "None" {
- // CHECK: spirv.AtomicCompareExchangeWeak "Workgroup" "Release" "Acquire" %{{.*}}, %{{.*}}, %{{.*}} : !spirv.ptr<i32, Workgroup>
- %0 = spirv.AtomicCompareExchangeWeak "Workgroup" "Release" "Acquire" %ptr, %value, %comparator: !spirv.ptr<i32, Workgroup>
- // CHECK: spirv.AtomicAnd "Device" "None" %{{.*}}, %{{.*}} : !spirv.ptr<i32, Workgroup>
- %1 = spirv.AtomicAnd "Device" "None" %ptr, %value : !spirv.ptr<i32, Workgroup>
- // CHECK: spirv.AtomicIAdd "Workgroup" "Acquire" %{{.*}}, %{{.*}} : !spirv.ptr<i32, Workgroup>
- %2 = spirv.AtomicIAdd "Workgroup" "Acquire" %ptr, %value : !spirv.ptr<i32, Workgroup>
- // CHECK: spirv.AtomicIDecrement "Workgroup" "Acquire" %{{.*}} : !spirv.ptr<i32, Workgroup>
- %3 = spirv.AtomicIDecrement "Workgroup" "Acquire" %ptr : !spirv.ptr<i32, Workgroup>
- // CHECK: spirv.AtomicIIncrement "Device" "Release" %{{.*}} : !spirv.ptr<i32, Workgroup>
- %4 = spirv.AtomicIIncrement "Device" "Release" %ptr : !spirv.ptr<i32, Workgroup>
- // CHECK: spirv.AtomicISub "Workgroup" "Acquire" %{{.*}}, %{{.*}} : !spirv.ptr<i32, Workgroup>
- %5 = spirv.AtomicISub "Workgroup" "Acquire" %ptr, %value : !spirv.ptr<i32, Workgroup>
- // CHECK: spirv.AtomicOr "Workgroup" "AcquireRelease" %{{.*}}, %{{.*}} : !spirv.ptr<i32, Workgroup>
- %6 = spirv.AtomicOr "Workgroup" "AcquireRelease" %ptr, %value : !spirv.ptr<i32, Workgroup>
- // CHECK: spirv.AtomicSMax "Subgroup" "None" %{{.*}}, %{{.*}} : !spirv.ptr<i32, Workgroup>
- %7 = spirv.AtomicSMax "Subgroup" "None" %ptr, %value : !spirv.ptr<i32, Workgroup>
- // CHECK: spirv.AtomicSMin "Device" "Release" %{{.*}}, %{{.*}} : !spirv.ptr<i32, Workgroup>
- %8 = spirv.AtomicSMin "Device" "Release" %ptr, %value : !spirv.ptr<i32, Workgroup>
- // CHECK: spirv.AtomicUMax "Subgroup" "None" %{{.*}}, %{{.*}} : !spirv.ptr<i32, Workgroup>
- %9 = spirv.AtomicUMax "Subgroup" "None" %ptr, %value : !spirv.ptr<i32, Workgroup>
- // CHECK: spirv.AtomicUMin "Device" "Release" %{{.*}}, %{{.*}} : !spirv.ptr<i32, Workgroup>
- %10 = spirv.AtomicUMin "Device" "Release" %ptr, %value : !spirv.ptr<i32, Workgroup>
- // CHECK: spirv.AtomicXor "Workgroup" "AcquireRelease" %{{.*}}, %{{.*}} : !spirv.ptr<i32, Workgroup>
- %11 = spirv.AtomicXor "Workgroup" "AcquireRelease" %ptr, %value : !spirv.ptr<i32, Workgroup>
- // CHECK: spirv.AtomicCompareExchange "Workgroup" "Release" "Acquire" %{{.*}}, %{{.*}}, %{{.*}} : !spirv.ptr<i32, Workgroup>
- %12 = spirv.AtomicCompareExchange "Workgroup" "Release" "Acquire" %ptr, %value, %comparator: !spirv.ptr<i32, Workgroup>
- // CHECK: spirv.AtomicExchange "Workgroup" "Release" %{{.*}}, %{{.*}} : !spirv.ptr<i32, Workgroup>
- %13 = spirv.AtomicExchange "Workgroup" "Release" %ptr, %value: !spirv.ptr<i32, Workgroup>
+ // CHECK: spirv.AtomicCompareExchangeWeak <Workgroup> <Release> <Acquire> %{{.*}}, %{{.*}}, %{{.*}} : !spirv.ptr<i32, Workgroup>
+ %0 = spirv.AtomicCompareExchangeWeak <Workgroup> <Release> <Acquire> %ptr, %value, %comparator: !spirv.ptr<i32, Workgroup>
+ // CHECK: spirv.AtomicAnd <Device> <None> %{{.*}}, %{{.*}} : !spirv.ptr<i32, Workgroup>
+ %1 = spirv.AtomicAnd <Device> <None> %ptr, %value : !spirv.ptr<i32, Workgroup>
+ // CHECK: spirv.AtomicIAdd <Workgroup> <Acquire> %{{.*}}, %{{.*}} : !spirv.ptr<i32, Workgroup>
+ %2 = spirv.AtomicIAdd <Workgroup> <Acquire> %ptr, %value : !spirv.ptr<i32, Workgroup>
+ // CHECK: spirv.AtomicIDecrement <Workgroup> <Acquire> %{{.*}} : !spirv.ptr<i32, Workgroup>
+ %3 = spirv.AtomicIDecrement <Workgroup> <Acquire> %ptr : !spirv.ptr<i32, Workgroup>
+ // CHECK: spirv.AtomicIIncrement <Device> <Release> %{{.*}} : !spirv.ptr<i32, Workgroup>
+ %4 = spirv.AtomicIIncrement <Device> <Release> %ptr : !spirv.ptr<i32, Workgroup>
+ // CHECK: spirv.AtomicISub <Workgroup> <Acquire> %{{.*}}, %{{.*}} : !spirv.ptr<i32, Workgroup>
+ %5 = spirv.AtomicISub <Workgroup> <Acquire> %ptr, %value : !spirv.ptr<i32, Workgroup>
+ // CHECK: spirv.AtomicOr <Workgroup> <AcquireRelease> %{{.*}}, %{{.*}} : !spirv.ptr<i32, Workgroup>
+ %6 = spirv.AtomicOr <Workgroup> <AcquireRelease> %ptr, %value : !spirv.ptr<i32, Workgroup>
+ // CHECK: spirv.AtomicSMax <Subgroup> <None> %{{.*}}, %{{.*}} : !spirv.ptr<i32, Workgroup>
+ %7 = spirv.AtomicSMax <Subgroup> <None> %ptr, %value : !spirv.ptr<i32, Workgroup>
+ // CHECK: spirv.AtomicSMin <Device> <Release> %{{.*}}, %{{.*}} : !spirv.ptr<i32, Workgroup>
+ %8 = spirv.AtomicSMin <Device> <Release> %ptr, %value : !spirv.ptr<i32, Workgroup>
+ // CHECK: spirv.AtomicUMax <Subgroup> <None> %{{.*}}, %{{.*}} : !spirv.ptr<i32, Workgroup>
+ %9 = spirv.AtomicUMax <Subgroup> <None> %ptr, %value : !spirv.ptr<i32, Workgroup>
+ // CHECK: spirv.AtomicUMin <Device> <Release> %{{.*}}, %{{.*}} : !spirv.ptr<i32, Workgroup>
+ %10 = spirv.AtomicUMin <Device> <Release> %ptr, %value : !spirv.ptr<i32, Workgroup>
+ // CHECK: spirv.AtomicXor <Workgroup> <AcquireRelease> %{{.*}}, %{{.*}} : !spirv.ptr<i32, Workgroup>
+ %11 = spirv.AtomicXor <Workgroup> <AcquireRelease> %ptr, %value : !spirv.ptr<i32, Workgroup>
+ // CHECK: spirv.AtomicCompareExchange <Workgroup> <Release> <Acquire> %{{.*}}, %{{.*}}, %{{.*}} : !spirv.ptr<i32, Workgroup>
+ %12 = spirv.AtomicCompareExchange <Workgroup> <Release> <Acquire> %ptr, %value, %comparator: !spirv.ptr<i32, Workgroup>
+ // CHECK: spirv.AtomicExchange <Workgroup> <Release> %{{.*}}, %{{.*}} : !spirv.ptr<i32, Workgroup>
+ %13 = spirv.AtomicExchange <Workgroup> <Release> %ptr, %value: !spirv.ptr<i32, Workgroup>
spirv.ReturnValue %0: i32
}
// CHECK-LABEL: @test_float_atomics
spirv.func @test_float_atomics(%ptr: !spirv.ptr<f32, Workgroup>, %value: f32) -> f32 "None" {
- // CHECK: spirv.EXT.AtomicFAdd "Workgroup" "Acquire" %{{.*}}, %{{.*}} : !spirv.ptr<f32, Workgroup>
- %0 = spirv.EXT.AtomicFAdd "Workgroup" "Acquire" %ptr, %value : !spirv.ptr<f32, Workgroup>
+ // CHECK: spirv.EXT.AtomicFAdd <Workgroup> <Acquire> %{{.*}}, %{{.*}} : !spirv.ptr<f32, Workgroup>
+ %0 = spirv.EXT.AtomicFAdd <Workgroup> <Acquire> %ptr, %value : !spirv.ptr<f32, Workgroup>
spirv.ReturnValue %0: f32
}
}
diff --git a/mlir/test/Target/SPIRV/cast-ops.mlir b/mlir/test/Target/SPIRV/cast-ops.mlir
index 7fe0969497c3..ede0bf30511e 100644
--- a/mlir/test/Target/SPIRV/cast-ops.mlir
+++ b/mlir/test/Target/SPIRV/cast-ops.mlir
@@ -115,7 +115,7 @@ spirv.module Physical64 OpenCL requires #spirv.vce<v1.0, [Kernel, Addresses], []
// -----
spirv.module PhysicalStorageBuffer64 OpenCL requires #spirv.vce<v1.0, [Kernel, Addresses, PhysicalStorageBufferAddresses], []> {
- spirv.func @covert_ptr_to_u_PhysicalStorageBuffer(%arg0 : !spirv.ptr<i32, PhysicalStorageBuffer>) "None" {
+ spirv.func @covert_ptr_to_u_PhysicalStorageBuffer(%arg0 : !spirv.ptr<i32, PhysicalStorageBuffer> { spirv.decoration = #spirv.decoration<Aliased>} ) "None" {
// CHECK: {{%.*}} = spirv.ConvertPtrToU {{%.*}} : !spirv.ptr<i32, PhysicalStorageBuffer> to i32
%0 = spirv.ConvertPtrToU %arg0 : !spirv.ptr<i32, PhysicalStorageBuffer> to i32
spirv.Return
diff --git a/mlir/test/Target/SPIRV/debug.mlir b/mlir/test/Target/SPIRV/debug.mlir
index 21f90309cde3..50c83d876bef 100644
--- a/mlir/test/Target/SPIRV/debug.mlir
+++ b/mlir/test/Target/SPIRV/debug.mlir
@@ -13,7 +13,7 @@ spirv.module Logical GLSL450 requires #spirv.vce<v1.0, [Shader], []> {
spirv.func @atomic(%ptr: !spirv.ptr<i32, Workgroup>, %value: i32, %comparator: i32) "None" {
// CHECK: loc({{".*debug.mlir"}}:16:10)
- %1 = spirv.AtomicAnd "Device" "None" %ptr, %value : !spirv.ptr<i32, Workgroup>
+ %1 = spirv.AtomicAnd <Device> <None> %ptr, %value : !spirv.ptr<i32, Workgroup>
spirv.Return
}
diff --git a/mlir/test/Target/SPIRV/function-decorations.mlir b/mlir/test/Target/SPIRV/function-decorations.mlir
index b0f6705df9ca..117d4ca628f7 100644
--- a/mlir/test/Target/SPIRV/function-decorations.mlir
+++ b/mlir/test/Target/SPIRV/function-decorations.mlir
@@ -1,4 +1,4 @@
-// RUN: mlir-translate -no-implicit-module -test-spirv-roundtrip %s | FileCheck %s
+// RUN: mlir-translate -no-implicit-module -test-spirv-roundtrip -split-input-file -verify-diagnostics %s | FileCheck %s
spirv.module Logical GLSL450 requires #spirv.vce<v1.0, [Shader, Linkage], []> {
spirv.func @linkage_attr_test_kernel() "DontInline" attributes {} {
@@ -17,3 +17,72 @@ spirv.module Logical GLSL450 requires #spirv.vce<v1.0, [Shader, Linkage], []> {
}
spirv.func @inside.func() -> () "Pure" attributes {} {spirv.Return}
}
+
+// -----
+
+spirv.module PhysicalStorageBuffer64 GLSL450 requires #spirv.vce<v1.0,
+ [Shader, PhysicalStorageBufferAddresses], [SPV_KHR_physical_storage_buffer]> {
+ // CHECK-LABEL: spirv.func @func_arg_decoration_aliased(%{{.*}}: !spirv.ptr<i32, PhysicalStorageBuffer> {spirv.decoration = #spirv.decoration<Aliased>})
+ spirv.func @func_arg_decoration_aliased(
+ %arg0 : !spirv.ptr<i32, PhysicalStorageBuffer> { spirv.decoration = #spirv.decoration<Aliased> }
+ ) "None" {
+ spirv.Return
+ }
+}
+
+// -----
+
+spirv.module PhysicalStorageBuffer64 GLSL450 requires #spirv.vce<v1.0,
+ [Shader, PhysicalStorageBufferAddresses], [SPV_KHR_physical_storage_buffer]> {
+ // CHECK-LABEL: spirv.func @func_arg_decoration_restrict(%{{.*}}: !spirv.ptr<i32, PhysicalStorageBuffer> {spirv.decoration = #spirv.decoration<Restrict>})
+ spirv.func @func_arg_decoration_restrict(
+ %arg0 : !spirv.ptr<i32,PhysicalStorageBuffer> { spirv.decoration = #spirv.decoration<Restrict> }
+ ) "None" {
+ spirv.Return
+ }
+}
+
+// -----
+
+spirv.module PhysicalStorageBuffer64 GLSL450 requires #spirv.vce<v1.0,
+ [Shader, PhysicalStorageBufferAddresses], [SPV_KHR_physical_storage_buffer]> {
+ // CHECK-LABEL: spirv.func @func_arg_decoration_aliased_pointer(%{{.*}}: !spirv.ptr<!spirv.ptr<i32, PhysicalStorageBuffer>, Generic> {spirv.decoration = #spirv.decoration<AliasedPointer>})
+ spirv.func @func_arg_decoration_aliased_pointer(
+ %arg0 : !spirv.ptr<!spirv.ptr<i32,PhysicalStorageBuffer>, Generic> { spirv.decoration = #spirv.decoration<AliasedPointer> }
+ ) "None" {
+ spirv.Return
+ }
+}
+
+// -----
+
+spirv.module PhysicalStorageBuffer64 GLSL450 requires #spirv.vce<v1.0,
+ [Shader, PhysicalStorageBufferAddresses], [SPV_KHR_physical_storage_buffer]> {
+ // CHECK-LABEL: spirv.func @func_arg_decoration_restrict_pointer(%{{.*}}: !spirv.ptr<!spirv.ptr<i32, PhysicalStorageBuffer>, Generic> {spirv.decoration = #spirv.decoration<RestrictPointer>})
+ spirv.func @func_arg_decoration_restrict_pointer(
+ %arg0 : !spirv.ptr<!spirv.ptr<i32,PhysicalStorageBuffer>, Generic> { spirv.decoration = #spirv.decoration<RestrictPointer> }
+ ) "None" {
+ spirv.Return
+ }
+}
+
+// -----
+
+spirv.module PhysicalStorageBuffer64 GLSL450 requires #spirv.vce<v1.0,
+ [Shader, PhysicalStorageBufferAddresses], [SPV_KHR_physical_storage_buffer]> {
+ // CHECK-LABEL: spirv.func @fn1(%{{.*}}: i32, %{{.*}}: !spirv.ptr<i32, PhysicalStorageBuffer> {spirv.decoration = #spirv.decoration<Aliased>})
+ spirv.func @fn1(
+ %arg0: i32,
+ %arg1: !spirv.ptr<i32, PhysicalStorageBuffer> { spirv.decoration = #spirv.decoration<Aliased> }
+ ) "None" {
+ spirv.Return
+ }
+
+ // CHECK-LABEL: spirv.func @fn2(%{{.*}}: !spirv.ptr<i32, PhysicalStorageBuffer> {spirv.decoration = #spirv.decoration<Aliased>}, %{{.*}}: !spirv.ptr<i32, PhysicalStorageBuffer> {spirv.decoration = #spirv.decoration<Restrict>})
+ spirv.func @fn2(
+ %arg0: !spirv.ptr<i32, PhysicalStorageBuffer> { spirv.decoration = #spirv.decoration<Aliased> },
+ %arg1: !spirv.ptr<i32, PhysicalStorageBuffer> { spirv.decoration = #spirv.decoration<Restrict>}
+ ) "None" {
+ spirv.Return
+ }
+}
diff --git a/mlir/test/Transforms/canonicalize.mlir b/mlir/test/Transforms/canonicalize.mlir
index 47a19bb598c2..9b578e6c2631 100644
--- a/mlir/test/Transforms/canonicalize.mlir
+++ b/mlir/test/Transforms/canonicalize.mlir
@@ -1224,3 +1224,14 @@ func.func @clone_nested_region(%arg0: index, %arg1: index, %arg2: index) -> memr
// CHECK-NEXT: scf.yield %[[ALLOC3_2]]
// CHECK: memref.dealloc %[[ALLOC1]]
// CHECK-NEXT: return %[[ALLOC2]]
+
+// -----
+
+// CHECK-LABEL: func @test_materialize_failure
+func.func @test_materialize_failure() -> i64 {
+ %const = index.constant 1234
+ // Cannot materialize this castu's output constant.
+ // CHECK: index.castu
+ %u = index.castu %const : index to i64
+ return %u: i64
+}
diff --git a/mlir/test/python/dialects/memref.py b/mlir/test/python/dialects/memref.py
index 2e3cae671a9f..0c8a7ee282fe 100644
--- a/mlir/test/python/dialects/memref.py
+++ b/mlir/test/python/dialects/memref.py
@@ -3,6 +3,7 @@
from mlir.ir import *
import mlir.dialects.func as func
import mlir.dialects.memref as memref
+import mlir.extras.types as T
def run(f):
@@ -76,3 +77,14 @@ def testCustomBuidlers():
# CHECK: memref.load %[[ARG0]][%[[ARG1]], %[[ARG2]]]
print(module)
assert module.operation.verify()
+
+
+# CHECK-LABEL: TEST: testMemRefAttr
+@run
+def testMemRefAttr():
+ with Context() as ctx, Location.unknown(ctx):
+ module = Module.create()
+ with InsertionPoint(module.body):
+ memref.global_("objFifo_in0", T.memref(16, T.i32()))
+ # CHECK: memref.global @objFifo_in0 : memref<16xi32>
+ print(module)
diff --git a/mlir/unittests/Analysis/Presburger/BarvinokTest.cpp b/mlir/unittests/Analysis/Presburger/BarvinokTest.cpp
new file mode 100644
index 000000000000..b88baa6c6b48
--- /dev/null
+++ b/mlir/unittests/Analysis/Presburger/BarvinokTest.cpp
@@ -0,0 +1,48 @@
+#include "mlir/Analysis/Presburger/Barvinok.h"
+#include "./Utils.h"
+#include <gmock/gmock.h>
+#include <gtest/gtest.h>
+
+using namespace mlir;
+using namespace presburger;
+using namespace mlir::presburger::detail;
+
+/// The following cone is defined by its H-representation,
+/// built from 3 randomly generated vectors with 4 entries each.
+/// We check that the dual contains the same numbers,
+/// and then do the same in the reverse case.
+TEST(BarvinokTest, getDual) {
+ ConeH cone1 = defineHRep(4);
+ cone1.addInequality({1, 2, 3, 4, 0});
+ cone1.addInequality({3, 4, 2, 5, 0});
+ cone1.addInequality({6, 2, 6, 1, 0});
+
+ ConeV dual1 = getDual(cone1);
+
+ EXPECT_EQ_INT_MATRIX(
+ dual1, makeIntMatrix(3, 4, {{1, 2, 3, 4}, {3, 4, 2, 5}, {6, 2, 6, 1}}));
+
+ ConeV cone2 = makeIntMatrix(3, 4, {{3, 6, 1, 5}, {3, 1, 7, 2}, {9, 3, 2, 7}});
+
+ ConeH dual2 = getDual(cone2);
+
+ ConeH expected = defineHRep(4);
+ expected.addInequality({3, 6, 1, 5, 0});
+ expected.addInequality({3, 1, 7, 2, 0});
+ expected.addInequality({9, 3, 2, 7, 0});
+
+ EXPECT_TRUE(dual2.isEqual(expected));
+}
+
+/// We randomly generate an n x n matrix to use as a cone
+/// with n inequalities in n variables and check that
+/// its index equals its determinant.
+TEST(BarvinokTest, getIndex) {
+ ConeV cone = makeIntMatrix(3, 3, {{4, 2, 1}, {5, 2, 7}, {4, 1, 6}});
+ EXPECT_EQ(getIndex(cone), cone.determinant());
+
+ cone = makeIntMatrix(
+ 4, 4, {{4, 2, 5, 1}, {4, 1, 3, 6}, {8, 2, 5, 6}, {5, 2, 5, 7}});
+ EXPECT_EQ(getIndex(cone), cone.determinant());
+}
diff --git a/mlir/unittests/Analysis/Presburger/CMakeLists.txt b/mlir/unittests/Analysis/Presburger/CMakeLists.txt
index e37133354e53..c98668f63fa5 100644
--- a/mlir/unittests/Analysis/Presburger/CMakeLists.txt
+++ b/mlir/unittests/Analysis/Presburger/CMakeLists.txt
@@ -1,5 +1,7 @@
add_mlir_unittest(MLIRPresburgerTests
+ BarvinokTest.cpp
FractionTest.cpp
+ GeneratingFunctionTest.cpp
IntegerPolyhedronTest.cpp
IntegerRelationTest.cpp
LinearTransformTest.cpp
diff --git a/mlir/unittests/Analysis/Presburger/GeneratingFunctionTest.cpp b/mlir/unittests/Analysis/Presburger/GeneratingFunctionTest.cpp
new file mode 100644
index 000000000000..3fc68cddaad0
--- /dev/null
+++ b/mlir/unittests/Analysis/Presburger/GeneratingFunctionTest.cpp
@@ -0,0 +1,39 @@
+//===- GeneratingFunctionTest.cpp - Tests for GeneratingFunction ---------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "mlir/Analysis/Presburger/GeneratingFunction.h"
+#include "./Utils.h"
+#include <gmock/gmock.h>
+#include <gtest/gtest.h>
+
+using namespace mlir;
+using namespace presburger;
+using namespace mlir::presburger::detail;
+
+TEST(GeneratingFunctionTest, sum) {
+ GeneratingFunction gf1(2, {1, -1},
+ {makeFracMatrix(3, 2, {{1, 2}, {5, 7}, {2, 6}}),
+ makeFracMatrix(3, 2, {{5, 2}, {5, 3}, {7, 2}})},
+ {{{3, 6}, {7, 2}}, {{2, 8}, {6, 3}}});
+ GeneratingFunction gf2(2, {1, 1},
+ {makeFracMatrix(3, 2, {{6, 2}, {1, 4}, {2, 6}}),
+ makeFracMatrix(3, 2, {{3, 2}, {6, 9}, {2, 5}})},
+ {{{3, 7}, {5, 1}}, {{5, 2}, {6, 2}}});
+
+ GeneratingFunction sum = gf1 + gf2;
+ EXPECT_EQ_REPR_GENERATINGFUNCTION(
+ sum, GeneratingFunction(2, {1, -1, 1, 1},
+ {makeFracMatrix(3, 2, {{1, 2}, {5, 7}, {2, 6}}),
+ makeFracMatrix(3, 2, {{5, 2}, {5, 3}, {7, 2}}),
+ makeFracMatrix(3, 2, {{6, 2}, {1, 4}, {2, 6}}),
+ makeFracMatrix(3, 2, {{3, 2}, {6, 9}, {2, 5}})},
+ {{{3, 6}, {7, 2}},
+ {{2, 8}, {6, 3}},
+ {{3, 7}, {5, 1}},
+ {{5, 2}, {6, 2}}}));
+}
diff --git a/mlir/unittests/Analysis/Presburger/IntegerRelationTest.cpp b/mlir/unittests/Analysis/Presburger/IntegerRelationTest.cpp
index f390296da648..dff092b3204b 100644
--- a/mlir/unittests/Analysis/Presburger/IntegerRelationTest.cpp
+++ b/mlir/unittests/Analysis/Presburger/IntegerRelationTest.cpp
@@ -207,3 +207,247 @@ TEST(IntegerRelationTest, swapVar) {
EXPECT_TRUE(swappedSpace.getId(VarKind::Symbol, 1)
.isEqual(space.getId(VarKind::Domain, 1)));
}
+
+TEST(IntegerRelationTest, mergeAndAlignSymbols) {
+ IntegerRelation rel =
+ parseRelationFromSet("(x, y, z, a, b, c)[N, Q] : (a - x - y == 0, "
+ "x >= 0, N - b >= 0, y >= 0, Q - y >= 0)",
+ 3);
+ IntegerRelation otherRel = parseRelationFromSet(
+ "(x, y, z, a, b)[N, M, P] : (z - x - y == 0, x >= 0, N - x "
+ ">= 0, y >= 0, M - y >= 0, 2 * P - 3 * a + 2 * b == 0)",
+ 3);
+ PresburgerSpace space = PresburgerSpace::getRelationSpace(3, 3, 2, 0);
+ space.resetIds();
+
+ PresburgerSpace otherSpace = PresburgerSpace::getRelationSpace(3, 2, 3, 0);
+ otherSpace.resetIds();
+
+ // Attach identifiers.
+ int identifiers[7] = {0, 1, 2, 3, 4, 5, 6};
+ int otherIdentifiers[8] = {10, 11, 12, 13, 14, 15, 16, 17};
+
+ space.getId(VarKind::Domain, 0) = Identifier(&identifiers[0]);
+ space.getId(VarKind::Domain, 1) = Identifier(&identifiers[1]);
+ // Note the common identifier.
+ space.getId(VarKind::Domain, 2) = Identifier(&otherIdentifiers[2]);
+ space.getId(VarKind::Range, 0) = Identifier(&identifiers[2]);
+ space.getId(VarKind::Range, 1) = Identifier(&identifiers[3]);
+ space.getId(VarKind::Range, 2) = Identifier(&identifiers[4]);
+ space.getId(VarKind::Symbol, 0) = Identifier(&identifiers[5]);
+ space.getId(VarKind::Symbol, 1) = Identifier(&identifiers[6]);
+
+ otherSpace.getId(VarKind::Domain, 0) = Identifier(&otherIdentifiers[0]);
+ otherSpace.getId(VarKind::Domain, 1) = Identifier(&otherIdentifiers[1]);
+ otherSpace.getId(VarKind::Domain, 2) = Identifier(&otherIdentifiers[2]);
+ otherSpace.getId(VarKind::Range, 0) = Identifier(&otherIdentifiers[3]);
+ otherSpace.getId(VarKind::Range, 1) = Identifier(&otherIdentifiers[4]);
+ // Note the common identifier.
+ otherSpace.getId(VarKind::Symbol, 0) = Identifier(&identifiers[6]);
+ otherSpace.getId(VarKind::Symbol, 1) = Identifier(&otherIdentifiers[5]);
+ otherSpace.getId(VarKind::Symbol, 2) = Identifier(&otherIdentifiers[7]);
+
+ rel.setSpace(space);
+ otherRel.setSpace(otherSpace);
+ rel.mergeAndAlignSymbols(otherRel);
+
+ space = rel.getSpace();
+ otherSpace = otherRel.getSpace();
+
+ // Check if merge and align is successful.
+ // Check symbol var identifiers.
+ EXPECT_EQ(4u, space.getNumSymbolVars());
+ EXPECT_EQ(4u, otherSpace.getNumSymbolVars());
+ EXPECT_EQ(space.getId(VarKind::Symbol, 0), Identifier(&identifiers[5]));
+ EXPECT_EQ(space.getId(VarKind::Symbol, 1), Identifier(&identifiers[6]));
+ EXPECT_EQ(space.getId(VarKind::Symbol, 2), Identifier(&otherIdentifiers[5]));
+ EXPECT_EQ(space.getId(VarKind::Symbol, 3), Identifier(&otherIdentifiers[7]));
+ EXPECT_EQ(otherSpace.getId(VarKind::Symbol, 0), Identifier(&identifiers[5]));
+ EXPECT_EQ(otherSpace.getId(VarKind::Symbol, 1), Identifier(&identifiers[6]));
+ EXPECT_EQ(otherSpace.getId(VarKind::Symbol, 2),
+ Identifier(&otherIdentifiers[5]));
+ EXPECT_EQ(otherSpace.getId(VarKind::Symbol, 3),
+ Identifier(&otherIdentifiers[7]));
+ // Check that domain and range var identifiers are not affected.
+ EXPECT_EQ(3u, space.getNumDomainVars());
+ EXPECT_EQ(3u, space.getNumRangeVars());
+ EXPECT_EQ(space.getId(VarKind::Domain, 0), Identifier(&identifiers[0]));
+ EXPECT_EQ(space.getId(VarKind::Domain, 1), Identifier(&identifiers[1]));
+ EXPECT_EQ(space.getId(VarKind::Domain, 2), Identifier(&otherIdentifiers[2]));
+ EXPECT_EQ(space.getId(VarKind::Range, 0), Identifier(&identifiers[2]));
+ EXPECT_EQ(space.getId(VarKind::Range, 1), Identifier(&identifiers[3]));
+ EXPECT_EQ(space.getId(VarKind::Range, 2), Identifier(&identifiers[4]));
+ EXPECT_EQ(3u, otherSpace.getNumDomainVars());
+ EXPECT_EQ(2u, otherSpace.getNumRangeVars());
+ EXPECT_EQ(otherSpace.getId(VarKind::Domain, 0),
+ Identifier(&otherIdentifiers[0]));
+ EXPECT_EQ(otherSpace.getId(VarKind::Domain, 1),
+ Identifier(&otherIdentifiers[1]));
+ EXPECT_EQ(otherSpace.getId(VarKind::Domain, 2),
+ Identifier(&otherIdentifiers[2]));
+ EXPECT_EQ(otherSpace.getId(VarKind::Range, 0),
+ Identifier(&otherIdentifiers[3]));
+ EXPECT_EQ(otherSpace.getId(VarKind::Range, 1),
+ Identifier(&otherIdentifiers[4]));
+}
+
+// Check that mergeAndAlignSymbols unions symbol variables when they are
+// disjoint.
+TEST(IntegerRelationTest, mergeAndAlignDisjointSymbols) {
+ IntegerRelation rel = parseRelationFromSet(
+ "(x, y, z)[A, B, C, D] : (x + A - C - y + D - z >= 0)", 2);
+ IntegerRelation otherRel = parseRelationFromSet(
+ "(u, v, a, b)[E, F, G, H] : (E - u + v == 0, v - G - H >= 0)", 2);
+ PresburgerSpace space = PresburgerSpace::getRelationSpace(2, 1, 4, 0);
+ space.resetIds();
+
+ PresburgerSpace otherSpace = PresburgerSpace::getRelationSpace(2, 2, 4, 0);
+ otherSpace.resetIds();
+
+ // Attach identifiers.
+ int identifiers[7] = {'x', 'y', 'z', 'A', 'B', 'C', 'D'};
+ int otherIdentifiers[8] = {'u', 'v', 'a', 'b', 'E', 'F', 'G', 'H'};
+
+ space.getId(VarKind::Domain, 0) = Identifier(&identifiers[0]);
+ space.getId(VarKind::Domain, 1) = Identifier(&identifiers[1]);
+ space.getId(VarKind::Range, 0) = Identifier(&identifiers[2]);
+ space.getId(VarKind::Symbol, 0) = Identifier(&identifiers[3]);
+ space.getId(VarKind::Symbol, 1) = Identifier(&identifiers[4]);
+ space.getId(VarKind::Symbol, 2) = Identifier(&identifiers[5]);
+ space.getId(VarKind::Symbol, 3) = Identifier(&identifiers[6]);
+
+ otherSpace.getId(VarKind::Domain, 0) = Identifier(&otherIdentifiers[0]);
+ otherSpace.getId(VarKind::Domain, 1) = Identifier(&otherIdentifiers[1]);
+ otherSpace.getId(VarKind::Range, 0) = Identifier(&otherIdentifiers[2]);
+ otherSpace.getId(VarKind::Range, 1) = Identifier(&otherIdentifiers[3]);
+ otherSpace.getId(VarKind::Symbol, 0) = Identifier(&otherIdentifiers[4]);
+ otherSpace.getId(VarKind::Symbol, 1) = Identifier(&otherIdentifiers[5]);
+ otherSpace.getId(VarKind::Symbol, 2) = Identifier(&otherIdentifiers[6]);
+ otherSpace.getId(VarKind::Symbol, 3) = Identifier(&otherIdentifiers[7]);
+
+ rel.setSpace(space);
+ otherRel.setSpace(otherSpace);
+ rel.mergeAndAlignSymbols(otherRel);
+
+ space = rel.getSpace();
+ otherSpace = otherRel.getSpace();
+
+ // Check if merge and align is successful.
+ // Check symbol var identifiers.
+ EXPECT_EQ(8u, space.getNumSymbolVars());
+ EXPECT_EQ(8u, otherSpace.getNumSymbolVars());
+ EXPECT_EQ(space.getId(VarKind::Symbol, 0), Identifier(&identifiers[3]));
+ EXPECT_EQ(space.getId(VarKind::Symbol, 1), Identifier(&identifiers[4]));
+ EXPECT_EQ(space.getId(VarKind::Symbol, 2), Identifier(&identifiers[5]));
+ EXPECT_EQ(space.getId(VarKind::Symbol, 3), Identifier(&identifiers[6]));
+ EXPECT_EQ(space.getId(VarKind::Symbol, 4), Identifier(&otherIdentifiers[4]));
+ EXPECT_EQ(space.getId(VarKind::Symbol, 5), Identifier(&otherIdentifiers[5]));
+ EXPECT_EQ(space.getId(VarKind::Symbol, 6), Identifier(&otherIdentifiers[6]));
+ EXPECT_EQ(space.getId(VarKind::Symbol, 7), Identifier(&otherIdentifiers[7]));
+ EXPECT_EQ(otherSpace.getId(VarKind::Symbol, 0), Identifier(&identifiers[3]));
+ EXPECT_EQ(otherSpace.getId(VarKind::Symbol, 1), Identifier(&identifiers[4]));
+ EXPECT_EQ(otherSpace.getId(VarKind::Symbol, 2), Identifier(&identifiers[5]));
+ EXPECT_EQ(otherSpace.getId(VarKind::Symbol, 3), Identifier(&identifiers[6]));
+ EXPECT_EQ(otherSpace.getId(VarKind::Symbol, 4),
+ Identifier(&otherIdentifiers[4]));
+ EXPECT_EQ(otherSpace.getId(VarKind::Symbol, 5),
+ Identifier(&otherIdentifiers[5]));
+ EXPECT_EQ(otherSpace.getId(VarKind::Symbol, 6),
+ Identifier(&otherIdentifiers[6]));
+ EXPECT_EQ(otherSpace.getId(VarKind::Symbol, 7),
+ Identifier(&otherIdentifiers[7]));
+ // Check that domain and range var identifiers are not affected.
+ EXPECT_EQ(2u, space.getNumDomainVars());
+ EXPECT_EQ(1u, space.getNumRangeVars());
+ EXPECT_EQ(space.getId(VarKind::Domain, 0), Identifier(&identifiers[0]));
+ EXPECT_EQ(space.getId(VarKind::Domain, 1), Identifier(&identifiers[1]));
+ EXPECT_EQ(space.getId(VarKind::Range, 0), Identifier(&identifiers[2]));
+ EXPECT_EQ(2u, otherSpace.getNumDomainVars());
+ EXPECT_EQ(2u, otherSpace.getNumRangeVars());
+ EXPECT_EQ(otherSpace.getId(VarKind::Domain, 0),
+ Identifier(&otherIdentifiers[0]));
+ EXPECT_EQ(otherSpace.getId(VarKind::Domain, 1),
+ Identifier(&otherIdentifiers[1]));
+ EXPECT_EQ(otherSpace.getId(VarKind::Range, 0),
+ Identifier(&otherIdentifiers[2]));
+ EXPECT_EQ(otherSpace.getId(VarKind::Range, 1),
+ Identifier(&otherIdentifiers[3]));
+}
+
+// Check that mergeAndAlignSymbols is correct when a suffix of identifiers is
+// shared; i.e. identifiers are [A, B, C, D] and [E, F, C, D].
+TEST(IntegerRelationTest, mergeAndAlignCommonSuffixSymbols) {
+ IntegerRelation rel = parseRelationFromSet(
+ "(x, y, z)[A, B, C, D] : (x + A - C - y + D - z >= 0)", 2);
+ IntegerRelation otherRel = parseRelationFromSet(
+ "(u, v, a, b)[E, F, C, D] : (E - u + v == 0, v - C - D >= 0)", 2);
+ PresburgerSpace space = PresburgerSpace::getRelationSpace(2, 1, 4, 0);
+ space.resetIds();
+
+ PresburgerSpace otherSpace = PresburgerSpace::getRelationSpace(2, 2, 4, 0);
+ otherSpace.resetIds();
+
+ // Attach identifiers.
+ int identifiers[7] = {'x', 'y', 'z', 'A', 'B', 'C', 'D'};
+ int otherIdentifiers[6] = {'u', 'v', 'a', 'b', 'E', 'F'};
+
+ space.getId(VarKind::Domain, 0) = Identifier(&identifiers[0]);
+ space.getId(VarKind::Domain, 1) = Identifier(&identifiers[1]);
+ space.getId(VarKind::Range, 0) = Identifier(&identifiers[2]);
+ space.getId(VarKind::Symbol, 0) = Identifier(&identifiers[3]);
+ space.getId(VarKind::Symbol, 1) = Identifier(&identifiers[4]);
+ space.getId(VarKind::Symbol, 2) = Identifier(&identifiers[5]);
+ space.getId(VarKind::Symbol, 3) = Identifier(&identifiers[6]);
+
+ otherSpace.getId(VarKind::Domain, 0) = Identifier(&otherIdentifiers[0]);
+ otherSpace.getId(VarKind::Domain, 1) = Identifier(&otherIdentifiers[1]);
+ otherSpace.getId(VarKind::Range, 0) = Identifier(&otherIdentifiers[2]);
+ otherSpace.getId(VarKind::Range, 1) = Identifier(&otherIdentifiers[3]);
+ otherSpace.getId(VarKind::Symbol, 0) = Identifier(&otherIdentifiers[4]);
+ otherSpace.getId(VarKind::Symbol, 1) = Identifier(&otherIdentifiers[5]);
+ // Note common identifiers
+ otherSpace.getId(VarKind::Symbol, 2) = Identifier(&identifiers[5]);
+ otherSpace.getId(VarKind::Symbol, 3) = Identifier(&identifiers[6]);
+
+ rel.setSpace(space);
+ otherRel.setSpace(otherSpace);
+ rel.mergeAndAlignSymbols(otherRel);
+
+ space = rel.getSpace();
+ otherSpace = otherRel.getSpace();
+
+ // Check if merge and align is successful.
+ // Check symbol var identifiers.
+ EXPECT_EQ(6u, space.getNumSymbolVars());
+ EXPECT_EQ(6u, otherSpace.getNumSymbolVars());
+ EXPECT_EQ(space.getId(VarKind::Symbol, 0), Identifier(&identifiers[3]));
+ EXPECT_EQ(space.getId(VarKind::Symbol, 1), Identifier(&identifiers[4]));
+ EXPECT_EQ(space.getId(VarKind::Symbol, 2), Identifier(&identifiers[5]));
+ EXPECT_EQ(space.getId(VarKind::Symbol, 3), Identifier(&identifiers[6]));
+ EXPECT_EQ(space.getId(VarKind::Symbol, 4), Identifier(&otherIdentifiers[4]));
+ EXPECT_EQ(space.getId(VarKind::Symbol, 5), Identifier(&otherIdentifiers[5]));
+ EXPECT_EQ(otherSpace.getId(VarKind::Symbol, 0), Identifier(&identifiers[3]));
+ EXPECT_EQ(otherSpace.getId(VarKind::Symbol, 1), Identifier(&identifiers[4]));
+ EXPECT_EQ(otherSpace.getId(VarKind::Symbol, 2), Identifier(&identifiers[5]));
+ EXPECT_EQ(otherSpace.getId(VarKind::Symbol, 3), Identifier(&identifiers[6]));
+ EXPECT_EQ(otherSpace.getId(VarKind::Symbol, 4),
+ Identifier(&otherIdentifiers[4]));
+ EXPECT_EQ(otherSpace.getId(VarKind::Symbol, 5),
+ Identifier(&otherIdentifiers[5]));
+ // Check that domain and range var identifiers are not affected.
+ EXPECT_EQ(2u, space.getNumDomainVars());
+ EXPECT_EQ(1u, space.getNumRangeVars());
+ EXPECT_EQ(space.getId(VarKind::Domain, 0), Identifier(&identifiers[0]));
+ EXPECT_EQ(space.getId(VarKind::Domain, 1), Identifier(&identifiers[1]));
+ EXPECT_EQ(space.getId(VarKind::Range, 0), Identifier(&identifiers[2]));
+ EXPECT_EQ(2u, otherSpace.getNumDomainVars());
+ EXPECT_EQ(2u, otherSpace.getNumRangeVars());
+ EXPECT_EQ(otherSpace.getId(VarKind::Domain, 0),
+ Identifier(&otherIdentifiers[0]));
+ EXPECT_EQ(otherSpace.getId(VarKind::Domain, 1),
+ Identifier(&otherIdentifiers[1]));
+ EXPECT_EQ(otherSpace.getId(VarKind::Range, 0),
+ Identifier(&otherIdentifiers[2]));
+ EXPECT_EQ(otherSpace.getId(VarKind::Range, 1),
+ Identifier(&otherIdentifiers[3]));
+}
diff --git a/mlir/unittests/Analysis/Presburger/Utils.h b/mlir/unittests/Analysis/Presburger/Utils.h
index 2a9966c7ce2e..6b00898a7e27 100644
--- a/mlir/unittests/Analysis/Presburger/Utils.h
+++ b/mlir/unittests/Analysis/Presburger/Utils.h
@@ -13,6 +13,7 @@
#ifndef MLIR_UNITTESTS_ANALYSIS_PRESBURGER_UTILS_H
#define MLIR_UNITTESTS_ANALYSIS_PRESBURGER_UTILS_H
+#include "mlir/Analysis/Presburger/GeneratingFunction.h"
#include "mlir/Analysis/Presburger/IntegerRelation.h"
#include "mlir/Analysis/Presburger/Matrix.h"
#include "mlir/Analysis/Presburger/PWMAFunction.h"
@@ -72,9 +73,42 @@ inline void EXPECT_EQ_FRAC_MATRIX(FracMatrix a, FracMatrix b) {
EXPECT_EQ(a(row, col), b(row, col));
}
+// Check that two generating functions have the same number of parameters and
+// the same signs, numerators, and denominators (in order).
+// Note that this is not a true equality check.
+inline void EXPECT_EQ_REPR_GENERATINGFUNCTION(detail::GeneratingFunction a,
+ detail::GeneratingFunction b) {
+ EXPECT_EQ(a.getNumParams(), b.getNumParams());
+
+ SmallVector<int> aSigns = a.getSigns();
+ SmallVector<int> bSigns = b.getSigns();
+ EXPECT_EQ(aSigns.size(), bSigns.size());
+ for (unsigned i = 0, e = aSigns.size(); i < e; i++)
+ EXPECT_EQ(aSigns[i], bSigns[i]);
+
+ std::vector<detail::ParamPoint> aNums = a.getNumerators();
+ std::vector<detail::ParamPoint> bNums = b.getNumerators();
+ EXPECT_EQ(aNums.size(), bNums.size());
+ for (unsigned i = 0, e = aNums.size(); i < e; i++)
+ EXPECT_EQ_FRAC_MATRIX(aNums[i], bNums[i]);
+
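+ // Compare the denominators term by term: each term is a list of points,
+ // and each point is compared coordinate-wise.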
+ std::vector<std::vector<detail::Point>> aDens = a.getDenominators();
+ std::vector<std::vector<detail::Point>> bDens = b.getDenominators();
+ EXPECT_EQ(aDens.size(), bDens.size());
+ for (unsigned i = 0, e = aDens.size(); i < e; i++) {
+ EXPECT_EQ(aDens[i].size(), bDens[i].size());
+ for (unsigned j = 0, f = aDens[i].size(); j < f; j++) {
+ EXPECT_EQ(aDens[i][j].size(), bDens[i][j].size());
+ for (unsigned k = 0, g = aDens[i][j].size(); k < g; k++) {
+ EXPECT_EQ(aDens[i][j][k], bDens[i][j][k]);
+ }
+ }
+ }
+}
+
// Check the coefficients (in order) of two quasipolynomials.
// Note that this is not a true equality check.
-inline void EXPECT_EQ_REPR_QUASIPOLYNOMIAL(QuasiPolynomial a, QuasiPolynomial b) {
+inline void EXPECT_EQ_REPR_QUASIPOLYNOMIAL(QuasiPolynomial a,
+ QuasiPolynomial b) {
EXPECT_EQ(a.getNumInputs(), b.getNumInputs());
SmallVector<Fraction> aCoeffs = a.getCoefficients(),
diff --git a/openmp/CMakeLists.txt b/openmp/CMakeLists.txt
index 307c8dbbc0c3..c1c79f8e0ca9 100644
--- a/openmp/CMakeLists.txt
+++ b/openmp/CMakeLists.txt
@@ -94,7 +94,8 @@ set(ENABLE_LIBOMPTARGET ON)
# Since the device plugins are only supported on Linux anyway,
# there is no point in trying to compile libomptarget on other OSes.
# 32-bit systems are not supported either.
-if (APPLE OR WIN32 OR WASM OR NOT "cxx_std_17" IN_LIST CMAKE_CXX_COMPILE_FEATURES OR NOT CMAKE_SIZEOF_VOID_P EQUAL 8)
+if (APPLE OR WIN32 OR WASM OR NOT "cxx_std_17" IN_LIST CMAKE_CXX_COMPILE_FEATURES
+ OR NOT CMAKE_SIZEOF_VOID_P EQUAL 8 OR ${CMAKE_SYSTEM_NAME} MATCHES "AIX")
set(ENABLE_LIBOMPTARGET OFF)
endif()
diff --git a/openmp/cmake/OpenMPTesting.cmake b/openmp/cmake/OpenMPTesting.cmake
index a771efdf9e69..df41956dadd4 100644
--- a/openmp/cmake/OpenMPTesting.cmake
+++ b/openmp/cmake/OpenMPTesting.cmake
@@ -55,6 +55,9 @@ if (${OPENMP_STANDALONE_BUILD})
if (MSVC OR XCODE)
set(DEFAULT_LIT_ARGS "${DEFAULT_LIT_ARGS} --no-progress-bar")
endif()
+ if (${CMAKE_SYSTEM_NAME} MATCHES "AIX")
+ set(DEFAULT_LIT_ARGS "${DEFAULT_LIT_ARGS} --time-tests --timeout=1800")
+ endif()
set(OPENMP_LIT_ARGS "${DEFAULT_LIT_ARGS}" CACHE STRING "Options for lit.")
separate_arguments(OPENMP_LIT_ARGS)
else()
diff --git a/openmp/libomptarget/plugins-nextgen/amdgpu/CMakeLists.txt b/openmp/libomptarget/plugins-nextgen/amdgpu/CMakeLists.txt
index bbf8c0a50f85..68ce63467a6c 100644
--- a/openmp/libomptarget/plugins-nextgen/amdgpu/CMakeLists.txt
+++ b/openmp/libomptarget/plugins-nextgen/amdgpu/CMakeLists.txt
@@ -35,14 +35,6 @@ add_definitions(-DTARGET_NAME=AMDGPU)
# requires changing the original plugins.
add_definitions(-DDEBUG_PREFIX="TARGET AMDGPU RTL")
-if(CMAKE_SYSTEM_PROCESSOR MATCHES "(ppc64le)|(aarch64)$")
- add_definitions(-DLITTLEENDIAN_CPU=1)
-endif()
-
-if(CMAKE_BUILD_TYPE MATCHES Debug)
- add_definitions(-DDEBUG)
-endif()
-
set(LIBOMPTARGET_DLOPEN_LIBHSA OFF)
option(LIBOMPTARGET_FORCE_DLOPEN_LIBHSA "Build with dlopened libhsa" ${LIBOMPTARGET_DLOPEN_LIBHSA})
diff --git a/openmp/libomptarget/plugins-nextgen/amdgpu/src/rtl.cpp b/openmp/libomptarget/plugins-nextgen/amdgpu/src/rtl.cpp
index 0411c6701334..b67642e9e1bc 100644
--- a/openmp/libomptarget/plugins-nextgen/amdgpu/src/rtl.cpp
+++ b/openmp/libomptarget/plugins-nextgen/amdgpu/src/rtl.cpp
@@ -43,6 +43,18 @@
#include "llvm/Support/Program.h"
#include "llvm/Support/raw_ostream.h"
+#if !defined(__BYTE_ORDER__) || !defined(__ORDER_LITTLE_ENDIAN__) || \
+ !defined(__ORDER_BIG_ENDIAN__)
+#error "Missing preprocessor definitions for endianness detection."
+#endif
+
+// The HSA headers require these definitions.
+#if defined(__BYTE_ORDER__) && (__BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__)
+#define LITTLEENDIAN_CPU
+#elif defined(__BYTE_ORDER__) && (__BYTE_ORDER__ == __ORDER_BIG_ENDIAN__)
+#define BIGENDIAN_CPU
+#endif
+
#if defined(__has_include)
#if __has_include("hsa/hsa.h")
#include "hsa/hsa.h"
@@ -3203,6 +3215,7 @@ Error AMDGPUKernelTy::launchImpl(GenericDeviceTy &GenericDevice,
ImplArgs->GroupSizeY = 1;
ImplArgs->GroupSizeZ = 1;
ImplArgs->GridDims = 1;
+ ImplArgs->DynamicLdsSize = KernelArgs.DynCGroupMem;
}
// Push the kernel launch into the stream.
diff --git a/openmp/libomptarget/plugins-nextgen/amdgpu/utils/UtilitiesRTL.h b/openmp/libomptarget/plugins-nextgen/amdgpu/utils/UtilitiesRTL.h
index 2471590c27b3..58a3b5df00fa 100644
--- a/openmp/libomptarget/plugins-nextgen/amdgpu/utils/UtilitiesRTL.h
+++ b/openmp/libomptarget/plugins-nextgen/amdgpu/utils/UtilitiesRTL.h
@@ -45,7 +45,9 @@ struct AMDGPUImplicitArgsTy {
uint16_t GroupSizeZ;
uint8_t Unused0[46]; // 46 byte offset.
uint16_t GridDims;
- uint8_t Unused1[190]; // 190 byte offset.
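+ // The previous 190 bytes of padding are split as 54 + 4 (DynamicLdsSize) +
+ // 132 bytes, so the overall struct layout is unchanged.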
+ uint8_t Unused1[54]; // 54 byte offset.
+ uint32_t DynamicLdsSize;
+ uint8_t Unused2[132]; // 132 byte offset.
};
// Dummy struct for COV4 implicitargs.
diff --git a/openmp/libomptarget/plugins-nextgen/common/src/PluginInterface.cpp b/openmp/libomptarget/plugins-nextgen/common/src/PluginInterface.cpp
index be9ace571f54..9490e58fc669 100644
--- a/openmp/libomptarget/plugins-nextgen/common/src/PluginInterface.cpp
+++ b/openmp/libomptarget/plugins-nextgen/common/src/PluginInterface.cpp
@@ -1398,6 +1398,7 @@ Expected<void *> GenericDeviceTy::dataAlloc(int64_t Size, void *HostPtr,
switch (Kind) {
case TARGET_ALLOC_DEFAULT:
+ case TARGET_ALLOC_DEVICE_NON_BLOCKING:
case TARGET_ALLOC_DEVICE:
if (MemoryManager) {
Alloc = MemoryManager->allocate(Size, HostPtr);
diff --git a/openmp/runtime/CMakeLists.txt b/openmp/runtime/CMakeLists.txt
index 80064170db19..041b60efac5c 100644
--- a/openmp/runtime/CMakeLists.txt
+++ b/openmp/runtime/CMakeLists.txt
@@ -30,7 +30,7 @@ if(${OPENMP_STANDALONE_BUILD})
# If adding a new architecture, take a look at cmake/LibompGetArchitecture.cmake
libomp_get_architecture(LIBOMP_DETECTED_ARCH)
set(LIBOMP_ARCH ${LIBOMP_DETECTED_ARCH} CACHE STRING
- "The architecture to build for (x86_64/i386/arm/ppc64/ppc64le/aarch64/mic/mips/mips64/riscv64/loongarch64/ve/s390x/wasm32).")
+ "The architecture to build for (x86_64/i386/arm/ppc/ppc64/ppc64le/aarch64/mic/mips/mips64/riscv64/loongarch64/ve/s390x/wasm32).")
# Should assertions be enabled? They are on by default.
set(LIBOMP_ENABLE_ASSERTIONS TRUE CACHE BOOL
"enable assertions?")
@@ -51,8 +51,10 @@ else() # Part of LLVM build
set(LIBOMP_ARCH x86_64)
elseif(LIBOMP_NATIVE_ARCH MATCHES "powerpc64le")
set(LIBOMP_ARCH ppc64le)
- elseif(LIBOMP_NATIVE_ARCH MATCHES "powerpc")
+ elseif(LIBOMP_NATIVE_ARCH MATCHES "powerpc64")
set(LIBOMP_ARCH ppc64)
+ elseif(LIBOMP_NATIVE_ARCH MATCHES "powerpc")
+ set(LIBOMP_ARCH ppc)
elseif(LIBOMP_NATIVE_ARCH MATCHES "aarch64")
set(LIBOMP_ARCH aarch64)
elseif(LIBOMP_NATIVE_ARCH MATCHES "arm64")
@@ -89,7 +91,7 @@ if(LIBOMP_ARCH STREQUAL "aarch64")
endif()
endif()
-libomp_check_variable(LIBOMP_ARCH 32e x86_64 32 i386 arm ppc64 ppc64le aarch64 aarch64_a64fx mic mips mips64 riscv64 loongarch64 ve s390x wasm32)
+libomp_check_variable(LIBOMP_ARCH 32e x86_64 32 i386 arm ppc ppc64 ppc64le aarch64 aarch64_a64fx mic mips mips64 riscv64 loongarch64 ve s390x wasm32)
set(LIBOMP_LIB_TYPE normal CACHE STRING
"Performance,Profiling,Stubs library (normal/profile/stubs)")
@@ -128,8 +130,14 @@ set(LIBOMP_ASMFLAGS "" CACHE STRING
"Appended user specified assembler flags.")
set(LIBOMP_LDFLAGS "" CACHE STRING
"Appended user specified linker flags.")
-set(LIBOMP_LIBFLAGS "" CACHE STRING
- "Appended user specified linked libs flags. (e.g., -lm)")
+if("${LIBOMP_ARCH}" STREQUAL "ppc" AND ${CMAKE_SYSTEM_NAME} MATCHES "AIX")
+ # PPC (32-bit) on AIX needs libatomic for __atomic_load_8, etc.
+ set(LIBOMP_LIBFLAGS "-latomic" CACHE STRING
+ "Appended user specified linked libs flags. (e.g., -lm)")
+else()
+ set(LIBOMP_LIBFLAGS "" CACHE STRING
+ "Appended user specified linked libs flags. (e.g., -lm)")
+endif()
set(LIBOMP_FFLAGS "" CACHE STRING
"Appended user specified Fortran compiler flags. These are only used if LIBOMP_FORTRAN_MODULES==TRUE.")
@@ -171,12 +179,15 @@ set(LOONGARCH64 FALSE)
set(VE FALSE)
set(S390X FALSE)
set(WASM FALSE)
+set(PPC FALSE)
if("${LIBOMP_ARCH}" STREQUAL "i386" OR "${LIBOMP_ARCH}" STREQUAL "32") # IA-32 architecture
set(IA32 TRUE)
elseif("${LIBOMP_ARCH}" STREQUAL "x86_64" OR "${LIBOMP_ARCH}" STREQUAL "32e") # Intel(R) 64 architecture
set(INTEL64 TRUE)
elseif("${LIBOMP_ARCH}" STREQUAL "arm") # ARM architecture
set(ARM TRUE)
+elseif("${LIBOMP_ARCH}" STREQUAL "ppc") # PPC32 architecture
+ set(PPC TRUE)
elseif("${LIBOMP_ARCH}" STREQUAL "ppc64") # PPC64BE architecture
set(PPC64BE TRUE)
set(PPC64 TRUE)
diff --git a/openmp/runtime/cmake/LibompGetArchitecture.cmake b/openmp/runtime/cmake/LibompGetArchitecture.cmake
index cd85267020bd..d7f81870f9ef 100644
--- a/openmp/runtime/cmake/LibompGetArchitecture.cmake
+++ b/openmp/runtime/cmake/LibompGetArchitecture.cmake
@@ -41,6 +41,8 @@ function(libomp_get_architecture return_arch)
#error ARCHITECTURE=ppc64le
#elif defined(__powerpc64__)
#error ARCHITECTURE=ppc64
+ #elif defined(__powerpc__) && !defined(__powerpc64__)
+ #error ARCHITECTURE=ppc
#elif defined(__mips__) && defined(__mips64)
#error ARCHITECTURE=mips64
#elif defined(__mips__) && !defined(__mips64)
diff --git a/openmp/runtime/cmake/config-ix.cmake b/openmp/runtime/cmake/config-ix.cmake
index 90b9af4cd382..76f471a44380 100644
--- a/openmp/runtime/cmake/config-ix.cmake
+++ b/openmp/runtime/cmake/config-ix.cmake
@@ -333,7 +333,8 @@ else()
(LIBOMP_ARCH STREQUAL loongarch64) OR
(LIBOMP_ARCH STREQUAL s390x))
AND # OS supported?
- ((WIN32 AND LIBOMP_HAVE_PSAPI) OR APPLE OR (NOT WIN32 AND LIBOMP_HAVE_WEAK_ATTRIBUTE)))
+ ((WIN32 AND LIBOMP_HAVE_PSAPI) OR APPLE OR
+ (NOT (WIN32 OR ${CMAKE_SYSTEM_NAME} MATCHES "AIX") AND LIBOMP_HAVE_WEAK_ATTRIBUTE)))
set(LIBOMP_HAVE_OMPT_SUPPORT TRUE)
else()
set(LIBOMP_HAVE_OMPT_SUPPORT FALSE)
diff --git a/openmp/runtime/src/CMakeLists.txt b/openmp/runtime/src/CMakeLists.txt
index 8b2445ac58bf..619d4f7ba458 100644
--- a/openmp/runtime/src/CMakeLists.txt
+++ b/openmp/runtime/src/CMakeLists.txt
@@ -108,7 +108,11 @@ else()
# Unix specific files
libomp_append(LIBOMP_CXXFILES z_Linux_util.cpp)
libomp_append(LIBOMP_CXXFILES kmp_gsupport.cpp)
- libomp_append(LIBOMP_GNUASMFILES z_Linux_asm.S) # Unix assembly file
+ if(${CMAKE_SYSTEM_NAME} MATCHES "AIX")
+ libomp_append(LIBOMP_GNUASMFILES z_AIX_asm.S) # AIX assembly file
+ else()
+ libomp_append(LIBOMP_GNUASMFILES z_Linux_asm.S) # Unix assembly file
+ endif()
endif()
libomp_append(LIBOMP_CXXFILES thirdparty/ittnotify/ittnotify_static.cpp LIBOMP_USE_ITT_NOTIFY)
libomp_append(LIBOMP_CXXFILES kmp_debugger.cpp LIBOMP_USE_DEBUGGER)
diff --git a/openmp/runtime/src/kmp.h b/openmp/runtime/src/kmp.h
index 3dbf8c71c48d..c287a31e0b1b 100644
--- a/openmp/runtime/src/kmp.h
+++ b/openmp/runtime/src/kmp.h
@@ -1192,6 +1192,9 @@ extern void __kmp_init_target_task();
// Minimum stack size for pthread for VE is 4MB.
// https://www.hpc.nec/documents/veos/en/glibc/Difference_Points_glibc.htm
#define KMP_DEFAULT_STKSIZE ((size_t)(4 * 1024 * 1024))
+#elif KMP_OS_AIX
+// The default stack size for worker threads on AIX is 4MB.
+#define KMP_DEFAULT_STKSIZE ((size_t)(4 * 1024 * 1024))
#else
#define KMP_DEFAULT_STKSIZE ((size_t)(1024 * 1024))
#endif
@@ -1354,6 +1357,10 @@ extern kmp_uint64 __kmp_now_nsec();
/* TODO: tune for KMP_OS_WASI */
#define KMP_INIT_WAIT 1024U /* initial number of spin-tests */
#define KMP_NEXT_WAIT 512U /* susequent number of spin-tests */
+#elif KMP_OS_AIX
+/* TODO: tune for KMP_OS_AIX */
+#define KMP_INIT_WAIT 1024U /* initial number of spin-tests */
+#define KMP_NEXT_WAIT 512U /* subsequent number of spin-tests */
#endif
#if KMP_ARCH_X86 || KMP_ARCH_X86_64
diff --git a/openmp/runtime/src/kmp_config.h.cmake b/openmp/runtime/src/kmp_config.h.cmake
index 5f04301c91c6..b0cd0ed296e7 100644
--- a/openmp/runtime/src/kmp_config.h.cmake
+++ b/openmp/runtime/src/kmp_config.h.cmake
@@ -100,7 +100,7 @@
#define ENABLE_LIBOMPTARGET OPENMP_ENABLE_LIBOMPTARGET
// Configured cache line based on architecture
-#if KMP_ARCH_PPC64
+#if KMP_ARCH_PPC64 || KMP_ARCH_PPC
# define CACHE_LINE 128
#elif KMP_ARCH_AARCH64_A64FX
# define CACHE_LINE 256
diff --git a/openmp/runtime/src/kmp_ftn_entry.h b/openmp/runtime/src/kmp_ftn_entry.h
index d54c5bfd10fe..713561734c48 100644
--- a/openmp/runtime/src/kmp_ftn_entry.h
+++ b/openmp/runtime/src/kmp_ftn_entry.h
@@ -582,7 +582,7 @@ int FTN_STDCALL KMP_EXPAND_NAME(FTN_GET_THREAD_NUM)(void) {
int gtid;
#if KMP_OS_DARWIN || KMP_OS_DRAGONFLY || KMP_OS_FREEBSD || KMP_OS_NETBSD || \
- KMP_OS_OPENBSD || KMP_OS_HURD || KMP_OS_SOLARIS
+ KMP_OS_OPENBSD || KMP_OS_HURD || KMP_OS_SOLARIS || KMP_OS_AIX
gtid = __kmp_entry_gtid();
#elif KMP_OS_WINDOWS
if (!__kmp_init_parallel ||
diff --git a/openmp/runtime/src/kmp_global.cpp b/openmp/runtime/src/kmp_global.cpp
index b132f38fd3b0..5017cd3de4be 100644
--- a/openmp/runtime/src/kmp_global.cpp
+++ b/openmp/runtime/src/kmp_global.cpp
@@ -172,7 +172,7 @@ int __kmp_ncores = 0;
int __kmp_chunk = 0;
int __kmp_force_monotonic = 0;
int __kmp_abort_delay = 0;
-#if KMP_OS_LINUX && defined(KMP_TDATA_GTID)
+#if (KMP_OS_LINUX || KMP_OS_AIX) && defined(KMP_TDATA_GTID)
int __kmp_gtid_mode = 3; /* use __declspec(thread) TLS to store gtid */
int __kmp_adjust_gtid_mode = FALSE;
#elif KMP_OS_WINDOWS
diff --git a/openmp/runtime/src/kmp_gsupport.cpp b/openmp/runtime/src/kmp_gsupport.cpp
index 78af39533549..88189659a234 100644
--- a/openmp/runtime/src/kmp_gsupport.cpp
+++ b/openmp/runtime/src/kmp_gsupport.cpp
@@ -357,7 +357,8 @@ void KMP_EXPAND_NAME(KMP_API_NAME_GOMP_ORDERED_END)(void) {
// They come in two flavors: 64-bit unsigned, and either 32-bit signed
// (IA-32 architecture) or 64-bit signed (Intel(R) 64).
-#if KMP_ARCH_X86 || KMP_ARCH_ARM || KMP_ARCH_MIPS || KMP_ARCH_WASM
+#if KMP_ARCH_X86 || KMP_ARCH_ARM || KMP_ARCH_MIPS || KMP_ARCH_WASM || \
+ KMP_ARCH_PPC
#define KMP_DISPATCH_INIT __kmp_aux_dispatch_init_4
#define KMP_DISPATCH_FINI_CHUNK __kmp_aux_dispatch_fini_chunk_4
#define KMP_DISPATCH_NEXT __kmpc_dispatch_next_4
diff --git a/openmp/runtime/src/kmp_os.h b/openmp/runtime/src/kmp_os.h
index 4ffe9f2d8c95..6862fd89b630 100644
--- a/openmp/runtime/src/kmp_os.h
+++ b/openmp/runtime/src/kmp_os.h
@@ -176,7 +176,8 @@ typedef unsigned long long kmp_uint64;
#define KMP_UINT64_SPEC "llu"
#endif /* KMP_OS_UNIX */
-#if KMP_ARCH_X86 || KMP_ARCH_ARM || KMP_ARCH_MIPS || KMP_ARCH_WASM
+#if KMP_ARCH_X86 || KMP_ARCH_ARM || KMP_ARCH_MIPS || KMP_ARCH_WASM || \
+ KMP_ARCH_PPC
#define KMP_SIZE_T_SPEC KMP_UINT32_SPEC
#elif KMP_ARCH_X86_64 || KMP_ARCH_PPC64 || KMP_ARCH_AARCH64 || \
KMP_ARCH_MIPS64 || KMP_ARCH_RISCV64 || KMP_ARCH_LOONGARCH64 || \
@@ -186,7 +187,7 @@ typedef unsigned long long kmp_uint64;
#error "Can't determine size_t printf format specifier."
#endif
-#if KMP_ARCH_X86 || KMP_ARCH_ARM || KMP_ARCH_WASM
+#if KMP_ARCH_X86 || KMP_ARCH_ARM || KMP_ARCH_WASM || KMP_ARCH_PPC
#define KMP_SIZE_T_MAX (0xFFFFFFFF)
#else
#define KMP_SIZE_T_MAX (0xFFFFFFFFFFFFFFFF)
@@ -1046,7 +1047,7 @@ extern kmp_real64 __kmp_xchg_real64(volatile kmp_real64 *p, kmp_real64 v);
#if KMP_ARCH_PPC64 || KMP_ARCH_ARM || KMP_ARCH_AARCH64 || KMP_ARCH_MIPS || \
KMP_ARCH_MIPS64 || KMP_ARCH_RISCV64 || KMP_ARCH_LOONGARCH64 || \
- KMP_ARCH_VE || KMP_ARCH_S390X
+ KMP_ARCH_VE || KMP_ARCH_S390X || KMP_ARCH_PPC
#if KMP_OS_WINDOWS
#undef KMP_MB
#define KMP_MB() std::atomic_thread_fence(std::memory_order_seq_cst)
@@ -1146,7 +1147,7 @@ extern kmp_real64 __kmp_xchg_real64(volatile kmp_real64 *p, kmp_real64 v);
KMP_COMPARE_AND_STORE_REL64((volatile kmp_int64 *)(volatile void *)&(a), \
(kmp_int64)(b), (kmp_int64)(c))
-#if KMP_ARCH_X86 || KMP_ARCH_MIPS || KMP_ARCH_WASM
+#if KMP_ARCH_X86 || KMP_ARCH_MIPS || KMP_ARCH_WASM || KMP_ARCH_PPC
// What about ARM?
#define TCR_PTR(a) ((void *)TCR_4(a))
#define TCW_PTR(a, b) TCW_4((a), (b))
diff --git a/openmp/runtime/src/kmp_platform.h b/openmp/runtime/src/kmp_platform.h
index 45f411b9c219..c06f46db2d49 100644
--- a/openmp/runtime/src/kmp_platform.h
+++ b/openmp/runtime/src/kmp_platform.h
@@ -82,15 +82,20 @@
#define KMP_OS_WASI 1
#endif
+#if (defined _AIX)
+#undef KMP_OS_AIX
+#define KMP_OS_AIX 1
+#endif
+
#if (1 != KMP_OS_LINUX + KMP_OS_DRAGONFLY + KMP_OS_FREEBSD + KMP_OS_NETBSD + \
KMP_OS_OPENBSD + KMP_OS_DARWIN + KMP_OS_WINDOWS + KMP_OS_HURD + \
- KMP_OS_SOLARIS + KMP_OS_WASI)
+ KMP_OS_SOLARIS + KMP_OS_WASI + KMP_OS_AIX)
#error Unknown OS
#endif
#if KMP_OS_LINUX || KMP_OS_DRAGONFLY || KMP_OS_FREEBSD || KMP_OS_NETBSD || \
KMP_OS_OPENBSD || KMP_OS_DARWIN || KMP_OS_HURD || KMP_OS_SOLARIS || \
- KMP_OS_WASI
+ KMP_OS_WASI || KMP_OS_AIX
#undef KMP_OS_UNIX
#define KMP_OS_UNIX 1
#endif
@@ -102,7 +107,8 @@
#define KMP_ARCH_AARCH64 0
#define KMP_ARCH_PPC64_ELFv1 0
#define KMP_ARCH_PPC64_ELFv2 0
-#define KMP_ARCH_PPC64 (KMP_ARCH_PPC64_ELFv2 || KMP_ARCH_PPC64_ELFv1)
+#define KMP_ARCH_PPC64_XCOFF 0
+#define KMP_ARCH_PPC_XCOFF 0
#define KMP_ARCH_MIPS 0
#define KMP_ARCH_MIPS64 0
#define KMP_ARCH_RISCV64 0
@@ -134,13 +140,23 @@
#undef KMP_ARCH_X86
#define KMP_ARCH_X86 1
#elif defined __powerpc64__
-#if defined(_CALL_ELF) && _CALL_ELF == 2
+#if defined(_CALL_ELF)
+#if _CALL_ELF == 2
#undef KMP_ARCH_PPC64_ELFv2
#define KMP_ARCH_PPC64_ELFv2 1
#else
#undef KMP_ARCH_PPC64_ELFv1
#define KMP_ARCH_PPC64_ELFv1 1
#endif
+#elif defined KMP_OS_AIX
+#undef KMP_ARCH_PPC64_XCOFF
+#define KMP_ARCH_PPC64_XCOFF 1
+#endif
+#elif defined(__powerpc__) && defined(KMP_OS_AIX)
+#undef KMP_ARCH_PPC_XCOFF
+#define KMP_ARCH_PPC_XCOFF 1
+#undef KMP_ARCH_PPC
+#define KMP_ARCH_PPC 1
#elif defined __aarch64__
#undef KMP_ARCH_AARCH64
#define KMP_ARCH_AARCH64 1
@@ -207,6 +223,9 @@
#define KMP_ARCH_WASM 1
#endif
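+// KMP_ARCH_PPC64 is set for any 64-bit PowerPC object format (ELFv1, ELFv2,
+// or XCOFF).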
+#define KMP_ARCH_PPC64 \
+ (KMP_ARCH_PPC64_ELFv2 || KMP_ARCH_PPC64_ELFv1 || KMP_ARCH_PPC64_XCOFF)
+
#if defined(__MIC__) || defined(__MIC2__)
#define KMP_MIC 1
#if __MIC2__ || __KNC__
@@ -224,7 +243,8 @@
/* Specify 32 bit architectures here */
#define KMP_32_BIT_ARCH \
- (KMP_ARCH_X86 || KMP_ARCH_ARM || KMP_ARCH_MIPS || KMP_ARCH_WASM)
+ (KMP_ARCH_X86 || KMP_ARCH_ARM || KMP_ARCH_MIPS || KMP_ARCH_WASM || \
+ KMP_ARCH_PPC)
// Platforms which support Intel(R) Many Integrated Core Architecture
#define KMP_MIC_SUPPORTED \
@@ -234,7 +254,7 @@
#if (1 != KMP_ARCH_X86 + KMP_ARCH_X86_64 + KMP_ARCH_ARM + KMP_ARCH_PPC64 + \
KMP_ARCH_AARCH64 + KMP_ARCH_MIPS + KMP_ARCH_MIPS64 + \
KMP_ARCH_RISCV64 + KMP_ARCH_LOONGARCH64 + KMP_ARCH_VE + \
- KMP_ARCH_S390X + KMP_ARCH_WASM)
+ KMP_ARCH_S390X + KMP_ARCH_WASM + KMP_ARCH_PPC)
#error Unknown or unsupported architecture
#endif
diff --git a/openmp/runtime/src/kmp_runtime.cpp b/openmp/runtime/src/kmp_runtime.cpp
index 4e1074a893a2..fc5e8405a415 100644
--- a/openmp/runtime/src/kmp_runtime.cpp
+++ b/openmp/runtime/src/kmp_runtime.cpp
@@ -8901,7 +8901,7 @@ __kmp_determine_reduction_method(
#if KMP_OS_LINUX || KMP_OS_DRAGONFLY || KMP_OS_FREEBSD || KMP_OS_NETBSD || \
KMP_OS_OPENBSD || KMP_OS_WINDOWS || KMP_OS_DARWIN || KMP_OS_HURD || \
- KMP_OS_SOLARIS || KMP_OS_WASI
+ KMP_OS_SOLARIS || KMP_OS_WASI || KMP_OS_AIX
int teamsize_cutoff = 4;
@@ -8926,14 +8926,14 @@ __kmp_determine_reduction_method(
#error "Unknown or unsupported OS"
#endif // KMP_OS_LINUX || KMP_OS_DRAGONFLY || KMP_OS_FREEBSD || KMP_OS_NETBSD ||
// KMP_OS_OPENBSD || KMP_OS_WINDOWS || KMP_OS_DARWIN || KMP_OS_HURD ||
- // KMP_OS_SOLARIS || KMP_OS_WASI
+ // KMP_OS_SOLARIS || KMP_OS_WASI || KMP_OS_AIX
#elif KMP_ARCH_X86 || KMP_ARCH_ARM || KMP_ARCH_AARCH || KMP_ARCH_MIPS || \
- KMP_ARCH_WASM
+ KMP_ARCH_WASM || KMP_ARCH_PPC
#if KMP_OS_LINUX || KMP_OS_DRAGONFLY || KMP_OS_FREEBSD || KMP_OS_NETBSD || \
KMP_OS_OPENBSD || KMP_OS_WINDOWS || KMP_OS_HURD || KMP_OS_SOLARIS || \
- KMP_OS_WASI
+ KMP_OS_WASI || KMP_OS_AIX
// basic tuning
diff --git a/openmp/runtime/src/kmp_settings.cpp b/openmp/runtime/src/kmp_settings.cpp
index e731bf45e8ee..30a4c05fe76b 100644
--- a/openmp/runtime/src/kmp_settings.cpp
+++ b/openmp/runtime/src/kmp_settings.cpp
@@ -6171,9 +6171,9 @@ void __kmp_env_initialize(char const *string) {
// specifier, even as substrings.
//
// I can't find a case-insensitive version of strstr on Windows* OS.
- // Use the case-sensitive version for now.
+ // Use the case-sensitive version for now. AIX does the same.
-#if KMP_OS_WINDOWS
+#if KMP_OS_WINDOWS || KMP_OS_AIX
#define FIND strstr
#else
#define FIND strcasestr
diff --git a/openmp/runtime/src/kmp_wrapper_getpid.h b/openmp/runtime/src/kmp_wrapper_getpid.h
index f9d7f4804fbc..d31c6e80f75d 100644
--- a/openmp/runtime/src/kmp_wrapper_getpid.h
+++ b/openmp/runtime/src/kmp_wrapper_getpid.h
@@ -17,7 +17,9 @@
// On Unix-like systems (Linux* OS and OS X*) getpid() is declared in standard
// headers.
+#if !defined(KMP_OS_AIX)
#include <sys/syscall.h>
+#endif
#include <sys/types.h>
#include <unistd.h>
#if KMP_OS_DARWIN
@@ -31,6 +33,9 @@
#define __kmp_gettid() _lwp_self()
#elif KMP_OS_OPENBSD
#define __kmp_gettid() getthrid()
+#elif KMP_OS_AIX
+#include <pthread.h>
+#define __kmp_gettid() pthread_self()
#elif defined(SYS_gettid)
// Hopefully other Unix systems define SYS_gettid syscall for getting os thread
// id
diff --git a/openmp/runtime/src/z_Linux_util.cpp b/openmp/runtime/src/z_Linux_util.cpp
index c2df8895e887..f01fa647c4d4 100644
--- a/openmp/runtime/src/z_Linux_util.cpp
+++ b/openmp/runtime/src/z_Linux_util.cpp
@@ -29,7 +29,9 @@
#include <semaphore.h>
#endif // KMP_OS_LINUX
#include <sys/resource.h>
+#if !KMP_OS_AIX
#include <sys/syscall.h>
+#endif
#include <sys/time.h>
#include <sys/times.h>
#include <unistd.h>
@@ -1832,7 +1834,7 @@ static int __kmp_get_xproc(void) {
__kmp_type_convert(sysconf(_SC_NPROCESSORS_CONF), &(r));
#elif KMP_OS_DRAGONFLY || KMP_OS_FREEBSD || KMP_OS_NETBSD || KMP_OS_OPENBSD || \
- KMP_OS_HURD || KMP_OS_SOLARIS || KMP_OS_WASI
+ KMP_OS_HURD || KMP_OS_SOLARIS || KMP_OS_WASI || KMP_OS_AIX
__kmp_type_convert(sysconf(_SC_NPROCESSORS_ONLN), &(r));
@@ -2210,9 +2212,9 @@ int __kmp_is_address_mapped(void *addr) {
}
#elif KMP_OS_WASI
found = (int)addr < (__builtin_wasm_memory_size(0) * PAGESIZE);
-#elif KMP_OS_DRAGONFLY || KMP_OS_SOLARIS
+#elif KMP_OS_DRAGONFLY || KMP_OS_SOLARIS || KMP_OS_AIX
- // FIXME(DragonFly, Solaris): Implement this
+ // FIXME(DragonFly, Solaris, AIX): Implement this
found = 1;
#else
@@ -2317,7 +2319,7 @@ int __kmp_get_load_balance(int max) {
// Open "/proc/" directory.
proc_dir = opendir("/proc");
if (proc_dir == NULL) {
- // Cannot open "/prroc/". Probably the kernel does not support it. Return an
+ // Cannot open "/proc/". Probably the kernel does not support it. Return an
// error now and in subsequent calls.
running_threads = -1;
permanent_error = 1;
@@ -2330,9 +2332,14 @@ int __kmp_get_load_balance(int max) {
proc_entry = readdir(proc_dir);
while (proc_entry != NULL) {
+#if KMP_OS_AIX
+ // Proc entry name starts with a digit. Assume it is a process' directory.
+ if (isdigit(proc_entry->d_name[0])) {
+#else
// Proc entry is a directory and name starts with a digit. Assume it is a
// process' directory.
if (proc_entry->d_type == DT_DIR && isdigit(proc_entry->d_name[0])) {
+#endif
#ifdef KMP_DEBUG
++total_processes;
@@ -2376,7 +2383,11 @@ int __kmp_get_load_balance(int max) {
task_entry = readdir(task_dir);
while (task_entry != NULL) {
// It is a directory and name starts with a digit.
+#if KMP_OS_AIX
+ if (isdigit(task_entry->d_name[0])) {
+#else
if (proc_entry->d_type == DT_DIR && isdigit(task_entry->d_name[0])) {
+#endif
// Construct complete stat file path. Easiest way would be:
// __kmp_str_buf_print( & stat_path, "%s/%s/stat", task_path.str,
@@ -2486,7 +2497,7 @@ finish: // Clean up and exit.
#if !(KMP_ARCH_X86 || KMP_ARCH_X86_64 || KMP_MIC || \
((KMP_OS_LINUX || KMP_OS_DARWIN) && KMP_ARCH_AARCH64) || \
KMP_ARCH_PPC64 || KMP_ARCH_RISCV64 || KMP_ARCH_LOONGARCH64 || \
- KMP_ARCH_ARM || KMP_ARCH_VE || KMP_ARCH_S390X)
+ KMP_ARCH_ARM || KMP_ARCH_VE || KMP_ARCH_S390X || KMP_ARCH_PPC_XCOFF)
// we really only need the case with 1 argument, because CLANG always build
// a struct of pointers to shared variables referenced in the outlined function
diff --git a/openmp/runtime/test/lit.cfg b/openmp/runtime/test/lit.cfg
index 27ff057c85f6..4a457f4cc41f 100644
--- a/openmp/runtime/test/lit.cfg
+++ b/openmp/runtime/test/lit.cfg
@@ -108,6 +108,18 @@ if config.has_ompt:
if config.has_ompx_taskgraph:
config.available_features.add("ompx_taskgraph")
+if config.operating_system == 'AIX':
+ config.available_features.add("aix")
+ object_mode = os.environ.get('OBJECT_MODE', '32')
+ if object_mode == '64':
+ config.test_flags += " -m64"
+ elif object_mode == '32':
+ # Set the user data area to 2GB, since the default of 256MB in 32-bit
+ # mode is not sufficient to run LIT tests on systems with many CPUs,
+ # where one worker thread is created per CPU and each worker thread
+ # uses a 4MB stack.
+ config.test_flags += " -Wl,-bmaxdata:0x80000000"
+
if 'Linux' in config.operating_system:
config.available_features.add("linux")
diff --git a/polly/lib/Transform/ScheduleOptimizer.cpp b/polly/lib/Transform/ScheduleOptimizer.cpp
index 35a0a4def040..8ee2b66339ad 100644
--- a/polly/lib/Transform/ScheduleOptimizer.cpp
+++ b/polly/lib/Transform/ScheduleOptimizer.cpp
@@ -96,6 +96,13 @@ static cl::opt<std::string>
cl::desc("Maximize the band depth (yes/no)"), cl::Hidden,
cl::init("yes"), cl::cat(PollyCategory));
+static cl::opt<int>
+ ScheduleComputeOut("polly-schedule-computeout",
+ cl::desc("Bound the scheduler by maximal amount"
+ "of computational steps. "),
+ cl::Hidden, cl::init(300000), cl::ZeroOrMore,
+ cl::cat(PollyCategory));
+
static cl::opt<bool>
GreedyFusion("polly-loopfusion-greedy",
cl::desc("Aggressively try to fuse everything"), cl::Hidden,
@@ -860,7 +867,25 @@ static void runIslScheduleOptimizer(
SC = SC.set_proximity(Proximity);
SC = SC.set_validity(Validity);
SC = SC.set_coincidence(Validity);
+
+ // Save the current ISL operation limit and bound the schedule computation.
+ long MaxOperations = isl_ctx_get_max_operations(Ctx);
+ isl_ctx_set_max_operations(Ctx, ScheduleComputeOut);
Schedule = SC.compute_schedule();
+ bool ScheduleQuota = false;
+ if (isl_ctx_last_error(Ctx) == isl_error_quota) {
+ isl_ctx_reset_error(Ctx);
+ LLVM_DEBUG(
+ dbgs() << "Schedule optimizer calculation exceeds ISL quota\n");
+ ScheduleQuota = true;
+ }
+ isl_options_set_on_error(Ctx, ISL_ON_ERROR_ABORT);
+ isl_ctx_reset_operations(Ctx);
+ isl_ctx_set_max_operations(Ctx, MaxOperations);
+
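+ // If the operation quota was exceeded, bail out without rescheduling this
+ // SCoP, keeping its original schedule.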
+ if (ScheduleQuota)
+ return;
+
isl_options_set_on_error(Ctx, OnErrorStatus);
ScopsRescheduled++;
diff --git a/polly/test/ScheduleOptimizer/schedule_computeout.ll b/polly/test/ScheduleOptimizer/schedule_computeout.ll
new file mode 100644
index 000000000000..eb59f0e36ac6
--- /dev/null
+++ b/polly/test/ScheduleOptimizer/schedule_computeout.ll
@@ -0,0 +1,99 @@
+; RUN: opt %loadPolly -S -polly-optree -polly-delicm -polly-opt-isl -polly-schedule-computeout=100000 -debug-only="polly-opt-isl" < %s 2>&1 | FileCheck %s
+; REQUIRES: asserts
+
+; Bail out if the schedule computation exceeds the maximum scheduling quota.
+; The max compute-out defaults to 300000; here it is set to 100000 for test purposes.
+
+target datalayout = "e-m:e-i8:8:32-i16:16:32-i64:64-i128:128-n32:64-S128"
+target triple = "aarch64-unknown-linux-gnu"
+
+@a = dso_local local_unnamed_addr global ptr null, align 8
+@b = dso_local local_unnamed_addr global ptr null, align 8
+@c = dso_local local_unnamed_addr global ptr null, align 8
+
+define dso_local void @foo(i32 noundef %I, i32 noundef %J, i32 noundef %K1, i32 noundef %K2, i32 noundef %L1, i32 noundef %L2) local_unnamed_addr {
+entry:
+ %j = alloca i32, align 4
+ store volatile i32 0, ptr %j, align 4
+ %j.0.j.0.j.0.54 = load volatile i32, ptr %j, align 4
+ %cmp55 = icmp slt i32 %j.0.j.0.j.0.54, %J
+ br i1 %cmp55, label %for.body.lr.ph, label %for.cond.cleanup
+
+for.body.lr.ph: ; preds = %entry
+ %0 = load ptr, ptr @a, align 8
+ %1 = load ptr, ptr @b, align 8
+ %2 = load ptr, ptr %1, align 8
+ %cmp352 = icmp slt i32 %L1, %L2
+ %cmp750 = icmp slt i32 %K1, %K2
+ %3 = sext i32 %K1 to i64
+ %4 = sext i32 %L1 to i64
+ br label %for.body
+
+for.cond.cleanup: ; preds = %for.cond.cleanup4, %entry
+ ret void
+
+for.body: ; preds = %for.cond.cleanup4, %for.body.lr.ph
+ br i1 %cmp352, label %for.cond6.preheader.preheader, label %for.cond.cleanup4
+
+for.cond6.preheader.preheader: ; preds = %for.body
+ %wide.trip.count66 = sext i32 %L2 to i64
+ br label %for.cond6.preheader
+
+for.cond6.preheader: ; preds = %for.cond.cleanup8, %for.cond6.preheader.preheader
+ %indvars.iv61 = phi i64 [ %4, %for.cond6.preheader.preheader ], [ %indvars.iv.next62, %for.cond.cleanup8 ]
+ br i1 %cmp750, label %for.cond10.preheader.lr.ph, label %for.cond.cleanup8
+
+for.cond10.preheader.lr.ph: ; preds = %for.cond6.preheader
+ %5 = mul nsw i64 %indvars.iv61, 516
+ %6 = mul nsw i64 %indvars.iv61, 516
+ %wide.trip.count = sext i32 %K2 to i64
+ br label %for.cond10.preheader
+
+for.cond.cleanup4: ; preds = %for.cond.cleanup8, %for.body
+ %j.0.j.0.j.0.45 = load volatile i32, ptr %j, align 4
+ %inc34 = add nsw i32 %j.0.j.0.j.0.45, 1
+ store volatile i32 %inc34, ptr %j, align 4
+ %j.0.j.0.j.0. = load volatile i32, ptr %j, align 4
+ %cmp = icmp slt i32 %j.0.j.0.j.0., %J
+ br i1 %cmp, label %for.body, label %for.cond.cleanup
+
+for.cond10.preheader: ; preds = %for.cond.cleanup12, %for.cond10.preheader.lr.ph
+ %indvars.iv = phi i64 [ %3, %for.cond10.preheader.lr.ph ], [ %indvars.iv.next, %for.cond.cleanup12 ]
+ %7 = getelementptr float, ptr %0, i64 %indvars.iv
+ %arrayidx18 = getelementptr float, ptr %7, i64 %5
+ %8 = load float, ptr %arrayidx18, align 4
+ br label %for.cond14.preheader
+
+for.cond.cleanup8: ; preds = %for.cond.cleanup12, %for.cond6.preheader
+ %indvars.iv.next62 = add nsw i64 %indvars.iv61, 1
+ %exitcond67.not = icmp eq i64 %indvars.iv.next62, %wide.trip.count66
+ br i1 %exitcond67.not, label %for.cond.cleanup4, label %for.cond6.preheader
+
+for.cond14.preheader: ; preds = %for.cond.cleanup16, %for.cond10.preheader
+ %m.049 = phi i32 [ -2, %for.cond10.preheader ], [ %inc21, %for.cond.cleanup16 ]
+ %sum.048 = phi float [ 0.000000e+00, %for.cond10.preheader ], [ %add19, %for.cond.cleanup16 ]
+ br label %for.body17
+
+for.cond.cleanup12: ; preds = %for.cond.cleanup16
+ %9 = getelementptr float, ptr %2, i64 %indvars.iv
+ %arrayidx26 = getelementptr float, ptr %9, i64 %6
+ store float %add19, ptr %arrayidx26, align 4
+ %indvars.iv.next = add nsw i64 %indvars.iv, 1
+ %exitcond60.not = icmp eq i64 %indvars.iv.next, %wide.trip.count
+ br i1 %exitcond60.not, label %for.cond.cleanup8, label %for.cond10.preheader
+
+for.cond.cleanup16: ; preds = %for.body17
+ %inc21 = add nsw i32 %m.049, 1
+ %exitcond56.not = icmp eq i32 %inc21, 3
+ br i1 %exitcond56.not, label %for.cond.cleanup12, label %for.cond14.preheader
+
+for.body17: ; preds = %for.body17, %for.cond14.preheader
+ %n.047 = phi i32 [ -2, %for.cond14.preheader ], [ %inc, %for.body17 ]
+ %sum.146 = phi float [ %sum.048, %for.cond14.preheader ], [ %add19, %for.body17 ]
+ %add19 = fadd float %sum.146, %8
+ %inc = add nsw i32 %n.047, 1
+ %exitcond.not = icmp eq i32 %inc, 3
+ br i1 %exitcond.not, label %for.cond.cleanup16, label %for.body17
+}
+
+; CHECK: Schedule optimizer calculation exceeds ISL quota
diff --git a/utils/bazel/llvm-project-overlay/llvm/BUILD.bazel b/utils/bazel/llvm-project-overlay/llvm/BUILD.bazel
index f035a17833d8..1110daa62a1c 100644
--- a/utils/bazel/llvm-project-overlay/llvm/BUILD.bazel
+++ b/utils/bazel/llvm-project-overlay/llvm/BUILD.bazel
@@ -2187,7 +2187,7 @@ llvm_target_lib_list = [lib for lib in [
("-gen-callingconv", "lib/Target/X86/X86GenCallingConv.inc"),
("-gen-subtarget", "lib/Target/X86/X86GenSubtargetInfo.inc"),
("-gen-x86-fold-tables -asmwriternum=1", "lib/Target/X86/X86GenFoldTables.inc"),
- ("-gen-x86-EVEX2VEX-tables", "lib/Target/X86/X86GenEVEX2VEXTables.inc"),
+ ("-gen-x86-compress-evex-tables", "lib/Target/X86/X86GenCompressEVEXTables.inc"),
("-gen-exegesis", "lib/Target/X86/X86GenExegesis.inc"),
("-gen-x86-mnemonic-tables -asmwriternum=1", "lib/Target/X86/X86GenMnemonicTables.inc"),
],
@@ -2719,6 +2719,7 @@ cc_library(
]),
hdrs = glob([
"include/llvm/ExecutionEngine/JITLink/*.h",
+ "include/llvm/ExecutionEngine/Orc/*.h",
]),
copts = llvm_copts,
deps = [